diff --git a/external-service-impl/mqtt/pom.xml b/external-service-impl/mqtt/pom.xml
index 5ec51069b212e..b28c1b6491806 100644
--- a/external-service-impl/mqtt/pom.xml
+++ b/external-service-impl/mqtt/pom.xml
@@ -172,6 +172,10 @@
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+
diff --git a/external-service-impl/mqtt/src/main/i18n/en/org/apache/iotdb/mqtt/i18n/MqttMessages.java b/external-service-impl/mqtt/src/main/i18n/en/org/apache/iotdb/mqtt/i18n/MqttMessages.java
new file mode 100644
index 0000000000000..81e24b940c605
--- /dev/null
+++ b/external-service-impl/mqtt/src/main/i18n/en/org/apache/iotdb/mqtt/i18n/MqttMessages.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.mqtt.i18n;
+
+public final class MqttMessages {
+
+  // --- LinePayloadFormatter --- ("{}" is an SLF4J placeholder for the offending line)
+  public static final String INVALID_LINE_PROTOCOL = "Invalid line protocol format, line is {}";
+  public static final String TAGS_ERROR = "The tags are invalid, line is {}";
+  public static final String ATTRIBUTES_ERROR = "The attributes are invalid, line is {}";
+  public static final String FIELDS_ERROR = "The fields are invalid, line is {}";
+  public static final String TIMESTAMP_ERROR = "The timestamp is invalid, line is {}";
+
+  // --- MPPPublishHandler --- (trailing "error is " precedes the logged Throwable)
+  public static final String ON_PUBLISH_EXCEPTION =
+      "onPublish execution exception, msg is [{}], error is ";
+  public static final String PROCESS_RESULT = "process result: {}";
+
+  // --- MQTTService ---
+  public static final String SERVER_START_EXCEPTION = "Exception while starting server";
+  public static final String STOPPING_MQTT_SERVICE = "Stopping IoTDB MQTT service...";
+  public static final String MQTT_SERVICE_STOPPED = "IoTDB MQTT service stopped.";
+
+  // --- PayloadFormatManager ---
+  public static final String MQTT_DIR = "mqttDir: {}";
+  public static final String PAYLOAD_FORMAT_MANAGER_INIT_ERROR =
+      "MQTT PayloadFormatManager init() error.";
+  public static final String FORMATTER_IS_NULL = "PayloadFormatManager(), formatter is null.";
+  public static final String FIND_MQTT_PLUGIN =
+      "PayloadFormatManager(), find MQTT Payload Plugin {}.";
+  public static final String MQTT_PLUGIN_JAR_URLS = "MQTT Plugin jarURLs: {}";
+
+  // --- JSONPayloadFormatter --- (exception message for JsonParseException)
+  public static final String PAYLOAD_INVALID = "payload is invalid";
+
+  private MqttMessages() {}
+}
diff --git a/external-service-impl/mqtt/src/main/i18n/zh/org/apache/iotdb/mqtt/i18n/MqttMessages.java b/external-service-impl/mqtt/src/main/i18n/zh/org/apache/iotdb/mqtt/i18n/MqttMessages.java
new file mode 100644
index 0000000000000..eb9b4dc62c5f5
--- /dev/null
+++ b/external-service-impl/mqtt/src/main/i18n/zh/org/apache/iotdb/mqtt/i18n/MqttMessages.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.mqtt.i18n;
+
+public final class MqttMessages {
+
+  // --- LinePayloadFormatter --- (zh variant; constant names must match the en file)
+  public static final String INVALID_LINE_PROTOCOL = "行协议格式无效,行内容:{}";
+  public static final String TAGS_ERROR = "标签格式错误,行内容:{}";
+  public static final String ATTRIBUTES_ERROR = "属性格式错误,行内容:{}";
+  public static final String FIELDS_ERROR = "字段格式错误,行内容:{}";
+  public static final String TIMESTAMP_ERROR = "时间戳格式错误,行内容:{}";
+
+  // --- MPPPublishHandler --- ("{}" is an SLF4J placeholder, not literal message text)
+  public static final String ON_PUBLISH_EXCEPTION =
+      "onPublish 执行异常,消息为 [{}],错误:";
+  public static final String PROCESS_RESULT = "处理结果:{}";
+
+  // --- MQTTService --- (lifecycle log messages)
+  public static final String SERVER_START_EXCEPTION = "启动服务器时发生异常";
+  public static final String STOPPING_MQTT_SERVICE = "正在停止 IoTDB MQTT 服务...";
+  public static final String MQTT_SERVICE_STOPPED = "IoTDB MQTT 服务已停止。";
+
+  // --- PayloadFormatManager --- (plugin discovery / init log messages)
+  public static final String MQTT_DIR = "mqttDir:{}";
+  public static final String PAYLOAD_FORMAT_MANAGER_INIT_ERROR =
+      "MQTT PayloadFormatManager init() 出错。";
+  public static final String FORMATTER_IS_NULL = "PayloadFormatManager(),formatter 为 null。";
+  public static final String FIND_MQTT_PLUGIN =
+      "PayloadFormatManager(),找到 MQTT Payload 插件 {}。";
+  public static final String MQTT_PLUGIN_JAR_URLS = "MQTT 插件 jarURLs:{}";
+
+  // --- JSONPayloadFormatter --- (exception message for JsonParseException)
+  public static final String PAYLOAD_INVALID = "payload 无效";
+
+  private MqttMessages() {}
+}
diff --git a/external-service-impl/mqtt/src/main/java/org/apache/iotdb/mqtt/JSONPayloadFormatter.java b/external-service-impl/mqtt/src/main/java/org/apache/iotdb/mqtt/JSONPayloadFormatter.java
index 7a348c850b42d..72adde783df42 100644
--- a/external-service-impl/mqtt/src/main/java/org/apache/iotdb/mqtt/JSONPayloadFormatter.java
+++ b/external-service-impl/mqtt/src/main/java/org/apache/iotdb/mqtt/JSONPayloadFormatter.java
@@ -18,6 +18,8 @@
package org.apache.iotdb.mqtt;
+import org.apache.iotdb.mqtt.i18n.MqttMessages;
+
import com.google.common.collect.Lists;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
@@ -79,7 +81,7 @@ public List format(String topic, ByteBuf payload) {
}
return messages;
}
- throw new JsonParseException("payload is invalidate");
+ throw new JsonParseException(MqttMessages.PAYLOAD_INVALID);
}
@Override
diff --git a/external-service-impl/mqtt/src/main/java/org/apache/iotdb/mqtt/LinePayloadFormatter.java b/external-service-impl/mqtt/src/main/java/org/apache/iotdb/mqtt/LinePayloadFormatter.java
index f80c3eb0b5e4f..3c8b9ae044d3d 100644
--- a/external-service-impl/mqtt/src/main/java/org/apache/iotdb/mqtt/LinePayloadFormatter.java
+++ b/external-service-impl/mqtt/src/main/java/org/apache/iotdb/mqtt/LinePayloadFormatter.java
@@ -18,6 +18,8 @@
package org.apache.iotdb.mqtt;
+import org.apache.iotdb.mqtt.i18n.MqttMessages;
+
import io.netty.buffer.ByteBuf;
import org.apache.tsfile.enums.TSDataType;
import org.apache.tsfile.external.commons.lang3.NotImplementedException;
@@ -83,7 +85,7 @@ public List format(String topic, ByteBuf payload) {
try {
Matcher matcher = pattern.matcher(line.trim());
if (!matcher.matches()) {
- log.warn("Invalid line protocol format ,line is {}", line);
+ log.warn(MqttMessages.INVALID_LINE_PROTOCOL, line);
continue;
}
@@ -95,25 +97,25 @@ public List format(String topic, ByteBuf payload) {
// Parsing Tags
if (!setTags(matcher, message)) {
- log.warn("The tags is error , line is {}", line);
+ log.warn(MqttMessages.TAGS_ERROR, line);
continue;
}
// Parsing Attributes
if (!setAttributes(matcher, message)) {
- log.warn("The attributes is error , line is {}", line);
+ log.warn(MqttMessages.ATTRIBUTES_ERROR, line);
continue;
}
// Parsing Fields
if (!setFields(matcher, message)) {
- log.warn("The fields is error , line is {}", line);
+ log.warn(MqttMessages.FIELDS_ERROR, line);
continue;
}
// Parsing timestamp
if (!setTimestamp(matcher, message)) {
- log.warn("The timestamp is error , line is {}", line);
+ log.warn(MqttMessages.TIMESTAMP_ERROR, line);
continue;
}
diff --git a/external-service-impl/mqtt/src/main/java/org/apache/iotdb/mqtt/MPPPublishHandler.java b/external-service-impl/mqtt/src/main/java/org/apache/iotdb/mqtt/MPPPublishHandler.java
index 6e8ce1fd7cbae..60e22ebd9dcc7 100644
--- a/external-service-impl/mqtt/src/main/java/org/apache/iotdb/mqtt/MPPPublishHandler.java
+++ b/external-service-impl/mqtt/src/main/java/org/apache/iotdb/mqtt/MPPPublishHandler.java
@@ -43,6 +43,7 @@
import org.apache.iotdb.db.queryengine.plan.statement.crud.InsertRowStatement;
import org.apache.iotdb.db.queryengine.plan.statement.crud.InsertTabletStatement;
import org.apache.iotdb.db.utils.CommonUtils;
+import org.apache.iotdb.mqtt.i18n.MqttMessages;
import org.apache.iotdb.rpc.TSStatusCode;
import org.apache.iotdb.service.rpc.thrift.TSProtocolVersion;
@@ -159,7 +160,7 @@ public void onPublish(InterceptPublishMessage msg) {
}
}
} catch (Throwable t) {
- LOG.warn("onPublish execution exception, msg is [{}], error is ", msg, t);
+ LOG.warn(MqttMessages.ON_PUBLISH_EXCEPTION, msg, t);
} finally {
// release the payload of the message
super.onPublish(msg);
@@ -191,7 +192,7 @@ private void insertTable(TableMessage message, MqttClientSession session) {
tsStatus = result.status;
if (LOG.isDebugEnabled()) {
- LOG.debug("process result: {}", tsStatus);
+ LOG.debug(MqttMessages.PROCESS_RESULT, tsStatus);
}
if (tsStatus.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()
&& tsStatus.getCode() != TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()) {
@@ -310,7 +311,7 @@ private void insertTree(TreeMessage message, MqttClientSession session) {
false);
tsStatus = result.status;
if (LOG.isDebugEnabled()) {
- LOG.debug("process result: {}", tsStatus);
+ LOG.debug(MqttMessages.PROCESS_RESULT, tsStatus);
}
if (tsStatus.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()
&& tsStatus.getCode() != TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()) {
diff --git a/external-service-impl/mqtt/src/main/java/org/apache/iotdb/mqtt/MQTTService.java b/external-service-impl/mqtt/src/main/java/org/apache/iotdb/mqtt/MQTTService.java
index 3b2be5ac3f4f1..7c9ed2d475501 100644
--- a/external-service-impl/mqtt/src/main/java/org/apache/iotdb/mqtt/MQTTService.java
+++ b/external-service-impl/mqtt/src/main/java/org/apache/iotdb/mqtt/MQTTService.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.externalservice.api.IExternalService;
+import org.apache.iotdb.mqtt.i18n.MqttMessages;
import io.moquette.BrokerConstants;
import io.moquette.broker.Server;
@@ -62,7 +63,7 @@ public void startup() {
try {
server.startServer(config, handlers, null, authenticator, null);
} catch (IOException e) {
- throw new RuntimeException("Exception while starting server", e);
+ throw new RuntimeException(MqttMessages.SERVER_START_EXCEPTION, e);
}
LOG.info(
@@ -74,9 +75,9 @@ public void startup() {
.addShutdownHook(
new Thread(
() -> {
- LOG.info("Stopping IoTDB MQTT service...");
+ LOG.info(MqttMessages.STOPPING_MQTT_SERVICE);
shutdown();
- LOG.info("IoTDB MQTT service stopped.");
+ LOG.info(MqttMessages.MQTT_SERVICE_STOPPED);
}));
}
diff --git a/external-service-impl/mqtt/src/main/java/org/apache/iotdb/mqtt/PayloadFormatManager.java b/external-service-impl/mqtt/src/main/java/org/apache/iotdb/mqtt/PayloadFormatManager.java
index 228b890b47f91..80bf09f4efc98 100644
--- a/external-service-impl/mqtt/src/main/java/org/apache/iotdb/mqtt/PayloadFormatManager.java
+++ b/external-service-impl/mqtt/src/main/java/org/apache/iotdb/mqtt/PayloadFormatManager.java
@@ -20,6 +20,7 @@
import org.apache.iotdb.commons.file.SystemFileFactory;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.mqtt.i18n.MqttMessages;
import com.google.common.base.Preconditions;
import org.apache.tsfile.external.commons.io.FileUtils;
@@ -54,13 +55,13 @@ private PayloadFormatManager() {}
private static void init() {
mqttDir = IoTDBDescriptor.getInstance().getConfig().getMqttDir();
- logger.info("mqttDir: {}", mqttDir);
+ logger.info(MqttMessages.MQTT_DIR, mqttDir);
try {
makeMqttPluginDir();
buildMqttPluginMap();
} catch (IOException e) {
- logger.error("MQTT PayloadFormatManager init() error.", e);
+ logger.error(MqttMessages.PAYLOAD_FORMAT_MANAGER_INIT_ERROR, e);
}
}
@@ -83,17 +84,17 @@ private static void buildMqttPluginMap() throws IOException {
ServiceLoader.load(PayloadFormatter.class, PayloadFormatManager.class.getClassLoader());
for (PayloadFormatter formatter : payloadFormatters) {
if (formatter == null) {
- logger.error("PayloadFormatManager(), formatter is null.");
+ logger.error(MqttMessages.FORMATTER_IS_NULL);
continue;
}
String pluginName = formatter.getName();
mqttPayloadPluginMap.put(pluginName, formatter);
- logger.info("PayloadFormatManager(), find MQTT Payload Plugin {}.", pluginName);
+ logger.info(MqttMessages.FIND_MQTT_PLUGIN, pluginName);
}
URL[] jarURLs = getPluginJarURLs(mqttDir);
- logger.debug("MQTT Plugin jarURLs: {}", Arrays.toString(jarURLs));
+ logger.debug(MqttMessages.MQTT_PLUGIN_JAR_URLS, Arrays.toString(jarURLs));
for (URL jarUrl : jarURLs) {
ClassLoader classLoader = new URLClassLoader(new URL[] {jarUrl});
@@ -104,7 +105,7 @@ private static void buildMqttPluginMap() throws IOException {
for (PayloadFormatter formatter : payloadFormatters2) {
if (formatter == null) {
- logger.error("PayloadFormatManager(), formatter is null.");
+ logger.error(MqttMessages.FORMATTER_IS_NULL);
continue;
}
@@ -113,7 +114,7 @@ private static void buildMqttPluginMap() throws IOException {
continue;
}
mqttPayloadPluginMap.put(pluginName, formatter);
- logger.info("PayloadFormatManager(), find MQTT Payload Plugin {}.", pluginName);
+ logger.info(MqttMessages.FIND_MQTT_PLUGIN, pluginName);
}
}
}
diff --git a/external-service-impl/rest/pom.xml b/external-service-impl/rest/pom.xml
index 9c3473a01c5f2..7ef130ace317e 100644
--- a/external-service-impl/rest/pom.xml
+++ b/external-service-impl/rest/pom.xml
@@ -214,6 +214,10 @@
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+
diff --git a/external-service-impl/rest/src/main/i18n/en/org/apache/iotdb/rest/i18n/RestMessages.java b/external-service-impl/rest/src/main/i18n/en/org/apache/iotdb/rest/i18n/RestMessages.java
new file mode 100644
index 0000000000000..bbe6f60a363bb
--- /dev/null
+++ b/external-service-impl/rest/src/main/i18n/en/org/apache/iotdb/rest/i18n/RestMessages.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.rest.i18n;
+
+public final class RestMessages {
+
+  // --- RestService --- ("{}" is an SLF4J placeholder filled with the exception message)
+  public static final String REST_SERVICE_START_FAILED = "RestService failed to start: {}";
+  public static final String REST_SERVICE_START_SUCCESS = "start RestService successfully";
+  public static final String REST_SERVICE_STOP_FAILED = "RestService failed to stop: {}";
+
+  // --- StatementConstructionHandler (v1 / v2 / table) --- (exception-message prefix; the rejected value is concatenated by the caller)
+  public static final String INVALID_INPUT = "Invalid input: ";
+
+  // --- RequestValidationHandler (v2) --- (IllegalArgumentException message)
+  public static final String PREFIX_PATHS_EMPTY = "prefix_paths should not be empty";
+
+  private RestMessages() {}
+}
diff --git a/external-service-impl/rest/src/main/i18n/zh/org/apache/iotdb/rest/i18n/RestMessages.java b/external-service-impl/rest/src/main/i18n/zh/org/apache/iotdb/rest/i18n/RestMessages.java
new file mode 100644
index 0000000000000..db2742dfd171a
--- /dev/null
+++ b/external-service-impl/rest/src/main/i18n/zh/org/apache/iotdb/rest/i18n/RestMessages.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.rest.i18n;
+
+public final class RestMessages {
+
+  // --- RestService --- (zh variant; constant names must match the en file)
+  public static final String REST_SERVICE_START_FAILED = "RestService 启动失败:{}";
+  public static final String REST_SERVICE_START_SUCCESS = "RestService 启动成功";
+  public static final String REST_SERVICE_STOP_FAILED = "RestService 停止失败:{}";
+
+  // --- StatementConstructionHandler (v1 / v2 / table) --- (exception-message prefix; value appended by caller)
+  public static final String INVALID_INPUT = "无效输入:";
+
+  // --- RequestValidationHandler (v2) --- (IllegalArgumentException message)
+  public static final String PREFIX_PATHS_EMPTY = "prefix_paths 不能为空";
+
+  private RestMessages() {}
+}
diff --git a/external-service-impl/rest/src/main/java/org/apache/iotdb/rest/RestService.java b/external-service-impl/rest/src/main/java/org/apache/iotdb/rest/RestService.java
index 9a90e6b18f2ea..2e072d87961c0 100644
--- a/external-service-impl/rest/src/main/java/org/apache/iotdb/rest/RestService.java
+++ b/external-service-impl/rest/src/main/java/org/apache/iotdb/rest/RestService.java
@@ -19,6 +19,7 @@
import org.apache.iotdb.db.conf.rest.IoTDBRestServiceConfig;
import org.apache.iotdb.db.conf.rest.IoTDBRestServiceDescriptor;
import org.apache.iotdb.externalservice.api.IExternalService;
+import org.apache.iotdb.rest.i18n.RestMessages;
import org.apache.iotdb.rest.protocol.filter.ApiOriginFilter;
import org.eclipse.jetty.http.HttpVersion;
@@ -108,10 +109,10 @@ private void serverStart() {
try {
server.start();
} catch (Exception e) {
- LOGGER.warn("RestService failed to start: {}", e.getMessage());
+ LOGGER.warn(RestMessages.REST_SERVICE_START_FAILED, e.getMessage());
server.destroy();
}
- LOGGER.info("start RestService successfully");
+ LOGGER.info(RestMessages.REST_SERVICE_START_SUCCESS);
}
@Override
@@ -136,7 +137,7 @@ public void stop() {
try {
server.stop();
} catch (Exception e) {
- LOGGER.warn("RestService failed to stop: {}", e.getMessage());
+ LOGGER.warn(RestMessages.REST_SERVICE_STOP_FAILED, e.getMessage());
} finally {
server.destroy();
}
diff --git a/external-service-impl/rest/src/main/java/org/apache/iotdb/rest/protocol/table/v1/handler/StatementConstructionHandler.java b/external-service-impl/rest/src/main/java/org/apache/iotdb/rest/protocol/table/v1/handler/StatementConstructionHandler.java
index e4f9c23c69a7b..28cf4d0c1b196 100644
--- a/external-service-impl/rest/src/main/java/org/apache/iotdb/rest/protocol/table/v1/handler/StatementConstructionHandler.java
+++ b/external-service-impl/rest/src/main/java/org/apache/iotdb/rest/protocol/table/v1/handler/StatementConstructionHandler.java
@@ -23,6 +23,7 @@
import org.apache.iotdb.commons.schema.table.column.TsTableColumnCategory;
import org.apache.iotdb.db.exception.WriteProcessRejectException;
import org.apache.iotdb.db.queryengine.plan.statement.crud.InsertTabletStatement;
+import org.apache.iotdb.rest.i18n.RestMessages;
import org.apache.iotdb.rest.protocol.table.v1.model.InsertTabletRequest;
import org.apache.tsfile.enums.ColumnCategory;
@@ -162,7 +163,8 @@ public static InsertTabletStatement constructInsertTabletStatement(
columns[columnIndex] = binaryValues;
break;
default:
- throw new IllegalArgumentException("Invalid input: " + rawDataType.get(columnIndex));
+ throw new IllegalArgumentException(
+ RestMessages.INVALID_INPUT + rawDataType.get(columnIndex));
}
}
insertStatement.setColumns(columns);
diff --git a/external-service-impl/rest/src/main/java/org/apache/iotdb/rest/protocol/v1/handler/StatementConstructionHandler.java b/external-service-impl/rest/src/main/java/org/apache/iotdb/rest/protocol/v1/handler/StatementConstructionHandler.java
index 74f679ec611d3..9fbd9ac669683 100644
--- a/external-service-impl/rest/src/main/java/org/apache/iotdb/rest/protocol/v1/handler/StatementConstructionHandler.java
+++ b/external-service-impl/rest/src/main/java/org/apache/iotdb/rest/protocol/v1/handler/StatementConstructionHandler.java
@@ -23,6 +23,7 @@
import org.apache.iotdb.db.exception.WriteProcessRejectException;
import org.apache.iotdb.db.queryengine.plan.analyze.cache.schema.DataNodeDevicePathCache;
import org.apache.iotdb.db.queryengine.plan.statement.crud.InsertTabletStatement;
+import org.apache.iotdb.rest.i18n.RestMessages;
import org.apache.iotdb.rest.protocol.v1.model.InsertTabletRequest;
import org.apache.tsfile.enums.TSDataType;
@@ -163,7 +164,8 @@ public static InsertTabletStatement constructInsertTabletStatement(
columns[columnIndex] = binaryValues;
break;
default:
- throw new IllegalArgumentException("Invalid input: " + rawDataType.get(columnIndex));
+ throw new IllegalArgumentException(
+ RestMessages.INVALID_INPUT + rawDataType.get(columnIndex));
}
}
diff --git a/external-service-impl/rest/src/main/java/org/apache/iotdb/rest/protocol/v2/handler/RequestValidationHandler.java b/external-service-impl/rest/src/main/java/org/apache/iotdb/rest/protocol/v2/handler/RequestValidationHandler.java
index 5f999b3ddccdf..b5b5858de5603 100644
--- a/external-service-impl/rest/src/main/java/org/apache/iotdb/rest/protocol/v2/handler/RequestValidationHandler.java
+++ b/external-service-impl/rest/src/main/java/org/apache/iotdb/rest/protocol/v2/handler/RequestValidationHandler.java
@@ -17,6 +17,7 @@
package org.apache.iotdb.rest.protocol.v2.handler;
+import org.apache.iotdb.rest.i18n.RestMessages;
import org.apache.iotdb.rest.protocol.v2.model.ExpressionRequest;
import org.apache.iotdb.rest.protocol.v2.model.InsertRecordsRequest;
import org.apache.iotdb.rest.protocol.v2.model.InsertTabletRequest;
@@ -44,7 +45,7 @@ public static void validateSQL(SQL sql) {
public static void validatePrefixPaths(PrefixPathList prefixPathList) {
Objects.requireNonNull(prefixPathList.getPrefixPaths(), "prefix_paths should not be null");
if (prefixPathList.getPrefixPaths().isEmpty()) {
- throw new IllegalArgumentException("prefix_paths should not be empty");
+ throw new IllegalArgumentException(RestMessages.PREFIX_PATHS_EMPTY);
}
}
diff --git a/external-service-impl/rest/src/main/java/org/apache/iotdb/rest/protocol/v2/handler/StatementConstructionHandler.java b/external-service-impl/rest/src/main/java/org/apache/iotdb/rest/protocol/v2/handler/StatementConstructionHandler.java
index 729517bf50031..679c4556e90c0 100644
--- a/external-service-impl/rest/src/main/java/org/apache/iotdb/rest/protocol/v2/handler/StatementConstructionHandler.java
+++ b/external-service-impl/rest/src/main/java/org/apache/iotdb/rest/protocol/v2/handler/StatementConstructionHandler.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.db.queryengine.plan.statement.crud.InsertRowStatement;
import org.apache.iotdb.db.queryengine.plan.statement.crud.InsertRowsStatement;
import org.apache.iotdb.db.queryengine.plan.statement.crud.InsertTabletStatement;
+import org.apache.iotdb.rest.i18n.RestMessages;
import org.apache.iotdb.rest.protocol.utils.InsertRowDataUtils;
import org.apache.iotdb.rest.protocol.v2.model.InsertRecordsRequest;
import org.apache.iotdb.rest.protocol.v2.model.InsertTabletRequest;
@@ -173,7 +174,8 @@ public static InsertTabletStatement constructInsertTabletStatement(
columns[columnIndex] = binaryValues;
break;
default:
- throw new IllegalArgumentException("Invalid input: " + rawDataType.get(columnIndex));
+ throw new IllegalArgumentException(
+ RestMessages.INVALID_INPUT + rawDataType.get(columnIndex));
}
}
diff --git a/iotdb-api/pipe-api/pom.xml b/iotdb-api/pipe-api/pom.xml
index 07f18c904b297..9935e244e9cdf 100644
--- a/iotdb-api/pipe-api/pom.xml
+++ b/iotdb-api/pipe-api/pom.xml
@@ -41,6 +41,14 @@
test
+
+
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+
+
+ get-jar-with-dependencies
diff --git a/iotdb-api/pipe-api/src/main/i18n/en/org/apache/iotdb/pipe/api/i18n/PipeApiMessages.java b/iotdb-api/pipe-api/src/main/i18n/en/org/apache/iotdb/pipe/api/i18n/PipeApiMessages.java
new file mode 100644
index 0000000000000..7e5e923729082
--- /dev/null
+++ b/iotdb-api/pipe-api/src/main/i18n/en/org/apache/iotdb/pipe/api/i18n/PipeApiMessages.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.pipe.api.i18n;
+
+public final class PipeApiMessages {
+
+  // --- PipeParameterValidator --- ("%s" is a String.format placeholder for the parameter key)
+  public static final String PARAMETER_SHOULD_BE_SET = "Parameter %s should be set.";
+
+  private PipeApiMessages() {}
+}
diff --git a/iotdb-api/pipe-api/src/main/i18n/zh/org/apache/iotdb/pipe/api/i18n/PipeApiMessages.java b/iotdb-api/pipe-api/src/main/i18n/zh/org/apache/iotdb/pipe/api/i18n/PipeApiMessages.java
new file mode 100644
index 0000000000000..66310327d331d
--- /dev/null
+++ b/iotdb-api/pipe-api/src/main/i18n/zh/org/apache/iotdb/pipe/api/i18n/PipeApiMessages.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.pipe.api.i18n;
+
+public final class PipeApiMessages {
+
+  // --- PipeParameterValidator --- (zh variant; "%s" is a String.format placeholder for the key)
+  public static final String PARAMETER_SHOULD_BE_SET = "参数 %s 必须设置。";
+
+  private PipeApiMessages() {}
+}
diff --git a/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/customizer/parameter/PipeParameterValidator.java b/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/customizer/parameter/PipeParameterValidator.java
index 99d547a23c6d2..d72eeb792e04f 100644
--- a/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/customizer/parameter/PipeParameterValidator.java
+++ b/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/customizer/parameter/PipeParameterValidator.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.pipe.api.exception.PipeAttributeNotProvidedException;
import org.apache.iotdb.pipe.api.exception.PipeParameterNotValidException;
+import org.apache.iotdb.pipe.api.i18n.PipeApiMessages;
import java.util.Arrays;
import java.util.Collections;
@@ -94,7 +95,8 @@ public PipeParameterValidator validateAttributeValueRange(
throws PipeAttributeNotProvidedException {
if (!parameters.hasAttribute(key)) {
if (!canBeOptional) {
- throw new PipeParameterNotValidException(String.format("Parameter %s should be set.", key));
+ throw new PipeParameterNotValidException(
+ String.format(PipeApiMessages.PARAMETER_SHOULD_BE_SET, key));
}
return this;
}
diff --git a/iotdb-api/trigger-api/pom.xml b/iotdb-api/trigger-api/pom.xml
index 909e23791c7b7..9a6c4546e36ce 100644
--- a/iotdb-api/trigger-api/pom.xml
+++ b/iotdb-api/trigger-api/pom.xml
@@ -35,6 +35,14 @@
${tsfile.version}
+
+
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+
+
+ get-jar-with-dependencies
diff --git a/iotdb-api/trigger-api/src/main/i18n/en/org/apache/iotdb/trigger/api/i18n/TriggerApiMessages.java b/iotdb-api/trigger-api/src/main/i18n/en/org/apache/iotdb/trigger/api/i18n/TriggerApiMessages.java
new file mode 100644
index 0000000000000..8621aa31fc4a7
--- /dev/null
+++ b/iotdb-api/trigger-api/src/main/i18n/en/org/apache/iotdb/trigger/api/i18n/TriggerApiMessages.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.trigger.api.i18n;
+
+public final class TriggerApiMessages {
+
+  // --- TriggerType --- ("%d" is a String.format placeholder for the unknown id)
+  public static final String NO_SUCH_TRIGGER_TYPE = "No such trigger type (id: %d)";
+
+  // --- TriggerEvent --- ("%d" is a String.format placeholder for the unknown id)
+  public static final String NO_SUCH_TRIGGER_EVENT = "No such trigger event (id: %d)";
+
+  // --- FailureStrategy --- (exception message for an unrecognized strategy id)
+  public static final String UNSUPPORTED_FAILURE_STRATEGY_TYPE =
+      "Unsupported FailureStrategy Type.";
+
+  private TriggerApiMessages() {}
+}
diff --git a/iotdb-api/trigger-api/src/main/i18n/zh/org/apache/iotdb/trigger/api/i18n/TriggerApiMessages.java b/iotdb-api/trigger-api/src/main/i18n/zh/org/apache/iotdb/trigger/api/i18n/TriggerApiMessages.java
new file mode 100644
index 0000000000000..778d8abef10a4
--- /dev/null
+++ b/iotdb-api/trigger-api/src/main/i18n/zh/org/apache/iotdb/trigger/api/i18n/TriggerApiMessages.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.trigger.api.i18n;
+
+public final class TriggerApiMessages {
+
+ // --- TriggerType ---
+ public static final String NO_SUCH_TRIGGER_TYPE = "不存在该触发器类型(id: %d)";
+
+ // --- TriggerEvent ---
+ public static final String NO_SUCH_TRIGGER_EVENT = "不存在该触发器事件(id: %d)";
+
+ // --- FailureStrategy ---
+ public static final String UNSUPPORTED_FAILURE_STRATEGY_TYPE =
+ "不支持的 FailureStrategy 类型。";
+
+ private TriggerApiMessages() {}
+}
diff --git a/iotdb-api/trigger-api/src/main/java/org/apache/iotdb/trigger/api/enums/FailureStrategy.java b/iotdb-api/trigger-api/src/main/java/org/apache/iotdb/trigger/api/enums/FailureStrategy.java
index 18d9478f14b46..8e3d2f6499217 100644
--- a/iotdb-api/trigger-api/src/main/java/org/apache/iotdb/trigger/api/enums/FailureStrategy.java
+++ b/iotdb-api/trigger-api/src/main/java/org/apache/iotdb/trigger/api/enums/FailureStrategy.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.trigger.api.enums;
import org.apache.iotdb.trigger.api.Trigger;
+import org.apache.iotdb.trigger.api.i18n.TriggerApiMessages;
import org.apache.tsfile.write.record.Tablet;
@@ -62,7 +63,8 @@ public static FailureStrategy construct(int id) {
case 1:
return FailureStrategy.PESSIMISTIC;
default:
- throw new UnsupportedOperationException("Unsupported FailureStrategy Type.");
+ throw new UnsupportedOperationException(
+ TriggerApiMessages.UNSUPPORTED_FAILURE_STRATEGY_TYPE);
}
}
}
diff --git a/iotdb-api/trigger-api/src/main/java/org/apache/iotdb/trigger/api/enums/TriggerEvent.java b/iotdb-api/trigger-api/src/main/java/org/apache/iotdb/trigger/api/enums/TriggerEvent.java
index a47c0b3a53fca..9be92aaaa842d 100644
--- a/iotdb-api/trigger-api/src/main/java/org/apache/iotdb/trigger/api/enums/TriggerEvent.java
+++ b/iotdb-api/trigger-api/src/main/java/org/apache/iotdb/trigger/api/enums/TriggerEvent.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.trigger.api.enums;
+import org.apache.iotdb.trigger.api.i18n.TriggerApiMessages;
+
public enum TriggerEvent {
BEFORE_INSERT((byte) 0, "BEFORE_INSERT"),
AFTER_INSERT((byte) 1, "AFTER_INSERT");
@@ -47,7 +49,8 @@ public static TriggerEvent construct(byte id) {
case 1:
return AFTER_INSERT;
default:
- throw new IllegalArgumentException(String.format("No such trigger event (id: %d)", id));
+ throw new IllegalArgumentException(
+ String.format(TriggerApiMessages.NO_SUCH_TRIGGER_EVENT, id));
}
}
}
diff --git a/iotdb-api/trigger-api/src/main/java/org/apache/iotdb/trigger/api/enums/TriggerType.java b/iotdb-api/trigger-api/src/main/java/org/apache/iotdb/trigger/api/enums/TriggerType.java
index bf801d7f46999..94a4441e37fce 100644
--- a/iotdb-api/trigger-api/src/main/java/org/apache/iotdb/trigger/api/enums/TriggerType.java
+++ b/iotdb-api/trigger-api/src/main/java/org/apache/iotdb/trigger/api/enums/TriggerType.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.trigger.api.enums;
+import org.apache.iotdb.trigger.api.i18n.TriggerApiMessages;
+
public enum TriggerType {
STATEFUL((byte) 0, "STATEFUL"),
STATELESS((byte) 1, "STATELESS");
@@ -47,7 +49,8 @@ public static TriggerType construct(byte id) {
case 1:
return STATELESS;
default:
- throw new IllegalArgumentException(String.format("No such trigger type (id: %d)", id));
+ throw new IllegalArgumentException(
+ String.format(TriggerApiMessages.NO_SUCH_TRIGGER_TYPE, id));
}
}
}
diff --git a/iotdb-api/udf-api/pom.xml b/iotdb-api/udf-api/pom.xml
index 4235a63430c15..9a22a1f6f82bb 100644
--- a/iotdb-api/udf-api/pom.xml
+++ b/iotdb-api/udf-api/pom.xml
@@ -35,6 +35,14 @@
${tsfile.version}
+
+
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+
+
+ get-jar-with-dependencies
diff --git a/iotdb-api/udf-api/src/main/i18n/en/org/apache/iotdb/udf/api/i18n/UdfApiMessages.java b/iotdb-api/udf-api/src/main/i18n/en/org/apache/iotdb/udf/api/i18n/UdfApiMessages.java
new file mode 100644
index 0000000000000..f66b0ec1b7bcf
--- /dev/null
+++ b/iotdb-api/udf-api/src/main/i18n/en/org/apache/iotdb/udf/api/i18n/UdfApiMessages.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.udf.api.i18n;
+
+public final class UdfApiMessages {
+
+ // MapTableFunctionHandle
+ public static final String UNSUPPORTED_VALUE_TYPE = "Unsupported value type.";
+ public static final String UNKNOWN_TYPE = "Unknown type: ";
+
+ // Argument
+ public static final String UNKNOWN_ARGUMENT_TYPE = "Unknown argument type: ";
+
+ // TableFunctionProcessorProvider
+ public static final String TABLE_FUNCTION_DOES_NOT_PROCESS_INPUT_DATA =
+ "this table function does not process input data";
+ public static final String TABLE_FUNCTION_DOES_NOT_PROCESS_LEAF_DATA =
+ "this table function does not process leaf data";
+
+ // DescribedSchema
+ public static final String DESCRIBED_SCHEMA_HAS_NO_FIELDS = "DescribedSchema has no fields";
+
+ // ScalarArgument
+ public static final String UNKNOWN_SCALAR_ARG_TYPE = "Unknown type: ";
+
+ // TableArgument
+ public static final String FIELD_NAMES_AND_TYPES_MUST_HAVE_SAME_SIZE =
+ "fieldNames and fieldTypes must have the same size";
+
+ // ParameterSpecification
+ public static final String NON_NULL_DEFAULT_VALUE_FOR_REQUIRED_ARG =
+ "non-null default value for a required argument";
+
+ // ScalarFunctionAnalysis
+ public static final String SCALAR_FUNCTION_ANALYSIS_OUTPUT_DATA_TYPE_NOT_SET =
+ "ScalarFunctionAnalysis outputDataType is not set.";
+
+ // AggregateFunctionAnalysis
+ public static final String AGGREGATE_FUNCTION_ANALYSIS_OUTPUT_DATA_TYPE_NOT_SET =
+ "AggregateFunctionAnalysis outputDataType is not set.";
+
+ // UDTFConfigurations
+ public static final String ACCESS_STRATEGY_NOT_SET = "Access strategy is not set.";
+
+ // UDFConfigurations
+ public static final String UDF_OUTPUT_DATA_TYPE_NOT_SET = "UDF outputDataType is not set.";
+
+ // SlidingTimeWindowAccessStrategy
+ public static final String METHOD_DEPRECATED_SINCE_V014 =
+ "The method is deprecated since v0.14.";
+
+ // Type
+ public static final String UNSUPPORTED_TYPE = "Unsupported type: ";
+
+ // RowImpl
+ public static final String INDEX_OUT_OF_BOUND = "Index out of bound error!";
+ public static final String INVALID_INPUT = "Invalid input: ";
+
+ private UdfApiMessages() {}
+}
diff --git a/iotdb-api/udf-api/src/main/i18n/zh/org/apache/iotdb/udf/api/i18n/UdfApiMessages.java b/iotdb-api/udf-api/src/main/i18n/zh/org/apache/iotdb/udf/api/i18n/UdfApiMessages.java
new file mode 100644
index 0000000000000..71dbd416214f8
--- /dev/null
+++ b/iotdb-api/udf-api/src/main/i18n/zh/org/apache/iotdb/udf/api/i18n/UdfApiMessages.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.udf.api.i18n;
+
+public final class UdfApiMessages {
+
+ // MapTableFunctionHandle
+ public static final String UNSUPPORTED_VALUE_TYPE = "不支持的值类型。";
+ public static final String UNKNOWN_TYPE = "未知类型:";
+
+ // Argument
+ public static final String UNKNOWN_ARGUMENT_TYPE = "未知参数类型:";
+
+ // TableFunctionProcessorProvider
+ public static final String TABLE_FUNCTION_DOES_NOT_PROCESS_INPUT_DATA =
+ "此表函数不处理输入数据";
+ public static final String TABLE_FUNCTION_DOES_NOT_PROCESS_LEAF_DATA =
+ "此表函数不处理叶子数据";
+
+ // DescribedSchema
+ public static final String DESCRIBED_SCHEMA_HAS_NO_FIELDS = "DescribedSchema 没有字段";
+
+ // ScalarArgument
+ public static final String UNKNOWN_SCALAR_ARG_TYPE = "未知类型:";
+
+ // TableArgument
+ public static final String FIELD_NAMES_AND_TYPES_MUST_HAVE_SAME_SIZE =
+ "fieldNames 和 fieldTypes 必须具有相同的大小";
+
+ // ParameterSpecification
+ public static final String NON_NULL_DEFAULT_VALUE_FOR_REQUIRED_ARG =
+ "必填参数不能有非 null 默认值";
+
+ // ScalarFunctionAnalysis
+ public static final String SCALAR_FUNCTION_ANALYSIS_OUTPUT_DATA_TYPE_NOT_SET =
+ "ScalarFunctionAnalysis 的 outputDataType 未设置。";
+
+ // AggregateFunctionAnalysis
+ public static final String AGGREGATE_FUNCTION_ANALYSIS_OUTPUT_DATA_TYPE_NOT_SET =
+ "AggregateFunctionAnalysis 的 outputDataType 未设置。";
+
+ // UDTFConfigurations
+ public static final String ACCESS_STRATEGY_NOT_SET = "访问策略未设置。";
+
+ // UDFConfigurations
+ public static final String UDF_OUTPUT_DATA_TYPE_NOT_SET = "UDF 的 outputDataType 未设置。";
+
+ // SlidingTimeWindowAccessStrategy
+ public static final String METHOD_DEPRECATED_SINCE_V014 = "该方法自 v0.14 起已废弃。";
+
+ // Type
+ public static final String UNSUPPORTED_TYPE = "不支持的类型:";
+
+ // RowImpl
+ public static final String INDEX_OUT_OF_BOUND = "索引越界错误!";
+ public static final String INVALID_INPUT = "无效输入:";
+
+ private UdfApiMessages() {}
+}
diff --git a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/customizer/analysis/AggregateFunctionAnalysis.java b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/customizer/analysis/AggregateFunctionAnalysis.java
index 2eb4ba76aac67..168f1854ca84b 100644
--- a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/customizer/analysis/AggregateFunctionAnalysis.java
+++ b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/customizer/analysis/AggregateFunctionAnalysis.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.udf.api.customizer.analysis;
+import org.apache.iotdb.udf.api.i18n.UdfApiMessages;
import org.apache.iotdb.udf.api.type.Type;
public class AggregateFunctionAnalysis implements FunctionAnalysis {
@@ -54,7 +55,8 @@ public Builder removable(boolean removable) {
public AggregateFunctionAnalysis build() throws IllegalArgumentException {
if (outputDataType == null) {
- throw new IllegalArgumentException("AggregateFunctionAnalysis outputDataType is not set.");
+ throw new IllegalArgumentException(
+ UdfApiMessages.AGGREGATE_FUNCTION_ANALYSIS_OUTPUT_DATA_TYPE_NOT_SET);
}
return new AggregateFunctionAnalysis(outputDataType, removable);
}
diff --git a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/customizer/analysis/ScalarFunctionAnalysis.java b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/customizer/analysis/ScalarFunctionAnalysis.java
index dc633203ed2fa..6adadd9edbeb3 100644
--- a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/customizer/analysis/ScalarFunctionAnalysis.java
+++ b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/customizer/analysis/ScalarFunctionAnalysis.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.udf.api.customizer.analysis;
+import org.apache.iotdb.udf.api.i18n.UdfApiMessages;
import org.apache.iotdb.udf.api.type.Type;
public class ScalarFunctionAnalysis implements FunctionAnalysis {
@@ -43,7 +44,8 @@ public Builder outputDataType(Type outputDataType) {
public ScalarFunctionAnalysis build() throws IllegalArgumentException {
if (outputDataType == null) {
- throw new IllegalArgumentException("ScalarFunctionAnalysis outputDataType is not set.");
+ throw new IllegalArgumentException(
+ UdfApiMessages.SCALAR_FUNCTION_ANALYSIS_OUTPUT_DATA_TYPE_NOT_SET);
}
return new ScalarFunctionAnalysis(outputDataType);
}
diff --git a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/customizer/config/UDFConfigurations.java b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/customizer/config/UDFConfigurations.java
index d572db2a1ef97..12ba3e72012e7 100644
--- a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/customizer/config/UDFConfigurations.java
+++ b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/customizer/config/UDFConfigurations.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.udf.api.customizer.config;
+import org.apache.iotdb.udf.api.i18n.UdfApiMessages;
import org.apache.iotdb.udf.api.type.Type;
public abstract class UDFConfigurations {
@@ -31,7 +32,7 @@ public Type getOutputDataType() {
public void check() {
if (outputDataType == null) {
- throw new RuntimeException("UDF outputDataType is not set.");
+ throw new RuntimeException(UdfApiMessages.UDF_OUTPUT_DATA_TYPE_NOT_SET);
}
}
}
diff --git a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/customizer/config/UDTFConfigurations.java b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/customizer/config/UDTFConfigurations.java
index 6ca701b2ca887..4021fddab68d2 100644
--- a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/customizer/config/UDTFConfigurations.java
+++ b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/customizer/config/UDTFConfigurations.java
@@ -28,6 +28,7 @@
import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy;
import org.apache.iotdb.udf.api.customizer.strategy.SlidingSizeWindowAccessStrategy;
import org.apache.iotdb.udf.api.customizer.strategy.SlidingTimeWindowAccessStrategy;
+import org.apache.iotdb.udf.api.i18n.UdfApiMessages;
import org.apache.iotdb.udf.api.type.Type;
import java.time.ZoneId;
@@ -100,7 +101,7 @@ public UDTFConfigurations setAccessStrategy(AccessStrategy accessStrategy) {
public void check() {
super.check();
if (accessStrategy == null) {
- throw new RuntimeException("Access strategy is not set.");
+ throw new RuntimeException(UdfApiMessages.ACCESS_STRATEGY_NOT_SET);
}
accessStrategy.check();
}
diff --git a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/customizer/strategy/SlidingTimeWindowAccessStrategy.java b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/customizer/strategy/SlidingTimeWindowAccessStrategy.java
index 19eb5df98544b..8ec89957d137d 100644
--- a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/customizer/strategy/SlidingTimeWindowAccessStrategy.java
+++ b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/customizer/strategy/SlidingTimeWindowAccessStrategy.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.udf.api.collector.PointCollector;
import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations;
import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters;
+import org.apache.iotdb.udf.api.i18n.UdfApiMessages;
import java.time.ZoneId;
@@ -108,7 +109,7 @@ public SlidingTimeWindowAccessStrategy(
String slidingStepString,
String displayWindowBeginString,
String displayWindowEndString) {
- throw new UnsupportedOperationException("The method is deprecated since v0.14.");
+ throw new UnsupportedOperationException(UdfApiMessages.METHOD_DEPRECATED_SINCE_V014);
}
/**
@@ -127,7 +128,7 @@ public SlidingTimeWindowAccessStrategy(
*/
@Deprecated
public SlidingTimeWindowAccessStrategy(String timeIntervalString, String slidingStepString) {
- throw new UnsupportedOperationException("The method is deprecated since v0.14.");
+ throw new UnsupportedOperationException(UdfApiMessages.METHOD_DEPRECATED_SINCE_V014);
}
/**
@@ -142,7 +143,7 @@ public SlidingTimeWindowAccessStrategy(String timeIntervalString, String sliding
* @throws UnsupportedOperationException deprecated since v0.14
*/
public SlidingTimeWindowAccessStrategy(String timeIntervalString) {
- throw new UnsupportedOperationException("The method is deprecated since v0.14.");
+ throw new UnsupportedOperationException(UdfApiMessages.METHOD_DEPRECATED_SINCE_V014);
}
/**
diff --git a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/MapTableFunctionHandle.java b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/MapTableFunctionHandle.java
index da27eb22cd234..43a119a4a2f88 100644
--- a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/MapTableFunctionHandle.java
+++ b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/MapTableFunctionHandle.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.udf.api.relational.table;
+import org.apache.iotdb.udf.api.i18n.UdfApiMessages;
import org.apache.iotdb.udf.api.type.Type;
import java.nio.ByteBuffer;
@@ -39,7 +40,7 @@ public class MapTableFunctionHandle implements TableFunctionHandle {
public void addProperty(String key, Object value) {
if (!SUPPORT_VALUE_TYPE.contains(value.getClass())) {
- throw new IllegalArgumentException("Unsupported value type.");
+ throw new IllegalArgumentException(UdfApiMessages.UNSUPPORTED_VALUE_TYPE);
}
map.put(key, value);
}
@@ -134,7 +135,7 @@ public void deserialize(byte[] bytes) {
map.put(key, new String(b, StandardCharsets.UTF_8));
break;
default:
- throw new IllegalArgumentException("Unknown type: " + type);
+ throw new IllegalArgumentException(UdfApiMessages.UNKNOWN_TYPE + type);
}
}
}
@@ -171,7 +172,7 @@ public static class Builder {
public Builder addProperty(String key, Object value) {
if (!SUPPORT_VALUE_TYPE.contains(value.getClass())) {
- throw new IllegalArgumentException("Unsupported value type.");
+ throw new IllegalArgumentException(UdfApiMessages.UNSUPPORTED_VALUE_TYPE);
}
map.put(key, value);
return this;
diff --git a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/TableFunctionProcessorProvider.java b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/TableFunctionProcessorProvider.java
index fcbc1a198c1a4..f86d631f5989a 100644
--- a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/TableFunctionProcessorProvider.java
+++ b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/TableFunctionProcessorProvider.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.udf.api.relational.table;
+import org.apache.iotdb.udf.api.i18n.UdfApiMessages;
import org.apache.iotdb.udf.api.relational.table.processor.TableFunctionDataProcessor;
import org.apache.iotdb.udf.api.relational.table.processor.TableFunctionLeafProcessor;
@@ -29,7 +30,8 @@ public interface TableFunctionProcessorProvider {
* It is called once per each partition processed by the table function.
*/
default TableFunctionDataProcessor getDataProcessor() {
- throw new UnsupportedOperationException("this table function does not process input data");
+ throw new UnsupportedOperationException(
+ UdfApiMessages.TABLE_FUNCTION_DOES_NOT_PROCESS_INPUT_DATA);
}
/**
@@ -38,6 +40,7 @@ default TableFunctionDataProcessor getDataProcessor() {
* It is called once per each split processed by the table function.
*/
default TableFunctionLeafProcessor getSplitProcessor() {
- throw new UnsupportedOperationException("this table function does not process leaf data");
+ throw new UnsupportedOperationException(
+ UdfApiMessages.TABLE_FUNCTION_DOES_NOT_PROCESS_LEAF_DATA);
}
}
diff --git a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/argument/Argument.java b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/argument/Argument.java
index dcf68347082a1..9237d7b5ce50c 100644
--- a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/argument/Argument.java
+++ b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/argument/Argument.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.udf.api.relational.table.argument;
+import org.apache.iotdb.udf.api.i18n.UdfApiMessages;
+
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
@@ -37,7 +39,7 @@ static Argument deserialize(ByteBuffer buffer) {
case SCALAR_ARGUMENT:
return ScalarArgument.deserialize(buffer);
default:
- throw new IllegalArgumentException("Unknown argument type: " + type);
+ throw new IllegalArgumentException(UdfApiMessages.UNKNOWN_ARGUMENT_TYPE + type);
}
}
diff --git a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/argument/DescribedSchema.java b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/argument/DescribedSchema.java
index aecea22067f52..b4a3385ad5bfb 100644
--- a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/argument/DescribedSchema.java
+++ b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/argument/DescribedSchema.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.udf.api.relational.table.argument;
+import org.apache.iotdb.udf.api.i18n.UdfApiMessages;
import org.apache.iotdb.udf.api.type.Type;
import java.util.ArrayList;
@@ -33,7 +34,7 @@ public class DescribedSchema {
private DescribedSchema(List fields) {
requireNonNull(fields, "fields is null");
if (fields.isEmpty()) {
- throw new IllegalArgumentException("DescribedSchema has no fields");
+ throw new IllegalArgumentException(UdfApiMessages.DESCRIBED_SCHEMA_HAS_NO_FIELDS);
}
this.fields = fields;
}
diff --git a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/argument/ScalarArgument.java b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/argument/ScalarArgument.java
index 1898f85612368..1fbc53e11d71d 100644
--- a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/argument/ScalarArgument.java
+++ b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/argument/ScalarArgument.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.udf.api.relational.table.argument;
+import org.apache.iotdb.udf.api.i18n.UdfApiMessages;
import org.apache.iotdb.udf.api.type.Type;
import org.apache.tsfile.utils.Binary;
@@ -81,7 +82,7 @@ public void serialize(ByteBuffer buffer) {
buffer.put(blobBytes);
break;
default:
- throw new IllegalArgumentException("Unknown type: " + type);
+ throw new IllegalArgumentException(UdfApiMessages.UNKNOWN_SCALAR_ARG_TYPE + type);
}
}
@@ -121,7 +122,7 @@ public void serialize(DataOutputStream buffer) throws IOException {
buffer.write(blobBytes);
break;
default:
- throw new IllegalArgumentException("Unknown type: " + type);
+ throw new IllegalArgumentException(UdfApiMessages.UNKNOWN_SCALAR_ARG_TYPE + type);
}
}
@@ -151,7 +152,7 @@ public static ScalarArgument deserialize(ByteBuffer buffer) {
buffer.get(bytes);
return new ScalarArgument(type, new String(bytes));
default:
- throw new IllegalArgumentException("Unknown type: " + type);
+ throw new IllegalArgumentException(UdfApiMessages.UNKNOWN_SCALAR_ARG_TYPE + type);
}
}
}
diff --git a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/argument/TableArgument.java b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/argument/TableArgument.java
index 5b3b44be40042..3e75452a29bcf 100644
--- a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/argument/TableArgument.java
+++ b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/argument/TableArgument.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.udf.api.relational.table.argument;
+import org.apache.iotdb.udf.api.i18n.UdfApiMessages;
import org.apache.iotdb.udf.api.type.Type;
import java.io.DataOutputStream;
@@ -46,7 +47,7 @@ public TableArgument(
this.fieldNames = requireNonNull(fieldNames, "fieldNames is null");
this.fieldTypes = requireNonNull(fieldTypes, "fieldTypes is null");
if (fieldNames.size() != fieldTypes.size()) {
- throw new IllegalArgumentException("fieldNames and fieldTypes must have the same size");
+ throw new IllegalArgumentException(UdfApiMessages.FIELD_NAMES_AND_TYPES_MUST_HAVE_SAME_SIZE);
}
this.partitionBy = requireNonNull(partitionBy, "partitionBy is null");
this.orderBy = requireNonNull(orderBy, "orderBy is null");
diff --git a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/specification/ParameterSpecification.java b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/specification/ParameterSpecification.java
index 20b831d3568f3..6806e905322c8 100644
--- a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/specification/ParameterSpecification.java
+++ b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/relational/table/specification/ParameterSpecification.java
@@ -37,6 +37,8 @@
package org.apache.iotdb.udf.api.relational.table.specification;
+import org.apache.iotdb.udf.api.i18n.UdfApiMessages;
+
import java.util.Optional;
/**
@@ -59,7 +61,7 @@ public abstract class ParameterSpecification {
this.required = required;
this.defaultValue = defaultValue;
if (required && defaultValue.isPresent()) {
- throw new IllegalArgumentException("non-null default value for a required argument");
+ throw new IllegalArgumentException(UdfApiMessages.NON_NULL_DEFAULT_VALUE_FOR_REQUIRED_ARG);
}
}
diff --git a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/type/Type.java b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/type/Type.java
index c4c38c285dce9..02ea5a663ab75 100644
--- a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/type/Type.java
+++ b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/type/Type.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.udf.api.type;
+import org.apache.iotdb.udf.api.i18n.UdfApiMessages;
+
import org.apache.tsfile.utils.Binary;
import java.time.LocalDate;
@@ -77,7 +79,7 @@ public static Type valueOf(byte type) {
return t;
}
}
- throw new IllegalArgumentException("Unsupported type: " + type);
+ throw new IllegalArgumentException(UdfApiMessages.UNSUPPORTED_TYPE + type);
}
public boolean checkObjectType(Object o) {
diff --git a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/utils/RowImpl.java b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/utils/RowImpl.java
index 533ab9574df6d..a082a1968ce6d 100644
--- a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/utils/RowImpl.java
+++ b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/utils/RowImpl.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.udf.api.utils;
import org.apache.iotdb.udf.api.access.Row;
+import org.apache.iotdb.udf.api.i18n.UdfApiMessages;
import org.apache.iotdb.udf.api.type.Binary;
import org.apache.iotdb.udf.api.type.Type;
@@ -46,7 +47,7 @@ public long getTime() {
@Override
public int getInt(int columnIndex) {
if (columnIndex >= size()) {
- throw new IndexOutOfBoundsException("Index out of bound error!");
+ throw new IndexOutOfBoundsException(UdfApiMessages.INDEX_OUT_OF_BOUND);
}
return (int) rowRecord[columnIndex];
}
@@ -54,7 +55,7 @@ public int getInt(int columnIndex) {
@Override
public long getLong(int columnIndex) {
if (columnIndex >= size()) {
- throw new IndexOutOfBoundsException("Index out of bound error!");
+ throw new IndexOutOfBoundsException(UdfApiMessages.INDEX_OUT_OF_BOUND);
}
return (long) rowRecord[columnIndex];
}
@@ -62,7 +63,7 @@ public long getLong(int columnIndex) {
@Override
public float getFloat(int columnIndex) {
if (columnIndex >= size()) {
- throw new IndexOutOfBoundsException("Index out of bound error!");
+ throw new IndexOutOfBoundsException(UdfApiMessages.INDEX_OUT_OF_BOUND);
}
return (float) rowRecord[columnIndex];
}
@@ -70,7 +71,7 @@ public float getFloat(int columnIndex) {
@Override
public double getDouble(int columnIndex) {
if (columnIndex >= size()) {
- throw new IndexOutOfBoundsException("Index out of bound error!");
+ throw new IndexOutOfBoundsException(UdfApiMessages.INDEX_OUT_OF_BOUND);
}
return (double) rowRecord[columnIndex];
}
@@ -78,7 +79,7 @@ public double getDouble(int columnIndex) {
@Override
public boolean getBoolean(int columnIndex) {
if (columnIndex >= size()) {
- throw new IndexOutOfBoundsException("Index out of bound error!");
+ throw new IndexOutOfBoundsException(UdfApiMessages.INDEX_OUT_OF_BOUND);
}
return (boolean) rowRecord[columnIndex];
}
@@ -86,7 +87,7 @@ public boolean getBoolean(int columnIndex) {
@Override
public Binary getBinary(int columnIndex) {
if (columnIndex >= size()) {
- throw new IndexOutOfBoundsException("Index out of bound error!");
+ throw new IndexOutOfBoundsException(UdfApiMessages.INDEX_OUT_OF_BOUND);
}
return transformToUDFBinary((org.apache.tsfile.utils.Binary) rowRecord[columnIndex]);
}
@@ -94,7 +95,7 @@ public Binary getBinary(int columnIndex) {
@Override
public String getString(int columnIndex) {
if (columnIndex >= size()) {
- throw new IndexOutOfBoundsException("Index out of bound error!");
+ throw new IndexOutOfBoundsException(UdfApiMessages.INDEX_OUT_OF_BOUND);
}
return rowRecord[columnIndex].toString();
}
@@ -151,7 +152,7 @@ private static Type transformToUDFDataType(TSDataType tsDataType) {
case 11:
return Type.STRING;
default:
- throw new IllegalArgumentException("Invalid input: " + type);
+ throw new IllegalArgumentException(UdfApiMessages.INVALID_INPUT + type);
}
}
}
diff --git a/iotdb-client/cli/pom.xml b/iotdb-client/cli/pom.xml
index e727adb57bfbe..3b57dbb191b3e 100644
--- a/iotdb-client/cli/pom.xml
+++ b/iotdb-client/cli/pom.xml
@@ -247,6 +247,10 @@
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+
diff --git a/iotdb-client/cli/src/main/i18n/en/org/apache/iotdb/cli/i18n/CliMessages.java b/iotdb-client/cli/src/main/i18n/en/org/apache/iotdb/cli/i18n/CliMessages.java
new file mode 100644
index 0000000000000..cdc272c13fa1a
--- /dev/null
+++ b/iotdb-client/cli/src/main/i18n/en/org/apache/iotdb/cli/i18n/CliMessages.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.cli.i18n;
+
+public final class CliMessages {
+
+ // CliContext
+ public static final String EXITING_WITH_CODE = "Exiting with code %d";
+
+ // IoTDBDataBackTool
+ public static final String TARGET_DIR_EMPTY =
+ " -targetdir cannot be empty, The backup folder must be specified";
+ public static final String TARGET_DIR_USE_ABSOLUTE_PATH =
+ "-targetdir parameter exception, please use absolute path";
+ public static final String TARGET_DATA_DIR_USE_ABSOLUTE_PATH =
+ "-targetdatadir parameter exception, please use absolute path";
+ public static final String TARGET_WAL_DIR_USE_ABSOLUTE_PATH =
+ "-targetwaldir parameter exception, please use absolute path";
+ public static final String BACKUP_FOLDER_EXISTS = "The backup folder already exists:{}";
+ public static final String ALL_OPERATIONS_COMPLETE = "all operations are complete";
+ public static final String COPY_FILE_ERROR = "copy file error";
+ public static final String COPY_FILE_ERROR_WITH_PATH = "copy file error {}";
+ public static final String START_READ_CONFIG = "Start to read config file {}";
+ public static final String READ_CONFIG_ERROR = "Read config file {} error";
+ public static final String DIRECTORY_CREATED = "Directory created successfully:{}";
+ public static final String FAILED_TO_CREATE_DIRECTORY = "Failed to create directory:{}";
+ public static final String LINK_FILE_ERROR = "link file error {}";
+ public static final String PROPERTIES_FILE_UPDATE_ERROR = "properties file update error.";
+ public static final String FAILED_TO_READ_DATA = "Failed to read data from file: {}";
+ public static final String FAILED_TO_WRITE_DATA = "Failed to write data to file: {}";
+ public static final String FAILED_TO_CREATE_FILE = "Failed to create file: {}";
+
+ // AbstractDataTool
+ public static final String USE_HELP_FOR_MORE = "Use -help for more information";
+
+ // ImportTsFileRemotely
+ public static final String SYNC_CLIENT_INIT_ERROR = "Sync client init error because %s";
+
+ // UnsupportedOperationException
+ public static final String NOT_SUPPORTED_YET = "Not supported yet.";
+
+ // ImportData
+ public static final String UNKNOWN_TYPE_INFER_KEY = "Unknown type infer key: %s";
+ public static final String UNKNOWN_TYPE_INFER_VALUE = "Unknown type infer value: %s";
+ public static final String NAN_CANNOT_CONVERT = "NaN can not convert to %s";
+ public static final String BOOLEAN_CANNOT_CONVERT = "Boolean can not convert to %s";
+ public static final String DATE_CANNOT_CONVERT = "Date can not convert to %s";
+ public static final String TIMESTAMP_CANNOT_CONVERT = "Timestamp can not convert to %s";
+ public static final String BLOB_CANNOT_CONVERT = "Blob can not convert to %s";
+ public static final String CANNOT_CONVERT = "%s can not convert to %s";
+
+ private CliMessages() {}
+}
diff --git a/iotdb-client/cli/src/main/i18n/zh/org/apache/iotdb/cli/i18n/CliMessages.java b/iotdb-client/cli/src/main/i18n/zh/org/apache/iotdb/cli/i18n/CliMessages.java
new file mode 100644
index 0000000000000..a518f34c38092
--- /dev/null
+++ b/iotdb-client/cli/src/main/i18n/zh/org/apache/iotdb/cli/i18n/CliMessages.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.cli.i18n;
+
+public final class CliMessages {
+
+ // CliContext
+ public static final String EXITING_WITH_CODE = "正在退出,退出码 %d";
+
+ // IoTDBDataBackTool
+ public static final String TARGET_DIR_EMPTY =
+ " -targetdir 不能为空,必须指定备份目录";
+ public static final String TARGET_DIR_USE_ABSOLUTE_PATH =
+ "-targetdir 参数异常,请使用绝对路径";
+ public static final String TARGET_DATA_DIR_USE_ABSOLUTE_PATH =
+ "-targetdatadir 参数异常,请使用绝对路径";
+ public static final String TARGET_WAL_DIR_USE_ABSOLUTE_PATH =
+ "-targetwaldir 参数异常,请使用绝对路径";
+ public static final String BACKUP_FOLDER_EXISTS = "备份目录已存在:{}";
+ public static final String ALL_OPERATIONS_COMPLETE = "所有操作已完成";
+ public static final String COPY_FILE_ERROR = "复制文件错误";
+ public static final String COPY_FILE_ERROR_WITH_PATH = "复制文件错误 {}";
+ public static final String START_READ_CONFIG = "开始读取配置文件 {}";
+ public static final String READ_CONFIG_ERROR = "读取配置文件 {} 错误";
+ public static final String DIRECTORY_CREATED = "目录创建成功:{}";
+ public static final String FAILED_TO_CREATE_DIRECTORY = "创建目录失败:{}";
+ public static final String LINK_FILE_ERROR = "创建文件链接错误 {}";
+ public static final String PROPERTIES_FILE_UPDATE_ERROR = "属性文件更新错误。";
+ public static final String FAILED_TO_READ_DATA = "从文件读取数据失败:{}";
+ public static final String FAILED_TO_WRITE_DATA = "向文件写入数据失败:{}";
+ public static final String FAILED_TO_CREATE_FILE = "创建文件失败:{}";
+
+ // AbstractDataTool
+ public static final String USE_HELP_FOR_MORE = "使用 -help 获取更多信息";
+
+ // ImportTsFileRemotely
+ public static final String SYNC_CLIENT_INIT_ERROR = "同步客户端初始化失败,原因:%s";
+
+ // UnsupportedOperationException
+ public static final String NOT_SUPPORTED_YET = "尚不支持此操作。";
+
+ // ImportData
+ public static final String UNKNOWN_TYPE_INFER_KEY = "未知的类型推断键:%s";
+ public static final String UNKNOWN_TYPE_INFER_VALUE = "未知的类型推断值:%s";
+ public static final String NAN_CANNOT_CONVERT = "NaN 无法转换为 %s";
+ public static final String BOOLEAN_CANNOT_CONVERT = "Boolean 无法转换为 %s";
+ public static final String DATE_CANNOT_CONVERT = "Date 无法转换为 %s";
+ public static final String TIMESTAMP_CANNOT_CONVERT = "Timestamp 无法转换为 %s";
+ public static final String BLOB_CANNOT_CONVERT = "Blob 无法转换为 %s";
+ public static final String CANNOT_CONVERT = "%s 无法转换为 %s";
+
+ private CliMessages() {}
+}
diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/AbstractCli.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/AbstractCli.java
index 688158e78b251..22ecdc3a40ed1 100644
--- a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/AbstractCli.java
+++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/AbstractCli.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.cli;
+import org.apache.iotdb.cli.i18n.CliMessages;
import org.apache.iotdb.cli.utils.CliContext;
import org.apache.iotdb.common.rpc.thrift.Model;
import org.apache.iotdb.exception.ArgsErrorException;
@@ -259,7 +260,7 @@ static String checkRequiredArg(
if (isRequired) {
String msg = String.format("%s: Required values for option '%s' not provided", IOTDB, name);
ctx.getPrinter().println(msg);
- ctx.getPrinter().println("Use -help for more information");
+ ctx.getPrinter().println(CliMessages.USE_HELP_FOR_MORE);
throw new ArgsErrorException(msg);
} else if (defaultValue == null) {
String msg = String.format("%s: Required values for option '%s' is null.", IOTDB, name);
diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/utils/CliContext.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/utils/CliContext.java
index 1edd2d928e952..f72e5430c3671 100644
--- a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/utils/CliContext.java
+++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/utils/CliContext.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.cli.utils;
+import org.apache.iotdb.cli.i18n.CliMessages;
import org.apache.iotdb.cli.type.ExitType;
import org.jline.reader.LineReader;
@@ -94,7 +95,7 @@ public void exit(int exitCode) {
if (exitType == ExitType.SYSTEM_EXIT) {
System.exit(exitCode);
} else {
- throw new RuntimeException("Exiting with code " + exitCode);
+ throw new RuntimeException(String.format(CliMessages.EXITING_WITH_CODE, exitCode));
}
}
}
diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/backup/IoTDBDataBackTool.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/backup/IoTDBDataBackTool.java
index 975dfdd7b2461..c257502dfdbf1 100644
--- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/backup/IoTDBDataBackTool.java
+++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/backup/IoTDBDataBackTool.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.tool.backup;
+import org.apache.iotdb.cli.i18n.CliMessages;
import org.apache.iotdb.commons.conf.CommonConfig;
import org.apache.iotdb.commons.conf.IoTDBConstant;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
@@ -136,11 +137,11 @@ public static boolean vaildParam(String dnDataDirs, String dnWalDirs) {
boolean isVaild = true;
if (type == null || type.trim().length() == 0 || !type.equals("quick")) {
if (targetDirParam.isEmpty()) {
- LOGGER.error(" -targetdir cannot be empty, The backup folder must be specified");
+ LOGGER.error(CliMessages.TARGET_DIR_EMPTY);
isVaild = false;
} else {
if (isRelativePath(targetDirParam)) {
- LOGGER.error("-targetdir parameter exception, please use absolute path");
+ LOGGER.error(CliMessages.TARGET_DIR_USE_ABSOLUTE_PATH);
isVaild = false;
}
}
@@ -152,7 +153,7 @@ public static boolean vaildParam(String dnDataDirs, String dnWalDirs) {
isVaild = false;
}
if (targetPathVild(targetDataDirParam)) {
- LOGGER.error("-targetdatadir parameter exception, please use absolute path");
+ LOGGER.error(CliMessages.TARGET_DATA_DIR_USE_ABSOLUTE_PATH);
isVaild = false;
}
}
@@ -164,7 +165,7 @@ public static boolean vaildParam(String dnDataDirs, String dnWalDirs) {
isVaild = false;
}
if (targetPathVild(targetWalDirParam)) {
- LOGGER.error("-targetwaldir parameter exception, please use absolute path");
+ LOGGER.error(CliMessages.TARGET_WAL_DIR_USE_ABSOLUTE_PATH);
isVaild = false;
}
}
@@ -194,7 +195,7 @@ public static void main(String[] args) throws IOException {
.append("iotdb_backup");
File targetDir = new File(targetDirString.toString());
if (targetDir.exists()) {
- LOGGER.error("The backup folder already exists:{}", targetDirString);
+ LOGGER.error(CliMessages.BACKUP_FOLDER_EXISTS, targetDirString);
System.exit(0);
}
@@ -345,7 +346,7 @@ public static void main(String[] args) throws IOException {
dnMapProperties);
}
}
- LOGGER.info("all operations are complete");
+ LOGGER.info(CliMessages.ALL_OPERATIONS_COMPLETE);
delFile(filename);
}
@@ -353,7 +354,7 @@ private static void checkQuickMode(Map dnDataDirsMap) {
for (Map.Entry entry : dnDataDirsMap.entrySet()) {
File backupDir = new File(entry.getValue());
if (backupDir.exists()) {
- LOGGER.error("The backup folder already exists:{}", entry.getValue());
+ LOGGER.error(CliMessages.BACKUP_FOLDER_EXISTS, entry.getValue());
System.exit(0);
}
}
@@ -417,7 +418,7 @@ private static void ioTDBDataBack(
Files.copy(file.toPath(), targetFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
targetFileCount.incrementAndGet();
} catch (IOException e) {
- LOGGER.error("copy file error", e);
+ LOGGER.error(CliMessages.COPY_FILE_ERROR, e);
}
}
}
@@ -434,7 +435,7 @@ private static void ioTDBDataBack(
Files.copy(file.toPath(), targetFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
targetFileCount.incrementAndGet();
} catch (IOException e) {
- LOGGER.error("copy file error", e);
+ LOGGER.error(CliMessages.COPY_FILE_ERROR, e);
}
}
}
@@ -894,10 +895,10 @@ private static Properties getProperties(String configName) {
Properties properties = new Properties();
if (url != null) {
try (InputStream inputStream = url.openStream()) {
- LOGGER.info("Start to read config file {}", url);
+ LOGGER.info(CliMessages.START_READ_CONFIG, url);
properties.load(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
} catch (Exception e) {
- LOGGER.error("Read config file {} error", url, e);
+ LOGGER.error(CliMessages.READ_CONFIG_ERROR, url, e);
}
}
return properties;
@@ -953,7 +954,7 @@ public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs)
}
});
} catch (IOException e) {
- LOGGER.error("copy file error {}", sourceDirectory, e);
+ LOGGER.error(CliMessages.COPY_FILE_ERROR_WITH_PATH, sourceDirectory, e);
}
}
@@ -962,9 +963,9 @@ public static void createDirectory(String directoryPath) {
if (!directory.exists()) {
boolean created = directory.mkdirs();
if (created) {
- LOGGER.info("Directory created successfully:{}", directoryPath);
+ LOGGER.info(CliMessages.DIRECTORY_CREATED, directoryPath);
} else {
- LOGGER.error("Failed to create directory:{}", directoryPath);
+ LOGGER.error(CliMessages.FAILED_TO_CREATE_DIRECTORY, directoryPath);
}
}
}
@@ -986,12 +987,12 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs)
try {
Files.createLink(targetFile, file);
} catch (UnsupportedOperationException | IOException e) {
- LOGGER.debug("link file error {}", e);
+ LOGGER.debug(CliMessages.LINK_FILE_ERROR, e);
try {
Files.copy(file, targetFile, StandardCopyOption.REPLACE_EXISTING);
} catch (IOException ex) {
targetFileCount.decrementAndGet();
- LOGGER.error("copy file error {}", ex);
+ LOGGER.error(CliMessages.COPY_FILE_ERROR_WITH_PATH, ex);
}
}
}
@@ -1079,7 +1080,7 @@ public static void propertiesFileUpdate(String filePath, String key, String newV
fileOutputStream.close();
} catch (IOException e) {
- LOGGER.error("properties file update error.", e);
+ LOGGER.error(CliMessages.PROPERTIES_FILE_UPDATE_ERROR, e);
}
}
@@ -1115,7 +1116,7 @@ public static int readFileData(String filename) {
return Integer.parseInt(lines.get(0));
}
} catch (IOException e) {
- LOGGER.error("Failed to read data from file: {}", filename, e);
+ LOGGER.error(CliMessages.FAILED_TO_READ_DATA, filename, e);
}
return 0;
}
@@ -1134,7 +1135,7 @@ public static void writeFileData(String filename, int data) {
try {
Files.write(filePath, Integer.toString(data).getBytes(StandardCharsets.UTF_8));
} catch (IOException e) {
- LOGGER.error("Failed to write data to file: {}", filename, e);
+ LOGGER.error(CliMessages.FAILED_TO_WRITE_DATA, filename, e);
}
}
@@ -1144,7 +1145,7 @@ public static void createFile(String filename) {
try {
Files.createFile(filePath);
} catch (IOException e) {
- LOGGER.error("Failed to create file: {}", filename, e);
+ LOGGER.error(CliMessages.FAILED_TO_CREATE_FILE, filename, e);
}
}
}
diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/AbstractDataTool.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/AbstractDataTool.java
index 11695d786b871..4ef95c78eda14 100644
--- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/AbstractDataTool.java
+++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/AbstractDataTool.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.tool.data;
import org.apache.iotdb.calc.utils.constant.SqlConstant;
+import org.apache.iotdb.cli.i18n.CliMessages;
import org.apache.iotdb.cli.type.ExitType;
import org.apache.iotdb.cli.utils.CliContext;
import org.apache.iotdb.cli.utils.IoTPrinter;
@@ -143,7 +144,7 @@ protected static String checkRequiredArg(
}
String msg = String.format("Required values for option '%s' not provided", name);
LOGGER.info(msg);
- LOGGER.info("Use -help for more information");
+ LOGGER.info(CliMessages.USE_HELP_FOR_MORE);
throw new ArgsErrorException(msg);
}
return str;
diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/ImportData.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/ImportData.java
index 4c7dd7e80c8a5..6e52bedbb2dc7 100644
--- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/ImportData.java
+++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/data/ImportData.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.tool.data;
+import org.apache.iotdb.cli.i18n.CliMessages;
import org.apache.iotdb.cli.utils.IoTPrinter;
import org.apache.iotdb.commons.exception.IllegalPathException;
import org.apache.iotdb.commons.utils.NodeUrlUtils;
@@ -379,29 +380,29 @@ public static File createFailDir(CommandLine commandLine) {
private static void applyTypeInferArgs(String key, String value) throws ArgsErrorException {
if (!Constants.TYPE_INFER_KEY_DICT.containsKey(key)) {
- throw new ArgsErrorException("Unknown type infer key: " + key);
+ throw new ArgsErrorException(String.format(CliMessages.UNKNOWN_TYPE_INFER_KEY, key));
}
if (!Constants.TYPE_INFER_VALUE_DICT.containsKey(value)) {
- throw new ArgsErrorException("Unknown type infer value: " + value);
+ throw new ArgsErrorException(String.format(CliMessages.UNKNOWN_TYPE_INFER_VALUE, value));
}
if (key.equals(Constants.DATATYPE_NAN)
&& !(value.equals(Constants.DATATYPE_FLOAT)
|| value.equals(Constants.DATATYPE_DOUBLE)
|| value.equals(Constants.DATATYPE_TEXT)
|| value.equals(Constants.DATATYPE_STRING))) {
- throw new ArgsErrorException("NaN can not convert to " + value);
+ throw new ArgsErrorException(String.format(CliMessages.NAN_CANNOT_CONVERT, value));
}
if (key.equals(Constants.DATATYPE_BOOLEAN)
&& !(value.equals(Constants.DATATYPE_BOOLEAN)
|| value.equals(Constants.DATATYPE_TEXT)
|| value.equals(Constants.DATATYPE_STRING))) {
- throw new ArgsErrorException("Boolean can not convert to " + value);
+ throw new ArgsErrorException(String.format(CliMessages.BOOLEAN_CANNOT_CONVERT, value));
}
if (key.equals(Constants.DATATYPE_DATE)
&& !(value.equals(Constants.DATATYPE_DATE)
|| value.equals(Constants.DATATYPE_TEXT)
|| value.equals(Constants.DATATYPE_STRING))) {
- throw new ArgsErrorException("Date can not convert to " + value);
+ throw new ArgsErrorException(String.format(CliMessages.DATE_CANNOT_CONVERT, value));
}
if (key.equals(Constants.DATATYPE_TIMESTAMP)
&& !(value.equals(Constants.DATATYPE_TIMESTAMP)
@@ -409,15 +410,15 @@ private static void applyTypeInferArgs(String key, String value) throws ArgsErro
|| value.equals(Constants.DATATYPE_STRING)
|| value.equals(Constants.DATATYPE_DOUBLE)
|| value.equals(Constants.DATATYPE_LONG))) {
- throw new ArgsErrorException("Timestamp can not convert to " + value);
+ throw new ArgsErrorException(String.format(CliMessages.TIMESTAMP_CANNOT_CONVERT, value));
}
if (key.equals(Constants.DATATYPE_BLOB) && !(value.equals(Constants.DATATYPE_BLOB))) {
- throw new ArgsErrorException("Blob can not convert to " + value);
+ throw new ArgsErrorException(String.format(CliMessages.BLOB_CANNOT_CONVERT, value));
}
final TSDataType srcType = Constants.TYPE_INFER_VALUE_DICT.get(key);
final TSDataType dstType = Constants.TYPE_INFER_VALUE_DICT.get(value);
if (dstType.getType() < srcType.getType()) {
- throw new ArgsErrorException(key + " can not convert to " + value);
+ throw new ArgsErrorException(String.format(CliMessages.CANNOT_CONVERT, key, value));
}
Constants.TYPE_INFER_KEY_DICT.put(key, Constants.TYPE_INFER_VALUE_DICT.get(value));
}
diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ExportSchema.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ExportSchema.java
index 402030ecd7ea7..58659e2226320 100644
--- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ExportSchema.java
+++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ExportSchema.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.tool.schema;
+import org.apache.iotdb.cli.i18n.CliMessages;
import org.apache.iotdb.cli.type.ExitType;
import org.apache.iotdb.cli.utils.CliContext;
import org.apache.iotdb.cli.utils.IoTPrinter;
@@ -77,7 +78,7 @@ public static void main(String[] args) {
parseSpecialParams(commandLine);
} catch (ArgsErrorException e) {
ioTPrinter.println("Args args: " + e.getMessage());
- ioTPrinter.println("Use -help for more information");
+ ioTPrinter.println(CliMessages.USE_HELP_FOR_MORE);
System.exit(Constants.CODE_ERROR);
} catch (Exception e) {
ioTPrinter.println("Encounter an error, because " + e.getMessage());
diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ExportSchemaTable.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ExportSchemaTable.java
index 2ea66f360e1ce..1ed3e2c6b862a 100644
--- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ExportSchemaTable.java
+++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ExportSchemaTable.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.tool.schema;
+import org.apache.iotdb.cli.i18n.CliMessages;
import org.apache.iotdb.cli.utils.IoTPrinter;
import org.apache.iotdb.isession.ITableSession;
import org.apache.iotdb.isession.SessionDataSet;
@@ -316,6 +317,6 @@ private void exportSchemaBySelect(
@Override
protected void exportSchemaToCsvFile(String pathPattern, int index) {
- throw new UnsupportedOperationException("Not supported yet.");
+ throw new UnsupportedOperationException(CliMessages.NOT_SUPPORTED_YET);
}
}
diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ExportSchemaTree.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ExportSchemaTree.java
index c5fbdbc49d1f4..334d8a8d8dc42 100644
--- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ExportSchemaTree.java
+++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ExportSchemaTree.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.tool.schema;
+import org.apache.iotdb.cli.i18n.CliMessages;
import org.apache.iotdb.cli.utils.IoTPrinter;
import org.apache.iotdb.isession.SessionDataSet;
import org.apache.iotdb.rpc.IoTDBConnectionException;
@@ -59,7 +60,7 @@ public void init()
@Override
protected void exportSchemaToSqlFile() {
- throw new UnsupportedOperationException("Not supported yet.");
+ throw new UnsupportedOperationException(CliMessages.NOT_SUPPORTED_YET);
}
@Override
diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ImportSchemaTable.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ImportSchemaTable.java
index 89d9c56ad9fa0..011042c34d29c 100644
--- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ImportSchemaTable.java
+++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ImportSchemaTable.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.tool.schema;
+import org.apache.iotdb.cli.i18n.CliMessages;
import org.apache.iotdb.cli.utils.IoTPrinter;
import org.apache.iotdb.isession.ITableSession;
import org.apache.iotdb.isession.SessionDataSet;
@@ -187,6 +188,6 @@ protected void importSchemaFromSqlFile(File file) {
@Override
protected void importSchemaFromCsvFile(File file) {
- throw new UnsupportedOperationException("Not supported yet.");
+ throw new UnsupportedOperationException(CliMessages.NOT_SUPPORTED_YET);
}
}
diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ImportSchemaTree.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ImportSchemaTree.java
index c5edb7d52e929..9526447fd9654 100644
--- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ImportSchemaTree.java
+++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/schema/ImportSchemaTree.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.tool.schema;
+import org.apache.iotdb.cli.i18n.CliMessages;
import org.apache.iotdb.cli.utils.IoTPrinter;
import org.apache.iotdb.commons.exception.IllegalPathException;
import org.apache.iotdb.rpc.IoTDBConnectionException;
@@ -93,7 +94,7 @@ protected Runnable getAsyncImportRunnable() {
@Override
protected void importSchemaFromSqlFile(File file) {
- throw new UnsupportedOperationException("Not supported yet.");
+ throw new UnsupportedOperationException(CliMessages.NOT_SUPPORTED_YET);
}
@Override
diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/AbstractTsFileTool.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/AbstractTsFileTool.java
index 64b9f806d5889..cf676c81ef276 100644
--- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/AbstractTsFileTool.java
+++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/AbstractTsFileTool.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.tool.tsfile;
+import org.apache.iotdb.cli.i18n.CliMessages;
import org.apache.iotdb.cli.utils.IoTPrinter;
import org.apache.iotdb.exception.ArgsErrorException;
import org.apache.iotdb.session.Session;
@@ -67,7 +68,7 @@ protected static String checkRequiredArg(String arg, String name, CommandLine co
if (str == null) {
String msg = String.format("Required values for option '%s' not provided", name);
ioTPrinter.println(msg);
- ioTPrinter.println("Use -help for more information");
+ ioTPrinter.println(CliMessages.USE_HELP_FOR_MORE);
throw new ArgsErrorException(msg);
}
return str;
diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/ImportTsFileRemotely.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/ImportTsFileRemotely.java
index aca33ba4dbf3d..bec4a13c37214 100644
--- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/ImportTsFileRemotely.java
+++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/tsfile/ImportTsFileRemotely.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.tool.tsfile;
+import org.apache.iotdb.cli.i18n.CliMessages;
import org.apache.iotdb.cli.utils.IoTPrinter;
import org.apache.iotdb.common.rpc.thrift.TEndPoint;
import org.apache.iotdb.common.rpc.thrift.TSStatus;
@@ -309,7 +310,7 @@ private void initClient() {
"",
"");
} catch (final TTransportException e) {
- throw new PipeException("Sync client init error because " + e.getMessage());
+ throw new PipeException(String.format(CliMessages.SYNC_CLIENT_INIT_ERROR, e.getMessage()));
}
}
diff --git a/iotdb-client/isession/pom.xml b/iotdb-client/isession/pom.xml
index ec63adb33fad1..7dfd65096053a 100644
--- a/iotdb-client/isession/pom.xml
+++ b/iotdb-client/isession/pom.xml
@@ -59,4 +59,12 @@
libthrift
+
+
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+
+
+
diff --git a/iotdb-client/isession/src/main/i18n/en/org/apache/iotdb/isession/i18n/ISessionMessages.java b/iotdb-client/isession/src/main/i18n/en/org/apache/iotdb/isession/i18n/ISessionMessages.java
new file mode 100644
index 0000000000000..76ba04d1ba568
--- /dev/null
+++ b/iotdb-client/isession/src/main/i18n/en/org/apache/iotdb/isession/i18n/ISessionMessages.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.isession.i18n;
+
+public final class ISessionMessages {
+
+ // --- SessionDataSet ---
+ public static final String OBJECT_TYPE_ONLY_SUPPORT_GET_STRING =
+ "OBJECT Type only support getString";
+
+ // --- Template ---
+ public static final String DUPLICATED_CHILD_IN_TEMPLATE =
+ "Duplicated child of node in template.";
+ public static final String NOT_DIRECT_CHILD_OF_TEMPLATE =
+ "It is not a direct child of the template: ";
+
+ private ISessionMessages() {}
+}
diff --git a/iotdb-client/isession/src/main/i18n/zh/org/apache/iotdb/isession/i18n/ISessionMessages.java b/iotdb-client/isession/src/main/i18n/zh/org/apache/iotdb/isession/i18n/ISessionMessages.java
new file mode 100644
index 0000000000000..09937e460d0d6
--- /dev/null
+++ b/iotdb-client/isession/src/main/i18n/zh/org/apache/iotdb/isession/i18n/ISessionMessages.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.isession.i18n;
+
+public final class ISessionMessages {
+
+ // --- SessionDataSet ---
+ public static final String OBJECT_TYPE_ONLY_SUPPORT_GET_STRING =
+ "OBJECT 类型仅支持 getString";
+
+ // --- Template ---
+ public static final String DUPLICATED_CHILD_IN_TEMPLATE =
+ "模板中存在重复的子节点。";
+ public static final String NOT_DIRECT_CHILD_OF_TEMPLATE =
+ "这不是该模板的直接子节点:";
+
+ private ISessionMessages() {}
+}
diff --git a/iotdb-client/isession/src/main/java/org/apache/iotdb/isession/SessionDataSet.java b/iotdb-client/isession/src/main/java/org/apache/iotdb/isession/SessionDataSet.java
index 9a1fcd291486c..885f5894411cb 100644
--- a/iotdb-client/isession/src/main/java/org/apache/iotdb/isession/SessionDataSet.java
+++ b/iotdb-client/isession/src/main/java/org/apache/iotdb/isession/SessionDataSet.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.isession;
+import org.apache.iotdb.isession.i18n.ISessionMessages;
import org.apache.iotdb.rpc.IoTDBConnectionException;
import org.apache.iotdb.rpc.IoTDBRpcDataSet;
import org.apache.iotdb.rpc.RpcUtils;
@@ -333,7 +334,7 @@ public Binary getBlob(int columnIndex) throws StatementExecutionException {
}
if (dataType.equals(TSDataType.OBJECT)) {
- throw new StatementExecutionException("OBJECT Type only support getString");
+ throw new StatementExecutionException(ISessionMessages.OBJECT_TYPE_ONLY_SUPPORT_GET_STRING);
}
return ioTDBRpcDataSet.getBinary(columnIndex);
}
@@ -345,7 +346,7 @@ public Binary getBlob(String columnName) throws StatementExecutionException {
}
if (dataType.equals(TSDataType.OBJECT)) {
- throw new StatementExecutionException("OBJECT Type only support getString");
+ throw new StatementExecutionException(ISessionMessages.OBJECT_TYPE_ONLY_SUPPORT_GET_STRING);
}
return ioTDBRpcDataSet.getBinary(columnName);
}
diff --git a/iotdb-client/isession/src/main/java/org/apache/iotdb/isession/template/Template.java b/iotdb-client/isession/src/main/java/org/apache/iotdb/isession/template/Template.java
index 90171c7dd5088..fe58a83cef1ab 100644
--- a/iotdb-client/isession/src/main/java/org/apache/iotdb/isession/template/Template.java
+++ b/iotdb-client/isession/src/main/java/org/apache/iotdb/isession/template/Template.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.isession.template;
+import org.apache.iotdb.isession.i18n.ISessionMessages;
import org.apache.iotdb.rpc.StatementExecutionException;
import org.apache.tsfile.common.constant.TsFileConstant;
@@ -75,7 +76,7 @@ public void setShareTime(boolean shareTime) {
public void addToTemplate(TemplateNode child) throws StatementExecutionException {
if (children.containsKey(child.getName())) {
- throw new StatementExecutionException("Duplicated child of node in template.");
+ throw new StatementExecutionException(ISessionMessages.DUPLICATED_CHILD_IN_TEMPLATE);
}
children.put(child.getName(), child);
}
@@ -84,7 +85,7 @@ public void deleteFromTemplate(String name) throws StatementExecutionException {
if (children.containsKey(name)) {
children.remove(name);
} else {
- throw new StatementExecutionException("It is not a direct child of the template: " + name);
+ throw new StatementExecutionException(ISessionMessages.NOT_DIRECT_CHILD_OF_TEMPLATE + name);
}
}
diff --git a/iotdb-client/jdbc/src/main/i18n/en/org/apache/iotdb/jdbc/i18n/JdbcMessages.java b/iotdb-client/jdbc/src/main/i18n/en/org/apache/iotdb/jdbc/i18n/JdbcMessages.java
new file mode 100644
index 0000000000000..2e52fb640ae61
--- /dev/null
+++ b/iotdb-client/jdbc/src/main/i18n/en/org/apache/iotdb/jdbc/i18n/JdbcMessages.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.jdbc.i18n;
+
+public final class JdbcMessages {
+
+ // IoTDBDriver
+ public static final String REGISTER_DRIVER_ERROR =
+ "Error occurs when registering TsFile driver";
+ public static final String METHOD_NOT_SUPPORTED = "Method not supported";
+
+ // StringUtils
+ public static final String TO_PLAIN_STRING_ERROR = "To plain String method Error:";
+ public static final String CONSISTENT_TO_STRING_ERROR = "consistent to String Error:";
+
+ // GroupedLSBWatermarkEncoder
+ public static final String CANNOT_FIND_MD5 = "ERROR: Cannot find MD5 algorithm!";
+ public static final String MIN_BIT_BIGGER_THAN_MAX =
+ "Error: minBitPosition is bigger than maxBitPosition";
+
+ // IoTDBTracingInfo
+ public static final String INVALID_STATISTICS_NAME = "Invalid statistics name!";
+
+ // IoTDBStatement
+ public static final String CANNOT_UNWRAP_TO = "Cannot unwrap to ";
+ public static final String CANCEL_STATEMENT_ERROR = "Error occurs when canceling statement.";
+ public static final String CLOSE_STATEMENT_ERROR = "Error occurs when closing statement.";
+ public static final String NOT_SUPPORT_CLOSE_ON_COMPLETION = "Not support closeOnCompletion";
+ public static final String QUERY_RESULT_SHOULD_NOT_BE_NULL =
+ "execResp.queryResult should never be null.";
+ public static final String DIRECTION_NOT_SUPPORTED = "direction %d is not supported!";
+ public static final String FETCH_SIZE_MUST_BE_NON_NEGATIVE = "fetchSize %d must be >= 0!";
+ public static final String NOT_SUPPORT_GET_GENERATED_KEYS = "Not support getGeneratedKeys";
+ public static final String NOT_SUPPORT_GET_MAX_FIELD_SIZE = "Not support getMaxFieldSize";
+ public static final String MAX_ROWS_MUST_BE_NON_NEGATIVE = "maxRows %d must be >= 0!";
+ public static final String NOT_SUPPORT_GET_MORE_RESULTS = "Not support getMoreResults";
+ public static final String NOT_SUPPORT_GET_RESULT_SET_CONCURRENCY =
+ "Not support getResultSetConcurrency";
+ public static final String NOT_SUPPORT_GET_RESULT_SET_HOLDABILITY =
+ "Not support getResultSetHoldability";
+ public static final String NOT_SUPPORT_IS_CLOSE_ON_COMPLETION =
+ "Not support isCloseOnCompletion";
+ public static final String NOT_SUPPORT_IS_POOLABLE = "Not support isPoolable";
+ public static final String NOT_SUPPORT_SET_POOLABLE = "Not support setPoolable";
+ public static final String NOT_SUPPORT_SET_CURSOR_NAME = "Not support setCursorName";
+ public static final String NOT_SUPPORT_SET_ESCAPE_PROCESSING =
+ "Not support setEscapeProcessing";
+ public static final String CANNOT_AFTER_CONNECTION_CLOSED =
+ "Cannot %s after connection has been closed!";
+
+ // IoTDBTablePreparedStatement
+ public static final String FAILED_TO_PREPARE_STATEMENT = "Failed to prepare statement: ";
+ public static final String PARAMETER_UNSET = "Parameter #%d is unset";
+ public static final String FAILED_TO_EXECUTE_PREPARED_STATEMENT =
+ "Failed to execute prepared statement: ";
+ public static final String FAILED_TO_DEALLOCATE_PREPARED_STATEMENT =
+ "Failed to deallocate prepared statement: {}";
+ public static final String ERROR_DEALLOCATING_PREPARED_STATEMENT =
+ "Error deallocating prepared statement";
+ public static final String FAILED_TO_GET_TIME_PRECISION =
+ "Failed to get time precision: ";
+ public static final String FAILED_TO_READ_BINARY_STREAM =
+ "Failed to read binary stream: ";
+
+ // IoTDBPreparedStatement
+ public static final String NO_TYPE_MATCHED = "No type was matched";
+ public static final String SQL_DEBUG = "SQL {}";
+ public static final String PARAMETERS_DEBUG = "parameters {}";
+
+ // IoTDBResultMetadata
+ public static final String NO_COLUMN_EXISTS = "No column exists";
+ public static final String COLUMN_DOES_NOT_EXIST = "column %d does not exist";
+ public static final String COLUMN_INDEX_START_FROM_1 = "column index should start from 1";
+
+ // IoTDBAbstractDatabaseMetadata
+ public static final String NO_DATA_TYPE_MATCHED = "No data type was matched: {}";
+ public static final String GET_READ_ONLY_ERROR = "Get is readOnly error: {}";
+ public static final String CANNOT_GET_READ_ONLY_MODE = "Can not get the read-only mode";
+ public static final String GET_SYSTEM_FUNCTIONS_ERROR = "Get system functions error: {}";
+ public static final String GET_MAX_CONCURRENT_CLIENT_ERROR =
+ "Get max concurrentClientNum error: {}";
+ public static final String GET_MAX_STATEMENT_LENGTH_ERROR =
+ "Get max statement length error: {}";
+ public static final String GET_PROCEDURES_ERROR = "Get procedures error: {}";
+ public static final String GET_PROCEDURE_COLUMNS_ERROR = "Get procedure columns error: {}";
+ public static final String GET_BEST_ROW_IDENTIFIER_ERROR =
+ "Get best row identifier error: {}";
+ public static final String GET_VERSION_COLUMNS_ERROR = "Get version columns error: {}";
+ public static final String GET_IMPORT_KEYS_ERROR = "Get import keys error: {}";
+ public static final String GET_EXPORTED_KEYS_ERROR = "Get exported keys error: {}";
+ public static final String GET_CROSS_REFERENCE_ERROR = "Get cross reference error: {}";
+ public static final String GET_INDEX_INFO_ERROR = "Get index info error: {}";
+ public static final String GET_UDTS_ERROR = "Get UDTS error: {}";
+ public static final String GET_SUPER_TYPES_ERROR = "Get super types error: {}";
+ public static final String GET_SUPER_TABLES_ERROR = "Get super tables error: {}";
+ public static final String GET_ATTRIBUTES_ERROR = "Get attributes error: {}";
+ public static final String GET_DB_MAJOR_VERSION_ERROR =
+ "Get database major version error: {}";
+ public static final String GET_DB_MINOR_VERSION_ERROR =
+ "Get database minor version error: {}";
+
+ // IoTDBDatabaseMetadata
+ public static final String INIT_SQL_KEYWORDS_ERROR =
+ "Error when initializing SQL keywords: ";
+ public static final String GET_TABLES_SQL = "Get tables: sql: {}";
+ public static final String GET_PRIMARY_KEYS_ERROR = "Get primary keys error: {}";
+ public static final String FAILED_TO_FETCH_METADATA_JSON =
+ "Failed to fetch metadata in json because: ";
+
+ // IoTDBRelationalDatabaseMetadata
+ public static final String RELATIONAL_INIT_SQL_KEYWORDS_ERROR =
+ "Error when initializing SQL keywords: ";
+ public static final String RELATIONAL_GET_PRIMARY_KEYS_ERROR =
+ "Get primary keys error: {}";
+
+ // IoTDBDataSource
+ public static final String GET_CONNECTION_ERROR = "get connection error:";
+
+ // IoTDBDataSourceFactory
+ public static final String REMAINING_PROPERTIES = "Remaining properties {}";
+
+ // IoTDBJDBCResultSet
+ public static final String CLOSE_SERVER_SIDE_ERROR =
+ "Error occurs for close operation in server side because ";
+ public static final String CLOSE_CONNECTING_ERROR =
+ "Error occurs when connecting to server for close operation ";
+ public static final String GET_METADATA_ERROR = "get meta data error: {}";
+
+ // IoTDBConnection
+ public static final String INPUT_URL_NULL = "Input url cannot be null";
+ public static final String NOT_SUPPORT_IS_WRAPPER_FOR = "Does not support isWrapperFor";
+ public static final String NOT_SUPPORT_UNWRAP = "Does not support unwrap";
+ public static final String NOT_SUPPORT_ABORT = "Does not support abort";
+ public static final String NOT_SUPPORT_CREATE_ARRAY_OF = "Does not support createArrayOf";
+ public static final String NOT_SUPPORT_CREATE_BLOB = "Does not support createBlob";
+ public static final String NOT_SUPPORT_CREATE_CLOB = "Does not support createClob";
+ public static final String NOT_SUPPORT_CREATE_NCLOB = "Does not support createNClob";
+ public static final String NOT_SUPPORT_CREATE_SQLXML = "Does not support createSQLXML";
+ public static final String CANNOT_CREATE_STATEMENT_CLOSED =
+ "Cannot create statement because connection is closed";
+ public static final String NOT_SUPPORT_CREATE_STATEMENT = "Does not support createStatement";
+ public static final String NOT_SUPPORT_CREATE_STRUCT = "Does not support createStruct";
+ public static final String NOT_SUPPORT_GET_CLIENT_INFO = "Does not support getClientInfo";
+ public static final String NOT_SUPPORT_SET_CLIENT_INFO = "Does not support setClientInfo";
+ public static final String NOT_SUPPORT_SET_HOLDABILITY = "Does not support setHoldability";
+ public static final String NOT_SUPPORT_GET_SCHEMA = "Does not support getSchema";
+ public static final String NOT_SUPPORT_SET_TRANSACTION_ISOLATION =
+ "Does not support setTransactionIsolation";
+ public static final String NOT_SUPPORT_GET_TYPE_MAP = "Does not support getTypeMap";
+ public static final String NOT_SUPPORT_SET_TYPE_MAP = "Does not support setTypeMap";
+ public static final String NOT_SUPPORT_READ_ONLY = "Does not support readOnly";
+ public static final String NOT_SUPPORT_NATIVE_SQL = "Does not support nativeSQL";
+ public static final String NOT_SUPPORT_RELEASE_SAVEPOINT =
+ "Does not support releaseSavepoint";
+ public static final String SET_TIMEZONE_ERROR = "Set time_zone error: ";
+ public static final String NOT_SUPPORT_CLIENT_INFO_TYPE =
+ "Does not support this type of client info: ";
+ public static final String NOT_SUPPORT_SET_NETWORK_TIMEOUT =
+ "Does not support setNetworkTimeout";
+ public static final String QUERY_TIMEOUT_MUST_BE_NON_NEGATIVE =
+ "queryTimeout %d must be >= 0!";
+ public static final String NOT_SUPPORT_SET_SAVEPOINT = "Does not support setSavepoint";
+ public static final String USE_DATABASE_ERROR = "Use database error: {}";
+ public static final String RECONNECT_INTERRUPTED = "reconnect is interrupted.";
+ public static final String PARAMETER_IS_UNSET_PREFIX = "Parameter #";
+ public static final String PARAMETER_IS_UNSET_SUFFIX = " is unset";
+
+ private JdbcMessages() {}
+}
diff --git a/iotdb-client/jdbc/src/main/i18n/zh/org/apache/iotdb/jdbc/i18n/JdbcMessages.java b/iotdb-client/jdbc/src/main/i18n/zh/org/apache/iotdb/jdbc/i18n/JdbcMessages.java
new file mode 100644
index 0000000000000..831fd9c727d94
--- /dev/null
+++ b/iotdb-client/jdbc/src/main/i18n/zh/org/apache/iotdb/jdbc/i18n/JdbcMessages.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.jdbc.i18n;
+
+public final class JdbcMessages {
+
+ // IoTDBDriver
+ public static final String REGISTER_DRIVER_ERROR =
+ "注册 TsFile 驱动时发生错误";
+ public static final String METHOD_NOT_SUPPORTED = "不支持此方法";
+
+ // StringUtils
+ public static final String TO_PLAIN_STRING_ERROR = "转换为纯文本字符串方法错误:";
+ public static final String CONSISTENT_TO_STRING_ERROR = "一致性 toString 错误:";
+
+ // GroupedLSBWatermarkEncoder
+ public static final String CANNOT_FIND_MD5 = "错误:无法找到 MD5 算法!";
+ public static final String MIN_BIT_BIGGER_THAN_MAX =
+ "错误:minBitPosition 大于 maxBitPosition";
+
+ // IoTDBTracingInfo
+ public static final String INVALID_STATISTICS_NAME = "无效的统计名称!";
+
+ // IoTDBStatement
+ public static final String CANNOT_UNWRAP_TO = "无法转换为 ";
+ public static final String CANCEL_STATEMENT_ERROR = "取消语句时发生错误。";
+ public static final String CLOSE_STATEMENT_ERROR = "关闭语句时发生错误。";
+ public static final String NOT_SUPPORT_CLOSE_ON_COMPLETION = "不支持 closeOnCompletion";
+ public static final String QUERY_RESULT_SHOULD_NOT_BE_NULL =
+ "execResp.queryResult 不应为 null。";
+ public static final String DIRECTION_NOT_SUPPORTED = "不支持方向 %d!";
+ public static final String FETCH_SIZE_MUST_BE_NON_NEGATIVE = "fetchSize %d 必须 >= 0!";
+ public static final String NOT_SUPPORT_GET_GENERATED_KEYS = "不支持 getGeneratedKeys";
+ public static final String NOT_SUPPORT_GET_MAX_FIELD_SIZE = "不支持 getMaxFieldSize";
+ public static final String MAX_ROWS_MUST_BE_NON_NEGATIVE = "maxRows %d 必须 >= 0!";
+ public static final String NOT_SUPPORT_GET_MORE_RESULTS = "不支持 getMoreResults";
+ public static final String NOT_SUPPORT_GET_RESULT_SET_CONCURRENCY =
+ "不支持 getResultSetConcurrency";
+ public static final String NOT_SUPPORT_GET_RESULT_SET_HOLDABILITY =
+ "不支持 getResultSetHoldability";
+ public static final String NOT_SUPPORT_IS_CLOSE_ON_COMPLETION =
+ "不支持 isCloseOnCompletion";
+ public static final String NOT_SUPPORT_IS_POOLABLE = "不支持 isPoolable";
+ public static final String NOT_SUPPORT_SET_POOLABLE = "不支持 setPoolable";
+ public static final String NOT_SUPPORT_SET_CURSOR_NAME = "不支持 setCursorName";
+ public static final String NOT_SUPPORT_SET_ESCAPE_PROCESSING =
+ "不支持 setEscapeProcessing";
+ public static final String CANNOT_AFTER_CONNECTION_CLOSED =
+ "连接已关闭后无法执行 %s!";
+
+ // IoTDBTablePreparedStatement
+ public static final String FAILED_TO_PREPARE_STATEMENT = "预编译语句失败:";
+ public static final String PARAMETER_UNSET = "参数 #%d 未设置";
+ public static final String FAILED_TO_EXECUTE_PREPARED_STATEMENT =
+ "执行预编译语句失败:";
+ public static final String FAILED_TO_DEALLOCATE_PREPARED_STATEMENT =
+ "释放预编译语句失败:{}";
+ public static final String ERROR_DEALLOCATING_PREPARED_STATEMENT =
+ "释放预编译语句时出错";
+ public static final String FAILED_TO_GET_TIME_PRECISION =
+ "获取时间精度失败:";
+ public static final String FAILED_TO_READ_BINARY_STREAM =
+ "读取二进制流失败:";
+
+ // IoTDBPreparedStatement
+ public static final String NO_TYPE_MATCHED = "没有匹配的类型";
+ public static final String SQL_DEBUG = "SQL {}";
+ public static final String PARAMETERS_DEBUG = "参数 {}";
+
+ // IoTDBResultMetadata
+ public static final String NO_COLUMN_EXISTS = "不存在任何列";
+ public static final String COLUMN_DOES_NOT_EXIST = "列 %d 不存在";
+ public static final String COLUMN_INDEX_START_FROM_1 = "列索引应从 1 开始";
+
+ // IoTDBAbstractDatabaseMetadata
+ public static final String NO_DATA_TYPE_MATCHED = "没有匹配的数据类型:{}";
+ public static final String GET_READ_ONLY_ERROR = "获取只读模式错误:{}";
+ public static final String CANNOT_GET_READ_ONLY_MODE = "无法获取只读模式";
+ public static final String GET_SYSTEM_FUNCTIONS_ERROR = "获取系统函数错误:{}";
+ public static final String GET_MAX_CONCURRENT_CLIENT_ERROR =
+ "获取最大并发客户端数错误:{}";
+ public static final String GET_MAX_STATEMENT_LENGTH_ERROR =
+ "获取最大语句长度错误:{}";
+ public static final String GET_PROCEDURES_ERROR = "获取存储过程错误:{}";
+ public static final String GET_PROCEDURE_COLUMNS_ERROR = "获取存储过程列错误:{}";
+ public static final String GET_BEST_ROW_IDENTIFIER_ERROR =
+ "获取最佳行标识符错误:{}";
+ public static final String GET_VERSION_COLUMNS_ERROR = "获取版本列错误:{}";
+ public static final String GET_IMPORT_KEYS_ERROR = "获取导入键错误:{}";
+ public static final String GET_EXPORTED_KEYS_ERROR = "获取导出键错误:{}";
+ public static final String GET_CROSS_REFERENCE_ERROR = "获取交叉引用错误:{}";
+ public static final String GET_INDEX_INFO_ERROR = "获取索引信息错误:{}";
+ public static final String GET_UDTS_ERROR = "获取 UDT 错误:{}";
+ public static final String GET_SUPER_TYPES_ERROR = "获取父类型错误:{}";
+ public static final String GET_SUPER_TABLES_ERROR = "获取父表错误:{}";
+ public static final String GET_ATTRIBUTES_ERROR = "获取属性错误:{}";
+ public static final String GET_DB_MAJOR_VERSION_ERROR =
+ "获取数据库主版本号错误:{}";
+ public static final String GET_DB_MINOR_VERSION_ERROR =
+ "获取数据库次版本号错误:{}";
+
+ // IoTDBDatabaseMetadata
+ public static final String INIT_SQL_KEYWORDS_ERROR =
+ "初始化 SQL 关键字时出错:";
+ public static final String GET_TABLES_SQL = "获取表:SQL:{}";
+ public static final String GET_PRIMARY_KEYS_ERROR = "获取主键错误:{}";
+ public static final String FAILED_TO_FETCH_METADATA_JSON =
+ "获取 JSON 格式的元数据失败:";
+
+ // IoTDBRelationalDatabaseMetadata
+ public static final String RELATIONAL_INIT_SQL_KEYWORDS_ERROR =
+ "初始化 SQL 关键字时出错:";
+ public static final String RELATIONAL_GET_PRIMARY_KEYS_ERROR =
+ "获取主键错误:{}";
+
+ // IoTDBDataSource
+ public static final String GET_CONNECTION_ERROR = "获取连接错误:";
+
+ // IoTDBDataSourceFactory
+ public static final String REMAINING_PROPERTIES = "剩余属性 {}";
+
+ // IoTDBJDBCResultSet
+ public static final String CLOSE_SERVER_SIDE_ERROR =
+ "服务端关闭操作时发生错误 ";
+ public static final String CLOSE_CONNECTING_ERROR =
+ "连接服务器进行关闭操作时发生错误 ";
+ public static final String GET_METADATA_ERROR = "获取元数据错误:{}";
+
+ // IoTDBConnection
+ public static final String INPUT_URL_NULL = "输入的 URL 不能为 null";
+ public static final String NOT_SUPPORT_IS_WRAPPER_FOR = "不支持 isWrapperFor";
+ public static final String NOT_SUPPORT_UNWRAP = "不支持 unwrap";
+ public static final String NOT_SUPPORT_ABORT = "不支持 abort";
+ public static final String NOT_SUPPORT_CREATE_ARRAY_OF = "不支持 createArrayOf";
+ public static final String NOT_SUPPORT_CREATE_BLOB = "不支持 createBlob";
+ public static final String NOT_SUPPORT_CREATE_CLOB = "不支持 createClob";
+ public static final String NOT_SUPPORT_CREATE_NCLOB = "不支持 createNClob";
+ public static final String NOT_SUPPORT_CREATE_SQLXML = "不支持 createSQLXML";
+ public static final String CANNOT_CREATE_STATEMENT_CLOSED =
+ "连接已关闭,无法创建语句";
+ public static final String NOT_SUPPORT_CREATE_STATEMENT = "不支持 createStatement";
+ public static final String NOT_SUPPORT_CREATE_STRUCT = "不支持 createStruct";
+ public static final String NOT_SUPPORT_GET_CLIENT_INFO = "不支持 getClientInfo";
+ public static final String NOT_SUPPORT_SET_CLIENT_INFO = "不支持 setClientInfo";
+ public static final String NOT_SUPPORT_SET_HOLDABILITY = "不支持 setHoldability";
+ public static final String NOT_SUPPORT_GET_SCHEMA = "不支持 getSchema";
+ public static final String NOT_SUPPORT_SET_TRANSACTION_ISOLATION =
+ "不支持 setTransactionIsolation";
+ public static final String NOT_SUPPORT_GET_TYPE_MAP = "不支持 getTypeMap";
+ public static final String NOT_SUPPORT_SET_TYPE_MAP = "不支持 setTypeMap";
+ public static final String NOT_SUPPORT_READ_ONLY = "不支持 readOnly";
+ public static final String NOT_SUPPORT_NATIVE_SQL = "不支持 nativeSQL";
+ public static final String NOT_SUPPORT_RELEASE_SAVEPOINT =
+ "不支持 releaseSavepoint";
+ public static final String SET_TIMEZONE_ERROR = "设置时区错误:";
+ public static final String NOT_SUPPORT_CLIENT_INFO_TYPE =
+ "不支持此类型的客户端信息:";
+ public static final String NOT_SUPPORT_SET_NETWORK_TIMEOUT =
+ "不支持 setNetworkTimeout";
+ public static final String QUERY_TIMEOUT_MUST_BE_NON_NEGATIVE =
+ "queryTimeout %d 必须 >= 0!";
+ public static final String NOT_SUPPORT_SET_SAVEPOINT = "不支持 setSavepoint";
+ public static final String USE_DATABASE_ERROR = "使用数据库错误:{}";
+ public static final String RECONNECT_INTERRUPTED = "重新连接被中断。";
+ public static final String PARAMETER_IS_UNSET_PREFIX = "参数 #";
+ public static final String PARAMETER_IS_UNSET_SUFFIX = " 未设置";
+
+ private JdbcMessages() {}
+}
diff --git a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/Constant.java b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/Constant.java
index ecb0b552a81d9..2570431bed72b 100644
--- a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/Constant.java
+++ b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/Constant.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.jdbc;
+import org.apache.iotdb.jdbc.i18n.JdbcMessages;
+
public class Constant {
private Constant() {}
@@ -27,7 +29,7 @@ private Constant() {}
public static final String TABLE_DIALECT = "table";
public static final String TREE_DIALECT = "tree";
- public static final String METHOD_NOT_SUPPORTED = "Method not supported";
+ public static final String METHOD_NOT_SUPPORTED = JdbcMessages.METHOD_NOT_SUPPORTED;
static final String PARAMETER_NOT_NULL = "The parameter cannot be null";
static final String PARAMETER_SUPPORTED =
"Parameter only supports BOOLEAN,INT32,INT64,FLOAT,DOUBLE,TEXT data type";
diff --git a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/GroupedLSBWatermarkEncoder.java b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/GroupedLSBWatermarkEncoder.java
index d1d830fb2c2cd..58579f7d437d9 100644
--- a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/GroupedLSBWatermarkEncoder.java
+++ b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/GroupedLSBWatermarkEncoder.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.jdbc;
+import org.apache.iotdb.jdbc.i18n.JdbcMessages;
+
import org.apache.thrift.EncodingUtils;
import org.apache.tsfile.enums.TSDataType;
import org.apache.tsfile.read.common.Field;
@@ -52,7 +54,7 @@ public static int hashMod(String val, Integer base) {
try {
md = MessageDigest.getInstance("MD5");
} catch (NoSuchAlgorithmException e) {
- throw new RuntimeException("ERROR: Cannot find MD5 algorithm!");
+ throw new RuntimeException(JdbcMessages.CANNOT_FIND_MD5);
}
md.update(val.getBytes());
BigInteger resultInteger = new BigInteger(1, md.digest());
@@ -69,7 +71,7 @@ private int getGroupId(long timestamp) {
private int getBitPosition(long timestamp) {
if (maxBitPosition <= minBitPosition) {
- throw new RuntimeException("Error: minBitPosition is bigger than maxBitPosition");
+ throw new RuntimeException(JdbcMessages.MIN_BIT_BIGGER_THAN_MAX);
}
int range = maxBitPosition - minBitPosition;
return minBitPosition
diff --git a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBAbstractDatabaseMetadata.java b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBAbstractDatabaseMetadata.java
index f14e2ee6b3270..0f81bca6069aa 100644
--- a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBAbstractDatabaseMetadata.java
+++ b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBAbstractDatabaseMetadata.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.jdbc;
+import org.apache.iotdb.jdbc.i18n.JdbcMessages;
import org.apache.iotdb.service.rpc.thrift.IClientRPCService;
import org.apache.thrift.TException;
@@ -51,7 +52,7 @@
public abstract class IoTDBAbstractDatabaseMetadata implements DatabaseMetaData {
private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBAbstractDatabaseMetadata.class);
- private static final String METHOD_NOT_SUPPORTED_STRING = "Method not supported";
+ private static final String METHOD_NOT_SUPPORTED_STRING = JdbcMessages.METHOD_NOT_SUPPORTED;
protected static final String CONVERT_ERROR_MSG = "Convert tsBlock error: {}";
protected IoTDBConnection connection;
@@ -420,7 +421,7 @@ public static ByteBuffer convertTsBlock(
tsBlockBuilder.getColumnBuilder(j).writeBoolean((boolean) valuesInRow.get(j));
break;
default:
- LOGGER.error("No data type was matched: {}", columnType);
+ LOGGER.error(JdbcMessages.NO_DATA_TYPE_MATCHED, columnType);
break;
}
}
@@ -552,9 +553,9 @@ public boolean isReadOnly() throws SQLException {
try {
return client.getProperties().isReadOnly;
} catch (TException e) {
- LOGGER.error("Get is readOnly error: {}", e.getMessage());
+ LOGGER.error(JdbcMessages.GET_READ_ONLY_ERROR, e.getMessage());
}
- throw new SQLException("Can not get the read-only mode");
+ throw new SQLException(JdbcMessages.CANNOT_GET_READ_ONLY_MODE);
}
@Override
@@ -695,7 +696,7 @@ public String getSystemFunctions() throws SQLException {
result = result.substring(0, result.length() - 1);
}
} catch (Exception ex) {
- LOGGER.error("Get system functions error: {}", ex.getMessage());
+ LOGGER.error(JdbcMessages.GET_SYSTEM_FUNCTIONS_ERROR, ex.getMessage());
} finally {
close(resultSet, statement);
}
@@ -1040,7 +1041,7 @@ public int getMaxConnections() throws SQLException {
try {
maxcount = client.getProperties().getMaxConcurrentClientNum();
} catch (TException e) {
- LOGGER.error("Get max concurrentClientNUm error: {}", e.getMessage());
+ LOGGER.error(JdbcMessages.GET_MAX_CONCURRENT_CLIENT_ERROR, e.getMessage());
}
return maxcount;
}
@@ -1085,7 +1086,7 @@ public int getMaxStatementLength() throws SQLException {
try {
return client.getProperties().getThriftMaxFrameSize();
} catch (TException e) {
- LOGGER.error("Get max statement length error: {}", e.getMessage());
+ LOGGER.error(JdbcMessages.GET_MAX_STATEMENT_LENGTH_ERROR, e.getMessage());
}
return 0;
}
@@ -1166,7 +1167,7 @@ public ResultSet getProcedures(String catalog, String schemaPattern, String proc
columnNameIndex.put(fields[i].getName(), i);
}
} catch (Exception e) {
- LOGGER.error("Get procedures error: {}", e.getMessage());
+ LOGGER.error(JdbcMessages.GET_PROCEDURES_ERROR, e.getMessage());
} finally {
close(null, stmt);
}
@@ -1225,7 +1226,7 @@ public ResultSet getProcedureColumns(
columnNameIndex.put(fields[i].getName(), i);
}
} catch (Exception e) {
- LOGGER.error("Get procedure columns error: {}", e.getMessage());
+ LOGGER.error(JdbcMessages.GET_PROCEDURE_COLUMNS_ERROR, e.getMessage());
} finally {
close(null, stmt);
}
@@ -1610,7 +1611,7 @@ public ResultSet getBestRowIdentifier(
columnNameIndex.put(fields[i].getName(), i);
}
} catch (Exception e) {
- LOGGER.error("Get best row identifier error: {}", e.getMessage());
+ LOGGER.error(JdbcMessages.GET_BEST_ROW_IDENTIFIER_ERROR, e.getMessage());
} finally {
close(null, stmt);
}
@@ -1655,7 +1656,7 @@ public ResultSet getVersionColumns(String catalog, String schema, String table)
columnNameIndex.put(fields[i].getName(), i);
}
} catch (Exception e) {
- LOGGER.error("Get version columns error: {}", e.getMessage());
+ LOGGER.error(JdbcMessages.GET_VERSION_COLUMNS_ERROR, e.getMessage());
} finally {
close(null, stmt);
}
@@ -1709,7 +1710,7 @@ public ResultSet getImportedKeys(String catalog, String schema, String table)
columnNameIndex.put(fields[i].getName(), i);
}
} catch (Exception e) {
- LOGGER.error("Get import keys error: {}", e.getMessage());
+ LOGGER.error(JdbcMessages.GET_IMPORT_KEYS_ERROR, e.getMessage());
} finally {
close(null, stmt);
}
@@ -1760,7 +1761,7 @@ public ResultSet getExportedKeys(String catalog, String schema, String table)
columnNameIndex.put(fields[i].getName(), i);
}
} catch (Exception e) {
- LOGGER.error("Get exported keys error: {}", e.getMessage());
+ LOGGER.error(JdbcMessages.GET_EXPORTED_KEYS_ERROR, e.getMessage());
} finally {
close(null, stmt);
}
@@ -1817,7 +1818,7 @@ public ResultSet getCrossReference(
columnNameIndex.put(fields[i].getName(), i);
}
} catch (Exception e) {
- LOGGER.error("Get cross reference error: {}", e.getMessage());
+ LOGGER.error(JdbcMessages.GET_CROSS_REFERENCE_ERROR, e.getMessage());
} finally {
close(null, stmt);
}
@@ -2154,7 +2155,7 @@ public ResultSet getIndexInfo(
columnNameIndex.put(fields[i].getName(), i);
}
} catch (Exception e) {
- LOGGER.error("Get index info error: {}", e.getMessage());
+ LOGGER.error(JdbcMessages.GET_INDEX_INFO_ERROR, e.getMessage());
} finally {
close(null, stmt);
}
@@ -2259,7 +2260,7 @@ public ResultSet getUDTs(
columnNameIndex.put(fields[i].getName(), i);
}
} catch (Exception e) {
- LOGGER.error("Get UDTS error: {}", e.getMessage());
+ LOGGER.error(JdbcMessages.GET_UDTS_ERROR, e.getMessage());
} finally {
close(null, stmt);
}
@@ -2327,7 +2328,7 @@ public ResultSet getSuperTypes(String catalog, String schemaPattern, String type
columnNameIndex.put(fields[i].getName(), i);
}
} catch (Exception e) {
- LOGGER.error("Get super types error: {}", e.getMessage());
+ LOGGER.error(JdbcMessages.GET_SUPER_TYPES_ERROR, e.getMessage());
} finally {
close(null, stmt);
}
@@ -2368,7 +2369,7 @@ public ResultSet getSuperTables(String catalog, String schemaPattern, String tab
columnNameIndex.put(fields[i].getName(), i);
}
} catch (Exception e) {
- LOGGER.error("Get super tables error: {}", e.getMessage());
+ LOGGER.error(JdbcMessages.GET_SUPER_TABLES_ERROR, e.getMessage());
} finally {
close(null, stmt);
}
@@ -2427,7 +2428,7 @@ public ResultSet getAttributes(
columnNameIndex.put(fields[i].getName(), i);
}
} catch (Exception e) {
- LOGGER.error("Get attributes error: {}", e.getMessage());
+ LOGGER.error(JdbcMessages.GET_ATTRIBUTES_ERROR, e.getMessage());
} finally {
close(null, stmt);
}
@@ -2469,7 +2470,7 @@ public int getDatabaseMajorVersion() throws SQLException {
majorVersion = Integer.parseInt(versions[0]);
}
} catch (TException e) {
- LOGGER.error("Get database major version error: {}", e.getMessage());
+ LOGGER.error(JdbcMessages.GET_DB_MAJOR_VERSION_ERROR, e.getMessage());
}
return majorVersion;
}
@@ -2484,7 +2485,7 @@ public int getDatabaseMinorVersion() throws SQLException {
minorVersion = Integer.parseInt(versions[1]);
}
} catch (TException e) {
- LOGGER.error("Get database minor version error: {}", e.getMessage());
+ LOGGER.error(JdbcMessages.GET_DB_MINOR_VERSION_ERROR, e.getMessage());
}
return minorVersion;
}
diff --git a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBConnection.java b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBConnection.java
index 54b148f25e4c4..7f37e2206fb87 100644
--- a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBConnection.java
+++ b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBConnection.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.jdbc;
import org.apache.iotdb.common.rpc.thrift.TSStatus;
+import org.apache.iotdb.jdbc.i18n.JdbcMessages;
import org.apache.iotdb.jdbc.relational.IoTDBRelationalDatabaseMetadata;
import org.apache.iotdb.rpc.DeepCopyRpcTransportFactory;
import org.apache.iotdb.rpc.RpcUtils;
@@ -124,7 +125,7 @@ public IoTDBConnection() {
public IoTDBConnection(String url, Properties info) throws SQLException, TTransportException {
if (url == null) {
- throw new IoTDBURLException("Input url cannot be null");
+ throw new IoTDBURLException(JdbcMessages.INPUT_URL_NULL);
}
params = Utils.parseUrl(url, info);
this.url = url;
@@ -155,17 +156,17 @@ public IoTDBConnectionParams getParams() {
@Override
public boolean isWrapperFor(Class> arg0) throws SQLException {
- throw new SQLException("Does not support isWrapperFor");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_IS_WRAPPER_FOR);
}
@Override
public T unwrap(Class arg0) throws SQLException {
- throw new SQLException("Does not support unwrap");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_UNWRAP);
}
@Override
public void abort(Executor arg0) throws SQLException {
- throw new SQLException("Does not support abort");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_ABORT);
}
@Override
@@ -197,33 +198,33 @@ public void commit() throws SQLException {}
@Override
public Array createArrayOf(String arg0, Object[] arg1) throws SQLException {
- throw new SQLException("Does not support createArrayOf");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_CREATE_ARRAY_OF);
}
@Override
public Blob createBlob() throws SQLException {
- throw new SQLException("Does not support createBlob");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_CREATE_BLOB);
}
@Override
public Clob createClob() throws SQLException {
- throw new SQLException("Does not support createClob");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_CREATE_CLOB);
}
@Override
public NClob createNClob() throws SQLException {
- throw new SQLException("Does not suppport createNClob");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_CREATE_NCLOB);
}
@Override
public SQLXML createSQLXML() throws SQLException {
- throw new SQLException("Does not support createSQLXML");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_CREATE_SQLXML);
}
@Override
public Statement createStatement() throws SQLException {
if (isClosed) {
- throw new SQLException("Cannot create statement because connection is closed");
+ throw new SQLException(JdbcMessages.CANNOT_CREATE_STATEMENT_CLOSED);
}
return new IoTDBStatement(this, getClient(), sessionId, zoneId, charset, queryTimeout);
}
@@ -245,12 +246,12 @@ public Statement createStatement(int resultSetType, int resultSetConcurrency)
@Override
public Statement createStatement(int arg0, int arg1, int arg2) throws SQLException {
- throw new SQLException("Does not support createStatement");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_CREATE_STATEMENT);
}
@Override
public Struct createStruct(String arg0, Object[] arg1) throws SQLException {
- throw new SQLException("Does not support createStruct");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_CREATE_STRUCT);
}
@Override
@@ -286,7 +287,7 @@ public void setCatalog(String arg0) throws SQLException {
stmt.execute();
} catch (SQLException e) {
stmt.close();
- logger.error("Use database error: {}", e.getMessage());
+ logger.error(JdbcMessages.USE_DATABASE_ERROR, e.getMessage());
throw e;
}
}
@@ -294,17 +295,17 @@ public void setCatalog(String arg0) throws SQLException {
@Override
public Properties getClientInfo() throws SQLException {
- throw new SQLException("Does not support getClientInfo");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_GET_CLIENT_INFO);
}
@Override
public void setClientInfo(Properties arg0) throws SQLClientInfoException {
- throw new SQLClientInfoException("Does not support setClientInfo", null);
+ throw new SQLClientInfoException(JdbcMessages.NOT_SUPPORT_SET_CLIENT_INFO, null);
}
@Override
public String getClientInfo(String arg0) throws SQLException {
- throw new SQLException("Does not support getClientInfo");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_GET_CLIENT_INFO);
}
@Override
@@ -314,13 +315,13 @@ public int getHoldability() {
@Override
public void setHoldability(int arg0) throws SQLException {
- throw new SQLException("Does not support setHoldability");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_SET_HOLDABILITY);
}
@Override
public DatabaseMetaData getMetaData() throws SQLException {
if (isClosed) {
- throw new SQLException("Cannot create statement because connection is closed");
+ throw new SQLException(JdbcMessages.CANNOT_CREATE_STATEMENT_CLOSED);
}
if (getSqlDialect().equals(Constant.TABLE_DIALECT)) {
return new IoTDBRelationalDatabaseMetadata(this, getClient(), sessionId, zoneId);
@@ -338,7 +339,7 @@ public String getSchema() throws SQLException {
if (getSqlDialect().equals(Constant.TABLE_DIALECT)) {
return getDatabase();
}
- throw new SQLException("Does not support getSchema");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_GET_SCHEMA);
}
@Override
@@ -357,7 +358,7 @@ public void setSchema(String arg0) throws SQLException {
stmt.execute();
} catch (SQLException e) {
stmt.close();
- logger.error("Use database error: {}", e.getMessage());
+ logger.error(JdbcMessages.USE_DATABASE_ERROR, e.getMessage());
throw e;
}
}
@@ -370,17 +371,17 @@ public int getTransactionIsolation() {
@Override
public void setTransactionIsolation(int arg0) throws SQLException {
- throw new SQLException("Does not support setTransactionIsolation");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_SET_TRANSACTION_ISOLATION);
}
@Override
public Map> getTypeMap() throws SQLException {
- throw new SQLException("Does not support getTypeMap");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_GET_TYPE_MAP);
}
@Override
public void setTypeMap(Map> arg0) throws SQLException {
- throw new SQLException("Does not support setTypeMap");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_SET_TYPE_MAP);
}
@Override
@@ -401,7 +402,7 @@ public boolean isReadOnly() {
@Override
public void setReadOnly(boolean readonly) throws SQLException {
if (readonly) {
- throw new SQLException("Does not support readOnly");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_READ_ONLY);
}
}
@@ -412,7 +413,7 @@ public boolean isValid(int arg0) {
@Override
public String nativeSQL(String arg0) throws SQLException {
- throw new SQLException("Does not support nativeSQL");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_NATIVE_SQL);
}
@Override
@@ -470,7 +471,7 @@ public PreparedStatement prepareStatement(
@Override
public void releaseSavepoint(Savepoint arg0) throws SQLException {
- throw new SQLException("Does not support releaseSavepoint");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_RELEASE_SAVEPOINT);
}
@Override
@@ -489,18 +490,18 @@ public void setClientInfo(String name, String value) throws SQLClientInfoExcepti
try {
setTimeZone(value);
} catch (TException | IoTDBSQLException e) {
- throw new SQLClientInfoException("Set time_zone error: ", null, e);
+ throw new SQLClientInfoException(JdbcMessages.SET_TIMEZONE_ERROR, null, e);
}
} else {
HashMap hashMap = new HashMap<>();
hashMap.put(name, ClientInfoStatus.REASON_UNKNOWN_PROPERTY);
- throw new SQLClientInfoException("Does not support this type of client info: ", hashMap);
+ throw new SQLClientInfoException(JdbcMessages.NOT_SUPPORT_CLIENT_INFO_TYPE, hashMap);
}
}
@Override
public void setNetworkTimeout(Executor arg0, int arg1) throws SQLException {
- throw new SQLException("Does not support setNetworkTimeout");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_SET_NETWORK_TIMEOUT);
}
public int getQueryTimeout() {
@@ -509,19 +510,20 @@ public int getQueryTimeout() {
public void setQueryTimeout(int seconds) throws SQLException {
if (seconds < 0) {
- throw new SQLException(String.format("queryTimeout %d must be >= 0!", seconds));
+ throw new SQLException(
+ String.format(JdbcMessages.QUERY_TIMEOUT_MUST_BE_NON_NEGATIVE, seconds));
}
this.queryTimeout = seconds;
}
@Override
public Savepoint setSavepoint() throws SQLException {
- throw new SQLException("Does not support setSavepoint");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_SET_SAVEPOINT);
}
@Override
public Savepoint setSavepoint(String arg0) throws SQLException {
- throw new SQLException("Does not support setSavepoint");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_SET_SAVEPOINT);
}
public IClientRPCService.Iface getClient() {
@@ -647,7 +649,7 @@ public boolean reconnect() {
try {
Thread.sleep(Config.RETRY_INTERVAL_MS);
} catch (InterruptedException e1) {
- logger.error("reconnect is interrupted.", e1);
+ logger.error(JdbcMessages.RECONNECT_INTERRUPTED, e1);
Thread.currentThread().interrupt();
}
}
diff --git a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBDataSource.java b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBDataSource.java
index 12e490d92cc04..fe31896eb2574 100644
--- a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBDataSource.java
+++ b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBDataSource.java
@@ -17,6 +17,8 @@
package org.apache.iotdb.jdbc;
+import org.apache.iotdb.jdbc.i18n.JdbcMessages;
+
import org.apache.thrift.transport.TTransportException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -92,7 +94,7 @@ public Connection getConnection() throws SQLException {
try {
return new IoTDBConnection(url, properties);
} catch (TTransportException e) {
- LOGGER.error("get connection error:", e);
+ LOGGER.error(JdbcMessages.GET_CONNECTION_ERROR, e);
}
return null;
}
@@ -105,7 +107,7 @@ public Connection getConnection(String username, String password) {
newProp.setProperty(PWD_STR, password);
return new IoTDBConnection(url, newProp);
} catch (Exception e) {
- LOGGER.error("get connection error:", e);
+ LOGGER.error(JdbcMessages.GET_CONNECTION_ERROR, e);
}
return null;
}
diff --git a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBDataSourceFactory.java b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBDataSourceFactory.java
index 8e601d5df6fa5..2e5994ca426fd 100644
--- a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBDataSourceFactory.java
+++ b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBDataSourceFactory.java
@@ -17,6 +17,8 @@
package org.apache.iotdb.jdbc;
+import org.apache.iotdb.jdbc.i18n.JdbcMessages;
+
import org.ops4j.pax.jdbc.common.BeanConfig;
import org.osgi.service.jdbc.DataSourceFactory;
import org.slf4j.Logger;
@@ -53,7 +55,7 @@ public void setProperties(IoTDBDataSource ds, Properties prop) {
String password = (String) properties.remove(DataSourceFactory.JDBC_PASSWORD);
ds.setPassword(password);
- logger.info("Remaining properties {}", properties.size());
+ logger.info(JdbcMessages.REMAINING_PROPERTIES, properties.size());
if (!properties.isEmpty()) {
BeanConfig.configure(ds, properties);
diff --git a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBDatabaseMetadata.java b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBDatabaseMetadata.java
index b039bb907f740..2f4ea5af2401c 100644
--- a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBDatabaseMetadata.java
+++ b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBDatabaseMetadata.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.jdbc;
+import org.apache.iotdb.jdbc.i18n.JdbcMessages;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.rpc.StatementExecutionException;
import org.apache.iotdb.service.rpc.thrift.IClientRPCService;
@@ -239,7 +240,7 @@ public class IoTDBDatabaseMetadata extends IoTDBAbstractDatabaseMetadata {
sqlKeywordsThatArentSQL92 = keywordBuf.toString();
} catch (Exception e) {
- LOGGER.error("Error when initializing SQL keywords: ", e);
+ LOGGER.error(JdbcMessages.INIT_SQL_KEYWORDS_ERROR, e);
throw new RuntimeException(e);
}
}
@@ -294,7 +295,7 @@ public ResultSet getTables(
}
sql = sql + "." + tableNamePattern;
}
- LOGGER.info("Get tables: sql: {}", sql);
+ LOGGER.info(JdbcMessages.GET_TABLES_SQL, sql);
try (ResultSet rs = stmt.executeQuery(sql)) {
Field[] fields = new Field[10];
fields[0] = new Field("", TABLE_CAT, "TEXT");
@@ -618,7 +619,7 @@ public ResultSet getPrimaryKeys(String catalog, String schema, String table) thr
try {
tsBlock = convertTsBlock(valuesList, tsDataTypeList);
} catch (IOException e) {
- LOGGER.error("Get primary keys error: {}", e.getMessage());
+ LOGGER.error(JdbcMessages.GET_PRIMARY_KEYS_ERROR, e.getMessage());
} finally {
close(null, stmt);
}
@@ -664,7 +665,7 @@ public String toString() {
try {
return getMetadataInJsonFunc();
} catch (IoTDBSQLException e) {
- LOGGER.error("Failed to fetch metadata in json because: ", e);
+ LOGGER.error(JdbcMessages.FAILED_TO_FETCH_METADATA_JSON, e);
} catch (TException e) {
boolean flag = connection.reconnect();
this.client = connection.getClient();
diff --git a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBDriver.java b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBDriver.java
index db6e9e7bc13ba..de7065c1c571c 100644
--- a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBDriver.java
+++ b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBDriver.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.jdbc;
+import org.apache.iotdb.jdbc.i18n.JdbcMessages;
+
import org.apache.thrift.transport.TTransportException;
import org.osgi.service.component.annotations.Component;
@@ -44,7 +46,7 @@ public class IoTDBDriver implements Driver {
try {
DriverManager.registerDriver(new IoTDBDriver());
} catch (SQLException e) {
- logger.error("Error occurs when registering TsFile driver", e);
+ logger.error(JdbcMessages.REGISTER_DRIVER_ERROR, e);
}
}
@@ -85,7 +87,7 @@ public int getMinorVersion() {
@Override
public Logger getParentLogger() throws SQLFeatureNotSupportedException {
- throw new SQLFeatureNotSupportedException("Method not supported");
+ throw new SQLFeatureNotSupportedException(JdbcMessages.METHOD_NOT_SUPPORTED);
}
@Override
diff --git a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBJDBCResultSet.java b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBJDBCResultSet.java
index f9c3fe481ac7c..3ea3c52320d1a 100644
--- a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBJDBCResultSet.java
+++ b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBJDBCResultSet.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.jdbc;
+import org.apache.iotdb.jdbc.i18n.JdbcMessages;
import org.apache.iotdb.rpc.IoTDBConnectionException;
import org.apache.iotdb.rpc.IoTDBRpcDataSet;
import org.apache.iotdb.rpc.RpcUtils;
@@ -219,9 +220,9 @@ public void close() throws SQLException {
try {
ioTDBRpcDataSet.close();
} catch (StatementExecutionException e) {
- throw new SQLException("Error occurs for close operation in server side because ", e);
+ throw new SQLException(JdbcMessages.CLOSE_SERVER_SIDE_ERROR, e);
} catch (TException e) {
- throw new SQLException("Error occurs when connecting to server for close operation ", e);
+ throw new SQLException(JdbcMessages.CLOSE_CONNECTING_ERROR, e);
}
}
@@ -578,7 +579,7 @@ public ResultSetMetaData getMetaData() {
this.sgColumns = ((IoTDBJDBCResultSet) statement.getResultSet()).getSgColumns();
}
} catch (SQLException throwables) {
- LOGGER.error("get meta data error: {}", throwables.getMessage());
+ LOGGER.error(JdbcMessages.GET_METADATA_ERROR, throwables.getMessage());
}
return new IoTDBResultMetadata(
nonAlign,
diff --git a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBPreparedStatement.java b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBPreparedStatement.java
index c92b6549bf9d0..c4cece01aadae 100644
--- a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBPreparedStatement.java
+++ b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBPreparedStatement.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.jdbc;
+import org.apache.iotdb.jdbc.i18n.JdbcMessages;
import org.apache.iotdb.service.rpc.thrift.IClientRPCService.Iface;
import org.apache.thrift.TException;
@@ -68,7 +69,7 @@
public class IoTDBPreparedStatement extends IoTDBStatement implements PreparedStatement {
private String sql;
- private static final String METHOD_NOT_SUPPORTED_STRING = "Method not supported";
+ private static final String METHOD_NOT_SUPPORTED_STRING = JdbcMessages.METHOD_NOT_SUPPORTED;
private static final Logger logger = LoggerFactory.getLogger(IoTDBPreparedStatement.class);
/** save the SQL parameters as (paramLoc,paramValue) pairs. */
@@ -544,7 +545,7 @@ public void setObject(int parameterIndex, Object parameterObj, int targetSqlType
break;
default:
- logger.error("No type was matched");
+ logger.error(JdbcMessages.NO_TYPE_MATCHED);
break;
}
@@ -1017,11 +1018,12 @@ private String createCompleteSql(final String sql, Map paramete
StringBuilder newSql = new StringBuilder(parts.get(0));
for (int i = 1; i < parts.size(); i++) {
if (logger.isDebugEnabled()) {
- logger.debug("SQL {}", sql);
- logger.debug("parameters {}", parameters.size());
+ logger.debug(JdbcMessages.SQL_DEBUG, sql);
+ logger.debug(JdbcMessages.PARAMETERS_DEBUG, parameters.size());
}
if (!parameters.containsKey(i)) {
- throw new SQLException("Parameter #" + i + " is unset");
+ throw new SQLException(
+ JdbcMessages.PARAMETER_IS_UNSET_PREFIX + i + JdbcMessages.PARAMETER_IS_UNSET_SUFFIX);
}
newSql.append(parameters.get(i));
newSql.append(parts.get(i));
diff --git a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBResultMetadata.java b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBResultMetadata.java
index 83bdc26133cd6..59279d88465c3 100644
--- a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBResultMetadata.java
+++ b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBResultMetadata.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.jdbc;
+import org.apache.iotdb.jdbc.i18n.JdbcMessages;
+
import java.sql.Date;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
@@ -192,13 +194,13 @@ public String getColumnName(int column) throws SQLException {
private void checkColumnIndex(int column) throws SQLException {
if (columnInfoList == null || columnInfoList.isEmpty()) {
- throw new SQLException("No column exists");
+ throw new SQLException(JdbcMessages.NO_COLUMN_EXISTS);
}
if (column > columnInfoList.size()) {
- throw new SQLException(String.format("column %d does not exist", column));
+ throw new SQLException(String.format(JdbcMessages.COLUMN_DOES_NOT_EXIST, column));
}
if (column <= 0) {
- throw new SQLException("column index should start from 1");
+ throw new SQLException(JdbcMessages.COLUMN_INDEX_START_FROM_1);
}
}
diff --git a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBStatement.java b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBStatement.java
index 8cb0a32417f27..1c6ce8c0e9b14 100644
--- a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBStatement.java
+++ b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBStatement.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.jdbc;
import org.apache.iotdb.common.rpc.thrift.TSStatus;
+import org.apache.iotdb.jdbc.i18n.JdbcMessages;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.rpc.StatementExecutionException;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -186,7 +187,7 @@ public boolean isWrapperFor(Class> iface) {
@Override
public T unwrap(Class iface) throws SQLException {
- throw new SQLException("Cannot unwrap to " + iface);
+ throw new SQLException(JdbcMessages.CANNOT_UNWRAP_TO + iface);
}
@Override
@@ -210,7 +211,7 @@ public void cancel() throws SQLException {
RpcUtils.verifySuccess(closeResp);
}
} catch (Exception e) {
- throw new SQLException("Error occurs when canceling statement.", e);
+ throw new SQLException(JdbcMessages.CANCEL_STATEMENT_ERROR, e);
}
isCancelled = true;
}
@@ -238,7 +239,7 @@ private void closeClientOperation() throws SQLException {
stmtId = -1;
}
} catch (Exception e) {
- throw new SQLException("Error occurs when closing statement.", e);
+ throw new SQLException(JdbcMessages.CLOSE_STATEMENT_ERROR, e);
}
}
@@ -254,7 +255,7 @@ public void close() throws SQLException {
@Override
public void closeOnCompletion() throws SQLException {
- throw new SQLException("Not support closeOnCompletion");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_CLOSE_ON_COMPLETION);
}
/**
@@ -384,7 +385,7 @@ private boolean executeSQL(String sql) throws TException, SQLException {
if (execResp.isSetColumns()) {
queryId = execResp.getQueryId();
if (execResp.queryResult == null) {
- throw new SQLException("execResp.queryResult should never be null.");
+ throw new SQLException(JdbcMessages.QUERY_RESULT_SHOULD_NOT_BE_NULL);
} else {
this.resultSet =
new IoTDBJDBCResultSet(
@@ -508,7 +509,7 @@ private ResultSet executeQuerySQL(String sql, long timeoutInMS) throws TExceptio
}
if (!execResp.isSetQueryResult()) {
- throw new SQLException("execResp.queryResult should never be null.");
+ throw new SQLException(JdbcMessages.QUERY_RESULT_SHOULD_NOT_BE_NULL);
} else {
this.resultSet =
new IoTDBJDBCResultSet(
@@ -601,7 +602,7 @@ public int getFetchDirection() throws SQLException {
public void setFetchDirection(int direction) throws SQLException {
checkConnection("setFetchDirection");
if (direction != ResultSet.FETCH_FORWARD) {
- throw new SQLException(String.format("direction %d is not supported!", direction));
+ throw new SQLException(String.format(JdbcMessages.DIRECTION_NOT_SUPPORTED, direction));
}
}
@@ -615,24 +616,25 @@ public int getFetchSize() throws SQLException {
public void setFetchSize(int fetchSize) throws SQLException {
checkConnection("setFetchSize");
if (fetchSize < 0) {
- throw new SQLException(String.format("fetchSize %d must be >= 0!", fetchSize));
+ throw new SQLException(
+ String.format(JdbcMessages.FETCH_SIZE_MUST_BE_NON_NEGATIVE, fetchSize));
}
this.fetchSize = fetchSize == 0 ? Config.DEFAULT_FETCH_SIZE : fetchSize;
}
@Override
public ResultSet getGeneratedKeys() throws SQLException {
- throw new SQLException("Not support getGeneratedKeys");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_GET_GENERATED_KEYS);
}
@Override
public int getMaxFieldSize() throws SQLException {
- throw new SQLException("Not support getMaxFieldSize");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_GET_MAX_FIELD_SIZE);
}
@Override
public void setMaxFieldSize(int arg0) throws SQLException {
- throw new SQLException("Not support getMaxFieldSize");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_GET_MAX_FIELD_SIZE);
}
@Override
@@ -644,7 +646,7 @@ public int getMaxRows() throws SQLException {
public void setMaxRows(int num) throws SQLException {
checkConnection("setMaxRows");
if (num < 0) {
- throw new SQLException(String.format("maxRows %d must be >= 0!", num));
+ throw new SQLException(String.format(JdbcMessages.MAX_ROWS_MUST_BE_NON_NEGATIVE, num));
}
this.maxRows = num;
}
@@ -656,7 +658,7 @@ public boolean getMoreResults() throws SQLException {
@Override
public boolean getMoreResults(int arg0) throws SQLException {
- throw new SQLException("Not support getMoreResults");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_GET_MORE_RESULTS);
}
@Override
@@ -678,12 +680,12 @@ public ResultSet getResultSet() throws SQLException {
@Override
public int getResultSetConcurrency() throws SQLException {
- throw new SQLException("Not support getResultSetConcurrency");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_GET_RESULT_SET_CONCURRENCY);
}
@Override
public int getResultSetHoldability() throws SQLException {
- throw new SQLException("Not support getResultSetHoldability");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_GET_RESULT_SET_HOLDABILITY);
}
@Override
@@ -704,7 +706,7 @@ public SQLWarning getWarnings() {
@Override
public boolean isCloseOnCompletion() throws SQLException {
- throw new SQLException("Not support isCloseOnCompletion");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_IS_CLOSE_ON_COMPLETION);
}
@Override
@@ -714,27 +716,27 @@ public boolean isClosed() {
@Override
public boolean isPoolable() throws SQLException {
- throw new SQLException("Not support isPoolable");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_IS_POOLABLE);
}
@Override
public void setPoolable(boolean arg0) throws SQLException {
- throw new SQLException("Not support setPoolable");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_SET_POOLABLE);
}
@Override
public void setCursorName(String arg0) throws SQLException {
- throw new SQLException("Not support setCursorName");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_SET_CURSOR_NAME);
}
@Override
public void setEscapeProcessing(boolean enable) throws SQLException {
- throw new SQLException("Not support setEscapeProcessing");
+ throw new SQLException(JdbcMessages.NOT_SUPPORT_SET_ESCAPE_PROCESSING);
}
private void checkConnection(String action) throws SQLException {
if (connection == null || connection.isClosed()) {
- throw new SQLException(String.format("Cannot %s after connection has been closed!", action));
+ throw new SQLException(String.format(JdbcMessages.CANNOT_AFTER_CONNECTION_CLOSED, action));
}
}
diff --git a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBTablePreparedStatement.java b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBTablePreparedStatement.java
index 722f1ed1a5783..263306cb18510 100644
--- a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBTablePreparedStatement.java
+++ b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBTablePreparedStatement.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.jdbc;
import org.apache.iotdb.common.rpc.thrift.TSStatus;
+import org.apache.iotdb.jdbc.i18n.JdbcMessages;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.rpc.StatementExecutionException;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -69,7 +70,7 @@
public class IoTDBTablePreparedStatement extends IoTDBStatement implements PreparedStatement {
private static final Logger logger = LoggerFactory.getLogger(IoTDBTablePreparedStatement.class);
- private static final String METHOD_NOT_SUPPORTED_STRING = "Method not supported";
+ private static final String METHOD_NOT_SUPPORTED_STRING = JdbcMessages.METHOD_NOT_SUPPORTED;
private final String sql;
private final String preparedStatementName;
@@ -114,7 +115,7 @@ public class IoTDBTablePreparedStatement extends IoTDBStatement implements Prepa
parameterTypes[i] = Types.NULL;
}
} catch (TException | StatementExecutionException e) {
- throw new SQLException("Failed to prepare statement: " + e.getMessage(), e);
+ throw new SQLException(JdbcMessages.FAILED_TO_PREPARE_STATEMENT + e.getMessage(), e);
}
} else {
// For non-query statements, only keep text parameters for client-side substitution.
@@ -188,7 +189,10 @@ private TSExecuteStatementResp executeInternal() throws SQLException {
if (parameterTypes[i] == Types.NULL
&& parameterValues[i] == null
&& !parameters.containsKey(i + 1)) {
- throw new SQLException("Parameter #" + (i + 1) + " is unset");
+ throw new SQLException(
+ JdbcMessages.PARAMETER_IS_UNSET_PREFIX
+ + (i + 1)
+ + JdbcMessages.PARAMETER_IS_UNSET_SUFFIX);
}
}
@@ -207,7 +211,7 @@ private TSExecuteStatementResp executeInternal() throws SQLException {
RpcUtils.verifySuccess(resp.getStatus());
return resp;
} catch (TException | StatementExecutionException e) {
- throw new SQLException("Failed to execute prepared statement: " + e.getMessage(), e);
+ throw new SQLException(JdbcMessages.FAILED_TO_EXECUTE_PREPARED_STATEMENT + e.getMessage(), e);
}
}
@@ -245,10 +249,10 @@ public void close() throws SQLException {
try {
TSStatus status = client.deallocatePreparedStatement(req);
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- logger.warn("Failed to deallocate prepared statement: {}", status.getMessage());
+ logger.warn(JdbcMessages.FAILED_TO_DEALLOCATE_PREPARED_STATEMENT, status.getMessage());
}
} catch (TException e) {
- logger.warn("Error deallocating prepared statement", e);
+ logger.warn(JdbcMessages.ERROR_DEALLOCATING_PREPARED_STATEMENT, e);
}
}
super.close();
@@ -433,7 +437,7 @@ public void setTime(int parameterIndex, Time x) throws SQLException {
setPreparedParameterValue(parameterIndex, time, Types.BIGINT);
this.parameters.put(parameterIndex, Long.toString(time));
} catch (TException e) {
- throw new SQLException("Failed to get time precision: " + e.getMessage(), e);
+ throw new SQLException(JdbcMessages.FAILED_TO_GET_TIME_PRECISION + e.getMessage(), e);
}
}
@@ -557,7 +561,7 @@ public void setBinaryStream(int parameterIndex, InputStream x, int length) throw
byte[] bytes = ReadWriteIOUtils.readBytes(x, length);
setBytes(parameterIndex, bytes);
} catch (IOException e) {
- throw new SQLException("Failed to read binary stream: " + e.getMessage(), e);
+ throw new SQLException(JdbcMessages.FAILED_TO_READ_BINARY_STREAM + e.getMessage(), e);
}
}
@@ -689,7 +693,8 @@ private String createCompleteSql(final String sql, Map paramete
StringBuilder newSql = new StringBuilder(parts.get(0));
for (int i = 1; i < parts.size(); i++) {
if (!parameters.containsKey(i)) {
- throw new SQLException("Parameter #" + i + " is unset");
+ throw new SQLException(
+ JdbcMessages.PARAMETER_IS_UNSET_PREFIX + i + JdbcMessages.PARAMETER_IS_UNSET_SUFFIX);
}
newSql.append(parameters.get(i));
newSql.append(parts.get(i));
diff --git a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBTracingInfo.java b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBTracingInfo.java
index e6c2a147e7098..57a2724fccc83 100644
--- a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBTracingInfo.java
+++ b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBTracingInfo.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.jdbc;
+import org.apache.iotdb.jdbc.i18n.JdbcMessages;
import org.apache.iotdb.service.rpc.thrift.TSTracingInfo;
import java.util.List;
@@ -69,7 +70,7 @@ public long getStatisticsByName(String name) throws Exception {
case "overlappedPageNum":
return tsTracingInfo.getOverlappedPageNum();
default:
- throw new Exception("Invalid statistics name!");
+ throw new Exception(JdbcMessages.INVALID_STATISTICS_NAME);
}
}
@@ -101,7 +102,7 @@ public String getStatisticsInfoByName(String name) throws Exception {
tsTracingInfo.getOverlappedPageNum(),
(double) tsTracingInfo.getOverlappedPageNum() / tsTracingInfo.getTotalPageNum() * 100);
default:
- throw new Exception("Invalid statistics name!");
+ throw new Exception(JdbcMessages.INVALID_STATISTICS_NAME);
}
}
}
diff --git a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/StringUtils.java b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/StringUtils.java
index 959aa7304caa6..a7bdaab0e6730 100644
--- a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/StringUtils.java
+++ b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/StringUtils.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.jdbc;
+import org.apache.iotdb.jdbc.i18n.JdbcMessages;
+
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -55,7 +57,7 @@ public class StringUtils {
try {
toPlainStringMethod = BigDecimal.class.getMethod("toPlainString");
} catch (NoSuchMethodException nsme) {
- LOGGER.warn("To plain String method Error:", nsme);
+ LOGGER.warn(JdbcMessages.TO_PLAIN_STRING_ERROR, nsme);
}
}
@@ -67,7 +69,7 @@ public static String consistentToString(BigDecimal decimal) {
try {
return (String) toPlainStringMethod.invoke(decimal, null);
} catch (InvocationTargetException | IllegalAccessException e) {
- LOGGER.warn("consistent to String Error:", e);
+ LOGGER.warn(JdbcMessages.CONSISTENT_TO_STRING_ERROR, e);
}
}
return decimal.toString();
diff --git a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/relational/IoTDBRelationalDatabaseMetadata.java b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/relational/IoTDBRelationalDatabaseMetadata.java
index 54236998a5a02..7ddfca1c01a9d 100644
--- a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/relational/IoTDBRelationalDatabaseMetadata.java
+++ b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/relational/IoTDBRelationalDatabaseMetadata.java
@@ -23,6 +23,7 @@
import org.apache.iotdb.jdbc.IoTDBAbstractDatabaseMetadata;
import org.apache.iotdb.jdbc.IoTDBConnection;
import org.apache.iotdb.jdbc.IoTDBJDBCResultSet;
+import org.apache.iotdb.jdbc.i18n.JdbcMessages;
import org.apache.iotdb.service.rpc.thrift.IClientRPCService;
import org.apache.tsfile.enums.TSDataType;
@@ -175,7 +176,7 @@ public class IoTDBRelationalDatabaseMetadata extends IoTDBAbstractDatabaseMetada
sqlKeywordsThatArentSQL92 = keywordBuf.toString();
} catch (Exception e) {
- LOGGER.error("Error when initializing SQL keywords: ", e);
+ LOGGER.error(JdbcMessages.RELATIONAL_INIT_SQL_KEYWORDS_ERROR, e);
throw new RuntimeException(e);
}
}
@@ -616,7 +617,7 @@ public ResultSet getPrimaryKeys(String catalog, String schemaPattern, String tab
try {
tsBlock = convertTsBlock(valuesList, tsDataTypeList);
} catch (IOException e) {
- LOGGER.error("Get primary keys error: {}", e.getMessage());
+ LOGGER.error(JdbcMessages.RELATIONAL_GET_PRIMARY_KEYS_ERROR, e.getMessage());
} finally {
close(null, stmt);
}
diff --git a/iotdb-client/service-rpc/pom.xml b/iotdb-client/service-rpc/pom.xml
index 63144e7cb06dd..98a5e0d727e8a 100644
--- a/iotdb-client/service-rpc/pom.xml
+++ b/iotdb-client/service-rpc/pom.xml
@@ -82,6 +82,10 @@
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+
diff --git a/iotdb-client/service-rpc/src/main/i18n/en/org/apache/iotdb/rpc/i18n/RpcMessages.java b/iotdb-client/service-rpc/src/main/i18n/en/org/apache/iotdb/rpc/i18n/RpcMessages.java
new file mode 100644
index 0000000000000..0e545164e8d40
--- /dev/null
+++ b/iotdb-client/service-rpc/src/main/i18n/en/org/apache/iotdb/rpc/i18n/RpcMessages.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.rpc.i18n;
+
+public final class RpcMessages {
+
+ // TElasticFramedTransport - FrameError
+ public static final String FRAME_ERROR_HTTP_REQUEST =
+ "Singular frame size (%d) detected, you may be sending HTTP GET/POST%s "
+ + "requests to the Thrift-RPC port, please confirm that you are using the right port";
+ public static final String FRAME_ERROR_TLS_REQUEST =
+ "Singular frame size (%d) detected, you may be sending TLS ClientHello "
+ + "requests%s to the Non-SSL Thrift-RPC port, please confirm that you are using "
+ + "the right configuration";
+ public static final String FRAME_ERROR_NEGATIVE_FRAME_SIZE =
+ "Read a negative frame size (%d)%s!";
+ public static final String FRAME_ERROR_FRAME_SIZE_EXCEEDED =
+ "Frame size (%d) larger than protect max size (%d)%s!";
+ public static final String FRAME_ERROR_STRING_LENGTH_EXCEEDED =
+ "String length (%d) larger than protect max size (%d)%s!";
+
+ // TElasticFramedTransport - SSL
+ public static final String NON_SSL_TO_SSL_PORT =
+ "You may be sending non-SSL requests"
+ + "%s to the SSL-enabled Thrift-RPC port, please confirm that you are "
+ + "using the right configuration";
+
+ // ConfigurableTByteBuffer
+ public static final String UNEXPECTED_END_OF_INPUT = "Unexpected end of input buffer";
+ public static final String NOT_ENOUGH_ROOM_IN_OUTPUT = "Not enough room in output buffer";
+
+ // BaseRpcTransportFactory
+ public static final String COULD_NOT_LOAD_KEYSTORE =
+ "Could not load keystore or truststore file";
+
+ // IoTDBRpcDataSet / IoTDBJDBCDataSet
+ public static final String CLOSE_OPERATION_SERVER_ERROR =
+ "Error occurs for close operation in server side because ";
+ public static final String CLOSE_OPERATION_CONNECTION_ERROR =
+ "Error occurs when connecting to server for close operation ";
+ public static final String DATASET_ALREADY_CLOSED = "This DataSet is already closed";
+ public static final String COLUMN_INDEX_SHOULD_START_FROM_1 =
+ "column index should start from 1";
+ public static final String COLUMN_INDEX_OUT_OF_RANGE =
+ "column index %d out of range %d";
+ public static final String UNKNOWN_COLUMN_NAME = "Unknown column name: ";
+ public static final String NO_RECORD_REMAINS = "No record remains";
+ public static final String CANNOT_CLOSE_DATASET =
+ "Cannot close dataset, because of network connection: {} ";
+
+ // RpcUtils
+ public static final String UNKNOWN_TIME_PRECISION = "Unknown time precision: ";
+ public static final String UNKNOWN_TIME_FACTOR = "Unknown time factor: ";
+
+ // PreparedParameterSerde
+ public static final String FAILED_TO_SERIALIZE_PARAMETERS =
+ "Failed to serialize parameters";
+ public static final String INVALID_PARAMETER_COUNT = "Invalid parameter count: ";
+ public static final String UNSUPPORTED_TYPE = "Unsupported type: ";
+
+ // SynchronizedHandler
+ public static final String ERROR_IN_CALLING_METHOD = "Error in calling method ";
+
+ private RpcMessages() {}
+}
diff --git a/iotdb-client/service-rpc/src/main/i18n/zh/org/apache/iotdb/rpc/i18n/RpcMessages.java b/iotdb-client/service-rpc/src/main/i18n/zh/org/apache/iotdb/rpc/i18n/RpcMessages.java
new file mode 100644
index 0000000000000..03faa1dbb8374
--- /dev/null
+++ b/iotdb-client/service-rpc/src/main/i18n/zh/org/apache/iotdb/rpc/i18n/RpcMessages.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.rpc.i18n;
+
+public final class RpcMessages {
+
+ // TElasticFramedTransport - FrameError
+ public static final String FRAME_ERROR_HTTP_REQUEST =
+ "检测到异常帧大小 (%d),可能正在发送 HTTP GET/POST%s 请求到 Thrift-RPC 端口,请确认使用了正确的端口";
+ public static final String FRAME_ERROR_TLS_REQUEST =
+ "检测到异常帧大小 (%d),可能正在发送 TLS ClientHello 请求%s"
+ + "到未启用 SSL 的 Thrift-RPC 端口,请确认使用了正确的配置";
+ public static final String FRAME_ERROR_NEGATIVE_FRAME_SIZE = "读取到负数帧大小 (%d)%s!";
+ public static final String FRAME_ERROR_FRAME_SIZE_EXCEEDED =
+ "帧大小 (%d) 超过保护最大值 (%d)%s!";
+ public static final String FRAME_ERROR_STRING_LENGTH_EXCEEDED =
+ "字符串长度 (%d) 超过保护最大值 (%d)%s!";
+
+ // TElasticFramedTransport - SSL
+ public static final String NON_SSL_TO_SSL_PORT =
+ "可能正在发送非 SSL 请求%s到启用了 SSL 的 Thrift-RPC 端口,请确认使用了正确的配置";
+
+ // ConfigurableTByteBuffer
+ public static final String UNEXPECTED_END_OF_INPUT = "输入缓冲区意外结束";
+ public static final String NOT_ENOUGH_ROOM_IN_OUTPUT = "输出缓冲区空间不足";
+
+ // BaseRpcTransportFactory
+ public static final String COULD_NOT_LOAD_KEYSTORE = "无法加载密钥库或信任库文件";
+
+ // IoTDBRpcDataSet / IoTDBJDBCDataSet
+ public static final String CLOSE_OPERATION_SERVER_ERROR = "服务端关闭操作失败,原因:";
+ public static final String CLOSE_OPERATION_CONNECTION_ERROR = "连接服务端执行关闭操作时出错 ";
+ public static final String DATASET_ALREADY_CLOSED = "该数据集已关闭";
+ public static final String COLUMN_INDEX_SHOULD_START_FROM_1 = "列索引应从 1 开始";
+ public static final String COLUMN_INDEX_OUT_OF_RANGE = "列索引 %d 超出范围 %d";
+ public static final String UNKNOWN_COLUMN_NAME = "未知列名:";
+ public static final String NO_RECORD_REMAINS = "没有剩余记录";
+ public static final String CANNOT_CLOSE_DATASET = "无法关闭数据集,网络连接异常:{} ";
+
+ // RpcUtils
+ public static final String UNKNOWN_TIME_PRECISION = "未知时间精度:";
+ public static final String UNKNOWN_TIME_FACTOR = "未知时间因子:";
+
+ // PreparedParameterSerde
+ public static final String FAILED_TO_SERIALIZE_PARAMETERS = "序列化参数失败";
+ public static final String INVALID_PARAMETER_COUNT = "无效的参数数量:";
+ public static final String UNSUPPORTED_TYPE = "不支持的类型:";
+
+ // SynchronizedHandler
+ public static final String ERROR_IN_CALLING_METHOD = "调用方法时出错:";
+
+ private RpcMessages() {}
+}
diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/BaseRpcTransportFactory.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/BaseRpcTransportFactory.java
index eccebe2eafe61..22e104b6a58ac 100644
--- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/BaseRpcTransportFactory.java
+++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/BaseRpcTransportFactory.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.rpc;
+import org.apache.iotdb.rpc.i18n.RpcMessages;
+
import org.apache.thrift.transport.TMemoryInputTransport;
import org.apache.thrift.transport.TSSLTransportFactory;
import org.apache.thrift.transport.TSocket;
@@ -103,7 +105,7 @@ public TTransport getTransport(
params.setTrustStore(trustStore, trustStorePwd);
params.setKeyStore(keyStore, keyStorePwd);
} else {
- throw new TTransportException(new IOException("Could not load keystore or truststore file"));
+ throw new TTransportException(new IOException(RpcMessages.COULD_NOT_LOAD_KEYSTORE));
}
TTransport transport = TSSLTransportFactory.getClientSocket(ip, port, timeout, params);
return inner.getTransport(transport);
diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/ConfigurableTByteBuffer.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/ConfigurableTByteBuffer.java
index 5cbd33f3f1e5d..ad801a9e9befa 100644
--- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/ConfigurableTByteBuffer.java
+++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/ConfigurableTByteBuffer.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.rpc;
+import org.apache.iotdb.rpc.i18n.RpcMessages;
+
import org.apache.thrift.TConfiguration;
import org.apache.thrift.transport.TEndpointTransport;
import org.apache.thrift.transport.TTransportException;
@@ -67,7 +69,7 @@ public int read(byte[] buf, int off, int len) throws TTransportException {
try {
this.byteBuffer.get(buf, off, n);
} catch (BufferUnderflowException e) {
- throw new TTransportException("Unexpected end of input buffer", e);
+ throw new TTransportException(RpcMessages.UNEXPECTED_END_OF_INPUT, e);
}
}
@@ -78,7 +80,7 @@ public void write(byte[] buf, int off, int len) throws TTransportException {
try {
this.byteBuffer.put(buf, off, len);
} catch (BufferOverflowException e) {
- throw new TTransportException("Not enough room in output buffer", e);
+ throw new TTransportException(RpcMessages.NOT_ENOUGH_ROOM_IN_OUTPUT, e);
}
}
diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/IoTDBJDBCDataSet.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/IoTDBJDBCDataSet.java
index 43064ecdaa9fb..02b5cc12fb569 100644
--- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/IoTDBJDBCDataSet.java
+++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/IoTDBJDBCDataSet.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.rpc;
import org.apache.iotdb.common.rpc.thrift.TSStatus;
+import org.apache.iotdb.rpc.i18n.RpcMessages;
import org.apache.iotdb.service.rpc.thrift.IClientRPCService;
import org.apache.iotdb.service.rpc.thrift.TSCloseOperationReq;
import org.apache.iotdb.service.rpc.thrift.TSFetchResultsReq;
@@ -332,10 +333,9 @@ public void close() throws StatementExecutionException, TException {
TSStatus closeResp = client.closeOperation(closeReq);
RpcUtils.verifySuccess(closeResp);
} catch (StatementExecutionException e) {
- throw new StatementExecutionException(
- "Error occurs for close operation in server side because ", e);
+ throw new StatementExecutionException(RpcMessages.CLOSE_OPERATION_SERVER_ERROR, e);
} catch (TException e) {
- throw new TException("Error occurs when connecting to server for close operation ", e);
+ throw new TException(RpcMessages.CLOSE_OPERATION_CONNECTION_ERROR, e);
}
}
client = null;
@@ -352,8 +352,7 @@ public boolean next() throws StatementExecutionException, IoTDBConnectionExcepti
close();
return false;
} catch (TException e) {
- throw new IoTDBConnectionException(
- "Cannot close dataset, because of network connection: {} ", e);
+ throw new IoTDBConnectionException(RpcMessages.CANNOT_CLOSE_DATASET, e);
}
}
if (fetchResults() && hasCachedResults()) {
@@ -364,8 +363,7 @@ public boolean next() throws StatementExecutionException, IoTDBConnectionExcepti
close();
return false;
} catch (TException e) {
- throw new IoTDBConnectionException(
- "Cannot close dataset, because of network connection: {} ", e);
+ throw new IoTDBConnectionException(RpcMessages.CANNOT_CLOSE_DATASET, e);
}
}
}
@@ -653,18 +651,18 @@ public Object getObject(int index, TSDataType tsDataType, byte[][] values) {
public String findColumnNameByIndex(int columnIndex) throws StatementExecutionException {
if (columnIndex <= 0) {
- throw new StatementExecutionException("column index should start from 1");
+ throw new StatementExecutionException(RpcMessages.COLUMN_INDEX_SHOULD_START_FROM_1);
}
if (columnIndex > columnNameList.size()) {
throw new StatementExecutionException(
- String.format("column index %d out of range %d", columnIndex, columnNameList.size()));
+ String.format(RpcMessages.COLUMN_INDEX_OUT_OF_RANGE, columnIndex, columnNameList.size()));
}
return columnNameList.get(columnIndex - 1);
}
public void checkRecord() throws StatementExecutionException {
if (Objects.isNull(tsQueryDataSet)) {
- throw new StatementExecutionException("No record remains");
+ throw new StatementExecutionException(RpcMessages.NO_RECORD_REMAINS);
}
}
diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/IoTDBRpcDataSet.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/IoTDBRpcDataSet.java
index 940b3460d927b..42bf974a3eb80 100644
--- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/IoTDBRpcDataSet.java
+++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/IoTDBRpcDataSet.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.rpc;
import org.apache.iotdb.common.rpc.thrift.TSStatus;
+import org.apache.iotdb.rpc.i18n.RpcMessages;
import org.apache.iotdb.service.rpc.thrift.IClientRPCService;
import org.apache.iotdb.service.rpc.thrift.TSCloseOperationReq;
import org.apache.iotdb.service.rpc.thrift.TSFetchResultsReq;
@@ -217,10 +218,9 @@ public void close() throws StatementExecutionException, TException {
TSStatus closeResp = client.closeOperation(closeReq);
RpcUtils.verifySuccess(closeResp);
} catch (StatementExecutionException e) {
- throw new StatementExecutionException(
- "Error occurs for close operation in server side because ", e);
+ throw new StatementExecutionException(RpcMessages.CLOSE_OPERATION_SERVER_ERROR, e);
} catch (TException e) {
- throw new TException("Error occurs when connecting to server for close operation ", e);
+ throw new TException(RpcMessages.CLOSE_OPERATION_CONNECTION_ERROR, e);
}
}
client = null;
@@ -248,15 +248,14 @@ public boolean next() throws StatementExecutionException, IoTDBConnectionExcepti
close();
return false;
} catch (TException e) {
- throw new IoTDBConnectionException(
- "Cannot close dataset, because of network connection: {} ", e);
+ throw new IoTDBConnectionException(RpcMessages.CANNOT_CLOSE_DATASET, e);
}
}
}
public boolean fetchResults() throws StatementExecutionException, IoTDBConnectionException {
if (isClosed) {
- throw new IoTDBConnectionException("This DataSet is already closed");
+ throw new IoTDBConnectionException(RpcMessages.DATASET_ALREADY_CLOSED);
}
TSFetchResultsReq req = new TSFetchResultsReq(sessionId, sql, fetchSize, queryId, true);
req.setStatementId(statementId);
@@ -618,11 +617,11 @@ public int findColumn(String columnName) {
public String findColumnNameByIndex(int columnIndex) throws StatementExecutionException {
if (columnIndex <= 0) {
- throw new StatementExecutionException("column index should start from 1");
+ throw new StatementExecutionException(RpcMessages.COLUMN_INDEX_SHOULD_START_FROM_1);
}
if (columnIndex > columnNameList.size()) {
throw new StatementExecutionException(
- String.format("column index %d out of range %d", columnIndex, columnNameList.size()));
+ String.format(RpcMessages.COLUMN_INDEX_OUT_OF_RANGE, columnIndex, columnNameList.size()));
}
return columnNameList.get(columnIndex - 1);
}
@@ -631,7 +630,7 @@ public String findColumnNameByIndex(int columnIndex) throws StatementExecutionEx
private int getTsBlockColumnIndexForColumnName(String columnName) {
Integer index = columnName2TsBlockColumnIndexMap.get(columnName);
if (index == null) {
- throw new IllegalArgumentException("Unknown column name: " + columnName);
+ throw new IllegalArgumentException(RpcMessages.UNKNOWN_COLUMN_NAME + columnName);
}
return index;
}
@@ -645,7 +644,7 @@ public void checkRecord() throws StatementExecutionException {
|| tsBlockIndex >= tsBlockSize
|| queryResult == null
|| curTsBlock == null) {
- throw new StatementExecutionException("No record remains");
+ throw new StatementExecutionException(RpcMessages.NO_RECORD_REMAINS);
}
}
diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/RpcUtils.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/RpcUtils.java
index a4cc34a81b9d6..1eb309674056e 100644
--- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/RpcUtils.java
+++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/RpcUtils.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.common.rpc.thrift.TEndPoint;
import org.apache.iotdb.common.rpc.thrift.TSStatus;
+import org.apache.iotdb.rpc.i18n.RpcMessages;
import org.apache.iotdb.service.rpc.thrift.IClientRPCService;
import org.apache.iotdb.service.rpc.thrift.TSExecuteStatementResp;
import org.apache.iotdb.service.rpc.thrift.TSFetchResultsResp;
@@ -459,7 +460,7 @@ public static int getTimeFactor(TSOpenSessionResp openResp) {
case NANOSECOND:
return 1_000_000_000;
default:
- throw new IllegalArgumentException("Unknown time precision: " + precision);
+ throw new IllegalArgumentException(RpcMessages.UNKNOWN_TIME_PRECISION + precision);
}
}
}
@@ -475,7 +476,7 @@ public static String getTimePrecision(int timeFactor) {
case 1_000_000_000:
return NANOSECOND;
default:
- throw new IllegalArgumentException("Unknown time factor: " + timeFactor);
+ throw new IllegalArgumentException(RpcMessages.UNKNOWN_TIME_FACTOR + timeFactor);
}
}
}
diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/SynchronizedHandler.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/SynchronizedHandler.java
index d952dc9eec9f6..4e764664ead90 100644
--- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/SynchronizedHandler.java
+++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/SynchronizedHandler.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.rpc;
+import org.apache.iotdb.rpc.i18n.RpcMessages;
import org.apache.iotdb.service.rpc.thrift.IClientRPCService;
import org.apache.thrift.TException;
@@ -47,10 +48,11 @@ public Object invoke(Object proxy, Method method, Object[] args) throws Throwabl
throw e.getTargetException();
} else {
// should not happen
- throw new TException("Error in calling method " + method.getName(), e.getTargetException());
+ throw new TException(
+ RpcMessages.ERROR_IN_CALLING_METHOD + method.getName(), e.getTargetException());
}
} catch (Exception e) {
- throw new TException("Error in calling method " + method.getName(), e);
+ throw new TException(RpcMessages.ERROR_IN_CALLING_METHOD + method.getName(), e);
}
}
}
diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TElasticFramedTransport.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TElasticFramedTransport.java
index 69ac5fbb56f40..700001601aa6a 100644
--- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TElasticFramedTransport.java
+++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TElasticFramedTransport.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.rpc;
+import org.apache.iotdb.rpc.i18n.RpcMessages;
+
import org.apache.thrift.TConfiguration;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;
@@ -161,9 +163,7 @@ public int read(byte[] buf, int off, int len) throws TTransportException {
throw new TTransportException(
TTransportException.CORRUPTED_DATA,
String.format(
- "You may be sending non-SSL requests"
- + "%s to the SSL-enabled Thrift-RPC port, please confirm that you are "
- + "using the right configuration",
+ RpcMessages.NON_SSL_TO_SSL_PORT,
remoteAddress == null ? "" : " from " + remoteAddress));
}
throw e;
@@ -216,16 +216,11 @@ protected void validateFrame(int size) throws TTransportException {
}
private enum FrameError {
- HTTP_REQUEST(
- "Singular frame size (%d) detected, you may be sending HTTP GET/POST%s "
- + "requests to the Thrift-RPC port, please confirm that you are using the right port"),
- TLS_REQUEST(
- "Singular frame size (%d) detected, you may be sending TLS ClientHello "
- + "requests%s to the Non-SSL Thrift-RPC port, please confirm that you are using "
- + "the right configuration"),
- NEGATIVE_FRAME_SIZE("Read a negative frame size (%d)%s!"),
- FRAME_SIZE_EXCEEDED("Frame size (%d) larger than protect max size (%d)%s!"),
- STRING_LENGTH_EXCEEDED("String length (%d) larger than protect max size (%d)%s!");
+ HTTP_REQUEST(RpcMessages.FRAME_ERROR_HTTP_REQUEST),
+ TLS_REQUEST(RpcMessages.FRAME_ERROR_TLS_REQUEST),
+ NEGATIVE_FRAME_SIZE(RpcMessages.FRAME_ERROR_NEGATIVE_FRAME_SIZE),
+ FRAME_SIZE_EXCEEDED(RpcMessages.FRAME_ERROR_FRAME_SIZE_EXCEEDED),
+ STRING_LENGTH_EXCEEDED(RpcMessages.FRAME_ERROR_STRING_LENGTH_EXCEEDED);
private final String messageFormat;
diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/stmt/PreparedParameterSerde.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/stmt/PreparedParameterSerde.java
index 51a23a6af4c6d..86698e4557336 100644
--- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/stmt/PreparedParameterSerde.java
+++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/stmt/PreparedParameterSerde.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.rpc.stmt;
+import org.apache.iotdb.rpc.i18n.RpcMessages;
+
import org.apache.tsfile.enums.TSDataType;
import org.apache.tsfile.utils.Binary;
import org.apache.tsfile.utils.PublicBAOS;
@@ -61,7 +63,7 @@ public static ByteBuffer serialize(Object[] values, int[] jdbcTypes, int count)
return ByteBuffer.wrap(outputStream.getBuf(), 0, outputStream.size());
} catch (IOException e) {
// Should not happen with PublicBAOS
- throw new IllegalStateException("Failed to serialize parameters", e);
+ throw new IllegalStateException(RpcMessages.FAILED_TO_SERIALIZE_PARAMETERS, e);
}
}
@@ -126,7 +128,7 @@ public static List deserialize(ByteBuffer buffer) {
buffer.rewind();
int count = ReadWriteIOUtils.readInt(buffer);
if (count < 0 || count > buffer.remaining()) {
- throw new IllegalArgumentException("Invalid parameter count: " + count);
+ throw new IllegalArgumentException(RpcMessages.INVALID_PARAMETER_COUNT + count);
}
List result = new ArrayList<>(count);
@@ -160,7 +162,7 @@ private static Object deserializeValue(ByteBuffer buffer, TSDataType type) {
case BLOB:
return ReadWriteIOUtils.readBinary(buffer).getValues();
default:
- throw new IllegalArgumentException("Unsupported type: " + type);
+ throw new IllegalArgumentException(RpcMessages.UNSUPPORTED_TYPE + type);
}
}
diff --git a/iotdb-client/session/pom.xml b/iotdb-client/session/pom.xml
index d9306c1f61f0d..f375374d4ee1a 100644
--- a/iotdb-client/session/pom.xml
+++ b/iotdb-client/session/pom.xml
@@ -135,6 +135,10 @@
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+
diff --git a/iotdb-client/session/src/main/i18n/en/org/apache/iotdb/session/i18n/SessionMessages.java b/iotdb-client/session/src/main/i18n/en/org/apache/iotdb/session/i18n/SessionMessages.java
new file mode 100644
index 0000000000000..a9d055d563c88
--- /dev/null
+++ b/iotdb-client/session/src/main/i18n/en/org/apache/iotdb/session/i18n/SessionMessages.java
@@ -0,0 +1,260 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.session.i18n;
+
+public final class SessionMessages {
+
+ // Session
+ public static final String NODE_URLS_EMPTY = "nodeUrls shouldn't be empty.";
+ public static final String REDIRECT_TWICE = "{} redirect twice";
+ public static final String REDIRECT_TWICE_EXCEPTION = "%s redirect twice, please try again.";
+ public static final String FAILED_TO_EXECUTE_FOR_ENDPOINT = "failed to execute '{}' for {}";
+ public static final String ALL_VALUES_NULL =
+ "All values of the {} are null,null values are {}";
+ public static final String SOME_VALUES_NULL =
+ "Some values of {} are null,null values are {}";
+ public static final String MEET_ERROR_WHEN_ASYNC_INSERT = "Meet error when async insert!";
+ public static final String MEASUREMENT_NON_NULL = "measurement should be non null value";
+ public static final String NO_TABLET_INSERTING = "No tablet is inserting!";
+ public static final String SESSION_NOT_OPEN =
+ "Session is not open, please invoke Session.open() first";
+ public static final String ALL_INSERT_DATA_IS_NULL = "All inserted data is null.";
+
+ // SessionConnection
+ public static final String CLUSTER_NO_NODES = "Cluster has no nodes to connect";
+ public static final String CLOSE_SESSION_ERROR =
+ "Error occurs when closing session at server. Maybe server is down.";
+ public static final String REDIRECT_QUERY_ERROR =
+ "need to redirect query, should not see this.";
+ public static final String RETRY_RECONNECTING =
+ "Retry attempt #{}, Reconnecting to other datanode";
+ public static final String NODE_DOWN_TRY_NEXT =
+ "The current node may have been down {}, try next node";
+ public static final String LOGIN_FAILED = "login in failed, because {}";
+ public static final String CLOSE_CONNECTION_FAILED = "close connection failed, {}";
+ public static final String THREAD_INTERRUPTED_DURING_RETRY =
+ "Thread {} was interrupted during retry {} with wait time {} ms. Exiting retry loop.";
+
+ // ThriftConnection
+ public static final String CLOSING_SESSION_FAILED =
+ "Closing Session-{} with {} failed.";
+
+ // NodesSupplier
+ public static final String FAILED_TO_CREATE_CONNECTION =
+ "Failed to create connection with {}.";
+ public static final String FAILED_TO_FETCH_DATA_NODE_LIST =
+ "Failed to fetch data node list from {}.";
+
+ // SessionUtils
+ public static final String NODE_URLS_IS_NULL = "nodeUrls is null";
+
+ // InternalNode (template)
+ public static final String DUPLICATED_CHILD_IN_TEMPLATE =
+ "Duplicated child of node in template.";
+
+ // SessionPool
+ public static final String SESSION_POOL_IS_CLOSED = "Session pool is closed";
+ public static final String CLOSE_THE_SESSION_FAILED = "close the session failed.";
+ public static final String TIMEOUT_TO_GET_CONNECTION =
+ "timeout to get a connection from %s";
+ public static final String INTERRUPTED = "Interrupted!";
+ public static final String CLOSING_SESSION_POOL = "closing the session pool, cleaning queues...";
+
+ // SessionPool - operation failed (warn)
+ public static final String INSERT_TABLET_FAILED = "insertTablet failed";
+ public static final String INSERT_ALIGNED_TABLET_FAILED = "insertAlignedTablet failed";
+ public static final String INSERT_TABLETS_FAILED = "insertTablets failed";
+ public static final String INSERT_ALIGNED_TABLETS_FAILED = "insertAlignedTablets failed";
+ public static final String INSERT_RECORDS_FAILED = "insertRecords failed";
+ public static final String INSERT_ALIGNED_RECORDS_FAILED = "insertAlignedRecords failed";
+ public static final String INSERT_RECORD_FAILED = "insertRecord failed";
+ public static final String INSERT_ALIGNED_RECORD_FAILED = "insertAlignedRecord failed";
+ public static final String INSERT_RECORDS_OF_ONE_DEVICE_FAILED =
+ "insertRecordsOfOneDevice failed";
+ public static final String INSERT_STRING_RECORDS_OF_ONE_DEVICE_FAILED =
+ "insertStringRecordsOfOneDevice failed";
+ public static final String INSERT_ALIGNED_RECORDS_OF_ONE_DEVICE_FAILED =
+ "insertAlignedRecordsOfOneDevice failed";
+ public static final String INSERT_ALIGNED_STRING_RECORDS_OF_ONE_DEVICE_FAILED =
+ "insertAlignedStringRecordsOfOneDevice failed";
+ public static final String DELETE_DATA_FAILED = "deleteData failed";
+ public static final String DELETE_TIMESERIES_FAILED = "deleteTimeseries failed";
+ public static final String SET_STORAGE_GROUP_FAILED = "setStorageGroup failed";
+ public static final String DELETE_STORAGE_GROUP_FAILED = "deleteStorageGroup failed";
+ public static final String DELETE_STORAGE_GROUPS_FAILED = "deleteStorageGroups failed";
+ public static final String CREATE_DATABASE_FAILED = "createDatabase failed";
+ public static final String DELETE_DATABASE_FAILED = "deleteDatabase failed";
+ public static final String DELETE_DATABASES_FAILED = "deleteDatabases failed";
+ public static final String CREATE_TIMESERIES_FAILED = "createTimeseries failed";
+ public static final String CREATE_ALIGNED_TIMESERIES_FAILED = "createAlignedTimeseries failed";
+ public static final String CREATE_MULTI_TIMESERIES_FAILED = "createMultiTimeseries failed";
+ public static final String CHECK_TIMESERIES_EXISTS_FAILED = "checkTimeseriesExists failed";
+ public static final String EXECUTE_QUERY_STATEMENT_FAILED = "executeQueryStatement failed";
+ public static final String EXECUTE_NON_QUERY_STATEMENT_FAILED =
+ "executeNonQueryStatement failed";
+ public static final String EXECUTE_RAW_DATA_QUERY_FAILED = "executeRawDataQuery failed";
+ public static final String EXECUTE_LAST_DATA_QUERY_FAILED = "executeLastDataQuery failed";
+ public static final String EXECUTE_AGGREGATION_QUERY_FAILED = "executeAggregationQuery failed";
+ public static final String GET_TIMESTAMP_PRECISION_FAILED = "getTimestampPrecision failed";
+ public static final String TEST_INSERT_TABLET_FAILED = "testInsertTablet failed";
+ public static final String TEST_INSERT_TABLETS_FAILED = "testInsertTablets failed";
+ public static final String TEST_INSERT_RECORD_FAILED = "testInsertRecord failed";
+ public static final String TEST_INSERT_RECORDS_FAILED = "testInsertRecords failed";
+ public static final String CREATE_SCHEMA_TEMPLATE_FAILED = "createSchemaTemplate failed";
+ public static final String ADD_ALIGNED_MEASUREMENTS_IN_TEMPLATE_FAILED =
+ "addAlignedMeasurementsInTemplate failed";
+ public static final String ADD_ALIGNED_MEASUREMENT_IN_TEMPLATE_FAILED =
+ "addAlignedMeasurementInTemplate failed";
+ public static final String ADD_UNALIGNED_MEASUREMENTS_IN_TEMPLATE_FAILED =
+ "addUnalignedMeasurementsInTemplate failed";
+ public static final String ADD_UNALIGNED_MEASUREMENT_IN_TEMPLATE_FAILED =
+ "addUnalignedMeasurementInTemplate failed";
+ public static final String DELETE_NODE_IN_TEMPLATE_FAILED = "deleteNodeInTemplate failed";
+ public static final String COUNT_MEASUREMENTS_IN_TEMPLATE_FAILED =
+ "countMeasurementsInTemplate failed";
+ public static final String IS_MEASUREMENT_IN_TEMPLATE_FAILED =
+ "isMeasurementInTemplate failed";
+ public static final String IS_PATH_EXIST_IN_TEMPLATE_FAILED =
+ "isPathExistInTemplata failed";
+ public static final String SHOW_MEASUREMENTS_IN_TEMPLATE_FAILED =
+ "showMeasurementsInTemplate failed";
+ public static final String SHOW_ALL_TEMPLATES_FAILED = "showAllTemplates failed";
+ public static final String SHOW_PATHS_TEMPLATE_SET_ON_FAILED =
+ "showPathsTemplateSetOn failed";
+ public static final String SHOW_PATHS_TEMPLATE_USING_ON_FAILED =
+ "showPathsTemplateUsingOn failed";
+ public static final String SET_SCHEMA_TEMPLATE_ON_FAILED =
+ "setSchemaTemplate [{}] on [{}] failed";
+ public static final String UNSET_SCHEMA_TEMPLATE_ON_FAILED =
+ "unsetSchemaTemplate [{}] on [{}] failed";
+ public static final String DROP_SCHEMA_TEMPLATE_FAILED =
+ "dropSchemaTemplate [{}] failed";
+ public static final String CREATE_TIMESERIES_OF_SCHEMA_TEMPLATE_FAILED =
+ "createTimeseriesOfSchemaTemplate {} failed";
+ public static final String SET_TIMEZONE_FAILED = "setTimeZone to [{}] failed";
+ public static final String FETCH_ALL_CONNECTIONS_FAILED = "fetchAllConnections failed";
+
+ // SessionPool - unexpected error (error)
+ public static final String UNEXPECTED_ERROR_IN_INSERT_TABLET =
+ "unexpected error in insertTablet";
+ public static final String UNEXPECTED_ERROR_IN_INSERT_ALIGNED_TABLET =
+ "unexpected error in insertAlignedTablet";
+ public static final String UNEXPECTED_ERROR_IN_INSERT_TABLETS =
+ "unexpected error in insertTablets";
+ public static final String UNEXPECTED_ERROR_IN_INSERT_ALIGNED_TABLETS =
+ "unexpected error in insertAlignedTablets";
+ public static final String UNEXPECTED_ERROR_IN_INSERT_RECORDS =
+ "unexpected error in insertRecords";
+ public static final String UNEXPECTED_ERROR_IN_INSERT_ALIGNED_RECORDS =
+ "unexpected error in insertAlignedRecords";
+ public static final String UNEXPECTED_ERROR_IN_INSERT_RECORD =
+ "unexpected error in insertRecord";
+ public static final String UNEXPECTED_ERROR_IN_INSERT_ALIGNED_RECORD =
+ "unexpected error in insertAlignedRecord";
+ public static final String UNEXPECTED_ERROR_IN_INSERT_RECORDS_OF_ONE_DEVICE =
+ "unexpected error in insertRecordsOfOneDevice";
+ public static final String UNEXPECTED_ERROR_IN_INSERT_STRING_RECORDS_OF_ONE_DEVICE =
+ "unexpected error in insertStringRecordsOfOneDevice";
+ public static final String UNEXPECTED_ERROR_IN_INSERT_ALIGNED_RECORDS_OF_ONE_DEVICE =
+ "unexpected error in insertAlignedRecordsOfOneDevice";
+ public static final String UNEXPECTED_ERROR_IN_INSERT_ALIGNED_STRING_RECORDS_OF_ONE_DEVICE =
+ "unexpected error in insertAlignedStringRecordsOfOneDevice";
+ public static final String UNEXPECTED_ERROR_IN_DELETE_DATA =
+ "unexpected error in deleteData";
+ public static final String UNEXPECTED_ERROR_IN_DELETE_TIMESERIES =
+ "unexpected error in deleteTimeseries";
+ public static final String UNEXPECTED_ERROR_IN_SET_STORAGE_GROUP =
+ "unexpected error in setStorageGroup";
+ public static final String UNEXPECTED_ERROR_IN_DELETE_STORAGE_GROUP =
+ "unexpected error in deleteStorageGroup";
+ public static final String UNEXPECTED_ERROR_IN_DELETE_STORAGE_GROUPS =
+ "unexpected error in deleteStorageGroups";
+ public static final String UNEXPECTED_ERROR_IN_CREATE_DATABASE =
+ "unexpected error in createDatabase";
+ public static final String UNEXPECTED_ERROR_IN_DELETE_DATABASE =
+ "unexpected error in deleteDatabase";
+ public static final String UNEXPECTED_ERROR_IN_DELETE_DATABASES =
+ "unexpected error in deleteDatabases";
+ public static final String UNEXPECTED_ERROR_IN_CREATE_TIMESERIES =
+ "unexpected error in createTimeseries";
+ public static final String UNEXPECTED_ERROR_IN_CREATE_ALIGNED_TIMESERIES =
+ "unexpected error in createAlignedTimeseries";
+ public static final String UNEXPECTED_ERROR_IN_CREATE_MULTI_TIMESERIES =
+ "unexpected error in createMultiTimeseries";
+ public static final String UNEXPECTED_ERROR_IN_CHECK_TIMESERIES_EXISTS =
+ "unexpected error in checkTimeseriesExists";
+ public static final String UNEXPECTED_ERROR_IN_EXECUTE_QUERY_STATEMENT =
+ "unexpected error in executeQueryStatement";
+ public static final String UNEXPECTED_ERROR_IN_EXECUTE_NON_QUERY_STATEMENT =
+ "unexpected error in executeNonQueryStatement";
+ public static final String UNEXPECTED_ERROR_IN_EXECUTE_RAW_DATA_QUERY =
+ "unexpected error in executeRawDataQuery";
+ public static final String UNEXPECTED_ERROR_IN_EXECUTE_LAST_DATA_QUERY =
+ "unexpected error in executeLastDataQuery";
+ public static final String UNEXPECTED_ERROR_IN_EXECUTE_AGGREGATION_QUERY =
+ "unexpected error in executeAggregationQuery";
+ public static final String UNEXPECTED_ERROR_IN_GET_TIMESTAMP_PRECISION =
+ "unexpected error in getTimestampPrecision";
+ public static final String UNEXPECTED_ERROR_IN_TEST_INSERT_TABLET =
+ "unexpected error in testInsertTablet";
+ public static final String UNEXPECTED_ERROR_IN_TEST_INSERT_TABLETS =
+ "unexpected error in testInsertTablets";
+ public static final String UNEXPECTED_ERROR_IN_TEST_INSERT_RECORD =
+ "unexpected error in testInsertRecord";
+ public static final String UNEXPECTED_ERROR_IN_TEST_INSERT_RECORDS =
+ "unexpected error in testInsertRecords";
+ public static final String UNEXPECTED_ERROR_IN_CREATE_SCHEMA_TEMPLATE =
+ "unexpected error in createSchemaTemplate";
+ public static final String UNEXPECTED_ERROR_IN_ADD_ALIGNED_MEASUREMENTS_IN_TEMPLATE =
+ "unexpected error in addAlignedMeasurementsInTemplate";
+ public static final String UNEXPECTED_ERROR_IN_ADD_ALIGNED_MEASUREMENT_IN_TEMPLATE =
+ "unexpected error in addAlignedMeasurementInTemplate";
+ public static final String UNEXPECTED_ERROR_IN_ADD_UNALIGNED_MEASUREMENTS_IN_TEMPLATE =
+ "unexpected error in addUnalignedMeasurementsInTemplate";
+ public static final String UNEXPECTED_ERROR_IN_ADD_UNALIGNED_MEASUREMENT_IN_TEMPLATE =
+ "unexpected error in addUnalignedMeasurementInTemplate";
+ public static final String UNEXPECTED_ERROR_IN_DELETE_NODE_IN_TEMPLATE =
+ "unexpected error in deleteNodeInTemplate";
+ public static final String UNEXPECTED_ERROR_IN_COUNT_MEASUREMENTS_IN_TEMPLATE =
+ "unexpected error in countMeasurementsInTemplate";
+ public static final String UNEXPECTED_ERROR_IN_IS_MEASUREMENT_IN_TEMPLATE =
+ "unexpected error in isMeasurementInTemplate";
+ public static final String UNEXPECTED_ERROR_IN_IS_PATH_EXIST_IN_TEMPLATE =
+ "unexpected error in isPathExistInTemplate";
+ public static final String UNEXPECTED_ERROR_IN_SHOW_MEASUREMENTS_IN_TEMPLATE =
+ "unexpected error in showMeasurementsInTemplate";
+ public static final String UNEXPECTED_ERROR_IN_SHOW_ALL_TEMPLATES =
+ "unexpected error in showAllTemplates";
+ public static final String UNEXPECTED_ERROR_IN_SHOW_PATHS_TEMPLATE_SET_ON =
+ "unexpected error in showPathsTemplateSetOn";
+ public static final String UNEXPECTED_ERROR_IN_SHOW_PATHS_TEMPLATE_USING_ON =
+ "unexpected error in showPathsTemplateUsingOn";
+ public static final String UNEXPECTED_ERROR_IN_SET_SCHEMA_TEMPLATE =
+ "unexpected error in setSchemaTemplate";
+ public static final String UNEXPECTED_ERROR_IN_UNSET_SCHEMA_TEMPLATE =
+ "unexpected error in unsetSchemaTemplate";
+ public static final String UNEXPECTED_ERROR_IN_DROP_SCHEMA_TEMPLATE =
+ "unexpected error in dropSchemaTemplate";
+ public static final String UNEXPECTED_ERROR_IN_CREATE_TIMESERIES_USING_SCHEMA_TEMPLATE =
+ "unexpected error in createTimeseriesUsingSchemaTemplate";
+ public static final String REDIRECT_TWICE_SUFFIX = " redirect twice, please try again.";
+
+ private SessionMessages() {}
+}
diff --git a/iotdb-client/session/src/main/i18n/zh/org/apache/iotdb/session/i18n/SessionMessages.java b/iotdb-client/session/src/main/i18n/zh/org/apache/iotdb/session/i18n/SessionMessages.java
new file mode 100644
index 0000000000000..7c132e7a33dc6
--- /dev/null
+++ b/iotdb-client/session/src/main/i18n/zh/org/apache/iotdb/session/i18n/SessionMessages.java
@@ -0,0 +1,260 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.session.i18n;
+
+public final class SessionMessages {
+
+ // Session
+ public static final String NODE_URLS_EMPTY = "nodeUrls 不能为空。";
+ public static final String REDIRECT_TWICE = "{} 重定向了两次";
+ public static final String REDIRECT_TWICE_EXCEPTION = "%s 重定向了两次,请重试。";
+ public static final String FAILED_TO_EXECUTE_FOR_ENDPOINT = "在 {} 上执行 '{}' 失败";
+ public static final String ALL_VALUES_NULL =
+ "{} 的所有值均为 null,null 值为 {}";
+ public static final String SOME_VALUES_NULL =
+ "{} 的部分值为 null,null 值为 {}";
+ public static final String MEET_ERROR_WHEN_ASYNC_INSERT = "异步写入时遇到错误!";
+ public static final String MEASUREMENT_NON_NULL = "测量值名称不能为 null";
+ public static final String NO_TABLET_INSERTING = "没有可写入的 Tablet!";
+ public static final String SESSION_NOT_OPEN =
+ "Session 尚未打开,请先调用 Session.open()";
+ public static final String ALL_INSERT_DATA_IS_NULL = "所有写入数据均为 null。";
+
+ // SessionConnection
+ public static final String CLUSTER_NO_NODES = "集群中没有可连接的节点";
+ public static final String CLOSE_SESSION_ERROR =
+ "关闭服务端 Session 时发生错误,服务器可能已宕机。";
+ public static final String REDIRECT_QUERY_ERROR =
+ "需要重定向查询,不应看到此错误。";
+ public static final String RETRY_RECONNECTING =
+ "第 {} 次重试,正在重连其他 DataNode";
+ public static final String NODE_DOWN_TRY_NEXT =
+ "当前节点 {} 可能已宕机,尝试下一个节点";
+ public static final String LOGIN_FAILED = "登录失败,原因:{}";
+ public static final String CLOSE_CONNECTION_FAILED = "关闭连接失败,{}";
+ public static final String THREAD_INTERRUPTED_DURING_RETRY =
+ "线程 {} 在第 {} 次重试等待 {} 毫秒时被中断,退出重试循环。";
+
+ // ThriftConnection
+ public static final String CLOSING_SESSION_FAILED =
+ "关闭 Session-{} 与 {} 的连接失败。";
+
+ // NodesSupplier
+ public static final String FAILED_TO_CREATE_CONNECTION =
+ "创建与 {} 的连接失败。";
+ public static final String FAILED_TO_FETCH_DATA_NODE_LIST =
+ "从 {} 获取 DataNode 列表失败。";
+
+ // SessionUtils
+ public static final String NODE_URLS_IS_NULL = "nodeUrls 为 null";
+
+ // InternalNode (template)
+ public static final String DUPLICATED_CHILD_IN_TEMPLATE =
+ "模板中存在重复的子节点。";
+
+ // SessionPool
+ public static final String SESSION_POOL_IS_CLOSED = "Session 连接池已关闭";
+ public static final String CLOSE_THE_SESSION_FAILED = "关闭 Session 失败。";
+ public static final String TIMEOUT_TO_GET_CONNECTION =
+ "从 %s 获取连接超时";
+ public static final String INTERRUPTED = "被中断!";
+ public static final String CLOSING_SESSION_POOL = "正在关闭 Session 连接池,清理队列...";
+
+ // SessionPool - operation failed (warn)
+ public static final String INSERT_TABLET_FAILED = "insertTablet 失败";
+ public static final String INSERT_ALIGNED_TABLET_FAILED = "insertAlignedTablet 失败";
+ public static final String INSERT_TABLETS_FAILED = "insertTablets 失败";
+ public static final String INSERT_ALIGNED_TABLETS_FAILED = "insertAlignedTablets 失败";
+ public static final String INSERT_RECORDS_FAILED = "insertRecords 失败";
+ public static final String INSERT_ALIGNED_RECORDS_FAILED = "insertAlignedRecords 失败";
+ public static final String INSERT_RECORD_FAILED = "insertRecord 失败";
+ public static final String INSERT_ALIGNED_RECORD_FAILED = "insertAlignedRecord 失败";
+ public static final String INSERT_RECORDS_OF_ONE_DEVICE_FAILED =
+ "insertRecordsOfOneDevice 失败";
+ public static final String INSERT_STRING_RECORDS_OF_ONE_DEVICE_FAILED =
+ "insertStringRecordsOfOneDevice 失败";
+ public static final String INSERT_ALIGNED_RECORDS_OF_ONE_DEVICE_FAILED =
+ "insertAlignedRecordsOfOneDevice 失败";
+ public static final String INSERT_ALIGNED_STRING_RECORDS_OF_ONE_DEVICE_FAILED =
+ "insertAlignedStringRecordsOfOneDevice 失败";
+ public static final String DELETE_DATA_FAILED = "deleteData 失败";
+ public static final String DELETE_TIMESERIES_FAILED = "deleteTimeseries 失败";
+ public static final String SET_STORAGE_GROUP_FAILED = "setStorageGroup 失败";
+ public static final String DELETE_STORAGE_GROUP_FAILED = "deleteStorageGroup 失败";
+ public static final String DELETE_STORAGE_GROUPS_FAILED = "deleteStorageGroups 失败";
+ public static final String CREATE_DATABASE_FAILED = "createDatabase 失败";
+ public static final String DELETE_DATABASE_FAILED = "deleteDatabase 失败";
+ public static final String DELETE_DATABASES_FAILED = "deleteDatabases 失败";
+ public static final String CREATE_TIMESERIES_FAILED = "createTimeseries 失败";
+ public static final String CREATE_ALIGNED_TIMESERIES_FAILED = "createAlignedTimeseries 失败";
+ public static final String CREATE_MULTI_TIMESERIES_FAILED = "createMultiTimeseries 失败";
+ public static final String CHECK_TIMESERIES_EXISTS_FAILED = "checkTimeseriesExists 失败";
+ public static final String EXECUTE_QUERY_STATEMENT_FAILED = "executeQueryStatement 失败";
+ public static final String EXECUTE_NON_QUERY_STATEMENT_FAILED =
+ "executeNonQueryStatement 失败";
+ public static final String EXECUTE_RAW_DATA_QUERY_FAILED = "executeRawDataQuery 失败";
+ public static final String EXECUTE_LAST_DATA_QUERY_FAILED = "executeLastDataQuery 失败";
+ public static final String EXECUTE_AGGREGATION_QUERY_FAILED = "executeAggregationQuery 失败";
+ public static final String GET_TIMESTAMP_PRECISION_FAILED = "getTimestampPrecision 失败";
+ public static final String TEST_INSERT_TABLET_FAILED = "testInsertTablet 失败";
+ public static final String TEST_INSERT_TABLETS_FAILED = "testInsertTablets 失败";
+ public static final String TEST_INSERT_RECORD_FAILED = "testInsertRecord 失败";
+ public static final String TEST_INSERT_RECORDS_FAILED = "testInsertRecords 失败";
+ public static final String CREATE_SCHEMA_TEMPLATE_FAILED = "createSchemaTemplate 失败";
+ public static final String ADD_ALIGNED_MEASUREMENTS_IN_TEMPLATE_FAILED =
+ "addAlignedMeasurementsInTemplate 失败";
+ public static final String ADD_ALIGNED_MEASUREMENT_IN_TEMPLATE_FAILED =
+ "addAlignedMeasurementInTemplate 失败";
+ public static final String ADD_UNALIGNED_MEASUREMENTS_IN_TEMPLATE_FAILED =
+ "addUnalignedMeasurementsInTemplate 失败";
+ public static final String ADD_UNALIGNED_MEASUREMENT_IN_TEMPLATE_FAILED =
+ "addUnalignedMeasurementInTemplate 失败";
+ public static final String DELETE_NODE_IN_TEMPLATE_FAILED = "deleteNodeInTemplate 失败";
+ public static final String COUNT_MEASUREMENTS_IN_TEMPLATE_FAILED =
+ "countMeasurementsInTemplate 失败";
+ public static final String IS_MEASUREMENT_IN_TEMPLATE_FAILED =
+ "isMeasurementInTemplate 失败";
+ public static final String IS_PATH_EXIST_IN_TEMPLATE_FAILED =
+      "isPathExistInTemplate 失败";
+ public static final String SHOW_MEASUREMENTS_IN_TEMPLATE_FAILED =
+ "showMeasurementsInTemplate 失败";
+ public static final String SHOW_ALL_TEMPLATES_FAILED = "showAllTemplates 失败";
+ public static final String SHOW_PATHS_TEMPLATE_SET_ON_FAILED =
+ "showPathsTemplateSetOn 失败";
+ public static final String SHOW_PATHS_TEMPLATE_USING_ON_FAILED =
+ "showPathsTemplateUsingOn 失败";
+ public static final String SET_SCHEMA_TEMPLATE_ON_FAILED =
+ "setSchemaTemplate [{}] 在 [{}] 上失败";
+ public static final String UNSET_SCHEMA_TEMPLATE_ON_FAILED =
+ "unsetSchemaTemplate [{}] 在 [{}] 上失败";
+ public static final String DROP_SCHEMA_TEMPLATE_FAILED =
+ "dropSchemaTemplate [{}] 失败";
+ public static final String CREATE_TIMESERIES_OF_SCHEMA_TEMPLATE_FAILED =
+ "createTimeseriesOfSchemaTemplate {} 失败";
+ public static final String SET_TIMEZONE_FAILED = "设置时区为 [{}] 失败";
+ public static final String FETCH_ALL_CONNECTIONS_FAILED = "fetchAllConnections 失败";
+
+ // SessionPool - unexpected error (error)
+ public static final String UNEXPECTED_ERROR_IN_INSERT_TABLET =
+ "insertTablet 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_INSERT_ALIGNED_TABLET =
+ "insertAlignedTablet 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_INSERT_TABLETS =
+ "insertTablets 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_INSERT_ALIGNED_TABLETS =
+ "insertAlignedTablets 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_INSERT_RECORDS =
+ "insertRecords 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_INSERT_ALIGNED_RECORDS =
+ "insertAlignedRecords 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_INSERT_RECORD =
+ "insertRecord 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_INSERT_ALIGNED_RECORD =
+ "insertAlignedRecord 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_INSERT_RECORDS_OF_ONE_DEVICE =
+ "insertRecordsOfOneDevice 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_INSERT_STRING_RECORDS_OF_ONE_DEVICE =
+ "insertStringRecordsOfOneDevice 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_INSERT_ALIGNED_RECORDS_OF_ONE_DEVICE =
+ "insertAlignedRecordsOfOneDevice 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_INSERT_ALIGNED_STRING_RECORDS_OF_ONE_DEVICE =
+ "insertAlignedStringRecordsOfOneDevice 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_DELETE_DATA =
+ "deleteData 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_DELETE_TIMESERIES =
+ "deleteTimeseries 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_SET_STORAGE_GROUP =
+ "setStorageGroup 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_DELETE_STORAGE_GROUP =
+ "deleteStorageGroup 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_DELETE_STORAGE_GROUPS =
+ "deleteStorageGroups 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_CREATE_DATABASE =
+ "createDatabase 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_DELETE_DATABASE =
+ "deleteDatabase 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_DELETE_DATABASES =
+ "deleteDatabases 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_CREATE_TIMESERIES =
+ "createTimeseries 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_CREATE_ALIGNED_TIMESERIES =
+ "createAlignedTimeseries 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_CREATE_MULTI_TIMESERIES =
+ "createMultiTimeseries 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_CHECK_TIMESERIES_EXISTS =
+ "checkTimeseriesExists 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_EXECUTE_QUERY_STATEMENT =
+ "executeQueryStatement 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_EXECUTE_NON_QUERY_STATEMENT =
+ "executeNonQueryStatement 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_EXECUTE_RAW_DATA_QUERY =
+ "executeRawDataQuery 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_EXECUTE_LAST_DATA_QUERY =
+ "executeLastDataQuery 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_EXECUTE_AGGREGATION_QUERY =
+ "executeAggregationQuery 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_GET_TIMESTAMP_PRECISION =
+ "getTimestampPrecision 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_TEST_INSERT_TABLET =
+ "testInsertTablet 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_TEST_INSERT_TABLETS =
+ "testInsertTablets 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_TEST_INSERT_RECORD =
+ "testInsertRecord 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_TEST_INSERT_RECORDS =
+ "testInsertRecords 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_CREATE_SCHEMA_TEMPLATE =
+ "createSchemaTemplate 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_ADD_ALIGNED_MEASUREMENTS_IN_TEMPLATE =
+ "addAlignedMeasurementsInTemplate 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_ADD_ALIGNED_MEASUREMENT_IN_TEMPLATE =
+ "addAlignedMeasurementInTemplate 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_ADD_UNALIGNED_MEASUREMENTS_IN_TEMPLATE =
+ "addUnalignedMeasurementsInTemplate 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_ADD_UNALIGNED_MEASUREMENT_IN_TEMPLATE =
+ "addUnalignedMeasurementInTemplate 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_DELETE_NODE_IN_TEMPLATE =
+ "deleteNodeInTemplate 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_COUNT_MEASUREMENTS_IN_TEMPLATE =
+ "countMeasurementsInTemplate 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_IS_MEASUREMENT_IN_TEMPLATE =
+ "isMeasurementInTemplate 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_IS_PATH_EXIST_IN_TEMPLATE =
+ "isPathExistInTemplate 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_SHOW_MEASUREMENTS_IN_TEMPLATE =
+ "showMeasurementsInTemplate 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_SHOW_ALL_TEMPLATES =
+ "showAllTemplates 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_SHOW_PATHS_TEMPLATE_SET_ON =
+ "showPathsTemplateSetOn 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_SHOW_PATHS_TEMPLATE_USING_ON =
+ "showPathsTemplateUsingOn 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_SET_SCHEMA_TEMPLATE =
+ "setSchemaTemplate 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_UNSET_SCHEMA_TEMPLATE =
+ "unsetSchemaTemplate 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_DROP_SCHEMA_TEMPLATE =
+ "dropSchemaTemplate 发生意外错误";
+ public static final String UNEXPECTED_ERROR_IN_CREATE_TIMESERIES_USING_SCHEMA_TEMPLATE =
+ "createTimeseriesUsingSchemaTemplate 发生意外错误";
+ public static final String REDIRECT_TWICE_SUFFIX = " 重定向两次,请重试。";
+
+ private SessionMessages() {}
+}
diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/NodesSupplier.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/NodesSupplier.java
index aa28b9bc0c70c..d57844b499705 100644
--- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/NodesSupplier.java
+++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/NodesSupplier.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.common.rpc.thrift.TEndPoint;
import org.apache.iotdb.isession.INodeSupplier;
import org.apache.iotdb.isession.SessionDataSet;
+import org.apache.iotdb.session.i18n.SessionMessages;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -194,7 +195,7 @@ private boolean createConnection(TEndPoint endPoint) {
version);
return true;
} catch (Exception e) {
- LOGGER.warn("Failed to create connection with {}.", endPoint);
+ LOGGER.warn(SessionMessages.FAILED_TO_CREATE_CONNECTION, endPoint);
destroyCurrentClient();
return false;
}
@@ -227,7 +228,7 @@ private boolean updateDataNodeList() {
client.executeQueryStatement(SHOW_AVAILABLE_URLS_COMMAND, TIMEOUT_IN_MS, FETCH_SIZE)) {
updateAvailableNodes(sessionDataSet);
} catch (Exception e1) {
- LOGGER.warn("Failed to fetch data node list from {}.", client.endPoint);
+ LOGGER.warn(SessionMessages.FAILED_TO_FETCH_DATA_NODE_LIST, client.endPoint);
return false;
}
return true;
diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/Session.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/Session.java
index c38bff5e9d4a1..c6cd7df3d645f 100644
--- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/Session.java
+++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/Session.java
@@ -56,6 +56,7 @@
import org.apache.iotdb.service.rpc.thrift.TSQueryTemplateResp;
import org.apache.iotdb.service.rpc.thrift.TSSetSchemaTemplateReq;
import org.apache.iotdb.service.rpc.thrift.TSUnsetSchemaTemplateReq;
+import org.apache.iotdb.session.i18n.SessionMessages;
import org.apache.iotdb.session.rpccompress.TabletEncoder;
import org.apache.iotdb.session.template.MeasurementNode;
import org.apache.iotdb.session.template.TemplateQueryType;
@@ -217,7 +218,7 @@ public class Session implements ISession {
"All values are null and this submission is ignored,deviceId is [{}],times are [{}],measurements are [{}]";
private static final String ALL_VALUES_ARE_NULL_MULTI_DEVICES =
"All values are null and this submission is ignored,deviceIds are [{}],times are [{}],measurements are [{}]";
- private static final String ALL_INSERT_DATA_IS_NULL = "All inserted data is null.";
+ private static final String ALL_INSERT_DATA_IS_NULL = SessionMessages.ALL_INSERT_DATA_IS_NULL;
protected static final String TABLE = "table";
protected static final String TREE = "tree";
@@ -431,7 +432,7 @@ public Session(
boolean enableRedirection,
Version version) {
if (nodeUrls.isEmpty()) {
- throw new IllegalArgumentException("nodeUrls shouldn't be empty.");
+ throw new IllegalArgumentException(SessionMessages.NODE_URLS_EMPTY);
}
nodeUrls = shuffleNodeUrls(nodeUrls);
this.nodeUrls = nodeUrls;
@@ -448,7 +449,7 @@ public Session(
public Session(AbstractSessionBuilder builder) {
if (builder.nodeUrls != null) {
if (builder.nodeUrls.isEmpty()) {
- throw new IllegalArgumentException("nodeUrls shouldn't be empty.");
+ throw new IllegalArgumentException(SessionMessages.NODE_URLS_EMPTY);
}
builder.nodeUrls = shuffleNodeUrls(builder.nodeUrls);
this.nodeUrls = builder.nodeUrls;
@@ -1022,8 +1023,8 @@ private SessionDataSet executeStatementMayRedirect(String sql, long timeoutInMs)
try {
return getDefaultSessionConnection().executeQueryStatement(sql, queryTimeoutInMs);
} catch (RedirectException redirectException) {
- logger.error("{} redirect twice", sql, redirectException);
- throw new StatementExecutionException(sql + " redirect twice, please try again.");
+ logger.error(SessionMessages.REDIRECT_TWICE, sql, redirectException);
+ throw new StatementExecutionException(sql + SessionMessages.REDIRECT_TWICE_SUFFIX);
}
} else {
throw new StatementExecutionException(MSG_DONOT_ENABLE_REDIRECT);
@@ -1072,7 +1073,7 @@ public void executeNonQueryStatement(String sql)
try {
sessionConnection.executeNonQueryStatement(sql);
} catch (Throwable t) {
- logger.warn("failed to execute '{}' for {}", sql, entry.getKey());
+ logger.warn(SessionMessages.FAILED_TO_EXECUTE_FOR_ENDPOINT, sql, entry.getKey());
iterator.remove();
}
}
@@ -1829,10 +1830,10 @@ private boolean filterNullValueAndMeasurement(
}
}
if (valuesList.isEmpty()) {
- logger.info("All values of the {} are null,null values are {}", deviceId, nullMap);
+ logger.info(SessionMessages.ALL_VALUES_NULL, deviceId, nullMap);
return true;
} else {
- logger.info("Some values of {} are null,null values are {}", deviceId, nullMap);
+ logger.info(SessionMessages.SOME_VALUES_NULL, deviceId, nullMap);
}
return false;
}
@@ -1876,10 +1877,10 @@ private boolean filterNullValueAndMeasurementWithStringType(
}
}
if (valuesList.isEmpty()) {
- logger.info("All values of the {} are null,null values are {}", deviceId, nullMap);
+ logger.info(SessionMessages.ALL_VALUES_NULL, deviceId, nullMap);
return true;
} else {
- logger.info("Some values of {} are null,null values are {}", deviceId, nullMap);
+ logger.info(SessionMessages.SOME_VALUES_NULL, deviceId, nullMap);
}
return false;
}
@@ -2971,7 +2972,7 @@ private void insertRelationalTabletByGroup(Map relati
completableFuture.join();
} catch (CompletionException completionException) {
Throwable cause = completionException.getCause();
- logger.error("Meet error when async insert!", cause);
+ logger.error(SessionMessages.MEET_ERROR_WHEN_ASYNC_INSERT, cause);
if (cause instanceof IoTDBConnectionException) {
throw (IoTDBConnectionException) cause;
} else {
@@ -3045,7 +3046,7 @@ private TSInsertTabletReq genTSInsertTabletReq(Tablet tablet, boolean sorted, bo
for (IMeasurementSchema measurementSchema : tablet.getSchemas()) {
if (measurementSchema.getMeasurementName() == null) {
- throw new IllegalArgumentException("measurement should be non null value");
+ throw new IllegalArgumentException(SessionMessages.MEASUREMENT_NON_NULL);
}
request.addToMeasurements(measurementSchema.getMeasurementName());
request.addToTypes(measurementSchema.getType().ordinal());
@@ -3068,7 +3069,7 @@ private TSInsertTabletReq genTSInsertTabletReq(Tablet tablet, boolean sorted, bo
.serialize());
for (IMeasurementSchema measurementSchema : tablet.getSchemas()) {
if (measurementSchema.getMeasurementName() == null) {
- throw new IllegalArgumentException("measurement should be non null value");
+ throw new IllegalArgumentException(SessionMessages.MEASUREMENT_NON_NULL);
}
encodingTypes.add(
this.columnEncodersMap
@@ -3195,7 +3196,7 @@ private TSInsertTabletsReq genTSInsertTabletsReq(
List tablets, boolean sorted, boolean isAligned) throws BatchExecutionException {
TSInsertTabletsReq request = new TSInsertTabletsReq();
if (tablets.isEmpty()) {
- throw new BatchExecutionException("No tablet is inserting!");
+ throw new BatchExecutionException(SessionMessages.NO_TABLET_INSERTING);
}
for (Tablet tablet : tablets) {
updateTSInsertTabletsReq(request, tablet, sorted, isAligned);
@@ -3214,7 +3215,7 @@ private void updateTSInsertTabletsReq(
request.setIsAligned(isAligned);
for (IMeasurementSchema measurementSchema : tablet.getSchemas()) {
if (measurementSchema.getMeasurementName() == null) {
- throw new IllegalArgumentException("measurement should be non null value");
+ throw new IllegalArgumentException(SessionMessages.MEASUREMENT_NON_NULL);
}
measurements.add(measurementSchema.getMeasurementName());
dataTypes.add(measurementSchema.getType().ordinal());
@@ -4228,7 +4229,7 @@ private void insertByGroup(
completableFuture.join();
} catch (CompletionException completionException) {
Throwable cause = completionException.getCause();
- logger.error("Meet error when async insert!", cause);
+ logger.error(SessionMessages.MEET_ERROR_WHEN_ASYNC_INSERT, cause);
if (cause instanceof IoTDBConnectionException) {
throw (IoTDBConnectionException) cause;
} else {
@@ -4295,7 +4296,7 @@ public String getSqlDialect() {
protected SessionConnection getDefaultSessionConnection() throws IoTDBConnectionException {
if (defaultSessionConnection == null) {
- throw new IoTDBConnectionException("Session is not open, please invoke Session.open() first");
+ throw new IoTDBConnectionException(SessionMessages.SESSION_NOT_OPEN);
}
return defaultSessionConnection;
}
diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/SessionConnection.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/SessionConnection.java
index bf919d50532a4..f6608d8fe4bfe 100644
--- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/SessionConnection.java
+++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/SessionConnection.java
@@ -65,6 +65,7 @@
import org.apache.iotdb.service.rpc.thrift.TSSetSchemaTemplateReq;
import org.apache.iotdb.service.rpc.thrift.TSSetTimeZoneReq;
import org.apache.iotdb.service.rpc.thrift.TSUnsetSchemaTemplateReq;
+import org.apache.iotdb.session.i18n.SessionMessages;
import org.apache.iotdb.session.util.SessionUtils;
import org.apache.thrift.TException;
@@ -264,7 +265,7 @@ private void initClusterConn() throws IoTDBConnectionException {
init(tEndPoint, session.useSSL, session.trustStore, session.trustStorePwd);
} catch (IoTDBConnectionException e) {
if (!reconnect()) {
- logger.error("Cluster has no nodes to connect");
+ logger.error(SessionMessages.CLUSTER_NO_NODES);
throw new IoTDBConnectionException(logForReconnectionFailure());
}
} catch (StatementExecutionException e) {
@@ -283,8 +284,7 @@ public void close() throws IoTDBConnectionException {
try {
client.closeSession(req);
} catch (TException e) {
- throw new IoTDBConnectionException(
- "Error occurs when closing session at server. Maybe server is down.", e);
+ throw new IoTDBConnectionException(SessionMessages.CLOSE_SESSION_ERROR, e);
} finally {
if (transport != null) {
transport.close();
@@ -379,7 +379,7 @@ protected boolean checkTimeseriesExists(String path, long timeout)
try {
dataSet = executeQueryStatement(String.format("SHOW TIMESERIES %s", path), timeout);
} catch (RedirectException e) {
- throw new StatementExecutionException("need to redirect query, should not see this.", e);
+ throw new StatementExecutionException(SessionMessages.REDIRECT_QUERY_ERROR, e);
}
return dataSet.hasNext();
} finally {
@@ -883,7 +883,7 @@ private RetryResult callWithRetry(TFunction rpc) {
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
logger.warn(
- "Thread {} was interrupted during retry {} with wait time {} ms. Exiting retry loop.",
+ SessionMessages.THREAD_INTERRUPTED_DURING_RETRY,
Thread.currentThread().getName(),
i,
retryIntervalInMs);
@@ -961,7 +961,7 @@ private RetryResult callWithRetryAndReconnect(
// 1. the current datanode is unreachable (TException)
// 2. the current datanode is partitioned with other nodes (not in availableNodes)
// 3. asymmetric network partition
- logger.debug("Retry attempt #{}, Reconnecting to other datanode", retryAttempt);
+ logger.debug(SessionMessages.RETRY_RECONNECTING, retryAttempt);
reconnect();
}
try {
@@ -969,7 +969,7 @@ private RetryResult callWithRetryAndReconnect(
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
logger.warn(
- "Thread {} was interrupted during retry {} with wait time {} ms. Exiting retry loop.",
+ SessionMessages.THREAD_INTERRUPTED_DURING_RETRY,
Thread.currentThread().getName(),
retryAttempt,
retryIntervalInMs);
@@ -1082,10 +1082,10 @@ private boolean reconnect() {
init(endPoint, session.useSSL, session.trustStore, session.trustStorePwd);
connectedSuccess = true;
} catch (IoTDBConnectionException e) {
- logger.warn("The current node may have been down {}, try next node", endPoint);
+ logger.warn(SessionMessages.NODE_DOWN_TRY_NEXT, endPoint);
continue;
} catch (StatementExecutionException e) {
- logger.warn("login in failed, because {}", e.getMessage());
+ logger.warn(SessionMessages.LOGIN_FAILED, e.getMessage());
}
break;
}
@@ -1105,7 +1105,7 @@ private boolean reconnect() {
try {
v.close();
} catch (IoTDBConnectionException e) {
- logger.warn("close connection failed, {}", e.getMessage());
+ logger.warn(SessionMessages.CLOSE_CONNECTION_FAILED, e.getMessage());
}
}
return this;
diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/ThriftConnection.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/ThriftConnection.java
index 5e734b8dfa1a3..e3b9e8fc98987 100644
--- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/ThriftConnection.java
+++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/ThriftConnection.java
@@ -31,6 +31,7 @@
import org.apache.iotdb.service.rpc.thrift.TSExecuteStatementResp;
import org.apache.iotdb.service.rpc.thrift.TSOpenSessionReq;
import org.apache.iotdb.service.rpc.thrift.TSOpenSessionResp;
+import org.apache.iotdb.session.i18n.SessionMessages;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TBinaryProtocol;
@@ -192,7 +193,7 @@ public void close() {
client.closeSession(new TSCloseSessionReq(sessionId));
}
} catch (TException e) {
- LOGGER.warn("Closing Session-{} with {} failed.", sessionId, endPoint);
+ LOGGER.warn(SessionMessages.CLOSING_SESSION_FAILED, sessionId, endPoint);
} finally {
if (transport.isOpen()) {
transport.close();
diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/pool/SessionPool.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/pool/SessionPool.java
index 949fba1251c50..3eaabe7f1fa78 100644
--- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/pool/SessionPool.java
+++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/pool/SessionPool.java
@@ -37,6 +37,7 @@
import org.apache.iotdb.session.DummyNodesSupplier;
import org.apache.iotdb.session.NodesSupplier;
import org.apache.iotdb.session.Session;
+import org.apache.iotdb.session.i18n.SessionMessages;
import org.apache.iotdb.session.util.SessionUtils;
import org.apache.thrift.TException;
@@ -90,8 +91,8 @@
public class SessionPool implements ISessionPool {
private static final Logger LOGGER = LoggerFactory.getLogger(SessionPool.class);
- public static final String SESSION_POOL_IS_CLOSED = "Session pool is closed";
- public static final String CLOSE_THE_SESSION_FAILED = "close the session failed.";
+ public static final String SESSION_POOL_IS_CLOSED = SessionMessages.SESSION_POOL_IS_CLOSED;
+ public static final String CLOSE_THE_SESSION_FAILED = SessionMessages.CLOSE_THE_SESSION_FAILED;
private static final int RETRY = 3;
private static final int FINAL_RETRY = RETRY - 1;
@@ -179,35 +180,42 @@ public class SessionPool implements ISessionPool {
// may be null
protected String database;
- private static final String INSERT_RECORD_FAIL = "insertRecord failed";
+ private static final String INSERT_RECORD_FAIL = SessionMessages.INSERT_RECORD_FAILED;
- private static final String INSERT_RECORD_ERROR_MSG = "unexpected error in insertRecord";
+ private static final String INSERT_RECORD_ERROR_MSG =
+ SessionMessages.UNEXPECTED_ERROR_IN_INSERT_RECORD;
- private static final String INSERT_RECORDS_ERROR_MSG = "unexpected error in insertRecords";
+ private static final String INSERT_RECORDS_ERROR_MSG =
+ SessionMessages.UNEXPECTED_ERROR_IN_INSERT_RECORDS;
- private static final String EXECUTE_LASTDATAQUERY_FAIL = "executeLastDataQuery failed";
+ private static final String EXECUTE_LASTDATAQUERY_FAIL =
+ SessionMessages.EXECUTE_LAST_DATA_QUERY_FAILED;
private static final String EXECUTE_LASTDATAQUERY_ERROR =
- "unexpected error in executeLastDataQuery";
+ SessionMessages.UNEXPECTED_ERROR_IN_EXECUTE_LAST_DATA_QUERY;
- private static final String EXECUTE_AGGREGATION_QUERY_FAIL = "executeAggregationQuery failed";
+ private static final String EXECUTE_AGGREGATION_QUERY_FAIL =
+ SessionMessages.EXECUTE_AGGREGATION_QUERY_FAILED;
private static final String INSERT_RECORDS_OF_ONE_DEVICE_ERROR_MSG =
- "unexpected error in insertRecordsOfOneDevice";
+ SessionMessages.UNEXPECTED_ERROR_IN_INSERT_RECORDS_OF_ONE_DEVICE;
- private static final String DELETE_DATA_ERROR_MSG = "unexpected error in deleteData";
+ private static final String DELETE_DATA_ERROR_MSG =
+ SessionMessages.UNEXPECTED_ERROR_IN_DELETE_DATA;
private static final String CREATE_SCHEMA_TEMPLATE_ERROR_MSG =
- "unexpected error in createSchemaTemplate";
+ SessionMessages.UNEXPECTED_ERROR_IN_CREATE_SCHEMA_TEMPLATE;
private static final String EXECUTE_AGGREGATION_QUERY_ERROR_MSG =
- "unexpected error in executeAggregationQuery";
+ SessionMessages.UNEXPECTED_ERROR_IN_EXECUTE_AGGREGATION_QUERY;
- private static final String DELETE_DATA_FAIL = "deleteData failed";
+ private static final String DELETE_DATA_FAIL = SessionMessages.DELETE_DATA_FAILED;
- private static final String INSERT_RECORDS_OF_ONE_DEVICE_FAIL = "insertRecordsOfOneDevice failed";
+ private static final String INSERT_RECORDS_OF_ONE_DEVICE_FAIL =
+ SessionMessages.INSERT_RECORDS_OF_ONE_DEVICE_FAILED;
- private static final String CREATE_SCHEMA_TEMPLATE_FAIL = "createSchemaTemplate failed";
+ private static final String CREATE_SCHEMA_TEMPLATE_FAIL =
+ SessionMessages.CREATE_SCHEMA_TEMPLATE_FAILED;
public SessionPool(String host, int port, String user, String password, int maxSize) {
this(
@@ -481,7 +489,7 @@ public SessionPool(
this.host = null;
this.port = -1;
if (nodeUrls.isEmpty()) {
- throw new IllegalArgumentException("nodeUrls shouldn't be empty.");
+ throw new IllegalArgumentException(SessionMessages.NODE_URLS_EMPTY);
}
this.nodeUrls = nodeUrls;
this.user = user;
@@ -539,7 +547,7 @@ public SessionPool(AbstractSessionPoolBuilder builder) {
}
if (builder.nodeUrls != null) {
if (builder.nodeUrls.isEmpty()) {
- throw new IllegalArgumentException("nodeUrls shouldn't be empty.");
+ throw new IllegalArgumentException(SessionMessages.NODE_URLS_EMPTY);
}
this.nodeUrls = builder.nodeUrls;
this.host = null;
@@ -701,11 +709,11 @@ private ISession getSession() throws IoTDBConnectionException {
size);
if (System.currentTimeMillis() - start > waitToGetSessionTimeoutInMs) {
throw new IoTDBConnectionException(
- String.format("timeout to get a connection from %s", formattedNodeUrls));
+ String.format(SessionMessages.TIMEOUT_TO_GET_CONNECTION, formattedNodeUrls));
}
}
} catch (InterruptedException e) {
- LOGGER.warn("Interrupted!", e);
+ LOGGER.warn(SessionMessages.INTERRUPTED, e);
Thread.currentThread().interrupt();
// wake up from this.wait(1000) by this.notify()
}
@@ -815,7 +823,7 @@ public synchronized void close() {
this.availableNodes.close();
this.availableNodes = null;
}
- LOGGER.info("closing the session pool, cleaning queues...");
+ LOGGER.info(SessionMessages.CLOSING_SESSION_POOL);
this.closed = true;
queue.clear();
occupied.clear();
@@ -943,13 +951,13 @@ public void insertTablet(Tablet tablet, boolean sorted)
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("insertTablet failed", e);
+ LOGGER.warn(SessionMessages.INSERT_TABLET_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in insertTablet", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_INSERT_TABLET, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -986,13 +994,13 @@ public void insertAlignedTablet(Tablet tablet, boolean sorted)
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("insertAlignedTablet failed", e);
+ LOGGER.warn(SessionMessages.INSERT_ALIGNED_TABLET_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in insertAlignedTablet", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_INSERT_ALIGNED_TABLET, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -1035,13 +1043,13 @@ public void insertTablets(Map tablets, boolean sorted)
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("insertTablets failed", e);
+ LOGGER.warn(SessionMessages.INSERT_TABLETS_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in insertTablets", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_INSERT_TABLETS, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -1062,13 +1070,13 @@ public void insertAlignedTablets(Map tablets, boolean sorted)
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("insertAlignedTablets failed", e);
+ LOGGER.warn(SessionMessages.INSERT_ALIGNED_TABLETS_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in insertAlignedTablets", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_INSERT_ALIGNED_TABLETS, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -1096,7 +1104,7 @@ public void insertRecords(
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("insertRecords failed", e);
+ LOGGER.warn(SessionMessages.INSERT_RECORDS_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
@@ -1131,13 +1139,13 @@ public void insertAlignedRecords(
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("insertAlignedRecords failed", e);
+ LOGGER.warn(SessionMessages.INSERT_ALIGNED_RECORDS_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in insertAlignedRecords", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_INSERT_ALIGNED_RECORDS, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -1234,13 +1242,13 @@ public void insertStringRecordsOfOneDevice(
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("insertStringRecordsOfOneDevice failed", e);
+ LOGGER.warn(SessionMessages.INSERT_STRING_RECORDS_OF_ONE_DEVICE_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in insertStringRecordsOfOneDevice", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_INSERT_STRING_RECORDS_OF_ONE_DEVICE, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -1344,13 +1352,13 @@ public void insertStringRecordsOfOneDevice(
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("insertStringRecordsOfOneDevice failed", e);
+ LOGGER.warn(SessionMessages.INSERT_STRING_RECORDS_OF_ONE_DEVICE_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in insertStringRecordsOfOneDevice", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_INSERT_STRING_RECORDS_OF_ONE_DEVICE, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -1379,13 +1387,13 @@ public void insertAlignedRecordsOfOneDevice(
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("insertAlignedRecordsOfOneDevice failed", e);
+ LOGGER.warn(SessionMessages.INSERT_ALIGNED_RECORDS_OF_ONE_DEVICE_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in insertAlignedRecordsOfOneDevice", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_INSERT_ALIGNED_RECORDS_OF_ONE_DEVICE, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -1412,13 +1420,14 @@ public void insertAlignedStringRecordsOfOneDevice(
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("insertAlignedStringRecordsOfOneDevice failed", e);
+ LOGGER.warn(SessionMessages.INSERT_ALIGNED_STRING_RECORDS_OF_ONE_DEVICE_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in insertAlignedStringRecordsOfOneDevice", e);
+ LOGGER.error(
+ SessionMessages.UNEXPECTED_ERROR_IN_INSERT_ALIGNED_STRING_RECORDS_OF_ONE_DEVICE, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -1450,13 +1459,13 @@ public void insertAlignedRecordsOfOneDevice(
return;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("insertAlignedRecordsOfOneDevice failed", e);
+ LOGGER.warn(SessionMessages.INSERT_ALIGNED_RECORDS_OF_ONE_DEVICE_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in insertAlignedRecordsOfOneDevice", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_INSERT_ALIGNED_RECORDS_OF_ONE_DEVICE, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -1486,13 +1495,14 @@ public void insertAlignedStringRecordsOfOneDevice(
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("insertAlignedStringRecordsOfOneDevice failed", e);
+ LOGGER.warn(SessionMessages.INSERT_ALIGNED_STRING_RECORDS_OF_ONE_DEVICE_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in insertAlignedStringRecordsOfOneDevice", e);
+ LOGGER.error(
+ SessionMessages.UNEXPECTED_ERROR_IN_INSERT_ALIGNED_STRING_RECORDS_OF_ONE_DEVICE, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -1518,7 +1528,7 @@ public void insertRecords(
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("insertRecords failed", e);
+ LOGGER.warn(SessionMessages.INSERT_RECORDS_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
@@ -1551,13 +1561,13 @@ public void insertAlignedRecords(
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("insertAlignedRecords failed", e);
+ LOGGER.warn(SessionMessages.INSERT_ALIGNED_RECORDS_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in insertAlignedRecords", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_INSERT_ALIGNED_RECORDS, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -1641,13 +1651,13 @@ public String getTimestampPrecision()
return timestampPrecision;
} catch (TException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("getTimestampPrecision failed", e);
+ LOGGER.warn(SessionMessages.GET_TIMESTAMP_PRECISION_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, new IoTDBConnectionException(e));
} catch (RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in getTimestampPrecision", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_GET_TIMESTAMP_PRECISION, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -1676,13 +1686,13 @@ public void insertAlignedRecord(
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("insertAlignedRecord failed", e);
+ LOGGER.warn(SessionMessages.INSERT_ALIGNED_RECORD_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in insertAlignedRecord", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_INSERT_ALIGNED_RECORD, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -1734,13 +1744,13 @@ public void insertAlignedRecord(
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("insertAlignedRecord failed", e);
+ LOGGER.warn(SessionMessages.INSERT_ALIGNED_RECORD_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in insertAlignedRecord", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_INSERT_ALIGNED_RECORD, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -1759,13 +1769,13 @@ public void testInsertTablet(Tablet tablet)
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("testInsertTablet failed", e);
+ LOGGER.warn(SessionMessages.TEST_INSERT_TABLET_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in testInsertTablet", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_TEST_INSERT_TABLET, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -1784,13 +1794,13 @@ public void testInsertTablet(Tablet tablet, boolean sorted)
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("testInsertTablet failed", e);
+ LOGGER.warn(SessionMessages.TEST_INSERT_TABLET_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in testInsertTablet", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_TEST_INSERT_TABLET, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -1809,13 +1819,13 @@ public void testInsertTablets(Map tablets)
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("testInsertTablets failed", e);
+ LOGGER.warn(SessionMessages.TEST_INSERT_TABLETS_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in testInsertTablets", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_TEST_INSERT_TABLETS, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -1834,13 +1844,13 @@ public void testInsertTablets(Map tablets, boolean sorted)
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("testInsertTablets failed", e);
+ LOGGER.warn(SessionMessages.TEST_INSERT_TABLETS_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in testInsertTablets", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_TEST_INSERT_TABLETS, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -1863,13 +1873,13 @@ public void testInsertRecords(
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("testInsertRecords failed", e);
+ LOGGER.warn(SessionMessages.TEST_INSERT_RECORDS_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in testInsertRecords", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_TEST_INSERT_RECORDS, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -1893,13 +1903,13 @@ public void testInsertRecords(
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("testInsertRecords failed", e);
+ LOGGER.warn(SessionMessages.TEST_INSERT_RECORDS_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in testInsertRecords", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_TEST_INSERT_RECORDS, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -1919,13 +1929,13 @@ public void testInsertRecord(
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("testInsertRecord failed", e);
+ LOGGER.warn(SessionMessages.TEST_INSERT_RECORD_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in testInsertRecord", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_TEST_INSERT_RECORD, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -1949,13 +1959,13 @@ public void testInsertRecord(
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("testInsertRecord failed", e);
+ LOGGER.warn(SessionMessages.TEST_INSERT_RECORD_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in testInsertRecord", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_TEST_INSERT_RECORD, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -1975,13 +1985,13 @@ public void deleteTimeseries(String path)
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("deleteTimeseries failed", e);
+ LOGGER.warn(SessionMessages.DELETE_TIMESERIES_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in deleteTimeseries", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_DELETE_TIMESERIES, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2001,13 +2011,13 @@ public void deleteTimeseries(List paths)
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("deleteTimeseries failed", e);
+ LOGGER.warn(SessionMessages.DELETE_TIMESERIES_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in deleteTimeseries", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_DELETE_TIMESERIES, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2110,13 +2120,13 @@ public void setStorageGroup(String storageGroupId)
return;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("setStorageGroup failed", e);
+ LOGGER.warn(SessionMessages.SET_STORAGE_GROUP_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in setStorageGroup", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_SET_STORAGE_GROUP, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2138,13 +2148,13 @@ public void deleteStorageGroup(String storageGroup)
return;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("deleteStorageGroup failed", e);
+ LOGGER.warn(SessionMessages.DELETE_STORAGE_GROUP_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in deleteStorageGroup", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_DELETE_STORAGE_GROUP, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2166,13 +2176,13 @@ public void deleteStorageGroups(List storageGroup)
return;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("deleteStorageGroups failed", e);
+ LOGGER.warn(SessionMessages.DELETE_STORAGE_GROUPS_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in deleteStorageGroups", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_DELETE_STORAGE_GROUPS, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2190,13 +2200,13 @@ public void createDatabase(String database)
return;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("createDatabase failed", e);
+ LOGGER.warn(SessionMessages.CREATE_DATABASE_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in createDatabase", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_CREATE_DATABASE, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2214,13 +2224,13 @@ public void deleteDatabase(String database)
return;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("deleteDatabase failed", e);
+ LOGGER.warn(SessionMessages.DELETE_DATABASE_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in deleteDatabase", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_DELETE_DATABASE, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2238,13 +2248,13 @@ public void deleteDatabases(List databases)
return;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("deleteDatabases failed", e);
+ LOGGER.warn(SessionMessages.DELETE_DATABASES_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in deleteDatabases", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_DELETE_DATABASES, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2263,13 +2273,13 @@ public void createTimeseries(
return;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("createTimeseries failed", e);
+ LOGGER.warn(SessionMessages.CREATE_TIMESERIES_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in createTimeseries", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_CREATE_TIMESERIES, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2296,13 +2306,13 @@ public void createTimeseries(
return;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("createTimeseries failed", e);
+ LOGGER.warn(SessionMessages.CREATE_TIMESERIES_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in createTimeseries", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_CREATE_TIMESERIES, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2327,13 +2337,13 @@ public void createAlignedTimeseries(
return;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("createAlignedTimeseries failed", e);
+ LOGGER.warn(SessionMessages.CREATE_ALIGNED_TIMESERIES_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in createAlignedTimeseries", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_CREATE_ALIGNED_TIMESERIES, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2367,13 +2377,13 @@ public void createAlignedTimeseries(
return;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("createAlignedTimeseries failed", e);
+ LOGGER.warn(SessionMessages.CREATE_ALIGNED_TIMESERIES_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in createAlignedTimeseries", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_CREATE_ALIGNED_TIMESERIES, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2407,13 +2417,13 @@ public void createMultiTimeseries(
return;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("createMultiTimeseries failed", e);
+ LOGGER.warn(SessionMessages.CREATE_MULTI_TIMESERIES_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in createMultiTimeseries", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_CREATE_MULTI_TIMESERIES, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2431,13 +2441,13 @@ public boolean checkTimeseriesExists(String path)
return resp;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("checkTimeseriesExists failed", e);
+ LOGGER.warn(SessionMessages.CHECK_TIMESERIES_EXISTS_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in checkTimeseriesExists", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_CHECK_TIMESERIES_EXISTS, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2585,13 +2595,13 @@ public void addAlignedMeasurementsInTemplate(
return;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("addAlignedMeasurementsInTemplate failed", e);
+ LOGGER.warn(SessionMessages.ADD_ALIGNED_MEASUREMENTS_IN_TEMPLATE_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in addAlignedMeasurementsInTemplate", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_ADD_ALIGNED_MEASUREMENTS_IN_TEMPLATE, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2615,13 +2625,13 @@ public void addAlignedMeasurementInTemplate(
return;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("addAlignedMeasurementInTemplate failed", e);
+ LOGGER.warn(SessionMessages.ADD_ALIGNED_MEASUREMENT_IN_TEMPLATE_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in addAlignedMeasurementInTemplate", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_ADD_ALIGNED_MEASUREMENT_IN_TEMPLATE, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2645,13 +2655,13 @@ public void addUnalignedMeasurementsInTemplate(
return;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("addUnalignedMeasurementsInTemplate failed", e);
+ LOGGER.warn(SessionMessages.ADD_UNALIGNED_MEASUREMENTS_IN_TEMPLATE_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in addUnalignedMeasurementsInTemplate", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_ADD_UNALIGNED_MEASUREMENTS_IN_TEMPLATE, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2675,13 +2685,13 @@ public void addUnalignedMeasurementInTemplate(
return;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("addUnalignedMeasurementInTemplate failed", e);
+ LOGGER.warn(SessionMessages.ADD_UNALIGNED_MEASUREMENT_IN_TEMPLATE_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in addUnalignedMeasurementInTemplate", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_ADD_UNALIGNED_MEASUREMENT_IN_TEMPLATE, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2699,13 +2709,13 @@ public void deleteNodeInTemplate(String templateName, String path)
return;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("deleteNodeInTemplate failed", e);
+ LOGGER.warn(SessionMessages.DELETE_NODE_IN_TEMPLATE_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in deleteNodeInTemplate", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_DELETE_NODE_IN_TEMPLATE, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2723,13 +2733,13 @@ public int countMeasurementsInTemplate(String name)
return resp;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("countMeasurementsInTemplate failed", e);
+ LOGGER.warn(SessionMessages.COUNT_MEASUREMENTS_IN_TEMPLATE_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in countMeasurementsInTemplate", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_COUNT_MEASUREMENTS_IN_TEMPLATE, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2748,13 +2758,13 @@ public boolean isMeasurementInTemplate(String templateName, String path)
return resp;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("isMeasurementInTemplate failed", e);
+ LOGGER.warn(SessionMessages.IS_MEASUREMENT_IN_TEMPLATE_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in isMeasurementInTemplate", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_IS_MEASUREMENT_IN_TEMPLATE, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2773,13 +2783,13 @@ public boolean isPathExistInTemplate(String templateName, String path)
return resp;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("isPathExistInTemplata failed", e);
+ LOGGER.warn(SessionMessages.IS_PATH_EXIST_IN_TEMPLATE_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in isPathExistInTemplate", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_IS_PATH_EXIST_IN_TEMPLATE, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2798,13 +2808,13 @@ public List showMeasurementsInTemplate(String templateName)
return resp;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("showMeasurementsInTemplate failed", e);
+ LOGGER.warn(SessionMessages.SHOW_MEASUREMENTS_IN_TEMPLATE_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in showMeasurementsInTemplate", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_SHOW_MEASUREMENTS_IN_TEMPLATE, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2823,13 +2833,13 @@ public List showMeasurementsInTemplate(String templateName, String patte
return resp;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("showMeasurementsInTemplate failed", e);
+ LOGGER.warn(SessionMessages.SHOW_MEASUREMENTS_IN_TEMPLATE_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in showMeasurementsInTemplate", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_SHOW_MEASUREMENTS_IN_TEMPLATE, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2848,13 +2858,13 @@ public List showAllTemplates()
return resp;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("showAllTemplates failed", e);
+ LOGGER.warn(SessionMessages.SHOW_ALL_TEMPLATES_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in showAllTemplates", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_SHOW_ALL_TEMPLATES, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2873,13 +2883,13 @@ public List showPathsTemplateSetOn(String templateName)
return resp;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("showPathsTemplateSetOn failed", e);
+ LOGGER.warn(SessionMessages.SHOW_PATHS_TEMPLATE_SET_ON_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in showPathsTemplateSetOn", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_SHOW_PATHS_TEMPLATE_SET_ON, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2898,13 +2908,13 @@ public List showPathsTemplateUsingOn(String templateName)
return resp;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("showPathsTemplateUsingOn failed", e);
+ LOGGER.warn(SessionMessages.SHOW_PATHS_TEMPLATE_USING_ON_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in showPathsTemplateUsingOn", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_SHOW_PATHS_TEMPLATE_USING_ON, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2930,13 +2940,13 @@ public void setSchemaTemplate(String templateName, String prefixPath)
return;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("setSchemaTemplate [{}] on [{}] failed", templateName, prefixPath, e);
+ LOGGER.warn(SessionMessages.SET_SCHEMA_TEMPLATE_ON_FAILED, templateName, prefixPath, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in setSchemaTemplate", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_SET_SCHEMA_TEMPLATE, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2954,13 +2964,13 @@ public void unsetSchemaTemplate(String prefixPath, String templateName)
return;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("unsetSchemaTemplate [{}] on [{}] failed", templateName, prefixPath, e);
+ LOGGER.warn(SessionMessages.UNSET_SCHEMA_TEMPLATE_ON_FAILED, templateName, prefixPath, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in unsetSchemaTemplate", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_UNSET_SCHEMA_TEMPLATE, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -2978,13 +2988,13 @@ public void dropSchemaTemplate(String templateName)
return;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("dropSchemaTemplate [{}] failed", templateName, e);
+ LOGGER.warn(SessionMessages.DROP_SCHEMA_TEMPLATE_FAILED, templateName, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in dropSchemaTemplate", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_DROP_SCHEMA_TEMPLATE, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -3001,13 +3011,14 @@ public void createTimeseriesUsingSchemaTemplate(List devicePathList)
return;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("createTimeseriesOfSchemaTemplate {} failed", devicePathList, e);
+ LOGGER.warn(SessionMessages.CREATE_TIMESERIES_OF_SCHEMA_TEMPLATE_FAILED, devicePathList, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in createTimeseriesUsingSchemaTemplate", e);
+ LOGGER.error(
+ SessionMessages.UNEXPECTED_ERROR_IN_CREATE_TIMESERIES_USING_SCHEMA_TEMPLATE, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -3036,13 +3047,13 @@ public SessionDataSetWrapper executeQueryStatement(String sql)
return wrapper;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("executeQueryStatement failed", e);
+ LOGGER.warn(SessionMessages.EXECUTE_QUERY_STATEMENT_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in executeQueryStatement", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_EXECUTE_QUERY_STATEMENT, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -3074,13 +3085,13 @@ public SessionDataSetWrapper executeQueryStatement(String sql, long timeoutInMs)
return wrapper;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("executeQueryStatement failed", e);
+ LOGGER.warn(SessionMessages.EXECUTE_QUERY_STATEMENT_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in executeQueryStatement", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_EXECUTE_QUERY_STATEMENT, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -3110,13 +3121,13 @@ public void executeNonQueryStatement(String sql)
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("executeNonQueryStatement failed", e);
+ LOGGER.warn(SessionMessages.EXECUTE_NON_QUERY_STATEMENT_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, FINAL_RETRY, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in executeNonQueryStatement", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_EXECUTE_NON_QUERY_STATEMENT, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -3136,13 +3147,13 @@ public SessionDataSetWrapper executeRawDataQuery(
return wrapper;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("executeRawDataQuery failed", e);
+ LOGGER.warn(SessionMessages.EXECUTE_RAW_DATA_QUERY_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
throw e;
} catch (Throwable e) {
- LOGGER.error("unexpected error in executeRawDataQuery", e);
+ LOGGER.error(SessionMessages.UNEXPECTED_ERROR_IN_EXECUTE_RAW_DATA_QUERY, e);
putBack(session);
throw new RuntimeException(e);
}
@@ -3244,7 +3255,7 @@ public SessionDataSetWrapper executeFastLastDataQueryForOnePrefixPath(final List
return wrapper;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("executeLastDataQuery failed", e);
+ LOGGER.warn(SessionMessages.EXECUTE_LAST_DATA_QUERY_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
@@ -3273,7 +3284,7 @@ public SessionDataSetWrapper executeLastDataQueryForOneDevice(
return wrapper;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("executeLastDataQuery failed", e);
+ LOGGER.warn(SessionMessages.EXECUTE_LAST_DATA_QUERY_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
@@ -3464,7 +3475,7 @@ public void setTimeZone(String zoneId)
putBack(session);
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("setTimeZone to [{}] failed", zoneId, e);
+ LOGGER.warn(SessionMessages.SET_TIMEZONE_FAILED, zoneId, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (StatementExecutionException | RuntimeException e) {
putBack(session);
@@ -3567,7 +3578,7 @@ public TSConnectionInfoResp fetchAllConnections() throws IoTDBConnectionExceptio
return resp;
} catch (IoTDBConnectionException e) {
// TException means the connection is broken, remove it and get a new one.
- LOGGER.warn("fetchAllConnections failed", e);
+ LOGGER.warn(SessionMessages.FETCH_ALL_CONNECTIONS_FAILED, e);
cleanSessionAndMayThrowConnectionException(session, i, e);
} catch (Throwable t) {
putBack(session);
diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/template/InternalNode.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/template/InternalNode.java
index 6e7bd698c54ab..303fb5377d116 100644
--- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/template/InternalNode.java
+++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/template/InternalNode.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.isession.template.TemplateNode;
import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.i18n.SessionMessages;
import java.util.HashMap;
import java.util.Map;
@@ -38,7 +39,7 @@ public InternalNode(String name, boolean shareTime) {
@Override
public void addChild(TemplateNode node) throws StatementExecutionException {
if (children.containsKey(node.getName())) {
- throw new StatementExecutionException("Duplicated child of node in template.");
+ throw new StatementExecutionException(SessionMessages.DUPLICATED_CHILD_IN_TEMPLATE);
}
this.children.put(node.getName(), node);
}
diff --git a/iotdb-client/session/src/main/java/org/apache/iotdb/session/util/SessionUtils.java b/iotdb-client/session/src/main/java/org/apache/iotdb/session/util/SessionUtils.java
index 1b5ad290678a0..da15b5a0731c5 100644
--- a/iotdb-client/session/src/main/java/org/apache/iotdb/session/util/SessionUtils.java
+++ b/iotdb-client/session/src/main/java/org/apache/iotdb/session/util/SessionUtils.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.common.rpc.thrift.TEndPoint;
import org.apache.iotdb.rpc.IoTDBConnectionException;
import org.apache.iotdb.rpc.UrlUtils;
+import org.apache.iotdb.session.i18n.SessionMessages;
import org.apache.tsfile.common.conf.TSFileConfig;
import org.apache.tsfile.encoding.encoder.Encoder;
@@ -476,7 +477,7 @@ public static boolean isTabletContainsSingleDevice(Tablet tablet) {
public static List parseSeedNodeUrls(List nodeUrls) {
if (nodeUrls == null) {
- throw new NumberFormatException("nodeUrls is null");
+ throw new NumberFormatException(SessionMessages.NODE_URLS_IS_NULL);
}
List endPointsList = new ArrayList<>();
for (String nodeUrl : nodeUrls) {
diff --git a/iotdb-client/subscription/pom.xml b/iotdb-client/subscription/pom.xml
index c41ef1e3bde89..0ec7a30473d41 100644
--- a/iotdb-client/subscription/pom.xml
+++ b/iotdb-client/subscription/pom.xml
@@ -78,4 +78,12 @@
libthrift
+
+
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+
+
+
diff --git a/iotdb-client/subscription/src/main/i18n/en/org/apache/iotdb/rpc/subscription/i18n/SubscriptionMessages.java b/iotdb-client/subscription/src/main/i18n/en/org/apache/iotdb/rpc/subscription/i18n/SubscriptionMessages.java
new file mode 100644
index 0000000000000..3b03471138da9
--- /dev/null
+++ b/iotdb-client/subscription/src/main/i18n/en/org/apache/iotdb/rpc/subscription/i18n/SubscriptionMessages.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.rpc.subscription.i18n;
+
+public final class SubscriptionMessages {
+
+ // --- TopicConstant / ConsumerConstant ---
+ public static final String UTILITY_CLASS = "Utility class";
+
+ // --- SubscriptionPollRequest ---
+ public static final String UNEXPECTED_REQUEST_TYPE =
+ "unexpected request type: {}, payload will be null";
+
+ // --- SubscriptionPollResponse ---
+ public static final String UNEXPECTED_RESPONSE_TYPE =
+ "unexpected response type: {}, payload will be null";
+
+ // --- IdentifierUtils ---
+ public static final String NULL_IDENTIFIER_NOT_SUPPORTED = "null identifier is not supported";
+ public static final String EMPTY_IDENTIFIER_NOT_SUPPORTED = "empty identifier is not supported";
+
+ // --- PollTimer ---
+ public static final String INVALID_NEGATIVE_TIMEOUT = "Invalid negative timeout ";
+
+ // --- AbstractSubscriptionPushConsumer ---
+ public static final String PUSH_CONSUMER_CANCEL_AUTO_POLL =
+ "SubscriptionPushConsumer {} cancel auto poll worker";
+ public static final String PUSH_CONSUMER_SUBMIT_AUTO_POLL =
+ "SubscriptionPushConsumer {} submit auto poll worker";
+ public static final String CONSUMER_LISTENER_FAILURE =
+ "Consumer listener result failure when consuming message: {}";
+ public static final String AUTO_POLL_UNEXPECTED = "something unexpected happened when auto poll messages...";
+
+ // --- SubscriptionExecutorServiceManager ---
+ public static final String EXECUTOR_LAUNCHING = "Launching {} with core pool size {}...";
+ public static final String EXECUTOR_SHUTTING_DOWN = "Shutting down {}...";
+ public static final String EXECUTOR_NOT_LAUNCHED_SUBMIT =
+ "{} has not been launched, ignore submit task";
+ public static final String EXECUTOR_NOT_LAUNCHED_INVOKE =
+ "{} has not been launched, ignore invoke all tasks";
+ public static final String EXECUTOR_NOT_LAUNCHED_ZERO =
+ "{} has not been launched, return zero";
+ public static final String EXECUTOR_NOT_LAUNCHED_SCHEDULE =
+ "{} has not been launched, ignore scheduleWithFixedDelay for task";
+
+ // --- AbstractSubscriptionProviders ---
+ public static final String PROVIDER_CLOSE_FAILED =
+ "Failed to close subscription provider {} because of {}";
+ public static final String ADD_NEW_PROVIDER = "add new subscription provider {}";
+ public static final String CLOSE_STALE_PROVIDER = "close and remove stale subscription provider {}";
+ public static final String OPEN_PROVIDERS_FAILED =
+ "Failed to open providers for consumer {} because of {}";
+ public static final String FETCH_ENDPOINTS_FAILED =
+ "Failed to fetch all endpoints for consumer {} because of {}";
+
+ // --- AbstractSubscriptionPullConsumer ---
+ public static final String PULL_CONSUMER_CANCEL_AUTO_COMMIT =
+ "SubscriptionPullConsumer {} cancel auto commit worker";
+ public static final String PULL_CONSUMER_SUBMIT_AUTO_COMMIT =
+ "SubscriptionPullConsumer {} submit auto commit worker";
+ public static final String AUTO_COMMIT_UNEXPECTED =
+ "something unexpected happened when auto commit messages...";
+ public static final String COMMIT_DURING_CLOSE_UNEXPECTED =
+ "something unexpected happened when commit messages during close";
+
+ // --- AbstractSubscriptionConsumer ---
+ public static final String UNEXPECTED_RESPONSE_TYPE_WARN = "unexpected response type: {}";
+ public static final String CONSUMER_CANCEL_HEARTBEAT_WORKER =
+ "SubscriptionConsumer {} cancel heartbeat worker";
+ public static final String CONSUMER_SUBMIT_HEARTBEAT_WORKER =
+ "SubscriptionConsumer {} submit heartbeat worker";
+ public static final String CONSUMER_CANCEL_ENDPOINTS_SYNCER =
+ "SubscriptionConsumer {} cancel endpoints syncer";
+ public static final String CONSUMER_SUBMIT_ENDPOINTS_SYNCER =
+ "SubscriptionConsumer {} submit endpoints syncer";
+
+ private SubscriptionMessages() {}
+}
diff --git a/iotdb-client/subscription/src/main/i18n/zh/org/apache/iotdb/rpc/subscription/i18n/SubscriptionMessages.java b/iotdb-client/subscription/src/main/i18n/zh/org/apache/iotdb/rpc/subscription/i18n/SubscriptionMessages.java
new file mode 100644
index 0000000000000..ccd186c9ed348
--- /dev/null
+++ b/iotdb-client/subscription/src/main/i18n/zh/org/apache/iotdb/rpc/subscription/i18n/SubscriptionMessages.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.rpc.subscription.i18n;
+
+public final class SubscriptionMessages {
+
+ // --- TopicConstant / ConsumerConstant ---
+ public static final String UTILITY_CLASS = "工具类";
+
+ // --- SubscriptionPollRequest ---
+ public static final String UNEXPECTED_REQUEST_TYPE =
+ "意外的请求类型:{},payload 将为 null";
+
+ // --- SubscriptionPollResponse ---
+ public static final String UNEXPECTED_RESPONSE_TYPE =
+ "意外的响应类型:{},payload 将为 null";
+
+ // --- IdentifierUtils ---
+ public static final String NULL_IDENTIFIER_NOT_SUPPORTED = "不支持 null 标识符";
+ public static final String EMPTY_IDENTIFIER_NOT_SUPPORTED = "不支持空标识符";
+
+ // --- PollTimer ---
+ public static final String INVALID_NEGATIVE_TIMEOUT = "无效的负超时时间 ";
+
+ // --- AbstractSubscriptionPushConsumer ---
+ public static final String PUSH_CONSUMER_CANCEL_AUTO_POLL =
+ "SubscriptionPushConsumer {} 取消自动拉取工作线程";
+ public static final String PUSH_CONSUMER_SUBMIT_AUTO_POLL =
+ "SubscriptionPushConsumer {} 提交自动拉取工作线程";
+ public static final String CONSUMER_LISTENER_FAILURE =
+ "消费消息时消费者监听器结果失败:{}";
+ public static final String AUTO_POLL_UNEXPECTED = "自动拉取消息时发生意外情况...";
+
+ // --- SubscriptionExecutorServiceManager ---
+ public static final String EXECUTOR_LAUNCHING = "正在启动 {},核心线程池大小:{}...";
+ public static final String EXECUTOR_SHUTTING_DOWN = "正在关闭 {}...";
+ public static final String EXECUTOR_NOT_LAUNCHED_SUBMIT =
+ "{} 尚未启动,忽略提交任务";
+ public static final String EXECUTOR_NOT_LAUNCHED_INVOKE =
+ "{} 尚未启动,忽略批量调用任务";
+ public static final String EXECUTOR_NOT_LAUNCHED_ZERO =
+ "{} 尚未启动,返回零";
+ public static final String EXECUTOR_NOT_LAUNCHED_SCHEDULE =
+ "{} 尚未启动,忽略 scheduleWithFixedDelay 任务";
+
+ // --- AbstractSubscriptionProviders ---
+ public static final String PROVIDER_CLOSE_FAILED =
+ "关闭订阅提供者 {} 失败,原因:{}";
+ public static final String ADD_NEW_PROVIDER = "添加新的订阅提供者 {}";
+ public static final String CLOSE_STALE_PROVIDER = "关闭并移除过期的订阅提供者 {}";
+ public static final String OPEN_PROVIDERS_FAILED =
+ "为消费者 {} 打开提供者失败,原因:{}";
+ public static final String FETCH_ENDPOINTS_FAILED =
+ "为消费者 {} 获取所有端点失败,原因:{}";
+
+ // --- AbstractSubscriptionPullConsumer ---
+ public static final String PULL_CONSUMER_CANCEL_AUTO_COMMIT =
+ "SubscriptionPullConsumer {} 取消自动提交工作线程";
+ public static final String PULL_CONSUMER_SUBMIT_AUTO_COMMIT =
+ "SubscriptionPullConsumer {} 提交自动提交工作线程";
+ public static final String AUTO_COMMIT_UNEXPECTED =
+ "自动提交消息时发生意外情况...";
+ public static final String COMMIT_DURING_CLOSE_UNEXPECTED =
+ "关闭期间提交消息时发生意外情况";
+
+ // --- AbstractSubscriptionConsumer ---
+ public static final String UNEXPECTED_RESPONSE_TYPE_WARN = "意外的响应类型:{}";
+ public static final String CONSUMER_CANCEL_HEARTBEAT_WORKER =
+ "SubscriptionConsumer {} 取消心跳工作线程";
+ public static final String CONSUMER_SUBMIT_HEARTBEAT_WORKER =
+ "SubscriptionConsumer {} 提交心跳工作线程";
+ public static final String CONSUMER_CANCEL_ENDPOINTS_SYNCER =
+ "SubscriptionConsumer {} 取消端点同步器";
+ public static final String CONSUMER_SUBMIT_ENDPOINTS_SYNCER =
+ "SubscriptionConsumer {} 提交端点同步器";
+
+ private SubscriptionMessages() {}
+}
diff --git a/iotdb-client/subscription/src/main/java/org/apache/iotdb/rpc/subscription/config/ConsumerConstant.java b/iotdb-client/subscription/src/main/java/org/apache/iotdb/rpc/subscription/config/ConsumerConstant.java
index 9c52c8dd7da90..90d2ea7a01fb0 100644
--- a/iotdb-client/subscription/src/main/java/org/apache/iotdb/rpc/subscription/config/ConsumerConstant.java
+++ b/iotdb-client/subscription/src/main/java/org/apache/iotdb/rpc/subscription/config/ConsumerConstant.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.rpc.subscription.config;
+import org.apache.iotdb.rpc.subscription.i18n.SubscriptionMessages;
+
import java.nio.file.Paths;
public class ConsumerConstant {
@@ -84,6 +86,6 @@ public class ConsumerConstant {
public static final long AUTO_POLL_TIMEOUT_MS_MIN_VALUE = 1_000L;
private ConsumerConstant() {
- throw new IllegalStateException("Utility class");
+ throw new IllegalStateException(SubscriptionMessages.UTILITY_CLASS);
}
}
diff --git a/iotdb-client/subscription/src/main/java/org/apache/iotdb/rpc/subscription/config/TopicConstant.java b/iotdb-client/subscription/src/main/java/org/apache/iotdb/rpc/subscription/config/TopicConstant.java
index bb84358648e59..52c8e4de75221 100644
--- a/iotdb-client/subscription/src/main/java/org/apache/iotdb/rpc/subscription/config/TopicConstant.java
+++ b/iotdb-client/subscription/src/main/java/org/apache/iotdb/rpc/subscription/config/TopicConstant.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.rpc.subscription.config;
+import org.apache.iotdb.rpc.subscription.i18n.SubscriptionMessages;
+
public class TopicConstant {
public static final String PATH_KEY = "path";
@@ -60,6 +62,6 @@ public class TopicConstant {
public static final String STRICT_DEFAULT_VALUE = "true";
private TopicConstant() {
- throw new IllegalStateException("Utility class");
+ throw new IllegalStateException(SubscriptionMessages.UTILITY_CLASS);
}
}
diff --git a/iotdb-client/subscription/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/SubscriptionPollRequest.java b/iotdb-client/subscription/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/SubscriptionPollRequest.java
index 3337887b185f5..e7863ad3a55ae 100644
--- a/iotdb-client/subscription/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/SubscriptionPollRequest.java
+++ b/iotdb-client/subscription/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/SubscriptionPollRequest.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.rpc.subscription.payload.poll;
+import org.apache.iotdb.rpc.subscription.i18n.SubscriptionMessages;
+
import org.apache.tsfile.utils.PublicBAOS;
import org.apache.tsfile.utils.ReadWriteIOUtils;
import org.slf4j.Logger;
@@ -100,11 +102,11 @@ public static SubscriptionPollRequest deserialize(final ByteBuffer buffer) {
payload = new PollTabletsPayload().deserialize(buffer);
break;
default:
- LOGGER.warn("unexpected request type: {}, payload will be null", requestType);
+ LOGGER.warn(SubscriptionMessages.UNEXPECTED_REQUEST_TYPE, requestType);
break;
}
} else {
- LOGGER.warn("unexpected request type: {}, payload will be null", requestType);
+ LOGGER.warn(SubscriptionMessages.UNEXPECTED_REQUEST_TYPE, requestType);
}
final long timeoutMs = ReadWriteIOUtils.readLong(buffer);
diff --git a/iotdb-client/subscription/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/SubscriptionPollResponse.java b/iotdb-client/subscription/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/SubscriptionPollResponse.java
index 06baa30acee9f..fdea77701218e 100644
--- a/iotdb-client/subscription/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/SubscriptionPollResponse.java
+++ b/iotdb-client/subscription/src/main/java/org/apache/iotdb/rpc/subscription/payload/poll/SubscriptionPollResponse.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.rpc.subscription.payload.poll;
+import org.apache.iotdb.rpc.subscription.i18n.SubscriptionMessages;
+
import org.apache.tsfile.utils.PublicBAOS;
import org.apache.tsfile.utils.ReadWriteIOUtils;
import org.slf4j.Logger;
@@ -101,11 +103,11 @@ public static SubscriptionPollResponse deserialize(final ByteBuffer buffer) {
payload = new TerminationPayload().deserialize(buffer);
break;
default:
- LOGGER.warn("unexpected response type: {}, payload will be null", responseType);
+ LOGGER.warn(SubscriptionMessages.UNEXPECTED_RESPONSE_TYPE, responseType);
break;
}
} else {
- LOGGER.warn("unexpected response type: {}, payload will be null", responseType);
+ LOGGER.warn(SubscriptionMessages.UNEXPECTED_RESPONSE_TYPE, responseType);
}
final SubscriptionCommitContext commitContext = SubscriptionCommitContext.deserialize(buffer);
diff --git a/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/consumer/base/AbstractSubscriptionConsumer.java b/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/consumer/base/AbstractSubscriptionConsumer.java
index 62c8d20e3894d..63e5a263f22b3 100644
--- a/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/consumer/base/AbstractSubscriptionConsumer.java
+++ b/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/consumer/base/AbstractSubscriptionConsumer.java
@@ -30,6 +30,7 @@
import org.apache.iotdb.rpc.subscription.exception.SubscriptionRuntimeCriticalException;
import org.apache.iotdb.rpc.subscription.exception.SubscriptionRuntimeNonCriticalException;
import org.apache.iotdb.rpc.subscription.exception.SubscriptionTimeoutException;
+import org.apache.iotdb.rpc.subscription.i18n.SubscriptionMessages;
import org.apache.iotdb.rpc.subscription.payload.poll.ErrorPayload;
import org.apache.iotdb.rpc.subscription.payload.poll.FileInitPayload;
import org.apache.iotdb.rpc.subscription.payload.poll.FilePiecePayload;
@@ -642,7 +643,7 @@ private List<SubscriptionMessage> singlePoll(
for (final SubscriptionPollResponse response : currentResponses) {
final short responseType = response.getResponseType();
if (!SubscriptionPollResponseType.isValidatedResponseType(responseType)) {
- LOGGER.warn("unexpected response type: {}", responseType);
+ LOGGER.warn(SubscriptionMessages.UNEXPECTED_RESPONSE_TYPE_WARN, responseType);
continue;
}
try {
@@ -650,7 +651,8 @@ private List singlePoll(
.getOrDefault(
SubscriptionPollResponseType.valueOf(responseType),
(resp, ignored) -> {
- LOGGER.warn("unexpected response type: {}", responseType);
+ LOGGER.warn(
+ SubscriptionMessages.UNEXPECTED_RESPONSE_TYPE_WARN, responseType);
return Optional.empty();
})
// TODO: reuse previous timer?
@@ -1242,14 +1244,14 @@ private void submitHeartbeatWorker() {
if (isClosed()) {
if (Objects.nonNull(future[0])) {
future[0].cancel(false);
- LOGGER.info("SubscriptionConsumer {} cancel heartbeat worker", this);
+ LOGGER.info(SubscriptionMessages.CONSUMER_CANCEL_HEARTBEAT_WORKER, this);
}
return;
}
providers.heartbeat(this);
},
heartbeatIntervalMs);
- LOGGER.info("SubscriptionConsumer {} submit heartbeat worker", this);
+ LOGGER.info(SubscriptionMessages.CONSUMER_SUBMIT_HEARTBEAT_WORKER, this);
}
/////////////////////////////// sync endpoints ///////////////////////////////
@@ -1262,14 +1264,14 @@ private void submitEndpointsSyncer() {
if (isClosed()) {
if (Objects.nonNull(future[0])) {
future[0].cancel(false);
- LOGGER.info("SubscriptionConsumer {} cancel endpoints syncer", this);
+ LOGGER.info(SubscriptionMessages.CONSUMER_CANCEL_ENDPOINTS_SYNCER, this);
}
return;
}
providers.sync(this);
},
endpointsSyncIntervalMs);
- LOGGER.info("SubscriptionConsumer {} submit endpoints syncer", this);
+ LOGGER.info(SubscriptionMessages.CONSUMER_SUBMIT_ENDPOINTS_SYNCER, this);
}
/////////////////////////////// commit async ///////////////////////////////
diff --git a/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/consumer/base/AbstractSubscriptionProviders.java b/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/consumer/base/AbstractSubscriptionProviders.java
index b12ca3f927f98..5d48de1a93c9f 100644
--- a/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/consumer/base/AbstractSubscriptionProviders.java
+++ b/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/consumer/base/AbstractSubscriptionProviders.java
@@ -23,6 +23,7 @@
import org.apache.iotdb.rpc.IoTDBConnectionException;
import org.apache.iotdb.rpc.subscription.exception.SubscriptionConnectionException;
import org.apache.iotdb.rpc.subscription.exception.SubscriptionException;
+import org.apache.iotdb.rpc.subscription.i18n.SubscriptionMessages;
import org.apache.iotdb.rpc.subscription.payload.response.PipeSubscribeHeartbeatResp;
import org.slf4j.Logger;
@@ -141,7 +142,7 @@ void closeProviders() {
try {
provider.close();
} catch (final Exception e) {
- LOGGER.warn("Failed to close subscription provider {} because of {}", provider, e, e);
+ LOGGER.warn(SubscriptionMessages.PROVIDER_CLOSE_FAILED, provider, e, e);
}
}
subscriptionProviders.clear();
@@ -150,7 +151,7 @@ void closeProviders() {
/** Caller should ensure that the method is called in the lock {@link #acquireWriteLock()}. */
void addProvider(final int dataNodeId, final AbstractSubscriptionProvider provider) {
// the subscription provider is opened
- LOGGER.info("add new subscription provider {}", provider);
+ LOGGER.info(SubscriptionMessages.ADD_NEW_PROVIDER, provider);
subscriptionProviders.put(dataNodeId, provider);
}
@@ -164,7 +165,7 @@ void closeAndRemoveProvider(final int dataNodeId)
try {
provider.close();
} finally {
- LOGGER.info("close and remove stale subscription provider {}", provider);
+ LOGGER.info(SubscriptionMessages.CLOSE_STALE_PROVIDER, provider);
subscriptionProviders.remove(dataNodeId);
}
}
@@ -289,7 +290,7 @@ private void syncInternal(final AbstractSubscriptionConsumer consumer) {
try {
openProviders(consumer);
} catch (final Exception e) {
- LOGGER.warn("Failed to open providers for consumer {} because of {}", consumer, e, e);
+ LOGGER.warn(SubscriptionMessages.OPEN_PROVIDERS_FAILED, consumer, e, e);
return;
}
}
@@ -298,7 +299,7 @@ private void syncInternal(final AbstractSubscriptionConsumer consumer) {
try {
allEndPoints = consumer.fetchAllEndPointsWithRedirection();
} catch (final Exception e) {
- LOGGER.warn("Failed to fetch all endpoints for consumer {} because of {}", consumer, e, e);
+ LOGGER.warn(SubscriptionMessages.FETCH_ENDPOINTS_FAILED, consumer, e, e);
return;
}
diff --git a/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/consumer/base/AbstractSubscriptionPullConsumer.java b/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/consumer/base/AbstractSubscriptionPullConsumer.java
index 991857bc685ee..6449da157323f 100644
--- a/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/consumer/base/AbstractSubscriptionPullConsumer.java
+++ b/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/consumer/base/AbstractSubscriptionPullConsumer.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.rpc.subscription.config.ConsumerConstant;
import org.apache.iotdb.rpc.subscription.exception.SubscriptionException;
+import org.apache.iotdb.rpc.subscription.i18n.SubscriptionMessages;
import org.apache.iotdb.rpc.subscription.payload.poll.SubscriptionCommitContext;
import org.apache.iotdb.session.subscription.consumer.AsyncCommitCallback;
import org.apache.iotdb.session.subscription.payload.SubscriptionMessage;
@@ -252,14 +253,14 @@ private void submitAutoCommitWorker() {
if (isClosed()) {
if (Objects.nonNull(future[0])) {
future[0].cancel(false);
- LOGGER.info("SubscriptionPullConsumer {} cancel auto commit worker", this);
+ LOGGER.info(SubscriptionMessages.PULL_CONSUMER_CANCEL_AUTO_COMMIT, this);
}
return;
}
new AutoCommitWorker().run();
},
autoCommitIntervalMs);
- LOGGER.info("SubscriptionPullConsumer {} submit auto commit worker", this);
+ LOGGER.info(SubscriptionMessages.PULL_CONSUMER_SUBMIT_AUTO_COMMIT, this);
}
private class AutoCommitWorker implements Runnable {
@@ -281,7 +282,7 @@ public void run() {
ackCommitContexts(entry.getValue());
uncommittedCommitContexts.remove(entry.getKey());
} catch (final Exception e) {
- LOGGER.warn("something unexpected happened when auto commit messages...", e);
+ LOGGER.warn(SubscriptionMessages.AUTO_COMMIT_UNEXPECTED, e);
}
}
}
@@ -294,7 +295,7 @@ private void commitAllUncommittedMessages() {
ackCommitContexts(entry.getValue());
uncommittedCommitContexts.remove(entry.getKey());
} catch (final Exception e) {
- LOGGER.warn("something unexpected happened when commit messages during close", e);
+ LOGGER.warn(SubscriptionMessages.COMMIT_DURING_CLOSE_UNEXPECTED, e);
}
}
}
diff --git a/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/consumer/base/AbstractSubscriptionPushConsumer.java b/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/consumer/base/AbstractSubscriptionPushConsumer.java
index 3ff93db218b27..d227a317c9b1f 100644
--- a/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/consumer/base/AbstractSubscriptionPushConsumer.java
+++ b/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/consumer/base/AbstractSubscriptionPushConsumer.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.rpc.subscription.config.ConsumerConstant;
import org.apache.iotdb.rpc.subscription.exception.SubscriptionException;
+import org.apache.iotdb.rpc.subscription.i18n.SubscriptionMessages;
import org.apache.iotdb.session.subscription.consumer.AckStrategy;
import org.apache.iotdb.session.subscription.consumer.ConsumeListener;
import org.apache.iotdb.session.subscription.consumer.ConsumeResult;
@@ -156,14 +157,14 @@ private void submitAutoPollWorker() {
if (isClosed()) {
if (Objects.nonNull(future[0])) {
future[0].cancel(false);
- LOGGER.info("SubscriptionPushConsumer {} cancel auto poll worker", this);
+ LOGGER.info(SubscriptionMessages.PUSH_CONSUMER_CANCEL_AUTO_POLL, this);
}
return;
}
new AutoPollWorker().run();
},
autoPollIntervalMs);
- LOGGER.info("SubscriptionPushConsumer {} submit auto poll worker", this);
+ LOGGER.info(SubscriptionMessages.PUSH_CONSUMER_SUBMIT_AUTO_POLL, this);
}
class AutoPollWorker implements Runnable {
@@ -202,7 +203,7 @@ public void run() {
if (Objects.equals(ConsumeResult.SUCCESS, consumeResult)) {
messagesToAck.add(message);
} else {
- LOGGER.warn("Consumer listener result failure when consuming message: {}", message);
+ LOGGER.warn(SubscriptionMessages.CONSUMER_LISTENER_FAILURE, message);
messagesToNack.add(message);
}
} catch (final Exception e) {
@@ -217,7 +218,7 @@ public void run() {
nack(messagesToNack);
}
} catch (final Exception e) {
- LOGGER.warn("something unexpected happened when auto poll messages...", e);
+ LOGGER.warn(SubscriptionMessages.AUTO_POLL_UNEXPECTED, e);
}
}
}
diff --git a/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/consumer/base/SubscriptionExecutorServiceManager.java b/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/consumer/base/SubscriptionExecutorServiceManager.java
index e31ae3ba5da7b..6429643effeac 100644
--- a/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/consumer/base/SubscriptionExecutorServiceManager.java
+++ b/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/consumer/base/SubscriptionExecutorServiceManager.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.session.subscription.consumer.base;
+import org.apache.iotdb.rpc.subscription.i18n.SubscriptionMessages;
+
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -199,7 +201,7 @@ void launchIfNeeded() {
if (isShutdown()) {
synchronized (this) {
if (isShutdown()) {
- LOGGER.info("Launching {} with core pool size {}...", this.name, this.corePoolSize);
+ LOGGER.info(SubscriptionMessages.EXECUTOR_LAUNCHING, this.name, this.corePoolSize);
this.executor =
Executors.newFixedThreadPool(
@@ -224,7 +226,7 @@ void shutdown() {
if (!isShutdown()) {
synchronized (this) {
if (!isShutdown()) {
- LOGGER.info("Shutting down {}...", this.name);
+ LOGGER.info(SubscriptionMessages.EXECUTOR_SHUTTING_DOWN, this.name);
this.executor.shutdown();
try {
@@ -261,7 +263,7 @@ Future<?> submit(final Runnable task) {
}
}
- LOGGER.warn("{} has not been launched, ignore submit task", this.name);
+ LOGGER.warn(SubscriptionMessages.EXECUTOR_NOT_LAUNCHED_SUBMIT, this.name);
return null;
}
@@ -276,7 +278,7 @@ List<Future<?>> invokeAll(
}
}
- LOGGER.warn("{} has not been launched, ignore invoke all tasks", this.name);
+ LOGGER.warn(SubscriptionMessages.EXECUTOR_NOT_LAUNCHED_INVOKE, this.name);
return null;
}
@@ -294,7 +296,7 @@ int getAvailableCount() {
}
}
- LOGGER.warn("{} has not been launched, return zero", this.name);
+ LOGGER.warn(SubscriptionMessages.EXECUTOR_NOT_LAUNCHED_ZERO, this.name);
return 0;
}
}
@@ -310,7 +312,7 @@ void launchIfNeeded() {
if (isShutdown()) {
synchronized (this) {
if (isShutdown()) {
- LOGGER.info("Launching {} with core pool size {}...", this.name, this.corePoolSize);
+ LOGGER.info(SubscriptionMessages.EXECUTOR_LAUNCHING, this.name, this.corePoolSize);
this.executor =
Executors.newScheduledThreadPool(
@@ -343,7 +345,7 @@ ScheduledFuture<?> scheduleWithFixedDelay(
}
}
- LOGGER.warn("{} has not been launched, ignore scheduleWithFixedDelay for task", this.name);
+ LOGGER.warn(SubscriptionMessages.EXECUTOR_NOT_LAUNCHED_SCHEDULE, this.name);
return null;
}
}
diff --git a/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/util/IdentifierUtils.java b/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/util/IdentifierUtils.java
index 6947ac9ef7b42..b7df61732682f 100644
--- a/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/util/IdentifierUtils.java
+++ b/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/util/IdentifierUtils.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.session.subscription.util;
import org.apache.iotdb.rpc.subscription.exception.SubscriptionIdentifierSemanticException;
+import org.apache.iotdb.rpc.subscription.i18n.SubscriptionMessages;
import org.apache.tsfile.common.constant.TsFileConstant;
import org.apache.tsfile.read.common.parser.PathVisitor;
@@ -33,10 +34,12 @@ public class IdentifierUtils {
*/
public static String checkAndParseIdentifier(final String src) {
if (Objects.isNull(src)) {
- throw new SubscriptionIdentifierSemanticException("null identifier is not supported");
+ throw new SubscriptionIdentifierSemanticException(
+ SubscriptionMessages.NULL_IDENTIFIER_NOT_SUPPORTED);
}
if (src.isEmpty()) {
- throw new SubscriptionIdentifierSemanticException("empty identifier is not supported");
+ throw new SubscriptionIdentifierSemanticException(
+ SubscriptionMessages.EMPTY_IDENTIFIER_NOT_SUPPORTED);
}
if (src.startsWith(TsFileConstant.BACK_QUOTE_STRING)
&& src.endsWith(TsFileConstant.BACK_QUOTE_STRING)) {
diff --git a/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/util/PollTimer.java b/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/util/PollTimer.java
index 1dd077854e8a8..913c27c1ed655 100644
--- a/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/util/PollTimer.java
+++ b/iotdb-client/subscription/src/main/java/org/apache/iotdb/session/subscription/util/PollTimer.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.session.subscription.util;
+import org.apache.iotdb.rpc.subscription.i18n.SubscriptionMessages;
+
public class PollTimer {
private long startMs;
@@ -49,7 +51,7 @@ public boolean notExpired(final long deltaMs) {
public void reset(final long timeoutMs) {
if (timeoutMs < 0L) {
- throw new IllegalArgumentException("Invalid negative timeout " + timeoutMs);
+ throw new IllegalArgumentException(SubscriptionMessages.INVALID_NEGATIVE_TIMEOUT + timeoutMs);
} else {
this.timeoutMs = timeoutMs;
this.startMs = this.currentTimeMs;
diff --git a/iotdb-core/calc-commons/pom.xml b/iotdb-core/calc-commons/pom.xml
index 170604d15fcf0..d854f29cce77e 100644
--- a/iotdb-core/calc-commons/pom.xml
+++ b/iotdb-core/calc-commons/pom.xml
@@ -195,6 +195,10 @@
+            <plugin>
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>build-helper-maven-plugin</artifactId>
+            </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-failsafe-plugin</artifactId>
diff --git a/iotdb-core/calc-commons/src/main/i18n/en/org/apache/iotdb/calc/i18n/CalcMessages.java b/iotdb-core/calc-commons/src/main/i18n/en/org/apache/iotdb/calc/i18n/CalcMessages.java
new file mode 100644
index 0000000000000..4bf4ee7ba2c0c
--- /dev/null
+++ b/iotdb-core/calc-commons/src/main/i18n/en/org/apache/iotdb/calc/i18n/CalcMessages.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.calc.i18n;
+
+public final class CalcMessages {
+
+ private CalcMessages() {}
+
+ public static final String AND_OPERATOR_ONLY_ACCEPTS_BOOLEAN_OPERANDS =
+ "AND operator only accepts Boolean operands";
+ public static final String ARRAYS_NOT_SAME_LENGTH = "Arrays not same length";
+ public static final String CANNOT_ADD_NAN_TO_T_DIGEST = "Cannot add NaN to t-digest";
+ public static final String CANNOT_BE_ORDERED = " cannot be ordered";
+ public static final String CANNOT_CAST_TO_BOOLEAN = "\"%s\" cannot be cast to [BOOLEAN]";
+ public static final String CANNOT_PARSE_STRING_TO_DOUBLE = "Cannot parse String to double: ";
+ public static final String CANT_HAPPEN_LOOP_FELL_THROUGH =
+ "Can't happen ... loop fell through";
+ public static final String COUNT_ALL_ACCUMULATOR_DOES_NOT_SUPPORT_STATISTICS =
+ "CountAllAccumulator does not support statistics.";
+ public static final String COUNT_IF_ACCUMULATOR_DOES_NOT_SUPPORT_STATISTICS =
+ "CountIfAccumulator does not support statistics";
+ public static final String CURRENT_COLUMN_IS_NOT_OBJECT_COLUMN =
+ "current column is not object column";
+ public static final String CURRENT_TS_BLOCK_SIZE_IS = "Current tsBlock size is : {}";
+ public static final String DATA_TYPE_CANNOT_BE_ORDERED = "Data type: ";
+ public static final String DECODE_BASE32_ERROR = "decode base32 error";
+ public static final String DECODE_BASE64_ERROR = "decode base64 error";
+ public static final String DECODE_BASE64URL_ERROR = "decode base64url error";
+ public static final String DECODE_HEX_ERROR = "decode hex error";
+ public static final String DENSE_RANK_NOT_YET_IMPLEMENTED = "DENSE_RANK not yet implemented";
+ public static final String DISTINCT_AGGREGATION_FUNCTION_CANNOT_BE_PUSH_DOWN =
+ "Distinct aggregation function can not be push down";
+ public static final String DIVISION_BY_ZERO = "Division by zero";
+ public static final String ESCAPE_STRING_MUST_BE_A_SINGLE_CHARACTER =
+ "Escape string must be a single character";
+ public static final String EXCEPTION_HAPPENED_WHEN_EXECUTING_UDTF =
+ "Exception happened when executing UDTF: ";
+ public static final String FAIL_TO_CLOSE_FILE_CHANNEL = "Fail to close fileChannel";
+ public static final String ILLEGAL_STATE_IN_VISIT_LOGICAL_EXPRESSION =
+ "Illegal state in visitLogicalExpression";
+ public static final String INDEX_OUT_OF_PARTITION_BOUNDS = "Index out of Partition's bounds!";
+ public static final String INITIAL_CAPACITY_IS_NEGATIVE = "Initial capacity (";
+ public static final String CAPACITY_EXCEEDS = ") exceeds ";
+ public static final String INPUT_ROW_UTILS_SHOULD_NOT_BE_INSTANTIATED =
+ "InputRowUtils should not be instantiated.";
+ public static final String INVALID_AGGREGATION_FUNCTION = "Invalid Aggregation function: ";
+ public static final String INVALID_TEXT_INPUT_FOR_BOOLEAN =
+ "Invalid text input for boolean type: %s";
+ public static final String INVALID_VALUE = "Invalid value: %f";
+ public static final String IS_NEGATIVE = ") is negative";
+ public static final String LEFT_CHILD_OF_JOIN_NODE_DOESNT_CONTAIN_LEFT_JOIN_KEY =
+ "Left child of JoinNode doesn't contain left join key.";
+ public static final String MAX_TUPLE_SIZE_OF_TS_BLOCK_IS = "maxTupleSizeOfTsBlock is:{}";
+ public static final String MEMORY_IS_NOT_ENOUGH_FOR_CURRENT_QUERY =
+ "Memory is not enough for current query.";
+ public static final String MERGE_SORT_HEAP_SHOULD_BE_EMPTY = "mergeSortHeap should be empty!";
+ public static final String MODULUS_BY_ZERO = "Modulus by zero";
+ public static final String MULTIPLE_I_OBJECT_FILE_SERVICE_PROVIDER_FOUND =
+ "Multiple IObjectFileServiceProvider found";
+ public static final String MULTIPLE_I_TEMPORARY_QUERY_DATA_FILE_SERVICE_PROVIDER_FOUND =
+ "Multiple ITemporaryQueryDataFileServiceProvider found";
+ public static final String NOT_ENOUGH_MEMORY_FOR_SORTING = "Not enough memory for sorting";
+ public static final String NOT_YET_IMPLEMENTED = "not yet implemented";
+ public static final String NO_I_OBJECT_FILE_SERVICE_PROVIDER_FOUND =
+ "No IObjectFileServiceProvider found";
+ public static final String NO_I_TEMPORARY_QUERY_DATA_FILE_SERVICE_PROVIDER_FOUND =
+ "No ITemporaryQueryDataFileServiceProvider found";
+ public static final String OFFSET_LESS_THAN_ZERO = "offset %d is less than 0.";
+ public static final String ONLY_ONE_TUPLE_CAN_BE_SENT_EACH_TIME =
+ "Only one tuple can be sent each time caused by limited memory, oneTupleSize: {}B, maxReturnSize: {}B";
+ public static final String OR_OPERATOR_ONLY_ACCEPTS_BOOLEAN_OPERANDS =
+ "OR operator only accepts Boolean operands";
+ public static final String PERCENTAGE_SHOULD_BE_IN_0_1 = "percentage should be in [0,1], got ";
+ public static final String READ_OBJECT_IS_NOT_SUPPORTED = "readObject is not supported";
+ public static final String RESULT_TS_BLOCK_CANNOT_BE_NULL = "Result tsBlock cannot be null";
+ public static final String RIGHT_CHILD_OF_JOIN_NODE_DOESNT_CONTAIN_RIGHT_JOIN_KEY =
+ "Right child of JoinNode doesn't contain right join key.";
+ public static final String SHOULD_CALL_THE_CONCRETE_VISIT_XX_METHOD =
+ "should call the concrete visitXX() method";
+ public static final String STATE_FOR_GROUP_NOT_FOUND = "State for group %d is not found";
+ public static final String SUM_SHOULD_NEVER_BE_ZERO = "sum should never be zero.";
+ public static final String THIS_ACCUMULATOR_DOES_NOT_SUPPORT_REMOVING_INPUTS =
+ "This Accumulator does not support removing inputs!";
+ public static final String THIS_IS_A_UTILITY_CLASS_AND_CANNOT_BE_INSTANTIATED =
+ "This is a utility class and cannot be instantiated";
+ public static final String TYPE_OF_LEFT_ASOF_JOIN_KEY_IS_NOT_TIMESTAMP =
+ "Type of left ASOF Join key is not TIMESTAMP";
+ public static final String TYPE_OF_RIGHT_ASOF_JOIN_KEY_IS_NOT_TIMESTAMP =
+ "Type of right ASOF Join key is not TIMESTAMP";
+ public static final String UDAF_NOT_SUPPORT_CALCULATE_FROM_STATISTICS =
+ "UDAF not support calculate from statistics now";
+ public static final String UNBOUND_FOLLOWING_NOT_ALLOWED_IN_FRAME_START =
+ "UNBOUND FOLLOWING is not allowed in frame start!";
+ public static final String UNBOUND_PRECEDING_NOT_ALLOWED_IN_FRAME_END =
+ "UNBOUND PRECEDING is not allowed in frame end!";
+ public static final String UNBOUND_PRECEDING_NOT_ALLOWED_IN_FRAME_START =
+ "UNBOUND PRECEDING is not allowed in frame start!";
+ public static final String UNEXPECTED_ANCHOR_TYPE = "unexpected anchor type: ";
+ public static final String UNEXPECTED_EXTRACT_FIELD = "Unexpected extract field: ";
+ public static final String UNEXPECTED_SIZE_FOR_LAST_SEQUENCE =
+ "Unexpected size for last sequence: ";
+ public static final String UNEXPECTED_SKIP_TO_POSITION = "unexpected SKIP TO position: ";
+ public static final String UNEXPECTED_VALUE_FOR_REMAINDER = "Unexpected value for remainder: ";
+ public static final String UNEXPECTED_VALUE_FOR_SEQUENCES =
+ "Unexpected value for sequences: ";
+ public static final String UNHANDLED_LITERAL_TYPE = "Unhandled literal type: ";
+ public static final String UNKNOWN_DATA_TYPE = "Unknown data type: ";
+ public static final String UNKNOWN_DATATYPE = "Unknown datatype: ";
+ public static final String UNKNOWN_RANKING_TYPE = "Unknown ranking type: ";
+ public static final String UNKNOWN_SIGN = "Unknown sign: ";
+ public static final String UNKNOWN_TYPE = "Unknown type: ";
+ public static final String UNREACHABLE = "Unreachable!";
+ public static final String UNSUPPORTED_ARITHMETIC_OPERATOR =
+ "Unsupported arithmetic operator: ";
+ public static final String UNSUPPORTED_ASOF_JOIN_TYPE = "Unsupported ASOF join type: ";
+ public static final String UNSUPPORTED_CAST_TO_TYPE = "Unsupported cast to type: ";
+ public static final String UNSUPPORTED_COLUMN_TRANSFORMER = "Unsupported ColumnTransformer";
+ public static final String UNSUPPORTED_COMPARISON_OPERATOR =
+ "Unsupported comparison operator: ";
+ public static final String UNSUPPORTED_DATA_TYPE = "Unsupported data type: ";
+ public static final String UNSUPPORTED_DATA_TYPE_LOWER = "unsupported data type: ";
+ public static final String UNSUPPORTED_FRAME_BOUND_TYPE = "Unsupported frame bound type: ";
+ public static final String UNSUPPORTED_FUNCTION_KIND = "Unsupported function kind: ";
+ public static final String UNSUPPORTED_JOIN_TYPE = "Unsupported join type: ";
+ public static final String UNSUPPORTED_LOGICAL_OPERATOR = "Unsupported logical operator: ";
+ public static final String UNSUPPORTED_TYPE = "Unsupported type: ";
+ public static final String UNSUPPORTED_TYPE_BINARY = "Unsupported Type";
+ public static final String UNSUPPORTED_TYPE_CLASS = "Unsupported type: ";
+ public static final String UNSUPPORTED_TYPE_FOR_ARITHMETIC_OPERATION =
+ "Unsupported type for arithmetic operation: ";
+ public static final String UNSUPPORTED_TYPE_IN_GENERIC_LITERAL =
+ "Unsupported type in GenericLiteral: ";
+ public static final String WEIGHT_MUST_BE_GE_1 = "weight must be >= 1, was ";
+}
diff --git a/iotdb-core/calc-commons/src/main/i18n/zh/org/apache/iotdb/calc/i18n/CalcMessages.java b/iotdb-core/calc-commons/src/main/i18n/zh/org/apache/iotdb/calc/i18n/CalcMessages.java
new file mode 100644
index 0000000000000..b8d8239863ae6
--- /dev/null
+++ b/iotdb-core/calc-commons/src/main/i18n/zh/org/apache/iotdb/calc/i18n/CalcMessages.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.calc.i18n;
+
+public final class CalcMessages {
+
+ private CalcMessages() {}
+
+ public static final String AND_OPERATOR_ONLY_ACCEPTS_BOOLEAN_OPERANDS =
+ "AND 运算符只接受布尔操作数";
+ public static final String ARRAYS_NOT_SAME_LENGTH = "数组长度不一致";
+ public static final String CANNOT_ADD_NAN_TO_T_DIGEST = "不能将 NaN 添加到 t-digest";
+ public static final String CANNOT_BE_ORDERED = " 不能排序";
+ public static final String CANNOT_CAST_TO_BOOLEAN = "\"%s\" 无法转换为 [BOOLEAN]";
+ public static final String CANNOT_PARSE_STRING_TO_DOUBLE = "无法将字符串解析为 double:";
+ public static final String CANT_HAPPEN_LOOP_FELL_THROUGH = "不应发生……循环穿透";
+ public static final String COUNT_ALL_ACCUMULATOR_DOES_NOT_SUPPORT_STATISTICS =
+ "CountAllAccumulator 不支持统计信息。";
+ public static final String COUNT_IF_ACCUMULATOR_DOES_NOT_SUPPORT_STATISTICS =
+ "CountIfAccumulator 不支持统计信息";
+ public static final String CURRENT_COLUMN_IS_NOT_OBJECT_COLUMN = "当前列不是对象列";
+ public static final String CURRENT_TS_BLOCK_SIZE_IS = "当前 tsBlock 大小为:{}";
+ public static final String DATA_TYPE_CANNOT_BE_ORDERED = "数据类型:";
+ public static final String DECODE_BASE32_ERROR = "base32 解码错误";
+ public static final String DECODE_BASE64_ERROR = "base64 解码错误";
+ public static final String DECODE_BASE64URL_ERROR = "base64url 解码错误";
+ public static final String DECODE_HEX_ERROR = "hex 解码错误";
+ public static final String DENSE_RANK_NOT_YET_IMPLEMENTED = "DENSE_RANK 尚未实现";
+ public static final String DISTINCT_AGGREGATION_FUNCTION_CANNOT_BE_PUSH_DOWN =
+ "DISTINCT 聚合函数不能下推";
+ public static final String DIVISION_BY_ZERO = "除以零";
+ public static final String ESCAPE_STRING_MUST_BE_A_SINGLE_CHARACTER =
+ "转义字符串必须是单个字符";
+ public static final String EXCEPTION_HAPPENED_WHEN_EXECUTING_UDTF =
+ "执行 UDTF 时发生异常:";
+ public static final String FAIL_TO_CLOSE_FILE_CHANNEL = "关闭文件通道失败";
+ public static final String ILLEGAL_STATE_IN_VISIT_LOGICAL_EXPRESSION =
+ "visitLogicalExpression 中状态非法";
+ public static final String INDEX_OUT_OF_PARTITION_BOUNDS = "索引超出分区边界!";
+ public static final String INITIAL_CAPACITY_IS_NEGATIVE = "初始容量 (";
+ public static final String CAPACITY_EXCEEDS = ") 超过 ";
+ public static final String INPUT_ROW_UTILS_SHOULD_NOT_BE_INSTANTIATED =
+ "InputRowUtils 不应被实例化。";
+ public static final String INVALID_AGGREGATION_FUNCTION = "无效的聚合函数:";
+ public static final String INVALID_TEXT_INPUT_FOR_BOOLEAN =
+ "布尔类型的文本输入无效:%s";
+ public static final String INVALID_VALUE = "无效的值:%f";
+ public static final String IS_NEGATIVE = ") 为负数";
+ public static final String LEFT_CHILD_OF_JOIN_NODE_DOESNT_CONTAIN_LEFT_JOIN_KEY =
+ "JoinNode 的左子节点不包含左连接键。";
+ public static final String MAX_TUPLE_SIZE_OF_TS_BLOCK_IS = "maxTupleSizeOfTsBlock 为:{}";
+ public static final String MEMORY_IS_NOT_ENOUGH_FOR_CURRENT_QUERY = "当前查询内存不足。";
+ public static final String MERGE_SORT_HEAP_SHOULD_BE_EMPTY = "归并排序堆应为空!";
+ public static final String MODULUS_BY_ZERO = "对零取模";
+ public static final String MULTIPLE_I_OBJECT_FILE_SERVICE_PROVIDER_FOUND =
+ "找到多个 IObjectFileServiceProvider";
+ public static final String MULTIPLE_I_TEMPORARY_QUERY_DATA_FILE_SERVICE_PROVIDER_FOUND =
+ "找到多个 ITemporaryQueryDataFileServiceProvider";
+ public static final String NOT_ENOUGH_MEMORY_FOR_SORTING = "排序内存不足";
+ public static final String NOT_YET_IMPLEMENTED = "尚未实现";
+ public static final String NO_I_OBJECT_FILE_SERVICE_PROVIDER_FOUND =
+ "未找到 IObjectFileServiceProvider";
+ public static final String NO_I_TEMPORARY_QUERY_DATA_FILE_SERVICE_PROVIDER_FOUND =
+ "未找到 ITemporaryQueryDataFileServiceProvider";
+ public static final String OFFSET_LESS_THAN_ZERO = "偏移量 %d 小于 0。";
+ public static final String ONLY_ONE_TUPLE_CAN_BE_SENT_EACH_TIME =
+ "由于内存限制,每次只能发送一个元组,oneTupleSize: {}B, maxReturnSize: {}B";
+ public static final String OR_OPERATOR_ONLY_ACCEPTS_BOOLEAN_OPERANDS =
+ "OR 运算符只接受布尔操作数";
+ public static final String PERCENTAGE_SHOULD_BE_IN_0_1 = "百分比应在 [0,1] 范围内,实际为 ";
+ public static final String READ_OBJECT_IS_NOT_SUPPORTED = "不支持 readObject";
+ public static final String RESULT_TS_BLOCK_CANNOT_BE_NULL = "结果 tsBlock 不能为空";
+ public static final String RIGHT_CHILD_OF_JOIN_NODE_DOESNT_CONTAIN_RIGHT_JOIN_KEY =
+ "JoinNode 的右子节点不包含右连接键。";
+ public static final String SHOULD_CALL_THE_CONCRETE_VISIT_XX_METHOD =
+ "应该调用具体的 visitXX() 方法";
+ public static final String STATE_FOR_GROUP_NOT_FOUND = "未找到分组 %d 的状态";
+ public static final String SUM_SHOULD_NEVER_BE_ZERO = "sum 不应为零。";
+ public static final String THIS_ACCUMULATOR_DOES_NOT_SUPPORT_REMOVING_INPUTS =
+ "该累加器不支持移除输入!";
+ public static final String THIS_IS_A_UTILITY_CLASS_AND_CANNOT_BE_INSTANTIATED =
+ "这是一个工具类,不能被实例化";
+ public static final String TYPE_OF_LEFT_ASOF_JOIN_KEY_IS_NOT_TIMESTAMP =
+ "左侧 ASOF 连接键的类型不是 TIMESTAMP";
+ public static final String TYPE_OF_RIGHT_ASOF_JOIN_KEY_IS_NOT_TIMESTAMP =
+ "右侧 ASOF 连接键的类型不是 TIMESTAMP";
+ public static final String UDAF_NOT_SUPPORT_CALCULATE_FROM_STATISTICS =
+ "UDAF 目前不支持从统计信息计算";
+ public static final String UNBOUND_FOLLOWING_NOT_ALLOWED_IN_FRAME_START =
+ "帧起始位置不允许使用 UNBOUND FOLLOWING!";
+ public static final String UNBOUND_PRECEDING_NOT_ALLOWED_IN_FRAME_END =
+ "帧结束位置不允许使用 UNBOUND PRECEDING!";
+ public static final String UNBOUND_PRECEDING_NOT_ALLOWED_IN_FRAME_START =
+ "帧起始位置不允许使用 UNBOUND PRECEDING!";
+ public static final String UNEXPECTED_ANCHOR_TYPE = "意外的锚点类型:";
+ public static final String UNEXPECTED_EXTRACT_FIELD = "意外的提取字段:";
+ public static final String UNEXPECTED_SIZE_FOR_LAST_SEQUENCE = "最后一个序列的大小异常:";
+ public static final String UNEXPECTED_SKIP_TO_POSITION = "意外的 SKIP TO 位置:";
+ public static final String UNEXPECTED_VALUE_FOR_REMAINDER = "余数的值异常:";
+ public static final String UNEXPECTED_VALUE_FOR_SEQUENCES = "序列的值异常:";
+ public static final String UNHANDLED_LITERAL_TYPE = "未处理的字面量类型:";
+ public static final String UNKNOWN_DATA_TYPE = "未知数据类型:";
+ public static final String UNKNOWN_DATATYPE = "未知数据类型:";
+ public static final String UNKNOWN_RANKING_TYPE = "未知排名类型:";
+ public static final String UNKNOWN_SIGN = "未知符号:";
+ public static final String UNKNOWN_TYPE = "未知类型:";
+ public static final String UNREACHABLE = "不可达!";
+ public static final String UNSUPPORTED_ARITHMETIC_OPERATOR = "不支持的算术运算符:";
+ public static final String UNSUPPORTED_ASOF_JOIN_TYPE = "不支持的 ASOF 连接类型:";
+ public static final String UNSUPPORTED_CAST_TO_TYPE = "不支持的转换目标类型:";
+ public static final String UNSUPPORTED_COLUMN_TRANSFORMER = "不支持的 ColumnTransformer";
+ public static final String UNSUPPORTED_COMPARISON_OPERATOR = "不支持的比较运算符:";
+ public static final String UNSUPPORTED_DATA_TYPE = "不支持的数据类型:";
+ public static final String UNSUPPORTED_DATA_TYPE_LOWER = "不支持的数据类型:";
+ public static final String UNSUPPORTED_FRAME_BOUND_TYPE = "不支持的帧边界类型:";
+ public static final String UNSUPPORTED_FUNCTION_KIND = "不支持的函数类型:";
+ public static final String UNSUPPORTED_JOIN_TYPE = "不支持的连接类型:";
+ public static final String UNSUPPORTED_LOGICAL_OPERATOR = "不支持的逻辑运算符:";
+ public static final String UNSUPPORTED_TYPE = "不支持的类型:";
+ public static final String UNSUPPORTED_TYPE_BINARY = "不支持的类型";
+ public static final String UNSUPPORTED_TYPE_CLASS = "不支持的类型:";
+ public static final String UNSUPPORTED_TYPE_FOR_ARITHMETIC_OPERATION =
+ "不支持的算术运算类型:";
+ public static final String UNSUPPORTED_TYPE_IN_GENERIC_LITERAL =
+ "GenericLiteral 中不支持的类型:";
+ public static final String WEIGHT_MUST_BE_GE_1 = "权重必须 >= 1,实际为 ";
+}
diff --git a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/AbstractOperator.java b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/AbstractOperator.java
index cc09d7fe971f6..1c0e189cd5137 100644
--- a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/AbstractOperator.java
+++ b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/AbstractOperator.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.calc.execution.operator;
+import org.apache.iotdb.calc.i18n.CalcMessages;
+
import org.apache.tsfile.common.conf.TSFileDescriptor;
import org.apache.tsfile.read.common.block.TsBlock;
import org.slf4j.Logger;
@@ -49,21 +51,18 @@ public void initializeMaxTsBlockLength(TsBlock tsBlock) {
if (oneTupleSize > maxReturnSize) {
// make sure at least one-tuple-at-a-time
this.maxTupleSizeOfTsBlock = 1;
- LOGGER.warn(
- "Only one tuple can be sent each time caused by limited memory, oneTupleSize: {}B, maxReturnSize: {}B",
- oneTupleSize,
- maxReturnSize);
+ LOGGER.warn(CalcMessages.ONLY_ONE_TUPLE_CAN_BE_SENT_EACH_TIME, oneTupleSize, maxReturnSize);
} else {
this.maxTupleSizeOfTsBlock = (int) (maxReturnSize / oneTupleSize);
}
if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("maxTupleSizeOfTsBlock is:{}", maxTupleSizeOfTsBlock);
+ LOGGER.debug(CalcMessages.MAX_TUPLE_SIZE_OF_TS_BLOCK_IS, maxTupleSizeOfTsBlock);
}
}
public TsBlock checkTsBlockSizeAndGetResult() {
if (resultTsBlock == null) {
- throw new IllegalArgumentException("Result tsBlock cannot be null");
+ throw new IllegalArgumentException(CalcMessages.RESULT_TS_BLOCK_CANNOT_BE_NULL);
} else if (resultTsBlock.isEmpty()) {
TsBlock res = resultTsBlock;
resultTsBlock = null;
@@ -98,7 +97,7 @@ public TsBlock getResultFromRetainedTsBlock() {
startOffset += maxTupleSizeOfTsBlock;
}
if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("Current tsBlock size is : {}", res.getRetainedSizeInBytes());
+ LOGGER.debug(CalcMessages.CURRENT_TS_BLOCK_SIZE_IS, res.getRetainedSizeInBytes());
}
return res;
}
diff --git a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/AbstractSortOperator.java b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/AbstractSortOperator.java
index 82d0b94b33476..c9f73f8493747 100644
--- a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/AbstractSortOperator.java
+++ b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/AbstractSortOperator.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.calc.execution.operator.CommonOperatorContext;
import org.apache.iotdb.calc.execution.operator.Operator;
+import org.apache.iotdb.calc.i18n.CalcMessages;
import org.apache.iotdb.calc.utils.datastructure.MergeSortHeap;
import org.apache.iotdb.calc.utils.datastructure.MergeSortKey;
import org.apache.iotdb.calc.utils.datastructure.SortKey;
@@ -305,7 +306,7 @@ public void clear() {
sortReaders = null;
diskSpiller.reset();
} catch (Exception e) {
- LOGGER.warn("Fail to close fileChannel", e);
+ LOGGER.warn(CalcMessages.FAIL_TO_CLOSE_FILE_CHANNEL, e);
}
}
@@ -361,7 +362,7 @@ protected void resetSortRelatedResource() {
new SortBufferManager(
sortBufferManager.getMaxTsBlockSizeInBytes(), sortBufferManager.getSortBufferSize());
if (mergeSortHeap != null && !mergeSortHeap.isEmpty()) {
- throw new IllegalStateException("mergeSortHeap should be empty!");
+ throw new IllegalStateException(CalcMessages.MERGE_SORT_HEAP_SHOULD_BE_EMPTY);
}
mergeSortHeap = null;
noMoreData = null;
diff --git a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/FilterAndProjectOperator.java b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/FilterAndProjectOperator.java
index 16e0cd7b33069..5ab7e58a6962f 100644
--- a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/FilterAndProjectOperator.java
+++ b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/FilterAndProjectOperator.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.calc.execution.operator.CommonOperatorContext;
import org.apache.iotdb.calc.execution.operator.Operator;
+import org.apache.iotdb.calc.i18n.CalcMessages;
import org.apache.iotdb.calc.transformation.dag.column.AbstractCaseWhenThenColumnTransformer;
import org.apache.iotdb.calc.transformation.dag.column.ColumnTransformer;
import org.apache.iotdb.calc.transformation.dag.column.FailFunctionColumnTransformer;
@@ -429,7 +430,7 @@ private int getMaxLevelOfColumnTransformerTree(ColumnTransformer columnTransform
} else if (columnTransformer instanceof FailFunctionColumnTransformer) {
return 0;
} else {
- throw new UnsupportedOperationException("Unsupported ColumnTransformer");
+ throw new UnsupportedOperationException(CalcMessages.UNSUPPORTED_COLUMN_TRANSFORMER);
}
}
diff --git a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/TopKOperator.java b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/TopKOperator.java
index bdb424e15b812..53ceeee58a079 100644
--- a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/TopKOperator.java
+++ b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/TopKOperator.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.calc.execution.operator.CommonOperatorContext;
import org.apache.iotdb.calc.execution.operator.Operator;
+import org.apache.iotdb.calc.i18n.CalcMessages;
import org.apache.iotdb.calc.utils.datastructure.MergeSortHeap;
import org.apache.iotdb.calc.utils.datastructure.MergeSortKey;
import org.apache.iotdb.calc.utils.datastructure.SortKey;
@@ -324,7 +325,7 @@ private void initResultTsBlock() {
new Binary[positionCount]);
break;
default:
- throw new UnSupportedDataTypeException("Unknown datatype: " + dataTypes.get(i));
+ throw new UnSupportedDataTypeException(CalcMessages.UNKNOWN_DATATYPE + dataTypes.get(i));
}
}
this.tmpResultTsBlock = constrcutResultTsBlock(positionCount, columns);
@@ -397,7 +398,7 @@ private long getMemoryUsageOfOneMergeSortKey() {
memory += 16;
break;
default:
- throw new UnSupportedDataTypeException("Unknown datatype: " + dataType);
+ throw new UnSupportedDataTypeException(CalcMessages.UNKNOWN_DATATYPE + dataType);
}
}
return memory;
diff --git a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/function/TableFunctionLeafOperator.java b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/function/TableFunctionLeafOperator.java
index 0a7f92b249ff8..ced20f7bbf0b6 100644
--- a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/function/TableFunctionLeafOperator.java
+++ b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/function/TableFunctionLeafOperator.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.calc.execution.operator.CommonOperatorContext;
import org.apache.iotdb.calc.execution.operator.process.ProcessOperator;
+import org.apache.iotdb.calc.i18n.CalcMessages;
import org.apache.iotdb.calc.plan.planner.CommonOperatorUtils;
import org.apache.iotdb.commons.exception.IoTDBRuntimeException;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -78,7 +79,7 @@ public TsBlock next() throws Exception {
try {
processor.process(columnBuilders);
} catch (Exception e) {
- LOGGER.warn("Exception happened when executing UDTF: ", e);
+ LOGGER.warn(CalcMessages.EXCEPTION_HAPPENED_WHEN_EXECUTING_UDTF, e);
throw new IoTDBRuntimeException(
e.getMessage(), TSStatusCode.EXECUTE_UDF_ERROR.getStatusCode(), true);
}
@@ -107,7 +108,7 @@ public void close() throws Exception {
try {
processor.beforeDestroy();
} catch (Exception e) {
- LOGGER.warn("Exception happened when executing UDTF: ", e);
+ LOGGER.warn(CalcMessages.EXCEPTION_HAPPENED_WHEN_EXECUTING_UDTF, e);
throw new IoTDBRuntimeException(
e.getMessage(), TSStatusCode.EXECUTE_UDF_ERROR.getStatusCode(), true);
}
diff --git a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/function/partition/Slice.java b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/function/partition/Slice.java
index aec1722a1baad..fd21d5c2a72b2 100644
--- a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/function/partition/Slice.java
+++ b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/function/partition/Slice.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.calc.execution.operator.process.function.partition;
+import org.apache.iotdb.calc.i18n.CalcMessages;
import org.apache.iotdb.calc.utils.ObjectTypeUtils;
import org.apache.iotdb.udf.api.relational.access.Record;
import org.apache.iotdb.udf.api.type.Type;
@@ -214,7 +215,7 @@ public Object getObject(int columnIndex) {
@Override
public Optional getObjectFile(int columnIndex) {
if (getDataType(columnIndex) != Type.OBJECT) {
- throw new UnsupportedOperationException("current column is not object column");
+ throw new UnsupportedOperationException(CalcMessages.CURRENT_COLUMN_IS_NOT_OBJECT_COLUMN);
}
return ObjectTypeUtils.getObjectPathFromBinary(getBinarySafely(columnIndex));
}
@@ -222,7 +223,7 @@ public Optional getObjectFile(int columnIndex) {
@Override
public long objectLength(int columnIndex) {
if (getDataType(columnIndex) != Type.OBJECT) {
- throw new UnsupportedOperationException("current column is not object column");
+ throw new UnsupportedOperationException(CalcMessages.CURRENT_COLUMN_IS_NOT_OBJECT_COLUMN);
}
Binary binary = getBinarySafely(columnIndex);
return ObjectTypeUtils.getObjectLength(binary);
@@ -230,7 +231,7 @@ public long objectLength(int columnIndex) {
@Override
public Binary readObject(int columnIndex, long offset, int length) {
- throw new UnsupportedOperationException("readObject is not supported");
+ throw new UnsupportedOperationException(CalcMessages.READ_OBJECT_IS_NOT_SUPPORTED);
}
@Override
diff --git a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/join/merge/MergeSortComparator.java b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/join/merge/MergeSortComparator.java
index a18784e6eecc9..cca484b72f191 100644
--- a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/join/merge/MergeSortComparator.java
+++ b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/join/merge/MergeSortComparator.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.calc.execution.operator.process.join.merge;
+import org.apache.iotdb.calc.i18n.CalcMessages;
import org.apache.iotdb.calc.utils.datastructure.SortKey;
import org.apache.iotdb.commons.queryengine.plan.relational.planner.SortOrder;
@@ -102,7 +103,8 @@ public static Comparator getComparator(TSDataType dataType, int index,
(SortKey sortKey) -> sortKey.tsBlock.getColumn(index).getBoolean(sortKey.rowIndex));
break;
default:
- throw new IllegalArgumentException("Data type: " + dataType + " cannot be ordered");
+ throw new IllegalArgumentException(
+ CalcMessages.DATA_TYPE_CANNOT_BE_ORDERED + dataType + CalcMessages.CANNOT_BE_ORDERED);
}
if (!asc) {
comparator = comparator.reversed();
diff --git a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/join/merge/comparator/JoinKeyComparatorFactory.java b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/join/merge/comparator/JoinKeyComparatorFactory.java
index af59f42e92b49..841fe965c1935 100644
--- a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/join/merge/comparator/JoinKeyComparatorFactory.java
+++ b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/join/merge/comparator/JoinKeyComparatorFactory.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.calc.execution.operator.process.join.merge.comparator;
+import org.apache.iotdb.calc.i18n.CalcMessages;
+
import org.apache.tsfile.read.common.type.Type;
import java.util.ArrayList;
@@ -88,7 +90,7 @@ public static JoinKeyComparator getComparator(Type type, boolean isAscending) {
: DescBinaryTypeJoinKeyComparator.getInstance();
default:
// other types are not supported.
- throw new UnsupportedOperationException("Unsupported data type: " + type);
+ throw new UnsupportedOperationException(CalcMessages.UNSUPPORTED_DATA_TYPE + type);
}
}
}
diff --git a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/rowpattern/PatternPartitionExecutor.java b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/rowpattern/PatternPartitionExecutor.java
index 8f59701b2fdb5..d50451c2f53a2 100644
--- a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/rowpattern/PatternPartitionExecutor.java
+++ b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/rowpattern/PatternPartitionExecutor.java
@@ -27,6 +27,7 @@
import org.apache.iotdb.calc.execution.operator.process.window.partition.Partition;
import org.apache.iotdb.calc.execution.operator.process.window.utils.ColumnList;
import org.apache.iotdb.calc.execution.operator.process.window.utils.RowComparator;
+import org.apache.iotdb.calc.i18n.CalcMessages;
import org.apache.iotdb.commons.exception.SemanticException;
import org.apache.iotdb.commons.queryengine.plan.relational.planner.node.RowsPerMatch;
import org.apache.iotdb.commons.queryengine.plan.relational.planner.node.SkipToPosition;
@@ -409,7 +410,7 @@ private void skipAfterMatch(
lastSkippedPosition = position - 1;
break;
default:
- throw new IllegalStateException("unexpected SKIP TO position: " + skipToPosition);
+ throw new IllegalStateException(CalcMessages.UNEXPECTED_SKIP_TO_POSITION + skipToPosition);
}
}
diff --git a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/rowpattern/expression/ArithmeticOperator.java b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/rowpattern/expression/ArithmeticOperator.java
index de2196356285a..7b8bbd5f4d8e3 100644
--- a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/rowpattern/expression/ArithmeticOperator.java
+++ b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/rowpattern/expression/ArithmeticOperator.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.calc.execution.operator.process.rowpattern.expression;
+import org.apache.iotdb.calc.i18n.CalcMessages;
import org.apache.iotdb.commons.exception.SemanticException;
public enum ArithmeticOperator implements BinaryOperator {
@@ -49,7 +50,7 @@ public Object apply(Object left, Object right) {
if (left == null || right == null) return null;
double r = normalizeToDouble(right);
if (r == 0.0) {
- throw new ArithmeticException("Division by zero");
+ throw new ArithmeticException(CalcMessages.DIVISION_BY_ZERO);
}
return normalizeToDouble(left) / r;
}
@@ -60,7 +61,7 @@ public Object apply(Object left, Object right) {
if (left == null || right == null) return null;
double r = normalizeToDouble(right);
if (r == 0.0) {
- throw new ArithmeticException("Modulus by zero");
+ throw new ArithmeticException(CalcMessages.MODULUS_BY_ZERO);
}
return normalizeToDouble(left) % r;
}
@@ -73,10 +74,11 @@ private static double normalizeToDouble(Object obj) {
try {
return Double.parseDouble((String) obj);
} catch (NumberFormatException e) {
- throw new SemanticException("Cannot parse String to double: " + obj);
+ throw new SemanticException(CalcMessages.CANNOT_PARSE_STRING_TO_DOUBLE + obj);
}
} else {
- throw new SemanticException("Unsupported type for arithmetic operation: " + obj.getClass());
+ throw new SemanticException(
+ CalcMessages.UNSUPPORTED_TYPE_FOR_ARITHMETIC_OPERATION + obj.getClass());
}
}
}
diff --git a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/rowpattern/expression/CastComputation.java b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/rowpattern/expression/CastComputation.java
index 1b50f37bc76e8..a5cff7572d769 100644
--- a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/rowpattern/expression/CastComputation.java
+++ b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/process/rowpattern/expression/CastComputation.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.calc.execution.operator.process.rowpattern.expression;
+import org.apache.iotdb.calc.i18n.CalcMessages;
import org.apache.iotdb.commons.queryengine.plan.relational.sql.ast.DataType;
import org.apache.iotdb.commons.queryengine.plan.relational.sql.ast.GenericDataType;
@@ -72,7 +73,7 @@ public Object evaluate(List
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+
+
+ add-i18n-source
+ generate-sources
+
+ add-source
+
+
+
+ ${project.basedir}/src/main/i18n/${i18n.locale}
+
+
+
+
+
@@ -265,5 +284,11 @@
true
+
+ with-zh-locale
+
+ zh
+
+
diff --git a/iotdb-core/confignode/src/main/i18n/en/org/apache/iotdb/confignode/i18n/ConfigNodeMessages.java b/iotdb-core/confignode/src/main/i18n/en/org/apache/iotdb/confignode/i18n/ConfigNodeMessages.java
new file mode 100644
index 0000000000000..c8b020e19e3ba
--- /dev/null
+++ b/iotdb-core/confignode/src/main/i18n/en/org/apache/iotdb/confignode/i18n/ConfigNodeMessages.java
@@ -0,0 +1,496 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.i18n;
+
+public final class ConfigNodeMessages {
+
+ public static final String ACQUIRE_TRIGGERTABLELOCK = "acquire TriggerTableLock";
+ public static final String ACQUIRE_UDFTABLELOCK = "acquire UDFTableLock";
+ public static final String ACTIVATING = "Activating {}...";
+ public static final String ADJUSTREGIONGROUPNUM_THE_MAXIMUM_NUMBER_OF_DATAREGIONGROUPS_FOR =
+ "[AdjustRegionGroupNum] The maximum number of DataRegionGroups for Database: {} is adjusted to: {}";
+ public static final String ADJUSTREGIONGROUPNUM_THE_MAXIMUM_NUMBER_OF_SCHEMAREGIONGROUPS_FOR =
+ "[AdjustRegionGroupNum] The maximum number of SchemaRegionGroups for Database: {} is adjusted to: {}";
+ public static final String ADJUSTREGIONGROUPNUM_THE_MINIMUM_NUMBER_OF_DATAREGIONGROUPS_FOR =
+ "[AdjustRegionGroupNum] The minimum number of DataRegionGroups for Database: {} is adjusted to: {}";
+ public static final String ADJUSTREGIONGROUPNUM_THE_MINIMUM_NUMBER_OF_SCHEMAREGIONGROUPS_FOR =
+ "[AdjustRegionGroupNum] The minimum number of SchemaRegionGroups for Database: {} is adjusted to: {}";
+ public static final String CANNOT_FIND_REGIONGROUP_FOR_REGION_WHEN_ADDREGIONNEWLOCATION_IN =
+ "Cannot find RegionGroup for region {} when addRegionNewLocation in {}";
+ public static final String CANNOT_FIND_REGIONGROUP_FOR_REGION_WHEN_REMOVEREGIONOLDLOCATION_IN =
+ "Cannot find RegionGroup for region {} when removeRegionOldLocation in {}";
+ public static final String CAN_ONLY_ALTER_DATATYPE_OF_FIELD_COLUMNS =
+ "Can only alter datatype of FIELD columns";
+ public static final String CAN_T_CLOSE_STANDALONELOG_FOR_CONFIGNODE_SIMPLECONSENSUS_MODE =
+ "Can't close StandAloneLog for ConfigNode SimpleConsensus mode, ";
+ public static final String CAN_T_CONNECT_TO_DATA_NODE = "Can't connect to Data node: {}";
+ public static final String CAN_T_CONSTRUCT_CLUSTERSCHEMAINFO =
+ "Can't construct ClusterSchemaInfo";
+ public static final String CAN_T_DELETE_TEMPORARY_SNAPSHOT_FILE_RETRYING =
+ "Can't delete temporary snapshot file: {}, retrying...";
+ public static final String CAN_T_FORCE_LOGWRITER_FOR_CONFIGNODE_FLUSHWALFORSIMPLECONSENSUS =
+ "Can't force logWriter for ConfigNode flushWALForSimpleConsensus";
+ public static final String CAN_T_FORCE_LOGWRITER_FOR_CONFIGNODE_SIMPLECONSENSUS_MODE =
+ "Can't force logWriter for ConfigNode SimpleConsensus mode";
+ public static final String CAN_T_SERIALIZE_CURRENT_CONFIGPHYSICALPLAN_FOR_CONFIGNODE_SIMPLECONSENSUS_MODE =
+ "Can't serialize current ConfigPhysicalPlan for ConfigNode SimpleConsensus mode";
+ public static final String CAN_T_START_CONFIGNODE_CONSENSUS_GROUP =
+ "Can't start ConfigNode consensus group!";
+ public static final String CHANGE_REGIONS_LEADER_ERROR_ON_DATE_NODE =
+ "Change regions leader error on Date node: {}";
+ public static final String CHECK_BEFORE_DROPPING_TOPIC_TOPIC_EXISTS =
+ "Check before dropping topic: {}, topic exists: {}";
+ public static final String CHECK_BEFORE_DROP_PIPE_PIPE_EXISTS =
+ "Check before drop pipe {}, pipe exists: {}.";
+ public static final String CLUSTERID_HAS_BEEN_GENERATED = "clusterID has been generated: {}";
+ public static final String CLUSTERID_HAS_BEEN_RECOVERED_FROM_SNAPSHOT =
+ "clusterID has been recovered from snapshot: {}";
+ public static final String CLUSTERID_NOT_GENERATED_YET_SHOULD_NEVER_HAPPEN =
+ "clusterId not generated yet, should never happen.";
+ public static final String CONFIGNODESNAPSHOT_FINISH_TO_TAKE_SNAPSHOT_FOR_TIME_CONSUMPTION_MS =
+ "[ConfigNodeSnapshot] Finish to take snapshot for {}, time consumption: {} ms";
+ public static final String CONFIGNODESNAPSHOT_LOAD_SNAPSHOT_FOR_COST_MS =
+ "[ConfigNodeSnapshot] Load snapshot for {} cost {} ms";
+ public static final String CONFIGNODESNAPSHOT_LOAD_SNAPSHOT_SUCCESS_LATESTSNAPSHOTROOTDIR =
+ "[ConfigNodeSnapshot] Load snapshot success, latestSnapshotRootDir: {}";
+ public static final String CONFIGNODESNAPSHOT_START_TO_LOAD_SNAPSHOT_FOR_FROM =
+ "[ConfigNodeSnapshot] Start to load snapshot for {} from {}";
+ public static final String CONFIGNODESNAPSHOT_START_TO_TAKE_SNAPSHOT_FOR_INTO =
+ "[ConfigNodeSnapshot] Start to take snapshot for {} into {}";
+ public static final String CONFIGNODESNAPSHOT_TASK_SNAPSHOT_SUCCESS_SNAPSHOTDIR =
+ "[ConfigNodeSnapshot] Task snapshot success, snapshotDir: {}";
+ public static final String CONFIGNODE_EXITING = "ConfigNode exiting...";
+ public static final String CONFIGNODE_NEED_REDIRECT_TO_RETRY =
+ "ConfigNode need redirect to {}, retry {} ...";
+ public static final String CONFIGNODE_PORT_CHECK_SUCCESSFUL = "configNode port check successful.";
+ public static final String CONFIGNODE_RPC_SERVICE_FINISHED_TO_REMOVE_AINODE_RESULT =
+ "ConfigNode RPC Service finished to remove AINode, result: {}";
+ public static final String CONFIGNODE_RPC_SERVICE_FINISHED_TO_REMOVE_DATANODE_REQ_RESULT =
+ "ConfigNode RPC Service finished to remove DataNode, req: {}, result: {}";
+ public static final String CONFIGNODE_RPC_SERVICE_START_TO_REMOVE_AINODE =
+ "ConfigNode RPC Service start to remove AINode";
+ public static final String CONFIGNODE_RPC_SERVICE_START_TO_REMOVE_DATANODE_REQ =
+ "ConfigNode RPC Service start to remove DataNode, req: {}";
+ public static final String CONFIGNODE_SIMPLECONSENSUSFILE_HAS_EXISTED_FILEPATH =
+ "ConfigNode SimpleConsensusFile has existed,filePath:{}";
+ public static final String CONFIG_REGION_LISTENING_QUEUE_LISTEN_TO_SNAPSHOT_FAILED_THE_HISTORICAL =
+ "Config Region Listening Queue Listen to snapshot failed, the historical data may not be transferred.";
+ public static final String CONFIG_REGION_LISTENING_QUEUE_LISTEN_TO_SNAPSHOT_FAILED_WHEN_STARTUP =
+ "Config Region Listening Queue Listen to snapshot failed when startup, snapshot will be tried again when starting schema transferring pipes";
+ public static final String CONTINUOUS_QUERY_MIN_EVERY_INTERVAL_IN_MS_SHOULD_BE_GREATER =
+ "continuous_query_min_every_interval_in_ms should be greater than 0, but current value is {}, ignore that and use the default value {}";
+ public static final String CONTINUOUS_QUERY_SUBMIT_THREAD_SHOULD_BE_GREATER_THAN_0 =
+ "continuous_query_submit_thread should be greater than 0, but current value is {}, ignore that and use the default value {}";
+ public static final String COULDN_T_LOAD_CONFIGNODE_CONF_FILE_REJECT_CONFIGNODE_STARTUP =
+ "Couldn't load ConfigNode conf file, reject ConfigNode startup.";
+ public static final String COULDN_T_LOAD_THE_CONFIGURATION_FROM_ANY_OF_THE_KNOWN =
+ "Couldn't load the configuration {} from any of the known sources.";
+ public static final String CREATEREGIONGROUPS_DATABASE_HAS_BEEN_DELETED_CORRESPONDING_REGIONGROUPS =
+ "[CreateRegionGroups] Database {} has been deleted, corresponding RegionGroups will not be created.";
+ public static final String CREATE_CONFIGNODE_SIMPLECONSENSUSFILE =
+ "Create ConfigNode SimpleConsensusFile: {}";
+ public static final String CREATE_CONFIGNODE_SIMPLECONSENSUSFILE_FAILED_FILEPATH =
+ "Create ConfigNode SimpleConsensusFile failed, filePath: {}";
+ public static final String CURRENT_NODE_NODEID_IP_PORT_AS_CONFIG_REGION_LEADER_IS =
+ "Current node [nodeId: {}, ip:port: {}] as config region leader is ready to work";
+ public static final String CURRENT_NODE_NODEID_IP_PORT_BECOMES_CONFIG_REGION_LEADER =
+ "Current node [nodeId: {}, ip:port: {}] becomes config region leader";
+ public static final String CURRENT_NODE_NODEID_IP_PORT_IS_NO_LONGER_THE_LEADER =
+ "Current node [nodeId:{}, ip:port: {}] is no longer the leader, ";
+ public static final String DATABASE_INCONSISTENCY_DETECTED_WHEN_ADJUSTING_MAX_REGION_GROUP_COUNT_MESSAGE =
+ "Database inconsistency detected when adjusting max region group count, message: {}, will be corrected by the following adjusting plans";
+ public static final String DATABASE_NOT_EXIST = "Database not exist";
+ public static final String DATA_REGION_CONSENSUS_PROTOCOL_CLASS =
+ "data_region_consensus_protocol_class";
+ public static final String DEACTIVATING = "Deactivating {}...";
+ public static final String DEFAULT_CHARSET_IS = "{} default charset is: {}";
+ public static final String DELETED_FAILED_TAKE_APPROPRIATE_ACTION =
+ "{} deleted failed; take appropriate action.";
+ public static final String DELETE_USELESS_PROCEDURE_WAL_DIR_FAIL =
+ "Delete useless procedure wal dir fail.";
+ public static final String DESERIALIZATION_ERROR_FOR_WRITE_PLAN_REQUEST_BYTEBUFFER =
+ "Deserialization error for write plan, request: {}, bytebuffer: {}";
+ public static final String DOES_NOT_EXIST = "%s does not exist";
+ public static final String DROPPING_TAG_OR_TIME_COLUMN_IS_NOT_SUPPORTED =
+ "Dropping tag or time column is not supported.";
+ public static final String DROP_CQ_FAILED_BECAUSE_ITS_MD5_DOESN_T_MATCH =
+ "Drop CQ {} failed, because its MD5 doesn't match.";
+ public static final String DROP_CQ_FAILED_BECAUSE_IT_DOESN_T_EXIST =
+ "Drop CQ {} failed, because it doesn't exist.";
+ public static final String DROP_CQ_SUCCESSFULLY = "Drop CQ {} successfully.";
+ public static final String DUPLICATED_TEMPLATE_NAME = "Duplicated template name: ";
+ public static final String ENABLESEPARATIONOFADMINPOWERS_IS_NOT_SUPPORTED =
+ "EnableSeparationOfAdminPowers is not supported";
+ public static final String ENVIRONMENT_VARIABLES = "{} environment variables: {}";
+ public static final String ERROR_GET_MATCHED_PATHS_IN_GIVEN_LEVEL =
+ "Error get matched paths in given level.";
+ public static final String ERROR_GET_MATCHED_PATHS_IN_NEXT_LEVEL =
+ "Error get matched paths in next level.";
+ public static final String ERROR_OCCURRED_WHEN_GET_PATHS_SET_ON_TEMPLATE =
+ "Error occurred when get paths set on template {}";
+ public static final String ERROR_STARTING = "Error starting";
+ public static final String EXECUTE_ALTERDATABASE_WITH_RESULT =
+ "Execute AlterDatabase: {} with result: {}";
+ public static final String EXECUTE_GETCLUSTERID_WITH_RESULT =
+ "Execute getClusterId with result {}";
+ public static final String EXECUTE_GETSYSTEMCONFIGURATION_WITH_RESULT =
+ "Execute GetSystemConfiguration with result {}";
+ public static final String EXECUTE_NON_QUERY_PLAN_FAILED = "Execute non-query plan failed";
+ public static final String EXECUTE_QUERY_PLAN_FAILED = "Execute query plan failed";
+ public static final String EXECUTE_REGISTERAINODEREQUEST_WITH_RESULT =
+ "Execute RegisterAINodeRequest {} with result {}";
+ public static final String EXECUTE_REGISTERCONFIGNODEREQUEST_WITH_RESULT =
+ "Execute RegisterConfigNodeRequest {} with result {}";
+ public static final String EXECUTE_REGISTERDATANODEREQUEST_WITH_RESULT =
+ "Execute RegisterDataNodeRequest {} with result {}";
+ public static final String EXECUTE_RESTARTAINODEREQUEST_WITH_RESULT =
+ "Execute RestartAINodeRequest {} with result {}";
+ public static final String EXECUTE_RESTARTDATANODEREQUEST_WITH_RESULT =
+ "Execute RestartDataNodeRequest {} with result {}";
+ public static final String EXECUTE_SETDATABASE_WITH_RESULT =
+ "Execute SetDatabase: {} with result: {}";
+ public static final String FAILED_IN_THE_READ_API_EXECUTING_THE_CONSENSUS_LAYER_DUE =
+ "Failed in the read API executing the consensus layer due to: ";
+ public static final String FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE =
+ "Failed in the write API executing the consensus layer due to: ";
+ public static final String FAILED_ON_AINODE = "{} failed on AINode {}";
+ public static final String FAILED_ON_AINODE_RETRYING = "{} failed on AINode {}, retrying {}...";
+ public static final String FAILED_ON_CONFIGNODE = "{} failed on ConfigNode {}";
+ public static final String FAILED_ON_CONFIGNODE_BECAUSE_RETRYING =
+ "{} failed on ConfigNode {}, because {}, retrying {}...";
+ public static final String FAILED_ON_DATANODE = "{} failed on DataNode {}";
+ public static final String FAILED_ON_DATANODE_RETRYING =
+ "{} failed on DataNode {}, retrying {}...";
+ public static final String FAILED_TO_ALTER_PIPE = "Failed to alter pipe";
+ public static final String FAILED_TO_CHECK_SCHEMA_REGION_USING_TEMPLATE_ON_DATANODE =
+ "Failed to check schema region using template on DataNode {}, {}";
+ public static final String FAILED_TO_CHECK_TIMESERIES_EXISTENCE_ON_DATANODE =
+ "Failed to check timeseries existence on DataNode {}, {}";
+ public static final String FAILED_TO_COUNT_PATHS_USING_TEMPLATE_ON_DATANODE =
+ "Failed to count paths using template on DataNode {}, {}";
+ public static final String FAILED_TO_CREATE_MULTIPLE_PIPES = "Failed to create multiple pipes";
+ public static final String FAILED_TO_CREATE_PIPE = "Failed to create pipe";
+ public static final String FAILED_TO_CREATE_PIPEPLUGIN_SOURCE_PIPEPLUGIN_FAILED_TO_LOAD =
+ "Failed to create PipePlugin [%s], source PipePlugin [%s] failed to load: %s";
+ public static final String FAILED_TO_CREATE_PIPEPLUGIN_SOURCE_PIPEPLUGIN_JAR_DOES_NOT_EXIST =
+ "Failed to create PipePlugin [%s], source PipePlugin [%s] jar [%s] does not exist in install dir.";
+ public static final String FAILED_TO_CREATE_PIPEPLUGIN_THE_SAME_NAME_PIPEPLUGIN_HAS_BEEN =
+ "Failed to create PipePlugin [%s], the same name PipePlugin has been created";
+ public static final String FAILED_TO_CREATE_PIPEPLUGIN_THIS_PIPEPLUGIN_EXISTS_BUT_FAILED_TO =
+ "Failed to create PipePlugin [%s], this PipePlugin exists but failed to load: %s";
+ public static final String FAILED_TO_CREATE_TEMPLATE_BECAUSE_TEMPLATE_NAME_EXISTS =
+ "Failed to create template, because template name {} exists";
+ public static final String FAILED_TO_CREATE_TRIGGER_THE_SAME_NAME_JAR_BUT_DIFFERENT =
+ "Failed to create trigger [%s], the same name Jar [%s] but different MD5 [%s] has existed";
+ public static final String FAILED_TO_CREATE_TRIGGER_THE_SAME_NAME_TRIGGER_HAS_BEEN =
+ "Failed to create trigger [%s], the same name trigger has been created";
+ public static final String FAILED_TO_CREATE_UDF_THE_SAME_NAME_JAR_BUT_DIFFERENT =
+ "Failed to create UDF [%s], the same name Jar [%s] but different MD5 [%s] has existed";
+ public static final String FAILED_TO_CREATE_UDF_THE_SAME_NAME_UDF_HAS_BEEN =
+ "Failed to create UDF [%s], the same name UDF has been created";
+ public static final String FAILED_TO_DECREASE_LISTENER_REFERENCE =
+ "Failed to decrease listener reference";
+ public static final String FAILED_TO_DROP_PIPE = "Failed to drop pipe";
+ public static final String FAILED_TO_DROP_PIPEPLUGIN_THE_PIPEPLUGIN_IS_A_BUILT_IN =
+ "Failed to drop PipePlugin [%s], the PipePlugin is a built-in PipePlugin";
+ public static final String FAILED_TO_DROP_PIPEPLUGIN_THIS_PIPEPLUGIN_HAS_NOT_BEEN_CREATED =
+ "Failed to drop PipePlugin [%s], this PipePlugin has not been created";
+ public static final String FAILED_TO_DROP_TRIGGER_THIS_TRIGGER_HAS_NOT_BEEN_CREATED =
+ "Failed to drop trigger [%s], this trigger has not been created";
+ public static final String FAILED_TO_DROP_UDF_THIS_UDF_HAS_NOT_BEEN_CREATED =
+ "Failed to drop UDF [%s], this UDF has not been created";
+ public static final String FAILED_TO_FETCH_SCHEMAENGINE_BLACK_LIST_ON_DATANODE =
+ "Failed to fetch schemaengine black list on DataNode {}, {}";
+ public static final String FAILED_TO_GET_FIELD = "Failed to get field {}";
+ public static final String FAILED_TO_HANDLE_LEADER_CHANGE = "Failed to handle leader change";
+ public static final String FAILED_TO_HANDLE_META_CHANGES = "Failed to handle meta changes";
+ public static final String FAILED_TO_INCREASE_LISTENER_REFERENCE =
+ "Failed to increase listener reference";
+ public static final String FAILED_TO_LOAD_PIPE_INFO_FROM_SNAPSHOT =
+ "Failed to load pipe info from snapshot, ";
+ public static final String FAILED_TO_LOAD_PIPE_PLUGIN_INFO_FROM_SNAPSHOT =
+ "Failed to load pipe plugin info from snapshot";
+ public static final String FAILED_TO_LOAD_PIPE_TASK_INFO_FROM_SNAPSHOT =
+ "Failed to load pipe task info from snapshot";
+ public static final String FAILED_TO_LOAD_PLUGIN_CLASS_FOR_PLUGIN_WHEN_LOADING_SNAPSHOT =
+ "Failed to load plugin class for plugin [{}] when loading snapshot [{}] ";
+ public static final String FAILED_TO_LOAD_SNAPSHOT_BECAUSE_GET_NULL_DATABASE_NAME =
+ "Failed to load snapshot because get null database name";
+ public static final String FAILED_TO_LOAD_SNAPSHOT_BECAUSE_SNAPSHOT_DIR_NOT_EXISTS =
+ "Failed to load snapshot, because snapshot dir [{}] not exists.";
+ public static final String FAILED_TO_LOAD_SNAPSHOT_OF_CQINFO_SNAPSHOT_FILE_DOES_NOT =
+ "Failed to load snapshot of CQInfo, snapshot file [{}] does not exist.";
+ public static final String FAILED_TO_LOAD_SNAPSHOT_OF_TEMPLATEPRESETTABLE_SNAPSHOT_FILE_IS_NOT =
+ "Failed to load snapshot of TemplatePreSetTable,snapshot file [{}] is not a valid file.";
+ public static final String FAILED_TO_LOAD_SNAPSHOT_OF_TTLINFO_SNAPSHOT_FILE_DOES_NOT =
+ "Failed to load snapshot of TTLInfo, snapshot file [{}] does not exist.";
+ public static final String FAILED_TO_LOAD_SNAPSHOT_SNAPSHOT_FILE_IS_NOT_EXIST =
+ "Failed to load snapshot, snapshot file [{}] is not exist.";
+ public static final String FAILED_TO_LOAD_SNAPSHOT_SNAPSHOT_FILE_IS_NOT_EXIST_2 =
+ "Failed to load snapshot,snapshot file [{}] is not exist.";
+ public static final String FAILED_TO_LOAD_SUBSCRIPTION_SNAPSHOT_SNAPSHOT_FILE_IS_NOT_EXIST =
+ "Failed to load subscription snapshot, snapshot file {} is not exist.";
+ public static final String FAILED_TO_ON_CONFIGNODE_RESPONSE =
+ "Failed to {} on ConfigNode: {}, response: {}";
+ public static final String FAILED_TO_ON_DATANODE = "Failed to {} on DataNode {}, {}";
+ public static final String FAILED_TO_ON_DATANODE_EXCEPTION =
+ "Failed to {} on DataNode: {}, exception: {}";
+ public static final String FAILED_TO_ON_DATANODE_RESPONSE =
+ "Failed to {} on DataNode: {}, response: {}";
+ public static final String FAILED_TO_OPERATE_PIPE = "Failed to operate pipe";
+ public static final String FAILED_TO_SET_PIPE_STATUS = "Failed to set pipe status";
+ public static final String FAILED_TO_SET_PIPE_STATUS_WITH_STOPPED_BY_RUNTIME_EXCEPTION =
+ "Failed to set pipe status with stopped-by-runtime-exception flag";
+ public static final String FAILED_TO_TAKE_SNAPSHOT_BECAUSE_CREATE_TMP_DIR_FAIL =
+ "Failed to take snapshot, because create tmp dir [{}] fail.";
+ public static final String FAILED_TO_TAKE_SNAPSHOT_BECAUSE_SNAPSHOT_DIR_IS_ALREADY_EXIST =
+ "Failed to take snapshot, because snapshot dir [{}] is already exist.";
+ public static final String FAILED_TO_TAKE_SNAPSHOT_BECAUSE_SNAPSHOT_FILE_IS_ALREADY_EXIST =
+ "Failed to take snapshot, because snapshot file [{}] is already exist.";
+ public static final String FAILED_TO_TAKE_SNAPSHOT_OF_CQINFO_BECAUSE_SNAPSHOT_FILE_IS =
+ "Failed to take snapshot of CQInfo, because snapshot file [{}] is already exist.";
+ public static final String FAILED_TO_TAKE_SNAPSHOT_OF_TEMPLATEPRESETTABLE_BECAUSE_SNAPSHOT_FILE_IS =
+ "Failed to take snapshot of TemplatePreSetTable, because snapshot file [{}] is already exist.";
+ public static final String FAILED_TO_TAKE_SNAPSHOT_OF_TTLINFO_BECAUSE_SNAPSHOT_FILE_IS =
+ "Failed to take snapshot of TTLInfo, because snapshot file [{}] is already exist.";
+ public static final String FAILED_TO_TAKE_SUBSCRIPTION_SNAPSHOT_BECAUSE_SNAPSHOT_FILE_IS_ALREADY =
+ "Failed to take subscription snapshot, because snapshot file {} is already exist.";
+ public static final String FAILED_TO_UPDATE_CONFIG_FILE = "Failed to update config file";
+ public static final String FILE_NOT_EXISTS = "File {} not exists";
+ public static final String FOR_RECEIVES = "{} for {} receives: {}";
+ public static final String GET_DATANODE_CPU_CORE_FAIL_WILL_BE_TREATED_AS_ZERO =
+ "Get DataNode {} cpu core fail, will be treated as zero.";
+ public static final String GET_PIPEPLUGIN_JAR_FAILED = "Get PipePlugin_Jar failed";
+ public static final String GET_TRIGGERJAR_FAILED = "Get TriggerJar failed";
+ public static final String GET_UDF_JAR_FAILED = "Get UDF_Jar failed";
+ public static final String GET_URL_FAILED = "get url failed";
+ public static final String GET_USER_OR_ROLE_PERMISSIONINFO_FAILED_BECAUSE =
+ "get user or role permissionInfo failed because ";
+ public static final String HANDLING_CONSUMER_GROUP_META_CHANGES =
+ "Handling consumer group meta changes ...";
+ public static final String HANDLING_PIPE_META_CHANGES = "Handling pipe meta changes ...";
+ public static final String HANDLING_TOPIC_META_CHANGES = "Handling topic meta changes ...";
+ public static final String HAS_REGISTERED_SUCCESSFULLY_WAITING_FOR_THE_LEADER_S_SCHEDULING_TO =
+ "{} {} has registered successfully. Waiting for the leader's scheduling to join the cluster: {}.";
+ public static final String HAS_SUCCESSFULLY_RESTARTED_AND_JOINED_THE_CLUSTER =
+ "{} has successfully restarted and joined the cluster: {}.";
+ public static final String HAS_SUCCESSFULLY_STARTED_AND_JOINED_THE_CLUSTER =
+ "{} has successfully started and joined the cluster: {}.";
+ public static final String ID_TOOK_SNAPSHOT_FAIL = "{} id {} took snapshot fail";
+ public static final String INITSTANDALONECONFIGNODE_MEETS_ERROR_CAN_T_FIND_STANDALONE_LOG_FILES_FILEPATH =
+ "InitStandAloneConfigNode meets error, can't find standalone log files, filePath: {}";
+ public static final String INVALID_AUTHOR_TYPE_ORDINAL = "Invalid Author Type ordinal";
+ public static final String IOTDB_STARTED = "IoTDB started";
+ public static final String IS_DEACTIVATED = "{} is deactivated.";
+ public static final String IS_IN_RESTARTING_PROCESS = "{} is in restarting process...";
+ public static final String LEADER_DISTRIBUTION_POLICY = "leader_distribution_policy";
+ public static final String LEADER_HAS_NOT_BEEN_ELECTED_YET_WAIT_FOR_1_SECOND =
+ "Leader has not been elected yet, wait for 1 second";
+ public static final String LOAD_FAILED_IT_WILL_BE_DELETED = "Load {} failed, it will be deleted.";
+ public static final String LOAD_PROCEDURE_WAL_FAILED = "Load procedure wal failed.";
+ public static final String LOAD_SNAPSHOT_ERROR = "Load snapshot error";
+ public static final String MAKE_DIRS = "Make dirs: {}";
+ public static final String MEET_ERROR_WHEN_DEACTIVATE_CONFIGNODE =
+ "Meet error when deactivate ConfigNode";
+ public static final String MEET_ERROR_WHEN_DOING_START_CHECKING =
+ "Meet error when doing start checking";
+ public static final String MEET_ERROR_WHILE_STARTING_UP = "Meet error while starting up.";
+ public static final String NEW_TYPE_IS_NOT_COMPATIBLE_WITH_THE_EXISTING_ONE =
+ "New type %s is not compatible with the existing one %s";
+ public static final String NODE_IS_ALREADY_IN_REGION_LOCATIONS_WHEN_ADDREGIONNEWLOCATION_IN =
+ "Node is already in region locations when addRegionNewLocation in {}, ";
+ public static final String NODE_IS_NOT_IN_REGION_LOCATIONS_WHEN_REMOVEREGIONOLDLOCATION_IN =
+ "Node is not in region locations when removeRegionOldLocation in {}, ";
+ public static final String OLD_PROCEDURE_FILES_HAVE_BEEN_LOADED_SUCCESSFULLY_TAKING_SNAPSHOT =
+ "Old procedure files have been loaded successfully, taking snapshot...";
+ public static final String PARTITIONTABLECLEANER_THE_TIMEPARTITIONS_ARE_REMOVED_FROM_DATABASE =
+ "[PartitionTableCleaner] The TimePartitions: {} are removed from Database: {}";
+ public static final String PATH1_SHOULD_NOT_BE_NULL = "Path1 should not be null";
+ public static final String PIPEMETASYNCER_IS_TRYING_TO_RESTART_THE_PIPES =
+ "PipeMetaSyncer is trying to restart the pipes: {}";
+ public static final String PIPE_IS_USING_EXTERNAL_SOURCE_SKIP_REGION =
+ "Pipe {} is using external source, skip region leader change. PipeHandleLeaderChangePlan: {}";
+ public static final String PLAN_TYPE_IS_NOT_SUPPORTED = "Plan type %s is not supported.";
+ public static final String PLEASE_SET_THE_CN_SEED_CONFIG_NODE_PARAMETER_IN_IOTDB =
+ "Please set the cn_seed_config_node parameter in iotdb-system.properties file.";
+ public static final String PORTS_USED_IN_CONFIGNODE_HAVE_REPEAT =
+ "ports used in configNode have repeat.";
+ public static final String REACH_EOF = "Reach eof";
+ public static final String RECORDING_CONSUMER_GROUP_META = "Recording consumer group meta: {}";
+ public static final String RECORDING_TOPIC_META = "Recording topic meta: {}";
+ public static final String RECOVERED_CONSENSUS_PIPES_AS_RUNNING_DURING_SNAPSHOT_LOAD =
+ "Recovered consensus pipes {} as RUNNING during snapshot load.";
+ public static final String RELEASE_TRIGGERTABLELOCK = "release TriggerTableLock";
+ public static final String RELEASE_UDFTABLELOCK = "release UDFTableLock";
+ public static final String REMOVED_THE_AINODE_FROM_CLUSTER = "Removed the AINode {} from cluster";
+ public static final String REMOVED_THE_DATANODE_FROM_CLUSTER =
+ "Removed the datanode {} from cluster";
+ public static final String REMOVE_ONLINE_CONFIGNODE_FAILED = "Remove online ConfigNode failed.";
+ public static final String REPORTING_CONFIGNODE_SHUTDOWN_FAILED_THE_CLUSTER_WILL_STILL_TAKE_THE =
+ "Reporting ConfigNode shutdown failed. The cluster will still take the current ConfigNode as Running for a few seconds.";
+ public static final String RETRY_WAIT_FAILED = "Retry wait failed.";
+ public static final String ROUTE_PRIORITY_POLICY = "route_priority_policy";
+ public static final String SCHEMA_OF_MEASUREMENT_IS_NOT_COMPATIBLE_WITH_EXISTING_MEASUREMENT_IN =
+ "Schema of measurement %s is not compatible with existing measurement in template %s";
+ public static final String SCHEMA_REGION_CONSENSUS_PROTOCOL_CLASS =
+ "schema_region_consensus_protocol_class";
+ public static final String SEND_RPC_TO_DATA_NODE_FOR_CHANGING_REGIONS_LEADER_ON =
+ "Send RPC to data node: {} for changing regions leader on it";
+ public static final String SETTTL_THE_TTL_OF_DATABASE_IS_ADJUSTED_TO =
+ "[SetTTL] The ttl of Database: {} is adjusted to: {}";
+ public static final String SNAPSHOT_DIRECTORY_CAN_NOT_BE_CREATED =
+ "snapshot directory [{}] can not be created.";
+ public static final String SNAPSHOT_DIRECTORY_IS_NOT_EMPTY =
+ "Snapshot directory [{}] is not empty.";
+ public static final String SNAPSHOT_DIRECTORY_IS_NOT_EXIST_CAN_NOT_LOAD_SNAPSHOT_WITH =
+ "snapshot directory [{}] is not exist, can not load snapshot with this directory.";
+ public static final String SNAPSHOT_DIRECTORY_IS_NOT_EXIST_START_TO_CREATE_IT =
+ "snapshot directory [{}] is not exist,start to create it.";
+ public static final String STARTING_IOTDB = "Starting IoTDB {}";
+ public static final String START_CONFIGNODE_FAILED_BECAUSE_COULDN_T_MAKE_SYSTEM_DIRS =
+ "Start ConfigNode failed, because couldn't make system dirs: %s.";
+ public static final String START_READING_CONFIGNODE_CONF_FILE =
+ "start reading ConfigNode conf file: {}";
+ public static final String SUCCESSFULLY_APPLY_CONFIGNODE_CURRENT_CONFIGNODEGROUP =
+ "Successfully apply ConfigNode: {}. Current ConfigNodeGroup: {}";
+ public static final String SUCCESSFULLY_CHECK_SCHEMA_REGION_USING_TEMPLATE_ON_DATANODE =
+ "Successfully check schema region using template on DataNode: {}";
+ public static final String SUCCESSFULLY_CHECK_TIMESERIES_EXISTENCE_ON_DATANODE =
+ "Successfully check timeseries existence on DataNode: {}";
+ public static final String SUCCESSFULLY_COUNT_PATHS_USING_TEMPLATE_ON_DATANODE =
+ "Successfully count paths using template on DataNode: {}";
+ public static final String SUCCESSFULLY_FETCH_SCHEMAENGINE_BLACK_LIST_ON_DATANODE =
+ "Successfully fetch schemaengine black list on DataNode: {}";
+ public static final String SUCCESSFULLY_INITIALIZE_CONFIGMANAGER =
+ "Successfully initialize ConfigManager.";
+ public static final String SUCCESSFULLY_ON_CONFIGNODE = "Successfully {} on ConfigNode: {}";
+ public static final String SUCCESSFULLY_ON_DATANODE = "Successfully {} on DataNode: {}";
+ public static final String SUCCESSFULLY_REMOVE_CONFIGNODE_CURRENT_CONFIGNODEGROUP =
+ "Successfully remove ConfigNode: {}. Current ConfigNodeGroup: {}";
+ public static final String SUCCESSFULLY_SETUP_INTERNAL_SERVICES =
+ "Successfully setup internal services.";
+ public static final String SUCCESSFULLY_UPDATE_NODE_S_VERSION =
+ "Successfully update Node {} 's version.";
+ public static final String SYSTEMPROPERTIES_NORMALIZE_FROM_TO_FOR_COMPATIBILITY =
+ "[SystemProperties] Normalize {} from {} to {} for compatibility.";
+ public static final String SYSTEMPROPERTIES_STORE_CONFIG_NODE_ID =
+ "[SystemProperties] store config_node_id: {}";
+ public static final String SYSTEMPROPERTIES_STORE_IS_SEED_CONFIG_NODE =
+ "[SystemProperties] store is_seed_config_node: {}";
+ public static final String TAKE_SNAPSHOT_ERROR = "Take snapshot error";
+ public static final String TAKING_SNAPSHOT_FAIL_PROCEDURE_UPGRADE_FAIL =
+ "Taking snapshot fail, procedure upgrade fail";
+ public static final String TEMPLATE_ALREADY_EXISTS_ON = "Template already exists on ";
+ public static final String TEMPLATE_DOES_NOT_EXIST = "Template %s does not exist";
+ public static final String TEMPLATE_FAILED_TO_TAKE_SNAPSHOT_BECAUSE_SNAPSHOT_FILE_IS_ALREADY =
+ "template failed to take snapshot, because snapshot file [{}] is already exist.";
+ public static final String TEMPLATE_IS_NOT_SET_ON_PATH = "Template %s is not set on path %s";
+ public static final String TEMPLATE_WITH_ID_DOES_NOT_EXIST = "Template with id=%s does not exist";
+ public static final String THERE_ARE_AI_NODES_IN_CLUSTER_AFTER_EXECUTED_REMOVEAINODEPLAN =
+ "{}, There are {} AI nodes in cluster after executed RemoveAINodePlan";
+ public static final String THERE_ARE_AI_NODES_IN_CLUSTER_BEFORE_EXECUTED_REMOVEAINODEPLAN =
+ "{}, There are {} AI nodes in cluster before executed RemoveAINodePlan";
+ public static final String THERE_ARE_DATA_NODE_IN_CLUSTER_AFTER_EXECUTED_REMOVEDATANODEPLAN =
+ "{}, There are {} data node in cluster after executed RemoveDataNodePlan";
+ public static final String THERE_ARE_DATA_NODE_IN_CLUSTER_BEFORE_EXECUTED_REMOVEDATANODEPLAN =
+ "{}, There are {} data node in cluster before executed RemoveDataNodePlan";
+ public static final String THESE_REQUEST_TYPES_SHOULD_BE_ADDED_TO_ACTIONMAP =
+ "These request types should be added to actionMap: %s";
+ public static final String THE_CHECK_SUM_OF_THE_NO_LOG_BATCH_IS_INCORRECT =
+ "The check sum of the No.%d log batch is incorrect! In ";
+ public static final String THE_CURRENT_CONFIGNODE_CAN_T_JOINED_THE_CLUSTER_BECAUSE_LEADER =
+ "The current ConfigNode can't joined the cluster because leader's scheduling failed. The possible cause is that the ip:port configuration is incorrect.";
+ public static final String THE_CURRENT_CONFIGNODE_CAN_T_SEND_REGISTER_REQUEST_TO_THE =
+ "The current ConfigNode can't send register request to the ConfigNode-leader after all retries!";
+ public static final String THE_CURRENT_IS_NOW_STARTING_AS_THE_SEED_CONFIGNODE =
+ "The current {} is now starting as the Seed-ConfigNode.";
+ public static final String THE_DATA_REPLICATION_FACTOR_SHOULD_BE_POSITIVE =
+ "The data_replication_factor should be positive";
+ public static final String THE_DEFAULT_DATA_REGION_GROUP_NUM_SHOULD_BE_POSITIVE =
+ "The default_data_region_group_num should be positive";
+ public static final String THE_DEFAULT_SCHEMA_REGION_GROUP_NUM_SHOULD_BE_POSITIVE =
+ "The default_schema_region_group_num should be positive";
+ public static final String THE_PARAMETER_CN_TARGET_CONFIG_NODE_LIST_HAS_BEEN_ABANDONED =
+ "The parameter cn_target_config_node_list has been abandoned, only the first ConfigNode address will be used to join in the cluster. Please use cn_seed_config_node instead.";
+ public static final String THE_PARAMETER_CONFIG_NODE_ID_DOESN_T_EXIST_IN =
+ "The parameter config_node_id doesn't exist in ";
+ public static final String THE_PROCEDURE_FRAMEWORK_HAS_BEEN_SUCCESSFULLY_UPGRADED_NOW_IT_USES =
+ "The Procedure framework has been successfully upgraded. Now it uses the consensus layer's services instead of maintaining the WAL itself.";
+ public static final String THE_REMOVE_CONFIGNODE_SCRIPT_HAS_BEEN_DEPRECATED_PLEASE_CONNECT_TO =
+ "The remove-confignode script has been deprecated. Please connect to the CLI and use SQL: remove confignode [confignode_id].";
+ public static final String THE_RESULT_OF_REGISTER_CONFIGNODE_IS_EMPTY =
+ "The result of register ConfigNode is empty!";
+ public static final String THE_RESULT_OF_REGISTER_SELF_CONFIGNODE_IS_RETRY =
+ "The result of register self ConfigNode is {}, retry {} ...";
+ public static final String THE_RESULT_OF_SUBMITTING_REMOVECONFIGNODE_JOB_IS_REMOVECONFIGNODEREQUEST =
+ "The result of submitting RemoveConfigNode job is {}. RemoveConfigNodeRequest: {}";
+ public static final String THE_SCHEMA_REPLICATION_FACTOR_SHOULD_BE_POSITIVE =
+ "The schema_replication_factor should be positive";
+ public static final String THE_SEEDCONFIGNODE_SETTING_IN_CONF_IS_EMPTY =
+ "The seedConfigNode setting in conf is empty";
+ public static final String THE_S_CREATION_HAS_NOT_PASSED_IN_JARNAME_WHICH_DOES =
+ "The %s's creation has not passed in jarName, which does not exist in other pipePlugins. Please check";
+ public static final String THE_TIMESTAMP_PRECISION_SHOULD_BE_MS_US_OR_NS =
+ "The timestamp_precision should be ms, us or ns";
+ public static final String THE_TIME_PARTITION_INTERVAL_SHOULD_BE_POSITIVE =
+ "The time_partition_interval should be positive";
+ public static final String THE_TIME_PARTITION_ORIGIN_SHOULD_BE_NON_NEGATIVE =
+ "The time_partition_origin should be non-negative";
+ public static final String TRY_LISTEN_TO_PLAN_FAILED = "Try listen to plan failed";
+ public static final String UNDEFINED_TEMPLATE = "Undefined template {}";
+ public static final String UNEXPECTED_INTERRUPTION_DURING_THE_CLOSE_METHOD_OF_LOGWRITER =
+ "Unexpected interruption during the close method of logWriter";
+ public static final String UNEXPECTED_INTERRUPTION_DURING_WAITING_FOR_LEADER_ELECTION =
+ "Unexpected interruption during waiting for leader election.";
+ public static final String UNEXPECTED_READ_PLAN = "Unexpected read plan : {}";
+ public static final String UNEXPECTED_WRITE_PLAN_REQUEST_BYTEBUFFER =
+ "Unexpected write plan, request: {}, bytebuffer: {}";
+ public static final String UNKNOWN_FAILURE_DETECTOR = "Unknown failure_detector: %s, please set to \"fixed\" or \"phi_accrual\"";
+ public static final String UNKNOWN_HOST_WHEN_CHECKING_SEED_CONFIGNODE_IP =
+ "Unknown host when checking seed configNode IP {}";
+ public static final String UNKNOWN_LEADER_DISTRIBUTION_POLICY =
+ "Unknown leader_distribution_policy: %s, please set to \"GREEDY\" or \"CFD\" or \"HASH\"";
+ public static final String UNKNOWN_PHYSICALPLAN_CONFIGPHYSICALPLANTYPE =
+ "unknown PhysicalPlan configPhysicalPlanType: ";
+ public static final String UNKNOWN_READ_CONSISTENCY_LEVEL_PLEASE_SET_TO =
+ "Unknown read_consistency_level: %s, please set to \"strong\" or \"weak\"";
+ public static final String UNKNOWN_ROUTE_PRIORITY_POLICY_PLEASE_SET_TO =
+ "Unknown route_priority_policy: %s, please set to \"LEADER\" or \"GREEDY\"";
+ public static final String UNRECOGNIZED_LOG_CONFIGPHYSICALPLANTYPE =
+ "Unrecognized log configPhysicalPlanType: ";
+ public static final String UNRECOGNIZED_REGIONMAINTAINTYPE = "Unrecognized RegionMaintainType: ";
+ public static final String UNSUPPORTED_SUBPLAN_TYPE = "Unsupported subPlan type: %s";
+ public static final String UNSUPPORTED_SUB_PLAN_TYPE = "Unsupported sub plan type: ";
+ public static final String UPDATE_ONLINE_CONFIGNODE_FAILED = "Update online ConfigNode failed.";
+ public static final String UPDATE_PROCEDURE_PID_WAL_FAILED =
+ "Update Procedure (pid={}) wal failed";
+ public static final String UTILITY_CLASS_SYSTEMPROPERTIESUTILS =
+ "Utility class: SystemPropertiesUtils.";
+ public static final String VIEW_IS_NOT_SUPPORTED = "View is not supported.";
+ public static final String WRITE_CONFIGNODE_SYSTEM_PROPERTIES_FAILED =
+ "Write confignode-system.properties failed";
+ public static final String WRONG_MNODE_TYPE = "Wrong MNode Type";
+ public static final String WRONG_NODE_TYPE = "Wrong node type";
+ public static final String YOU_SHOULD_MANUALLY_DELETE_THE_PROCEDURE_WAL_DIR_BEFORE_CONFIGNODE =
+ "You should manually delete the procedure wal dir before ConfigNode restart. {}";
+ public static final String NOT_SUPPORT = "not support";
+
+ private ConfigNodeMessages() {}
+}
diff --git a/iotdb-core/confignode/src/main/i18n/en/org/apache/iotdb/confignode/i18n/ManagerMessages.java b/iotdb-core/confignode/src/main/i18n/en/org/apache/iotdb/confignode/i18n/ManagerMessages.java
new file mode 100644
index 0000000000000..0ff26a0a89119
--- /dev/null
+++ b/iotdb-core/confignode/src/main/i18n/en/org/apache/iotdb/confignode/i18n/ManagerMessages.java
@@ -0,0 +1,512 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.i18n;
+
+public final class ManagerMessages {
+
+ public static final String ACTIVATEDATAALLOTTABLE_ACTIVATE_SERIESPARTITIONSLOT =
+ "[ActivateDataAllotTable] Activate SeriesPartitionSlot {} ";
+ public static final String AFTER_THIS_SUCCESSFUL_SYNC_IF_PIPETASKINFO_IS_EMPTY_DURING_THIS =
+ "After this successful sync, if PipeTaskInfo is empty during this sync and has not been modified afterwards, all subsequent syncs will be skipped";
+ public static final String AFTER_THIS_SUCCESSFUL_SYNC_IF_SUBSCRIPTIONINFO_IS_EMPTY_DURING_THIS =
+ "After this successful sync, if SubscriptionInfo is empty during this sync and has not been modified afterwards, all subsequent syncs will be skipped";
+ public static final String ATTEMPT_TO_REPORT_PIPE_EXCEPTION_TO_A_NULL_PIPETASKMETA =
+ "Attempt to report pipe exception to a null PipeTaskMeta.";
+ public static final String AUTH_RUN_AUTH_PLAN = "Auth: run auth plan: {}";
+ public static final String CLUSTERID = "clusterID: {}";
+ public static final String COLLECTING_PIPE_HEARTBEAT_FROM_DATA_NODES =
+ "Collecting pipe heartbeat {} from data nodes";
+ public static final String CONNECTION_FROM_DATANODE_TO_DATANODE_IS_BROKEN =
+ "Connection from DataNode {} to DataNode {} is broken";
+ public static final String CONSENSUSGROUPSTATISTICS = "[ConsensusGroupStatistics]\t {}: {} -> {}";
+ public static final String CONSENSUSGROUPSTATISTICS_CONSENSUSGROUPSTATISTICSMAP =
+ "[ConsensusGroupStatistics] ConsensusGroupStatisticsMap: ";
+ public static final String CONSENSUSMANAGER_GETLEADERPEER_BEEN_INTERRUPTED =
+ "ConsensusManager getLeaderPeer been interrupted, ";
+ public static final String CONSUMER_IN_CONSUMER_GROUP_FAILED_TO_SUBSCRIBE_TOPICS_RESULT_STATUS =
+ "Consumer {} in consumer group {} failed to subscribe topics {}. Result status: {}.";
+ public static final String CONSUMER_IN_CONSUMER_GROUP_FAILED_TO_UNSUBSCRIBE_TOPICS_RESULT_STATUS =
+ "Consumer {} in consumer group {} failed to unsubscribe topics {}. Result status: {}.";
+ public static final String CREATEPEERFORCONSENSUSGROUP = "createPeerForConsensusGroup {}...";
+ public static final String CREATEREGIONGROUPS_STARTING_TO_CREATE_THE_FOLLOWING_REGIONGROUPS =
+ "[CreateRegionGroups] Starting to create the following RegionGroups:";
+ public static final String CREATE_DATAPARTITION_FAILED_BECAUSE =
+ "Create DataPartition failed because: ";
+ public static final String CREATE_SCHEMAPARTITION_FAILED_BECAUSE =
+ "Create SchemaPartition failed because: ";
+ public static final String DATABASE_DOESN_T_EXIST = "Database: {} doesn't exist";
+ public static final String DATABASE_NOT_EXISTS_WHEN_SETUPPARTITIONBALANCER =
+ "Database {} not exists when setupPartitionBalancer";
+ public static final String DATABASE_NOT_EXISTS_WHEN_UPDATEDATAALLOTTABLE =
+ "Database {} not exists when updateDataAllotTable";
+ public static final String DATANODELOCATION_IS_NULL_DATANODEID =
+ "DataNodeLocation is null, datanodeId {}";
+ public static final String DATAREGIONGROUPEXTENSIONPOLICY_DOESN_T_EXIST =
+ "DataRegionGroupExtensionPolicy %s doesn't exist.";
+ public static final String DECREASE_REFERENCE_COUNT_FOR_SNAPSHOT_ERROR =
+ "Decrease reference count for snapshot {} error.";
+ public static final String DELETING_REGIONS_COSTS_MS = "Deleting regions costs {}ms";
+ public static final String DETECTED_COMPLETION_OF_PIPE_STATIC_META_REMOVE_IT =
+ "Detected completion of pipe {}, static meta: {}, remove it.";
+ public static final String DETECT_PIPERUNTIMECRITICALEXCEPTION_FROM_AGENT_STOP_PIPE =
+ "Detect PipeRuntimeCriticalException {} from agent, stop pipe {}.";
+ public static final String ENABLE_SEPARATION_OF_POWERS_IS_NOT_SUPPORTED =
+ "Enable separation of powers is not supported";
+ public static final String ENDEXECUTECQ_TIME_RANGE_IS_CURRENT_TIME_IS =
+ "[EndExecuteCQ] {}, time range is [{}, {}), current time is {}";
+ public static final String ERROR_HAPPENED_WHILE_SHUTTING_DOWN_PREVIOUS_CQ_SCHEDULE_THREAD_POOL =
+ "Error happened while shutting down previous cq schedule thread pool.";
+ public static final String ERROR_OCCURRED_DURING_CLOSING_PIPECONNECTOR =
+ "Error occurred during closing PipeConnector.";
+ public static final String ERROR_OCCURRED_DURING_CLOSING_PIPEEXTRACTOR =
+ "Error occurred during closing PipeExtractor.";
+ public static final String ERROR_OCCURRED_DURING_CLOSING_PIPEPROCESSOR =
+ "Error occurred during closing PipeProcessor.";
+ public static final String ERROR_WHEN_COUNTING_DATAREGIONGROUPS_IN_DATABASE =
+ "Error when counting DataRegionGroups in Database: {}";
+ public static final String ERROR_WHEN_COUNTING_SCHEMAREGIONGROUPS_IN_DATABASE =
+ "Error when counting SchemaRegionGroups in Database: {}";
+ public static final String EVENT_SERVICE_IS_STARTED_SUCCESSFULLY =
+ "Event service is started successfully.";
+ public static final String EVENT_SERVICE_IS_STOPPED_SUCCESSFULLY =
+ "Event service is stopped successfully.";
+ public static final String EXCEPTION_ENCOUNTERED_WHEN_TRIGGERING_SCHEMA_REGION_SNAPSHOT =
+ "Exception encountered when triggering schema region snapshot.";
+ public static final String EXECUTE_CQ_FAILED = "Execute CQ {} failed";
+ public static final String EXECUTE_CQ_FAILED_TSSTATUS_IS = "Execute CQ {} failed, TSStatus is {}";
+ public static final String EXPECTED_PIPE_HEARTBEAT_NODE_COUNT_IS_FALLBACK_TO_1 =
+ "Expected pipe heartbeat node count is {}, fallback to 1.";
+ public static final String EXTENDREGION_SUBMIT_ADDREGIONPEERPROCEDURE_SUCCESSFULLY =
+ "[ExtendRegion] Submit AddRegionPeerProcedure successfully: {}";
+ public static final String EXTEND_REGION_GROUP_FAILED = "Extend region group failed";
+ public static final String FAILED_IN_THE_READ_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER =
+ "Failed in the read/write API executing the consensus layer due to: ";
+ public static final String FAILED_TO_ACQUIRE_LOCK_WHEN_PARSEHEARTBEAT_FROM_NODE_ID =
+ "Failed to acquire lock when parseHeartbeat from node (id={}).";
+ public static final String FAILED_TO_ACQUIRE_PIPE_LOCK_FOR_AUTO_RESTART_PIPE_TASK =
+ "Failed to acquire pipe lock for auto restart pipe task.";
+ public static final String FAILED_TO_ACQUIRE_PIPE_LOCK_FOR_HANDLING_SUCCESSFUL_RESTART =
+ "Failed to acquire pipe lock for handling successful restart.";
+ public static final String FAILED_TO_ALTER_PIPE_RESULT_STATUS =
+ "Failed to alter pipe {}. Result status: {}.";
+ public static final String FAILED_TO_CHECK_AND_REPAIR_CONSENSUS_PIPES =
+ "Failed to check and repair consensus pipes";
+ public static final String FAILED_TO_CHECK_PASSWORD_FOR_PIPE =
+ "Failed to check password for pipe %s.";
+ public static final String FAILED_TO_CLOSE_CONSUMER_IN_CONSUMER_GROUP_RESULT_STATUS =
+ "Failed to close consumer {} in consumer group {}. Result status: {}.";
+ public static final String FAILED_TO_CLOSE_EXTRACTOR_AFTER_FAILED_TO_INITIALIZE_EXTRACTOR =
+ "Failed to close extractor after failed to initialize extractor. ";
+ public static final String FAILED_TO_CLOSE_SINK_AFTER_FAILED_TO_INITIALIZE_IT_IGNORE =
+ "Failed to close sink after failed to initialize it. Ignore this exception.";
+ public static final String FAILED_TO_COLLECT_COMMITCREATETABLEPLAN =
+ "Failed to collect CommitCreateTablePlan";
+ public static final String FAILED_TO_COLLECT_PIPE_META_LIST_FROM_CONFIG_NODE_TASK =
+ "Failed to collect pipe meta list from config node task agent";
+ public static final String FAILED_TO_COLLECT_UNSETTEMPLATEPLAN =
+ "Failed to collect UnsetTemplatePlan";
+ public static final String FAILED_TO_COLLECT_USER_NAME_FOR_USER_ID =
+ "Failed to collect user name for user id {}";
+ public static final String FAILED_TO_CREATE_CONSUMER_IN_CONSUMER_GROUP_RESULT_STATUS =
+ "Failed to create consumer {} in consumer group {}. Result status: {}.";
+ public static final String FAILED_TO_CREATE_PEER_FOR_CONSENSUS_GROUP =
+ "Failed to create peer for consensus group";
+ public static final String FAILED_TO_CREATE_PIPE_RESULT_STATUS =
+ "Failed to create pipe {}. Result status: {}.";
+ public static final String FAILED_TO_CREATE_SUBTASK_FOR_PIPE_CREATION_TIME =
+ "Failed to create subtask for pipe %s, creation time %d";
+ public static final String FAILED_TO_CREATE_TOPIC_WITH_ATTRIBUTES_RESULT_STATUS =
+ "Failed to create topic {} with attributes {}. Result status: {}.";
+ public static final String FAILED_TO_DEEP_COPY_PIPEMETA = "failed to deep copy pipeMeta";
+ public static final String FAILED_TO_DEREGISTER_PIPE_CONFIG_REGION_CONNECTOR =
+ "Failed to deregister pipe config region connector metrics, PipeConfigNodeSubtask({}) does not exist";
+ public static final String FAILED_TO_DEREGISTER_PIPE_CONFIG_REGION_EXTRACTOR =
+ "Failed to deregister pipe config region extractor metrics, IoTDBConfigRegionExtractor({}) does not exist";
+ public static final String FAILED_TO_DEREGISTER_PIPE_REMAINING_TIME_METRICS_REMAININGTIMEOPERATOR_DOES_NOT =
+ "Failed to deregister pipe remaining time metrics, RemainingTimeOperator({}) does not exist";
+ public static final String FAILED_TO_DEREGISTER_PIPE_TEMPORARY_META_METRICS_PIPETEMPORARYMETA_DOES_NOT =
+ "Failed to deregister pipe temporary meta metrics, PipeTemporaryMeta({}) does not exist";
+ public static final String FAILED_TO_DROP_PIPE_RESULT_STATUS =
+ "Failed to drop pipe {}. Result status: {}.";
+ public static final String FAILED_TO_GET_ALL_PIPE_INFO = "Failed to get all pipe info.";
+ public static final String FAILED_TO_GET_ALL_SUBSCRIPTION_INFO =
+ "Failed to get all subscription info.";
+ public static final String FAILED_TO_GET_ALL_TOPIC_INFO = "Failed to get all topic info.";
+ public static final String FAILED_TO_HANDLE_PIPE_META_CHANGES =
+ "failed to handle pipe meta changes";
+ public static final String FAILED_TO_HANDLE_PIPE_META_CHANGE_RESULT_STATUS =
+ "Failed to handle pipe meta change. Result status: {}.";
+ public static final String FAILED_TO_LOAD_SNAPSHOT_FROM_BYTEBUFFER =
+ "Failed to load snapshot from byteBuffer {}.";
+ public static final String FAILED_TO_LOAD_SNAPSHOT_SNAPSHOT_FILE_IS_NOT_A_NORMAL =
+ "Failed to load snapshot,snapshot file [{}] is not a normal file.";
+ public static final String FAILED_TO_MARK_PIPE_CONFIG_REGION_WRITE_PLAN_EVENT_PIPECONFIGNODESUBTASK =
+ "Failed to mark pipe config region write plan event, PipeConfigNodeSubtask({}) does not exist";
+ public static final String FAILED_TO_MARK_PIPE_REGION_COMMIT_REMAININGTIMEOPERATOR_DOES_NOT_EXIST =
+ "Failed to mark pipe region commit, RemainingTimeOperator({}) does not exist";
+ public static final String FAILED_TO_SHOW_SUBSCRIPTION_INFO = "Failed to show subscription info.";
+ public static final String FAILED_TO_SHOW_TOPIC_INFO = "Failed to show topic info.";
+ public static final String FAILED_TO_START_PIPE_RESULT_STATUS =
+ "Failed to start pipe {}. Result status: {}.";
+ public static final String FAILED_TO_STOP_PIPE_RESULT_STATUS =
+ "Failed to stop pipe {}. Result status: {}.";
+ public static final String FAILED_TO_SUBMIT_ASYNC_CONSENSUS_PIPE_CREATION_FOR =
+ "Failed to submit async consensus pipe creation for {}: {}";
+ public static final String FAILED_TO_SUBMIT_ASYNC_CONSENSUS_PIPE_DROP_FOR =
+ "Failed to submit async consensus pipe drop for {}: {}";
+ public static final String FAILED_TO_SYNC_CONSUMER_GROUP_META_RESULT_STATUS =
+ "Failed to sync consumer group meta. Result status: {}.";
+ public static final String FAILED_TO_SYNC_PIPE_META_RESULT_STATUS =
+ "Failed to sync pipe meta. Result status: {}.";
+ public static final String FAILED_TO_SYNC_TEMPLATE_EXTENSION_INFO_TO_DATANODE =
+ "Failed to sync template {} extension info to DataNode {}";
+ public static final String FAILED_TO_SYNC_TOPIC_META_RESULT_STATUS =
+ "Failed to sync topic meta. Result status: {}.";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_CONFIG_REGION_CONNECTOR_METRICS_CONNECTOR =
+ "Failed to unbind from pipe config region connector metrics, connector map not empty";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_CONFIG_REGION_EXTRACTOR_METRICS_EXTRACTOR =
+ "Failed to unbind from pipe config region extractor metrics, extractor map not empty";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_REMAINING_TIME_METRICS_REMAININGTIMEOPERATOR_MAP =
+ "Failed to unbind from pipe remaining time metrics, RemainingTimeOperator map not empty";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_TEMPORARY_META_METRICS_PIPETEMPORARYMETA_MAP =
+ "Failed to unbind from pipe temporary meta metrics, PipeTemporaryMeta map not empty";
+ public static final String FAILED_TO_UPDATE_PIPE_PROCEDURE_TIMER_PIPEPROCEDURE_DOES_NOT_EXIST =
+ "Failed to update pipe procedure timer, PipeProcedure({}) does not exist";
+ public static final String FAILED_TO_UPDATE_THE_LAST_EXECUTION_TIME_OF_CQ_BECAUSE =
+ "Failed to update the last execution time {} of CQ {}, because {}";
+ public static final String FAIL_TO_GET_ALLUDFTABLE = "Fail to get AllUDFTable";
+ public static final String FAIL_TO_GET_PIPEPLUGINTABLE = "Fail to get PipePluginTable";
+ public static final String FAIL_TO_GET_TRIGGERTABLE = "Fail to get TriggerTable";
+ public static final String FAIL_TO_GET_UDFTABLE = "Fail to get UDFTable";
+ public static final String FAIL_TO_TRANSFER_BECAUSE_WILL_RETRY =
+ "Fail to transfer because {}, will retry";
+ public static final String FORCE_UPDATE_NODECACHE_STATUS_CURRENTNANOTIME =
+ "Force update NodeCache: status={}, currentNanoTime={}";
+ public static final String GETDATAPARTITION_INTERFACE_RECEIVE_PARTITIONSLOTSMAP_RETURN =
+ "GetDataPartition interface receive PartitionSlotsMap: {}, return: {}";
+ public static final String GETNODEPATHSPARTITION_RECEIVED_PARTIALPATH_LEVEL_PATHPATTERNTREE_RESP =
+ "[GetNodePathsPartition]:{}Received PartialPath: {}, Level: {}, PathPatternTree: {}, Resp: {}";
+ public static final String GETORCREATEDATAPARTITION_RECEIVE_PARTITIONSLOTSMAP_RETURN_TDATAPARTITIONTABLERESP =
+ "[GetOrCreateDataPartition]:{}Receive PartitionSlotsMap: {}, Return TDataPartitionTableResp: {}";
+ public static final String GETORCREATESCHEMAPARTITION_RECEIVE_DATABASENAMESLOTMAP_RETURN_TSCHEMAPARTITIONTABLERESP =
+ "[GetOrCreateSchemaPartition]:{}Receive databaseNameSlotMap: {}, Return TSchemaPartitionTableResp: {}";
+ public static final String GETORCREATESCHEMAPARTITION_RECEIVE_PATHPATTERNTREE_RETURN_TSCHEMAPARTITIONTABLERESP =
+ "[GetOrCreateSchemaPartition]:{}Receive PathPatternTree: {}, Return TSchemaPartitionTableResp: {}";
+ public static final String GETSCHEMAPARTITION_RECEIVE_PATHS_RETURN =
+ "GetSchemaPartition receive paths: {}, return: {}";
+ public static final String GET_REGION_GROUP_ID_FAIL = "get region group id fail";
+ public static final String HEARTBEAT_SERVICE_IS_STARTED_SUCCESSFULLY =
+ "Heartbeat service is started successfully.";
+ public static final String HEARTBEAT_SERVICE_IS_STOPPED_SUCCESSFULLY =
+ "Heartbeat service is stopped successfully.";
+ public static final String INCORRECT_VERSION_OF = "Incorrect version of ";
+ public static final String INIT_CONSENSUSMANAGER_SUCCESSFULLY_WHEN_RESTARTED =
+ "Init ConsensusManager successfully when restarted";
+ public static final String INTERRUPTED_WHILE_WAITING_FOR_PIPETASKCOORDINATOR_LOCK_CURRENT_THREAD =
+ "Interrupted while waiting for PipeTaskCoordinator lock, current thread: {}";
+ public static final String INTERRUPT_WHEN_WAIT_FOR_CALCULATING_REGION_PRIORITY =
+ "Interrupt when wait for calculating Region priority";
+ public static final String INTERRUPT_WHEN_WAIT_FOR_LEADER_ELECTION =
+ "Interrupt when wait for leader election";
+ public static final String INVALID_EVENT_TYPE = "Invalid event type: ";
+ public static final String IOTCONSENSUSV2_LEADER_CHANGED_FAILED_TO_FLUSH_OLD_LEADER_FOR_REGION =
+ "[IoTConsensusV2 Leader Changed] Failed to flush old leader {} for region {}";
+ public static final String IOTCONSENSUSV2_LEADER_CHANGED_SUCCESSFULLY_FLUSH_OLD_LEADER_FOR_REGION =
+ "[IoTConsensusV2 Leader Changed] Successfully flush old leader {} for region {}";
+ public static final String IOTDBCONFIGNODERECEIVER_DOES_NOT_SUPPORT_LOAD_FILE_V1 =
+ "IoTDBConfigNodeReceiver does not support load file V1.";
+ public static final String IOTDBCONFIGREGIONAIRGAPCONNECTOR_CAN_T_TRANSFER_TABLETINSERTIONEVENT =
+ "IoTDBConfigRegionAirGapConnector can't transfer TabletInsertionEvent.";
+ public static final String IOTDBCONFIGREGIONAIRGAPCONNECTOR_CAN_T_TRANSFER_TSFILEINSERTIONEVENT =
+ "IoTDBConfigRegionAirGapConnector can't transfer TsFileInsertionEvent.";
+ public static final String IOTDBCONFIGREGIONAIRGAPCONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT =
+ "IoTDBConfigRegionAirGapConnector does not support transferring generic event: {}.";
+ public static final String IOTDBCONFIGREGIONSINK_CAN_T_TRANSFER_TABLETINSERTIONEVENT =
+ "IoTDBConfigRegionSink can't transfer TabletInsertionEvent.";
+ public static final String IOTDBCONFIGREGIONSINK_CAN_T_TRANSFER_TSFILEINSERTIONEVENT =
+ "IoTDBConfigRegionSink can't transfer TsFileInsertionEvent.";
+ public static final String IOTDBCONFIGREGIONSINK_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT =
+ "IoTDBConfigRegionSink does not support transferring generic event: {}.";
+ public static final String IOTDBCONFIGREGIONSOURCE_DOES_NOT_TRANSFERRING_EVENTS_UNDER_SIMPLE_CONSENSUS =
+ "IoTDBConfigRegionSource does not transferring events under simple consensus";
+ public static final String LEADERBALANCER_FAILED_TO_CHANGE_THE_LEADER_OF_REGION_TO_DATANODE =
+ "[LeaderBalancer] Failed to change the leader of Region: {} to DataNode: {}";
+ public static final String LEADERBALANCER_REGION_NOT_IN_DATABASEREGIONGROUPMAP =
+ "[LeaderBalancer] Region: {} not in databaseRegionGroupMap";
+ public static final String LEADERBALANCER_REGION_NOT_IN_REGIONLEADERMAP =
+ "[LeaderBalancer] Region: {} not in regionLeaderMap";
+ public static final String LEADERBALANCER_REGION_NOT_IN_REGIONLOCATIONMAP =
+ "[LeaderBalancer] Region: {} not in regionLocationMap";
+ public static final String LEADERBALANCER_REGION_NOT_IN_REGIONSTATISTICSMAP =
+ "[LeaderBalancer] Region: {} not in regionStatisticsMap";
+ public static final String LEADERBALANCER_THE_FOLLOWING_REGIONGROUPS_LEADER_CANNOT_BE =
+ "[LeaderBalancer] The following RegionGroups' leader cannot be selected because their corresponding caches are incomplete: {}";
+ public static final String LEADERBALANCER_TRY_TO_CHANGE_THE_LEADER_OF_REGION_TO_DATANODE =
+ "[LeaderBalancer] Try to change the leader of Region: {} to DataNode: {} ";
+ public static final String LOADSTATISTICS_SERVICE_IS_STARTED_SUCCESSFULLY =
+ "LoadStatistics service is started successfully.";
+ public static final String LOADSTATISTICS_SERVICE_IS_STOPPED_SUCCESSFULLY =
+ "LoadStatistics service is stopped successfully.";
+ public static final String MIGRATEREGION_SUBMIT_REGIONMIGRATEPROCEDURE_SUCCESSFULLY_REGION_ORIGIN_DATANODE =
+ "[MigrateRegion] Submit RegionMigrateProcedure successfully, Region: {}, Origin DataNode: {}, Dest DataNode: {}, Add Coordinator: {}, Remove Coordinator: {}";
+ public static final String MISMATCHED_CRC32_CODE_WHEN_DESERIALIZING_SERVICE_INFO =
+ "Mismatched CRC32 code when deserializing service info.";
+ public static final String NETWORK_ERROR_WHEN_SEAL_CONFIG_REGION_SNAPSHOT_BECAUSE =
+ "Network error when seal config region snapshot %s, because %s.";
+ public static final String NETWORK_ERROR_WHEN_TRANSFER_CONFIG_REGION_WRITE_PLAN_BECAUSE =
+ "Network error when transfer config region write plan %s, because %s.";
+ public static final String NETWORK_ERROR_WHEN_TRANSFER_EVENT_BECAUSE =
+ "Network error when transfer event %s, because %s.";
+ public static final String NODEMANAGER_START_TO_REMOVE_DATANODE =
+ "NodeManager start to remove DataNode {}";
+ public static final String NODEMANAGER_SUBMIT_REMOVEAINODEPLAN_FINISHED =
+ "NodeManager submit RemoveAINodePlan finished, {}";
+ public static final String NODEMANAGER_SUBMIT_REMOVEDATANODEPLAN_FINISHED_REMOVEDATANODEPLAN =
+ "NodeManager submit RemoveDataNodePlan finished, removeDataNodePlan: {}";
+ public static final String NODESTATISTICS = "[NodeStatistics]\t {}: {} -> {}";
+ public static final String NODESTATISTICS_NODESTATISTICSMAP =
+ "[NodeStatistics] NodeStatisticsMap: ";
+ public static final String NOT_HAS_PRIVILEGE_TO_TRANSFER_PLAN =
+ "Not has privilege to transfer plan: ";
+ public static final String NOT_IMPLEMENT_YET = "not implement yet";
+ public static final String NO_CORRESPONDING_PIPE_IS_RUNNING_IN_THE_REPORTED_DATAREGION_RUNTIMEMETAFROMAGENT =
+ "No corresponding Pipe is running in the reported DataRegion. runtimeMetaFromAgent is null, runtimeMetaFromCoordinator: {}";
+ public static final String PARTITIONBALANCER_THE_SERIESSLOT_IN_TIMESLOT_WILL_BE =
+ "[PartitionBalancer] The SeriesSlot: {} in TimeSlot: {} will be allocated to DataRegionGroup: {}, because the original target: {} is currently unavailable.";
+ public static final String PHIACCRUALDETECTOR_TOPOLOGY_IS_BROKEN_HEARTBEAT_HISTORY_MS =
+ "[PhiAccrualDetector] Topology {} is broken, heartbeat history (ms): {}";
+ public static final String PHIACCRUALDETECTOR_TOPOLOGY_IS_RECOVERED_HEARTBEAT_HISTORY_MS =
+ "[PhiAccrualDetector] Topology {} is recovered, heartbeat history (ms): {}";
+ public static final String PIPEHANDLELEADERCHANGEPROCEDURE_WAS_FAILED_TO_SUBMIT =
+ "PipeHandleLeaderChangeProcedure was failed to submit.";
+ public static final String PIPEHANDLELEADERCHANGEPROCEDURE_WAS_SUBMITTED_PROCEDUREID =
+ "PipeHandleLeaderChangeProcedure was submitted, procedureId: {}.";
+ public static final String PIPEHANDLEMETACHANGEPROCEDURE_WAS_FAILED_TO_SUBMIT =
+ "PipeHandleMetaChangeProcedure was failed to submit.";
+ public static final String PIPEHANDLEMETACHANGEPROCEDURE_WAS_SUBMITTED_PROCEDUREID =
+ "PipeHandleMetaChangeProcedure was submitted, procedureId: {}.";
+ public static final String PIPEHEARTBEAT_IS_STARTED_SUCCESSFULLY =
+ "PipeHeartbeat is started successfully.";
+ public static final String PIPEHEARTBEAT_IS_STOPPED_SUCCESSFULLY =
+ "PipeHeartbeat is stopped successfully.";
+ public static final String PIPEMETASYNCER_IS_STARTED_SUCCESSFULLY =
+ "PipeMetaSyncer is started successfully.";
+ public static final String PIPEMETASYNCER_IS_STOPPED_SUCCESSFULLY =
+ "PipeMetaSyncer is stopped successfully.";
+ public static final String PIPERUNTIMECONFIGNODEAGENT_STARTED =
+ "PipeRuntimeConfigNodeAgent started";
+ public static final String PIPERUNTIMECONFIGNODEAGENT_STOPPED =
+ "PipeRuntimeConfigNodeAgent stopped";
+ public static final String PIPERUNTIMECOORDINATOR_MEETS_ERROR_IN_UPDATING_PIPEMETAKEEPER =
+ "PipeRuntimeCoordinator meets error in updating pipeMetaKeeper, ";
+ public static final String PIPETASKCOORDINATORLOCK_IS_HELD_BY_ANOTHER_THREAD_SKIP_THIS_ROUND_OF =
+ "PipeTaskCoordinatorLock is held by another thread, skip this round of heartbeat to avoid procedure and rpc accumulation as much as possible";
+ public static final String PIPETASKCOORDINATORLOCK_IS_HELD_BY_ANOTHER_THREAD_SKIP_THIS_ROUND_OF_2 =
+ "PipeTaskCoordinatorLock is held by another thread, skip this round of sync to avoid procedure and rpc accumulation as much as possible";
+ public static final String PIPETASKCOORDINATOR_LOCK_ACQUIRED_BY_THREAD =
+ "PipeTaskCoordinator lock acquired by thread {}";
+ public static final String PIPETASKCOORDINATOR_LOCK_FAILED_TO_ACQUIRE_BY_THREAD_BECAUSE_OF_TIMEOUT =
+ "PipeTaskCoordinator lock failed to acquire by thread {} because of timeout";
+ public static final String PIPETASKCOORDINATOR_LOCK_RELEASED_BY_THREAD =
+ "PipeTaskCoordinator lock released by thread {}";
+ public static final String PIPETASKCOORDINATOR_LOCK_WAITING_FOR_THREAD =
+ "PipeTaskCoordinator lock waiting for thread {}";
+ public static final String PIPE_SNAPSHOT_DIR_FOUND_DELETING_IT =
+ "Pipe snapshot dir found, deleting it: {},";
+ public static final String PROCEDUREMANAGER_IS_STARTED_SUCCESSFULLY =
+ "ProcedureManager is started successfully.";
+ public static final String PROCEDUREMANAGER_IS_STOPPED_SUCCESSFULLY =
+ "ProcedureManager is stopped successfully.";
+ public static final String PROCEDURE_DETAILS_ARE = "[{}] procedure details are {}";
+ public static final String REBALANCEDATAALLOTTABLE_DATABASE =
+ "[ReBalanceDataAllotTable] Database: {}, ";
+ public static final String RECEIVED_PIPE_HEARTBEAT_REQUEST_FROM_CONFIG_COORDINATOR =
+ "Received pipe heartbeat request {} from config coordinator.";
+ public static final String RECEIVER_ID = "Receiver id = {}: {}";
+ public static final String RECEIVER_ID_EXCEPTION_ENCOUNTERED_WHILE_EXECUTING_PLAN =
+ "Receiver id = {}: Exception encountered while executing plan {}: ";
+ public static final String RECEIVER_ID_FAILURE_STATUS_ENCOUNTERED_WHILE_EXECUTING_PLAN =
+ "Receiver id = {}: Failure status encountered while executing plan {}: {}";
+ public static final String RECEIVER_ID_PERMISSION_CHECK_FAILED_WHILE_EXECUTING_PLAN =
+ "Receiver id = {}: Permission check failed while executing plan {}: {}";
+ public static final String RECEIVER_ID_UNSUPPORTED_PIPEREQUESTTYPE_ON_CONFIGNODE_RESPONSE_STATUS =
+ "Receiver id = {}: Unsupported PipeRequestType on ConfigNode, response status = {}.";
+ public static final String RECONSTRUCTREGION_SUBMIT_RECONSTRUCTREGIONPROCEDURE_SUCCESSFULLY =
+ "[ReconstructRegion] Submit ReconstructRegionProcedure successfully, {}";
+ public static final String REGIONCLEANER_IS_STARTED_SUCCESSFULLY =
+ "RegionCleaner is started successfully.";
+ public static final String REGIONCLEANER_IS_STOPPED_SUCCESSFULLY =
+ "RegionCleaner is stopped successfully.";
+ public static final String REGIONELECTION_THE_LEADER_OF_REGIONGROUPS_IS_ELECTED =
+ "[RegionElection] The leader of RegionGroups: {} is elected.";
+ public static final String REGIONELECTION_THE_LEADER_OF_REGIONGROUPS_IS_NOT_DETERMINED_AFTER_10 =
+ "[RegionElection] The leader of RegionGroups: {} is not determined after 10 heartbeat interval. Some function might fail.";
+ public static final String REGIONELECTION_WAIT_FOR_LEADER_ELECTION_OF_REGIONGROUPS =
+ "[RegionElection] Wait for leader election of RegionGroups: {}";
+ public static final String REGIONGROUPSTATISTICS_REGIONGROUP =
+ "[RegionGroupStatistics]\t RegionGroup {}: {} -> {}";
+ public static final String REGIONGROUPSTATISTICS_REGIONGROUPSTATISTICSMAP =
+ "[RegionGroupStatistics] RegionGroupStatisticsMap: ";
+ public static final String REGIONGROUPSTATISTICS_REGION_IN_DATANODE =
+ "[RegionGroupStatistics]\t Region in DataNode {}: {} -> {}";
+ public static final String REGIONGROUPSTATISTICS_REGION_IN_DATANODE_NULL =
+ "[RegionGroupStatistics]\t Region in DataNode {}: null -> {}";
+ public static final String REGIONGROUPSTATISTICS_REGION_IN_DATANODE_NULL_2 =
+ "[RegionGroupStatistics]\t Region in DataNode {}: {} -> null";
+ public static final String REGIONGROUPSTATUS_DOESN_T_EXIST =
+ "RegionGroupStatus %s doesn't exist.";
+ public static final String REGIONPRIORITY = "[RegionPriority]\t {}: {}->{}";
+ public static final String REGIONPRIORITY_REGIONPRIORITYMAP =
+ "[RegionPriority] RegionPriorityMap: ";
+ public static final String REGIONPRIORITY_THE_ROUTING_PRIORITY_OF_REGIONGROUPS_IS_CALCULATED =
+ "[RegionPriority] The routing priority of RegionGroups: {} is calculated.";
+ public static final String REGIONPRIORITY_THE_ROUTING_PRIORITY_OF_REGIONGROUPS_IS_NOT_DETERMINED_AFTER =
+ "[RegionPriority] The routing priority of RegionGroups: {} is not determined after 10 heartbeat interval. Some function might fail.";
+ public static final String REGIONPRIORITY_WAIT_FOR_REGION_PRIORITY_UPDATE_OF_REGIONGROUPS =
+ "[RegionPriority] Wait for Region priority update of RegionGroups: {}";
+ public static final String REGION_ID = "Region id "; // NOTE(review): trailing space, no {} placeholder — presumably the id is appended by string concatenation at the call site; confirm before normalizing
+ public static final String REMOVEREGIONPEER_SUBMIT_REMOVEREGIONPEERPROCEDURE_SUCCESSFULLY =
+ "[RemoveRegionPeer] Submit RemoveRegionPeerProcedure successfully: {}";
+ public static final String REMOVE_REGION_TARGET_DATANODE_NOT_FOUND_WILL_SIMPLY_CLEAN_UP =
+ "Remove region: Target DataNode {} not found, will simply clean up the partition table of region {} and do nothing else.";
+ public static final String REPORT_PIPERUNTIMEEXCEPTION_TO_LOCAL_PIPETASKMETA_EXCEPTION_MESSAGE =
+ "Report PipeRuntimeException to local PipeTaskMeta({}), exception message: {}";
+ public static final String RETRYFAILMISSIONS_SERVICE_IS_STARTED_SUCCESSFULLY =
+ "RetryFailMissions service is started successfully.";
+ public static final String RETRYFAILMISSIONS_SERVICE_IS_STOPPED_SUCCESSFULLY =
+ "RetryFailMissions service is stopped successfully.";
+ public static final String SERIALIZATION_FAILED_FOR_THE_ALTER_ENCODING_TIME_SERIES_PLAN_IN =
+ "Serialization failed for the alter encoding time series plan in pipe transmission, skip transfer";
+ public static final String SERIALIZATION_FAILED_FOR_THE_DELETE_LOGICAL_VIEW_PLAN_IN_PIPE =
+ "Serialization failed for the delete logical view plan in pipe transmission, skip transfer";
+ public static final String SERIALIZATION_FAILED_FOR_THE_DELETE_TIME_SERIES_PLAN_IN_PIPE =
+ "Serialization failed for the delete time series plan in pipe transmission, skip transfer";
+ public static final String SOMETHING_WRONG_HAPPENED_WHILE_CALLING_CONSENSUS_LAYER_S_CREATELOCALPEER_API =
+ "Something wrong happened while calling consensus layer's createLocalPeer API.";
+ public static final String SOME_PIPES_NEED_RESTARTING_WILL_RESTART_THEM_AFTER_THIS_SYNC =
+ "Some pipes need restarting, will restart them after this sync";
+ public static final String STARTEXECUTECQ_EXECUTE_CQ_ON_DATANODE_TIME_RANGE_IS_CURRENT_TIME =
+ "[StartExecuteCQ] execute CQ {} on DataNode[{}], time range is [{}, {}), current time is {}";
+ public static final String START_TO_ACTIVATE_UDF_IN_UDF_TABLE_ON_CONFIG_NODES =
+ "Start to activate UDF [{}] in UDF_Table on Config Nodes";
+ public static final String START_TO_ADD_UDF_IN_UDF_TABLE_ON_CONFIG_NODES =
+ "Start to add UDF [{}] in UDF_Table on Config Nodes";
+ public static final String START_TO_CREATE_REGION_ON_DATANODE =
+ "Start to create Region: {} on DataNode: {}";
+ public static final String START_TO_CREATE_UDF_ON_DATA_NODES_NEEDTOSAVEJAR =
+ "Start to create UDF [{}] on Data Nodes, needToSaveJar[{}]";
+ public static final String START_TO_DELETE_REGION_ON_DATANODE =
+ "Start to delete Region: {} on DataNode: {}";
+ public static final String START_TRANSFER_OF = "Start transfer of {}";
+ public static final String STOP_SUBMITTING_CQ_BECAUSE = "Stop submitting CQ {} because {}";
+ public static final String STOP_SUBMITTING_CQ_BECAUSE_CURRENT_NODE_IS_NOT_LEADER_OR =
+ "Stop submitting CQ {} because current node is not leader or current scheduled thread pool is shut down.";
+ public static final String SUBMITTED_ASYNC_CONSENSUS_PIPE_CREATION =
+ "Submitted async consensus pipe creation: {}";
+ public static final String SUBMITTED_ASYNC_CONSENSUS_PIPE_DROP =
+ "Submitted async consensus pipe drop: {}";
+ public static final String SUBMIT_REMOVEAINODEPROCEDURE_SUCCESSFULLY =
+ "Submit RemoveAINodeProcedure successfully, {}";
+ public static final String SUBMIT_REMOVECONFIGNODEPROCEDURE_SUCCESSFULLY =
+ "Submit RemoveConfigNodeProcedure successfully: {}";
+ public static final String SUBMIT_REMOVEDATANODESPROCEDURE_SUCCESSFULLY =
+ "Submit RemoveDataNodesProcedure successfully, {}";
+ public static final String SUBSCRIPTIONCOORDINATORLOCK_IS_HELD_BY_ANOTHER_THREAD_SKIP_THIS_ROUND_OF =
+ "SubscriptionCoordinatorLock is held by another thread, skip this round of sync to avoid procedure and rpc accumulation as much as possible";
+ public static final String SUBSCRIPTIONMETASYNCER_IS_STARTED_SUCCESSFULLY =
+ "SubscriptionMetaSyncer is started successfully.";
+ public static final String SUBSCRIPTIONMETASYNCER_IS_STOPPED_SUCCESSFULLY =
+ "SubscriptionMetaSyncer is stopped successfully.";
+ public static final String SUCCESSFULLY_TRANSFERRED_CONFIG_EVENT =
+ "Successfully transferred config event {}.";
+ public static final String SUCCESSFULLY_TRANSFERRED_CONFIG_REGION_SNAPSHOT =
+ "Successfully transferred config region snapshot {}.";
+ public static final String THERE_IS_NO_RUNNING_DATANODE_TO_EXECUTE_CQ =
+ "There is no RUNNING DataNode to execute CQ {}";
+ public static final String THE_CONFIGNODE_WILL_BE_SHUTDOWN_SOON_MARK_IT_AS_UNKNOWN =
+ "The ConfigNode-{} will be shutdown soon, mark it as Unknown";
+ public static final String THE_CONFIG_REGION_AIR_GAP_CONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING =
+ "The config region air gap connector does not support transferring single file piece bytes.";
+ public static final String THE_CONFIG_REGION_SINK_DOES_NOT_SUPPORT_TRANSFERRING_SINGLE_FILE =
+ "The config region sink does not support transferring single file piece req.";
+ public static final String THE_CONFIG_REGION_SNAPSHOTS_CANNOT_BE_PARSED =
+ "The config region snapshots %s cannot be parsed.";
+ public static final String THE_DATABASE_DOESN_T_EXIST_MAYBE_IT_HAS_BEEN_PRE =
+ "The Database: {} doesn't exist. Maybe it has been pre-deleted.";
+ public static final String THE_DATANODE_WILL_BE_SHUTDOWN_SOON_MARK_IT_AS_UNKNOWN =
+ "The DataNode-{} will be shutdown soon, mark it as Unknown";
+ public static final String THE_REMOVENODEREPLICASELECT_METHOD_OF_GREEDYREGIONGROUPALLOCATOR_IS_YET =
+ "The removeNodeReplicaSelect method of GreedyRegionGroupAllocator is yet to be implemented.";
+ public static final String THE_REMOVENODEREPLICASELECT_METHOD_OF_PARTITEGRAPHPLACEMENTREGIONGROUPALLOCATOR =
+ "The removeNodeReplicaSelect method of PartiteGraphPlacementRegionGroupAllocator is yet to be implemented.";
+ public static final String THE_REMOVE_DATANODE_REQUEST_CHECK_FAILED_REQ_CHECK_RESULT =
+ "The remove DataNode request check failed. req: {}, check result: {}";
+ public static final String TOPOLOGY_ASYMMETRIC_NETWORK_PARTITION_FROM_TO =
+ "[Topology] Asymmetric network partition from {} to {}";
+ public static final String TOPOLOGY_CLUSTER_TOPOLOGY_CHANGED_LATEST =
+ "[Topology] Cluster topology changed, latest: {}";
+ public static final String TOPOLOGY_PROBING_HAS_STARTED_SUCCESSFULLY =
+ "Topology Probing has started successfully";
+ public static final String TOPOLOGY_PROBING_HAS_STOPPED_SUCCESSFULLY =
+ "Topology Probing has stopped successfully";
+ public static final String TOPOLOGY_TOPOLOGY_OF_DATANODE_IS_NOW_TO_DATANODE =
+ "[Topology] Topology of DataNode {} is now {} to DataNode {}";
+ public static final String UNABLE_TO_PARSE_PATH_WHEN_CHECKING_READ_PRIVILEGE_PATH =
+ "Unable to parse path when checking READ privilege, path: {}";
+ public static final String UNEXPECTED_ERROR_HAPPENED_WHILE_CREATING_SERVICE_ON_DATANODE =
+ "Unexpected error happened while creating Service {} on DataNode {}: ";
+ public static final String UNEXPECTED_ERROR_HAPPENED_WHILE_DROPPING_CQ =
+ "Unexpected error happened while dropping cq {}: ";
+ public static final String UNEXPECTED_ERROR_HAPPENED_WHILE_DROPPING_SERVICE_ON_DATANODE =
+ "Unexpected error happened while dropping Service {} on DataNode {}: ";
+ public static final String UNEXPECTED_ERROR_HAPPENED_WHILE_FETCHING_CQ_LIST =
+ "Unexpected error happened while fetching cq list: ";
+ public static final String UNEXPECTED_ERROR_HAPPENED_WHILE_GETTING_USER_DEFINED_SERVICE =
+ "Unexpected error happened while getting user-defined Service: ";
+ public static final String UNEXPECTED_ERROR_HAPPENED_WHILE_SHOWING_CQ =
+ "Unexpected error happened while showing cq: ";
+ public static final String UNEXPECTED_ERROR_HAPPENED_WHILE_SHOWING_SERVICE =
+ "Unexpected error happened while showing Service: ";
+ public static final String UNEXPECTED_ERROR_HAPPENED_WHILE_STARTING_SERVICE_ON_DATANODE =
+ "Unexpected error happened while starting Service {} on DataNode {}: ";
+ public static final String UNEXPECTED_ERROR_HAPPENED_WHILE_STOPPING_SERVICE_ON_DATANODE =
+ "Unexpected error happened while stopping Service {} on DataNode {}: ";
+ public static final String UNEXPECTED_INTERRUPTION_DURING_RETRY_CREATING_PEER_FOR_CONSENSUS_GROUP =
+ "Unexpected interruption during retry creating peer for consensus group";
+ public static final String UNEXPECTED_INTERRUPTION_DURING_RETRY_GETTING_LATEST_REGION_ROUTE_MAP =
+ "Unexpected interruption during retry getting latest region route map";
+ public static final String UNEXPECTED_INTERRUPTION_DURING_WAITING_FOR_CONFIGNODE_LEADER_READY =
+ "Unexpected interruption during waiting for configNode leader ready.";
+ public static final String UNEXPECTED_INTERRUPTION_DURING_WAITING_FOR_GET_CLUSTER_ID =
+ "Unexpected interruption during waiting for get cluster id.";
+ public static final String UNEXPECTED_NULL_PROCEDURE_PARAMETERS_FOR_WAITINGPROCEDUREFINISHED =
+ "Unexpected null procedure parameters for waitingProcedureFinished";
+ public static final String UNKNOWN_DATAPARTITION_ALLOCATION_STRATEGY_USING_INHERIT_STRATEGY_BY_DEFAULT =
+ "Unknown DataPartition allocation strategy {}, using INHERIT strategy by default.";
+ public static final String UNKNOWN_TIMEOUTPOLICY = "Unknown TimeoutPolicy: "; // NOTE(review): trailing space, no {} placeholder — unlike the slf4j-style templates above; presumably concatenated by caller, verify
+ public static final String UN_PARSE_ABLE_PATH_NAME_ENCOUNTERED_DURING_TEMPLATE_PRIVILEGE_TRIMMING =
+ "Un-parse-able path name encountered during template privilege trimming, please check";
+ public static final String UPGRADE_CONFIGNODE_CONSENSUS_WAL_DIR_FOR_SIMPLECONSENSUS_FROM_VERSION_1 =
+ "upgrade ConfigNode consensus wal dir for SimpleConsensus from version/1.0 to version/1.1 failed, ";
+ public static final String WRITE_PARTITION_ALLOCATION_RESULT_FAILED_BECAUSE =
+ "Write partition allocation result failed because: {}";
+
+ private ManagerMessages() {} // constants-only holder: private ctor prevents instantiation
+}
diff --git a/iotdb-core/confignode/src/main/i18n/en/org/apache/iotdb/confignode/i18n/ProcedureMessages.java b/iotdb-core/confignode/src/main/i18n/en/org/apache/iotdb/confignode/i18n/ProcedureMessages.java
new file mode 100644
index 0000000000000..f6eaa68d16822
--- /dev/null
+++ b/iotdb-core/confignode/src/main/i18n/en/org/apache/iotdb/confignode/i18n/ProcedureMessages.java
@@ -0,0 +1,1002 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.i18n;
+
+public final class ProcedureMessages {
+
+ public static final String ADDNEVERFINISHSUBPROCEDUREPROCEDURE_RUN_AGAIN_WHICH_SHOULD_NEVER_HAPPEN =
+ "AddNeverFinishSubProcedureProcedure run again, which should never happen";
+ public static final String ADDREGIONLOCATION_FINISHED_ADD_REGION_TO_RESULT_IS =
+ "AddRegionLocation finished, add region {} to {}, result is {}";
+ public static final String ADDTABLECOLUMN_COSTS_MS = "AddTableColumn-{}.{}-{} costs {}ms";
+ public static final String ADD_COLUMN_TO_TABLE = "Add column to table {}.{}";
+ public static final String ADD_CONFIGNODE_FAILED = "Add ConfigNode failed "; // NOTE(review): trailing space and no {} placeholder — presumably detail text is concatenated by the caller; confirm
+ public static final String ALTERCONSUMERGROUPPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES =
+ "AlterConsumerGroupProcedure: executeFromOperateOnConfigNodes({})";
+ public static final String ALTERCONSUMERGROUPPROCEDURE_EXECUTEFROMOPERATEONDATANODES =
+ "AlterConsumerGroupProcedure: executeFromOperateOnDataNodes({})";
+ public static final String ALTERCONSUMERGROUPPROCEDURE_EXECUTEFROMVALIDATE_TRY_TO_VALIDATE =
+ "AlterConsumerGroupProcedure: executeFromValidate, try to validate";
+ public static final String ALTERCONSUMERGROUPPROCEDURE_ROLLBACKFROMOPERATEONCONFIGNODES =
+ "AlterConsumerGroupProcedure: rollbackFromOperateOnConfigNodes({})";
+ public static final String ALTERCONSUMERGROUPPROCEDURE_ROLLBACKFROMOPERATEONDATANODES =
+ "AlterConsumerGroupProcedure: rollbackFromOperateOnDataNodes";
+ public static final String ALTERCONSUMERGROUPPROCEDURE_ROLLBACKFROMVALIDATE =
+ "AlterConsumerGroupProcedure: rollbackFromValidate";
+ public static final String ALTERENCODINGCOMPRESSOR_COSTS_MS =
+ "AlterEncodingCompressor-[{}] costs {}ms";
+ public static final String ALTERING_COLUMN_IN_ON_CONFIGNODE =
+ "Altering column {} in {}.{} on configNode";
+ public static final String ALTERING_TIME_SERIES_DATA_TYPE = "altering time series {} data type";
+ public static final String ALTERLOGICALVIEW_COSTS_MS = "AlterLogicalView-[{}] costs {}ms";
+ public static final String ALTERPIPEPROCEDUREV2_EXECUTEFROMCALCULATEINFOFORTASK =
+ "AlterPipeProcedureV2: executeFromCalculateInfoForTask({})";
+ public static final String ALTERPIPEPROCEDUREV2_EXECUTEFROMOPERATEONDATANODES =
+ "AlterPipeProcedureV2: executeFromOperateOnDataNodes({})";
+ public static final String ALTERPIPEPROCEDUREV2_EXECUTEFROMVALIDATETASK =
+ "AlterPipeProcedureV2: executeFromValidateTask({})";
+ public static final String ALTERPIPEPROCEDUREV2_EXECUTEFROMWRITECONFIGNODECONSENSUS =
+ "AlterPipeProcedureV2: executeFromWriteConfigNodeConsensus({})";
+ public static final String ALTERPIPEPROCEDUREV2_ROLLBACKFROMCALCULATEINFOFORTASK =
+ "AlterPipeProcedureV2: rollbackFromCalculateInfoForTask({})";
+ public static final String ALTERPIPEPROCEDUREV2_ROLLBACKFROMOPERATEONDATANODES =
+ "AlterPipeProcedureV2: rollbackFromOperateOnDataNodes({})";
+ public static final String ALTERPIPEPROCEDUREV2_ROLLBACKFROMVALIDATETASK =
+ "AlterPipeProcedureV2: rollbackFromValidateTask({})";
+ public static final String ALTERPIPEPROCEDUREV2_ROLLBACKFROMWRITECONFIGNODECONSENSUS =
+ "AlterPipeProcedureV2: rollbackFromWriteConfigNodeConsensus({})";
+ public static final String ALTERTABLECOLUMNDATATYPE_COSTS_MS =
+ "AlterTableColumnDataType-{}.{}-{} costs {}ms";
+ public static final String ALTERTIMESERIESDATATYPE_COSTS_MS =
+ "AlterTimeSeriesDataType-{}-[{}] costs {}ms";
+ public static final String ALTERTOPICPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES_TRY_TO_ALTER_TOPIC =
+ "AlterTopicProcedure: executeFromOperateOnConfigNodes, try to alter topic";
+ public static final String ALTERTOPICPROCEDURE_EXECUTEFROMOPERATEONDATANODES =
+ "AlterTopicProcedure: executeFromOperateOnDataNodes({})";
+ public static final String ALTERTOPICPROCEDURE_EXECUTEFROMVALIDATE =
+ "AlterTopicProcedure: executeFromValidate";
+ public static final String ALTERTOPICPROCEDURE_ROLLBACKFROMOPERATEONCONFIGNODES =
+ "AlterTopicProcedure: rollbackFromOperateOnConfigNodes({})";
+ public static final String ALTERTOPICPROCEDURE_ROLLBACKFROMOPERATEONDATANODES =
+ "AlterTopicProcedure: rollbackFromOperateOnDataNodes({})";
+ public static final String ALTERTOPICPROCEDURE_ROLLBACKFROMVALIDATE =
+ "AlterTopicProcedure: rollbackFromValidate({})";
+ public static final String ALTER_ENCODING_COMPRESSOR_IN_SCHEMA_REGIONS_FAILED_FAILURES =
+ "Alter encoding compressor %s in schema regions failed. Failures: %s";
+ public static final String ALTER_ENCODING_COMPRESSOR_IN_SCHEMA_REGION_FOR_TIMESERIES =
+ "Alter encoding {} & compressor {} in schema region for timeSeries {}";
+ public static final String ALTER_TIMESERIES_DATA_TYPE_TO_IN_SCHEMA_REGIONS_FAILED_FAILURES =
+ "Alter timeseries %s data type to %s in schema regions failed. Failures: %s";
+ public static final String ALTER_TIME_SERIES_DATA_TYPE_FAILED =
+ "alter time series {} data type failed";
+ public static final String ALTER_VIEW = "Alter view {}";
+ public static final String ALTER_VIEW_FAILED_WHEN_BECAUSE_FAILED_TO_EXECUTE_IN_ALL =
+ "Alter view %s failed when [%s] because failed to execute in all replicaset of schemaRegion %s. Failure nodes: %s, statuses: %s";
+ public static final String AUTHENTICATION_FAILED = "Authentication failed.";
+ public static final String AUTH_PROCEDURE_CLEAN_DATANODE_CACHE_SUCCESSFULLY =
+ "Auth procedure: clean datanode cache successfully";
+ public static final String BEGIN_TO_CHANGE_DATANODE_STATUS_NODESTATUSMAP =
+ "{}, Begin to change DataNode status, nodeStatusMap: {}";
+ public static final String BEGIN_TO_STOP_DATANODES_AND_KILL_THE_DATANODE_PROCESS =
+ "{}, Begin to stop DataNodes and kill the DataNode process: {}";
+ public static final String BROADCASTDATANODESTATUSCHANGE_FINISHED_DATANODE =
+ "{}, BroadcastDataNodeStatusChange finished, dataNode: {}";
+ public static final String BROADCASTDATANODESTATUSCHANGE_MEETS_ERROR_STATUS_CHANGE_DATANODES_ERROR_DATANODE =
+ "{}, BroadcastDataNodeStatusChange meets error, status change dataNodes: {}, error datanode: {}";
+ public static final String BROADCASTDATANODESTATUSCHANGE_START_DATANODE =
+ "{}, BroadcastDataNodeStatusChange start, dataNode: {}";
+ public static final String CALL_CHANGEREGIONLEADER_FAIL_FOR_THE_TIME_WILL_SLEEP_MS =
+ "Call changeRegionLeader fail for the {} time, will sleep {} ms";
+ public static final String CANNOT_FIND_DATANODES_CONTAIN_THE_GIVEN_REGION =
+ "Cannot find DataNodes contain the given region: {}";
+ public static final String CANNOT_FIND_REGION_REPLICA_NODES_IN_CREATEPEER_REGIONID =
+ "{}, Cannot find region replica nodes in createPeer, regionId: {}";
+ public static final String CANNOT_FIND_REGION_REPLICA_NODES_REGION =
+ "Cannot find region replica nodes, region: {}";
+ public static final String CATCH_EXCEPTION_WHILE_DESERIALIZING_PROCEDURE_THIS_PROCEDURE_WILL_BE_IGNORED =
+ "Catch exception while deserializing procedure, this procedure will be ignored.";
+ public static final String CHANGE_REGION_LEADER_FINISHED_REGIONID_NEWLEADERNODE =
+ "{}, Change region leader finished, regionId: {}, newLeaderNode: {}";
+ public static final String CHECK_AND_INVALIDATE_COLUMN_IN_WHEN_ALTERING_COLUMN_DATA_TYPE =
+ "Check and invalidate column {} in {}.{} when altering column data type";
+ public static final String CHECK_AND_INVALIDATE_COLUMN_IN_WHEN_DROPPING_COLUMN =
+ "Check and invalidate column {} in {}.{} when dropping column";
+ public static final String CHECK_AND_INVALIDATE_SERIES_WHEN_ALTERING_TIME_SERIES_DATA_TYPE =
+ "Check and invalidate series {} when altering time series data type";
+ public static final String CHECK_AND_INVALIDATE_TABLE_WHEN_DROPPING_TABLE =
+ "Check and invalidate table {}.{} when dropping table";
+ public static final String CHECK_DATANODE_TEMPLATE_ACTIVATION_OF_TEMPLATE_SET_ON =
+ "Check DataNode template activation of template {} set on {}";
+ public static final String CHECK_TEMPLATE_EXISTENCE_SET_ON_PATH_WHEN_TRY_SETTING_TEMPLATE =
+ "Check template existence set on path {} when try setting template {}";
+ public static final String CHECK_THE_EXISTENCE_OF_TABLE = "Check the existence of table {}.{}";
+ public static final String CHECK_TIMESERIES_EXISTENCE_UNDER_PATH_WHEN_TRY_SETTING_TEMPLATE =
+ "Check timeseries existence under path {} when try setting template {}";
+ public static final String CLEARING_CACHE_AFTER_ALTER_TIME_SERIES_DATA_TYPE =
+ "clearing cache after alter time series {} data type";
+ public static final String COLUMN_CHECK_FOR_TABLE_WHEN_ADDING_COLUMN =
+ "Column check for table {}.{} when adding column";
+ public static final String COLUMN_CHECK_FOR_TABLE_WHEN_RENAMING_COLUMN =
+ "Column check for table {}.{} when renaming column";
+ public static final String COLUMN_CHECK_FOR_TABLE_WHEN_RENAMING_TABLE =
+ "Column check for table {}.{} when renaming table";
+ public static final String COMMIT_CREATE_TABLE = "Commit create table {}.{}";
+ public static final String COMMIT_RELEASE_INFO_OF_TABLE_WHEN_ADDING_COLUMN =
+ "Commit release info of table {}.{} when adding column";
+ public static final String COMMIT_RELEASE_INFO_OF_TABLE_WHEN_ALTERING_COLUMN =
+ "Commit release info of table {}.{} when altering column";
+ public static final String COMMIT_RELEASE_INFO_OF_TABLE_WHEN_RENAMING_COLUMN =
+ "Commit release info of table {}.{} when renaming column";
+ public static final String COMMIT_RELEASE_INFO_OF_TABLE_WHEN_RENAMING_TABLE =
+ "Commit release info of table {}.{} when renaming table";
+ public static final String COMMIT_RELEASE_INFO_OF_TABLE_WHEN_SETTING_PROPERTIES =
+ "Commit release info of table {}.{} when setting properties";
+ public static final String COMMIT_RELEASE_SCHEMAENGINE_TEMPLATE_SET_ON_PATH =
+ "Commit release schemaengine template {} set on path {}";
+ public static final String COMMIT_RELEASE_TABLE = "Commit release table {}.{}";
+ public static final String COMMIT_SET_SCHEMAENGINE_TEMPLATE_ON_PATH =
+ "Commit set schemaengine template {} on path {}";
+ public static final String CONSENSUSPIPEGUARDIAN_CONSENSUS_PIPE_IS_STOPPED_RESTARTING_ASYNCHRONOUSLY =
+ "[ConsensusPipeGuardian] consensus pipe [{}] is stopped, restarting asynchronously";
+ public static final String CONSENSUSPIPEGUARDIAN_CONSENSUS_PIPE_MISSING_CREATING_ASYNCHRONOUSLY =
+ "[ConsensusPipeGuardian] consensus pipe [{}] missing, creating asynchronously";
+ public static final String CONSENSUSPIPEGUARDIAN_UNEXPECTED_CONSENSUS_PIPE_EXISTS_DROPPING_ASYNCHRONOUSLY =
+ "[ConsensusPipeGuardian] unexpected consensus pipe [{}] exists, dropping asynchronously";
+ public static final String CONSTRUCT_SCHEMAENGINE_BLACK_LIST_OF_DEVICES_IN =
+ "Construct schemaEngine black list of devices in {}.{}";
+ public static final String CONSTRUCT_SCHEMAENGINE_BLACK_LIST_OF_TEMPLATE_SET_ON =
+ "Construct schemaengine black list of template {} set on {}";
+ public static final String CONSTRUCT_SCHEMAENGINE_BLACK_LIST_OF_TIMESERIES =
+ "Construct schemaEngine black list of timeSeries {}";
+ public static final String CONSTRUCT_SCHEMA_BLACK_LIST_WITH_TEMPLATE =
+ "Construct schema black list with template {}";
+ public static final String CONSTRUCT_VIEW_SCHEMAENGINE_BLACK_LIST_OF_VIEW =
+ "Construct view schemaengine black list of view {}";
+ public static final String CONSUMERGROUPMETASYNCPROCEDURE_ACQUIRELOCK_SKIP_THE_PROCEDURE_DUE_TO =
+ "ConsumerGroupMetaSyncProcedure: acquireLock, skip the procedure due to the last execution time {}";
+ public static final String CONSUMERGROUPMETASYNCPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES =
+ "ConsumerGroupMetaSyncProcedure: executeFromOperateOnConfigNodes";
+ public static final String CONSUMERGROUPMETASYNCPROCEDURE_EXECUTEFROMOPERATEONDATANODES =
+ "ConsumerGroupMetaSyncProcedure: executeFromOperateOnDataNodes";
+ public static final String CONSUMERGROUPMETASYNCPROCEDURE_EXECUTEFROMVALIDATE =
+ "ConsumerGroupMetaSyncProcedure: executeFromValidate";
+ public static final String CONSUMERGROUPMETASYNCPROCEDURE_ROLLBACKFROMOPERATEONCONFIGNODES =
+ "ConsumerGroupMetaSyncProcedure: rollbackFromOperateOnConfigNodes";
+ public static final String CONSUMERGROUPMETASYNCPROCEDURE_ROLLBACKFROMOPERATEONDATANODES =
+ "ConsumerGroupMetaSyncProcedure: rollbackFromOperateOnDataNodes";
+ public static final String CONSUMERGROUPMETASYNCPROCEDURE_ROLLBACKFROMVALIDATE =
+ "ConsumerGroupMetaSyncProcedure: rollbackFromValidate";
+ public static final String CREATEDATABASE_FAIL_TWICE = "createDatabase fail twice";
+ public static final String CREATED_CONSENSUS_PIPE = "{}, Created consensus pipe {}";
+ public static final String CREATEPIPEPLUGINPROCEDURE_EXECUTEFROMCREATEONCONFIGNODES =
+ "CreatePipePluginProcedure: executeFromCreateOnConfigNodes({})";
+ public static final String CREATEPIPEPLUGINPROCEDURE_EXECUTEFROMCREATEONDATANODES =
+ "CreatePipePluginProcedure: executeFromCreateOnDataNodes({})";
+ public static final String CREATEPIPEPLUGINPROCEDURE_EXECUTEFROMLOCK =
+ "CreatePipePluginProcedure: executeFromLock({})";
+ public static final String CREATEPIPEPLUGINPROCEDURE_EXECUTEFROMUNLOCK =
+ "CreatePipePluginProcedure: executeFromUnlock({})";
+ public static final String CREATEPIPEPLUGINPROCEDURE_FAILED_IN_STATE_WILL_ROLLBACK =
+ "CreatePipePluginProcedure failed in state {}, will rollback";
+ public static final String CREATEPIPEPLUGINPROCEDURE_ROLLBACKFROMCREATEONCONFIGNODES =
+ "CreatePipePluginProcedure: rollbackFromCreateOnConfigNodes({})";
+ public static final String CREATEPIPEPLUGINPROCEDURE_ROLLBACKFROMCREATEONDATANODES =
+ "CreatePipePluginProcedure: rollbackFromCreateOnDataNodes({})";
+ public static final String CREATEPIPEPLUGINPROCEDURE_ROLLBACKFROMLOCK =
+ "CreatePipePluginProcedure: rollbackFromLock({})";
+ public static final String CREATEPIPEPROCEDUREV2_EXECUTEFROMCALCULATEINFOFORTASK =
+ "CreatePipeProcedureV2: executeFromCalculateInfoForTask({})";
+ public static final String CREATEPIPEPROCEDUREV2_EXECUTEFROMOPERATEONDATANODES =
+ "CreatePipeProcedureV2: executeFromOperateOnDataNodes({})";
+ public static final String CREATEPIPEPROCEDUREV2_EXECUTEFROMVALIDATETASK =
+ "CreatePipeProcedureV2: executeFromValidateTask({})";
+ public static final String CREATEPIPEPROCEDUREV2_EXECUTEFROMWRITECONFIGNODECONSENSUS =
+ "CreatePipeProcedureV2: executeFromWriteConfigNodeConsensus({})";
+ public static final String CREATEPIPEPROCEDUREV2_ROLLBACKFROMCALCULATEINFOFORTASK =
+ "CreatePipeProcedureV2: rollbackFromCalculateInfoForTask({})";
+ public static final String CREATEPIPEPROCEDUREV2_ROLLBACKFROMOPERATEONDATANODES =
+ "CreatePipeProcedureV2: rollbackFromOperateOnDataNodes({})";
+ public static final String CREATEPIPEPROCEDUREV2_ROLLBACKFROMVALIDATETASK =
+ "CreatePipeProcedureV2: rollbackFromValidateTask({})";
+ public static final String CREATEPIPEPROCEDUREV2_ROLLBACKFROMWRITECONFIGNODECONSENSUS =
+ "CreatePipeProcedureV2: rollbackFromWriteConfigNodeConsensus({})";
+ public static final String CREATEREGIONGROUPS_ALL_REPLICAS_OF_REGIONGROUP_ARE_CREATED_SUCCESSFULLY =
+ "[CreateRegionGroups] All replicas of RegionGroup: {} are created successfully!";
+ public static final String CREATEREGIONGROUPS_FAILED_TO_CREATE_MOST_OF_REPLICAS_IN_REGIONGROUP_THE =
+ "[CreateRegionGroups] Failed to create most of replicas in RegionGroup: {}, The redundant replicas in this RegionGroup will be deleted.";
+ public static final String CREATEREGIONGROUPS_FAILED_TO_CREATE_SOME_REPLICAS_OF_REGIONGROUP_BUT_THIS =
+ "[CreateRegionGroups] Failed to create some replicas of RegionGroup: {}, but this RegionGroup can still be used.";
+ public static final String CREATESUBSCRIPTIONPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES =
+ "CreateSubscriptionProcedure: executeFromOperateOnConfigNodes";
+ public static final String CREATESUBSCRIPTIONPROCEDURE_EXECUTEFROMOPERATEONDATANODES =
+ "CreateSubscriptionProcedure: executeFromOperateOnDataNodes";
+ public static final String CREATESUBSCRIPTIONPROCEDURE_EXECUTEFROMVALIDATE =
+ "CreateSubscriptionProcedure: executeFromValidate";
+ public static final String CREATESUBSCRIPTIONPROCEDURE_ROLLBACKFROMOPERATEONCONFIGNODES =
+ "CreateSubscriptionProcedure: rollbackFromOperateOnConfigNodes";
+ public static final String CREATESUBSCRIPTIONPROCEDURE_ROLLBACKFROMOPERATEONDATANODES =
+ "CreateSubscriptionProcedure: rollbackFromOperateOnDataNodes";
+ public static final String CREATESUBSCRIPTIONPROCEDURE_ROLLBACKFROMVALIDATE =
+ "CreateSubscriptionProcedure: rollbackFromValidate";
+ public static final String CREATETABLE_COSTS_MS = "CreateTable-{}.{}-{} costs {}ms";
+ public static final String CREATETOPICPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES =
+ "CreateTopicProcedure: executeFromOperateOnConfigNodes({})";
+ public static final String CREATETOPICPROCEDURE_EXECUTEFROMOPERATEONDATANODES =
+ "CreateTopicProcedure: executeFromOperateOnDataNodes({})";
+ public static final String CREATETOPICPROCEDURE_EXECUTEFROMVALIDATE =
+ "CreateTopicProcedure: executeFromValidate";
+ public static final String CREATETOPICPROCEDURE_ROLLBACKFROMCREATEONCONFIGNODES =
+ "CreateTopicProcedure: rollbackFromCreateOnConfigNodes({})";
+ public static final String CREATETOPICPROCEDURE_ROLLBACKFROMCREATEONDATANODES =
+ "CreateTopicProcedure: rollbackFromCreateOnDataNodes({})";
+ public static final String CREATETOPICPROCEDURE_ROLLBACKFROMVALIDATE =
+ "CreateTopicProcedure: rollbackFromValidate({})";
+ public static final String DATANODE_IS_SUBMIT_DELETE_OLD_REGION_PEER_WITH_A_SINGLE =
+ "{}, DataNode {} is {}, submit DELETE_OLD_REGION_PEER with a single RPC attempt and let RemoveRegionPeerProcedure handle retries.";
+ public static final String DEACTIVATETEMPLATE_COSTS_MS = "DeactivateTemplate-[{}] costs {}ms";
+ public static final String DEACTIVATE_TEMPLATE_OF = "Deactivate template of {}";
+ public static final String DEACTIVATE_TEMPLATE_OF_FAILED_WHEN_BECAUSE_FAILED_TO_EXECUTE_IN =
+ "Deactivate template of %s failed when [%s] because failed to execute in all replicaset of %s %s. Failure: %s";
+ public static final String DELETEDATABASEPROCEDURE_DELETE_DATABASE =
+ "[DeleteDatabaseProcedure] Delete database ";
+ public static final String DELETEDATABASEPROCEDURE_DELETE_DATABASESCHEMA_FAILED =
+ "[DeleteDatabaseProcedure] Delete DatabaseSchema failed";
+ public static final String DELETEDATABASEPROCEDURE_INVALIDATE_CACHE_FAILED =
+ "[DeleteDatabaseProcedure] Invalidate cache failed";
+ public static final String DELETEDATABASEPROCEDURE_STATE_STUCK_AT =
+ "[DeleteDatabaseProcedure] State stuck at ";
+ public static final String DELETEDEVICES_COSTS_MS = "DeleteDevices-[{}] costs {}ms";
+ public static final String DELETELOGICALVIEW_COSTS_MS = "DeleteLogicalView-[{}] costs {}ms";
+ public static final String DELETETIMESERIES_COSTS_MS = "DeleteTimeSeries-[{}] costs {}ms";
+ public static final String DELETE_DATA_OF_DEVICES_IN = "Delete data of devices in {}.{}";
+ public static final String DELETE_DATA_OF_TEMPLATE_TIMESERIES =
+ "Delete data of template timeSeries {}";
+ public static final String DELETE_DATA_OF_TIMESERIES = "Delete data of timeSeries {}";
+ public static final String DELETE_DEVICES_IN_IN_SCHEMAENGINE =
+ "Delete devices in {}.{} in schemaEngine";
+ public static final String DELETE_TIMESERIES_SCHEMAENGINE_OF =
+ "Delete timeSeries schemaEngine of {}";
+ public static final String DELETE_TIME_SERIES_FAILED_WHEN_BECAUSE_FAILED_TO_EXECUTE_IN =
+ "Delete time series %s failed when [%s] because failed to execute in all replicaset of %s %s. Failures: %s";
+ public static final String DELETE_VIEW_FAILED_WHEN_BECAUSE_FAILED_TO_EXECUTE_IN_ALL =
+ "Delete view %s failed when [%s] because failed to execute in all replicaset of schemaRegion %s. Failures: %s";
+ public static final String DELETE_VIEW_SCHEMAENGINE_OF = "Delete view schemaengine of {}";
+ public static final String DELETING_DATA_FOR_TABLE = "Deleting data for table {}.{}";
+ public static final String DELETING_DEVICES_FOR_TABLE_WHEN_DROPPING_TABLE =
+ "Deleting devices for table {}.{} when dropping table";
+ public static final String DESERIALIZE_MEETS_ERROR_IN_CREATEREGIONGROUPSPROCEDURE =
+ "Deserialize meets error in CreateRegionGroupsProcedure";
+ public static final String DROPPING_COLUMN_IN_ON_CONFIGNODE =
+ "Dropping column {} in {}.{} on configNode";
+ public static final String DROPPING_TABLE_ON_CONFIGNODE = "Dropping table {}.{} on configNode";
+ public static final String DROPPIPEPLUGINPROCEDURE_EXECUTEFROMDROPONCONFIGNODES =
+ "DropPipePluginProcedure: executeFromDropOnConfigNodes({})";
+ public static final String DROPPIPEPLUGINPROCEDURE_EXECUTEFROMDROPONDATANODES =
+ "DropPipePluginProcedure: executeFromDropOnDataNodes({})";
+ public static final String DROPPIPEPLUGINPROCEDURE_EXECUTEFROMLOCK =
+ "DropPipePluginProcedure: executeFromLock({})";
+ public static final String DROPPIPEPLUGINPROCEDURE_EXECUTEFROMUNLOCK =
+ "DropPipePluginProcedure: executeFromUnlock({})";
+ public static final String DROPPIPEPLUGINPROCEDURE_FAILED_IN_STATE_WILL_ROLLBACK =
+ "DropPipePluginProcedure failed in state {}, will rollback";
+ public static final String DROPPIPEPLUGINPROCEDURE_ROLLBACKFROMDROPONCONFIGNODES =
+ "DropPipePluginProcedure: rollbackFromDropOnConfigNodes({})";
+ public static final String DROPPIPEPLUGINPROCEDURE_ROLLBACKFROMDROPONDATANODES =
+ "DropPipePluginProcedure: rollbackFromDropOnDataNodes({})";
+ public static final String DROPPIPEPLUGINPROCEDURE_ROLLBACKFROMLOCK =
+ "DropPipePluginProcedure: rollbackFromLock({})";
+ public static final String DROPPIPEPROCEDUREV2_EXECUTEFROMCALCULATEINFOFORTASK =
+ "DropPipeProcedureV2: executeFromCalculateInfoForTask({})";
+ public static final String DROPPIPEPROCEDUREV2_EXECUTEFROMOPERATEONDATANODES =
+ "DropPipeProcedureV2: executeFromOperateOnDataNodes({})";
+ public static final String DROPPIPEPROCEDUREV2_EXECUTEFROMVALIDATETASK =
+ "DropPipeProcedureV2: executeFromValidateTask({})";
+ public static final String DROPPIPEPROCEDUREV2_EXECUTEFROMWRITECONFIGNODECONSENSUS =
+ "DropPipeProcedureV2: executeFromWriteConfigNodeConsensus({})";
+ public static final String DROPPIPEPROCEDUREV2_ROLLBACKFROMCALCULATEINFOFORTASK =
+ "DropPipeProcedureV2: rollbackFromCalculateInfoForTask({})";
+ public static final String DROPPIPEPROCEDUREV2_ROLLBACKFROMOPERATEONDATANODES =
+ "DropPipeProcedureV2: rollbackFromOperateOnDataNodes({})";
+ public static final String DROPPIPEPROCEDUREV2_ROLLBACKFROMVALIDATETASK =
+ "DropPipeProcedureV2: rollbackFromValidateTask({})";
+ public static final String DROPPIPEPROCEDUREV2_ROLLBACKFROMWRITECONFIGNODECONSENSUS =
+ "DropPipeProcedureV2: rollbackFromWriteConfigNodeConsensus({})";
+ public static final String DROPSUBSCRIPTIONPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES =
+ "DropSubscriptionProcedure: executeFromOperateOnConfigNodes";
+ public static final String DROPSUBSCRIPTIONPROCEDURE_EXECUTEFROMOPERATEONDATANODES =
+ "DropSubscriptionProcedure: executeFromOperateOnDataNodes";
+ public static final String DROPSUBSCRIPTIONPROCEDURE_EXECUTEFROMVALIDATE =
+ "DropSubscriptionProcedure: executeFromValidate";
+ public static final String DROPSUBSCRIPTIONPROCEDURE_ROLLBACKFROMLOCK =
+ "DropSubscriptionProcedure: rollbackFromLock";
+ public static final String DROPSUBSCRIPTIONPROCEDURE_ROLLBACKFROMOPERATEONCONFIGNODES =
+ "DropSubscriptionProcedure: rollbackFromOperateOnConfigNodes";
+ public static final String DROPSUBSCRIPTIONPROCEDURE_ROLLBACKFROMOPERATEONDATANODES =
+ "DropSubscriptionProcedure: rollbackFromOperateOnDataNodes";
+ public static final String DROPTABLECOLUMN_COSTS_MS = "DropTableColumn-{}.{}-{} costs {}ms";
+ public static final String DROPTABLE_COSTS_MS = "DropTable-{}.{}-{} costs {}ms";
+ public static final String DROPTOPICPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES =
+ "DropTopicProcedure: executeFromOperateOnConfigNodes({})";
+ public static final String DROPTOPICPROCEDURE_EXECUTEFROMOPERATEONDATANODES =
+ "DropTopicProcedure: executeFromOperateOnDataNodes({})";
+ public static final String DROPTOPICPROCEDURE_EXECUTEFROMVALIDATE =
+ "DropTopicProcedure: executeFromValidate({})";
+ public static final String DROPTOPICPROCEDURE_ROLLBACKFROMCREATEONCONFIGNODES =
+ "DropTopicProcedure: rollbackFromCreateOnConfigNodes({})";
+ public static final String DROPTOPICPROCEDURE_ROLLBACKFROMCREATEONDATANODES =
+ "DropTopicProcedure: rollbackFromCreateOnDataNodes({})";
+ public static final String DROPTOPICPROCEDURE_ROLLBACKFROMVALIDATE =
+ "DropTopicProcedure: rollbackFromValidate({})";
+ public static final String ERROR_IN_DESERIALIZE = "Error in deserialize {}";
+ public static final String ERROR_IN_DESERIALIZE_PROCID_THIS_PROCEDURE_WILL_BE_IGNORED_IT =
+ "Error in deserialize {} (procID {}). This procedure will be ignored. It may belong to old version and cannot be used now.";
+ public static final String EXECUTE_AUTH_PLAN_SUCCESS_TO_INVALIDATE_DATANODES =
+ "Execute auth plan {} success. To invalidate datanodes: {}";
+ public static final String EXECUTING_ON_REGION_FOR_COLUMN_IN_WHEN_DROPPING_COLUMN =
+ "Executing on region for column {} in {}.{} when dropping column";
+ public static final String FAILED_TO_ACTIVE_CQ_BECAUSE_OF_NO_SUCH_CQ =
+ "Failed to active CQ {} because of no such cq: {}";
+ public static final String FAILED_TO_ACTIVE_CQ_BECAUSE_THIS_CQ_HAS_ALREADY_BEEN =
+ "Failed to active CQ {} because this cq has already been active";
+ public static final String FAILED_TO_ACTIVE_CQ_SUCCESSFULLY_BECAUSE_OF_UNKNOWN_REASONS =
+ "Failed to active CQ {} successfully because of unknown reasons {}";
+ public static final String FAILED_TO_ALTER_CONSUMER_GROUP_ON_CONFIG_NODES_BECAUSE =
+ "Failed to alter consumer group %s on config nodes, because %s";
+ public static final String FAILED_TO_ALTER_CONSUMER_GROUP_ON_DATA_NODES_BECAUSE =
+ "Failed to alter consumer group (%s -> %s) on data nodes, because %s";
+ public static final String FAILED_TO_ALTER_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED_LATER =
+ "Failed to alter pipe {}, details: {}, metadata will be synchronized later.";
+ public static final String FAILED_TO_ALTER_TOPIC_ON_CONFIG_NODES_BECAUSE =
+ "Failed to alter topic (%s -> %s) on config nodes, because %s";
+ public static final String FAILED_TO_ALTER_TOPIC_ON_DATA_NODES_BECAUSE =
+ "Failed to alter topic (%s -> %s) on data nodes, because %s";
+ public static final String FAILED_TO_CHANGE_DATANODE_STATUS_DATANODEID_NODESTATUS =
+ "{}, Failed to change DataNode status, dataNodeId={}, nodeStatus={}";
+ public static final String FAILED_TO_COMMIT_SET_TEMPLATE_ON_PATH_DUE_TO =
+ "Failed to commit set template {} on path {} due to {}";
+ public static final String FAILED_TO_CREATE_CONSENSUS_PIPE =
+ "{}, Failed to create consensus pipe {}: {}";
+ public static final String FAILED_TO_CREATE_PIPES_WHEN_CREATING_SUBSCRIPTION_WITH_REQUEST_DETAILS =
+ "Failed to create pipes %s when creating subscription with request %s, details: %s, metadata will be synchronized later.";
+ public static final String FAILED_TO_CREATE_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED_LATER =
+ "Failed to create pipe {}, details: {}, metadata will be synchronized later.";
+ public static final String FAILED_TO_CREATE_PIPE_PLUGIN_INSTANCE_ON_DATA_NODES =
+ "Failed to create pipe plugin instance [%s] on data nodes";
+ public static final String FAILED_TO_CREATE_SUBSCRIPTION_WITH_REQUEST_ON_CONFIG_NODES_BECAUSE =
+ "Failed to create subscription with request %s on config nodes, because %s";
+ public static final String FAILED_TO_CREATE_TOPIC_ON_CONFIG_NODES_BECAUSE =
+ "Failed to create topic %s on config nodes, because %s";
+ public static final String FAILED_TO_CREATE_TOPIC_ON_DATA_NODES_BECAUSE =
+ "Failed to create topic %s on data nodes, because %s";
+ public static final String FAILED_TO_DESERIALIZE_DATAPARTITIONTABLES =
+ "Failed to deserialize dataPartitionTables";
+ public static final String FAILED_TO_DESERIALIZE_FINALDATAPARTITIONTABLES =
+ "Failed to deserialize finalDataPartitionTables";
+ public static final String FAILED_TO_DO_INACTIVE_ROLLBACK_OF_CQ_BECAUSE_OF_NO =
+ "Failed to do [INACTIVE] rollback of CQ {} because of no such cq: {}";
+ public static final String FAILED_TO_DO_INACTIVE_ROLLBACK_OF_CQ_BECAUSE_OF_UNKNOWN =
+ "Failed to do [INACTIVE] rollback of CQ {} because of unknown reasons {}";
+ public static final String FAILED_TO_DROP_PIPES_WHEN_DROPPING_SUBSCRIPTION_WITH_REQUEST_BECAUSE =
+ "Failed to drop pipes %s when dropping subscription with request %s, because %s";
+ public static final String FAILED_TO_DROP_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED_LATER =
+ "Failed to drop pipe {}, details: {}, metadata will be synchronized later.";
+ public static final String FAILED_TO_DROP_PIPE_PLUGIN_ON_DATA_NODES =
+ "Failed to drop pipe plugin %s on data nodes";
+ public static final String FAILED_TO_DROP_SUBSCRIPTION_WITH_REQUEST_ON_CONFIG_NODES_BECAUSE =
+ "Failed to drop subscription with request %s on config nodes, because %s";
+ public static final String FAILED_TO_DROP_TOPIC_ON_CONFIG_NODES_BECAUSE =
+ "Failed to drop topic %s on config nodes, because %s";
+ public static final String FAILED_TO_DROP_TOPIC_ON_DATA_NODES_BECAUSE =
+ "Failed to drop topic %s on data nodes, because %s";
+ public static final String FAILED_TO_EXECUTE_IN_ALL_REPLICASET_OF_SCHEMAREGION_WHEN_CHECKING =
+ "Failed to execute in all replicaset of schemaRegion %s when checking templates on path %s. Failures: %s";
+ public static final String FAILED_TO_EXECUTE_IN_ALL_REPLICASET_OF_SCHEMAREGION_WHEN_CHECKING_2 =
+ "Failed to execute in all replicaset of schemaRegion %s when checking the template %s on %s. Failure nodes: %s";
+ public static final String FAILED_TO_EXECUTE_PLAN_BECAUSE =
+ "Failed to execute plan {} because {}";
+ public static final String FAILED_TO_FOR_TABLE_TO_DATANODE_FAILURE_RESULTS =
+ "Failed to {} for table {}.{} to DataNode, failure results: {}";
+ public static final String FAILED_TO_INIT_CQ_BECAUSE_OF_UNKNOWN_REASONS =
+ "Failed to init CQ {} because of unknown reasons {}";
+ public static final String FAILED_TO_INIT_CQ_BECAUSE_SUCH_CQ_ALREADY_EXISTS =
+ "Failed to init CQ {} because such cq already exists";
+ public static final String FAILED_TO_INVALIDATE_COLUMN_S_CACHE_OF_TABLE =
+ "Failed to invalidate {} column {}'s cache of table {}.{}";
+ public static final String FAILED_TO_INVALIDATE_SCHEMAENGINE_CACHE_OF_DEVICES_IN_TABLE =
+ "Failed to invalidate schemaEngine cache of devices in table {}.{}";
+ public static final String FAILED_TO_INVALIDATE_SCHEMAENGINE_CACHE_OF_TABLE =
+ "Failed to invalidate schemaEngine cache of table {}.{}";
+ public static final String FAILED_TO_INVALIDATE_SCHEMAENGINE_CACHE_OF_TIMESERIES =
+ "Failed to invalidate schemaEngine cache of timeSeries {}";
+ public static final String FAILED_TO_INVALIDATE_SCHEMAENGINE_CACHE_OF_VIEW =
+ "Failed to invalidate schemaengine cache of view {}";
+ public static final String FAILED_TO_INVALIDATE_SCHEMA_CACHE_OF_TEMPLATE_TIMESERIES =
+ "Failed to invalidate schema cache of template timeSeries {}";
+ public static final String FAILED_TO_INVALIDATE_TEMPLATE_CACHE_OF_TEMPLATE_SET_ON =
+ "Failed to invalidate template cache of template {} set on {}";
+ public static final String FAILED_TO_PRE_RELEASE_FOR_TABLE_TO_DATANODE_FAILURE_RESULTS =
+ "Failed to pre-release {} for table {}.{} to DataNode, failure results: {}";
+ public static final String FAILED_TO_PRE_SET_TEMPLATE_ON_PATH_DUE_TO =
+ "Failed to pre set template {} on path {} due to {}";
+ public static final String FAILED_TO_PUSH_CONSUMER_GROUP_META_TO_DATANODES_DETAILS =
+ "Failed to push consumer group meta to dataNodes, details: %s";
+ public static final String FAILED_TO_PUSH_PIPE_META_LIST_TO_DATA_NODES_WILL =
+ "Failed to push pipe meta list to data nodes, will retry later.";
+ public static final String FAILED_TO_PUSH_PIPE_META_TO_DATANODES_DETAILS =
+ "Failed to push pipe meta to dataNodes, details: %s";
+ public static final String FAILED_TO_PUSH_TOPIC_META_TO_DATANODES_DETAILS =
+ "Failed to push topic meta to dataNodes, details: %s";
+ public static final String FAILED_TO_REMOVE_DATA_NODE_BECAUSE_IT_IS_NOT_IN =
+ "Failed to remove data node {} because it is not in running and the configuration of cluster is one replication";
+ public static final String FAILED_TO_ROLLBACK_ALTER_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED =
+ "Failed to rollback alter pipe {}, details: {}, metadata will be synchronized later.";
+ public static final String FAILED_TO_ROLLBACK_COMMIT_SET_TEMPLATE_ON_PATH_DUE_TO =
+ "Failed to rollback commit set template {} on path {} due to {}";
+ public static final String FAILED_TO_ROLLBACK_CREATE_PIPES_WHEN_CREATING_SUBSCRIPTION_WITH_REQUEST =
+ "Failed to rollback create pipes when creating subscription with request %s, because %s";
+ public static final String FAILED_TO_ROLLBACK_CREATE_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED =
+ "Failed to rollback create pipe {}, details: {}, metadata will be synchronized later.";
+ public static final String FAILED_TO_ROLLBACK_CREATING_SUBSCRIPTION_WITH_REQUEST_ON_CONFIG_NODES =
+ "Failed to rollback creating subscription with request %s on config nodes, because %s";
+ public static final String FAILED_TO_ROLLBACK_CREATING_TOPIC_ON_CONFIG_NODES_BECAUSE =
+ "Failed to rollback creating topic %s on config nodes, because %s";
+ public static final String FAILED_TO_ROLLBACK_CREATING_TOPIC_ON_DATA_NODES_BECAUSE =
+ "Failed to rollback creating topic %s on data nodes, because %s";
+ public static final String FAILED_TO_ROLLBACK_FROM_ALTERING_CONSUMER_GROUP_ON_CONFIG_NODES =
+ "Failed to rollback from altering consumer group (%s -> %s) on config nodes, because %s";
+ public static final String FAILED_TO_ROLLBACK_FROM_ALTERING_CONSUMER_GROUP_ON_DATA_NODES =
+ "Failed to rollback from altering consumer group (%s -> %s) on data nodes, because %s";
+ public static final String FAILED_TO_ROLLBACK_FROM_ALTERING_TOPIC_ON_CONFIG_NODES_BECAUSE =
+ "Failed to rollback from altering topic (%s -> %s) on config nodes, because %s";
+ public static final String FAILED_TO_ROLLBACK_FROM_ALTERING_TOPIC_ON_DATA_NODES_BECAUSE =
+ "Failed to rollback from altering topic (%s -> %s) on data nodes, because %s";
+ public static final String FAILED_TO_ROLLBACK_PIPE_PLUGIN_ON_DATA_NODES =
+ "Failed to rollback pipe plugin [%s] on data nodes";
+ public static final String FAILED_TO_ROLLBACK_PRE_RELEASE_FOR_TABLE_INFO_TO_DATANODE =
+ "Failed to rollback pre-release {} for table {}.{} info to DataNode, failure results: {}";
+ public static final String FAILED_TO_ROLLBACK_PRE_RELEASE_TEMPLATE_INFO_OF_TEMPLATE_SET =
+ "Failed to rollback pre release template info of template {} set on path {} on DataNode {}";
+ public static final String FAILED_TO_ROLLBACK_PRE_SET_TEMPLATE_ON_PATH_DUE_TO =
+ "Failed to rollback pre set template {} on path {} due to {}";
+ public static final String FAILED_TO_ROLLBACK_PRE_UNSET_TEMPLATE_OPERATION_OF_TEMPLATE_SET =
+ "Failed to rollback pre unset template operation of template {} set on {}";
+ public static final String FAILED_TO_ROLLBACK_START_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED =
+ "Failed to rollback start pipe {}, details: {}, metadata will be synchronized later.";
+ public static final String FAILED_TO_ROLLBACK_STOP_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED =
+ "Failed to rollback stop pipe {}, details: {}, metadata will be synchronized later.";
+ public static final String FAILED_TO_ROLLBACK_TABLE_CREATION =
+ "Failed to rollback table creation {}.{}";
+ public static final String FAILED_TO_ROLLBACK_TEMPLATE_CACHE_OF_TEMPLATE_SET_ON =
+ "Failed to rollback template cache of template {} set on {}";
+ public static final String FAILED_TO_SERIALIZE_DATAPARTITIONTABLES =
+ "Failed to serialize dataPartitionTables";
+ public static final String FAILED_TO_SERIALIZE_FAILEDDATANODE =
+ "Failed to serialize failedDataNode";
+ public static final String FAILED_TO_SERIALIZE_FINALDATAPARTITIONTABLES =
+ "Failed to serialize finalDataPartitionTables";
+ public static final String FAILED_TO_SERIALIZE_SKIPDATANODE = "Failed to serialize skipDataNode";
+ public static final String FAILED_TO_SET_SCHEMAENGINE_TEMPLATE_ON_PATH_BECAUSE_THERE_S =
+ "Failed to set schemaengine template %s on path %s because there's failure on DataNode %s";
+ public static final String FAILED_TO_START_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED_LATER =
+ "Failed to start pipe {}, details: {}, metadata will be synchronized later.";
+ public static final String FAILED_TO_STOP_AINODE_BECAUSE_BUT_THE_REMOVE_PROCESS_WILL =
+ "Failed to stop AINode {} because {}, but the remove process will continue.";
+ public static final String FAILED_TO_STOP_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED_LATER =
+ "Failed to stop pipe {}, details: {}, metadata will be synchronized later.";
+ public static final String FAILED_TO_SYNC_TABLE_COMMIT_CREATE_INFO_TO_DATANODE_FAILURE =
+ "Failed to sync table {}.{} commit-create info to DataNode {}, failure results: ";
+ public static final String FAILED_TO_SYNC_TABLE_PRE_CREATE_INFO_TO_DATANODE_FAILURE =
+ "Failed to sync table {}.{} pre-create info to DataNode, failure results: {}";
+ public static final String FAILED_TO_SYNC_TABLE_ROLLBACK_CREATE_INFO_TO_DATANODE_FAILURE =
+ "Failed to sync table {}.{} rollback-create info to DataNode {}, failure results: ";
+ public static final String FAILED_TO_SYNC_TEMPLATE_COMMIT_SET_INFO_ON_PATH_TO =
+ "Failed to sync template {} commit-set info on path {} to DataNode {}";
+ public static final String FAILED_TO_SYNC_TEMPLATE_PRE_SET_INFO_ON_PATH_TO =
+ "Failed to sync template {} pre-set info on path {} to DataNode {}";
+ public static final String FAILED_TO_UPDATE_PROCEDURE = "Failed to update procedure {}";
+ public static final String FAILED_TO_UPDATE_TTL_CACHE_OF_DATANODE =
+ "Failed to update ttl cache of dataNode.";
+ public static final String FAILED_TO_WRITE_DATAPARTITIONTABLE_TO_CONSENSUS_LOG =
+ "Failed to write DataPartitionTable to consensus log";
+ public static final String FAIL_IN_CREATECQPROCEDURE = "Fail in CreateCQProcedure";
+ public static final String FAIL_TO_ACTIVE_TRIGGERINSTANCE_ON_DATA_NODES =
+ "Fail to active triggerInstance [%s] on Data Nodes";
+ public static final String FAIL_TO_CONFIG_NODE_INACTIVE_ROLLBACK_OF_TRIGGER =
+ "Fail to [CONFIG_NODE_INACTIVE] rollback of trigger [%s]";
+ public static final String FAIL_TO_CREATE_PIPE_PLUGIN_AFTER_RETRIES =
+ "Fail to create pipe plugin [{}] after {} retries";
+ public static final String FAIL_TO_CREATE_TRIGGERINSTANCE_ON_DATA_NODES =
+ "Fail to create triggerInstance [%s] on Data Nodes";
+ public static final String FAIL_TO_CREATE_TRIGGER_AT_STATE =
+ "Fail to create trigger [%s] at STATE [%s]";
+ public static final String FAIL_TO_DATA_NODE_INACTIVE_ROLLBACK_OF_TRIGGER =
+ "Fail to [DATA_NODE_INACTIVE] rollback of trigger [%s]";
+ public static final String FAIL_TO_DROP_PIPE_PLUGIN_AFTER_RETRIES =
+ "Fail to drop pipe plugin [{}] after {} retries";
+ public static final String FAIL_TO_DROP_TRIGGER_AT_STATE =
+ "Fail to drop trigger [%s] at STATE [%s]";
+ public static final String FAIL_TO_DROP_TRIGGER_ON_DATA_NODES =
+ "Fail to drop trigger [%s] on Data Nodes";
+ public static final String FAIL_TO_EXECUTE_PLAN_AT_STATE =
+ "Fail to execute plan [%s] at state[%s]";
+ public static final String FAIL_TO_REMOVE_AINODE_AT_STATE =
+ "Fail to remove AINode [%s] at STATE [%s], %s";
+ public static final String FAIL_TO_REMOVE_AINODE_ON_CONFIG_NODES =
+ "Fail to remove [%s] AINode on Config Nodes [%s]";
+ public static final String FAIL_WHEN_EXECUTE = "Fail when execute {} ";
+ public static final String FINISH_INACTIVE_ROLLBACK_OF_CQ_SUCCESSFULLY =
+ "Finish [INACTIVE] rollback of CQ {} successfully";
+ public static final String FINISH_INIT_CQ_SUCCESSFULLY = "Finish init CQ {} successfully";
+ public static final String FINISH_SCHEDULING_CQ_SUCCESSFULLY =
+ "Finish Scheduling CQ {} successfully";
+ public static final String FORCE_UPDATE_NODECACHE_DATANODEID_NODESTATUS_CURRENTTIME =
+ "{}, Force update NodeCache: dataNodeId={}, nodeStatus={}, currentTime={}";
+ public static final String FOR_FAILED_WHEN_BECAUSE_FAILED_TO_EXECUTE_IN_ALL_REPLICASET =
+ "[%s] for %s.%s failed when [%s] because failed to execute in all replicaset of %s %s. Failure nodes: %s";
+ public static final String FOR_FAILED_WHEN_CONSTRUCT_BLACK_LIST_FOR_TABLE_BECAUSE_FAILED =
+ "[%s] for %s.%s failed when construct black list for table because failed to execute in all replicaset of %s %s. Failures: %s";
+ public static final String INVALIDATE_CACHE_OF_DEVICES_IN =
+ "Invalidate cache of devices in {}.{}";
+ public static final String INVALIDATE_CACHE_OF_TEMPLATE_SET_ON =
+ "Invalidate cache of template {} set on {}";
+ public static final String INVALIDATE_CACHE_OF_TEMPLATE_TIMESERIES =
+ "Invalidate cache of template timeSeries {}";
+ public static final String INVALIDATE_CACHE_OF_TIMESERIES = "Invalidate cache of timeSeries {}";
+ public static final String INVALIDATE_CACHE_OF_VIEW = "Invalidate cache of view {}";
+ public static final String INVALIDATE_COLUMN_CACHE_FAILED_FOR_TABLE =
+ "Invalidate column %s cache failed for table %s.%s";
+ public static final String INVALIDATE_SCHEMAENGINE_CACHE_FAILED =
+ "Invalidate schemaEngine cache failed";
+ public static final String INVALIDATE_SCHEMA_CACHE_FAILED = "Invalidate schema cache failed";
+ public static final String INVALIDATE_TEMPLATE_CACHE_FAILED = "Invalidate template cache failed";
+ public static final String INVALIDATE_VIEW_SCHEMAENGINE_CACHE_FAILED =
+ "Invalidate view schemaengine cache failed";
+ public static final String INVALIDATING_CACHE_FOR_COLUMN_IN_WHEN_DROPPING_COLUMN =
+ "Invalidating cache for column {} in {}.{} when dropping column";
+ public static final String INVALIDATING_CACHE_FOR_TABLE_WHEN_DROPPING_TABLE =
+ "Invalidating cache for table {}.{} when dropping table";
+ public static final String INVALID_DATA_TYPE_CANNOT_BE_USED_AS_A_NEW_TYPE =
+ "Invalid data type cannot be used as a new type";
+ public static final String IO_ERROR_WHEN_DESERIALIZE_AUTHPLAN =
+ "IO error when deserialize authplan.";
+ public static final String IO_ERROR_WHEN_DESERIALIZE_SETTTL_PLAN =
+ "IO error when deserialize setTTL plan.";
+ public static final String NO_AVAILABLE_DATANODE_TO_ASSIGN_TASKS =
+ "No available datanode to assign tasks";
+ public static final String NO_DATABASE_LOST_DATA_PARTITION_TABLE_FOR_CONSENSUS_WRITE =
+ "No database lost data partition table for consensus write";
+ public static final String NO_DATAPARTITIONTABLE_AVAILABLE_FOR_CONSENSUS_WRITE =
+ "No DataPartitionTable available for consensus write";
+ public static final String NO_ENOUGH_DATA_NODE_TO_MIGRATE_REGION =
+ "No enough Data node to migrate region: {}";
+ public static final String OPERATION_TIMED_OUT_AFTER = "Operation timed out after ";
+ public static final String PARTITIONTABLECLEANER_PERIODICALLY_ACTIVATE_PARTITIONTABLEAUTOCLEANER_DATABASETTL =
+ "[PartitionTableCleaner] Periodically activate PartitionTableAutoCleaner, databaseTTL: {}";
+ public static final String PARTITIONTABLECLEANER_PERIODICALLY_ACTIVATE_PARTITIONTABLEAUTOCLEANER_FOR =
+ "[PartitionTableCleaner] Periodically activate PartitionTableAutoCleaner for: {}";
+ public static final String PARTITIONTABLECLEANER_THE_PARTITIONTABLEAUTOCLEANER_IS_STARTED_WITH_CYCLE_MS =
+ "[PartitionTableCleaner] The PartitionTableAutoCleaner is started with cycle={}ms";
+ public static final String PID_ADDREGION_CANNOT_ROLL_BACK_BECAUSE_CANNOT_FIND_THE_CORRECT =
+ "[pid{}][AddRegion] Cannot roll back, because cannot find the correct locations";
+ public static final String PID_ADDREGION_IT_APPEARS_THAT_CONSENSUS_WRITE_HAS_NOT_MODIFIED =
+ "[pid{}][AddRegion] It appears that consensus write has not modified the local partition table. ";
+ public static final String PID_ADDREGION_RESET_PEER_LIST_PEER_LIST_OF_CONSENSUS_GROUP =
+ "[pid{}][AddRegion] reset peer list: peer list of consensus group {} on DataNode {} failed to reset to {}, you may manually reset it";
+ public static final String PID_ADDREGION_RESET_PEER_LIST_PEER_LIST_OF_CONSENSUS_GROUP_2 =
+ "[pid{}][AddRegion] reset peer list: peer list of consensus group {} on DataNode {} has been successfully reset to {}";
+ public static final String PID_ADDREGION_RESET_PEER_LIST_PEER_LIST_OF_CONSENSUS_GROUP_3 =
+ "[pid{}][AddRegion] reset peer list: peer list of consensus group {} on DataNode {} will be reset to {}";
+ public static final String PID_ADDREGION_STARTED_WILL_BE_ADDED_TO_DATANODE =
+ "[pid{}][AddRegion] started, {} will be added to DataNode {}.";
+ public static final String PID_ADDREGION_START_TO_ROLL_BACK_BECAUSE =
+ "[pid{}][AddRegion] Start to roll back, because: {}";
+ public static final String PID_ADDREGION_STATE_COMPLETE = "[pid{}][AddRegion] state {} complete";
+ public static final String PID_ADDREGION_STATE_FAILED = "[pid{}][AddRegion] state {} failed";
+ public static final String PID_ADDREGION_SUCCESS_HAS_BEEN_ADDED_TO_DATANODE_PROCEDURE_TOOK =
+ "[pid{}][AddRegion] success, {} has been added to DataNode {}. Procedure took {} (start at {}).";
+ public static final String PID_MIGRATEREGION_STARTED_WILL_BE_MIGRATED_FROM_DATANODE_TO =
+ "[pid{}][MigrateRegion] started, {} will be migrated from DataNode {} to {}.";
+ public static final String PID_MIGRATEREGION_STATE_COMPLETE =
+ "[pid{}][MigrateRegion] state {} complete";
+ public static final String PID_MIGRATEREGION_STATE_FAIL = "[pid{}][MigrateRegion] state {} fail";
+ public static final String PID_MIGRATEREGION_SUB_PROCEDURE_ADDREGIONPEERPROCEDURE =
+ "[pid{}][MigrateRegion] sub-procedure AddRegionPeerProcedure failed, RegionMigrateProcedure will not continue";
+ public static final String PID_MIGRATEREGION_SUCCESS_HAS_BEEN_MIGRATED_FROM_DATANODE_TO_PROCEDURE =
+ "[pid{}][MigrateRegion] success,{} {} has been migrated from DataNode {} to {}. Procedure took {} (started at {}).";
+ public static final String PID_NOTIFYREGIONMIGRATION_STARTED_REGION_ID_IS =
+ "[pid{}][NotifyRegionMigration] started, region id is {}.";
+ public static final String PID_NOTIFYREGIONMIGRATION_STATE_COMPLETE =
+ "[pid{}][NotifyRegionMigration] state {} complete";
+ public static final String PID_NOTIFYREGIONMIGRATION_STATE_FAILED =
+ "[pid{}][NotifyRegionMigration] state {} failed";
+ public static final String PID_RECONSTRUCTREGION_FAILED_BUT_THE_REGION_HAS_BEEN_REMOVED_FROM =
+ "[pid{}][ReconstructRegion] failed, but the region {} has been removed from DataNode {}. Use 'extend region' to fix this.";
+ public static final String PID_RECONSTRUCTREGION_STARTED_REGION_ON_DATANODE_WILL_BE_RECONSTRUCTED =
+ "[pid{}][ReconstructRegion] started, region {} on DataNode {}({}) will be reconstructed.";
+ public static final String PID_RECONSTRUCTREGION_STATE_COMPLETE =
+ "[pid{}][ReconstructRegion] state {} complete";
+ public static final String PID_RECONSTRUCTREGION_STATE_FAIL =
+ "[pid{}][ReconstructRegion] state {} fail";
+ public static final String PID_RECONSTRUCTREGION_SUB_PROCEDURE_REMOVEREGIONPEERPROCEDURE =
+ "[pid{}][ReconstructRegion] sub-procedure RemoveRegionPeerProcedure failed, ReconstructRegionProcedure will not continue";
+ public static final String PID_RECONSTRUCTREGION_SUCCESS_REGION_HAS_BEEN_RECONSTRUCTED =
+ "[pid{}][ReconstructRegion] success, region {} has been reconstructed on DataNode {}. Procedure took {} (started at {})";
+ public static final String PID_REMOVEREGION_DELETE_OLD_REGION_PEER_EXECUTED_FAILED_AFTER_ATTEMPTS =
+ "[pid{}][RemoveRegion] DELETE_OLD_REGION_PEER executed failed after {} attempts, procedure will continue. You should manually delete region file. {}";
+ public static final String PID_REMOVEREGION_DELETE_OLD_REGION_PEER_EXECUTED_FAILED_ATTEMPT_WILL =
+ "[pid{}][RemoveRegion] DELETE_OLD_REGION_PEER executed failed (attempt {}/{}), will retry after {}ms. {}";
+ public static final String PID_REMOVEREGION_DELETE_OLD_REGION_PEER_TASK_SUBMITTED_FAILED_AFTER =
+ "[pid{}][RemoveRegion] DELETE_OLD_REGION_PEER task submitted failed after {} attempts, procedure will continue. You should manually delete region file. {}";
+ public static final String PID_REMOVEREGION_DELETE_OLD_REGION_PEER_TASK_SUBMITTED_FAILED_ATTEMPT =
+ "[pid{}][RemoveRegion] DELETE_OLD_REGION_PEER task submitted failed (attempt {}/{}), will retry after {}ms. {}";
+ public static final String PID_REMOVEREGION_EXECUTED_FAILED_CONFIGNODE_BELIEVE_CURRENT_PEER_LIST_OF =
+ "[pid{}][RemoveRegion] {} executed failed, ConfigNode believe current peer list of {} is {}. Procedure will continue. You should manually clear peer list.";
+ public static final String PID_REMOVEREGION_STARTED_REGION_WILL_BE_REMOVED_FROM_DATANODE =
+ "[pid{}][RemoveRegion] started, region {} will be removed from DataNode {}.";
+ public static final String PID_REMOVEREGION_STATE_SUCCESS =
+ "[pid{}][RemoveRegion] state {} success";
+ public static final String PID_REMOVEREGION_SUCCESS_REGION_HAS_BEEN_REMOVED_FROM_DATANODE_PROCEDURE =
+ "[pid{}][RemoveRegion] success, region {} has been removed from DataNode {}. Procedure took {} (started at {})";
+ public static final String PID_REMOVEREGION_TASK_SUBMITTED_FAILED_CONFIGNODE_BELIEVE_CURRENT_PEER_LIST =
+ "[pid{}][RemoveRegion] {} task submitted failed, ConfigNode believe current peer list of {} is {}. Procedure will continue. You should manually clear peer list.";
+ public static final String PIPEHANDLELEADERCHANGEPROCEDURE_EXECUTEFROMCALCULATEINFOFORTASK =
+ "PipeHandleLeaderChangeProcedure: executeFromCalculateInfoForTask";
+ public static final String PIPEHANDLELEADERCHANGEPROCEDURE_EXECUTEFROMHANDLEONCONFIGNODES =
+ "PipeHandleLeaderChangeProcedure: executeFromHandleOnConfigNodes";
+ public static final String PIPEHANDLELEADERCHANGEPROCEDURE_EXECUTEFROMHANDLEONDATANODES =
+ "PipeHandleLeaderChangeProcedure: executeFromHandleOnDataNodes";
+ public static final String PIPEHANDLELEADERCHANGEPROCEDURE_EXECUTEFROMVALIDATETASK =
+ "PipeHandleLeaderChangeProcedure: executeFromValidateTask";
+ public static final String PIPEHANDLELEADERCHANGEPROCEDURE_ROLLBACKFROMCALCULATEINFOFORTASK =
+ "PipeHandleLeaderChangeProcedure: rollbackFromCalculateInfoForTask";
+ public static final String PIPEHANDLELEADERCHANGEPROCEDURE_ROLLBACKFROMCREATEONDATANODES =
+ "PipeHandleLeaderChangeProcedure: rollbackFromCreateOnDataNodes";
+ public static final String PIPEHANDLELEADERCHANGEPROCEDURE_ROLLBACKFROMHANDLEONCONFIGNODES =
+ "PipeHandleLeaderChangeProcedure: rollbackFromHandleOnConfigNodes";
+ public static final String PIPEHANDLELEADERCHANGEPROCEDURE_ROLLBACKFROMVALIDATETASK =
+ "PipeHandleLeaderChangeProcedure: rollbackFromValidateTask";
+ public static final String PIPEHANDLEMETACHANGEPROCEDURE_EXECUTEFROMCALCULATEINFOFORTASK =
+ "PipeHandleMetaChangeProcedure: executeFromCalculateInfoForTask";
+ public static final String PIPEHANDLEMETACHANGEPROCEDURE_EXECUTEFROMHANDLEONDATANODES =
+ "PipeHandleMetaChangeProcedure: executeFromHandleOnDataNodes";
+ public static final String PIPEHANDLEMETACHANGEPROCEDURE_EXECUTEFROMVALIDATETASK =
+ "PipeHandleMetaChangeProcedure: executeFromValidateTask";
+ public static final String PIPEHANDLEMETACHANGEPROCEDURE_EXECUTEFROMWRITECONFIGNODECONSENSUS =
+ "PipeHandleMetaChangeProcedure: executeFromWriteConfigNodeConsensus";
+ public static final String PIPEHANDLEMETACHANGEPROCEDURE_ROLLBACKFROMCALCULATEINFOFORTASK =
+ "PipeHandleMetaChangeProcedure: rollbackFromCalculateInfoForTask";
+ public static final String PIPEHANDLEMETACHANGEPROCEDURE_ROLLBACKFROMOPERATEONDATANODES =
+ "PipeHandleMetaChangeProcedure: rollbackFromOperateOnDataNodes";
+ public static final String PIPEHANDLEMETACHANGEPROCEDURE_ROLLBACKFROMVALIDATETASK =
+ "PipeHandleMetaChangeProcedure: rollbackFromValidateTask";
+ public static final String PIPEHANDLEMETACHANGEPROCEDURE_ROLLBACKFROMWRITECONFIGNODECONSENSUS =
+ "PipeHandleMetaChangeProcedure: rollbackFromWriteConfigNodeConsensus";
+ public static final String PIPEMETASYNCPROCEDURE_ACQUIRELOCK_SKIP_THE_PROCEDURE_DUE_TO_THE_LAST_EXECUTION =
+ "PipeMetaSyncProcedure: acquireLock, skip the procedure due to the last execution time {}";
+ public static final String PIPEMETASYNCPROCEDURE_EXECUTEFROMCALCULATEINFOFORTASK =
+ "PipeMetaSyncProcedure: executeFromCalculateInfoForTask";
+ public static final String PIPEMETASYNCPROCEDURE_EXECUTEFROMOPERATEONDATANODES =
+ "PipeMetaSyncProcedure: executeFromOperateOnDataNodes";
+ public static final String PIPEMETASYNCPROCEDURE_EXECUTEFROMVALIDATETASK =
+ "PipeMetaSyncProcedure: executeFromValidateTask";
+ public static final String PIPEMETASYNCPROCEDURE_EXECUTEFROMWRITECONFIGNODECONSENSUS =
+ "PipeMetaSyncProcedure: executeFromWriteConfigNodeConsensus";
+ public static final String PIPEMETASYNCPROCEDURE_ROLLBACKFROMCALCULATEINFOFORTASK =
+ "PipeMetaSyncProcedure: rollbackFromCalculateInfoForTask";
+ public static final String PIPEMETASYNCPROCEDURE_ROLLBACKFROMOPERATEONDATANODES =
+ "PipeMetaSyncProcedure: rollbackFromOperateOnDataNodes";
+ public static final String PIPEMETASYNCPROCEDURE_ROLLBACKFROMVALIDATETASK =
+ "PipeMetaSyncProcedure: rollbackFromValidateTask";
+ public static final String PIPEMETASYNCPROCEDURE_ROLLBACKFROMWRITECONFIGNODECONSENSUS =
+ "PipeMetaSyncProcedure: rollbackFromWriteConfigNodeConsensus";
+ public static final String PIPE_NOT_FOUND_IN_PIPETASKINFO_CAN_NOT_PUSH_ITS_META =
+ "Pipe {} not found in PipeTaskInfo, can not push its meta.";
+ public static final String PIPE_PLUGIN_IS_ALREADY_CREATED_AND_ISSETIFNOTEXISTSCONDITION_IS_TRUE_END =
+ "Pipe plugin {} is already created and isSetIfNotExistsCondition is true, end the CreatePipePluginProcedure({})";
+ public static final String PIPE_PLUGIN_IS_ALREADY_CREATED_END_THE_CREATEPIPEPLUGINPROCEDURE =
+ "Pipe plugin {} is already created, end the CreatePipePluginProcedure({})";
+ public static final String PIPE_PLUGIN_IS_NOT_EXIST_END_THE_DROPPIPEPLUGINPROCEDURE =
+ "Pipe plugin {} is not exist, end the DropPipePluginProcedure({})";
+ public static final String PRE_CREATE_TABLE = "Pre create table {}.{}";
+ public static final String PRE_CREATE_TABLE_FAILED = "Pre create table failed";
+ public static final String PRE_RELEASE = "Pre-release ";
+ public static final String PRE_RELEASE_INFO_FOR_TABLE_WHEN_SETTING_PROPERTIES =
+ "Pre release info for table {}.{} when setting properties";
+ public static final String PRE_RELEASE_INFO_OF_TABLE_WHEN_ADDING_COLUMN =
+ "Pre release info of table {}.{} when adding column";
+ public static final String PRE_RELEASE_INFO_OF_TABLE_WHEN_ALTERING_COLUMN =
+ "Pre-release info of table {}.{} when altering column";
+ public static final String PRE_RELEASE_INFO_OF_TABLE_WHEN_RENAMING_COLUMN =
+ "Pre release info of table {}.{} when renaming column";
+ public static final String PRE_RELEASE_INFO_OF_TABLE_WHEN_RENAMING_TABLE =
+ "Pre release info of table {}.{} when renaming table";
+ public static final String PRE_RELEASE_SCHEMAENGINE_TEMPLATE_SET_ON_PATH =
+ "Pre release schemaengine template {} set on path {}";
+ public static final String PRE_RELEASE_TABLE = "Pre release table {}.{}";
+ public static final String PRE_SET_SCHEMAENGINE_TEMPLATE_ON_PATH =
+ "Pre set schemaengine template {} on path {}";
+ public static final String PRE_SET_TEMPLATE_FAILED = "Pre set template failed";
+ public static final String PROCEDUREID = "ProcedureId {}: {}";
+ public static final String PROCEDUREID_ACQUIRED_PIPE_LOCK = "ProcedureId {} acquired pipe lock.";
+ public static final String PROCEDUREID_ACQUIRED_SUBSCRIPTION_LOCK =
+ "ProcedureId {} acquired subscription lock.";
+ public static final String PROCEDUREID_ALL_RETRIES_FAILED_WHEN_TRYING_TO_AT_STATE_WILL =
+ "ProcedureId {}: All {} retries failed when trying to {} at state [{}], will rollback...";
+ public static final String PROCEDUREID_ENCOUNTERED_ERROR_WHEN_TRYING_TO_AT_STATE_RETRY =
+ "ProcedureId {}: Encountered error when trying to {} at state [{}], retry [{}/{}]";
+ public static final String PROCEDUREID_FAILED_TO_ACQUIRE_PIPE_LOCK =
+ "ProcedureId {} failed to acquire pipe lock.";
+ public static final String PROCEDUREID_FAILED_TO_ACQUIRE_SUBSCRIPTION_LOCK =
+ "ProcedureId {} failed to acquire subscription lock.";
+ public static final String PROCEDUREID_FAILED_TO_ROLLBACK_FROM_CALCULATE_INFO_FOR_TASK =
+ "ProcedureId {}: Failed to rollback from calculate info for task.";
+ public static final String PROCEDUREID_FAILED_TO_ROLLBACK_FROM_OPERATE_ON_DATA_NODES =
+ "ProcedureId {}: Failed to rollback from operate on data nodes.";
+ public static final String PROCEDUREID_FAILED_TO_ROLLBACK_FROM_STATE_BECAUSE =
+ "ProcedureId {}: Failed to rollback from state [{}], because {}";
+ public static final String PROCEDUREID_FAILED_TO_ROLLBACK_FROM_VALIDATE_TASK =
+ "ProcedureId {}: Failed to rollback from validate task.";
+ public static final String PROCEDUREID_FAILED_TO_ROLLBACK_FROM_WRITE_CONFIG_NODE_CONSENSUS =
+ "ProcedureId {}: Failed to rollback from write config node consensus.";
+ public static final String PROCEDUREID_FAIL_TO_BECAUSE = "ProcedureId %s: Fail to %s because %s";
+ public static final String PROCEDUREID_INVALID_LOCK_STATE_PIPE_LOCK_WILL_BE_RELEASED =
+ "ProcedureId {}: {}. Invalid lock state. Pipe lock will be released.";
+ public static final String PROCEDUREID_INVALID_LOCK_STATE_SUBSCRIPTION_LOCK_WILL_BE_RELEASED =
+ "ProcedureId {}: {}. Invalid lock state. Subscription lock will be released.";
+ public static final String PROCEDUREID_INVALID_LOCK_STATE_WITHOUT_ACQUIRING_PIPE_LOCK =
+ "ProcedureId {}: {}. Invalid lock state. Without acquiring pipe lock.";
+ public static final String PROCEDUREID_INVALID_LOCK_STATE_WITHOUT_ACQUIRING_SUBSCRIPTION_LOCK =
+ "ProcedureId {}: {}. Invalid lock state. Without acquiring subscription lock.";
+ public static final String PROCEDUREID_LOCK_ACQUIRED_THE_FOLLOWING_PROCEDURE_SHOULD_BE_EXECUTED_WITH =
+ "ProcedureId {}: LOCK_ACQUIRED. The following procedure should be executed with pipe lock.";
+ public static final String PROCEDUREID_LOCK_ACQUIRED_THE_FOLLOWING_PROCEDURE_SHOULD_BE_EXECUTED_WITH_2 =
+ "ProcedureId {}: LOCK_ACQUIRED. The following procedure should be executed with subscription and pipe lock.";
+ public static final String PROCEDUREID_LOCK_ACQUIRED_THE_FOLLOWING_PROCEDURE_SHOULD_BE_EXECUTED_WITH_3 =
+ "ProcedureId {}: LOCK_ACQUIRED. The following procedure should be executed with subscription lock.";
+ public static final String PROCEDUREID_LOCK_ACQUIRED_THE_FOLLOWING_PROCEDURE_SHOULD_NOT_BE_EXECUTED =
+ "ProcedureId {}: LOCK_ACQUIRED. The following procedure should not be executed without pipe lock.";
+ public static final String PROCEDUREID_LOCK_ACQUIRED_THE_FOLLOWING_PROCEDURE_SHOULD_NOT_BE_EXECUTED_2 =
+ "ProcedureId {}: LOCK_ACQUIRED. The following procedure should not be executed without subscription lock.";
+ public static final String PROCEDUREID_LOCK_EVENT_WAIT_PIPE_LOCK_WILL_BE_RELEASED =
+ "ProcedureId {}: LOCK_EVENT_WAIT. Pipe lock will be released.";
+ public static final String PROCEDUREID_LOCK_EVENT_WAIT_SUBSCRIPTION_LOCK_WILL_BE_RELEASED =
+ "ProcedureId {}: LOCK_EVENT_WAIT. Subscription lock will be released.";
+ public static final String PROCEDUREID_LOCK_EVENT_WAIT_WITHOUT_ACQUIRING_PIPE_LOCK =
+ "ProcedureId {}: LOCK_EVENT_WAIT. Without acquiring pipe lock.";
+ public static final String PROCEDUREID_LOCK_EVENT_WAIT_WITHOUT_ACQUIRING_SUBSCRIPTION_LOCK =
+ "ProcedureId {}: LOCK_EVENT_WAIT. Without acquiring subscription lock.";
+ public static final String PROCEDUREID_PIPE_LOCK_IS_NOT_ACQUIRED_EXECUTEFROMSTATE_S_EXECUTION_WILL =
+ "ProcedureId {}: Pipe lock is not acquired, executeFromState's execution will be skipped.";
+ public static final String PROCEDUREID_PIPE_LOCK_IS_NOT_ACQUIRED_ROLLBACKSTATE_S_EXECUTION_WILL =
+ "ProcedureId {}: Pipe lock is not acquired, rollbackState({})'s execution will be skipped.";
+ public static final String PROCEDUREID_RELEASE_LOCK_NO_NEED_TO_RELEASE_PIPE_LOCK =
+ "ProcedureId {} release lock. No need to release pipe lock.";
+ public static final String PROCEDUREID_RELEASE_LOCK_NO_NEED_TO_RELEASE_SUBSCRIPTION_LOCK =
+ "ProcedureId {} release lock. No need to release subscription lock.";
+ public static final String PROCEDUREID_RELEASE_LOCK_PIPE_LOCK_WILL_BE_RELEASED =
+ "ProcedureId {} release lock. Pipe lock will be released.";
+ public static final String PROCEDUREID_RELEASE_LOCK_SUBSCRIPTION_LOCK_WILL_BE_RELEASED =
+ "ProcedureId {} release lock. Subscription lock will be released.";
+ public static final String PROCEDUREID_SUBSCRIPTION_LOCK_IS_NOT_ACQUIRED_EXECUTEFROMSTATE_S_EXECUTION_WILL =
+ "ProcedureId {}: Subscription lock is not acquired, executeFromState({})'s execution will be skipped.";
+ public static final String PROCEDUREID_SUBSCRIPTION_LOCK_IS_NOT_ACQUIRED_ROLLBACKSTATE_S_EXECUTION_WILL =
+ "ProcedureId {}: Subscription lock is not acquired, rollbackState({})'s execution will be skipped.";
+ public static final String PROCEDUREID_TRY_TO_ACQUIRE_PIPE_LOCK =
+ "ProcedureId {} try to acquire pipe lock.";
+ public static final String PROCEDUREID_TRY_TO_ACQUIRE_SUBSCRIPTION_AND_PIPE_LOCK =
+ "ProcedureId {} try to acquire subscription and pipe lock.";
+ public static final String PROCEDUREID_TRY_TO_ACQUIRE_SUBSCRIPTION_LOCK =
+ "ProcedureId {} try to acquire subscription lock.";
+ public static final String PROCEDURE_TYPE = "Procedure type ";
+ public static final String REMOVEREGIONLOCATION_REMOVE_REGION_FROM_DATANODE_RESULT_IS =
+ "RemoveRegionLocation remove region {} from DataNode {}, result is {}";
+ public static final String REMOVEREGIONPEER_STATE_FAILED = "RemoveRegionPeer state {} failed";
+ public static final String REMOVEREGIONPEER_STATE_SUCCESS = "RemoveRegionPeer state {} success";
+ public static final String REMOVEREGION_RATIS_TRANSFER_LEADER_FAIL_BUT_PROCEDURE_WILL_CONTINUE =
+ "[RemoveRegion] Ratis transfer leader fail, but procedure will continue.";
+ public static final String REMOVE_CONFIG_NODE = "Remove Config Node";
+ public static final String REMOVE_DATA_NODE_FAILED = "Remove Data Node failed ";
+ public static final String RENAMETABLECOLUMN_COSTS_MS = "RenameTableColumn-{}.{}-{} costs {}ms";
+ public static final String RENAMETABLE_COSTS_MS = "RenameTable-{}.{}-{} costs {}ms";
+ public static final String RENAME_COLUMN_TO_TABLE_ON_CONFIG_NODE =
+ "Rename column to table {}.{} on config node";
+ public static final String RETRIEVABLE_ERROR_TRYING_TO_CREATE_CQ_STATE =
+ "Retrievable error trying to create cq [{}], state [{}]";
+ public static final String RETRIEVABLE_ERROR_TRYING_TO_CREATE_PIPE_PLUGIN_STATE =
+ "Retrievable error trying to create pipe plugin [{}], state: {}";
+ public static final String RETRIEVABLE_ERROR_TRYING_TO_DROP_PIPE_PLUGIN_STATE =
+ "Retrievable error trying to drop pipe plugin [{}], state: {}";
+ public static final String RETRIEVABLE_ERROR_TRYING_TO_EXECUTE_PLAN_STATE =
+ "Retrievable error trying to execute plan {}, state: {}";
+ public static final String RETRIEVABLE_ERROR_TRYING_TO_REMOVE_AINODE_STATE =
+ "Retrievable error trying to remove AINode [{}], state [{}]";
+ public static final String ROLLBACK_CREATETABLE_COSTS_MS = "Rollback CreateTable-{} costs {}ms.";
+ public static final String ROLLBACK_CREATE_TABLE_FAILED = "Rollback create table failed";
+ public static final String ROLLBACK_DROPTABLE_COSTS_MS = "Rollback DropTable-{} costs {}ms.";
+ public static final String ROLLBACK_PRE_RELEASE = "Rollback pre-release ";
+ public static final String ROLLBACK_PRE_RELEASE_TEMPLATE_FAILED =
+ "Rollback pre release template failed";
+ public static final String ROLLBACK_RENAMETABLECOLUMN_COSTS_MS =
+ "Rollback RenameTableColumn-{} costs {}ms.";
+ public static final String ROLLBACK_RENAMETABLE_COSTS_MS = "Rollback RenameTable-{} costs {}ms.";
+ public static final String ROLLBACK_SETTABLEPROPERTIES_COSTS_MS =
+ "Rollback SetTableProperties-{} costs {}ms.";
+ public static final String ROLLBACK_SETTEMPLATE_COSTS_MS = "Rollback SetTemplate-{} costs {}ms.";
+ public static final String ROLLBACK_TEMPLATE_CACHE_FAILED = "Rollback template cache failed";
+ public static final String ROLLBACK_TEMPLATE_PRE_UNSET_FAILED_BECAUSE_OF =
+ "Rollback template pre unset failed because of";
+ public static final String ROLLBACK_UNSET_TEMPLATE_FAILED_AND_THE_CLUSTER_TEMPLATE_INFO_MANAGEMENT =
+ "Rollback unset template failed and the cluster template info management is strictly broken. Please try unset again.";
+ public static final String SELECTED_DATANODE_FOR_REGION = "Selected DataNode {} for Region {}";
+ public static final String SEND_ACTION_ADDREGIONPEER_FINISHED_REGIONID_RPCDATANODE_DESTDATANODE_STATUS =
+ "{}, Send action addRegionPeer finished, regionId: {}, rpcDataNode: {}, destDataNode: {}, status: {}";
+ public static final String SEND_ACTION_CREATENEWREGIONPEER_ERROR_REGIONID_NEWPEERDATANODEID_RESULT =
+ "{}, Send action createNewRegionPeer error, regionId: {}, newPeerDataNodeId: {}, result: {}";
+ public static final String SEND_ACTION_CREATENEWREGIONPEER_FINISHED_REGIONID_NEWPEERDATANODEID =
+ "{}, Send action createNewRegionPeer finished, regionId: {}, newPeerDataNodeId: {}";
+ public static final String SEND_ACTION_DELETEOLDREGIONPEER_FINISHED_REGIONID_DATANODEID =
+ "{}, Send action deleteOldRegionPeer finished, regionId: {}, dataNodeId: {}";
+ public static final String SEND_ACTION_REMOVEREGIONPEER_FINISHED_REGIONID_RPCDATANODE =
+ "{}, Send action removeRegionPeer finished, regionId: {}, rpcDataNode: {}";
+ public static final String SETSCHEMATEMPLATE_COSTS_MS = "SetSchemaTemplate-[{}] costs {}ms";
+ public static final String SETTABLEPROPERTIES_COSTS_MS = "SetTableProperties-{}.{}-{} costs {}ms";
+ public static final String SETTTL_COSTS_MS = "SetTTL-[{}] costs {}ms";
+ public static final String SET_PROPERTIES_TO_TABLE = "Set properties to table {}.{}";
+ public static final String SET_TEMPLATE_TO_FAILED_WHEN_CHECK_TIME_SERIES_EXISTENCE_ON =
+ "Set template %s to %s failed when [check time series existence on DataNode] because ";
+ public static final String STARTPIPEPROCEDUREV2_EXECUTEFROMCALCULATEINFOFORTASK =
+ "StartPipeProcedureV2: executeFromCalculateInfoForTask({})";
+ public static final String STARTPIPEPROCEDUREV2_EXECUTEFROMOPERATEONDATANODES =
+ "StartPipeProcedureV2: executeFromOperateOnDataNodes({})";
+ public static final String STARTPIPEPROCEDUREV2_EXECUTEFROMVALIDATETASK =
+ "StartPipeProcedureV2: executeFromValidateTask({})";
+ public static final String STARTPIPEPROCEDUREV2_EXECUTEFROMWRITECONFIGNODECONSENSUS =
+ "StartPipeProcedureV2: executeFromWriteConfigNodeConsensus({})";
+ public static final String STARTPIPEPROCEDUREV2_ROLLBACKFROMCALCULATEINFOFORTASK =
+ "StartPipeProcedureV2: rollbackFromCalculateInfoForTask({})";
+ public static final String STARTPIPEPROCEDUREV2_ROLLBACKFROMOPERATEONDATANODES =
+ "StartPipeProcedureV2: rollbackFromOperateOnDataNodes({})";
+ public static final String STARTPIPEPROCEDUREV2_ROLLBACKFROMVALIDATETASK =
+ "StartPipeProcedureV2: rollbackFromValidateTask({})";
+ public static final String STARTPIPEPROCEDUREV2_ROLLBACKFROMWRITECONFIGNODECONSENSUS =
+ "StartPipeProcedureV2: rollbackFromWriteConfigNodeConsensus({})";
+ public static final String START_INACTIVE_ROLLBACK_OF_CQ = "Start [INACTIVE] rollback of CQ {}";
+ public static final String START_ROLLBACK_ADD_COLUMN_TO_TABLE_WHEN_ADDING_COLUMN =
+ "Start rollback Add column to table {}.{} when adding column";
+ public static final String START_ROLLBACK_COMMIT_SET_SCHEMAENGINE_TEMPLATE_ON_PATH =
+ "Start rollback commit set schemaengine template {} on path {}";
+ public static final String START_ROLLBACK_PRE_CREATE_TABLE =
+ "Start rollback pre create table {}.{}";
+ public static final String START_ROLLBACK_PRE_RELEASE_INFO_FOR_TABLE_WHEN_SETTING_PROPERTIES =
+ "Start rollback pre release info for table {}.{} when setting properties";
+ public static final String START_ROLLBACK_PRE_RELEASE_INFO_OF_TABLE =
+ "Start rollback pre release info of table {}.{}";
+ public static final String START_ROLLBACK_PRE_RELEASE_SCHEMAENGINE_TEMPLATE_ON_PATH =
+ "Start rollback pre release schemaengine template {} on path {}";
+ public static final String START_ROLLBACK_PRE_RELEASE_TABLE =
+ "Start rollback pre release table {}.{}";
+ public static final String START_ROLLBACK_PRE_SET_SCHEMAENGINE_TEMPLATE_ON_PATH =
+ "Start rollback pre set schemaengine template {} on path {}";
+ public static final String START_ROLLBACK_RENAMING_COLUMN_TO_TABLE_ON_CONFIGNODE =
+ "Start rollback Renaming column to table {}.{} on configNode";
+ public static final String START_ROLLBACK_RENAMING_TABLE_ON_CONFIGNODE =
+ "Start rollback Renaming table {}.{} on configNode";
+ public static final String START_ROLLBACK_SET_PROPERTIES_TO_TABLE =
+ "Start rollback set properties to table {}.{}";
+ public static final String STATE_STUCK_AT = "State stuck at ";
+ public static final String STOPPIPEPROCEDUREV2_EXECUTEFROMCALCULATEINFOFORTASK =
+ "StopPipeProcedureV2: executeFromCalculateInfoForTask({})";
+ public static final String STOPPIPEPROCEDUREV2_EXECUTEFROMOPERATEONDATANODES =
+ "StopPipeProcedureV2: executeFromOperateOnDataNodes({})";
+ public static final String STOPPIPEPROCEDUREV2_EXECUTEFROMVALIDATETASK =
+ "StopPipeProcedureV2: executeFromValidateTask({})";
+ public static final String STOPPIPEPROCEDUREV2_EXECUTEFROMWRITECONFIGNODECONSENSUS =
+ "StopPipeProcedureV2: executeFromWriteConfigNodeConsensus({})";
+ public static final String STOPPIPEPROCEDUREV2_ROLLBACKFROMCALCULATEINFOFORTASK =
+ "StopPipeProcedureV2: rollbackFromCalculateInfoForTask({})";
+ public static final String STOPPIPEPROCEDUREV2_ROLLBACKFROMOPERATEONDATANODES =
+ "StopPipeProcedureV2: rollbackFromOperateOnDataNodes({})";
+ public static final String STOPPIPEPROCEDUREV2_ROLLBACKFROMVALIDATETASK =
+ "StopPipeProcedureV2: rollbackFromValidateTask({})";
+ public static final String STOPPIPEPROCEDUREV2_ROLLBACKFROMWRITECONFIGNODECONSENSUS =
+ "StopPipeProcedureV2: rollbackFromWriteConfigNodeConsensus({})";
+ public static final String STOP_DATA_NODE_MEETS_ERROR_ERROR_DATANODE =
+ "{}, Stop Data Node meets error, error datanode: {}";
+ public static final String STOP_DATA_NODE_SUCCESS = "{}, Stop Data Node {} success.";
+ public static final String SUBMITTED_ASYNC_CONSENSUS_PIPE_CREATION =
+ "{}, Submitted async consensus pipe creation: {}";
+ public static final String SUBSCRIPTION_META_SYNC_PROCEDURE_FINISHED_UPDATING_LAST_SYNC_VERSION =
+ "Subscription meta sync procedure finished, updating last sync version.";
+ public static final String SUCCESSFULLY_OPERATE_WILL_CLEAR_CACHE_TO_THE_DATA_REGIONS_ANYWAY =
+ "Successfully operate, will clear cache to the data regions anyway";
+ public static final String SUCCESSFULLY_RESTORED_WILL_SET_MODS_TO_THE_DATA_REGIONS_ANYWAY =
+ "Successfully restored, will set mods to the data regions anyway";
+ public static final String SUCCESSFULLY_STOPPED_AINODE = "Successfully stopped AINode {}";
+ public static final String TABLE_ALREADY_EXISTS = "Table '%s.%s' already exists.";
+ public static final String TABLE_NOT_EXISTS = "Table '%s.%s' not exists.";
+ public static final String TARGET_DEVICE_TEMPLATE_IS_NOT_ACTIVATED_ON_ANY_PATH_MATCHED =
+ "Target Device Template is not activated on any path matched by given path pattern";
+ public static final String TASK_CANNOT_GET_TASK_REPORT_FROM_DATANODE_LAST_REPORT_TIME =
+ "{} task {} cannot get task report from DataNode {}, last report time is {} ago";
+ public static final String THE_UPDATED_TABLE_HAS_THE_SAME_PROPERTIES_WITH_THE_ORIGINAL =
+ "The updated table has the same properties with the original one. Skip the procedure.";
+ public static final String TOPICMETASYNCPROCEDURE_ACQUIRELOCK_SKIP_THE_PROCEDURE_DUE_TO_THE_LAST_EXECUTION =
+ "TopicMetaSyncProcedure: acquireLock, skip the procedure due to the last execution time {}";
+ public static final String TOPICMETASYNCPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES =
+ "TopicMetaSyncProcedure: executeFromOperateOnConfigNodes";
+ public static final String TOPICMETASYNCPROCEDURE_EXECUTEFROMOPERATEONDATANODES =
+ "TopicMetaSyncProcedure: executeFromOperateOnDataNodes";
+ public static final String TOPICMETASYNCPROCEDURE_EXECUTEFROMVALIDATE =
+ "TopicMetaSyncProcedure: executeFromValidate";
+ public static final String TOPICMETASYNCPROCEDURE_ROLLBACKFROMOPERATEONCONFIGNODES =
+ "TopicMetaSyncProcedure: rollbackFromOperateOnConfigNodes";
+ public static final String TOPICMETASYNCPROCEDURE_ROLLBACKFROMOPERATEONDATANODES =
+ "TopicMetaSyncProcedure: rollbackFromOperateOnDataNodes";
+ public static final String TOPICMETASYNCPROCEDURE_ROLLBACKFROMVALIDATE =
+ "TopicMetaSyncProcedure: rollbackFromValidate";
+ public static final String UNEXPECTED_FAIL_TSSTATUS_IS = "Unexpected fail, tsStatus is ";
+ public static final String UNEXPECTED_STATE = "Unexpected state";
+ public static final String UNKNOWN_CREATECQSTATE = "Unknown CreateCQState: ";
+ public static final String UNKNOWN_CREATETRIGGERSTATE = "Unknown CreateTriggerState: ";
+ public static final String UNKNOWN_DROPTRIGGERSTATE = "Unknown DropTriggerState: ";
+ public static final String UNKNOWN_LOAD_BALANCE_STRATEGY = "Unknown load balance strategy: ";
+ public static final String UNKNOWN_PROCEDURE_TYPE = "Unknown Procedure type: ";
+ public static final String UNKNOWN_PROCEDURE_TYPE_2 = "Unknown Procedure type: {}";
+ public static final String UNKNOWN_STATE = "Unknown state: ";
+ public static final String UNKNOWN_STATE_DURING_EXECUTING_CREATEPIPEPLUGINPROCEDURE =
+ "Unknown state during executing createPipePluginProcedure, %s";
+ public static final String UNKNOWN_STATE_DURING_EXECUTING_OPERATEPIPEPROCEDURE =
+ "Unknown state during executing operatePipeProcedure, %s";
+ public static final String UNKNOWN_STATE_DURING_EXECUTING_OPERATESUBSCRIPTIONPROCEDURE =
+ "Unknown state during executing operateSubscriptionProcedure, %s";
+ public static final String UNKNOWN_STATE_DURING_EXECUTING_REMOVEAINODEPROCEDURE =
+ "Unknown state during executing removeAINodeProcedure, %s";
+ public static final String UNKNOWN_STATE_DURING_ROLLBACK_OPERATESUBSCRIPTIONPROCEDURE =
+ "Unknown state during rollback operateSubscriptionProcedure, %s";
+ public static final String UNKNOWN_STATE_FOR_ROLLBACK = "Unknown state for rollback: ";
+ public static final String UNRECOGNIZED_ADDTABLECOLUMNSTATE = "Unrecognized AddTableColumnState ";
+ public static final String UNRECOGNIZED_ALTERTABLECOLUMNDATATYPEPROCEDURE =
+ "Unrecognized AlterTableColumnDataTypeProcedure ";
+ public static final String UNRECOGNIZED_ALTERTIMESERIESDATATYPEPROCEDURE_STATE =
+ "Unrecognized AlterTimeSeriesDataTypeProcedure state ";
+ public static final String UNRECOGNIZED_CREATETABLESTATE = "Unrecognized CreateTableState ";
+ public static final String UNRECOGNIZED_DROPTABLECOLUMNSTATE =
+ "Unrecognized DropTableColumnState ";
+ public static final String UNRECOGNIZED_DROPTABLESTATE = "Unrecognized DropTableState ";
+ public static final String UNRECOGNIZED_LOG_TYPE = "unrecognized log type ";
+ public static final String UNRECOGNIZED_RENAMETABLECOLUMNSTATE =
+ "Unrecognized RenameTableColumnState ";
+ public static final String UNRECOGNIZED_RENAMETABLESTATE = "Unrecognized RenameTableState ";
+ public static final String UNRECOGNIZED_SETTEMPLATESTATE = "Unrecognized SetTemplateState ";
+ public static final String UNRECOGNIZED_STATE = "Unrecognized state ";
+ public static final String UNSETTEMPLATE_COSTS_MS = "UnsetTemplate-[{}] costs {}ms";
+ public static final String UNSET_TEMPLATE_FROM_FAILED_WHEN_CHECK_DATANODE_TEMPLATE_ACTIVATION_BECAUSE =
+ "Unset template %s from %s failed when [check DataNode template activation] because %s";
+ public static final String UNSET_TEMPLATE_ON = "Unset template {} on {}";
+ public static final String UNSUPPORTED_ROLL_BACK_STATE = "Unsupported roll back STATE [{}]";
+ public static final String UNSUPPORTED_STATE = "Unsupported state: ";
+ public static final String UPDATE_DATANODE_TTL_CACHE_FAILED = "Update dataNode ttl cache failed";
+ public static final String VALIDATE_TABLE_FOR_TABLE_WHEN_SETTING_PROPERTIES =
+ "Validate table for table {}.{} when setting properties";
+ public static final String WAITTASKFINISH_RETURNS_PROCESSING_WHICH_MEANS_THE_WAITING_HAS_BEEN_INTERRUPTED =
+ "waitTaskFinish() returns PROCESSING, which means the waiting has been interrupted, this procedure will end without rollback";
+
+ private ProcedureMessages() {}
+}
diff --git a/iotdb-core/confignode/src/main/i18n/zh/org/apache/iotdb/confignode/i18n/ConfigNodeMessages.java b/iotdb-core/confignode/src/main/i18n/zh/org/apache/iotdb/confignode/i18n/ConfigNodeMessages.java
new file mode 100644
index 0000000000000..c81c4ecbb4e2b
--- /dev/null
+++ b/iotdb-core/confignode/src/main/i18n/zh/org/apache/iotdb/confignode/i18n/ConfigNodeMessages.java
@@ -0,0 +1,492 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.i18n;
+
+public final class ConfigNodeMessages {
+
+ public static final String ACQUIRE_TRIGGERTABLELOCK = "acquire TriggerTableLock";
+ public static final String ACQUIRE_UDFTABLELOCK = "acquire UDFTableLock";
+ public static final String ACTIVATING = "Activating {}...";
+ public static final String ADJUSTREGIONGROUPNUM_THE_MAXIMUM_NUMBER_OF_DATAREGIONGROUPS_FOR =
+ "[AdjustRegionGroupNum] The maximum number of DataRegionGroups for Database: {} is adjusted to: {}";
+ public static final String ADJUSTREGIONGROUPNUM_THE_MAXIMUM_NUMBER_OF_SCHEMAREGIONGROUPS_FOR =
+ "[AdjustRegionGroupNum] The maximum number of SchemaRegionGroups for Database: {} is adjusted to: {}";
+ public static final String ADJUSTREGIONGROUPNUM_THE_MINIMUM_NUMBER_OF_DATAREGIONGROUPS_FOR =
+ "[AdjustRegionGroupNum] The minimum number of DataRegionGroups for Database: {} is adjusted to: {}";
+ public static final String ADJUSTREGIONGROUPNUM_THE_MINIMUM_NUMBER_OF_SCHEMAREGIONGROUPS_FOR =
+ "[AdjustRegionGroupNum] The minimum number of SchemaRegionGroups for Database: {} is adjusted to: {}";
+ public static final String CANNOT_FIND_REGIONGROUP_FOR_REGION_WHEN_ADDREGIONNEWLOCATION_IN =
+ "Cannot find RegionGroup for region {} when addRegionNewLocation in {}";
+ public static final String CANNOT_FIND_REGIONGROUP_FOR_REGION_WHEN_REMOVEREGIONOLDLOCATION_IN =
+ "Cannot find RegionGroup for region {} when removeRegionOldLocation in {}";
+ public static final String CAN_ONLY_ALTER_DATATYPE_OF_FIELD_COLUMNS =
+ "Can only alter datatype of FIELD columns";
+ public static final String CAN_T_CLOSE_STANDALONELOG_FOR_CONFIGNODE_SIMPLECONSENSUS_MODE =
+ "Can't close StandAloneLog for ConfigNode SimpleConsensus mode, ";
+ public static final String CAN_T_CONNECT_TO_DATA_NODE = "无法连接到 DataNode:{}";
+ public static final String CAN_T_CONSTRUCT_CLUSTERSCHEMAINFO = "无法构建 ClusterSchemaInfo";
+ public static final String CAN_T_DELETE_TEMPORARY_SNAPSHOT_FILE_RETRYING =
+ "Can't delete temporary snapshot file: {}, retrying...";
+ public static final String CAN_T_FORCE_LOGWRITER_FOR_CONFIGNODE_FLUSHWALFORSIMPLECONSENSUS =
+ "Can't force logWriter for ConfigNode flushWALForSimpleConsensus";
+ public static final String CAN_T_FORCE_LOGWRITER_FOR_CONFIGNODE_SIMPLECONSENSUS_MODE =
+ "Can't force logWriter for ConfigNode SimpleConsensus mode";
+ public static final String CAN_T_SERIALIZE_CURRENT_CONFIGPHYSICALPLAN_FOR_CONFIGNODE_SIMPLECONSENSUS_MODE =
+ "Can't serialize current ConfigPhysicalPlan for ConfigNode SimpleConsensus mode";
+ public static final String CAN_T_START_CONFIGNODE_CONSENSUS_GROUP =
+ "Can't start ConfigNode consensus group!";
+ public static final String CHANGE_REGIONS_LEADER_ERROR_ON_DATE_NODE =
+ "Change regions leader error on Date node: {}";
+ public static final String CHECK_BEFORE_DROPPING_TOPIC_TOPIC_EXISTS =
+ "Check before dropping topic: {}, topic exists: {}";
+ public static final String CHECK_BEFORE_DROP_PIPE_PIPE_EXISTS =
+ "Check before drop pipe {}, pipe exists: {}.";
+ public static final String CLUSTERID_HAS_BEEN_GENERATED = "已生成 clusterID:{}";
+ public static final String CLUSTERID_HAS_BEEN_RECOVERED_FROM_SNAPSHOT = "已从快照恢复 clusterID:{}";
+ public static final String CLUSTERID_NOT_GENERATED_YET_SHOULD_NEVER_HAPPEN =
+ "clusterId not generated yet, should never happen.";
+ public static final String CONFIGNODESNAPSHOT_FINISH_TO_TAKE_SNAPSHOT_FOR_TIME_CONSUMPTION_MS =
+ "[ConfigNodeSnapshot] Finish to take snapshot for {}, time consumption: {} ms";
+ public static final String CONFIGNODESNAPSHOT_LOAD_SNAPSHOT_FOR_COST_MS =
+ "[ConfigNodeSnapshot] Load snapshot for {} cost {} ms";
+ public static final String CONFIGNODESNAPSHOT_LOAD_SNAPSHOT_SUCCESS_LATESTSNAPSHOTROOTDIR =
+ "[ConfigNodeSnapshot] Load snapshot success, latestSnapshotRootDir: {}";
+ public static final String CONFIGNODESNAPSHOT_START_TO_LOAD_SNAPSHOT_FOR_FROM =
+ "[ConfigNodeSnapshot] Start to load snapshot for {} from {}";
+ public static final String CONFIGNODESNAPSHOT_START_TO_TAKE_SNAPSHOT_FOR_INTO =
+ "[ConfigNodeSnapshot] Start to take snapshot for {} into {}";
+ public static final String CONFIGNODESNAPSHOT_TASK_SNAPSHOT_SUCCESS_SNAPSHOTDIR =
+ "[ConfigNodeSnapshot] Task snapshot success, snapshotDir: {}";
+ public static final String CONFIGNODE_EXITING = "ConfigNode 正在退出...";
+ public static final String CONFIGNODE_NEED_REDIRECT_TO_RETRY =
+ "ConfigNode need redirect to {}, retry {} ...";
+ public static final String CONFIGNODE_PORT_CHECK_SUCCESSFUL = "ConfigNode 端口检查成功。";
+ public static final String CONFIGNODE_RPC_SERVICE_FINISHED_TO_REMOVE_AINODE_RESULT =
+ "ConfigNode RPC Service finished to remove AINode, result: {}";
+ public static final String CONFIGNODE_RPC_SERVICE_FINISHED_TO_REMOVE_DATANODE_REQ_RESULT =
+ "ConfigNode RPC Service finished to remove DataNode, req: {}, result: {}";
+ public static final String CONFIGNODE_RPC_SERVICE_START_TO_REMOVE_AINODE =
+ "ConfigNode RPC Service start to remove AINode";
+ public static final String CONFIGNODE_RPC_SERVICE_START_TO_REMOVE_DATANODE_REQ =
+ "ConfigNode RPC Service start to remove DataNode, req: {}";
+ public static final String CONFIGNODE_SIMPLECONSENSUSFILE_HAS_EXISTED_FILEPATH =
+ "ConfigNode SimpleConsensusFile has existed,filePath:{}";
+ public static final String CONFIG_REGION_LISTENING_QUEUE_LISTEN_TO_SNAPSHOT_FAILED_THE_HISTORICAL =
+ "Config Region Listening Queue Listen to snapshot failed, the historical data may not be transferred.";
+ public static final String CONFIG_REGION_LISTENING_QUEUE_LISTEN_TO_SNAPSHOT_FAILED_WHEN_STARTUP =
+ "Config Region Listening Queue Listen to snapshot failed when startup, snapshot will be tried again when starting schema transferring pipes";
+ public static final String CONTINUOUS_QUERY_MIN_EVERY_INTERVAL_IN_MS_SHOULD_BE_GREATER =
+ "continuous_query_min_every_interval_in_ms 应大于 0,但当前值为 {},忽略并使用默认值 {}";
+ public static final String CONTINUOUS_QUERY_SUBMIT_THREAD_SHOULD_BE_GREATER_THAN_0 =
+ "continuous_query_submit_thread 应大于 0,但当前值为 {},忽略并使用默认值 {}";
+ public static final String COULDN_T_LOAD_CONFIGNODE_CONF_FILE_REJECT_CONFIGNODE_STARTUP =
+ "无法加载 ConfigNode 配置文件,拒绝启动 ConfigNode。";
+ public static final String COULDN_T_LOAD_THE_CONFIGURATION_FROM_ANY_OF_THE_KNOWN =
+ "Couldn't load the configuration {} from any of the known sources.";
+ public static final String CREATEREGIONGROUPS_DATABASE_HAS_BEEN_DELETED_CORRESPONDING_REGIONGROUPS =
+ "[CreateRegionGroups] Database {} has been deleted, corresponding RegionGroups will not be created.";
+ public static final String CREATE_CONFIGNODE_SIMPLECONSENSUSFILE =
+ "Create ConfigNode SimpleConsensusFile: {}";
+ public static final String CREATE_CONFIGNODE_SIMPLECONSENSUSFILE_FAILED_FILEPATH =
+ "Create ConfigNode SimpleConsensusFile failed, filePath: {}";
+ public static final String CURRENT_NODE_NODEID_IP_PORT_AS_CONFIG_REGION_LEADER_IS =
+ "Current node [nodeId: {}, ip:port: {}] as config region leader is ready to work";
+ public static final String CURRENT_NODE_NODEID_IP_PORT_BECOMES_CONFIG_REGION_LEADER =
+ "Current node [nodeId: {}, ip:port: {}] becomes config region leader";
+ public static final String CURRENT_NODE_NODEID_IP_PORT_IS_NO_LONGER_THE_LEADER =
+ "Current node [nodeId:{}, ip:port: {}] is no longer the leader, ";
+ public static final String DATABASE_INCONSISTENCY_DETECTED_WHEN_ADJUSTING_MAX_REGION_GROUP_COUNT_MESSAGE =
+ "Database inconsistency detected when adjusting max region group count, message: {}, will be corrected by the following adjusting plans";
+ public static final String DATABASE_NOT_EXIST = "数据库不存在";
+ public static final String DATA_REGION_CONSENSUS_PROTOCOL_CLASS =
+ "data_region_consensus_protocol_class";
+ public static final String DEACTIVATING = "Deactivating {}...";
+ public static final String DEFAULT_CHARSET_IS = "{} default charset is: {}";
+ public static final String DELETED_FAILED_TAKE_APPROPRIATE_ACTION =
+ "{} deleted failed; take appropriate action.";
+ public static final String DELETE_USELESS_PROCEDURE_WAL_DIR_FAIL = "删除无用的过程 WAL 目录失败。";
+ public static final String DESERIALIZATION_ERROR_FOR_WRITE_PLAN_REQUEST_BYTEBUFFER =
+ "Deserialization error for write plan, request: {}, bytebuffer: {}";
+ public static final String DOES_NOT_EXIST = "%s does not exist";
+ public static final String DROPPING_TAG_OR_TIME_COLUMN_IS_NOT_SUPPORTED = "不支持删除标签列或时间列。";
+ public static final String DROP_CQ_FAILED_BECAUSE_ITS_MD5_DOESN_T_MATCH =
+ "Drop CQ {} failed, because its MD5 doesn't match.";
+ public static final String DROP_CQ_FAILED_BECAUSE_IT_DOESN_T_EXIST =
+ "Drop CQ {} failed, because it doesn't exist.";
+ public static final String DROP_CQ_SUCCESSFULLY = "Drop CQ {} successfully.";
+ public static final String DUPLICATED_TEMPLATE_NAME = "Duplicated template name: ";
+ public static final String ENABLESEPARATIONOFADMINPOWERS_IS_NOT_SUPPORTED =
+ "不支持 EnableSeparationOfAdminPowers";
+ public static final String ENVIRONMENT_VARIABLES = "{} environment variables: {}";
+ public static final String ERROR_GET_MATCHED_PATHS_IN_GIVEN_LEVEL =
+ "Error get matched paths in given level.";
+ public static final String ERROR_GET_MATCHED_PATHS_IN_NEXT_LEVEL =
+ "Error get matched paths in next level.";
+ public static final String ERROR_OCCURRED_WHEN_GET_PATHS_SET_ON_TEMPLATE =
+ "Error occurred when get paths set on template {}";
+ public static final String ERROR_STARTING = "Error starting";
+ public static final String EXECUTE_ALTERDATABASE_WITH_RESULT =
+ "Execute AlterDatabase: {} with result: {}";
+ public static final String EXECUTE_GETCLUSTERID_WITH_RESULT =
+ "Execute getClusterId with result {}";
+ public static final String EXECUTE_GETSYSTEMCONFIGURATION_WITH_RESULT =
+ "Execute GetSystemConfiguration with result {}";
+ public static final String EXECUTE_NON_QUERY_PLAN_FAILED = "执行非查询计划失败";
+ public static final String EXECUTE_QUERY_PLAN_FAILED = "执行查询计划失败";
+ public static final String EXECUTE_REGISTERAINODEREQUEST_WITH_RESULT =
+ "Execute RegisterAINodeRequest {} with result {}";
+ public static final String EXECUTE_REGISTERCONFIGNODEREQUEST_WITH_RESULT =
+ "Execute RegisterConfigNodeRequest {} with result {}";
+ public static final String EXECUTE_REGISTERDATANODEREQUEST_WITH_RESULT =
+ "Execute RegisterDataNodeRequest {} with result {}";
+ public static final String EXECUTE_RESTARTAINODEREQUEST_WITH_RESULT =
+ "Execute RestartAINodeRequest {} with result {}";
+ public static final String EXECUTE_RESTARTDATANODEREQUEST_WITH_RESULT =
+ "Execute RestartDataNodeRequest {} with result {}";
+ public static final String EXECUTE_SETDATABASE_WITH_RESULT =
+ "Execute SetDatabase: {} with result: {}";
+ public static final String FAILED_IN_THE_READ_API_EXECUTING_THE_CONSENSUS_LAYER_DUE =
+ "执行共识层读取 API 失败:";
+ public static final String FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE =
+ "执行共识层写入 API 失败:";
+ public static final String FAILED_ON_AINODE = "{} failed on AINode {}";
+ public static final String FAILED_ON_AINODE_RETRYING = "{} failed on AINode {}, retrying {}...";
+ public static final String FAILED_ON_CONFIGNODE = "{} failed on ConfigNode {}";
+ public static final String FAILED_ON_CONFIGNODE_BECAUSE_RETRYING =
+ "{} failed on ConfigNode {}, because {}, retrying {}...";
+ public static final String FAILED_ON_DATANODE = "{} failed on DataNode {}";
+ public static final String FAILED_ON_DATANODE_RETRYING =
+ "{} failed on DataNode {}, retrying {}...";
+ public static final String FAILED_TO_ALTER_PIPE = "Failed to alter pipe";
+ public static final String FAILED_TO_CHECK_SCHEMA_REGION_USING_TEMPLATE_ON_DATANODE =
+ "Failed to check schema region using template on DataNode {}, {}";
+ public static final String FAILED_TO_CHECK_TIMESERIES_EXISTENCE_ON_DATANODE =
+ "Failed to check timeseries existence on DataNode {}, {}";
+ public static final String FAILED_TO_COUNT_PATHS_USING_TEMPLATE_ON_DATANODE =
+ "Failed to count paths using template on DataNode {}, {}";
+ public static final String FAILED_TO_CREATE_MULTIPLE_PIPES = "Failed to create multiple pipes";
+ public static final String FAILED_TO_CREATE_PIPE = "Failed to create pipe";
+ public static final String FAILED_TO_CREATE_PIPEPLUGIN_SOURCE_PIPEPLUGIN_FAILED_TO_LOAD =
+ "Failed to create PipePlugin [%s], source PipePlugin [%s] failed to load: %s";
+ public static final String FAILED_TO_CREATE_PIPEPLUGIN_SOURCE_PIPEPLUGIN_JAR_DOES_NOT_EXIST =
+ "Failed to create PipePlugin [%s], source PipePlugin [%s] jar [%s] does not exist in install dir.";
+ public static final String FAILED_TO_CREATE_PIPEPLUGIN_THE_SAME_NAME_PIPEPLUGIN_HAS_BEEN =
+ "Failed to create PipePlugin [%s], the same name PipePlugin has been created";
+ public static final String FAILED_TO_CREATE_PIPEPLUGIN_THIS_PIPEPLUGIN_EXISTS_BUT_FAILED_TO =
+ "Failed to create PipePlugin [%s], this PipePlugin exists but failed to load: %s";
+ public static final String FAILED_TO_CREATE_TEMPLATE_BECAUSE_TEMPLATE_NAME_EXISTS =
+ "Failed to create template, because template name {} exists";
+ public static final String FAILED_TO_CREATE_TRIGGER_THE_SAME_NAME_JAR_BUT_DIFFERENT =
+ "Failed to create trigger [%s], the same name Jar [%s] but different MD5 [%s] has existed";
+ public static final String FAILED_TO_CREATE_TRIGGER_THE_SAME_NAME_TRIGGER_HAS_BEEN =
+ "Failed to create trigger [%s], the same name trigger has been created";
+ public static final String FAILED_TO_CREATE_UDF_THE_SAME_NAME_JAR_BUT_DIFFERENT =
+ "Failed to create UDF [%s], the same name Jar [%s] but different MD5 [%s] has existed";
+ public static final String FAILED_TO_CREATE_UDF_THE_SAME_NAME_UDF_HAS_BEEN =
+ "Failed to create UDF [%s], the same name UDF has been created";
+ public static final String FAILED_TO_DECREASE_LISTENER_REFERENCE =
+ "Failed to decrease listener reference";
+ public static final String FAILED_TO_DROP_PIPE = "Failed to drop pipe";
+ public static final String FAILED_TO_DROP_PIPEPLUGIN_THE_PIPEPLUGIN_IS_A_BUILT_IN =
+ "Failed to drop PipePlugin [%s], the PipePlugin is a built-in PipePlugin";
+ public static final String FAILED_TO_DROP_PIPEPLUGIN_THIS_PIPEPLUGIN_HAS_NOT_BEEN_CREATED =
+ "Failed to drop PipePlugin [%s], this PipePlugin has not been created";
+ public static final String FAILED_TO_DROP_TRIGGER_THIS_TRIGGER_HAS_NOT_BEEN_CREATED =
+ "Failed to drop trigger [%s], this trigger has not been created";
+ public static final String FAILED_TO_DROP_UDF_THIS_UDF_HAS_NOT_BEEN_CREATED =
+ "Failed to drop UDF [%s], this UDF has not been created";
+ public static final String FAILED_TO_FETCH_SCHEMAENGINE_BLACK_LIST_ON_DATANODE =
+ "Failed to fetch schemaengine black list on DataNode {}, {}";
+ public static final String FAILED_TO_GET_FIELD = "Failed to get field {}";
+ public static final String FAILED_TO_HANDLE_LEADER_CHANGE = "Failed to handle leader change";
+ public static final String FAILED_TO_HANDLE_META_CHANGES = "Failed to handle meta changes";
+ public static final String FAILED_TO_INCREASE_LISTENER_REFERENCE =
+ "Failed to increase listener reference";
+ public static final String FAILED_TO_LOAD_PIPE_INFO_FROM_SNAPSHOT =
+ "Failed to load pipe info from snapshot, ";
+ public static final String FAILED_TO_LOAD_PIPE_PLUGIN_INFO_FROM_SNAPSHOT =
+ "Failed to load pipe plugin info from snapshot";
+ public static final String FAILED_TO_LOAD_PIPE_TASK_INFO_FROM_SNAPSHOT =
+ "Failed to load pipe task info from snapshot";
+ public static final String FAILED_TO_LOAD_PLUGIN_CLASS_FOR_PLUGIN_WHEN_LOADING_SNAPSHOT =
+ "Failed to load plugin class for plugin [{}] when loading snapshot [{}] ";
+ public static final String FAILED_TO_LOAD_SNAPSHOT_BECAUSE_GET_NULL_DATABASE_NAME =
+ "Failed to load snapshot because get null database name";
+ public static final String FAILED_TO_LOAD_SNAPSHOT_BECAUSE_SNAPSHOT_DIR_NOT_EXISTS =
+ "Failed to load snapshot, because snapshot dir [{}] not exists.";
+ public static final String FAILED_TO_LOAD_SNAPSHOT_OF_CQINFO_SNAPSHOT_FILE_DOES_NOT =
+ "Failed to load snapshot of CQInfo, snapshot file [{}] does not exist.";
+ public static final String FAILED_TO_LOAD_SNAPSHOT_OF_TEMPLATEPRESETTABLE_SNAPSHOT_FILE_IS_NOT =
+ "Failed to load snapshot of TemplatePreSetTable,snapshot file [{}] is not a valid file.";
+ public static final String FAILED_TO_LOAD_SNAPSHOT_OF_TTLINFO_SNAPSHOT_FILE_DOES_NOT =
+ "Failed to load snapshot of TTLInfo, snapshot file [{}] does not exist.";
+ public static final String FAILED_TO_LOAD_SNAPSHOT_SNAPSHOT_FILE_IS_NOT_EXIST =
+ "Failed to load snapshot, snapshot file [{}] is not exist.";
+ public static final String FAILED_TO_LOAD_SNAPSHOT_SNAPSHOT_FILE_IS_NOT_EXIST_2 =
+ "Failed to load snapshot,snapshot file [{}] is not exist.";
+ public static final String FAILED_TO_LOAD_SUBSCRIPTION_SNAPSHOT_SNAPSHOT_FILE_IS_NOT_EXIST =
+ "Failed to load subscription snapshot, snapshot file {} is not exist.";
+ public static final String FAILED_TO_ON_CONFIGNODE_RESPONSE =
+ "Failed to {} on ConfigNode: {}, response: {}";
+ public static final String FAILED_TO_ON_DATANODE = "Failed to {} on DataNode {}, {}";
+ public static final String FAILED_TO_ON_DATANODE_EXCEPTION =
+ "Failed to {} on DataNode: {}, exception: {}";
+ public static final String FAILED_TO_ON_DATANODE_RESPONSE =
+ "Failed to {} on DataNode: {}, response: {}";
+ public static final String FAILED_TO_OPERATE_PIPE = "Failed to operate pipe";
+ public static final String FAILED_TO_SET_PIPE_STATUS = "Failed to set pipe status";
+ public static final String FAILED_TO_SET_PIPE_STATUS_WITH_STOPPED_BY_RUNTIME_EXCEPTION =
+ "Failed to set pipe status with stopped-by-runtime-exception flag";
+ public static final String FAILED_TO_TAKE_SNAPSHOT_BECAUSE_CREATE_TMP_DIR_FAIL =
+ "Failed to take snapshot, because create tmp dir [{}] fail.";
+ public static final String FAILED_TO_TAKE_SNAPSHOT_BECAUSE_SNAPSHOT_DIR_IS_ALREADY_EXIST =
+ "Failed to take snapshot, because snapshot dir [{}] is already exist.";
+ public static final String FAILED_TO_TAKE_SNAPSHOT_BECAUSE_SNAPSHOT_FILE_IS_ALREADY_EXIST =
+ "Failed to take snapshot, because snapshot file [{}] is already exist.";
+ public static final String FAILED_TO_TAKE_SNAPSHOT_OF_CQINFO_BECAUSE_SNAPSHOT_FILE_IS =
+ "Failed to take snapshot of CQInfo, because snapshot file [{}] is already exist.";
+ public static final String FAILED_TO_TAKE_SNAPSHOT_OF_TEMPLATEPRESETTABLE_BECAUSE_SNAPSHOT_FILE_IS =
+ "Failed to take snapshot of TemplatePreSetTable, because snapshot file [{}] is already exist.";
+ public static final String FAILED_TO_TAKE_SNAPSHOT_OF_TTLINFO_BECAUSE_SNAPSHOT_FILE_IS =
+ "Failed to take snapshot of TTLInfo, because snapshot file [{}] is already exist.";
+ public static final String FAILED_TO_TAKE_SUBSCRIPTION_SNAPSHOT_BECAUSE_SNAPSHOT_FILE_IS_ALREADY =
+ "Failed to take subscription snapshot, because snapshot file {} is already exist.";
+ public static final String FAILED_TO_UPDATE_CONFIG_FILE = "更新配置文件失败";
+ public static final String FILE_NOT_EXISTS = "File {} not exists";
+ public static final String FOR_RECEIVES = "{} for {} receives: {}";
+ public static final String GET_DATANODE_CPU_CORE_FAIL_WILL_BE_TREATED_AS_ZERO =
+ "Get DataNode {} cpu core fail, will be treated as zero.";
+ public static final String GET_PIPEPLUGIN_JAR_FAILED = "Get PipePlugin_Jar failed";
+ public static final String GET_TRIGGERJAR_FAILED = "Get TriggerJar failed";
+ public static final String GET_UDF_JAR_FAILED = "Get UDF_Jar failed";
+ public static final String GET_URL_FAILED = "获取 URL 失败";
+ public static final String GET_USER_OR_ROLE_PERMISSIONINFO_FAILED_BECAUSE =
+ "get user or role permissionInfo failed because ";
+ public static final String HANDLING_CONSUMER_GROUP_META_CHANGES =
+ "Handling consumer group meta changes ...";
+ public static final String HANDLING_PIPE_META_CHANGES = "Handling pipe meta changes ...";
+ public static final String HANDLING_TOPIC_META_CHANGES = "Handling topic meta changes ...";
+ public static final String HAS_REGISTERED_SUCCESSFULLY_WAITING_FOR_THE_LEADER_S_SCHEDULING_TO =
+ "{} {} has registered successfully. Waiting for the leader's scheduling to join the cluster: {}.";
+ public static final String HAS_SUCCESSFULLY_RESTARTED_AND_JOINED_THE_CLUSTER =
+ "{} has successfully restarted and joined the cluster: {}.";
+ public static final String HAS_SUCCESSFULLY_STARTED_AND_JOINED_THE_CLUSTER =
+ "{} has successfully started and joined the cluster: {}.";
+ public static final String ID_TOOK_SNAPSHOT_FAIL = "{} id {} took snapshot fail";
+ public static final String INITSTANDALONECONFIGNODE_MEETS_ERROR_CAN_T_FIND_STANDALONE_LOG_FILES_FILEPATH =
+ "InitStandAloneConfigNode meets error, can't find standalone log files, filePath: {}";
+ public static final String INVALID_AUTHOR_TYPE_ORDINAL = "无效的 Author 类型序号";
+ public static final String IOTDB_STARTED = "IoTDB started";
+ public static final String IS_DEACTIVATED = "{} is deactivated.";
+ public static final String IS_IN_RESTARTING_PROCESS = "{} is in restarting process...";
+ public static final String LEADER_DISTRIBUTION_POLICY = "leader_distribution_policy";
+ public static final String LEADER_HAS_NOT_BEEN_ELECTED_YET_WAIT_FOR_1_SECOND =
+ "Leader has not been elected yet, wait for 1 second";
+ public static final String LOAD_FAILED_IT_WILL_BE_DELETED = "Load {} failed, it will be deleted.";
+ public static final String LOAD_PROCEDURE_WAL_FAILED = "Load procedure wal failed.";
+ public static final String LOAD_SNAPSHOT_ERROR = "加载快照出错";
+ public static final String MAKE_DIRS = "Make dirs: {}";
+ public static final String MEET_ERROR_WHEN_DEACTIVATE_CONFIGNODE =
+ "Meet error when deactivate ConfigNode";
+ public static final String MEET_ERROR_WHEN_DOING_START_CHECKING =
+ "Meet error when doing start checking";
+ public static final String MEET_ERROR_WHILE_STARTING_UP = "Meet error while starting up.";
+ public static final String NEW_TYPE_IS_NOT_COMPATIBLE_WITH_THE_EXISTING_ONE =
+ "New type %s is not compatible with the existing one %s";
+ public static final String NODE_IS_ALREADY_IN_REGION_LOCATIONS_WHEN_ADDREGIONNEWLOCATION_IN =
+ "Node is already in region locations when addRegionNewLocation in {}, ";
+ public static final String NODE_IS_NOT_IN_REGION_LOCATIONS_WHEN_REMOVEREGIONOLDLOCATION_IN =
+ "Node is not in region locations when removeRegionOldLocation in {}, ";
+ public static final String OLD_PROCEDURE_FILES_HAVE_BEEN_LOADED_SUCCESSFULLY_TAKING_SNAPSHOT =
+ "Old procedure files have been loaded successfully, taking snapshot...";
+ public static final String PARTITIONTABLECLEANER_THE_TIMEPARTITIONS_ARE_REMOVED_FROM_DATABASE =
+ "[PartitionTableCleaner] The TimePartitions: {} are removed from Database: {}";
+ public static final String PATH1_SHOULD_NOT_BE_NULL = "Path1 should not be null";
+ public static final String PIPEMETASYNCER_IS_TRYING_TO_RESTART_THE_PIPES =
+ "PipeMetaSyncer is trying to restart the pipes: {}";
+ public static final String PIPE_IS_USING_EXTERNAL_SOURCE_SKIP_REGION =
+ "Pipe {} is using external source, skip region leader change. PipeHandleLeaderChangePlan: {}";
+ public static final String PLAN_TYPE_IS_NOT_SUPPORTED = "Plan type %s is not supported.";
+ public static final String PLEASE_SET_THE_CN_SEED_CONFIG_NODE_PARAMETER_IN_IOTDB =
+ "Please set the cn_seed_config_node parameter in iotdb-system.properties file.";
+ public static final String PORTS_USED_IN_CONFIGNODE_HAVE_REPEAT =
+ "ports used in configNode have repeat.";
+ public static final String REACH_EOF = "Reach eof";
+ public static final String RECORDING_CONSUMER_GROUP_META = "Recording consumer group meta: {}";
+ public static final String RECORDING_TOPIC_META = "Recording topic meta: {}";
+ public static final String RECOVERED_CONSENSUS_PIPES_AS_RUNNING_DURING_SNAPSHOT_LOAD =
+ "Recovered consensus pipes {} as RUNNING during snapshot load.";
+ public static final String RELEASE_TRIGGERTABLELOCK = "release TriggerTableLock";
+ public static final String RELEASE_UDFTABLELOCK = "release UDFTableLock";
+ public static final String REMOVED_THE_AINODE_FROM_CLUSTER = "Removed the AINode {} from cluster";
+ public static final String REMOVED_THE_DATANODE_FROM_CLUSTER =
+ "Removed the datanode {} from cluster";
+ public static final String REMOVE_ONLINE_CONFIGNODE_FAILED = "Remove online ConfigNode failed.";
+ public static final String REPORTING_CONFIGNODE_SHUTDOWN_FAILED_THE_CLUSTER_WILL_STILL_TAKE_THE =
+ "Reporting ConfigNode shutdown failed. The cluster will still take the current ConfigNode as Running for a few seconds.";
+ public static final String RETRY_WAIT_FAILED = "重试等待失败。";
+ public static final String ROUTE_PRIORITY_POLICY = "route_priority_policy";
+ public static final String SCHEMA_OF_MEASUREMENT_IS_NOT_COMPATIBLE_WITH_EXISTING_MEASUREMENT_IN =
+ "Schema of measurement %s is not compatible with existing measurement in template %s";
+ public static final String SCHEMA_REGION_CONSENSUS_PROTOCOL_CLASS =
+ "schema_region_consensus_protocol_class";
+ public static final String SEND_RPC_TO_DATA_NODE_FOR_CHANGING_REGIONS_LEADER_ON =
+ "Send RPC to data node: {} for changing regions leader on it";
+ public static final String SETTTL_THE_TTL_OF_DATABASE_IS_ADJUSTED_TO =
+ "[SetTTL] The ttl of Database: {} is adjusted to: {}";
+ public static final String SNAPSHOT_DIRECTORY_CAN_NOT_BE_CREATED =
+ "snapshot directory [{}] can not be created.";
+ public static final String SNAPSHOT_DIRECTORY_IS_NOT_EMPTY =
+ "Snapshot directory [{}] is not empty.";
+ public static final String SNAPSHOT_DIRECTORY_IS_NOT_EXIST_CAN_NOT_LOAD_SNAPSHOT_WITH =
+ "snapshot directory [{}] is not exist, can not load snapshot with this directory.";
+ public static final String SNAPSHOT_DIRECTORY_IS_NOT_EXIST_START_TO_CREATE_IT =
+ "snapshot directory [{}] is not exist,start to create it.";
+ public static final String STARTING_IOTDB = "Starting IoTDB {}";
+ public static final String START_CONFIGNODE_FAILED_BECAUSE_COULDN_T_MAKE_SYSTEM_DIRS =
+ "Start ConfigNode failed, because couldn't make system dirs: %s.";
+ public static final String START_READING_CONFIGNODE_CONF_FILE =
+ "start reading ConfigNode conf file: {}";
+ public static final String SUCCESSFULLY_APPLY_CONFIGNODE_CURRENT_CONFIGNODEGROUP =
+ "Successfully apply ConfigNode: {}. Current ConfigNodeGroup: {}";
+ public static final String SUCCESSFULLY_CHECK_SCHEMA_REGION_USING_TEMPLATE_ON_DATANODE =
+ "Successfully check schema region using template on DataNode: {}";
+ public static final String SUCCESSFULLY_CHECK_TIMESERIES_EXISTENCE_ON_DATANODE =
+ "Successfully check timeseries existence on DataNode: {}";
+ public static final String SUCCESSFULLY_COUNT_PATHS_USING_TEMPLATE_ON_DATANODE =
+ "Successfully count paths using template on DataNode: {}";
+ public static final String SUCCESSFULLY_FETCH_SCHEMAENGINE_BLACK_LIST_ON_DATANODE =
+ "Successfully fetch schemaengine black list on DataNode: {}";
+ public static final String SUCCESSFULLY_INITIALIZE_CONFIGMANAGER =
+ "Successfully initialize ConfigManager.";
+ public static final String SUCCESSFULLY_ON_CONFIGNODE = "Successfully {} on ConfigNode: {}";
+ public static final String SUCCESSFULLY_ON_DATANODE = "Successfully {} on DataNode: {}";
+ public static final String SUCCESSFULLY_REMOVE_CONFIGNODE_CURRENT_CONFIGNODEGROUP =
+ "Successfully remove ConfigNode: {}. Current ConfigNodeGroup: {}";
+ public static final String SUCCESSFULLY_SETUP_INTERNAL_SERVICES =
+ "Successfully setup internal services.";
+ public static final String SUCCESSFULLY_UPDATE_NODE_S_VERSION =
+ "Successfully update Node {} 's version.";
+ public static final String SYSTEMPROPERTIES_NORMALIZE_FROM_TO_FOR_COMPATIBILITY =
+ "[SystemProperties] Normalize {} from {} to {} for compatibility.";
+ public static final String SYSTEMPROPERTIES_STORE_CONFIG_NODE_ID =
+ "[SystemProperties] store config_node_id: {}";
+ public static final String SYSTEMPROPERTIES_STORE_IS_SEED_CONFIG_NODE =
+ "[SystemProperties] store is_seed_config_node: {}";
+ public static final String TAKE_SNAPSHOT_ERROR = "创建快照出错";
+ public static final String TAKING_SNAPSHOT_FAIL_PROCEDURE_UPGRADE_FAIL =
+ "Taking snapshot fail, procedure upgrade fail";
+ public static final String TEMPLATE_ALREADY_EXISTS_ON = "Template already exists on ";
+ public static final String TEMPLATE_DOES_NOT_EXIST = "Template %s does not exist";
+ public static final String TEMPLATE_FAILED_TO_TAKE_SNAPSHOT_BECAUSE_SNAPSHOT_FILE_IS_ALREADY =
+ "template failed to take snapshot, because snapshot file [{}] is already exist.";
+ public static final String TEMPLATE_IS_NOT_SET_ON_PATH = "Template %s is not set on path %s";
+ public static final String TEMPLATE_WITH_ID_DOES_NOT_EXIST = "Template with id=%s does not exist";
+ public static final String THERE_ARE_AI_NODES_IN_CLUSTER_AFTER_EXECUTED_REMOVEAINODEPLAN =
+ "{}, There are {} AI nodes in cluster after executed RemoveAINodePlan";
+ public static final String THERE_ARE_AI_NODES_IN_CLUSTER_BEFORE_EXECUTED_REMOVEAINODEPLAN =
+ "{}, There are {} AI nodes in cluster before executed RemoveAINodePlan";
+ public static final String THERE_ARE_DATA_NODE_IN_CLUSTER_AFTER_EXECUTED_REMOVEDATANODEPLAN =
+ "{}, There are {} data node in cluster after executed RemoveDataNodePlan";
+ public static final String THERE_ARE_DATA_NODE_IN_CLUSTER_BEFORE_EXECUTED_REMOVEDATANODEPLAN =
+ "{}, There are {} data node in cluster before executed RemoveDataNodePlan";
+ public static final String THESE_REQUEST_TYPES_SHOULD_BE_ADDED_TO_ACTIONMAP =
+ "These request types should be added to actionMap: %s";
+ public static final String THE_CHECK_SUM_OF_THE_NO_LOG_BATCH_IS_INCORRECT =
+ "The check sum of the No.%d log batch is incorrect! In ";
+ public static final String THE_CURRENT_CONFIGNODE_CAN_T_JOINED_THE_CLUSTER_BECAUSE_LEADER =
+ "The current ConfigNode can't joined the cluster because leader's scheduling failed. The possible cause is that the ip:port configuration is incorrect.";
+ public static final String THE_CURRENT_CONFIGNODE_CAN_T_SEND_REGISTER_REQUEST_TO_THE =
+ "The current ConfigNode can't send register request to the ConfigNode-leader after all retries!";
+ public static final String THE_CURRENT_IS_NOW_STARTING_AS_THE_SEED_CONFIGNODE =
+ "The current {} is now starting as the Seed-ConfigNode.";
+ public static final String THE_DATA_REPLICATION_FACTOR_SHOULD_BE_POSITIVE =
+ "The data_replication_factor should be positive";
+ public static final String THE_DEFAULT_DATA_REGION_GROUP_NUM_SHOULD_BE_POSITIVE =
+ "The default_data_region_group_num should be positive";
+ public static final String THE_DEFAULT_SCHEMA_REGION_GROUP_NUM_SHOULD_BE_POSITIVE =
+ "The default_schema_region_group_num should be positive";
+ public static final String THE_PARAMETER_CN_TARGET_CONFIG_NODE_LIST_HAS_BEEN_ABANDONED =
+ "参数 cn_target_config_node_list 已废弃,仅使用第一个 ConfigNode 地址加入集群。请改用 cn_seed_config_node。";
+ public static final String THE_PARAMETER_CONFIG_NODE_ID_DOESN_T_EXIST_IN =
+ "The parameter config_node_id doesn't exist in ";
+ public static final String THE_PROCEDURE_FRAMEWORK_HAS_BEEN_SUCCESSFULLY_UPGRADED_NOW_IT_USES =
+ "The Procedure framework has been successfully upgraded. Now it uses the consensus layer's services instead of maintaining the WAL itself.";
+ public static final String THE_REMOVE_CONFIGNODE_SCRIPT_HAS_BEEN_DEPRECATED_PLEASE_CONNECT_TO =
+ "The remove-confignode script has been deprecated. Please connect to the CLI and use SQL: remove confignode [confignode_id].";
+ public static final String THE_RESULT_OF_REGISTER_CONFIGNODE_IS_EMPTY =
+ "The result of register ConfigNode is empty!";
+ public static final String THE_RESULT_OF_REGISTER_SELF_CONFIGNODE_IS_RETRY =
+ "The result of register self ConfigNode is {}, retry {} ...";
+ public static final String THE_RESULT_OF_SUBMITTING_REMOVECONFIGNODE_JOB_IS_REMOVECONFIGNODEREQUEST =
+ "The result of submitting RemoveConfigNode job is {}. RemoveConfigNodeRequest: {}";
+ public static final String THE_SCHEMA_REPLICATION_FACTOR_SHOULD_BE_POSITIVE =
+ "The schema_replication_factor should be positive";
+ public static final String THE_SEEDCONFIGNODE_SETTING_IN_CONF_IS_EMPTY =
+ "The seedConfigNode setting in conf is empty";
+ public static final String THE_S_CREATION_HAS_NOT_PASSED_IN_JARNAME_WHICH_DOES =
+ "The %s's creation has not passed in jarName, which does not exist in other pipePlugins. Please check";
+ public static final String THE_TIMESTAMP_PRECISION_SHOULD_BE_MS_US_OR_NS =
+ "The timestamp_precision should be ms, us or ns";
+ public static final String THE_TIME_PARTITION_INTERVAL_SHOULD_BE_POSITIVE =
+ "The time_partition_interval should be positive";
+ public static final String THE_TIME_PARTITION_ORIGIN_SHOULD_BE_NON_NEGATIVE =
+ "The time_partition_origin should be non-negative";
+ public static final String TRY_LISTEN_TO_PLAN_FAILED = "Try listen to plan failed";
+ public static final String UNDEFINED_TEMPLATE = "Undefined template {}";
+ public static final String UNEXPECTED_INTERRUPTION_DURING_THE_CLOSE_METHOD_OF_LOGWRITER =
+ "Unexpected interruption during the close method of logWriter";
+ public static final String UNEXPECTED_INTERRUPTION_DURING_WAITING_FOR_LEADER_ELECTION =
+ "Unexpected interruption during waiting for leader election.";
+ public static final String UNEXPECTED_READ_PLAN = "Unexpected read plan : {}";
+ public static final String UNEXPECTED_WRITE_PLAN_REQUEST_BYTEBUFFER =
+ "Unexpected write plan, request: {}, bytebuffer: {}";
+ public static final String UNKNOWN_FAILURE_DETECTOR = "未知 failure_detector:%s,请设置为 \"fixed\" 或 \"phi_accrual\"";
+ public static final String UNKNOWN_HOST_WHEN_CHECKING_SEED_CONFIGNODE_IP =
+ "Unknown host when checking seed configNode IP {}";
+ public static final String UNKNOWN_LEADER_DISTRIBUTION_POLICY =
+ "未知 leader_distribution_policy:%s,请设置为 \"GREEDY\"、\"CFD\" 或 \"HASH\"";
+ public static final String UNKNOWN_PHYSICALPLAN_CONFIGPHYSICALPLANTYPE =
+ "unknown PhysicalPlan configPhysicalPlanType: ";
+ public static final String UNKNOWN_READ_CONSISTENCY_LEVEL_PLEASE_SET_TO =
+ "未知 read_consistency_level:%s,请设置为 \"strong\" 或 \"weak\"";
+ public static final String UNKNOWN_ROUTE_PRIORITY_POLICY_PLEASE_SET_TO =
+ "未知 route_priority_policy:%s,请设置为 \"LEADER\" 或 \"GREEDY\"";
+ public static final String UNRECOGNIZED_LOG_CONFIGPHYSICALPLANTYPE =
+ "Unrecognized log configPhysicalPlanType: ";
+ public static final String UNRECOGNIZED_REGIONMAINTAINTYPE = "Unrecognized RegionMaintainType: ";
+ public static final String UNSUPPORTED_SUBPLAN_TYPE = "Unsupported subPlan type: %s";
+ public static final String UNSUPPORTED_SUB_PLAN_TYPE = "Unsupported sub plan type: ";
+ public static final String UPDATE_ONLINE_CONFIGNODE_FAILED = "Update online ConfigNode failed.";
+ public static final String UPDATE_PROCEDURE_PID_WAL_FAILED =
+ "Update Procedure (pid={}) wal failed";
+ public static final String UTILITY_CLASS_SYSTEMPROPERTIESUTILS =
+ "Utility class: SystemPropertiesUtils.";
+ public static final String VIEW_IS_NOT_SUPPORTED = "不支持视图。";
+ public static final String WRITE_CONFIGNODE_SYSTEM_PROPERTIES_FAILED =
+ "Write confignode-system.properties failed";
+ public static final String WRONG_MNODE_TYPE = "错误的 MNode 类型";
+ public static final String WRONG_NODE_TYPE = "错误的节点类型";
+ public static final String YOU_SHOULD_MANUALLY_DELETE_THE_PROCEDURE_WAL_DIR_BEFORE_CONFIGNODE =
+ "You should manually delete the procedure wal dir before ConfigNode restart. {}";
+ public static final String NOT_SUPPORT = "不支持";
+
+ private ConfigNodeMessages() {}
+}
diff --git a/iotdb-core/confignode/src/main/i18n/zh/org/apache/iotdb/confignode/i18n/ManagerMessages.java b/iotdb-core/confignode/src/main/i18n/zh/org/apache/iotdb/confignode/i18n/ManagerMessages.java
new file mode 100644
index 0000000000000..f2be13a9bf617
--- /dev/null
+++ b/iotdb-core/confignode/src/main/i18n/zh/org/apache/iotdb/confignode/i18n/ManagerMessages.java
@@ -0,0 +1,507 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.i18n;
+
+public final class ManagerMessages {
+
+ public static final String ACTIVATEDATAALLOTTABLE_ACTIVATE_SERIESPARTITIONSLOT =
+ "[ActivateDataAllotTable] Activate SeriesPartitionSlot {} ";
+ public static final String AFTER_THIS_SUCCESSFUL_SYNC_IF_PIPETASKINFO_IS_EMPTY_DURING_THIS =
+ "After this successful sync, if PipeTaskInfo is empty during this sync and has not been modified afterwards, all subsequent syncs will be skipped";
+ public static final String AFTER_THIS_SUCCESSFUL_SYNC_IF_SUBSCRIPTIONINFO_IS_EMPTY_DURING_THIS =
+ "After this successful sync, if SubscriptionInfo is empty during this sync and has not been modified afterwards, all subsequent syncs will be skipped";
+ public static final String ATTEMPT_TO_REPORT_PIPE_EXCEPTION_TO_A_NULL_PIPETASKMETA =
+ "Attempt to report pipe exception to a null PipeTaskMeta.";
+ public static final String AUTH_RUN_AUTH_PLAN = "Auth: run auth plan: {}";
+ public static final String CLUSTERID = "clusterID: {}";
+ public static final String COLLECTING_PIPE_HEARTBEAT_FROM_DATA_NODES =
+ "Collecting pipe heartbeat {} from data nodes";
+ public static final String CONNECTION_FROM_DATANODE_TO_DATANODE_IS_BROKEN =
+ "Connection from DataNode {} to DataNode {} is broken";
+ public static final String CONSENSUSGROUPSTATISTICS = "[ConsensusGroupStatistics]\t {}: {} -> {}";
+ public static final String CONSENSUSGROUPSTATISTICS_CONSENSUSGROUPSTATISTICSMAP =
+ "[ConsensusGroupStatistics] ConsensusGroupStatisticsMap: ";
+ public static final String CONSENSUSMANAGER_GETLEADERPEER_BEEN_INTERRUPTED =
+ "ConsensusManager getLeaderPeer been interrupted, ";
+ public static final String CONSUMER_IN_CONSUMER_GROUP_FAILED_TO_SUBSCRIBE_TOPICS_RESULT_STATUS =
+ "Consumer {} in consumer group {} failed to subscribe topics {}. Result status: {}.";
+ public static final String CONSUMER_IN_CONSUMER_GROUP_FAILED_TO_UNSUBSCRIBE_TOPICS_RESULT_STATUS =
+ "Consumer {} in consumer group {} failed to unsubscribe topics {}. Result status: {}.";
+ public static final String CREATEPEERFORCONSENSUSGROUP = "createPeerForConsensusGroup {}...";
+ public static final String CREATEREGIONGROUPS_STARTING_TO_CREATE_THE_FOLLOWING_REGIONGROUPS =
+ "[CreateRegionGroups] Starting to create the following RegionGroups:";
+ public static final String CREATE_DATAPARTITION_FAILED_BECAUSE =
+ "Create DataPartition failed because: ";
+ public static final String CREATE_SCHEMAPARTITION_FAILED_BECAUSE =
+ "Create SchemaPartition failed because: ";
+ public static final String DATABASE_DOESN_T_EXIST = "Database: {} doesn't exist";
+ public static final String DATABASE_NOT_EXISTS_WHEN_SETUPPARTITIONBALANCER =
+ "Database {} not exists when setupPartitionBalancer";
+ public static final String DATABASE_NOT_EXISTS_WHEN_UPDATEDATAALLOTTABLE =
+ "Database {} not exists when updateDataAllotTable";
+ public static final String DATANODELOCATION_IS_NULL_DATANODEID =
+ "DataNodeLocation is null, datanodeId {}";
+ public static final String DATAREGIONGROUPEXTENSIONPOLICY_DOESN_T_EXIST =
+ "DataRegionGroupExtensionPolicy %s doesn't exist.";
+ public static final String DECREASE_REFERENCE_COUNT_FOR_SNAPSHOT_ERROR =
+ "Decrease reference count for snapshot {} error.";
+ public static final String DELETING_REGIONS_COSTS_MS = "Deleting regions costs {}ms";
+ public static final String DETECTED_COMPLETION_OF_PIPE_STATIC_META_REMOVE_IT =
+ "Detected completion of pipe {}, static meta: {}, remove it.";
+ public static final String DETECT_PIPERUNTIMECRITICALEXCEPTION_FROM_AGENT_STOP_PIPE =
+ "Detect PipeRuntimeCriticalException {} from agent, stop pipe {}.";
+ public static final String ENABLE_SEPARATION_OF_POWERS_IS_NOT_SUPPORTED = "不支持启用权力分离";
+ public static final String ENDEXECUTECQ_TIME_RANGE_IS_CURRENT_TIME_IS =
+ "[EndExecuteCQ] {}, time range is [{}, {}), current time is {}";
+ public static final String ERROR_HAPPENED_WHILE_SHUTTING_DOWN_PREVIOUS_CQ_SCHEDULE_THREAD_POOL =
+ "Error happened while shutting down previous cq schedule thread pool.";
+ public static final String ERROR_OCCURRED_DURING_CLOSING_PIPECONNECTOR =
+ "Error occurred during closing PipeConnector.";
+ public static final String ERROR_OCCURRED_DURING_CLOSING_PIPEEXTRACTOR =
+ "Error occurred during closing PipeExtractor.";
+ public static final String ERROR_OCCURRED_DURING_CLOSING_PIPEPROCESSOR =
+ "Error occurred during closing PipeProcessor.";
+ public static final String ERROR_WHEN_COUNTING_DATAREGIONGROUPS_IN_DATABASE =
+ "Error when counting DataRegionGroups in Database: {}";
+ public static final String ERROR_WHEN_COUNTING_SCHEMAREGIONGROUPS_IN_DATABASE =
+ "Error when counting SchemaRegionGroups in Database: {}";
+ public static final String EVENT_SERVICE_IS_STARTED_SUCCESSFULLY =
+ "Event service is started successfully.";
+ public static final String EVENT_SERVICE_IS_STOPPED_SUCCESSFULLY =
+ "Event service is stopped successfully.";
+ public static final String EXCEPTION_ENCOUNTERED_WHEN_TRIGGERING_SCHEMA_REGION_SNAPSHOT =
+ "Exception encountered when triggering schema region snapshot.";
+ public static final String EXECUTE_CQ_FAILED = "Execute CQ {} failed";
+ public static final String EXECUTE_CQ_FAILED_TSSTATUS_IS = "Execute CQ {} failed, TSStatus is {}";
+ public static final String EXPECTED_PIPE_HEARTBEAT_NODE_COUNT_IS_FALLBACK_TO_1 =
+ "Expected pipe heartbeat node count is {}, fallback to 1.";
+ public static final String EXTENDREGION_SUBMIT_ADDREGIONPEERPROCEDURE_SUCCESSFULLY =
+ "[ExtendRegion] Submit AddRegionPeerProcedure successfully: {}";
+ public static final String EXTEND_REGION_GROUP_FAILED = "Extend region group failed";
+ public static final String FAILED_IN_THE_READ_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER =
+ "执行共识层读写 API 失败:";
+ public static final String FAILED_TO_ACQUIRE_LOCK_WHEN_PARSEHEARTBEAT_FROM_NODE_ID =
+ "Failed to acquire lock when parseHeartbeat from node (id={}).";
+ public static final String FAILED_TO_ACQUIRE_PIPE_LOCK_FOR_AUTO_RESTART_PIPE_TASK =
+ "Failed to acquire pipe lock for auto restart pipe task.";
+ public static final String FAILED_TO_ACQUIRE_PIPE_LOCK_FOR_HANDLING_SUCCESSFUL_RESTART =
+ "Failed to acquire pipe lock for handling successful restart.";
+ public static final String FAILED_TO_ALTER_PIPE_RESULT_STATUS =
+ "Failed to alter pipe {}. Result status: {}.";
+ public static final String FAILED_TO_CHECK_AND_REPAIR_CONSENSUS_PIPES =
+ "Failed to check and repair consensus pipes";
+ public static final String FAILED_TO_CHECK_PASSWORD_FOR_PIPE =
+ "Failed to check password for pipe %s.";
+ public static final String FAILED_TO_CLOSE_CONSUMER_IN_CONSUMER_GROUP_RESULT_STATUS =
+ "Failed to close consumer {} in consumer group {}. Result status: {}.";
+ public static final String FAILED_TO_CLOSE_EXTRACTOR_AFTER_FAILED_TO_INITIALIZE_EXTRACTOR =
+ "Failed to close extractor after failed to initialize extractor. ";
+ public static final String FAILED_TO_CLOSE_SINK_AFTER_FAILED_TO_INITIALIZE_IT_IGNORE =
+ "Failed to close sink after failed to initialize it. Ignore this exception.";
+ public static final String FAILED_TO_COLLECT_COMMITCREATETABLEPLAN =
+ "Failed to collect CommitCreateTablePlan";
+ public static final String FAILED_TO_COLLECT_PIPE_META_LIST_FROM_CONFIG_NODE_TASK =
+ "Failed to collect pipe meta list from config node task agent";
+ public static final String FAILED_TO_COLLECT_UNSETTEMPLATEPLAN =
+ "Failed to collect UnsetTemplatePlan";
+ public static final String FAILED_TO_COLLECT_USER_NAME_FOR_USER_ID =
+ "Failed to collect user name for user id {}";
+ public static final String FAILED_TO_CREATE_CONSUMER_IN_CONSUMER_GROUP_RESULT_STATUS =
+ "Failed to create consumer {} in consumer group {}. Result status: {}.";
+ public static final String FAILED_TO_CREATE_PEER_FOR_CONSENSUS_GROUP =
+ "Failed to create peer for consensus group";
+ public static final String FAILED_TO_CREATE_PIPE_RESULT_STATUS =
+ "Failed to create pipe {}. Result status: {}.";
+ public static final String FAILED_TO_CREATE_SUBTASK_FOR_PIPE_CREATION_TIME =
+ "Failed to create subtask for pipe %s, creation time %d";
+ public static final String FAILED_TO_CREATE_TOPIC_WITH_ATTRIBUTES_RESULT_STATUS =
+ "Failed to create topic {} with attributes {}. Result status: {}.";
+ public static final String FAILED_TO_DEEP_COPY_PIPEMETA = "深拷贝 pipeMeta 失败";
+ public static final String FAILED_TO_DEREGISTER_PIPE_CONFIG_REGION_CONNECTOR =
+ "Failed to deregister pipe config region connector metrics, PipeConfigNodeSubtask({}) does not exist";
+ public static final String FAILED_TO_DEREGISTER_PIPE_CONFIG_REGION_EXTRACTOR =
+ "Failed to deregister pipe config region extractor metrics, IoTDBConfigRegionExtractor({}) does not exist";
+ public static final String FAILED_TO_DEREGISTER_PIPE_REMAINING_TIME_METRICS_REMAININGTIMEOPERATOR_DOES_NOT =
+ "Failed to deregister pipe remaining time metrics, RemainingTimeOperator({}) does not exist";
+ public static final String FAILED_TO_DEREGISTER_PIPE_TEMPORARY_META_METRICS_PIPETEMPORARYMETA_DOES_NOT =
+ "Failed to deregister pipe temporary meta metrics, PipeTemporaryMeta({}) does not exist";
+ public static final String FAILED_TO_DROP_PIPE_RESULT_STATUS =
+ "Failed to drop pipe {}. Result status: {}.";
+ public static final String FAILED_TO_GET_ALL_PIPE_INFO = "Failed to get all pipe info.";
+ public static final String FAILED_TO_GET_ALL_SUBSCRIPTION_INFO =
+ "Failed to get all subscription info.";
+ public static final String FAILED_TO_GET_ALL_TOPIC_INFO = "Failed to get all topic info.";
+ public static final String FAILED_TO_HANDLE_PIPE_META_CHANGES = "处理 pipe 元数据变更失败";
+ public static final String FAILED_TO_HANDLE_PIPE_META_CHANGE_RESULT_STATUS =
+ "Failed to handle pipe meta change. Result status: {}.";
+ public static final String FAILED_TO_LOAD_SNAPSHOT_FROM_BYTEBUFFER =
+ "Failed to load snapshot from byteBuffer {}.";
+ public static final String FAILED_TO_LOAD_SNAPSHOT_SNAPSHOT_FILE_IS_NOT_A_NORMAL =
+ "Failed to load snapshot,snapshot file [{}] is not a normal file.";
+ public static final String FAILED_TO_MARK_PIPE_CONFIG_REGION_WRITE_PLAN_EVENT_PIPECONFIGNODESUBTASK =
+ "Failed to mark pipe config region write plan event, PipeConfigNodeSubtask({}) does not exist";
+ public static final String FAILED_TO_MARK_PIPE_REGION_COMMIT_REMAININGTIMEOPERATOR_DOES_NOT_EXIST =
+ "Failed to mark pipe region commit, RemainingTimeOperator({}) does not exist";
+ public static final String FAILED_TO_SHOW_SUBSCRIPTION_INFO = "Failed to show subscription info.";
+ public static final String FAILED_TO_SHOW_TOPIC_INFO = "Failed to show topic info.";
+ public static final String FAILED_TO_START_PIPE_RESULT_STATUS =
+ "Failed to start pipe {}. Result status: {}.";
+ public static final String FAILED_TO_STOP_PIPE_RESULT_STATUS =
+ "Failed to stop pipe {}. Result status: {}.";
+ public static final String FAILED_TO_SUBMIT_ASYNC_CONSENSUS_PIPE_CREATION_FOR =
+ "Failed to submit async consensus pipe creation for {}: {}";
+ public static final String FAILED_TO_SUBMIT_ASYNC_CONSENSUS_PIPE_DROP_FOR =
+ "Failed to submit async consensus pipe drop for {}: {}";
+ public static final String FAILED_TO_SYNC_CONSUMER_GROUP_META_RESULT_STATUS =
+ "Failed to sync consumer group meta. Result status: {}.";
+ public static final String FAILED_TO_SYNC_PIPE_META_RESULT_STATUS =
+ "Failed to sync pipe meta. Result status: {}.";
+ public static final String FAILED_TO_SYNC_TEMPLATE_EXTENSION_INFO_TO_DATANODE =
+ "Failed to sync template {} extension info to DataNode {}";
+ public static final String FAILED_TO_SYNC_TOPIC_META_RESULT_STATUS =
+ "Failed to sync topic meta. Result status: {}.";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_CONFIG_REGION_CONNECTOR_METRICS_CONNECTOR =
+ "Failed to unbind from pipe config region connector metrics, connector map not empty";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_CONFIG_REGION_EXTRACTOR_METRICS_EXTRACTOR =
+ "Failed to unbind from pipe config region extractor metrics, extractor map not empty";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_REMAINING_TIME_METRICS_REMAININGTIMEOPERATOR_MAP =
+ "Failed to unbind from pipe remaining time metrics, RemainingTimeOperator map not empty";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_TEMPORARY_META_METRICS_PIPETEMPORARYMETA_MAP =
+ "Failed to unbind from pipe temporary meta metrics, PipeTemporaryMeta map not empty";
+ public static final String FAILED_TO_UPDATE_PIPE_PROCEDURE_TIMER_PIPEPROCEDURE_DOES_NOT_EXIST =
+ "Failed to update pipe procedure timer, PipeProcedure({}) does not exist";
+ public static final String FAILED_TO_UPDATE_THE_LAST_EXECUTION_TIME_OF_CQ_BECAUSE =
+ "Failed to update the last execution time {} of CQ {}, because {}";
+ public static final String FAIL_TO_GET_ALLUDFTABLE = "Fail to get AllUDFTable";
+ public static final String FAIL_TO_GET_PIPEPLUGINTABLE = "Fail to get PipePluginTable";
+ public static final String FAIL_TO_GET_TRIGGERTABLE = "Fail to get TriggerTable";
+ public static final String FAIL_TO_GET_UDFTABLE = "Fail to get UDFTable";
+ public static final String FAIL_TO_TRANSFER_BECAUSE_WILL_RETRY =
+ "Fail to transfer because {}, will retry";
+ public static final String FORCE_UPDATE_NODECACHE_STATUS_CURRENTNANOTIME =
+ "Force update NodeCache: status={}, currentNanoTime={}";
+ public static final String GETDATAPARTITION_INTERFACE_RECEIVE_PARTITIONSLOTSMAP_RETURN =
+ "GetDataPartition interface receive PartitionSlotsMap: {}, return: {}";
+ public static final String GETNODEPATHSPARTITION_RECEIVED_PARTIALPATH_LEVEL_PATHPATTERNTREE_RESP =
+ "[GetNodePathsPartition]:{}Received PartialPath: {}, Level: {}, PathPatternTree: {}, Resp: {}";
+ public static final String GETORCREATEDATAPARTITION_RECEIVE_PARTITIONSLOTSMAP_RETURN_TDATAPARTITIONTABLERESP =
+ "[GetOrCreateDataPartition]:{}Receive PartitionSlotsMap: {}, Return TDataPartitionTableResp: {}";
+ public static final String GETORCREATESCHEMAPARTITION_RECEIVE_DATABASENAMESLOTMAP_RETURN_TSCHEMAPARTITIONTABLERESP =
+ "[GetOrCreateSchemaPartition]:{}Receive databaseNameSlotMap: {}, Return TSchemaPartitionTableResp: {}";
+ public static final String GETORCREATESCHEMAPARTITION_RECEIVE_PATHPATTERNTREE_RETURN_TSCHEMAPARTITIONTABLERESP =
+ "[GetOrCreateSchemaPartition]:{}Receive PathPatternTree: {}, Return TSchemaPartitionTableResp: {}";
+ public static final String GETSCHEMAPARTITION_RECEIVE_PATHS_RETURN =
+ "GetSchemaPartition receive paths: {}, return: {}";
+ public static final String GET_REGION_GROUP_ID_FAIL = "获取区域组 ID 失败";
+ public static final String HEARTBEAT_SERVICE_IS_STARTED_SUCCESSFULLY =
+ "Heartbeat service is started successfully.";
+ public static final String HEARTBEAT_SERVICE_IS_STOPPED_SUCCESSFULLY =
+ "Heartbeat service is stopped successfully.";
+ public static final String INCORRECT_VERSION_OF = "Incorrect version of ";
+ public static final String INIT_CONSENSUSMANAGER_SUCCESSFULLY_WHEN_RESTARTED =
+ "Init ConsensusManager successfully when restarted";
+ public static final String INTERRUPTED_WHILE_WAITING_FOR_PIPETASKCOORDINATOR_LOCK_CURRENT_THREAD =
+ "Interrupted while waiting for PipeTaskCoordinator lock, current thread: {}";
+ public static final String INTERRUPT_WHEN_WAIT_FOR_CALCULATING_REGION_PRIORITY =
+ "Interrupt when wait for calculating Region priority";
+ public static final String INTERRUPT_WHEN_WAIT_FOR_LEADER_ELECTION =
+ "Interrupt when wait for leader election";
+ public static final String INVALID_EVENT_TYPE = "Invalid event type: ";
+ public static final String IOTCONSENSUSV2_LEADER_CHANGED_FAILED_TO_FLUSH_OLD_LEADER_FOR_REGION =
+ "[IoTConsensusV2 Leader Changed] Failed to flush old leader {} for region {}";
+ public static final String IOTCONSENSUSV2_LEADER_CHANGED_SUCCESSFULLY_FLUSH_OLD_LEADER_FOR_REGION =
+ "[IoTConsensusV2 Leader Changed] Successfully flush old leader {} for region {}";
+ public static final String IOTDBCONFIGNODERECEIVER_DOES_NOT_SUPPORT_LOAD_FILE_V1 =
+ "IoTDBConfigNodeReceiver does not support load file V1.";
+ public static final String IOTDBCONFIGREGIONAIRGAPCONNECTOR_CAN_T_TRANSFER_TABLETINSERTIONEVENT =
+ "IoTDBConfigRegionAirGapConnector can't transfer TabletInsertionEvent.";
+ public static final String IOTDBCONFIGREGIONAIRGAPCONNECTOR_CAN_T_TRANSFER_TSFILEINSERTIONEVENT =
+ "IoTDBConfigRegionAirGapConnector can't transfer TsFileInsertionEvent.";
+ public static final String IOTDBCONFIGREGIONAIRGAPCONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT =
+ "IoTDBConfigRegionAirGapConnector does not support transferring generic event: {}.";
+ public static final String IOTDBCONFIGREGIONSINK_CAN_T_TRANSFER_TABLETINSERTIONEVENT =
+ "IoTDBConfigRegionSink can't transfer TabletInsertionEvent.";
+ public static final String IOTDBCONFIGREGIONSINK_CAN_T_TRANSFER_TSFILEINSERTIONEVENT =
+ "IoTDBConfigRegionSink can't transfer TsFileInsertionEvent.";
+ public static final String IOTDBCONFIGREGIONSINK_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT =
+ "IoTDBConfigRegionSink does not support transferring generic event: {}.";
+ public static final String IOTDBCONFIGREGIONSOURCE_DOES_NOT_TRANSFERRING_EVENTS_UNDER_SIMPLE_CONSENSUS =
+ "IoTDBConfigRegionSource does not transferring events under simple consensus";
+ public static final String LEADERBALANCER_FAILED_TO_CHANGE_THE_LEADER_OF_REGION_TO_DATANODE =
+ "[LeaderBalancer] Failed to change the leader of Region: {} to DataNode: {}";
+ public static final String LEADERBALANCER_REGION_NOT_IN_DATABASEREGIONGROUPMAP =
+ "[LeaderBalancer] Region: {} not in databaseRegionGroupMap";
+ public static final String LEADERBALANCER_REGION_NOT_IN_REGIONLEADERMAP =
+ "[LeaderBalancer] Region: {} not in regionLeaderMap";
+ public static final String LEADERBALANCER_REGION_NOT_IN_REGIONLOCATIONMAP =
+ "[LeaderBalancer] Region: {} not in regionLocationMap";
+ public static final String LEADERBALANCER_REGION_NOT_IN_REGIONSTATISTICSMAP =
+ "[LeaderBalancer] Region: {} not in regionStatisticsMap";
+ public static final String LEADERBALANCER_THE_FOLLOWING_REGIONGROUPS_LEADER_CANNOT_BE =
+ "[LeaderBalancer] The following RegionGroups' leader cannot be selected because their corresponding caches are incomplete: {}";
+ public static final String LEADERBALANCER_TRY_TO_CHANGE_THE_LEADER_OF_REGION_TO_DATANODE =
+ "[LeaderBalancer] Try to change the leader of Region: {} to DataNode: {} ";
+ public static final String LOADSTATISTICS_SERVICE_IS_STARTED_SUCCESSFULLY =
+ "LoadStatistics service is started successfully.";
+ public static final String LOADSTATISTICS_SERVICE_IS_STOPPED_SUCCESSFULLY =
+ "LoadStatistics service is stopped successfully.";
+ public static final String MIGRATEREGION_SUBMIT_REGIONMIGRATEPROCEDURE_SUCCESSFULLY_REGION_ORIGIN_DATANODE =
+ "[MigrateRegion] Submit RegionMigrateProcedure successfully, Region: {}, Origin DataNode: {}, Dest DataNode: {}, Add Coordinator: {}, Remove Coordinator: {}";
+ public static final String MISMATCHED_CRC32_CODE_WHEN_DESERIALIZING_SERVICE_INFO =
+ "Mismatched CRC32 code when deserializing service info.";
+ public static final String NETWORK_ERROR_WHEN_SEAL_CONFIG_REGION_SNAPSHOT_BECAUSE =
+ "Network error when seal config region snapshot %s, because %s.";
+ public static final String NETWORK_ERROR_WHEN_TRANSFER_CONFIG_REGION_WRITE_PLAN_BECAUSE =
+ "Network error when transfer config region write plan %s, because %s.";
+ public static final String NETWORK_ERROR_WHEN_TRANSFER_EVENT_BECAUSE =
+ "Network error when transfer event %s, because %s.";
+ public static final String NODEMANAGER_START_TO_REMOVE_DATANODE =
+ "NodeManager start to remove DataNode {}";
+ public static final String NODEMANAGER_SUBMIT_REMOVEAINODEPLAN_FINISHED =
+ "NodeManager submit RemoveAINodePlan finished, {}";
+ public static final String NODEMANAGER_SUBMIT_REMOVEDATANODEPLAN_FINISHED_REMOVEDATANODEPLAN =
+ "NodeManager submit RemoveDataNodePlan finished, removeDataNodePlan: {}";
+ public static final String NODESTATISTICS = "[NodeStatistics]\t {}: {} -> {}";
+ public static final String NODESTATISTICS_NODESTATISTICSMAP =
+ "[NodeStatistics] NodeStatisticsMap: ";
+ public static final String NOT_HAS_PRIVILEGE_TO_TRANSFER_PLAN = "没有传输计划的权限:";
+ public static final String NOT_IMPLEMENT_YET = "尚未实现";
+ public static final String NO_CORRESPONDING_PIPE_IS_RUNNING_IN_THE_REPORTED_DATAREGION_RUNTIMEMETAFROMAGENT =
+ "No corresponding Pipe is running in the reported DataRegion. runtimeMetaFromAgent is null, runtimeMetaFromCoordinator: {}";
+ public static final String PARTITIONBALANCER_THE_SERIESSLOT_IN_TIMESLOT_WILL_BE =
+ "[PartitionBalancer] The SeriesSlot: {} in TimeSlot: {} will be allocated to DataRegionGroup: {}, because the original target: {} is currently unavailable.";
+ public static final String PHIACCRUALDETECTOR_TOPOLOGY_IS_BROKEN_HEARTBEAT_HISTORY_MS =
+ "[PhiAccrualDetector] Topology {} is broken, heartbeat history (ms): {}";
+ public static final String PHIACCRUALDETECTOR_TOPOLOGY_IS_RECOVERED_HEARTBEAT_HISTORY_MS =
+ "[PhiAccrualDetector] Topology {} is recovered, heartbeat history (ms): {}";
+ public static final String PIPEHANDLELEADERCHANGEPROCEDURE_WAS_FAILED_TO_SUBMIT =
+ "PipeHandleLeaderChangeProcedure was failed to submit.";
+ public static final String PIPEHANDLELEADERCHANGEPROCEDURE_WAS_SUBMITTED_PROCEDUREID =
+ "PipeHandleLeaderChangeProcedure was submitted, procedureId: {}.";
+ public static final String PIPEHANDLEMETACHANGEPROCEDURE_WAS_FAILED_TO_SUBMIT =
+ "PipeHandleMetaChangeProcedure was failed to submit.";
+ public static final String PIPEHANDLEMETACHANGEPROCEDURE_WAS_SUBMITTED_PROCEDUREID =
+ "PipeHandleMetaChangeProcedure was submitted, procedureId: {}.";
+ public static final String PIPEHEARTBEAT_IS_STARTED_SUCCESSFULLY =
+ "PipeHeartbeat is started successfully.";
+ public static final String PIPEHEARTBEAT_IS_STOPPED_SUCCESSFULLY =
+ "PipeHeartbeat is stopped successfully.";
+ public static final String PIPEMETASYNCER_IS_STARTED_SUCCESSFULLY =
+ "PipeMetaSyncer is started successfully.";
+ public static final String PIPEMETASYNCER_IS_STOPPED_SUCCESSFULLY =
+ "PipeMetaSyncer is stopped successfully.";
+ public static final String PIPERUNTIMECONFIGNODEAGENT_STARTED =
+ "PipeRuntimeConfigNodeAgent started";
+ public static final String PIPERUNTIMECONFIGNODEAGENT_STOPPED =
+ "PipeRuntimeConfigNodeAgent stopped";
+ public static final String PIPERUNTIMECOORDINATOR_MEETS_ERROR_IN_UPDATING_PIPEMETAKEEPER =
+ "PipeRuntimeCoordinator meets error in updating pipeMetaKeeper, ";
+ public static final String PIPETASKCOORDINATORLOCK_IS_HELD_BY_ANOTHER_THREAD_SKIP_THIS_ROUND_OF =
+ "PipeTaskCoordinatorLock is held by another thread, skip this round of heartbeat to avoid procedure and rpc accumulation as much as possible";
+ public static final String PIPETASKCOORDINATORLOCK_IS_HELD_BY_ANOTHER_THREAD_SKIP_THIS_ROUND_OF_2 =
+ "PipeTaskCoordinatorLock is held by another thread, skip this round of sync to avoid procedure and rpc accumulation as much as possible";
+ public static final String PIPETASKCOORDINATOR_LOCK_ACQUIRED_BY_THREAD =
+ "PipeTaskCoordinator lock acquired by thread {}";
+ public static final String PIPETASKCOORDINATOR_LOCK_FAILED_TO_ACQUIRE_BY_THREAD_BECAUSE_OF_TIMEOUT =
+ "PipeTaskCoordinator lock failed to acquire by thread {} because of timeout";
+ public static final String PIPETASKCOORDINATOR_LOCK_RELEASED_BY_THREAD =
+ "PipeTaskCoordinator lock released by thread {}";
+ public static final String PIPETASKCOORDINATOR_LOCK_WAITING_FOR_THREAD =
+ "PipeTaskCoordinator lock waiting for thread {}";
+ public static final String PIPE_SNAPSHOT_DIR_FOUND_DELETING_IT =
+ "Pipe snapshot dir found, deleting it: {},";
+ public static final String PROCEDUREMANAGER_IS_STARTED_SUCCESSFULLY = "ProcedureManager 已成功启动。";
+ public static final String PROCEDUREMANAGER_IS_STOPPED_SUCCESSFULLY = "ProcedureManager 已成功停止。";
+ public static final String PROCEDURE_DETAILS_ARE = "[{}] procedure details are {}";
+ public static final String REBALANCEDATAALLOTTABLE_DATABASE =
+ "[ReBalanceDataAllotTable] Database: {}, ";
+ public static final String RECEIVED_PIPE_HEARTBEAT_REQUEST_FROM_CONFIG_COORDINATOR =
+ "Received pipe heartbeat request {} from config coordinator.";
+ public static final String RECEIVER_ID = "Receiver id = {}: {}";
+ public static final String RECEIVER_ID_EXCEPTION_ENCOUNTERED_WHILE_EXECUTING_PLAN =
+ "Receiver id = {}: Exception encountered while executing plan {}: ";
+ public static final String RECEIVER_ID_FAILURE_STATUS_ENCOUNTERED_WHILE_EXECUTING_PLAN =
+ "Receiver id = {}: Failure status encountered while executing plan {}: {}";
+ public static final String RECEIVER_ID_PERMISSION_CHECK_FAILED_WHILE_EXECUTING_PLAN =
+ "Receiver id = {}: Permission check failed while executing plan {}: {}";
+ public static final String RECEIVER_ID_UNSUPPORTED_PIPEREQUESTTYPE_ON_CONFIGNODE_RESPONSE_STATUS =
+ "Receiver id = {}: Unsupported PipeRequestType on ConfigNode, response status = {}.";
+ public static final String RECONSTRUCTREGION_SUBMIT_RECONSTRUCTREGIONPROCEDURE_SUCCESSFULLY =
+ "[ReconstructRegion] Submit ReconstructRegionProcedure successfully, {}";
+ public static final String REGIONCLEANER_IS_STARTED_SUCCESSFULLY =
+ "RegionCleaner is started successfully.";
+ public static final String REGIONCLEANER_IS_STOPPED_SUCCESSFULLY =
+ "RegionCleaner is stopped successfully.";
+ public static final String REGIONELECTION_THE_LEADER_OF_REGIONGROUPS_IS_ELECTED =
+ "[RegionElection] The leader of RegionGroups: {} is elected.";
+ public static final String REGIONELECTION_THE_LEADER_OF_REGIONGROUPS_IS_NOT_DETERMINED_AFTER_10 =
+ "[RegionElection] The leader of RegionGroups: {} is not determined after 10 heartbeat interval. Some function might fail.";
+ public static final String REGIONELECTION_WAIT_FOR_LEADER_ELECTION_OF_REGIONGROUPS =
+ "[RegionElection] Wait for leader election of RegionGroups: {}";
+ public static final String REGIONGROUPSTATISTICS_REGIONGROUP =
+ "[RegionGroupStatistics]\t RegionGroup {}: {} -> {}";
+ public static final String REGIONGROUPSTATISTICS_REGIONGROUPSTATISTICSMAP =
+ "[RegionGroupStatistics] RegionGroupStatisticsMap: ";
+ public static final String REGIONGROUPSTATISTICS_REGION_IN_DATANODE =
+ "[RegionGroupStatistics]\t Region in DataNode {}: {} -> {}";
+ public static final String REGIONGROUPSTATISTICS_REGION_IN_DATANODE_NULL =
+ "[RegionGroupStatistics]\t Region in DataNode {}: null -> {}";
+ public static final String REGIONGROUPSTATISTICS_REGION_IN_DATANODE_NULL_2 =
+ "[RegionGroupStatistics]\t Region in DataNode {}: {} -> null";
+ public static final String REGIONGROUPSTATUS_DOESN_T_EXIST =
+ "RegionGroupStatus %s doesn't exist.";
+ public static final String REGIONPRIORITY = "[RegionPriority]\t {}: {}->{}";
+ public static final String REGIONPRIORITY_REGIONPRIORITYMAP =
+ "[RegionPriority] RegionPriorityMap: ";
+ public static final String REGIONPRIORITY_THE_ROUTING_PRIORITY_OF_REGIONGROUPS_IS_CALCULATED =
+ "[RegionPriority] The routing priority of RegionGroups: {} is calculated.";
+ public static final String REGIONPRIORITY_THE_ROUTING_PRIORITY_OF_REGIONGROUPS_IS_NOT_DETERMINED_AFTER =
+ "[RegionPriority] The routing priority of RegionGroups: {} is not determined after 10 heartbeat interval. Some function might fail.";
+ public static final String REGIONPRIORITY_WAIT_FOR_REGION_PRIORITY_UPDATE_OF_REGIONGROUPS =
+ "[RegionPriority] Wait for Region priority update of RegionGroups: {}";
+ public static final String REGION_ID = "Region id ";
+ public static final String REMOVEREGIONPEER_SUBMIT_REMOVEREGIONPEERPROCEDURE_SUCCESSFULLY =
+ "[RemoveRegionPeer] Submit RemoveRegionPeerProcedure successfully: {}";
+ public static final String REMOVE_REGION_TARGET_DATANODE_NOT_FOUND_WILL_SIMPLY_CLEAN_UP =
+ "Remove region: Target DataNode {} not found, will simply clean up the partition table of region {} and do nothing else.";
+ public static final String REPORT_PIPERUNTIMEEXCEPTION_TO_LOCAL_PIPETASKMETA_EXCEPTION_MESSAGE =
+ "Report PipeRuntimeException to local PipeTaskMeta({}), exception message: {}";
+ public static final String RETRYFAILMISSIONS_SERVICE_IS_STARTED_SUCCESSFULLY =
+ "RetryFailMissions service is started successfully.";
+ public static final String RETRYFAILMISSIONS_SERVICE_IS_STOPPED_SUCCESSFULLY =
+ "RetryFailMissions service is stopped successfully.";
+ public static final String SERIALIZATION_FAILED_FOR_THE_ALTER_ENCODING_TIME_SERIES_PLAN_IN =
+ "Serialization failed for the alter encoding time series plan in pipe transmission, skip transfer";
+ public static final String SERIALIZATION_FAILED_FOR_THE_DELETE_LOGICAL_VIEW_PLAN_IN_PIPE =
+ "Serialization failed for the delete logical view plan in pipe transmission, skip transfer";
+ public static final String SERIALIZATION_FAILED_FOR_THE_DELETE_TIME_SERIES_PLAN_IN_PIPE =
+ "Serialization failed for the delete time series plan in pipe transmission, skip transfer";
+ public static final String SOMETHING_WRONG_HAPPENED_WHILE_CALLING_CONSENSUS_LAYER_S_CREATELOCALPEER_API =
+ "Something wrong happened while calling consensus layer's createLocalPeer API.";
+ public static final String SOME_PIPES_NEED_RESTARTING_WILL_RESTART_THEM_AFTER_THIS_SYNC =
+ "Some pipes need restarting, will restart them after this sync";
+ public static final String STARTEXECUTECQ_EXECUTE_CQ_ON_DATANODE_TIME_RANGE_IS_CURRENT_TIME =
+ "[StartExecuteCQ] execute CQ {} on DataNode[{}], time range is [{}, {}), current time is {}";
+ public static final String START_TO_ACTIVATE_UDF_IN_UDF_TABLE_ON_CONFIG_NODES =
+ "Start to activate UDF [{}] in UDF_Table on Config Nodes";
+ public static final String START_TO_ADD_UDF_IN_UDF_TABLE_ON_CONFIG_NODES =
+ "Start to add UDF [{}] in UDF_Table on Config Nodes";
+ public static final String START_TO_CREATE_REGION_ON_DATANODE =
+ "Start to create Region: {} on DataNode: {}";
+ public static final String START_TO_CREATE_UDF_ON_DATA_NODES_NEEDTOSAVEJAR =
+ "Start to create UDF [{}] on Data Nodes, needToSaveJar[{}]";
+ public static final String START_TO_DELETE_REGION_ON_DATANODE =
+ "Start to delete Region: {} on DataNode: {}";
+ public static final String START_TRANSFER_OF = "Start transfer of {}";
+ public static final String STOP_SUBMITTING_CQ_BECAUSE = "Stop submitting CQ {} because {}";
+ public static final String STOP_SUBMITTING_CQ_BECAUSE_CURRENT_NODE_IS_NOT_LEADER_OR =
+ "Stop submitting CQ {} because current node is not leader or current scheduled thread pool is shut down.";
+ public static final String SUBMITTED_ASYNC_CONSENSUS_PIPE_CREATION =
+ "Submitted async consensus pipe creation: {}";
+ public static final String SUBMITTED_ASYNC_CONSENSUS_PIPE_DROP =
+ "Submitted async consensus pipe drop: {}";
+ public static final String SUBMIT_REMOVEAINODEPROCEDURE_SUCCESSFULLY =
+ "Submit RemoveAINodeProcedure successfully, {}";
+ public static final String SUBMIT_REMOVECONFIGNODEPROCEDURE_SUCCESSFULLY =
+ "Submit RemoveConfigNodeProcedure successfully: {}";
+ public static final String SUBMIT_REMOVEDATANODESPROCEDURE_SUCCESSFULLY =
+ "Submit RemoveDataNodesProcedure successfully, {}";
+ public static final String SUBSCRIPTIONCOORDINATORLOCK_IS_HELD_BY_ANOTHER_THREAD_SKIP_THIS_ROUND_OF =
+ "SubscriptionCoordinatorLock is held by another thread, skip this round of sync to avoid procedure and rpc accumulation as much as possible";
+ public static final String SUBSCRIPTIONMETASYNCER_IS_STARTED_SUCCESSFULLY =
+ "SubscriptionMetaSyncer is started successfully.";
+ public static final String SUBSCRIPTIONMETASYNCER_IS_STOPPED_SUCCESSFULLY =
+ "SubscriptionMetaSyncer is stopped successfully.";
+ public static final String SUCCESSFULLY_TRANSFERRED_CONFIG_EVENT =
+ "Successfully transferred config event {}.";
+ public static final String SUCCESSFULLY_TRANSFERRED_CONFIG_REGION_SNAPSHOT =
+ "Successfully transferred config region snapshot {}.";
+ public static final String THERE_IS_NO_RUNNING_DATANODE_TO_EXECUTE_CQ =
+ "There is no RUNNING DataNode to execute CQ {}";
+ public static final String THE_CONFIGNODE_WILL_BE_SHUTDOWN_SOON_MARK_IT_AS_UNKNOWN =
+ "The ConfigNode-{} will be shutdown soon, mark it as Unknown";
+ public static final String THE_CONFIG_REGION_AIR_GAP_CONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING =
+ "The config region air gap connector does not support transferring single file piece bytes.";
+ public static final String THE_CONFIG_REGION_SINK_DOES_NOT_SUPPORT_TRANSFERRING_SINGLE_FILE =
+ "The config region sink does not support transferring single file piece req.";
+ public static final String THE_CONFIG_REGION_SNAPSHOTS_CANNOT_BE_PARSED =
+ "The config region snapshots %s cannot be parsed.";
+ public static final String THE_DATABASE_DOESN_T_EXIST_MAYBE_IT_HAS_BEEN_PRE =
+ "The Database: {} doesn't exist. Maybe it has been pre-deleted.";
+ public static final String THE_DATANODE_WILL_BE_SHUTDOWN_SOON_MARK_IT_AS_UNKNOWN =
+ "The DataNode-{} will be shutdown soon, mark it as Unknown";
+ public static final String THE_REMOVENODEREPLICASELECT_METHOD_OF_GREEDYREGIONGROUPALLOCATOR_IS_YET =
+ "The removeNodeReplicaSelect method of GreedyRegionGroupAllocator is yet to be implemented.";
+ public static final String THE_REMOVENODEREPLICASELECT_METHOD_OF_PARTITEGRAPHPLACEMENTREGIONGROUPALLOCATOR =
+ "The removeNodeReplicaSelect method of PartiteGraphPlacementRegionGroupAllocator is yet to be implemented.";
+ public static final String THE_REMOVE_DATANODE_REQUEST_CHECK_FAILED_REQ_CHECK_RESULT =
+ "The remove DataNode request check failed. req: {}, check result: {}";
+ public static final String TOPOLOGY_ASYMMETRIC_NETWORK_PARTITION_FROM_TO =
+ "[Topology] Asymmetric network partition from {} to {}";
+ public static final String TOPOLOGY_CLUSTER_TOPOLOGY_CHANGED_LATEST =
+ "[Topology] Cluster topology changed, latest: {}";
+ public static final String TOPOLOGY_PROBING_HAS_STARTED_SUCCESSFULLY =
+ "Topology Probing has started successfully";
+ public static final String TOPOLOGY_PROBING_HAS_STOPPED_SUCCESSFULLY =
+ "Topology Probing has stopped successfully";
+ public static final String TOPOLOGY_TOPOLOGY_OF_DATANODE_IS_NOW_TO_DATANODE =
+ "[Topology] Topology of DataNode {} is now {} to DataNode {}";
+ public static final String UNABLE_TO_PARSE_PATH_WHEN_CHECKING_READ_PRIVILEGE_PATH =
+ "Unable to parse path when checking READ privilege, path: {}";
+ public static final String UNEXPECTED_ERROR_HAPPENED_WHILE_CREATING_SERVICE_ON_DATANODE =
+ "Unexpected error happened while creating Service {} on DataNode {}: ";
+ public static final String UNEXPECTED_ERROR_HAPPENED_WHILE_DROPPING_CQ =
+ "Unexpected error happened while dropping cq {}: ";
+ public static final String UNEXPECTED_ERROR_HAPPENED_WHILE_DROPPING_SERVICE_ON_DATANODE =
+ "Unexpected error happened while dropping Service {} on DataNode {}: ";
+ public static final String UNEXPECTED_ERROR_HAPPENED_WHILE_FETCHING_CQ_LIST =
+ "Unexpected error happened while fetching cq list: ";
+ public static final String UNEXPECTED_ERROR_HAPPENED_WHILE_GETTING_USER_DEFINED_SERVICE =
+ "Unexpected error happened while getting user-defined Service: ";
+ public static final String UNEXPECTED_ERROR_HAPPENED_WHILE_SHOWING_CQ =
+ "Unexpected error happened while showing cq: ";
+ public static final String UNEXPECTED_ERROR_HAPPENED_WHILE_SHOWING_SERVICE =
+ "Unexpected error happened while showing Service: ";
+ public static final String UNEXPECTED_ERROR_HAPPENED_WHILE_STARTING_SERVICE_ON_DATANODE =
+ "Unexpected error happened while starting Service {} on DataNode {}: ";
+ public static final String UNEXPECTED_ERROR_HAPPENED_WHILE_STOPPING_SERVICE_ON_DATANODE =
+ "Unexpected error happened while stopping Service {} on DataNode {}: ";
+ public static final String UNEXPECTED_INTERRUPTION_DURING_RETRY_CREATING_PEER_FOR_CONSENSUS_GROUP =
+ "Unexpected interruption during retry creating peer for consensus group";
+ public static final String UNEXPECTED_INTERRUPTION_DURING_RETRY_GETTING_LATEST_REGION_ROUTE_MAP =
+ "Unexpected interruption during retry getting latest region route map";
+ public static final String UNEXPECTED_INTERRUPTION_DURING_WAITING_FOR_CONFIGNODE_LEADER_READY =
+ "Unexpected interruption during waiting for configNode leader ready.";
+ public static final String UNEXPECTED_INTERRUPTION_DURING_WAITING_FOR_GET_CLUSTER_ID =
+ "Unexpected interruption during waiting for get cluster id.";
+ public static final String UNEXPECTED_NULL_PROCEDURE_PARAMETERS_FOR_WAITINGPROCEDUREFINISHED =
+ "Unexpected null procedure parameters for waitingProcedureFinished";
+ public static final String UNKNOWN_DATAPARTITION_ALLOCATION_STRATEGY_USING_INHERIT_STRATEGY_BY_DEFAULT =
+ "Unknown DataPartition allocation strategy {}, using INHERIT strategy by default.";
+ public static final String UNKNOWN_TIMEOUTPOLICY = "Unknown TimeoutPolicy: ";
+ public static final String UN_PARSE_ABLE_PATH_NAME_ENCOUNTERED_DURING_TEMPLATE_PRIVILEGE_TRIMMING =
+ "Un-parse-able path name encountered during template privilege trimming, please check";
+ public static final String UPGRADE_CONFIGNODE_CONSENSUS_WAL_DIR_FOR_SIMPLECONSENSUS_FROM_VERSION_1 =
+ "upgrade ConfigNode consensus wal dir for SimpleConsensus from version/1.0 to version/1.1 failed, ";
+ public static final String WRITE_PARTITION_ALLOCATION_RESULT_FAILED_BECAUSE =
+ "Write partition allocation result failed because: {}";
+
+  private ManagerMessages() {} // Constant holder; private ctor enforces noninstantiability.
+}
diff --git a/iotdb-core/confignode/src/main/i18n/zh/org/apache/iotdb/confignode/i18n/ProcedureMessages.java b/iotdb-core/confignode/src/main/i18n/zh/org/apache/iotdb/confignode/i18n/ProcedureMessages.java
new file mode 100644
index 0000000000000..8d619ac0d9b6a
--- /dev/null
+++ b/iotdb-core/confignode/src/main/i18n/zh/org/apache/iotdb/confignode/i18n/ProcedureMessages.java
@@ -0,0 +1,1000 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.i18n;
+
+public final class ProcedureMessages {
+
+ public static final String ADDNEVERFINISHSUBPROCEDUREPROCEDURE_RUN_AGAIN_WHICH_SHOULD_NEVER_HAPPEN =
+ "AddNeverFinishSubProcedureProcedure run again, which should never happen";
+ public static final String ADDREGIONLOCATION_FINISHED_ADD_REGION_TO_RESULT_IS =
+ "AddRegionLocation finished, add region {} to {}, result is {}";
+ public static final String ADDTABLECOLUMN_COSTS_MS = "AddTableColumn-{}.{}-{} costs {}ms";
+ public static final String ADD_COLUMN_TO_TABLE = "Add column to table {}.{}";
+ public static final String ADD_CONFIGNODE_FAILED = "Add ConfigNode failed ";
+ public static final String ALTERCONSUMERGROUPPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES =
+ "AlterConsumerGroupProcedure: executeFromOperateOnConfigNodes({})";
+ public static final String ALTERCONSUMERGROUPPROCEDURE_EXECUTEFROMOPERATEONDATANODES =
+ "AlterConsumerGroupProcedure: executeFromOperateOnDataNodes({})";
+ public static final String ALTERCONSUMERGROUPPROCEDURE_EXECUTEFROMVALIDATE_TRY_TO_VALIDATE =
+ "AlterConsumerGroupProcedure: executeFromValidate, try to validate";
+ public static final String ALTERCONSUMERGROUPPROCEDURE_ROLLBACKFROMOPERATEONCONFIGNODES =
+ "AlterConsumerGroupProcedure: rollbackFromOperateOnConfigNodes({})";
+ public static final String ALTERCONSUMERGROUPPROCEDURE_ROLLBACKFROMOPERATEONDATANODES =
+ "AlterConsumerGroupProcedure: rollbackFromOperateOnDataNodes";
+ public static final String ALTERCONSUMERGROUPPROCEDURE_ROLLBACKFROMVALIDATE =
+ "AlterConsumerGroupProcedure: rollbackFromValidate";
+ public static final String ALTERENCODINGCOMPRESSOR_COSTS_MS =
+ "AlterEncodingCompressor-[{}] costs {}ms";
+ public static final String ALTERING_COLUMN_IN_ON_CONFIGNODE =
+ "Altering column {} in {}.{} on configNode";
+ public static final String ALTERING_TIME_SERIES_DATA_TYPE = "altering time series {} data type";
+ public static final String ALTERLOGICALVIEW_COSTS_MS = "AlterLogicalView-[{}] costs {}ms";
+ public static final String ALTERPIPEPROCEDUREV2_EXECUTEFROMCALCULATEINFOFORTASK =
+ "AlterPipeProcedureV2: executeFromCalculateInfoForTask({})";
+ public static final String ALTERPIPEPROCEDUREV2_EXECUTEFROMOPERATEONDATANODES =
+ "AlterPipeProcedureV2: executeFromOperateOnDataNodes({})";
+ public static final String ALTERPIPEPROCEDUREV2_EXECUTEFROMVALIDATETASK =
+ "AlterPipeProcedureV2: executeFromValidateTask({})";
+ public static final String ALTERPIPEPROCEDUREV2_EXECUTEFROMWRITECONFIGNODECONSENSUS =
+ "AlterPipeProcedureV2: executeFromWriteConfigNodeConsensus({})";
+ public static final String ALTERPIPEPROCEDUREV2_ROLLBACKFROMCALCULATEINFOFORTASK =
+ "AlterPipeProcedureV2: rollbackFromCalculateInfoForTask({})";
+ public static final String ALTERPIPEPROCEDUREV2_ROLLBACKFROMOPERATEONDATANODES =
+ "AlterPipeProcedureV2: rollbackFromOperateOnDataNodes({})";
+ public static final String ALTERPIPEPROCEDUREV2_ROLLBACKFROMVALIDATETASK =
+ "AlterPipeProcedureV2: rollbackFromValidateTask({})";
+ public static final String ALTERPIPEPROCEDUREV2_ROLLBACKFROMWRITECONFIGNODECONSENSUS =
+ "AlterPipeProcedureV2: rollbackFromWriteConfigNodeConsensus({})";
+ public static final String ALTERTABLECOLUMNDATATYPE_COSTS_MS =
+ "AlterTableColumnDataType-{}.{}-{} costs {}ms";
+ public static final String ALTERTIMESERIESDATATYPE_COSTS_MS =
+ "AlterTimeSeriesDataType-{}-[{}] costs {}ms";
+ public static final String ALTERTOPICPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES_TRY_TO_ALTER_TOPIC =
+ "AlterTopicProcedure: executeFromOperateOnConfigNodes, try to alter topic";
+ public static final String ALTERTOPICPROCEDURE_EXECUTEFROMOPERATEONDATANODES =
+ "AlterTopicProcedure: executeFromOperateOnDataNodes({})";
+ public static final String ALTERTOPICPROCEDURE_EXECUTEFROMVALIDATE =
+ "AlterTopicProcedure: executeFromValidate";
+ public static final String ALTERTOPICPROCEDURE_ROLLBACKFROMOPERATEONCONFIGNODES =
+ "AlterTopicProcedure: rollbackFromOperateOnConfigNodes({})";
+ public static final String ALTERTOPICPROCEDURE_ROLLBACKFROMOPERATEONDATANODES =
+ "AlterTopicProcedure: rollbackFromOperateOnDataNodes({})";
+ public static final String ALTERTOPICPROCEDURE_ROLLBACKFROMVALIDATE =
+ "AlterTopicProcedure: rollbackFromValidate({})";
+ public static final String ALTER_ENCODING_COMPRESSOR_IN_SCHEMA_REGIONS_FAILED_FAILURES =
+ "Alter encoding compressor %s in schema regions failed. Failures: %s";
+ public static final String ALTER_ENCODING_COMPRESSOR_IN_SCHEMA_REGION_FOR_TIMESERIES =
+ "Alter encoding {} & compressor {} in schema region for timeSeries {}";
+ public static final String ALTER_TIMESERIES_DATA_TYPE_TO_IN_SCHEMA_REGIONS_FAILED_FAILURES =
+ "Alter timeseries %s data type to %s in schema regions failed. Failures: %s";
+ public static final String ALTER_TIME_SERIES_DATA_TYPE_FAILED =
+ "alter time series {} data type failed";
+ public static final String ALTER_VIEW = "Alter view {}";
+ public static final String ALTER_VIEW_FAILED_WHEN_BECAUSE_FAILED_TO_EXECUTE_IN_ALL =
+ "Alter view %s failed when [%s] because failed to execute in all replicaset of schemaRegion %s. Failure nodes: %s, statuses: %s";
+ public static final String AUTHENTICATION_FAILED = "认证失败。";
+ public static final String AUTH_PROCEDURE_CLEAN_DATANODE_CACHE_SUCCESSFULLY =
+ "Auth procedure: clean datanode cache successfully";
+ public static final String BEGIN_TO_CHANGE_DATANODE_STATUS_NODESTATUSMAP =
+ "{}, Begin to change DataNode status, nodeStatusMap: {}";
+ public static final String BEGIN_TO_STOP_DATANODES_AND_KILL_THE_DATANODE_PROCESS =
+ "{}, Begin to stop DataNodes and kill the DataNode process: {}";
+ public static final String BROADCASTDATANODESTATUSCHANGE_FINISHED_DATANODE =
+ "{}, BroadcastDataNodeStatusChange finished, dataNode: {}";
+ public static final String BROADCASTDATANODESTATUSCHANGE_MEETS_ERROR_STATUS_CHANGE_DATANODES_ERROR_DATANODE =
+ "{}, BroadcastDataNodeStatusChange meets error, status change dataNodes: {}, error datanode: {}";
+ public static final String BROADCASTDATANODESTATUSCHANGE_START_DATANODE =
+ "{}, BroadcastDataNodeStatusChange start, dataNode: {}";
+ public static final String CALL_CHANGEREGIONLEADER_FAIL_FOR_THE_TIME_WILL_SLEEP_MS =
+ "Call changeRegionLeader fail for the {} time, will sleep {} ms";
+ public static final String CANNOT_FIND_DATANODES_CONTAIN_THE_GIVEN_REGION =
+ "Cannot find DataNodes contain the given region: {}";
+ public static final String CANNOT_FIND_REGION_REPLICA_NODES_IN_CREATEPEER_REGIONID =
+ "{}, Cannot find region replica nodes in createPeer, regionId: {}";
+ public static final String CANNOT_FIND_REGION_REPLICA_NODES_REGION =
+ "Cannot find region replica nodes, region: {}";
+ public static final String CATCH_EXCEPTION_WHILE_DESERIALIZING_PROCEDURE_THIS_PROCEDURE_WILL_BE_IGNORED =
+ "Catch exception while deserializing procedure, this procedure will be ignored.";
+ public static final String CHANGE_REGION_LEADER_FINISHED_REGIONID_NEWLEADERNODE =
+ "{}, Change region leader finished, regionId: {}, newLeaderNode: {}";
+ public static final String CHECK_AND_INVALIDATE_COLUMN_IN_WHEN_ALTERING_COLUMN_DATA_TYPE =
+ "Check and invalidate column {} in {}.{} when altering column data type";
+ public static final String CHECK_AND_INVALIDATE_COLUMN_IN_WHEN_DROPPING_COLUMN =
+ "Check and invalidate column {} in {}.{} when dropping column";
+ public static final String CHECK_AND_INVALIDATE_SERIES_WHEN_ALTERING_TIME_SERIES_DATA_TYPE =
+ "Check and invalidate series {} when altering time series data type";
+ public static final String CHECK_AND_INVALIDATE_TABLE_WHEN_DROPPING_TABLE =
+ "Check and invalidate table {}.{} when dropping table";
+ public static final String CHECK_DATANODE_TEMPLATE_ACTIVATION_OF_TEMPLATE_SET_ON =
+ "Check DataNode template activation of template {} set on {}";
+ public static final String CHECK_TEMPLATE_EXISTENCE_SET_ON_PATH_WHEN_TRY_SETTING_TEMPLATE =
+ "Check template existence set on path {} when try setting template {}";
+ public static final String CHECK_THE_EXISTENCE_OF_TABLE = "Check the existence of table {}.{}";
+ public static final String CHECK_TIMESERIES_EXISTENCE_UNDER_PATH_WHEN_TRY_SETTING_TEMPLATE =
+ "Check timeseries existence under path {} when try setting template {}";
+ public static final String CLEARING_CACHE_AFTER_ALTER_TIME_SERIES_DATA_TYPE =
+ "clearing cache after alter time series {} data type";
+ public static final String COLUMN_CHECK_FOR_TABLE_WHEN_ADDING_COLUMN =
+ "Column check for table {}.{} when adding column";
+ public static final String COLUMN_CHECK_FOR_TABLE_WHEN_RENAMING_COLUMN =
+ "Column check for table {}.{} when renaming column";
+ public static final String COLUMN_CHECK_FOR_TABLE_WHEN_RENAMING_TABLE =
+ "Column check for table {}.{} when renaming table";
+ public static final String COMMIT_CREATE_TABLE = "Commit create table {}.{}";
+ public static final String COMMIT_RELEASE_INFO_OF_TABLE_WHEN_ADDING_COLUMN =
+ "Commit release info of table {}.{} when adding column";
+ public static final String COMMIT_RELEASE_INFO_OF_TABLE_WHEN_ALTERING_COLUMN =
+ "Commit release info of table {}.{} when altering column";
+ public static final String COMMIT_RELEASE_INFO_OF_TABLE_WHEN_RENAMING_COLUMN =
+ "Commit release info of table {}.{} when renaming column";
+ public static final String COMMIT_RELEASE_INFO_OF_TABLE_WHEN_RENAMING_TABLE =
+ "Commit release info of table {}.{} when renaming table";
+ public static final String COMMIT_RELEASE_INFO_OF_TABLE_WHEN_SETTING_PROPERTIES =
+ "Commit release info of table {}.{} when setting properties";
+ public static final String COMMIT_RELEASE_SCHEMAENGINE_TEMPLATE_SET_ON_PATH =
+ "Commit release schemaengine template {} set on path {}";
+ public static final String COMMIT_RELEASE_TABLE = "Commit release table {}.{}";
+ public static final String COMMIT_SET_SCHEMAENGINE_TEMPLATE_ON_PATH =
+ "Commit set schemaengine template {} on path {}";
+ public static final String CONSENSUSPIPEGUARDIAN_CONSENSUS_PIPE_IS_STOPPED_RESTARTING_ASYNCHRONOUSLY =
+ "[ConsensusPipeGuardian] consensus pipe [{}] is stopped, restarting asynchronously";
+ public static final String CONSENSUSPIPEGUARDIAN_CONSENSUS_PIPE_MISSING_CREATING_ASYNCHRONOUSLY =
+ "[ConsensusPipeGuardian] consensus pipe [{}] missing, creating asynchronously";
+ public static final String CONSENSUSPIPEGUARDIAN_UNEXPECTED_CONSENSUS_PIPE_EXISTS_DROPPING_ASYNCHRONOUSLY =
+ "[ConsensusPipeGuardian] unexpected consensus pipe [{}] exists, dropping asynchronously";
+ public static final String CONSTRUCT_SCHEMAENGINE_BLACK_LIST_OF_DEVICES_IN =
+ "Construct schemaEngine black list of devices in {}.{}";
+ public static final String CONSTRUCT_SCHEMAENGINE_BLACK_LIST_OF_TEMPLATE_SET_ON =
+ "Construct schemaengine black list of template {} set on {}";
+ public static final String CONSTRUCT_SCHEMAENGINE_BLACK_LIST_OF_TIMESERIES =
+ "Construct schemaEngine black list of timeSeries {}";
+ public static final String CONSTRUCT_SCHEMA_BLACK_LIST_WITH_TEMPLATE =
+ "Construct schema black list with template {}";
+ public static final String CONSTRUCT_VIEW_SCHEMAENGINE_BLACK_LIST_OF_VIEW =
+ "Construct view schemaengine black list of view {}";
+ public static final String CONSUMERGROUPMETASYNCPROCEDURE_ACQUIRELOCK_SKIP_THE_PROCEDURE_DUE_TO =
+ "ConsumerGroupMetaSyncProcedure: acquireLock, skip the procedure due to the last execution time {}";
+ public static final String CONSUMERGROUPMETASYNCPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES =
+ "ConsumerGroupMetaSyncProcedure: executeFromOperateOnConfigNodes";
+ public static final String CONSUMERGROUPMETASYNCPROCEDURE_EXECUTEFROMOPERATEONDATANODES =
+ "ConsumerGroupMetaSyncProcedure: executeFromOperateOnDataNodes";
+ public static final String CONSUMERGROUPMETASYNCPROCEDURE_EXECUTEFROMVALIDATE =
+ "ConsumerGroupMetaSyncProcedure: executeFromValidate";
+ public static final String CONSUMERGROUPMETASYNCPROCEDURE_ROLLBACKFROMOPERATEONCONFIGNODES =
+ "ConsumerGroupMetaSyncProcedure: rollbackFromOperateOnConfigNodes";
+ public static final String CONSUMERGROUPMETASYNCPROCEDURE_ROLLBACKFROMOPERATEONDATANODES =
+ "ConsumerGroupMetaSyncProcedure: rollbackFromOperateOnDataNodes";
+ public static final String CONSUMERGROUPMETASYNCPROCEDURE_ROLLBACKFROMVALIDATE =
+ "ConsumerGroupMetaSyncProcedure: rollbackFromValidate";
+ public static final String CREATEDATABASE_FAIL_TWICE = "createDatabase fail twice";
+ public static final String CREATED_CONSENSUS_PIPE = "{}, Created consensus pipe {}";
+ public static final String CREATEPIPEPLUGINPROCEDURE_EXECUTEFROMCREATEONCONFIGNODES =
+ "CreatePipePluginProcedure: executeFromCreateOnConfigNodes({})";
+ public static final String CREATEPIPEPLUGINPROCEDURE_EXECUTEFROMCREATEONDATANODES =
+ "CreatePipePluginProcedure: executeFromCreateOnDataNodes({})";
+ public static final String CREATEPIPEPLUGINPROCEDURE_EXECUTEFROMLOCK =
+ "CreatePipePluginProcedure: executeFromLock({})";
+ public static final String CREATEPIPEPLUGINPROCEDURE_EXECUTEFROMUNLOCK =
+ "CreatePipePluginProcedure: executeFromUnlock({})";
+ public static final String CREATEPIPEPLUGINPROCEDURE_FAILED_IN_STATE_WILL_ROLLBACK =
+ "CreatePipePluginProcedure failed in state {}, will rollback";
+ public static final String CREATEPIPEPLUGINPROCEDURE_ROLLBACKFROMCREATEONCONFIGNODES =
+ "CreatePipePluginProcedure: rollbackFromCreateOnConfigNodes({})";
+ public static final String CREATEPIPEPLUGINPROCEDURE_ROLLBACKFROMCREATEONDATANODES =
+ "CreatePipePluginProcedure: rollbackFromCreateOnDataNodes({})";
+ public static final String CREATEPIPEPLUGINPROCEDURE_ROLLBACKFROMLOCK =
+ "CreatePipePluginProcedure: rollbackFromLock({})";
+ public static final String CREATEPIPEPROCEDUREV2_EXECUTEFROMCALCULATEINFOFORTASK =
+ "CreatePipeProcedureV2: executeFromCalculateInfoForTask({})";
+ public static final String CREATEPIPEPROCEDUREV2_EXECUTEFROMOPERATEONDATANODES =
+ "CreatePipeProcedureV2: executeFromOperateOnDataNodes({})";
+ public static final String CREATEPIPEPROCEDUREV2_EXECUTEFROMVALIDATETASK =
+ "CreatePipeProcedureV2: executeFromValidateTask({})";
+ public static final String CREATEPIPEPROCEDUREV2_EXECUTEFROMWRITECONFIGNODECONSENSUS =
+ "CreatePipeProcedureV2: executeFromWriteConfigNodeConsensus({})";
+ public static final String CREATEPIPEPROCEDUREV2_ROLLBACKFROMCALCULATEINFOFORTASK =
+ "CreatePipeProcedureV2: rollbackFromCalculateInfoForTask({})";
+ public static final String CREATEPIPEPROCEDUREV2_ROLLBACKFROMOPERATEONDATANODES =
+ "CreatePipeProcedureV2: rollbackFromOperateOnDataNodes({})";
+ public static final String CREATEPIPEPROCEDUREV2_ROLLBACKFROMVALIDATETASK =
+ "CreatePipeProcedureV2: rollbackFromValidateTask({})";
+ public static final String CREATEPIPEPROCEDUREV2_ROLLBACKFROMWRITECONFIGNODECONSENSUS =
+ "CreatePipeProcedureV2: rollbackFromWriteConfigNodeConsensus({})";
+ public static final String CREATEREGIONGROUPS_ALL_REPLICAS_OF_REGIONGROUP_ARE_CREATED_SUCCESSFULLY =
+ "[CreateRegionGroups] All replicas of RegionGroup: {} are created successfully!";
+ public static final String CREATEREGIONGROUPS_FAILED_TO_CREATE_MOST_OF_REPLICAS_IN_REGIONGROUP_THE =
+ "[CreateRegionGroups] Failed to create most of replicas in RegionGroup: {}, The redundant replicas in this RegionGroup will be deleted.";
+ public static final String CREATEREGIONGROUPS_FAILED_TO_CREATE_SOME_REPLICAS_OF_REGIONGROUP_BUT_THIS =
+ "[CreateRegionGroups] Failed to create some replicas of RegionGroup: {}, but this RegionGroup can still be used.";
+ public static final String CREATESUBSCRIPTIONPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES =
+ "CreateSubscriptionProcedure: executeFromOperateOnConfigNodes";
+ public static final String CREATESUBSCRIPTIONPROCEDURE_EXECUTEFROMOPERATEONDATANODES =
+ "CreateSubscriptionProcedure: executeFromOperateOnDataNodes";
+ public static final String CREATESUBSCRIPTIONPROCEDURE_EXECUTEFROMVALIDATE =
+ "CreateSubscriptionProcedure: executeFromValidate";
+ public static final String CREATESUBSCRIPTIONPROCEDURE_ROLLBACKFROMOPERATEONCONFIGNODES =
+ "CreateSubscriptionProcedure: rollbackFromOperateOnConfigNodes";
+ public static final String CREATESUBSCRIPTIONPROCEDURE_ROLLBACKFROMOPERATEONDATANODES =
+ "CreateSubscriptionProcedure: rollbackFromOperateOnDataNodes";
+ public static final String CREATESUBSCRIPTIONPROCEDURE_ROLLBACKFROMVALIDATE =
+ "CreateSubscriptionProcedure: rollbackFromValidate";
+ public static final String CREATETABLE_COSTS_MS = "CreateTable-{}.{}-{} costs {}ms";
+ public static final String CREATETOPICPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES =
+ "CreateTopicProcedure: executeFromOperateOnConfigNodes({})";
+ public static final String CREATETOPICPROCEDURE_EXECUTEFROMOPERATEONDATANODES =
+ "CreateTopicProcedure: executeFromOperateOnDataNodes({})";
+ public static final String CREATETOPICPROCEDURE_EXECUTEFROMVALIDATE =
+ "CreateTopicProcedure: executeFromValidate";
+ public static final String CREATETOPICPROCEDURE_ROLLBACKFROMCREATEONCONFIGNODES =
+ "CreateTopicProcedure: rollbackFromCreateOnConfigNodes({})";
+ public static final String CREATETOPICPROCEDURE_ROLLBACKFROMCREATEONDATANODES =
+ "CreateTopicProcedure: rollbackFromCreateOnDataNodes({})";
+ public static final String CREATETOPICPROCEDURE_ROLLBACKFROMVALIDATE =
+ "CreateTopicProcedure: rollbackFromValidate({})";
+ public static final String DATANODE_IS_SUBMIT_DELETE_OLD_REGION_PEER_WITH_A_SINGLE =
+ "{}, DataNode {} is {}, submit DELETE_OLD_REGION_PEER with a single RPC attempt and let RemoveRegionPeerProcedure handle retries.";
+ public static final String DEACTIVATETEMPLATE_COSTS_MS = "DeactivateTemplate-[{}] costs {}ms";
+ public static final String DEACTIVATE_TEMPLATE_OF = "Deactivate template of {}";
+ public static final String DEACTIVATE_TEMPLATE_OF_FAILED_WHEN_BECAUSE_FAILED_TO_EXECUTE_IN =
+ "Deactivate template of %s failed when [%s] because failed to execute in all replicaset of %s %s. Failure: %s";
+ public static final String DELETEDATABASEPROCEDURE_DELETE_DATABASE =
+ "[DeleteDatabaseProcedure] Delete database ";
+ public static final String DELETEDATABASEPROCEDURE_DELETE_DATABASESCHEMA_FAILED =
+ "[DeleteDatabaseProcedure] Delete DatabaseSchema failed";
+ public static final String DELETEDATABASEPROCEDURE_INVALIDATE_CACHE_FAILED =
+ "[DeleteDatabaseProcedure] Invalidate cache failed";
+ // --- DeleteDatabase / Delete* procedure progress and failure messages ---
+ // "{}" placeholders are SLF4J-style (filled by Logger); "%s" placeholders are
+ // String.format-style. NOTE(review): the mix appears intentional per call site — confirm.
+ public static final String DELETEDATABASEPROCEDURE_STATE_STUCK_AT =
+ "[DeleteDatabaseProcedure] State stuck at ";
+ public static final String DELETEDEVICES_COSTS_MS = "DeleteDevices-[{}] costs {}ms";
+ public static final String DELETELOGICALVIEW_COSTS_MS = "DeleteLogicalView-[{}] costs {}ms";
+ public static final String DELETETIMESERIES_COSTS_MS = "DeleteTimeSeries-[{}] costs {}ms";
+ public static final String DELETE_DATA_OF_DEVICES_IN = "Delete data of devices in {}.{}";
+ public static final String DELETE_DATA_OF_TEMPLATE_TIMESERIES =
+ "Delete data of template timeSeries {}";
+ public static final String DELETE_DATA_OF_TIMESERIES = "Delete data of timeSeries {}";
+ public static final String DELETE_DEVICES_IN_IN_SCHEMAENGINE =
+ "Delete devices in {}.{} in schemaEngine";
+ public static final String DELETE_TIMESERIES_SCHEMAENGINE_OF =
+ "Delete timeSeries schemaEngine of {}";
+ public static final String DELETE_TIME_SERIES_FAILED_WHEN_BECAUSE_FAILED_TO_EXECUTE_IN =
+ "Delete time series %s failed when [%s] because failed to execute in all replicaset of %s %s. Failures: %s";
+ public static final String DELETE_VIEW_FAILED_WHEN_BECAUSE_FAILED_TO_EXECUTE_IN_ALL =
+ "Delete view %s failed when [%s] because failed to execute in all replicaset of schemaRegion %s. Failures: %s";
+ public static final String DELETE_VIEW_SCHEMAENGINE_OF = "Delete view schemaengine of {}";
+ public static final String DELETING_DATA_FOR_TABLE = "Deleting data for table {}.{}";
+ public static final String DELETING_DEVICES_FOR_TABLE_WHEN_DROPPING_TABLE =
+ "Deleting devices for table {}.{} when dropping table";
+ public static final String DESERIALIZE_MEETS_ERROR_IN_CREATEREGIONGROUPSPROCEDURE =
+ "Deserialize meets error in CreateRegionGroupsProcedure";
+ public static final String DROPPING_COLUMN_IN_ON_CONFIGNODE =
+ "Dropping column {} in {}.{} on configNode";
+ public static final String DROPPING_TABLE_ON_CONFIGNODE = "Dropping table {}.{} on configNode";
+ // --- DropPipePluginProcedure state-machine trace messages ---
+ public static final String DROPPIPEPLUGINPROCEDURE_EXECUTEFROMDROPONCONFIGNODES =
+ "DropPipePluginProcedure: executeFromDropOnConfigNodes({})";
+ public static final String DROPPIPEPLUGINPROCEDURE_EXECUTEFROMDROPONDATANODES =
+ "DropPipePluginProcedure: executeFromDropOnDataNodes({})";
+ public static final String DROPPIPEPLUGINPROCEDURE_EXECUTEFROMLOCK =
+ "DropPipePluginProcedure: executeFromLock({})";
+ public static final String DROPPIPEPLUGINPROCEDURE_EXECUTEFROMUNLOCK =
+ "DropPipePluginProcedure: executeFromUnlock({})";
+ public static final String DROPPIPEPLUGINPROCEDURE_FAILED_IN_STATE_WILL_ROLLBACK =
+ "DropPipePluginProcedure failed in state {}, will rollback";
+ public static final String DROPPIPEPLUGINPROCEDURE_ROLLBACKFROMDROPONCONFIGNODES =
+ "DropPipePluginProcedure: rollbackFromDropOnConfigNodes({})";
+ public static final String DROPPIPEPLUGINPROCEDURE_ROLLBACKFROMDROPONDATANODES =
+ "DropPipePluginProcedure: rollbackFromDropOnDataNodes({})";
+ public static final String DROPPIPEPLUGINPROCEDURE_ROLLBACKFROMLOCK =
+ "DropPipePluginProcedure: rollbackFromLock({})";
+ // --- DropPipeProcedureV2 state-machine trace messages ---
+ public static final String DROPPIPEPROCEDUREV2_EXECUTEFROMCALCULATEINFOFORTASK =
+ "DropPipeProcedureV2: executeFromCalculateInfoForTask({})";
+ public static final String DROPPIPEPROCEDUREV2_EXECUTEFROMOPERATEONDATANODES =
+ "DropPipeProcedureV2: executeFromOperateOnDataNodes({})";
+ public static final String DROPPIPEPROCEDUREV2_EXECUTEFROMVALIDATETASK =
+ "DropPipeProcedureV2: executeFromValidateTask({})";
+ public static final String DROPPIPEPROCEDUREV2_EXECUTEFROMWRITECONFIGNODECONSENSUS =
+ "DropPipeProcedureV2: executeFromWriteConfigNodeConsensus({})";
+ public static final String DROPPIPEPROCEDUREV2_ROLLBACKFROMCALCULATEINFOFORTASK =
+ "DropPipeProcedureV2: rollbackFromCalculateInfoForTask({})";
+ public static final String DROPPIPEPROCEDUREV2_ROLLBACKFROMOPERATEONDATANODES =
+ "DropPipeProcedureV2: rollbackFromOperateOnDataNodes({})";
+ public static final String DROPPIPEPROCEDUREV2_ROLLBACKFROMVALIDATETASK =
+ "DropPipeProcedureV2: rollbackFromValidateTask({})";
+ public static final String DROPPIPEPROCEDUREV2_ROLLBACKFROMWRITECONFIGNODECONSENSUS =
+ "DropPipeProcedureV2: rollbackFromWriteConfigNodeConsensus({})";
+ // --- DropSubscriptionProcedure state-machine trace messages ---
+ public static final String DROPSUBSCRIPTIONPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES =
+ "DropSubscriptionProcedure: executeFromOperateOnConfigNodes";
+ public static final String DROPSUBSCRIPTIONPROCEDURE_EXECUTEFROMOPERATEONDATANODES =
+ "DropSubscriptionProcedure: executeFromOperateOnDataNodes";
+ public static final String DROPSUBSCRIPTIONPROCEDURE_EXECUTEFROMVALIDATE =
+ "DropSubscriptionProcedure: executeFromValidate";
+ public static final String DROPSUBSCRIPTIONPROCEDURE_ROLLBACKFROMLOCK =
+ "DropSubscriptionProcedure: rollbackFromLock";
+ public static final String DROPSUBSCRIPTIONPROCEDURE_ROLLBACKFROMOPERATEONCONFIGNODES =
+ "DropSubscriptionProcedure: rollbackFromOperateOnConfigNodes";
+ public static final String DROPSUBSCRIPTIONPROCEDURE_ROLLBACKFROMOPERATEONDATANODES =
+ "DropSubscriptionProcedure: rollbackFromOperateOnDataNodes";
+ public static final String DROPTABLECOLUMN_COSTS_MS = "DropTableColumn-{}.{}-{} costs {}ms";
+ public static final String DROPTABLE_COSTS_MS = "DropTable-{}.{}-{} costs {}ms";
+ // --- DropTopicProcedure state-machine trace messages ---
+ public static final String DROPTOPICPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES =
+ "DropTopicProcedure: executeFromOperateOnConfigNodes({})";
+ public static final String DROPTOPICPROCEDURE_EXECUTEFROMOPERATEONDATANODES =
+ "DropTopicProcedure: executeFromOperateOnDataNodes({})";
+ public static final String DROPTOPICPROCEDURE_EXECUTEFROMVALIDATE =
+ "DropTopicProcedure: executeFromValidate({})";
+ public static final String DROPTOPICPROCEDURE_ROLLBACKFROMCREATEONCONFIGNODES =
+ "DropTopicProcedure: rollbackFromCreateOnConfigNodes({})";
+ public static final String DROPTOPICPROCEDURE_ROLLBACKFROMCREATEONDATANODES =
+ "DropTopicProcedure: rollbackFromCreateOnDataNodes({})";
+ public static final String DROPTOPICPROCEDURE_ROLLBACKFROMVALIDATE =
+ "DropTopicProcedure: rollbackFromValidate({})";
+ // --- Deserialization / plan-execution error messages ---
+ public static final String ERROR_IN_DESERIALIZE = "Error in deserialize {}";
+ public static final String ERROR_IN_DESERIALIZE_PROCID_THIS_PROCEDURE_WILL_BE_IGNORED_IT =
+ "Error in deserialize {} (procID {}). This procedure will be ignored. It may belong to old version and cannot be used now.";
+ public static final String EXECUTE_AUTH_PLAN_SUCCESS_TO_INVALIDATE_DATANODES =
+ "Execute auth plan {} success. To invalidate datanodes: {}";
+ public static final String EXECUTING_ON_REGION_FOR_COLUMN_IN_WHEN_DROPPING_COLUMN =
+ "Executing on region for column {} in {}.{} when dropping column";
+ // --- "Failed to ..." messages (CQ, subscription, topic, pipe, partition) ---
+ public static final String FAILED_TO_ACTIVE_CQ_BECAUSE_OF_NO_SUCH_CQ =
+ "Failed to active CQ {} because of no such cq: {}";
+ public static final String FAILED_TO_ACTIVE_CQ_BECAUSE_THIS_CQ_HAS_ALREADY_BEEN =
+ "Failed to active CQ {} because this cq has already been active";
+ public static final String FAILED_TO_ACTIVE_CQ_SUCCESSFULLY_BECAUSE_OF_UNKNOWN_REASONS =
+ "Failed to active CQ {} successfully because of unknown reasons {}";
+ public static final String FAILED_TO_ALTER_CONSUMER_GROUP_ON_CONFIG_NODES_BECAUSE =
+ "Failed to alter consumer group %s on config nodes, because %s";
+ public static final String FAILED_TO_ALTER_CONSUMER_GROUP_ON_DATA_NODES_BECAUSE =
+ "Failed to alter consumer group (%s -> %s) on data nodes, because %s";
+ public static final String FAILED_TO_ALTER_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED_LATER =
+ "Failed to alter pipe {}, details: {}, metadata will be synchronized later.";
+ public static final String FAILED_TO_ALTER_TOPIC_ON_CONFIG_NODES_BECAUSE =
+ "Failed to alter topic (%s -> %s) on config nodes, because %s";
+ public static final String FAILED_TO_ALTER_TOPIC_ON_DATA_NODES_BECAUSE =
+ "Failed to alter topic (%s -> %s) on data nodes, because %s";
+ public static final String FAILED_TO_CHANGE_DATANODE_STATUS_DATANODEID_NODESTATUS =
+ "{}, Failed to change DataNode status, dataNodeId={}, nodeStatus={}";
+ public static final String FAILED_TO_COMMIT_SET_TEMPLATE_ON_PATH_DUE_TO =
+ "Failed to commit set template {} on path {} due to {}";
+ public static final String FAILED_TO_CREATE_CONSENSUS_PIPE =
+ "{}, Failed to create consensus pipe {}: {}";
+ public static final String FAILED_TO_CREATE_PIPES_WHEN_CREATING_SUBSCRIPTION_WITH_REQUEST_DETAILS =
+ "Failed to create pipes %s when creating subscription with request %s, details: %s, metadata will be synchronized later.";
+ public static final String FAILED_TO_CREATE_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED_LATER =
+ "Failed to create pipe {}, details: {}, metadata will be synchronized later.";
+ public static final String FAILED_TO_CREATE_PIPE_PLUGIN_INSTANCE_ON_DATA_NODES =
+ "Failed to create pipe plugin instance [%s] on data nodes";
+ public static final String FAILED_TO_CREATE_SUBSCRIPTION_WITH_REQUEST_ON_CONFIG_NODES_BECAUSE =
+ "Failed to create subscription with request %s on config nodes, because %s";
+ public static final String FAILED_TO_CREATE_TOPIC_ON_CONFIG_NODES_BECAUSE =
+ "Failed to create topic %s on config nodes, because %s";
+ public static final String FAILED_TO_CREATE_TOPIC_ON_DATA_NODES_BECAUSE =
+ "Failed to create topic %s on data nodes, because %s";
+ public static final String FAILED_TO_DESERIALIZE_DATAPARTITIONTABLES =
+ "Failed to deserialize dataPartitionTables";
+ public static final String FAILED_TO_DESERIALIZE_FINALDATAPARTITIONTABLES =
+ "Failed to deserialize finalDataPartitionTables";
+ public static final String FAILED_TO_DO_INACTIVE_ROLLBACK_OF_CQ_BECAUSE_OF_NO =
+ "Failed to do [INACTIVE] rollback of CQ {} because of no such cq: {}";
+ public static final String FAILED_TO_DO_INACTIVE_ROLLBACK_OF_CQ_BECAUSE_OF_UNKNOWN =
+ "Failed to do [INACTIVE] rollback of CQ {} because of unknown reasons {}";
+ public static final String FAILED_TO_DROP_PIPES_WHEN_DROPPING_SUBSCRIPTION_WITH_REQUEST_BECAUSE =
+ "Failed to drop pipes %s when dropping subscription with request %s, because %s";
+ public static final String FAILED_TO_DROP_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED_LATER =
+ "Failed to drop pipe {}, details: {}, metadata will be synchronized later.";
+ public static final String FAILED_TO_DROP_PIPE_PLUGIN_ON_DATA_NODES =
+ "Failed to drop pipe plugin %s on data nodes";
+ public static final String FAILED_TO_DROP_SUBSCRIPTION_WITH_REQUEST_ON_CONFIG_NODES_BECAUSE =
+ "Failed to drop subscription with request %s on config nodes, because %s";
+ public static final String FAILED_TO_DROP_TOPIC_ON_CONFIG_NODES_BECAUSE =
+ "Failed to drop topic %s on config nodes, because %s";
+ public static final String FAILED_TO_DROP_TOPIC_ON_DATA_NODES_BECAUSE =
+ "Failed to drop topic %s on data nodes, because %s";
+ public static final String FAILED_TO_EXECUTE_IN_ALL_REPLICASET_OF_SCHEMAREGION_WHEN_CHECKING =
+ "Failed to execute in all replicaset of schemaRegion %s when checking templates on path %s. Failures: %s";
+ public static final String FAILED_TO_EXECUTE_IN_ALL_REPLICASET_OF_SCHEMAREGION_WHEN_CHECKING_2 =
+ "Failed to execute in all replicaset of schemaRegion %s when checking the template %s on %s. Failure nodes: %s";
+ public static final String FAILED_TO_EXECUTE_PLAN_BECAUSE =
+ "Failed to execute plan {} because {}";
+ // --- "Failed to ..." messages continued (tables, templates, TTL, consensus) ---
+ public static final String FAILED_TO_FOR_TABLE_TO_DATANODE_FAILURE_RESULTS =
+ "Failed to {} for table {}.{} to DataNode, failure results: {}";
+ public static final String FAILED_TO_INIT_CQ_BECAUSE_OF_UNKNOWN_REASONS =
+ "Failed to init CQ {} because of unknown reasons {}";
+ public static final String FAILED_TO_INIT_CQ_BECAUSE_SUCH_CQ_ALREADY_EXISTS =
+ "Failed to init CQ {} because such cq already exists";
+ public static final String FAILED_TO_INVALIDATE_COLUMN_S_CACHE_OF_TABLE =
+ "Failed to invalidate {} column {}'s cache of table {}.{}";
+ public static final String FAILED_TO_INVALIDATE_SCHEMAENGINE_CACHE_OF_DEVICES_IN_TABLE =
+ "Failed to invalidate schemaEngine cache of devices in table {}.{}";
+ public static final String FAILED_TO_INVALIDATE_SCHEMAENGINE_CACHE_OF_TABLE =
+ "Failed to invalidate schemaEngine cache of table {}.{}";
+ public static final String FAILED_TO_INVALIDATE_SCHEMAENGINE_CACHE_OF_TIMESERIES =
+ "Failed to invalidate schemaEngine cache of timeSeries {}";
+ public static final String FAILED_TO_INVALIDATE_SCHEMAENGINE_CACHE_OF_VIEW =
+ "Failed to invalidate schemaengine cache of view {}";
+ public static final String FAILED_TO_INVALIDATE_SCHEMA_CACHE_OF_TEMPLATE_TIMESERIES =
+ "Failed to invalidate schema cache of template timeSeries {}";
+ public static final String FAILED_TO_INVALIDATE_TEMPLATE_CACHE_OF_TEMPLATE_SET_ON =
+ "Failed to invalidate template cache of template {} set on {}";
+ public static final String FAILED_TO_PRE_RELEASE_FOR_TABLE_TO_DATANODE_FAILURE_RESULTS =
+ "Failed to pre-release {} for table {}.{} to DataNode, failure results: {}";
+ public static final String FAILED_TO_PRE_SET_TEMPLATE_ON_PATH_DUE_TO =
+ "Failed to pre set template {} on path {} due to {}";
+ public static final String FAILED_TO_PUSH_CONSUMER_GROUP_META_TO_DATANODES_DETAILS =
+ "Failed to push consumer group meta to dataNodes, details: %s";
+ public static final String FAILED_TO_PUSH_PIPE_META_LIST_TO_DATA_NODES_WILL =
+ "Failed to push pipe meta list to data nodes, will retry later.";
+ public static final String FAILED_TO_PUSH_PIPE_META_TO_DATANODES_DETAILS =
+ "Failed to push pipe meta to dataNodes, details: %s";
+ public static final String FAILED_TO_PUSH_TOPIC_META_TO_DATANODES_DETAILS =
+ "Failed to push topic meta to dataNodes, details: %s";
+ public static final String FAILED_TO_REMOVE_DATA_NODE_BECAUSE_IT_IS_NOT_IN =
+ "Failed to remove data node {} because it is not in running and the configuration of cluster is one replication";
+ public static final String FAILED_TO_ROLLBACK_ALTER_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED =
+ "Failed to rollback alter pipe {}, details: {}, metadata will be synchronized later.";
+ public static final String FAILED_TO_ROLLBACK_COMMIT_SET_TEMPLATE_ON_PATH_DUE_TO =
+ "Failed to rollback commit set template {} on path {} due to {}";
+ public static final String FAILED_TO_ROLLBACK_CREATE_PIPES_WHEN_CREATING_SUBSCRIPTION_WITH_REQUEST =
+ "Failed to rollback create pipes when creating subscription with request %s, because %s";
+ public static final String FAILED_TO_ROLLBACK_CREATE_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED =
+ "Failed to rollback create pipe {}, details: {}, metadata will be synchronized later.";
+ public static final String FAILED_TO_ROLLBACK_CREATING_SUBSCRIPTION_WITH_REQUEST_ON_CONFIG_NODES =
+ "Failed to rollback creating subscription with request %s on config nodes, because %s";
+ public static final String FAILED_TO_ROLLBACK_CREATING_TOPIC_ON_CONFIG_NODES_BECAUSE =
+ "Failed to rollback creating topic %s on config nodes, because %s";
+ public static final String FAILED_TO_ROLLBACK_CREATING_TOPIC_ON_DATA_NODES_BECAUSE =
+ "Failed to rollback creating topic %s on data nodes, because %s";
+ public static final String FAILED_TO_ROLLBACK_FROM_ALTERING_CONSUMER_GROUP_ON_CONFIG_NODES =
+ "Failed to rollback from altering consumer group (%s -> %s) on config nodes, because %s";
+ public static final String FAILED_TO_ROLLBACK_FROM_ALTERING_CONSUMER_GROUP_ON_DATA_NODES =
+ "Failed to rollback from altering consumer group (%s -> %s) on data nodes, because %s";
+ public static final String FAILED_TO_ROLLBACK_FROM_ALTERING_TOPIC_ON_CONFIG_NODES_BECAUSE =
+ "Failed to rollback from altering topic (%s -> %s) on config nodes, because %s";
+ public static final String FAILED_TO_ROLLBACK_FROM_ALTERING_TOPIC_ON_DATA_NODES_BECAUSE =
+ "Failed to rollback from altering topic (%s -> %s) on data nodes, because %s";
+ public static final String FAILED_TO_ROLLBACK_PIPE_PLUGIN_ON_DATA_NODES =
+ "Failed to rollback pipe plugin [%s] on data nodes";
+ public static final String FAILED_TO_ROLLBACK_PRE_RELEASE_FOR_TABLE_INFO_TO_DATANODE =
+ "Failed to rollback pre-release {} for table {}.{} info to DataNode, failure results: {}";
+ public static final String FAILED_TO_ROLLBACK_PRE_RELEASE_TEMPLATE_INFO_OF_TEMPLATE_SET =
+ "Failed to rollback pre release template info of template {} set on path {} on DataNode {}";
+ public static final String FAILED_TO_ROLLBACK_PRE_SET_TEMPLATE_ON_PATH_DUE_TO =
+ "Failed to rollback pre set template {} on path {} due to {}";
+ public static final String FAILED_TO_ROLLBACK_PRE_UNSET_TEMPLATE_OPERATION_OF_TEMPLATE_SET =
+ "Failed to rollback pre unset template operation of template {} set on {}";
+ public static final String FAILED_TO_ROLLBACK_START_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED =
+ "Failed to rollback start pipe {}, details: {}, metadata will be synchronized later.";
+ public static final String FAILED_TO_ROLLBACK_STOP_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED =
+ "Failed to rollback stop pipe {}, details: {}, metadata will be synchronized later.";
+ public static final String FAILED_TO_ROLLBACK_TABLE_CREATION =
+ "Failed to rollback table creation {}.{}";
+ public static final String FAILED_TO_ROLLBACK_TEMPLATE_CACHE_OF_TEMPLATE_SET_ON =
+ "Failed to rollback template cache of template {} set on {}";
+ public static final String FAILED_TO_SERIALIZE_DATAPARTITIONTABLES =
+ "Failed to serialize dataPartitionTables";
+ public static final String FAILED_TO_SERIALIZE_FAILEDDATANODE =
+ "Failed to serialize failedDataNode";
+ public static final String FAILED_TO_SERIALIZE_FINALDATAPARTITIONTABLES =
+ "Failed to serialize finalDataPartitionTables";
+ public static final String FAILED_TO_SERIALIZE_SKIPDATANODE = "Failed to serialize skipDataNode";
+ public static final String FAILED_TO_SET_SCHEMAENGINE_TEMPLATE_ON_PATH_BECAUSE_THERE_S =
+ "Failed to set schemaengine template %s on path %s because there's failure on DataNode %s";
+ public static final String FAILED_TO_START_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED_LATER =
+ "Failed to start pipe {}, details: {}, metadata will be synchronized later.";
+ public static final String FAILED_TO_STOP_AINODE_BECAUSE_BUT_THE_REMOVE_PROCESS_WILL =
+ "Failed to stop AINode {} because {}, but the remove process will continue.";
+ public static final String FAILED_TO_STOP_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED_LATER =
+ "Failed to stop pipe {}, details: {}, metadata will be synchronized later.";
+ // NOTE(review): the next message ends with "failure results: " and has no trailing {}
+ // placeholder, unlike FAILED_TO_SYNC_TABLE_PRE_CREATE_INFO_TO_DATANODE_FAILURE below —
+ // presumably the results are appended by concatenation at the call site; confirm.
+ public static final String FAILED_TO_SYNC_TABLE_COMMIT_CREATE_INFO_TO_DATANODE_FAILURE =
+ "Failed to sync table {}.{} commit-create info to DataNode {}, failure results: ";
+ public static final String FAILED_TO_SYNC_TABLE_PRE_CREATE_INFO_TO_DATANODE_FAILURE =
+ "Failed to sync table {}.{} pre-create info to DataNode, failure results: {}";
+ // NOTE(review): same trailing-"failure results: "-without-placeholder pattern as above;
+ // verify the call site appends the results before normalizing.
+ public static final String FAILED_TO_SYNC_TABLE_ROLLBACK_CREATE_INFO_TO_DATANODE_FAILURE =
+ "Failed to sync table {}.{} rollback-create info to DataNode {}, failure results: ";
+ public static final String FAILED_TO_SYNC_TEMPLATE_COMMIT_SET_INFO_ON_PATH_TO =
+ "Failed to sync template {} commit-set info on path {} to DataNode {}";
+ public static final String FAILED_TO_SYNC_TEMPLATE_PRE_SET_INFO_ON_PATH_TO =
+ "Failed to sync template {} pre-set info on path {} to DataNode {}";
+ public static final String FAILED_TO_UPDATE_PROCEDURE = "Failed to update procedure {}";
+ public static final String FAILED_TO_UPDATE_TTL_CACHE_OF_DATANODE =
+ "Failed to update ttl cache of dataNode.";
+ public static final String FAILED_TO_WRITE_DATAPARTITIONTABLE_TO_CONSENSUS_LOG =
+ "Failed to write DataPartitionTable to consensus log";
+ // --- "Fail ..." messages (triggers, pipe plugins, AINode) ---
+ public static final String FAIL_IN_CREATECQPROCEDURE = "Fail in CreateCQProcedure";
+ public static final String FAIL_TO_ACTIVE_TRIGGERINSTANCE_ON_DATA_NODES =
+ "Fail to active triggerInstance [%s] on Data Nodes";
+ public static final String FAIL_TO_CONFIG_NODE_INACTIVE_ROLLBACK_OF_TRIGGER =
+ "Fail to [CONFIG_NODE_INACTIVE] rollback of trigger [%s]";
+ public static final String FAIL_TO_CREATE_PIPE_PLUGIN_AFTER_RETRIES =
+ "Fail to create pipe plugin [{}] after {} retries";
+ public static final String FAIL_TO_CREATE_TRIGGERINSTANCE_ON_DATA_NODES =
+ "Fail to create triggerInstance [%s] on Data Nodes";
+ public static final String FAIL_TO_CREATE_TRIGGER_AT_STATE =
+ "Fail to create trigger [%s] at STATE [%s]";
+ public static final String FAIL_TO_DATA_NODE_INACTIVE_ROLLBACK_OF_TRIGGER =
+ "Fail to [DATA_NODE_INACTIVE] rollback of trigger [%s]";
+ public static final String FAIL_TO_DROP_PIPE_PLUGIN_AFTER_RETRIES =
+ "Fail to drop pipe plugin [{}] after {} retries";
+ public static final String FAIL_TO_DROP_TRIGGER_AT_STATE =
+ "Fail to drop trigger [%s] at STATE [%s]";
+ public static final String FAIL_TO_DROP_TRIGGER_ON_DATA_NODES =
+ "Fail to drop trigger [%s] on Data Nodes";
+ public static final String FAIL_TO_EXECUTE_PLAN_AT_STATE =
+ "Fail to execute plan [%s] at state[%s]";
+ public static final String FAIL_TO_REMOVE_AINODE_AT_STATE =
+ "Fail to remove AINode [%s] at STATE [%s], %s";
+ public static final String FAIL_TO_REMOVE_AINODE_ON_CONFIG_NODES =
+ "Fail to remove [%s] AINode on Config Nodes [%s]";
+ public static final String FAIL_WHEN_EXECUTE = "Fail when execute {} ";
+ // --- Success / progress messages ---
+ public static final String FINISH_INACTIVE_ROLLBACK_OF_CQ_SUCCESSFULLY =
+ "Finish [INACTIVE] rollback of CQ {} successfully";
+ public static final String FINISH_INIT_CQ_SUCCESSFULLY = "Finish init CQ {} successfully";
+ public static final String FINISH_SCHEDULING_CQ_SUCCESSFULLY =
+ "Finish Scheduling CQ {} successfully";
+ public static final String FORCE_UPDATE_NODECACHE_DATANODEID_NODESTATUS_CURRENTTIME =
+ "{}, Force update NodeCache: dataNodeId={}, nodeStatus={}, currentTime={}";
+ public static final String FOR_FAILED_WHEN_BECAUSE_FAILED_TO_EXECUTE_IN_ALL_REPLICASET =
+ "[%s] for %s.%s failed when [%s] because failed to execute in all replicaset of %s %s. Failure nodes: %s";
+ public static final String FOR_FAILED_WHEN_CONSTRUCT_BLACK_LIST_FOR_TABLE_BECAUSE_FAILED =
+ "[%s] for %s.%s failed when construct black list for table because failed to execute in all replicaset of %s %s. Failures: %s";
+ // --- Cache invalidation messages ---
+ public static final String INVALIDATE_CACHE_OF_DEVICES_IN =
+ "Invalidate cache of devices in {}.{}";
+ public static final String INVALIDATE_CACHE_OF_TEMPLATE_SET_ON =
+ "Invalidate cache of template {} set on {}";
+ public static final String INVALIDATE_CACHE_OF_TEMPLATE_TIMESERIES =
+ "Invalidate cache of template timeSeries {}";
+ public static final String INVALIDATE_CACHE_OF_TIMESERIES = "Invalidate cache of timeSeries {}";
+ public static final String INVALIDATE_CACHE_OF_VIEW = "Invalidate cache of view {}";
+ public static final String INVALIDATE_COLUMN_CACHE_FAILED_FOR_TABLE =
+ "Invalidate column %s cache failed for table %s.%s";
+ // Fixed: this English-locale (i18n/en) file carried a Chinese value
+ // ("使 SchemaEngine 缓存失效失败"); translated to match the sibling messages
+ // (cf. INVALIDATE_SCHEMA_CACHE_FAILED, INVALIDATE_VIEW_SCHEMAENGINE_CACHE_FAILED).
+ public static final String INVALIDATE_SCHEMAENGINE_CACHE_FAILED =
+ "Invalidate schemaEngine cache failed";
+ // --- Cache-invalidation failure and deserialization I/O error messages ---
+ public static final String INVALIDATE_SCHEMA_CACHE_FAILED = "Invalidate schema cache failed";
+ public static final String INVALIDATE_TEMPLATE_CACHE_FAILED = "Invalidate template cache failed";
+ public static final String INVALIDATE_VIEW_SCHEMAENGINE_CACHE_FAILED =
+ "Invalidate view schemaengine cache failed";
+ public static final String INVALIDATING_CACHE_FOR_COLUMN_IN_WHEN_DROPPING_COLUMN =
+ "Invalidating cache for column {} in {}.{} when dropping column";
+ public static final String INVALIDATING_CACHE_FOR_TABLE_WHEN_DROPPING_TABLE =
+ "Invalidating cache for table {}.{} when dropping table";
+ public static final String INVALID_DATA_TYPE_CANNOT_BE_USED_AS_A_NEW_TYPE =
+ "Invalid data type cannot be used as a new type";
+ public static final String IO_ERROR_WHEN_DESERIALIZE_AUTHPLAN =
+ "IO error when deserialize authplan.";
+ public static final String IO_ERROR_WHEN_DESERIALIZE_SETTTL_PLAN =
+ "IO error when deserialize setTTL plan.";
+ // Fixed: this English-locale (i18n/en) file carried a Chinese value
+ // ("没有可用的 DataNode 分配任务"); translated to the English the constant name encodes.
+ public static final String NO_AVAILABLE_DATANODE_TO_ASSIGN_TASKS =
+ "No available DataNode to assign tasks";
+ // --- Partition table / region procedure messages ---
+ public static final String NO_DATABASE_LOST_DATA_PARTITION_TABLE_FOR_CONSENSUS_WRITE =
+ "No database lost data partition table for consensus write";
+ public static final String NO_DATAPARTITIONTABLE_AVAILABLE_FOR_CONSENSUS_WRITE =
+ "No DataPartitionTable available for consensus write";
+ public static final String NO_ENOUGH_DATA_NODE_TO_MIGRATE_REGION =
+ "No enough Data node to migrate region: {}";
+ public static final String OPERATION_TIMED_OUT_AFTER = "Operation timed out after ";
+ public static final String PARTITIONTABLECLEANER_PERIODICALLY_ACTIVATE_PARTITIONTABLEAUTOCLEANER_DATABASETTL =
+ "[PartitionTableCleaner] Periodically activate PartitionTableAutoCleaner, databaseTTL: {}";
+ public static final String PARTITIONTABLECLEANER_PERIODICALLY_ACTIVATE_PARTITIONTABLEAUTOCLEANER_FOR =
+ "[PartitionTableCleaner] Periodically activate PartitionTableAutoCleaner for: {}";
+ public static final String PARTITIONTABLECLEANER_THE_PARTITIONTABLEAUTOCLEANER_IS_STARTED_WITH_CYCLE_MS =
+ "[PartitionTableCleaner] The PartitionTableAutoCleaner is started with cycle={}ms";
+ // --- "[pid{}]" region-maintenance procedure messages (Add/Migrate/Notify/Reconstruct/Remove) ---
+ public static final String PID_ADDREGION_CANNOT_ROLL_BACK_BECAUSE_CANNOT_FIND_THE_CORRECT =
+ "[pid{}][AddRegion] Cannot roll back, because cannot find the correct locations";
+ public static final String PID_ADDREGION_IT_APPEARS_THAT_CONSENSUS_WRITE_HAS_NOT_MODIFIED =
+ "[pid{}][AddRegion] It appears that consensus write has not modified the local partition table. ";
+ public static final String PID_ADDREGION_RESET_PEER_LIST_PEER_LIST_OF_CONSENSUS_GROUP =
+ "[pid{}][AddRegion] reset peer list: peer list of consensus group {} on DataNode {} failed to reset to {}, you may manually reset it";
+ public static final String PID_ADDREGION_RESET_PEER_LIST_PEER_LIST_OF_CONSENSUS_GROUP_2 =
+ "[pid{}][AddRegion] reset peer list: peer list of consensus group {} on DataNode {} has been successfully reset to {}";
+ public static final String PID_ADDREGION_RESET_PEER_LIST_PEER_LIST_OF_CONSENSUS_GROUP_3 =
+ "[pid{}][AddRegion] reset peer list: peer list of consensus group {} on DataNode {} will be reset to {}";
+ public static final String PID_ADDREGION_STARTED_WILL_BE_ADDED_TO_DATANODE =
+ "[pid{}][AddRegion] started, {} will be added to DataNode {}.";
+ public static final String PID_ADDREGION_START_TO_ROLL_BACK_BECAUSE =
+ "[pid{}][AddRegion] Start to roll back, because: {}";
+ public static final String PID_ADDREGION_STATE_COMPLETE = "[pid{}][AddRegion] state {} complete";
+ public static final String PID_ADDREGION_STATE_FAILED = "[pid{}][AddRegion] state {} failed";
+ public static final String PID_ADDREGION_SUCCESS_HAS_BEEN_ADDED_TO_DATANODE_PROCEDURE_TOOK =
+ "[pid{}][AddRegion] success, {} has been added to DataNode {}. Procedure took {} (start at {}).";
+ public static final String PID_MIGRATEREGION_STARTED_WILL_BE_MIGRATED_FROM_DATANODE_TO =
+ "[pid{}][MigrateRegion] started, {} will be migrated from DataNode {} to {}.";
+ public static final String PID_MIGRATEREGION_STATE_COMPLETE =
+ "[pid{}][MigrateRegion] state {} complete";
+ public static final String PID_MIGRATEREGION_STATE_FAIL = "[pid{}][MigrateRegion] state {} fail";
+ public static final String PID_MIGRATEREGION_SUB_PROCEDURE_ADDREGIONPEERPROCEDURE =
+ "[pid{}][MigrateRegion] sub-procedure AddRegionPeerProcedure failed, RegionMigrateProcedure will not continue";
+ // NOTE(review): "success,{} {}" below lacks a space after the comma and carries two
+ // adjacent placeholders — looks like a formatting slip in the upstream log line, but the
+ // original text is preserved here; confirm against the call site before changing.
+ public static final String PID_MIGRATEREGION_SUCCESS_HAS_BEEN_MIGRATED_FROM_DATANODE_TO_PROCEDURE =
+ "[pid{}][MigrateRegion] success,{} {} has been migrated from DataNode {} to {}. Procedure took {} (started at {}).";
+ public static final String PID_NOTIFYREGIONMIGRATION_STARTED_REGION_ID_IS =
+ "[pid{}][NotifyRegionMigration] started, region id is {}.";
+ public static final String PID_NOTIFYREGIONMIGRATION_STATE_COMPLETE =
+ "[pid{}][NotifyRegionMigration] state {} complete";
+ public static final String PID_NOTIFYREGIONMIGRATION_STATE_FAILED =
+ "[pid{}][NotifyRegionMigration] state {} failed";
+ public static final String PID_RECONSTRUCTREGION_FAILED_BUT_THE_REGION_HAS_BEEN_REMOVED_FROM =
+ "[pid{}][ReconstructRegion] failed, but the region {} has been removed from DataNode {}. Use 'extend region' to fix this.";
+ public static final String PID_RECONSTRUCTREGION_STARTED_REGION_ON_DATANODE_WILL_BE_RECONSTRUCTED =
+ "[pid{}][ReconstructRegion] started, region {} on DataNode {}({}) will be reconstructed.";
+ public static final String PID_RECONSTRUCTREGION_STATE_COMPLETE =
+ "[pid{}][ReconstructRegion] state {} complete";
+ public static final String PID_RECONSTRUCTREGION_STATE_FAIL =
+ "[pid{}][ReconstructRegion] state {} fail";
+ public static final String PID_RECONSTRUCTREGION_SUB_PROCEDURE_REMOVEREGIONPEERPROCEDURE =
+ "[pid{}][ReconstructRegion] sub-procedure RemoveRegionPeerProcedure failed, ReconstructRegionProcedure will not continue";
+ public static final String PID_RECONSTRUCTREGION_SUCCESS_REGION_HAS_BEEN_RECONSTRUCTED =
+ "[pid{}][ReconstructRegion] success, region {} has been reconstructed on DataNode {}. Procedure took {} (started at {})";
+ public static final String PID_REMOVEREGION_DELETE_OLD_REGION_PEER_EXECUTED_FAILED_AFTER_ATTEMPTS =
+ "[pid{}][RemoveRegion] DELETE_OLD_REGION_PEER executed failed after {} attempts, procedure will continue. You should manually delete region file. {}";
+ public static final String PID_REMOVEREGION_DELETE_OLD_REGION_PEER_EXECUTED_FAILED_ATTEMPT_WILL =
+ "[pid{}][RemoveRegion] DELETE_OLD_REGION_PEER executed failed (attempt {}/{}), will retry after {}ms. {}";
+ public static final String PID_REMOVEREGION_DELETE_OLD_REGION_PEER_TASK_SUBMITTED_FAILED_AFTER =
+ "[pid{}][RemoveRegion] DELETE_OLD_REGION_PEER task submitted failed after {} attempts, procedure will continue. You should manually delete region file. {}";
+ public static final String PID_REMOVEREGION_DELETE_OLD_REGION_PEER_TASK_SUBMITTED_FAILED_ATTEMPT =
+ "[pid{}][RemoveRegion] DELETE_OLD_REGION_PEER task submitted failed (attempt {}/{}), will retry after {}ms. {}";
+ public static final String PID_REMOVEREGION_EXECUTED_FAILED_CONFIGNODE_BELIEVE_CURRENT_PEER_LIST_OF =
+ "[pid{}][RemoveRegion] {} executed failed, ConfigNode believe current peer list of {} is {}. Procedure will continue. You should manually clear peer list.";
+ public static final String PID_REMOVEREGION_STARTED_REGION_WILL_BE_REMOVED_FROM_DATANODE =
+ "[pid{}][RemoveRegion] started, region {} will be removed from DataNode {}.";
+ public static final String PID_REMOVEREGION_STATE_SUCCESS =
+ "[pid{}][RemoveRegion] state {} success";
+ public static final String PID_REMOVEREGION_SUCCESS_REGION_HAS_BEEN_REMOVED_FROM_DATANODE_PROCEDURE =
+ "[pid{}][RemoveRegion] success, region {} has been removed from DataNode {}. Procedure took {} (started at {})";
+ public static final String PID_REMOVEREGION_TASK_SUBMITTED_FAILED_CONFIGNODE_BELIEVE_CURRENT_PEER_LIST =
+ "[pid{}][RemoveRegion] {} task submitted failed, ConfigNode believe current peer list of {} is {}. Procedure will continue. You should manually clear peer list.";
+ // --- PipeHandleLeaderChangeProcedure state-machine trace messages ---
+ public static final String PIPEHANDLELEADERCHANGEPROCEDURE_EXECUTEFROMCALCULATEINFOFORTASK =
+ "PipeHandleLeaderChangeProcedure: executeFromCalculateInfoForTask";
+ public static final String PIPEHANDLELEADERCHANGEPROCEDURE_EXECUTEFROMHANDLEONCONFIGNODES =
+ "PipeHandleLeaderChangeProcedure: executeFromHandleOnConfigNodes";
+ public static final String PIPEHANDLELEADERCHANGEPROCEDURE_EXECUTEFROMHANDLEONDATANODES =
+ "PipeHandleLeaderChangeProcedure: executeFromHandleOnDataNodes";
+ public static final String PIPEHANDLELEADERCHANGEPROCEDURE_EXECUTEFROMVALIDATETASK =
+ "PipeHandleLeaderChangeProcedure: executeFromValidateTask";
+ public static final String PIPEHANDLELEADERCHANGEPROCEDURE_ROLLBACKFROMCALCULATEINFOFORTASK =
+ "PipeHandleLeaderChangeProcedure: rollbackFromCalculateInfoForTask";
+ public static final String PIPEHANDLELEADERCHANGEPROCEDURE_ROLLBACKFROMCREATEONDATANODES =
+ "PipeHandleLeaderChangeProcedure: rollbackFromCreateOnDataNodes";
+ public static final String PIPEHANDLELEADERCHANGEPROCEDURE_ROLLBACKFROMHANDLEONCONFIGNODES =
+ "PipeHandleLeaderChangeProcedure: rollbackFromHandleOnConfigNodes";
+ public static final String PIPEHANDLELEADERCHANGEPROCEDURE_ROLLBACKFROMVALIDATETASK =
+ "PipeHandleLeaderChangeProcedure: rollbackFromValidateTask";
+ // --- PipeHandleMetaChangeProcedure state-machine trace messages ---
+ public static final String PIPEHANDLEMETACHANGEPROCEDURE_EXECUTEFROMCALCULATEINFOFORTASK =
+ "PipeHandleMetaChangeProcedure: executeFromCalculateInfoForTask";
+ public static final String PIPEHANDLEMETACHANGEPROCEDURE_EXECUTEFROMHANDLEONDATANODES =
+ "PipeHandleMetaChangeProcedure: executeFromHandleOnDataNodes";
+ public static final String PIPEHANDLEMETACHANGEPROCEDURE_EXECUTEFROMVALIDATETASK =
+ "PipeHandleMetaChangeProcedure: executeFromValidateTask";
+ public static final String PIPEHANDLEMETACHANGEPROCEDURE_EXECUTEFROMWRITECONFIGNODECONSENSUS =
+ "PipeHandleMetaChangeProcedure: executeFromWriteConfigNodeConsensus";
+ public static final String PIPEHANDLEMETACHANGEPROCEDURE_ROLLBACKFROMCALCULATEINFOFORTASK =
+ "PipeHandleMetaChangeProcedure: rollbackFromCalculateInfoForTask";
+ public static final String PIPEHANDLEMETACHANGEPROCEDURE_ROLLBACKFROMOPERATEONDATANODES =
+ "PipeHandleMetaChangeProcedure: rollbackFromOperateOnDataNodes";
+ public static final String PIPEHANDLEMETACHANGEPROCEDURE_ROLLBACKFROMVALIDATETASK =
+ "PipeHandleMetaChangeProcedure: rollbackFromValidateTask";
+ public static final String PIPEHANDLEMETACHANGEPROCEDURE_ROLLBACKFROMWRITECONFIGNODECONSENSUS =
+ "PipeHandleMetaChangeProcedure: rollbackFromWriteConfigNodeConsensus";
+ // --- PipeMetaSyncProcedure state-machine trace messages ---
+ public static final String PIPEMETASYNCPROCEDURE_ACQUIRELOCK_SKIP_THE_PROCEDURE_DUE_TO_THE_LAST_EXECUTION =
+ "PipeMetaSyncProcedure: acquireLock, skip the procedure due to the last execution time {}";
+ public static final String PIPEMETASYNCPROCEDURE_EXECUTEFROMCALCULATEINFOFORTASK =
+ "PipeMetaSyncProcedure: executeFromCalculateInfoForTask";
+ public static final String PIPEMETASYNCPROCEDURE_EXECUTEFROMOPERATEONDATANODES =
+ "PipeMetaSyncProcedure: executeFromOperateOnDataNodes";
+ public static final String PIPEMETASYNCPROCEDURE_EXECUTEFROMVALIDATETASK =
+ "PipeMetaSyncProcedure: executeFromValidateTask";
+ public static final String PIPEMETASYNCPROCEDURE_EXECUTEFROMWRITECONFIGNODECONSENSUS =
+ "PipeMetaSyncProcedure: executeFromWriteConfigNodeConsensus";
+ public static final String PIPEMETASYNCPROCEDURE_ROLLBACKFROMCALCULATEINFOFORTASK =
+ "PipeMetaSyncProcedure: rollbackFromCalculateInfoForTask";
+ public static final String PIPEMETASYNCPROCEDURE_ROLLBACKFROMOPERATEONDATANODES =
+ "PipeMetaSyncProcedure: rollbackFromOperateOnDataNodes";
+ public static final String PIPEMETASYNCPROCEDURE_ROLLBACKFROMVALIDATETASK =
+ "PipeMetaSyncProcedure: rollbackFromValidateTask";
+ public static final String PIPEMETASYNCPROCEDURE_ROLLBACKFROMWRITECONFIGNODECONSENSUS =
+ "PipeMetaSyncProcedure: rollbackFromWriteConfigNodeConsensus";
+ // --- Pipe plugin lifecycle and table pre-create / pre-release messages ---
+ public static final String PIPE_NOT_FOUND_IN_PIPETASKINFO_CAN_NOT_PUSH_ITS_META =
+ "Pipe {} not found in PipeTaskInfo, can not push its meta.";
+ public static final String PIPE_PLUGIN_IS_ALREADY_CREATED_AND_ISSETIFNOTEXISTSCONDITION_IS_TRUE_END =
+ "Pipe plugin {} is already created and isSetIfNotExistsCondition is true, end the CreatePipePluginProcedure({})";
+ public static final String PIPE_PLUGIN_IS_ALREADY_CREATED_END_THE_CREATEPIPEPLUGINPROCEDURE =
+ "Pipe plugin {} is already created, end the CreatePipePluginProcedure({})";
+ public static final String PIPE_PLUGIN_IS_NOT_EXIST_END_THE_DROPPIPEPLUGINPROCEDURE =
+ "Pipe plugin {} is not exist, end the DropPipePluginProcedure({})";
+ public static final String PRE_CREATE_TABLE = "Pre create table {}.{}";
+ public static final String PRE_CREATE_TABLE_FAILED = "Pre create table failed";
+ public static final String PRE_RELEASE = "Pre-release ";
+ public static final String PRE_RELEASE_INFO_FOR_TABLE_WHEN_SETTING_PROPERTIES =
+ "Pre release info for table {}.{} when setting properties";
+ public static final String PRE_RELEASE_INFO_OF_TABLE_WHEN_ADDING_COLUMN =
+ "Pre release info of table {}.{} when adding column";
+ public static final String PRE_RELEASE_INFO_OF_TABLE_WHEN_ALTERING_COLUMN =
+ "Pre-release info of table {}.{} when altering column";
+ public static final String PRE_RELEASE_INFO_OF_TABLE_WHEN_RENAMING_COLUMN =
+ "Pre release info of table {}.{} when renaming column";
+ public static final String PRE_RELEASE_INFO_OF_TABLE_WHEN_RENAMING_TABLE =
+ "Pre release info of table {}.{} when renaming table";
+ public static final String PRE_RELEASE_SCHEMAENGINE_TEMPLATE_SET_ON_PATH =
+ "Pre release schemaengine template {} set on path {}";
+ public static final String PRE_RELEASE_TABLE = "Pre release table {}.{}";
+ public static final String PRE_SET_SCHEMAENGINE_TEMPLATE_ON_PATH =
+ "Pre set schemaengine template {} on path {}";
+ public static final String PRE_SET_TEMPLATE_FAILED = "Pre set template failed";
+ public static final String PROCEDUREID = "ProcedureId {}: {}";
+ public static final String PROCEDUREID_ACQUIRED_PIPE_LOCK = "ProcedureId {} acquired pipe lock.";
+ public static final String PROCEDUREID_ACQUIRED_SUBSCRIPTION_LOCK =
+ "ProcedureId {} acquired subscription lock.";
+ public static final String PROCEDUREID_ALL_RETRIES_FAILED_WHEN_TRYING_TO_AT_STATE_WILL =
+ "ProcedureId {}: All {} retries failed when trying to {} at state [{}], will rollback...";
+ public static final String PROCEDUREID_ENCOUNTERED_ERROR_WHEN_TRYING_TO_AT_STATE_RETRY =
+ "ProcedureId {}: Encountered error when trying to {} at state [{}], retry [{}/{}]";
+ public static final String PROCEDUREID_FAILED_TO_ACQUIRE_PIPE_LOCK =
+ "ProcedureId {} failed to acquire pipe lock.";
+ public static final String PROCEDUREID_FAILED_TO_ACQUIRE_SUBSCRIPTION_LOCK =
+ "ProcedureId {} failed to acquire subscription lock.";
+ public static final String PROCEDUREID_FAILED_TO_ROLLBACK_FROM_CALCULATE_INFO_FOR_TASK =
+ "ProcedureId {}: Failed to rollback from calculate info for task.";
+ public static final String PROCEDUREID_FAILED_TO_ROLLBACK_FROM_OPERATE_ON_DATA_NODES =
+ "ProcedureId {}: Failed to rollback from operate on data nodes.";
+ public static final String PROCEDUREID_FAILED_TO_ROLLBACK_FROM_STATE_BECAUSE =
+ "ProcedureId {}: Failed to rollback from state [{}], because {}";
+ public static final String PROCEDUREID_FAILED_TO_ROLLBACK_FROM_VALIDATE_TASK =
+ "ProcedureId {}: Failed to rollback from validate task.";
+ public static final String PROCEDUREID_FAILED_TO_ROLLBACK_FROM_WRITE_CONFIG_NODE_CONSENSUS =
+ "ProcedureId {}: Failed to rollback from write config node consensus.";
+ public static final String PROCEDUREID_FAIL_TO_BECAUSE = "ProcedureId %s: Fail to %s because %s";
+ public static final String PROCEDUREID_INVALID_LOCK_STATE_PIPE_LOCK_WILL_BE_RELEASED =
+ "ProcedureId {}: {}. Invalid lock state. Pipe lock will be released.";
+ public static final String PROCEDUREID_INVALID_LOCK_STATE_SUBSCRIPTION_LOCK_WILL_BE_RELEASED =
+ "ProcedureId {}: {}. Invalid lock state. Subscription lock will be released.";
+ public static final String PROCEDUREID_INVALID_LOCK_STATE_WITHOUT_ACQUIRING_PIPE_LOCK =
+ "ProcedureId {}: {}. Invalid lock state. Without acquiring pipe lock.";
+ public static final String PROCEDUREID_INVALID_LOCK_STATE_WITHOUT_ACQUIRING_SUBSCRIPTION_LOCK =
+ "ProcedureId {}: {}. Invalid lock state. Without acquiring subscription lock.";
+ public static final String PROCEDUREID_LOCK_ACQUIRED_THE_FOLLOWING_PROCEDURE_SHOULD_BE_EXECUTED_WITH =
+ "ProcedureId {}: LOCK_ACQUIRED. The following procedure should be executed with pipe lock.";
+ public static final String PROCEDUREID_LOCK_ACQUIRED_THE_FOLLOWING_PROCEDURE_SHOULD_BE_EXECUTED_WITH_2 =
+ "ProcedureId {}: LOCK_ACQUIRED. The following procedure should be executed with subscription and pipe lock.";
+ public static final String PROCEDUREID_LOCK_ACQUIRED_THE_FOLLOWING_PROCEDURE_SHOULD_BE_EXECUTED_WITH_3 =
+ "ProcedureId {}: LOCK_ACQUIRED. The following procedure should be executed with subscription lock.";
+ public static final String PROCEDUREID_LOCK_ACQUIRED_THE_FOLLOWING_PROCEDURE_SHOULD_NOT_BE_EXECUTED =
+ "ProcedureId {}: LOCK_ACQUIRED. The following procedure should not be executed without pipe lock.";
+ public static final String PROCEDUREID_LOCK_ACQUIRED_THE_FOLLOWING_PROCEDURE_SHOULD_NOT_BE_EXECUTED_2 =
+ "ProcedureId {}: LOCK_ACQUIRED. The following procedure should not be executed without subscription lock.";
+ public static final String PROCEDUREID_LOCK_EVENT_WAIT_PIPE_LOCK_WILL_BE_RELEASED =
+ "ProcedureId {}: LOCK_EVENT_WAIT. Pipe lock will be released.";
+ public static final String PROCEDUREID_LOCK_EVENT_WAIT_SUBSCRIPTION_LOCK_WILL_BE_RELEASED =
+ "ProcedureId {}: LOCK_EVENT_WAIT. Subscription lock will be released.";
+ public static final String PROCEDUREID_LOCK_EVENT_WAIT_WITHOUT_ACQUIRING_PIPE_LOCK =
+ "ProcedureId {}: LOCK_EVENT_WAIT. Without acquiring pipe lock.";
+ public static final String PROCEDUREID_LOCK_EVENT_WAIT_WITHOUT_ACQUIRING_SUBSCRIPTION_LOCK =
+ "ProcedureId {}: LOCK_EVENT_WAIT. Without acquiring subscription lock.";
+ public static final String PROCEDUREID_PIPE_LOCK_IS_NOT_ACQUIRED_EXECUTEFROMSTATE_S_EXECUTION_WILL =
+ "ProcedureId {}: Pipe lock is not acquired, executeFromState's execution will be skipped.";
+ public static final String PROCEDUREID_PIPE_LOCK_IS_NOT_ACQUIRED_ROLLBACKSTATE_S_EXECUTION_WILL =
+ "ProcedureId {}: Pipe lock is not acquired, rollbackState({})'s execution will be skipped.";
+ public static final String PROCEDUREID_RELEASE_LOCK_NO_NEED_TO_RELEASE_PIPE_LOCK =
+ "ProcedureId {} release lock. No need to release pipe lock.";
+ public static final String PROCEDUREID_RELEASE_LOCK_NO_NEED_TO_RELEASE_SUBSCRIPTION_LOCK =
+ "ProcedureId {} release lock. No need to release subscription lock.";
+ public static final String PROCEDUREID_RELEASE_LOCK_PIPE_LOCK_WILL_BE_RELEASED =
+ "ProcedureId {} release lock. Pipe lock will be released.";
+ public static final String PROCEDUREID_RELEASE_LOCK_SUBSCRIPTION_LOCK_WILL_BE_RELEASED =
+ "ProcedureId {} release lock. Subscription lock will be released.";
+ public static final String PROCEDUREID_SUBSCRIPTION_LOCK_IS_NOT_ACQUIRED_EXECUTEFROMSTATE_S_EXECUTION_WILL =
+ "ProcedureId {}: Subscription lock is not acquired, executeFromState({})'s execution will be skipped.";
+ public static final String PROCEDUREID_SUBSCRIPTION_LOCK_IS_NOT_ACQUIRED_ROLLBACKSTATE_S_EXECUTION_WILL =
+ "ProcedureId {}: Subscription lock is not acquired, rollbackState({})'s execution will be skipped.";
+ public static final String PROCEDUREID_TRY_TO_ACQUIRE_PIPE_LOCK =
+ "ProcedureId {} try to acquire pipe lock.";
+ public static final String PROCEDUREID_TRY_TO_ACQUIRE_SUBSCRIPTION_AND_PIPE_LOCK =
+ "ProcedureId {} try to acquire subscription and pipe lock.";
+ public static final String PROCEDUREID_TRY_TO_ACQUIRE_SUBSCRIPTION_LOCK =
+ "ProcedureId {} try to acquire subscription lock.";
+ public static final String PROCEDURE_TYPE = "Procedure type ";
+ public static final String REMOVEREGIONLOCATION_REMOVE_REGION_FROM_DATANODE_RESULT_IS =
+ "RemoveRegionLocation remove region {} from DataNode {}, result is {}";
+ public static final String REMOVEREGIONPEER_STATE_FAILED = "RemoveRegionPeer state {} failed";
+ public static final String REMOVEREGIONPEER_STATE_SUCCESS = "RemoveRegionPeer state {} success";
+ public static final String REMOVEREGION_RATIS_TRANSFER_LEADER_FAIL_BUT_PROCEDURE_WILL_CONTINUE =
+ "[RemoveRegion] Ratis transfer leader fail, but procedure will continue.";
+ public static final String REMOVE_CONFIG_NODE = "Remove Config Node";
+ public static final String REMOVE_DATA_NODE_FAILED = "Remove Data Node failed ";
+ public static final String RENAMETABLECOLUMN_COSTS_MS = "RenameTableColumn-{}.{}-{} costs {}ms";
+ public static final String RENAMETABLE_COSTS_MS = "RenameTable-{}.{}-{} costs {}ms";
+ public static final String RENAME_COLUMN_TO_TABLE_ON_CONFIG_NODE =
+ "Rename column to table {}.{} on config node";
+ public static final String RETRIEVABLE_ERROR_TRYING_TO_CREATE_CQ_STATE =
+ "Retrievable error trying to create cq [{}], state [{}]";
+ public static final String RETRIEVABLE_ERROR_TRYING_TO_CREATE_PIPE_PLUGIN_STATE =
+ "Retrievable error trying to create pipe plugin [{}], state: {}";
+ public static final String RETRIEVABLE_ERROR_TRYING_TO_DROP_PIPE_PLUGIN_STATE =
+ "Retrievable error trying to drop pipe plugin [{}], state: {}";
+ public static final String RETRIEVABLE_ERROR_TRYING_TO_EXECUTE_PLAN_STATE =
+ "Retrievable error trying to execute plan {}, state: {}";
+ public static final String RETRIEVABLE_ERROR_TRYING_TO_REMOVE_AINODE_STATE =
+ "Retrievable error trying to remove AINode [{}], state [{}]";
+ public static final String ROLLBACK_CREATETABLE_COSTS_MS = "Rollback CreateTable-{} costs {}ms.";
+ public static final String ROLLBACK_CREATE_TABLE_FAILED = "Rollback create table failed";
+ public static final String ROLLBACK_DROPTABLE_COSTS_MS = "Rollback DropTable-{} costs {}ms.";
+ public static final String ROLLBACK_PRE_RELEASE = "Rollback pre-release ";
+ public static final String ROLLBACK_PRE_RELEASE_TEMPLATE_FAILED =
+ "Rollback pre release template failed";
+ public static final String ROLLBACK_RENAMETABLECOLUMN_COSTS_MS =
+ "Rollback RenameTableColumn-{} costs {}ms.";
+ public static final String ROLLBACK_RENAMETABLE_COSTS_MS = "Rollback RenameTable-{} costs {}ms.";
+ public static final String ROLLBACK_SETTABLEPROPERTIES_COSTS_MS =
+ "Rollback SetTableProperties-{} costs {}ms.";
+ public static final String ROLLBACK_SETTEMPLATE_COSTS_MS = "Rollback SetTemplate-{} costs {}ms.";
+ public static final String ROLLBACK_TEMPLATE_CACHE_FAILED = "Rollback template cache failed";
+ public static final String ROLLBACK_TEMPLATE_PRE_UNSET_FAILED_BECAUSE_OF =
+ "Rollback template pre unset failed because of";
+ public static final String ROLLBACK_UNSET_TEMPLATE_FAILED_AND_THE_CLUSTER_TEMPLATE_INFO_MANAGEMENT =
+ "Rollback unset template failed and the cluster template info management is strictly broken. Please try unset again.";
+ public static final String SELECTED_DATANODE_FOR_REGION = "Selected DataNode {} for Region {}";
+ public static final String SEND_ACTION_ADDREGIONPEER_FINISHED_REGIONID_RPCDATANODE_DESTDATANODE_STATUS =
+ "{}, Send action addRegionPeer finished, regionId: {}, rpcDataNode: {}, destDataNode: {}, status: {}";
+ public static final String SEND_ACTION_CREATENEWREGIONPEER_ERROR_REGIONID_NEWPEERDATANODEID_RESULT =
+ "{}, Send action createNewRegionPeer error, regionId: {}, newPeerDataNodeId: {}, result: {}";
+ public static final String SEND_ACTION_CREATENEWREGIONPEER_FINISHED_REGIONID_NEWPEERDATANODEID =
+ "{}, Send action createNewRegionPeer finished, regionId: {}, newPeerDataNodeId: {}";
+ public static final String SEND_ACTION_DELETEOLDREGIONPEER_FINISHED_REGIONID_DATANODEID =
+ "{}, Send action deleteOldRegionPeer finished, regionId: {}, dataNodeId: {}";
+ public static final String SEND_ACTION_REMOVEREGIONPEER_FINISHED_REGIONID_RPCDATANODE =
+ "{}, Send action removeRegionPeer finished, regionId: {}, rpcDataNode: {}";
+ public static final String SETSCHEMATEMPLATE_COSTS_MS = "SetSchemaTemplate-[{}] costs {}ms";
+ public static final String SETTABLEPROPERTIES_COSTS_MS = "SetTableProperties-{}.{}-{} costs {}ms";
+ public static final String SETTTL_COSTS_MS = "SetTTL-[{}] costs {}ms";
+ public static final String SET_PROPERTIES_TO_TABLE = "Set properties to table {}.{}";
+ public static final String SET_TEMPLATE_TO_FAILED_WHEN_CHECK_TIME_SERIES_EXISTENCE_ON =
+ "Set template %s to %s failed when [check time series existence on DataNode] because ";
+ public static final String STARTPIPEPROCEDUREV2_EXECUTEFROMCALCULATEINFOFORTASK =
+ "StartPipeProcedureV2: executeFromCalculateInfoForTask({})";
+ public static final String STARTPIPEPROCEDUREV2_EXECUTEFROMOPERATEONDATANODES =
+ "StartPipeProcedureV2: executeFromOperateOnDataNodes({})";
+ public static final String STARTPIPEPROCEDUREV2_EXECUTEFROMVALIDATETASK =
+ "StartPipeProcedureV2: executeFromValidateTask({})";
+ public static final String STARTPIPEPROCEDUREV2_EXECUTEFROMWRITECONFIGNODECONSENSUS =
+ "StartPipeProcedureV2: executeFromWriteConfigNodeConsensus({})";
+ public static final String STARTPIPEPROCEDUREV2_ROLLBACKFROMCALCULATEINFOFORTASK =
+ "StartPipeProcedureV2: rollbackFromCalculateInfoForTask({})";
+ public static final String STARTPIPEPROCEDUREV2_ROLLBACKFROMOPERATEONDATANODES =
+ "StartPipeProcedureV2: rollbackFromOperateOnDataNodes({})";
+ public static final String STARTPIPEPROCEDUREV2_ROLLBACKFROMVALIDATETASK =
+ "StartPipeProcedureV2: rollbackFromValidateTask({})";
+ public static final String STARTPIPEPROCEDUREV2_ROLLBACKFROMWRITECONFIGNODECONSENSUS =
+ "StartPipeProcedureV2: rollbackFromWriteConfigNodeConsensus({})";
+ public static final String START_INACTIVE_ROLLBACK_OF_CQ = "Start [INACTIVE] rollback of CQ {}";
+ public static final String START_ROLLBACK_ADD_COLUMN_TO_TABLE_WHEN_ADDING_COLUMN =
+ "Start rollback Add column to table {}.{} when adding column";
+ public static final String START_ROLLBACK_COMMIT_SET_SCHEMAENGINE_TEMPLATE_ON_PATH =
+ "Start rollback commit set schemaengine template {} on path {}";
+ public static final String START_ROLLBACK_PRE_CREATE_TABLE =
+ "Start rollback pre create table {}.{}";
+ public static final String START_ROLLBACK_PRE_RELEASE_INFO_FOR_TABLE_WHEN_SETTING_PROPERTIES =
+ "Start rollback pre release info for table {}.{} when setting properties";
+ public static final String START_ROLLBACK_PRE_RELEASE_INFO_OF_TABLE =
+ "Start rollback pre release info of table {}.{}";
+ public static final String START_ROLLBACK_PRE_RELEASE_SCHEMAENGINE_TEMPLATE_ON_PATH =
+ "Start rollback pre release schemaengine template {} on path {}";
+ public static final String START_ROLLBACK_PRE_RELEASE_TABLE =
+ "Start rollback pre release table {}.{}";
+ public static final String START_ROLLBACK_PRE_SET_SCHEMAENGINE_TEMPLATE_ON_PATH =
+ "Start rollback pre set schemaengine template {} on path {}";
+ public static final String START_ROLLBACK_RENAMING_COLUMN_TO_TABLE_ON_CONFIGNODE =
+ "Start rollback Renaming column to table {}.{} on configNode";
+ public static final String START_ROLLBACK_RENAMING_TABLE_ON_CONFIGNODE =
+ "Start rollback Renaming table {}.{} on configNode";
+ public static final String START_ROLLBACK_SET_PROPERTIES_TO_TABLE =
+ "Start rollback set properties to table {}.{}";
+ public static final String STATE_STUCK_AT = "State stuck at ";
+ public static final String STOPPIPEPROCEDUREV2_EXECUTEFROMCALCULATEINFOFORTASK =
+ "StopPipeProcedureV2: executeFromCalculateInfoForTask({})";
+ public static final String STOPPIPEPROCEDUREV2_EXECUTEFROMOPERATEONDATANODES =
+ "StopPipeProcedureV2: executeFromOperateOnDataNodes({})";
+ public static final String STOPPIPEPROCEDUREV2_EXECUTEFROMVALIDATETASK =
+ "StopPipeProcedureV2: executeFromValidateTask({})";
+ public static final String STOPPIPEPROCEDUREV2_EXECUTEFROMWRITECONFIGNODECONSENSUS =
+ "StopPipeProcedureV2: executeFromWriteConfigNodeConsensus({})";
+ public static final String STOPPIPEPROCEDUREV2_ROLLBACKFROMCALCULATEINFOFORTASK =
+ "StopPipeProcedureV2: rollbackFromCalculateInfoForTask({})";
+ public static final String STOPPIPEPROCEDUREV2_ROLLBACKFROMOPERATEONDATANODES =
+ "StopPipeProcedureV2: rollbackFromOperateOnDataNodes({})";
+ public static final String STOPPIPEPROCEDUREV2_ROLLBACKFROMVALIDATETASK =
+ "StopPipeProcedureV2: rollbackFromValidateTask({})";
+ public static final String STOPPIPEPROCEDUREV2_ROLLBACKFROMWRITECONFIGNODECONSENSUS =
+ "StopPipeProcedureV2: rollbackFromWriteConfigNodeConsensus({})";
+ public static final String STOP_DATA_NODE_MEETS_ERROR_ERROR_DATANODE =
+ "{}, Stop Data Node meets error, error datanode: {}";
+ public static final String STOP_DATA_NODE_SUCCESS = "{}, Stop Data Node {} success.";
+ public static final String SUBMITTED_ASYNC_CONSENSUS_PIPE_CREATION =
+ "{}, Submitted async consensus pipe creation: {}";
+ public static final String SUBSCRIPTION_META_SYNC_PROCEDURE_FINISHED_UPDATING_LAST_SYNC_VERSION =
+ "Subscription meta sync procedure finished, updating last sync version.";
+ public static final String SUCCESSFULLY_OPERATE_WILL_CLEAR_CACHE_TO_THE_DATA_REGIONS_ANYWAY =
+ "Successfully operate, will clear cache to the data regions anyway";
+ public static final String SUCCESSFULLY_RESTORED_WILL_SET_MODS_TO_THE_DATA_REGIONS_ANYWAY =
+ "Successfully restored, will set mods to the data regions anyway";
+ public static final String SUCCESSFULLY_STOPPED_AINODE = "Successfully stopped AINode {}";
+ public static final String TABLE_ALREADY_EXISTS = "Table '%s.%s' already exists.";
+ public static final String TABLE_NOT_EXISTS = "Table '%s.%s' not exists.";
+ public static final String TARGET_DEVICE_TEMPLATE_IS_NOT_ACTIVATED_ON_ANY_PATH_MATCHED =
+ "Target Device Template is not activated on any path matched by given path pattern";
+ public static final String TASK_CANNOT_GET_TASK_REPORT_FROM_DATANODE_LAST_REPORT_TIME =
+ "{} task {} cannot get task report from DataNode {}, last report time is {} ago";
+ public static final String THE_UPDATED_TABLE_HAS_THE_SAME_PROPERTIES_WITH_THE_ORIGINAL =
+ "The updated table has the same properties with the original one. Skip the procedure.";
+ public static final String TOPICMETASYNCPROCEDURE_ACQUIRELOCK_SKIP_THE_PROCEDURE_DUE_TO_THE_LAST_EXECUTION =
+ "TopicMetaSyncProcedure: acquireLock, skip the procedure due to the last execution time {}";
+ public static final String TOPICMETASYNCPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES =
+ "TopicMetaSyncProcedure: executeFromOperateOnConfigNodes";
+ public static final String TOPICMETASYNCPROCEDURE_EXECUTEFROMOPERATEONDATANODES =
+ "TopicMetaSyncProcedure: executeFromOperateOnDataNodes";
+ public static final String TOPICMETASYNCPROCEDURE_EXECUTEFROMVALIDATE =
+ "TopicMetaSyncProcedure: executeFromValidate";
+ public static final String TOPICMETASYNCPROCEDURE_ROLLBACKFROMOPERATEONCONFIGNODES =
+ "TopicMetaSyncProcedure: rollbackFromOperateOnConfigNodes";
+ public static final String TOPICMETASYNCPROCEDURE_ROLLBACKFROMOPERATEONDATANODES =
+ "TopicMetaSyncProcedure: rollbackFromOperateOnDataNodes";
+ public static final String TOPICMETASYNCPROCEDURE_ROLLBACKFROMVALIDATE =
+ "TopicMetaSyncProcedure: rollbackFromValidate";
+ public static final String UNEXPECTED_FAIL_TSSTATUS_IS = "Unexpected fail, tsStatus is ";
+  public static final String UNEXPECTED_STATE = "Unexpected state";
+ public static final String UNKNOWN_CREATECQSTATE = "Unknown CreateCQState: ";
+ public static final String UNKNOWN_CREATETRIGGERSTATE = "Unknown CreateTriggerState: ";
+ public static final String UNKNOWN_DROPTRIGGERSTATE = "Unknown DropTriggerState: ";
+ public static final String UNKNOWN_LOAD_BALANCE_STRATEGY = "Unknown load balance strategy: ";
+ public static final String UNKNOWN_PROCEDURE_TYPE = "Unknown Procedure type: ";
+ public static final String UNKNOWN_PROCEDURE_TYPE_2 = "Unknown Procedure type: {}";
+ public static final String UNKNOWN_STATE = "Unknown state: ";
+ public static final String UNKNOWN_STATE_DURING_EXECUTING_CREATEPIPEPLUGINPROCEDURE =
+ "Unknown state during executing createPipePluginProcedure, %s";
+ public static final String UNKNOWN_STATE_DURING_EXECUTING_OPERATEPIPEPROCEDURE =
+ "Unknown state during executing operatePipeProcedure, %s";
+ public static final String UNKNOWN_STATE_DURING_EXECUTING_OPERATESUBSCRIPTIONPROCEDURE =
+ "Unknown state during executing operateSubscriptionProcedure, %s";
+ public static final String UNKNOWN_STATE_DURING_EXECUTING_REMOVEAINODEPROCEDURE =
+ "Unknown state during executing removeAINodeProcedure, %s";
+ public static final String UNKNOWN_STATE_DURING_ROLLBACK_OPERATESUBSCRIPTIONPROCEDURE =
+ "Unknown state during rollback operateSubscriptionProcedure, %s";
+ public static final String UNKNOWN_STATE_FOR_ROLLBACK = "Unknown state for rollback: ";
+ public static final String UNRECOGNIZED_ADDTABLECOLUMNSTATE = "Unrecognized AddTableColumnState ";
+ public static final String UNRECOGNIZED_ALTERTABLECOLUMNDATATYPEPROCEDURE =
+ "Unrecognized AlterTableColumnDataTypeProcedure ";
+ public static final String UNRECOGNIZED_ALTERTIMESERIESDATATYPEPROCEDURE_STATE =
+ "Unrecognized AlterTimeSeriesDataTypeProcedure state ";
+ public static final String UNRECOGNIZED_CREATETABLESTATE = "Unrecognized CreateTableState ";
+ public static final String UNRECOGNIZED_DROPTABLECOLUMNSTATE =
+ "Unrecognized DropTableColumnState ";
+ public static final String UNRECOGNIZED_DROPTABLESTATE = "Unrecognized DropTableState ";
+ public static final String UNRECOGNIZED_LOG_TYPE = "unrecognized log type ";
+ public static final String UNRECOGNIZED_RENAMETABLECOLUMNSTATE =
+ "Unrecognized RenameTableColumnState ";
+ public static final String UNRECOGNIZED_RENAMETABLESTATE = "Unrecognized RenameTableState ";
+ public static final String UNRECOGNIZED_SETTEMPLATESTATE = "Unrecognized SetTemplateState ";
+ public static final String UNRECOGNIZED_STATE = "Unrecognized state ";
+ public static final String UNSETTEMPLATE_COSTS_MS = "UnsetTemplate-[{}] costs {}ms";
+ public static final String UNSET_TEMPLATE_FROM_FAILED_WHEN_CHECK_DATANODE_TEMPLATE_ACTIVATION_BECAUSE =
+ "Unset template %s from %s failed when [check DataNode template activation] because %s";
+ public static final String UNSET_TEMPLATE_ON = "Unset template {} on {}";
+ public static final String UNSUPPORTED_ROLL_BACK_STATE = "Unsupported roll back STATE [{}]";
+ public static final String UNSUPPORTED_STATE = "Unsupported state: ";
+ public static final String UPDATE_DATANODE_TTL_CACHE_FAILED = "Update dataNode ttl cache failed";
+ public static final String VALIDATE_TABLE_FOR_TABLE_WHEN_SETTING_PROPERTIES =
+ "Validate table for table {}.{} when setting properties";
+ public static final String WAITTASKFINISH_RETURNS_PROCESSING_WHICH_MEANS_THE_WAITING_HAS_BEEN_INTERRUPTED =
+ "waitTaskFinish() returns PROCESSING, which means the waiting has been interrupted, this procedure will end without rollback";
+
+ private ProcedureMessages() {}
+}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnInternalServiceAsyncRequestManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnInternalServiceAsyncRequestManager.java
index 8c7b389bd3dcc..1ffc8dce3ab08 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnInternalServiceAsyncRequestManager.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/CnToDnInternalServiceAsyncRequestManager.java
@@ -49,6 +49,7 @@
import org.apache.iotdb.confignode.client.async.handlers.rpc.subscription.ConsumerGroupPushMetaRPCHandler;
import org.apache.iotdb.confignode.client.async.handlers.rpc.subscription.TopicPushMetaRPCHandler;
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.mpp.rpc.thrift.TActiveTriggerInstanceReq;
import org.apache.iotdb.mpp.rpc.thrift.TAlterEncodingCompressorReq;
import org.apache.iotdb.mpp.rpc.thrift.TAlterTimeSeriesReq;
@@ -506,7 +507,8 @@ protected void checkActionMapCompleteness() {
.collect(Collectors.toList());
if (!lackList.isEmpty()) {
throw new UncheckedStartupException(
- String.format("These request types should be added to actionMap: %s", lackList));
+ String.format(
+ ConfigNodeMessages.THESE_REQUEST_TYPES_SHOULD_BE_ADDED_TO_ACTIONMAP, lackList));
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/CheckTimeSeriesExistenceRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/CheckTimeSeriesExistenceRPCHandler.java
index 3a735691a0efa..3a1c47e174673 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/CheckTimeSeriesExistenceRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/CheckTimeSeriesExistenceRPCHandler.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.mpp.rpc.thrift.TCheckTimeSeriesExistenceResp;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -54,12 +55,19 @@ public void onComplete(TCheckTimeSeriesExistenceResp response) {
responseMap.put(requestId, response);
if (tsStatus.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
nodeLocationMap.remove(requestId);
- LOGGER.info("Successfully check timeseries existence on DataNode: {}", targetNode);
+ LOGGER.info(
+ ConfigNodeMessages.SUCCESSFULLY_CHECK_TIMESERIES_EXISTENCE_ON_DATANODE, targetNode);
} else if (tsStatus.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) {
nodeLocationMap.remove(requestId);
- LOGGER.error("Failed to check timeseries existence on DataNode {}, {}", targetNode, tsStatus);
+ LOGGER.error(
+ ConfigNodeMessages.FAILED_TO_CHECK_TIMESERIES_EXISTENCE_ON_DATANODE,
+ targetNode,
+ tsStatus);
} else {
- LOGGER.error("Failed to check timeseries existence on DataNode {}, {}", targetNode, tsStatus);
+ LOGGER.error(
+ ConfigNodeMessages.FAILED_TO_CHECK_TIMESERIES_EXISTENCE_ON_DATANODE,
+ targetNode,
+ tsStatus);
}
countDownLatch.countDown();
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/ConfigNodeTSStatusRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/ConfigNodeTSStatusRPCHandler.java
index a28acb3496267..0c85ec7c837b6 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/ConfigNodeTSStatusRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/ConfigNodeTSStatusRPCHandler.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation;
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.confignode.client.CnToCnNodeRequestType;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -60,10 +61,11 @@ public void onComplete(TSStatus response) {
if (response.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
// Remove only if success
nodeLocationMap.remove(requestId);
- LOGGER.info("Successfully {} on ConfigNode: {}", requestType, formattedTargetLocation);
+ LOGGER.info(
+ ConfigNodeMessages.SUCCESSFULLY_ON_CONFIGNODE, requestType, formattedTargetLocation);
} else {
LOGGER.error(
- "Failed to {} on ConfigNode: {}, response: {}",
+ ConfigNodeMessages.FAILED_TO_ON_CONFIGNODE_RESPONSE,
requestType,
formattedTargetLocation,
response);
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/CountPathsUsingTemplateRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/CountPathsUsingTemplateRPCHandler.java
index b27c74bb41d8c..45574e131e70a 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/CountPathsUsingTemplateRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/CountPathsUsingTemplateRPCHandler.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.mpp.rpc.thrift.TCountPathsUsingTemplateResp;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -54,12 +55,19 @@ public void onComplete(TCountPathsUsingTemplateResp response) {
responseMap.put(requestId, response);
if (tsStatus.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
nodeLocationMap.remove(requestId);
- LOGGER.info("Successfully count paths using template on DataNode: {}", targetNode);
+ LOGGER.info(
+ ConfigNodeMessages.SUCCESSFULLY_COUNT_PATHS_USING_TEMPLATE_ON_DATANODE, targetNode);
} else if (tsStatus.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) {
nodeLocationMap.remove(requestId);
- LOGGER.error("Failed to count paths using template on DataNode {}, {}", targetNode, tsStatus);
+ LOGGER.error(
+ ConfigNodeMessages.FAILED_TO_COUNT_PATHS_USING_TEMPLATE_ON_DATANODE,
+ targetNode,
+ tsStatus);
} else {
- LOGGER.error("Failed to count paths using template on DataNode {}, {}", targetNode, tsStatus);
+ LOGGER.error(
+ ConfigNodeMessages.FAILED_TO_COUNT_PATHS_USING_TEMPLATE_ON_DATANODE,
+ targetNode,
+ tsStatus);
}
countDownLatch.countDown();
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/DataNodeTSStatusRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/DataNodeTSStatusRPCHandler.java
index 7c93f363dd4b8..0398bdcda79b5 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/DataNodeTSStatusRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/DataNodeTSStatusRPCHandler.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -54,10 +55,11 @@ public void onComplete(TSStatus response) {
if (response.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
// Remove only if success
nodeLocationMap.remove(requestId);
- LOGGER.info("Successfully {} on DataNode: {}", requestType, formattedTargetLocation);
+ LOGGER.info(
+ ConfigNodeMessages.SUCCESSFULLY_ON_DATANODE, requestType, formattedTargetLocation);
} else {
LOGGER.error(
- "Failed to {} on DataNode: {}, response: {}",
+ ConfigNodeMessages.FAILED_TO_ON_DATANODE_RESPONSE,
requestType,
formattedTargetLocation,
response);
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/FetchSchemaBlackListRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/FetchSchemaBlackListRPCHandler.java
index 693017ec02d6a..44f8362585cf9 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/FetchSchemaBlackListRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/FetchSchemaBlackListRPCHandler.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.mpp.rpc.thrift.TFetchSchemaBlackListResp;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -54,14 +55,19 @@ public void onComplete(TFetchSchemaBlackListResp tFetchSchemaBlackListResp) {
responseMap.put(requestId, tFetchSchemaBlackListResp);
if (tsStatus.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
nodeLocationMap.remove(requestId);
- LOGGER.info("Successfully fetch schemaengine black list on DataNode: {}", targetNode);
+ LOGGER.info(
+ ConfigNodeMessages.SUCCESSFULLY_FETCH_SCHEMAENGINE_BLACK_LIST_ON_DATANODE, targetNode);
} else if (tsStatus.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) {
nodeLocationMap.remove(requestId);
LOGGER.error(
- "Failed to fetch schemaengine black list on DataNode {}, {}", targetNode, tsStatus);
+ ConfigNodeMessages.FAILED_TO_FETCH_SCHEMAENGINE_BLACK_LIST_ON_DATANODE,
+ targetNode,
+ tsStatus);
} else {
LOGGER.error(
- "Failed to fetch schemaengine black list on DataNode {}, {}", targetNode, tsStatus);
+ ConfigNodeMessages.FAILED_TO_FETCH_SCHEMAENGINE_BLACK_LIST_ON_DATANODE,
+ targetNode,
+ tsStatus);
}
countDownLatch.countDown();
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/GetBuiltInExternalServiceRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/GetBuiltInExternalServiceRPCHandler.java
index 38b34fe6d4938..f8b61529d5703 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/GetBuiltInExternalServiceRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/GetBuiltInExternalServiceRPCHandler.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
import org.apache.iotdb.common.rpc.thrift.TExternalServiceListResp;
import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.rpc.TSStatusCode;
import org.slf4j.Logger;
@@ -52,7 +53,7 @@ public void onComplete(TExternalServiceListResp response) {
responseMap.put(requestId, response);
} else {
LOGGER.error(
- "Failed to {} on DataNode: {}, response: {}",
+ ConfigNodeMessages.FAILED_TO_ON_DATANODE_RESPONSE,
requestType,
formattedTargetLocation,
response);
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/PipeHeartbeatRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/PipeHeartbeatRPCHandler.java
index ea9b68e7dc0b0..94d40ab6dfc74 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/PipeHeartbeatRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/PipeHeartbeatRPCHandler.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
import org.apache.iotdb.common.rpc.thrift.TPipeHeartbeatResp;
import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -48,7 +49,7 @@ public void onComplete(TPipeHeartbeatResp response) {
// Put response
responseMap.put(requestId, response);
nodeLocationMap.remove(requestId);
- LOGGER.debug("Successfully {} on DataNode: {}", requestType, formattedTargetLocation);
+ LOGGER.debug(ConfigNodeMessages.SUCCESSFULLY_ON_DATANODE, requestType, formattedTargetLocation);
// Always CountDown
countDownLatch.countDown();
@@ -57,7 +58,7 @@ public void onComplete(TPipeHeartbeatResp response) {
@Override
public void onError(Exception e) {
LOGGER.error(
- "Failed to {} on DataNode: {}, exception: {}",
+ ConfigNodeMessages.FAILED_TO_ON_DATANODE_EXCEPTION,
requestType,
formattedTargetLocation,
e.getMessage());
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/PipePushMetaRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/PipePushMetaRPCHandler.java
index 0928c76cdc908..df5e2d8bddcc0 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/PipePushMetaRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/PipePushMetaRPCHandler.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.mpp.rpc.thrift.TPushPipeMetaResp;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -50,10 +51,11 @@ public void onComplete(TPushPipeMetaResp response) {
responseMap.put(requestId, response);
if (response.getStatus().getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.debug("Successfully {} on DataNode: {}", requestType, formattedTargetLocation);
+ LOGGER.debug(
+ ConfigNodeMessages.SUCCESSFULLY_ON_DATANODE, requestType, formattedTargetLocation);
} else {
LOGGER.error(
- "Failed to {} on DataNode: {}, response: {}",
+ ConfigNodeMessages.FAILED_TO_ON_DATANODE_RESPONSE,
requestType,
formattedTargetLocation,
response);
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/SchemaUpdateRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/SchemaUpdateRPCHandler.java
index dc2796a232e28..84d848a484ec5 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/SchemaUpdateRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/SchemaUpdateRPCHandler.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -48,17 +49,18 @@ public SchemaUpdateRPCHandler(
@Override
public void onComplete(TSStatus tsStatus) {
responseMap.put(requestId, tsStatus);
- LOGGER.info("{} for {} receives: {}", requestType, requestId, tsStatus);
+ LOGGER.info(ConfigNodeMessages.FOR_RECEIVES, requestType, requestId, tsStatus);
if (tsStatus.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
nodeLocationMap.remove(requestId);
- LOGGER.info("Successfully {} on DataNode: {}", requestType, formattedTargetLocation);
+ LOGGER.info(
+ ConfigNodeMessages.SUCCESSFULLY_ON_DATANODE, requestType, formattedTargetLocation);
} else if (tsStatus.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) {
nodeLocationMap.remove(requestId);
LOGGER.warn(
- "Failed to {} on DataNode {}, {}", requestType, formattedTargetLocation, tsStatus);
+ ConfigNodeMessages.FAILED_TO_ON_DATANODE, requestType, formattedTargetLocation, tsStatus);
} else {
LOGGER.warn(
- "Failed to {} on DataNode {}, {}", requestType, formattedTargetLocation, tsStatus);
+ ConfigNodeMessages.FAILED_TO_ON_DATANODE, requestType, formattedTargetLocation, tsStatus);
}
countDownLatch.countDown();
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/TransferLeaderRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/TransferLeaderRPCHandler.java
index 8bfe0eb4755d3..770bc919eb67a 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/TransferLeaderRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/TransferLeaderRPCHandler.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.mpp.rpc.thrift.TRegionLeaderChangeResp;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -54,10 +55,11 @@ public void onComplete(TRegionLeaderChangeResp response) {
if (response.getStatus().getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
// Remove only if success
nodeLocationMap.remove(requestId);
- LOGGER.info("Successfully {} on DataNode: {}", requestType, formattedTargetLocation);
+ LOGGER.info(
+ ConfigNodeMessages.SUCCESSFULLY_ON_DATANODE, requestType, formattedTargetLocation);
} else {
LOGGER.error(
- "Failed to {} on DataNode: {}, response: {}",
+ ConfigNodeMessages.FAILED_TO_ON_DATANODE_RESPONSE,
requestType,
formattedTargetLocation,
response);
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/TreeDeviceViewFieldDetectionHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/TreeDeviceViewFieldDetectionHandler.java
index a0f908e94d45c..71bc289ccb4de 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/TreeDeviceViewFieldDetectionHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/TreeDeviceViewFieldDetectionHandler.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.mpp.rpc.thrift.TDeviceViewResp;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -60,10 +61,11 @@ public void onComplete(final TDeviceViewResp response) {
responseMap.put(requestId, response);
if (response.getStatus().getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.info("Successfully {} on DataNode: {}", requestType, formattedTargetLocation);
+ LOGGER.info(
+ ConfigNodeMessages.SUCCESSFULLY_ON_DATANODE, requestType, formattedTargetLocation);
} else {
LOGGER.error(
- "Failed to {} on DataNode: {}, response: {}",
+ ConfigNodeMessages.FAILED_TO_ON_DATANODE_RESPONSE,
requestType,
formattedTargetLocation,
response);
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/CheckSchemaRegionUsingTemplateRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/CheckSchemaRegionUsingTemplateRPCHandler.java
index 249e8b5176799..6d3d1bf500746 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/CheckSchemaRegionUsingTemplateRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/CheckSchemaRegionUsingTemplateRPCHandler.java
@@ -23,6 +23,7 @@
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
import org.apache.iotdb.confignode.client.async.handlers.rpc.DataNodeAsyncRequestRPCHandler;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.mpp.rpc.thrift.TCheckSchemaRegionUsingTemplateResp;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -55,14 +56,20 @@ public void onComplete(TCheckSchemaRegionUsingTemplateResp response) {
responseMap.put(requestId, response);
if (tsStatus.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
nodeLocationMap.remove(requestId);
- LOGGER.info("Successfully check schema region using template on DataNode: {}", targetNode);
+ LOGGER.info(
+ ConfigNodeMessages.SUCCESSFULLY_CHECK_SCHEMA_REGION_USING_TEMPLATE_ON_DATANODE,
+ targetNode);
} else if (tsStatus.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) {
nodeLocationMap.remove(requestId);
LOGGER.error(
- "Failed to check schema region using template on DataNode {}, {}", targetNode, tsStatus);
+ ConfigNodeMessages.FAILED_TO_CHECK_SCHEMA_REGION_USING_TEMPLATE_ON_DATANODE,
+ targetNode,
+ tsStatus);
} else {
LOGGER.error(
- "Failed to check schema region using template on DataNode {}, {}", targetNode, tsStatus);
+ ConfigNodeMessages.FAILED_TO_CHECK_SCHEMA_REGION_USING_TEMPLATE_ON_DATANODE,
+ targetNode,
+ tsStatus);
}
countDownLatch.countDown();
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/ConsumerGroupPushMetaRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/ConsumerGroupPushMetaRPCHandler.java
index 2938d4f85b7cd..0bf1f0561ff7b 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/ConsumerGroupPushMetaRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/ConsumerGroupPushMetaRPCHandler.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
import org.apache.iotdb.confignode.client.async.handlers.rpc.DataNodeAsyncRequestRPCHandler;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.mpp.rpc.thrift.TPushConsumerGroupMetaResp;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -53,10 +54,11 @@ public void onComplete(TPushConsumerGroupMetaResp response) {
responseMap.put(requestId, response);
if (response.getStatus().getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.info("Successfully {} on DataNode: {}", requestType, formattedTargetLocation);
+ LOGGER.info(
+ ConfigNodeMessages.SUCCESSFULLY_ON_DATANODE, requestType, formattedTargetLocation);
} else {
LOGGER.error(
- "Failed to {} on DataNode: {}, response: {}",
+ ConfigNodeMessages.FAILED_TO_ON_DATANODE_RESPONSE,
requestType,
formattedTargetLocation,
response);
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/TopicPushMetaRPCHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/TopicPushMetaRPCHandler.java
index 91ffdd7232b3f..8b8d59b9e7e79 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/TopicPushMetaRPCHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/async/handlers/rpc/subscription/TopicPushMetaRPCHandler.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
import org.apache.iotdb.confignode.client.async.handlers.rpc.DataNodeAsyncRequestRPCHandler;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.mpp.rpc.thrift.TPushTopicMetaResp;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -52,10 +53,11 @@ public void onComplete(TPushTopicMetaResp response) {
responseMap.put(requestId, response);
if (response.getStatus().getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.info("Successfully {} on DataNode: {}", requestType, formattedTargetLocation);
+ LOGGER.info(
+ ConfigNodeMessages.SUCCESSFULLY_ON_DATANODE, requestType, formattedTargetLocation);
} else {
LOGGER.error(
- "Failed to {} on DataNode: {}, response: {}",
+ ConfigNodeMessages.FAILED_TO_ON_DATANODE_RESPONSE,
requestType,
formattedTargetLocation,
response);
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncAINodeClientPool.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncAINodeClientPool.java
index 5ef9df1c99735..ed3fa9275c88b 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncAINodeClientPool.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncAINodeClientPool.java
@@ -25,6 +25,7 @@
import org.apache.iotdb.commons.client.IClientManager;
import org.apache.iotdb.commons.client.sync.SyncAINodeClient;
import org.apache.iotdb.commons.exception.UncheckedStartupException;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.rpc.TSStatusCode;
import com.google.common.collect.ImmutableMap;
@@ -74,7 +75,8 @@ private void checkActionMapCompleteness() {
.collect(Collectors.toList());
if (!lackList.isEmpty()) {
throw new UncheckedStartupException(
- String.format("These request types should be added to actionMap: %s", lackList));
+ String.format(
+ ConfigNodeMessages.THESE_REQUEST_TYPES_SHOULD_BE_ADDED_TO_ACTIONMAP, lackList));
}
}
@@ -87,12 +89,13 @@ public Object sendSyncRequestToAINodeWithRetry(
} catch (Exception e) {
lastException = e;
if (retry != DEFAULT_RETRY_NUM - 1) {
- LOGGER.warn("{} failed on AINode {}, retrying {}...", requestType, endPoint, retry + 1);
+ LOGGER.warn(
+ ConfigNodeMessages.FAILED_ON_AINODE_RETRYING, requestType, endPoint, retry + 1);
doRetryWait(retry);
}
}
}
- LOGGER.error("{} failed on AINode {}", requestType, endPoint, lastException);
+ LOGGER.error(ConfigNodeMessages.FAILED_ON_AINODE, requestType, endPoint, lastException);
return new TSStatus(TSStatusCode.INTERNAL_REQUEST_RETRY_ERROR.getStatusCode())
.setMessage("All retry failed due to: " + lastException.getMessage());
}
@@ -106,12 +109,13 @@ public Object sendSyncRequestToAINodeWithGivenRetry(
} catch (Exception e) {
lastException = e;
if (retry != retryNum - 1) {
- LOGGER.warn("{} failed on AINode {}, retrying {}...", requestType, endPoint, retry + 1);
+ LOGGER.warn(
+ ConfigNodeMessages.FAILED_ON_AINODE_RETRYING, requestType, endPoint, retry + 1);
doRetryWait(retry);
}
}
}
- LOGGER.error("{} failed on AINode {}", requestType, endPoint, lastException);
+ LOGGER.error(ConfigNodeMessages.FAILED_ON_AINODE, requestType, endPoint, lastException);
return new TSStatus(TSStatusCode.INTERNAL_REQUEST_RETRY_ERROR.getStatusCode())
.setMessage("All retry failed due to: " + lastException.getMessage());
}
@@ -131,7 +135,7 @@ private void doRetryWait(int retryNum) {
TimeUnit.MILLISECONDS.sleep(3200L);
}
} catch (InterruptedException e) {
- LOGGER.warn("Retry wait failed.", e);
+ LOGGER.warn(ConfigNodeMessages.RETRY_WAIT_FAILED, e);
Thread.currentThread().interrupt();
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncConfigNodeClientPool.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncConfigNodeClientPool.java
index 96c9f77552633..f87b686855e72 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncConfigNodeClientPool.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncConfigNodeClientPool.java
@@ -29,6 +29,7 @@
import org.apache.iotdb.commons.client.exception.ClientManagerException;
import org.apache.iotdb.commons.client.sync.SyncConfigNodeIServiceClient;
import org.apache.iotdb.confignode.client.CnToCnNodeRequestType;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.rpc.thrift.TAddConsensusGroupReq;
import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeRegisterReq;
import org.apache.iotdb.rpc.RpcUtils;
@@ -107,7 +108,7 @@ public Object sendSyncRequestToConfigNodeWithRetry(
} catch (Exception e) {
lastException = e;
LOGGER.warn(
- "{} failed on ConfigNode {}, because {}, retrying {}...",
+ ConfigNodeMessages.FAILED_ON_CONFIGNODE_BECAUSE_RETRYING,
requestType,
endPoint,
e.getMessage(),
@@ -115,7 +116,7 @@ public Object sendSyncRequestToConfigNodeWithRetry(
doRetryWait(retry);
}
}
- LOGGER.error("{} failed on ConfigNode {}", requestType, endPoint, lastException);
+ LOGGER.error(ConfigNodeMessages.FAILED_ON_CONFIGNODE, requestType, endPoint, lastException);
return RpcUtils.getStatus(
TSStatusCode.INTERNAL_REQUEST_RETRY_ERROR,
"All retry failed due to: " + lastException.getMessage());
@@ -149,7 +150,7 @@ private void doRetryWait(int retryNum) {
try {
TimeUnit.MILLISECONDS.sleep(100L * (long) Math.pow(2, retryNum));
} catch (InterruptedException e) {
- LOGGER.error("Retry wait failed.", e);
+ LOGGER.error(ConfigNodeMessages.RETRY_WAIT_FAILED, e);
Thread.currentThread().interrupt();
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncDataNodeClientPool.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncDataNodeClientPool.java
index b9cf775459c2e..a0dfd6c27e81b 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncDataNodeClientPool.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncDataNodeClientPool.java
@@ -28,6 +28,7 @@
import org.apache.iotdb.commons.client.exception.ClientManagerException;
import org.apache.iotdb.commons.client.sync.SyncDataNodeInternalServiceClient;
import org.apache.iotdb.commons.exception.UncheckedStartupException;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.mpp.rpc.thrift.TCleanDataNodeCacheReq;
import org.apache.iotdb.mpp.rpc.thrift.TCreateDataRegionReq;
import org.apache.iotdb.mpp.rpc.thrift.TCreatePeerReq;
@@ -160,7 +161,8 @@ private void checkActionMapCompleteness() {
.collect(Collectors.toList());
if (!lackList.isEmpty()) {
throw new UncheckedStartupException(
- String.format("These request types should be added to actionMap: %s", lackList));
+ String.format(
+ ConfigNodeMessages.THESE_REQUEST_TYPES_SHOULD_BE_ADDED_TO_ACTIONMAP, lackList));
}
}
@@ -173,12 +175,13 @@ public Object sendSyncRequestToDataNodeWithRetry(
} catch (Exception e) {
lastException = e;
if (retry != DEFAULT_RETRY_NUM - 1) {
- LOGGER.warn("{} failed on DataNode {}, retrying {}...", requestType, endPoint, retry + 1);
+ LOGGER.warn(
+ ConfigNodeMessages.FAILED_ON_DATANODE_RETRYING, requestType, endPoint, retry + 1);
doRetryWait(retry);
}
}
}
- LOGGER.error("{} failed on DataNode {}", requestType, endPoint, lastException);
+ LOGGER.error(ConfigNodeMessages.FAILED_ON_DATANODE, requestType, endPoint, lastException);
return new TSStatus(TSStatusCode.INTERNAL_REQUEST_RETRY_ERROR.getStatusCode())
.setMessage("All retry failed due to: " + lastException.getMessage());
}
@@ -192,12 +195,13 @@ public Object sendSyncRequestToDataNodeWithGivenRetry(
} catch (Exception e) {
lastException = e;
if (retry != retryNum - 1) {
- LOGGER.warn("{} failed on DataNode {}, retrying {}...", requestType, endPoint, retry + 1);
+ LOGGER.warn(
+ ConfigNodeMessages.FAILED_ON_DATANODE_RETRYING, requestType, endPoint, retry + 1);
doRetryWait(retry);
}
}
}
- LOGGER.error("{} failed on DataNode {}", requestType, endPoint, lastException);
+ LOGGER.error(ConfigNodeMessages.FAILED_ON_DATANODE, requestType, endPoint, lastException);
return new TSStatus(TSStatusCode.INTERNAL_REQUEST_RETRY_ERROR.getStatusCode())
.setMessage("All retry failed due to: " + lastException.getMessage());
}
@@ -218,7 +222,7 @@ private void doRetryWait(int retryNum) {
TimeUnit.MILLISECONDS.sleep(3200L);
}
} catch (InterruptedException e) {
- LOGGER.warn("Retry wait failed.", e);
+ LOGGER.warn(ConfigNodeMessages.RETRY_WAIT_FAILED, e);
Thread.currentThread().interrupt();
}
}
@@ -234,17 +238,17 @@ private void doRetryWait(int retryNum) {
*/
public TRegionLeaderChangeResp changeRegionLeader(
TConsensusGroupId regionId, TEndPoint dataNode, TDataNodeLocation newLeaderNode) {
- LOGGER.info("Send RPC to data node: {} for changing regions leader on it", dataNode);
+ LOGGER.info(ConfigNodeMessages.SEND_RPC_TO_DATA_NODE_FOR_CHANGING_REGIONS_LEADER_ON, dataNode);
TSStatus status;
try (SyncDataNodeInternalServiceClient client = clientManager.borrowClient(dataNode)) {
TRegionLeaderChangeReq req = new TRegionLeaderChangeReq(regionId, newLeaderNode);
return client.changeRegionLeader(req);
} catch (ClientManagerException e) {
- LOGGER.error("Can't connect to Data node: {}", dataNode, e);
+ LOGGER.error(ConfigNodeMessages.CAN_T_CONNECT_TO_DATA_NODE, dataNode, e);
status = new TSStatus(TSStatusCode.CAN_NOT_CONNECT_DATANODE.getStatusCode());
status.setMessage(e.getMessage());
} catch (TException e) {
- LOGGER.error("Change regions leader error on Date node: {}", dataNode, e);
+ LOGGER.error(ConfigNodeMessages.CHANGE_REGIONS_LEADER_ERROR_ON_DATE_NODE, dataNode, e);
status = new TSStatus(TSStatusCode.REGION_LEADER_CHANGE_ERROR.getStatusCode());
status.setMessage(e.getMessage());
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java
index 64167ba1d2e0f..d1e831dfbd62d 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.commons.client.property.ClientPoolProperty.DefaultProperty;
import org.apache.iotdb.commons.conf.CommonDescriptor;
import org.apache.iotdb.commons.conf.IoTDBConstant;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.manager.load.balancer.RegionBalancer;
import org.apache.iotdb.confignode.manager.load.balancer.router.leader.AbstractLeaderBalancer;
import org.apache.iotdb.confignode.manager.load.balancer.router.priority.IPriorityBalancer;
@@ -1231,7 +1232,7 @@ public String getConfigMessage() {
.append(configContent)
.append(";");
} catch (Exception e) {
- LOGGER.warn("Failed to get field {}", configField, e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_TO_GET_FIELD, configField, e);
}
}
return configMessage.toString();
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java
index fa35565ff5101..c2c1deec5c7f9 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java
@@ -27,6 +27,7 @@
import org.apache.iotdb.commons.exception.BadNodeUrlException;
import org.apache.iotdb.commons.schema.SchemaConstant;
import org.apache.iotdb.commons.utils.NodeUrlUtils;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.manager.load.balancer.RegionBalancer;
import org.apache.iotdb.confignode.manager.load.balancer.router.leader.AbstractLeaderBalancer;
import org.apache.iotdb.confignode.manager.load.balancer.router.priority.IPriorityBalancer;
@@ -67,7 +68,7 @@ public class ConfigNodeDescriptor {
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} catch (Exception e) {
- LOGGER.error("Failed to update config file", e);
+ LOGGER.error(ConfigNodeMessages.FAILED_TO_UPDATE_CONFIG_FILE, e);
}
}
@@ -113,7 +114,7 @@ else if (!urlString.endsWith(".properties")) {
try {
return new URL(urlString);
} catch (MalformedURLException e) {
- LOGGER.warn("get url failed", e);
+ LOGGER.warn(ConfigNodeMessages.GET_URL_FAILED, e);
return null;
}
}
@@ -123,11 +124,12 @@ private void loadProps() {
URL url = getPropsUrl(CommonConfig.SYSTEM_CONFIG_NAME);
if (url != null) {
try (InputStream inputStream = url.openStream()) {
- LOGGER.info("start reading ConfigNode conf file: {}", url);
+ LOGGER.info(ConfigNodeMessages.START_READING_CONFIGNODE_CONF_FILE, url);
trimProperties.load(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
loadProperties(trimProperties);
} catch (IOException | BadNodeUrlException e) {
- LOGGER.error("Couldn't load ConfigNode conf file, reject ConfigNode startup.", e);
+ LOGGER.error(
+ ConfigNodeMessages.COULDN_T_LOAD_CONFIGNODE_CONF_FILE_REJECT_CONFIGNODE_STARTUP, e);
System.exit(-1);
} finally {
conf.updatePath();
@@ -141,7 +143,7 @@ private void loadProps() {
}
} else {
LOGGER.warn(
- "Couldn't load the configuration {} from any of the known sources.",
+ ConfigNodeMessages.COULDN_T_LOAD_THE_CONFIGURATION_FROM_ANY_OF_THE_KNOWN,
CommonConfig.SYSTEM_CONFIG_NAME);
}
}
@@ -166,10 +168,7 @@ private void loadProperties(TrimProperties properties) throws BadNodeUrlExceptio
String seedConfigNode = properties.getProperty(IoTDBConstant.CN_SEED_CONFIG_NODE, null);
if (seedConfigNode == null) {
seedConfigNode = properties.getProperty(IoTDBConstant.CN_TARGET_CONFIG_NODE_LIST, null);
- LOGGER.warn(
- "The parameter cn_target_config_node_list has been abandoned, "
- + "only the first ConfigNode address will be used to join in the cluster. "
- + "Please use cn_seed_config_node instead.");
+ LOGGER.warn(ConfigNodeMessages.THE_PARAMETER_CN_TARGET_CONFIG_NODE_LIST_HAS_BEEN_ABANDONED);
}
if (seedConfigNode != null) {
conf.setSeedConfigNode(NodeUrlUtils.parseTEndPointUrls(seedConfigNode).get(0));
@@ -317,9 +316,7 @@ private void loadProperties(TrimProperties properties) throws BadNodeUrlExceptio
conf.setFailureDetector(failureDetector);
} else {
throw new IOException(
- String.format(
- "Unknown failure_detector: %s, " + "please set to \"fixed\" or \"phi_accrual\"",
- failureDetector));
+ String.format(ConfigNodeMessages.UNKNOWN_FAILURE_DETECTOR, failureDetector));
}
conf.setFailureDetectorFixedThresholdInMs(
@@ -349,9 +346,7 @@ private void loadProperties(TrimProperties properties) throws BadNodeUrlExceptio
} else {
throw new IOException(
String.format(
- "Unknown leader_distribution_policy: %s, "
- + "please set to \"GREEDY\" or \"CFD\" or \"HASH\"",
- leaderDistributionPolicy));
+ ConfigNodeMessages.UNKNOWN_LEADER_DISTRIBUTION_POLICY, leaderDistributionPolicy));
}
conf.setEnableAutoLeaderBalanceForRatisConsensus(
@@ -374,8 +369,7 @@ private void loadProperties(TrimProperties properties) throws BadNodeUrlExceptio
} else {
throw new IOException(
String.format(
- "Unknown route_priority_policy: %s, please set to \"LEADER\" or \"GREEDY\"",
- routePriorityPolicy));
+ ConfigNodeMessages.UNKNOWN_ROUTE_PRIORITY_POLICY_PLEASE_SET_TO, routePriorityPolicy));
}
String readConsistencyLevel =
@@ -385,7 +379,7 @@ private void loadProperties(TrimProperties properties) throws BadNodeUrlExceptio
} else {
throw new IOException(
String.format(
- "Unknown read_consistency_level: %s, please set to \"strong\" or \"weak\"",
+ ConfigNodeMessages.UNKNOWN_READ_CONSISTENCY_LEVEL_PLEASE_SET_TO,
readConsistencyLevel));
}
@@ -741,8 +735,7 @@ private void loadCQConfig(TrimProperties properties) {
"continuous_query_submit_thread_count", String.valueOf(conf.getCqSubmitThread())));
if (cqSubmitThread <= 0) {
LOGGER.warn(
- "continuous_query_submit_thread should be greater than 0, "
- + "but current value is {}, ignore that and use the default value {}",
+ ConfigNodeMessages.CONTINUOUS_QUERY_SUBMIT_THREAD_SHOULD_BE_GREATER_THAN_0,
cqSubmitThread,
conf.getCqSubmitThread());
cqSubmitThread = conf.getCqSubmitThread();
@@ -756,8 +749,7 @@ private void loadCQConfig(TrimProperties properties) {
String.valueOf(conf.getCqMinEveryIntervalInMs())));
if (cqMinEveryIntervalInMs <= 0) {
LOGGER.warn(
- "continuous_query_min_every_interval_in_ms should be greater than 0, "
- + "but current value is {}, ignore that and use the default value {}",
+ ConfigNodeMessages.CONTINUOUS_QUERY_MIN_EVERY_INTERVAL_IN_MS_SHOULD_BE_GREATER,
cqMinEveryIntervalInMs,
conf.getCqMinEveryIntervalInMs());
cqMinEveryIntervalInMs = conf.getCqMinEveryIntervalInMs();
@@ -782,7 +774,10 @@ public boolean isSeedConfigNode() {
Collections.singletonList(conf.getSeedConfigNode().getIp()))))
&& conf.getInternalPort() == conf.getSeedConfigNode().getPort();
} catch (UnknownHostException e) {
- LOGGER.warn("Unknown host when checking seed configNode IP {}", conf.getInternalAddress(), e);
+ LOGGER.warn(
+ ConfigNodeMessages.UNKNOWN_HOST_WHEN_CHECKING_SEED_CONFIGNODE_IP,
+ conf.getInternalAddress(),
+ e);
return false;
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeStartupCheck.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeStartupCheck.java
index 70cb453c80d53..d3f6f15f1be29 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeStartupCheck.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeStartupCheck.java
@@ -27,6 +27,7 @@
import org.apache.iotdb.commons.service.StartupChecks;
import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager;
import org.apache.iotdb.confignode.client.sync.SyncDataNodeClientPool;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.manager.load.balancer.router.leader.AbstractLeaderBalancer;
import org.apache.iotdb.confignode.manager.load.balancer.router.priority.IPriorityBalancer;
import org.apache.iotdb.consensus.ConsensusFactory;
@@ -62,9 +63,9 @@ protected void portCheck() throws StartupException {
portSet.add(CONF.getConsensusPort());
portSet.add(CONF.getInternalPort());
if (portSet.size() != CONFIGNODE_PORTS) {
- throw new StartupException("ports used in configNode have repeat.");
+ throw new StartupException(ConfigNodeMessages.PORTS_USED_IN_CONFIGNODE_HAVE_REPEAT);
} else {
- LOGGER.info("configNode port check successful.");
+ LOGGER.info(ConfigNodeMessages.CONFIGNODE_PORT_CHECK_SUCCESSFUL);
}
}
@@ -104,10 +105,12 @@ private void checkGlobalConfig() throws ConfigurationException {
// The replication factor should be positive
if (CONF.getSchemaReplicationFactor() <= 0) {
- throw new ConfigurationException("The schema_replication_factor should be positive");
+ throw new ConfigurationException(
+ ConfigNodeMessages.THE_SCHEMA_REPLICATION_FACTOR_SHOULD_BE_POSITIVE);
}
if (CONF.getDataReplicationFactor() <= 0) {
- throw new ConfigurationException("The data_replication_factor should be positive");
+ throw new ConfigurationException(
+ ConfigNodeMessages.THE_DATA_REPLICATION_FACTOR_SHOULD_BE_POSITIVE);
}
// When the schema_replication_factor is greater than 1
@@ -115,7 +118,7 @@ private void checkGlobalConfig() throws ConfigurationException {
if (CONF.getSchemaReplicationFactor() > 1
&& ConsensusFactory.SIMPLE_CONSENSUS.equals(CONF.getSchemaRegionConsensusProtocolClass())) {
throw new ConfigurationException(
- "schema_region_consensus_protocol_class",
+ ConfigNodeMessages.SCHEMA_REGION_CONSENSUS_PROTOCOL_CLASS,
CONF.getSchemaRegionConsensusProtocolClass(),
ConsensusFactory.RATIS_CONSENSUS,
ConsensusFactory.SIMPLE_CONSENSUS
@@ -127,7 +130,7 @@ private void checkGlobalConfig() throws ConfigurationException {
if (CONF.getDataReplicationFactor() > 1
&& ConsensusFactory.SIMPLE_CONSENSUS.equals(CONF.getDataRegionConsensusProtocolClass())) {
throw new ConfigurationException(
- "data_region_consensus_protocol_class",
+ ConfigNodeMessages.DATA_REGION_CONSENSUS_PROTOCOL_CLASS,
CONF.getDataRegionConsensusProtocolClass(),
ConsensusFactory.IOT_CONSENSUS + "or" + ConsensusFactory.RATIS_CONSENSUS,
ConsensusFactory.SIMPLE_CONSENSUS
@@ -138,7 +141,7 @@ private void checkGlobalConfig() throws ConfigurationException {
// we should report an error
if (CONF.getSchemaRegionConsensusProtocolClass().equals(ConsensusFactory.IOT_CONSENSUS)) {
throw new ConfigurationException(
- "schema_region_consensus_protocol_class",
+ ConfigNodeMessages.SCHEMA_REGION_CONSENSUS_PROTOCOL_CLASS,
String.valueOf(CONF.getSchemaRegionConsensusProtocolClass()),
String.format(
"%s or %s", ConsensusFactory.SIMPLE_CONSENSUS, ConsensusFactory.RATIS_CONSENSUS),
@@ -149,7 +152,7 @@ private void checkGlobalConfig() throws ConfigurationException {
// we should report an error
if (CONF.getSchemaRegionConsensusProtocolClass().equals(ConsensusFactory.IOT_CONSENSUS_V2)) {
throw new ConfigurationException(
- "schema_region_consensus_protocol_class",
+ ConfigNodeMessages.SCHEMA_REGION_CONSENSUS_PROTOCOL_CLASS,
String.valueOf(CONF.getSchemaRegionConsensusProtocolClass()),
String.format(
"%s or %s", ConsensusFactory.SIMPLE_CONSENSUS, ConsensusFactory.RATIS_CONSENSUS),
@@ -161,7 +164,7 @@ private void checkGlobalConfig() throws ConfigurationException {
&& !AbstractLeaderBalancer.CFD_POLICY.equals(CONF.getLeaderDistributionPolicy())
&& !AbstractLeaderBalancer.HASH_POLICY.equals(CONF.getLeaderDistributionPolicy())) {
throw new ConfigurationException(
- "leader_distribution_policy",
+ ConfigNodeMessages.LEADER_DISTRIBUTION_POLICY,
CONF.getRoutePriorityPolicy(),
"GREEDY or MIN_COST_FLOW or HASH",
"an unrecognized leader_distribution_policy is set");
@@ -171,7 +174,7 @@ private void checkGlobalConfig() throws ConfigurationException {
if (!CONF.getRoutePriorityPolicy().equals(IPriorityBalancer.LEADER_POLICY)
&& !CONF.getRoutePriorityPolicy().equals(IPriorityBalancer.GREEDY_POLICY)) {
throw new ConfigurationException(
- "route_priority_policy",
+ ConfigNodeMessages.ROUTE_PRIORITY_POLICY,
CONF.getRoutePriorityPolicy(),
"LEADER or GREEDY",
"an unrecognized route_priority_policy is set");
@@ -179,20 +182,24 @@ private void checkGlobalConfig() throws ConfigurationException {
// The default RegionGroupNum should be positive
if (CONF.getDefaultSchemaRegionGroupNumPerDatabase() <= 0) {
- throw new ConfigurationException("The default_schema_region_group_num should be positive");
+ throw new ConfigurationException(
+ ConfigNodeMessages.THE_DEFAULT_SCHEMA_REGION_GROUP_NUM_SHOULD_BE_POSITIVE);
}
if (CONF.getDefaultDataRegionGroupNumPerDatabase() <= 0) {
- throw new ConfigurationException("The default_data_region_group_num should be positive");
+ throw new ConfigurationException(
+ ConfigNodeMessages.THE_DEFAULT_DATA_REGION_GROUP_NUM_SHOULD_BE_POSITIVE);
}
// Check time partition origin
if (COMMON_CONFIG.getTimePartitionOrigin() < 0) {
- throw new ConfigurationException("The time_partition_origin should be non-negative");
+ throw new ConfigurationException(
+ ConfigNodeMessages.THE_TIME_PARTITION_ORIGIN_SHOULD_BE_NON_NEGATIVE);
}
// Check time partition interval
if (COMMON_CONFIG.getTimePartitionInterval() <= 0) {
- throw new ConfigurationException("The time_partition_interval should be positive");
+ throw new ConfigurationException(
+ ConfigNodeMessages.THE_TIME_PARTITION_INTERVAL_SHOULD_BE_POSITIVE);
}
// Check timestamp precision
@@ -200,7 +207,8 @@ private void checkGlobalConfig() throws ConfigurationException {
if (!("ms".equals(timestampPrecision)
|| "us".equals(timestampPrecision)
|| "ns".equals(timestampPrecision))) {
- throw new ConfigurationException("The timestamp_precision should be ms, us or ns");
+ throw new ConfigurationException(
+ ConfigNodeMessages.THE_TIMESTAMP_PRECISION_SHOULD_BE_MS_US_OR_NS);
}
}
@@ -217,11 +225,11 @@ private void createDirsIfNecessary() throws IOException {
private void createDirIfEmpty(File dir) throws IOException {
if (!dir.exists()) {
if (dir.mkdirs()) {
- LOGGER.info("Make dirs: {}", dir);
+ LOGGER.info(ConfigNodeMessages.MAKE_DIRS, dir);
} else {
throw new IOException(
String.format(
- "Start ConfigNode failed, because couldn't make system dirs: %s.",
+ ConfigNodeMessages.START_CONFIGNODE_FAILED_BECAUSE_COULDN_T_MAKE_SYSTEM_DIRS,
dir.getAbsolutePath()));
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/SystemPropertiesUtils.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/SystemPropertiesUtils.java
index a3ee036f6ab05..2f2cea4535f57 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/SystemPropertiesUtils.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/conf/SystemPropertiesUtils.java
@@ -27,6 +27,7 @@
import org.apache.iotdb.commons.exception.BadNodeUrlException;
import org.apache.iotdb.commons.file.SystemPropertiesHandler;
import org.apache.iotdb.commons.utils.NodeUrlUtils;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.consensus.ConsensusFactory;
import org.slf4j.Logger;
@@ -62,7 +63,7 @@ public class SystemPropertiesUtils {
private static final String TIME_PARTITION_INTERVAL = "time_partition_interval";
private SystemPropertiesUtils() {
- throw new IllegalStateException("Utility class: SystemPropertiesUtils.");
+ throw new IllegalStateException(ConfigNodeMessages.UTILITY_CLASS_SYSTEMPROPERTIESUTILS);
}
public static void reinitializeStatics() {
@@ -151,7 +152,7 @@ public static void checkSystemProperties() throws IOException {
systemProperties.setProperty(DATA_CONSENSUS_PROTOCOL, dataRegionConsensusProtocolClass);
needRewriteConsensusProtocol = true;
LOGGER.warn(
- "[SystemProperties] Normalize {} from {} to {} for compatibility.",
+ ConfigNodeMessages.SYSTEMPROPERTIES_NORMALIZE_FROM_TO_FOR_COMPATIBILITY,
DATA_CONSENSUS_PROTOCOL,
persistedDataRegionConsensusProtocolClass,
dataRegionConsensusProtocolClass);
@@ -272,12 +273,12 @@ public static void storeSystemParameters() throws IOException {
// Cluster configuration
systemProperties.setProperty("config_node_id", String.valueOf(conf.getConfigNodeId()));
- LOGGER.info("[SystemProperties] store config_node_id: {}", conf.getConfigNodeId());
+ LOGGER.info(ConfigNodeMessages.SYSTEMPROPERTIES_STORE_CONFIG_NODE_ID, conf.getConfigNodeId());
systemProperties.setProperty(
"is_seed_config_node",
String.valueOf(ConfigNodeDescriptor.getInstance().isSeedConfigNode()));
LOGGER.info(
- "[SystemProperties] store is_seed_config_node: {}",
+ ConfigNodeMessages.SYSTEMPROPERTIES_STORE_IS_SEED_CONFIG_NODE,
ConfigNodeDescriptor.getInstance().isSeedConfigNode());
// Startup configuration
@@ -347,7 +348,7 @@ public static int loadConfigNodeIdWhenRestarted() throws IOException {
return Integer.parseInt(systemProperties.getProperty("config_node_id", null));
} catch (NumberFormatException e) {
throw new IOException(
- "The parameter config_node_id doesn't exist in "
+ ConfigNodeMessages.THE_PARAMETER_CONFIG_NODE_ID_DOESN_T_EXIST_IN
+ "data/confignode/system/confignode-system.properties. "
+ "Please delete data dir data/confignode and restart again.",
e);
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlan.java
index ed3db747c5f80..1ad547f7845f8 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlan.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigPhysicalPlan.java
@@ -142,6 +142,7 @@
import org.apache.iotdb.confignode.consensus.request.write.trigger.UpdateTriggerLocationPlan;
import org.apache.iotdb.confignode.consensus.request.write.trigger.UpdateTriggerStateInTablePlan;
import org.apache.iotdb.confignode.consensus.request.write.trigger.UpdateTriggersOnTransferNodesPlan;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.tsfile.utils.PublicBAOS;
@@ -191,7 +192,8 @@ public static ConfigPhysicalPlan create(final ByteBuffer buffer) throws IOExcept
final ConfigPhysicalPlanType configPhysicalPlanType =
ConfigPhysicalPlanType.convertToConfigPhysicalPlanType(planType);
if (configPhysicalPlanType == null) {
- throw new IOException("Unrecognized log configPhysicalPlanType: " + planType);
+ throw new IOException(
+ ConfigNodeMessages.UNRECOGNIZED_LOG_CONFIGPHYSICALPLANTYPE + planType);
}
final ConfigPhysicalPlan plan;
@@ -617,7 +619,8 @@ public static ConfigPhysicalPlan create(final ByteBuffer buffer) throws IOExcept
plan = new DropExternalServicePlan();
break;
default:
- throw new IOException("unknown PhysicalPlan configPhysicalPlanType: " + planType);
+ throw new IOException(
+ ConfigNodeMessages.UNKNOWN_PHYSICALPLAN_CONFIGPHYSICALPLANTYPE + planType);
}
plan.deserializeImpl(buffer);
return plan;
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/task/OperateMultiplePipesPlanV2.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/task/OperateMultiplePipesPlanV2.java
index c9ff9f574483f..c272e15b1a363 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/task/OperateMultiplePipesPlanV2.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/pipe/task/OperateMultiplePipesPlanV2.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan;
import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.tsfile.utils.ReadWriteIOUtils;
@@ -64,7 +65,8 @@ protected void serializeImpl(DataOutputStream stream) throws IOException {
} else if (subPlan instanceof SetPipeStatusPlanV2) {
((SetPipeStatusPlanV2) subPlan).serializeImpl(stream);
} else {
- throw new IOException("Unsupported sub plan type: " + subPlan.getClass().getName());
+ throw new IOException(
+ ConfigNodeMessages.UNSUPPORTED_SUB_PLAN_TYPE + subPlan.getClass().getName());
}
}
} else {
@@ -96,7 +98,7 @@ protected void deserializeImpl(ByteBuffer buffer) throws IOException {
setPipeStatusPlanV2.deserializeImpl(buffer);
subPlans.add(setPipeStatusPlanV2);
} else {
- throw new IOException("Unsupported sub plan type: " + type);
+ throw new IOException(ConfigNodeMessages.UNSUPPORTED_SUB_PLAN_TYPE + type);
}
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/statemachine/ConfigRegionStateMachine.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/statemachine/ConfigRegionStateMachine.java
index effb1c466bab9..c49b92c70d452 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/statemachine/ConfigRegionStateMachine.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/statemachine/ConfigRegionStateMachine.java
@@ -34,6 +34,7 @@
import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan;
import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan;
import org.apache.iotdb.confignode.exception.physical.UnknownPhysicalPlanTypeException;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.manager.consensus.ConsensusManager;
import org.apache.iotdb.confignode.manager.pipe.agent.PipeConfigNodeAgent;
@@ -125,7 +126,7 @@ protected TSStatus write(ConfigPhysicalPlan plan) {
try {
result = executor.executeNonQueryPlan(plan);
} catch (UnknownPhysicalPlanTypeException e) {
- LOGGER.error("Execute non-query plan failed", e);
+ LOGGER.error(ConfigNodeMessages.EXECUTE_NON_QUERY_PLAN_FAILED, e);
result = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
}
@@ -148,7 +149,7 @@ public IConsensusRequest deserializeRequest(IConsensusRequest request) {
result = ConfigPhysicalPlan.Factory.create(request.serializeToByteBuffer());
} catch (Exception e) {
LOGGER.error(
- "Deserialization error for write plan, request: {}, bytebuffer: {}",
+ ConfigNodeMessages.DESERIALIZATION_ERROR_FOR_WRITE_PLAN_REQUEST_BYTEBUFFER,
request,
request.serializeToByteBuffer(),
e);
@@ -158,7 +159,7 @@ public IConsensusRequest deserializeRequest(IConsensusRequest request) {
result = request;
} else {
LOGGER.error(
- "Unexpected write plan, request: {}, bytebuffer: {}",
+ ConfigNodeMessages.UNEXPECTED_WRITE_PLAN_REQUEST_BYTEBUFFER,
request,
request.serializeToByteBuffer());
return null;
@@ -172,7 +173,7 @@ public DataSet read(final IConsensusRequest request) {
if (request instanceof ConfigPhysicalReadPlan) {
plan = (ConfigPhysicalReadPlan) request;
} else {
- LOGGER.error("Unexpected read plan : {}", request);
+ LOGGER.error(ConfigNodeMessages.UNEXPECTED_READ_PLAN, request);
return null;
}
return read(plan);
@@ -184,7 +185,7 @@ protected DataSet read(final ConfigPhysicalReadPlan plan) {
try {
result = executor.executeQueryPlan(plan);
} catch (final UnknownPhysicalPlanTypeException | AuthException e) {
- LOGGER.error("Execute query plan failed", e);
+ LOGGER.error(ConfigNodeMessages.EXECUTE_QUERY_PLAN_FAILED, e);
result = null;
}
return result;
@@ -201,7 +202,8 @@ public boolean takeSnapshot(File snapshotDir) {
} catch (IOException e) {
if (PipeConfigNodeAgent.runtime().listener().isOpened()) {
LOGGER.warn(
- "Config Region Listening Queue Listen to snapshot failed, the historical data may not be transferred.",
+ ConfigNodeMessages
+ .CONFIG_REGION_LISTENING_QUEUE_LISTEN_TO_SNAPSHOT_FAILED_THE_HISTORICAL,
e);
}
}
@@ -221,7 +223,7 @@ public void loadSnapshot(final File latestSnapshotRootDir) {
} catch (final IOException e) {
if (PipeConfigNodeAgent.runtime().listener().isOpened()) {
LOGGER.warn(
- "Config Region Listening Queue Listen to snapshot failed when startup, snapshot will be tried again when starting schema transferring pipes",
+ ConfigNodeMessages.CONFIG_REGION_LISTENING_QUEUE_LISTEN_TO_SNAPSHOT_FAILED_WHEN_STARTUP,
e);
}
}
@@ -234,7 +236,7 @@ public void notifyLeaderChanged(ConsensusGroupId groupId, int newLeaderId) {
int currentNodeId = ConfigNodeDescriptor.getInstance().getConf().getConfigNodeId();
if (currentNodeId != newLeaderId) {
LOGGER.info(
- "Current node [nodeId:{}, ip:port: {}] is no longer the leader, "
+ ConfigNodeMessages.CURRENT_NODE_NODEID_IP_PORT_IS_NO_LONGER_THE_LEADER
+ "the new leader is [nodeId:{}]",
currentNodeId,
currentNodeTEndPoint,
@@ -248,7 +250,7 @@ public void notifyNotLeader() {
// couldn't initialize earlier than the ConfigRegionStateMachine
int currentNodeId = ConfigNodeDescriptor.getInstance().getConf().getConfigNodeId();
LOGGER.info(
- "Current node [nodeId:{}, ip:port: {}] is no longer the leader, "
+ ConfigNodeMessages.CURRENT_NODE_NODEID_IP_PORT_IS_NO_LONGER_THE_LEADER
+ "start cleaning up related services",
currentNodeId,
currentNodeTEndPoint);
@@ -272,7 +274,7 @@ public void notifyNotLeader() {
PipeConfigNodeAgent.receiver().cleanPipeReceiverDir();
LOGGER.info(
- "Current node [nodeId:{}, ip:port: {}] is no longer the leader, "
+ ConfigNodeMessages.CURRENT_NODE_NODEID_IP_PORT_IS_NO_LONGER_THE_LEADER
+ "all services on old leader are unavailable now.",
currentNodeId,
currentNodeTEndPoint);
@@ -281,7 +283,7 @@ public void notifyNotLeader() {
@Override
public void notifyLeaderReady() {
LOGGER.info(
- "Current node [nodeId: {}, ip:port: {}] becomes config region leader",
+ ConfigNodeMessages.CURRENT_NODE_NODEID_IP_PORT_BECOMES_CONFIG_REGION_LEADER,
ConfigNodeDescriptor.getInstance().getConf().getConfigNodeId(),
currentNodeTEndPoint);
@@ -328,7 +330,7 @@ public void notifyLeaderReady() {
threadPool.submit(() -> configManager.getClusterManager().checkClusterId());
LOGGER.info(
- "Current node [nodeId: {}, ip:port: {}] as config region leader is ready to work",
+ ConfigNodeMessages.CURRENT_NODE_NODEID_IP_PORT_AS_CONFIG_REGION_LEADER_IS,
ConfigNodeDescriptor.getInstance().getConf().getConfigNodeId(),
currentNodeTEndPoint);
}
@@ -359,14 +361,15 @@ private void writeLogForSimpleConsensus(ConfigPhysicalPlan plan) {
Files.move(
simpleLogFile.toPath(), completedFilePath.toPath(), StandardCopyOption.ATOMIC_MOVE);
} catch (IOException e) {
- LOGGER.error("Can't force logWriter for ConfigNode SimpleConsensus mode", e);
+ LOGGER.error(
+ ConfigNodeMessages.CAN_T_FORCE_LOGWRITER_FOR_CONFIGNODE_SIMPLECONSENSUS_MODE, e);
}
for (int retry = 0; retry < 5; retry++) {
try {
simpleLogWriter.close();
} catch (IOException e) {
LOGGER.warn(
- "Can't close StandAloneLog for ConfigNode SimpleConsensus mode, "
+ ConfigNodeMessages.CAN_T_CLOSE_STANDALONELOG_FOR_CONFIGNODE_SIMPLECONSENSUS_MODE
+ "filePath: {}, retry: {}",
simpleLogFile.getAbsolutePath(),
retry);
@@ -375,7 +378,8 @@ private void writeLogForSimpleConsensus(ConfigPhysicalPlan plan) {
TimeUnit.SECONDS.sleep(1);
} catch (InterruptedException e2) {
Thread.currentThread().interrupt();
- LOGGER.warn("Unexpected interruption during the close method of logWriter");
+ LOGGER.warn(
+ ConfigNodeMessages.UNEXPECTED_INTERRUPTION_DURING_THE_CLOSE_METHOD_OF_LOGWRITER);
}
continue;
}
@@ -393,7 +397,9 @@ private void writeLogForSimpleConsensus(ConfigPhysicalPlan plan) {
endIndex = endIndex + 1;
} catch (Exception e) {
LOGGER.error(
- "Can't serialize current ConfigPhysicalPlan for ConfigNode SimpleConsensus mode", e);
+ ConfigNodeMessages
+ .CAN_T_SERIALIZE_CURRENT_CONFIGPHYSICALPLAN_FOR_CONFIGNODE_SIMPLECONSENSUS_MODE,
+ e);
}
}
@@ -411,7 +417,8 @@ private void initStandAloneConfigNode() {
logReader = new SingleFileLogReader(logFile);
} catch (FileNotFoundException e) {
LOGGER.error(
- "InitStandAloneConfigNode meets error, can't find standalone log files, filePath: {}",
+ ConfigNodeMessages
+ .INITSTANDALONECONFIGNODE_MEETS_ERROR_CAN_T_FIND_STANDALONE_LOG_FILES_FILEPATH,
logFile.getAbsolutePath(),
e);
continue;
@@ -431,7 +438,7 @@ private void initStandAloneConfigNode() {
PipeConfigNodeAgent.runtime().listener().tryListenToPlan(nextPlan, false);
}
} catch (UnknownPhysicalPlanTypeException e) {
- LOGGER.error("Try listen to plan failed", e);
+ LOGGER.error(ConfigNodeMessages.TRY_LISTEN_TO_PLAN_FAILED, e);
}
}
logReader.close();
@@ -459,7 +466,8 @@ private void flushWALForSimpleConsensus() {
try {
simpleLogWriter.force();
} catch (IOException e) {
- LOGGER.error("Can't force logWriter for ConfigNode flushWALForSimpleConsensus", e);
+ LOGGER.error(
+ ConfigNodeMessages.CAN_T_FORCE_LOGWRITER_FOR_CONFIGNODE_FLUSHWALFORSIMPLECONSENSUS, e);
}
}
}
@@ -469,14 +477,16 @@ private void createLogFile(int startIndex) {
try {
if (!simpleLogFile.createNewFile()) {
LOGGER.warn(
- "ConfigNode SimpleConsensusFile has existed,filePath:{}",
+ ConfigNodeMessages.CONFIGNODE_SIMPLECONSENSUSFILE_HAS_EXISTED_FILEPATH,
simpleLogFile.getAbsolutePath());
}
simpleLogWriter = new LogWriter(simpleLogFile, false);
- LOGGER.info("Create ConfigNode SimpleConsensusFile: {}", simpleLogFile.getAbsolutePath());
+ LOGGER.info(
+ ConfigNodeMessages.CREATE_CONFIGNODE_SIMPLECONSENSUSFILE,
+ simpleLogFile.getAbsolutePath());
} catch (Exception e) {
LOGGER.warn(
- "Create ConfigNode SimpleConsensusFile failed, filePath: {}",
+ ConfigNodeMessages.CREATE_CONFIGNODE_SIMPLECONSENSUSFILE_FAILED_FILEPATH,
simpleLogFile.getAbsolutePath(),
e);
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterManager.java
index 0bfc6df39e2f4..7faec232cabb8 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterManager.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterManager.java
@@ -40,6 +40,7 @@
import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext;
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
import org.apache.iotdb.confignode.consensus.request.write.confignode.UpdateClusterIdPlan;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.persistence.ClusterInfo;
import org.apache.iotdb.consensus.exception.ConsensusException;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -72,7 +73,7 @@ public ClusterManager(IManager configManager, ClusterInfo clusterInfo) {
public void checkClusterId() {
if (clusterInfo.getClusterId() != null) {
- LOGGER.info("clusterID: {}", clusterInfo.getClusterId());
+ LOGGER.info(ManagerMessages.CLUSTERID, clusterInfo.getClusterId());
return;
}
generateClusterId();
@@ -90,7 +91,7 @@ public String getClusterIdWithRetry(long maxWaitTime) {
Thread.sleep(100);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- LOGGER.warn("Unexpected interruption during waiting for get cluster id.");
+ LOGGER.warn(ManagerMessages.UNEXPECTED_INTERRUPTION_DURING_WAITING_FOR_GET_CLUSTER_ID);
break;
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java
index 327f842e966e6..53e51d36ee76a 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java
@@ -108,6 +108,7 @@
import org.apache.iotdb.confignode.consensus.response.template.TemplateSetInfoResp;
import org.apache.iotdb.confignode.consensus.response.ttl.ShowTTLResp;
import org.apache.iotdb.confignode.consensus.statemachine.ConfigRegionStateMachine;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.consensus.ConsensusManager;
import org.apache.iotdb.confignode.manager.cq.CQManager;
import org.apache.iotdb.confignode.manager.externalservice.ExternalServiceInfo;
@@ -563,7 +564,7 @@ public TSStatus reportDataNodeShutdown(TDataNodeLocation dataNodeLocation) {
dataNodeLocation.getDataNodeId(),
new NodeHeartbeatSample(NodeStatus.Unknown));
LOGGER.info(
- "The DataNode-{} will be shutdown soon, mark it as Unknown",
+ ManagerMessages.THE_DATANODE_WILL_BE_SHUTDOWN_SOON_MARK_IT_AS_UNKNOWN,
dataNodeLocation.getDataNodeId());
}
return status;
@@ -901,7 +902,7 @@ public TSchemaPartitionTableResp getSchemaPartition(
partitionManager.getSchemaPartition(getSchemaPartitionPlan);
resp = queryResult.convertToRpcSchemaPartitionTableResp();
- LOGGER.debug("GetSchemaPartition receive paths: {}, return: {}", dbSlotMap, resp);
+ LOGGER.debug(ManagerMessages.GETSCHEMAPARTITION_RECEIVE_PATHS_RETURN, dbSlotMap, resp);
return resp;
}
@@ -1001,7 +1002,8 @@ private void printNewCreatedSchemaPartition(
Map> databaseNameSlotMap, TSchemaPartitionTableResp resp) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(
- "[GetOrCreateSchemaPartition]:{}Receive databaseNameSlotMap: {}, Return TSchemaPartitionTableResp: {}",
+ ManagerMessages
+ .GETORCREATESCHEMAPARTITION_RECEIVE_DATABASENAMESLOTMAP_RETURN_TSCHEMAPARTITIONTABLERESP,
System.lineSeparator(),
databaseNameSlotMap,
partitionTableRespToString(resp));
@@ -1019,7 +1021,8 @@ private void printNewCreatedSchemaPartition(
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(
- "[GetOrCreateSchemaPartition]:{}Receive PathPatternTree: {}, Return TSchemaPartitionTableResp: {}",
+ ManagerMessages
+ .GETORCREATESCHEMAPARTITION_RECEIVE_PATHPATTERNTREE_RETURN_TSCHEMAPARTITIONTABLERESP,
lineSeparator,
devicePathString,
partitionTableRespToString(resp));
@@ -1106,7 +1109,7 @@ private void printNodePathsPartition(
schemaNodeManagementRespString.append(lineSeparator).append("}");
LOGGER.info(
- "[GetNodePathsPartition]:{}Received PartialPath: {}, Level: {}, PathPatternTree: {}, Resp: {}",
+ ManagerMessages.GETNODEPATHSPARTITION_RECEIVED_PARTIALPATH_LEVEL_PATHPATTERNTREE_RESP,
lineSeparator,
partialPath,
level,
@@ -1128,7 +1131,7 @@ public TDataPartitionTableResp getDataPartition(GetDataPartitionPlan getDataPart
resp = queryResult.convertToTDataPartitionTableResp();
LOGGER.debug(
- "GetDataPartition interface receive PartitionSlotsMap: {}, return: {}",
+ ManagerMessages.GETDATAPARTITION_INTERFACE_RECEIVE_PARTITIONSLOTSMAP_RETURN,
getDataPartitionPlan.getPartitionSlotsMap(),
resp);
@@ -1225,7 +1228,8 @@ private void printNewCreatedDataPartition(
dataPartitionRespString.append(lineSeparator).append("}");
LOGGER.info(
- "[GetOrCreateDataPartition]:{}Receive PartitionSlotsMap: {}, Return TDataPartitionTableResp: {}",
+ ManagerMessages
+ .GETORCREATEDATAPARTITION_RECEIVE_PARTITIONSLOTSMAP_RETURN_TDATAPARTITIONTABLERESP,
lineSeparator,
partitionSlotsMapString,
dataPartitionRespString);
@@ -1535,9 +1539,10 @@ public TSStatus createPeerForConsensusGroup(List configNode
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- LOGGER.warn("Unexpected interruption during retry creating peer for consensus group");
+ LOGGER.warn(
+ ManagerMessages.UNEXPECTED_INTERRUPTION_DURING_RETRY_CREATING_PEER_FOR_CONSENSUS_GROUP);
} catch (ConsensusException e) {
- LOGGER.error("Failed to create peer for consensus group", e);
+ LOGGER.error(ManagerMessages.FAILED_TO_CREATE_PEER_FOR_CONSENSUS_GROUP, e);
break;
}
}
@@ -1569,7 +1574,7 @@ public TSStatus reportConfigNodeShutdown(TConfigNodeLocation configNodeLocation)
configNodeLocation.getConfigNodeId(),
new NodeHeartbeatSample(NodeStatus.Unknown));
LOGGER.info(
- "The ConfigNode-{} will be shutdown soon, mark it as Unknown",
+ ManagerMessages.THE_CONFIGNODE_WILL_BE_SHUTDOWN_SOON_MARK_IT_AS_UNKNOWN,
configNodeLocation.getConfigNodeId());
}
return status;
@@ -1804,7 +1809,7 @@ public TSStatus submitLoadConfigurationTask() {
@Override
public TSStatus loadConfiguration() {
- throw new UnsupportedOperationException("not implement yet");
+ throw new UnsupportedOperationException(ManagerMessages.NOT_IMPLEMENT_YET);
}
@Override
@@ -1917,7 +1922,8 @@ public TRegionRouteMapResp getLatestRegionRouteMap() {
TimeUnit.MILLISECONDS.sleep(retryIntervalInMS);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- LOGGER.warn("Unexpected interruption during retry getting latest region route map");
+ LOGGER.warn(
+ ManagerMessages.UNEXPECTED_INTERRUPTION_DURING_RETRY_GETTING_LATEST_REGION_ROUTE_MAP);
resp.getStatus().setCode(TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode());
return resp;
}
@@ -2807,12 +2813,12 @@ public TSStatus transfer(List newUnknownDataList) {
newUnknownDataList.forEach(
dataNodeLocation -> runningDataNodeLocationMap.remove(dataNodeLocation.getDataNodeId()));
- LOGGER.info("Start transfer of {}", newUnknownDataList);
+ LOGGER.info(ManagerMessages.START_TRANSFER_OF, newUnknownDataList);
// Transfer trigger
TSStatus transferResult =
triggerManager.transferTrigger(newUnknownDataList, runningDataNodeLocationMap);
if (transferResult.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.warn("Fail to transfer because {}, will retry", transferResult.getMessage());
+ LOGGER.warn(ManagerMessages.FAIL_TO_TRANSFER_BECAUSE_WILL_RETRY, transferResult.getMessage());
}
return transferResult;
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/PermissionManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/PermissionManager.java
index ee39bfc2b19e9..3d585ef22705e 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/PermissionManager.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/PermissionManager.java
@@ -29,6 +29,8 @@
import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan;
import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeEnrichedPlan;
import org.apache.iotdb.confignode.consensus.response.auth.PermissionInfoResp;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.consensus.ConsensusManager;
import org.apache.iotdb.confignode.persistence.auth.AuthorInfo;
import org.apache.iotdb.confignode.rpc.thrift.TAuthizedPatternTreeResp;
@@ -64,7 +66,7 @@ public PermissionManager(final ConfigManager configManager, final AuthorInfo aut
public TSStatus operatePermission(AuthorPlan authorPlan, boolean isGeneratedByPipe) {
TSStatus tsStatus;
// If the permissions change, clear the cache content affected by the operation
- LOGGER.info("Auth: run auth plan: {}", authorPlan.toString());
+ LOGGER.info(ManagerMessages.AUTH_RUN_AUTH_PLAN, authorPlan.toString());
try {
if (authorPlan.getAuthorType() == ConfigPhysicalPlanType.CreateUser
|| authorPlan.getAuthorType() == ConfigPhysicalPlanType.RCreateUser
@@ -84,7 +86,7 @@ public TSStatus operatePermission(AuthorPlan authorPlan, boolean isGeneratedByPi
}
return tsStatus;
} catch (final ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
res.setMessage(e.getMessage());
return res;
@@ -101,7 +103,7 @@ public PermissionInfoResp queryPermission(final AuthorPlan authorPlan) {
try {
return (PermissionInfoResp) getConsensusManager().read(authorPlan);
} catch (final ConsensusException e) {
- LOGGER.warn("Failed in the read API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_READ_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
final TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
res.setMessage(e.getMessage());
return new PermissionInfoResp(res);
@@ -150,6 +152,7 @@ public String getUserName(long userId) throws AuthException {
public TSStatus enableSeparationOfPowers(
String systemAdminUsername, String securityAdminUsername, String auditAdminUsername) {
- throw new UnsupportedOperationException("Enable separation of powers is not supported");
+ throw new UnsupportedOperationException(
+ ManagerMessages.ENABLE_SEPARATION_OF_POWERS_IS_NOT_SUPPORTED);
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java
index 3de0f4247d808..663032005c8b8 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java
@@ -54,6 +54,7 @@
import org.apache.iotdb.confignode.consensus.request.write.datanode.RemoveDataNodePlan;
import org.apache.iotdb.confignode.consensus.request.write.procedure.UpdateProcedurePlan;
import org.apache.iotdb.confignode.consensus.request.write.region.CreateRegionGroupsPlan;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.partition.PartitionManager;
import org.apache.iotdb.confignode.persistence.ProcedureInfo;
import org.apache.iotdb.confignode.procedure.PartitionTableAutoCleaner;
@@ -233,7 +234,7 @@ public void startExecutor() {
CONFIG_NODE_CONFIG.getProcedureCompletedEvictTTL());
executor.addInternalProcedure(partitionTableCleaner);
store.start();
- LOGGER.info("ProcedureManager is started successfully.");
+ LOGGER.info(ManagerMessages.PROCEDUREMANAGER_IS_STARTED_SUCCESSFULLY);
}
}
@@ -243,7 +244,7 @@ public void stopExecutor() {
if (!executor.isRunning()) {
executor.join();
store.stop();
- LOGGER.info("ProcedureManager is stopped successfully.");
+ LOGGER.info(ManagerMessages.PROCEDUREMANAGER_IS_STOPPED_SUCCESSFULLY);
}
executor.removeInternalProcedure(partitionTableCleaner);
}
@@ -628,7 +629,8 @@ public void removeConfigNode(RemoveConfigNodePlan removeConfigNodePlan) {
final RemoveConfigNodeProcedure removeConfigNodeProcedure =
new RemoveConfigNodeProcedure(removeConfigNodePlan.getConfigNodeLocation());
this.executor.submitProcedure(removeConfigNodeProcedure);
- LOGGER.info("Submit RemoveConfigNodeProcedure successfully: {}", removeConfigNodePlan);
+ LOGGER.info(
+ ManagerMessages.SUBMIT_REMOVECONFIGNODEPROCEDURE_SUCCESSFULLY, removeConfigNodePlan);
}
/**
@@ -647,7 +649,7 @@ public boolean removeDataNode(RemoveDataNodePlan removeDataNodePlan) {
this.executor.submitProcedure(
new RemoveDataNodesProcedure(removeDataNodePlan.getDataNodeLocations(), nodeStatusMap));
LOGGER.info(
- "Submit RemoveDataNodesProcedure successfully, {}",
+ ManagerMessages.SUBMIT_REMOVEDATANODESPROCEDURE_SUCCESSFULLY,
removeDataNodePlan.getDataNodeLocations());
return true;
}
@@ -655,7 +657,8 @@ public boolean removeDataNode(RemoveDataNodePlan removeDataNodePlan) {
public boolean removeAINode(RemoveAINodePlan removeAINodePlan) {
this.executor.submitProcedure(new RemoveAINodeProcedure(removeAINodePlan.getAINodeLocation()));
LOGGER.info(
- "Submit RemoveAINodeProcedure successfully, {}", removeAINodePlan.getAINodeLocation());
+ ManagerMessages.SUBMIT_REMOVEAINODEPROCEDURE_SUCCESSFULLY,
+ removeAINodePlan.getAINodeLocation());
return true;
}
@@ -1087,7 +1090,7 @@ public TSStatus migrateRegion(TMigrateRegionReq migrateRegionReq) {
if (optional.isPresent()) {
regionGroupId = optional.get();
} else {
- LOGGER.error("get region group id fail");
+ LOGGER.error(ManagerMessages.GET_REGION_GROUP_ID_FAIL);
return new TSStatus(TSStatusCode.MIGRATE_REGION_ERROR.getStatusCode())
.setMessage("get region group id fail");
}
@@ -1139,7 +1142,8 @@ public TSStatus migrateRegion(TMigrateRegionReq migrateRegionReq) {
coordinatorForAddPeer,
coordinatorForRemovePeer));
LOGGER.info(
- "[MigrateRegion] Submit RegionMigrateProcedure successfully, Region: {}, Origin DataNode: {}, Dest DataNode: {}, Add Coordinator: {}, Remove Coordinator: {}",
+ ManagerMessages
+ .MIGRATEREGION_SUBMIT_REGIONMIGRATEPROCEDURE_SUCCESSFULLY_REGION_ORIGIN_DATANODE,
regionGroupId,
originalDataNode,
destDataNode,
@@ -1162,7 +1166,10 @@ public TSStatus reconstructRegion(TReconstructRegionReq req) {
configManager
.getPartitionManager()
.generateTConsensusGroupIdByRegionId(x)
- .orElseThrow(() -> new IllegalArgumentException("Region id " + x + " is invalid"));
+ .orElseThrow(
+ () ->
+ new IllegalArgumentException(
+ ManagerMessages.REGION_ID + x + " is invalid"));
final TDataNodeLocation coordinator =
handler
.filterDataNodeWithOtherRegionReplica(
@@ -1183,7 +1190,7 @@ public TSStatus reconstructRegion(TReconstructRegionReq req) {
reconstructRegionProcedure -> {
this.executor.submitProcedure(reconstructRegionProcedure);
LOGGER.info(
- "[ReconstructRegion] Submit ReconstructRegionProcedure successfully, {}",
+ ManagerMessages.RECONSTRUCTREGION_SUBMIT_RECONSTRUCTREGIONPROCEDURE_SUCCESSFULLY,
reconstructRegionProcedure);
});
}
@@ -1247,7 +1254,7 @@ private TSStatus extendOneRegion(int theRegionId, TExtendRegionReq req) {
if (optional.isPresent()) {
regionId = optional.get();
} else {
- LOGGER.error("get region group id fail");
+ LOGGER.error(ManagerMessages.GET_REGION_GROUP_ID_FAIL);
return new TSStatus(TSStatusCode.EXTEND_REGION_ERROR.getStatusCode())
.setMessage("get region group id fail");
}
@@ -1276,7 +1283,8 @@ private TSStatus extendOneRegion(int theRegionId, TExtendRegionReq req) {
AddRegionPeerProcedure procedure =
new AddRegionPeerProcedure(regionId, coordinator, targetDataNode);
this.executor.submitProcedure(procedure);
- LOGGER.info("[ExtendRegion] Submit AddRegionPeerProcedure successfully: {}", procedure);
+ LOGGER.info(
+ ManagerMessages.EXTENDREGION_SUBMIT_ADDREGIONPEERPROCEDURE_SUCCESSFULLY, procedure);
return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
}
@@ -1291,7 +1299,7 @@ private TSStatus removeOneRegion(int theRegionId, TRemoveRegionReq req) {
if (optional.isPresent()) {
regionId = optional.get();
} else {
- LOGGER.error("get region group id fail");
+ LOGGER.error(ManagerMessages.GET_REGION_GROUP_ID_FAIL);
return new TSStatus(TSStatusCode.REMOVE_REGION_PEER_ERROR.getStatusCode())
.setMessage("get region group id fail");
}
@@ -1324,7 +1332,7 @@ private TSStatus removeOneRegion(int theRegionId, TRemoveRegionReq req) {
// NodeManager.
// In this case, simply clean up the partition table once and do nothing else.
LOGGER.warn(
- "Remove region: Target DataNode {} not found, will simply clean up the partition table of region {} and do nothing else.",
+ ManagerMessages.REMOVE_REGION_TARGET_DATANODE_NOT_FOUND_WILL_SIMPLY_CLEAN_UP,
req.getDataNodeId(),
req.getRegionId());
this.executor
@@ -1340,7 +1348,8 @@ private TSStatus removeOneRegion(int theRegionId, TRemoveRegionReq req) {
new RemoveRegionPeerProcedure(regionId, coordinator, targetDataNode);
this.executor.submitProcedure(procedure);
LOGGER.info(
- "[RemoveRegionPeer] Submit RemoveRegionPeerProcedure successfully: {}", procedure);
+ ManagerMessages.REMOVEREGIONPEER_SUBMIT_REMOVEREGIONPEERPROCEDURE_SUCCESSFULLY,
+ procedure);
return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
}
@@ -1494,10 +1503,10 @@ public void createConsensusPipeAsync(TCreatePipeReq req) {
try {
CreatePipeProcedureV2 procedure = new CreatePipeProcedureV2(req);
executor.submitProcedure(procedure);
- LOGGER.info("Submitted async consensus pipe creation: {}", req.getPipeName());
+ LOGGER.info(ManagerMessages.SUBMITTED_ASYNC_CONSENSUS_PIPE_CREATION, req.getPipeName());
} catch (Exception e) {
LOGGER.warn(
- "Failed to submit async consensus pipe creation for {}: {}",
+ ManagerMessages.FAILED_TO_SUBMIT_ASYNC_CONSENSUS_PIPE_CREATION_FOR,
req.getPipeName(),
e.getMessage());
}
@@ -1605,10 +1614,10 @@ public void dropConsensusPipeAsync(String pipeName) {
try {
DropPipeProcedureV2 procedure = new DropPipeProcedureV2(pipeName);
executor.submitProcedure(procedure);
- LOGGER.info("Submitted async consensus pipe drop: {}", pipeName);
+ LOGGER.info(ManagerMessages.SUBMITTED_ASYNC_CONSENSUS_PIPE_DROP, pipeName);
} catch (Exception e) {
LOGGER.warn(
- "Failed to submit async consensus pipe drop for {}: {}", pipeName, e.getMessage());
+ ManagerMessages.FAILED_TO_SUBMIT_ASYNC_CONSENSUS_PIPE_DROP_FOR, pipeName, e.getMessage());
}
}
@@ -1649,9 +1658,10 @@ public void pipeHandleLeaderChange(
final long procedureId =
executor.submitProcedure(
new PipeHandleLeaderChangeProcedure(dataRegionGroupToOldAndNewLeaderPairMap));
- LOGGER.info("PipeHandleLeaderChangeProcedure was submitted, procedureId: {}.", procedureId);
+ LOGGER.info(
+ ManagerMessages.PIPEHANDLELEADERCHANGEPROCEDURE_WAS_SUBMITTED_PROCEDUREID, procedureId);
} catch (Exception e) {
- LOGGER.warn("PipeHandleLeaderChangeProcedure was failed to submit.", e);
+ LOGGER.warn(ManagerMessages.PIPEHANDLELEADERCHANGEPROCEDURE_WAS_FAILED_TO_SUBMIT, e);
}
}
@@ -1662,10 +1672,11 @@ public boolean pipeHandleMetaChange(
executor.submitProcedure(
new PipeHandleMetaChangeProcedure(
needWriteConsensusOnConfigNodes, needPushPipeMetaToDataNodes));
- LOGGER.info("PipeHandleMetaChangeProcedure was submitted, procedureId: {}.", procedureId);
+ LOGGER.info(
+ ManagerMessages.PIPEHANDLEMETACHANGEPROCEDURE_WAS_SUBMITTED_PROCEDUREID, procedureId);
return true;
} catch (Exception e) {
- LOGGER.warn("PipeHandleMetaChangeProcedure was failed to submit.", e);
+ LOGGER.warn(ManagerMessages.PIPEHANDLEMETACHANGEPROCEDURE_WAS_FAILED_TO_SUBMIT, e);
return false;
}
}
@@ -1886,7 +1897,8 @@ protected TSStatus waitingProcedureFinished(final Procedure> procedure) {
protected TSStatus waitingProcedureFinished(
Procedure> procedure, final long procedureWaitRetryTimeout) {
if (procedure == null) {
- LOGGER.error("Unexpected null procedure parameters for waitingProcedureFinished");
+ LOGGER.error(
+ ManagerMessages.UNEXPECTED_NULL_PROCEDURE_PARAMETERS_FOR_WAITINGPROCEDUREFINISHED);
return RpcUtils.getStatus(TSStatusCode.INTERNAL_SERVER_ERROR);
}
TSStatus status;
@@ -2330,7 +2342,7 @@ public boolean isExistUnfinishedProcedure(
for (Procedure procedure : getExecutor().getProcedures().values()) {
if (!procedure.isFinished() && procedureClass.isInstance(procedure)) {
LOGGER.info(
- "[{}] procedure details are {}",
+ ManagerMessages.PROCEDURE_DETAILS_ARE,
procedureClass.getSimpleName(),
procedure.toStringDetails());
return true;
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/RetryFailedTasksThread.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/RetryFailedTasksThread.java
index c1ad7f4591bb8..748d6378462f0 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/RetryFailedTasksThread.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/RetryFailedTasksThread.java
@@ -27,6 +27,7 @@
import org.apache.iotdb.commons.concurrent.threadpool.ScheduledExecutorUtil;
import org.apache.iotdb.confignode.conf.ConfigNodeConfig;
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.load.LoadManager;
import org.apache.iotdb.confignode.manager.node.NodeManager;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -83,7 +84,7 @@ public void startRetryFailedTasksService() {
0,
HEARTBEAT_INTERVAL,
TimeUnit.MILLISECONDS);
- LOGGER.info("RetryFailMissions service is started successfully.");
+ LOGGER.info(ManagerMessages.RETRYFAILMISSIONS_SERVICE_IS_STARTED_SUCCESSFULLY);
}
}
}
@@ -94,7 +95,7 @@ public void stopRetryFailedTasksService() {
if (currentFailedTasksRetryThreadFuture != null) {
currentFailedTasksRetryThreadFuture.cancel(false);
currentFailedTasksRetryThreadFuture = null;
- LOGGER.info("RetryFailMissions service is stopped successfully.");
+ LOGGER.info(ManagerMessages.RETRYFAILMISSIONS_SERVICE_IS_STOPPED_SUCCESSFULLY);
}
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/TTLManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/TTLManager.java
index b5c2e4900d728..ce3b3423ec175 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/TTLManager.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/TTLManager.java
@@ -29,6 +29,7 @@
import org.apache.iotdb.confignode.consensus.request.write.database.DatabaseSchemaPlan;
import org.apache.iotdb.confignode.consensus.request.write.database.SetTTLPlan;
import org.apache.iotdb.confignode.consensus.response.ttl.ShowTTLResp;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.persistence.TTLInfo;
import org.apache.iotdb.consensus.common.DataSet;
import org.apache.iotdb.consensus.exception.ConsensusException;
@@ -113,7 +114,7 @@ public DataSet showTTL(ShowTTLPlan showTTLPlan) {
try {
return configManager.getConsensusManager().read(showTTLPlan);
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the read API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_READ_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
TSStatus tsStatus = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
tsStatus.setMessage(e.getMessage());
ShowTTLResp resp = new ShowTTLResp();
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/TriggerManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/TriggerManager.java
index 5f64c4963125f..89d698e08bef5 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/TriggerManager.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/TriggerManager.java
@@ -37,6 +37,8 @@
import org.apache.iotdb.confignode.consensus.response.trigger.TransferringTriggersResp;
import org.apache.iotdb.confignode.consensus.response.trigger.TriggerLocationResp;
import org.apache.iotdb.confignode.consensus.response.trigger.TriggerTableResp;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.consensus.ConsensusManager;
import org.apache.iotdb.confignode.manager.node.NodeManager;
import org.apache.iotdb.confignode.persistence.TriggerInfo;
@@ -145,7 +147,7 @@ public TGetTriggerTableResp getTriggerTable(boolean onlyStateful) {
configManager.getConsensusManager().read(new GetTriggerTablePlan(onlyStateful)))
.convertToThriftResponse();
} catch (IOException | ConsensusException e) {
- LOGGER.error("Fail to get TriggerTable", e);
+ LOGGER.error(ManagerMessages.FAIL_TO_GET_TRIGGERTABLE, e);
return new TGetTriggerTableResp(
new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode())
.setMessage(e.getMessage()),
@@ -159,7 +161,7 @@ public TGetLocationForTriggerResp getLocationOfStatefulTrigger(String triggerNam
configManager.getConsensusManager().read(new GetTriggerLocationPlan(triggerName)))
.convertToThriftResponse();
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the read API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_READ_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
return new TGetLocationForTriggerResp(
new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode())
.setMessage(e.getMessage()));
@@ -172,7 +174,7 @@ public TGetJarInListResp getTriggerJar(TGetJarInListReq req) {
configManager.getConsensusManager().read(new GetTriggerJarPlan(req.getJarNameList())))
.convertToThriftResponse();
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the read API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_READ_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
res.setMessage(e.getMessage());
return new JarResp(res, Collections.emptyList()).convertToThriftResponse();
@@ -230,7 +232,7 @@ public TSStatus transferTrigger(
}
}
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the read/write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ManagerMessages.FAILED_IN_THE_READ_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER, e);
TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
res.setMessage(e.getMessage());
return res;
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/UDFManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/UDFManager.java
index cfe21f9f37bed..9768b1211709d 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/UDFManager.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/UDFManager.java
@@ -40,6 +40,8 @@
import org.apache.iotdb.confignode.consensus.request.write.function.UpdateFunctionPlan;
import org.apache.iotdb.confignode.consensus.response.JarResp;
import org.apache.iotdb.confignode.consensus.response.function.FunctionTableResp;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.persistence.UDFInfo;
import org.apache.iotdb.confignode.rpc.thrift.TCreateFunctionReq;
import org.apache.iotdb.confignode.rpc.thrift.TGetJarInListReq;
@@ -105,7 +107,7 @@ public TSStatus createFunction(TCreateFunctionReq req) {
final boolean needToSaveJar = isUsingURI && udfInfo.needToSaveJar(jarName);
- LOGGER.info("Start to add UDF [{}] in UDF_Table on Config Nodes", udfName);
+ LOGGER.info(ManagerMessages.START_TO_ADD_UDF_IN_UDF_TABLE_ON_CONFIG_NODES, udfName);
CreateFunctionPlan createFunctionPlan =
new CreateFunctionPlan(udfInformation, needToSaveJar ? new Binary(jarFile) : null);
if (needToSaveJar && createFunctionPlan.getSerializedSize() > planSizeLimit) {
@@ -128,7 +130,7 @@ public TSStatus createFunction(TCreateFunctionReq req) {
jarName,
jarMD5);
LOGGER.info(
- "Start to create UDF [{}] on Data Nodes, needToSaveJar[{}]", udfName, needToSaveJar);
+ ManagerMessages.START_TO_CREATE_UDF_ON_DATA_NODES_NEEDTOSAVEJAR, udfName, needToSaveJar);
final TSStatus dataNodesStatus =
RpcUtils.squashResponseStatusList(
createFunctionOnDataNodes(udfInformation, needToSaveJar ? jarFile : null));
@@ -136,7 +138,7 @@ public TSStatus createFunction(TCreateFunctionReq req) {
return dataNodesStatus;
}
- LOGGER.info("Start to activate UDF [{}] in UDF_Table on Config Nodes", udfName);
+ LOGGER.info(ManagerMessages.START_TO_ACTIVATE_UDF_IN_UDF_TABLE_ON_CONFIG_NODES, udfName);
return configManager.getConsensusManager().write(new UpdateFunctionPlan(udfInformation));
} catch (IoTDBRuntimeException e) {
return new TSStatus(e.getErrorCode()).setMessage(e.getMessage());
@@ -218,7 +220,7 @@ public TGetUDFTableResp getUDFTable(Model model) {
configManager.getConsensusManager().read(new GetFunctionTablePlan(model)))
.convertToThriftResponse();
} catch (IOException | ConsensusException e) {
- LOGGER.error("Fail to get UDFTable", e);
+ LOGGER.error(ManagerMessages.FAIL_TO_GET_UDFTABLE, e);
return new TGetUDFTableResp(
new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode())
.setMessage(e.getMessage()),
@@ -232,7 +234,7 @@ public TGetUDFTableResp getAllUDFTable() {
configManager.getConsensusManager().read(new GetAllFunctionTablePlan()))
.convertToThriftResponse();
} catch (IOException | ConsensusException e) {
- LOGGER.error("Fail to get AllUDFTable", e);
+ LOGGER.error(ManagerMessages.FAIL_TO_GET_ALLUDFTABLE, e);
return new TGetUDFTableResp(
new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode())
.setMessage(e.getMessage()),
@@ -246,7 +248,7 @@ public TGetJarInListResp getUDFJar(TGetJarInListReq req) {
configManager.getConsensusManager().read(new GetUDFJarPlan(req.getJarNameList())))
.convertToThriftResponse();
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the read API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_READ_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
res.setMessage(e.getMessage());
return new JarResp(res, Collections.emptyList()).convertToThriftResponse();
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/consensus/ConsensusManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/consensus/ConsensusManager.java
index 51773d83923fc..8b4eeed5a1b58 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/consensus/ConsensusManager.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/consensus/ConsensusManager.java
@@ -34,6 +34,7 @@
import org.apache.iotdb.confignode.consensus.request.read.ConfigPhysicalReadPlan;
import org.apache.iotdb.confignode.consensus.statemachine.ConfigRegionStateMachine;
import org.apache.iotdb.confignode.exception.AddPeerException;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.IManager;
import org.apache.iotdb.confignode.manager.node.NodeManager;
import org.apache.iotdb.consensus.ConsensusFactory;
@@ -88,7 +89,7 @@ public ConsensusManager(IManager configManager, ConfigRegionStateMachine stateMa
public void start() throws IOException {
consensusImpl.start();
if (SystemPropertiesUtils.isRestarted()) {
- LOGGER.info("Init ConsensusManager successfully when restarted");
+ LOGGER.info(ManagerMessages.INIT_CONSENSUSMANAGER_SUCCESSFULLY_WHEN_RESTARTED);
} else if (ConfigNodeDescriptor.getInstance().isSeedConfigNode()) {
// Create ConsensusGroup that contains only itself
// if the current ConfigNode is Seed-ConfigNode
@@ -101,7 +102,9 @@ public void start() throws IOException {
new TEndPoint(CONF.getInternalAddress(), CONF.getConsensusPort()))));
} catch (ConsensusException e) {
LOGGER.error(
- "Something wrong happened while calling consensus layer's createLocalPeer API.", e);
+ ManagerMessages
+ .SOMETHING_WRONG_HAPPENED_WHILE_CALLING_CONSENSUS_LAYER_S_CREATELOCALPEER_API,
+ e);
}
}
isInitialized = true;
@@ -262,7 +265,7 @@ private void upgrade() {
File oldWalDir = new File(consensusDir, "simple");
if (oldWalDir.exists() && !oldWalDir.renameTo(new File(getConfigRegionDir()))) {
LOGGER.warn(
- "upgrade ConfigNode consensus wal dir for SimpleConsensus from version/1.0 to version/1.1 failed, "
+ ManagerMessages.UPGRADE_CONFIGNODE_CONSENSUS_WAL_DIR_FOR_SIMPLECONSENSUS_FROM_VERSION_1
+ "you maybe need to rename the simple dir to 0_0 manually.");
}
}
@@ -276,7 +279,7 @@ private void upgrade() {
*/
public void createPeerForConsensusGroup(List configNodeLocations)
throws ConsensusException {
- LOGGER.info("createPeerForConsensusGroup {}...", configNodeLocations);
+ LOGGER.info(ManagerMessages.CREATEPEERFORCONSENSUSGROUP, configNodeLocations);
List peerList = new ArrayList<>();
for (TConfigNodeLocation configNodeLocation : configNodeLocations) {
@@ -367,7 +370,7 @@ private Peer getLeaderPeer() {
try {
TimeUnit.MILLISECONDS.sleep(RETRY_WAIT_TIME_MS);
} catch (InterruptedException e) {
- LOGGER.warn("ConsensusManager getLeaderPeer been interrupted, ", e);
+ LOGGER.warn(ManagerMessages.CONSENSUSMANAGER_GETLEADERPEER_BEEN_INTERRUPTED, e);
Thread.currentThread().interrupt();
}
}
@@ -437,7 +440,8 @@ public TSStatus confirmLeader() {
Thread.sleep(RETRY_WAIT_TIME_MS);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- LOGGER.warn("Unexpected interruption during waiting for configNode leader ready.");
+ LOGGER.warn(
+ ManagerMessages.UNEXPECTED_INTERRUPTION_DURING_WAITING_FOR_CONFIGNODE_LEADER_READY);
break;
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/cq/CQManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/cq/CQManager.java
index 5726b3ce82698..c4c1e8aede97f 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/cq/CQManager.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/cq/CQManager.java
@@ -28,6 +28,7 @@
import org.apache.iotdb.confignode.consensus.request.read.cq.ShowCQPlan;
import org.apache.iotdb.confignode.consensus.request.write.cq.DropCQPlan;
import org.apache.iotdb.confignode.consensus.response.cq.ShowCQResp;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.persistence.cq.CQInfo;
import org.apache.iotdb.confignode.rpc.thrift.TCreateCQReq;
@@ -80,7 +81,7 @@ public TSStatus dropCQ(TDropCQReq req) {
try {
return configManager.getConsensusManager().write(new DropCQPlan(req.cqId));
} catch (ConsensusException e) {
- LOGGER.warn("Unexpected error happened while dropping cq {}: ", req.cqId, e);
+ LOGGER.warn(ManagerMessages.UNEXPECTED_ERROR_HAPPENED_WHILE_DROPPING_CQ, req.cqId, e);
// consensus layer related errors
TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
res.setMessage(e.getMessage());
@@ -93,7 +94,7 @@ public TShowCQResp showCQ() {
DataSet response = configManager.getConsensusManager().read(new ShowCQPlan());
return ((ShowCQResp) response).convertToRpcShowCQResp();
} catch (ConsensusException e) {
- LOGGER.warn("Unexpected error happened while showing cq: ", e);
+ LOGGER.warn(ManagerMessages.UNEXPECTED_ERROR_HAPPENED_WHILE_SHOWING_CQ, e);
// consensus layer related errors
TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
res.setMessage(e.getMessage());
@@ -123,7 +124,8 @@ public void startCQScheduler() {
} catch (Exception t) {
// just print the error log because we should make sure we can start a new cq schedule pool
// successfully in the next steps
- LOGGER.error("Error happened while shutting down previous cq schedule thread pool.", t);
+ LOGGER.error(
+ ManagerMessages.ERROR_HAPPENED_WHILE_SHUTTING_DOWN_PREVIOUS_CQ_SCHEDULE_THREAD_POOL, t);
}
// 2. start a new schedule thread pool
@@ -140,7 +142,7 @@ public void startCQScheduler() {
allCQs = ((ShowCQResp) response).getCqList();
} catch (ConsensusException e) {
// consensus layer related errors
- LOGGER.warn("Unexpected error happened while fetching cq list: ", e);
+ LOGGER.warn(ManagerMessages.UNEXPECTED_ERROR_HAPPENED_WHILE_FETCHING_CQ_LIST, e);
try {
Thread.sleep(500);
} catch (InterruptedException ie) {
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/cq/CQScheduleTask.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/cq/CQScheduleTask.java
index 6125edc1ef7bd..c58f5ade9bcd0 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/cq/CQScheduleTask.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/cq/CQScheduleTask.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.commons.cq.TimeoutPolicy;
import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager;
import org.apache.iotdb.confignode.consensus.request.write.cq.UpdateCQLastExecTimePlan;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.persistence.cq.CQInfo;
import org.apache.iotdb.confignode.rpc.thrift.TCreateCQReq;
@@ -172,13 +173,13 @@ public void run() {
configManager.getNodeManager().getLowestLoadDataNode();
// no usable DataNode to execute CQ
if (!targetDataNode.isPresent()) {
- LOGGER.warn("There is no RUNNING DataNode to execute CQ {}", cqId);
+ LOGGER.warn(ManagerMessages.THERE_IS_NO_RUNNING_DATANODE_TO_EXECUTE_CQ, cqId);
if (needSubmit()) {
submitSelf(retryWaitTimeInMS, TimeUnit.MILLISECONDS);
}
} else {
LOGGER.info(
- "[StartExecuteCQ] execute CQ {} on DataNode[{}], time range is [{}, {}), current time is {}",
+ ManagerMessages.STARTEXECUTECQ_EXECUTE_CQ_ON_DATANODE_TIME_RANGE_IS_CURRENT_TIME,
cqId,
targetDataNode.get().dataNodeId,
startTime,
@@ -192,7 +193,7 @@ public void run() {
.getAsyncClient(targetDataNode.get());
client.executeCQ(executeCQReq, new AsyncExecuteCQCallback(startTime, endTime));
} catch (Exception t) {
- LOGGER.warn("Execute CQ {} failed", cqId, t);
+ LOGGER.warn(ManagerMessages.EXECUTE_CQ_FAILED, cqId, t);
if (needSubmit()) {
submitSelf(retryWaitTimeInMS, TimeUnit.MILLISECONDS);
}
@@ -232,7 +233,7 @@ private void updateExecutionTime() {
executionTime =
executionTime + ((now - executionTime - 1) / everyInterval + 1) * everyInterval;
} else {
- throw new IllegalArgumentException("Unknown TimeoutPolicy: " + timeoutPolicy);
+ throw new IllegalArgumentException(ManagerMessages.UNKNOWN_TIMEOUTPOLICY + timeoutPolicy);
}
}
@@ -241,7 +242,7 @@ public void onComplete(TSStatus response) {
if (response.code == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.info(
- "[EndExecuteCQ] {}, time range is [{}, {}), current time is {}",
+ ManagerMessages.ENDEXECUTECQ_TIME_RANGE_IS_CURRENT_TIME_IS,
cqId,
startTime,
endTime,
@@ -261,13 +262,13 @@ public void onComplete(TSStatus response) {
// may still update failed because stale CQTask in old leader may update it in advance
if (result.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.warn(
- "Failed to update the last execution time {} of CQ {}, because {}",
+ ManagerMessages.FAILED_TO_UPDATE_THE_LAST_EXECUTION_TIME_OF_CQ_BECAUSE,
executionTime,
cqId,
result.getMessage());
// no such cq, we don't need to submit it again
if (result.getCode() == TSStatusCode.NO_SUCH_CQ.getStatusCode()) {
- LOGGER.info("Stop submitting CQ {} because {}", cqId, result.getMessage());
+ LOGGER.info(ManagerMessages.STOP_SUBMITTING_CQ_BECAUSE, cqId, result.getMessage());
return;
}
}
@@ -277,12 +278,11 @@ public void onComplete(TSStatus response) {
submitSelf();
} else {
LOGGER.info(
- "Stop submitting CQ {} because current node is not leader or current scheduled thread pool is shut down.",
- cqId);
+ ManagerMessages.STOP_SUBMITTING_CQ_BECAUSE_CURRENT_NODE_IS_NOT_LEADER_OR, cqId);
}
} else {
- LOGGER.warn("Execute CQ {} failed, TSStatus is {}", cqId, response);
+ LOGGER.warn(ManagerMessages.EXECUTE_CQ_FAILED_TSSTATUS_IS, cqId, response);
if (needSubmit()) {
submitSelf(retryWaitTimeInMS, TimeUnit.MILLISECONDS);
}
@@ -291,7 +291,7 @@ public void onComplete(TSStatus response) {
@Override
public void onError(Exception exception) {
- LOGGER.warn("Execute CQ {} failed", cqId, exception);
+ LOGGER.warn(ManagerMessages.EXECUTE_CQ_FAILED, cqId, exception);
if (needSubmit()) {
submitSelf(retryWaitTimeInMS, TimeUnit.MILLISECONDS);
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/externalservice/ExternalServiceInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/externalservice/ExternalServiceInfo.java
index 9c5ee95343b34..a3cf89515b5ef 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/externalservice/ExternalServiceInfo.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/externalservice/ExternalServiceInfo.java
@@ -29,6 +29,8 @@
import org.apache.iotdb.confignode.consensus.request.write.externalservice.StartExternalServicePlan;
import org.apache.iotdb.confignode.consensus.request.write.externalservice.StopExternalServicePlan;
import org.apache.iotdb.confignode.consensus.response.externalservice.ShowExternalServiceResp;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.rpc.TSStatusCode;
import org.apache.tsfile.utils.ReadWriteIOUtils;
@@ -212,7 +214,7 @@ private void serializeServiceInfoWithCRC(ServiceInfo serviceInfo, OutputStream o
private void deserializeInfos(InputStream inputStream) throws IOException {
if (ReadWriteIOUtils.readByte(inputStream) != SERIALIZATION_VERSION) {
- throw new IOException("Incorrect version of " + SNAPSHOT_FILENAME);
+ throw new IOException(ManagerMessages.INCORRECT_VERSION_OF + SNAPSHOT_FILENAME);
}
int outerSize = ReadWriteIOUtils.readInt(inputStream);
@@ -244,7 +246,7 @@ private ServiceInfo deserializeServiceInfoConsiderCRC(InputStream inputStream)
int expectedCRC = ReadWriteIOUtils.readInt(inputStream);
if ((int) crc32.getValue() != expectedCRC) {
- LOGGER.error("Mismatched CRC32 code when deserializing service info.");
+ LOGGER.error(ManagerMessages.MISMATCHED_CRC32_CODE_WHEN_DESERIALIZING_SERVICE_INFO);
return null;
}
@@ -261,7 +263,7 @@ public boolean processTakeSnapshot(File snapshotDir) throws IOException {
File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME);
if (snapshotFile.exists() && snapshotFile.isFile()) {
LOGGER.error(
- "Failed to take snapshot, because snapshot file [{}] is already exist.",
+ ConfigNodeMessages.FAILED_TO_TAKE_SNAPSHOT_BECAUSE_SNAPSHOT_FILE_IS_ALREADY_EXIST,
snapshotFile.getAbsolutePath());
return false;
}
@@ -288,7 +290,7 @@ public void processLoadSnapshot(File snapshotDir) throws IOException {
if (!snapshotFile.isFile()) {
LOGGER.error(
- "Failed to load snapshot,snapshot file [{}] is not a normal file.",
+ ManagerMessages.FAILED_TO_LOAD_SNAPSHOT_SNAPSHOT_FILE_IS_NOT_A_NORMAL,
snapshotFile.getAbsolutePath());
return;
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/externalservice/ExternalServiceManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/externalservice/ExternalServiceManager.java
index 3e6569a45e28b..d050e564b58f9 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/externalservice/ExternalServiceManager.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/externalservice/ExternalServiceManager.java
@@ -33,6 +33,7 @@
import org.apache.iotdb.confignode.consensus.request.write.externalservice.StartExternalServicePlan;
import org.apache.iotdb.confignode.consensus.request.write.externalservice.StopExternalServicePlan;
import org.apache.iotdb.confignode.consensus.response.externalservice.ShowExternalServiceResp;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.rpc.thrift.TCreateExternalServiceReq;
import org.apache.iotdb.consensus.exception.ConsensusException;
@@ -71,7 +72,7 @@ public TSStatus createService(TCreateExternalServiceReq req) {
ServiceInfo.ServiceType.USER_DEFINED)));
} catch (ConsensusException e) {
LOGGER.warn(
- "Unexpected error happened while creating Service {} on DataNode {}: ",
+ ManagerMessages.UNEXPECTED_ERROR_HAPPENED_WHILE_CREATING_SERVICE_ON_DATANODE,
req.getServiceName(),
req.getDataNodeId(),
e);
@@ -89,7 +90,7 @@ public TSStatus startService(int dataNodeId, String serviceName) {
.write(new StartExternalServicePlan(dataNodeId, serviceName));
} catch (ConsensusException e) {
LOGGER.warn(
- "Unexpected error happened while starting Service {} on DataNode {}: ",
+ ManagerMessages.UNEXPECTED_ERROR_HAPPENED_WHILE_STARTING_SERVICE_ON_DATANODE,
serviceName,
dataNodeId,
e);
@@ -107,7 +108,7 @@ public TSStatus stopService(int dataNodeId, String serviceName) {
.write(new StopExternalServicePlan(dataNodeId, serviceName));
} catch (ConsensusException e) {
LOGGER.warn(
- "Unexpected error happened while stopping Service {} on DataNode {}: ",
+ ManagerMessages.UNEXPECTED_ERROR_HAPPENED_WHILE_STOPPING_SERVICE_ON_DATANODE,
serviceName,
dataNodeId,
e);
@@ -125,7 +126,7 @@ public TSStatus dropService(int dataNodeId, String serviceName) {
.write(new DropExternalServicePlan(dataNodeId, serviceName));
} catch (ConsensusException e) {
LOGGER.warn(
- "Unexpected error happened while dropping Service {} on DataNode {}: ",
+ ManagerMessages.UNEXPECTED_ERROR_HAPPENED_WHILE_DROPPING_SERVICE_ON_DATANODE,
serviceName,
dataNodeId,
e);
@@ -180,7 +181,7 @@ public TExternalServiceListResp showService(int dataNodeId) {
response.getServiceInfoEntryList().addAll(builtInResp.getExternalServiceInfos()));
return response.convertToRpcShowExternalServiceResp();
} catch (ConsensusException e) {
- LOGGER.warn("Unexpected error happened while showing Service: ", e);
+ LOGGER.warn(ManagerMessages.UNEXPECTED_ERROR_HAPPENED_WHILE_SHOWING_SERVICE, e);
// consensus layer related errors
TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
res.setMessage(e.getMessage());
@@ -199,7 +200,7 @@ public List getUserDefinedService(int dataNodeId) {
.read(new ShowExternalServicePlan(Collections.singleton(dataNodeId)));
return response.getServiceInfoEntryList();
} catch (ConsensusException e) {
- LOGGER.warn("Unexpected error happened while getting user-defined Service: ", e);
+ LOGGER.warn(ManagerMessages.UNEXPECTED_ERROR_HAPPENED_WHILE_GETTING_USER_DEFINED_SERVICE, e);
return Collections.emptyList();
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/PartitionBalancer.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/PartitionBalancer.java
index f76b54fec3063..1ad0914e82de9 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/PartitionBalancer.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/PartitionBalancer.java
@@ -30,6 +30,7 @@
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
import org.apache.iotdb.confignode.exception.DatabaseNotExistsException;
import org.apache.iotdb.confignode.exception.NoAvailableRegionGroupException;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.IManager;
import org.apache.iotdb.confignode.manager.load.balancer.partition.DataPartitionPolicyTable;
import org.apache.iotdb.confignode.manager.partition.PartitionManager;
@@ -84,7 +85,8 @@ public PartitionBalancer(IManager configManager) {
break;
default:
LOGGER.warn(
- "Unknown DataPartition allocation strategy {}, using INHERIT strategy by default.",
+ ManagerMessages
+ .UNKNOWN_DATAPARTITION_ALLOCATION_STRATEGY_USING_INHERIT_STRATEGY_BY_DEFAULT,
ConfigNodeDescriptor.getInstance().getConf().getDataPartitionAllocationStrategy());
this.dataPartitionAllocationStrategy = DataPartitionAllocationStrategy.INHERIT;
break;
@@ -260,7 +262,7 @@ private void inheritAllocationStrategy(
availableDataRegionGroupCounter.put(
greedyGroupId, availableDataRegionGroupCounter.get(greedyGroupId) + 1);
LOGGER.warn(
- "[PartitionBalancer] The SeriesSlot: {} in TimeSlot: {} will be allocated to DataRegionGroup: {}, because the original target: {} is currently unavailable.",
+ ManagerMessages.PARTITIONBALANCER_THE_SERIESSLOT_IN_TIMESLOT_WILL_BE,
seriesPartitionSlot,
timePartitionSlot,
greedyGroupId,
@@ -330,7 +332,7 @@ public void reBalanceDataPartitionPolicy(String database) {
}
} catch (DatabaseNotExistsException e) {
- LOGGER.error("Database {} not exists when updateDataAllotTable", database);
+ LOGGER.error(ManagerMessages.DATABASE_NOT_EXISTS_WHEN_UPDATEDATAALLOTTABLE, database);
}
}
@@ -354,7 +356,8 @@ public void setupPartitionBalancer() {
dataPartitionPolicyTable.setDataAllotMap(
getPartitionManager().getLastDataAllotTable(database));
} catch (DatabaseNotExistsException e) {
- LOGGER.error("Database {} not exists when setupPartitionBalancer", database);
+ LOGGER.error(
+ ManagerMessages.DATABASE_NOT_EXISTS_WHEN_SETUPPARTITIONBALANCER, database);
} finally {
dataPartitionPolicyTable.releaseLock();
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RouteBalancer.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RouteBalancer.java
index 37d86daf1532a..e3954a59385ef 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RouteBalancer.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RouteBalancer.java
@@ -31,6 +31,7 @@
import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext;
import org.apache.iotdb.confignode.conf.ConfigNodeConfig;
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.IManager;
import org.apache.iotdb.confignode.manager.ProcedureManager;
import org.apache.iotdb.confignode.manager.load.LoadManager;
@@ -195,7 +196,7 @@ private void balanceRegionLeader(
int oldLeaderId = currentLeaderMap.get(regionGroupId);
if (newLeaderId != -1 && !newLeaderId.equals(oldLeaderId)) {
LOGGER.info(
- "[LeaderBalancer] Try to change the leader of Region: {} to DataNode: {} ",
+ ManagerMessages.LEADERBALANCER_TRY_TO_CHANGE_THE_LEADER_OF_REGION_TO_DATANODE,
regionGroupId,
newLeaderId);
switch (consensusProtocolClass) {
@@ -271,7 +272,7 @@ private void balanceRegionLeader(
lastFailedTimeForLeaderBalance.put(
clientHandler.getRequest(i).getRegionId(), currentTime);
LOGGER.error(
- "[LeaderBalancer] Failed to change the leader of Region: {} to DataNode: {}",
+ ManagerMessages.LEADERBALANCER_FAILED_TO_CHANGE_THE_LEADER_OF_REGION_TO_DATANODE,
clientHandler.getRequest(i).getRegionId(),
clientHandler.getRequest(i).getNewLeaderNode().getDataNodeId());
}
@@ -305,7 +306,7 @@ private void invalidateSchemaCacheOfOldLeaders() {
final TDataNodeLocation dataNodeLocation =
getNodeManager().getRegisteredDataNode(dataNodeId).getLocation();
if (dataNodeLocation == null) {
- LOGGER.warn("DataNodeLocation is null, datanodeId {}", dataNodeId);
+ LOGGER.warn(ManagerMessages.DATANODELOCATION_IS_NULL_DATANODEID, dataNodeId);
return;
}
invalidateSchemaCacheRequestHandler.putNodeLocation(
@@ -347,12 +348,13 @@ private void flushOldLeaderIfIoTV2() {
TSStatus result = configManager.flushOnSpecificDN(flushReq, oldLeaderDataNodeLocation);
if (result.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.info(
- "[IoTConsensusV2 Leader Changed] Successfully flush old leader {} for region {}",
+ ManagerMessages
+ .IOTCONSENSUSV2_LEADER_CHANGED_SUCCESSFULLY_FLUSH_OLD_LEADER_FOR_REGION,
oldLeaderId,
regionGroupIds);
} else {
LOGGER.info(
- "[IoTConsensusV2 Leader Changed] Failed to flush old leader {} for region {}",
+ ManagerMessages.IOTCONSENSUSV2_LEADER_CHANGED_FAILED_TO_FLUSH_OLD_LEADER_FOR_REGION,
oldLeaderId,
regionGroupIds);
}
@@ -435,13 +437,13 @@ private void broadcastLatestRegionPriorityMap() {
private void recordRegionPriorityMap(
Map> differentPriorityMap) {
- LOGGER.info("[RegionPriority] RegionPriorityMap: ");
+ LOGGER.info(ManagerMessages.REGIONPRIORITY_REGIONPRIORITYMAP);
for (Map.Entry>
regionPriorityEntry : differentPriorityMap.entrySet()) {
if (!Objects.equals(
regionPriorityEntry.getValue().getRight(), regionPriorityEntry.getValue().getLeft())) {
LOGGER.info(
- "[RegionPriority]\t {}: {}->{}",
+ ManagerMessages.REGIONPRIORITY,
regionPriorityEntry.getKey(),
regionPriorityEntry.getValue().getLeft() == null
? "null"
@@ -493,7 +495,8 @@ public void clearRegionPriority() {
public void waitForPriorityUpdate(List regionGroupIds) {
long startTime = System.currentTimeMillis();
LOGGER.info(
- "[RegionPriority] Wait for Region priority update of RegionGroups: {}", regionGroupIds);
+ ManagerMessages.REGIONPRIORITY_WAIT_FOR_REGION_PRIORITY_UPDATE_OF_REGIONGROUPS,
+ regionGroupIds);
while (System.currentTimeMillis() - startTime <= REGION_PRIORITY_WAITING_TIMEOUT) {
AtomicBoolean allRegionPriorityCalculated = new AtomicBoolean(true);
priorityMapLock.readLock().lock();
@@ -509,7 +512,7 @@ public void waitForPriorityUpdate(List regionGroupIds) {
}
if (allRegionPriorityCalculated.get()) {
LOGGER.info(
- "[RegionPriority] The routing priority of RegionGroups: {} is calculated.",
+ ManagerMessages.REGIONPRIORITY_THE_ROUTING_PRIORITY_OF_REGIONGROUPS_IS_CALCULATED,
regionGroupIds);
return;
}
@@ -517,13 +520,13 @@ public void waitForPriorityUpdate(List regionGroupIds) {
TimeUnit.MILLISECONDS.sleep(WAIT_PRIORITY_INTERVAL);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- LOGGER.warn("Interrupt when wait for calculating Region priority", e);
+ LOGGER.warn(ManagerMessages.INTERRUPT_WHEN_WAIT_FOR_CALCULATING_REGION_PRIORITY, e);
return;
}
}
LOGGER.warn(
- "[RegionPriority] The routing priority of RegionGroups: {} is not determined after 10 heartbeat interval. Some function might fail.",
+ ManagerMessages.REGIONPRIORITY_THE_ROUTING_PRIORITY_OF_REGIONGROUPS_IS_NOT_DETERMINED_AFTER,
regionGroupIds);
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/partition/DataPartitionPolicyTable.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/partition/DataPartitionPolicyTable.java
index d8289c61aa24c..334f7983d216b 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/partition/DataPartitionPolicyTable.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/partition/DataPartitionPolicyTable.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.commons.structure.BalanceTreeMap;
import org.apache.iotdb.confignode.conf.ConfigNodeConfig;
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -75,7 +76,7 @@ public TConsensusGroupId getRegionGroupIdOrActivateIfNecessary(
seriesPartitionSlotCounter.put(
regionGroupId, seriesPartitionSlotCounter.get(regionGroupId) + 1);
LOGGER.info(
- "[ActivateDataAllotTable] Activate SeriesPartitionSlot {} "
+ ManagerMessages.ACTIVATEDATAALLOTTABLE_ACTIVATE_SERIESPARTITIONSLOT
+ "to RegionGroup {}, SeriesPartitionSlot Count: {}",
seriesPartitionSlot,
regionGroupId,
@@ -163,7 +164,7 @@ public void logDataAllotTable(String database) {
.forEach(
regionGroupId ->
LOGGER.info(
- "[ReBalanceDataAllotTable] Database: {}, "
+ ManagerMessages.REBALANCEDATAALLOTTABLE_DATABASE
+ "RegionGroupId: {}, SeriesPartitionSlot Count: {}",
database,
regionGroupId,
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyRegionGroupAllocator.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyRegionGroupAllocator.java
index 0b34300a12cb0..33859233d908f 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyRegionGroupAllocator.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyRegionGroupAllocator.java
@@ -23,6 +23,7 @@
import org.apache.iotdb.common.rpc.thrift.TDataNodeConfiguration;
import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import java.security.SecureRandom;
import java.util.ArrayList;
@@ -92,7 +93,7 @@ public Map removeNodeReplicaSelect(
Map remainReplicasMap) {
// TODO: Implement this method
throw new UnsupportedOperationException(
- "The removeNodeReplicaSelect method of GreedyRegionGroupAllocator is yet to be implemented.");
+ ManagerMessages.THE_REMOVENODEREPLICASELECT_METHOD_OF_GREEDYREGIONGROUPALLOCATOR_IS_YET);
}
private List buildWeightList(
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/PartiteGraphPlacementRegionGroupAllocator.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/PartiteGraphPlacementRegionGroupAllocator.java
index 6a9975227836d..f7be655477697 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/PartiteGraphPlacementRegionGroupAllocator.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/PartiteGraphPlacementRegionGroupAllocator.java
@@ -25,6 +25,7 @@
import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet;
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.load.balancer.region.GreedyRegionGroupAllocator.DataNodeEntry;
import org.apache.tsfile.utils.Pair;
@@ -124,7 +125,8 @@ public Map removeNodeReplicaSelect(
Map remainReplicasMap) {
// TODO: Implement this method
throw new UnsupportedOperationException(
- "The removeNodeReplicaSelect method of PartiteGraphPlacementRegionGroupAllocator is yet to be implemented.");
+ ManagerMessages
+ .THE_REMOVENODEREPLICASELECT_METHOD_OF_PARTITEGRAPHPLACEMENTREGIONGROUPALLOCATOR);
}
private void prepare(
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/AbstractLeaderBalancer.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/AbstractLeaderBalancer.java
index e8295f990c36c..b4086e2dbbdd3 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/AbstractLeaderBalancer.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/AbstractLeaderBalancer.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId;
import org.apache.iotdb.commons.cluster.NodeStatus;
import org.apache.iotdb.commons.cluster.RegionStatus;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.load.cache.node.NodeStatistics;
import org.apache.iotdb.confignode.manager.load.cache.region.RegionStatistics;
@@ -92,7 +93,7 @@ protected void initialize(
.collect(Collectors.toSet());
if (!differenceSet.isEmpty()) {
LOGGER.warn(
- "[LeaderBalancer] The following RegionGroups' leader cannot be selected because their corresponding caches are incomplete: {}",
+ ManagerMessages.LEADERBALANCER_THE_FOLLOWING_REGIONGROUPS_LEADER_CANNOT_BE,
differenceSet);
Set databaseRegionGroupUnionSet =
databaseRegionGroupMap.values().stream()
@@ -101,17 +102,19 @@ protected void initialize(
differenceSet.forEach(
regionId -> {
if (!databaseRegionGroupUnionSet.contains(regionId)) {
- LOGGER.warn("[LeaderBalancer] Region: {} not in databaseRegionGroupMap", regionId);
+ LOGGER.warn(
+ ManagerMessages.LEADERBALANCER_REGION_NOT_IN_DATABASEREGIONGROUPMAP, regionId);
}
if (!regionLocationMap.containsKey(regionId)) {
- LOGGER.warn("[LeaderBalancer] Region: {} not in regionLocationMap", regionId);
+ LOGGER.warn(ManagerMessages.LEADERBALANCER_REGION_NOT_IN_REGIONLOCATIONMAP, regionId);
}
if (!regionLeaderMap.containsKey(regionId)) {
- LOGGER.warn("[LeaderBalancer] Region: {} not in regionLeaderMap", regionId);
+ LOGGER.warn(ManagerMessages.LEADERBALANCER_REGION_NOT_IN_REGIONLEADERMAP, regionId);
}
if (!regionStatisticsMap.containsKey(regionId)) {
- LOGGER.warn("[LeaderBalancer] Region: {} not in regionStatisticsMap", regionId);
+ LOGGER.warn(
+ ManagerMessages.LEADERBALANCER_REGION_NOT_IN_REGIONSTATISTICSMAP, regionId);
}
});
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/LoadCache.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/LoadCache.java
index 07105d96bbd1d..bdaace16d4054 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/LoadCache.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/LoadCache.java
@@ -32,6 +32,7 @@
import org.apache.iotdb.commons.cluster.RegionStatus;
import org.apache.iotdb.confignode.conf.ConfigNodeConfig;
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.IManager;
import org.apache.iotdb.confignode.manager.ProcedureManager;
import org.apache.iotdb.confignode.manager.load.cache.consensus.ConsensusGroupCache;
@@ -753,7 +754,8 @@ public Map getRegionLeaderMap(
*/
public void waitForLeaderElection(List regionGroupIds) {
long startTime = System.currentTimeMillis();
- LOGGER.info("[RegionElection] Wait for leader election of RegionGroups: {}", regionGroupIds);
+ LOGGER.info(
+ ManagerMessages.REGIONELECTION_WAIT_FOR_LEADER_ELECTION_OF_REGIONGROUPS, regionGroupIds);
while (System.currentTimeMillis() - startTime <= LEADER_ELECTION_WAITING_TIMEOUT) {
AtomicBoolean allRegionLeaderElected = new AtomicBoolean(true);
regionGroupIds.forEach(
@@ -764,26 +766,27 @@ public void waitForLeaderElection(List regionGroupIds) {
}
});
if (allRegionLeaderElected.get()) {
- LOGGER.info("[RegionElection] The leader of RegionGroups: {} is elected.", regionGroupIds);
+ LOGGER.info(
+ ManagerMessages.REGIONELECTION_THE_LEADER_OF_REGIONGROUPS_IS_ELECTED, regionGroupIds);
return;
}
try {
TimeUnit.MILLISECONDS.sleep(WAIT_LEADER_INTERVAL);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- LOGGER.warn("Interrupt when wait for leader election", e);
+ LOGGER.warn(ManagerMessages.INTERRUPT_WHEN_WAIT_FOR_LEADER_ELECTION, e);
return;
}
}
LOGGER.warn(
- "[RegionElection] The leader of RegionGroups: {} is not determined after 10 heartbeat interval. Some function might fail.",
+ ManagerMessages.REGIONELECTION_THE_LEADER_OF_REGIONGROUPS_IS_NOT_DETERMINED_AFTER_10,
regionGroupIds);
}
public void updateTopology(Map> latestTopology) {
if (!latestTopology.equals(topologyGraph)) {
- LOGGER.info("[Topology] Cluster topology changed, latest: {}", latestTopology);
+ LOGGER.info(ManagerMessages.TOPOLOGY_CLUSTER_TOPOLOGY_CHANGED_LATEST, latestTopology);
for (int fromId : latestTopology.keySet()) {
for (int toId : latestTopology.keySet()) {
boolean originReachable =
@@ -792,7 +795,7 @@ public void updateTopology(Map> latestTopology) {
latestTopology.getOrDefault(fromId, Collections.emptySet()).contains(toId);
if (originReachable != newReachable) {
LOGGER.info(
- "[Topology] Topology of DataNode {} is now {} to DataNode {}",
+ ManagerMessages.TOPOLOGY_TOPOLOGY_OF_DATANODE_IS_NOW_TO_DATANODE,
fromId,
newReachable ? "reachable" : "unreachable",
toId);
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/detector/PhiAccrualDetector.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/detector/PhiAccrualDetector.java
index e06ee3e0adb10..ca422aed50a0b 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/detector/PhiAccrualDetector.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/detector/PhiAccrualDetector.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.confignode.manager.load.cache.detector;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.load.cache.AbstractHeartbeatSample;
import org.apache.iotdb.confignode.manager.load.cache.IFailureDetector;
import org.apache.iotdb.confignode.manager.load.cache.node.NodeHeartbeatSample;
@@ -86,12 +87,14 @@ public boolean isAvailable(Object id, List history) {
if (Boolean.TRUE.equals(previousAvailability) && !isAvailable) {
final StringBuilder builder = buildRecentHeartbeatHistory(phiAccrual);
LOGGER.info(
- "[PhiAccrualDetector] Topology {} is broken, heartbeat history (ms): {}", id, builder);
+ ManagerMessages.PHIACCRUALDETECTOR_TOPOLOGY_IS_BROKEN_HEARTBEAT_HISTORY_MS, id, builder);
}
if (Boolean.FALSE.equals(previousAvailability) && isAvailable) {
final StringBuilder builder = buildRecentHeartbeatHistory(phiAccrual);
LOGGER.info(
- "[PhiAccrualDetector] Topology {} is recovered, heartbeat history (ms): {}", id, builder);
+ ManagerMessages.PHIACCRUALDETECTOR_TOPOLOGY_IS_RECOVERED_HEARTBEAT_HISTORY_MS,
+ id,
+ builder);
}
return isAvailable;
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/node/DataNodeHeartbeatCache.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/node/DataNodeHeartbeatCache.java
index 2e3e8906e74d0..e3d6d15c8356b 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/node/DataNodeHeartbeatCache.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/node/DataNodeHeartbeatCache.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.common.rpc.thrift.TLoadSample;
import org.apache.iotdb.commons.cluster.NodeStatus;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.load.cache.AbstractHeartbeatSample;
import org.slf4j.Logger;
@@ -86,7 +87,7 @@ public synchronized void updateCurrentStatistics(boolean forceUpdate) {
if (forceUpdate) {
LOGGER.debug(
- "Force update NodeCache: status={}, currentNanoTime={}", status, currentNanoTime);
+ ManagerMessages.FORCE_UPDATE_NODECACHE_STATUS_CURRENTNANOTIME, status, currentNanoTime);
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/EventService.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/EventService.java
index 209f39e7e56dd..762e6b1782b4c 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/EventService.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/EventService.java
@@ -25,6 +25,7 @@
import org.apache.iotdb.commons.concurrent.threadpool.ScheduledExecutorUtil;
import org.apache.iotdb.commons.utils.TestOnly;
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.load.cache.LoadCache;
import org.apache.iotdb.confignode.manager.load.cache.consensus.ConsensusGroupStatistics;
import org.apache.iotdb.confignode.manager.load.cache.node.NodeStatistics;
@@ -102,7 +103,7 @@ public void startEventService() {
0,
HEARTBEAT_INTERVAL,
TimeUnit.MILLISECONDS);
- LOGGER.info("Event service is started successfully.");
+ LOGGER.info(ManagerMessages.EVENT_SERVICE_IS_STARTED_SUCCESSFULLY);
}
}
}
@@ -113,7 +114,7 @@ public void stopEventService() {
if (currentEventServiceFuture != null) {
currentEventServiceFuture.cancel(false);
currentEventServiceFuture = null;
- LOGGER.info("Event service is stopped successfully.");
+ LOGGER.info(ManagerMessages.EVENT_SERVICE_IS_STOPPED_SUCCESSFULLY);
}
synchronized (this) {
previousNodeStatisticsMap.clear();
@@ -158,11 +159,11 @@ public synchronized void checkAndBroadcastNodeStatisticsChangeEventIfNecessary()
private void recordNodeStatistics(
Map> differentNodeStatisticsMap) {
- LOGGER.info("[NodeStatistics] NodeStatisticsMap: ");
+ LOGGER.info(ManagerMessages.NODESTATISTICS_NODESTATISTICSMAP);
for (Map.Entry> nodeCacheEntry :
differentNodeStatisticsMap.entrySet()) {
LOGGER.info(
- "[NodeStatistics]\t {}: {} -> {}",
+ ManagerMessages.NODESTATISTICS,
"nodeId{" + nodeCacheEntry.getKey() + "}",
nodeCacheEntry.getValue().getLeft(),
nodeCacheEntry.getValue().getRight());
@@ -205,13 +206,13 @@ public synchronized void checkAndBroadcastRegionGroupStatisticsChangeEventIfNece
private void recordRegionGroupStatistics(
Map>
differentRegionGroupStatisticsMap) {
- LOGGER.info("[RegionGroupStatistics] RegionGroupStatisticsMap: ");
+ LOGGER.info(ManagerMessages.REGIONGROUPSTATISTICS_REGIONGROUPSTATISTICSMAP);
for (Map.Entry>
regionGroupStatisticsEntry : differentRegionGroupStatisticsMap.entrySet()) {
RegionGroupStatistics previousStatistics = regionGroupStatisticsEntry.getValue().getLeft();
RegionGroupStatistics currentStatistics = regionGroupStatisticsEntry.getValue().getRight();
LOGGER.info(
- "[RegionGroupStatistics]\t RegionGroup {}: {} -> {}",
+ ManagerMessages.REGIONGROUPSTATISTICS_REGIONGROUP,
regionGroupStatisticsEntry.getKey(),
previousStatistics == null ? null : previousStatistics.getRegionGroupStatus(),
currentStatistics == null ? null : currentStatistics.getRegionGroupStatus());
@@ -223,13 +224,13 @@ private void recordRegionGroupStatistics(
for (Integer leftId : leftIds) {
if (rightIds.contains(leftId)) {
LOGGER.info(
- "[RegionGroupStatistics]\t Region in DataNode {}: {} -> {}",
+ ManagerMessages.REGIONGROUPSTATISTICS_REGION_IN_DATANODE,
leftId,
previousStatistics.getRegionStatus(leftId),
currentStatistics.getRegionStatus(leftId));
} else {
LOGGER.info(
- "[RegionGroupStatistics]\t Region in DataNode {}: {} -> null",
+ ManagerMessages.REGIONGROUPSTATISTICS_REGION_IN_DATANODE_NULL_2,
leftId,
previousStatistics.getRegionStatus(leftId));
}
@@ -237,7 +238,7 @@ private void recordRegionGroupStatistics(
for (Integer rightId : rightIds) {
if (!leftIds.contains(rightId)) {
LOGGER.info(
- "[RegionGroupStatistics]\t Region in DataNode {}: null -> {}",
+ ManagerMessages.REGIONGROUPSTATISTICS_REGION_IN_DATANODE_NULL,
rightId,
currentStatistics.getRegionStatus(rightId));
}
@@ -284,14 +285,14 @@ public synchronized void checkAndBroadcastConsensusGroupStatisticsChangeEventIfN
private void recordConsensusGroupStatistics(
Map>
differentConsensusGroupStatisticsMap) {
- LOGGER.info("[ConsensusGroupStatistics] ConsensusGroupStatisticsMap: ");
+ LOGGER.info(ManagerMessages.CONSENSUSGROUPSTATISTICS_CONSENSUSGROUPSTATISTICSMAP);
for (Map.Entry>
consensusGroupStatisticsEntry : differentConsensusGroupStatisticsMap.entrySet()) {
if (!Objects.equals(
consensusGroupStatisticsEntry.getValue().getRight(),
consensusGroupStatisticsEntry.getValue().getLeft())) {
LOGGER.info(
- "[ConsensusGroupStatistics]\t {}: {} -> {}",
+ ManagerMessages.CONSENSUSGROUPSTATISTICS,
consensusGroupStatisticsEntry.getKey(),
consensusGroupStatisticsEntry.getValue().getLeft(),
consensusGroupStatisticsEntry.getValue().getRight());
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/HeartbeatService.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/HeartbeatService.java
index 64322da5bbb20..5560554497292 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/HeartbeatService.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/HeartbeatService.java
@@ -35,6 +35,7 @@
import org.apache.iotdb.confignode.client.async.handlers.heartbeat.ConfigNodeHeartbeatHandler;
import org.apache.iotdb.confignode.client.async.handlers.heartbeat.DataNodeHeartbeatHandler;
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.IManager;
import org.apache.iotdb.confignode.manager.consensus.ConsensusManager;
import org.apache.iotdb.confignode.manager.load.cache.LoadCache;
@@ -103,7 +104,7 @@ public void startHeartbeatService() {
0,
HEARTBEAT_INTERVAL,
TimeUnit.MILLISECONDS);
- LOGGER.info("Heartbeat service is started successfully.");
+ LOGGER.info(ManagerMessages.HEARTBEAT_SERVICE_IS_STARTED_SUCCESSFULLY);
}
}
}
@@ -114,7 +115,7 @@ public void stopHeartbeatService() {
if (currentHeartbeatFuture != null) {
currentHeartbeatFuture.cancel(false);
currentHeartbeatFuture = null;
- LOGGER.info("Heartbeat service is stopped successfully.");
+ LOGGER.info(ManagerMessages.HEARTBEAT_SERVICE_IS_STOPPED_SUCCESSFULLY);
}
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/StatisticsService.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/StatisticsService.java
index 5db975d14af90..9fb235eed815e 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/StatisticsService.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/StatisticsService.java
@@ -23,6 +23,7 @@
import org.apache.iotdb.commons.concurrent.ThreadName;
import org.apache.iotdb.commons.concurrent.threadpool.ScheduledExecutorUtil;
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.load.cache.LoadCache;
import org.slf4j.Logger;
@@ -65,7 +66,7 @@ public void startLoadStatisticsService() {
0,
STATISTICS_UPDATE_INTERVAL,
TimeUnit.MILLISECONDS);
- LOGGER.info("LoadStatistics service is started successfully.");
+ LOGGER.info(ManagerMessages.LOADSTATISTICS_SERVICE_IS_STARTED_SUCCESSFULLY);
}
}
}
@@ -76,7 +77,7 @@ public void stopLoadStatisticsService() {
if (currentLoadStatisticsFuture != null) {
currentLoadStatisticsFuture.cancel(false);
currentLoadStatisticsFuture = null;
- LOGGER.info("LoadStatistics service is stopped successfully.");
+ LOGGER.info(ManagerMessages.LOADSTATISTICS_SERVICE_IS_STOPPED_SUCCESSFULLY);
}
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/TopologyService.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/TopologyService.java
index 9e4f6bd6121ab..b0c5f9730dc13 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/TopologyService.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/service/TopologyService.java
@@ -32,6 +32,7 @@
import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext;
import org.apache.iotdb.confignode.conf.ConfigNodeConfig;
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.IManager;
import org.apache.iotdb.confignode.manager.load.cache.AbstractHeartbeatSample;
import org.apache.iotdb.confignode.manager.load.cache.IFailureDetector;
@@ -121,7 +122,7 @@ public synchronized void startTopologyService() {
future = this.topologyThread.submit(this);
}
shouldRun.set(true);
- LOGGER.info("Topology Probing has started successfully");
+ LOGGER.info(ManagerMessages.TOPOLOGY_PROBING_HAS_STARTED_SUCCESSFULLY);
}
public synchronized void stopTopologyService() {
@@ -129,7 +130,7 @@ public synchronized void stopTopologyService() {
future.cancel(true);
future = null;
heartbeats.clear();
- LOGGER.info("Topology Probing has stopped successfully");
+ LOGGER.info(ManagerMessages.TOPOLOGY_PROBING_HAS_STOPPED_SUCCESSFULLY);
}
/**
@@ -226,7 +227,7 @@ private synchronized void topologyProbing() {
if (!entry.getValue().isEmpty()
&& !failureDetector.isAvailable(entry.getKey(), entry.getValue())) {
- LOGGER.debug("Connection from DataNode {} to DataNode {} is broken", fromId, toId);
+ LOGGER.debug(ManagerMessages.CONNECTION_FROM_DATANODE_TO_DATANODE_IS_BROKEN, fromId, toId);
} else {
Optional.ofNullable(latestTopology.get(fromId)).ifPresent(s -> s.add(toId));
}
@@ -266,7 +267,7 @@ private void logAsymmetricPartition(final Map> topology) {
continue;
}
if (!reachableTo.contains(from) && !reachableFrom.contains(to)) {
- LOGGER.debug("[Topology] Asymmetric network partition from {} to {}", from, to);
+ LOGGER.debug(ManagerMessages.TOPOLOGY_ASYMMETRIC_NETWORK_PARTITION_FROM_TO, from, to);
}
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/node/NodeManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/node/NodeManager.java
index 2e50c6b787b20..f2fdd2b611b36 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/node/NodeManager.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/node/NodeManager.java
@@ -63,6 +63,8 @@
import org.apache.iotdb.confignode.consensus.response.datanode.DataNodeConfigurationResp;
import org.apache.iotdb.confignode.consensus.response.datanode.DataNodeRegisterResp;
import org.apache.iotdb.confignode.consensus.response.datanode.DataNodeToStatusResp;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.ClusterManager;
import org.apache.iotdb.confignode.manager.IManager;
import org.apache.iotdb.confignode.manager.PermissionManager;
@@ -414,7 +416,7 @@ public TDataNodeRestartResp updateDataNodeIfNecessary(TDataNodeRestartReq req) {
*/
public DataSet removeDataNode(RemoveDataNodePlan removeDataNodePlan) {
configManager.getProcedureManager().getEnv().getSubmitRegionMigrateLock().lock();
- LOGGER.info("NodeManager start to remove DataNode {}", removeDataNodePlan);
+ LOGGER.info(ManagerMessages.NODEMANAGER_START_TO_REMOVE_DATANODE, removeDataNodePlan);
try {
// Checks if the RemoveDataNode request is valid
RemoveDataNodeHandler removeDataNodeHandler =
@@ -423,7 +425,7 @@ public DataSet removeDataNode(RemoveDataNodePlan removeDataNodePlan) {
removeDataNodeHandler.checkRemoveDataNodeRequest(removeDataNodePlan);
if (preCheckStatus.getStatus().getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.error(
- "The remove DataNode request check failed. req: {}, check result: {}",
+ ManagerMessages.THE_REMOVE_DATANODE_REQUEST_CHECK_FAILED_REQ_CHECK_RESULT,
removeDataNodePlan,
preCheckStatus.getStatus());
return preCheckStatus;
@@ -453,7 +455,7 @@ public DataSet removeDataNode(RemoveDataNodePlan removeDataNodePlan) {
dataSet.setStatus(status);
LOGGER.info(
- "NodeManager submit RemoveDataNodePlan finished, removeDataNodePlan: {}",
+ ManagerMessages.NODEMANAGER_SUBMIT_REMOVEDATANODEPLAN_FINISHED_REMOVEDATANODEPLAN,
removeDataNodePlan);
return dataSet;
} finally {
@@ -576,7 +578,7 @@ public TSStatus removeAINode() {
status.setMessage("Server rejected the request, maybe requests are too many");
}
- LOGGER.info("NodeManager submit RemoveAINodePlan finished, {}", removeAINodePlan);
+ LOGGER.info(ManagerMessages.NODEMANAGER_SUBMIT_REMOVEAINODEPLAN_FINISHED, removeAINodePlan);
return status;
}
@@ -614,7 +616,7 @@ public AINodeConfigurationResp getAINodeConfiguration(GetAINodeConfigurationPlan
try {
return (AINodeConfigurationResp) getConsensusManager().read(req);
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the read API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_READ_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
res.setMessage(e.getMessage());
AINodeConfigurationResp response = new AINodeConfigurationResp();
@@ -634,7 +636,7 @@ public DataNodeConfigurationResp getDataNodeConfiguration(GetDataNodeConfigurati
try {
return (DataNodeConfigurationResp) getConsensusManager().read(req);
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the read API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_READ_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
res.setMessage(e.getMessage());
DataNodeConfigurationResp response = new DataNodeConfigurationResp();
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java
index da7a19cfdb5aa..5be81256b5c39 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java
@@ -71,6 +71,7 @@
import org.apache.iotdb.confignode.exception.DatabaseNotExistsException;
import org.apache.iotdb.confignode.exception.NoAvailableRegionGroupException;
import org.apache.iotdb.confignode.exception.NotEnoughDataNodeException;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.IManager;
import org.apache.iotdb.confignode.manager.ProcedureManager;
import org.apache.iotdb.confignode.manager.TTLManager;
@@ -306,7 +307,7 @@ public SchemaPartitionResp getOrCreateSchemaPartition(final GetOrCreateSchemaPar
return resp;
}
- LOGGER.error("Create SchemaPartition failed because: ", e);
+ LOGGER.error(ManagerMessages.CREATE_SCHEMAPARTITION_FAILED_BECAUSE, e);
resp.setStatus(
new TSStatus(TSStatusCode.NO_AVAILABLE_REGION_GROUP.getStatusCode())
.setMessage(e.getMessage()));
@@ -451,7 +452,7 @@ public DataPartitionResp getOrCreateDataPartition(final GetOrCreateDataPartition
return resp;
}
- LOGGER.error("Create DataPartition failed because: ", e);
+ LOGGER.error(ManagerMessages.CREATE_DATAPARTITION_FAILED_BECAUSE, e);
if (e instanceof DatabaseNotExistsException) {
resp.setStatus(
new TSStatus(TSStatusCode.DATABASE_NOT_EXIST.getStatusCode())
@@ -552,7 +553,8 @@ private TSStatus consensusWritePartitionResult(ConfigPhysicalPlan plan) {
return getConsensusManager().write(plan);
} catch (ConsensusException e) {
// The allocation might fail due to consensus error
- LOGGER.error("Write partition allocation result failed because: {}", e.getMessage());
+ LOGGER.error(
+ ManagerMessages.WRITE_PARTITION_ALLOCATION_RESULT_FAILED_BECAUSE, e.getMessage());
TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
res.setMessage(e.getMessage());
return res;
@@ -600,11 +602,11 @@ private TSStatus extendRegionGroupIfNecessary(
}
}
} catch (NotEnoughDataNodeException e) {
- LOGGER.error("Extend region group failed", e);
+ LOGGER.error(ManagerMessages.EXTEND_REGION_GROUP_FAILED, e);
result.setCode(TSStatusCode.NO_ENOUGH_DATANODE.getStatusCode());
result.setMessage(e.getMessage());
} catch (DatabaseNotExistsException e) {
- LOGGER.error("Extend region group failed", e);
+ LOGGER.error(ManagerMessages.EXTEND_REGION_GROUP_FAILED, e);
result.setCode(TSStatusCode.DATABASE_NOT_EXIST.getStatusCode());
result.setMessage(e.getMessage());
}
@@ -711,7 +713,7 @@ private TSStatus generateAndAllocateRegionGroups(
if (!allotmentMap.isEmpty()) {
final CreateRegionGroupsPlan createRegionGroupsPlan =
getLoadManager().allocateRegionGroups(allotmentMap, consensusGroupType);
- LOGGER.info("[CreateRegionGroups] Starting to create the following RegionGroups:");
+ LOGGER.info(ManagerMessages.CREATEREGIONGROUPS_STARTING_TO_CREATE_THE_FOLLOWING_REGIONGROUPS);
createRegionGroupsPlan.planLog(LOGGER);
return getProcedureManager().createRegionGroups(consensusGroupType, createRegionGroupsPlan);
} else {
@@ -1379,7 +1381,7 @@ public void maintainRegionReplicas() {
RegionCreateTask schemaRegionCreateTask =
(RegionCreateTask) regionMaintainTask;
LOGGER.info(
- "Start to create Region: {} on DataNode: {}",
+ ManagerMessages.START_TO_CREATE_REGION_ON_DATANODE,
schemaRegionCreateTask.getRegionReplicaSet().getRegionId(),
schemaRegionCreateTask.getTargetDataNode());
createSchemaRegionHandler.putRequest(
@@ -1415,7 +1417,7 @@ public void maintainRegionReplicas() {
RegionCreateTask dataRegionCreateTask =
(RegionCreateTask) regionMaintainTask;
LOGGER.info(
- "Start to create Region: {} on DataNode: {}",
+ ManagerMessages.START_TO_CREATE_REGION_ON_DATANODE,
dataRegionCreateTask.getRegionReplicaSet().getRegionId(),
dataRegionCreateTask.getTargetDataNode());
createDataRegionHandler.putRequest(
@@ -1451,7 +1453,7 @@ public void maintainRegionReplicas() {
for (RegionMaintainTask regionMaintainTask : selectedRegionMaintainTask) {
RegionDeleteTask regionDeleteTask = (RegionDeleteTask) regionMaintainTask;
LOGGER.info(
- "Start to delete Region: {} on DataNode: {}",
+ ManagerMessages.START_TO_DELETE_REGION_ON_DATANODE,
regionDeleteTask.getRegionId(),
regionDeleteTask.getTargetDataNode());
deleteRegionHandler.putRequest(
@@ -1468,7 +1470,8 @@ public void maintainRegionReplicas() {
.sendAsyncRequestWithRetry(deleteRegionHandler);
LOGGER.info(
- "Deleting regions costs {}ms", (System.currentTimeMillis() - startTime));
+ ManagerMessages.DELETING_REGIONS_COSTS_MS,
+ (System.currentTimeMillis() - startTime));
for (Map.Entry entry :
deleteRegionHandler.getResponseMap().entrySet()) {
@@ -1530,7 +1533,7 @@ public void startRegionCleaner() {
0,
REGION_MAINTAINER_WORK_INTERVAL,
TimeUnit.SECONDS);
- LOGGER.info("RegionCleaner is started successfully.");
+ LOGGER.info(ManagerMessages.REGIONCLEANER_IS_STARTED_SUCCESSFULLY);
}
}
}
@@ -1541,7 +1544,7 @@ public void stopRegionCleaner() {
/* Stop the RegionCleaner service */
currentRegionMaintainerFuture.cancel(false);
currentRegionMaintainerFuture = null;
- LOGGER.info("RegionCleaner is stopped successfully.");
+ LOGGER.info(ManagerMessages.REGIONCLEANER_IS_STOPPED_SUCCESSFULLY);
}
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionMetrics.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionMetrics.java
index b6b709cd8010d..3a6939d1dcce6 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionMetrics.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionMetrics.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.commons.service.metric.enums.Tag;
import org.apache.iotdb.commons.utils.NodeUrlUtils;
import org.apache.iotdb.confignode.exception.DatabaseNotExistsException;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.IManager;
import org.apache.iotdb.confignode.manager.load.LoadManager;
import org.apache.iotdb.confignode.manager.node.NodeManager;
@@ -346,7 +347,8 @@ private static void bindDatabasePartitionMetricsWhenUpdate(
try {
return manager.getRegionGroupCount(database, TConsensusGroupType.SchemaRegion);
} catch (DatabaseNotExistsException e) {
- LOGGER.info("Error when counting SchemaRegionGroups in Database: {}", database, e);
+ LOGGER.info(
+ ManagerMessages.ERROR_WHEN_COUNTING_SCHEMAREGIONGROUPS_IN_DATABASE, database, e);
return 0;
}
},
@@ -362,7 +364,8 @@ private static void bindDatabasePartitionMetricsWhenUpdate(
try {
return manager.getRegionGroupCount(database, TConsensusGroupType.DataRegion);
} catch (DatabaseNotExistsException e) {
- LOGGER.info("Error when counting DataRegionGroups in Database: {}", database, e);
+ LOGGER.info(
+ ManagerMessages.ERROR_WHEN_COUNTING_DATAREGIONGROUPS_IN_DATABASE, database, e);
return 0;
}
},
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/RegionGroupExtensionPolicy.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/RegionGroupExtensionPolicy.java
index ac461d76e9a34..5f69a5d3525a5 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/RegionGroupExtensionPolicy.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/RegionGroupExtensionPolicy.java
@@ -18,6 +18,8 @@
*/
package org.apache.iotdb.confignode.manager.partition;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
+
import java.io.IOException;
public enum RegionGroupExtensionPolicy {
@@ -42,6 +44,6 @@ public static RegionGroupExtensionPolicy parse(String policy) throws IOException
}
}
throw new IOException(
- String.format("DataRegionGroupExtensionPolicy %s doesn't exist.", policy));
+ String.format(ManagerMessages.DATAREGIONGROUPEXTENSIONPOLICY_DOESN_T_EXIST, policy));
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/RegionGroupStatus.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/RegionGroupStatus.java
index a2c4bea6a736f..f12bd644c1e13 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/RegionGroupStatus.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/RegionGroupStatus.java
@@ -18,6 +18,8 @@
*/
package org.apache.iotdb.confignode.manager.partition;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
+
public enum RegionGroupStatus {
/** All Regions in RegionGroup are in the Running status */
@@ -56,7 +58,8 @@ public static RegionGroupStatus parse(String status) {
return regionGroupStatus;
}
}
- throw new RuntimeException(String.format("RegionGroupStatus %s doesn't exist.", status));
+ throw new RuntimeException(
+ String.format(ManagerMessages.REGIONGROUPSTATUS_DOESN_T_EXIST, status));
}
/**
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/runtime/PipeConfigNodeRuntimeAgent.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/runtime/PipeConfigNodeRuntimeAgent.java
index e8cc16a1edf7d..ca98f4ec9e892 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/runtime/PipeConfigNodeRuntimeAgent.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/runtime/PipeConfigNodeRuntimeAgent.java
@@ -28,6 +28,7 @@
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
import org.apache.iotdb.commons.service.IService;
import org.apache.iotdb.commons.service.ServiceType;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.pipe.agent.PipeConfigNodeAgent;
import org.apache.iotdb.confignode.manager.pipe.resource.PipeConfigNodeCopiedFileDirStartupCleaner;
import org.apache.iotdb.confignode.manager.pipe.source.ConfigRegionListeningQueue;
@@ -73,7 +74,7 @@ public synchronized void start() {
}
isShutdown.set(false);
- LOGGER.info("PipeRuntimeConfigNodeAgent started");
+ LOGGER.info(ManagerMessages.PIPERUNTIMECONFIGNODEAGENT_STARTED);
}
@Override
@@ -88,7 +89,7 @@ public synchronized void stop() {
PipeConfigNodeAgent.task().dropAllPipeTasks();
- LOGGER.info("PipeRuntimeConfigNodeAgent stopped");
+ LOGGER.info(ManagerMessages.PIPERUNTIMECONFIGNODEAGENT_STOPPED);
}
public boolean isShutdown() {
@@ -141,14 +142,16 @@ public void report(final EnrichedEvent event, final PipeRuntimeException pipeRun
if (event.getPipeTaskMeta() != null) {
report(event.getPipeTaskMeta(), pipeRuntimeException);
} else {
- LOGGER.warn("Attempt to report pipe exception to a null PipeTaskMeta.", pipeRuntimeException);
+ LOGGER.warn(
+ ManagerMessages.ATTEMPT_TO_REPORT_PIPE_EXCEPTION_TO_A_NULL_PIPETASKMETA,
+ pipeRuntimeException);
}
}
private void report(
final PipeTaskMeta pipeTaskMeta, final PipeRuntimeException pipeRuntimeException) {
LOGGER.warn(
- "Report PipeRuntimeException to local PipeTaskMeta({}), exception message: {}",
+ ManagerMessages.REPORT_PIPERUNTIMEEXCEPTION_TO_LOCAL_PIPETASKMETA_EXCEPTION_MESSAGE,
pipeTaskMeta,
pipeRuntimeException.getMessage(),
pipeRuntimeException);
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeSubtask.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeSubtask.java
index a79da86968fd6..fd1a849063062 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeSubtask.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeSubtask.java
@@ -32,6 +32,7 @@
import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskSourceRuntimeEnvironment;
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
import org.apache.iotdb.commons.pipe.event.ProgressReportEvent;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.pipe.agent.PipeConfigNodeAgent;
import org.apache.iotdb.confignode.manager.pipe.metric.sink.PipeConfigRegionSinkMetrics;
import org.apache.iotdb.confignode.manager.pipe.source.IoTDBConfigRegionSource;
@@ -105,7 +106,7 @@ private void initSource(final Map sourceAttributes) throws Excep
source.close();
} catch (Exception closeException) {
LOGGER.warn(
- "Failed to close extractor after failed to initialize extractor. "
+ ManagerMessages.FAILED_TO_CLOSE_EXTRACTOR_AFTER_FAILED_TO_INITIALIZE_EXTRACTOR
+ "Ignore this exception.",
closeException);
}
@@ -154,7 +155,7 @@ private void initSink(final Map sinkAttributes) throws Exception
outputPipeSink.close();
} catch (final Exception closeException) {
LOGGER.warn(
- "Failed to close sink after failed to initialize it. Ignore this exception.",
+ ManagerMessages.FAILED_TO_CLOSE_SINK_AFTER_FAILED_TO_INITIALIZE_IT_IGNORE,
closeException);
}
throw e;
@@ -207,19 +208,19 @@ public void close() {
try {
source.close();
} catch (final Exception e) {
- LOGGER.info("Error occurred during closing PipeExtractor.", e);
+ LOGGER.info(ManagerMessages.ERROR_OCCURRED_DURING_CLOSING_PIPEEXTRACTOR, e);
}
try {
processor.close();
} catch (final Exception e) {
- LOGGER.info("Error occurred during closing PipeProcessor.", e);
+ LOGGER.info(ManagerMessages.ERROR_OCCURRED_DURING_CLOSING_PIPEPROCESSOR, e);
}
try {
outputPipeSink.close();
} catch (final Exception e) {
- LOGGER.info("Error occurred during closing PipeConnector.", e);
+ LOGGER.info(ManagerMessages.ERROR_OCCURRED_DURING_CLOSING_PIPECONNECTOR, e);
} finally {
// Should be after connector.close()
super.close();
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskAgent.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskAgent.java
index 4f90d147e264b..b50b877b5bf62 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskAgent.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskAgent.java
@@ -30,6 +30,7 @@
import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta;
import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.pipe.agent.PipeConfigNodeAgent;
import org.apache.iotdb.confignode.manager.pipe.metric.overview.PipeConfigNodeRemainingTimeMetrics;
import org.apache.iotdb.confignode.manager.pipe.metric.source.PipeConfigRegionSourceMetrics;
@@ -151,14 +152,14 @@ protected List handlePipeMetaChangesInternal(
try {
return pipeMeta.deepCopy4TaskAgent();
} catch (Exception e) {
- throw new PipeException("failed to deep copy pipeMeta", e);
+ throw new PipeException(ManagerMessages.FAILED_TO_DEEP_COPY_PIPEMETA, e);
}
})
.collect(Collectors.toList()));
clearConfigRegionListeningQueueIfNecessary(pipeMetaListFromCoordinator);
return exceptionMessages;
} catch (final Exception e) {
- throw new PipeException("failed to handle pipe meta changes", e);
+ throw new PipeException(ManagerMessages.FAILED_TO_HANDLE_PIPE_META_CHANGES, e);
}
}
@@ -209,7 +210,8 @@ protected void collectPipeMetaListInternal(
PipeConfig.getInstance().getPipeMetaReportMaxLogNumPerRound(),
PipeConfig.getInstance().getPipeMetaReportMaxLogIntervalRounds(),
pipeMetaKeeper.getPipeMetaCount());
- LOGGER.debug("Received pipe heartbeat request {} from config coordinator.", req.heartbeatId);
+ LOGGER.debug(
+ ManagerMessages.RECEIVED_PIPE_HEARTBEAT_REQUEST_FROM_CONFIG_COORDINATOR, req.heartbeatId);
final List pipeMetaBinaryList = new ArrayList<>();
final List pipeRemainingEventCountList = new ArrayList<>();
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskStage.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskStage.java
index b5fba6c004b49..bbd03236e2447 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskStage.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskStage.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta;
import org.apache.iotdb.commons.pipe.agent.task.stage.PipeTaskStage;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.pipe.api.exception.PipeException;
import java.util.Map;
@@ -49,7 +50,9 @@ public PipeConfigNodeTaskStage(
} catch (final Exception e) {
throw new PipeException(
String.format(
- "Failed to create subtask for pipe %s, creation time %d", pipeName, creationTime),
+ ManagerMessages.FAILED_TO_CREATE_SUBTASK_FOR_PIPE_CREATION_TIME,
+ pipeName,
+ creationTime),
e);
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/plugin/PipePluginCoordinator.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/plugin/PipePluginCoordinator.java
index 8a53fd84b903c..db72a7d5799a9 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/plugin/PipePluginCoordinator.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/plugin/PipePluginCoordinator.java
@@ -25,6 +25,8 @@
import org.apache.iotdb.confignode.consensus.request.read.pipe.plugin.GetPipePluginTablePlan;
import org.apache.iotdb.confignode.consensus.response.JarResp;
import org.apache.iotdb.confignode.consensus.response.pipe.plugin.PipePluginTableResp;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.persistence.pipe.PipePluginInfo;
import org.apache.iotdb.confignode.rpc.thrift.TCreatePipePluginReq;
@@ -105,7 +107,7 @@ public TGetPipePluginTableResp getPipePluginTable() {
configManager.getConsensusManager().read(new GetPipePluginTablePlan()))
.convertToThriftResponse();
} catch (IOException | ConsensusException e) {
- LOGGER.error("Fail to get PipePluginTable", e);
+ LOGGER.error(ManagerMessages.FAIL_TO_GET_PIPEPLUGINTABLE, e);
return new TGetPipePluginTableResp(
new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode())
.setMessage(e.getMessage()),
@@ -120,7 +122,7 @@ public TGetPipePluginTableResp getPipePluginTableExtended(TShowPipePluginReq req
.filter(req.isTableModel)
.convertToThriftResponse();
} catch (IOException | ConsensusException e) {
- LOGGER.error("Fail to get PipePluginTable", e);
+ LOGGER.error(ManagerMessages.FAIL_TO_GET_PIPEPLUGINTABLE, e);
return new TGetPipePluginTableResp(
new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode())
.setMessage(e.getMessage()),
@@ -136,7 +138,7 @@ public TGetJarInListResp getPipePluginJar(TGetJarInListReq req) {
.read(new GetPipePluginJarPlan(req.getJarNameList())))
.convertToThriftResponse();
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the read API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_READ_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
res.setMessage(e.getMessage());
return new JarResp(res, Collections.emptyList()).convertToThriftResponse();
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/PipeMetaSyncer.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/PipeMetaSyncer.java
index 959dc7f43636f..941d6d6d06cec 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/PipeMetaSyncer.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/PipeMetaSyncer.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.commons.concurrent.ThreadName;
import org.apache.iotdb.commons.concurrent.threadpool.ScheduledExecutorUtil;
import org.apache.iotdb.commons.pipe.config.PipeConfig;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.manager.ProcedureManager;
import org.apache.iotdb.confignode.persistence.pipe.PipeTaskInfo;
@@ -81,7 +82,7 @@ public synchronized void start() {
INITIAL_SYNC_DELAY_MINUTES,
SYNC_INTERVAL_MINUTES,
TimeUnit.MINUTES);
- LOGGER.info("PipeMetaSyncer is started successfully.");
+ LOGGER.info(ManagerMessages.PIPEMETASYNCER_IS_STARTED_SUCCESSFULLY);
}
}
@@ -95,7 +96,7 @@ private synchronized void sync() {
if (configManager.getPipeManager().getPipeTaskCoordinator().isLocked()) {
LOGGER.warn(
- "PipeTaskCoordinatorLock is held by another thread, skip this round of sync to avoid procedure and rpc accumulation as much as possible");
+ ManagerMessages.PIPETASKCOORDINATORLOCK_IS_HELD_BY_ANOTHER_THREAD_SKIP_THIS_ROUND_OF_2);
return;
}
@@ -108,7 +109,7 @@ private synchronized void sync() {
== PipeConfig.getInstance().getPipeMetaSyncerAutoRestartPipeCheckIntervalRound()) {
somePipesNeedRestarting = autoRestartWithLock();
if (somePipesNeedRestarting) {
- LOGGER.info("Some pipes need restarting, will restart them after this sync");
+ LOGGER.info(ManagerMessages.SOME_PIPES_NEED_RESTARTING_WILL_RESTART_THEM_AFTER_THIS_SYNC);
}
pipeAutoRestartRoundCounter.set(0);
}
@@ -130,17 +131,18 @@ private synchronized void sync() {
successfulSync = true;
} else {
LOGGER.warn(
- "Failed to handle pipe meta change. Result status: {}.", handleMetaChangeStatus);
+ ManagerMessages.FAILED_TO_HANDLE_PIPE_META_CHANGE_RESULT_STATUS,
+ handleMetaChangeStatus);
}
}
if (successfulSync) {
LOGGER.info(
- "After this successful sync, if PipeTaskInfo is empty during this sync and has not been modified afterwards, all subsequent syncs will be skipped");
+ ManagerMessages.AFTER_THIS_SUCCESSFUL_SYNC_IF_PIPETASKINFO_IS_EMPTY_DURING_THIS);
isLastPipeSyncSuccessful = true;
}
} else {
- LOGGER.warn("Failed to sync pipe meta. Result status: {}.", metaSyncStatus);
+ LOGGER.warn(ManagerMessages.FAILED_TO_SYNC_PIPE_META_RESULT_STATUS, metaSyncStatus);
}
}
@@ -148,7 +150,7 @@ public synchronized void stop() {
if (metaSyncFuture != null) {
metaSyncFuture.cancel(false);
metaSyncFuture = null;
- LOGGER.info("PipeMetaSyncer is stopped successfully.");
+ LOGGER.info(ManagerMessages.PIPEMETASYNCER_IS_STOPPED_SUCCESSFULLY);
}
}
@@ -156,7 +158,7 @@ private boolean autoRestartWithLock() {
final AtomicReference pipeTaskInfo =
configManager.getPipeManager().getPipeTaskCoordinator().tryLock();
if (pipeTaskInfo == null) {
- LOGGER.warn("Failed to acquire pipe lock for auto restart pipe task.");
+ LOGGER.warn(ManagerMessages.FAILED_TO_ACQUIRE_PIPE_LOCK_FOR_AUTO_RESTART_PIPE_TASK);
return false;
}
try {
@@ -174,7 +176,7 @@ private void checkAndRepairConsensusPipes() {
.getRegionMaintainHandler()
.checkAndRepairConsensusPipes();
} catch (Exception e) {
- LOGGER.warn("Failed to check and repair consensus pipes", e);
+ LOGGER.warn(ManagerMessages.FAILED_TO_CHECK_AND_REPAIR_CONSENSUS_PIPES, e);
}
}
@@ -182,7 +184,7 @@ private boolean handleSuccessfulRestartWithLock() {
final AtomicReference pipeTaskInfo =
configManager.getPipeManager().getPipeTaskCoordinator().tryLock();
if (pipeTaskInfo == null) {
- LOGGER.warn("Failed to acquire pipe lock for handling successful restart.");
+ LOGGER.warn(ManagerMessages.FAILED_TO_ACQUIRE_PIPE_LOCK_FOR_HANDLING_SUCCESSFUL_RESTART);
return false;
}
try {
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/heartbeat/PipeHeartbeatParser.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/heartbeat/PipeHeartbeatParser.java
index 6dc11ddd3f3bd..423353dd1fb9d 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/heartbeat/PipeHeartbeatParser.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/heartbeat/PipeHeartbeatParser.java
@@ -31,6 +31,7 @@
import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTemporaryMetaInCoordinator;
import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.confignode.consensus.response.pipe.task.PipeTableResp;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.manager.pipe.resource.PipeConfigNodeResourceManager;
import org.apache.iotdb.confignode.persistence.pipe.PipeTaskInfo;
@@ -93,7 +94,8 @@ synchronized void parseHeartbeat(final int nodeId, final PipeHeartbeat pipeHeart
configManager.getPipeManager().getPipeTaskCoordinator().tryLock();
if (pipeTaskInfo == null) {
LOGGER.warn(
- "Failed to acquire lock when parseHeartbeat from node (id={}).", nodeId);
+ ManagerMessages.FAILED_TO_ACQUIRE_LOCK_WHEN_PARSEHEARTBEAT_FROM_NODE_ID,
+ nodeId);
return;
}
@@ -125,7 +127,8 @@ private int getExpectedHeartbeatNodeCount() {
configManager.getNodeManager().getRegisteredDataNodeCount()
+ (PipeConfig.getInstance().isSeperatedPipeHeartbeatEnabled() ? 1 : 0);
if (expectedNodeCount <= 0) {
- LOGGER.warn("Expected pipe heartbeat node count is {}, fallback to 1.", expectedNodeCount);
+ LOGGER.warn(
+ ManagerMessages.EXPECTED_PIPE_HEARTBEAT_NODE_COUNT_IS_FALLBACK_TO_1, expectedNodeCount);
return 1;
}
return expectedNodeCount;
@@ -140,7 +143,7 @@ private void parseHeartbeatAndSaveMetaChangeLocally(
final PipeMeta pipeMetaFromAgent = pipeHeartbeat.getPipeMeta(staticMeta);
if (pipeMetaFromAgent == null) {
LOGGER.info(
- "PipeRuntimeCoordinator meets error in updating pipeMetaKeeper, "
+ ManagerMessages.PIPERUNTIMECOORDINATOR_MEETS_ERROR_IN_UPDATING_PIPEMETAKEEPER
+ "pipeMetaFromAgent is null, pipeMetaFromCoordinator: {}",
pipeMetaFromCoordinator);
continue;
@@ -161,7 +164,7 @@ private void parseHeartbeatAndSaveMetaChangeLocally(
if (uncompletedDataNodeIds.isEmpty()) {
pipeTaskInfo.get().removePipeMeta(staticMeta.getPipeName());
LOGGER.info(
- "Detected completion of pipe {}, static meta: {}, remove it.",
+ ManagerMessages.DETECTED_COMPLETION_OF_PIPE_STATIC_META_REMOVE_IT,
staticMeta.getPipeName(),
staticMeta);
needWriteConsensusOnConfigNodes.set(true);
@@ -188,7 +191,8 @@ private void parseHeartbeatAndSaveMetaChangeLocally(
pipeTaskMetaMapFromAgent.get(runtimeMetaFromCoordinator.getKey());
if (runtimeMetaFromAgent == null) {
LOGGER.debug(
- "No corresponding Pipe is running in the reported DataRegion. runtimeMetaFromAgent is null, runtimeMetaFromCoordinator: {}",
+ ManagerMessages
+ .NO_CORRESPONDING_PIPE_IS_RUNNING_IN_THE_REPORTED_DATAREGION_RUNTIMEMETAFROMAGENT,
runtimeMetaFromCoordinator);
continue;
}
@@ -251,7 +255,7 @@ private void parseHeartbeatAndSaveMetaChangeLocally(
needPushPipeMetaToDataNodes.set(false);
LOGGER.warn(
- "Detect PipeRuntimeCriticalException {} from agent, stop pipe {}.",
+ ManagerMessages.DETECT_PIPERUNTIMECRITICALEXCEPTION_FROM_AGENT_STOP_PIPE,
exception,
pipeName);
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/heartbeat/PipeHeartbeatScheduler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/heartbeat/PipeHeartbeatScheduler.java
index 8f864b4d5c20c..bbdaee8e12c3e 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/heartbeat/PipeHeartbeatScheduler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/runtime/heartbeat/PipeHeartbeatScheduler.java
@@ -29,6 +29,7 @@
import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager;
import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext;
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.manager.pipe.agent.PipeConfigNodeAgent;
import org.apache.iotdb.mpp.rpc.thrift.TPipeHeartbeatReq;
@@ -74,7 +75,7 @@ public synchronized void start() {
HEARTBEAT_INTERVAL_SECONDS,
HEARTBEAT_INTERVAL_SECONDS,
TimeUnit.SECONDS);
- LOGGER.info("PipeHeartbeat is started successfully.");
+ LOGGER.info(ManagerMessages.PIPEHEARTBEAT_IS_STARTED_SUCCESSFULLY);
}
}
@@ -85,7 +86,7 @@ private synchronized void heartbeat() {
if (configManager.getPipeManager().getPipeTaskCoordinator().isLocked()) {
LOGGER.warn(
- "PipeTaskCoordinatorLock is held by another thread, skip this round of heartbeat to avoid procedure and rpc accumulation as much as possible");
+ ManagerMessages.PIPETASKCOORDINATORLOCK_IS_HELD_BY_ANOTHER_THREAD_SKIP_THIS_ROUND_OF);
return;
}
@@ -93,7 +94,7 @@ private synchronized void heartbeat() {
final Map dataNodeLocationMap =
configManager.getNodeManager().getRegisteredDataNodeLocations();
final TPipeHeartbeatReq request = new TPipeHeartbeatReq(System.currentTimeMillis());
- LOGGER.debug("Collecting pipe heartbeat {} from data nodes", request.heartbeatId);
+ LOGGER.debug(ManagerMessages.COLLECTING_PIPE_HEARTBEAT_FROM_DATA_NODES, request.heartbeatId);
final DataNodeAsyncRequestContext clientHandler =
new DataNodeAsyncRequestContext<>(
@@ -129,7 +130,7 @@ private synchronized void heartbeat() {
configNodeResp.getPipeRemainingEventCountList(),
configNodeResp.getPipeRemainingTimeList()));
} catch (final Exception e) {
- LOGGER.warn("Failed to collect pipe meta list from config node task agent", e);
+ LOGGER.warn(ManagerMessages.FAILED_TO_COLLECT_PIPE_META_LIST_FROM_CONFIG_NODE_TASK, e);
}
}
@@ -137,7 +138,7 @@ public synchronized void stop() {
if (IS_SEPERATED_PIPE_HEARTBEAT_ENABLED && heartbeatFuture != null) {
heartbeatFuture.cancel(false);
heartbeatFuture = null;
- LOGGER.info("PipeHeartbeat is stopped successfully.");
+ LOGGER.info(ManagerMessages.PIPEHEARTBEAT_IS_STOPPED_SUCCESSFULLY);
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/task/PipeTaskCoordinator.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/task/PipeTaskCoordinator.java
index 4459ec6760280..1d4d10006b625 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/task/PipeTaskCoordinator.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/task/PipeTaskCoordinator.java
@@ -24,6 +24,8 @@
import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStatus;
import org.apache.iotdb.confignode.consensus.request.read.pipe.task.ShowPipePlanV2;
import org.apache.iotdb.confignode.consensus.response.pipe.task.PipeTableResp;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.persistence.pipe.PipeTaskInfo;
import org.apache.iotdb.confignode.rpc.thrift.TAlterPipeReq;
@@ -105,7 +107,7 @@ public TSStatus createPipe(TCreatePipeReq req) {
status = configManager.getProcedureManager().createPipe(req);
}
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.warn("Failed to create pipe {}. Result status: {}.", req.getPipeName(), status);
+ LOGGER.warn(ManagerMessages.FAILED_TO_CREATE_PIPE_RESULT_STATUS, req.getPipeName(), status);
}
return status;
}
@@ -125,7 +127,7 @@ public TSStatus alterPipe(TAlterPipeReq req) {
}
final TSStatus status = configManager.getProcedureManager().alterPipe(req);
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.warn("Failed to alter pipe {}. Result status: {}.", req.getPipeName(), status);
+ LOGGER.warn(ManagerMessages.FAILED_TO_ALTER_PIPE_RESULT_STATUS, req.getPipeName(), status);
}
return status;
}
@@ -139,7 +141,7 @@ private TSStatus startPipe(String pipeName) {
status = configManager.getProcedureManager().startPipe(pipeName);
}
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.warn("Failed to start pipe {}. Result status: {}.", pipeName, status);
+ LOGGER.warn(ManagerMessages.FAILED_TO_START_PIPE_RESULT_STATUS, pipeName, status);
}
return status;
}
@@ -165,7 +167,7 @@ private TSStatus stopPipe(String pipeName) {
status = configManager.getProcedureManager().stopPipe(pipeName);
}
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.warn("Failed to stop pipe {}. Result status: {}.", pipeName, status);
+ LOGGER.warn(ManagerMessages.FAILED_TO_STOP_PIPE_RESULT_STATUS, pipeName, status);
}
return status;
}
@@ -202,7 +204,7 @@ public TSStatus dropPipe(TDropPipeReq req) {
status = configManager.getProcedureManager().dropPipe(pipeName);
}
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.warn("Failed to drop pipe {}. Result status: {}.", pipeName, status);
+ LOGGER.warn(ManagerMessages.FAILED_TO_DROP_PIPE_RESULT_STATUS, pipeName, status);
}
return status;
}
@@ -213,7 +215,7 @@ public TShowPipeResp showPipes(final TShowPipeReq req) {
.filter(req.whereClause, req.pipeName, req.isTableModel, req.userName)
.convertToTShowPipeResp();
} catch (final ConsensusException e) {
- LOGGER.warn("Failed in the read API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_READ_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
final TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
res.setMessage(e.getMessage());
return new PipeTableResp(res, Collections.emptyList()).convertToTShowPipeResp();
@@ -225,7 +227,7 @@ public TGetAllPipeInfoResp getAllPipeInfo() {
return ((PipeTableResp) configManager.getConsensusManager().read(new ShowPipePlanV2()))
.convertToTGetAllPipeInfoResp();
} catch (IOException | ConsensusException e) {
- LOGGER.warn("Failed to get all pipe info.", e);
+ LOGGER.warn(ManagerMessages.FAILED_TO_GET_ALL_PIPE_INFO, e);
return new TGetAllPipeInfoResp(
new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode()).setMessage(e.getMessage()),
Collections.emptyList());
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/task/PipeTaskCoordinatorLock.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/task/PipeTaskCoordinatorLock.java
index b86c556f20df8..0f788435394f2 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/task/PipeTaskCoordinatorLock.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/coordinator/task/PipeTaskCoordinatorLock.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.confignode.manager.pipe.coordinator.task;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
+
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -42,15 +44,17 @@ public class PipeTaskCoordinatorLock {
public void lock() {
LOGGER.debug(
- "PipeTaskCoordinator lock waiting for thread {}", Thread.currentThread().getName());
+ ManagerMessages.PIPETASKCOORDINATOR_LOCK_WAITING_FOR_THREAD,
+ Thread.currentThread().getName());
try {
semaphore.acquire();
LOGGER.debug(
- "PipeTaskCoordinator lock acquired by thread {}", Thread.currentThread().getName());
+ ManagerMessages.PIPETASKCOORDINATOR_LOCK_ACQUIRED_BY_THREAD,
+ Thread.currentThread().getName());
} catch (final InterruptedException e) {
Thread.currentThread().interrupt();
LOGGER.error(
- "Interrupted while waiting for PipeTaskCoordinator lock, current thread: {}",
+ ManagerMessages.INTERRUPTED_WHILE_WAITING_FOR_PIPETASKCOORDINATOR_LOCK_CURRENT_THREAD,
Thread.currentThread().getName());
}
}
@@ -58,21 +62,23 @@ public void lock() {
public boolean tryLock() {
try {
LOGGER.debug(
- "PipeTaskCoordinator lock waiting for thread {}", Thread.currentThread().getName());
+ ManagerMessages.PIPETASKCOORDINATOR_LOCK_WAITING_FOR_THREAD,
+ Thread.currentThread().getName());
if (semaphore.tryAcquire(10, TimeUnit.SECONDS)) {
LOGGER.debug(
- "PipeTaskCoordinator lock acquired by thread {}", Thread.currentThread().getName());
+ ManagerMessages.PIPETASKCOORDINATOR_LOCK_ACQUIRED_BY_THREAD,
+ Thread.currentThread().getName());
return true;
} else {
LOGGER.info(
- "PipeTaskCoordinator lock failed to acquire by thread {} because of timeout",
+ ManagerMessages.PIPETASKCOORDINATOR_LOCK_FAILED_TO_ACQUIRE_BY_THREAD_BECAUSE_OF_TIMEOUT,
Thread.currentThread().getName());
return false;
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
LOGGER.error(
- "Interrupted while waiting for PipeTaskCoordinator lock, current thread: {}",
+ ManagerMessages.INTERRUPTED_WHILE_WAITING_FOR_PIPETASKCOORDINATOR_LOCK_CURRENT_THREAD,
Thread.currentThread().getName());
return false;
}
@@ -81,7 +87,8 @@ public boolean tryLock() {
public void unlock() {
semaphore.release();
LOGGER.debug(
- "PipeTaskCoordinator lock released by thread {}", Thread.currentThread().getName());
+ ManagerMessages.PIPETASKCOORDINATOR_LOCK_RELEASED_BY_THREAD,
+ Thread.currentThread().getName());
}
public boolean isLocked() {
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/event/PipeConfigRegionSnapshotEvent.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/event/PipeConfigRegionSnapshotEvent.java
index 9f0e6c64839e6..0d0d8374c3391 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/event/PipeConfigRegionSnapshotEvent.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/event/PipeConfigRegionSnapshotEvent.java
@@ -27,6 +27,7 @@
import org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager.PipeEventResource;
import org.apache.iotdb.commons.pipe.resource.snapshot.PipeSnapshotResourceManager;
import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.pipe.resource.PipeConfigNodeResourceManager;
import org.apache.iotdb.confignode.persistence.schema.CNSnapshotFileType;
import org.apache.iotdb.db.pipe.event.ReferenceTrackableEvent;
@@ -349,7 +350,7 @@ protected void finalizeResource() {
resourceManager.decreaseSnapshotReference(templateFilePath);
}
} catch (final Exception e) {
- LOGGER.warn("Decrease reference count for snapshot {} error.", snapshotPath, e);
+ LOGGER.warn(ManagerMessages.DECREASE_REFERENCE_COUNT_FOR_SNAPSHOT_ERROR, snapshotPath, e);
}
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/event/PipeConfigSerializableEventType.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/event/PipeConfigSerializableEventType.java
index e7f2f8fd21284..f6e4f79d58978 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/event/PipeConfigSerializableEventType.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/event/PipeConfigSerializableEventType.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.confignode.manager.pipe.event;
import org.apache.iotdb.commons.pipe.event.SerializableEvent;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import java.io.IOException;
import java.nio.ByteBuffer;
@@ -69,7 +70,7 @@ public static SerializableEvent deserialize(ByteBuffer buffer, byte eventType)
event = new PipeConfigRegionSnapshotEvent();
break;
default:
- throw new IllegalArgumentException("Invalid event type: " + eventType);
+ throw new IllegalArgumentException(ManagerMessages.INVALID_EVENT_TYPE + eventType);
}
event.deserializeFromByteBuffer(buffer);
return event;
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeConfigNodeRemainingTimeMetrics.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeConfigNodeRemainingTimeMetrics.java
index bd4f42042a1e4..0e9c44f05d2ad 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeConfigNodeRemainingTimeMetrics.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeConfigNodeRemainingTimeMetrics.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.commons.pipe.agent.task.progress.PipeEventCommitManager;
import org.apache.iotdb.commons.service.metric.enums.Metric;
import org.apache.iotdb.commons.service.metric.enums.Tag;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.pipe.source.IoTDBConfigRegionSource;
import org.apache.iotdb.metrics.AbstractMetricService;
import org.apache.iotdb.metrics.metricsets.IMetricSet;
@@ -77,7 +78,8 @@ public void unbindFrom(final AbstractMetricService metricService) {
ImmutableSet.copyOf(remainingTimeOperatorMap.keySet()).forEach(this::deregister);
if (!remainingTimeOperatorMap.isEmpty()) {
LOGGER.warn(
- "Failed to unbind from pipe remaining time metrics, RemainingTimeOperator map not empty");
+ ManagerMessages
+ .FAILED_TO_UNBIND_FROM_PIPE_REMAINING_TIME_METRICS_REMAININGTIMEOPERATOR_MAP);
}
}
@@ -133,7 +135,8 @@ public void freezeRate(final String pipeID) {
public void deregister(final String pipeID) {
if (!remainingTimeOperatorMap.containsKey(pipeID)) {
LOGGER.warn(
- "Failed to deregister pipe remaining time metrics, RemainingTimeOperator({}) does not exist",
+ ManagerMessages
+ .FAILED_TO_DEREGISTER_PIPE_REMAINING_TIME_METRICS_REMAININGTIMEOPERATOR_DOES_NOT,
pipeID);
return;
}
@@ -149,7 +152,8 @@ public void markRegionCommit(final String pipeID, final boolean isDataRegion) {
final PipeConfigNodeRemainingTimeOperator operator = remainingTimeOperatorMap.get(pipeID);
if (Objects.isNull(operator)) {
LOGGER.info(
- "Failed to mark pipe region commit, RemainingTimeOperator({}) does not exist", pipeID);
+ ManagerMessages.FAILED_TO_MARK_PIPE_REGION_COMMIT_REMAININGTIMEOPERATOR_DOES_NOT_EXIST,
+ pipeID);
return;
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeProcedureMetrics.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeProcedureMetrics.java
index 102c050c8f252..fdce3b6b4ecbf 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeProcedureMetrics.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeProcedureMetrics.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.service.metric.enums.Metric;
import org.apache.iotdb.commons.service.metric.enums.Tag;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.procedure.impl.pipe.PipeTaskOperation;
import org.apache.iotdb.metrics.AbstractMetricService;
import org.apache.iotdb.metrics.metricsets.IMetricSet;
@@ -70,7 +71,8 @@ public void unbindFrom(AbstractMetricService metricService) {
public void updateTimer(String name, long durationMillis) {
Timer timer = timerMap.get(name);
if (timer == null) {
- LOGGER.warn("Failed to update pipe procedure timer, PipeProcedure({}) does not exist", name);
+ LOGGER.warn(
+ ManagerMessages.FAILED_TO_UPDATE_PIPE_PROCEDURE_TIMER_PIPEPROCEDURE_DOES_NOT_EXIST, name);
return;
}
timer.updateMillis(durationMillis);
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeTemporaryMetaInCoordinatorMetrics.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeTemporaryMetaInCoordinatorMetrics.java
index 8ebc9d22b3c69..19604e0d6c17b 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeTemporaryMetaInCoordinatorMetrics.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/overview/PipeTemporaryMetaInCoordinatorMetrics.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTemporaryMetaInCoordinator;
import org.apache.iotdb.commons.service.metric.enums.Metric;
import org.apache.iotdb.commons.service.metric.enums.Tag;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.metrics.AbstractMetricService;
import org.apache.iotdb.metrics.metricsets.IMetricSet;
import org.apache.iotdb.metrics.utils.MetricLevel;
@@ -94,7 +95,7 @@ public void unbindFrom(final AbstractMetricService metricService) {
ImmutableSet.copyOf(pipeTemporaryMetaMap.keySet()).forEach(this::deregister);
if (!pipeTemporaryMetaMap.isEmpty()) {
LOGGER.warn(
- "Failed to unbind from pipe temporary meta metrics, PipeTemporaryMeta map not empty");
+ ManagerMessages.FAILED_TO_UNBIND_FROM_PIPE_TEMPORARY_META_METRICS_PIPETEMPORARYMETA_MAP);
}
}
@@ -136,7 +137,8 @@ public void register(final PipeMeta pipeMeta) {
public void deregister(final String pipeID) {
if (!pipeTemporaryMetaMap.containsKey(pipeID)) {
LOGGER.warn(
- "Failed to deregister pipe temporary meta metrics, PipeTemporaryMeta({}) does not exist",
+ ManagerMessages
+ .FAILED_TO_DEREGISTER_PIPE_TEMPORARY_META_METRICS_PIPETEMPORARYMETA_DOES_NOT,
pipeID);
return;
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/sink/PipeConfigRegionSinkMetrics.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/sink/PipeConfigRegionSinkMetrics.java
index aa3661440fa1a..0ca45d4c3648d 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/sink/PipeConfigRegionSinkMetrics.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/sink/PipeConfigRegionSinkMetrics.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.service.metric.enums.Metric;
import org.apache.iotdb.commons.service.metric.enums.Tag;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.pipe.agent.task.PipeConfigNodeSubtask;
import org.apache.iotdb.metrics.AbstractMetricService;
import org.apache.iotdb.metrics.metricsets.IMetricSet;
@@ -77,7 +78,7 @@ public void unbindFrom(final AbstractMetricService metricService) {
ImmutableSet.copyOf(subtaskMap.keySet()).forEach(this::deregister);
if (!subtaskMap.isEmpty()) {
LOGGER.warn(
- "Failed to unbind from pipe config region connector metrics, connector map not empty");
+ ManagerMessages.FAILED_TO_UNBIND_FROM_PIPE_CONFIG_REGION_CONNECTOR_METRICS_CONNECTOR);
}
}
@@ -110,9 +111,7 @@ public void register(final PipeConfigNodeSubtask pipeConfigNodeSubtask) {
public void deregister(final String taskID) {
if (!subtaskMap.containsKey(taskID)) {
- LOGGER.warn(
- "Failed to deregister pipe config region connector metrics, PipeConfigNodeSubtask({}) does not exist",
- taskID);
+ LOGGER.warn(ManagerMessages.FAILED_TO_DEREGISTER_PIPE_CONFIG_REGION_CONNECTOR, taskID);
return;
}
if (Objects.nonNull(metricService)) {
@@ -128,7 +127,7 @@ public void markConfigEvent(final String taskID) {
final Rate rate = configRateMap.get(taskID);
if (rate == null) {
LOGGER.info(
- "Failed to mark pipe config region write plan event, PipeConfigNodeSubtask({}) does not exist",
+ ManagerMessages.FAILED_TO_MARK_PIPE_CONFIG_REGION_WRITE_PLAN_EVENT_PIPECONFIGNODESUBTASK,
taskID);
return;
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/source/PipeConfigRegionSourceMetrics.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/source/PipeConfigRegionSourceMetrics.java
index 7219dcac67fea..428b4b4026344 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/source/PipeConfigRegionSourceMetrics.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/source/PipeConfigRegionSourceMetrics.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.service.metric.enums.Metric;
import org.apache.iotdb.commons.service.metric.enums.Tag;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.pipe.source.IoTDBConfigRegionSource;
import org.apache.iotdb.metrics.AbstractMetricService;
import org.apache.iotdb.metrics.metricsets.IMetricSet;
@@ -73,7 +74,7 @@ public void unbindFrom(final AbstractMetricService metricService) {
ImmutableSet.copyOf(extractorMap.keySet()).forEach(this::deregister);
if (!extractorMap.isEmpty()) {
LOGGER.warn(
- "Failed to unbind from pipe config region extractor metrics, extractor map not empty");
+ ManagerMessages.FAILED_TO_UNBIND_FROM_PIPE_CONFIG_REGION_EXTRACTOR_METRICS_EXTRACTOR);
}
}
@@ -105,9 +106,7 @@ public void register(final IoTDBConfigRegionSource extractor) {
public void deregister(final String taskID) {
if (!extractorMap.containsKey(taskID)) {
- LOGGER.warn(
- "Failed to deregister pipe config region extractor metrics, IoTDBConfigRegionExtractor({}) does not exist",
- taskID);
+ LOGGER.warn(ManagerMessages.FAILED_TO_DEREGISTER_PIPE_CONFIG_REGION_EXTRACTOR, taskID);
return;
}
if (Objects.nonNull(metricService)) {
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/receiver/protocol/IoTDBConfigNodeReceiver.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/receiver/protocol/IoTDBConfigNodeReceiver.java
index 81e82b96adee8..7de8e8bf78d9d 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/receiver/protocol/IoTDBConfigNodeReceiver.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/receiver/protocol/IoTDBConfigNodeReceiver.java
@@ -89,6 +89,7 @@
import org.apache.iotdb.confignode.consensus.request.write.template.ExtendSchemaTemplatePlan;
import org.apache.iotdb.confignode.consensus.request.write.trigger.DeleteTriggerInTablePlan;
import org.apache.iotdb.confignode.consensus.request.write.trigger.UpdateTriggerStateInTablePlan;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.manager.pipe.event.PipeConfigRegionSnapshotEvent;
import org.apache.iotdb.confignode.manager.pipe.metric.receiver.PipeConfigNodeReceiverMetrics;
@@ -237,7 +238,7 @@ public TPipeTransferResp receive(final TPipeTransferReq req) {
TSStatusCode.PIPE_TYPE_ERROR,
String.format("Unsupported PipeRequestType on ConfigNode %s.", rawRequestType));
LOGGER.warn(
- "Receiver id = {}: Unsupported PipeRequestType on ConfigNode, response status = {}.",
+ ManagerMessages.RECEIVER_ID_UNSUPPORTED_PIPEREQUESTTYPE_ON_CONFIGNODE_RESPONSE_STATUS,
receiverId.get(),
status);
return new TPipeTransferResp(status);
@@ -245,7 +246,7 @@ public TPipeTransferResp receive(final TPipeTransferReq req) {
final String error =
"Exception encountered while handling pipe transfer request. Root cause: "
+ e.getMessage();
- LOGGER.warn("Receiver id = {}: {}", receiverId.get(), error, e);
+ LOGGER.warn(ManagerMessages.RECEIVER_ID, receiverId.get(), error, e);
return new TPipeTransferResp(RpcUtils.getStatus(TSStatusCode.PIPE_ERROR, error));
}
}
@@ -273,7 +274,7 @@ private TSStatus executePlanAndClassifyExceptions(final ConfigPhysicalPlan plan)
result = checkPermission(plan);
if (result.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.warn(
- "Receiver id = {}: Permission check failed while executing plan {}: {}",
+ ManagerMessages.RECEIVER_ID_PERMISSION_CHECK_FAILED_WHILE_EXECUTING_PLAN,
receiverId.get(),
plan,
result);
@@ -282,7 +283,7 @@ private TSStatus executePlanAndClassifyExceptions(final ConfigPhysicalPlan plan)
result = executePlan(plan);
if (result.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.warn(
- "Receiver id = {}: Failure status encountered while executing plan {}: {}",
+ ManagerMessages.RECEIVER_ID_FAILURE_STATUS_ENCOUNTERED_WHILE_EXECUTING_PLAN,
receiverId.get(),
plan,
result);
@@ -290,7 +291,7 @@ private TSStatus executePlanAndClassifyExceptions(final ConfigPhysicalPlan plan)
}
} catch (final Exception e) {
LOGGER.warn(
- "Receiver id = {}: Exception encountered while executing plan {}: ",
+ ManagerMessages.RECEIVER_ID_EXCEPTION_ENCOUNTERED_WHILE_EXECUTING_PLAN,
receiverId.get(),
plan,
e);
@@ -1252,7 +1253,7 @@ protected String getSenderPort() {
protected TSStatus loadFileV1(
final PipeTransferFileSealReqV1 req, final String fileAbsolutePath) {
throw new UnsupportedOperationException(
- "IoTDBConfigNodeReceiver does not support load file V1.");
+ ManagerMessages.IOTDBCONFIGNODERECEIVER_DOES_NOT_SUPPORT_LOAD_FILE_V1);
}
@Override
@@ -1269,7 +1270,8 @@ protected TSStatus loadFileV2(
parameters.getOrDefault("authUserName", ""));
if (Objects.isNull(generator)) {
throw new IOException(
- String.format("The config region snapshots %s cannot be parsed.", fileAbsolutePaths));
+ String.format(
+ ManagerMessages.THE_CONFIG_REGION_SNAPSHOTS_CANNOT_BE_PARSED, fileAbsolutePaths));
}
final Set executionTypes =
PipeConfigRegionSnapshotEvent.getConfigPhysicalPlanTypeSet(
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/resource/PipeConfigNodeCopiedFileDirStartupCleaner.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/resource/PipeConfigNodeCopiedFileDirStartupCleaner.java
index 69848cdb34ca6..722af841cc1ef 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/resource/PipeConfigNodeCopiedFileDirStartupCleaner.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/resource/PipeConfigNodeCopiedFileDirStartupCleaner.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.commons.pipe.resource.snapshot.PipeSnapshotResourceManager;
import org.apache.iotdb.commons.utils.FileUtils;
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -41,7 +42,7 @@ public static void clean() {
+ File.separator
+ PipeSnapshotResourceManager.PIPE_SNAPSHOT_DIR_NAME);
if (iotConsensusV2Dir.isDirectory()) {
- LOGGER.info("Pipe snapshot dir found, deleting it: {},", iotConsensusV2Dir);
+ LOGGER.info(ManagerMessages.PIPE_SNAPSHOT_DIR_FOUND_DELETING_IT, iotConsensusV2Dir);
FileUtils.deleteFileOrDirectory(iotConsensusV2Dir);
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/protocol/IoTDBConfigRegionAirGapSink.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/protocol/IoTDBConfigRegionAirGapSink.java
index 882c33cb65732..c8c2890f5be3e 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/protocol/IoTDBConfigRegionAirGapSink.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/protocol/IoTDBConfigRegionAirGapSink.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
import org.apache.iotdb.commons.pipe.sink.payload.thrift.common.PipeTransferHandshakeConstant;
import org.apache.iotdb.commons.pipe.sink.protocol.IoTDBAirGapSink;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.pipe.event.PipeConfigRegionSnapshotEvent;
import org.apache.iotdb.confignode.manager.pipe.event.PipeConfigRegionWritePlanEvent;
import org.apache.iotdb.confignode.manager.pipe.sink.payload.PipeTransferConfigNodeHandshakeV1Req;
@@ -106,7 +107,7 @@ protected boolean mayNeedHandshakeWhenFail() {
protected byte[] getTransferSingleFilePieceBytes(
final String fileName, final long position, final byte[] payLoad) {
throw new UnsupportedOperationException(
- "The config region air gap connector does not support transferring single file piece bytes.");
+ ManagerMessages.THE_CONFIG_REGION_AIR_GAP_CONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING);
}
@Override
@@ -118,13 +119,13 @@ protected byte[] getTransferMultiFilePieceBytes(
@Override
public void transfer(final TabletInsertionEvent tabletInsertionEvent) throws Exception {
throw new UnsupportedOperationException(
- "IoTDBConfigRegionAirGapConnector can't transfer TabletInsertionEvent.");
+ ManagerMessages.IOTDBCONFIGREGIONAIRGAPCONNECTOR_CAN_T_TRANSFER_TABLETINSERTIONEVENT);
}
@Override
public void transfer(final TsFileInsertionEvent tsFileInsertionEvent) throws Exception {
throw new UnsupportedOperationException(
- "IoTDBConfigRegionAirGapConnector can't transfer TsFileInsertionEvent.");
+ ManagerMessages.IOTDBCONFIGREGIONAIRGAPCONNECTOR_CAN_T_TRANSFER_TSFILEINSERTIONEVENT);
}
@Override
@@ -139,7 +140,8 @@ public void transfer(final Event event) throws Exception {
doTransferWrapper(socket, (PipeConfigRegionSnapshotEvent) event);
} else if (!(event instanceof PipeHeartbeatEvent)) {
LOGGER.warn(
- "IoTDBConfigRegionAirGapConnector does not support transferring generic event: {}.",
+ ManagerMessages
+ .IOTDBCONFIGREGIONAIRGAPCONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT,
event);
}
} catch (final IOException e) {
@@ -147,8 +149,9 @@ public void transfer(final Event event) throws Exception {
throw new PipeConnectionException(
String.format(
- "Network error when transfer event %s, because %s.",
- ((EnrichedEvent) event).coreReportMessage(), e.getMessage()),
+ ManagerMessages.NETWORK_ERROR_WHEN_TRANSFER_EVENT_BECAUSE,
+ ((EnrichedEvent) event).coreReportMessage(),
+ e.getMessage()),
e);
}
}
@@ -256,7 +259,7 @@ private void doTransfer(
pipeConfigRegionSnapshotEvent.toString(),
true);
} else {
- LOGGER.info("Successfully transferred config region snapshot {}.", snapshot);
+ LOGGER.info(ManagerMessages.SUCCESSFULLY_TRANSFERRED_CONFIG_REGION_SNAPSHOT, snapshot);
}
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/protocol/IoTDBConfigRegionSink.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/protocol/IoTDBConfigRegionSink.java
index f06a47d45410b..07e64321de78a 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/protocol/IoTDBConfigRegionSink.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/sink/protocol/IoTDBConfigRegionSink.java
@@ -27,6 +27,7 @@
import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferFilePieceReq;
import org.apache.iotdb.commons.pipe.sink.protocol.IoTDBSslSyncSink;
import org.apache.iotdb.confignode.conf.ConfigNodeConfig;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.pipe.event.PipeConfigRegionSnapshotEvent;
import org.apache.iotdb.confignode.manager.pipe.event.PipeConfigRegionWritePlanEvent;
import org.apache.iotdb.confignode.manager.pipe.sink.client.IoTDBConfigNodeSyncClientManager;
@@ -97,7 +98,7 @@ protected IoTDBSyncClientManager constructClient(
protected PipeTransferFilePieceReq getTransferSingleFilePieceReq(
final String fileName, final long position, final byte[] payLoad) {
throw new UnsupportedOperationException(
- "The config region sink does not support transferring single file piece req.");
+ ManagerMessages.THE_CONFIG_REGION_SINK_DOES_NOT_SUPPORT_TRANSFERRING_SINGLE_FILE);
}
@Override
@@ -114,13 +115,13 @@ protected void mayLimitRateAndRecordIO(final long requiredBytes) {
@Override
public void transfer(final TabletInsertionEvent tabletInsertionEvent) throws Exception {
throw new UnsupportedOperationException(
- "IoTDBConfigRegionSink can't transfer TabletInsertionEvent.");
+ ManagerMessages.IOTDBCONFIGREGIONSINK_CAN_T_TRANSFER_TABLETINSERTIONEVENT);
}
@Override
public void transfer(final TsFileInsertionEvent tsFileInsertionEvent) throws Exception {
throw new UnsupportedOperationException(
- "IoTDBConfigRegionSink can't transfer TsFileInsertionEvent.");
+ ManagerMessages.IOTDBCONFIGREGIONSINK_CAN_T_TRANSFER_TSFILEINSERTIONEVENT);
}
@Override
@@ -130,7 +131,8 @@ public void transfer(final Event event) throws Exception {
} else if (event instanceof PipeConfigRegionSnapshotEvent) {
doTransferWrapper((PipeConfigRegionSnapshotEvent) event);
} else if (!(event instanceof PipeHeartbeatEvent)) {
- LOGGER.warn("IoTDBConfigRegionSink does not support transferring generic event: {}.", event);
+ LOGGER.warn(
+ ManagerMessages.IOTDBCONFIGREGIONSINK_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT, event);
}
}
@@ -169,8 +171,9 @@ private void doTransfer(final PipeConfigRegionWritePlanEvent pipeConfigRegionWri
clientAndStatus.setRight(false);
throw new PipeConnectionException(
String.format(
- "Network error when transfer config region write plan %s, because %s.",
- pipeConfigRegionWritePlanEvent.getConfigPhysicalPlan().getType(), e.getMessage()),
+ ManagerMessages.NETWORK_ERROR_WHEN_TRANSFER_CONFIG_REGION_WRITE_PLAN_BECAUSE,
+ pipeConfigRegionWritePlanEvent.getConfigPhysicalPlan().getType(),
+ e.getMessage()),
e);
}
@@ -191,7 +194,8 @@ private void doTransfer(final PipeConfigRegionWritePlanEvent pipeConfigRegionWri
true);
}
- LOGGER.info("Successfully transferred config event {}.", pipeConfigRegionWritePlanEvent);
+ LOGGER.info(
+ ManagerMessages.SUCCESSFULLY_TRANSFERRED_CONFIG_EVENT, pipeConfigRegionWritePlanEvent);
}
private void doTransferWrapper(final PipeConfigRegionSnapshotEvent pipeConfigRegionSnapshotEvent)
@@ -259,8 +263,9 @@ private void doTransfer(final PipeConfigRegionSnapshotEvent snapshotEvent)
clientAndStatus.setRight(false);
throw new PipeConnectionException(
String.format(
- "Network error when seal config region snapshot %s, because %s.",
- snapshotFile, e.getMessage()),
+ ManagerMessages.NETWORK_ERROR_WHEN_SEAL_CONFIG_REGION_SNAPSHOT_BECAUSE,
+ snapshotFile,
+ e.getMessage()),
e);
}
@@ -281,6 +286,6 @@ private void doTransfer(final PipeConfigRegionSnapshotEvent snapshotEvent)
true);
}
- LOGGER.info("Successfully transferred config region snapshot {}.", snapshotFile);
+ LOGGER.info(ManagerMessages.SUCCESSFULLY_TRANSFERRED_CONFIG_REGION_SNAPSHOT, snapshotFile);
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/ConfigRegionListeningQueue.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/ConfigRegionListeningQueue.java
index 988ead4b8ad6b..2d34eff77430e 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/ConfigRegionListeningQueue.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/ConfigRegionListeningQueue.java
@@ -35,6 +35,7 @@
import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeUnsetSchemaTemplatePlan;
import org.apache.iotdb.confignode.consensus.request.write.table.CommitCreateTablePlan;
import org.apache.iotdb.confignode.consensus.request.write.template.UnsetSchemaTemplatePlan;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.pipe.event.PipeConfigRegionSnapshotEvent;
import org.apache.iotdb.confignode.manager.pipe.event.PipeConfigRegionWritePlanEvent;
import org.apache.iotdb.confignode.manager.pipe.event.PipeConfigSerializableEventType;
@@ -90,7 +91,7 @@ public synchronized void tryListenToPlan(
((UnsetSchemaTemplatePlan) plan).getPath().getFullPath()),
isGeneratedByPipe);
} catch (final MetadataException e) {
- LOGGER.warn("Failed to collect UnsetTemplatePlan", e);
+ LOGGER.warn(ManagerMessages.FAILED_TO_COLLECT_UNSETTEMPLATEPLAN, e);
return;
}
break;
@@ -113,7 +114,7 @@ public synchronized void tryListenToPlan(
((CommitCreateTablePlan) plan).getDatabase(), table.get()),
isGeneratedByPipe);
} catch (final MetadataException e) {
- LOGGER.warn("Failed to collect CommitCreateTablePlan", e);
+ LOGGER.warn(ManagerMessages.FAILED_TO_COLLECT_COMMITCREATETABLEPLAN, e);
return;
}
break;
@@ -168,7 +169,7 @@ public synchronized void tryListenToSnapshots(
.getPermissionManager()
.getUserName(userId));
} catch (AuthException e) {
- LOGGER.warn("Failed to collect user name for user id {}", userId, e);
+ LOGGER.warn(ManagerMessages.FAILED_TO_COLLECT_USER_NAME_FOR_USER_ID, userId, e);
}
} else {
curEvent.setAuthUserName(userName);
@@ -195,7 +196,7 @@ protected Event deserializeFromByteBuffer(final ByteBuffer byteBuffer) {
((EnrichedEvent) result).increaseReferenceCount(ConfigRegionListeningQueue.class.getName());
return result;
} catch (final IOException e) {
- LOGGER.error("Failed to load snapshot from byteBuffer {}.", byteBuffer);
+ LOGGER.error(ManagerMessages.FAILED_TO_LOAD_SNAPSHOT_FROM_BYTEBUFFER, byteBuffer);
}
return null;
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/IoTDBConfigRegionSource.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/IoTDBConfigRegionSource.java
index 762e8c154e513..8b4f99f5b26db 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/IoTDBConfigRegionSource.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/IoTDBConfigRegionSource.java
@@ -39,6 +39,7 @@
import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlanType;
import org.apache.iotdb.confignode.consensus.request.write.database.DatabaseSchemaPlan;
import org.apache.iotdb.confignode.consensus.request.write.database.DeleteDatabasePlan;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.PermissionManager;
import org.apache.iotdb.confignode.manager.pipe.agent.PipeConfigNodeAgent;
import org.apache.iotdb.confignode.manager.pipe.event.PipeConfigRegionSnapshotEvent;
@@ -100,7 +101,8 @@ public void customize(
.getConfigNodeConsensusProtocolClass()
.equals(ConsensusFactory.SIMPLE_CONSENSUS)) {
throw new PipeException(
- "IoTDBConfigRegionSource does not transferring events under simple consensus");
+ ManagerMessages
+ .IOTDBCONFIGREGIONSOURCE_DOES_NOT_TRANSFERRING_EVENTS_UNDER_SIMPLE_CONSENSUS);
}
super.customize(parameters, configuration);
@@ -121,7 +123,7 @@ protected void login(final @Nonnull String password) {
.getCode()
!= TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
throw new PipePasswordCheckException(
- String.format("Failed to check password for pipe %s.", pipeName));
+ String.format(ManagerMessages.FAILED_TO_CHECK_PASSWORD_FOR_PIPE, pipeName));
}
}
@@ -146,7 +148,8 @@ protected void triggerSnapshot() {
new ConfigRegionId(ConfigNodeDescriptor.getInstance().getConf().getConfigRegionId()),
true);
} catch (final ConsensusException e) {
- throw new PipeException("Exception encountered when triggering schema region snapshot.", e);
+ throw new PipeException(
+ ManagerMessages.EXCEPTION_ENCOUNTERED_WHEN_TRIGGERING_SCHEMA_REGION_SNAPSHOT, e);
}
}
@@ -283,7 +286,7 @@ protected Optional trimRealtimeEventByPrivilege(
if (skipIfNoPrivileges) {
return Optional.empty();
}
- throw new AccessDeniedException("Not has privilege to transfer plan: " + plan);
+ throw new AccessDeniedException(ManagerMessages.NOT_HAS_PRIVILEGE_TO_TRANSFER_PLAN + plan);
}
@Override
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/PipeConfigTreePatternParseVisitor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/PipeConfigTreePatternParseVisitor.java
index cb332742fde77..859772fa78aed 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/PipeConfigTreePatternParseVisitor.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/PipeConfigTreePatternParseVisitor.java
@@ -42,6 +42,7 @@
import org.apache.iotdb.confignode.consensus.request.write.template.CommitSetSchemaTemplatePlan;
import org.apache.iotdb.confignode.consensus.request.write.template.CreateSchemaTemplatePlan;
import org.apache.iotdb.confignode.consensus.request.write.template.ExtendSchemaTemplatePlan;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.pipe.event.PipeConfigRegionWritePlanEvent;
import org.apache.iotdb.db.schemaengine.template.alter.TemplateExtendInfo;
@@ -230,9 +231,7 @@ public Optional visitPipeDeleteTimeSeries(
? Optional.of(new PipeDeleteTimeSeriesPlan(intersectedTree.serialize()))
: Optional.empty();
} catch (final IOException e) {
- LOGGER.warn(
- "Serialization failed for the delete time series plan in pipe transmission, skip transfer",
- e);
+ LOGGER.warn(ManagerMessages.SERIALIZATION_FAILED_FOR_THE_DELETE_TIME_SERIES_PLAN_IN_PIPE, e);
return Optional.empty();
}
}
@@ -249,9 +248,7 @@ public Optional visitPipeDeleteLogicalView(
? Optional.of(new PipeDeleteLogicalViewPlan(intersectedTree.serialize()))
: Optional.empty();
} catch (final IOException e) {
- LOGGER.warn(
- "Serialization failed for the delete logical view plan in pipe transmission, skip transfer",
- e);
+ LOGGER.warn(ManagerMessages.SERIALIZATION_FAILED_FOR_THE_DELETE_LOGICAL_VIEW_PLAN_IN_PIPE, e);
return Optional.empty();
}
}
@@ -274,8 +271,7 @@ public Optional visitPipeAlterEncodingCompressor(
: Optional.empty();
} catch (final IOException e) {
LOGGER.warn(
- "Serialization failed for the alter encoding time series plan in pipe transmission, skip transfer",
- e);
+ ManagerMessages.SERIALIZATION_FAILED_FOR_THE_ALTER_ENCODING_TIME_SERIES_PLAN_IN, e);
return Optional.empty();
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/PipeConfigTreePrivilegeParseVisitor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/PipeConfigTreePrivilegeParseVisitor.java
index 8f7c15aab380a..aa81c0681ba7d 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/PipeConfigTreePrivilegeParseVisitor.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/source/PipeConfigTreePrivilegeParseVisitor.java
@@ -47,6 +47,7 @@
import org.apache.iotdb.confignode.consensus.request.write.template.CommitSetSchemaTemplatePlan;
import org.apache.iotdb.confignode.consensus.request.write.template.CreateSchemaTemplatePlan;
import org.apache.iotdb.confignode.consensus.request.write.template.ExtendSchemaTemplatePlan;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.service.ConfigNode;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -156,7 +157,7 @@ public boolean canShowSchemaTemplate(final String templateName, final IAuditEnti
.anyMatch(path -> hasReadPrivilege(userEntity, path, true, true));
} catch (final Exception e) {
LOGGER.warn(
- "Un-parse-able path name encountered during template privilege trimming, please check",
+ ManagerMessages.UN_PARSE_ABLE_PATH_NAME_ENCOUNTERED_DURING_TEMPLATE_PRIVILEGE_TRIMMING,
e);
return false;
}
@@ -272,7 +273,7 @@ public Optional visitPipeDeleteTimeSeries(
if (!skip && !originalTree.equals(intersectedTree)) {
logger.recordObjectAuthenticationAuditLog(userEntity.setResult(false), () -> auditObject);
throw new AccessDeniedException(
- "Not has privilege to transfer plan: " + pipeDeleteTimeSeriesPlan);
+ ManagerMessages.NOT_HAS_PRIVILEGE_TO_TRANSFER_PLAN + pipeDeleteTimeSeriesPlan);
}
final boolean result = !intersectedTree.isEmpty();
logger.recordObjectAuthenticationAuditLog(userEntity.setResult(result), () -> auditObject);
@@ -280,9 +281,7 @@ public Optional visitPipeDeleteTimeSeries(
? Optional.of(new PipeDeleteTimeSeriesPlan(intersectedTree.serialize()))
: Optional.empty();
} catch (final IOException e) {
- LOGGER.warn(
- "Serialization failed for the delete time series plan in pipe transmission, skip transfer",
- e);
+ LOGGER.warn(ManagerMessages.SERIALIZATION_FAILED_FOR_THE_DELETE_TIME_SERIES_PLAN_IN_PIPE, e);
logger.recordObjectAuthenticationAuditLog(userEntity.setResult(false), () -> auditObject);
return Optional.empty();
} catch (final AuthException e) {
@@ -291,7 +290,7 @@ public Optional visitPipeDeleteTimeSeries(
return Optional.empty();
} else {
throw new AccessDeniedException(
- "Not has privilege to transfer plan: " + pipeDeleteTimeSeriesPlan);
+ ManagerMessages.NOT_HAS_PRIVILEGE_TO_TRANSFER_PLAN + pipeDeleteTimeSeriesPlan);
}
}
}
@@ -310,7 +309,7 @@ public Optional visitPipeDeleteLogicalView(
if (!skip && !originalTree.equals(intersectedTree)) {
logger.recordObjectAuthenticationAuditLog(userEntity.setResult(false), () -> auditObject);
throw new AccessDeniedException(
- "Not has privilege to transfer plan: " + pipeDeleteLogicalViewPlan);
+ ManagerMessages.NOT_HAS_PRIVILEGE_TO_TRANSFER_PLAN + pipeDeleteLogicalViewPlan);
}
final boolean result = !intersectedTree.isEmpty();
logger.recordObjectAuthenticationAuditLog(userEntity.setResult(result), () -> auditObject);
@@ -318,9 +317,7 @@ public Optional visitPipeDeleteLogicalView(
? Optional.of(new PipeDeleteLogicalViewPlan(intersectedTree.serialize()))
: Optional.empty();
} catch (final IOException e) {
- LOGGER.warn(
- "Serialization failed for the delete time series plan in pipe transmission, skip transfer",
- e);
+ LOGGER.warn(ManagerMessages.SERIALIZATION_FAILED_FOR_THE_DELETE_TIME_SERIES_PLAN_IN_PIPE, e);
logger.recordObjectAuthenticationAuditLog(userEntity.setResult(false), () -> auditObject);
return Optional.empty();
} catch (final AuthException e) {
@@ -329,7 +326,7 @@ public Optional visitPipeDeleteLogicalView(
return Optional.empty();
} else {
throw new AccessDeniedException(
- "Not has privilege to transfer plan: " + pipeDeleteLogicalViewPlan);
+ ManagerMessages.NOT_HAS_PRIVILEGE_TO_TRANSFER_PLAN + pipeDeleteLogicalViewPlan);
}
}
}
@@ -349,7 +346,7 @@ public Optional visitPipeAlterEncodingCompressor(
if (!skip && !originalTree.equals(intersectedTree)) {
logger.recordObjectAuthenticationAuditLog(userEntity.setResult(false), () -> auditObject);
throw new AccessDeniedException(
- "Not has privilege to transfer plan: " + pipeAlterEncodingCompressor);
+ ManagerMessages.NOT_HAS_PRIVILEGE_TO_TRANSFER_PLAN + pipeAlterEncodingCompressor);
}
final boolean result = !intersectedTree.isEmpty();
logger.recordObjectAuthenticationAuditLog(userEntity.setResult(result), () -> auditObject);
@@ -362,9 +359,7 @@ public Optional visitPipeAlterEncodingCompressor(
pipeAlterEncodingCompressor.isMayAlterAudit()))
: Optional.empty();
} catch (final IOException e) {
- LOGGER.warn(
- "Serialization failed for the delete time series plan in pipe transmission, skip transfer",
- e);
+ LOGGER.warn(ManagerMessages.SERIALIZATION_FAILED_FOR_THE_DELETE_TIME_SERIES_PLAN_IN_PIPE, e);
logger.recordObjectAuthenticationAuditLog(userEntity.setResult(false), () -> auditObject);
return Optional.empty();
} catch (final AuthException e) {
@@ -373,7 +368,7 @@ public Optional visitPipeAlterEncodingCompressor(
return Optional.empty();
} else {
throw new AccessDeniedException(
- "Not has privilege to transfer plan: " + pipeAlterEncodingCompressor);
+ ManagerMessages.NOT_HAS_PRIVILEGE_TO_TRANSFER_PLAN + pipeAlterEncodingCompressor);
}
}
}
@@ -409,7 +404,7 @@ public Optional visitPipeDeactivateTemplate(
return Optional.empty();
} else {
throw new AccessDeniedException(
- "Not has privilege to transfer plan: " + pipeDeactivateTemplatePlan);
+ ManagerMessages.NOT_HAS_PRIVILEGE_TO_TRANSFER_PLAN + pipeDeactivateTemplatePlan);
}
}
}
@@ -437,7 +432,8 @@ public Optional visitTTL(
if (skip) {
return Optional.empty();
} else {
- throw new AccessDeniedException("Not has privilege to transfer plan: " + setTTLPlan);
+ throw new AccessDeniedException(
+ ManagerMessages.NOT_HAS_PRIVILEGE_TO_TRANSFER_PLAN + setTTLPlan);
}
}
}
@@ -472,7 +468,7 @@ public Optional visitPipeAlterTimeSeries(
return Optional.empty();
} else {
throw new AccessDeniedException(
- "Not has privilege to transfer plan: " + pipeAlterTimeSeriesPlan);
+ ManagerMessages.NOT_HAS_PRIVILEGE_TO_TRANSFER_PLAN + pipeAlterTimeSeriesPlan);
}
}
}
@@ -486,7 +482,7 @@ private List getAllIntersectedPatterns(
final PathPatternTree intersectedTree =
thisPatternTree.intersectWithFullPathPrefixTree(getAuthorizedPTree(userEntity));
if (!skip && !thisPatternTree.equals(intersectedTree)) {
- throw new AccessDeniedException("Not has privilege to transfer plan: " + plan);
+ throw new AccessDeniedException(ManagerMessages.NOT_HAS_PRIVILEGE_TO_TRANSFER_PLAN + plan);
}
return intersectedTree.getAllPathPatterns();
}
@@ -593,7 +589,7 @@ private boolean hasReadPrivilege(
try {
partialPath = new PartialPath(path);
} catch (final IllegalPathException e) {
- LOGGER.warn("Unable to parse path when checking READ privilege, path: {}", path);
+ LOGGER.warn(ManagerMessages.UNABLE_TO_PARSE_PATH_WHEN_CHECKING_READ_PRIVILEGE_PATH, path);
return false;
}
if (withWildcard) {
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/schema/ClusterSchemaManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/schema/ClusterSchemaManager.java
index f823f9177ac97..bbe6bad0405cc 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/schema/ClusterSchemaManager.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/schema/ClusterSchemaManager.java
@@ -87,6 +87,9 @@
import org.apache.iotdb.confignode.consensus.response.template.TemplateInfoResp;
import org.apache.iotdb.confignode.consensus.response.template.TemplateSetInfoResp;
import org.apache.iotdb.confignode.exception.DatabaseNotExistsException;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.manager.IManager;
import org.apache.iotdb.confignode.manager.consensus.ConsensusManager;
import org.apache.iotdb.confignode.manager.node.NodeManager;
@@ -403,7 +406,7 @@ public TShowDatabaseResp showDatabase(final GetDatabasePlan getDatabasePlan) {
} catch (final DatabaseNotExistsException e) {
// Skip pre-deleted Database
LOGGER.warn(
- "The Database: {} doesn't exist. Maybe it has been pre-deleted.",
+ ManagerMessages.THE_DATABASE_DOESN_T_EXIST_MAYBE_IT_HAS_BEEN_PRE,
databaseSchema.getName());
continue;
}
@@ -426,7 +429,7 @@ public Map getTTLInfoForUpgrading() {
}
infoMap.put(database, ttl);
} catch (final DatabaseNotExistsException e) {
- LOGGER.warn("Database: {} doesn't exist", databases, e);
+ LOGGER.warn(ManagerMessages.DATABASE_DOESN_T_EXIST, databases, e);
}
}
return infoMap;
@@ -530,7 +533,7 @@ public synchronized void adjustMaxRegionGroupNum() {
databaseSchema.getSchemaReplicationFactor(),
allocatedSchemaRegionGroupCount);
LOGGER.info(
- "[AdjustRegionGroupNum] The maximum number of SchemaRegionGroups for Database: {} is adjusted to: {}",
+ ConfigNodeMessages.ADJUSTREGIONGROUPNUM_THE_MAXIMUM_NUMBER_OF_SCHEMAREGIONGROUPS_FOR,
databaseSchema.getName(),
maxSchemaRegionGroupNum);
@@ -558,7 +561,7 @@ public synchronized void adjustMaxRegionGroupNum() {
databaseSchema.getDataReplicationFactor(),
allocatedDataRegionGroupCount);
LOGGER.info(
- "[AdjustRegionGroupNum] The maximum number of DataRegionGroups for Database: {} is adjusted to: {}",
+ ConfigNodeMessages.ADJUSTREGIONGROUPNUM_THE_MAXIMUM_NUMBER_OF_DATAREGIONGROUPS_FOR,
databaseSchema.getName(),
maxDataRegionGroupNum);
@@ -907,7 +910,7 @@ public static TSStatus enrichDatabaseSchemaWithDefaultProperties(
}
if (errorResp != null) {
- LOGGER.warn("Execute SetDatabase: {} with result: {}", databaseSchema, errorResp);
+ LOGGER.warn(ConfigNodeMessages.EXECUTE_SETDATABASE_WITH_RESULT, databaseSchema, errorResp);
return errorResp;
}
@@ -1171,7 +1174,7 @@ public synchronized TSStatus extendSchemaTemplate(
for (Map.Entry entry : statusMap.entrySet()) {
if (entry.getValue().getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.warn(
- "Failed to sync template {} extension info to DataNode {}",
+ ManagerMessages.FAILED_TO_SYNC_TEMPLATE_EXTENSION_INFO_TO_DATANODE,
template.getName(),
dataNodeLocationMap.get(entry.getKey()));
return RpcUtils.getStatus(
@@ -1201,7 +1204,7 @@ public TShowTableResp showTables(final String database, final boolean isDetails)
configManager.getConsensusManager().read(new ShowTablePlan(database, isDetails)))
.convertToTShowTableResp();
} catch (final ConsensusException e) {
- LOGGER.warn("Failed in the read API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_READ_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
final TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
res.setMessage(e.getMessage());
return new TShowTableResp(res);
@@ -1214,7 +1217,7 @@ public TShowTable4InformationSchemaResp showTables4InformationSchema() {
configManager.getConsensusManager().read(new ShowTable4InformationSchemaPlan()))
.convertToTShowTable4InformationSchemaResp();
} catch (final ConsensusException e) {
- LOGGER.warn("Failed in the read API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_READ_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
final TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
res.setMessage(e.getMessage());
return new TShowTable4InformationSchemaResp(res);
@@ -1230,7 +1233,7 @@ public TDescTableResp describeTable(
.read(new DescTablePlan(database, tableName, isDetails)))
.convertToTDescTableResp();
} catch (final ConsensusException e) {
- LOGGER.warn("Failed in the read API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_READ_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
final TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
res.setMessage(e.getMessage());
return new TDescTableResp(res);
@@ -1243,7 +1246,7 @@ public TDescTable4InformationSchemaResp describeTables4InformationSchema() {
configManager.getConsensusManager().read(new DescTable4InformationSchemaPlan()))
.convertToTDescTable4InformationSchemaResp();
} catch (final ConsensusException e) {
- LOGGER.warn("Failed in the read API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_READ_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
final TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
res.setMessage(e.getMessage());
return new TDescTable4InformationSchemaResp(res);
@@ -1256,7 +1259,7 @@ public TFetchTableResp fetchTables(final Map> fetchTableMap)
configManager.getConsensusManager().read(new FetchTablePlan(fetchTableMap)))
.convertToTFetchTableResp();
} catch (final ConsensusException e) {
- LOGGER.warn("Failed in the read API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_READ_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
final TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
res.setMessage(e.getMessage());
return new TFetchTableResp(res);
@@ -1506,7 +1509,7 @@ public synchronized Pair tableCheckForRenaming(
return new Pair<>(
RpcUtils.getStatus(
TSStatusCode.TABLE_ALREADY_EXISTS,
- String.format("Table '%s.%s' already exists.", database, newName)),
+ String.format(ProcedureMessages.TABLE_ALREADY_EXISTS, database, newName)),
null);
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/subscription/SubscriptionCoordinator.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/subscription/SubscriptionCoordinator.java
index 038167ae58cbe..83031570fa61e 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/subscription/SubscriptionCoordinator.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/subscription/SubscriptionCoordinator.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.confignode.consensus.request.read.subscription.ShowTopicPlan;
import org.apache.iotdb.confignode.consensus.response.subscription.SubscriptionTableResp;
import org.apache.iotdb.confignode.consensus.response.subscription.TopicTableResp;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.manager.pipe.coordinator.task.PipeTaskCoordinatorLock;
import org.apache.iotdb.confignode.persistence.subscription.SubscriptionInfo;
@@ -140,7 +141,7 @@ public TSStatus createTopic(TCreateTopicReq req) {
final TSStatus status = configManager.getProcedureManager().createTopic(req);
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.warn(
- "Failed to create topic {} with attributes {}. Result status: {}.",
+ ManagerMessages.FAILED_TO_CREATE_TOPIC_WITH_ATTRIBUTES_RESULT_STATUS,
req.getTopicName(),
req.getTopicAttributes(),
status);
@@ -169,7 +170,7 @@ public TShowTopicResp showTopic(TShowTopicReq req) {
.filter(req.topicName, req.isTableModel)
.convertToTShowTopicResp();
} catch (Exception e) {
- LOGGER.warn("Failed to show topic info.", e);
+ LOGGER.warn(ManagerMessages.FAILED_TO_SHOW_TOPIC_INFO, e);
return new TopicTableResp(
new TSStatus(TSStatusCode.SHOW_TOPIC_ERROR.getStatusCode())
.setMessage(e.getMessage()),
@@ -183,7 +184,7 @@ public TGetAllTopicInfoResp getAllTopicInfo() {
return ((TopicTableResp) configManager.getConsensusManager().read(new ShowTopicPlan()))
.convertToTGetAllTopicInfoResp();
} catch (Exception e) {
- LOGGER.warn("Failed to get all topic info.", e);
+ LOGGER.warn(ManagerMessages.FAILED_TO_GET_ALL_TOPIC_INFO, e);
return new TGetAllTopicInfoResp(
new TSStatus(TSStatusCode.SHOW_TOPIC_ERROR.getStatusCode()).setMessage(e.getMessage()),
Collections.emptyList());
@@ -194,7 +195,7 @@ public TSStatus createConsumer(TCreateConsumerReq req) {
final TSStatus status = configManager.getProcedureManager().createConsumer(req);
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.warn(
- "Failed to create consumer {} in consumer group {}. Result status: {}.",
+ ManagerMessages.FAILED_TO_CREATE_CONSUMER_IN_CONSUMER_GROUP_RESULT_STATUS,
req.getConsumerId(),
req.getConsumerGroupId(),
status);
@@ -206,7 +207,7 @@ public TSStatus dropConsumer(TCloseConsumerReq req) {
final TSStatus status = configManager.getProcedureManager().dropConsumer(req);
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.warn(
- "Failed to close consumer {} in consumer group {}. Result status: {}.",
+ ManagerMessages.FAILED_TO_CLOSE_CONSUMER_IN_CONSUMER_GROUP_RESULT_STATUS,
req.getConsumerId(),
req.getConsumerGroupId(),
status);
@@ -218,7 +219,7 @@ public TSStatus createSubscription(TSubscribeReq req) {
final TSStatus status = configManager.getProcedureManager().createSubscription(req);
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.warn(
- "Consumer {} in consumer group {} failed to subscribe topics {}. Result status: {}.",
+ ManagerMessages.CONSUMER_IN_CONSUMER_GROUP_FAILED_TO_SUBSCRIBE_TOPICS_RESULT_STATUS,
req.getConsumerId(),
req.getConsumerGroupId(),
req.getTopicNames(),
@@ -231,7 +232,7 @@ public TSStatus dropSubscription(TUnsubscribeReq req) {
final TSStatus status = configManager.getProcedureManager().dropSubscription(req);
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.warn(
- "Consumer {} in consumer group {} failed to unsubscribe topics {}. Result status: {}.",
+ ManagerMessages.CONSUMER_IN_CONSUMER_GROUP_FAILED_TO_UNSUBSCRIBE_TOPICS_RESULT_STATUS,
req.getConsumerId(),
req.getConsumerGroupId(),
req.getTopicNames(),
@@ -272,7 +273,7 @@ public TShowSubscriptionResp showSubscription(TShowSubscriptionReq req) {
.filter(req.getTopicName(), req.isTableModel)
.convertToTShowSubscriptionResp();
} catch (Exception e) {
- LOGGER.warn("Failed to show subscription info.", e);
+ LOGGER.warn(ManagerMessages.FAILED_TO_SHOW_SUBSCRIPTION_INFO, e);
return new SubscriptionTableResp(
new TSStatus(TSStatusCode.SHOW_SUBSCRIPTION_ERROR.getStatusCode())
.setMessage(e.getMessage()),
@@ -288,7 +289,7 @@ public TGetAllSubscriptionInfoResp getAllSubscriptionInfo() {
configManager.getConsensusManager().read(new ShowSubscriptionPlan()))
.convertToTGetAllSubscriptionInfoResp();
} catch (Exception e) {
- LOGGER.warn("Failed to get all subscription info.", e);
+ LOGGER.warn(ManagerMessages.FAILED_TO_GET_ALL_SUBSCRIPTION_INFO, e);
return new TGetAllSubscriptionInfoResp(
new TSStatus(TSStatusCode.SHOW_SUBSCRIPTION_ERROR.getStatusCode())
.setMessage(e.getMessage()),
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/subscription/SubscriptionMetaSyncer.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/subscription/SubscriptionMetaSyncer.java
index de49987e13fbe..f1861d063abb9 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/subscription/SubscriptionMetaSyncer.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/subscription/SubscriptionMetaSyncer.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.commons.concurrent.ThreadName;
import org.apache.iotdb.commons.concurrent.threadpool.ScheduledExecutorUtil;
import org.apache.iotdb.commons.subscription.config.SubscriptionConfig;
+import org.apache.iotdb.confignode.i18n.ManagerMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.manager.ProcedureManager;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -70,7 +71,7 @@ public synchronized void start() {
INITIAL_SYNC_DELAY_MINUTES,
SYNC_INTERVAL_MINUTES,
TimeUnit.MINUTES);
- LOGGER.info("SubscriptionMetaSyncer is started successfully.");
+ LOGGER.info(ManagerMessages.SUBSCRIPTIONMETASYNCER_IS_STARTED_SUCCESSFULLY);
}
}
@@ -84,7 +85,7 @@ private synchronized void sync() {
if (configManager.getSubscriptionManager().getSubscriptionCoordinator().isLocked()) {
LOGGER.warn(
- "SubscriptionCoordinatorLock is held by another thread, skip this round of sync to avoid procedure and rpc accumulation as much as possible");
+ ManagerMessages.SUBSCRIPTIONCOORDINATORLOCK_IS_HELD_BY_ANOTHER_THREAD_SKIP_THIS_ROUND_OF);
return;
}
@@ -94,7 +95,7 @@ private synchronized void sync() {
// TODO: consider drop the topic which is subscribed by consumers
final TSStatus topicMetaSyncStatus = procedureManager.topicMetaSync();
if (topicMetaSyncStatus.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.warn("Failed to sync topic meta. Result status: {}.", topicMetaSyncStatus);
+ LOGGER.warn(ManagerMessages.FAILED_TO_SYNC_TOPIC_META_RESULT_STATUS, topicMetaSyncStatus);
return;
}
@@ -102,12 +103,13 @@ private synchronized void sync() {
final TSStatus consumerGroupMetaSyncStatus = procedureManager.consumerGroupMetaSync();
if (consumerGroupMetaSyncStatus.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.warn(
- "Failed to sync consumer group meta. Result status: {}.", consumerGroupMetaSyncStatus);
+ ManagerMessages.FAILED_TO_SYNC_CONSUMER_GROUP_META_RESULT_STATUS,
+ consumerGroupMetaSyncStatus);
return;
}
LOGGER.info(
- "After this successful sync, if SubscriptionInfo is empty during this sync and has not been modified afterwards, all subsequent syncs will be skipped");
+ ManagerMessages.AFTER_THIS_SUCCESSFUL_SYNC_IF_SUBSCRIPTIONINFO_IS_EMPTY_DURING_THIS);
isLastSubscriptionSyncSuccessful = true;
}
@@ -115,7 +117,7 @@ public synchronized void stop() {
if (metaSyncFuture != null) {
metaSyncFuture.cancel(false);
metaSyncFuture = null;
- LOGGER.info("SubscriptionMetaSyncer is stopped successfully.");
+ LOGGER.info(ManagerMessages.SUBSCRIPTIONMETASYNCER_IS_STOPPED_SUCCESSFULLY);
}
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ClusterInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ClusterInfo.java
index b40bc75728d1c..0eaf9ae65c133 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ClusterInfo.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ClusterInfo.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.commons.snapshot.SnapshotProcessor;
import org.apache.iotdb.confignode.consensus.request.write.confignode.UpdateClusterIdPlan;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.thrift.TException;
@@ -51,7 +52,7 @@ public String getClusterId() {
public TSStatus updateClusterId(UpdateClusterIdPlan updateClusterIdPlan) {
this.clusterId = updateClusterIdPlan.getClusterId();
- LOGGER.info("clusterID has been generated: {}", clusterId);
+ LOGGER.info(ConfigNodeMessages.CLUSTERID_HAS_BEEN_GENERATED, clusterId);
return RpcUtils.SUCCESS_STATUS;
}
@@ -60,7 +61,7 @@ public boolean processTakeSnapshot(File snapshotDir) throws TException, IOExcept
File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME);
if (snapshotFile.exists() && snapshotFile.isFile()) {
LOGGER.error(
- "Failed to take snapshot, because snapshot file [{}] is already exist.",
+ ConfigNodeMessages.FAILED_TO_TAKE_SNAPSHOT_BECAUSE_SNAPSHOT_FILE_IS_ALREADY_EXIST,
snapshotFile.getAbsolutePath());
return false;
}
@@ -84,7 +85,7 @@ public void processLoadSnapshot(File snapshotDir) throws TException, IOException
File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME);
if (!snapshotFile.exists() || !snapshotFile.isFile()) {
LOGGER.error(
- "Failed to load snapshot,snapshot file [{}] is not exist.",
+ ConfigNodeMessages.FAILED_TO_LOAD_SNAPSHOT_SNAPSHOT_FILE_IS_NOT_EXIST_2,
snapshotFile.getAbsolutePath());
return;
}
@@ -93,7 +94,7 @@ public void processLoadSnapshot(File snapshotDir) throws TException, IOException
clusterId = ReadWriteIOUtils.readString(fileInputStream);
}
- LOGGER.info("clusterID has been recovered from snapshot: {}", clusterId);
+ LOGGER.info(ConfigNodeMessages.CLUSTERID_HAS_BEEN_RECOVERED_FROM_SNAPSHOT, clusterId);
}
@Override
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ProcedureInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ProcedureInfo.java
index 5589fdd37994b..63855d9646d30 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ProcedureInfo.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ProcedureInfo.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.commons.utils.TestOnly;
import org.apache.iotdb.confignode.consensus.request.write.procedure.DeleteProcedurePlan;
import org.apache.iotdb.confignode.consensus.request.write.procedure.UpdateProcedurePlan;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.procedure.Procedure;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
@@ -102,7 +103,7 @@ public List> oldLoad() {
Long.parseLong(p2.getFileName().toString().split("\\.")[0])))
.forEach(path -> loadProcedure(path).ifPresent(procedureList::add));
} catch (IOException e) {
- LOGGER.error("Load procedure wal failed.", e);
+ LOGGER.error(ConfigNodeMessages.LOAD_PROCEDURE_WAL_FAILED, e);
}
procedureList.forEach(procedure -> procedureMap.put(procedure.getProcId(), procedure));
procedureList.forEach(
@@ -113,22 +114,23 @@ public List> oldLoad() {
public void upgrade() {
if (isOldVersion()) {
try {
- LOGGER.info("Old procedure files have been loaded successfully, taking snapshot...");
+ LOGGER.info(
+ ConfigNodeMessages.OLD_PROCEDURE_FILES_HAVE_BEEN_LOADED_SUCCESSFULLY_TAKING_SNAPSHOT);
configManager.getConsensusManager().manuallyTakeSnapshot();
} catch (ConsensusException e) {
- LOGGER.warn("Taking snapshot fail, procedure upgrade fail", e);
+ LOGGER.warn(ConfigNodeMessages.TAKING_SNAPSHOT_FAIL_PROCEDURE_UPGRADE_FAIL, e);
return;
}
try {
FileUtils.recursivelyDeleteFolder(OLD_PROCEDURE_WAL_DIR);
} catch (IOException e) {
- LOGGER.error("Delete useless procedure wal dir fail.", e);
+ LOGGER.error(ConfigNodeMessages.DELETE_USELESS_PROCEDURE_WAL_DIR_FAIL, e);
LOGGER.error(
- "You should manually delete the procedure wal dir before ConfigNode restart. {}",
+ ConfigNodeMessages.YOU_SHOULD_MANUALLY_DELETE_THE_PROCEDURE_WAL_DIR_BEFORE_CONFIGNODE,
OLD_PROCEDURE_WAL_DIR);
}
LOGGER.info(
- "The Procedure framework has been successfully upgraded. Now it uses the consensus layer's services instead of maintaining the WAL itself.");
+ ConfigNodeMessages.THE_PROCEDURE_FRAMEWORK_HAS_BEEN_SUCCESSFULLY_UPGRADED_NOW_IT_USES);
}
}
@@ -148,7 +150,7 @@ public TSStatus oldUpdateProcedure(UpdateProcedurePlan updateProcedurePlan) {
try {
procedureWAL.save(procedure);
} catch (IOException e) {
- LOGGER.error("Update Procedure (pid={}) wal failed", procedure.getProcId(), e);
+ LOGGER.error(ConfigNodeMessages.UPDATE_PROCEDURE_PID_WAL_FAILED, procedure.getProcId(), e);
return new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
}
return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
@@ -172,9 +174,10 @@ private static Optional loadProcedure(Path procedureFilePath) {
return Optional.ofNullable(procedure);
}
} catch (Exception e) {
- LOGGER.error("Load {} failed, it will be deleted.", procedureFilePath, e);
+ LOGGER.error(ConfigNodeMessages.LOAD_FAILED_IT_WILL_BE_DELETED, procedureFilePath, e);
if (!procedureFilePath.toFile().delete()) {
- LOGGER.error("{} deleted failed; take appropriate action.", procedureFilePath, e);
+ LOGGER.error(
+ ConfigNodeMessages.DELETED_FAILED_TAKE_APPROPRIATE_ACTION, procedureFilePath, e);
}
}
return Optional.empty();
@@ -185,13 +188,13 @@ public boolean processTakeSnapshot(File snapshotDir) throws TException, IOExcept
File procedureSnapshotDir = new File(snapshotDir, PROCEDURE_SNAPSHOT_DIR);
if (procedureSnapshotDir.exists()) {
LOGGER.error(
- "Failed to take snapshot, because snapshot dir [{}] is already exist.",
+ ConfigNodeMessages.FAILED_TO_TAKE_SNAPSHOT_BECAUSE_SNAPSHOT_DIR_IS_ALREADY_EXIST,
procedureSnapshotDir.getAbsolutePath());
return false;
}
File tmpDir = new File(procedureSnapshotDir.getAbsolutePath() + "-" + UUID.randomUUID());
if (!tmpDir.mkdir()) {
- LOGGER.error("Failed to take snapshot, because create tmp dir [{}] fail.", tmpDir);
+ LOGGER.error(ConfigNodeMessages.FAILED_TO_TAKE_SNAPSHOT_BECAUSE_CREATE_TMP_DIR_FAIL, tmpDir);
return false;
}
@@ -223,7 +226,10 @@ public boolean processTakeSnapshot(File snapshotDir) throws TException, IOExcept
} catch (IOException e) {
snapshotAllSuccess.set(false);
LOGGER.warn(
- "{} id {} took snapshot fail", procedure.getClass(), procedure.getProcId(), e);
+ ConfigNodeMessages.ID_TOOK_SNAPSHOT_FAIL,
+ procedure.getClass(),
+ procedure.getProcId(),
+ e);
}
});
if (!snapshotAllSuccess.get()) {
@@ -238,7 +244,7 @@ public void processLoadSnapshot(File snapshotDir) throws TException, IOException
File procedureSnapshotDir = new File(snapshotDir, PROCEDURE_SNAPSHOT_DIR);
if (!procedureSnapshotDir.exists() || !procedureSnapshotDir.isDirectory()) {
LOGGER.error(
- "Failed to load snapshot, because snapshot dir [{}] not exists.",
+ ConfigNodeMessages.FAILED_TO_LOAD_SNAPSHOT_BECAUSE_SNAPSHOT_DIR_NOT_EXISTS,
procedureSnapshotDir.getAbsolutePath());
return;
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/TTLInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/TTLInfo.java
index f38e711f6d552..7b98ebba50b87 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/TTLInfo.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/TTLInfo.java
@@ -31,6 +31,7 @@
import org.apache.iotdb.confignode.consensus.request.read.ttl.ShowTTLPlan;
import org.apache.iotdb.confignode.consensus.request.write.database.SetTTLPlan;
import org.apache.iotdb.confignode.consensus.response.ttl.ShowTTLResp;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -198,7 +199,7 @@ public boolean processTakeSnapshot(File snapshotDir) throws TException, IOExcept
File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME);
if (snapshotFile.exists() && snapshotFile.isFile()) {
LOGGER.error(
- "Failed to take snapshot of TTLInfo, because snapshot file [{}] is already exist.",
+ ConfigNodeMessages.FAILED_TO_TAKE_SNAPSHOT_OF_TTLINFO_BECAUSE_SNAPSHOT_FILE_IS,
snapshotFile.getAbsolutePath());
return false;
}
@@ -218,7 +219,7 @@ public void processLoadSnapshot(File snapshotDir) throws TException, IOException
File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME);
if (!snapshotFile.exists() || !snapshotFile.isFile()) {
LOGGER.error(
- "Failed to load snapshot of TTLInfo, snapshot file [{}] does not exist.",
+ ConfigNodeMessages.FAILED_TO_LOAD_SNAPSHOT_OF_TTLINFO_SNAPSHOT_FILE_DOES_NOT,
snapshotFile.getAbsolutePath());
return;
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/TriggerInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/TriggerInfo.java
index 065ae826fb804..f8cd8efd1cf0a 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/TriggerInfo.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/TriggerInfo.java
@@ -42,6 +42,7 @@
import org.apache.iotdb.confignode.consensus.response.trigger.TransferringTriggersResp;
import org.apache.iotdb.confignode.consensus.response.trigger.TriggerLocationResp;
import org.apache.iotdb.confignode.consensus.response.trigger.TriggerTableResp;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.rpc.thrift.TTriggerState;
import org.apache.iotdb.consensus.common.DataSet;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -89,12 +90,12 @@ public TriggerInfo() throws IOException {
}
public void acquireTriggerTableLock() {
- LOGGER.info("acquire TriggerTableLock");
+ LOGGER.info(ConfigNodeMessages.ACQUIRE_TRIGGERTABLELOCK);
triggerTableLock.lock();
}
public void releaseTriggerTableLock() {
- LOGGER.info("release TriggerTableLock");
+ LOGGER.info(ConfigNodeMessages.RELEASE_TRIGGERTABLELOCK);
triggerTableLock.unlock();
}
@@ -104,15 +105,17 @@ public void validate(String triggerName, String jarName, String jarMD5)
if (triggerTable.containsTrigger(triggerName)) {
throw new TriggerManagementException(
String.format(
- "Failed to create trigger [%s], the same name trigger has been created",
+ ConfigNodeMessages.FAILED_TO_CREATE_TRIGGER_THE_SAME_NAME_TRIGGER_HAS_BEEN,
triggerName));
}
if (existedJarToMD5.containsKey(jarName) && !existedJarToMD5.get(jarName).equals(jarMD5)) {
throw new TriggerManagementException(
String.format(
- "Failed to create trigger [%s], the same name Jar [%s] but different MD5 [%s] has existed",
- triggerName, jarName, jarMD5));
+ ConfigNodeMessages.FAILED_TO_CREATE_TRIGGER_THE_SAME_NAME_JAR_BUT_DIFFERENT,
+ triggerName,
+ jarName,
+ jarMD5));
}
}
@@ -123,7 +126,8 @@ public void validate(String triggerName) throws TriggerManagementException {
}
throw new TriggerManagementException(
String.format(
- "Failed to drop trigger [%s], this trigger has not been created", triggerName));
+ ConfigNodeMessages.FAILED_TO_DROP_TRIGGER_THIS_TRIGGER_HAS_NOT_BEEN_CREATED,
+ triggerName));
}
public boolean needToSaveJar(String jarName) {
@@ -202,7 +206,7 @@ public JarResp getTriggerJar(GetTriggerJarPlan physicalPlan) {
TriggerExecutableManager.getInstance().getFileStringUnderInstallByName(jarName)));
}
} catch (Exception e) {
- LOGGER.error("Get TriggerJar failed", e);
+ LOGGER.error(ConfigNodeMessages.GET_TRIGGERJAR_FAILED, e);
return new JarResp(
new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode())
.setMessage("Get TriggerJar failed, because " + e.getMessage()),
@@ -242,7 +246,7 @@ public boolean processTakeSnapshot(File snapshotDir) throws TException, IOExcept
File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME);
if (snapshotFile.exists() && snapshotFile.isFile()) {
LOGGER.error(
- "Failed to take snapshot, because snapshot file [{}] is already exist.",
+ ConfigNodeMessages.FAILED_TO_TAKE_SNAPSHOT_BECAUSE_SNAPSHOT_FILE_IS_ALREADY_EXIST,
snapshotFile.getAbsolutePath());
return false;
}
@@ -264,7 +268,7 @@ public void processLoadSnapshot(File snapshotDir) throws TException, IOException
File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME);
if (!snapshotFile.exists() || !snapshotFile.isFile()) {
LOGGER.error(
- "Failed to load snapshot,snapshot file [{}] is not exist.",
+ ConfigNodeMessages.FAILED_TO_LOAD_SNAPSHOT_SNAPSHOT_FILE_IS_NOT_EXIST_2,
snapshotFile.getAbsolutePath());
return;
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/UDFInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/UDFInfo.java
index 308a442f76525..5b3dfe186642d 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/UDFInfo.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/UDFInfo.java
@@ -36,6 +36,7 @@
import org.apache.iotdb.confignode.consensus.request.write.function.UpdateFunctionPlan;
import org.apache.iotdb.confignode.consensus.response.JarResp;
import org.apache.iotdb.confignode.consensus.response.function.FunctionTableResp;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.consensus.common.DataSet;
import org.apache.iotdb.rpc.TSStatusCode;
import org.apache.iotdb.udf.api.exception.UDFManagementException;
@@ -83,12 +84,12 @@ public UDFInfo() throws IOException {
}
public void acquireUDFTableLock() {
- LOGGER.info("acquire UDFTableLock");
+ LOGGER.info(ConfigNodeMessages.ACQUIRE_UDFTABLELOCK);
udfTableLock.lock();
}
public void releaseUDFTableLock() {
- LOGGER.info("release UDFTableLock");
+ LOGGER.info(ConfigNodeMessages.RELEASE_UDFTABLELOCK);
udfTableLock.unlock();
}
@@ -98,15 +99,18 @@ public void validate(Model model, String udfName, String jarName, String jarMD5)
if (udfTable.containsUDF(model, udfName)
&& udfTable.getUDFInformation(model, udfName).isAvailable()) {
throw new IoTDBRuntimeException(
- String.format("Failed to create UDF [%s], the same name UDF has been created", udfName),
+ String.format(
+ ConfigNodeMessages.FAILED_TO_CREATE_UDF_THE_SAME_NAME_UDF_HAS_BEEN, udfName),
TSStatusCode.UDF_ALREADY_EXISTS.getStatusCode());
}
if (existedJarToMD5.containsKey(jarName) && !existedJarToMD5.get(jarName).equals(jarMD5)) {
throw new IoTDBRuntimeException(
String.format(
- "Failed to create UDF [%s], the same name Jar [%s] but different MD5 [%s] has existed",
- udfName, jarName, jarMD5),
+ ConfigNodeMessages.FAILED_TO_CREATE_UDF_THE_SAME_NAME_JAR_BUT_DIFFERENT,
+ udfName,
+ jarName,
+ jarMD5),
TSStatusCode.UDF_ALREADY_EXISTS.getStatusCode());
}
}
@@ -118,7 +122,8 @@ public UDFInformation getUDFInformation(Model model, String udfName)
return udfTable.getUDFInformation(model, udfName);
}
throw new UDFManagementException(
- String.format("Failed to drop UDF [%s], this UDF has not been created", udfName));
+ String.format(
+ ConfigNodeMessages.FAILED_TO_DROP_UDF_THIS_UDF_HAS_NOT_BEEN_CREATED, udfName));
}
public boolean needToSaveJar(String jarName) {
@@ -169,7 +174,7 @@ public JarResp getUDFJar(GetUDFJarPlan physicalPlan) {
UDFExecutableManager.getInstance().getFileStringUnderInstallByName(jarName)));
}
} catch (Exception e) {
- LOGGER.error("Get UDF_Jar failed", e);
+ LOGGER.error(ConfigNodeMessages.GET_UDF_JAR_FAILED, e);
return new JarResp(
new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode())
.setMessage("Get UDF_Jar failed, because " + e.getMessage()),
@@ -207,7 +212,7 @@ public boolean processTakeSnapshot(File snapshotDir) throws IOException {
File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME);
if (snapshotFile.exists() && snapshotFile.isFile()) {
LOGGER.error(
- "Failed to take snapshot, because snapshot file [{}] is already exist.",
+ ConfigNodeMessages.FAILED_TO_TAKE_SNAPSHOT_BECAUSE_SNAPSHOT_FILE_IS_ALREADY_EXIST,
snapshotFile.getAbsolutePath());
return false;
}
@@ -230,7 +235,7 @@ public void processLoadSnapshot(File snapshotDir) throws IOException {
File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME);
if (!snapshotFile.exists() || !snapshotFile.isFile()) {
LOGGER.error(
- "Failed to load snapshot,snapshot file [{}] is not exist.",
+ ConfigNodeMessages.FAILED_TO_LOAD_SNAPSHOT_SNAPSHOT_FILE_IS_NOT_EXIST_2,
snapshotFile.getAbsolutePath());
return;
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/auth/AuthorInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/auth/AuthorInfo.java
index d7404009825ae..bb0fd51c9fb73 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/auth/AuthorInfo.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/auth/AuthorInfo.java
@@ -37,6 +37,7 @@
import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorRelationalPlan;
import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorTreePlan;
import org.apache.iotdb.confignode.consensus.response.auth.PermissionInfoResp;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.rpc.thrift.TAuthizedPatternTreeResp;
import org.apache.iotdb.confignode.rpc.thrift.TPermissionInfoResp;
import org.apache.iotdb.db.queryengine.plan.relational.type.AuthorRType;
@@ -64,13 +65,13 @@ public AuthorInfo() {
authorizer = BasicAuthorizer.getInstance();
authorPlanExecutor = new AuthorPlanExecutor(authorizer);
} catch (AuthException e) {
- LOGGER.error("get user or role permissionInfo failed because ", e);
+ LOGGER.error(ConfigNodeMessages.GET_USER_OR_ROLE_PERMISSIONINFO_FAILED_BECAUSE, e);
}
}
public static ConfigPhysicalPlanType getConfigPhysicalPlanTypeFromAuthorType(int authorType) {
if (authorType < 0) {
- throw new IndexOutOfBoundsException("Invalid Author Type ordinal");
+ throw new IndexOutOfBoundsException(ConfigNodeMessages.INVALID_AUTHOR_TYPE_ORDINAL);
}
ConfigPhysicalPlanType configPhysicalPlanType;
if (authorType >= AuthorType.RENAME_USER.ordinal()) {
@@ -83,7 +84,7 @@ public static ConfigPhysicalPlanType getConfigPhysicalPlanTypeFromAuthorType(int
case UPDATE_USER_MIN_SESSION:
return ConfigPhysicalPlanType.UpdateUserMinSession;
default:
- throw new IndexOutOfBoundsException("Invalid Author Type ordinal");
+ throw new IndexOutOfBoundsException(ConfigNodeMessages.INVALID_AUTHOR_TYPE_ORDINAL);
}
} else {
configPhysicalPlanType =
@@ -259,7 +260,8 @@ public TPermissionInfoResp getUserPermissionInfo(String username, ModelType type
public TSStatus enableSeparationOfAdminPowers(
String systemAdminUsername, String securityAdminUsername, String auditAdminUsername) {
- throw new UnsupportedOperationException("EnableSeparationOfAdminPowers is not supported");
+ throw new UnsupportedOperationException(
+ ConfigNodeMessages.ENABLESEPARATIONOFADMINPOWERS_IS_NOT_SUPPORTED);
}
@TestOnly
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/auth/AuthorPlanExecutor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/auth/AuthorPlanExecutor.java
index 745b0e6d09505..856f402ac8a0b 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/auth/AuthorPlanExecutor.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/auth/AuthorPlanExecutor.java
@@ -37,6 +37,7 @@
import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorRelationalPlan;
import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorTreePlan;
import org.apache.iotdb.confignode.consensus.response.auth.PermissionInfoResp;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.rpc.thrift.TAuthizedPatternTreeResp;
import org.apache.iotdb.confignode.rpc.thrift.TListUserInfo;
import org.apache.iotdb.confignode.rpc.thrift.TPermissionInfoResp;
@@ -438,7 +439,7 @@ public TSStatus executeRelationalAuthorNonQuery(AuthorRelationalPlan authorPlan)
}
break;
default:
- throw new AuthException(TSStatusCode.ILLEGAL_PARAMETER, "not support");
+ throw new AuthException(TSStatusCode.ILLEGAL_PARAMETER, ConfigNodeMessages.NOT_SUPPORT);
}
} catch (AuthException e) {
return RpcUtils.getStatus(e.getCode(), e.getMessage());
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/cq/CQInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/cq/CQInfo.java
index 4c1225ef48a05..9c99cfbb0e8bc 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/cq/CQInfo.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/cq/CQInfo.java
@@ -28,6 +28,7 @@
import org.apache.iotdb.confignode.consensus.request.write.cq.DropCQPlan;
import org.apache.iotdb.confignode.consensus.request.write.cq.UpdateCQLastExecTimePlan;
import org.apache.iotdb.confignode.consensus.response.cq.ShowCQResp;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.rpc.thrift.TCreateCQReq;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -118,15 +119,15 @@ public TSStatus dropCQ(DropCQPlan plan) {
if (cqEntry == null) {
res.code = TSStatusCode.NO_SUCH_CQ.getStatusCode();
res.message = String.format(CQ_NOT_EXIST_FORMAT, cqId);
- LOGGER.warn("Drop CQ {} failed, because it doesn't exist.", cqId);
+ LOGGER.warn(ConfigNodeMessages.DROP_CQ_FAILED_BECAUSE_IT_DOESN_T_EXIST, cqId);
} else if ((md5.isPresent() && !md5.get().equals(cqEntry.md5))) {
res.code = TSStatusCode.NO_SUCH_CQ.getStatusCode();
res.message = String.format(MD5_NOT_MATCH_FORMAT, cqId);
- LOGGER.warn("Drop CQ {} failed, because its MD5 doesn't match.", cqId);
+ LOGGER.warn(ConfigNodeMessages.DROP_CQ_FAILED_BECAUSE_ITS_MD5_DOESN_T_MATCH, cqId);
} else {
cqMap.remove(cqId);
res.code = TSStatusCode.SUCCESS_STATUS.getStatusCode();
- LOGGER.info("Drop CQ {} successfully.", cqId);
+ LOGGER.info(ConfigNodeMessages.DROP_CQ_SUCCESSFULLY, cqId);
}
return res;
} finally {
@@ -217,7 +218,7 @@ public boolean processTakeSnapshot(File snapshotDir) throws TException, IOExcept
File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME);
if (snapshotFile.exists() && snapshotFile.isFile()) {
LOGGER.error(
- "Failed to take snapshot of CQInfo, because snapshot file [{}] is already exist.",
+ ConfigNodeMessages.FAILED_TO_TAKE_SNAPSHOT_OF_CQINFO_BECAUSE_SNAPSHOT_FILE_IS,
snapshotFile.getAbsolutePath());
return false;
}
@@ -253,7 +254,7 @@ public void processLoadSnapshot(File snapshotDir) throws TException, IOException
File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME);
if (!snapshotFile.exists() || !snapshotFile.isFile()) {
LOGGER.error(
- "Failed to load snapshot of CQInfo, snapshot file [{}] does not exist.",
+ ConfigNodeMessages.FAILED_TO_LOAD_SNAPSHOT_OF_CQINFO_SNAPSHOT_FILE_DOES_NOT,
snapshotFile.getAbsolutePath());
return;
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java
index b2bae24de38b7..eb8d5e5538b39 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java
@@ -151,6 +151,7 @@
import org.apache.iotdb.confignode.consensus.request.write.trigger.UpdateTriggersOnTransferNodesPlan;
import org.apache.iotdb.confignode.consensus.response.partition.SchemaNodeManagementResp;
import org.apache.iotdb.confignode.exception.physical.UnknownPhysicalPlanTypeException;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.manager.externalservice.ExternalServiceInfo;
import org.apache.iotdb.confignode.manager.pipe.agent.PipeConfigNodeAgent;
import org.apache.iotdb.confignode.persistence.ClusterInfo;
@@ -698,7 +699,7 @@ public TSStatus executeNonQueryPlan(ConfigPhysicalPlan physicalPlan)
// PipeUnsetTemplate plan will not be written here, and exists only after pipe
// sender collects UnsetTemplatePlan and before receiver calls ConfigManager.
throw new UnsupportedOperationException(
- String.format("Plan type %s is not supported.", physicalPlan.getType()));
+ String.format(ConfigNodeMessages.PLAN_TYPE_IS_NOT_SUPPORTED, physicalPlan.getType()));
case TestOnly:
return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
default:
@@ -711,11 +712,13 @@ public boolean takeSnapshot(File snapshotDir) {
// if it does not exist, print a log to warn there may have a problem.
if (!snapshotDir.exists()) {
LOGGER.warn(
- "snapshot directory [{}] is not exist,start to create it.",
+ ConfigNodeMessages.SNAPSHOT_DIRECTORY_IS_NOT_EXIST_START_TO_CREATE_IT,
snapshotDir.getAbsolutePath());
// Try to create a directory to enable snapshot operation
if (!snapshotDir.mkdirs()) {
- LOGGER.error("snapshot directory [{}] can not be created.", snapshotDir.getAbsolutePath());
+ LOGGER.error(
+ ConfigNodeMessages.SNAPSHOT_DIRECTORY_CAN_NOT_BE_CREATED,
+ snapshotDir.getAbsolutePath());
return false;
}
}
@@ -724,7 +727,8 @@ public boolean takeSnapshot(File snapshotDir) {
// which may result in incorrect results.
File[] fileList = snapshotDir.listFiles();
if (fileList != null && fileList.length > 0) {
- LOGGER.error("Snapshot directory [{}] is not empty.", snapshotDir.getAbsolutePath());
+ LOGGER.error(
+ ConfigNodeMessages.SNAPSHOT_DIRECTORY_IS_NOT_EMPTY, snapshotDir.getAbsolutePath());
return false;
}
@@ -735,16 +739,17 @@ public boolean takeSnapshot(File snapshotDir) {
try {
long startTime = System.currentTimeMillis();
LOGGER.info(
- "[ConfigNodeSnapshot] Start to take snapshot for {} into {}",
+ ConfigNodeMessages.CONFIGNODESNAPSHOT_START_TO_TAKE_SNAPSHOT_FOR_INTO,
x.getClass().getName(),
snapshotDir.getAbsolutePath());
takeSnapshotResult = x.processTakeSnapshot(snapshotDir);
LOGGER.info(
- "[ConfigNodeSnapshot] Finish to take snapshot for {}, time consumption: {} ms",
+ ConfigNodeMessages
+ .CONFIGNODESNAPSHOT_FINISH_TO_TAKE_SNAPSHOT_FOR_TIME_CONSUMPTION_MS,
x.getClass().getName(),
System.currentTimeMillis() - startTime);
} catch (TException | IOException e) {
- LOGGER.error("Take snapshot error", e);
+ LOGGER.error(ConfigNodeMessages.TAKE_SNAPSHOT_ERROR, e);
takeSnapshotResult = false;
} finally {
// If any snapshot fails, the whole fails
@@ -755,7 +760,8 @@ public boolean takeSnapshot(File snapshotDir) {
}
});
if (result.get()) {
- LOGGER.info("[ConfigNodeSnapshot] Task snapshot success, snapshotDir: {}", snapshotDir);
+ LOGGER.info(
+ ConfigNodeMessages.CONFIGNODESNAPSHOT_TASK_SNAPSHOT_SUCCESS_SNAPSHOTDIR, snapshotDir);
}
return result.get();
}
@@ -763,7 +769,7 @@ public boolean takeSnapshot(File snapshotDir) {
public void loadSnapshot(final File latestSnapshotRootDir) {
if (!latestSnapshotRootDir.exists()) {
LOGGER.error(
- "snapshot directory [{}] is not exist, can not load snapshot with this directory.",
+ ConfigNodeMessages.SNAPSHOT_DIRECTORY_IS_NOT_EXIST_CAN_NOT_LOAD_SNAPSHOT_WITH,
latestSnapshotRootDir.getAbsolutePath());
return;
}
@@ -775,22 +781,22 @@ public void loadSnapshot(final File latestSnapshotRootDir) {
try {
final long startTime = System.currentTimeMillis();
LOGGER.info(
- "[ConfigNodeSnapshot] Start to load snapshot for {} from {}",
+ ConfigNodeMessages.CONFIGNODESNAPSHOT_START_TO_LOAD_SNAPSHOT_FOR_FROM,
x.getClass().getName(),
latestSnapshotRootDir.getAbsolutePath());
x.processLoadSnapshot(latestSnapshotRootDir);
LOGGER.info(
- "[ConfigNodeSnapshot] Load snapshot for {} cost {} ms",
+ ConfigNodeMessages.CONFIGNODESNAPSHOT_LOAD_SNAPSHOT_FOR_COST_MS,
x.getClass().getName(),
System.currentTimeMillis() - startTime);
} catch (final TException | IOException e) {
result.set(false);
- LOGGER.error("Load snapshot error", e);
+ LOGGER.error(ConfigNodeMessages.LOAD_SNAPSHOT_ERROR, e);
}
});
if (result.get()) {
LOGGER.info(
- "[ConfigNodeSnapshot] Load snapshot success, latestSnapshotRootDir: {}",
+ ConfigNodeMessages.CONFIGNODESNAPSHOT_LOAD_SNAPSHOT_SUCCESS_LATESTSNAPSHOTROOTDIR,
latestSnapshotRootDir);
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/node/NodeInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/node/NodeInfo.java
index dd9ea5e9e920c..ba33344ac51a4 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/node/NodeInfo.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/node/NodeInfo.java
@@ -42,6 +42,7 @@
import org.apache.iotdb.confignode.consensus.request.write.datanode.UpdateDataNodePlan;
import org.apache.iotdb.confignode.consensus.response.ainode.AINodeConfigurationResp;
import org.apache.iotdb.confignode.consensus.response.datanode.DataNodeConfigurationResp;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.rpc.thrift.TNodeVersionInfo;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -168,7 +169,7 @@ public TSStatus registerDataNode(RegisterDataNodePlan registerDataNodePlan) {
*/
public TSStatus removeDataNode(RemoveDataNodePlan req) {
LOGGER.info(
- "{}, There are {} data node in cluster before executed RemoveDataNodePlan",
+ ConfigNodeMessages.THERE_ARE_DATA_NODE_IN_CLUSTER_BEFORE_EXECUTED_REMOVEDATANODEPLAN,
REMOVE_DATANODE_PROCESS,
registeredDataNodes.size());
@@ -180,14 +181,14 @@ public TSStatus removeDataNode(RemoveDataNodePlan req) {
removeDataNodes -> {
registeredDataNodes.remove(removeDataNodes.getDataNodeId());
nodeVersionInfo.remove(removeDataNodes.getDataNodeId());
- LOGGER.info("Removed the datanode {} from cluster", removeDataNodes);
+ LOGGER.info(ConfigNodeMessages.REMOVED_THE_DATANODE_FROM_CLUSTER, removeDataNodes);
});
} finally {
versionInfoReadWriteLock.writeLock().unlock();
dataNodeInfoReadWriteLock.writeLock().unlock();
}
LOGGER.info(
- "{}, There are {} data node in cluster after executed RemoveDataNodePlan",
+ ConfigNodeMessages.THERE_ARE_DATA_NODE_IN_CLUSTER_AFTER_EXECUTED_REMOVEDATANODEPLAN,
REMOVE_DATANODE_PROCESS,
registeredDataNodes.size());
return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
@@ -297,7 +298,8 @@ public int getDataNodeCpuCoreCount(int dataNodeId) {
try {
return registeredDataNodes.get(dataNodeId).getResource().getCpuCoreNum();
} catch (Exception e) {
- LOGGER.warn("Get DataNode {} cpu core fail, will be treated as zero.", dataNodeId, e);
+ LOGGER.warn(
+ ConfigNodeMessages.GET_DATANODE_CPU_CORE_FAIL_WILL_BE_TREATED_AS_ZERO, dataNodeId, e);
return 0;
}
}
@@ -426,12 +428,12 @@ public TSStatus applyConfigNode(ApplyConfigNodePlan applyConfigNodePlan) {
applyConfigNodePlan.getConfigNodeLocation());
SystemPropertiesUtils.storeConfigNodeList(new ArrayList<>(registeredConfigNodes.values()));
LOGGER.info(
- "Successfully apply ConfigNode: {}. Current ConfigNodeGroup: {}",
+ ConfigNodeMessages.SUCCESSFULLY_APPLY_CONFIGNODE_CURRENT_CONFIGNODEGROUP,
applyConfigNodePlan.getConfigNodeLocation(),
registeredConfigNodes);
status.setCode(TSStatusCode.SUCCESS_STATUS.getStatusCode());
} catch (IOException e) {
- LOGGER.error("Update online ConfigNode failed.", e);
+ LOGGER.error(ConfigNodeMessages.UPDATE_ONLINE_CONFIGNODE_FAILED, e);
status.setCode(TSStatusCode.ADD_CONFIGNODE_ERROR.getStatusCode());
status.setMessage(
"Apply new ConfigNode failed because current ConfigNode can't store ConfigNode information.");
@@ -461,12 +463,12 @@ public TSStatus removeConfigNode(RemoveConfigNodePlan removeConfigNodePlan) {
nodeVersionInfo.remove(removeConfigNodePlan.getConfigNodeLocation().getConfigNodeId());
SystemPropertiesUtils.storeConfigNodeList(new ArrayList<>(registeredConfigNodes.values()));
LOGGER.info(
- "Successfully remove ConfigNode: {}. Current ConfigNodeGroup: {}",
+ ConfigNodeMessages.SUCCESSFULLY_REMOVE_CONFIGNODE_CURRENT_CONFIGNODEGROUP,
removeConfigNodePlan.getConfigNodeLocation(),
registeredConfigNodes);
status.setCode(TSStatusCode.SUCCESS_STATUS.getStatusCode());
} catch (IOException e) {
- LOGGER.error("Remove online ConfigNode failed.", e);
+ LOGGER.error(ConfigNodeMessages.REMOVE_ONLINE_CONFIGNODE_FAILED, e);
status.setCode(TSStatusCode.REMOVE_CONFIGNODE_ERROR.getStatusCode());
status.setMessage(
"Remove ConfigNode failed because current ConfigNode can't store ConfigNode information.");
@@ -526,7 +528,7 @@ public TSStatus updateAINode(UpdateAINodePlan updateAINodePlan) {
*/
public TSStatus removeAINode(RemoveAINodePlan req) {
LOGGER.info(
- "{}, There are {} AI nodes in cluster before executed RemoveAINodePlan",
+ ConfigNodeMessages.THERE_ARE_AI_NODES_IN_CLUSTER_BEFORE_EXECUTED_REMOVEAINODEPLAN,
REMOVE_AINODE_PROCESS,
registeredAINodes.size());
@@ -536,13 +538,13 @@ public TSStatus removeAINode(RemoveAINodePlan req) {
try {
registeredAINodes.remove(removedAINode.getAiNodeId());
nodeVersionInfo.remove(removedAINode.getAiNodeId());
- LOGGER.info("Removed the AINode {} from cluster", removedAINode);
+ LOGGER.info(ConfigNodeMessages.REMOVED_THE_AINODE_FROM_CLUSTER, removedAINode);
} finally {
versionInfoReadWriteLock.writeLock().unlock();
aiNodeInfoReadWriteLock.writeLock().unlock();
}
LOGGER.info(
- "{}, There are {} AI nodes in cluster after executed RemoveAINodePlan",
+ ConfigNodeMessages.THERE_ARE_AI_NODES_IN_CLUSTER_AFTER_EXECUTED_REMOVEAINODEPLAN,
REMOVE_AINODE_PROCESS,
registeredAINodes.size());
return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
@@ -562,7 +564,8 @@ public TSStatus updateVersionInfo(UpdateVersionInfoPlan updateVersionInfoPlan) {
} finally {
versionInfoReadWriteLock.writeLock().unlock();
}
- LOGGER.info("Successfully update Node {} 's version.", updateVersionInfoPlan.getNodeId());
+ LOGGER.info(
+ ConfigNodeMessages.SUCCESSFULLY_UPDATE_NODE_S_VERSION, updateVersionInfoPlan.getNodeId());
return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
}
@@ -631,7 +634,7 @@ public boolean processTakeSnapshot(File snapshotDir) throws IOException, TExcept
File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME);
if (snapshotFile.exists() && snapshotFile.isFile()) {
LOGGER.error(
- "Failed to take snapshot, because snapshot file [{}] is already exist.",
+ ConfigNodeMessages.FAILED_TO_TAKE_SNAPSHOT_BECAUSE_SNAPSHOT_FILE_IS_ALREADY_EXIST,
snapshotFile.getAbsolutePath());
return false;
}
@@ -673,7 +676,8 @@ public boolean processTakeSnapshot(File snapshotDir) throws IOException, TExcept
break;
} else {
LOGGER.warn(
- "Can't delete temporary snapshot file: {}, retrying...", tmpFile.getAbsolutePath());
+ ConfigNodeMessages.CAN_T_DELETE_TEMPORARY_SNAPSHOT_FILE_RETRYING,
+ tmpFile.getAbsolutePath());
}
}
}
@@ -721,7 +725,7 @@ public void processLoadSnapshot(File snapshotDir) throws IOException, TException
File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME);
if (!snapshotFile.exists() || !snapshotFile.isFile()) {
LOGGER.error(
- "Failed to load snapshot,snapshot file [{}] is not exist.",
+ ConfigNodeMessages.FAILED_TO_LOAD_SNAPSHOT_SNAPSHOT_FILE_IS_NOT_EXIST_2,
snapshotFile.getAbsolutePath());
return;
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/DatabasePartitionTable.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/DatabasePartitionTable.java
index 5a80364f0331a..d2f1f80e7a3e0 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/DatabasePartitionTable.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/DatabasePartitionTable.java
@@ -29,6 +29,7 @@
import org.apache.iotdb.commons.partition.SchemaPartitionTable;
import org.apache.iotdb.commons.utils.CommonDateTimeUtils;
import org.apache.iotdb.confignode.consensus.request.read.region.GetRegionInfoListPlan;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.rpc.thrift.TRegionInfo;
import org.apache.iotdb.confignode.rpc.thrift.TShowRegionReq;
import org.apache.iotdb.confignode.rpc.thrift.TTimeSlotList;
@@ -533,14 +534,14 @@ void addRegionNewLocation(TConsensusGroupId regionId, TDataNodeLocation node) {
RegionGroup regionGroup = regionGroupMap.get(regionId);
if (regionGroup == null) {
LOGGER.warn(
- "Cannot find RegionGroup for region {} when addRegionNewLocation in {}",
+ ConfigNodeMessages.CANNOT_FIND_REGIONGROUP_FOR_REGION_WHEN_ADDREGIONNEWLOCATION_IN,
regionId,
databaseName);
return;
}
if (regionGroup.getReplicaSet().getDataNodeLocations().contains(node)) {
LOGGER.info(
- "Node is already in region locations when addRegionNewLocation in {}, "
+ ConfigNodeMessages.NODE_IS_ALREADY_IN_REGION_LOCATIONS_WHEN_ADDREGIONNEWLOCATION_IN
+ "node: {}, region: {}",
databaseName,
node,
@@ -554,7 +555,7 @@ void removeRegionLocation(TConsensusGroupId regionId, int nodeId) {
RegionGroup regionGroup = regionGroupMap.get(regionId);
if (regionGroup == null) {
LOGGER.warn(
- "Cannot find RegionGroup for region {} when removeRegionOldLocation in {}",
+ ConfigNodeMessages.CANNOT_FIND_REGIONGROUP_FOR_REGION_WHEN_REMOVEREGIONOLDLOCATION_IN,
regionId,
databaseName);
return;
@@ -563,7 +564,7 @@ void removeRegionLocation(TConsensusGroupId regionId, int nodeId) {
.map(TDataNodeLocation::getDataNodeId)
.noneMatch(id -> id == nodeId)) {
LOGGER.info(
- "Node is not in region locations when removeRegionOldLocation in {}, "
+ ConfigNodeMessages.NODE_IS_NOT_IN_REGION_LOCATIONS_WHEN_REMOVEREGIONOLDLOCATION_IN
+ "no need to remove it, node: {}, region: {}",
databaseName,
nodeId,
@@ -639,7 +640,7 @@ public void autoCleanPartitionTable(long TTL, TTimePartitionSlot currentTimeSlot
.toArray();
if (removedTimePartitionSlots.length > 0) {
LOGGER.info(
- "[PartitionTableCleaner] The TimePartitions: {} are removed from Database: {}",
+ ConfigNodeMessages.PARTITIONTABLECLEANER_THE_TIMEPARTITIONS_ARE_REMOVED_FROM_DATABASE,
removedTimePartitionSlots,
databaseName);
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java
index 5c2c93daeabed..af9429e59532c 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java
@@ -62,6 +62,7 @@
import org.apache.iotdb.confignode.consensus.response.partition.SchemaNodeManagementResp;
import org.apache.iotdb.confignode.consensus.response.partition.SchemaPartitionResp;
import org.apache.iotdb.confignode.exception.DatabaseNotExistsException;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.persistence.partition.maintainer.RegionMaintainTask;
import org.apache.iotdb.confignode.rpc.thrift.TRegionInfo;
import org.apache.iotdb.confignode.rpc.thrift.TShowRegionReq;
@@ -198,7 +199,8 @@ public TSStatus createRegionGroups(CreateRegionGroupsPlan plan) {
(database, regionReplicaSets) -> {
if (isDatabasePreDeleted(database)) {
LOGGER.warn(
- "[CreateRegionGroups] Database {} has been deleted, corresponding RegionGroups will not be created.",
+ ConfigNodeMessages
+ .CREATEREGIONGROUPS_DATABASE_HAS_BEEN_DELETED_CORRESPONDING_REGIONGROUPS,
database);
return;
}
@@ -982,7 +984,7 @@ public boolean processTakeSnapshot(File snapshotDir) throws TException, IOExcept
File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME);
if (snapshotFile.exists() && snapshotFile.isFile()) {
LOGGER.error(
- "Failed to take snapshot, because snapshot file [{}] is already exist.",
+ ConfigNodeMessages.FAILED_TO_TAKE_SNAPSHOT_BECAUSE_SNAPSHOT_FILE_IS_ALREADY_EXIST,
snapshotFile.getAbsolutePath());
return false;
}
@@ -1030,7 +1032,8 @@ public boolean processTakeSnapshot(File snapshotDir) throws TException, IOExcept
break;
} else {
LOGGER.warn(
- "Can't delete temporary snapshot file: {}, retrying...", tmpFile.getAbsolutePath());
+ ConfigNodeMessages.CAN_T_DELETE_TEMPORARY_SNAPSHOT_FILE_RETRYING,
+ tmpFile.getAbsolutePath());
}
}
}
@@ -1041,7 +1044,7 @@ public void processLoadSnapshot(final File snapshotDir) throws TException, IOExc
final File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME);
if (!snapshotFile.exists() || !snapshotFile.isFile()) {
LOGGER.error(
- "Failed to load snapshot,snapshot file [{}] is not exist.",
+ ConfigNodeMessages.FAILED_TO_LOAD_SNAPSHOT_SNAPSHOT_FILE_IS_NOT_EXIST_2,
snapshotFile.getAbsolutePath());
return;
}
@@ -1062,7 +1065,8 @@ public void processLoadSnapshot(final File snapshotDir) throws TException, IOExc
for (int i = 0; i < length; i++) {
final String database = ReadWriteIOUtils.readString(fileInputStream);
if (database == null) {
- throw new IOException("Failed to load snapshot because get null database name");
+ throw new IOException(
+ ConfigNodeMessages.FAILED_TO_LOAD_SNAPSHOT_BECAUSE_GET_NULL_DATABASE_NAME);
}
final DatabasePartitionTable databasePartitionTable = new DatabasePartitionTable(database);
databasePartitionTable.deserialize(fileInputStream, protocol);
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/maintainer/RegionMaintainTask.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/maintainer/RegionMaintainTask.java
index f1da0048502e1..25aca0f3b077c 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/maintainer/RegionMaintainTask.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/maintainer/RegionMaintainTask.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId;
import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TProtocol;
@@ -75,7 +76,7 @@ public static class Factory {
public static RegionMaintainTask create(ByteBuffer buffer) throws IOException {
int typeNum = buffer.getInt();
if (typeNum >= RegionMaintainType.values().length) {
- throw new IOException("Unrecognized RegionMaintainType: " + typeNum);
+ throw new IOException(ConfigNodeMessages.UNRECOGNIZED_REGIONMAINTAINTYPE + typeNum);
}
RegionMaintainTask task;
RegionMaintainType type = RegionMaintainType.values()[typeNum];
@@ -87,7 +88,7 @@ public static RegionMaintainTask create(ByteBuffer buffer) throws IOException {
task = new RegionDeleteTask();
break;
default:
- throw new IOException("Unrecognized RegionMaintainType: " + typeNum);
+ throw new IOException(ConfigNodeMessages.UNRECOGNIZED_REGIONMAINTAINTYPE + typeNum);
}
task.deserialize(buffer);
return task;
@@ -97,7 +98,7 @@ public static RegionMaintainTask create(InputStream inputStream, TProtocol proto
throws IOException, TException {
int typeNum = ReadWriteIOUtils.readInt(inputStream);
if (typeNum >= RegionMaintainType.values().length) {
- throw new IOException("Unrecognized RegionMaintainType: " + typeNum);
+ throw new IOException(ConfigNodeMessages.UNRECOGNIZED_REGIONMAINTAINTYPE + typeNum);
}
RegionMaintainTask task;
RegionMaintainType type = RegionMaintainType.values()[typeNum];
@@ -109,7 +110,7 @@ public static RegionMaintainTask create(InputStream inputStream, TProtocol proto
task = new RegionDeleteTask();
break;
default:
- throw new IOException("Unrecognized RegionMaintainType: " + typeNum);
+ throw new IOException(ConfigNodeMessages.UNRECOGNIZED_REGIONMAINTAINTYPE + typeNum);
}
task.deserialize(inputStream, protocol);
return task;
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipeInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipeInfo.java
index 9341b1f2e29bb..3046662dbb577 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipeInfo.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipeInfo.java
@@ -30,6 +30,7 @@
import org.apache.iotdb.confignode.consensus.request.write.pipe.task.OperateMultiplePipesPlanV2;
import org.apache.iotdb.confignode.consensus.request.write.pipe.task.SetPipeStatusPlanV2;
import org.apache.iotdb.confignode.consensus.request.write.pipe.task.SetPipeStatusWithStoppedByRuntimeExceptionPlanV2;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.manager.pipe.agent.PipeConfigNodeAgent;
import org.apache.iotdb.confignode.manager.pipe.agent.runtime.PipeConfigRegionListener;
import org.apache.iotdb.confignode.manager.pipe.agent.task.PipeConfigNodeSubtask;
@@ -92,7 +93,8 @@ public TSStatus createPipe(final CreatePipePlanV2 plan) {
.increaseListenerReference(plan.getPipeStaticMeta().getSourceParameters());
return null;
} catch (final Exception e) {
- throw new PipeException("Failed to increase listener reference", e);
+ throw new PipeException(
+ ConfigNodeMessages.FAILED_TO_INCREASE_LISTENER_REFERENCE, e);
}
});
PipeTemporaryMetaInCoordinatorMetrics.getInstance()
@@ -103,7 +105,7 @@ public TSStatus createPipe(final CreatePipePlanV2 plan) {
.setMessage(message.getMessage());
}
} catch (final Exception e) {
- LOGGER.error("Failed to create pipe", e);
+ LOGGER.error(ConfigNodeMessages.FAILED_TO_CREATE_PIPE, e);
return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode())
.setMessage("Failed to create pipe, because " + e.getMessage());
}
@@ -119,7 +121,7 @@ public TSStatus setPipeStatus(final SetPipeStatusPlanV2 plan) {
.handleTemporaryMetaChanges(pipeTaskInfo.getPipeMetaList());
return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
} catch (final Exception e) {
- LOGGER.error("Failed to set pipe status", e);
+ LOGGER.error(ConfigNodeMessages.FAILED_TO_SET_PIPE_STATUS, e);
return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode())
.setMessage("Failed to set pipe status, because " + e.getMessage());
}
@@ -136,7 +138,8 @@ public TSStatus setPipeStatusWithStoppedByRuntimeException(
.handleTemporaryMetaChanges(pipeTaskInfo.getPipeMetaList());
return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
} catch (final Exception e) {
- LOGGER.error("Failed to set pipe status with stopped-by-runtime-exception flag", e);
+ LOGGER.error(
+ ConfigNodeMessages.FAILED_TO_SET_PIPE_STATUS_WITH_STOPPED_BY_RUNTIME_EXCEPTION, e);
return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode())
.setMessage(
"Failed to set pipe status with stopped-by-runtime-exception flag, because "
@@ -160,7 +163,8 @@ public TSStatus dropPipe(final DropPipePlanV2 plan) {
PipeConfigNodeAgent.runtime()
.decreaseListenerReference(meta.getStaticMeta().getSourceParameters());
} catch (final Exception e) {
- throw new PipeException("Failed to decrease listener reference", e);
+ throw new PipeException(
+ ConfigNodeMessages.FAILED_TO_DECREASE_LISTENER_REFERENCE, e);
}
});
PipeTemporaryMetaInCoordinatorMetrics.getInstance()
@@ -171,7 +175,7 @@ public TSStatus dropPipe(final DropPipePlanV2 plan) {
.setMessage(message.getMessage());
}
} catch (final Exception e) {
- LOGGER.error("Failed to drop pipe", e);
+ LOGGER.error(ConfigNodeMessages.FAILED_TO_DROP_PIPE, e);
return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode())
.setMessage("Failed to drop pipe, because " + e.getMessage());
}
@@ -198,7 +202,8 @@ public TSStatus alterPipe(final AlterPipePlanV2 plan) {
PipeConfigNodeAgent.runtime()
.decreaseListenerReference(meta.getStaticMeta().getSourceParameters());
} catch (final Exception e) {
- throw new PipeException("Failed to decrease listener reference", e);
+ throw new PipeException(
+ ConfigNodeMessages.FAILED_TO_DECREASE_LISTENER_REFERENCE, e);
}
});
PipeTemporaryMetaInCoordinatorMetrics.getInstance()
@@ -209,7 +214,7 @@ public TSStatus alterPipe(final AlterPipePlanV2 plan) {
.setMessage(message.getMessage());
}
} catch (final Exception e) {
- LOGGER.error("Failed to alter pipe", e);
+ LOGGER.error(ConfigNodeMessages.FAILED_TO_ALTER_PIPE, e);
return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode())
.setMessage("Failed to alter pipe, because " + e.getMessage());
}
@@ -230,7 +235,7 @@ public TSStatus operateMultiplePipes(final OperateMultiplePipesPlanV2 plans) {
.handleTemporaryMetaChanges(pipeTaskInfo.getPipeMetaList());
return status;
} catch (final Exception e) {
- LOGGER.error("Failed to create multiple pipes", e);
+ LOGGER.error(ConfigNodeMessages.FAILED_TO_CREATE_MULTIPLE_PIPES, e);
return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode())
.setMessage("Failed to create multiple pipes, because " + e.getMessage());
}
@@ -249,7 +254,7 @@ public TSStatus handleLeaderChange(final PipeHandleLeaderChangePlan plan) {
.handleTemporaryMetaChanges(pipeTaskInfo.getPipeMetaList());
return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
} catch (final Exception e) {
- LOGGER.error("Failed to handle leader change", e);
+ LOGGER.error(ConfigNodeMessages.FAILED_TO_HANDLE_LEADER_CHANGE, e);
return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode())
.setMessage("Failed to handle leader change, because " + e.getMessage());
}
@@ -269,7 +274,7 @@ public TSStatus handleMetaChanges(final PipeHandleMetaChangePlan plan) {
.handleTemporaryMetaChanges(pipeTaskInfo.getPipeMetaList());
return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
} catch (final Exception e) {
- LOGGER.error("Failed to handle meta changes", e);
+ LOGGER.error(ConfigNodeMessages.FAILED_TO_HANDLE_META_CHANGES, e);
return new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode())
.setMessage("Failed to handle meta changes, because " + e.getMessage());
}
@@ -296,20 +301,20 @@ public void processLoadSnapshot(final File snapshotDir) throws IOException {
.increaseListenerReference(pipeMeta.getStaticMeta().getSourceParameters());
}
} catch (final Exception ex) {
- LOGGER.error("Failed to load pipe task info from snapshot", ex);
+ LOGGER.error(ConfigNodeMessages.FAILED_TO_LOAD_PIPE_TASK_INFO_FROM_SNAPSHOT, ex);
loadPipeTaskInfoException = ex;
}
try {
pipePluginInfo.processLoadSnapshot(snapshotDir);
} catch (final Exception ex) {
- LOGGER.error("Failed to load pipe plugin info from snapshot", ex);
+ LOGGER.error(ConfigNodeMessages.FAILED_TO_LOAD_PIPE_PLUGIN_INFO_FROM_SNAPSHOT, ex);
loadPipePluginInfoException = ex;
}
if (loadPipeTaskInfoException != null || loadPipePluginInfoException != null) {
throw new IOException(
- "Failed to load pipe info from snapshot, "
+ ConfigNodeMessages.FAILED_TO_LOAD_PIPE_INFO_FROM_SNAPSHOT
+ "loadPipeTaskInfoException="
+ loadPipeTaskInfoException
+ ", loadPipePluginInfoException="
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipePluginInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipePluginInfo.java
index 30771e78e4696..19251c50144b6 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipePluginInfo.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipePluginInfo.java
@@ -39,6 +39,7 @@
import org.apache.iotdb.confignode.consensus.request.write.pipe.plugin.DropPipePluginPlan;
import org.apache.iotdb.confignode.consensus.response.JarResp;
import org.apache.iotdb.confignode.consensus.response.pipe.plugin.PipePluginTableResp;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.consensus.common.DataSet;
import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters;
import org.apache.iotdb.pipe.api.exception.PipeException;
@@ -118,15 +119,16 @@ public boolean validateBeforeCreatingPipePlugin(
if (loadingFailureMessage != null) {
throw new PipeException(
String.format(
- "Failed to create PipePlugin [%s], this PipePlugin exists but failed to load: %s",
- pluginName, loadingFailureMessage));
+ ConfigNodeMessages.FAILED_TO_CREATE_PIPEPLUGIN_THIS_PIPEPLUGIN_EXISTS_BUT_FAILED_TO,
+ pluginName,
+ loadingFailureMessage));
}
if (isSetIfNotExistsCondition) {
return true;
}
throw new PipeException(
String.format(
- "Failed to create PipePlugin [%s], the same name PipePlugin has been created",
+ ConfigNodeMessages.FAILED_TO_CREATE_PIPEPLUGIN_THE_SAME_NAME_PIPEPLUGIN_HAS_BEEN,
pluginName));
}
return false;
@@ -145,12 +147,13 @@ public boolean validateBeforeDroppingPipePlugin(
}
throw new PipeException(
String.format(
- "Failed to drop PipePlugin [%s], this PipePlugin has not been created", pluginName));
+ ConfigNodeMessages.FAILED_TO_DROP_PIPEPLUGIN_THIS_PIPEPLUGIN_HAS_NOT_BEEN_CREATED,
+ pluginName));
}
if (pipePluginMetaKeeper.getPipePluginMeta(pluginName).isBuiltin()) {
throw new PipeException(
String.format(
- "Failed to drop PipePlugin [%s], the PipePlugin is a built-in PipePlugin",
+ ConfigNodeMessages.FAILED_TO_DROP_PIPEPLUGIN_THE_PIPEPLUGIN_IS_A_BUILT_IN,
pluginName));
}
return false;
@@ -240,21 +243,26 @@ public TSStatus createPipePlugin(final CreatePipePluginPlan createPipePluginPlan
if (existedLoadingFailureMessage != null) {
throw new PipeException(
String.format(
- "Failed to create PipePlugin [%s], source PipePlugin [%s] failed to load: %s",
- pluginName, existed, existedLoadingFailureMessage));
+ ConfigNodeMessages.FAILED_TO_CREATE_PIPEPLUGIN_SOURCE_PIPEPLUGIN_FAILED_TO_LOAD,
+ pluginName,
+ existed,
+ existedLoadingFailureMessage));
}
if (!pipePluginExecutableManager.hasPluginFileUnderInstallDir(existed, jarName)) {
throw new PipeException(
String.format(
- "Failed to create PipePlugin [%s], source PipePlugin [%s] jar [%s] does not exist in install dir.",
- pluginName, existed, jarName));
+ ConfigNodeMessages
+ .FAILED_TO_CREATE_PIPEPLUGIN_SOURCE_PIPEPLUGIN_JAR_DOES_NOT_EXIST,
+ pluginName,
+ existed,
+ jarName));
}
pipePluginExecutableManager.linkExistedPlugin(existed, pluginName, jarName);
computeFromPluginClass(pluginName, className);
} else {
throw new PipeException(
String.format(
- "The %s's creation has not passed in jarName, which does not exist in other pipePlugins. Please check",
+ ConfigNodeMessages.THE_S_CREATION_HAS_NOT_PASSED_IN_JARNAME_WHICH_DOES,
pluginName));
}
}
@@ -355,7 +363,7 @@ public JarResp getPipePluginJar(final GetPipePluginJarPlan getPipePluginJarPlan)
for (final String jarName : getPipePluginJarPlan.getJarNames()) {
String pluginName = pipePluginMetaKeeper.getPluginNameByJarName(jarName);
if (pluginName == null) {
- throw new PipeException(String.format("%s does not exist", jarName));
+ throw new PipeException(String.format(ConfigNodeMessages.DOES_NOT_EXIST, jarName));
}
String jarPath = manager.getPluginInstallPathV2(pluginName, jarName);
@@ -366,7 +374,7 @@ public JarResp getPipePluginJar(final GetPipePluginJarPlan getPipePluginJarPlan)
}
if (!Files.exists(Paths.get(jarPath))) {
- throw new PipeException(String.format("%s does not exist", jarName));
+ throw new PipeException(String.format(ConfigNodeMessages.DOES_NOT_EXIST, jarName));
}
ByteBuffer byteBuffer = ExecutableManager.transferToBytebuffer(jarPath);
@@ -379,7 +387,7 @@ public JarResp getPipePluginJar(final GetPipePluginJarPlan getPipePluginJarPlan)
}
return new JarResp(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()), jarList);
} catch (final Exception e) {
- LOGGER.error("Get PipePlugin_Jar failed", e);
+ LOGGER.error(ConfigNodeMessages.GET_PIPEPLUGIN_JAR_FAILED, e);
return new JarResp(
new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode())
.setMessage("Get PipePlugin_Jar failed, because " + e.getMessage()),
@@ -396,7 +404,7 @@ public boolean processTakeSnapshot(final File snapshotDir) throws IOException {
final File snapshotFile = new File(snapshotDir, SNAPSHOT_FILE_NAME);
if (snapshotFile.exists() && snapshotFile.isFile()) {
LOGGER.error(
- "Failed to take snapshot, because snapshot file [{}] is already exist.",
+ ConfigNodeMessages.FAILED_TO_TAKE_SNAPSHOT_BECAUSE_SNAPSHOT_FILE_IS_ALREADY_EXIST,
snapshotFile.getAbsolutePath());
return false;
}
@@ -418,7 +426,7 @@ public void processLoadSnapshot(final File snapshotDir) throws IOException {
final File snapshotFile = new File(snapshotDir, SNAPSHOT_FILE_NAME);
if (!snapshotFile.exists() || !snapshotFile.isFile()) {
LOGGER.error(
- "Failed to load snapshot, snapshot file [{}] is not exist.",
+ ConfigNodeMessages.FAILED_TO_LOAD_SNAPSHOT_SNAPSHOT_FILE_IS_NOT_EXIST,
snapshotFile.getAbsolutePath());
return;
}
@@ -502,7 +510,7 @@ private void createPipePluginOnStartup(
getRootCauseMessage(e)));
pipePluginMetaKeeper.addPipePluginVisibility(pluginName, Visibility.BOTH);
LOGGER.warn(
- "Failed to load plugin class for plugin [{}] when loading snapshot [{}] ",
+ ConfigNodeMessages.FAILED_TO_LOAD_PLUGIN_CLASS_FOR_PLUGIN_WHEN_LOADING_SNAPSHOT,
pluginName,
snapshotFile.getAbsolutePath(),
e);
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipeTaskInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipeTaskInfo.java
index 035fa23dd9ed6..f6f921af035c9 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipeTaskInfo.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/pipe/PipeTaskInfo.java
@@ -50,6 +50,7 @@
import org.apache.iotdb.confignode.consensus.request.write.pipe.task.SetPipeStatusPlanV2;
import org.apache.iotdb.confignode.consensus.request.write.pipe.task.SetPipeStatusWithStoppedByRuntimeExceptionPlanV2;
import org.apache.iotdb.confignode.consensus.response.pipe.task.PipeTableResp;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.manager.pipe.resource.PipeConfigNodeResourceManager;
import org.apache.iotdb.confignode.procedure.impl.pipe.runtime.PipeHandleMetaChangeProcedure;
import org.apache.iotdb.confignode.rpc.thrift.TAlterPipeReq;
@@ -332,7 +333,7 @@ public void checkBeforeDropPipe(final String pipeName) {
private void checkBeforeDropPipeInternal(final String pipeName) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(
- "Check before drop pipe {}, pipe exists: {}.", pipeName, isPipeExisted(pipeName));
+ ConfigNodeMessages.CHECK_BEFORE_DROP_PIPE_PIPE_EXISTS, pipeName, isPipeExisted(pipeName));
}
// No matter whether the pipe exists, we allow the drop operation executed on all nodes to
// ensure the consistency.
@@ -473,12 +474,13 @@ public TSStatus operateMultiplePipes(final OperateMultiplePipesPlanV2 plan) {
dropPipe((DropPipePlanV2) subPlan);
} else {
throw new PipeException(
- String.format("Unsupported subPlan type: %s", subPlan.getClass().getName()));
+ String.format(
+ ConfigNodeMessages.UNSUPPORTED_SUBPLAN_TYPE, subPlan.getClass().getName()));
}
status.getSubStatus().add(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()));
} catch (final Exception e) {
// If one of the subPlan fails, we stop operating the rest of the pipes
- LOGGER.error("Failed to operate pipe", e);
+ LOGGER.error(ConfigNodeMessages.FAILED_TO_OPERATE_PIPE, e);
status.setCode(TSStatusCode.PIPE_ERROR.getStatusCode());
status.getSubStatus().add(new TSStatus(TSStatusCode.PIPE_ERROR.getStatusCode()));
break;
@@ -635,7 +637,7 @@ private TSStatus handleLeaderChangeInternal(final PipeHandleLeaderChangePlan pla
// don't know about RegionLeader Map and will be balanced in the meta
// sync procedure
LOGGER.info(
- "Pipe {} is using external source, skip region leader change. PipeHandleLeaderChangePlan: {}",
+ ConfigNodeMessages.PIPE_IS_USING_EXTERNAL_SOURCE_SKIP_REGION,
pipeMeta.getStaticMeta().getPipeName(),
plan.getConsensusGroupId2NewLeaderIdMap());
return;
@@ -699,7 +701,7 @@ public TSStatus handleMetaChanges(final PipeHandleMetaChangePlan plan) {
}
private TSStatus handleMetaChangesInternal(final PipeHandleMetaChangePlan plan) {
- LOGGER.debug("Handling pipe meta changes ...");
+ LOGGER.debug(ConfigNodeMessages.HANDLING_PIPE_META_CHANGES);
pipeMetaKeeper.clear();
@@ -903,7 +905,7 @@ private boolean autoRestartInternal() {
});
if (needRestart.get()) {
- LOGGER.info("PipeMetaSyncer is trying to restart the pipes: {}", pipeToRestart);
+ LOGGER.info(ConfigNodeMessages.PIPEMETASYNCER_IS_TRYING_TO_RESTART_THE_PIPES, pipeToRestart);
}
return needRestart.get();
}
@@ -955,7 +957,7 @@ public boolean processTakeSnapshot(final File snapshotDir) throws IOException {
final File snapshotFile = new File(snapshotDir, SNAPSHOT_FILE_NAME);
if (snapshotFile.exists() && snapshotFile.isFile()) {
LOGGER.error(
- "Failed to take snapshot, because snapshot file [{}] is already exist.",
+ ConfigNodeMessages.FAILED_TO_TAKE_SNAPSHOT_BECAUSE_SNAPSHOT_FILE_IS_ALREADY_EXIST,
snapshotFile.getAbsolutePath());
return false;
}
@@ -977,7 +979,7 @@ public void processLoadSnapshot(final File snapshotDir) throws IOException {
final File snapshotFile = new File(snapshotDir, SNAPSHOT_FILE_NAME);
if (!snapshotFile.exists() || !snapshotFile.isFile()) {
LOGGER.error(
- "Failed to load snapshot,snapshot file [{}] is not exist.",
+ ConfigNodeMessages.FAILED_TO_LOAD_SNAPSHOT_SNAPSHOT_FILE_IS_NOT_EXIST_2,
snapshotFile.getAbsolutePath());
return;
}
@@ -1011,7 +1013,8 @@ private void normalizeRecoveredConsensusPipeStatus() {
if (!restartedConsensusPipes.isEmpty()) {
LOGGER.info(
- "Recovered consensus pipes {} as RUNNING during snapshot load.", restartedConsensusPipes);
+ ConfigNodeMessages.RECOVERED_CONSENSUS_PIPES_AS_RUNNING_DURING_SNAPSHOT_LOAD,
+ restartedConsensusPipes);
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ClusterSchemaInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ClusterSchemaInfo.java
index 67ce6736a56d5..7dd6acc4e2302 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ClusterSchemaInfo.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ClusterSchemaInfo.java
@@ -94,6 +94,7 @@
import org.apache.iotdb.confignode.consensus.response.template.TemplateInfoResp;
import org.apache.iotdb.confignode.consensus.response.template.TemplateSetInfoResp;
import org.apache.iotdb.confignode.exception.DatabaseNotExistsException;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.persistence.schema.ConfigMTree.TableSchemaDetails;
import org.apache.iotdb.confignode.rpc.thrift.TDatabaseSchema;
import org.apache.iotdb.confignode.rpc.thrift.TTableColumnInfo;
@@ -173,7 +174,7 @@ public ClusterSchemaInfo() throws IOException {
templateTable = new TemplateTable();
templatePreSetTable = new TemplatePreSetTable();
} catch (final MetadataException e) {
- LOGGER.error("Can't construct ClusterSchemaInfo", e);
+ LOGGER.error(ConfigNodeMessages.CAN_T_CONSTRUCT_CLUSTERSCHEMAINFO, e);
throw new IOException(e);
}
}
@@ -243,11 +244,11 @@ public TSStatus alterDatabase(final DatabaseSchemaPlan plan) {
currentSchema.getMinSchemaRegionGroupNum(),
currentSchema.getMaxSchemaRegionGroupNum()));
LOGGER.info(
- "[AdjustRegionGroupNum] The minimum number of SchemaRegionGroups for Database: {} is adjusted to: {}",
+ ConfigNodeMessages.ADJUSTREGIONGROUPNUM_THE_MINIMUM_NUMBER_OF_SCHEMAREGIONGROUPS_FOR,
currentSchema.getName(),
currentSchema.getMinSchemaRegionGroupNum());
LOGGER.info(
- "[AdjustRegionGroupNum] The maximum number of SchemaRegionGroups for Database: {} is adjusted to: {}",
+ ConfigNodeMessages.ADJUSTREGIONGROUPNUM_THE_MAXIMUM_NUMBER_OF_SCHEMAREGIONGROUPS_FOR,
currentSchema.getName(),
currentSchema.getMaxSchemaRegionGroupNum());
}
@@ -258,11 +259,11 @@ public TSStatus alterDatabase(final DatabaseSchemaPlan plan) {
currentSchema.getMinDataRegionGroupNum(),
currentSchema.getMaxDataRegionGroupNum()));
LOGGER.info(
- "[AdjustRegionGroupNum] The minimum number of DataRegionGroups for Database: {} is adjusted to: {}",
+ ConfigNodeMessages.ADJUSTREGIONGROUPNUM_THE_MINIMUM_NUMBER_OF_DATAREGIONGROUPS_FOR,
currentSchema.getName(),
currentSchema.getMinDataRegionGroupNum());
LOGGER.info(
- "[AdjustRegionGroupNum] The maximum number of DataRegionGroups for Database: {} is adjusted to: {}",
+ ConfigNodeMessages.ADJUSTREGIONGROUPNUM_THE_MAXIMUM_NUMBER_OF_DATAREGIONGROUPS_FOR,
currentSchema.getName(),
currentSchema.getMaxDataRegionGroupNum());
}
@@ -270,7 +271,7 @@ public TSStatus alterDatabase(final DatabaseSchemaPlan plan) {
if (alterSchema.isSetTTL()) {
currentSchema.setTTL(alterSchema.getTTL());
LOGGER.info(
- "[SetTTL] The ttl of Database: {} is adjusted to: {}",
+ ConfigNodeMessages.SETTTL_THE_TTL_OF_DATABASE_IS_ADJUSTED_TO,
currentSchema.getName(),
currentSchema.getTTL());
}
@@ -307,7 +308,7 @@ public TSStatus deleteDatabase(final DeleteDatabasePlan plan) {
result.setCode(TSStatusCode.SUCCESS_STATUS.getStatusCode());
} catch (final MetadataException e) {
- LOGGER.warn("Database not exist", e);
+ LOGGER.warn(ConfigNodeMessages.DATABASE_NOT_EXIST, e);
result
.setCode(TSStatusCode.SUCCESS_STATUS.getStatusCode())
.setMessage("Database not exist: " + e.getMessage());
@@ -505,7 +506,8 @@ public TSStatus adjustMaxRegionGroupCount(final AdjustMaxRegionGroupNumPlan plan
result.setCode(TSStatusCode.SUCCESS_STATUS.getStatusCode());
} catch (final MetadataException e) {
LOGGER.info(
- "Database inconsistency detected when adjusting max region group count, message: {}, will be corrected by the following adjusting plans",
+ ConfigNodeMessages
+ .DATABASE_INCONSISTENCY_DETECTED_WHEN_ADJUSTING_MAX_REGION_GROUP_COUNT_MESSAGE,
e.getMessage());
result.setCode(e.getErrorCode()).setMessage(e.getMessage());
} finally {
@@ -748,7 +750,7 @@ public boolean processDatabaseSchemaSnapshot(
final File snapshotFile = new File(snapshotDir, snapshotFileName);
if (snapshotFile.exists() && snapshotFile.isFile()) {
LOGGER.error(
- "Failed to take snapshot, because snapshot file [{}] is already exist.",
+ ConfigNodeMessages.FAILED_TO_TAKE_SNAPSHOT_BECAUSE_SNAPSHOT_FILE_IS_ALREADY_EXIST,
snapshotFile.getAbsolutePath());
return false;
}
@@ -775,7 +777,8 @@ public boolean processDatabaseSchemaSnapshot(
break;
} else {
LOGGER.warn(
- "Can't delete temporary snapshot file: {}, retrying...", tmpFile.getAbsolutePath());
+ ConfigNodeMessages.CAN_T_DELETE_TEMPORARY_SNAPSHOT_FILE_RETRYING,
+ tmpFile.getAbsolutePath());
}
}
databaseReadWriteLock.readLock().unlock();
@@ -810,7 +813,7 @@ public void processMTreeLoadSnapshot(
final File snapshotFile = new File(snapshotDir, snapshotFileName);
if (!snapshotFile.exists() || !snapshotFile.isFile()) {
LOGGER.error(
- "Failed to load snapshot,snapshot file [{}] is not exist.",
+ ConfigNodeMessages.FAILED_TO_LOAD_SNAPSHOT_SNAPSHOT_FILE_IS_NOT_EXIST_2,
snapshotFile.getAbsolutePath());
return;
}
@@ -838,7 +841,7 @@ public Pair, Set> getNodesListInGivenLevel(
matchedPathsInNextLevel =
treeModelMTree.getNodesListInGivenLevel(partialPath, level, true, scope);
} catch (MetadataException e) {
- LOGGER.error("Error get matched paths in given level.", e);
+ LOGGER.error(ConfigNodeMessages.ERROR_GET_MATCHED_PATHS_IN_GIVEN_LEVEL, e);
} finally {
databaseReadWriteLock.readLock().unlock();
}
@@ -853,7 +856,7 @@ public Pair, Set> getChildNodePathInNextLevel(
try {
matchedPathsInNextLevel = treeModelMTree.getChildNodePathInNextLevel(partialPath, scope);
} catch (MetadataException e) {
- LOGGER.error("Error get matched paths in next level.", e);
+ LOGGER.error(ConfigNodeMessages.ERROR_GET_MATCHED_PATHS_IN_NEXT_LEVEL, e);
} finally {
databaseReadWriteLock.readLock().unlock();
}
@@ -1058,7 +1061,7 @@ public AllTemplateSetInfoResp getAllTemplateSetInfo() {
templateSetInfo.put(id, pathSetInfoList);
}
} catch (MetadataException e) {
- LOGGER.error("Error occurred when get paths set on template {}", id, e);
+ LOGGER.error(ConfigNodeMessages.ERROR_OCCURRED_WHEN_GET_PATHS_SET_ON_TEMPLATE, id, e);
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ConfigMTree.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ConfigMTree.java
index c39c5fe3ff9db..f61e9aa915279 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ConfigMTree.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ConfigMTree.java
@@ -42,6 +42,7 @@
import org.apache.iotdb.commons.utils.MetadataUtils;
import org.apache.iotdb.commons.utils.PathUtils;
import org.apache.iotdb.commons.utils.ThriftConfigNodeSerDeUtils;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.manager.schema.ClusterSchemaManager;
import org.apache.iotdb.confignode.persistence.schema.mnode.IConfigMNode;
import org.apache.iotdb.confignode.persistence.schema.mnode.factory.ConfigMNodeFactory;
@@ -497,7 +498,8 @@ public void checkTemplateOnPath(final PartialPath path) throws MetadataException
IConfigMNode child;
if (cur.getSchemaTemplateId() != NON_TEMPLATE) {
- throw new MetadataException("Template already exists on " + cur.getFullPath());
+ throw new MetadataException(
+ ConfigNodeMessages.TEMPLATE_ALREADY_EXISTS_ON + cur.getFullPath());
}
for (int i = 1; i < nodeNames.length; i++) {
@@ -507,7 +509,8 @@ public void checkTemplateOnPath(final PartialPath path) throws MetadataException
}
cur = child;
if (cur.getSchemaTemplateId() != NON_TEMPLATE) {
- throw new MetadataException("Template already exists on " + cur.getFullPath());
+ throw new MetadataException(
+ ConfigNodeMessages.TEMPLATE_ALREADY_EXISTS_ON + cur.getFullPath());
}
}
@@ -526,7 +529,8 @@ private void checkTemplateOnSubtree(final IConfigMNode node) throws MetadataExce
continue;
}
if (child.getSchemaTemplateId() != NON_TEMPLATE) {
- throw new MetadataException("Template already exists on " + child.getFullPath());
+ throw new MetadataException(
+ ConfigNodeMessages.TEMPLATE_ALREADY_EXISTS_ON + child.getFullPath());
}
checkTemplateOnSubtree(child);
}
@@ -658,7 +662,7 @@ private IConfigMNode getNodeSetTemplate(int templateId, PartialPath path)
}
if (cur.getSchemaTemplateId() != templateId) {
throw new MetadataException(
- String.format("Template %s is not set on path %s", templateId, path));
+ String.format(ConfigNodeMessages.TEMPLATE_IS_NOT_SET_ON_PATH, templateId, path));
}
return cur;
}
@@ -972,7 +976,7 @@ public boolean preDeleteColumn(
}
if (columnSchema.getColumnCategory() == TsTableColumnCategory.TAG
|| columnSchema.getColumnCategory() == TsTableColumnCategory.TIME) {
- throw new SemanticException("Dropping tag or time column is not supported.");
+ throw new SemanticException(ConfigNodeMessages.DROPPING_TAG_OR_TIME_COLUMN_IS_NOT_SUPPORTED);
}
node.addPreDeletedColumn(columnName);
@@ -1001,13 +1005,14 @@ public void preAlterColumnDataType(
PathUtils.unQualifyDatabaseName(database.getFullPath()), tableName, columnName);
}
if (columnSchema.getColumnCategory() != TsTableColumnCategory.FIELD) {
- throw new SemanticException("Can only alter datatype of FIELD columns");
+ throw new SemanticException(ConfigNodeMessages.CAN_ONLY_ALTER_DATATYPE_OF_FIELD_COLUMNS);
}
if (!MetadataUtils.canAlter(columnSchema.getDataType(), dataType)) {
throw new SemanticException(
String.format(
- "New type %s is not compatible with the existing one %s",
- dataType, columnSchema.getDataType()));
+ ConfigNodeMessages.NEW_TYPE_IS_NOT_COMPATIBLE_WITH_THE_EXISTING_ONE,
+ dataType,
+ columnSchema.getDataType()));
}
node.addPreAlteredColumn(columnName, dataType);
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ConfigNodeSnapshotParser.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ConfigNodeSnapshotParser.java
index 841264416946f..4dc162d8f4535 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ConfigNodeSnapshotParser.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ConfigNodeSnapshotParser.java
@@ -23,6 +23,7 @@
import org.apache.iotdb.commons.file.SystemFileFactory;
import org.apache.iotdb.confignode.conf.ConfigNodeConfig;
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.persistence.TTLInfo;
import org.apache.tsfile.utils.Pair;
@@ -169,12 +170,12 @@ public static CNPhysicalPlanGenerator translate2PhysicalPlan(
final Path path1, final Path path2, final CNSnapshotFileType type, final String userName)
throws IOException {
if (path1 == null) {
- LOGGER.warn("Path1 should not be null");
+ LOGGER.warn(ConfigNodeMessages.PATH1_SHOULD_NOT_BE_NULL);
return null;
}
if (!path1.toFile().exists()) {
- LOGGER.warn("File {} not exists", path1.toFile().getName());
+ LOGGER.warn(ConfigNodeMessages.FILE_NOT_EXISTS, path1.toFile().getName());
return null;
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/TemplatePreSetTable.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/TemplatePreSetTable.java
index 251100c00718a..9105e5a7d01da 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/TemplatePreSetTable.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/TemplatePreSetTable.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.exception.IllegalPathException;
import org.apache.iotdb.commons.path.PartialPath;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.tsfile.utils.ReadWriteIOUtils;
import org.slf4j.Logger;
@@ -108,7 +109,8 @@ public boolean processTakeSnapshot(File snapshotDir) throws IOException {
File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME);
if (snapshotFile.exists() && snapshotFile.isFile()) {
LOGGER.error(
- "Failed to take snapshot of TemplatePreSetTable, because snapshot file [{}] is already exist.",
+ ConfigNodeMessages
+ .FAILED_TO_TAKE_SNAPSHOT_OF_TEMPLATEPRESETTABLE_BECAUSE_SNAPSHOT_FILE_IS,
snapshotFile.getAbsolutePath());
return false;
}
@@ -132,7 +134,8 @@ public boolean processTakeSnapshot(File snapshotDir) throws IOException {
break;
} else {
LOGGER.warn(
- "Can't delete temporary snapshot file: {}, retrying...", tmpFile.getAbsolutePath());
+ ConfigNodeMessages.CAN_T_DELETE_TEMPORARY_SNAPSHOT_FILE_RETRYING,
+ tmpFile.getAbsolutePath());
}
}
}
@@ -151,7 +154,7 @@ public void processLoadSnapshot(File snapshotDir) throws IOException {
if (!snapshotFile.isFile()) {
LOGGER.error(
- "Failed to load snapshot of TemplatePreSetTable,snapshot file [{}] is not a valid file.",
+ ConfigNodeMessages.FAILED_TO_LOAD_SNAPSHOT_OF_TEMPLATEPRESETTABLE_SNAPSHOT_FILE_IS_NOT,
snapshotFile.getAbsolutePath());
return;
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/TemplateTable.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/TemplateTable.java
index 14d31158d765f..10fa66108bb79 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/TemplateTable.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/TemplateTable.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.commons.exception.MetadataException;
import org.apache.iotdb.commons.schema.template.Template;
import org.apache.iotdb.commons.utils.TestOnly;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.db.exception.metadata.template.UndefinedTemplateException;
import org.apache.iotdb.db.schemaengine.template.alter.TemplateExtendInfo;
@@ -76,7 +77,8 @@ public Template getTemplate(String name) throws MetadataException {
try {
Template template = templateMap.get(name);
if (template == null) {
- throw new MetadataException(String.format("Template %s does not exist", name));
+ throw new MetadataException(
+ String.format(ConfigNodeMessages.TEMPLATE_DOES_NOT_EXIST, name));
}
return templateMap.get(name);
} finally {
@@ -90,7 +92,7 @@ public Template getTemplate(int templateId) throws MetadataException {
Template template = templateIdMap.get(templateId);
if (template == null) {
throw new MetadataException(
- String.format("Template with id=%s does not exist", templateId));
+ String.format(ConfigNodeMessages.TEMPLATE_WITH_ID_DOES_NOT_EXIST, templateId));
}
return template;
} finally {
@@ -113,8 +115,9 @@ public void createTemplate(final Template template) throws MetadataException {
final Template temp = this.templateMap.get(template.getName());
if (temp != null) {
LOGGER.error(
- "Failed to create template, because template name {} exists", template.getName());
- throw new MetadataException("Duplicated template name: " + temp.getName());
+ ConfigNodeMessages.FAILED_TO_CREATE_TEMPLATE_BECAUSE_TEMPLATE_NAME_EXISTS,
+ template.getName());
+ throw new MetadataException(ConfigNodeMessages.DUPLICATED_TEMPLATE_NAME + temp.getName());
}
template.setId(templateIdGenerator.getAndIncrement());
this.templateMap.put(template.getName(), template);
@@ -129,7 +132,7 @@ public void dropTemplate(String templateName) throws MetadataException {
try {
Template temp = this.templateMap.remove(templateName);
if (temp == null) {
- LOGGER.error("Undefined template {}", templateName);
+ LOGGER.error(ConfigNodeMessages.UNDEFINED_TEMPLATE, templateName);
throw new UndefinedTemplateException(templateName);
}
templateIdMap.remove(temp.getId());
@@ -166,8 +169,10 @@ public void extendTemplate(TemplateExtendInfo templateExtendInfo) throws Metadat
&& !measurementSchema.getCompressor().equals(compressionTypeList.get(i)))) {
throw new MetadataException(
String.format(
- "Schema of measurement %s is not compatible with existing measurement in template %s",
- measurementList.get(i), template.getName()));
+ ConfigNodeMessages
+ .SCHEMA_OF_MEASUREMENT_IS_NOT_COMPATIBLE_WITH_EXISTING_MEASUREMENT_IN,
+ measurementList.get(i),
+ template.getName()));
}
}
}
@@ -211,7 +216,7 @@ public boolean processTakeSnapshot(File snapshotDir) throws IOException {
File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME);
if (snapshotFile.exists() && snapshotFile.isFile()) {
LOGGER.error(
- "template failed to take snapshot, because snapshot file [{}] is already exist.",
+ ConfigNodeMessages.TEMPLATE_FAILED_TO_TAKE_SNAPSHOT_BECAUSE_SNAPSHOT_FILE_IS_ALREADY,
snapshotFile.getAbsolutePath());
return false;
}
@@ -236,7 +241,8 @@ public boolean processTakeSnapshot(File snapshotDir) throws IOException {
break;
} else {
LOGGER.warn(
- "Can't delete temporary snapshot file: {}, retrying...", tmpFile.getAbsolutePath());
+ ConfigNodeMessages.CAN_T_DELETE_TEMPORARY_SNAPSHOT_FILE_RETRYING,
+ tmpFile.getAbsolutePath());
}
}
templateReadWriteLock.writeLock().unlock();
@@ -247,7 +253,7 @@ public void processLoadSnapshot(File snapshotDir) throws IOException {
File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME);
if (!snapshotFile.exists() || !snapshotFile.isFile()) {
LOGGER.error(
- "Failed to load snapshot,snapshot file [{}] is not exist.",
+ ConfigNodeMessages.FAILED_TO_LOAD_SNAPSHOT_SNAPSHOT_FILE_IS_NOT_EXIST_2,
snapshotFile.getAbsolutePath());
return;
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/mnode/IConfigMNode.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/mnode/IConfigMNode.java
index dd2b5932b0038..8c6529b55b03f 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/mnode/IConfigMNode.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/mnode/IConfigMNode.java
@@ -23,6 +23,7 @@
import org.apache.iotdb.commons.schema.node.role.IDeviceMNode;
import org.apache.iotdb.commons.schema.node.role.IInternalMNode;
import org.apache.iotdb.commons.schema.node.role.IMeasurementMNode;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.rpc.thrift.TDatabaseSchema;
public interface IConfigMNode extends IMNode {
@@ -59,16 +60,16 @@ default boolean isMeasurement() {
@Override
default IInternalMNode getAsInternalMNode() {
- throw new UnsupportedOperationException("Wrong node type");
+ throw new UnsupportedOperationException(ConfigNodeMessages.WRONG_NODE_TYPE);
}
@Override
default IDeviceMNode getAsDeviceMNode() {
- throw new UnsupportedOperationException("Wrong node type");
+ throw new UnsupportedOperationException(ConfigNodeMessages.WRONG_NODE_TYPE);
}
@Override
default IMeasurementMNode getAsMeasurementMNode() {
- throw new UnsupportedOperationException("Wrong node type");
+ throw new UnsupportedOperationException(ConfigNodeMessages.WRONG_NODE_TYPE);
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/mnode/basic/ConfigBasicMNode.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/mnode/basic/ConfigBasicMNode.java
index 47575892790ed..63e7c775295ff 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/mnode/basic/ConfigBasicMNode.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/mnode/basic/ConfigBasicMNode.java
@@ -23,6 +23,7 @@
import org.apache.iotdb.commons.schema.node.role.IDatabaseMNode;
import org.apache.iotdb.commons.schema.node.utils.IMNodeContainer;
import org.apache.iotdb.commons.schema.node.visitor.MNodeVisitor;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.persistence.schema.mnode.IConfigMNode;
import org.apache.iotdb.confignode.persistence.schema.mnode.container.ConfigMNodeContainer;
import org.apache.iotdb.confignode.persistence.schema.mnode.info.ConfigMNodeInfo;
@@ -167,7 +168,7 @@ public boolean isDatabase() {
@Override
public IDatabaseMNode getAsDatabaseMNode() {
- throw new UnsupportedOperationException("Wrong MNode Type");
+ throw new UnsupportedOperationException(ConfigNodeMessages.WRONG_MNODE_TYPE);
}
@Override
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/mnode/factory/ConfigMNodeFactory.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/mnode/factory/ConfigMNodeFactory.java
index 382e7c6faf58a..49b33a8a9e0f6 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/mnode/factory/ConfigMNodeFactory.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/mnode/factory/ConfigMNodeFactory.java
@@ -23,6 +23,7 @@
import org.apache.iotdb.commons.schema.node.role.IMeasurementMNode;
import org.apache.iotdb.commons.schema.node.utils.IMNodeFactory;
import org.apache.iotdb.commons.schema.node.utils.MNodeFactory;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.persistence.schema.mnode.IConfigMNode;
import org.apache.iotdb.confignode.persistence.schema.mnode.impl.ConfigBasicInternalMNode;
import org.apache.iotdb.confignode.persistence.schema.mnode.impl.ConfigDatabaseMNode;
@@ -77,6 +78,6 @@ public IConfigMNode createInternalMNode(IConfigMNode parent, String name) {
@Override
public IMeasurementMNode createLogicalViewMNode(
IDeviceMNode parent, String name, IMeasurementSchema measurementSchema) {
- throw new UnsupportedOperationException("View is not supported.");
+ throw new UnsupportedOperationException(ConfigNodeMessages.VIEW_IS_NOT_SUPPORTED);
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/mnode/impl/ConfigTableNode.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/mnode/impl/ConfigTableNode.java
index 196a0ed47e972..1575bec961bd6 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/mnode/impl/ConfigTableNode.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/mnode/impl/ConfigTableNode.java
@@ -27,6 +27,7 @@
import org.apache.iotdb.commons.schema.node.visitor.MNodeVisitor;
import org.apache.iotdb.commons.schema.table.TableNodeStatus;
import org.apache.iotdb.commons.schema.table.TsTable;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.persistence.schema.mnode.IConfigMNode;
import org.apache.iotdb.confignode.persistence.schema.mnode.container.ConfigMNodeContainer;
import org.apache.iotdb.confignode.persistence.schema.mnode.info.ConfigTableInfo;
@@ -221,7 +222,7 @@ public boolean isDatabase() {
@Override
public IDatabaseMNode getAsDatabaseMNode() {
- throw new UnsupportedOperationException("Wrong MNode Type");
+ throw new UnsupportedOperationException(ConfigNodeMessages.WRONG_MNODE_TYPE);
}
@Override
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/subscription/SubscriptionInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/subscription/SubscriptionInfo.java
index 0c262655156d3..6fcdcf28ebd0f 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/subscription/SubscriptionInfo.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/subscription/SubscriptionInfo.java
@@ -35,6 +35,7 @@
import org.apache.iotdb.confignode.consensus.request.write.subscription.topic.runtime.TopicHandleMetaChangePlan;
import org.apache.iotdb.confignode.consensus.response.subscription.SubscriptionTableResp;
import org.apache.iotdb.confignode.consensus.response.subscription.TopicTableResp;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.rpc.thrift.TCloseConsumerReq;
import org.apache.iotdb.confignode.rpc.thrift.TCreateConsumerReq;
import org.apache.iotdb.confignode.rpc.thrift.TCreateTopicReq;
@@ -186,7 +187,7 @@ public void validateBeforeDroppingTopic(String topicName) throws SubscriptionExc
private void checkBeforeDropTopicInternal(String topicName) throws SubscriptionException {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(
- "Check before dropping topic: {}, topic exists: {}",
+ ConfigNodeMessages.CHECK_BEFORE_DROPPING_TOPIC_TOPIC_EXISTS,
topicName,
isTopicExisted(topicName));
}
@@ -380,7 +381,7 @@ public TSStatus dropTopic(DropTopicPlan plan) {
public TSStatus handleTopicMetaChanges(TopicHandleMetaChangePlan plan) {
acquireWriteLock();
try {
- LOGGER.info("Handling topic meta changes ...");
+ LOGGER.info(ConfigNodeMessages.HANDLING_TOPIC_META_CHANGES);
topicMetaKeeper.clear();
@@ -388,7 +389,7 @@ public TSStatus handleTopicMetaChanges(TopicHandleMetaChangePlan plan) {
.forEach(
topicMeta -> {
topicMetaKeeper.addTopicMeta(topicMeta.getTopicName(), topicMeta);
- LOGGER.info("Recording topic meta: {}", topicMeta);
+ LOGGER.info(ConfigNodeMessages.RECORDING_TOPIC_META, topicMeta);
});
return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
@@ -548,7 +549,7 @@ public TSStatus alterConsumerGroup(AlterConsumerGroupPlan plan) {
public TSStatus handleConsumerGroupMetaChanges(ConsumerGroupHandleMetaChangePlan plan) {
acquireWriteLock();
try {
- LOGGER.info("Handling consumer group meta changes ...");
+ LOGGER.info(ConfigNodeMessages.HANDLING_CONSUMER_GROUP_META_CHANGES);
consumerGroupMetaKeeper.clear();
@@ -557,7 +558,7 @@ public TSStatus handleConsumerGroupMetaChanges(ConsumerGroupHandleMetaChangePlan
consumerGroupMeta -> {
consumerGroupMetaKeeper.addConsumerGroupMeta(
consumerGroupMeta.getConsumerGroupId(), consumerGroupMeta);
- LOGGER.info("Recording consumer group meta: {}", consumerGroupMeta);
+ LOGGER.info(ConfigNodeMessages.RECORDING_CONSUMER_GROUP_META, consumerGroupMeta);
});
return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
@@ -732,7 +733,8 @@ public boolean processTakeSnapshot(File snapshotDir) throws IOException {
final File snapshotFile = new File(snapshotDir, SNAPSHOT_FILE_NAME);
if (snapshotFile.exists() && snapshotFile.isFile()) {
LOGGER.error(
- "Failed to take subscription snapshot, because snapshot file {} is already exist.",
+ ConfigNodeMessages
+ .FAILED_TO_TAKE_SUBSCRIPTION_SNAPSHOT_BECAUSE_SNAPSHOT_FILE_IS_ALREADY,
snapshotFile.getAbsolutePath());
return false;
}
@@ -756,7 +758,7 @@ public void processLoadSnapshot(File snapshotDir) throws IOException {
final File snapshotFile = new File(snapshotDir, SNAPSHOT_FILE_NAME);
if (!snapshotFile.exists() || !snapshotFile.isFile()) {
LOGGER.error(
- "Failed to load subscription snapshot, snapshot file {} is not exist.",
+ ConfigNodeMessages.FAILED_TO_LOAD_SUBSCRIPTION_SNAPSHOT_SNAPSHOT_FILE_IS_NOT_EXIST,
snapshotFile.getAbsolutePath());
return;
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/PartitionTableAutoCleaner.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/PartitionTableAutoCleaner.java
index 84f12a78ec46b..c7a7488de3746 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/PartitionTableAutoCleaner.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/PartitionTableAutoCleaner.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.commons.conf.CommonDescriptor;
import org.apache.iotdb.commons.utils.PathUtils;
import org.apache.iotdb.confignode.consensus.request.write.partition.AutoCleanPartitionTablePlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.consensus.exception.ConsensusException;
@@ -52,7 +53,8 @@ public PartitionTableAutoCleaner(ConfigManager configManager) {
super(COMMON_CONFIG.getTTLCheckInterval());
this.configManager = configManager;
LOGGER.info(
- "[PartitionTableCleaner] The PartitionTableAutoCleaner is started with cycle={}ms",
+ ProcedureMessages
+ .PARTITIONTABLECLEANER_THE_PARTITIONTABLEAUTOCLEANER_IS_STARTED_WITH_CYCLE_MS,
COMMON_CONFIG.getTTLCheckInterval());
}
@@ -78,7 +80,8 @@ protected void periodicExecute(Env env) {
databaseTTLMap.put(database, databaseTTL);
}
LOGGER.info(
- "[PartitionTableCleaner] Periodically activate PartitionTableAutoCleaner, databaseTTL: {}",
+ ProcedureMessages
+ .PARTITIONTABLECLEANER_PERIODICALLY_ACTIVATE_PARTITIONTABLEAUTOCLEANER_DATABASETTL,
databaseTTLMap);
for (String database : databases) {
long databaseTTL = databaseTTLMap.get(database);
@@ -91,7 +94,8 @@ protected void periodicExecute(Env env) {
}
if (!databaseTTLMap.isEmpty()) {
LOGGER.info(
- "[PartitionTableCleaner] Periodically activate PartitionTableAutoCleaner for: {}",
+ ProcedureMessages
+ .PARTITIONTABLECLEANER_PERIODICALLY_ACTIVATE_PARTITIONTABLEAUTOCLEANER_FOR,
databaseTTLMap);
// Only clean the partition table when necessary
TTimePartitionSlot currentTimePartitionSlot = getCurrentTimePartitionSlot();
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/Procedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/Procedure.java
index 89e6e37e431a0..1cbeb259221ac 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/Procedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/Procedure.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.confignode.procedure;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.state.ProcedureLockState;
@@ -677,7 +678,7 @@ protected synchronized boolean setTimeoutFailure(Env env) {
long timeDiff = System.currentTimeMillis() - lastUpdate;
setFailure(
"ProcedureExecutor",
- new ProcedureException("Operation timed out after " + timeDiff + " ms."));
+ new ProcedureException(ProcedureMessages.OPERATION_TIMED_OUT_AFTER + timeDiff + " ms."));
return true;
}
return false;
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/ProcedureExecutor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/ProcedureExecutor.java
index cd1660862c745..66eb4a9dfd6aa 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/ProcedureExecutor.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/ProcedureExecutor.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.commons.concurrent.ThreadName;
import org.apache.iotdb.commons.utils.RetryUtils;
import org.apache.iotdb.commons.utils.TestOnly;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.scheduler.ProcedureScheduler;
@@ -167,7 +168,7 @@ private void recover() {
case ROLLEDBACK:
case INITIALIZING:
LOG.error("Unexpected state:{} for {}", proc.getState(), proc);
- throw new UnsupportedOperationException("Unexpected state");
+ throw new UnsupportedOperationException(ProcedureMessages.UNEXPECTED_STATE);
default:
break;
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/TimeoutExecutorThread.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/TimeoutExecutorThread.java
index 1614148abd308..3e62cdc1d4f30 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/TimeoutExecutorThread.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/TimeoutExecutorThread.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.confignode.procedure;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
+
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -80,7 +82,7 @@ public void run() {
} catch (Exception e) {
// Do nothing since new CN leader can converge to the correct state when restore this
// procedure.
- LOGGER.warn("Failed to update procedure {}", procedure, e);
+ LOGGER.warn(ProcedureMessages.FAILED_TO_UPDATE_PROCEDURE, procedure, e);
}
executor.getScheduler().addFront(procedure);
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/RegionMaintainHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/RegionMaintainHandler.java
index 47105dda1c582..caa1b79b76a38 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/RegionMaintainHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/RegionMaintainHandler.java
@@ -47,6 +47,7 @@
import org.apache.iotdb.confignode.consensus.request.write.partition.AddRegionLocationPlan;
import org.apache.iotdb.confignode.consensus.request.write.partition.RemoveRegionLocationPlan;
import org.apache.iotdb.confignode.consensus.request.write.region.CreateRegionGroupsPlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.manager.load.cache.consensus.ConsensusGroupHeartbeatSample;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
@@ -131,7 +132,7 @@ public TDataNodeLocation findDestDataNode(TConsensusGroupId regionId) {
TSStatus status;
List regionReplicaNodes = findRegionLocations(regionId);
if (regionReplicaNodes.isEmpty()) {
- LOGGER.warn("Cannot find region replica nodes, region: {}", regionId);
+ LOGGER.warn(ProcedureMessages.CANNOT_FIND_REGION_REPLICA_NODES_REGION, regionId);
status = new TSStatus(TSStatusCode.MIGRATE_REGION_ERROR.getStatusCode());
status.setMessage("Cannot find region replica nodes, region: " + regionId);
return null;
@@ -139,7 +140,7 @@ public TDataNodeLocation findDestDataNode(TConsensusGroupId regionId) {
Optional newNode = pickNewReplicaNodeForRegion(regionReplicaNodes);
if (!newNode.isPresent()) {
- LOGGER.warn("No enough Data node to migrate region: {}", regionId);
+ LOGGER.warn(ProcedureMessages.NO_ENOUGH_DATA_NODE_TO_MIGRATE_REGION, regionId);
return null;
}
return newNode.get();
@@ -161,7 +162,7 @@ public TSStatus createNewRegionPeer(TConsensusGroupId regionId, TDataNodeLocatio
List regionReplicaNodes = findRegionLocations(regionId);
if (regionReplicaNodes.isEmpty()) {
LOGGER.warn(
- "{}, Cannot find region replica nodes in createPeer, regionId: {}",
+ ProcedureMessages.CANNOT_FIND_REGION_REPLICA_NODES_IN_CREATEPEER_REGIONID,
REGION_MIGRATE_PROCESS,
regionId);
status = new TSStatus(TSStatusCode.MIGRATE_REGION_ERROR.getStatusCode());
@@ -194,13 +195,13 @@ public TSStatus createNewRegionPeer(TConsensusGroupId regionId, TDataNodeLocatio
if (isSucceed(status)) {
LOGGER.info(
- "{}, Send action createNewRegionPeer finished, regionId: {}, newPeerDataNodeId: {}",
+ ProcedureMessages.SEND_ACTION_CREATENEWREGIONPEER_FINISHED_REGIONID_NEWPEERDATANODEID,
REGION_MIGRATE_PROCESS,
regionId,
getIdWithRpcEndpoint(destDataNode));
} else {
LOGGER.error(
- "{}, Send action createNewRegionPeer error, regionId: {}, newPeerDataNodeId: {}, result: {}",
+ ProcedureMessages.SEND_ACTION_CREATENEWREGIONPEER_ERROR_REGIONID_NEWPEERDATANODEID_RESULT,
REGION_MIGRATE_PROCESS,
regionId,
getIdWithRpcEndpoint(destDataNode),
@@ -238,7 +239,8 @@ public TSStatus submitAddRegionPeerTask(
maintainPeerReq,
CnToDnSyncRequestType.ADD_REGION_PEER);
LOGGER.info(
- "{}, Send action addRegionPeer finished, regionId: {}, rpcDataNode: {}, destDataNode: {}, status: {}",
+ ProcedureMessages
+ .SEND_ACTION_ADDREGIONPEER_FINISHED_REGIONID_RPCDATANODE_DESTDATANODE_STATUS,
REGION_MIGRATE_PROCESS,
regionId,
getIdWithRpcEndpoint(coordinator),
@@ -275,7 +277,7 @@ public TSStatus submitRemoveRegionPeerTask(
maintainPeerReq,
CnToDnSyncRequestType.REMOVE_REGION_PEER);
LOGGER.info(
- "{}, Send action removeRegionPeer finished, regionId: {}, rpcDataNode: {}",
+ ProcedureMessages.SEND_ACTION_REMOVEREGIONPEER_FINISHED_REGIONID_RPCDATANODE,
REGION_MIGRATE_PROCESS,
regionId,
getIdWithRpcEndpoint(coordinator));
@@ -303,7 +305,7 @@ public TSStatus submitDeleteOldRegionPeerTask(
final boolean useFullRetry = !NodeStatus.Unknown.equals(nodeStatus);
if (!useFullRetry) {
LOGGER.info(
- "{}, DataNode {} is {}, submit DELETE_OLD_REGION_PEER with a single RPC attempt and let RemoveRegionPeerProcedure handle retries.",
+ ProcedureMessages.DATANODE_IS_SUBMIT_DELETE_OLD_REGION_PEER_WITH_A_SINGLE,
REGION_MIGRATE_PROCESS,
simplifiedLocation(originalDataNode),
nodeStatus);
@@ -316,7 +318,7 @@ public TSStatus submitDeleteOldRegionPeerTask(
CnToDnSyncRequestType.DELETE_OLD_REGION_PEER,
useFullRetry);
LOGGER.info(
- "{}, Send action deleteOldRegionPeer finished, regionId: {}, dataNodeId: {}",
+ ProcedureMessages.SEND_ACTION_DELETEOLDREGIONPEER_FINISHED_REGIONID_DATANODEID,
REGION_MIGRATE_PROCESS,
regionId,
originalDataNode.getInternalEndPoint());
@@ -375,7 +377,7 @@ public TRegionMigrateResult waitTaskFinish(long taskId, TDataNodeLocation dataNo
long disconnectionTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - lastReportTime);
if (disconnectionTime > waitTime) {
LOGGER.warn(
- "{} task {} cannot get task report from DataNode {}, last report time is {} ago",
+ ProcedureMessages.TASK_CANNOT_GET_TASK_REPORT_FROM_DATANODE_LAST_REPORT_TIME,
REGION_MIGRATE_PROCESS,
taskId,
dataNodeLocation,
@@ -400,7 +402,7 @@ public void addRegionLocation(TConsensusGroupId regionId, TDataNodeLocation newL
AddRegionLocationPlan req = new AddRegionLocationPlan(regionId, newLocation);
TSStatus status = configManager.getPartitionManager().addRegionLocation(req);
LOGGER.info(
- "AddRegionLocation finished, add region {} to {}, result is {}",
+ ProcedureMessages.ADDREGIONLOCATION_FINISHED_ADD_REGION_TO_RESULT_IS,
regionId,
getIdWithRpcEndpoint(newLocation),
status);
@@ -422,7 +424,7 @@ public void removeRegionLocation(
RemoveRegionLocationPlan req = new RemoveRegionLocationPlan(regionId, deprecatedLocation);
TSStatus status = configManager.getPartitionManager().removeRegionLocation(req);
LOGGER.info(
- "RemoveRegionLocation remove region {} from DataNode {}, result is {}",
+ ProcedureMessages.REMOVEREGIONLOCATION_REMOVE_REGION_FROM_DATANODE_RESULT_IS,
regionId,
getIdWithRpcEndpoint(deprecatedLocation),
status);
@@ -572,7 +574,7 @@ public void checkAndRepairConsensusPipes() {
String pipeName = entry.getKey();
if (!actualPipes.containsKey(pipeName)) {
LOGGER.warn(
- "[ConsensusPipeGuardian] consensus pipe [{}] missing, creating asynchronously",
+ ProcedureMessages.CONSENSUSPIPEGUARDIAN_CONSENSUS_PIPE_MISSING_CREATING_ASYNCHRONOUSLY,
pipeName);
TRegionReplicaSet replicaSet = entry.getValue();
ConsensusPipeName parsed = new ConsensusPipeName(pipeName);
@@ -597,12 +599,14 @@ public void checkAndRepairConsensusPipes() {
PipeStatus status = entry.getValue();
if (!expectedPipeToReplicaSet.containsKey(pipeName)) {
LOGGER.warn(
- "[ConsensusPipeGuardian] unexpected consensus pipe [{}] exists, dropping asynchronously",
+ ProcedureMessages
+ .CONSENSUSPIPEGUARDIAN_UNEXPECTED_CONSENSUS_PIPE_EXISTS_DROPPING_ASYNCHRONOUSLY,
pipeName);
configManager.getProcedureManager().dropConsensusPipeAsync(pipeName);
} else if (PipeStatus.STOPPED.equals(status)) {
LOGGER.warn(
- "[ConsensusPipeGuardian] consensus pipe [{}] is stopped, restarting asynchronously",
+ ProcedureMessages
+ .CONSENSUSPIPEGUARDIAN_CONSENSUS_PIPE_IS_STOPPED_RESTARTING_ASYNCHRONOUSLY,
pipeName);
configManager.getProcedureManager().startConsensusPipe(pipeName);
}
@@ -687,12 +691,13 @@ private void createSingleConsensusPipe(
TSStatus status = configManager.getProcedureManager().createConsensusPipe(req);
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.warn(
- "{}, Failed to create consensus pipe {}: {}",
+ ProcedureMessages.FAILED_TO_CREATE_CONSENSUS_PIPE,
REGION_MIGRATE_PROCESS,
req.getPipeName(),
status);
} else {
- LOGGER.info("{}, Created consensus pipe {}", REGION_MIGRATE_PROCESS, req.getPipeName());
+ LOGGER.info(
+ ProcedureMessages.CREATED_CONSENSUS_PIPE, REGION_MIGRATE_PROCESS, req.getPipeName());
}
}
@@ -712,7 +717,7 @@ private void createSingleConsensusPipeAsync(
regionId, senderNodeId, senderEndpoint, receiverNodeId, receiverEndpoint);
configManager.getProcedureManager().createConsensusPipeAsync(req);
LOGGER.info(
- "{}, Submitted async consensus pipe creation: {}",
+ ProcedureMessages.SUBMITTED_ASYNC_CONSENSUS_PIPE_CREATION,
REGION_MIGRATE_PROCESS,
req.getPipeName());
}
@@ -823,11 +828,15 @@ public void transferRegionLeader(
break;
}
if (retryTime++ > MAX_RETRY_TIME) {
- LOGGER.warn("[RemoveRegion] Ratis transfer leader fail, but procedure will continue.");
+ LOGGER.warn(
+ ProcedureMessages
+ .REMOVEREGION_RATIS_TRANSFER_LEADER_FAIL_BUT_PROCEDURE_WILL_CONTINUE);
return;
}
LOGGER.warn(
- "Call changeRegionLeader fail for the {} time, will sleep {} ms", retryTime, sleepTime);
+ ProcedureMessages.CALL_CHANGEREGIONLEADER_FAIL_FOR_THE_TIME_WILL_SLEEP_MS,
+ retryTime,
+ sleepTime);
Thread.sleep(sleepTime);
}
}
@@ -841,7 +850,7 @@ public void transferRegionLeader(
configManager.getLoadManager().getRouteBalancer().balanceRegionLeaderAndPriority();
LOGGER.info(
- "{}, Change region leader finished, regionId: {}, newLeaderNode: {}",
+ ProcedureMessages.CHANGE_REGION_LEADER_FINISHED_REGIONID_NEWLEADERNODE,
REGION_MIGRATE_PROCESS,
regionId,
newLeaderNode);
@@ -884,7 +893,7 @@ public Optional filterDataNodeWithOtherRegionReplica(
NodeStatus... allowingStatus) {
List regionLocations = findRegionLocations(regionId);
if (regionLocations.isEmpty()) {
- LOGGER.warn("Cannot find DataNodes contain the given region: {}", regionId);
+ LOGGER.warn(ProcedureMessages.CANNOT_FIND_DATANODES_CONTAIN_THE_GIVEN_REGION, regionId);
return Optional.empty();
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/RemoveDataNodeHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/RemoveDataNodeHandler.java
index 980dd712eb0fe..1545c720a0187 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/RemoveDataNodeHandler.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/RemoveDataNodeHandler.java
@@ -37,6 +37,8 @@
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
import org.apache.iotdb.confignode.consensus.request.write.datanode.RemoveDataNodePlan;
import org.apache.iotdb.confignode.consensus.response.datanode.DataNodeToStatusResp;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.manager.load.balancer.region.GreedyCopySetRegionGroupAllocator;
import org.apache.iotdb.confignode.manager.load.balancer.region.IRegionGroupAllocator;
@@ -131,7 +133,7 @@ public boolean checkEnoughDataNodeAfterRemoving(List removedD
public void changeDataNodeStatus(
List removedDataNodes, Map nodeStatusMap) {
LOGGER.info(
- "{}, Begin to change DataNode status, nodeStatusMap: {}",
+ ProcedureMessages.BEGIN_TO_CHANGE_DATANODE_STATUS_NODESTATUSMAP,
REMOVE_DATANODE_PROCESS,
nodeStatusMap);
@@ -155,7 +157,7 @@ public void changeDataNodeStatus(
if (!isSucceed(entry.getValue())) {
LOGGER.error(
- "{}, Failed to change DataNode status, dataNodeId={}, nodeStatus={}",
+ ProcedureMessages.FAILED_TO_CHANGE_DATANODE_STATUS_DATANODEID_NODESTATUS,
REMOVE_DATANODE_PROCESS,
dataNodeId,
nodeStatus);
@@ -170,7 +172,7 @@ public void changeDataNodeStatus(
NodeType.DataNode, dataNodeId, new NodeHeartbeatSample(currentTime, nodeStatus));
LOGGER.info(
- "{}, Force update NodeCache: dataNodeId={}, nodeStatus={}, currentTime={}",
+ ProcedureMessages.FORCE_UPDATE_NODECACHE_DATANODEID_NODESTATUS_CURRENTTIME,
REMOVE_DATANODE_PROCESS,
dataNodeId,
nodeStatus,
@@ -301,7 +303,7 @@ public List selectMigrationPlans(
TDataNodeConfiguration selectedNode = result.get(regionId);
LOGGER.info(
- "Selected DataNode {} for Region {}",
+ ProcedureMessages.SELECTED_DATANODE_FOR_REGION,
selectedNode.getLocation().getDataNodeId(),
regionId);
@@ -391,7 +393,7 @@ public void broadcastDataNodeStatusChange(List dataNodes) {
.map(RegionMaintainHandler::getIdWithRpcEndpoint)
.collect(Collectors.joining(", "));
LOGGER.info(
- "{}, BroadcastDataNodeStatusChange start, dataNode: {}",
+ ProcedureMessages.BROADCASTDATANODESTATUSCHANGE_START_DATANODE,
REMOVE_DATANODE_PROCESS,
dataNodesString);
@@ -417,7 +419,8 @@ public void broadcastDataNodeStatusChange(List dataNodes) {
cleanDataNodeCacheContext.getResponseMap().entrySet()) {
if (!isSucceed(entry.getValue())) {
LOGGER.error(
- "{}, BroadcastDataNodeStatusChange meets error, status change dataNodes: {}, error datanode: {}",
+ ProcedureMessages
+ .BROADCASTDATANODESTATUSCHANGE_MEETS_ERROR_STATUS_CHANGE_DATANODES_ERROR_DATANODE,
REMOVE_DATANODE_PROCESS,
dataNodesString,
entry.getValue());
@@ -426,7 +429,7 @@ public void broadcastDataNodeStatusChange(List dataNodes) {
}
LOGGER.info(
- "{}, BroadcastDataNodeStatusChange finished, dataNode: {}",
+ ProcedureMessages.BROADCASTDATANODESTATUSCHANGE_FINISHED_DATANODE,
REMOVE_DATANODE_PROCESS,
dataNodesString);
}
@@ -441,7 +444,7 @@ public void removeDataNodePersistence(List removedDataNodes)
try {
configManager.getConsensusManager().write(new RemoveDataNodePlan(removedDataNodes));
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
}
// Adjust maxRegionGroupNum
@@ -463,7 +466,7 @@ public void removeDataNodePersistence(List removedDataNodes)
public void stopDataNodes(List removedDataNodes) {
LOGGER.info(
- "{}, Begin to stop DataNodes and kill the DataNode process: {}",
+ ProcedureMessages.BEGIN_TO_STOP_DATANODES_AND_KILL_THE_DATANODE_PROCESS,
REMOVE_DATANODE_PROCESS,
removedDataNodes);
@@ -483,11 +486,11 @@ public void stopDataNodes(List removedDataNodes) {
configManager.getLoadManager().removeNodeCache(dataNodeId);
if (!isSucceed(entry.getValue())) {
LOGGER.error(
- "{}, Stop Data Node meets error, error datanode: {}",
+ ProcedureMessages.STOP_DATA_NODE_MEETS_ERROR_ERROR_DATANODE,
REMOVE_DATANODE_PROCESS,
entry.getValue());
} else {
- LOGGER.info("{}, Stop Data Node {} success.", REMOVE_DATANODE_PROCESS, dataNodeId);
+ LOGGER.info(ProcedureMessages.STOP_DATA_NODE_SUCCESS, REMOVE_DATANODE_PROCESS, dataNodeId);
}
}
}
@@ -569,8 +572,7 @@ public TSStatus checkRegionReplication(RemoveDataNodePlan removeDataNodePlan) {
configManager.getLoadManager().getNodeStatus(dataNodeLocation.getDataNodeId()))) {
removedDataNodes.remove(dataNodeLocation);
LOGGER.error(
- "Failed to remove data node {} because it is not in running and the configuration of cluster is one replication",
- dataNodeLocation);
+ ProcedureMessages.FAILED_TO_REMOVE_DATA_NODE_BECAUSE_IT_IS_NOT_IN, dataNodeLocation);
}
if (removedDataNodes.isEmpty()) {
status.setCode(TSStatusCode.NO_ENOUGH_DATANODE.getStatusCode());
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/cq/CreateCQProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/cq/CreateCQProcedure.java
index a19f3cb4ae880..ac964d23ca311 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/cq/CreateCQProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/cq/CreateCQProcedure.java
@@ -25,6 +25,7 @@
import org.apache.iotdb.confignode.consensus.request.write.cq.ActiveCQPlan;
import org.apache.iotdb.confignode.consensus.request.write.cq.AddCQPlan;
import org.apache.iotdb.confignode.consensus.request.write.cq.DropCQPlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.manager.cq.CQScheduleTask;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
@@ -99,20 +100,20 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, CreateCQState state)
activeCQ(env);
return Flow.NO_MORE_STATE;
default:
- throw new IllegalArgumentException("Unknown CreateCQState: " + state);
+ throw new IllegalArgumentException(ProcedureMessages.UNKNOWN_CREATECQSTATE + state);
}
} catch (Exception t) {
if (isRollbackSupported(state)) {
- LOGGER.error("Fail in CreateCQProcedure", t);
+ LOGGER.error(ProcedureMessages.FAIL_IN_CREATECQPROCEDURE, t);
setFailure(new ProcedureException(t));
} else {
LOGGER.error(
- "Retrievable error trying to create cq [{}], state [{}]", req.getCqId(), state, t);
+ ProcedureMessages.RETRIEVABLE_ERROR_TRYING_TO_CREATE_CQ_STATE, req.getCqId(), state, t);
if (getCycles() > RETRY_THRESHOLD) {
setFailure(
new ProcedureException(
String.format(
- "Fail to create trigger [%s] at STATE [%s]", req.getCqId(), state)));
+ ProcedureMessages.FAIL_TO_CREATE_TRIGGER_AT_STATE, req.getCqId(), state)));
}
}
}
@@ -132,13 +133,13 @@ private void addCQ(ConfigNodeProcedureEnv env) {
res.setMessage(e.getMessage());
}
if (res.code == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.debug("Finish init CQ {} successfully", req.cqId);
+ LOGGER.debug(ProcedureMessages.FINISH_INIT_CQ_SUCCESSFULLY, req.cqId);
setNextState(INACTIVE);
} else if (res.code == TSStatusCode.CQ_ALREADY_EXIST.getStatusCode()) {
- LOGGER.info("Failed to init CQ {} because such cq already exists", req.cqId);
+ LOGGER.info(ProcedureMessages.FAILED_TO_INIT_CQ_BECAUSE_SUCH_CQ_ALREADY_EXISTS, req.cqId);
setFailure(new ProcedureException(new IoTDBException(res)));
} else {
- LOGGER.warn("Failed to init CQ {} because of unknown reasons {}", req.cqId, res);
+ LOGGER.warn(ProcedureMessages.FAILED_TO_INIT_CQ_BECAUSE_OF_UNKNOWN_REASONS, req.cqId, res);
setFailure(new ProcedureException(new IoTDBException(res)));
}
}
@@ -153,14 +154,17 @@ private void activeCQ(ConfigNodeProcedureEnv env) {
res.setMessage(e.getMessage());
}
if (res.code == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.debug("Finish Scheduling CQ {} successfully", req.cqId);
+ LOGGER.debug(ProcedureMessages.FINISH_SCHEDULING_CQ_SUCCESSFULLY, req.cqId);
} else if (res.code == TSStatusCode.NO_SUCH_CQ.getStatusCode()) {
- LOGGER.warn("Failed to active CQ {} because of no such cq: {}", req.cqId, res.message);
+ LOGGER.warn(
+ ProcedureMessages.FAILED_TO_ACTIVE_CQ_BECAUSE_OF_NO_SUCH_CQ, req.cqId, res.message);
} else if (res.code == TSStatusCode.CQ_ALREADY_ACTIVE.getStatusCode()) {
- LOGGER.warn("Failed to active CQ {} because this cq has already been active", req.cqId);
+ LOGGER.warn(ProcedureMessages.FAILED_TO_ACTIVE_CQ_BECAUSE_THIS_CQ_HAS_ALREADY_BEEN, req.cqId);
} else {
LOGGER.warn(
- "Failed to active CQ {} successfully because of unknown reasons {}", req.cqId, res);
+ ProcedureMessages.FAILED_TO_ACTIVE_CQ_SUCCESSFULLY_BECAUSE_OF_UNKNOWN_REASONS,
+ req.cqId,
+ res);
}
}
@@ -173,7 +177,7 @@ protected void rollbackState(ConfigNodeProcedureEnv env, CreateCQState state)
// do nothing
break;
case INACTIVE:
- LOGGER.info("Start [INACTIVE] rollback of CQ {}", req.cqId);
+ LOGGER.info(ProcedureMessages.START_INACTIVE_ROLLBACK_OF_CQ, req.cqId);
TSStatus res;
try {
res = env.getConfigManager().getConsensusManager().write(new DropCQPlan(req.cqId, md5));
@@ -183,22 +187,22 @@ protected void rollbackState(ConfigNodeProcedureEnv env, CreateCQState state)
res.setMessage(e.getMessage());
}
if (res.code == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.info("Finish [INACTIVE] rollback of CQ {} successfully", req.cqId);
+ LOGGER.info(ProcedureMessages.FINISH_INACTIVE_ROLLBACK_OF_CQ_SUCCESSFULLY, req.cqId);
} else if (res.code == TSStatusCode.NO_SUCH_CQ.getStatusCode()) {
LOGGER.warn(
- "Failed to do [INACTIVE] rollback of CQ {} because of no such cq: {}",
+ ProcedureMessages.FAILED_TO_DO_INACTIVE_ROLLBACK_OF_CQ_BECAUSE_OF_NO,
req.cqId,
res.message);
} else {
LOGGER.warn(
- "Failed to do [INACTIVE] rollback of CQ {} because of unknown reasons {}",
+ ProcedureMessages.FAILED_TO_DO_INACTIVE_ROLLBACK_OF_CQ_BECAUSE_OF_UNKNOWN,
req.cqId,
res);
}
break;
default:
- throw new IllegalArgumentException("Unknown CreateCQState: " + state);
+ throw new IllegalArgumentException(ProcedureMessages.UNKNOWN_CREATECQSTATE + state);
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/AddConfigNodeProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/AddConfigNodeProcedure.java
index b38696a10ed2b..ae3bfef293b14 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/AddConfigNodeProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/AddConfigNodeProcedure.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation;
import org.apache.iotdb.commons.exception.runtime.ThriftSerDeException;
import org.apache.iotdb.commons.utils.ThriftConfigNodeSerDeUtils;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.state.AddConfigNodeState;
@@ -88,7 +89,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, AddConfigNodeState s
}
} catch (Exception e) {
if (isRollbackSupported(state)) {
- setFailure(new ProcedureException("Add ConfigNode failed " + state));
+ setFailure(new ProcedureException(ProcedureMessages.ADD_CONFIGNODE_FAILED + state));
} else {
LOG.error(
"Retrievable error trying to add config node {}, state {}",
@@ -96,7 +97,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, AddConfigNodeState s
state,
e);
if (getCycles() > RETRY_THRESHOLD) {
- setFailure(new ProcedureException("State stuck at " + state));
+ setFailure(new ProcedureException(ProcedureMessages.STATE_STUCK_AT + state));
}
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveAINodeProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveAINodeProcedure.java
index facc2ce4ab728..95c320ef87853 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveAINodeProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveAINodeProcedure.java
@@ -25,6 +25,7 @@
import org.apache.iotdb.confignode.client.sync.CnToAnSyncRequestType;
import org.apache.iotdb.confignode.client.sync.SyncAINodeClientPool;
import org.apache.iotdb.confignode.consensus.request.write.ainode.RemoveAINodePlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.state.RemoveAINodeState;
@@ -73,11 +74,12 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveAINodeState st
null,
CnToAnSyncRequestType.STOP_AI_NODE);
if (resp != null && resp.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.info("Successfully stopped AINode {}", removedAINode.getInternalEndPoint());
+ LOGGER.info(
+ ProcedureMessages.SUCCESSFULLY_STOPPED_AINODE, removedAINode.getInternalEndPoint());
} else {
if (resp != null) {
LOGGER.warn(
- "Failed to stop AINode {} because {}, but the remove process will continue.",
+ ProcedureMessages.FAILED_TO_STOP_AINODE_BECAUSE_BUT_THE_REMOVE_PROCESS_WILL,
resp.getMessage(),
removedAINode.getInternalEndPoint());
}
@@ -92,26 +94,33 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveAINodeState st
if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
throw new ProcedureException(
String.format(
- "Fail to remove [%s] AINode on Config Nodes [%s]",
- removedAINode, response.getMessage()));
+ ProcedureMessages.FAIL_TO_REMOVE_AINODE_ON_CONFIG_NODES,
+ removedAINode,
+ response.getMessage()));
}
return Flow.NO_MORE_STATE;
default:
throw new UnsupportedOperationException(
- String.format("Unknown state during executing removeAINodeProcedure, %s", state));
+ String.format(
+ ProcedureMessages.UNKNOWN_STATE_DURING_EXECUTING_REMOVEAINODEPROCEDURE, state));
}
} catch (Exception e) {
if (isRollbackSupported(state)) {
setFailure(new ProcedureException(e.getMessage()));
} else {
LOGGER.error(
- "Retrievable error trying to remove AINode [{}], state [{}]", removedAINode, state, e);
+ ProcedureMessages.RETRIEVABLE_ERROR_TRYING_TO_REMOVE_AINODE_STATE,
+ removedAINode,
+ state,
+ e);
if (getCycles() > RETRY_THRESHOLD) {
setFailure(
new ProcedureException(
String.format(
- "Fail to remove AINode [%s] at STATE [%s], %s",
- removedAINode, state, e.getMessage())));
+ ProcedureMessages.FAIL_TO_REMOVE_AINODE_AT_STATE,
+ removedAINode,
+ state,
+ e.getMessage())));
}
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveConfigNodeProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveConfigNodeProcedure.java
index 2780214c3a8da..bd2e48761ddbb 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveConfigNodeProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveConfigNodeProcedure.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation;
import org.apache.iotdb.commons.exception.runtime.ThriftSerDeException;
import org.apache.iotdb.commons.utils.ThriftConfigNodeSerDeUtils;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.state.RemoveConfigNodeState;
@@ -76,7 +77,8 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveConfigNodeStat
} catch (Exception e) {
if (isRollbackSupported(state)) {
setFailure(
- new ProcedureException("Remove Config Node" + removedConfigNode + " failed " + state));
+ new ProcedureException(
+ ProcedureMessages.REMOVE_CONFIG_NODE + removedConfigNode + " failed " + state));
} else {
LOG.error(
"Retrievable error trying to remove config node {}, state {}",
@@ -84,7 +86,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveConfigNodeStat
state,
e);
if (getCycles() > RETRY_THRESHOLD) {
- setFailure(new ProcedureException("State stuck at " + state));
+ setFailure(new ProcedureException(ProcedureMessages.STATE_STUCK_AT + state));
}
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveDataNodesProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveDataNodesProcedure.java
index a531d67955cec..e063738a22238 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveDataNodesProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveDataNodesProcedure.java
@@ -25,6 +25,7 @@
import org.apache.iotdb.commons.cluster.NodeStatus;
import org.apache.iotdb.commons.exception.runtime.ThriftSerDeException;
import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.env.RemoveDataNodeHandler;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
@@ -146,7 +147,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveDataNodeState
}
} catch (Exception e) {
if (isRollbackSupported(state)) {
- setFailure(new ProcedureException("Remove Data Node failed " + state));
+ setFailure(new ProcedureException(ProcedureMessages.REMOVE_DATA_NODE_FAILED + state));
} else {
LOG.error(
"Retrievable error trying to remove data node {}, state {}",
@@ -154,7 +155,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveDataNodeState
state,
e);
if (getCycles() > RETRY_THRESHOLD) {
- setFailure(new ProcedureException("State stuck at " + state));
+ setFailure(new ProcedureException(ProcedureMessages.STATE_STUCK_AT + state));
}
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/partition/DataPartitionTableIntegrityCheckProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/partition/DataPartitionTableIntegrityCheckProcedure.java
index f3d539576d4ba..81db9e7af7231 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/partition/DataPartitionTableIntegrityCheckProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/partition/DataPartitionTableIntegrityCheckProcedure.java
@@ -34,6 +34,7 @@
import org.apache.iotdb.confignode.client.sync.SyncDataNodeClientPool;
import org.apache.iotdb.confignode.consensus.request.read.partition.GetDataPartitionPlan;
import org.apache.iotdb.confignode.consensus.request.write.partition.CreateDataPartitionPlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.manager.load.LoadManager;
import org.apache.iotdb.confignode.manager.node.NodeManager;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
@@ -167,7 +168,7 @@ protected Flow executeFromState(
case WRITE_PARTITION_TABLE_TO_CONSENSUS:
return writePartitionTableToConsensus(env);
default:
- throw new ProcedureException("Unknown state: " + state);
+ throw new ProcedureException(ProcedureMessages.UNKNOWN_STATE + state);
}
} catch (Exception e) {
LOG.error("[DataPartitionIntegrity] Error executing state {}: {}", state, e.getMessage(), e);
@@ -206,7 +207,7 @@ protected void rollbackState(
earliestTimeslots.clear();
dataPartitionTables.clear();
finalDataPartitionTables.clear();
- throw new ProcedureException("Unknown state for rollback: " + state);
+ throw new ProcedureException(ProcedureMessages.UNKNOWN_STATE_FOR_ROLLBACK + state);
}
}
@@ -706,7 +707,8 @@ private Flow writePartitionTableToConsensus(final ConfigNodeProcedureEnv env) {
LOG.error("[DataPartitionIntegrity] No database lost data partition table");
setFailure(
"DataPartitionTableIntegrityCheckProcedure",
- new ProcedureException("No database lost data partition table for consensus write"));
+ new ProcedureException(
+ ProcedureMessages.NO_DATABASE_LOST_DATA_PARTITION_TABLE_FOR_CONSENSUS_WRITE));
return getFlow();
}
@@ -714,7 +716,8 @@ private Flow writePartitionTableToConsensus(final ConfigNodeProcedureEnv env) {
LOG.error("[DataPartitionIntegrity] DataPartitionTable to write to consensus");
setFailure(
"DataPartitionTableIntegrityCheckProcedure",
- new ProcedureException("No DataPartitionTable available for consensus write"));
+ new ProcedureException(
+ ProcedureMessages.NO_DATAPARTITIONTABLE_AVAILABLE_FOR_CONSENSUS_WRITE));
return getFlow();
}
@@ -738,7 +741,8 @@ private Flow writePartitionTableToConsensus(final ConfigNodeProcedureEnv env) {
LOG.error("[DataPartitionIntegrity] Failed to write DataPartitionTable to consensus log");
setFailure(
"DataPartitionTableIntegrityCheckProcedure",
- new ProcedureException("Failed to write DataPartitionTable to consensus log"));
+ new ProcedureException(
+ ProcedureMessages.FAILED_TO_WRITE_DATAPARTITIONTABLE_TO_CONSENSUS_LOG));
}
} catch (Exception e) {
LOG.error("[DataPartitionIntegrity] Error writing DataPartitionTable to consensus log", e);
@@ -819,7 +823,7 @@ public void serialize(final DataOutputStream stream) throws IOException {
this.getClass().getSimpleName(),
entry.getKey(),
e);
- throw new IOException("Failed to serialize dataPartitionTables", e);
+ throw new IOException(ProcedureMessages.FAILED_TO_SERIALIZE_DATAPARTITIONTABLES, e);
}
}
}
@@ -851,7 +855,7 @@ public void serialize(final DataOutputStream stream) throws IOException {
"[DataPartitionIntegrity] {} serialize finalDataPartitionTables failed",
this.getClass().getSimpleName(),
e);
- throw new IOException("Failed to serialize finalDataPartitionTables", e);
+ throw new IOException(ProcedureMessages.FAILED_TO_SERIALIZE_FINALDATAPARTITIONTABLES, e);
}
}
} else {
@@ -872,7 +876,7 @@ public void serialize(final DataOutputStream stream) throws IOException {
stream.write(buf, 0, size);
} catch (TException e) {
LOG.error("[DataPartitionIntegrity] Failed to serialize skipDataNode", e);
- throw new IOException("Failed to serialize skipDataNode", e);
+ throw new IOException(ProcedureMessages.FAILED_TO_SERIALIZE_SKIPDATANODE, e);
}
}
@@ -890,7 +894,7 @@ public void serialize(final DataOutputStream stream) throws IOException {
stream.write(buf, 0, size);
} catch (TException e) {
LOG.error("[DataPartitionIntegrity] Failed to serialize failedDataNode", e);
- throw new IOException("Failed to serialize failedDataNode", e);
+ throw new IOException(ProcedureMessages.FAILED_TO_SERIALIZE_FAILEDDATANODE, e);
}
}
}
@@ -938,7 +942,8 @@ public void deserialize(final ByteBuffer byteBuffer) {
this.getClass().getSimpleName(),
dataNodeId,
e);
- throw new RuntimeException("Failed to deserialize dataPartitionTables", e);
+ throw new RuntimeException(
+ ProcedureMessages.FAILED_TO_DESERIALIZE_DATAPARTITIONTABLES, e);
}
}
@@ -978,7 +983,8 @@ public void deserialize(final ByteBuffer byteBuffer) {
"[DataPartitionIntegrity] {} deserialize finalDataPartitionTables failed",
this.getClass().getSimpleName(),
e);
- throw new RuntimeException("Failed to deserialize finalDataPartitionTables", e);
+ throw new RuntimeException(
+ ProcedureMessages.FAILED_TO_DESERIALIZE_FINALDATAPARTITIONTABLES, e);
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/AbstractOperatePipeProcedureV2.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/AbstractOperatePipeProcedureV2.java
index dfdfe00f3106a..5679b36ffc506 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/AbstractOperatePipeProcedureV2.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/AbstractOperatePipeProcedureV2.java
@@ -23,6 +23,7 @@
import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType;
import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta;
import org.apache.iotdb.commons.pipe.config.constant.SystemConstant;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.manager.pipe.metric.overview.PipeProcedureMetrics;
import org.apache.iotdb.confignode.persistence.pipe.PipeTaskInfo;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
@@ -112,12 +113,12 @@ protected AtomicReference acquireLockInternal(
@Override
protected ProcedureLockState acquireLock(ConfigNodeProcedureEnv configNodeProcedureEnv) {
- LOGGER.debug("ProcedureId {} try to acquire pipe lock.", getProcId());
+ LOGGER.debug(ProcedureMessages.PROCEDUREID_TRY_TO_ACQUIRE_PIPE_LOCK, getProcId());
pipeTaskInfo = acquireLockInternal(configNodeProcedureEnv);
if (pipeTaskInfo == null) {
- LOGGER.warn("ProcedureId {} failed to acquire pipe lock.", getProcId());
+ LOGGER.warn(ProcedureMessages.PROCEDUREID_FAILED_TO_ACQUIRE_PIPE_LOCK, getProcId());
} else {
- LOGGER.debug("ProcedureId {} acquired pipe lock.", getProcId());
+ LOGGER.debug(ProcedureMessages.PROCEDUREID_ACQUIRED_PIPE_LOCK, getProcId());
}
final ProcedureLockState procedureLockState = super.acquireLock(configNodeProcedureEnv);
@@ -125,19 +126,25 @@ protected ProcedureLockState acquireLock(ConfigNodeProcedureEnv configNodeProced
case LOCK_ACQUIRED:
if (pipeTaskInfo == null) {
LOGGER.warn(
- "ProcedureId {}: LOCK_ACQUIRED. The following procedure should not be executed without pipe lock.",
+ ProcedureMessages
+ .PROCEDUREID_LOCK_ACQUIRED_THE_FOLLOWING_PROCEDURE_SHOULD_NOT_BE_EXECUTED,
getProcId());
} else {
LOGGER.debug(
- "ProcedureId {}: LOCK_ACQUIRED. The following procedure should be executed with pipe lock.",
+ ProcedureMessages
+ .PROCEDUREID_LOCK_ACQUIRED_THE_FOLLOWING_PROCEDURE_SHOULD_BE_EXECUTED_WITH,
getProcId());
}
break;
case LOCK_EVENT_WAIT:
if (pipeTaskInfo == null) {
- LOGGER.warn("ProcedureId {}: LOCK_EVENT_WAIT. Without acquiring pipe lock.", getProcId());
+ LOGGER.warn(
+ ProcedureMessages.PROCEDUREID_LOCK_EVENT_WAIT_WITHOUT_ACQUIRING_PIPE_LOCK,
+ getProcId());
} else {
- LOGGER.debug("ProcedureId {}: LOCK_EVENT_WAIT. Pipe lock will be released.", getProcId());
+ LOGGER.debug(
+ ProcedureMessages.PROCEDUREID_LOCK_EVENT_WAIT_PIPE_LOCK_WILL_BE_RELEASED,
+ getProcId());
configNodeProcedureEnv
.getConfigManager()
.getPipeManager()
@@ -149,12 +156,12 @@ protected ProcedureLockState acquireLock(ConfigNodeProcedureEnv configNodeProced
default:
if (pipeTaskInfo == null) {
LOGGER.error(
- "ProcedureId {}: {}. Invalid lock state. Without acquiring pipe lock.",
+ ProcedureMessages.PROCEDUREID_INVALID_LOCK_STATE_WITHOUT_ACQUIRING_PIPE_LOCK,
getProcId(),
procedureLockState);
} else {
LOGGER.error(
- "ProcedureId {}: {}. Invalid lock state. Pipe lock will be released.",
+ ProcedureMessages.PROCEDUREID_INVALID_LOCK_STATE_PIPE_LOCK_WILL_BE_RELEASED,
getProcId(),
procedureLockState);
configNodeProcedureEnv
@@ -174,9 +181,11 @@ protected void releaseLock(ConfigNodeProcedureEnv configNodeProcedureEnv) {
super.releaseLock(configNodeProcedureEnv);
if (pipeTaskInfo == null) {
- LOGGER.warn("ProcedureId {} release lock. No need to release pipe lock.", getProcId());
+ LOGGER.warn(
+ ProcedureMessages.PROCEDUREID_RELEASE_LOCK_NO_NEED_TO_RELEASE_PIPE_LOCK, getProcId());
} else {
- LOGGER.debug("ProcedureId {} release lock. Pipe lock will be released.", getProcId());
+ LOGGER.debug(
+ ProcedureMessages.PROCEDUREID_RELEASE_LOCK_PIPE_LOCK_WILL_BE_RELEASED, getProcId());
if (this instanceof PipeMetaSyncProcedure) {
configNodeProcedureEnv
.getConfigManager()
@@ -227,7 +236,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, OperatePipeTaskState
throws InterruptedException {
if (pipeTaskInfo == null) {
LOGGER.warn(
- "ProcedureId {}: Pipe lock is not acquired, executeFromState's execution will be skipped.",
+ ProcedureMessages.PROCEDUREID_PIPE_LOCK_IS_NOT_ACQUIRED_EXECUTEFROMSTATE_S_EXECUTION_WILL,
getProcId());
return Flow.NO_MORE_STATE;
}
@@ -236,7 +245,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, OperatePipeTaskState
switch (state) {
case VALIDATE_TASK:
if (!executeFromValidateTask(env)) {
- LOGGER.info("ProcedureId {}: {}", getProcId(), SKIP_PIPE_PROCEDURE_MESSAGE);
+ LOGGER.info(ProcedureMessages.PROCEDUREID, getProcId(), SKIP_PIPE_PROCEDURE_MESSAGE);
// On client side, the message returned after the successful execution of the pipe
// command corresponding to this procedure is "Msg: The statement is executed
// successfully."
@@ -258,13 +267,14 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, OperatePipeTaskState
return Flow.NO_MORE_STATE;
default:
throw new UnsupportedOperationException(
- String.format("Unknown state during executing operatePipeProcedure, %s", state));
+ String.format(
+ ProcedureMessages.UNKNOWN_STATE_DURING_EXECUTING_OPERATEPIPEPROCEDURE, state));
}
} catch (Exception e) {
// Retry before rollback
if (getCycles() < RETRY_THRESHOLD) {
LOGGER.warn(
- "ProcedureId {}: Encountered error when trying to {} at state [{}], retry [{}/{}]",
+ ProcedureMessages.PROCEDUREID_ENCOUNTERED_ERROR_WHEN_TRYING_TO_AT_STATE_RETRY,
getProcId(),
getOperation(),
state,
@@ -276,7 +286,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, OperatePipeTaskState
TimeUnit.MILLISECONDS.sleep(3000L);
} else {
LOGGER.warn(
- "ProcedureId {}: All {} retries failed when trying to {} at state [{}], will rollback...",
+ ProcedureMessages.PROCEDUREID_ALL_RETRIES_FAILED_WHEN_TRYING_TO_AT_STATE_WILL,
getProcId(),
RETRY_THRESHOLD,
getOperation(),
@@ -285,8 +295,10 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, OperatePipeTaskState
setFailure(
new ProcedureException(
String.format(
- "ProcedureId %s: Fail to %s because %s",
- getProcId(), getOperation().name(), e.getMessage())));
+ ProcedureMessages.PROCEDUREID_FAIL_TO_BECAUSE,
+ getProcId(),
+ getOperation().name(),
+ e.getMessage())));
return Flow.NO_MORE_STATE;
}
}
@@ -303,7 +315,7 @@ protected void rollbackState(ConfigNodeProcedureEnv env, OperatePipeTaskState st
throws IOException, InterruptedException, ProcedureException {
if (pipeTaskInfo == null) {
LOGGER.warn(
- "ProcedureId {}: Pipe lock is not acquired, rollbackState({})'s execution will be skipped.",
+ ProcedureMessages.PROCEDUREID_PIPE_LOCK_IS_NOT_ACQUIRED_ROLLBACKSTATE_S_EXECUTION_WILL,
getProcId(),
state);
return;
@@ -316,7 +328,10 @@ protected void rollbackState(ConfigNodeProcedureEnv env, OperatePipeTaskState st
rollbackFromValidateTask(env);
isRollbackFromValidateTaskSuccessful = true;
} catch (Exception e) {
- LOGGER.warn("ProcedureId {}: Failed to rollback from validate task.", getProcId(), e);
+ LOGGER.warn(
+ ProcedureMessages.PROCEDUREID_FAILED_TO_ROLLBACK_FROM_VALIDATE_TASK,
+ getProcId(),
+ e);
}
}
break;
@@ -325,7 +340,9 @@ protected void rollbackState(ConfigNodeProcedureEnv env, OperatePipeTaskState st
rollbackFromCalculateInfoForTask(env);
} catch (Exception e) {
LOGGER.warn(
- "ProcedureId {}: Failed to rollback from calculate info for task.", getProcId(), e);
+ ProcedureMessages.PROCEDUREID_FAILED_TO_ROLLBACK_FROM_CALCULATE_INFO_FOR_TASK,
+ getProcId(),
+ e);
}
break;
case WRITE_CONFIG_NODE_CONSENSUS:
@@ -339,7 +356,7 @@ protected void rollbackState(ConfigNodeProcedureEnv env, OperatePipeTaskState st
}
} catch (Exception e) {
LOGGER.warn(
- "ProcedureId {}: Failed to rollback from write config node consensus.",
+ ProcedureMessages.PROCEDUREID_FAILED_TO_ROLLBACK_FROM_WRITE_CONFIG_NODE_CONSENSUS,
getProcId(),
e);
}
@@ -355,11 +372,13 @@ protected void rollbackState(ConfigNodeProcedureEnv env, OperatePipeTaskState st
isRollbackFromOperateOnDataNodesSuccessful = true;
} catch (Exception e) {
LOGGER.warn(
- "ProcedureId {}: Failed to rollback from operate on data nodes.", getProcId(), e);
+ ProcedureMessages.PROCEDUREID_FAILED_TO_ROLLBACK_FROM_OPERATE_ON_DATA_NODES,
+ getProcId(),
+ e);
}
break;
default:
- LOGGER.error("Unsupported roll back STATE [{}]", state);
+ LOGGER.error(ProcedureMessages.UNSUPPORTED_ROLL_BACK_STATE, state);
}
}
@@ -515,7 +534,7 @@ protected void pushPipeMetaToDataNodesIgnoreException(ConfigNodeProcedureEnv env
// Ignore the exceptions reported
pushPipeMetaToDataNodes(env);
} catch (Exception e) {
- LOGGER.info("Failed to push pipe meta list to data nodes, will retry later.", e);
+ LOGGER.info(ProcedureMessages.FAILED_TO_PUSH_PIPE_META_LIST_TO_DATA_NODES_WILL, e);
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/plugin/CreatePipePluginProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/plugin/CreatePipePluginProcedure.java
index f4fa738428d43..ddc658bdf4d2d 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/plugin/CreatePipePluginProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/plugin/CreatePipePluginProcedure.java
@@ -24,6 +24,8 @@
import org.apache.iotdb.commons.utils.TestOnly;
import org.apache.iotdb.confignode.consensus.request.write.pipe.plugin.CreatePipePluginPlan;
import org.apache.iotdb.confignode.consensus.request.write.pipe.plugin.DropPipePluginPlan;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.manager.pipe.coordinator.plugin.PipePluginCoordinator;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
@@ -100,21 +102,24 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, CreatePipePluginStat
return executeFromUnlock(env);
default:
throw new UnsupportedOperationException(
- String.format("Unknown state during executing createPipePluginProcedure, %s", state));
+ String.format(
+ ProcedureMessages.UNKNOWN_STATE_DURING_EXECUTING_CREATEPIPEPLUGINPROCEDURE,
+ state));
}
} catch (Exception e) {
if (isRollbackSupported(state)) {
- LOGGER.error("CreatePipePluginProcedure failed in state {}, will rollback", state, e);
+ LOGGER.error(
+ ProcedureMessages.CREATEPIPEPLUGINPROCEDURE_FAILED_IN_STATE_WILL_ROLLBACK, state, e);
setFailure(new ProcedureException(e.getMessage()));
} else {
LOGGER.error(
- "Retrievable error trying to create pipe plugin [{}], state: {}",
+ ProcedureMessages.RETRIEVABLE_ERROR_TRYING_TO_CREATE_PIPE_PLUGIN_STATE,
pipePluginMeta.getPluginName(),
state,
e);
if (getCycles() > RETRY_THRESHOLD) {
LOGGER.error(
- "Fail to create pipe plugin [{}] after {} retries",
+ ProcedureMessages.FAIL_TO_CREATE_PIPE_PLUGIN_AFTER_RETRIES,
pipePluginMeta.getPluginName(),
getCycles());
setFailure(new ProcedureException(e.getMessage()));
@@ -125,7 +130,9 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, CreatePipePluginStat
}
private Flow executeFromLock(ConfigNodeProcedureEnv env) {
- LOGGER.info("CreatePipePluginProcedure: executeFromLock({})", pipePluginMeta.getPluginName());
+ LOGGER.info(
+ ProcedureMessages.CREATEPIPEPLUGINPROCEDURE_EXECUTEFROMLOCK,
+ pipePluginMeta.getPluginName());
final PipePluginCoordinator pipePluginCoordinator =
env.getConfigManager().getPipeManager().getPipePluginCoordinator();
@@ -137,7 +144,8 @@ private Flow executeFromLock(ConfigNodeProcedureEnv env) {
.getPipePluginInfo()
.validateBeforeCreatingPipePlugin(pluginName, isSetIfNotExistsCondition)) {
LOGGER.info(
- "Pipe plugin {} is already created and isSetIfNotExistsCondition is true, end the CreatePipePluginProcedure({})",
+ ProcedureMessages
+ .PIPE_PLUGIN_IS_ALREADY_CREATED_AND_ISSETIFNOTEXISTSCONDITION_IS_TRUE_END,
pluginName,
pluginName);
pipePluginCoordinator.unlock();
@@ -146,7 +154,7 @@ private Flow executeFromLock(ConfigNodeProcedureEnv env) {
} catch (PipeException e) {
// The pipe plugin has already created, we should end the procedure
LOGGER.info(
- "Pipe plugin {} is already created, end the CreatePipePluginProcedure({})",
+ ProcedureMessages.PIPE_PLUGIN_IS_ALREADY_CREATED_END_THE_CREATEPIPEPLUGINPROCEDURE,
pluginName,
pluginName);
setFailure(new ProcedureException(e.getMessage()));
@@ -160,7 +168,7 @@ private Flow executeFromLock(ConfigNodeProcedureEnv env) {
private Flow executeFromCreateOnConfigNodes(ConfigNodeProcedureEnv env) {
LOGGER.info(
- "CreatePipePluginProcedure: executeFromCreateOnConfigNodes({})",
+ ProcedureMessages.CREATEPIPEPLUGINPROCEDURE_EXECUTEFROMCREATEONCONFIGNODES,
pipePluginMeta.getPluginName());
final ConfigManager configNodeManager = env.getConfigManager();
@@ -178,7 +186,7 @@ private Flow executeFromCreateOnConfigNodes(ConfigNodeProcedureEnv env) {
try {
response = configNodeManager.getConsensusManager().write(createPluginPlan);
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
response.setMessage(e.getMessage());
}
@@ -192,7 +200,7 @@ private Flow executeFromCreateOnConfigNodes(ConfigNodeProcedureEnv env) {
private Flow executeFromCreateOnDataNodes(ConfigNodeProcedureEnv env) throws IOException {
LOGGER.info(
- "CreatePipePluginProcedure: executeFromCreateOnDataNodes({})",
+ ProcedureMessages.CREATEPIPEPLUGINPROCEDURE_EXECUTEFROMCREATEONDATANODES,
pipePluginMeta.getPluginName());
if (RpcUtils.squashResponseStatusList(env.createPipePluginOnDataNodes(pipePluginMeta, jarFile))
@@ -204,12 +212,14 @@ private Flow executeFromCreateOnDataNodes(ConfigNodeProcedureEnv env) throws IOE
throw new PipeException(
String.format(
- "Failed to create pipe plugin instance [%s] on data nodes",
+ ProcedureMessages.FAILED_TO_CREATE_PIPE_PLUGIN_INSTANCE_ON_DATA_NODES,
pipePluginMeta.getPluginName()));
}
private Flow executeFromUnlock(ConfigNodeProcedureEnv env) {
- LOGGER.info("CreatePipePluginProcedure: executeFromUnlock({})", pipePluginMeta.getPluginName());
+ LOGGER.info(
+ ProcedureMessages.CREATEPIPEPLUGINPROCEDURE_EXECUTEFROMUNLOCK,
+ pipePluginMeta.getPluginName());
env.getConfigManager().getPipeManager().getPipePluginCoordinator().unlock();
@@ -233,14 +243,16 @@ protected void rollbackState(ConfigNodeProcedureEnv env, CreatePipePluginState s
}
private void rollbackFromLock(ConfigNodeProcedureEnv env) {
- LOGGER.info("CreatePipePluginProcedure: rollbackFromLock({})", pipePluginMeta.getPluginName());
+ LOGGER.info(
+ ProcedureMessages.CREATEPIPEPLUGINPROCEDURE_ROLLBACKFROMLOCK,
+ pipePluginMeta.getPluginName());
env.getConfigManager().getPipeManager().getPipePluginCoordinator().unlock();
}
private void rollbackFromCreateOnConfigNodes(ConfigNodeProcedureEnv env) {
LOGGER.info(
- "CreatePipePluginProcedure: rollbackFromCreateOnConfigNodes({})",
+ ProcedureMessages.CREATEPIPEPLUGINPROCEDURE_ROLLBACKFROMCREATEONCONFIGNODES,
pipePluginMeta.getPluginName());
try {
@@ -248,13 +260,13 @@ private void rollbackFromCreateOnConfigNodes(ConfigNodeProcedureEnv env) {
.getConsensusManager()
.write(new DropPipePluginPlan(pipePluginMeta.getPluginName()));
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
}
}
private void rollbackFromCreateOnDataNodes(ConfigNodeProcedureEnv env) throws ProcedureException {
LOGGER.info(
- "CreatePipePluginProcedure: rollbackFromCreateOnDataNodes({})",
+ ProcedureMessages.CREATEPIPEPLUGINPROCEDURE_ROLLBACKFROMCREATEONDATANODES,
pipePluginMeta.getPluginName());
if (RpcUtils.squashResponseStatusList(
@@ -263,7 +275,8 @@ private void rollbackFromCreateOnDataNodes(ConfigNodeProcedureEnv env) throws Pr
!= TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
throw new ProcedureException(
String.format(
- "Failed to rollback pipe plugin [%s] on data nodes", pipePluginMeta.getPluginName()));
+ ProcedureMessages.FAILED_TO_ROLLBACK_PIPE_PLUGIN_ON_DATA_NODES,
+ pipePluginMeta.getPluginName()));
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/plugin/DropPipePluginProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/plugin/DropPipePluginProcedure.java
index ab48a2478506c..771ab6230bff5 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/plugin/DropPipePluginProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/plugin/DropPipePluginProcedure.java
@@ -21,6 +21,8 @@
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.confignode.consensus.request.write.pipe.plugin.DropPipePluginPlan;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.manager.pipe.coordinator.plugin.PipePluginCoordinator;
import org.apache.iotdb.confignode.manager.pipe.coordinator.task.PipeTaskCoordinator;
import org.apache.iotdb.confignode.manager.subscription.SubscriptionCoordinator;
@@ -99,13 +101,18 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, DropPipePluginState
}
} catch (Exception e) {
if (isRollbackSupported(state)) {
- LOGGER.warn("DropPipePluginProcedure failed in state {}, will rollback", state, e);
+ LOGGER.warn(
+ ProcedureMessages.DROPPIPEPLUGINPROCEDURE_FAILED_IN_STATE_WILL_ROLLBACK, state, e);
setFailure(new ProcedureException(e.getMessage()));
} else {
LOGGER.error(
- "Retrievable error trying to drop pipe plugin [{}], state: {}", pluginName, state, e);
+ ProcedureMessages.RETRIEVABLE_ERROR_TRYING_TO_DROP_PIPE_PLUGIN_STATE,
+ pluginName,
+ state,
+ e);
if (getCycles() > RETRY_THRESHOLD) {
- LOGGER.error("Fail to drop pipe plugin [{}] after {} retries", pluginName, getCycles());
+ LOGGER.error(
+ ProcedureMessages.FAIL_TO_DROP_PIPE_PLUGIN_AFTER_RETRIES, pluginName, getCycles());
setFailure(new ProcedureException(e.getMessage()));
}
}
@@ -114,7 +121,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, DropPipePluginState
}
private Flow executeFromLock(ConfigNodeProcedureEnv env) {
- LOGGER.info("DropPipePluginProcedure: executeFromLock({})", pluginName);
+ LOGGER.info(ProcedureMessages.DROPPIPEPLUGINPROCEDURE_EXECUTEFROMLOCK, pluginName);
final PipeTaskCoordinator pipeTaskCoordinator =
env.getConfigManager().getPipeManager().getPipeTaskCoordinator();
@@ -132,7 +139,7 @@ private Flow executeFromLock(ConfigNodeProcedureEnv env) {
.getPipePluginInfo()
.validateBeforeDroppingPipePlugin(pluginName, isSetIfExistsCondition)) {
LOGGER.info(
- "Pipe plugin {} is not exist, end the DropPipePluginProcedure({})",
+ ProcedureMessages.PIPE_PLUGIN_IS_NOT_EXIST_END_THE_DROPPIPEPLUGINPROCEDURE,
pluginName,
pluginName);
pipePluginCoordinator.unlock();
@@ -154,14 +161,14 @@ private Flow executeFromLock(ConfigNodeProcedureEnv env) {
try {
env.getConfigManager().getConsensusManager().write(new DropPipePluginPlan(pluginName));
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
}
setNextState(DropPipePluginState.DROP_ON_DATA_NODES);
return Flow.HAS_MORE_STATE;
}
private Flow executeFromDropOnDataNodes(ConfigNodeProcedureEnv env) {
- LOGGER.info("DropPipePluginProcedure: executeFromDropOnDataNodes({})", pluginName);
+ LOGGER.info(ProcedureMessages.DROPPIPEPLUGINPROCEDURE_EXECUTEFROMDROPONDATANODES, pluginName);
final List dropStatusList = env.dropPipePluginOnDataNodes(pluginName, true);
if (dropStatusList.stream().allMatch(this::isDropPipePluginSuccessOrNotExists)) {
@@ -170,7 +177,7 @@ private Flow executeFromDropOnDataNodes(ConfigNodeProcedureEnv env) {
}
throw new PipeException(
- String.format("Failed to drop pipe plugin %s on data nodes", pluginName));
+ String.format(ProcedureMessages.FAILED_TO_DROP_PIPE_PLUGIN_ON_DATA_NODES, pluginName));
}
private boolean isDropPipePluginSuccessOrNotExists(final TSStatus status) {
@@ -186,12 +193,12 @@ private boolean isDropPipePluginSuccessOrNotExists(final TSStatus status) {
}
private Flow executeFromDropOnConfigNodes(ConfigNodeProcedureEnv env) {
- LOGGER.info("DropPipePluginProcedure: executeFromDropOnConfigNodes({})", pluginName);
+ LOGGER.info(ProcedureMessages.DROPPIPEPLUGINPROCEDURE_EXECUTEFROMDROPONCONFIGNODES, pluginName);
try {
env.getConfigManager().getConsensusManager().write(new DropPipePluginPlan(pluginName));
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
}
setNextState(DropPipePluginState.UNLOCK);
@@ -199,7 +206,7 @@ private Flow executeFromDropOnConfigNodes(ConfigNodeProcedureEnv env) {
}
private Flow executeFromUnlock(ConfigNodeProcedureEnv env) {
- LOGGER.info("DropPipePluginProcedure: executeFromUnlock({})", pluginName);
+ LOGGER.info(ProcedureMessages.DROPPIPEPLUGINPROCEDURE_EXECUTEFROMUNLOCK, pluginName);
env.getConfigManager().getPipeManager().getPipePluginCoordinator().unlock();
env.getConfigManager().getPipeManager().getPipeTaskCoordinator().unlock();
@@ -223,21 +230,22 @@ protected void rollbackState(ConfigNodeProcedureEnv env, DropPipePluginState sta
}
private void rollbackFromLock(ConfigNodeProcedureEnv env) {
- LOGGER.info("DropPipePluginProcedure: rollbackFromLock({})", pluginName);
+ LOGGER.info(ProcedureMessages.DROPPIPEPLUGINPROCEDURE_ROLLBACKFROMLOCK, pluginName);
env.getConfigManager().getPipeManager().getPipePluginCoordinator().unlock();
env.getConfigManager().getPipeManager().getPipeTaskCoordinator().unlock();
}
private void rollbackFromDropOnDataNodes(ConfigNodeProcedureEnv env) {
- LOGGER.info("DropPipePluginProcedure: rollbackFromDropOnDataNodes({})", pluginName);
+ LOGGER.info(ProcedureMessages.DROPPIPEPLUGINPROCEDURE_ROLLBACKFROMDROPONDATANODES, pluginName);
// do nothing but wait for rolling back to the previous state: LOCK
// TODO: we should drop the pipe plugin on data nodes properly with RuntimeAgent's help
}
private void rollbackFromDropOnConfigNodes(ConfigNodeProcedureEnv env) {
- LOGGER.info("DropPipePluginProcedure: rollbackFromDropOnConfigNodes({})", pluginName);
+ LOGGER.info(
+ ProcedureMessages.DROPPIPEPLUGINPROCEDURE_ROLLBACKFROMDROPONCONFIGNODES, pluginName);
// do nothing but wait for rolling back to the previous state: DROP_ON_DATA_NODES
// TODO: we should drop the pipe plugin on config nodes properly with RuntimeCoordinator's help
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/runtime/PipeHandleLeaderChangeProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/runtime/PipeHandleLeaderChangeProcedure.java
index 61f6f3cae2aaf..2c5b9566fce61 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/runtime/PipeHandleLeaderChangeProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/runtime/PipeHandleLeaderChangeProcedure.java
@@ -23,6 +23,8 @@
import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType;
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.confignode.consensus.request.write.pipe.runtime.PipeHandleLeaderChangePlan;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.impl.pipe.AbstractOperatePipeProcedureV2;
import org.apache.iotdb.confignode.procedure.impl.pipe.PipeTaskOperation;
@@ -68,7 +70,7 @@ protected PipeTaskOperation getOperation() {
@Override
public boolean executeFromValidateTask(ConfigNodeProcedureEnv env) {
- LOGGER.info("PipeHandleLeaderChangeProcedure: executeFromValidateTask");
+ LOGGER.info(ProcedureMessages.PIPEHANDLELEADERCHANGEPROCEDURE_EXECUTEFROMVALIDATETASK);
// Nothing needs to be checked
return true;
@@ -76,14 +78,14 @@ public boolean executeFromValidateTask(ConfigNodeProcedureEnv env) {
@Override
public void executeFromCalculateInfoForTask(ConfigNodeProcedureEnv env) {
- LOGGER.info("PipeHandleLeaderChangeProcedure: executeFromCalculateInfoForTask");
+ LOGGER.info(ProcedureMessages.PIPEHANDLELEADERCHANGEPROCEDURE_EXECUTEFROMCALCULATEINFOFORTASK);
// Nothing needs to be calculated
}
@Override
public void executeFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) {
- LOGGER.info("PipeHandleLeaderChangeProcedure: executeFromHandleOnConfigNodes");
+ LOGGER.info(ProcedureMessages.PIPEHANDLELEADERCHANGEPROCEDURE_EXECUTEFROMHANDLEONCONFIGNODES);
final Map newConsensusGroupIdToLeaderConsensusIdMap =
new HashMap<>();
@@ -98,7 +100,7 @@ public void executeFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) {
try {
response = env.getConfigManager().getConsensusManager().write(pipeHandleLeaderChangePlan);
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
response.setMessage(e.getMessage());
}
@@ -109,35 +111,35 @@ public void executeFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) {
@Override
public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) {
- LOGGER.info("PipeHandleLeaderChangeProcedure: executeFromHandleOnDataNodes");
+ LOGGER.info(ProcedureMessages.PIPEHANDLELEADERCHANGEPROCEDURE_EXECUTEFROMHANDLEONDATANODES);
pushPipeMetaToDataNodesIgnoreException(env);
}
@Override
public void rollbackFromValidateTask(ConfigNodeProcedureEnv env) {
- LOGGER.info("PipeHandleLeaderChangeProcedure: rollbackFromValidateTask");
+ LOGGER.info(ProcedureMessages.PIPEHANDLELEADERCHANGEPROCEDURE_ROLLBACKFROMVALIDATETASK);
// Nothing to do
}
@Override
public void rollbackFromCalculateInfoForTask(ConfigNodeProcedureEnv env) {
- LOGGER.info("PipeHandleLeaderChangeProcedure: rollbackFromCalculateInfoForTask");
+ LOGGER.info(ProcedureMessages.PIPEHANDLELEADERCHANGEPROCEDURE_ROLLBACKFROMCALCULATEINFOFORTASK);
// Nothing to do
}
@Override
public void rollbackFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) {
- LOGGER.info("PipeHandleLeaderChangeProcedure: rollbackFromHandleOnConfigNodes");
+ LOGGER.info(ProcedureMessages.PIPEHANDLELEADERCHANGEPROCEDURE_ROLLBACKFROMHANDLEONCONFIGNODES);
// Nothing to do
}
@Override
public void rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env) {
- LOGGER.info("PipeHandleLeaderChangeProcedure: rollbackFromCreateOnDataNodes");
+ LOGGER.info(ProcedureMessages.PIPEHANDLELEADERCHANGEPROCEDURE_ROLLBACKFROMCREATEONDATANODES);
// Nothing to do
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/runtime/PipeHandleMetaChangeProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/runtime/PipeHandleMetaChangeProcedure.java
index 401859f0a7e0a..a0dec36735265 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/runtime/PipeHandleMetaChangeProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/runtime/PipeHandleMetaChangeProcedure.java
@@ -22,6 +22,8 @@
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta;
import org.apache.iotdb.confignode.consensus.request.write.pipe.runtime.PipeHandleMetaChangePlan;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.persistence.pipe.PipeTaskInfo;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.impl.pipe.AbstractOperatePipeProcedureV2;
@@ -78,7 +80,7 @@ protected PipeTaskOperation getOperation() {
@Override
public boolean executeFromValidateTask(ConfigNodeProcedureEnv env) {
- LOGGER.info("PipeHandleMetaChangeProcedure: executeFromValidateTask");
+ LOGGER.info(ProcedureMessages.PIPEHANDLEMETACHANGEPROCEDURE_EXECUTEFROMVALIDATETASK);
// Do nothing
return true;
@@ -86,14 +88,15 @@ public boolean executeFromValidateTask(ConfigNodeProcedureEnv env) {
@Override
public void executeFromCalculateInfoForTask(ConfigNodeProcedureEnv env) {
- LOGGER.info("PipeHandleMetaChangeProcedure: executeFromCalculateInfoForTask");
+ LOGGER.info(ProcedureMessages.PIPEHANDLEMETACHANGEPROCEDURE_EXECUTEFROMCALCULATEINFOFORTASK);
// Do nothing
}
@Override
public void executeFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) {
- LOGGER.info("PipeHandleMetaChangeProcedure: executeFromWriteConfigNodeConsensus");
+ LOGGER.info(
+ ProcedureMessages.PIPEHANDLEMETACHANGEPROCEDURE_EXECUTEFROMWRITECONFIGNODECONSENSUS);
if (!needWriteConsensusOnConfigNodes) {
return;
@@ -111,7 +114,7 @@ public void executeFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) {
.getConsensusManager()
.write(new PipeHandleMetaChangePlan(pipeMetaList));
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
response.setMessage(e.getMessage());
}
@@ -122,7 +125,7 @@ public void executeFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) {
@Override
public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) {
- LOGGER.info("PipeHandleMetaChangeProcedure: executeFromHandleOnDataNodes");
+ LOGGER.info(ProcedureMessages.PIPEHANDLEMETACHANGEPROCEDURE_EXECUTEFROMHANDLEONDATANODES);
if (!needPushPipeMetaToDataNodes) {
return;
@@ -133,28 +136,29 @@ public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) {
@Override
public void rollbackFromValidateTask(ConfigNodeProcedureEnv env) {
- LOGGER.info("PipeHandleMetaChangeProcedure: rollbackFromValidateTask");
+ LOGGER.info(ProcedureMessages.PIPEHANDLEMETACHANGEPROCEDURE_ROLLBACKFROMVALIDATETASK);
// Do nothing
}
@Override
public void rollbackFromCalculateInfoForTask(ConfigNodeProcedureEnv env) {
- LOGGER.info("PipeHandleMetaChangeProcedure: rollbackFromCalculateInfoForTask");
+ LOGGER.info(ProcedureMessages.PIPEHANDLEMETACHANGEPROCEDURE_ROLLBACKFROMCALCULATEINFOFORTASK);
// Do nothing
}
@Override
public void rollbackFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) {
- LOGGER.info("PipeHandleMetaChangeProcedure: rollbackFromWriteConfigNodeConsensus");
+ LOGGER.info(
+ ProcedureMessages.PIPEHANDLEMETACHANGEPROCEDURE_ROLLBACKFROMWRITECONFIGNODECONSENSUS);
// Do nothing
}
@Override
public void rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env) {
- LOGGER.info("PipeHandleMetaChangeProcedure: rollbackFromOperateOnDataNodes");
+ LOGGER.info(ProcedureMessages.PIPEHANDLEMETACHANGEPROCEDURE_ROLLBACKFROMOPERATEONDATANODES);
// Do nothing
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/runtime/PipeMetaSyncProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/runtime/PipeMetaSyncProcedure.java
index 393a8bd5ab8c1..33b909eae274e 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/runtime/PipeMetaSyncProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/runtime/PipeMetaSyncProcedure.java
@@ -26,6 +26,8 @@
import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant;
import org.apache.iotdb.confignode.consensus.request.write.pipe.runtime.PipeHandleMetaChangePlan;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.persistence.pipe.PipeTaskInfo;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.impl.pipe.AbstractOperatePipeProcedureV2;
@@ -88,7 +90,8 @@ protected ProcedureLockState acquireLock(ConfigNodeProcedureEnv configNodeProced
// Skip by setting the pipeTaskInfo to null
pipeTaskInfo = null;
LOGGER.debug(
- "PipeMetaSyncProcedure: acquireLock, skip the procedure due to the last execution time {}",
+ ProcedureMessages
+ .PIPEMETASYNCPROCEDURE_ACQUIRELOCK_SKIP_THE_PROCEDURE_DUE_TO_THE_LAST_EXECUTION,
LAST_EXECUTION_TIME.get());
return ProcedureLockState.LOCK_ACQUIRED;
}
@@ -103,7 +106,7 @@ protected PipeTaskOperation getOperation() {
@Override
public boolean executeFromValidateTask(ConfigNodeProcedureEnv env) {
- LOGGER.debug("PipeMetaSyncProcedure: executeFromValidateTask");
+ LOGGER.debug(ProcedureMessages.PIPEMETASYNCPROCEDURE_EXECUTEFROMVALIDATETASK);
LAST_EXECUTION_TIME.set(System.currentTimeMillis());
return true;
@@ -111,7 +114,7 @@ public boolean executeFromValidateTask(ConfigNodeProcedureEnv env) {
@Override
public void executeFromCalculateInfoForTask(ConfigNodeProcedureEnv env) {
- LOGGER.debug("PipeMetaSyncProcedure: executeFromCalculateInfoForTask");
+ LOGGER.debug(ProcedureMessages.PIPEMETASYNCPROCEDURE_EXECUTEFROMCALCULATEINFOFORTASK);
// Re-balance the external source tasks here in case of any changes in the dataRegion
pipeTaskInfo
@@ -170,7 +173,7 @@ public void executeFromCalculateInfoForTask(ConfigNodeProcedureEnv env) {
@Override
public void executeFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) {
- LOGGER.debug("PipeMetaSyncProcedure: executeFromWriteConfigNodeConsensus");
+ LOGGER.debug(ProcedureMessages.PIPEMETASYNCPROCEDURE_EXECUTEFROMWRITECONFIGNODECONSENSUS);
final List pipeMetaList = new ArrayList<>();
for (final PipeMeta pipeMeta : pipeTaskInfo.get().getPipeMetaList()) {
@@ -184,7 +187,7 @@ public void executeFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) {
.getConsensusManager()
.write(new PipeHandleMetaChangePlan(pipeMetaList));
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
response.setMessage(e.getMessage());
}
@@ -196,41 +199,41 @@ public void executeFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) {
@Override
public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env)
throws PipeException, IOException {
- LOGGER.debug("PipeMetaSyncProcedure: executeFromOperateOnDataNodes");
+ LOGGER.debug(ProcedureMessages.PIPEMETASYNCPROCEDURE_EXECUTEFROMOPERATEONDATANODES);
Map respMap = pushPipeMetaToDataNodes(env);
if (pipeTaskInfo.get().recordDataNodePushPipeMetaExceptions(respMap)) {
throw new PipeException(
String.format(
- "Failed to push pipe meta to dataNodes, details: %s",
+ ProcedureMessages.FAILED_TO_PUSH_PIPE_META_TO_DATANODES_DETAILS,
parsePushPipeMetaExceptionForPipe(null, respMap)));
}
}
@Override
public void rollbackFromValidateTask(ConfigNodeProcedureEnv env) {
- LOGGER.debug("PipeMetaSyncProcedure: rollbackFromValidateTask");
+ LOGGER.debug(ProcedureMessages.PIPEMETASYNCPROCEDURE_ROLLBACKFROMVALIDATETASK);
// Do nothing
}
@Override
public void rollbackFromCalculateInfoForTask(ConfigNodeProcedureEnv env) {
- LOGGER.debug("PipeMetaSyncProcedure: rollbackFromCalculateInfoForTask");
+ LOGGER.debug(ProcedureMessages.PIPEMETASYNCPROCEDURE_ROLLBACKFROMCALCULATEINFOFORTASK);
// Do nothing
}
@Override
public void rollbackFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) {
- LOGGER.debug("PipeMetaSyncProcedure: rollbackFromWriteConfigNodeConsensus");
+ LOGGER.debug(ProcedureMessages.PIPEMETASYNCPROCEDURE_ROLLBACKFROMWRITECONFIGNODECONSENSUS);
// Do nothing
}
@Override
public void rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env) {
- LOGGER.debug("PipeMetaSyncProcedure: rollbackFromOperateOnDataNodes");
+ LOGGER.debug(ProcedureMessages.PIPEMETASYNCPROCEDURE_ROLLBACKFROMOPERATEONDATANODES);
// Do nothing
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/AlterPipeProcedureV2.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/AlterPipeProcedureV2.java
index 116c15dde2296..b0d0bce219bde 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/AlterPipeProcedureV2.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/AlterPipeProcedureV2.java
@@ -35,6 +35,8 @@
import org.apache.iotdb.commons.utils.TestOnly;
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
import org.apache.iotdb.confignode.consensus.request.write.pipe.task.AlterPipePlanV2;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.manager.pipe.coordinator.PipeManager;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.impl.pipe.AbstractOperatePipeProcedureV2;
@@ -99,7 +101,8 @@ protected PipeTaskOperation getOperation() {
@Override
public boolean executeFromValidateTask(final ConfigNodeProcedureEnv env) throws PipeException {
LOGGER.info(
- "AlterPipeProcedureV2: executeFromValidateTask({})", alterPipeRequest.getPipeName());
+ ProcedureMessages.ALTERPIPEPROCEDUREV2_EXECUTEFROMVALIDATETASK,
+ alterPipeRequest.getPipeName());
// We should execute checkBeforeAlterPipe before checking the pipe plugin. This method will
// update the alterPipeRequest based on the alterPipeRequest and existing pipe metadata.
@@ -148,7 +151,7 @@ public boolean executeFromValidateTask(final ConfigNodeProcedureEnv env) throws
@Override
public void executeFromCalculateInfoForTask(final ConfigNodeProcedureEnv env) {
LOGGER.info(
- "AlterPipeProcedureV2: executeFromCalculateInfoForTask({})",
+ ProcedureMessages.ALTERPIPEPROCEDUREV2_EXECUTEFROMCALCULATEINFOFORTASK,
alterPipeRequest.getPipeName());
final PipeMeta currentPipeMeta =
@@ -259,7 +262,7 @@ public void executeFromCalculateInfoForTask(final ConfigNodeProcedureEnv env) {
public void executeFromWriteConfigNodeConsensus(final ConfigNodeProcedureEnv env)
throws PipeException {
LOGGER.info(
- "AlterPipeProcedureV2: executeFromWriteConfigNodeConsensus({})",
+ ProcedureMessages.ALTERPIPEPROCEDUREV2_EXECUTEFROMWRITECONFIGNODECONSENSUS,
alterPipeRequest.getPipeName());
TSStatus response;
@@ -269,7 +272,7 @@ public void executeFromWriteConfigNodeConsensus(final ConfigNodeProcedureEnv env
.getConsensusManager()
.write(new AlterPipePlanV2(updatedPipeStaticMeta, updatedPipeRuntimeMeta));
} catch (final ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
response.setMessage(e.getMessage());
}
@@ -281,7 +284,7 @@ public void executeFromWriteConfigNodeConsensus(final ConfigNodeProcedureEnv env
@Override
public void executeFromOperateOnDataNodes(final ConfigNodeProcedureEnv env) throws IOException {
final String pipeName = alterPipeRequest.getPipeName();
- LOGGER.info("AlterPipeProcedureV2: executeFromOperateOnDataNodes({})", pipeName);
+ LOGGER.info(ProcedureMessages.ALTERPIPEPROCEDUREV2_EXECUTEFROMOPERATEONDATANODES, pipeName);
final String exceptionMessage =
parsePushPipeMetaExceptionForPipe(
@@ -292,7 +295,7 @@ public void executeFromOperateOnDataNodes(final ConfigNodeProcedureEnv env) thro
: pushSinglePipeMetaToDataNodes(pipeName, env));
if (!exceptionMessage.isEmpty()) {
LOGGER.warn(
- "Failed to alter pipe {}, details: {}, metadata will be synchronized later.",
+ ProcedureMessages.FAILED_TO_ALTER_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED_LATER,
alterPipeRequest.getPipeName(),
exceptionMessage);
}
@@ -301,14 +304,15 @@ public void executeFromOperateOnDataNodes(final ConfigNodeProcedureEnv env) thro
@Override
public void rollbackFromValidateTask(final ConfigNodeProcedureEnv env) {
LOGGER.info(
- "AlterPipeProcedureV2: rollbackFromValidateTask({})", alterPipeRequest.getPipeName());
+ ProcedureMessages.ALTERPIPEPROCEDUREV2_ROLLBACKFROMVALIDATETASK,
+ alterPipeRequest.getPipeName());
// Do nothing
}
@Override
public void rollbackFromCalculateInfoForTask(final ConfigNodeProcedureEnv env) {
LOGGER.info(
- "AlterPipeProcedureV2: rollbackFromCalculateInfoForTask({})",
+ ProcedureMessages.ALTERPIPEPROCEDUREV2_ROLLBACKFROMCALCULATEINFOFORTASK,
alterPipeRequest.getPipeName());
// Do nothing
}
@@ -316,7 +320,7 @@ public void rollbackFromCalculateInfoForTask(final ConfigNodeProcedureEnv env) {
@Override
public void rollbackFromWriteConfigNodeConsensus(final ConfigNodeProcedureEnv env) {
LOGGER.info(
- "AlterPipeProcedureV2: rollbackFromWriteConfigNodeConsensus({})",
+ ProcedureMessages.ALTERPIPEPROCEDUREV2_ROLLBACKFROMWRITECONFIGNODECONSENSUS,
alterPipeRequest.getPipeName());
TSStatus response;
try {
@@ -325,7 +329,7 @@ public void rollbackFromWriteConfigNodeConsensus(final ConfigNodeProcedureEnv en
.getConsensusManager()
.write(new AlterPipePlanV2(currentPipeStaticMeta, currentPipeRuntimeMeta));
} catch (final ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
response.setMessage(e.getMessage());
}
@@ -337,7 +341,8 @@ public void rollbackFromWriteConfigNodeConsensus(final ConfigNodeProcedureEnv en
@Override
public void rollbackFromOperateOnDataNodes(final ConfigNodeProcedureEnv env) throws IOException {
LOGGER.info(
- "AlterPipeProcedureV2: rollbackFromOperateOnDataNodes({})", alterPipeRequest.getPipeName());
+ ProcedureMessages.ALTERPIPEPROCEDUREV2_ROLLBACKFROMOPERATEONDATANODES,
+ alterPipeRequest.getPipeName());
// Push all pipe metas to datanode, may be time-consuming
final String exceptionMessage =
@@ -345,7 +350,7 @@ public void rollbackFromOperateOnDataNodes(final ConfigNodeProcedureEnv env) thr
alterPipeRequest.getPipeName(), pushPipeMetaToDataNodes(env));
if (!exceptionMessage.isEmpty()) {
LOGGER.warn(
- "Failed to rollback alter pipe {}, details: {}, metadata will be synchronized later.",
+ ProcedureMessages.FAILED_TO_ROLLBACK_ALTER_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED,
alterPipeRequest.getPipeName(),
exceptionMessage);
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/CreatePipeProcedureV2.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/CreatePipeProcedureV2.java
index 98f6756db2d46..3cb0665600a09 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/CreatePipeProcedureV2.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/CreatePipeProcedureV2.java
@@ -38,6 +38,8 @@
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
import org.apache.iotdb.confignode.consensus.request.write.pipe.task.CreatePipePlanV2;
import org.apache.iotdb.confignode.consensus.request.write.pipe.task.DropPipePlanV2;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.manager.pipe.coordinator.PipeManager;
import org.apache.iotdb.confignode.persistence.pipe.PipeTaskInfo;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
@@ -137,7 +139,8 @@ protected PipeTaskOperation getOperation() {
@Override
public boolean executeFromValidateTask(final ConfigNodeProcedureEnv env) throws PipeException {
LOGGER.info(
- "CreatePipeProcedureV2: executeFromValidateTask({})", createPipeRequest.getPipeName());
+ ProcedureMessages.CREATEPIPEPROCEDUREV2_EXECUTEFROMVALIDATETASK,
+ createPipeRequest.getPipeName());
final PipeManager pipeManager = env.getConfigManager().getPipeManager();
pipeManager
@@ -204,7 +207,7 @@ public static void checkAndEnrichSourceAuthentication(
env.getConfigManager().getPermissionManager().login4Pipe(username, password);
}
if (Objects.isNull(hashedPassword)) {
- throw new PipeException("Authentication failed.");
+ throw new PipeException(ProcedureMessages.AUTHENTICATION_FAILED);
}
sourceParameters.addOrReplaceEquivalentAttributes(
new PipeParameters(
@@ -248,7 +251,7 @@ public static void checkAndEnrichSinkAuthentication(
PipeSinkConstant.CONNECTOR_IOTDB_PASSWORD_KEY,
PipeSinkConstant.SINK_IOTDB_PASSWORD_KEY));
if (Objects.isNull(hashedPassword)) {
- throw new PipeException("Authentication failed.");
+ throw new PipeException(ProcedureMessages.AUTHENTICATION_FAILED);
}
sinkParameters.addOrReplaceEquivalentAttributes(
new PipeParameters(
@@ -259,7 +262,7 @@ public static void checkAndEnrichSinkAuthentication(
@Override
public void executeFromCalculateInfoForTask(final ConfigNodeProcedureEnv env) {
LOGGER.info(
- "CreatePipeProcedureV2: executeFromCalculateInfoForTask({})",
+ ProcedureMessages.CREATEPIPEPROCEDUREV2_EXECUTEFROMCALCULATEINFOFORTASK,
createPipeRequest.getPipeName());
pipeStaticMeta =
@@ -358,7 +361,7 @@ public void executeFromCalculateInfoForTask(final ConfigNodeProcedureEnv env) {
public void executeFromWriteConfigNodeConsensus(final ConfigNodeProcedureEnv env)
throws PipeException {
LOGGER.info(
- "CreatePipeProcedureV2: executeFromWriteConfigNodeConsensus({})",
+ ProcedureMessages.CREATEPIPEPROCEDUREV2_EXECUTEFROMWRITECONFIGNODECONSENSUS,
createPipeRequest.getPipeName());
TSStatus response;
@@ -368,7 +371,7 @@ public void executeFromWriteConfigNodeConsensus(final ConfigNodeProcedureEnv env
.getConsensusManager()
.write(new CreatePipePlanV2(pipeStaticMeta, pipeRuntimeMeta));
} catch (final ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
response.setMessage(e.getMessage());
}
@@ -380,13 +383,13 @@ public void executeFromWriteConfigNodeConsensus(final ConfigNodeProcedureEnv env
@Override
public void executeFromOperateOnDataNodes(final ConfigNodeProcedureEnv env) throws IOException {
final String pipeName = createPipeRequest.getPipeName();
- LOGGER.info("CreatePipeProcedureV2: executeFromOperateOnDataNodes({})", pipeName);
+ LOGGER.info(ProcedureMessages.CREATEPIPEPROCEDUREV2_EXECUTEFROMOPERATEONDATANODES, pipeName);
final String exceptionMessage =
parsePushPipeMetaExceptionForPipe(pipeName, pushSinglePipeMetaToDataNodes(pipeName, env));
if (!exceptionMessage.isEmpty()) {
LOGGER.warn(
- "Failed to create pipe {}, details: {}, metadata will be synchronized later.",
+ ProcedureMessages.FAILED_TO_CREATE_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED_LATER,
createPipeRequest.getPipeName(),
exceptionMessage);
}
@@ -395,14 +398,15 @@ public void executeFromOperateOnDataNodes(final ConfigNodeProcedureEnv env) thro
@Override
public void rollbackFromValidateTask(final ConfigNodeProcedureEnv env) {
LOGGER.info(
- "CreatePipeProcedureV2: rollbackFromValidateTask({})", createPipeRequest.getPipeName());
+ ProcedureMessages.CREATEPIPEPROCEDUREV2_ROLLBACKFROMVALIDATETASK,
+ createPipeRequest.getPipeName());
// Do nothing
}
@Override
public void rollbackFromCalculateInfoForTask(final ConfigNodeProcedureEnv env) {
LOGGER.info(
- "CreatePipeProcedureV2: rollbackFromCalculateInfoForTask({})",
+ ProcedureMessages.CREATEPIPEPROCEDUREV2_ROLLBACKFROMCALCULATEINFOFORTASK,
createPipeRequest.getPipeName());
// Do nothing
}
@@ -410,7 +414,7 @@ public void rollbackFromCalculateInfoForTask(final ConfigNodeProcedureEnv env) {
@Override
public void rollbackFromWriteConfigNodeConsensus(final ConfigNodeProcedureEnv env) {
LOGGER.info(
- "CreatePipeProcedureV2: rollbackFromWriteConfigNodeConsensus({})",
+ ProcedureMessages.CREATEPIPEPROCEDUREV2_ROLLBACKFROMWRITECONFIGNODECONSENSUS,
createPipeRequest.getPipeName());
TSStatus response;
try {
@@ -419,7 +423,7 @@ public void rollbackFromWriteConfigNodeConsensus(final ConfigNodeProcedureEnv en
.getConsensusManager()
.write(new DropPipePlanV2(createPipeRequest.getPipeName()));
} catch (final ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
response.setMessage(e.getMessage());
}
@@ -431,7 +435,7 @@ public void rollbackFromWriteConfigNodeConsensus(final ConfigNodeProcedureEnv en
@Override
public void rollbackFromOperateOnDataNodes(final ConfigNodeProcedureEnv env) throws IOException {
LOGGER.info(
- "CreatePipeProcedureV2: rollbackFromOperateOnDataNodes({})",
+ ProcedureMessages.CREATEPIPEPROCEDUREV2_ROLLBACKFROMOPERATEONDATANODES,
createPipeRequest.getPipeName());
// Push all pipe metas to datanode, may be time-consuming
@@ -440,7 +444,7 @@ public void rollbackFromOperateOnDataNodes(final ConfigNodeProcedureEnv env) thr
createPipeRequest.getPipeName(), pushPipeMetaToDataNodes(env));
if (!exceptionMessage.isEmpty()) {
LOGGER.warn(
- "Failed to rollback create pipe {}, details: {}, metadata will be synchronized later.",
+ ProcedureMessages.FAILED_TO_ROLLBACK_CREATE_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED,
createPipeRequest.getPipeName(),
exceptionMessage);
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/DropPipeProcedureV2.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/DropPipeProcedureV2.java
index 0c7042caf3f07..4ba6906785101 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/DropPipeProcedureV2.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/DropPipeProcedureV2.java
@@ -21,6 +21,8 @@
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.confignode.consensus.request.write.pipe.task.DropPipePlanV2;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.persistence.pipe.PipeTaskInfo;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.impl.pipe.AbstractOperatePipeProcedureV2;
@@ -74,7 +76,7 @@ protected PipeTaskOperation getOperation() {
@Override
public boolean executeFromValidateTask(ConfigNodeProcedureEnv env) throws PipeException {
- LOGGER.info("DropPipeProcedureV2: executeFromValidateTask({})", pipeName);
+ LOGGER.info(ProcedureMessages.DROPPIPEPROCEDUREV2_EXECUTEFROMVALIDATETASK, pipeName);
pipeTaskInfo.get().checkBeforeDropPipe(pipeName);
@@ -83,19 +85,20 @@ public boolean executeFromValidateTask(ConfigNodeProcedureEnv env) throws PipeEx
@Override
public void executeFromCalculateInfoForTask(ConfigNodeProcedureEnv env) throws PipeException {
- LOGGER.info("DropPipeProcedureV2: executeFromCalculateInfoForTask({})", pipeName);
+ LOGGER.info(ProcedureMessages.DROPPIPEPROCEDUREV2_EXECUTEFROMCALCULATEINFOFORTASK, pipeName);
// Do nothing
}
@Override
public void executeFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) throws PipeException {
- LOGGER.info("DropPipeProcedureV2: executeFromWriteConfigNodeConsensus({})", pipeName);
+ LOGGER.info(
+ ProcedureMessages.DROPPIPEPROCEDUREV2_EXECUTEFROMWRITECONFIGNODECONSENSUS, pipeName);
TSStatus response;
try {
response = env.getConfigManager().getConsensusManager().write(new DropPipePlanV2(pipeName));
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
response.setMessage(e.getMessage());
}
@@ -106,13 +109,13 @@ public void executeFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) thro
@Override
public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) {
- LOGGER.info("DropPipeProcedureV2: executeFromOperateOnDataNodes({})", pipeName);
+ LOGGER.info(ProcedureMessages.DROPPIPEPROCEDUREV2_EXECUTEFROMOPERATEONDATANODES, pipeName);
final String exceptionMessage =
parsePushPipeMetaExceptionForPipe(pipeName, dropSinglePipeOnDataNodes(pipeName, env));
if (!exceptionMessage.isEmpty()) {
LOGGER.warn(
- "Failed to drop pipe {}, details: {}, metadata will be synchronized later.",
+ ProcedureMessages.FAILED_TO_DROP_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED_LATER,
pipeName,
exceptionMessage);
}
@@ -120,25 +123,26 @@ public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) {
@Override
public void rollbackFromValidateTask(ConfigNodeProcedureEnv env) {
- LOGGER.info("DropPipeProcedureV2: rollbackFromValidateTask({})", pipeName);
+ LOGGER.info(ProcedureMessages.DROPPIPEPROCEDUREV2_ROLLBACKFROMVALIDATETASK, pipeName);
// Do nothing
}
@Override
public void rollbackFromCalculateInfoForTask(ConfigNodeProcedureEnv env) {
- LOGGER.info("DropPipeProcedureV2: rollbackFromCalculateInfoForTask({})", pipeName);
+ LOGGER.info(ProcedureMessages.DROPPIPEPROCEDUREV2_ROLLBACKFROMCALCULATEINFOFORTASK, pipeName);
// Do nothing
}
@Override
public void rollbackFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) {
- LOGGER.info("DropPipeProcedureV2: rollbackFromWriteConfigNodeConsensus({})", pipeName);
+ LOGGER.info(
+ ProcedureMessages.DROPPIPEPROCEDUREV2_ROLLBACKFROMWRITECONFIGNODECONSENSUS, pipeName);
// Do nothing
}
@Override
public void rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env) {
- LOGGER.info("DropPipeProcedureV2: rollbackFromOperateOnDataNodes({})", pipeName);
+ LOGGER.info(ProcedureMessages.DROPPIPEPROCEDUREV2_ROLLBACKFROMOPERATEONDATANODES, pipeName);
// Do nothing
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/StartPipeProcedureV2.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/StartPipeProcedureV2.java
index fe36137b35f47..251a4f0e3af0f 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/StartPipeProcedureV2.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/StartPipeProcedureV2.java
@@ -22,6 +22,8 @@
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStatus;
import org.apache.iotdb.confignode.consensus.request.write.pipe.task.SetPipeStatusPlanV2;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.impl.pipe.AbstractOperatePipeProcedureV2;
import org.apache.iotdb.confignode.procedure.impl.pipe.PipeTaskOperation;
@@ -61,7 +63,7 @@ protected PipeTaskOperation getOperation() {
@Override
public boolean executeFromValidateTask(ConfigNodeProcedureEnv env) throws PipeException {
- LOGGER.info("StartPipeProcedureV2: executeFromValidateTask({})", pipeName);
+ LOGGER.info(ProcedureMessages.STARTPIPEPROCEDUREV2_EXECUTEFROMVALIDATETASK, pipeName);
pipeTaskInfo.get().checkBeforeStartPipe(pipeName);
@@ -71,13 +73,14 @@ public boolean executeFromValidateTask(ConfigNodeProcedureEnv env) throws PipeEx
@Override
public void executeFromCalculateInfoForTask(ConfigNodeProcedureEnv env) throws PipeException {
- LOGGER.info("StartPipeProcedureV2: executeFromCalculateInfoForTask({})", pipeName);
+ LOGGER.info(ProcedureMessages.STARTPIPEPROCEDUREV2_EXECUTEFROMCALCULATEINFOFORTASK, pipeName);
// Do nothing
}
@Override
public void executeFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) throws PipeException {
- LOGGER.info("StartPipeProcedureV2: executeFromWriteConfigNodeConsensus({})", pipeName);
+ LOGGER.info(
+ ProcedureMessages.STARTPIPEPROCEDUREV2_EXECUTEFROMWRITECONFIGNODECONSENSUS, pipeName);
TSStatus response;
try {
@@ -86,7 +89,7 @@ public void executeFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) thro
.getConsensusManager()
.write(new SetPipeStatusPlanV2(pipeName, PipeStatus.RUNNING));
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
response.setMessage(e.getMessage());
}
@@ -97,13 +100,13 @@ public void executeFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) thro
@Override
public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) throws IOException {
- LOGGER.info("StartPipeProcedureV2: executeFromOperateOnDataNodes({})", pipeName);
+ LOGGER.info(ProcedureMessages.STARTPIPEPROCEDUREV2_EXECUTEFROMOPERATEONDATANODES, pipeName);
final String exceptionMessage =
parsePushPipeMetaExceptionForPipe(pipeName, pushSinglePipeMetaToDataNodes(pipeName, env));
if (!exceptionMessage.isEmpty()) {
LOGGER.warn(
- "Failed to start pipe {}, details: {}, metadata will be synchronized later.",
+ ProcedureMessages.FAILED_TO_START_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED_LATER,
pipeName,
exceptionMessage);
return;
@@ -116,19 +119,20 @@ public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) throws IOE
@Override
public void rollbackFromValidateTask(ConfigNodeProcedureEnv env) {
- LOGGER.info("StartPipeProcedureV2: rollbackFromValidateTask({})", pipeName);
+ LOGGER.info(ProcedureMessages.STARTPIPEPROCEDUREV2_ROLLBACKFROMVALIDATETASK, pipeName);
// Do nothing
}
@Override
public void rollbackFromCalculateInfoForTask(ConfigNodeProcedureEnv env) {
- LOGGER.info("StartPipeProcedureV2: rollbackFromCalculateInfoForTask({})", pipeName);
+ LOGGER.info(ProcedureMessages.STARTPIPEPROCEDUREV2_ROLLBACKFROMCALCULATEINFOFORTASK, pipeName);
// Do nothing
}
@Override
public void rollbackFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) {
- LOGGER.info("StartPipeProcedureV2: rollbackFromWriteConfigNodeConsensus({})", pipeName);
+ LOGGER.info(
+ ProcedureMessages.STARTPIPEPROCEDUREV2_ROLLBACKFROMWRITECONFIGNODECONSENSUS, pipeName);
TSStatus response;
try {
@@ -137,7 +141,7 @@ public void rollbackFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) {
.getConsensusManager()
.write(new SetPipeStatusPlanV2(pipeName, PipeStatus.STOPPED));
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
response.setMessage(e.getMessage());
}
@@ -148,14 +152,14 @@ public void rollbackFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) {
@Override
public void rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env) throws IOException {
- LOGGER.info("StartPipeProcedureV2: rollbackFromOperateOnDataNodes({})", pipeName);
+ LOGGER.info(ProcedureMessages.STARTPIPEPROCEDUREV2_ROLLBACKFROMOPERATEONDATANODES, pipeName);
// Push all pipe metas to datanode, may be time-consuming
final String exceptionMessage =
parsePushPipeMetaExceptionForPipe(pipeName, pushPipeMetaToDataNodes(env));
if (!exceptionMessage.isEmpty()) {
LOGGER.warn(
- "Failed to rollback start pipe {}, details: {}, metadata will be synchronized later.",
+ ProcedureMessages.FAILED_TO_ROLLBACK_START_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED,
pipeName,
exceptionMessage);
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/StopPipeProcedureV2.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/StopPipeProcedureV2.java
index b2e1a584ec54a..e2ec41f3b8336 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/StopPipeProcedureV2.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/task/StopPipeProcedureV2.java
@@ -22,6 +22,8 @@
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStatus;
import org.apache.iotdb.confignode.consensus.request.write.pipe.task.SetPipeStatusWithStoppedByRuntimeExceptionPlanV2;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.impl.pipe.AbstractOperatePipeProcedureV2;
import org.apache.iotdb.confignode.procedure.impl.pipe.PipeTaskOperation;
@@ -62,7 +64,7 @@ protected PipeTaskOperation getOperation() {
@Override
public boolean executeFromValidateTask(ConfigNodeProcedureEnv env) throws PipeException {
- LOGGER.info("StopPipeProcedureV2: executeFromValidateTask({})", pipeName);
+ LOGGER.info(ProcedureMessages.STOPPIPEPROCEDUREV2_EXECUTEFROMVALIDATETASK, pipeName);
pipeTaskInfo.get().checkBeforeStopPipe(pipeName);
@@ -71,14 +73,15 @@ public boolean executeFromValidateTask(ConfigNodeProcedureEnv env) throws PipeEx
@Override
public void executeFromCalculateInfoForTask(ConfigNodeProcedureEnv env) throws PipeException {
- LOGGER.info("StopPipeProcedureV2: executeFromCalculateInfoForTask({})", pipeName);
+ LOGGER.info(ProcedureMessages.STOPPIPEPROCEDUREV2_EXECUTEFROMCALCULATEINFOFORTASK, pipeName);
isStoppedByRuntimeExceptionBeforeStop =
pipeTaskInfo.get().isStoppedByRuntimeException(pipeName);
}
@Override
public void executeFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) throws PipeException {
- LOGGER.info("StopPipeProcedureV2: executeFromWriteConfigNodeConsensus({})", pipeName);
+ LOGGER.info(
+ ProcedureMessages.STOPPIPEPROCEDUREV2_EXECUTEFROMWRITECONFIGNODECONSENSUS, pipeName);
TSStatus response;
try {
@@ -89,7 +92,7 @@ public void executeFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) thro
new SetPipeStatusWithStoppedByRuntimeExceptionPlanV2(
pipeName, PipeStatus.STOPPED, false));
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
response.setMessage(e.getMessage());
}
@@ -100,13 +103,13 @@ public void executeFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) thro
@Override
public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) throws IOException {
- LOGGER.info("StopPipeProcedureV2: executeFromOperateOnDataNodes({})", pipeName);
+ LOGGER.info(ProcedureMessages.STOPPIPEPROCEDUREV2_EXECUTEFROMOPERATEONDATANODES, pipeName);
final String exceptionMessage =
parsePushPipeMetaExceptionForPipe(pipeName, pushSinglePipeMetaToDataNodes(pipeName, env));
if (!exceptionMessage.isEmpty()) {
LOGGER.warn(
- "Failed to stop pipe {}, details: {}, metadata will be synchronized later.",
+ ProcedureMessages.FAILED_TO_STOP_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED_LATER,
pipeName,
exceptionMessage);
}
@@ -114,19 +117,20 @@ public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) throws IOE
@Override
public void rollbackFromValidateTask(ConfigNodeProcedureEnv env) {
- LOGGER.info("StopPipeProcedureV2: rollbackFromValidateTask({})", pipeName);
+ LOGGER.info(ProcedureMessages.STOPPIPEPROCEDUREV2_ROLLBACKFROMVALIDATETASK, pipeName);
// Do nothing
}
@Override
public void rollbackFromCalculateInfoForTask(ConfigNodeProcedureEnv env) {
- LOGGER.info("StopPipeProcedureV2: rollbackFromCalculateInfoForTask({})", pipeName);
+ LOGGER.info(ProcedureMessages.STOPPIPEPROCEDUREV2_ROLLBACKFROMCALCULATEINFOFORTASK, pipeName);
// Do nothing
}
@Override
public void rollbackFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) {
- LOGGER.info("StopPipeProcedureV2: rollbackFromWriteConfigNodeConsensus({})", pipeName);
+ LOGGER.info(
+ ProcedureMessages.STOPPIPEPROCEDUREV2_ROLLBACKFROMWRITECONFIGNODECONSENSUS, pipeName);
TSStatus response;
try {
response =
@@ -136,7 +140,7 @@ public void rollbackFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) {
new SetPipeStatusWithStoppedByRuntimeExceptionPlanV2(
pipeName, PipeStatus.RUNNING, isStoppedByRuntimeExceptionBeforeStop));
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
response.setMessage(e.getMessage());
}
@@ -147,14 +151,14 @@ public void rollbackFromWriteConfigNodeConsensus(ConfigNodeProcedureEnv env) {
@Override
public void rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env) throws IOException {
- LOGGER.info("StopPipeProcedureV2: rollbackFromOperateOnDataNodes({})", pipeName);
+ LOGGER.info(ProcedureMessages.STOPPIPEPROCEDUREV2_ROLLBACKFROMOPERATEONDATANODES, pipeName);
// Push all pipe metas to datanode, may be time-consuming
final String exceptionMessage =
parsePushPipeMetaExceptionForPipe(pipeName, pushPipeMetaToDataNodes(env));
if (!exceptionMessage.isEmpty()) {
LOGGER.warn(
- "Failed to rollback stop pipe {}, details: {}, metadata will be synchronized later.",
+ ProcedureMessages.FAILED_TO_ROLLBACK_STOP_PIPE_DETAILS_METADATA_WILL_BE_SYNCHRONIZED,
pipeName,
exceptionMessage);
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/util/PipeExternalSourceLoadBalancer.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/util/PipeExternalSourceLoadBalancer.java
index 76756a579f07e..66021fc91c9cf 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/util/PipeExternalSourceLoadBalancer.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/util/PipeExternalSourceLoadBalancer.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.commons.cluster.NodeStatus;
import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta;
import org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.pipe.api.exception.PipeException;
@@ -60,7 +61,8 @@ public PipeExternalSourceLoadBalancer(final String balanceStrategy) {
this.strategy = new ProportionalBalanceStrategy();
break;
default:
- throw new IllegalArgumentException("Unknown load balance strategy: " + balanceStrategy);
+ throw new IllegalArgumentException(
+ ProcedureMessages.UNKNOWN_LOAD_BALANCE_STRATEGY + balanceStrategy);
}
}
@@ -107,7 +109,7 @@ public Map balance(
.sorted()
.collect(Collectors.toList());
if (runningDataNodes.isEmpty()) {
- throw new PipeException("No available datanode to assign tasks");
+ throw new PipeException(ProcedureMessages.NO_AVAILABLE_DATANODE_TO_ASSIGN_TASKS);
}
final int numNodes = runningDataNodes.size();
for (int i = 1; i <= Math.min(numNodes, parallelCount); i++) {
@@ -141,7 +143,7 @@ public Map balance(
.sorted()
.collect(Collectors.toList());
if (runningDataNodes.isEmpty()) {
- throw new PipeException("No available datanode to assign tasks");
+ throw new PipeException(ProcedureMessages.NO_AVAILABLE_DATANODE_TO_ASSIGN_TASKS);
}
final int numNodes = runningDataNodes.size();
final int quotient = parallelCount / numNodes;
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/AddRegionPeerProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/AddRegionPeerProcedure.java
index f691292c7079b..d09647a332f5b 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/AddRegionPeerProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/AddRegionPeerProcedure.java
@@ -27,6 +27,7 @@
import org.apache.iotdb.commons.queryengine.utils.DateTimeUtils;
import org.apache.iotdb.commons.utils.CommonDateTimeUtils;
import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.env.RegionMaintainHandler;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
@@ -83,7 +84,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, AddRegionPeerState s
switch (state) {
case CREATE_NEW_REGION_PEER:
LOGGER.info(
- "[pid{}][AddRegion] started, {} will be added to DataNode {}.",
+ ProcedureMessages.PID_ADDREGION_STARTED_WILL_BE_ADDED_TO_DATANODE,
getProcId(),
regionId,
simplifiedLocation(targetDataNode));
@@ -124,7 +125,8 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, AddRegionPeerState s
env, handler, String.format("%s result is %s", state, result.getTaskStatus()));
case PROCESSING:
LOGGER.info(
- "waitTaskFinish() returns PROCESSING, which means the waiting has been interrupted, this procedure will end without rollback");
+ ProcedureMessages
+ .WAITTASKFINISH_RETURNS_PROCESSING_WHICH_MEANS_THE_WAITING_HAS_BEEN_INTERRUPTED);
return Flow.NO_MORE_STATE;
case SUCCESS:
setNextState(UPDATE_REGION_LOCATION_CACHE);
@@ -136,9 +138,9 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, AddRegionPeerState s
case UPDATE_REGION_LOCATION_CACHE:
handler.forceUpdateRegionCache(regionId, targetDataNode, RegionStatus.Running);
setKillPoint(state);
- LOGGER.info("[pid{}][AddRegion] state {} complete", getProcId(), state);
+ LOGGER.info(ProcedureMessages.PID_ADDREGION_STATE_COMPLETE, getProcId(), state);
LOGGER.info(
- "[pid{}][AddRegion] success, {} has been added to DataNode {}. Procedure took {} (start at {}).",
+ ProcedureMessages.PID_ADDREGION_SUCCESS_HAS_BEEN_ADDED_TO_DATANODE_PROCEDURE_TOOK,
getProcId(),
regionId,
simplifiedLocation(targetDataNode),
@@ -147,13 +149,13 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, AddRegionPeerState s
DateTimeUtils.convertLongToDate(getSubmittedTime(), "ms"));
return Flow.NO_MORE_STATE;
default:
- throw new ProcedureException("Unsupported state: " + state.name());
+ throw new ProcedureException(ProcedureMessages.UNSUPPORTED_STATE + state.name());
}
} catch (Exception e) {
- LOGGER.error("[pid{}][AddRegion] state {} failed", getProcId(), state, e);
+ LOGGER.error(ProcedureMessages.PID_ADDREGION_STATE_FAILED, getProcId(), state, e);
return Flow.NO_MORE_STATE;
}
- LOGGER.info("[pid{}][AddRegion] state {} complete", getProcId(), state);
+ LOGGER.info(ProcedureMessages.PID_ADDREGION_STATE_COMPLETE, getProcId(), state);
return Flow.HAS_MORE_STATE;
}
@@ -167,9 +169,10 @@ private Flow warnAndRollBackAndNoMoreState(
ConfigNodeProcedureEnv env, RegionMaintainHandler handler, String reason, Exception e)
throws ProcedureException {
if (e != null) {
- LOGGER.warn("[pid{}][AddRegion] Start to roll back, because: {}", getProcId(), reason, e);
+ LOGGER.warn(
+ ProcedureMessages.PID_ADDREGION_START_TO_ROLL_BACK_BECAUSE, getProcId(), reason, e);
} else {
- LOGGER.warn("[pid{}][AddRegion] Start to roll back, because: {}", getProcId(), reason);
+ LOGGER.warn(ProcedureMessages.PID_ADDREGION_START_TO_ROLL_BACK_BECAUSE, getProcId(), reason);
}
handler.removeRegionLocation(regionId, targetDataNode);
@@ -180,11 +183,12 @@ private Flow warnAndRollBackAndNoMoreState(
.orElseThrow(
() ->
new ProcedureException(
- "[pid{}][AddRegion] Cannot roll back, because cannot find the correct locations"))
+ ProcedureMessages
+ .PID_ADDREGION_CANNOT_ROLL_BACK_BECAUSE_CANNOT_FIND_THE_CORRECT))
.getDataNodeLocations();
if (correctDataNodeLocations.remove(targetDataNode)) {
LOGGER.warn(
- "[pid{}][AddRegion] It appears that consensus write has not modified the local partition table. "
+ ProcedureMessages.PID_ADDREGION_IT_APPEARS_THAT_CONSENSUS_WRITE_HAS_NOT_MODIFIED
+ "Please verify whether a leader change has occurred during this stage. "
+ "If this log is triggered without a leader change, it indicates a potential bug in the partition table.",
getProcId());
@@ -202,7 +206,7 @@ private Flow warnAndRollBackAndNoMoreState(
Collectors.toMap(
TDataNodeLocation::getDataNodeId, dataNodeLocation -> dataNodeLocation));
LOGGER.info(
- "[pid{}][AddRegion] reset peer list: peer list of consensus group {} on DataNode {} will be reset to {}",
+ ProcedureMessages.PID_ADDREGION_RESET_PEER_LIST_PEER_LIST_OF_CONSENSUS_GROUP_3,
getProcId(),
regionId,
relatedDataNodeLocationMap.values().stream()
@@ -217,7 +221,7 @@ private Flow warnAndRollBackAndNoMoreState(
(dataNodeId, resetResult) -> {
if (resetResult.getCode() == SUCCESS_STATUS.getStatusCode()) {
LOGGER.info(
- "[pid{}][AddRegion] reset peer list: peer list of consensus group {} on DataNode {} has been successfully reset to {}",
+ ProcedureMessages.PID_ADDREGION_RESET_PEER_LIST_PEER_LIST_OF_CONSENSUS_GROUP_2,
getProcId(),
regionId,
dataNodeId,
@@ -225,7 +229,7 @@ private Flow warnAndRollBackAndNoMoreState(
} else {
// TODO: more precise
LOGGER.warn(
- "[pid{}][AddRegion] reset peer list: peer list of consensus group {} on DataNode {} failed to reset to {}, you may manually reset it",
+ ProcedureMessages.PID_ADDREGION_RESET_PEER_LIST_PEER_LIST_OF_CONSENSUS_GROUP,
getProcId(),
regionId,
dataNodeId,
@@ -272,7 +276,7 @@ public void deserialize(ByteBuffer byteBuffer) {
targetDataNode = ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(byteBuffer);
coordinator = ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(byteBuffer);
} catch (ThriftSerDeException e) {
- LOGGER.error("Error in deserialize {}", this.getClass(), e);
+ LOGGER.error(ProcedureMessages.ERROR_IN_DESERIALIZE, this.getClass(), e);
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/CreateRegionGroupsProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/CreateRegionGroupsProcedure.java
index 17c8b2abdf4f6..472ddd019b8cb 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/CreateRegionGroupsProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/CreateRegionGroupsProcedure.java
@@ -30,6 +30,8 @@
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
import org.apache.iotdb.confignode.consensus.request.write.region.CreateRegionGroupsPlan;
import org.apache.iotdb.confignode.consensus.request.write.region.OfferRegionMaintainTasksPlan;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.manager.load.cache.region.RegionHeartbeatSample;
import org.apache.iotdb.confignode.persistence.partition.maintainer.RegionCreateTask;
import org.apache.iotdb.confignode.persistence.partition.maintainer.RegionDeleteTask;
@@ -114,7 +116,8 @@ protected Flow executeFromState(
// all RegionReplicas were created successfully
persistPlan.addRegionGroup(database, regionReplicaSet);
LOGGER.info(
- "[CreateRegionGroups] All replicas of RegionGroup: {} are created successfully!",
+ ProcedureMessages
+ .CREATEREGIONGROUPS_ALL_REPLICAS_OF_REGIONGROUP_ARE_CREATED_SUCCESSFULLY,
regionReplicaSet.getRegionId());
} else {
final TRegionReplicaSet failedRegionReplicas =
@@ -143,7 +146,8 @@ protected Flow executeFromState(
});
LOGGER.info(
- "[CreateRegionGroups] Failed to create some replicas of RegionGroup: {}, but this RegionGroup can still be used.",
+ ProcedureMessages
+ .CREATEREGIONGROUPS_FAILED_TO_CREATE_SOME_REPLICAS_OF_REGIONGROUP_BUT_THIS,
regionReplicaSet.getRegionId());
} else {
// The redundant RegionReplicas should be deleted otherwise
@@ -162,7 +166,8 @@ protected Flow executeFromState(
});
LOGGER.info(
- "[CreateRegionGroups] Failed to create most of replicas in RegionGroup: {}, The redundant replicas in this RegionGroup will be deleted.",
+ ProcedureMessages
+ .CREATEREGIONGROUPS_FAILED_TO_CREATE_MOST_OF_REPLICAS_IN_REGIONGROUP_THE,
regionReplicaSet.getRegionId());
}
}
@@ -172,7 +177,8 @@ protected Flow executeFromState(
try {
env.getConfigManager().getConsensusManager().write(offerPlan);
} catch (final ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(
+ ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
}
setNextState(CreateRegionGroupsState.ACTIVATE_REGION_GROUPS);
break;
@@ -308,7 +314,7 @@ public void deserialize(final ByteBuffer byteBuffer) {
persistPlan.deserializeForProcedure(byteBuffer);
}
} catch (final Exception e) {
- LOGGER.error("Deserialize meets error in CreateRegionGroupsProcedure", e);
+ LOGGER.error(ProcedureMessages.DESERIALIZE_MEETS_ERROR_IN_CREATEREGIONGROUPSPROCEDURE, e);
throw new RuntimeException(e);
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/NotifyRegionMigrationProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/NotifyRegionMigrationProcedure.java
index a02f60c04761f..2234db867c313 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/NotifyRegionMigrationProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/NotifyRegionMigrationProcedure.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId;
import org.apache.iotdb.commons.exception.runtime.ThriftSerDeException;
import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.state.NotifyRegionMigrationState;
@@ -60,13 +61,13 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, NotifyRegionMigratio
}
try {
LOGGER.info(
- "[pid{}][NotifyRegionMigration] started, region id is {}.", getProcId(), regionId);
+ ProcedureMessages.PID_NOTIFYREGIONMIGRATION_STARTED_REGION_ID_IS, getProcId(), regionId);
env.notifyRegionMigrationToAllDataNodes(regionId, isStart);
} catch (Exception e) {
- LOGGER.error("[pid{}][NotifyRegionMigration] state {} failed", getProcId(), state, e);
+ LOGGER.error(ProcedureMessages.PID_NOTIFYREGIONMIGRATION_STATE_FAILED, getProcId(), state, e);
return Flow.NO_MORE_STATE;
}
- LOGGER.info("[pid{}][NotifyRegionMigration] state {} complete", getProcId(), state);
+ LOGGER.info(ProcedureMessages.PID_NOTIFYREGIONMIGRATION_STATE_COMPLETE, getProcId(), state);
return Flow.NO_MORE_STATE;
}
@@ -105,7 +106,7 @@ public void deserialize(ByteBuffer byteBuffer) {
regionId = ThriftCommonsSerDeUtils.deserializeTConsensusGroupId(byteBuffer);
isStart = (byteBuffer.get() != (byte) 0);
} catch (ThriftSerDeException e) {
- LOGGER.error("Error in deserialize {}", this.getClass(), e);
+ LOGGER.error(ProcedureMessages.ERROR_IN_DESERIALIZE, this.getClass(), e);
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/ReconstructRegionProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/ReconstructRegionProcedure.java
index 0cd1a00c7247f..9b713c931030f 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/ReconstructRegionProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/ReconstructRegionProcedure.java
@@ -25,6 +25,7 @@
import org.apache.iotdb.commons.queryengine.utils.DateTimeUtils;
import org.apache.iotdb.commons.utils.CommonDateTimeUtils;
import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.state.ReconstructRegionState;
@@ -60,7 +61,8 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, ReconstructRegionSta
switch (state) {
case RECONSTRUCT_REGION_PREPARE:
LOGGER.info(
- "[pid{}][ReconstructRegion] started, region {} on DataNode {}({}) will be reconstructed.",
+ ProcedureMessages
+ .PID_RECONSTRUCTREGION_STARTED_REGION_ON_DATANODE_WILL_BE_RECONSTRUCTED,
getProcId(),
regionId.getId(),
targetDataNode.getDataNodeId(),
@@ -76,7 +78,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, ReconstructRegionSta
.getPartitionManager()
.isDataNodeContainsRegion(targetDataNode.getDataNodeId(), regionId)) {
LOGGER.warn(
- "[pid{}][ReconstructRegion] sub-procedure RemoveRegionPeerProcedure failed, ReconstructRegionProcedure will not continue",
+ ProcedureMessages.PID_RECONSTRUCTREGION_SUB_PROCEDURE_REMOVEREGIONPEERPROCEDURE,
getProcId());
return Flow.NO_MORE_STATE;
}
@@ -91,13 +93,13 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, ReconstructRegionSta
.getPartitionManager()
.isDataNodeContainsRegion(targetDataNode.getDataNodeId(), regionId)) {
LOGGER.warn(
- "[pid{}][ReconstructRegion] failed, but the region {} has been removed from DataNode {}. Use 'extend region' to fix this.",
+ ProcedureMessages.PID_RECONSTRUCTREGION_FAILED_BUT_THE_REGION_HAS_BEEN_REMOVED_FROM,
getProcId(),
regionId.getId(),
targetDataNode.getDataNodeId());
} else {
LOGGER.info(
- "[pid{}][ReconstructRegion] success, region {} has been reconstructed on DataNode {}. Procedure took {} (started at {})",
+ ProcedureMessages.PID_RECONSTRUCTREGION_SUCCESS_REGION_HAS_BEEN_RECONSTRUCTED,
getProcId(),
regionId.getId(),
targetDataNode.getDataNodeId(),
@@ -107,13 +109,13 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, ReconstructRegionSta
}
return Flow.NO_MORE_STATE;
default:
- throw new ProcedureException("Unsupported state: " + state.name());
+ throw new ProcedureException(ProcedureMessages.UNSUPPORTED_STATE + state.name());
}
} catch (Exception e) {
- LOGGER.error("[pid{}][ReconstructRegion] state {} fail", getProcId(), state, e);
+ LOGGER.error(ProcedureMessages.PID_RECONSTRUCTREGION_STATE_FAIL, getProcId(), state, e);
return Flow.NO_MORE_STATE;
}
- LOGGER.info("[pid{}][ReconstructRegion] state {} complete", getProcId(), state);
+ LOGGER.info(ProcedureMessages.PID_RECONSTRUCTREGION_STATE_COMPLETE, getProcId(), state);
return Flow.HAS_MORE_STATE;
}
@@ -140,7 +142,7 @@ public void deserialize(ByteBuffer byteBuffer) {
coordinator = ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(byteBuffer);
} catch (ThriftSerDeException e) {
LOGGER.warn(
- "Error in deserialize {} (procID {}). This procedure will be ignored. It may belong to old version and cannot be used now.",
+ ProcedureMessages.ERROR_IN_DESERIALIZE_PROCID_THIS_PROCEDURE_WILL_BE_IGNORED_IT,
this.getClass(),
this.getProcId(),
e);
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/RegionMigrateProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/RegionMigrateProcedure.java
index 899d0504767c5..2bbeb9d12d5e7 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/RegionMigrateProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/RegionMigrateProcedure.java
@@ -25,6 +25,7 @@
import org.apache.iotdb.commons.queryengine.utils.DateTimeUtils;
import org.apache.iotdb.commons.utils.CommonDateTimeUtils;
import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.env.RegionMaintainHandler;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
@@ -78,7 +79,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RegionTransitionStat
switch (state) {
case REGION_MIGRATE_PREPARE:
LOGGER.info(
- "[pid{}][MigrateRegion] started, {} will be migrated from DataNode {} to {}.",
+ ProcedureMessages.PID_MIGRATEREGION_STARTED_WILL_BE_MIGRATED_FROM_DATANODE_TO,
getProcId(),
regionId,
RegionMaintainHandler.simplifiedLocation(originalDataNode),
@@ -96,7 +97,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RegionTransitionStat
.getPartitionManager()
.isDataNodeContainsRegion(destDataNode.getDataNodeId(), regionId)) {
LOGGER.warn(
- "[pid{}][MigrateRegion] sub-procedure AddRegionPeerProcedure failed, RegionMigrateProcedure will not continue",
+ ProcedureMessages.PID_MIGRATEREGION_SUB_PROCEDURE_ADDREGIONPEERPROCEDURE,
getProcId());
return Flow.NO_MORE_STATE;
}
@@ -116,7 +117,8 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RegionTransitionStat
"but you may need to restart the related DataNode to make sure everything is cleaned up. ";
}
LOGGER.info(
- "[pid{}][MigrateRegion] success,{} {} has been migrated from DataNode {} to {}. Procedure took {} (started at {}).",
+ ProcedureMessages
+ .PID_MIGRATEREGION_SUCCESS_HAS_BEEN_MIGRATED_FROM_DATANODE_TO_PROCEDURE,
getProcId(),
cleanHint,
regionId,
@@ -128,14 +130,14 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RegionTransitionStat
addChildProcedure(new NotifyRegionMigrationProcedure(regionId, false));
return Flow.NO_MORE_STATE;
default:
- throw new ProcedureException("Unsupported state: " + state.name());
+ throw new ProcedureException(ProcedureMessages.UNSUPPORTED_STATE + state.name());
}
} catch (Exception e) {
- LOGGER.error("[pid{}][MigrateRegion] state {} fail", getProcId(), state, e);
+ LOGGER.error(ProcedureMessages.PID_MIGRATEREGION_STATE_FAIL, getProcId(), state, e);
// meets exception in region migrate process terminate the process
return Flow.NO_MORE_STATE;
}
- LOGGER.info("[pid{}][MigrateRegion] state {} complete", getProcId(), state);
+ LOGGER.info(ProcedureMessages.PID_MIGRATEREGION_STATE_COMPLETE, getProcId(), state);
return Flow.HAS_MORE_STATE;
}
@@ -180,7 +182,7 @@ public void deserialize(ByteBuffer byteBuffer) {
coordinatorForRemovePeer = ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(byteBuffer);
} catch (ThriftSerDeException e) {
LOGGER.warn(
- "Error in deserialize {} (procID {}). This procedure will be ignored. It may belong to old version and cannot be used now.",
+ ProcedureMessages.ERROR_IN_DESERIALIZE_PROCID_THIS_PROCEDURE_WILL_BE_IGNORED_IT,
this.getClass(),
this.getProcId(),
e);
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/RemoveRegionPeerProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/RemoveRegionPeerProcedure.java
index 05d57a0c09e15..390eb63534c15 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/RemoveRegionPeerProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/RemoveRegionPeerProcedure.java
@@ -28,6 +28,7 @@
import org.apache.iotdb.commons.queryengine.utils.DateTimeUtils;
import org.apache.iotdb.commons.utils.CommonDateTimeUtils;
import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.env.RegionMaintainHandler;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
@@ -74,7 +75,7 @@ public RemoveRegionPeerProcedure(
private void handleTransferLeader(RegionMaintainHandler handler)
throws ProcedureException, InterruptedException {
LOGGER.info(
- "[pid{}][RemoveRegion] started, region {} will be removed from DataNode {}.",
+ ProcedureMessages.PID_REMOVEREGION_STARTED_REGION_WILL_BE_REMOVED_FROM_DATANODE,
getProcId(),
regionId.getId(),
targetDataNode.getDataNodeId());
@@ -105,7 +106,8 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveRegionPeerStat
setKillPoint(state);
if (tsStatus.getCode() != SUCCESS_STATUS.getStatusCode()) {
LOGGER.warn(
- "[pid{}][RemoveRegion] {} task submitted failed, ConfigNode believe current peer list of {} is {}. Procedure will continue. You should manually clear peer list.",
+ ProcedureMessages
+ .PID_REMOVEREGION_TASK_SUBMITTED_FAILED_CONFIGNODE_BELIEVE_CURRENT_PEER_LIST,
getProcId(),
state,
regionId,
@@ -117,7 +119,8 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveRegionPeerStat
handler.waitTaskFinish(this.getProcId(), coordinator);
if (removeRegionPeerResult.getTaskStatus() != TRegionMaintainTaskStatus.SUCCESS) {
LOGGER.warn(
- "[pid{}][RemoveRegion] {} executed failed, ConfigNode believe current peer list of {} is {}. Procedure will continue. You should manually clear peer list.",
+ ProcedureMessages
+ .PID_REMOVEREGION_EXECUTED_FAILED_CONFIGNODE_BELIEVE_CURRENT_PEER_LIST_OF,
getProcId(),
state,
regionId,
@@ -136,7 +139,8 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveRegionPeerStat
deleteOldRegionPeerAttempted++;
if (deleteOldRegionPeerAttempted <= MAX_DELETE_OLD_REGION_PEER_RETRY) {
LOGGER.warn(
- "[pid{}][RemoveRegion] DELETE_OLD_REGION_PEER task submitted failed (attempt {}/{}), will retry after {}ms. {}",
+ ProcedureMessages
+ .PID_REMOVEREGION_DELETE_OLD_REGION_PEER_TASK_SUBMITTED_FAILED_ATTEMPT,
getProcId(),
deleteOldRegionPeerAttempted,
MAX_DELETE_OLD_REGION_PEER_RETRY + 1,
@@ -147,7 +151,8 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveRegionPeerStat
return Flow.HAS_MORE_STATE;
}
LOGGER.warn(
- "[pid{}][RemoveRegion] DELETE_OLD_REGION_PEER task submitted failed after {} attempts, procedure will continue. You should manually delete region file. {}",
+ ProcedureMessages
+ .PID_REMOVEREGION_DELETE_OLD_REGION_PEER_TASK_SUBMITTED_FAILED_AFTER,
getProcId(),
deleteOldRegionPeerAttempted + 1,
regionId);
@@ -160,7 +165,8 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveRegionPeerStat
deleteOldRegionPeerAttempted++;
if (deleteOldRegionPeerAttempted <= MAX_DELETE_OLD_REGION_PEER_RETRY) {
LOGGER.warn(
- "[pid{}][RemoveRegion] DELETE_OLD_REGION_PEER executed failed (attempt {}/{}), will retry after {}ms. {}",
+ ProcedureMessages
+ .PID_REMOVEREGION_DELETE_OLD_REGION_PEER_EXECUTED_FAILED_ATTEMPT_WILL,
getProcId(),
deleteOldRegionPeerAttempted,
MAX_DELETE_OLD_REGION_PEER_RETRY + 1,
@@ -171,7 +177,8 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveRegionPeerStat
return Flow.HAS_MORE_STATE;
}
LOGGER.warn(
- "[pid{}][RemoveRegion] DELETE_OLD_REGION_PEER executed failed after {} attempts, procedure will continue. You should manually delete region file. {}",
+ ProcedureMessages
+ .PID_REMOVEREGION_DELETE_OLD_REGION_PEER_EXECUTED_FAILED_AFTER_ATTEMPTS,
getProcId(),
deleteOldRegionPeerAttempted + 1,
regionId);
@@ -188,9 +195,10 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveRegionPeerStat
case REMOVE_REGION_LOCATION_CACHE:
handler.removeRegionLocation(regionId, targetDataNode);
setKillPoint(state);
- LOGGER.info("RemoveRegionPeer state {} success", state);
+ LOGGER.info(ProcedureMessages.REMOVEREGIONPEER_STATE_SUCCESS, state);
LOGGER.info(
- "[pid{}][RemoveRegion] success, region {} has been removed from DataNode {}. Procedure took {} (started at {})",
+ ProcedureMessages
+ .PID_REMOVEREGION_SUCCESS_REGION_HAS_BEEN_REMOVED_FROM_DATANODE_PROCEDURE,
getProcId(),
regionId.getId(),
targetDataNode.getDataNodeId(),
@@ -199,13 +207,13 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveRegionPeerStat
DateTimeUtils.convertLongToDate(getSubmittedTime(), "ms"));
return Flow.NO_MORE_STATE;
default:
- throw new ProcedureException("Unsupported state: " + state.name());
+ throw new ProcedureException(ProcedureMessages.UNSUPPORTED_STATE + state.name());
}
} catch (Exception e) {
- LOGGER.error("RemoveRegionPeer state {} failed", state, e);
+ LOGGER.error(ProcedureMessages.REMOVEREGIONPEER_STATE_FAILED, state, e);
return Flow.NO_MORE_STATE;
}
- LOGGER.info("[pid{}][RemoveRegion] state {} success", getProcId(), state);
+ LOGGER.info(ProcedureMessages.PID_REMOVEREGION_STATE_SUCCESS, getProcId(), state);
return Flow.HAS_MORE_STATE;
}
@@ -245,7 +253,7 @@ public void deserialize(ByteBuffer byteBuffer) {
targetDataNode = ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(byteBuffer);
coordinator = ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(byteBuffer);
} catch (ThriftSerDeException e) {
- LOGGER.error("Error in deserialize {}", this.getClass(), e);
+ LOGGER.error(ProcedureMessages.ERROR_IN_DESERIALIZE, this.getClass(), e);
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/AlterEncodingCompressorProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/AlterEncodingCompressorProcedure.java
index e9e257ac254a4..1ad60f6f852ca 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/AlterEncodingCompressorProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/AlterEncodingCompressorProcedure.java
@@ -31,6 +31,7 @@
import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeAlterEncodingCompressorPlan;
import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeEnrichedPlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.manager.ClusterManager;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
@@ -121,7 +122,7 @@ protected Flow executeFromState(
case ALTER_SCHEMA_REGION:
if (LOGGER.isInfoEnabled()) {
LOGGER.info(
- "Alter encoding {} & compressor {} in schema region for timeSeries {}",
+ ProcedureMessages.ALTER_ENCODING_COMPRESSOR_IN_SCHEMA_REGION_FOR_TIMESERIES,
SerializeUtils.deserializeEncodingNullable(encoding),
SerializeUtils.deserializeCompressorNullable(compressor),
requestMessage);
@@ -131,18 +132,18 @@ protected Flow executeFromState(
}
break;
case CLEAR_CACHE:
- LOGGER.info("Invalidate cache of timeSeries {}", requestMessage);
+ LOGGER.info(ProcedureMessages.INVALIDATE_CACHE_OF_TIMESERIES, requestMessage);
invalidateCache(env, patternTreeBytes, requestMessage, this::setFailure, false);
collectPayload4Pipe(env);
return Flow.NO_MORE_STATE;
default:
- setFailure(new ProcedureException("Unrecognized state " + state));
+ setFailure(new ProcedureException(ProcedureMessages.UNRECOGNIZED_STATE + state));
return Flow.NO_MORE_STATE;
}
return Flow.HAS_MORE_STATE;
} finally {
LOGGER.info(
- "AlterEncodingCompressor-[{}] costs {}ms",
+ ProcedureMessages.ALTERENCODINGCOMPRESSOR_COSTS_MS,
state,
(System.currentTimeMillis() - startTime));
}
@@ -216,8 +217,10 @@ protected void onAllReplicasetFailure(
new ProcedureException(
new MetadataException(
String.format(
- "Alter encoding compressor %s in schema regions failed. Failures: %s",
- requestMessage, printFailureMap()))));
+ ProcedureMessages
+ .ALTER_ENCODING_COMPRESSOR_IN_SCHEMA_REGIONS_FAILED_FAILURES,
+ requestMessage,
+ printFailureMap()))));
interruptTask();
}
};
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/AlterLogicalViewProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/AlterLogicalViewProcedure.java
index cc41d941c208b..8c8d2019f4de8 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/AlterLogicalViewProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/AlterLogicalViewProcedure.java
@@ -33,6 +33,7 @@
import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager;
import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure;
@@ -97,12 +98,12 @@ protected Flow executeFromState(
try {
switch (state) {
case CLEAN_DATANODE_SCHEMA_CACHE:
- LOGGER.info("Invalidate cache of view {}", viewPathToSourceMap.keySet());
+ LOGGER.info(ProcedureMessages.INVALIDATE_CACHE_OF_VIEW, viewPathToSourceMap.keySet());
invalidateCache(env);
setNextState(AlterLogicalViewState.ALTER_LOGICAL_VIEW);
return Flow.HAS_MORE_STATE;
case ALTER_LOGICAL_VIEW:
- LOGGER.info("Alter view {}", viewPathToSourceMap.keySet());
+ LOGGER.info(ProcedureMessages.ALTER_VIEW, viewPathToSourceMap.keySet());
try {
alterLogicalView(env);
} catch (final ProcedureException e) {
@@ -110,12 +111,14 @@ protected Flow executeFromState(
}
return Flow.NO_MORE_STATE;
default:
- setFailure(new ProcedureException("Unrecognized state " + state));
+ setFailure(new ProcedureException(ProcedureMessages.UNRECOGNIZED_STATE + state));
return Flow.NO_MORE_STATE;
}
} finally {
LOGGER.info(
- "AlterLogicalView-[{}] costs {}ms", state, (System.currentTimeMillis() - startTime));
+ ProcedureMessages.ALTERLOGICALVIEW_COSTS_MS,
+ state,
+ (System.currentTimeMillis() - startTime));
}
}
@@ -133,10 +136,12 @@ private void invalidateCache(final ConfigNodeProcedureEnv env) {
// all dataNodes must clear the related schemaengine cache
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.error(
- "Failed to invalidate schemaengine cache of view {}", viewPathToSourceMap.keySet());
+ ProcedureMessages.FAILED_TO_INVALIDATE_SCHEMAENGINE_CACHE_OF_VIEW,
+ viewPathToSourceMap.keySet());
setFailure(
new ProcedureException(
- new MetadataException("Invalidate view schemaengine cache failed")));
+ new MetadataException(
+ ProcedureMessages.INVALIDATE_VIEW_SCHEMAENGINE_CACHE_FAILED)));
return;
}
}
@@ -393,7 +398,7 @@ protected void onAllReplicasetFailure(
new ProcedureException(
new MetadataException(
String.format(
- "Alter view %s failed when [%s] because failed to execute in all replicaset of schemaRegion %s. Failure nodes: %s, statuses: %s",
+ ProcedureMessages.ALTER_VIEW_FAILED_WHEN_BECAUSE_FAILED_TO_EXECUTE_IN_ALL,
viewPathToSourceMap.keySet(),
taskName,
consensusGroupId.id,
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/AlterTimeSeriesDataTypeProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/AlterTimeSeriesDataTypeProcedure.java
index 59398f147fd90..26ea988f98e72 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/AlterTimeSeriesDataTypeProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/AlterTimeSeriesDataTypeProcedure.java
@@ -32,6 +32,7 @@
import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext;
import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeAlterTimeSeriesPlan;
import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeEnrichedPlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.manager.ClusterManager;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
@@ -101,20 +102,24 @@ protected StateMachineProcedure.Flow executeFromState(
switch (state) {
case CHECK_AND_INVALIDATE_SERIES:
LOGGER.info(
- "Check and invalidate series {} when altering time series data type",
+ ProcedureMessages.CHECK_AND_INVALIDATE_SERIES_WHEN_ALTERING_TIME_SERIES_DATA_TYPE,
measurementPath.getFullPath());
checkAndPreAlterTimeSeries();
break;
case ALTER_TIME_SERIES_DATA_TYPE:
- LOGGER.info("altering time series {} data type", measurementPath.getFullPath());
+ LOGGER.info(
+ ProcedureMessages.ALTERING_TIME_SERIES_DATA_TYPE, measurementPath.getFullPath());
if (!alterTimeSeriesDataType(env)) {
- LOGGER.error("alter time series {} data type failed", measurementPath.getFullPath());
+ LOGGER.error(
+ ProcedureMessages.ALTER_TIME_SERIES_DATA_TYPE_FAILED,
+ measurementPath.getFullPath());
return Flow.NO_MORE_STATE;
}
break;
case CLEAR_CACHE:
LOGGER.info(
- "clearing cache after alter time series {} data type", measurementPath.getFullPath());
+ ProcedureMessages.CLEARING_CACHE_AFTER_ALTER_TIME_SERIES_DATA_TYPE,
+ measurementPath.getFullPath());
PathPatternTree patternTree = new PathPatternTree();
patternTree.appendPathPattern(measurementPath);
patternTree.constructTree();
@@ -129,13 +134,13 @@ protected StateMachineProcedure.Flow executeFromState(
default:
setFailure(
new ProcedureException(
- "Unrecognized AlterTimeSeriesDataTypeProcedure state " + state));
+ ProcedureMessages.UNRECOGNIZED_ALTERTIMESERIESDATATYPEPROCEDURE_STATE + state));
return Flow.NO_MORE_STATE;
}
return Flow.HAS_MORE_STATE;
} finally {
LOGGER.info(
- "AlterTimeSeriesDataType-{}-[{}] costs {}ms",
+ ProcedureMessages.ALTERTIMESERIESDATATYPE_COSTS_MS,
measurementPath.getFullPath(),
state,
(System.currentTimeMillis() - startTime));
@@ -148,7 +153,8 @@ private void checkAndPreAlterTimeSeries() {
} else {
setFailure(
new ProcedureException(
- new MetadataException("Invalid data type cannot be used as a new type")));
+ new MetadataException(
+ ProcedureMessages.INVALID_DATA_TYPE_CANNOT_BE_USED_AS_A_NEW_TYPE)));
}
}
@@ -224,8 +230,11 @@ protected void onAllReplicasetFailure(
new ProcedureException(
new MetadataException(
String.format(
- "Alter timeseries %s data type to %s in schema regions failed. Failures: %s",
- measurementPath.getFullPath(), dataType, printFailureMap()))));
+ ProcedureMessages
+ .ALTER_TIMESERIES_DATA_TYPE_TO_IN_SCHEMA_REGIONS_FAILED_FAILURES,
+ measurementPath.getFullPath(),
+ dataType,
+ printFailureMap()))));
interruptTask();
}
};
@@ -252,9 +261,12 @@ public static void invalidateCache(
for (final TSStatus status : statusMap.values()) {
// All dataNodes must clear the related schemaEngine cache
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.error("Failed to invalidate schemaEngine cache of timeSeries {}", requestMessage);
+ LOGGER.error(
+ ProcedureMessages.FAILED_TO_INVALIDATE_SCHEMAENGINE_CACHE_OF_TIMESERIES,
+ requestMessage);
setFailure.accept(
- new ProcedureException(new MetadataException("Invalidate schemaEngine cache failed")));
+ new ProcedureException(
+ new MetadataException(ProcedureMessages.INVALIDATE_SCHEMAENGINE_CACHE_FAILED)));
return;
}
}
@@ -374,7 +386,8 @@ public void deserialize(final ByteBuffer byteBuffer) {
queryId = ReadWriteIOUtils.readString(byteBuffer);
setMeasurementPath((MeasurementPath) PathDeserializeUtil.deserialize(byteBuffer));
if (getCurrentState() == AlterTimeSeriesDataTypeState.CLEAR_CACHE) {
- LOGGER.info("Successfully operate, will clear cache to the data regions anyway");
+ LOGGER.info(
+ ProcedureMessages.SUCCESSFULLY_OPERATE_WILL_CLEAR_CACHE_TO_THE_DATA_REGIONS_ANYWAY);
}
if (byteBuffer.hasRemaining()) {
operationType = ReadWriteIOUtils.readByte(byteBuffer);
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeactivateTemplateProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeactivateTemplateProcedure.java
index 54249754d7e88..ab4913da04d81 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeactivateTemplateProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeactivateTemplateProcedure.java
@@ -34,6 +34,7 @@
import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext;
import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeDeactivateTemplatePlan;
import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeEnrichedPlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure;
@@ -100,7 +101,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, DeactivateTemplateSt
try {
switch (state) {
case CONSTRUCT_BLACK_LIST:
- LOGGER.info("Construct schema black list with template {}", requestMessage);
+ LOGGER.info(ProcedureMessages.CONSTRUCT_SCHEMA_BLACK_LIST_WITH_TEMPLATE, requestMessage);
if (constructBlackList(env) > 0) {
setNextState(DeactivateTemplateState.CLEAN_DATANODE_SCHEMA_CACHE);
break;
@@ -108,31 +109,34 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, DeactivateTemplateSt
setFailure(
new ProcedureException(
new IoTDBException(
- "Target Device Template is not activated on any path matched by given path pattern",
+ ProcedureMessages
+ .TARGET_DEVICE_TEMPLATE_IS_NOT_ACTIVATED_ON_ANY_PATH_MATCHED,
TSStatusCode.TEMPLATE_NOT_ACTIVATED.getStatusCode())));
return Flow.NO_MORE_STATE;
}
case CLEAN_DATANODE_SCHEMA_CACHE:
- LOGGER.info("Invalidate cache of template timeSeries {}", requestMessage);
+ LOGGER.info(ProcedureMessages.INVALIDATE_CACHE_OF_TEMPLATE_TIMESERIES, requestMessage);
invalidateCache(env);
break;
case DELETE_DATA:
- LOGGER.info("Delete data of template timeSeries {}", requestMessage);
+ LOGGER.info(ProcedureMessages.DELETE_DATA_OF_TEMPLATE_TIMESERIES, requestMessage);
deleteData(env);
break;
case DEACTIVATE_TEMPLATE:
- LOGGER.info("Deactivate template of {}", requestMessage);
+ LOGGER.info(ProcedureMessages.DEACTIVATE_TEMPLATE_OF, requestMessage);
deactivateTemplate(env);
collectPayload4Pipe(env);
return Flow.NO_MORE_STATE;
default:
- setFailure(new ProcedureException("Unrecognized state " + state));
+ setFailure(new ProcedureException(ProcedureMessages.UNRECOGNIZED_STATE + state));
return Flow.NO_MORE_STATE;
}
return Flow.HAS_MORE_STATE;
} finally {
LOGGER.info(
- "DeactivateTemplate-[{}] costs {}ms", state, (System.currentTimeMillis() - startTime));
+ ProcedureMessages.DEACTIVATETEMPLATE_COSTS_MS,
+ state,
+ (System.currentTimeMillis() - startTime));
}
}
@@ -192,9 +196,11 @@ private void invalidateCache(final ConfigNodeProcedureEnv env) {
// all dataNodes must clear the related schema cache
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.error(
- "Failed to invalidate schema cache of template timeSeries {}", requestMessage);
+ ProcedureMessages.FAILED_TO_INVALIDATE_SCHEMA_CACHE_OF_TEMPLATE_TIMESERIES,
+ requestMessage);
setFailure(
- new ProcedureException(new MetadataException("Invalidate schema cache failed")));
+ new ProcedureException(
+ new MetadataException(ProcedureMessages.INVALIDATE_SCHEMA_CACHE_FAILED)));
return;
}
}
@@ -449,7 +455,8 @@ protected void onAllReplicasetFailure(
new ProcedureException(
new MetadataException(
String.format(
- "Deactivate template of %s failed when [%s] because failed to execute in all replicaset of %s %s. Failure: %s",
+ ProcedureMessages
+ .DEACTIVATE_TEMPLATE_OF_FAILED_WHEN_BECAUSE_FAILED_TO_EXECUTE_IN,
requestMessage,
taskName,
consensusGroupId.type,
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteDatabaseProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteDatabaseProcedure.java
index 3ea3f2d06545d..96299f418572e 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteDatabaseProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteDatabaseProcedure.java
@@ -32,6 +32,7 @@
import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext;
import org.apache.iotdb.confignode.consensus.request.write.database.PreDeleteDatabasePlan;
import org.apache.iotdb.confignode.consensus.request.write.region.OfferRegionMaintainTasksPlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.manager.partition.PartitionMetrics;
import org.apache.iotdb.confignode.persistence.partition.maintainer.RegionDeleteTask;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
@@ -103,7 +104,9 @@ protected Flow executeFromState(final ConfigNodeProcedureEnv env, final DeleteDa
if (env.invalidateCache(deleteDatabaseSchema.getName())) {
setNextState(DeleteDatabaseState.DELETE_DATABASE_SCHEMA);
} else {
- setFailure(new ProcedureException("[DeleteDatabaseProcedure] Invalidate cache failed"));
+ setFailure(
+ new ProcedureException(
+ ProcedureMessages.DELETEDATABASEPROCEDURE_INVALIDATE_CACHE_FAILED));
}
break;
case DELETE_DATABASE_SCHEMA:
@@ -213,14 +216,15 @@ protected Flow executeFromState(final ConfigNodeProcedureEnv env, final DeleteDa
return Flow.NO_MORE_STATE;
} else if (getCycles() > RETRY_THRESHOLD) {
setFailure(
- new ProcedureException("[DeleteDatabaseProcedure] Delete DatabaseSchema failed"));
+ new ProcedureException(
+ ProcedureMessages.DELETEDATABASEPROCEDURE_DELETE_DATABASESCHEMA_FAILED));
}
}
} catch (final ConsensusException | TException | IOException e) {
if (isRollbackSupported(state)) {
setFailure(
new ProcedureException(
- "[DeleteDatabaseProcedure] Delete database "
+ ProcedureMessages.DELETEDATABASEPROCEDURE_DELETE_DATABASE
+ deleteDatabaseSchema.getName()
+ " failed "
+ state));
@@ -231,7 +235,9 @@ protected Flow executeFromState(final ConfigNodeProcedureEnv env, final DeleteDa
state,
e);
if (getCycles() > RETRY_THRESHOLD) {
- setFailure(new ProcedureException("[DeleteDatabaseProcedure] State stuck at " + state));
+ setFailure(
+ new ProcedureException(
+ ProcedureMessages.DELETEDATABASEPROCEDURE_STATE_STUCK_AT + state));
}
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteLogicalViewProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteLogicalViewProcedure.java
index 0c68fa62e1884..4f63e96840c20 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteLogicalViewProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteLogicalViewProcedure.java
@@ -31,6 +31,7 @@
import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext;
import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeDeleteLogicalViewPlan;
import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeEnrichedPlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure;
@@ -94,7 +95,8 @@ protected Flow executeFromState(
try {
switch (state) {
case CONSTRUCT_BLACK_LIST:
- LOGGER.info("Construct view schemaengine black list of view {}", requestMessage);
+ LOGGER.info(
+ ProcedureMessages.CONSTRUCT_VIEW_SCHEMAENGINE_BLACK_LIST_OF_VIEW, requestMessage);
if (constructBlackList(env) > 0) {
setNextState(DeleteLogicalViewState.CLEAN_DATANODE_SCHEMA_CACHE);
break;
@@ -108,22 +110,24 @@ protected Flow executeFromState(
return Flow.NO_MORE_STATE;
}
case CLEAN_DATANODE_SCHEMA_CACHE:
- LOGGER.info("Invalidate cache of view {}", requestMessage);
+ LOGGER.info(ProcedureMessages.INVALIDATE_CACHE_OF_VIEW, requestMessage);
invalidateCache(env);
break;
case DELETE_VIEW_SCHEMA:
- LOGGER.info("Delete view schemaengine of {}", requestMessage);
+ LOGGER.info(ProcedureMessages.DELETE_VIEW_SCHEMAENGINE_OF, requestMessage);
deleteViewSchema(env);
collectPayload4Pipe(env);
return Flow.NO_MORE_STATE;
default:
- setFailure(new ProcedureException("Unrecognized state " + state));
+ setFailure(new ProcedureException(ProcedureMessages.UNRECOGNIZED_STATE + state));
return Flow.NO_MORE_STATE;
}
return Flow.HAS_MORE_STATE;
} finally {
LOGGER.info(
- "DeleteLogicalView-[{}] costs {}ms", state, (System.currentTimeMillis() - startTime));
+ ProcedureMessages.DELETELOGICALVIEW_COSTS_MS,
+ state,
+ (System.currentTimeMillis() - startTime));
}
}
@@ -175,10 +179,12 @@ private void invalidateCache(final ConfigNodeProcedureEnv env) {
for (final TSStatus status : statusMap.values()) {
// all dataNodes must clear the related schemaengine cache
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.error("Failed to invalidate schemaengine cache of view {}", requestMessage);
+ LOGGER.error(
+ ProcedureMessages.FAILED_TO_INVALIDATE_SCHEMAENGINE_CACHE_OF_VIEW, requestMessage);
setFailure(
new ProcedureException(
- new MetadataException("Invalidate view schemaengine cache failed")));
+ new MetadataException(
+ ProcedureMessages.INVALIDATE_VIEW_SCHEMAENGINE_CACHE_FAILED)));
return;
}
}
@@ -341,8 +347,11 @@ protected void onAllReplicasetFailure(
new ProcedureException(
new MetadataException(
String.format(
- "Delete view %s failed when [%s] because failed to execute in all replicaset of schemaRegion %s. Failures: %s",
- requestMessage, taskName, consensusGroupId.id, printFailureMap()))));
+ ProcedureMessages.DELETE_VIEW_FAILED_WHEN_BECAUSE_FAILED_TO_EXECUTE_IN_ALL,
+ requestMessage,
+ taskName,
+ consensusGroupId.id,
+ printFailureMap()))));
interruptTask();
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteTimeSeriesProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteTimeSeriesProcedure.java
index a8f4b6d53c8a3..0b5e45b5ca1f5 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteTimeSeriesProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteTimeSeriesProcedure.java
@@ -31,6 +31,7 @@
import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext;
import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeDeleteTimeSeriesPlan;
import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeEnrichedPlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure;
@@ -107,7 +108,8 @@ protected Flow executeFromState(
try {
switch (state) {
case CONSTRUCT_BLACK_LIST:
- LOGGER.info("Construct schemaEngine black list of timeSeries {}", requestMessage);
+ LOGGER.info(
+ ProcedureMessages.CONSTRUCT_SCHEMAENGINE_BLACK_LIST_OF_TIMESERIES, requestMessage);
if (constructBlackList(env) > 0) {
setNextState(DeleteTimeSeriesState.CLEAN_DATANODE_SCHEMA_CACHE);
break;
@@ -122,27 +124,29 @@ protected Flow executeFromState(
return Flow.NO_MORE_STATE;
}
case CLEAN_DATANODE_SCHEMA_CACHE:
- LOGGER.info("Invalidate cache of timeSeries {}", requestMessage);
+ LOGGER.info(ProcedureMessages.INVALIDATE_CACHE_OF_TIMESERIES, requestMessage);
invalidateCache(env, patternTreeBytes, requestMessage, this::setFailure, true);
setNextState(DeleteTimeSeriesState.DELETE_DATA);
break;
case DELETE_DATA:
- LOGGER.info("Delete data of timeSeries {}", requestMessage);
+ LOGGER.info(ProcedureMessages.DELETE_DATA_OF_TIMESERIES, requestMessage);
deleteData(env);
break;
case DELETE_TIMESERIES_SCHEMA:
- LOGGER.info("Delete timeSeries schemaEngine of {}", requestMessage);
+ LOGGER.info(ProcedureMessages.DELETE_TIMESERIES_SCHEMAENGINE_OF, requestMessage);
deleteTimeSeriesSchema(env);
collectPayload4Pipe(env);
return Flow.NO_MORE_STATE;
default:
- setFailure(new ProcedureException("Unrecognized state " + state));
+ setFailure(new ProcedureException(ProcedureMessages.UNRECOGNIZED_STATE + state));
return Flow.NO_MORE_STATE;
}
return Flow.HAS_MORE_STATE;
} finally {
LOGGER.info(
- "DeleteTimeSeries-[{}] costs {}ms", state, (System.currentTimeMillis() - startTime));
+ ProcedureMessages.DELETETIMESERIES_COSTS_MS,
+ state,
+ (System.currentTimeMillis() - startTime));
}
}
@@ -205,9 +209,12 @@ public static void invalidateCache(
for (final TSStatus status : statusMap.values()) {
// All dataNodes must clear the related schemaEngine cache
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.error("Failed to invalidate schemaEngine cache of timeSeries {}", requestMessage);
+ LOGGER.error(
+ ProcedureMessages.FAILED_TO_INVALIDATE_SCHEMAENGINE_CACHE_OF_TIMESERIES,
+ requestMessage);
setFailure.accept(
- new ProcedureException(new MetadataException("Invalidate schemaEngine cache failed")));
+ new ProcedureException(
+ new MetadataException(ProcedureMessages.INVALIDATE_SCHEMAENGINE_CACHE_FAILED)));
return;
}
}
@@ -368,7 +375,7 @@ public void deserialize(final ByteBuffer byteBuffer) {
setPatternTree(PathPatternTree.deserialize(byteBuffer));
if (getCurrentState() == DeleteTimeSeriesState.CLEAN_DATANODE_SCHEMA_CACHE
|| getCurrentState() == DeleteTimeSeriesState.DELETE_DATA) {
- LOGGER.info("Successfully restored, will set mods to the data regions anyway");
+ LOGGER.info(ProcedureMessages.SUCCESSFULLY_RESTORED_WILL_SET_MODS_TO_THE_DATA_REGIONS_ANYWAY);
}
if (byteBuffer.hasRemaining()) {
mayDeleteAudit = ReadWriteIOUtils.readBoolean(byteBuffer);
@@ -435,7 +442,7 @@ protected void onAllReplicasetFailure(
new ProcedureException(
new MetadataException(
String.format(
- "Delete time series %s failed when [%s] because failed to execute in all replicaset of %s %s. Failures: %s",
+ ProcedureMessages.DELETE_TIME_SERIES_FAILED_WHEN_BECAUSE_FAILED_TO_EXECUTE_IN,
requestMessage,
taskName,
consensusGroupId.type,
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SchemaUtils.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SchemaUtils.java
index f5dcae9e73918..4b8d0a533afe3 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SchemaUtils.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SchemaUtils.java
@@ -34,6 +34,7 @@
import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager;
import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext;
import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.consensus.exception.ConsensusException;
@@ -128,8 +129,12 @@ protected void onAllReplicasetFailure(
exception[0] =
new MetadataException(
String.format(
- "Failed to execute in all replicaset of schemaRegion %s when checking the template %s on %s. Failure nodes: %s",
- consensusGroupId.id, template, patternTree, dataNodeLocationSet));
+ ProcedureMessages
+ .FAILED_TO_EXECUTE_IN_ALL_REPLICASET_OF_SCHEMAREGION_WHEN_CHECKING_2,
+ consensusGroupId.id,
+ template,
+ patternTree,
+ dataNodeLocationSet));
interruptTask();
}
};
@@ -212,8 +217,11 @@ protected void onAllReplicasetFailure(
exception[0] =
new MetadataException(
String.format(
- "Failed to execute in all replicaset of schemaRegion %s when checking templates on path %s. Failures: %s",
- consensusGroupId.id, deleteDatabasePatternPaths, printFailureMap()));
+ ProcedureMessages
+ .FAILED_TO_EXECUTE_IN_ALL_REPLICASET_OF_SCHEMAREGION_WHEN_CHECKING,
+ consensusGroupId.id,
+ deleteDatabasePatternPaths,
+ printFailureMap()));
interruptTask();
}
};
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SetTTLProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SetTTLProcedure.java
index 3e1af7dd1f55b..b90f2df87d5c3 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SetTTLProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SetTTLProcedure.java
@@ -30,6 +30,8 @@
import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan;
import org.apache.iotdb.confignode.consensus.request.write.database.SetTTLPlan;
import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeEnrichedPlan;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure;
@@ -79,7 +81,8 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, SetTTLState state)
return Flow.NO_MORE_STATE;
}
} finally {
- LOGGER.info("SetTTL-[{}] costs {}ms", state, (System.currentTimeMillis() - startTime));
+ LOGGER.info(
+ ProcedureMessages.SETTTL_COSTS_MS, state, (System.currentTimeMillis() - startTime));
}
}
@@ -91,12 +94,12 @@ private void setConfigNodeTTL(ConfigNodeProcedureEnv env) {
.getConsensusManager()
.write(isGeneratedByPipe ? new PipeEnrichedPlan(this.plan) : this.plan);
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
res.setMessage(e.getMessage());
}
if (res.code != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.info("Failed to execute plan {} because {}", plan, res.message);
+ LOGGER.info(ProcedureMessages.FAILED_TO_EXECUTE_PLAN_BECAUSE, plan, res.message);
setFailure(new ProcedureException(new IoTDBException(res)));
} else {
setNextState(SetTTLState.UPDATE_DATANODE_CACHE);
@@ -119,9 +122,10 @@ private void updateDataNodeTTL(ConfigNodeProcedureEnv env) {
for (TSStatus status : statusMap.values()) {
// all dataNodes must clear the related schemaengine cache
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.error("Failed to update ttl cache of dataNode.");
+ LOGGER.error(ProcedureMessages.FAILED_TO_UPDATE_TTL_CACHE_OF_DATANODE);
setFailure(
- new ProcedureException(new MetadataException("Update dataNode ttl cache failed")));
+ new ProcedureException(
+ new MetadataException(ProcedureMessages.UPDATE_DATANODE_TTL_CACHE_FAILED)));
return;
}
}
@@ -164,7 +168,7 @@ public void deserialize(ByteBuffer byteBuffer) {
ReadWriteIOUtils.readInt(byteBuffer);
this.plan = (SetTTLPlan) ConfigPhysicalPlan.Factory.create(byteBuffer);
} catch (IOException e) {
- LOGGER.error("IO error when deserialize setTTL plan.", e);
+ LOGGER.error(ProcedureMessages.IO_ERROR_WHEN_DESERIALIZE_SETTTL_PLAN, e);
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SetTemplateProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SetTemplateProcedure.java
index 27ccd77b03a39..55fffedad6145 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SetTemplateProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/SetTemplateProcedure.java
@@ -38,6 +38,8 @@
import org.apache.iotdb.confignode.consensus.request.write.template.CommitSetSchemaTemplatePlan;
import org.apache.iotdb.confignode.consensus.request.write.template.PreSetSchemaTemplatePlan;
import org.apache.iotdb.confignode.consensus.response.template.TemplateInfoResp;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure;
@@ -105,48 +107,58 @@ protected Flow executeFromState(final ConfigNodeProcedureEnv env, final SetTempl
switch (state) {
case VALIDATE_TEMPLATE_EXISTENCE:
LOGGER.info(
- "Check template existence set on path {} when try setting template {}",
+ ProcedureMessages.CHECK_TEMPLATE_EXISTENCE_SET_ON_PATH_WHEN_TRY_SETTING_TEMPLATE,
templateSetPath,
templateName);
validateTemplateExistence(env);
break;
case PRE_SET:
- LOGGER.info("Pre set schemaengine template {} on path {}", templateName, templateSetPath);
+ LOGGER.info(
+ ProcedureMessages.PRE_SET_SCHEMAENGINE_TEMPLATE_ON_PATH,
+ templateName,
+ templateSetPath);
preSetTemplate(env);
break;
case PRE_RELEASE:
LOGGER.info(
- "Pre release schemaengine template {} set on path {}", templateName, templateSetPath);
+ ProcedureMessages.PRE_RELEASE_SCHEMAENGINE_TEMPLATE_SET_ON_PATH,
+ templateName,
+ templateSetPath);
preReleaseTemplate(env);
break;
case VALIDATE_TIMESERIES_EXISTENCE:
LOGGER.info(
- "Check timeseries existence under path {} when try setting template {}",
+ ProcedureMessages.CHECK_TIMESERIES_EXISTENCE_UNDER_PATH_WHEN_TRY_SETTING_TEMPLATE,
templateSetPath,
templateName);
validateTimeSeriesExistence(env);
break;
case COMMIT_SET:
LOGGER.info(
- "Commit set schemaengine template {} on path {}", templateName, templateSetPath);
+ ProcedureMessages.COMMIT_SET_SCHEMAENGINE_TEMPLATE_ON_PATH,
+ templateName,
+ templateSetPath);
commitSetTemplate(env);
setNextState(SetTemplateState.COMMIT_RELEASE);
break;
case COMMIT_RELEASE:
LOGGER.info(
- "Commit release schemaengine template {} set on path {}",
+ ProcedureMessages.COMMIT_RELEASE_SCHEMAENGINE_TEMPLATE_SET_ON_PATH,
templateName,
templateSetPath);
commitReleaseTemplate(env);
return Flow.NO_MORE_STATE;
default:
- setFailure(new ProcedureException("Unrecognized SetTemplateState " + state));
+ setFailure(
+ new ProcedureException(ProcedureMessages.UNRECOGNIZED_SETTEMPLATESTATE + state));
return Flow.NO_MORE_STATE;
}
return Flow.HAS_MORE_STATE;
} finally {
LOGGER.info(
- "SetSchemaTemplate-[{}] costs {}ms", state, (System.currentTimeMillis() - startTime));
+ ProcedureMessages.SETSCHEMATEMPLATE_COSTS_MS,
+ state,
+ (System.currentTimeMillis() - startTime));
}
}
@@ -160,7 +172,7 @@ private void validateTemplateExistence(final ConfigNodeProcedureEnv env) {
(TemplateInfoResp)
env.getConfigManager().getConsensusManager().read(checkTemplateSettablePlan);
} catch (final ConsensusException e) {
- LOGGER.warn("Failed in the read API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_READ_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
final TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
res.setMessage(e.getMessage());
resp = new TemplateInfoResp();
@@ -188,7 +200,7 @@ private void preSetTemplate(final ConfigNodeProcedureEnv env) {
setNextState(SetTemplateState.PRE_RELEASE);
} else {
LOGGER.warn(
- "Failed to pre set template {} on path {} due to {}",
+ ProcedureMessages.FAILED_TO_PRE_SET_TEMPLATE_ON_PATH_DUE_TO,
templateName,
templateSetPath,
status.getMessage());
@@ -218,11 +230,13 @@ private void preReleaseTemplate(final ConfigNodeProcedureEnv env) {
for (final Map.Entry entry : statusMap.entrySet()) {
if (entry.getValue().getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.warn(
- "Failed to sync template {} pre-set info on path {} to DataNode {}",
+ ProcedureMessages.FAILED_TO_SYNC_TEMPLATE_PRE_SET_INFO_ON_PATH_TO,
templateName,
templateSetPath,
dataNodeLocationMap.get(entry.getKey()));
- setFailure(new ProcedureException(new MetadataException("Pre set template failed")));
+ setFailure(
+ new ProcedureException(
+ new MetadataException(ProcedureMessages.PRE_SET_TEMPLATE_FAILED)));
return;
}
}
@@ -237,7 +251,7 @@ private Template getTemplate(final ConfigNodeProcedureEnv env) {
(TemplateInfoResp)
env.getConfigManager().getConsensusManager().read(getSchemaTemplatePlan);
} catch (final ConsensusException e) {
- LOGGER.warn("Failed in the read API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_READ_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
final TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
res.setMessage(e.getMessage());
templateResp = new TemplateInfoResp();
@@ -327,7 +341,8 @@ protected void onAllReplicasetFailure(
new ProcedureException(
new MetadataException(
String.format(
- "Set template %s to %s failed when [check time series existence on DataNode] because "
+ ProcedureMessages
+ .SET_TEMPLATE_TO_FAILED_WHEN_CHECK_TIME_SERIES_EXISTENCE_ON
+ "failed to check time series existence in all replicaset of schemaRegion %s. Failures: %s",
templateName,
templateSetPath,
@@ -370,7 +385,7 @@ private void commitSetTemplate(final ConfigNodeProcedureEnv env) {
setNextState(SetTemplateState.COMMIT_RELEASE);
} else {
LOGGER.warn(
- "Failed to commit set template {} on path {} due to {}",
+ ProcedureMessages.FAILED_TO_COMMIT_SET_TEMPLATE_ON_PATH_DUE_TO,
templateName,
templateSetPath,
status.getMessage());
@@ -400,7 +415,7 @@ private void commitReleaseTemplate(final ConfigNodeProcedureEnv env) {
for (final Map.Entry entry : statusMap.entrySet()) {
if (entry.getValue().getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.warn(
- "Failed to sync template {} commit-set info on path {} to DataNode {}",
+ ProcedureMessages.FAILED_TO_SYNC_TEMPLATE_COMMIT_SET_INFO_ON_PATH_TO,
templateName,
templateSetPath,
dataNodeLocationMap.get(entry.getKey()));
@@ -408,8 +423,11 @@ private void commitReleaseTemplate(final ConfigNodeProcedureEnv env) {
new ProcedureException(
new MetadataException(
String.format(
- "Failed to set schemaengine template %s on path %s because there's failure on DataNode %s",
- templateName, templateSetPath, dataNodeLocationMap.get(entry.getKey())))));
+ ProcedureMessages
+ .FAILED_TO_SET_SCHEMAENGINE_TEMPLATE_ON_PATH_BECAUSE_THERE_S,
+ templateName,
+ templateSetPath,
+ dataNodeLocationMap.get(entry.getKey())))));
return;
}
}
@@ -433,21 +451,21 @@ protected void rollbackState(final ConfigNodeProcedureEnv env, final SetTemplate
switch (state) {
case PRE_SET:
LOGGER.info(
- "Start rollback pre set schemaengine template {} on path {}",
+ ProcedureMessages.START_ROLLBACK_PRE_SET_SCHEMAENGINE_TEMPLATE_ON_PATH,
templateName,
templateSetPath);
rollbackPreSet(env);
break;
case PRE_RELEASE:
LOGGER.info(
- "Start rollback pre release schemaengine template {} on path {}",
+ ProcedureMessages.START_ROLLBACK_PRE_RELEASE_SCHEMAENGINE_TEMPLATE_ON_PATH,
templateName,
templateSetPath);
rollbackPreRelease(env);
break;
case COMMIT_SET:
LOGGER.info(
- "Start rollback commit set schemaengine template {} on path {}",
+ ProcedureMessages.START_ROLLBACK_COMMIT_SET_SCHEMAENGINE_TEMPLATE_ON_PATH,
templateName,
templateSetPath);
rollbackCommitSet(env);
@@ -455,7 +473,9 @@ protected void rollbackState(final ConfigNodeProcedureEnv env, final SetTemplate
}
} finally {
LOGGER.info(
- "Rollback SetTemplate-{} costs {}ms.", state, (System.currentTimeMillis() - startTime));
+ ProcedureMessages.ROLLBACK_SETTEMPLATE_COSTS_MS,
+ state,
+ (System.currentTimeMillis() - startTime));
}
}
@@ -472,7 +492,7 @@ private void rollbackPreSet(final ConfigNodeProcedureEnv env) {
}
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.warn(
- "Failed to rollback pre set template {} on path {} due to {}",
+ ProcedureMessages.FAILED_TO_ROLLBACK_PRE_SET_TEMPLATE_ON_PATH_DUE_TO,
templateName,
templateSetPath,
status.getMessage());
@@ -508,12 +528,13 @@ private void rollbackPreRelease(final ConfigNodeProcedureEnv env) {
// all dataNodes must clear the related template cache
if (entry.getValue().getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.error(
- "Failed to rollback pre release template info of template {} set on path {} on DataNode {}",
+ ProcedureMessages.FAILED_TO_ROLLBACK_PRE_RELEASE_TEMPLATE_INFO_OF_TEMPLATE_SET,
template.getName(),
templateSetPath,
dataNodeLocationMap.get(entry.getKey()));
setFailure(
- new ProcedureException(new MetadataException("Rollback pre release template failed")));
+ new ProcedureException(
+ new MetadataException(ProcedureMessages.ROLLBACK_PRE_RELEASE_TEMPLATE_FAILED)));
}
}
}
@@ -531,7 +552,7 @@ private void rollbackCommitSet(final ConfigNodeProcedureEnv env) {
}
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.warn(
- "Failed to rollback commit set template {} on path {} due to {}",
+ ProcedureMessages.FAILED_TO_ROLLBACK_COMMIT_SET_TEMPLATE_ON_PATH_DUE_TO,
templateName,
templateSetPath,
status.getMessage());
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/UnsetTemplateProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/UnsetTemplateProcedure.java
index 793e6fa6b181c..1fd7aefb33065 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/UnsetTemplateProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/UnsetTemplateProcedure.java
@@ -30,6 +30,7 @@
import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager;
import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure;
@@ -90,18 +91,19 @@ protected Flow executeFromState(final ConfigNodeProcedureEnv env, final UnsetTem
switch (state) {
case CONSTRUCT_BLACK_LIST:
LOGGER.info(
- "Construct schemaengine black list of template {} set on {}",
+ ProcedureMessages.CONSTRUCT_SCHEMAENGINE_BLACK_LIST_OF_TEMPLATE_SET_ON,
template.getName(),
path);
constructBlackList(env);
break;
case CLEAN_DATANODE_TEMPLATE_CACHE:
- LOGGER.info("Invalidate cache of template {} set on {}", template.getName(), path);
+ LOGGER.info(
+ ProcedureMessages.INVALIDATE_CACHE_OF_TEMPLATE_SET_ON, template.getName(), path);
invalidateCache(env);
break;
case CHECK_DATANODE_TEMPLATE_ACTIVATION:
LOGGER.info(
- "Check DataNode template activation of template {} set on {}",
+ ProcedureMessages.CHECK_DATANODE_TEMPLATE_ACTIVATION_OF_TEMPLATE_SET_ON,
template.getName(),
path);
if (isFailed()) {
@@ -115,16 +117,19 @@ protected Flow executeFromState(final ConfigNodeProcedureEnv env, final UnsetTem
}
break;
case UNSET_SCHEMA_TEMPLATE:
- LOGGER.info("Unset template {} on {}", template.getName(), path);
+ LOGGER.info(ProcedureMessages.UNSET_TEMPLATE_ON, template.getName(), path);
unsetTemplate(env);
return Flow.NO_MORE_STATE;
default:
- setFailure(new ProcedureException("Unrecognized state " + state));
+ setFailure(new ProcedureException(ProcedureMessages.UNRECOGNIZED_STATE + state));
return Flow.NO_MORE_STATE;
}
return Flow.HAS_MORE_STATE;
} finally {
- LOGGER.info("UnsetTemplate-[{}] costs {}ms", state, (System.currentTimeMillis() - startTime));
+ LOGGER.info(
+ ProcedureMessages.UNSETTEMPLATE_COSTS_MS,
+ state,
+ (System.currentTimeMillis() - startTime));
}
}
@@ -167,10 +172,11 @@ private void executeInvalidateCache(final ConfigNodeProcedureEnv env) throws Pro
// all dataNodes must clear the related template cache
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.error(
- "Failed to invalidate template cache of template {} set on {}",
+ ProcedureMessages.FAILED_TO_INVALIDATE_TEMPLATE_CACHE_OF_TEMPLATE_SET_ON,
template.getName(),
path);
- throw new ProcedureException(new MetadataException("Invalidate template cache failed"));
+ throw new ProcedureException(
+ new MetadataException(ProcedureMessages.INVALIDATE_TEMPLATE_CACHE_FAILED));
}
}
}
@@ -187,8 +193,11 @@ private boolean checkDataNodeTemplateActivation(final ConfigNodeProcedureEnv env
new ProcedureException(
new MetadataException(
String.format(
- "Unset template %s from %s failed when [check DataNode template activation] because %s",
- template.getName(), path, e.getMessage()))));
+ ProcedureMessages
+ .UNSET_TEMPLATE_FROM_FAILED_WHEN_CHECK_DATANODE_TEMPLATE_ACTIVATION_BECAUSE,
+ template.getName(),
+ path,
+ e.getMessage()))));
return false;
}
}
@@ -222,13 +231,14 @@ protected void rollbackState(
return;
} else {
LOGGER.error(
- "Failed to rollback pre unset template operation of template {} set on {}",
+ ProcedureMessages.FAILED_TO_ROLLBACK_PRE_UNSET_TEMPLATE_OPERATION_OF_TEMPLATE_SET,
template.getName(),
path);
rollbackException =
new ProcedureException(
new MetadataException(
- "Rollback template pre unset failed because of" + status.getMessage()));
+ ProcedureMessages.ROLLBACK_TEMPLATE_PRE_UNSET_FAILED_BECAUSE_OF
+ + status.getMessage()));
}
} catch (final ProcedureException e) {
rollbackException = e;
@@ -240,7 +250,8 @@ protected void rollbackState(
setFailure(
new ProcedureException(
new MetadataException(
- "Rollback unset template failed and the cluster template info management is strictly broken. Please try unset again.")));
+ ProcedureMessages
+ .ROLLBACK_UNSET_TEMPLATE_FAILED_AND_THE_CLUSTER_TEMPLATE_INFO_MANAGEMENT)));
}
}
@@ -263,8 +274,11 @@ private void executeRollbackInvalidateCache(ConfigNodeProcedureEnv env)
// all dataNodes must clear the related template cache
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.error(
- "Failed to rollback template cache of template {} set on {}", template.getName(), path);
- throw new ProcedureException(new MetadataException("Rollback template cache failed"));
+ ProcedureMessages.FAILED_TO_ROLLBACK_TEMPLATE_CACHE_OF_TEMPLATE_SET_ON,
+ template.getName(),
+ path);
+ throw new ProcedureException(
+ new MetadataException(ProcedureMessages.ROLLBACK_TEMPLATE_CACHE_FAILED));
}
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/AbstractAlterOrDropTableProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/AbstractAlterOrDropTableProcedure.java
index fe92c802ce687..7cf1ff1c24f83 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/AbstractAlterOrDropTableProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/AbstractAlterOrDropTableProcedure.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.commons.exception.MetadataException;
import org.apache.iotdb.commons.schema.table.TsTable;
import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure;
@@ -96,14 +97,15 @@ protected void preRelease(final ConfigNodeProcedureEnv env, final @Nullable Stri
if (!failedResults.isEmpty()) {
// All dataNodes must clear the related schema cache
LOGGER.warn(
- "Failed to pre-release {} for table {}.{} to DataNode, failure results: {}",
+ ProcedureMessages.FAILED_TO_PRE_RELEASE_FOR_TABLE_TO_DATANODE_FAILURE_RESULTS,
getActionMessage(),
database,
table.getTableName(),
failedResults);
setFailure(
new ProcedureException(
- new MetadataException("Pre-release " + getActionMessage() + " failed")));
+ new MetadataException(
+ ProcedureMessages.PRE_RELEASE + getActionMessage() + " failed")));
}
}
@@ -117,7 +119,7 @@ protected void commitRelease(final ConfigNodeProcedureEnv env, final @Nullable S
database, table.getTableName(), env.getConfigManager(), oldName);
if (!failedResults.isEmpty()) {
LOGGER.warn(
- "Failed to {} for table {}.{} to DataNode, failure results: {}",
+ ProcedureMessages.FAILED_TO_FOR_TABLE_TO_DATANODE_FAILURE_RESULTS,
getActionMessage(),
database,
table.getTableName(),
@@ -143,14 +145,15 @@ protected void rollbackPreRelease(
if (!failedResults.isEmpty()) {
// All dataNodes must clear the related schema cache
LOGGER.warn(
- "Failed to rollback pre-release {} for table {}.{} info to DataNode, failure results: {}",
+ ProcedureMessages.FAILED_TO_ROLLBACK_PRE_RELEASE_FOR_TABLE_INFO_TO_DATANODE,
getActionMessage(),
database,
table.getTableName(),
failedResults);
setFailure(
new ProcedureException(
- new MetadataException("Rollback pre-release " + getActionMessage() + " failed")));
+ new MetadataException(
+ ProcedureMessages.ROLLBACK_PRE_RELEASE + getActionMessage() + " failed")));
}
}
@@ -206,7 +209,7 @@ protected void onAllReplicasetFailure(
new ProcedureException(
new MetadataException(
String.format(
- "[%s] for %s.%s failed when [%s] because failed to execute in all replicaset of %s %s. Failure nodes: %s",
+ ProcedureMessages.FOR_FAILED_WHEN_BECAUSE_FAILED_TO_EXECUTE_IN_ALL_REPLICASET,
this.getClass().getSimpleName(),
database,
tableName,
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/AddTableColumnProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/AddTableColumnProcedure.java
index d5b99942721bb..44f9e2f07f9fe 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/AddTableColumnProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/AddTableColumnProcedure.java
@@ -27,6 +27,7 @@
import org.apache.iotdb.commons.schema.table.column.TsTableColumnSchemaUtil;
import org.apache.iotdb.confignode.consensus.request.write.table.AddTableColumnPlan;
import org.apache.iotdb.confignode.consensus.request.write.table.view.AddTableViewColumnPlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.impl.schema.table.view.AddViewColumnProcedure;
@@ -71,29 +72,35 @@ protected Flow executeFromState(final ConfigNodeProcedureEnv env, final AddTable
try {
switch (state) {
case COLUMN_CHECK:
- LOGGER.info("Column check for table {}.{} when adding column", database, tableName);
+ LOGGER.info(
+ ProcedureMessages.COLUMN_CHECK_FOR_TABLE_WHEN_ADDING_COLUMN, database, tableName);
columnCheck(env);
break;
case PRE_RELEASE:
- LOGGER.info("Pre release info of table {}.{} when adding column", database, tableName);
+ LOGGER.info(
+ ProcedureMessages.PRE_RELEASE_INFO_OF_TABLE_WHEN_ADDING_COLUMN, database, tableName);
preRelease(env);
break;
case ADD_COLUMN:
- LOGGER.info("Add column to table {}.{}", database, tableName);
+ LOGGER.info(ProcedureMessages.ADD_COLUMN_TO_TABLE, database, tableName);
addColumn(env);
break;
case COMMIT_RELEASE:
- LOGGER.info("Commit release info of table {}.{} when adding column", database, tableName);
+ LOGGER.info(
+ ProcedureMessages.COMMIT_RELEASE_INFO_OF_TABLE_WHEN_ADDING_COLUMN,
+ database,
+ tableName);
commitRelease(env);
return Flow.NO_MORE_STATE;
default:
- setFailure(new ProcedureException("Unrecognized AddTableColumnState " + state));
+ setFailure(
+ new ProcedureException(ProcedureMessages.UNRECOGNIZED_ADDTABLECOLUMNSTATE + state));
return Flow.NO_MORE_STATE;
}
return Flow.HAS_MORE_STATE;
} finally {
LOGGER.info(
- "AddTableColumn-{}.{}-{} costs {}ms",
+ ProcedureMessages.ADDTABLECOLUMN_COSTS_MS,
database,
tableName,
state,
@@ -155,20 +162,24 @@ protected void rollbackState(final ConfigNodeProcedureEnv env, final AddTableCol
switch (state) {
case ADD_COLUMN:
LOGGER.info(
- "Start rollback Add column to table {}.{} when adding column",
+ ProcedureMessages.START_ROLLBACK_ADD_COLUMN_TO_TABLE_WHEN_ADDING_COLUMN,
database,
table.getTableName());
rollbackAddColumn(env);
break;
case PRE_RELEASE:
LOGGER.info(
- "Start rollback pre release info of table {}.{}", database, table.getTableName());
+ ProcedureMessages.START_ROLLBACK_PRE_RELEASE_INFO_OF_TABLE,
+ database,
+ table.getTableName());
rollbackPreRelease(env);
break;
}
} finally {
LOGGER.info(
- "Rollback DropTable-{} costs {}ms.", state, (System.currentTimeMillis() - startTime));
+ ProcedureMessages.ROLLBACK_DROPTABLE_COSTS_MS,
+ state,
+ (System.currentTimeMillis() - startTime));
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/AlterTableColumnDataTypeProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/AlterTableColumnDataTypeProcedure.java
index b454fa38a5df6..12491f24a4867 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/AlterTableColumnDataTypeProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/AlterTableColumnDataTypeProcedure.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.commons.exception.MetadataException;
import org.apache.iotdb.commons.schema.table.TsTable;
import org.apache.iotdb.confignode.consensus.request.write.table.AlterColumnDataTypePlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.state.schema.AlterTableColumnDataTypeState;
@@ -79,34 +80,41 @@ protected Flow executeFromState(
switch (state) {
case CHECK_AND_INVALIDATE_COLUMN:
LOGGER.info(
- "Check and invalidate column {} in {}.{} when altering column data type",
+ ProcedureMessages.CHECK_AND_INVALIDATE_COLUMN_IN_WHEN_ALTERING_COLUMN_DATA_TYPE,
columnName,
database,
tableName);
checkAndPreAlterColumn(env);
break;
case PRE_RELEASE:
- LOGGER.info("Pre-release info of table {}.{} when altering column", database, tableName);
+ LOGGER.info(
+ ProcedureMessages.PRE_RELEASE_INFO_OF_TABLE_WHEN_ALTERING_COLUMN,
+ database,
+ tableName);
preRelease(env);
break;
case ALTER_TABLE_COLUMN_DATA_TYPE:
- LOGGER.info("Altering column {} in {}.{} on configNode", columnName, database, tableName);
+ LOGGER.info(
+ ProcedureMessages.ALTERING_COLUMN_IN_ON_CONFIGNODE, columnName, database, tableName);
alterColumnDataType(env);
break;
case COMMIT_RELEASE:
LOGGER.info(
- "Commit release info of table {}.{} when altering column", database, tableName);
+ ProcedureMessages.COMMIT_RELEASE_INFO_OF_TABLE_WHEN_ALTERING_COLUMN,
+ database,
+ tableName);
commitRelease(env);
return Flow.NO_MORE_STATE;
default:
setFailure(
- new ProcedureException("Unrecognized AlterTableColumnDataTypeProcedure " + state));
+ new ProcedureException(
+ ProcedureMessages.UNRECOGNIZED_ALTERTABLECOLUMNDATATYPEPROCEDURE + state));
return Flow.NO_MORE_STATE;
}
return Flow.HAS_MORE_STATE;
} finally {
LOGGER.info(
- "AlterTableColumnDataType-{}.{}-{} costs {}ms",
+ ProcedureMessages.ALTERTABLECOLUMNDATATYPE_COSTS_MS,
database,
tableName,
state,
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/CreateTableProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/CreateTableProcedure.java
index d0fbdb605d12f..05e0facb3018e 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/CreateTableProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/CreateTableProcedure.java
@@ -28,6 +28,7 @@
import org.apache.iotdb.confignode.consensus.request.write.table.PreCreateTablePlan;
import org.apache.iotdb.confignode.consensus.request.write.table.RollbackCreateTablePlan;
import org.apache.iotdb.confignode.exception.DatabaseNotExistsException;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure;
@@ -76,33 +77,35 @@ protected Flow executeFromState(final ConfigNodeProcedureEnv env, final CreateTa
try {
switch (state) {
case CHECK_TABLE_EXISTENCE:
- LOGGER.info("Check the existence of table {}.{}", database, table.getTableName());
+ LOGGER.info(
+ ProcedureMessages.CHECK_THE_EXISTENCE_OF_TABLE, database, table.getTableName());
checkTableExistence(env);
break;
case PRE_CREATE:
- LOGGER.info("Pre create table {}.{}", database, table.getTableName());
+ LOGGER.info(ProcedureMessages.PRE_CREATE_TABLE, database, table.getTableName());
preCreateTable(env);
break;
case PRE_RELEASE:
- LOGGER.info("Pre release table {}.{}", database, table.getTableName());
+ LOGGER.info(ProcedureMessages.PRE_RELEASE_TABLE, database, table.getTableName());
preReleaseTable(env);
break;
case COMMIT_CREATE:
- LOGGER.info("Commit create table {}.{}", database, table.getTableName());
+ LOGGER.info(ProcedureMessages.COMMIT_CREATE_TABLE, database, table.getTableName());
commitCreateTable(env);
break;
case COMMIT_RELEASE:
- LOGGER.info("Commit release table {}.{}", database, table.getTableName());
+ LOGGER.info(ProcedureMessages.COMMIT_RELEASE_TABLE, database, table.getTableName());
commitReleaseTable(env);
return Flow.NO_MORE_STATE;
default:
- setFailure(new ProcedureException("Unrecognized CreateTableState " + state));
+ setFailure(
+ new ProcedureException(ProcedureMessages.UNRECOGNIZED_CREATETABLESTATE + state));
return Flow.NO_MORE_STATE;
}
return Flow.HAS_MORE_STATE;
} finally {
LOGGER.info(
- "CreateTable-{}.{}-{} costs {}ms",
+ ProcedureMessages.CREATETABLE_COSTS_MS,
database,
table.getTableName(),
state,
@@ -119,7 +122,8 @@ protected void checkTableExistence(final ConfigNodeProcedureEnv env) {
setFailure(
new ProcedureException(
new IoTDBException(
- String.format("Table '%s.%s' already exists.", database, table.getTableName()),
+ String.format(
+ ProcedureMessages.TABLE_ALREADY_EXISTS, database, table.getTableName()),
TABLE_ALREADY_EXISTS.getStatusCode())));
} else {
final TDatabaseSchema schema =
@@ -153,11 +157,12 @@ private void preReleaseTable(final ConfigNodeProcedureEnv env) {
if (!failedResults.isEmpty()) {
// All dataNodes must clear the related schema cache
LOGGER.warn(
- "Failed to sync table {}.{} pre-create info to DataNode, failure results: {}",
+ ProcedureMessages.FAILED_TO_SYNC_TABLE_PRE_CREATE_INFO_TO_DATANODE_FAILURE,
database,
table.getTableName(),
failedResults);
- setFailure(new ProcedureException(new MetadataException("Pre create table failed")));
+ setFailure(
+ new ProcedureException(new MetadataException(ProcedureMessages.PRE_CREATE_TABLE_FAILED)));
return;
}
@@ -186,7 +191,7 @@ private void commitReleaseTable(final ConfigNodeProcedureEnv env) {
if (!failedResults.isEmpty()) {
LOGGER.warn(
- "Failed to sync table {}.{} commit-create info to DataNode {}, failure results: ",
+ ProcedureMessages.FAILED_TO_SYNC_TABLE_COMMIT_CREATE_INFO_TO_DATANODE_FAILURE,
database,
table.getTableName(),
failedResults);
@@ -205,17 +210,21 @@ protected void rollbackState(final ConfigNodeProcedureEnv env, final CreateTable
try {
switch (state) {
case PRE_CREATE:
- LOGGER.info("Start rollback pre create table {}.{}", database, table.getTableName());
+ LOGGER.info(
+ ProcedureMessages.START_ROLLBACK_PRE_CREATE_TABLE, database, table.getTableName());
rollbackCreate(env);
break;
case PRE_RELEASE:
- LOGGER.info("Start rollback pre release table {}.{}", database, table.getTableName());
+ LOGGER.info(
+ ProcedureMessages.START_ROLLBACK_PRE_RELEASE_TABLE, database, table.getTableName());
rollbackPreRelease(env);
break;
}
} finally {
LOGGER.info(
- "Rollback CreateTable-{} costs {}ms.", state, (System.currentTimeMillis() - startTime));
+ ProcedureMessages.ROLLBACK_CREATETABLE_COSTS_MS,
+ state,
+ (System.currentTimeMillis() - startTime));
}
}
@@ -224,7 +233,8 @@ protected void rollbackCreate(final ConfigNodeProcedureEnv env) {
SchemaUtils.executeInConsensusLayer(
new RollbackCreateTablePlan(database, table.getTableName()), env, LOGGER);
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.warn("Failed to rollback table creation {}.{}", database, table.getTableName());
+ LOGGER.warn(
+ ProcedureMessages.FAILED_TO_ROLLBACK_TABLE_CREATION, database, table.getTableName());
setFailure(new ProcedureException(new IoTDBException(status)));
}
}
@@ -237,11 +247,13 @@ private void rollbackPreRelease(final ConfigNodeProcedureEnv env) {
if (!failedResults.isEmpty()) {
// All dataNodes must clear the related schema cache
LOGGER.warn(
- "Failed to sync table {}.{} rollback-create info to DataNode {}, failure results: ",
+ ProcedureMessages.FAILED_TO_SYNC_TABLE_ROLLBACK_CREATE_INFO_TO_DATANODE_FAILURE,
database,
table.getTableName(),
failedResults);
- setFailure(new ProcedureException(new MetadataException("Rollback create table failed")));
+ setFailure(
+ new ProcedureException(
+ new MetadataException(ProcedureMessages.ROLLBACK_CREATE_TABLE_FAILED)));
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DeleteDevicesProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DeleteDevicesProcedure.java
index 89b81613172ef..66c1687d5d086 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DeleteDevicesProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DeleteDevicesProcedure.java
@@ -30,6 +30,7 @@
import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext;
import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeDeleteDevicesPlan;
import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeEnrichedPlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.manager.ClusterManager;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
@@ -100,11 +101,14 @@ protected Flow executeFromState(final ConfigNodeProcedureEnv env, final DeleteDe
try {
switch (state) {
case CHECK_TABLE_EXISTENCE:
- LOGGER.info("Check the existence of table {}.{}", database, tableName);
+ LOGGER.info(ProcedureMessages.CHECK_THE_EXISTENCE_OF_TABLE, database, tableName);
checkTableExistence(env);
break;
case CONSTRUCT_BLACK_LIST:
- LOGGER.info("Construct schemaEngine black list of devices in {}.{}", database, tableName);
+ LOGGER.info(
+ ProcedureMessages.CONSTRUCT_SCHEMAENGINE_BLACK_LIST_OF_DEVICES_IN,
+ database,
+ tableName);
constructBlackList(env);
if (deletedDevicesNum > 0) {
setNextState(CLEAN_DATANODE_SCHEMA_CACHE);
@@ -113,25 +117,28 @@ protected Flow executeFromState(final ConfigNodeProcedureEnv env, final DeleteDe
return Flow.NO_MORE_STATE;
}
case CLEAN_DATANODE_SCHEMA_CACHE:
- LOGGER.info("Invalidate cache of devices in {}.{}", database, tableName);
+ LOGGER.info(ProcedureMessages.INVALIDATE_CACHE_OF_DEVICES_IN, database, tableName);
invalidateCache(env);
break;
case DELETE_DATA:
- LOGGER.info("Delete data of devices in {}.{}", database, tableName);
+ LOGGER.info(ProcedureMessages.DELETE_DATA_OF_DEVICES_IN, database, tableName);
deleteData(env);
break;
case DELETE_DEVICE_SCHEMA:
- LOGGER.info("Delete devices in {}.{} in schemaEngine", database, tableName);
+ LOGGER.info(ProcedureMessages.DELETE_DEVICES_IN_IN_SCHEMAENGINE, database, tableName);
deleteDeviceSchema(env);
collectPayload4Pipe(env);
return Flow.NO_MORE_STATE;
default:
- setFailure(new ProcedureException("Unrecognized state " + state));
+ setFailure(new ProcedureException(ProcedureMessages.UNRECOGNIZED_STATE + state));
return Flow.NO_MORE_STATE;
}
return Flow.HAS_MORE_STATE;
} finally {
- LOGGER.info("DeleteDevices-[{}] costs {}ms", state, (System.currentTimeMillis() - startTime));
+ LOGGER.info(
+ ProcedureMessages.DELETEDEVICES_COSTS_MS,
+ state,
+ (System.currentTimeMillis() - startTime));
}
}
@@ -144,7 +151,7 @@ private void checkTableExistence(final ConfigNodeProcedureEnv env) {
setFailure(
new ProcedureException(
new IoTDBException(
- String.format("Table '%s.%s' not exists.", database, tableName),
+ String.format(ProcedureMessages.TABLE_NOT_EXISTS, database, tableName),
TABLE_NOT_EXISTS.getStatusCode())));
} else {
setNextState(CONSTRUCT_BLACK_LIST);
@@ -192,7 +199,8 @@ protected void onAllReplicasetFailure(
new ProcedureException(
new MetadataException(
String.format(
- "[%s] for %s.%s failed when construct black list for table because failed to execute in all replicaset of %s %s. Failures: %s",
+ ProcedureMessages
+ .FOR_FAILED_WHEN_CONSTRUCT_BLACK_LIST_FOR_TABLE_BECAUSE_FAILED,
this.getClass().getSimpleName(),
database,
tableName,
@@ -227,11 +235,12 @@ private void invalidateCache(final ConfigNodeProcedureEnv env) {
// All dataNodes must clear the related schemaEngine cache
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.error(
- "Failed to invalidate schemaEngine cache of devices in table {}.{}",
+ ProcedureMessages.FAILED_TO_INVALIDATE_SCHEMAENGINE_CACHE_OF_DEVICES_IN_TABLE,
database,
tableName);
setFailure(
- new ProcedureException(new MetadataException("Invalidate schemaEngine cache failed")));
+ new ProcedureException(
+ new MetadataException(ProcedureMessages.INVALIDATE_SCHEMAENGINE_CACHE_FAILED)));
return;
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DropTableColumnProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DropTableColumnProcedure.java
index 66e1789e9ba72..4e4d0e758bf7f 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DropTableColumnProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DropTableColumnProcedure.java
@@ -32,6 +32,7 @@
import org.apache.iotdb.confignode.consensus.request.write.table.PreDeleteColumnPlan;
import org.apache.iotdb.confignode.consensus.request.write.table.view.CommitDeleteViewColumnPlan;
import org.apache.iotdb.confignode.consensus.request.write.table.view.PreDeleteViewColumnPlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.impl.schema.SchemaUtils;
@@ -89,7 +90,7 @@ protected Flow executeFromState(
switch (state) {
case CHECK_AND_INVALIDATE_COLUMN:
LOGGER.info(
- "Check and invalidate column {} in {}.{} when dropping column",
+ ProcedureMessages.CHECK_AND_INVALIDATE_COLUMN_IN_WHEN_DROPPING_COLUMN,
columnName,
database,
tableName);
@@ -97,7 +98,7 @@ protected Flow executeFromState(
break;
case INVALIDATE_CACHE:
LOGGER.info(
- "Invalidating cache for column {} in {}.{} when dropping column",
+ ProcedureMessages.INVALIDATING_CACHE_FOR_COLUMN_IN_WHEN_DROPPING_COLUMN,
columnName,
database,
tableName);
@@ -105,24 +106,26 @@ protected Flow executeFromState(
break;
case EXECUTE_ON_REGIONS:
LOGGER.info(
- "Executing on region for column {} in {}.{} when dropping column",
+ ProcedureMessages.EXECUTING_ON_REGION_FOR_COLUMN_IN_WHEN_DROPPING_COLUMN,
columnName,
database,
tableName);
executeOnRegions(env);
break;
case DROP_COLUMN:
- LOGGER.info("Dropping column {} in {}.{} on configNode", columnName, database, tableName);
+ LOGGER.info(
+ ProcedureMessages.DROPPING_COLUMN_IN_ON_CONFIGNODE, columnName, database, tableName);
dropColumn(env);
return Flow.NO_MORE_STATE;
default:
- setFailure(new ProcedureException("Unrecognized DropTableColumnState " + state));
+ setFailure(
+ new ProcedureException(ProcedureMessages.UNRECOGNIZED_DROPTABLECOLUMNSTATE + state));
return Flow.NO_MORE_STATE;
}
return Flow.HAS_MORE_STATE;
} finally {
LOGGER.info(
- "DropTableColumn-{}.{}-{} costs {}ms",
+ ProcedureMessages.DROPTABLECOLUMN_COSTS_MS,
database,
tableName,
state,
@@ -160,7 +163,7 @@ private void invalidateCache(final ConfigNodeProcedureEnv env) {
// All dataNodes must clear the related schemaEngine cache
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.error(
- "Failed to invalidate {} column {}'s cache of table {}.{}",
+ ProcedureMessages.FAILED_TO_INVALIDATE_COLUMN_S_CACHE_OF_TABLE,
isAttributeColumn ? "attribute" : "measurement",
columnName,
database,
@@ -169,8 +172,10 @@ private void invalidateCache(final ConfigNodeProcedureEnv env) {
new ProcedureException(
new MetadataException(
String.format(
- "Invalidate column %s cache failed for table %s.%s",
- columnName, database, tableName))));
+ ProcedureMessages.INVALIDATE_COLUMN_CACHE_FAILED_FOR_TABLE,
+ columnName,
+ database,
+ tableName))));
return;
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DropTableProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DropTableProcedure.java
index 1f02014e9943b..ac3ca2ef54fe7 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DropTableProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DropTableProcedure.java
@@ -32,6 +32,7 @@
import org.apache.iotdb.confignode.consensus.request.write.table.PreDeleteTablePlan;
import org.apache.iotdb.confignode.consensus.request.write.table.view.CommitDeleteViewPlan;
import org.apache.iotdb.confignode.consensus.request.write.table.view.PreDeleteViewPlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.impl.schema.SchemaUtils;
@@ -79,34 +80,42 @@ protected Flow executeFromState(final ConfigNodeProcedureEnv env, final DropTabl
try {
switch (state) {
case CHECK_AND_INVALIDATE_TABLE:
- LOGGER.info("Check and invalidate table {}.{} when dropping table", database, tableName);
+ LOGGER.info(
+ ProcedureMessages.CHECK_AND_INVALIDATE_TABLE_WHEN_DROPPING_TABLE,
+ database,
+ tableName);
checkAndPreDeleteTable(env);
break;
case INVALIDATE_CACHE:
LOGGER.info(
- "Invalidating cache for table {}.{} when dropping table", database, tableName);
+ ProcedureMessages.INVALIDATING_CACHE_FOR_TABLE_WHEN_DROPPING_TABLE,
+ database,
+ tableName);
invalidateCache(env);
break;
case DELETE_DATA:
- LOGGER.info("Deleting data for table {}.{}", database, tableName);
+ LOGGER.info(ProcedureMessages.DELETING_DATA_FOR_TABLE, database, tableName);
deleteData(env);
break;
case DELETE_DEVICES:
- LOGGER.info("Deleting devices for table {}.{} when dropping table", database, tableName);
+ LOGGER.info(
+ ProcedureMessages.DELETING_DEVICES_FOR_TABLE_WHEN_DROPPING_TABLE,
+ database,
+ tableName);
deleteSchema(env);
break;
case DROP_TABLE:
- LOGGER.info("Dropping table {}.{} on configNode", database, tableName);
+ LOGGER.info(ProcedureMessages.DROPPING_TABLE_ON_CONFIGNODE, database, tableName);
dropTable(env);
return Flow.NO_MORE_STATE;
default:
- setFailure(new ProcedureException("Unrecognized DropTableState " + state));
+ setFailure(new ProcedureException(ProcedureMessages.UNRECOGNIZED_DROPTABLESTATE + state));
return Flow.NO_MORE_STATE;
}
return Flow.HAS_MORE_STATE;
} finally {
LOGGER.info(
- "DropTable-{}.{}-{} costs {}ms",
+ ProcedureMessages.DROPTABLE_COSTS_MS,
database,
tableName,
state,
@@ -142,9 +151,13 @@ private void invalidateCache(final ConfigNodeProcedureEnv env) {
for (final TSStatus status : statusMap.values()) {
// All dataNodes must clear the related schemaEngine cache
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.error("Failed to invalidate schemaEngine cache of table {}.{}", database, tableName);
+ LOGGER.error(
+ ProcedureMessages.FAILED_TO_INVALIDATE_SCHEMAENGINE_CACHE_OF_TABLE,
+ database,
+ tableName);
setFailure(
- new ProcedureException(new MetadataException("Invalidate schemaEngine cache failed")));
+ new ProcedureException(
+ new MetadataException(ProcedureMessages.INVALIDATE_SCHEMAENGINE_CACHE_FAILED)));
return;
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableColumnProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableColumnProcedure.java
index a2a6c72577cf9..11d10901a352a 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableColumnProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableColumnProcedure.java
@@ -25,6 +25,7 @@
import org.apache.iotdb.commons.schema.table.TsTable;
import org.apache.iotdb.confignode.consensus.request.write.table.RenameTableColumnPlan;
import org.apache.iotdb.confignode.consensus.request.write.table.view.RenameViewColumnPlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.impl.schema.table.view.RenameViewColumnProcedure;
@@ -73,30 +74,38 @@ protected Flow executeFromState(
try {
switch (state) {
case COLUMN_CHECK:
- LOGGER.info("Column check for table {}.{} when renaming column", database, tableName);
+ LOGGER.info(
+ ProcedureMessages.COLUMN_CHECK_FOR_TABLE_WHEN_RENAMING_COLUMN, database, tableName);
columnCheck(env);
break;
case PRE_RELEASE:
- LOGGER.info("Pre release info of table {}.{} when renaming column", database, tableName);
+ LOGGER.info(
+ ProcedureMessages.PRE_RELEASE_INFO_OF_TABLE_WHEN_RENAMING_COLUMN,
+ database,
+ tableName);
preRelease(env);
break;
case RENAME_COLUMN:
- LOGGER.info("Rename column to table {}.{} on config node", database, tableName);
+ LOGGER.info(ProcedureMessages.RENAME_COLUMN_TO_TABLE_ON_CONFIG_NODE, database, tableName);
renameColumn(env);
break;
case COMMIT_RELEASE:
LOGGER.info(
- "Commit release info of table {}.{} when renaming column", database, tableName);
+ ProcedureMessages.COMMIT_RELEASE_INFO_OF_TABLE_WHEN_RENAMING_COLUMN,
+ database,
+ tableName);
commitRelease(env);
return Flow.NO_MORE_STATE;
default:
- setFailure(new ProcedureException("Unrecognized RenameTableColumnState " + state));
+ setFailure(
+ new ProcedureException(
+ ProcedureMessages.UNRECOGNIZED_RENAMETABLECOLUMNSTATE + state));
return Flow.NO_MORE_STATE;
}
return Flow.HAS_MORE_STATE;
} finally {
LOGGER.info(
- "RenameTableColumn-{}.{}-{} costs {}ms",
+ ProcedureMessages.RENAMETABLECOLUMN_COSTS_MS,
database,
tableName,
state,
@@ -153,20 +162,22 @@ protected void rollbackState(final ConfigNodeProcedureEnv env, final RenameTable
switch (state) {
case RENAME_COLUMN:
LOGGER.info(
- "Start rollback Renaming column to table {}.{} on configNode",
+ ProcedureMessages.START_ROLLBACK_RENAMING_COLUMN_TO_TABLE_ON_CONFIGNODE,
database,
table.getTableName());
rollbackRenameColumn(env);
break;
case PRE_RELEASE:
LOGGER.info(
- "Start rollback pre release info of table {}.{}", database, table.getTableName());
+ ProcedureMessages.START_ROLLBACK_PRE_RELEASE_INFO_OF_TABLE,
+ database,
+ table.getTableName());
rollbackPreRelease(env);
break;
}
} finally {
LOGGER.info(
- "Rollback RenameTableColumn-{} costs {}ms.",
+ ProcedureMessages.ROLLBACK_RENAMETABLECOLUMN_COSTS_MS,
state,
(System.currentTimeMillis() - startTime));
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableProcedure.java
index 93d1035a7615c..ba5c379275bcb 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/RenameTableProcedure.java
@@ -25,6 +25,7 @@
import org.apache.iotdb.commons.schema.table.TsTable;
import org.apache.iotdb.confignode.consensus.request.write.table.RenameTablePlan;
import org.apache.iotdb.confignode.consensus.request.write.table.view.RenameViewPlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.impl.schema.table.view.RenameViewProcedure;
@@ -66,30 +67,35 @@ protected Flow executeFromState(final ConfigNodeProcedureEnv env, final RenameTa
try {
switch (state) {
case COLUMN_CHECK:
- LOGGER.info("Column check for table {}.{} when renaming table", database, tableName);
+ LOGGER.info(
+ ProcedureMessages.COLUMN_CHECK_FOR_TABLE_WHEN_RENAMING_TABLE, database, tableName);
tableCheck(env);
break;
case PRE_RELEASE:
- LOGGER.info("Pre release info of table {}.{} when renaming table", database, tableName);
+ LOGGER.info(
+ ProcedureMessages.PRE_RELEASE_INFO_OF_TABLE_WHEN_RENAMING_TABLE, database, tableName);
preRelease(env);
break;
case RENAME_TABLE:
- LOGGER.info("Rename column to table {}.{} on config node", database, tableName);
+ LOGGER.info(ProcedureMessages.RENAME_COLUMN_TO_TABLE_ON_CONFIG_NODE, database, tableName);
renameTable(env);
break;
case COMMIT_RELEASE:
LOGGER.info(
- "Commit release info of table {}.{} when renaming table", database, tableName);
+ ProcedureMessages.COMMIT_RELEASE_INFO_OF_TABLE_WHEN_RENAMING_TABLE,
+ database,
+ tableName);
commitRelease(env, tableName);
return Flow.NO_MORE_STATE;
default:
- setFailure(new ProcedureException("Unrecognized RenameTableState " + state));
+ setFailure(
+ new ProcedureException(ProcedureMessages.UNRECOGNIZED_RENAMETABLESTATE + state));
return Flow.NO_MORE_STATE;
}
return Flow.HAS_MORE_STATE;
} finally {
LOGGER.info(
- "RenameTable-{}.{}-{} costs {}ms",
+ ProcedureMessages.RENAMETABLE_COSTS_MS,
database,
tableName,
state,
@@ -146,18 +152,24 @@ protected void rollbackState(final ConfigNodeProcedureEnv env, final RenameTable
switch (state) {
case RENAME_TABLE:
LOGGER.info(
- "Start rollback Renaming table {}.{} on configNode", database, table.getTableName());
+ ProcedureMessages.START_ROLLBACK_RENAMING_TABLE_ON_CONFIGNODE,
+ database,
+ table.getTableName());
rollbackRenameTable(env);
break;
case PRE_RELEASE:
LOGGER.info(
- "Start rollback pre release info of table {}.{}", database, table.getTableName());
+ ProcedureMessages.START_ROLLBACK_PRE_RELEASE_INFO_OF_TABLE,
+ database,
+ table.getTableName());
rollbackPreRelease(env, tableName);
break;
}
} finally {
LOGGER.info(
- "Rollback RenameTable-{} costs {}ms.", state, (System.currentTimeMillis() - startTime));
+ ProcedureMessages.ROLLBACK_RENAMETABLE_COSTS_MS,
+ state,
+ (System.currentTimeMillis() - startTime));
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/SetTablePropertiesProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/SetTablePropertiesProcedure.java
index 138ae9c9b50ea..27e75e6c28c26 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/SetTablePropertiesProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/SetTablePropertiesProcedure.java
@@ -25,6 +25,7 @@
import org.apache.iotdb.commons.schema.table.TsTable;
import org.apache.iotdb.confignode.consensus.request.write.table.SetTablePropertiesPlan;
import org.apache.iotdb.confignode.consensus.request.write.table.view.SetViewPropertiesPlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.impl.schema.table.view.SetViewPropertiesProcedure;
@@ -81,35 +82,42 @@ protected Flow executeFromState(
case VALIDATE_TABLE:
validateTable(env);
LOGGER.info(
- "Validate table for table {}.{} when setting properties", database, tableName);
+ ProcedureMessages.VALIDATE_TABLE_FOR_TABLE_WHEN_SETTING_PROPERTIES,
+ database,
+ tableName);
if (!isFailed() && Objects.isNull(table)) {
LOGGER.info(
- "The updated table has the same properties with the original one. Skip the procedure.");
+ ProcedureMessages.THE_UPDATED_TABLE_HAS_THE_SAME_PROPERTIES_WITH_THE_ORIGINAL);
return Flow.NO_MORE_STATE;
}
break;
case PRE_RELEASE:
preRelease(env);
LOGGER.info(
- "Pre release info for table {}.{} when setting properties", database, tableName);
+ ProcedureMessages.PRE_RELEASE_INFO_FOR_TABLE_WHEN_SETTING_PROPERTIES,
+ database,
+ tableName);
break;
case SET_PROPERTIES:
setProperties(env);
- LOGGER.info("Set properties to table {}.{}", database, tableName);
+ LOGGER.info(ProcedureMessages.SET_PROPERTIES_TO_TABLE, database, tableName);
break;
case COMMIT_RELEASE:
commitRelease(env);
LOGGER.info(
- "Commit release info of table {}.{} when setting properties", database, tableName);
+ ProcedureMessages.COMMIT_RELEASE_INFO_OF_TABLE_WHEN_SETTING_PROPERTIES,
+ database,
+ tableName);
return Flow.NO_MORE_STATE;
default:
- setFailure(new ProcedureException("Unrecognized AddTableColumnState " + state));
+ setFailure(
+ new ProcedureException(ProcedureMessages.UNRECOGNIZED_ADDTABLECOLUMNSTATE + state));
return Flow.NO_MORE_STATE;
}
return Flow.HAS_MORE_STATE;
} finally {
LOGGER.info(
- "SetTableProperties-{}.{}-{} costs {}ms",
+ ProcedureMessages.SETTABLEPROPERTIES_COSTS_MS,
database,
tableName,
state,
@@ -176,20 +184,22 @@ protected void rollbackState(
switch (state) {
case PRE_RELEASE:
LOGGER.info(
- "Start rollback pre release info for table {}.{} when setting properties",
+ ProcedureMessages.START_ROLLBACK_PRE_RELEASE_INFO_FOR_TABLE_WHEN_SETTING_PROPERTIES,
database,
table.getTableName());
rollbackPreRelease(env);
break;
case SET_PROPERTIES:
LOGGER.info(
- "Start rollback set properties to table {}.{}", database, table.getTableName());
+ ProcedureMessages.START_ROLLBACK_SET_PROPERTIES_TO_TABLE,
+ database,
+ table.getTableName());
rollbackSetProperties(env);
break;
}
} finally {
LOGGER.info(
- "Rollback SetTableProperties-{} costs {}ms.",
+ ProcedureMessages.ROLLBACK_SETTABLEPROPERTIES_COSTS_MS,
state,
(System.currentTimeMillis() - startTime));
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/view/CreateTableViewProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/view/CreateTableViewProcedure.java
index c6defba4c8680..88a163157293a 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/view/CreateTableViewProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/view/CreateTableViewProcedure.java
@@ -27,6 +27,7 @@
import org.apache.iotdb.commons.schema.table.TsTable;
import org.apache.iotdb.confignode.consensus.request.write.table.view.PreCreateTableViewPlan;
import org.apache.iotdb.confignode.exception.DatabaseNotExistsException;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.persistence.schema.TreeDeviceViewFieldDetector;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
@@ -85,7 +86,7 @@ protected void checkTableExistence(final ConfigNodeProcedureEnv env) {
new ProcedureException(
new IoTDBException(
String.format(
- "Table '%s.%s' already exists.", database, table.getTableName()),
+ ProcedureMessages.TABLE_ALREADY_EXISTS, database, table.getTableName()),
TABLE_ALREADY_EXISTS.getStatusCode())));
return;
} else {
@@ -135,7 +136,8 @@ protected void rollbackCreate(final ConfigNodeProcedureEnv env) {
SchemaUtils.executeInConsensusLayer(
new PreCreateTableViewPlan(database, oldView, oldStatus), env, LOGGER);
if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.warn("Failed to rollback table creation {}.{}", database, table.getTableName());
+ LOGGER.warn(
+ ProcedureMessages.FAILED_TO_ROLLBACK_TABLE_CREATION, database, table.getTableName());
setFailure(new ProcedureException(new IoTDBException(status)));
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/AbstractOperateSubscriptionProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/AbstractOperateSubscriptionProcedure.java
index 0b246ac4ef7d5..f9ada3650f2b2 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/AbstractOperateSubscriptionProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/AbstractOperateSubscriptionProcedure.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.subscription.meta.consumer.ConsumerGroupMeta;
import org.apache.iotdb.commons.subscription.meta.topic.TopicMeta;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.persistence.subscription.SubscriptionInfo;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
@@ -80,12 +81,12 @@ protected AtomicReference acquireLockInternal(
@Override
protected ProcedureLockState acquireLock(ConfigNodeProcedureEnv configNodeProcedureEnv) {
- LOGGER.info("ProcedureId {} try to acquire subscription lock.", getProcId());
+ LOGGER.info(ProcedureMessages.PROCEDUREID_TRY_TO_ACQUIRE_SUBSCRIPTION_LOCK, getProcId());
subscriptionInfo = acquireLockInternal(configNodeProcedureEnv);
if (subscriptionInfo == null) {
- LOGGER.warn("ProcedureId {} failed to acquire subscription lock.", getProcId());
+ LOGGER.warn(ProcedureMessages.PROCEDUREID_FAILED_TO_ACQUIRE_SUBSCRIPTION_LOCK, getProcId());
} else {
- LOGGER.info("ProcedureId {} acquired subscription lock.", getProcId());
+ LOGGER.info(ProcedureMessages.PROCEDUREID_ACQUIRED_SUBSCRIPTION_LOCK, getProcId());
}
final ProcedureLockState procedureLockState = super.acquireLock(configNodeProcedureEnv);
@@ -93,21 +94,25 @@ protected ProcedureLockState acquireLock(ConfigNodeProcedureEnv configNodeProced
case LOCK_ACQUIRED:
if (subscriptionInfo == null) {
LOGGER.warn(
- "ProcedureId {}: LOCK_ACQUIRED. The following procedure should not be executed without subscription lock.",
+ ProcedureMessages
+ .PROCEDUREID_LOCK_ACQUIRED_THE_FOLLOWING_PROCEDURE_SHOULD_NOT_BE_EXECUTED_2,
getProcId());
} else {
LOGGER.info(
- "ProcedureId {}: LOCK_ACQUIRED. The following procedure should be executed with subscription lock.",
+ ProcedureMessages
+ .PROCEDUREID_LOCK_ACQUIRED_THE_FOLLOWING_PROCEDURE_SHOULD_BE_EXECUTED_WITH_3,
getProcId());
}
break;
case LOCK_EVENT_WAIT:
if (subscriptionInfo == null) {
LOGGER.warn(
- "ProcedureId {}: LOCK_EVENT_WAIT. Without acquiring subscription lock.", getProcId());
+ ProcedureMessages.PROCEDUREID_LOCK_EVENT_WAIT_WITHOUT_ACQUIRING_SUBSCRIPTION_LOCK,
+ getProcId());
} else {
LOGGER.info(
- "ProcedureId {}: LOCK_EVENT_WAIT. Subscription lock will be released.", getProcId());
+ ProcedureMessages.PROCEDUREID_LOCK_EVENT_WAIT_SUBSCRIPTION_LOCK_WILL_BE_RELEASED,
+ getProcId());
configNodeProcedureEnv
.getConfigManager()
.getSubscriptionManager()
@@ -119,12 +124,12 @@ protected ProcedureLockState acquireLock(ConfigNodeProcedureEnv configNodeProced
default:
if (subscriptionInfo == null) {
LOGGER.error(
- "ProcedureId {}: {}. Invalid lock state. Without acquiring subscription lock.",
+ ProcedureMessages.PROCEDUREID_INVALID_LOCK_STATE_WITHOUT_ACQUIRING_SUBSCRIPTION_LOCK,
getProcId(),
procedureLockState);
} else {
LOGGER.error(
- "ProcedureId {}: {}. Invalid lock state. Subscription lock will be released.",
+ ProcedureMessages.PROCEDUREID_INVALID_LOCK_STATE_SUBSCRIPTION_LOCK_WILL_BE_RELEASED,
getProcId(),
procedureLockState);
configNodeProcedureEnv
@@ -145,12 +150,16 @@ protected void releaseLock(ConfigNodeProcedureEnv configNodeProcedureEnv) {
if (subscriptionInfo == null) {
LOGGER.warn(
- "ProcedureId {} release lock. No need to release subscription lock.", getProcId());
+ ProcedureMessages.PROCEDUREID_RELEASE_LOCK_NO_NEED_TO_RELEASE_SUBSCRIPTION_LOCK,
+ getProcId());
} else {
- LOGGER.info("ProcedureId {} release lock. Subscription lock will be released.", getProcId());
+ LOGGER.info(
+ ProcedureMessages.PROCEDUREID_RELEASE_LOCK_SUBSCRIPTION_LOCK_WILL_BE_RELEASED,
+ getProcId());
if (this instanceof TopicMetaSyncProcedure
|| this instanceof ConsumerGroupMetaSyncProcedure) {
- LOGGER.info("Subscription meta sync procedure finished, updating last sync version.");
+ LOGGER.info(
+ ProcedureMessages.SUBSCRIPTION_META_SYNC_PROCEDURE_FINISHED_UPDATING_LAST_SYNC_VERSION);
configNodeProcedureEnv
.getConfigManager()
.getSubscriptionManager()
@@ -182,7 +191,8 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, OperateSubscriptionS
throws InterruptedException {
if (subscriptionInfo == null) {
LOGGER.warn(
- "ProcedureId {}: Subscription lock is not acquired, executeFromState({})'s execution will be skipped.",
+ ProcedureMessages
+ .PROCEDUREID_SUBSCRIPTION_LOCK_IS_NOT_ACQUIRED_EXECUTEFROMSTATE_S_EXECUTION_WILL,
getProcId(),
state);
return Flow.NO_MORE_STATE;
@@ -192,7 +202,8 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, OperateSubscriptionS
switch (state) {
case VALIDATE:
if (!executeFromValidate(env)) {
- LOGGER.info("ProcedureId {}: {}", getProcId(), SKIP_SUBSCRIPTION_PROCEDURE_MESSAGE);
+ LOGGER.info(
+ ProcedureMessages.PROCEDUREID, getProcId(), SKIP_SUBSCRIPTION_PROCEDURE_MESSAGE);
// On client side, the message returned after the successful execution of the
// subscription command corresponding to this procedure is "Msg: The statement is
// executed successfully."
@@ -211,13 +222,14 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, OperateSubscriptionS
default:
throw new UnsupportedOperationException(
String.format(
- "Unknown state during executing operateSubscriptionProcedure, %s", state));
+ ProcedureMessages.UNKNOWN_STATE_DURING_EXECUTING_OPERATESUBSCRIPTIONPROCEDURE,
+ state));
}
} catch (Exception e) {
// Retry before rollback
if (getCycles() < RETRY_THRESHOLD) {
LOGGER.warn(
- "ProcedureId {}: Encountered error when trying to {} at state [{}], retry [{}/{}]",
+ ProcedureMessages.PROCEDUREID_ENCOUNTERED_ERROR_WHEN_TRYING_TO_AT_STATE_RETRY,
getProcId(),
getOperation(),
state,
@@ -229,7 +241,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, OperateSubscriptionS
TimeUnit.MILLISECONDS.sleep(3000L);
} else {
LOGGER.warn(
- "ProcedureId {}: All {} retries failed when trying to {} at state [{}], will rollback...",
+ ProcedureMessages.PROCEDUREID_ALL_RETRIES_FAILED_WHEN_TRYING_TO_AT_STATE_WILL,
getProcId(),
RETRY_THRESHOLD,
getOperation(),
@@ -238,8 +250,10 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, OperateSubscriptionS
setFailure(
new ProcedureException(
String.format(
- "ProcedureId %s: Fail to %s because %s",
- getProcId(), getOperation().name(), e.getMessage())));
+ ProcedureMessages.PROCEDUREID_FAIL_TO_BECAUSE,
+ getProcId(),
+ getOperation().name(),
+ e.getMessage())));
}
}
@@ -251,7 +265,8 @@ protected void rollbackState(ConfigNodeProcedureEnv env, OperateSubscriptionStat
throws IOException, InterruptedException, ProcedureException {
if (subscriptionInfo == null) {
LOGGER.warn(
- "ProcedureId {}: Subscription lock is not acquired, rollbackState({})'s execution will be skipped.",
+ ProcedureMessages
+ .PROCEDUREID_SUBSCRIPTION_LOCK_IS_NOT_ACQUIRED_ROLLBACKSTATE_S_EXECUTION_WILL,
getProcId(),
state);
return;
@@ -265,7 +280,7 @@ protected void rollbackState(ConfigNodeProcedureEnv env, OperateSubscriptionStat
isRollbackFromValidateSuccessful = true;
} catch (Exception e) {
LOGGER.warn(
- "ProcedureId {}: Failed to rollback from state [{}], because {}",
+ ProcedureMessages.PROCEDUREID_FAILED_TO_ROLLBACK_FROM_STATE_BECAUSE,
getProcId(),
state,
e.getMessage(),
@@ -280,7 +295,7 @@ protected void rollbackState(ConfigNodeProcedureEnv env, OperateSubscriptionStat
}
} catch (Exception e) {
LOGGER.warn(
- "ProcedureId {}: Failed to rollback from state [{}], because {}",
+ ProcedureMessages.PROCEDUREID_FAILED_TO_ROLLBACK_FROM_STATE_BECAUSE,
getProcId(),
state,
e.getMessage(),
@@ -294,7 +309,7 @@ protected void rollbackState(ConfigNodeProcedureEnv env, OperateSubscriptionStat
isRollbackFromOperateOnDataNodesSuccessful = true;
} catch (Exception e) {
LOGGER.warn(
- "ProcedureId {}: Failed to rollback from state [{}], because {}",
+ ProcedureMessages.PROCEDUREID_FAILED_TO_ROLLBACK_FROM_STATE_BECAUSE,
getProcId(),
state,
e.getMessage(),
@@ -303,7 +318,9 @@ protected void rollbackState(ConfigNodeProcedureEnv env, OperateSubscriptionStat
break;
default:
throw new UnsupportedOperationException(
- String.format("Unknown state during rollback operateSubscriptionProcedure, %s", state));
+ String.format(
+ ProcedureMessages.UNKNOWN_STATE_DURING_ROLLBACK_OPERATESUBSCRIPTIONPROCEDURE,
+ state));
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/consumer/AlterConsumerGroupProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/consumer/AlterConsumerGroupProcedure.java
index 69017422505cf..faf9cce8a68ff 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/consumer/AlterConsumerGroupProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/consumer/AlterConsumerGroupProcedure.java
@@ -23,6 +23,8 @@
import org.apache.iotdb.commons.subscription.meta.consumer.ConsumerGroupMeta;
import org.apache.iotdb.commons.utils.TestOnly;
import org.apache.iotdb.confignode.consensus.request.write.subscription.consumer.AlterConsumerGroupPlan;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.persistence.subscription.SubscriptionInfo;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.impl.subscription.AbstractOperateSubscriptionProcedure;
@@ -76,7 +78,7 @@ protected SubscriptionOperation getOperation() {
@Override
public boolean executeFromValidate(ConfigNodeProcedureEnv env) throws SubscriptionException {
- LOGGER.info("AlterConsumerGroupProcedure: executeFromValidate, try to validate");
+ LOGGER.info(ProcedureMessages.ALTERCONSUMERGROUPPROCEDURE_EXECUTEFROMVALIDATE_TRY_TO_VALIDATE);
validateAndGetOldAndNewMeta(env);
return true;
@@ -93,7 +95,7 @@ protected void validateAndGetOldAndNewMeta(ConfigNodeProcedureEnv env) {
public void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env)
throws SubscriptionException {
LOGGER.info(
- "AlterConsumerGroupProcedure: executeFromOperateOnConfigNodes({})",
+ ProcedureMessages.ALTERCONSUMERGROUPPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES,
updatedConsumerGroupMeta.getConsumerGroupId());
TSStatus response;
@@ -103,7 +105,7 @@ public void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env)
.getConsensusManager()
.write(new AlterConsumerGroupPlan(updatedConsumerGroupMeta));
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response =
new TSStatus(TSStatusCode.ALTER_CONSUMER_ERROR.getStatusCode())
.setMessage(e.getMessage());
@@ -111,8 +113,9 @@ public void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env)
if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
throw new SubscriptionException(
String.format(
- "Failed to alter consumer group %s on config nodes, because %s",
- updatedConsumerGroupMeta.getConsumerGroupId(), response));
+ ProcedureMessages.FAILED_TO_ALTER_CONSUMER_GROUP_ON_CONFIG_NODES_BECAUSE,
+ updatedConsumerGroupMeta.getConsumerGroupId(),
+ response));
}
}
@@ -120,7 +123,7 @@ public void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env)
public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env)
throws SubscriptionException, IOException {
LOGGER.info(
- "AlterConsumerGroupProcedure: executeFromOperateOnDataNodes({})",
+ ProcedureMessages.ALTERCONSUMERGROUPPROCEDURE_EXECUTEFROMOPERATEONDATANODES,
updatedConsumerGroupMeta.getConsumerGroupId());
final List statuses =
@@ -130,21 +133,23 @@ public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env)
// throw exception instead of logging warn, do not rely on metadata synchronization
throw new SubscriptionException(
String.format(
- "Failed to alter consumer group (%s -> %s) on data nodes, because %s",
- existingConsumerGroupMeta, updatedConsumerGroupMeta, statuses));
+ ProcedureMessages.FAILED_TO_ALTER_CONSUMER_GROUP_ON_DATA_NODES_BECAUSE,
+ existingConsumerGroupMeta,
+ updatedConsumerGroupMeta,
+ statuses));
}
}
@Override
public void rollbackFromValidate(ConfigNodeProcedureEnv env) {
- LOGGER.info("AlterConsumerGroupProcedure: rollbackFromValidate");
+ LOGGER.info(ProcedureMessages.ALTERCONSUMERGROUPPROCEDURE_ROLLBACKFROMVALIDATE);
}
@Override
public void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env)
throws SubscriptionException {
LOGGER.info(
- "AlterConsumerGroupProcedure: rollbackFromOperateOnConfigNodes({})",
+ ProcedureMessages.ALTERCONSUMERGROUPPROCEDURE_ROLLBACKFROMOPERATEONCONFIGNODES,
updatedConsumerGroupMeta.getConsumerGroupId());
TSStatus response;
@@ -154,7 +159,7 @@ public void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env)
.getConsensusManager()
.write(new AlterConsumerGroupPlan(existingConsumerGroupMeta));
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response =
new TSStatus(TSStatusCode.ALTER_CONSUMER_ERROR.getStatusCode())
.setMessage(e.getMessage());
@@ -162,15 +167,17 @@ public void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env)
if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
throw new SubscriptionException(
String.format(
- "Failed to rollback from altering consumer group (%s -> %s) on config nodes, because %s",
- existingConsumerGroupMeta, updatedConsumerGroupMeta, response));
+ ProcedureMessages.FAILED_TO_ROLLBACK_FROM_ALTERING_CONSUMER_GROUP_ON_CONFIG_NODES,
+ existingConsumerGroupMeta,
+ updatedConsumerGroupMeta,
+ response));
}
}
@Override
public void rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env)
throws SubscriptionException, IOException {
- LOGGER.info("AlterConsumerGroupProcedure: rollbackFromOperateOnDataNodes");
+ LOGGER.info(ProcedureMessages.ALTERCONSUMERGROUPPROCEDURE_ROLLBACKFROMOPERATEONDATANODES);
final List statuses =
env.pushSingleConsumerGroupOnDataNode(existingConsumerGroupMeta.serialize());
@@ -179,8 +186,10 @@ public void rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env)
// throw exception instead of logging warn, do not rely on metadata synchronization
throw new SubscriptionException(
String.format(
- "Failed to rollback from altering consumer group (%s -> %s) on data nodes, because %s",
- existingConsumerGroupMeta, updatedConsumerGroupMeta, statuses));
+ ProcedureMessages.FAILED_TO_ROLLBACK_FROM_ALTERING_CONSUMER_GROUP_ON_DATA_NODES,
+ existingConsumerGroupMeta,
+ updatedConsumerGroupMeta,
+ statuses));
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/consumer/runtime/ConsumerGroupMetaSyncProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/consumer/runtime/ConsumerGroupMetaSyncProcedure.java
index 93eb6c5a5fc35..20a1173ac60bb 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/consumer/runtime/ConsumerGroupMetaSyncProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/consumer/runtime/ConsumerGroupMetaSyncProcedure.java
@@ -23,6 +23,8 @@
import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.commons.subscription.meta.consumer.ConsumerGroupMeta;
import org.apache.iotdb.confignode.consensus.request.write.subscription.consumer.runtime.ConsumerGroupHandleMetaChangePlan;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.persistence.subscription.SubscriptionInfo;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.impl.subscription.AbstractOperateSubscriptionProcedure;
@@ -77,7 +79,7 @@ protected ProcedureLockState acquireLock(ConfigNodeProcedureEnv configNodeProced
// Skip by setting the subscriptionInfo to null
subscriptionInfo = null;
LOGGER.info(
- "ConsumerGroupMetaSyncProcedure: acquireLock, skip the procedure due to the last execution time {}",
+ ProcedureMessages.CONSUMERGROUPMETASYNCPROCEDURE_ACQUIRELOCK_SKIP_THE_PROCEDURE_DUE_TO,
LAST_EXECUTION_TIME.get());
return ProcedureLockState.LOCK_ACQUIRED;
}
@@ -92,7 +94,7 @@ protected SubscriptionOperation getOperation() {
@Override
public boolean executeFromValidate(ConfigNodeProcedureEnv env) {
- LOGGER.info("ConsumerGroupMetaSyncProcedure: executeFromValidate");
+ LOGGER.info(ProcedureMessages.CONSUMERGROUPMETASYNCPROCEDURE_EXECUTEFROMVALIDATE);
LAST_EXECUTION_TIME.set(System.currentTimeMillis());
return true;
@@ -101,7 +103,7 @@ public boolean executeFromValidate(ConfigNodeProcedureEnv env) {
@Override
public void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env)
throws SubscriptionException {
- LOGGER.info("ConsumerGroupMetaSyncProcedure: executeFromOperateOnConfigNodes");
+ LOGGER.info(ProcedureMessages.CONSUMERGROUPMETASYNCPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES);
final List consumerGroupMetaList =
new ArrayList<>(subscriptionInfo.get().getAllConsumerGroupMeta());
@@ -113,7 +115,7 @@ public void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env)
.getConsensusManager()
.write(new ConsumerGroupHandleMetaChangePlan(consumerGroupMetaList));
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
response.setMessage(e.getMessage());
}
@@ -125,32 +127,33 @@ public void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env)
@Override
public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env)
throws SubscriptionException, IOException {
- LOGGER.info("ConsumerGroupMetaSyncProcedure: executeFromOperateOnDataNodes");
+ LOGGER.info(ProcedureMessages.CONSUMERGROUPMETASYNCPROCEDURE_EXECUTEFROMOPERATEONDATANODES);
Map respMap = pushConsumerGroupMetaToDataNodes(env);
if (pushConsumerGroupMetaHasException(respMap)) {
throw new SubscriptionException(
- String.format("Failed to push consumer group meta to dataNodes, details: %s", respMap));
+ String.format(
+ ProcedureMessages.FAILED_TO_PUSH_CONSUMER_GROUP_META_TO_DATANODES_DETAILS, respMap));
}
}
@Override
public void rollbackFromValidate(ConfigNodeProcedureEnv env) {
- LOGGER.info("ConsumerGroupMetaSyncProcedure: rollbackFromValidate");
+ LOGGER.info(ProcedureMessages.CONSUMERGROUPMETASYNCPROCEDURE_ROLLBACKFROMVALIDATE);
// Do nothing
}
@Override
public void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) {
- LOGGER.info("ConsumerGroupMetaSyncProcedure: rollbackFromOperateOnConfigNodes");
+ LOGGER.info(ProcedureMessages.CONSUMERGROUPMETASYNCPROCEDURE_ROLLBACKFROMOPERATEONCONFIGNODES);
// Do nothing
}
@Override
public void rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env) {
- LOGGER.info("ConsumerGroupMetaSyncProcedure: rollbackFromOperateOnDataNodes");
+ LOGGER.info(ProcedureMessages.CONSUMERGROUPMETASYNCPROCEDURE_ROLLBACKFROMOPERATEONDATANODES);
// Do nothing
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/AbstractOperateSubscriptionAndPipeProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/AbstractOperateSubscriptionAndPipeProcedure.java
index de90951262cf0..5c77fa3a5e4a3 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/AbstractOperateSubscriptionAndPipeProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/AbstractOperateSubscriptionAndPipeProcedure.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.confignode.procedure.impl.subscription.subscription;
import org.apache.iotdb.commons.pipe.agent.task.meta.PipeMeta;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.persistence.pipe.PipeTaskInfo;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.impl.subscription.AbstractOperateSubscriptionProcedure;
@@ -47,14 +48,15 @@ public abstract class AbstractOperateSubscriptionAndPipeProcedure
@Override
protected ProcedureLockState acquireLock(ConfigNodeProcedureEnv configNodeProcedureEnv) {
- LOGGER.info("ProcedureId {} try to acquire subscription and pipe lock.", getProcId());
+ LOGGER.info(
+ ProcedureMessages.PROCEDUREID_TRY_TO_ACQUIRE_SUBSCRIPTION_AND_PIPE_LOCK, getProcId());
pipeTaskInfo =
configNodeProcedureEnv.getConfigManager().getPipeManager().getPipeTaskCoordinator().lock();
if (pipeTaskInfo == null) {
- LOGGER.warn("ProcedureId {} failed to acquire pipe lock.", getProcId());
+ LOGGER.warn(ProcedureMessages.PROCEDUREID_FAILED_TO_ACQUIRE_PIPE_LOCK, getProcId());
} else {
- LOGGER.info("ProcedureId {} acquired pipe lock.", getProcId());
+ LOGGER.info(ProcedureMessages.PROCEDUREID_ACQUIRED_PIPE_LOCK, getProcId());
}
final ProcedureLockState procedureLockState = super.acquireLock(configNodeProcedureEnv);
@@ -63,19 +65,25 @@ protected ProcedureLockState acquireLock(ConfigNodeProcedureEnv configNodeProced
case LOCK_ACQUIRED:
if (pipeTaskInfo == null) {
LOGGER.warn(
- "ProcedureId {}: LOCK_ACQUIRED. The following procedure should not be executed without pipe lock.",
+ ProcedureMessages
+ .PROCEDUREID_LOCK_ACQUIRED_THE_FOLLOWING_PROCEDURE_SHOULD_NOT_BE_EXECUTED,
getProcId());
} else {
LOGGER.info(
- "ProcedureId {}: LOCK_ACQUIRED. The following procedure should be executed with subscription and pipe lock.",
+ ProcedureMessages
+ .PROCEDUREID_LOCK_ACQUIRED_THE_FOLLOWING_PROCEDURE_SHOULD_BE_EXECUTED_WITH_2,
getProcId());
}
break;
case LOCK_EVENT_WAIT:
if (pipeTaskInfo == null) {
- LOGGER.warn("ProcedureId {}: LOCK_EVENT_WAIT. Without acquiring pipe lock.", getProcId());
+ LOGGER.warn(
+ ProcedureMessages.PROCEDUREID_LOCK_EVENT_WAIT_WITHOUT_ACQUIRING_PIPE_LOCK,
+ getProcId());
} else {
- LOGGER.info("ProcedureId {}: LOCK_EVENT_WAIT. Pipe lock will be released.", getProcId());
+ LOGGER.info(
+ ProcedureMessages.PROCEDUREID_LOCK_EVENT_WAIT_PIPE_LOCK_WILL_BE_RELEASED,
+ getProcId());
configNodeProcedureEnv
.getConfigManager()
.getPipeManager()
@@ -87,12 +95,12 @@ protected ProcedureLockState acquireLock(ConfigNodeProcedureEnv configNodeProced
default:
if (pipeTaskInfo == null) {
LOGGER.error(
- "ProcedureId {}: {}. Invalid lock state. Without acquiring pipe lock.",
+ ProcedureMessages.PROCEDUREID_INVALID_LOCK_STATE_WITHOUT_ACQUIRING_PIPE_LOCK,
getProcId(),
procedureLockState);
} else {
LOGGER.error(
- "ProcedureId {}: {}. Invalid lock state. Pipe lock will be released.",
+ ProcedureMessages.PROCEDUREID_INVALID_LOCK_STATE_PIPE_LOCK_WILL_BE_RELEASED,
getProcId(),
procedureLockState);
configNodeProcedureEnv
@@ -112,9 +120,11 @@ protected void releaseLock(ConfigNodeProcedureEnv configNodeProcedureEnv) {
super.releaseLock(configNodeProcedureEnv);
if (pipeTaskInfo == null) {
- LOGGER.warn("ProcedureId {} release lock. No need to release pipe lock.", getProcId());
+ LOGGER.warn(
+ ProcedureMessages.PROCEDUREID_RELEASE_LOCK_NO_NEED_TO_RELEASE_PIPE_LOCK, getProcId());
} else {
- LOGGER.info("ProcedureId {} release lock. Pipe lock will be released.", getProcId());
+ LOGGER.info(
+ ProcedureMessages.PROCEDUREID_RELEASE_LOCK_PIPE_LOCK_WILL_BE_RELEASED, getProcId());
configNodeProcedureEnv.getConfigManager().getPipeManager().getPipeTaskCoordinator().unlock();
pipeTaskInfo = null;
}
@@ -134,7 +144,8 @@ protected Map pushMultiPipeMetaToDataNodes(
for (String pipeName : pipeNames) {
PipeMeta pipeMeta = pipeTaskInfo.get().getPipeMetaByPipeName(pipeName);
if (pipeMeta == null) {
- LOGGER.warn("Pipe {} not found in PipeTaskInfo, can not push its meta.", pipeName);
+ LOGGER.warn(
+ ProcedureMessages.PIPE_NOT_FOUND_IN_PIPETASKINFO_CAN_NOT_PUSH_ITS_META, pipeName);
continue;
}
pipeMetaBinaryList.add(copyAndFilterOutNonWorkingDataRegionPipeTasks(pipeMeta).serialize());
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/CreateSubscriptionProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/CreateSubscriptionProcedure.java
index a0855d7a09112..e149744cb455b 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/CreateSubscriptionProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/CreateSubscriptionProcedure.java
@@ -28,6 +28,8 @@
import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan;
import org.apache.iotdb.confignode.consensus.request.write.pipe.task.DropPipePlanV2;
import org.apache.iotdb.confignode.consensus.request.write.pipe.task.OperateMultiplePipesPlanV2;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.impl.pipe.AbstractOperatePipeProcedureV2;
import org.apache.iotdb.confignode.procedure.impl.pipe.task.CreatePipeProcedureV2;
@@ -85,7 +87,7 @@ protected SubscriptionOperation getOperation() {
@Override
protected boolean executeFromValidate(final ConfigNodeProcedureEnv env)
throws SubscriptionException {
- LOGGER.info("CreateSubscriptionProcedure: executeFromValidate");
+ LOGGER.info(ProcedureMessages.CREATESUBSCRIPTIONPROCEDURE_EXECUTEFROMVALIDATE);
alterConsumerGroupProcedure = null;
createPipeProcedures = new ArrayList<>();
@@ -143,7 +145,7 @@ protected boolean executeFromValidate(final ConfigNodeProcedureEnv env)
@Override
protected void executeFromOperateOnConfigNodes(final ConfigNodeProcedureEnv env)
throws SubscriptionException {
- LOGGER.info("CreateSubscriptionProcedure: executeFromOperateOnConfigNodes");
+ LOGGER.info(ProcedureMessages.CREATESUBSCRIPTIONPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES);
// Execute AlterConsumerGroupProcedure
alterConsumerGroupProcedure.executeFromOperateOnConfigNodes(env);
@@ -160,22 +162,23 @@ protected void executeFromOperateOnConfigNodes(final ConfigNodeProcedureEnv env)
.getConsensusManager()
.write(new OperateMultiplePipesPlanV2(createPipePlans));
} catch (final ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
response.setMessage(e.getMessage());
}
if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
throw new SubscriptionException(
String.format(
- "Failed to create subscription with request %s on config nodes, because %s",
- subscribeReq, response));
+ ProcedureMessages.FAILED_TO_CREATE_SUBSCRIPTION_WITH_REQUEST_ON_CONFIG_NODES_BECAUSE,
+ subscribeReq,
+ response));
}
}
@Override
protected void executeFromOperateOnDataNodes(final ConfigNodeProcedureEnv env)
throws SubscriptionException, IOException {
- LOGGER.info("CreateSubscriptionProcedure: executeFromOperateOnDataNodes");
+ LOGGER.info(ProcedureMessages.CREATESUBSCRIPTIONPROCEDURE_EXECUTEFROMOPERATEONDATANODES);
// Push consumer group meta to data nodes
alterConsumerGroupProcedure.executeFromOperateOnDataNodes(env);
@@ -192,20 +195,23 @@ protected void executeFromOperateOnDataNodes(final ConfigNodeProcedureEnv env)
// throw exception instead of logging warn, do not rely on metadata synchronization
throw new SubscriptionException(
String.format(
- "Failed to create pipes %s when creating subscription with request %s, details: %s, metadata will be synchronized later.",
- pipeNames, subscribeReq, exceptionMessage));
+ ProcedureMessages
+ .FAILED_TO_CREATE_PIPES_WHEN_CREATING_SUBSCRIPTION_WITH_REQUEST_DETAILS,
+ pipeNames,
+ subscribeReq,
+ exceptionMessage));
}
}
@Override
protected void rollbackFromValidate(final ConfigNodeProcedureEnv env) {
- LOGGER.info("CreateSubscriptionProcedure: rollbackFromValidate");
+ LOGGER.info(ProcedureMessages.CREATESUBSCRIPTIONPROCEDURE_ROLLBACKFROMVALIDATE);
}
@Override
protected void rollbackFromOperateOnConfigNodes(final ConfigNodeProcedureEnv env)
throws SubscriptionException {
- LOGGER.info("CreateSubscriptionProcedure: rollbackFromOperateOnConfigNodes");
+ LOGGER.info(ProcedureMessages.CREATESUBSCRIPTIONPROCEDURE_ROLLBACKFROMOPERATEONCONFIGNODES);
// Rollback CreatePipeProcedureV2s
final List dropPipePlans =
@@ -219,15 +225,17 @@ protected void rollbackFromOperateOnConfigNodes(final ConfigNodeProcedureEnv env
.getConsensusManager()
.write(new OperateMultiplePipesPlanV2(dropPipePlans));
} catch (final ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
response.setMessage(e.getMessage());
}
if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
throw new SubscriptionException(
String.format(
- "Failed to rollback creating subscription with request %s on config nodes, because %s",
- subscribeReq, response));
+ ProcedureMessages
+ .FAILED_TO_ROLLBACK_CREATING_SUBSCRIPTION_WITH_REQUEST_ON_CONFIG_NODES,
+ subscribeReq,
+ response));
}
// Rollback AlterConsumerGroupProcedure
@@ -237,7 +245,7 @@ protected void rollbackFromOperateOnConfigNodes(final ConfigNodeProcedureEnv env
@Override
protected void rollbackFromOperateOnDataNodes(final ConfigNodeProcedureEnv env)
throws SubscriptionException, IOException {
- LOGGER.info("CreateSubscriptionProcedure: rollbackFromOperateOnDataNodes");
+ LOGGER.info(ProcedureMessages.CREATESUBSCRIPTIONPROCEDURE_ROLLBACKFROMOPERATEONDATANODES);
// Push all pipe metas to datanode, may be time-consuming
final String exceptionMessage =
@@ -247,8 +255,10 @@ protected void rollbackFromOperateOnDataNodes(final ConfigNodeProcedureEnv env)
// throw exception instead of logging warn, do not rely on metadata synchronization
throw new SubscriptionException(
String.format(
- "Failed to rollback create pipes when creating subscription with request %s, because %s",
- subscribeReq, exceptionMessage));
+ ProcedureMessages
+ .FAILED_TO_ROLLBACK_CREATE_PIPES_WHEN_CREATING_SUBSCRIPTION_WITH_REQUEST,
+ subscribeReq,
+ exceptionMessage));
}
// Rollback AlterConsumerGroupProcedure
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/DropSubscriptionProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/DropSubscriptionProcedure.java
index 6f668f29c5dac..491df5e5d2f98 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/DropSubscriptionProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/subscription/DropSubscriptionProcedure.java
@@ -26,6 +26,8 @@
import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan;
import org.apache.iotdb.confignode.consensus.request.write.pipe.task.DropPipePlanV2;
import org.apache.iotdb.confignode.consensus.request.write.pipe.task.OperateMultiplePipesPlanV2;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.impl.pipe.AbstractOperatePipeProcedureV2;
import org.apache.iotdb.confignode.procedure.impl.pipe.task.DropPipeProcedureV2;
@@ -83,7 +85,7 @@ protected SubscriptionOperation getOperation() {
@Override
protected boolean executeFromValidate(final ConfigNodeProcedureEnv env)
throws SubscriptionException {
- LOGGER.info("DropSubscriptionProcedure: executeFromValidate");
+ LOGGER.info(ProcedureMessages.DROPSUBSCRIPTIONPROCEDURE_EXECUTEFROMVALIDATE);
alterConsumerGroupProcedure = null;
dropPipeProcedures = new ArrayList<>();
@@ -126,7 +128,7 @@ protected boolean executeFromValidate(final ConfigNodeProcedureEnv env)
@Override
protected void executeFromOperateOnConfigNodes(final ConfigNodeProcedureEnv env)
throws SubscriptionException {
- LOGGER.info("DropSubscriptionProcedure: executeFromOperateOnConfigNodes");
+ LOGGER.info(ProcedureMessages.DROPSUBSCRIPTIONPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES);
// Execute DropPipeProcedureV2s
final List dropPipePlans =
@@ -140,15 +142,16 @@ protected void executeFromOperateOnConfigNodes(final ConfigNodeProcedureEnv env)
.getConsensusManager()
.write(new OperateMultiplePipesPlanV2(dropPipePlans));
} catch (final ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
response.setMessage(e.getMessage());
}
if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
throw new SubscriptionException(
String.format(
- "Failed to drop subscription with request %s on config nodes, because %s",
- unsubscribeReq, response));
+ ProcedureMessages.FAILED_TO_DROP_SUBSCRIPTION_WITH_REQUEST_ON_CONFIG_NODES_BECAUSE,
+ unsubscribeReq,
+ response));
}
// Execute AlterConsumerGroupProcedure
@@ -158,7 +161,7 @@ protected void executeFromOperateOnConfigNodes(final ConfigNodeProcedureEnv env)
@Override
protected void executeFromOperateOnDataNodes(final ConfigNodeProcedureEnv env)
throws SubscriptionException, IOException {
- LOGGER.info("DropSubscriptionProcedure: executeFromOperateOnDataNodes");
+ LOGGER.info(ProcedureMessages.DROPSUBSCRIPTIONPROCEDURE_EXECUTEFROMOPERATEONDATANODES);
// Push pipe meta to data nodes
final List pipeNames =
@@ -172,8 +175,11 @@ protected void executeFromOperateOnDataNodes(final ConfigNodeProcedureEnv env)
// throw exception instead of logging warn, do not rely on metadata synchronization
throw new SubscriptionException(
String.format(
- "Failed to drop pipes %s when dropping subscription with request %s, because %s",
- pipeNames, unsubscribeReq, exceptionMessage));
+ ProcedureMessages
+ .FAILED_TO_DROP_PIPES_WHEN_DROPPING_SUBSCRIPTION_WITH_REQUEST_BECAUSE,
+ pipeNames,
+ unsubscribeReq,
+ exceptionMessage));
}
// Push consumer group meta to data nodes
@@ -182,13 +188,13 @@ protected void executeFromOperateOnDataNodes(final ConfigNodeProcedureEnv env)
@Override
protected void rollbackFromValidate(final ConfigNodeProcedureEnv env) {
- LOGGER.info("DropSubscriptionProcedure: rollbackFromLock");
+ LOGGER.info(ProcedureMessages.DROPSUBSCRIPTIONPROCEDURE_ROLLBACKFROMLOCK);
}
@Override
protected void rollbackFromOperateOnConfigNodes(final ConfigNodeProcedureEnv env)
throws SubscriptionException {
- LOGGER.info("DropSubscriptionProcedure: rollbackFromOperateOnConfigNodes");
+ LOGGER.info(ProcedureMessages.DROPSUBSCRIPTIONPROCEDURE_ROLLBACKFROMOPERATEONCONFIGNODES);
// Rollback AlterConsumerGroupProcedure
alterConsumerGroupProcedure.rollbackFromOperateOnConfigNodes(env);
@@ -199,7 +205,7 @@ protected void rollbackFromOperateOnConfigNodes(final ConfigNodeProcedureEnv env
@Override
protected void rollbackFromOperateOnDataNodes(final ConfigNodeProcedureEnv env)
throws SubscriptionException, IOException {
- LOGGER.info("DropSubscriptionProcedure: rollbackFromOperateOnDataNodes");
+ LOGGER.info(ProcedureMessages.DROPSUBSCRIPTIONPROCEDURE_ROLLBACKFROMOPERATEONDATANODES);
// Rollback AlterConsumerGroupProcedure
alterConsumerGroupProcedure.rollbackFromOperateOnDataNodes(env);
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/AlterTopicProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/AlterTopicProcedure.java
index 4faa2cfa0c17f..4a85db0458a9e 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/AlterTopicProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/AlterTopicProcedure.java
@@ -22,6 +22,8 @@
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.commons.subscription.meta.topic.TopicMeta;
import org.apache.iotdb.confignode.consensus.request.write.subscription.topic.AlterTopicPlan;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.persistence.subscription.SubscriptionInfo;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.impl.subscription.AbstractOperateSubscriptionProcedure;
@@ -85,7 +87,7 @@ protected SubscriptionOperation getOperation() {
@Override
public boolean executeFromValidate(ConfigNodeProcedureEnv env) throws SubscriptionException {
- LOGGER.info("AlterTopicProcedure: executeFromValidate");
+ LOGGER.info(ProcedureMessages.ALTERTOPICPROCEDURE_EXECUTEFROMVALIDATE);
subscriptionInfo.get().validateBeforeAlteringTopic(updatedTopicMeta);
@@ -97,22 +99,25 @@ public boolean executeFromValidate(ConfigNodeProcedureEnv env) throws Subscripti
@Override
public void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env)
throws SubscriptionException {
- LOGGER.info("AlterTopicProcedure: executeFromOperateOnConfigNodes, try to alter topic");
+ LOGGER.info(
+ ProcedureMessages.ALTERTOPICPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES_TRY_TO_ALTER_TOPIC);
TSStatus response;
try {
response =
env.getConfigManager().getConsensusManager().write(new AlterTopicPlan(updatedTopicMeta));
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response =
new TSStatus(TSStatusCode.ALTER_TOPIC_ERROR.getStatusCode()).setMessage(e.getMessage());
}
if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
throw new SubscriptionException(
String.format(
- "Failed to alter topic (%s -> %s) on config nodes, because %s",
- existedTopicMeta, updatedTopicMeta, response));
+ ProcedureMessages.FAILED_TO_ALTER_TOPIC_ON_CONFIG_NODES_BECAUSE,
+ existedTopicMeta,
+ updatedTopicMeta,
+ response));
}
}
@@ -120,7 +125,8 @@ public void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env)
public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env)
throws SubscriptionException, IOException {
LOGGER.info(
- "AlterTopicProcedure: executeFromOperateOnDataNodes({})", updatedTopicMeta.getTopicName());
+ ProcedureMessages.ALTERTOPICPROCEDURE_EXECUTEFROMOPERATEONDATANODES,
+ updatedTopicMeta.getTopicName());
final List statuses = env.pushSingleTopicOnDataNode(updatedTopicMeta.serialize());
if (RpcUtils.squashResponseStatusList(statuses).getCode()
@@ -128,21 +134,25 @@ public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env)
// throw exception instead of logging warn, do not rely on metadata synchronization
throw new SubscriptionException(
String.format(
- "Failed to alter topic (%s -> %s) on data nodes, because %s",
- existedTopicMeta, updatedTopicMeta, statuses));
+ ProcedureMessages.FAILED_TO_ALTER_TOPIC_ON_DATA_NODES_BECAUSE,
+ existedTopicMeta,
+ updatedTopicMeta,
+ statuses));
}
}
@Override
public void rollbackFromValidate(ConfigNodeProcedureEnv env) {
- LOGGER.info("AlterTopicProcedure: rollbackFromValidate({})", updatedTopicMeta.getTopicName());
+ LOGGER.info(
+ ProcedureMessages.ALTERTOPICPROCEDURE_ROLLBACKFROMVALIDATE,
+ updatedTopicMeta.getTopicName());
}
@Override
public void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env)
throws SubscriptionException {
LOGGER.info(
- "AlterTopicProcedure: rollbackFromOperateOnConfigNodes({})",
+ ProcedureMessages.ALTERTOPICPROCEDURE_ROLLBACKFROMOPERATEONCONFIGNODES,
updatedTopicMeta.getTopicName());
TSStatus response;
@@ -150,15 +160,17 @@ public void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env)
response =
env.getConfigManager().getConsensusManager().write(new AlterTopicPlan(existedTopicMeta));
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response =
new TSStatus(TSStatusCode.ALTER_TOPIC_ERROR.getStatusCode()).setMessage(e.getMessage());
}
if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
throw new SubscriptionException(
String.format(
- "Failed to rollback from altering topic (%s -> %s) on config nodes, because %s",
- updatedTopicMeta, existedTopicMeta, response));
+ ProcedureMessages.FAILED_TO_ROLLBACK_FROM_ALTERING_TOPIC_ON_CONFIG_NODES_BECAUSE,
+ updatedTopicMeta,
+ existedTopicMeta,
+ response));
}
}
@@ -166,7 +178,8 @@ public void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env)
public void rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env)
throws SubscriptionException, IOException {
LOGGER.info(
- "AlterTopicProcedure: rollbackFromOperateOnDataNodes({})", updatedTopicMeta.getTopicName());
+ ProcedureMessages.ALTERTOPICPROCEDURE_ROLLBACKFROMOPERATEONDATANODES,
+ updatedTopicMeta.getTopicName());
final List statuses = env.pushSingleTopicOnDataNode(existedTopicMeta.serialize());
if (RpcUtils.squashResponseStatusList(statuses).getCode()
@@ -174,8 +187,10 @@ public void rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env)
// throw exception instead of logging warn, do not rely on metadata synchronization
throw new SubscriptionException(
String.format(
- "Failed to rollback from altering topic (%s -> %s) on data nodes, because %s",
- updatedTopicMeta, existedTopicMeta, statuses));
+ ProcedureMessages.FAILED_TO_ROLLBACK_FROM_ALTERING_TOPIC_ON_DATA_NODES_BECAUSE,
+ updatedTopicMeta,
+ existedTopicMeta,
+ statuses));
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/CreateTopicProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/CreateTopicProcedure.java
index c27205290a8c3..08ae98a578035 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/CreateTopicProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/CreateTopicProcedure.java
@@ -23,6 +23,8 @@
import org.apache.iotdb.commons.subscription.meta.topic.TopicMeta;
import org.apache.iotdb.confignode.consensus.request.write.subscription.topic.CreateTopicPlan;
import org.apache.iotdb.confignode.consensus.request.write.subscription.topic.DropTopicPlan;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.impl.subscription.AbstractOperateSubscriptionProcedure;
import org.apache.iotdb.confignode.procedure.impl.subscription.SubscriptionOperation;
@@ -67,7 +69,7 @@ protected SubscriptionOperation getOperation() {
@Override
protected boolean executeFromValidate(ConfigNodeProcedureEnv env) throws SubscriptionException {
- LOGGER.info("CreateTopicProcedure: executeFromValidate");
+ LOGGER.info(ProcedureMessages.CREATETOPICPROCEDURE_EXECUTEFROMVALIDATE);
// 1. check if the topic exists
if (!subscriptionInfo.get().validateBeforeCreatingTopic(createTopicReq)) {
@@ -86,27 +88,29 @@ protected boolean executeFromValidate(ConfigNodeProcedureEnv env) throws Subscri
@Override
protected void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env)
throws SubscriptionException {
- LOGGER.info("CreateTopicProcedure: executeFromOperateOnConfigNodes({})", topicMeta);
+ LOGGER.info(ProcedureMessages.CREATETOPICPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES, topicMeta);
TSStatus response;
try {
response = env.getConfigManager().getConsensusManager().write(new CreateTopicPlan(topicMeta));
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response =
new TSStatus(TSStatusCode.CREATE_TOPIC_ERROR.getStatusCode()).setMessage(e.getMessage());
}
if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
throw new SubscriptionException(
String.format(
- "Failed to create topic %s on config nodes, because %s", topicMeta, response));
+ ProcedureMessages.FAILED_TO_CREATE_TOPIC_ON_CONFIG_NODES_BECAUSE,
+ topicMeta,
+ response));
}
}
@Override
protected void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env)
throws SubscriptionException, IOException {
- LOGGER.info("CreateTopicProcedure: executeFromOperateOnDataNodes({})", topicMeta);
+ LOGGER.info(ProcedureMessages.CREATETOPICPROCEDURE_EXECUTEFROMOPERATEONDATANODES, topicMeta);
final List statuses = env.pushSingleTopicOnDataNode(topicMeta.serialize());
if (RpcUtils.squashResponseStatusList(statuses).getCode()
@@ -114,19 +118,19 @@ protected void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env)
// throw exception instead of logging warn, do not rely on metadata synchronization
throw new SubscriptionException(
String.format(
- "Failed to create topic %s on data nodes, because %s", topicMeta, statuses));
+ ProcedureMessages.FAILED_TO_CREATE_TOPIC_ON_DATA_NODES_BECAUSE, topicMeta, statuses));
}
}
@Override
protected void rollbackFromValidate(ConfigNodeProcedureEnv env) {
- LOGGER.info("CreateTopicProcedure: rollbackFromValidate({})", topicMeta);
+ LOGGER.info(ProcedureMessages.CREATETOPICPROCEDURE_ROLLBACKFROMVALIDATE, topicMeta);
}
@Override
protected void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env)
throws SubscriptionException {
- LOGGER.info("CreateTopicProcedure: rollbackFromCreateOnConfigNodes({})", topicMeta);
+ LOGGER.info(ProcedureMessages.CREATETOPICPROCEDURE_ROLLBACKFROMCREATEONCONFIGNODES, topicMeta);
TSStatus response;
try {
@@ -135,22 +139,23 @@ protected void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env)
.getConsensusManager()
.write(new DropTopicPlan(topicMeta.getTopicName()));
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response =
new TSStatus(TSStatusCode.DROP_TOPIC_ERROR.getStatusCode()).setMessage(e.getMessage());
}
if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
throw new SubscriptionException(
String.format(
- "Failed to rollback creating topic %s on config nodes, because %s",
- topicMeta, response));
+ ProcedureMessages.FAILED_TO_ROLLBACK_CREATING_TOPIC_ON_CONFIG_NODES_BECAUSE,
+ topicMeta,
+ response));
}
}
@Override
protected void rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env)
throws SubscriptionException {
- LOGGER.info("CreateTopicProcedure: rollbackFromCreateOnDataNodes({})", topicMeta);
+ LOGGER.info(ProcedureMessages.CREATETOPICPROCEDURE_ROLLBACKFROMCREATEONDATANODES, topicMeta);
final List statuses = env.dropSingleTopicOnDataNode(topicMeta.getTopicName());
if (RpcUtils.squashResponseStatusList(statuses).getCode()
@@ -158,8 +163,9 @@ protected void rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env)
// throw exception instead of logging warn, do not rely on metadata synchronization
throw new SubscriptionException(
String.format(
- "Failed to rollback creating topic %s on data nodes, because %s",
- topicMeta, statuses));
+ ProcedureMessages.FAILED_TO_ROLLBACK_CREATING_TOPIC_ON_DATA_NODES_BECAUSE,
+ topicMeta,
+ statuses));
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/DropTopicProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/DropTopicProcedure.java
index 363e5716ee8ea..8a17f99045abb 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/DropTopicProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/DropTopicProcedure.java
@@ -21,6 +21,8 @@
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.confignode.consensus.request.write.subscription.topic.DropTopicPlan;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.impl.subscription.AbstractOperateSubscriptionProcedure;
import org.apache.iotdb.confignode.procedure.impl.subscription.SubscriptionOperation;
@@ -62,7 +64,7 @@ protected SubscriptionOperation getOperation() {
@Override
protected boolean executeFromValidate(ConfigNodeProcedureEnv env) throws SubscriptionException {
- LOGGER.info("DropTopicProcedure: executeFromValidate({})", topicName);
+ LOGGER.info(ProcedureMessages.DROPTOPICPROCEDURE_EXECUTEFROMVALIDATE, topicName);
subscriptionInfo.get().validateBeforeDroppingTopic(topicName);
return true;
@@ -71,49 +73,50 @@ protected boolean executeFromValidate(ConfigNodeProcedureEnv env) throws Subscri
@Override
protected void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env)
throws SubscriptionException {
- LOGGER.info("DropTopicProcedure: executeFromOperateOnConfigNodes({})", topicName);
+ LOGGER.info(ProcedureMessages.DROPTOPICPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES, topicName);
TSStatus response;
try {
response = env.getConfigManager().getConsensusManager().write(new DropTopicPlan(topicName));
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response =
new TSStatus(TSStatusCode.DROP_TOPIC_ERROR.getStatusCode()).setMessage(e.getMessage());
}
if (response.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
throw new SubscriptionException(
String.format(
- "Failed to drop topic %s on config nodes, because %s", topicName, response));
+ ProcedureMessages.FAILED_TO_DROP_TOPIC_ON_CONFIG_NODES_BECAUSE, topicName, response));
}
}
@Override
protected void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env) {
- LOGGER.info("DropTopicProcedure: executeFromOperateOnDataNodes({})", topicName);
+ LOGGER.info(ProcedureMessages.DROPTOPICPROCEDURE_EXECUTEFROMOPERATEONDATANODES, topicName);
final List statuses = env.dropSingleTopicOnDataNode(topicName);
if (RpcUtils.squashResponseStatusList(statuses).getCode()
!= TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
// throw exception instead of logging warn, do not rely on metadata synchronization
throw new SubscriptionException(
- String.format("Failed to drop topic %s on data nodes, because %s", topicName, statuses));
+ String.format(
+ ProcedureMessages.FAILED_TO_DROP_TOPIC_ON_DATA_NODES_BECAUSE, topicName, statuses));
}
}
@Override
protected void rollbackFromValidate(ConfigNodeProcedureEnv env) {
- LOGGER.info("DropTopicProcedure: rollbackFromValidate({})", topicName);
+ LOGGER.info(ProcedureMessages.DROPTOPICPROCEDURE_ROLLBACKFROMVALIDATE, topicName);
}
@Override
protected void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) {
- LOGGER.info("DropTopicProcedure: rollbackFromCreateOnConfigNodes({})", topicName);
+ LOGGER.info(ProcedureMessages.DROPTOPICPROCEDURE_ROLLBACKFROMCREATEONCONFIGNODES, topicName);
}
@Override
protected void rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env) {
- LOGGER.info("DropTopicProcedure: rollbackFromCreateOnDataNodes({})", topicName);
+ LOGGER.info(ProcedureMessages.DROPTOPICPROCEDURE_ROLLBACKFROMCREATEONDATANODES, topicName);
}
@Override
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/runtime/TopicMetaSyncProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/runtime/TopicMetaSyncProcedure.java
index 27919bbcb9ea3..b7432a2c588bf 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/runtime/TopicMetaSyncProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/runtime/TopicMetaSyncProcedure.java
@@ -23,6 +23,8 @@
import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.commons.subscription.meta.topic.TopicMeta;
import org.apache.iotdb.confignode.consensus.request.write.subscription.topic.runtime.TopicHandleMetaChangePlan;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.persistence.subscription.SubscriptionInfo;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.impl.subscription.AbstractOperateSubscriptionProcedure;
@@ -76,7 +78,8 @@ protected ProcedureLockState acquireLock(ConfigNodeProcedureEnv configNodeProced
// Skip by setting the subscriptionInfo to null
subscriptionInfo = null;
LOGGER.info(
- "TopicMetaSyncProcedure: acquireLock, skip the procedure due to the last execution time {}",
+ ProcedureMessages
+ .TOPICMETASYNCPROCEDURE_ACQUIRELOCK_SKIP_THE_PROCEDURE_DUE_TO_THE_LAST_EXECUTION,
LAST_EXECUTION_TIME.get());
return ProcedureLockState.LOCK_ACQUIRED;
}
@@ -91,7 +94,7 @@ protected SubscriptionOperation getOperation() {
@Override
public boolean executeFromValidate(ConfigNodeProcedureEnv env) {
- LOGGER.info("TopicMetaSyncProcedure: executeFromValidate");
+ LOGGER.info(ProcedureMessages.TOPICMETASYNCPROCEDURE_EXECUTEFROMVALIDATE);
LAST_EXECUTION_TIME.set(System.currentTimeMillis());
return true;
@@ -100,7 +103,7 @@ public boolean executeFromValidate(ConfigNodeProcedureEnv env) {
@Override
public void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env)
throws SubscriptionException {
- LOGGER.info("TopicMetaSyncProcedure: executeFromOperateOnConfigNodes");
+ LOGGER.info(ProcedureMessages.TOPICMETASYNCPROCEDURE_EXECUTEFROMOPERATEONCONFIGNODES);
final List topicMetaList = new ArrayList<>();
subscriptionInfo.get().getAllTopicMeta().forEach(topicMetaList::add);
@@ -112,7 +115,7 @@ public void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env)
.getConsensusManager()
.write(new TopicHandleMetaChangePlan(topicMetaList));
} catch (ConsensusException e) {
- LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e);
+ LOGGER.warn(ConfigNodeMessages.FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS_LAYER_DUE, e);
response = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode());
response.setMessage(e.getMessage());
}
@@ -124,32 +127,32 @@ public void executeFromOperateOnConfigNodes(ConfigNodeProcedureEnv env)
@Override
public void executeFromOperateOnDataNodes(ConfigNodeProcedureEnv env)
throws SubscriptionException, IOException {
- LOGGER.info("TopicMetaSyncProcedure: executeFromOperateOnDataNodes");
+ LOGGER.info(ProcedureMessages.TOPICMETASYNCPROCEDURE_EXECUTEFROMOPERATEONDATANODES);
Map respMap = pushTopicMetaToDataNodes(env);
if (pushTopicMetaHasException(respMap)) {
throw new SubscriptionException(
- String.format("Failed to push topic meta to dataNodes, details: %s", respMap));
+ String.format(ProcedureMessages.FAILED_TO_PUSH_TOPIC_META_TO_DATANODES_DETAILS, respMap));
}
}
@Override
public void rollbackFromValidate(ConfigNodeProcedureEnv env) {
- LOGGER.info("TopicMetaSyncProcedure: rollbackFromValidate");
+ LOGGER.info(ProcedureMessages.TOPICMETASYNCPROCEDURE_ROLLBACKFROMVALIDATE);
// Do nothing
}
@Override
public void rollbackFromOperateOnConfigNodes(ConfigNodeProcedureEnv env) {
- LOGGER.info("TopicMetaSyncProcedure: rollbackFromOperateOnConfigNodes");
+ LOGGER.info(ProcedureMessages.TOPICMETASYNCPROCEDURE_ROLLBACKFROMOPERATEONCONFIGNODES);
// Do nothing
}
@Override
public void rollbackFromOperateOnDataNodes(ConfigNodeProcedureEnv env) {
- LOGGER.info("TopicMetaSyncProcedure: rollbackFromOperateOnDataNodes");
+ LOGGER.info(ProcedureMessages.TOPICMETASYNCPROCEDURE_ROLLBACKFROMOPERATEONDATANODES);
// Do nothing
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/sync/AuthOperationProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/sync/AuthOperationProcedure.java
index 4b2fea1aac813..9011267525361 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/sync/AuthOperationProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/sync/AuthOperationProcedure.java
@@ -30,6 +30,7 @@
import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan;
import org.apache.iotdb.confignode.consensus.request.write.auth.AuthorPlan;
import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeEnrichedPlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.impl.node.AbstractNodeProcedure;
@@ -118,7 +119,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, AuthOperationProcedu
}
}
if (dataNodesToInvalid.isEmpty()) {
- LOGGER.info("Auth procedure: clean datanode cache successfully");
+ LOGGER.info(ProcedureMessages.AUTH_PROCEDURE_CLEAN_DATANODE_CACHE_SUCCESSFULLY);
return Flow.NO_MORE_STATE;
} else {
setNextState(AuthOperationProcedureState.DATANODE_AUTHCACHE_INVALIDING);
@@ -127,14 +128,16 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, AuthOperationProcedu
}
} catch (Exception e) {
if (isRollbackSupported(state)) {
- LOGGER.error("Fail when execute {} ", plan);
+ LOGGER.error(ProcedureMessages.FAIL_WHEN_EXECUTE, plan);
setFailure(new ProcedureException(e));
} else {
- LOGGER.error("Retrievable error trying to execute plan {}, state: {}", plan, state, e);
+ LOGGER.error(
+ ProcedureMessages.RETRIEVABLE_ERROR_TRYING_TO_EXECUTE_PLAN_STATE, plan, state, e);
if (getCycles() > RETRY_THRESHOLD) {
setFailure(
new ProcedureException(
- String.format("Fail to execute plan [%s] at state[%s]", plan.toString(), state)));
+ String.format(
+ ProcedureMessages.FAIL_TO_EXECUTE_PLAN_AT_STATE, plan.toString(), state)));
}
}
}
@@ -159,9 +162,11 @@ private void writePlan(ConfigNodeProcedureEnv env) {
this.dataNodesToInvalid.add(new Pair<>(item, System.currentTimeMillis()));
}
LOGGER.info(
- "Execute auth plan {} success. To invalidate datanodes: {}", plan, dataNodesToInvalid);
+ ProcedureMessages.EXECUTE_AUTH_PLAN_SUCCESS_TO_INVALIDATE_DATANODES,
+ plan,
+ dataNodesToInvalid);
} else {
- LOGGER.info("Failed to execute plan {} because {}", plan, res.message);
+ LOGGER.info(ProcedureMessages.FAILED_TO_EXECUTE_PLAN_BECAUSE, plan, res.message);
setFailure(new ProcedureException(new IoTDBException(res)));
}
}
@@ -228,7 +233,7 @@ public void deserialize(ByteBuffer byteBuffer) {
this.user = plan.getUserName();
this.role = plan.getRoleName();
} catch (IOException e) {
- LOGGER.error("IO error when deserialize authplan.", e);
+ LOGGER.error(ProcedureMessages.IO_ERROR_WHEN_DESERIALIZE_AUTHPLAN, e);
}
if (byteBuffer.hasRemaining()) {
size = ReadWriteIOUtils.readInt(byteBuffer);
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/testonly/AddNeverFinishSubProcedureProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/testonly/AddNeverFinishSubProcedureProcedure.java
index 1e82892af78e8..3676fcad674d2 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/testonly/AddNeverFinishSubProcedureProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/testonly/AddNeverFinishSubProcedureProcedure.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.confignode.procedure.impl.testonly;
import org.apache.iotdb.commons.utils.TestOnly;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure;
@@ -49,7 +50,9 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, Integer state)
}
if (state == 1) {
// test fail
- LOGGER.error("AddNeverFinishSubProcedureProcedure run again, which should never happen");
+ LOGGER.error(
+ ProcedureMessages
+ .ADDNEVERFINISHSUBPROCEDUREPROCEDURE_RUN_AGAIN_WHICH_SHOULD_NEVER_HAPPEN);
ProcedureTestUtils.createDatabase(env.getConfigManager(), FAIL_DATABASE_NAME);
}
return Flow.NO_MORE_STATE;
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/testonly/CreateManyDatabasesProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/testonly/CreateManyDatabasesProcedure.java
index 4693923bb301c..c109ef51acbf0 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/testonly/CreateManyDatabasesProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/testonly/CreateManyDatabasesProcedure.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.commons.utils.TestOnly;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure;
@@ -77,10 +78,10 @@ private void createDatabase(ConfigNodeProcedureEnv env, int id) throws Procedure
if (!createFailedOnce) {
createFailedOnce = true;
} else {
- throw new ProcedureException("createDatabase fail twice");
+ throw new ProcedureException(ProcedureMessages.CREATEDATABASE_FAIL_TWICE);
}
} else if (TSStatusCode.SUCCESS_STATUS.getStatusCode() != status.getCode()) {
- throw new ProcedureException("Unexpected fail, tsStatus is " + status);
+ throw new ProcedureException(ProcedureMessages.UNEXPECTED_FAIL_TSSTATUS_IS + status);
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/trigger/CreateTriggerProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/trigger/CreateTriggerProcedure.java
index ee06f9a53cb9c..7190dcde353cc 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/trigger/CreateTriggerProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/trigger/CreateTriggerProcedure.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.confignode.consensus.request.write.trigger.AddTriggerInTablePlan;
import org.apache.iotdb.confignode.consensus.request.write.trigger.DeleteTriggerInTablePlan;
import org.apache.iotdb.confignode.consensus.request.write.trigger.UpdateTriggerStateInTablePlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.persistence.TriggerInfo;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
@@ -123,7 +124,7 @@ protected Flow executeFromState(
} else {
throw new TriggerManagementException(
String.format(
- "Fail to create triggerInstance [%s] on Data Nodes",
+ ProcedureMessages.FAIL_TO_CREATE_TRIGGERINSTANCE_ON_DATA_NODES,
triggerInformation.getTriggerName()));
}
break;
@@ -140,7 +141,7 @@ protected Flow executeFromState(
} else {
throw new TriggerManagementException(
String.format(
- "Fail to active triggerInstance [%s] on Data Nodes",
+ ProcedureMessages.FAIL_TO_ACTIVE_TRIGGERINSTANCE_ON_DATA_NODES,
triggerInformation.getTriggerName()));
}
break;
@@ -165,7 +166,7 @@ protected Flow executeFromState(
return Flow.NO_MORE_STATE;
default:
- throw new IllegalArgumentException("Unknown CreateTriggerState: " + state);
+ throw new IllegalArgumentException(ProcedureMessages.UNKNOWN_CREATETRIGGERSTATE + state);
}
} catch (Exception e) {
if (isRollbackSupported(state)) {
@@ -181,8 +182,9 @@ protected Flow executeFromState(
setFailure(
new ProcedureException(
String.format(
- "Fail to create trigger [%s] at STATE [%s]",
- triggerInformation.getTriggerName(), state)));
+ ProcedureMessages.FAIL_TO_CREATE_TRIGGER_AT_STATE,
+ triggerInformation.getTriggerName(),
+ state)));
}
}
}
@@ -226,7 +228,7 @@ protected void rollbackState(ConfigNodeProcedureEnv env, CreateTriggerState stat
!= TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
throw new TriggerManagementException(
String.format(
- "Fail to [CONFIG_NODE_INACTIVE] rollback of trigger [%s]",
+ ProcedureMessages.FAIL_TO_CONFIG_NODE_INACTIVE_ROLLBACK_OF_TRIGGER,
triggerInformation.getTriggerName()));
}
break;
@@ -242,7 +244,7 @@ protected void rollbackState(ConfigNodeProcedureEnv env, CreateTriggerState stat
!= TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
throw new TriggerManagementException(
String.format(
- "Fail to [DATA_NODE_INACTIVE] rollback of trigger [%s]",
+ ProcedureMessages.FAIL_TO_DATA_NODE_INACTIVE_ROLLBACK_OF_TRIGGER,
triggerInformation.getTriggerName()));
}
break;
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/trigger/DropTriggerProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/trigger/DropTriggerProcedure.java
index 19bfcdc30d2ba..64050cf8d8aaa 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/trigger/DropTriggerProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/trigger/DropTriggerProcedure.java
@@ -23,6 +23,7 @@
import org.apache.iotdb.confignode.consensus.request.write.pipe.payload.PipeEnrichedPlan;
import org.apache.iotdb.confignode.consensus.request.write.trigger.DeleteTriggerInTablePlan;
import org.apache.iotdb.confignode.consensus.request.write.trigger.UpdateTriggerStateInTablePlan;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.persistence.TriggerInfo;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
@@ -88,7 +89,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, DropTriggerState sta
setNextState(DropTriggerState.DATA_NODE_DROPPED);
} else {
throw new TriggerManagementException(
- String.format("Fail to drop trigger [%s] on Data Nodes", triggerName));
+ String.format(ProcedureMessages.FAIL_TO_DROP_TRIGGER_ON_DATA_NODES, triggerName));
}
break;
@@ -108,7 +109,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, DropTriggerState sta
return Flow.NO_MORE_STATE;
default:
- throw new IllegalArgumentException("Unknown DropTriggerState: " + state);
+ throw new IllegalArgumentException(ProcedureMessages.UNKNOWN_DROPTRIGGERSTATE + state);
}
} catch (Exception e) {
if (isRollbackSupported(state)) {
@@ -120,7 +121,8 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, DropTriggerState sta
if (getCycles() > RETRY_THRESHOLD) {
setFailure(
new ProcedureException(
- String.format("Fail to drop trigger [%s] at STATE [%s]", triggerName, state)));
+ String.format(
+ ProcedureMessages.FAIL_TO_DROP_TRIGGER_AT_STATE, triggerName, state)));
}
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ConfigProcedureStore.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ConfigProcedureStore.java
index 603a9a8a627d5..3712871658835 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ConfigProcedureStore.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ConfigProcedureStore.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.commons.utils.TestOnly;
import org.apache.iotdb.confignode.consensus.request.write.procedure.DeleteProcedurePlan;
import org.apache.iotdb.confignode.consensus.request.write.procedure.UpdateProcedurePlan;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.persistence.ProcedureInfo;
import org.apache.iotdb.confignode.procedure.Procedure;
@@ -173,7 +174,7 @@ public static void createOldProcWalDir() throws IOException {
} else {
throw new IOException(
String.format(
- "Start ConfigNode failed, because couldn't make system dirs: %s.",
+ ConfigNodeMessages.START_CONFIGNODE_FAILED_BECAUSE_COULDN_T_MAKE_SYSTEM_DIRS,
dir.getAbsolutePath()));
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ProcedureFactory.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ProcedureFactory.java
index 140fffa852ccc..ee2a3b94696df 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ProcedureFactory.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ProcedureFactory.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.confignode.procedure.store;
import org.apache.iotdb.commons.exception.runtime.ThriftSerDeException;
+import org.apache.iotdb.confignode.i18n.ProcedureMessages;
import org.apache.iotdb.confignode.procedure.Procedure;
import org.apache.iotdb.confignode.procedure.impl.cq.CreateCQProcedure;
import org.apache.iotdb.confignode.procedure.impl.node.AddConfigNodeProcedure;
@@ -106,8 +107,8 @@ public Procedure create(ByteBuffer buffer) throws IOException {
short typeCode = buffer.getShort();
ProcedureType procedureType = ProcedureType.convertToProcedureType(typeCode);
if (procedureType == null) {
- LOGGER.error("unrecognized log type " + typeCode);
- throw new IOException("unrecognized log type " + typeCode);
+ LOGGER.error(ProcedureMessages.UNRECOGNIZED_LOG_TYPE + typeCode);
+ throw new IOException(ProcedureMessages.UNRECOGNIZED_LOG_TYPE + typeCode);
}
Procedure procedure;
@@ -409,14 +410,16 @@ public Procedure create(ByteBuffer buffer) throws IOException {
procedure = new DataPartitionTableIntegrityCheckProcedure();
break;
default:
- LOGGER.error("Unknown Procedure type: {}", typeCode);
- throw new IOException("Unknown Procedure type: " + typeCode);
+ LOGGER.error(ProcedureMessages.UNKNOWN_PROCEDURE_TYPE_2, typeCode);
+ throw new IOException(ProcedureMessages.UNKNOWN_PROCEDURE_TYPE + typeCode);
}
try {
procedure.deserialize(buffer);
} catch (ThriftSerDeException e) {
LOGGER.warn(
- "Catch exception while deserializing procedure, this procedure will be ignored.", e);
+ ProcedureMessages
+ .CATCH_EXCEPTION_WHILE_DESERIALIZING_PROCEDURE_THIS_PROCEDURE_WILL_BE_IGNORED,
+ e);
procedure = null;
}
return procedure;
@@ -562,7 +565,7 @@ public static ProcedureType getProcedureType(final Procedure> procedure) {
return ProcedureType.DATA_PARTITION_TABLE_INTEGRITY_CHECK_PROCEDURE;
}
throw new UnsupportedOperationException(
- "Procedure type " + procedure.getClass() + " is not supported");
+ ProcedureMessages.PROCEDURE_TYPE + procedure.getClass() + " is not supported");
}
private static class ProcedureFactoryHolder {
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNode.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNode.java
index 0d8e299ebc8f6..037f138286a18 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNode.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNode.java
@@ -49,6 +49,7 @@
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
import org.apache.iotdb.confignode.conf.ConfigNodeStartupCheck;
import org.apache.iotdb.confignode.conf.SystemPropertiesUtils;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.manager.consensus.ConsensusManager;
import org.apache.iotdb.confignode.manager.pipe.agent.PipeConfigNodeAgent;
@@ -118,11 +119,11 @@ public ConfigNode() {
public static void main(String[] args) throws Exception {
LOGGER.info(
- "{} environment variables: {}",
+ ConfigNodeMessages.ENVIRONMENT_VARIABLES,
ConfigNodeConstant.GLOBAL_NAME,
ConfigNodeConfig.getEnvironmentVariables());
LOGGER.info(
- "{} default charset is: {}",
+ ConfigNodeMessages.DEFAULT_CHARSET_IS,
ConfigNodeConstant.GLOBAL_NAME,
Charset.defaultCharset().displayName());
// let IoTDB handle the exception instead of ratis
@@ -138,26 +139,25 @@ public static void main(String[] args) throws Exception {
protected void start() throws IoTDBException {
try {
// Do ConfigNode startup checks
- LOGGER.info("Starting IoTDB {}", IoTDBConstant.VERSION_WITH_BUILD);
+ LOGGER.info(ConfigNodeMessages.STARTING_IOTDB, IoTDBConstant.VERSION_WITH_BUILD);
ConfigNodeStartupCheck checks = new ConfigNodeStartupCheck(IoTDBConstant.CN_ROLE);
checks.startUpCheck();
} catch (StartupException | ConfigurationException | IOException e) {
- LOGGER.error("Meet error when doing start checking", e);
- throw new IoTDBException("Error starting", -1);
+ LOGGER.error(ConfigNodeMessages.MEET_ERROR_WHEN_DOING_START_CHECKING, e);
+ throw new IoTDBException(ConfigNodeMessages.ERROR_STARTING, -1);
}
active();
- LOGGER.info("IoTDB started");
+ LOGGER.info(ConfigNodeMessages.IOTDB_STARTED);
}
@Override
protected void remove(Set nodeIds) throws IoTDBException {
throw new IoTDBException(
- "The remove-confignode script has been deprecated. Please connect to the CLI and use SQL: remove confignode [confignode_id].",
- -1);
+ ConfigNodeMessages.THE_REMOVE_CONFIGNODE_SCRIPT_HAS_BEEN_DEPRECATED_PLEASE_CONNECT_TO, -1);
}
public void active() {
- LOGGER.info("Activating {}...", ConfigNodeConstant.GLOBAL_NAME);
+ LOGGER.info(ConfigNodeMessages.ACTIVATING, ConfigNodeConstant.GLOBAL_NAME);
try {
processPid();
@@ -170,7 +170,7 @@ public void active() {
/* Restart */
if (SystemPropertiesUtils.isRestarted()) {
- LOGGER.info("{} is in restarting process...", ConfigNodeConstant.GLOBAL_NAME);
+ LOGGER.info(ConfigNodeMessages.IS_IN_RESTARTING_PROCESS, ConfigNodeConstant.GLOBAL_NAME);
int configNodeId = CONF.getConfigNodeId();
configManager.initConsensusManager();
@@ -182,7 +182,7 @@ public void active() {
setUpRPCService();
LOGGER.info(CONFIGURATION, CONF.getConfigMessage());
LOGGER.info(
- "{} has successfully restarted and joined the cluster: {}.",
+ ConfigNodeMessages.HAS_SUCCESSFULLY_RESTARTED_AND_JOINED_THE_CLUSTER,
ConfigNodeConstant.GLOBAL_NAME,
CONF.getClusterName());
@@ -212,7 +212,7 @@ public void active() {
/* Initial startup of Seed-ConfigNode */
if (ConfigNodeDescriptor.getInstance().isSeedConfigNode()) {
LOGGER.info(
- "The current {} is now starting as the Seed-ConfigNode.",
+ ConfigNodeMessages.THE_CURRENT_IS_NOW_STARTING_AS_THE_SEED_CONFIGNODE,
ConfigNodeConstant.GLOBAL_NAME);
/* Always set ConfigNodeId before initConsensusManager */
@@ -241,7 +241,7 @@ public void active() {
// The initial startup of Seed-ConfigNode finished
LOGGER.info(CONFIGURATION, CONF.getConfigMessage());
LOGGER.info(
- "{} has successfully started and joined the cluster: {}.",
+ ConfigNodeMessages.HAS_SUCCESSFULLY_STARTED_AND_JOINED_THE_CLUSTER,
ConfigNodeConstant.GLOBAL_NAME,
CONF.getClusterName());
return;
@@ -256,7 +256,7 @@ public void active() {
// we should wait for leader's scheduling
LOGGER.info(CONFIGURATION, CONF.getConfigMessage());
LOGGER.info(
- "{} {} has registered successfully. Waiting for the leader's scheduling to join the cluster: {}.",
+ ConfigNodeMessages.HAS_REGISTERED_SUCCESSFULLY_WAITING_FOR_THE_LEADER_S_SCHEDULING_TO,
ConfigNodeConstant.GLOBAL_NAME,
CONF.getConfigNodeId(),
CONF.getClusterName());
@@ -277,11 +277,11 @@ public void active() {
if (!isJoinedCluster) {
LOGGER.error(
- "The current ConfigNode can't joined the cluster because leader's scheduling failed. The possible cause is that the ip:port configuration is incorrect.");
+ ConfigNodeMessages.THE_CURRENT_CONFIGNODE_CAN_T_JOINED_THE_CLUSTER_BECAUSE_LEADER);
stop();
}
} catch (Throwable e) {
- LOGGER.error("Meet error while starting up.", e);
+ LOGGER.error(ConfigNodeMessages.MEET_ERROR_WHILE_STARTING_UP, e);
exitStatusCode = StatusUtils.retrieveExitStatusCode(e);
stop();
}
@@ -306,7 +306,7 @@ private void setUpInternalServices() throws StartupException {
// Init Pipe Runtime Agent
registerManager.register(PipeConfigNodeAgent.runtime());
- LOGGER.info("Successfully setup internal services.");
+ LOGGER.info(ConfigNodeMessages.SUCCESSFULLY_SETUP_INTERNAL_SERVICES);
}
private void setUpMetricService() throws StartupException {
@@ -354,10 +354,10 @@ void initConfigManager() {
try {
setConfigManager();
} catch (Exception e) {
- LOGGER.error("Can't start ConfigNode consensus group!", e);
+ LOGGER.error(ConfigNodeMessages.CAN_T_START_CONFIGNODE_CONSENSUS_GROUP, e);
stop();
}
- LOGGER.info("Successfully initialize ConfigManager.");
+ LOGGER.info(ConfigNodeMessages.SUCCESSFULLY_INITIALIZE_CONFIGMANAGER);
}
protected void setConfigManager() throws Exception {
@@ -380,8 +380,8 @@ private void sendRegisterConfigNodeRequest() throws StartupException, IOExceptio
TEndPoint seedConfigNode = CONF.getSeedConfigNode();
if (seedConfigNode == null) {
- LOGGER.error("Please set the cn_seed_config_node parameter in iotdb-system.properties file.");
- throw new StartupException("The seedConfigNode setting in conf is empty");
+ LOGGER.error(ConfigNodeMessages.PLEASE_SET_THE_CN_SEED_CONFIG_NODE_PARAMETER_IN_IOTDB);
+ throw new StartupException(ConfigNodeMessages.THE_SEEDCONFIGNODE_SETTING_IN_CONF_IS_EMPTY);
}
for (int retry = 0; retry < STARTUP_RETRY_NUM; retry++) {
@@ -401,8 +401,8 @@ private void sendRegisterConfigNodeRequest() throws StartupException, IOExceptio
if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
if (resp == null) {
- LOGGER.error("The result of register ConfigNode is empty!");
- throw new StartupException("The result of register ConfigNode is empty!");
+ LOGGER.error(ConfigNodeMessages.THE_RESULT_OF_REGISTER_CONFIGNODE_IS_EMPTY);
+ throw new StartupException(ConfigNodeMessages.THE_RESULT_OF_REGISTER_CONFIGNODE_IS_EMPTY);
}
/* Always set ConfigNodeId before initConsensusManager */
CONF.setConfigNodeId(resp.getConfigNodeId());
@@ -410,17 +410,17 @@ private void sendRegisterConfigNodeRequest() throws StartupException, IOExceptio
return;
} else if (status.getCode() == TSStatusCode.REDIRECTION_RECOMMEND.getStatusCode()) {
seedConfigNode = status.getRedirectNode();
- LOGGER.info("ConfigNode need redirect to {}, retry {} ...", seedConfigNode, retry);
+ LOGGER.info(ConfigNodeMessages.CONFIGNODE_NEED_REDIRECT_TO_RETRY, seedConfigNode, retry);
} else if (status.getCode() == TSStatusCode.INTERNAL_REQUEST_RETRY_ERROR.getStatusCode()) {
- LOGGER.warn("The result of register self ConfigNode is {}, retry {} ...", status, retry);
+ LOGGER.warn(
+ ConfigNodeMessages.THE_RESULT_OF_REGISTER_SELF_CONFIGNODE_IS_RETRY, status, retry);
} else {
throw new StartupException(status.getMessage());
}
startUpSleep("Register ConfigNode failed!");
}
- LOGGER.error(
- "The current ConfigNode can't send register request to the ConfigNode-leader after all retries!");
+ LOGGER.error(ConfigNodeMessages.THE_CURRENT_CONFIGNODE_CAN_T_SEND_REGISTER_REQUEST_TO_THE);
stop();
}
@@ -471,12 +471,12 @@ protected ConfigNodeRPCServiceProcessor getConfigNodeRPCServiceProcessor() {
private TConfigNodeLocation waitForLeaderElected() {
while (!configManager.getConsensusManager().isLeaderExist()) {
- LOGGER.info("Leader has not been elected yet, wait for 1 second");
+ LOGGER.info(ConfigNodeMessages.LEADER_HAS_NOT_BEEN_ELECTED_YET_WAIT_FOR_1_SECOND);
try {
TimeUnit.SECONDS.sleep(1);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- LOGGER.warn("Unexpected interruption during waiting for leader election.");
+ LOGGER.warn(ConfigNodeMessages.UNEXPECTED_INTERRUPTION_DURING_WAITING_FOR_LEADER_ELECTION);
}
}
return configManager.getConsensusManager().getLeaderLocation();
@@ -488,20 +488,20 @@ private TConfigNodeLocation waitForLeaderElected() {
* @throws IOException if close {@link ConfigNode} failed.
*/
public void deactivate() throws IOException {
- LOGGER.info("Deactivating {}...", ConfigNodeConstant.GLOBAL_NAME);
+ LOGGER.info(ConfigNodeMessages.DEACTIVATING, ConfigNodeConstant.GLOBAL_NAME);
registerManager.deregisterAll();
JMXService.deregisterMBean(mbeanName);
if (configManager != null) {
configManager.close();
}
- LOGGER.info("{} is deactivated.", ConfigNodeConstant.GLOBAL_NAME);
+ LOGGER.info(ConfigNodeMessages.IS_DEACTIVATED, ConfigNodeConstant.GLOBAL_NAME);
}
public void stop() {
try {
deactivate();
} catch (IOException e) {
- LOGGER.error("Meet error when deactivate ConfigNode", e);
+ LOGGER.error(ConfigNodeMessages.MEET_ERROR_WHEN_DEACTIVATE_CONFIGNODE, e);
}
System.exit(exitStatusCode);
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNodeShutdownHook.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNodeShutdownHook.java
index 5c3ec5af06322..9860ee6d497ce 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNodeShutdownHook.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNodeShutdownHook.java
@@ -29,6 +29,7 @@
import org.apache.iotdb.confignode.conf.ConfigNodeConfig;
import org.apache.iotdb.confignode.conf.ConfigNodeConstant;
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.db.utils.MemUtils;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -46,14 +47,14 @@ public class ConfigNodeShutdownHook extends Thread {
@Override
public void run() {
- LOGGER.info("ConfigNode exiting...");
+ LOGGER.info(ConfigNodeMessages.CONFIGNODE_EXITING);
boolean isLeader = getConfigNodeInstance().getConfigManager().getConsensusManager().isLeader();
try {
ConfigNode.getInstance().deactivate();
} catch (IOException e) {
- LOGGER.error("Meet error when deactivate ConfigNode", e);
+ LOGGER.error(ConfigNodeMessages.MEET_ERROR_WHEN_DEACTIVATE_CONFIGNODE, e);
}
if (!isLeader) {
@@ -84,7 +85,8 @@ public void run() {
}
if (!isReportSuccess) {
LOGGER.error(
- "Reporting ConfigNode shutdown failed. The cluster will still take the current ConfigNode as Running for a few seconds.");
+ ConfigNodeMessages
+ .REPORTING_CONFIGNODE_SHUTDOWN_FAILED_THE_CLUSTER_WILL_STILL_TAKE_THE);
}
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessor.java
index 4dac0ea8e34ec..cbedf3c3f1a79 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessor.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessor.java
@@ -83,6 +83,7 @@
import org.apache.iotdb.confignode.consensus.response.datanode.DataNodeToStatusResp;
import org.apache.iotdb.confignode.consensus.response.partition.RegionInfoListResp;
import org.apache.iotdb.confignode.consensus.response.ttl.ShowTTLResp;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.manager.consensus.ConsensusManager;
import org.apache.iotdb.confignode.manager.schema.ClusterSchemaManager;
@@ -290,7 +291,7 @@ public TSystemConfigurationResp getSystemConfiguration() {
.convertToRpcSystemConfigurationResp();
// Print log to record the ConfigNode that performs the GetConfigurationRequest
- LOGGER.info("Execute GetSystemConfiguration with result {}", resp);
+ LOGGER.info(ConfigNodeMessages.EXECUTE_GETSYSTEMCONFIGURATION_WITH_RESULT, resp);
return resp;
}
@@ -299,12 +300,12 @@ public TGetClusterIdResp getClusterId() {
TGetClusterIdResp resp = new TGetClusterIdResp();
String clusterId = configManager.getClusterManager().getClusterId();
if (clusterId == null) {
- LOGGER.error("clusterId not generated yet, should never happen.");
+ LOGGER.error(ConfigNodeMessages.CLUSTERID_NOT_GENERATED_YET_SHOULD_NEVER_HAPPEN);
return resp.setClusterId("")
.setStatus(new TSStatus(TSStatusCode.GET_CLUSTER_ID_ERROR.getStatusCode()));
}
resp.setClusterId(clusterId).setStatus(RpcUtils.SUCCESS_STATUS);
- LOGGER.info("Execute getClusterId with result {}", resp);
+ LOGGER.info(ConfigNodeMessages.EXECUTE_GETCLUSTERID_WITH_RESULT, resp);
return resp;
}
@@ -315,7 +316,7 @@ public TDataNodeRegisterResp registerDataNode(TDataNodeRegisterReq req) {
.convertToRpcDataNodeRegisterResp();
// Print log to record the ConfigNode that performs the RegisterDatanodeRequest
- LOGGER.info("Execute RegisterDataNodeRequest {} with result {}", req, resp);
+ LOGGER.info(ConfigNodeMessages.EXECUTE_REGISTERDATANODEREQUEST_WITH_RESULT, req, resp);
return resp;
}
@@ -325,7 +326,7 @@ public TDataNodeRestartResp restartDataNode(TDataNodeRestartReq req) {
TDataNodeRestartResp resp = configManager.restartDataNode(req);
// Print log to record the ConfigNode that performs the RestartDatanodeRequest
- LOGGER.info("Execute RestartDataNodeRequest {} with result {}", req, resp);
+ LOGGER.info(ConfigNodeMessages.EXECUTE_RESTARTDATANODEREQUEST_WITH_RESULT, req, resp);
return resp;
}
@@ -334,22 +335,22 @@ public TDataNodeRestartResp restartDataNode(TDataNodeRestartReq req) {
public TAINodeRegisterResp registerAINode(TAINodeRegisterReq req) {
TAINodeRegisterResp resp =
((AINodeRegisterResp) configManager.registerAINode(req)).convertToAINodeRegisterResp();
- LOGGER.info("Execute RegisterAINodeRequest {} with result {}", req, resp);
+ LOGGER.info(ConfigNodeMessages.EXECUTE_REGISTERAINODEREQUEST_WITH_RESULT, req, resp);
return resp;
}
@Override
public TAINodeRestartResp restartAINode(TAINodeRestartReq req) {
TAINodeRestartResp resp = configManager.restartAINode(req);
- LOGGER.info("Execute RestartAINodeRequest {} with result {}", req, resp);
+ LOGGER.info(ConfigNodeMessages.EXECUTE_RESTARTAINODEREQUEST_WITH_RESULT, req, resp);
return resp;
}
@Override
public TSStatus removeAINode(TAINodeRemoveReq req) {
- LOGGER.info("ConfigNode RPC Service start to remove AINode");
+ LOGGER.info(ConfigNodeMessages.CONFIGNODE_RPC_SERVICE_START_TO_REMOVE_AINODE);
TSStatus status = configManager.removeAINode();
- LOGGER.info("ConfigNode RPC Service finished to remove AINode, result: {}", status);
+ LOGGER.info(ConfigNodeMessages.CONFIGNODE_RPC_SERVICE_FINISHED_TO_REMOVE_AINODE_RESULT, status);
return status;
}
@@ -371,13 +372,15 @@ public TAINodeConfigurationResp getAINodeConfiguration(int aiNodeId) throws TExc
@Override
public TDataNodeRemoveResp removeDataNode(TDataNodeRemoveReq req) {
- LOGGER.info("ConfigNode RPC Service start to remove DataNode, req: {}", req);
+ LOGGER.info(ConfigNodeMessages.CONFIGNODE_RPC_SERVICE_START_TO_REMOVE_DATANODE_REQ, req);
RemoveDataNodePlan removeDataNodePlan = new RemoveDataNodePlan(req.getDataNodeLocations());
DataNodeToStatusResp removeResp =
(DataNodeToStatusResp) configManager.removeDataNode(removeDataNodePlan);
TDataNodeRemoveResp resp = removeResp.convertToRpCDataNodeRemoveResp();
LOGGER.info(
- "ConfigNode RPC Service finished to remove DataNode, req: {}, result: {}", req, resp);
+ ConfigNodeMessages.CONFIGNODE_RPC_SERVICE_FINISHED_TO_REMOVE_DATANODE_REQ_RESULT,
+ req,
+ resp);
return resp;
}
@@ -420,7 +423,7 @@ public TSStatus setDatabase(final TDatabaseSchema databaseSchema) {
final TSStatus resp = configManager.setDatabase(setPlan);
// Print log to record the ConfigNode that performs the set SetDatabaseRequest
- LOGGER.info("Execute SetDatabase: {} with result: {}", databaseSchema, resp);
+ LOGGER.info(ConfigNodeMessages.EXECUTE_SETDATABASE_WITH_RESULT, databaseSchema, resp);
return resp;
}
@@ -463,7 +466,7 @@ public TSStatus alterDatabase(final TDatabaseSchema databaseSchema) {
}
if (errorResp != null) {
- LOGGER.warn("Execute AlterDatabase: {} with result: {}", databaseSchema, errorResp);
+ LOGGER.warn(ConfigNodeMessages.EXECUTE_ALTERDATABASE_WITH_RESULT, databaseSchema, errorResp);
return errorResp;
}
@@ -472,7 +475,7 @@ public TSStatus alterDatabase(final TDatabaseSchema databaseSchema) {
final TSStatus resp = configManager.alterDatabase(alterPlan);
// Print log to record the ConfigNode that performs the set SetDatabaseRequest
- LOGGER.info("Execute AlterDatabase: {} with result: {}", databaseSchema, resp);
+ LOGGER.info(ConfigNodeMessages.EXECUTE_ALTERDATABASE_WITH_RESULT, databaseSchema, resp);
return resp;
}
@@ -704,7 +707,7 @@ public TAuthorizerResp queryPermission(final TAuthorizerReq req) {
@Override
public TSStatus operateRPermission(final TAuthorizerRelationalReq req) {
if (req.getAuthorType() < 0 || req.getAuthorType() >= AuthorRType.values().length) {
- throw new IndexOutOfBoundsException("Invalid Author Type ordinal");
+ throw new IndexOutOfBoundsException(ConfigNodeMessages.INVALID_AUTHOR_TYPE_ORDINAL);
}
ConfigPhysicalPlanType configPhysicalPlanType =
AuthorInfo.getConfigPhysicalPlanTypeFromAuthorRType(req.getAuthorType());
@@ -725,7 +728,7 @@ public TSStatus operateRPermission(final TAuthorizerRelationalReq req) {
@Override
public TAuthorizerResp queryRPermission(final TAuthorizerRelationalReq req) {
if (req.getAuthorType() < 0 || req.getAuthorType() >= AuthorRType.values().length) {
- throw new IndexOutOfBoundsException("Invalid Author Type ordinal");
+ throw new IndexOutOfBoundsException(ConfigNodeMessages.INVALID_AUTHOR_TYPE_ORDINAL);
}
final PermissionInfoResp dataSet =
(PermissionInfoResp)
@@ -812,7 +815,7 @@ public TConfigNodeRegisterResp registerConfigNode(TConfigNodeRegisterReq req) {
TConfigNodeRegisterResp resp = configManager.registerConfigNode(req);
// Print log to record the ConfigNode that performs the RegisterConfigNodeRequest
- LOGGER.info("Execute RegisterConfigNodeRequest {} with result {}", req, resp);
+ LOGGER.info(ConfigNodeMessages.EXECUTE_REGISTERCONFIGNODEREQUEST_WITH_RESULT, req, resp);
return resp;
}
@@ -827,13 +830,13 @@ public TSStatus notifyRegisterSuccess() {
try {
SystemPropertiesUtils.storeSystemParameters();
} catch (IOException e) {
- LOGGER.error("Write confignode-system.properties failed", e);
+ LOGGER.error(ConfigNodeMessages.WRITE_CONFIGNODE_SYSTEM_PROPERTIES_FAILED, e);
return new TSStatus(TSStatusCode.WRITE_PROCESS_ERROR.getStatusCode());
}
// The initial startup of Non-Seed-ConfigNode finished
LOGGER.info(
- "{} has successfully started and joined the cluster: {}.",
+ ConfigNodeMessages.HAS_SUCCESSFULLY_STARTED_AND_JOINED_THE_CLUSTER,
ConfigNodeConstant.GLOBAL_NAME,
configNodeConfig.getClusterName());
return StatusUtils.OK;
@@ -846,7 +849,7 @@ public TSStatus removeConfigNode(TConfigNodeLocation configNodeLocation) throws
TSStatus status = configManager.removeConfigNode(removeConfigNodePlan);
// Print log to record the ConfigNode that performs the RemoveConfigNodeRequest
LOGGER.info(
- "The result of submitting RemoveConfigNode job is {}. RemoveConfigNodeRequest: {}",
+ ConfigNodeMessages.THE_RESULT_OF_SUBMITTING_REMOVECONFIGNODE_JOB_IS_REMOVECONFIGNODEREQUEST,
status,
configNodeLocation);
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/writelog/io/SingleFileLogReader.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/writelog/io/SingleFileLogReader.java
index f67cfdc286a70..46b255938b63c 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/writelog/io/SingleFileLogReader.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/writelog/io/SingleFileLogReader.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.confignode.writelog.io;
import org.apache.iotdb.confignode.consensus.request.ConfigPhysicalPlan;
+import org.apache.iotdb.confignode.i18n.ConfigNodeMessages;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -77,7 +78,7 @@ public boolean hasNext() {
int readLen = logStream.read(buffer, 0, logSize);
if (readLen < logSize) {
- throw new IOException("Reach eof");
+ throw new IOException(ConfigNodeMessages.REACH_EOF);
}
final long checkSum = logStream.readLong();
@@ -86,10 +87,12 @@ public boolean hasNext() {
if (checkSummer.getValue() != checkSum) {
throw new IOException(
String.format(
- "The check sum of the No.%d log batch is incorrect! In "
+ ConfigNodeMessages.THE_CHECK_SUM_OF_THE_NO_LOG_BATCH_IS_INCORRECT
+ "file: "
+ "%d Calculated: %d.",
- idx, checkSum, checkSummer.getValue()));
+ idx,
+ checkSum,
+ checkSummer.getValue()));
}
batchLogReader = new BatchLogReader(ByteBuffer.wrap(buffer));
diff --git a/iotdb-core/consensus/pom.xml b/iotdb-core/consensus/pom.xml
index ee7bdeb6c9882..1939acbe42f4e 100644
--- a/iotdb-core/consensus/pom.xml
+++ b/iotdb-core/consensus/pom.xml
@@ -154,6 +154,10 @@
random
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+      </plugin>
diff --git a/iotdb-core/consensus/src/main/i18n/en/org/apache/iotdb/consensus/i18n/ConsensusMessages.java b/iotdb-core/consensus/src/main/i18n/en/org/apache/iotdb/consensus/i18n/ConsensusMessages.java
new file mode 100644
index 0000000000000..9c7eec311757a
--- /dev/null
+++ b/iotdb-core/consensus/src/main/i18n/en/org/apache/iotdb/consensus/i18n/ConsensusMessages.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.consensus.i18n;
+
+/**
+ * Shared / common consensus messages used across all consensus implementations. Log messages use
+ * SLF4J {@code {}} placeholders; exception messages use {@code %s} (String.format) or plain
+ * strings.
+ */
+public final class ConsensusMessages {
+
+ private ConsensusMessages() {}
+
+ // ===================== ConsensusFactory =====================
+
+ public static final String CONSTRUCT_FAILED_MSG =
+ "Construct consensusImpl failed, Please check your consensus className %s";
+ public static final String COULD_NOT_CONSTRUCT_ICONSENSUS =
+ "Couldn't Construct IConsensus class: {}";
+ public static final String UTILITY_CLASS_CONSENSUS_FACTORY = "Utility class ConsensusFactory";
+
+ // ===================== Exception messages (String.format %s) =====================
+
+ public static final String CONSENSUS_GROUP_NOT_EXIST =
+ "The consensus group %s doesn't exist";
+ public static final String CONSENSUS_GROUP_ALREADY_EXIST =
+ "The consensus group %d already exists";
+ public static final String ILLEGAL_PEER_NUM =
+ "Illegal peer num %d when adding consensus group";
+ public static final String ILLEGAL_PEER_ENDPOINT =
+ "Illegal addConsensusGroup because currentNode %s is not in consensusGroup %s";
+ public static final String PEER_ALREADY_IN_GROUP =
+ "Peer %s:%d is already in group %d";
+ public static final String PEER_NOT_IN_GROUP =
+ "Peer %s is not in group %d";
+
+ // ===================== Common log messages (SLF4J {}) =====================
+
+ public static final String UNABLE_TO_CREATE_CONSENSUS_DIR =
+ "Unable to create consensus dir at {}";
+ public static final String UNABLE_TO_CREATE_CONSENSUS_DIR_FMT =
+ "Unable to create consensus dir at %s";
+ public static final String UNABLE_TO_CREATE_CONSENSUS_DIR_FOR_GROUP =
+ "Unable to create consensus dir for group {} at {}";
+ public static final String UNABLE_TO_CREATE_CONSENSUS_DIR_FOR_GROUP_FMT =
+ "Unable to create consensus dir for group %s";
+ public static final String CANNOT_CREATE_LOCAL_PEER =
+ "Cannot create local peer for group {} with peers {}";
+ public static final String FAILED_TO_RESET_PEER_LIST_WHILE_START =
+ "Failed to reset peer list while start";
+ public static final String RECORD_CORRECT_PEER_LIST =
+ "Record correct peer list: {}";
+ public static final String INTERRUPTED_WHEN_SHUTTING_DOWN_EXECUTOR =
+ "{}: interrupted when shutting down add Executor with exception {}";
+ public static final String INTERRUPTED_WHEN_SHUTTING_DOWN_EXECUTOR_RATIS =
+ "{}: interrupted when shutting down add Executor with exception ";
+ public static final String SET_ACTIVE_STATUS = "set {} active status to {}";
+
+ // ===================== Peer reset log messages (SLF4J {}) =====================
+
+ public static final String RESET_PEER_LIST_NOT_IN_CORRECT =
+ "[RESET PEER LIST] {} Local peer is not in the correct configuration, delete it.";
+ public static final String RESET_PEER_LIST_DELETE_LOCAL_PEER =
+ "[RESET PEER LIST] Local peer is not in the correct peer list, delete local peer {}";
+ public static final String RESET_PEER_LIST_REMOVE_SYNC_CHANNEL =
+ "[RESET PEER LIST] {} Remove sync channel with: {}";
+ public static final String RESET_PEER_LIST_FAILED_TO_REMOVE_SYNC_CHANNEL =
+ "[RESET PEER LIST] {} Failed to remove sync channel with: {}";
+ public static final String RESET_PEER_LIST_BUILD_SYNC_CHANNEL =
+ "[RESET PEER LIST] {} Build sync channel with: {}";
+ public static final String RESET_PEER_LIST_FAILED_TO_BUILD_SYNC_CHANNEL =
+ "[RESET PEER LIST] {} Failed to build sync channel with: {}";
+ public static final String RESET_PEER_LIST_RESET_RESULT =
+ "[RESET PEER LIST] {} Local peer list has been reset: {} -> {}";
+ public static final String RESET_PEER_LIST_NOTHING_TO_RESET =
+ "[RESET PEER LIST] {} The current peer list is correct, nothing need to be reset: {}";
+ public static final String RESET_PEER_LIST_WILL_RESET =
+ "[RESET PEER LIST] Peer list will be reset from {} to {}";
+ public static final String RESET_PEER_LIST_RESET_SUCCESS =
+ "[RESET PEER LIST] Peer list has been reset to {}";
+ public static final String RESET_PEER_LIST_RESET_FAILED =
+ "[RESET PEER LIST] Peer list failed to reset to {}, reply is {}";
+
+ // ===================== SimpleConsensus messages =====================
+
+ public static final String SIMPLE_CONSENSUS_NOT_SUPPORT_MEMBERSHIP_CHANGES =
+ "SimpleConsensus does not support membership changes";
+ public static final String SIMPLE_CONSENSUS_NOT_SUPPORT_LEADER_TRANSFER =
+ "SimpleConsensus does not support leader transfer";
+ public static final String SIMPLE_CONSENSUS_NOT_SUPPORT_SNAPSHOT_TRIGGER =
+ "SimpleConsensus does not support snapshot trigger currently";
+ public static final String SIMPLE_CONSENSUS_NOT_SUPPORT_RESET_PEER_LIST =
+ "SimpleConsensus does not support reset peer list";
+ public static final String SIMPLE_CONSENSUS_NOOP_RECORD_PEER_LIST =
+ "SimpleConsensus will do nothing when calling recordCorrectPeerListBeforeStarting";
+
+ // ===================== RPC processor common messages =====================
+
+ public static final String UNEXPECTED_CONSENSUS_GROUP_ID_FOR_REQUEST =
+ "unexpected consensusGroupId %s for %s request";
+ public static final String UNEXPECTED_CONSENSUS_GROUP_ID_FOR_SYNC_LOG =
+ "unexpected consensusGroupId %s for TSyncLogEntriesReq which size is %s";
+ public static final String SYNC_LOG_SYSTEM_READ_ONLY =
+ "fail to sync logEntries because system is read-only.";
+ public static final String PEER_INACTIVE_NOT_READY =
+ "Peer is inactive and not ready to receive sync log request, %s, DataNode Id: %s";
+ public static final String PEER_INACTIVE_NOT_READY_WRITE =
+ "Peer is inactive and not ready to write request, %s, DataNode Id: %s";
+ public static final String REMOVE_SYNC_LOG_CHANNEL_FAILED =
+ "remove sync log channel failed";
+ public static final String FAILED_TO_CLEANUP_TRANSFERRED_SNAPSHOT =
+ "failed to cleanup transferred snapshot {}";
+
+ // ===================== Wait release resource messages =====================
+
+ public static final String WAIT_RELEASE_HAS_RELEASED =
+ "[WAIT RELEASE] {} has released all region related resource";
+ public static final String WAIT_RELEASE_STILL_RELEASING =
+ "[WAIT RELEASE] {} is still releasing all region related resource";
+ public static final String ERROR_WAITING_RELEASE_RESOURCE =
+ "error when waiting %s to release all region related resource. %s";
+ public static final String THREAD_INTERRUPTED_WAITING_RELEASE_RESOURCE =
+ "thread interrupted when waiting %s to release all region related resource. %s";
+
+ // ===================== Duplicate peer warning =====================
+
+ public static final String DUPLICATE_PEERS_IGNORED =
+ "Duplicate peers in the input list, ignore the duplicates.";
+
+ // ===================== Consensus pipe name =====================
+
+ public static final String INVALID_PIPE_NAME = "Invalid pipe name: ";
+
+ // ===================== Not active write reject =====================
+
+ public static final String NODE_NOT_ACTIVE_REJECT_WRITE =
+ "current node is not active and is not ready to receive user write.";
+
+ // ===================== Utility messages =====================
+
+ public static final String FAILED_TO_SERIALIZE_PEER = "Failed to serialize Peer";
+ public static final String VISIT_FILE_FAILED = "visit file {} failed due to {}";
+ public static final String IO_EXCEPTION_LISTING_SNAPSHOT_DIR =
+ "IOException occurred during listing snapshot directory: ";
+ public static final String FAILED_TO_LOAD_KEYSTORE =
+ "Failed or truststore to load keystore file";
+ public static final String KEYSTORE_FILE_NOT_FOUND = "keystore or truststore file not found";
+ public static final String FAILED_TO_READ_KEYSTORE =
+ "Failed to read key store or trust store.";
+ public static final String NOT_IMPLEMENTED_YET = "not implemented yet";
+}
diff --git a/iotdb-core/consensus/src/main/i18n/en/org/apache/iotdb/consensus/i18n/IoTConsensusMessages.java b/iotdb-core/consensus/src/main/i18n/en/org/apache/iotdb/consensus/i18n/IoTConsensusMessages.java
new file mode 100644
index 0000000000000..5817bbf637874
--- /dev/null
+++ b/iotdb-core/consensus/src/main/i18n/en/org/apache/iotdb/consensus/i18n/IoTConsensusMessages.java
@@ -0,0 +1,301 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.consensus.i18n;
+
+/**
+ * IoTConsensus (v1) specific messages. Log messages use SLF4J {@code {}} placeholders; exception
+ * messages use {@code %s} (String.format) or plain strings.
+ */
+public final class IoTConsensusMessages {
+
+ private IoTConsensusMessages() {}
+
+ // ===================== IoTConsensus lifecycle =====================
+
+ public static final String INACTIVATE_NEW_PEER =
+ "[IoTConsensus] inactivate new peer: {}";
+ public static final String NOTIFY_PEERS_BUILD_SYNC_LOG =
+ "[IoTConsensus] notify current peers to build sync log...";
+ public static final String START_TAKE_SNAPSHOT =
+ "[IoTConsensus] start to take snapshot...";
+ public static final String START_TRANSMIT_SNAPSHOT =
+ "[IoTConsensus] start to transmit snapshot...";
+ public static final String TRIGGER_LOAD_SNAPSHOT =
+ "[IoTConsensus] trigger new peer to load snapshot...";
+ public static final String ACTIVATE_NEW_PEER =
+ "[IoTConsensus] activate new peer...";
+ public static final String CLEANUP_REMOTE_SNAPSHOT =
+ "[IoTConsensus] clean up remote snapshot...";
+ public static final String FAILED_CLEANUP_REMOTE_SNAPSHOT =
+ "[IoTConsensus] failed to cleanup remote snapshot";
+ public static final String ADD_REMOTE_PEER_FAILED_CLEANUP =
+ "[IoTConsensus] add remote peer failed, automatic cleanup side effects...";
+ public static final String CLEANUP_LOCAL_SNAPSHOT =
+ "[IoTConsensus] clean up local snapshot...";
+ public static final String NOT_SUPPORT_LEADER_TRANSFER =
+ "IoTConsensus does not support leader transfer";
+
+ // ===================== IoTConsensusServerImpl =====================
+
+ public static final String THROTTLE_DOWN = "[Throttle Down] index:{}, safeIndex:{}";
+ public static final String DATA_REGION_INDEX_AFTER_BUILD =
+ "DataRegion[{}]: index after build: safeIndex:{}, searchIndex: {}, lastConsensusRequest: {}";
+ public static final String WRITE_OPERATION_FAILED =
+ "{}: write operation failed. searchIndex: {}. Code: {}";
+ public static final String FAILED_TO_THROTTLE_DOWN = "Failed to throttle down because ";
+
+ // ===================== Snapshot =====================
+
+ public static final String CANNOT_MKDIR_FOR_SNAPSHOT =
+ "%s: cannot mkdir for snapshot";
+ public static final String UNKNOWN_ERROR_TAKING_SNAPSHOT =
+ "unknown error when taking snapshot";
+ public static final String ERROR_TAKING_SNAPSHOT =
+ "error when taking snapshot";
+ public static final String CANNOT_FIND_SNAPSHOT_DIR =
+ "Can not find any snapshot dir after build a new snapshot for group {}";
+ public static final String DELETE_OLD_SNAPSHOT_FAILED =
+ "Delete old snapshot dir {} failed";
+ public static final String FILE_NOT_EXIST = "File not exist: {}";
+ public static final String CLEANUP_LOCAL_SNAPSHOT_FAIL =
+ "Cleanup local snapshot fail. You may manually delete {}.";
+ public static final String INVALID_SNAPSHOT_FILE =
+ "invalid snapshot file. snapshotId: %s, filePath: %s";
+ public static final String ERROR_RECEIVING_SNAPSHOT =
+ "error when receiving snapshot %s";
+ public static final String INVALID_SNAPSHOT_RELATIVE_PATH =
+ "Invalid snapshotRelativePath: ";
+
+ // ===================== Snapshot transmission =====================
+
+ public static final String SNAPSHOT_TRANSMISSION_START =
+ "[SNAPSHOT TRANSMISSION] Start to transmit snapshots ({} files, total size {}) from dir {}";
+ public static final String SNAPSHOT_TRANSMISSION_ALL_FILES =
+ "[SNAPSHOT TRANSMISSION] All the files below shell be transmitted: {}";
+ public static final String SNAPSHOT_TRANSMISSION_ERROR =
+ "[SNAPSHOT TRANSMISSION] Error when transmitting snapshot fragment to %s";
+ public static final String SNAPSHOT_TRANSMISSION_PROGRESS =
+ "[SNAPSHOT TRANSMISSION] The overall progress for dir {}: files {}/{} done, size {}/{} done, time {} passed. File {} done.";
+ public static final String SNAPSHOT_TRANSMISSION_SEND_ERROR =
+ "[SNAPSHOT TRANSMISSION] Error when send snapshot file to %s";
+ public static final String SNAPSHOT_TRANSMISSION_COMPLETE =
+ "[SNAPSHOT TRANSMISSION] After {}, successfully transmit all snapshots from dir {}";
+
+ // ===================== Peer operations =====================
+
+ public static final String ERROR_INACTIVATING_PEER =
+ "error when inactivating %s. %s";
+ public static final String ERROR_INACTIVATING_PEER_SHORT =
+ "error when inactivating %s";
+ public static final String ERROR_TRIGGERING_SNAPSHOT_LOAD =
+ "error when triggering snapshot load %s. %s";
+ public static final String ERROR_ACTIVATING_PEER =
+ "error when activating %s. %s";
+ public static final String ERROR_ACTIVATING_PEER_SHORT =
+ "error when activating %s";
+ public static final String CLEANUP_REMOTE_SNAPSHOT_FAILED =
+ "cleanup remote snapshot failed of %s ,status is %s";
+ public static final String CLEANUP_REMOTE_SNAPSHOT_FAILED_SHORT =
+ "cleanup remote snapshot failed of %s";
+
+ // ===================== Sync log =====================
+
+ public static final String NOTIFY_PEERS_BUILD_SYNC_LOG_DETAIL =
+ "[IoTConsensus] notify current peers to build sync log. group member: {}, target: {}";
+ public static final String BUILD_SYNC_LOG_CHANNEL_FROM =
+ "[IoTConsensus] build sync log channel from {}";
+ public static final String BUILD_SYNC_LOG_CHANNEL_FAILED =
+ "build sync log channel failed from %s to %s";
+ public static final String CANNOT_NOTIFY_BUILD_SYNC_LOG =
+ "cannot notify {} to build sync log channel. Please check the status of this node manually";
+ public static final String BUILD_SYNC_LOG_CHANNEL_SUCCESS =
+ "[IoTConsensus] Successfully build sync log channel to {} with initialSyncIndex {}. {}";
+ public static final String SYNC_LOG_CHANNEL_STARTED =
+ "Sync log channel has started.";
+ public static final String SYNC_LOG_CHANNEL_START_LATER =
+ "Sync log channel maybe start later.";
+ public static final String REMOVING_SYNC_LOG_CHANNEL_FAILED =
+ "removing sync log channel failed from {} to {}";
+ public static final String EXCEPTION_REMOVING_SYNC_LOG_CHANNEL =
+ "Exception happened during removing sync log channel from {} to {}";
+ public static final String LOG_DISPATCHER_REMOVED_CLEANUP =
+ "[IoTConsensus] log dispatcher to {} removed and cleanup";
+ public static final String EXCEPTION_REMOVING_LOG_DISPATCHER =
+ "[IoTConsensus] Exception happened during removing log dispatcher thread, but configuration.dat will still be removed.";
+ public static final String SUGGEST_RESTART_DATANODE =
+ "It's suggested restart the DataNode to remove log dispatcher thread.";
+ public static final String LOG_DISPATCHER_REMOVED_AND_CLEANUP =
+ "[IoTConsensus] Log dispatcher thread to {} has been removed and cleanup";
+ public static final String CONFIGURATION_UPDATED =
+ "[IoTConsensus Configuration] Configuration updated to {}. {}";
+
+ // ===================== Wait sync log =====================
+
+ public static final String WAIT_SYNC_LOG_COMPLETED =
+ "[WAIT LOG SYNC] {} SyncLog is completed. TargetIndex: {}, CurrentSyncIndex: {}";
+ public static final String WAIT_SYNC_LOG_IN_PROGRESS =
+ "[WAIT LOG SYNC] {} SyncLog is still in progress. TargetIndex: {}, CurrentSyncIndex: {}";
+ public static final String ERROR_WAITING_SYNC_LOG_COMPLETE =
+ "error when waiting %s to complete SyncLog. %s";
+ public static final String THREAD_INTERRUPTED_WAITING_SYNC_LOG =
+ "thread interrupted when waiting %s to complete SyncLog. %s";
+
+ // ===================== Index controller =====================
+
+ public static final String UPDATE_INDEX =
+ "update index from currentIndex {} to {} for file prefix {} in {}";
+ public static final String VERSION_FILE_UPDATED =
+ "version file updated, previous: {}, current: {}";
+ public static final String FAILED_FLUSH_SYNC_INDEX =
+ "failed to flush sync index because previous version file {} does not exists. "
+ + "It may be caused by the target Peer is removed from current group. "
+ + "target file is {}";
+ public static final String ERROR_FLUSHING_NEXT_VERSION =
+ "Error occurred when flushing next version";
+ public static final String VERSION_FILE_UPGRADE =
+ "version file upgrade, previous: {}, current: {}";
+ public static final String ERROR_UPGRADING_VERSION_FILE =
+ "Error occurred when upgrading version file";
+ public static final String DELETE_OUTDATED_VERSION_FILE_FAILED =
+ "Delete outdated version file {} failed";
+ public static final String ERROR_CREATING_NEW_FILE =
+ "Error occurred when creating new file {}";
+ public static final String CONFIGURATION_EMPTY_UNEXPECTED =
+ "Configuration is empty, which is unexpected. Safe deleted search index won't be updated this time.";
+ public static final String SEARCH_INDEX_SMALLER_THAN_SAFELY_DELETED =
+ "The searchIndex for this region({}) is smaller than the safelyDeletedSearchIndex when "
+ + "the node is restarted, which means that the data of the current region is not flushed "
+ + "by the wal, but has been synchronized to other nodes. At this point, "
+ + "different replicas have been inconsistent and cannot be automatically recovered. "
+ + "To prevent subsequent logs from marking smaller searchIndex and exacerbating the "
+ + "inconsistency, we manually set the searchIndex({}) to safelyDeletedSearchIndex({}) "
+ + "here to reduce the impact of this problem in the future";
+
+ // ===================== LogDispatcher =====================
+
+ public static final String UNABLE_TO_SHUTDOWN_LOG_DISPATCHER =
+ "Unable to shutdown LogDispatcher service after {} seconds";
+ public static final String UNEXPECTED_INTERRUPTION_CLOSING_LOG_DISPATCHER =
+ "Unexpected Interruption when closing LogDispatcher service ";
+ public static final String DISPATCHER_STARTS =
+ "{}: Dispatcher for {} starts";
+ public static final String DISPATCHER_EXITS =
+ "{}: Dispatcher for {} exits";
+ public static final String DISPATCHER_DID_NOT_STOP =
+ "{}: Dispatcher for {} didn't stop after 30s.";
+ public static final String UNEXPECTED_ERROR_IN_LOG_DISPATCHER =
+ "Unexpected error in logDispatcher for peer {}";
+ public static final String PUSH_LOG_TO_QUEUE =
+ "{}->{}: Push a log to the queue, where the queue length is {}";
+ public static final String LOG_QUEUE_FULL =
+ "{}: Log queue of {} is full, ignore the log to this node, searchIndex: {}";
+ public static final String GET_BATCH_START_INDEX =
+ "{}: startIndex: {}, maxIndex: {}, pendingEntries size: {}, bufferedEntries size: {}";
+ public static final String ACCUMULATED_FROM_WAL_WHEN_EMPTY =
+ "{} : accumulated a {} from wal when empty";
+ public static final String ACCUMULATED_FROM_WAL =
+ "{} : accumulated a {} from wal";
+ public static final String ACCUMULATED_FROM_QUEUE =
+ "{} : accumulated a {} from queue";
+ public static final String ACCUMULATED_FROM_QUEUE_AND_WAL_GAP =
+ "gap {} : accumulated a {} from queue and wal when gap";
+ public static final String ACCUMULATED_FROM_QUEUE_AND_WAL =
+ "{} : accumulated a {} from queue and wal";
+ public static final String SEND_BATCH =
+ "Send Batch[startIndex:{}, endIndex:{}] to ConsensusGroup:{}";
+ public static final String CANNOT_SYNC_LOGS_TO_PEER =
+ "Can not sync logs to peer {} because";
+ public static final String CONSTRUCT_FROM_WAL =
+ "construct from WAL for one Entry, index : {}";
+ public static final String WAIT_NEXT_WAL_INTERRUPTED =
+ "wait for next WAL entry is interrupted";
+ public static final String SEARCH_ENTRY_FOUND_SMALLER =
+ "search for one Entry which index is {}, but find a smaller one, index : {}";
+ public static final String SEARCH_ENTRY_FOUND_LARGER =
+ "search for one Entry which index is {}, but find a larger one, index : {}."
+ + "Perhaps the wal file is corrupted, in which case we skip it and choose a larger index to replicate";
+ public static final String DATA_REGION_CONSTRUCT_FROM_WAL =
+ "DataRegion[{}]->{}: currentIndex: {}, maxIndex: {}";
+
+ // ===================== DispatchLogHandler =====================
+
+ public static final String CANNOT_SEND_TO_PEER =
+ "Can not send {} to peer {} for {} times because {}";
+ public static final String SEND_COMPLETE_BUT_CONTAINS_ERROR =
+ "Send {} to peer {} complete but contains unsuccessful status: {}";
+ public static final String CANNOT_SEND_TO_PEER_ON_ERROR =
+ "Can not send {} to peer for {} times {} because {}";
+ public static final String SKIP_RETRY_TAPPLICATION_EXCEPTION =
+ "Skip retrying this Batch {} because of TApplicationException.";
+ public static final String LOG_DISPATCHER_STOPPED_NO_RETRY =
+ "LogDispatcherThread {} has been stopped, "
+ + "we will not retrying this Batch {} after {} times";
+
+ // ===================== SyncLogCacheQueue =====================
+
+ public static final String CACHE_AND_INSERT_START =
+ "cacheAndInsert start: source = {}, region = {}, queue size {}, startSyncIndex = {}, endSyncIndex = {}";
+ public static final String CACHE_AND_INSERT_END =
+ "cacheAndInsert end: source = {}, region = {}, queue size {}, startSyncIndex = {}, endSyncIndex = {}, sortTime = {}ms, applyTime = {}ms";
+ public static final String WAITING_TARGET_REQUEST_TIMEOUT =
+ "waiting target request timeout. current index: {}, target index: {}";
+ public static final String CURRENT_WAITING_INTERRUPTED =
+ "current waiting is interrupted. SyncIndex: {}. Exception: ";
+
+ // ===================== SyncStatus =====================
+
+ public static final String SYNC_STATUS_OFFER =
+ "Offer Batch[startIndex:{}, endIndex:{}] to SyncStatus. "
+ + "Current size of SyncStatus: {}. Pending Size: {}";
+
+ // ===================== AsyncClient =====================
+
+ public static final String UNEXPECTED_EXCEPTION_IN_CLIENT =
+ "Unexpected exception occurs in {}, error msg is {}";
+ public static final String CLIENT_INVALIDATED = "This client has been invalidated";
+
+ // ===================== RPC Processor execute log sync =====================
+
+ public static final String EXECUTE_SYNC_LOG_ENTRIES =
+ "execute TSyncLogEntriesReq for {} with result {}";
+
+ // ===================== Memory Manager =====================
+
+ public static final String RESERVING_BYTES_FOR_REQUEST_SUCCEEDS =
+ "Reserving {} bytes for request {} succeeds, current total usage {}";
+ public static final String RESERVING_BYTES_FOR_REQUEST_FAILS =
+ "Reserving {} bytes for request {} fails, current total usage {}";
+ public static final String SKIP_MEMORY_RESERVATION =
+ "Skip memory reservation for {} because its ref count is not 0";
+ public static final String RESERVING_BYTES_FOR_BATCH_SUCCEEDS =
+ "Reserving {} bytes for batch {}-{} succeeds, current total usage {}";
+ public static final String RESERVING_BYTES_FOR_BATCH_FAILS =
+ "Reserving {} bytes for batch {}-{} fails, current total usage {}";
+ public static final String FREED_BYTES_FOR_REQUEST =
+ "Freed {} bytes for request {}, current total usage {}";
+ public static final String FREED_BYTES_FOR_BATCH =
+ "Freed {} bytes for batch {}-{}, current total usage {}";
+ public static final String FREE_MEMORY =
+ "{} free {} bytes, total memory size: {} bytes.";
+ public static final String INTERRUPTED_AFTER_POLLING_AND_SLEEPING =
+ "Interrupted after polling and sleeping";
+ public static final String INTERRUPTED_AFTER_GETTING_A_BATCH =
+ "Interrupted after getting a batch";
+}
diff --git a/iotdb-core/consensus/src/main/i18n/en/org/apache/iotdb/consensus/i18n/IoTConsensusV2Messages.java b/iotdb-core/consensus/src/main/i18n/en/org/apache/iotdb/consensus/i18n/IoTConsensusV2Messages.java
new file mode 100644
index 0000000000000..2bf2a80e94aa7
--- /dev/null
+++ b/iotdb-core/consensus/src/main/i18n/en/org/apache/iotdb/consensus/i18n/IoTConsensusV2Messages.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.consensus.i18n;
+
+/**
+ * IoTConsensusV2 (pipe-based consensus) specific messages. Log messages use SLF4J {@code {}}
+ * placeholders; exception messages use {@code %s} (String.format) or plain strings.
+ */
+public final class IoTConsensusV2Messages {
+
+ private IoTConsensusV2Messages() {}
+
+ // ===================== IoTConsensusV2 lifecycle =====================
+
+ public static final String RECOVER_TASK_CANCELLED =
+ "IoTV2 Recover Task is cancelled";
+ public static final String RECOVER_FUTURE_EXCEPTION =
+ "Exception while waiting for recover future completion";
+ public static final String RECOVER_TASK_INTERRUPTED =
+ "IoTV2 Recover Task is interrupted";
+ public static final String FAILED_RECOVER_CONSENSUS =
+ "Failed to recover consensus from {} for {}, ignore it and continue recover other group, async backend checker thread will automatically deregister related pipe side effects for this failed consensus group.";
+ public static final String FAILED_RECOVER_CONSENSUS_READ_DIR =
+ "Failed to recover consensus from {} because read dir failed";
+ public static final String FAILED_RECOVER_CONSENSUS_SHORT =
+ "Failed to recover consensus from {}";
+
+ // ===================== IoTConsensusV2 peer operations =====================
+
+ public static final String START_DELETE_LOCAL_PEER =
+ "[{}] start to delete local peer for group {}";
+ public static final String FINISH_DELETE_LOCAL_PEER =
+ "[{}] finish deleting local peer for group {}";
+ public static final String INACTIVATE_NEW_PEER =
+ "[{}] inactivate new peer: {}";
+ public static final String NOTIFY_CREATE_CONSENSUS_PIPES =
+ "[{}] notify current peers to create consensus pipes...";
+ public static final String WAIT_PEERS_FINISH_TRANSFER =
+ "[{}] wait until all the other peers finish transferring...";
+ public static final String ACTIVATE_NEW_PEER =
+ "[{}] activate new peer...";
+ public static final String ADD_REMOTE_PEER_FAILED_CLEANUP =
+ "[{}] add remote peer failed, automatic cleanup side effects...";
+ public static final String FAILED_CLEANUP_SIDE_EFFECTS =
+ "[{}] failed to cleanup side effects after failed to add remote peer";
+ public static final String NOTIFY_DROP_CONSENSUS_PIPES =
+ "[{}] notify other peers to drop consensus pipes...";
+ public static final String INACTIVATE_PEER =
+ "[{}] inactivate peer {}";
+ public static final String WAIT_TARGET_PEER_COMPLETE_TRANSFER =
+ "[{}] wait target peer{} complete transfer...";
+ public static final String WAIT_PEER_RELEASE_RESOURCE =
+ "[{}] wait {} to release all resource...";
+ public static final String NOT_SUPPORT_LEADER_TRANSFER =
+ "%s does not support leader transfer";
+
+ // ===================== IoTConsensusV2ServerImpl =====================
+
+ public static final String ERROR_SET_PEER_ACTIVE =
+ "error when set peer %s to active %s. result status: %s";
+ public static final String ERROR_SET_PEER_ACTIVE_SHORT =
+ "error when set peer %s to active %s";
+ public static final String TARGET_PEER_MAY_BE_DOWN =
+ "target peer may be down, error when set peer {} to active {}";
+ public static final String CANNOT_NOTIFY_PEER_CREATE_PIPE =
+ "{} cannot notify peer {} to create consensus pipe, may because that peer is unknown currently, please manually check!";
+ public static final String CANNOT_CREATE_CONSENSUS_PIPE =
+ "{} cannot create consensus pipe to {}, may because target peer is unknown currently, please manually check!";
+ public static final String ERROR_NOTIFY_PEER_CREATE_PIPE =
+ "error when notify peer %s to create consensus pipe";
+ public static final String CANNOT_NOTIFY_PEER_DROP_PIPE =
+ "{} cannot notify peer {} to drop consensus pipe, may because that peer is unknown currently, please manually check!";
+ public static final String CANNOT_DROP_CONSENSUS_PIPE =
+ "{} cannot drop consensus pipe to {}, may because target peer is unknown currently, please manually check!";
+ public static final String ERROR_NOTIFY_PEER_DROP_PIPE =
+ "error when notify peer %s to drop consensus pipe";
+ public static final String INTERRUPTED_WAITING_TRANSFER =
+ "{} is interrupted when waiting for transfer completed";
+ public static final String INTERRUPTED_WAITING_TRANSFER_FMT =
+ "%s is interrupted when waiting for transfer completed";
+ public static final String CANNOT_CHECK_PIPE_TRANSMISSION =
+ "{} cannot check consensus pipes transmission completed to peer {}";
+ public static final String ERROR_CHECK_PIPE_TRANSMISSION =
+ "error when check consensus pipes transmission completed to peer %s";
+ public static final String CANNOT_CHECK_PIPE_TRANSMISSION_SHORT =
+ "{} cannot check consensus pipes transmission completed";
+
+ // ===================== IoTConsensusV2RPCServiceProcessor =====================
+
+ public static final String UNEXPECTED_GROUP_SET_ACTIVE =
+ "unexpected consensusGroupId %s for set active request %s";
+ public static final String UNEXPECTED_GROUP_CREATE_PIPE =
+ "unexpected consensusGroupId %s for create consensus pipe request %s";
+ public static final String UNEXPECTED_GROUP_DROP_PIPE =
+ "unexpected consensusGroupId %s for drop consensus pipe request %s";
+ public static final String UNEXPECTED_GROUP_CHECK_TRANSFER =
+ "unexpected consensusGroupId %s for check transfer completed request %s";
+ public static final String UNEXPECTED_GROUP_WAIT_RELEASE =
+ "unexpected consensusGroupId %s for TWaitReleaseAllRegionRelatedResourceRes request";
+ public static final String FAILED_CREATE_CONSENSUS_PIPE =
+ "Failed to create consensus pipe to target peer with req {}";
+ public static final String FAILED_DROP_CONSENSUS_PIPE =
+ "Failed to drop consensus pipe to target peer with req {}";
+ public static final String FAILED_CHECK_CONSENSUS_PIPE =
+ "Failed to check consensus pipe completed with req {}, set is completed to {}";
+}
diff --git a/iotdb-core/consensus/src/main/i18n/en/org/apache/iotdb/consensus/i18n/RatisMessages.java b/iotdb-core/consensus/src/main/i18n/en/org/apache/iotdb/consensus/i18n/RatisMessages.java
new file mode 100644
index 0000000000000..e33e9f2de8a10
--- /dev/null
+++ b/iotdb-core/consensus/src/main/i18n/en/org/apache/iotdb/consensus/i18n/RatisMessages.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.consensus.i18n;
+
+/**
+ * Ratis consensus specific messages. Log messages use SLF4J {@code {}} placeholders; exception
+ * messages use {@code %s} (String.format) or plain strings.
+ */
+public final class RatisMessages {
+
+ private RatisMessages() {}
+
+ // ===================== RatisConsensus =====================
+
+ public static final String INTERRUPTED_RETRYING_WRITE =
+ "{}: interrupted when retrying for write request {}";
+ public static final String NULL_REPLY_IN_WRITE_WITH_RETRY =
+ "null reply received in writeWithRetry for request ";
+ public static final String LEADER_READ_ONLY_STEP_DOWN_FAILED =
+ "leader {} read only, force step down failed due to, ";
+ public static final String TRY_ADD_CONFLICTING_PEER =
+ "{}: try to add a peer {} with conflicting id or address in {}";
+ public static final String IS_LEADER_REQUEST_FAILED =
+ "isLeader request failed with exception: ";
+ public static final String IS_LEADER_READY_REQUEST_FAILED =
+ "isLeaderReady request failed with exception: ";
+ public static final String GET_LOGICAL_CLOCK_REQUEST_FAILED =
+ "getLogicalClock request failed with exception: ";
+ public static final String IS_LEADER_READY_CHECKING_FAILED =
+ "isLeaderReady checking failed with exception: ";
+ public static final String LEADER_STILL_NOT_READY =
+ "{}: leader is still not ready after {}ms";
+ public static final String UNEXPECTED_INTERRUPTION_WAIT_LEADER_READY =
+ "Unexpected interruption when waitUntilLeaderReady";
+ public static final String FETCH_DIVISION_INFO_FAILED =
+ "fetch division info for group {} failed due to: ";
+ public static final String TRIGGER_SNAPSHOT_SUCCESS =
+ "{} group {}: successfully taken snapshot at index {} with force = {}";
+ public static final String GET_GROUP_FAILED =
+ "get group {} failed ";
+ public static final String BORROW_CLIENT_FROM_POOL_FAILED =
+ "Borrow client from pool for group {} failed.";
+ public static final String TRANSFER_LEADER_FAILED_TIMEOUT =
+ "transferLeader for group %s to %s failed. This could be due to a timeout, "
+ + "especially during heavy disk usage. Consider increasing the "
+ + "'ratis_transfer_leader_timeout_ms' configuration property.";
+ public static final String TRANSFER_LEADER_FAILED_STARTUP =
+ "transferLeader for group %s to %s failed. This could be due to a timeout, "
+ + "especially during initial startup. Consider increasing the "
+ + "'ratis_rpc_transfer_leader_timeout_ms' configuration property.";
+
+ // ===================== ApplicationStateMachineProxy =====================
+
+ public static final String STATEMACHINE_RUNTIME_EXCEPTION =
+ "application statemachine throws a runtime exception: ";
+ public static final String INTERRUPTED_WAITING_SYSTEM_READY =
+ "{}: interrupted when waiting until system ready: ";
+ public static final String REQUEST_MESSAGE_REQUIRED =
+ "An RequestMessage is required but got {}";
+ public static final String UNABLE_TO_CREATE_TEMP_SNAPSHOT_DIR =
+ "Unable to create temp snapshotDir at {}";
+ public static final String ATOMIC_RENAME_FAILED =
+ "{} atomic rename {} to {} failed with exception {}";
+ public static final String SNAPSHOT_DIR_INCOMPLETE_DELETING =
+ "Snapshot directory is incomplete, deleting {}";
+
+ // ===================== RatisClient =====================
+
+ public static final String CANNOT_CLOSE_RAFT_CLIENT =
+ "cannot close raft client ";
+ public static final String RAFT_CLIENT_REQUEST_FAILED =
+ "{}: raft client request failed and caught exception: ";
+
+ // ===================== DiskGuardian =====================
+
+ public static final String ERROR_LISTING_FILES =
+ "{}: Error caught when listing files for {} at {}:";
+ public static final String CLEAR_SNAPSHOT_FLAG_FAILED =
+ "{}: clear snapshot flag failed for group {}, please check the related implementation";
+ public static final String TAKE_SNAPSHOT_FAILED =
+ "{} take snapshot failed for group {} due to {}. Disk file status {}";
+
+ // ===================== SnapshotStorage =====================
+
+ public static final String CANNOT_CONSTRUCT_SNAPSHOT_DIR_STREAM =
+ "Cannot construct snapshot directory stream ";
+ public static final String CANNOT_RESOLVE_REAL_PATH =
+ "{} cannot resolve real path of {} due to ";
+
+ // ===================== ResponseMessage =====================
+
+ public static final String SERIALIZE_TSSTATUS_FAILED =
+ "serialize TSStatus failed {}";
+
+ // ===================== MetricRegistryManager =====================
+
+ public static final String REPORTER_DISABLED =
+ "Reporter is disabled from RatisMetricRegistries";
+ public static final String JMX_REPORTER_DISABLED =
+ "JMX Reporter is disabled from RatisMetricRegistries";
+ public static final String CONSOLE_REPORTER_DISABLED =
+ "Console Reporter is disabled from RatisMetricRegistries";
+}
diff --git a/iotdb-core/consensus/src/main/i18n/zh/org/apache/iotdb/consensus/i18n/ConsensusMessages.java b/iotdb-core/consensus/src/main/i18n/zh/org/apache/iotdb/consensus/i18n/ConsensusMessages.java
new file mode 100644
index 0000000000000..9802fe06a8d7e
--- /dev/null
+++ b/iotdb-core/consensus/src/main/i18n/zh/org/apache/iotdb/consensus/i18n/ConsensusMessages.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.consensus.i18n;
+
+/**
+ * 共享/通用共识消息,适用于所有共识实现。日志消息使用 SLF4J {@code {}} 占位符;异常消息使用 {@code %s}(String.format)或纯字符串。
+ */
+public final class ConsensusMessages {
+
+ private ConsensusMessages() {}
+
+ // ===================== ConsensusFactory =====================
+
+ public static final String CONSTRUCT_FAILED_MSG =
+ "构建共识实现失败,请检查共识类名 %s";
+ public static final String COULD_NOT_CONSTRUCT_ICONSENSUS =
+ "无法构建 IConsensus 类:{}";
+ public static final String UTILITY_CLASS_CONSENSUS_FACTORY = "工具类 ConsensusFactory";
+
+ // ===================== 异常消息(String.format %s)=====================
+
+ public static final String CONSENSUS_GROUP_NOT_EXIST =
+ "共识组 %s 不存在";
+ public static final String CONSENSUS_GROUP_ALREADY_EXIST =
+ "共识组 %d 已存在";
+ public static final String ILLEGAL_PEER_NUM =
+ "添加共识组时 peer 数量 %d 不合法";
+ public static final String ILLEGAL_PEER_ENDPOINT =
+ "添加共识组失败,当前节点 %s 不在共识组 %s 中";
+ public static final String PEER_ALREADY_IN_GROUP =
+ "Peer %s:%d 已在共识组 %d 中";
+ public static final String PEER_NOT_IN_GROUP =
+ "Peer %s 不在共识组 %d 中";
+
+ // ===================== 通用日志消息(SLF4J {})=====================
+
+ public static final String UNABLE_TO_CREATE_CONSENSUS_DIR =
+ "无法在 {} 创建共识目录";
+ public static final String UNABLE_TO_CREATE_CONSENSUS_DIR_FMT =
+ "无法在 %s 创建共识目录";
+ public static final String UNABLE_TO_CREATE_CONSENSUS_DIR_FOR_GROUP =
+ "无法为共识组 {} 在 {} 创建目录";
+ public static final String UNABLE_TO_CREATE_CONSENSUS_DIR_FOR_GROUP_FMT =
+ "无法为共识组 %s 创建目录";
+ public static final String CANNOT_CREATE_LOCAL_PEER =
+ "无法为共识组 {} 创建本地 peer,peers 为 {}";
+ public static final String FAILED_TO_RESET_PEER_LIST_WHILE_START =
+ "启动时重置 peer 列表失败";
+ public static final String RECORD_CORRECT_PEER_LIST =
+ "记录正确的 peer 列表:{}";
+ public static final String INTERRUPTED_WHEN_SHUTTING_DOWN_EXECUTOR =
+ "{}:关闭 Executor 时被中断,异常 {}";
+ public static final String INTERRUPTED_WHEN_SHUTTING_DOWN_EXECUTOR_RATIS =
+ "{}:关闭 Executor 时被中断,异常 ";
+ public static final String SET_ACTIVE_STATUS = "设置 {} 活跃状态为 {}";
+
+ // ===================== Peer 重置日志消息(SLF4J {})=====================
+
+ public static final String RESET_PEER_LIST_NOT_IN_CORRECT =
+ "[重置 PEER 列表] {} 本地 peer 不在正确的配置中,将其删除。";
+ public static final String RESET_PEER_LIST_DELETE_LOCAL_PEER =
+ "[重置 PEER 列表] 本地 peer 不在正确的 peer 列表中,删除本地 peer {}";
+ public static final String RESET_PEER_LIST_REMOVE_SYNC_CHANNEL =
+ "[重置 PEER 列表] {} 移除与 {} 的同步通道";
+ public static final String RESET_PEER_LIST_FAILED_TO_REMOVE_SYNC_CHANNEL =
+ "[重置 PEER 列表] {} 移除与 {} 的同步通道失败";
+ public static final String RESET_PEER_LIST_BUILD_SYNC_CHANNEL =
+ "[重置 PEER 列表] {} 建立与 {} 的同步通道";
+ public static final String RESET_PEER_LIST_FAILED_TO_BUILD_SYNC_CHANNEL =
+ "[重置 PEER 列表] {} 建立与 {} 的同步通道失败";
+ public static final String RESET_PEER_LIST_RESET_RESULT =
+ "[重置 PEER 列表] {} 本地 peer 列表已重置:{} -> {}";
+ public static final String RESET_PEER_LIST_NOTHING_TO_RESET =
+ "[重置 PEER 列表] {} 当前 peer 列表已正确,无需重置:{}";
+ public static final String RESET_PEER_LIST_WILL_RESET =
+ "[重置 PEER 列表] Peer 列表将从 {} 重置为 {}";
+ public static final String RESET_PEER_LIST_RESET_SUCCESS =
+ "[重置 PEER 列表] Peer 列表已重置为 {}";
+ public static final String RESET_PEER_LIST_RESET_FAILED =
+ "[重置 PEER 列表] Peer 列表重置为 {} 失败,回复为 {}";
+
+ // ===================== SimpleConsensus 消息 =====================
+
+ public static final String SIMPLE_CONSENSUS_NOT_SUPPORT_MEMBERSHIP_CHANGES =
+ "SimpleConsensus 不支持成员变更";
+ public static final String SIMPLE_CONSENSUS_NOT_SUPPORT_LEADER_TRANSFER =
+ "SimpleConsensus 不支持 leader 切换";
+ public static final String SIMPLE_CONSENSUS_NOT_SUPPORT_SNAPSHOT_TRIGGER =
+ "SimpleConsensus 目前不支持触发快照";
+ public static final String SIMPLE_CONSENSUS_NOT_SUPPORT_RESET_PEER_LIST =
+ "SimpleConsensus 不支持重置 peer 列表";
+ public static final String SIMPLE_CONSENSUS_NOOP_RECORD_PEER_LIST =
+ "SimpleConsensus 调用 recordCorrectPeerListBeforeStarting 时不执行任何操作";
+
+ // ===================== RPC 处理器通用消息 =====================
+
+ public static final String UNEXPECTED_CONSENSUS_GROUP_ID_FOR_REQUEST =
+ "共识组 ID %s 与 %s 请求不匹配";
+ public static final String UNEXPECTED_CONSENSUS_GROUP_ID_FOR_SYNC_LOG =
+ "共识组 ID %s 与 TSyncLogEntriesReq 不匹配,大小为 %s";
+ public static final String SYNC_LOG_SYSTEM_READ_ONLY =
+ "系统为只读模式,无法同步日志。";
+ public static final String PEER_INACTIVE_NOT_READY =
+ "Peer 处于非活跃状态,无法接收同步日志请求,%s,DataNode ID:%s";
+ public static final String PEER_INACTIVE_NOT_READY_WRITE =
+ "Peer 处于非活跃状态,无法接收写入请求,%s,DataNode ID:%s";
+ public static final String REMOVE_SYNC_LOG_CHANNEL_FAILED =
+ "移除同步日志通道失败";
+ public static final String FAILED_TO_CLEANUP_TRANSFERRED_SNAPSHOT =
+ "清理已传输的快照 {} 失败";
+
+ // ===================== 等待释放资源消息 =====================
+
+ public static final String WAIT_RELEASE_HAS_RELEASED =
+ "[等待释放] {} 已释放所有与 Region 相关的资源";
+ public static final String WAIT_RELEASE_STILL_RELEASING =
+ "[等待释放] {} 仍在释放与 Region 相关的资源";
+ public static final String ERROR_WAITING_RELEASE_RESOURCE =
+ "等待 %s 释放所有与 Region 相关的资源时出错。%s";
+ public static final String THREAD_INTERRUPTED_WAITING_RELEASE_RESOURCE =
+ "等待 %s 释放所有与 Region 相关的资源时线程被中断。%s";
+
+ // ===================== 重复 peer 警告 =====================
+
+ public static final String DUPLICATE_PEERS_IGNORED =
+ "输入列表中存在重复的 peer,已忽略重复项。";
+
+ // ===================== Consensus pipe name =====================
+
+ public static final String INVALID_PIPE_NAME = "无效的 pipe 名称:";
+
+ // ===================== 非活跃写入拒绝 =====================
+
+ public static final String NODE_NOT_ACTIVE_REJECT_WRITE =
+ "当前节点处于非活跃状态,无法接收用户写入请求。";
+
+ // ===================== 工具类消息 =====================
+
+ public static final String FAILED_TO_SERIALIZE_PEER = "序列化 Peer 失败";
+ public static final String VISIT_FILE_FAILED = "访问文件 {} 失败,原因 {}";
+ public static final String IO_EXCEPTION_LISTING_SNAPSHOT_DIR =
+ "列出快照目录时发生 IOException:";
+ public static final String FAILED_TO_LOAD_KEYSTORE =
+ "加载 keystore 或 truststore 文件失败";
+ public static final String KEYSTORE_FILE_NOT_FOUND = "keystore 或 truststore 文件未找到";
+ public static final String FAILED_TO_READ_KEYSTORE =
+ "读取 keystore 或 truststore 失败。";
+ public static final String NOT_IMPLEMENTED_YET = "尚未实现";
+}
diff --git a/iotdb-core/consensus/src/main/i18n/zh/org/apache/iotdb/consensus/i18n/IoTConsensusMessages.java b/iotdb-core/consensus/src/main/i18n/zh/org/apache/iotdb/consensus/i18n/IoTConsensusMessages.java
new file mode 100644
index 0000000000000..92bb5de1973e9
--- /dev/null
+++ b/iotdb-core/consensus/src/main/i18n/zh/org/apache/iotdb/consensus/i18n/IoTConsensusMessages.java
@@ -0,0 +1,299 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.consensus.i18n;
+
+/**
+ * IoTConsensus(v1)特有消息。日志消息使用 SLF4J {@code {}} 占位符;异常消息使用 {@code %s}(String.format)或纯字符串。
+ */
+public final class IoTConsensusMessages {
+
+ private IoTConsensusMessages() {}
+
+ // ===================== IoTConsensus 生命周期 =====================
+
+ public static final String INACTIVATE_NEW_PEER =
+ "[IoTConsensus] 将新 peer 设为非活跃:{}";
+ public static final String NOTIFY_PEERS_BUILD_SYNC_LOG =
+ "[IoTConsensus] 通知当前 peer 建立同步日志通道...";
+ public static final String START_TAKE_SNAPSHOT =
+ "[IoTConsensus] 开始创建快照...";
+ public static final String START_TRANSMIT_SNAPSHOT =
+ "[IoTConsensus] 开始传输快照...";
+ public static final String TRIGGER_LOAD_SNAPSHOT =
+ "[IoTConsensus] 触发新 peer 加载快照...";
+ public static final String ACTIVATE_NEW_PEER =
+ "[IoTConsensus] 激活新 peer...";
+ public static final String CLEANUP_REMOTE_SNAPSHOT =
+ "[IoTConsensus] 清理远程快照...";
+ public static final String FAILED_CLEANUP_REMOTE_SNAPSHOT =
+ "[IoTConsensus] 清理远程快照失败";
+ public static final String ADD_REMOTE_PEER_FAILED_CLEANUP =
+ "[IoTConsensus] 添加远程 peer 失败,正在自动清理副作用...";
+ public static final String CLEANUP_LOCAL_SNAPSHOT =
+ "[IoTConsensus] 清理本地快照...";
+ public static final String NOT_SUPPORT_LEADER_TRANSFER =
+ "IoTConsensus 不支持 leader 切换";
+
+ // ===================== IoTConsensusServerImpl =====================
+
+ public static final String THROTTLE_DOWN = "[限流] index:{}, safeIndex:{}";
+ public static final String DATA_REGION_INDEX_AFTER_BUILD =
+ "DataRegion[{}]:构建后的 index:safeIndex:{},searchIndex: {},lastConsensusRequest: {}";
+ public static final String WRITE_OPERATION_FAILED =
+ "{}:写入操作失败。searchIndex: {}。Code: {}";
+ public static final String FAILED_TO_THROTTLE_DOWN = "限流失败,原因 ";
+
+ // ===================== 快照 =====================
+
+ public static final String CANNOT_MKDIR_FOR_SNAPSHOT =
+ "%s:无法为快照创建目录";
+ public static final String UNKNOWN_ERROR_TAKING_SNAPSHOT =
+ "创建快照时发生未知错误";
+ public static final String ERROR_TAKING_SNAPSHOT =
+ "创建快照时出错";
+ public static final String CANNOT_FIND_SNAPSHOT_DIR =
+ "为共识组 {} 创建新快照后找不到任何快照目录";
+ public static final String DELETE_OLD_SNAPSHOT_FAILED =
+ "删除旧快照目录 {} 失败";
+ public static final String FILE_NOT_EXIST = "文件不存在:{}";
+ public static final String CLEANUP_LOCAL_SNAPSHOT_FAIL =
+ "清理本地快照失败。您可能需要手动删除 {}。";
+ public static final String INVALID_SNAPSHOT_FILE =
+ "无效的快照文件。snapshotId: %s,filePath: %s";
+ public static final String ERROR_RECEIVING_SNAPSHOT =
+ "接收快照 %s 时出错";
+ public static final String INVALID_SNAPSHOT_RELATIVE_PATH =
+ "无效的 snapshotRelativePath:";
+
+ // ===================== 快照传输 =====================
+
+ public static final String SNAPSHOT_TRANSMISSION_START =
+ "[快照传输] 开始从目录 {} 传输快照({} 个文件,总大小 {})";
+ public static final String SNAPSHOT_TRANSMISSION_ALL_FILES =
+ "[快照传输] 以下所有文件将被传输:{}";
+ public static final String SNAPSHOT_TRANSMISSION_ERROR =
+ "[快照传输] 向 %s 传输快照片段时出错";
+ public static final String SNAPSHOT_TRANSMISSION_PROGRESS =
+ "[快照传输] 目录 {} 的整体进度:文件 {}/{} 已完成,大小 {}/{} 已完成,已耗时 {}。文件 {} 已完成。";
+ public static final String SNAPSHOT_TRANSMISSION_SEND_ERROR =
+ "[快照传输] 向 %s 发送快照文件时出错";
+ public static final String SNAPSHOT_TRANSMISSION_COMPLETE =
+ "[快照传输] 经过 {},已成功从目录 {} 传输所有快照";
+
+ // ===================== Peer 操作 =====================
+
+ public static final String ERROR_INACTIVATING_PEER =
+ "将 %s 设为非活跃时出错。%s";
+ public static final String ERROR_INACTIVATING_PEER_SHORT =
+ "将 %s 设为非活跃时出错";
+ public static final String ERROR_TRIGGERING_SNAPSHOT_LOAD =
+ "触发 %s 加载快照时出错。%s";
+ public static final String ERROR_ACTIVATING_PEER =
+ "激活 %s 时出错。%s";
+ public static final String ERROR_ACTIVATING_PEER_SHORT =
+ "激活 %s 时出错";
+ public static final String CLEANUP_REMOTE_SNAPSHOT_FAILED =
+ "清理 %s 的远程快照失败,状态为 %s";
+ public static final String CLEANUP_REMOTE_SNAPSHOT_FAILED_SHORT =
+ "清理 %s 的远程快照失败";
+
+ // ===================== 同步日志 =====================
+
+ public static final String NOTIFY_PEERS_BUILD_SYNC_LOG_DETAIL =
+ "[IoTConsensus] 通知当前 peer 建立同步日志通道。组成员:{},目标:{}";
+ public static final String BUILD_SYNC_LOG_CHANNEL_FROM =
+ "[IoTConsensus] 从 {} 建立同步日志通道";
+ public static final String BUILD_SYNC_LOG_CHANNEL_FAILED =
+ "从 %s 到 %s 建立同步日志通道失败";
+ public static final String CANNOT_NOTIFY_BUILD_SYNC_LOG =
+ "无法通知 {} 建立同步日志通道。请手动检查该节点状态";
+ public static final String BUILD_SYNC_LOG_CHANNEL_SUCCESS =
+ "[IoTConsensus] 成功建立到 {} 的同步日志通道,initialSyncIndex 为 {}。{}";
+ public static final String SYNC_LOG_CHANNEL_STARTED =
+ "同步日志通道已启动。";
+ public static final String SYNC_LOG_CHANNEL_START_LATER =
+ "同步日志通道可能稍后启动。";
+ public static final String REMOVING_SYNC_LOG_CHANNEL_FAILED =
+ "从 {} 到 {} 移除同步日志通道失败";
+ public static final String EXCEPTION_REMOVING_SYNC_LOG_CHANNEL =
+ "从 {} 到 {} 移除同步日志通道时发生异常";
+ public static final String LOG_DISPATCHER_REMOVED_CLEANUP =
+ "[IoTConsensus] 到 {} 的日志分发器已移除并清理";
+ public static final String EXCEPTION_REMOVING_LOG_DISPATCHER =
+ "[IoTConsensus] 移除日志分发器线程时发生异常,但 configuration.dat 仍将被移除。";
+ public static final String SUGGEST_RESTART_DATANODE =
+ "建议重启 DataNode 以移除日志分发器线程。";
+ public static final String LOG_DISPATCHER_REMOVED_AND_CLEANUP =
+ "[IoTConsensus] 到 {} 的日志分发器线程已移除并清理";
+ public static final String CONFIGURATION_UPDATED =
+ "[IoTConsensus 配置] 配置已更新为 {}。{}";
+
+ // ===================== 等待同步日志 =====================
+
+ public static final String WAIT_SYNC_LOG_COMPLETED =
+ "[等待日志同步] {} SyncLog 已完成。TargetIndex: {},CurrentSyncIndex: {}";
+ public static final String WAIT_SYNC_LOG_IN_PROGRESS =
+ "[等待日志同步] {} SyncLog 仍在进行中。TargetIndex: {},CurrentSyncIndex: {}";
+ public static final String ERROR_WAITING_SYNC_LOG_COMPLETE =
+ "等待 %s 完成 SyncLog 时出错。%s";
+ public static final String THREAD_INTERRUPTED_WAITING_SYNC_LOG =
+ "等待 %s 完成 SyncLog 时线程被中断。%s";
+
+ // ===================== Index 控制器 =====================
+
+ public static final String UPDATE_INDEX =
+ "更新 index:从 currentIndex {} 到 {},文件前缀 {},目录 {}";
+ public static final String VERSION_FILE_UPDATED =
+ "版本文件已更新,旧文件:{},新文件:{}";
+ public static final String FAILED_FLUSH_SYNC_INDEX =
+ "刷新同步 index 失败,因为前一个版本文件 {} 不存在。"
+ + "这可能是由于目标 Peer 已从当前组中移除。"
+ + "目标文件为 {}";
+ public static final String ERROR_FLUSHING_NEXT_VERSION =
+ "刷新下一个版本时出错";
+ public static final String VERSION_FILE_UPGRADE =
+ "版本文件升级,旧文件:{},新文件:{}";
+ public static final String ERROR_UPGRADING_VERSION_FILE =
+ "升级版本文件时出错";
+ public static final String DELETE_OUTDATED_VERSION_FILE_FAILED =
+ "删除过期版本文件 {} 失败";
+ public static final String ERROR_CREATING_NEW_FILE =
+ "创建新文件 {} 时出错";
+ public static final String CONFIGURATION_EMPTY_UNEXPECTED =
+ "配置为空,这是非预期的。本次不会更新安全删除搜索 index。";
+ public static final String SEARCH_INDEX_SMALLER_THAN_SAFELY_DELETED =
+ "此 region({}) 的 searchIndex 在节点重启时小于 safelyDeletedSearchIndex,"
+ + "这意味着当前 region 的数据未通过 WAL 刷新,但已同步到其他节点。"
+ + "此时不同副本已不一致且无法自动恢复。"
+ + "为防止后续日志标记更小的 searchIndex 加剧不一致,"
+ + "这里手动将 searchIndex({}) 设置为 safelyDeletedSearchIndex({}) "
+ + "以减少此问题在未来的影响";
+
+ // ===================== LogDispatcher =====================
+
+ public static final String UNABLE_TO_SHUTDOWN_LOG_DISPATCHER =
+ "在 {} 秒后仍无法关闭 LogDispatcher 服务";
+ public static final String UNEXPECTED_INTERRUPTION_CLOSING_LOG_DISPATCHER =
+ "关闭 LogDispatcher 服务时发生意外中断 ";
+ public static final String DISPATCHER_STARTS =
+ "{}:到 {} 的分发器启动";
+ public static final String DISPATCHER_EXITS =
+ "{}:到 {} 的分发器退出";
+ public static final String DISPATCHER_DID_NOT_STOP =
+ "{}:到 {} 的分发器在 30 秒后仍未停止。";
+ public static final String UNEXPECTED_ERROR_IN_LOG_DISPATCHER =
+ "peer {} 的日志分发器发生意外错误";
+ public static final String PUSH_LOG_TO_QUEUE =
+ "{}->{}:向队列推送一条日志,当前队列长度为 {}";
+ public static final String LOG_QUEUE_FULL =
+ "{}:{} 的日志队列已满,忽略此节点的日志,searchIndex:{}";
+ public static final String GET_BATCH_START_INDEX =
+ "{}:startIndex: {},maxIndex: {},pendingEntries 大小:{},bufferedEntries 大小:{}";
+ public static final String ACCUMULATED_FROM_WAL_WHEN_EMPTY =
+ "{} :空队列时从 WAL 累积了一个 {}";
+ public static final String ACCUMULATED_FROM_WAL =
+ "{} :从 WAL 累积了一个 {}";
+ public static final String ACCUMULATED_FROM_QUEUE =
+ "{} :从队列累积了一个 {}";
+ public static final String ACCUMULATED_FROM_QUEUE_AND_WAL_GAP =
+ "间隔 {} :在存在间隔时从队列和 WAL 累积了一个 {}";
+ public static final String ACCUMULATED_FROM_QUEUE_AND_WAL =
+ "{} :从队列和 WAL 累积了一个 {}";
+ public static final String SEND_BATCH =
+ "发送 Batch[startIndex:{}, endIndex:{}] 到共识组:{}";
+ public static final String CANNOT_SYNC_LOGS_TO_PEER =
+ "无法同步日志到 peer {},原因";
+ public static final String CONSTRUCT_FROM_WAL =
+ "从 WAL 构造一条日志,index:{}";
+ public static final String WAIT_NEXT_WAL_INTERRUPTED =
+ "等待下一条 WAL 日志时被中断";
+ public static final String SEARCH_ENTRY_FOUND_SMALLER =
+ "搜索 index 为 {} 的日志,但找到一条更小的,index:{}";
+ public static final String SEARCH_ENTRY_FOUND_LARGER =
+ "搜索 index 为 {} 的日志,但找到一条更大的,index:{}。"
+ + "WAL 文件可能已损坏,将跳过并选择更大的 index 进行复制";
+ public static final String DATA_REGION_CONSTRUCT_FROM_WAL =
+ "DataRegion[{}]->{}:currentIndex: {},maxIndex: {}";
+
+ // ===================== DispatchLogHandler =====================
+
+ public static final String CANNOT_SEND_TO_PEER =
+ "无法将 {} 发送到 peer {},已重试 {} 次,原因 {}";
+ public static final String SEND_COMPLETE_BUT_CONTAINS_ERROR =
+ "已将 {} 发送到 peer {} 但包含不成功的状态:{}";
+ public static final String CANNOT_SEND_TO_PEER_ON_ERROR =
+ "无法将 {} 发送到 peer,已重试 {} 次 {},原因 {}";
+ public static final String SKIP_RETRY_TAPPLICATION_EXCEPTION =
+ "由于 TApplicationException,跳过重试此 Batch {}。";
+ public static final String LOG_DISPATCHER_STOPPED_NO_RETRY =
+ "LogDispatcherThread {} 已停止,"
+ + "不会在 {} 次后重试此 Batch {}";
+
+ // ===================== SyncLogCacheQueue =====================
+
+ public static final String CACHE_AND_INSERT_START =
+ "缓存插入开始:source = {},region = {},队列大小 {},startSyncIndex = {},endSyncIndex = {}";
+ public static final String CACHE_AND_INSERT_END =
+ "缓存插入结束:source = {},region = {},队列大小 {},startSyncIndex = {},endSyncIndex = {},sortTime = {}ms,applyTime = {}ms";
+ public static final String WAITING_TARGET_REQUEST_TIMEOUT =
+ "等待目标请求超时。当前 index:{},目标 index:{}";
+ public static final String CURRENT_WAITING_INTERRUPTED =
+ "当前等待被中断。SyncIndex:{}。异常:";
+
+ // ===================== SyncStatus =====================
+
+ public static final String SYNC_STATUS_OFFER =
+ "向 SyncStatus 提交 Batch[startIndex:{}, endIndex:{}]。"
+ + "当前 SyncStatus 大小:{}。Pending 大小:{}";
+
+ // ===================== AsyncClient =====================
+
+ public static final String UNEXPECTED_EXCEPTION_IN_CLIENT =
+ "{} 中发生意外异常,错误信息为 {}";
+ public static final String CLIENT_INVALIDATED = "此客户端已被标记为无效";
+
+ // ===================== RPC 处理器执行日志同步 =====================
+
+ public static final String EXECUTE_SYNC_LOG_ENTRIES =
+ "执行 TSyncLogEntriesReq,共识组 {},结果 {}";
+
+ // ===================== 内存管理器 =====================
+
+ public static final String RESERVING_BYTES_FOR_REQUEST_SUCCEEDS =
+ "为请求 {} 预留 {} 字节成功,当前总使用量 {}";
+ public static final String RESERVING_BYTES_FOR_REQUEST_FAILS =
+ "为请求 {} 预留 {} 字节失败,当前总使用量 {}";
+ public static final String SKIP_MEMORY_RESERVATION =
+ "跳过 {} 的内存预留,因为其引用计数不为 0";
+ public static final String RESERVING_BYTES_FOR_BATCH_SUCCEEDS =
+ "为 batch {}-{} 预留 {} 字节成功,当前总使用量 {}";
+ public static final String RESERVING_BYTES_FOR_BATCH_FAILS =
+ "为 batch {}-{} 预留 {} 字节失败,当前总使用量 {}";
+ public static final String FREED_BYTES_FOR_REQUEST =
+ "为请求 {} 释放 {} 字节,当前总使用量 {}";
+ public static final String FREED_BYTES_FOR_BATCH =
+ "为 batch {}-{} 释放 {} 字节,当前总使用量 {}";
+ public static final String FREE_MEMORY =
+ "{} 释放 {} 字节,总内存大小:{} 字节。";
+ public static final String INTERRUPTED_AFTER_POLLING_AND_SLEEPING =
+ "轮询和等待后被中断";
+ public static final String INTERRUPTED_AFTER_GETTING_A_BATCH =
+ "获取批次后被中断";
+}
diff --git a/iotdb-core/consensus/src/main/i18n/zh/org/apache/iotdb/consensus/i18n/IoTConsensusV2Messages.java b/iotdb-core/consensus/src/main/i18n/zh/org/apache/iotdb/consensus/i18n/IoTConsensusV2Messages.java
new file mode 100644
index 0000000000000..c3d705f861bc6
--- /dev/null
+++ b/iotdb-core/consensus/src/main/i18n/zh/org/apache/iotdb/consensus/i18n/IoTConsensusV2Messages.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.consensus.i18n;
+
+/**
+ * IoTConsensusV2(基于 pipe 的共识)特有消息。日志消息使用 SLF4J {@code {}} 占位符;异常消息使用 {@code %s}(String.format)或纯字符串。
+ */
+public final class IoTConsensusV2Messages {
+
+ private IoTConsensusV2Messages() {}
+
+ // ===================== IoTConsensusV2 生命周期 =====================
+
+ public static final String RECOVER_TASK_CANCELLED =
+ "IoTV2 恢复任务已取消";
+ public static final String RECOVER_FUTURE_EXCEPTION =
+ "等待恢复 future 完成时发生异常";
+ public static final String RECOVER_TASK_INTERRUPTED =
+ "IoTV2 恢复任务被中断";
+ public static final String FAILED_RECOVER_CONSENSUS =
+ "从 {} 恢复共识组 {} 失败,忽略并继续恢复其他组,异步后台检查线程将自动注销该失败共识组的 pipe 副作用。";
+ public static final String FAILED_RECOVER_CONSENSUS_READ_DIR =
+ "从 {} 恢复共识失败,因为读取目录失败";
+ public static final String FAILED_RECOVER_CONSENSUS_SHORT =
+ "从 {} 恢复共识失败";
+
+ // ===================== IoTConsensusV2 peer 操作 =====================
+
+ public static final String START_DELETE_LOCAL_PEER =
+ "[{}] 开始删除共识组 {} 的本地 peer";
+ public static final String FINISH_DELETE_LOCAL_PEER =
+ "[{}] 完成删除共识组 {} 的本地 peer";
+ public static final String INACTIVATE_NEW_PEER =
+ "[{}] 将新 peer 设为非活跃:{}";
+ public static final String NOTIFY_CREATE_CONSENSUS_PIPES =
+ "[{}] 通知当前 peer 创建 consensus pipe...";
+ public static final String WAIT_PEERS_FINISH_TRANSFER =
+ "[{}] 等待所有其他 peer 完成传输...";
+ public static final String ACTIVATE_NEW_PEER =
+ "[{}] 激活新 peer...";
+ public static final String ADD_REMOTE_PEER_FAILED_CLEANUP =
+ "[{}] 添加远程 peer 失败,正在自动清理副作用...";
+ public static final String FAILED_CLEANUP_SIDE_EFFECTS =
+ "[{}] 添加远程 peer 失败后清理副作用失败";
+ public static final String NOTIFY_DROP_CONSENSUS_PIPES =
+ "[{}] 通知其他 peer 删除 consensus pipe...";
+ public static final String INACTIVATE_PEER =
+ "[{}] 将 peer {} 设为非活跃";
+ public static final String WAIT_TARGET_PEER_COMPLETE_TRANSFER =
+ "[{}] 等待目标 peer {} 完成传输...";
+ public static final String WAIT_PEER_RELEASE_RESOURCE =
+ "[{}] 等待 {} 释放所有资源...";
+ public static final String NOT_SUPPORT_LEADER_TRANSFER =
+ "%s 不支持 leader 切换";
+
+ // ===================== IoTConsensusV2ServerImpl =====================
+
+ public static final String ERROR_SET_PEER_ACTIVE =
+ "将 peer %s 设置为活跃状态 %s 时出错。结果状态:%s";
+ public static final String ERROR_SET_PEER_ACTIVE_SHORT =
+ "将 peer %s 设置为活跃状态 %s 时出错";
+ public static final String TARGET_PEER_MAY_BE_DOWN =
+ "目标 peer 可能已下线,将 peer {} 设置为活跃状态 {} 时出错";
+ public static final String CANNOT_NOTIFY_PEER_CREATE_PIPE =
+ "{} 无法通知 peer {} 创建 consensus pipe,该 peer 可能当前未知,请手动检查!";
+ public static final String CANNOT_CREATE_CONSENSUS_PIPE =
+ "{} 无法创建到 {} 的 consensus pipe,目标 peer 可能当前未知,请手动检查!";
+ public static final String ERROR_NOTIFY_PEER_CREATE_PIPE =
+ "通知 peer %s 创建 consensus pipe 时出错";
+ public static final String CANNOT_NOTIFY_PEER_DROP_PIPE =
+ "{} 无法通知 peer {} 删除 consensus pipe,该 peer 可能当前未知,请手动检查!";
+ public static final String CANNOT_DROP_CONSENSUS_PIPE =
+ "{} 无法删除到 {} 的 consensus pipe,目标 peer 可能当前未知,请手动检查!";
+ public static final String ERROR_NOTIFY_PEER_DROP_PIPE =
+ "通知 peer %s 删除 consensus pipe 时出错";
+ public static final String INTERRUPTED_WAITING_TRANSFER =
+ "{} 等待传输完成时被中断";
+ public static final String INTERRUPTED_WAITING_TRANSFER_FMT =
+ "%s 等待传输完成时被中断";
+ public static final String CANNOT_CHECK_PIPE_TRANSMISSION =
+ "{} 无法检查到 peer {} 的 consensus pipe 传输完成状态";
+ public static final String ERROR_CHECK_PIPE_TRANSMISSION =
+ "检查到 peer %s 的 consensus pipe 传输完成状态时出错";
+ public static final String CANNOT_CHECK_PIPE_TRANSMISSION_SHORT =
+ "{} 无法检查 consensus pipe 传输完成状态";
+
+ // ===================== IoTConsensusV2RPCServiceProcessor =====================
+
+ public static final String UNEXPECTED_GROUP_SET_ACTIVE =
+ "共识组 ID %s 与设置活跃请求 %s 不匹配";
+ public static final String UNEXPECTED_GROUP_CREATE_PIPE =
+ "共识组 ID %s 与创建 consensus pipe 请求 %s 不匹配";
+ public static final String UNEXPECTED_GROUP_DROP_PIPE =
+ "共识组 ID %s 与删除 consensus pipe 请求 %s 不匹配";
+ public static final String UNEXPECTED_GROUP_CHECK_TRANSFER =
+ "共识组 ID %s 与检查传输完成请求 %s 不匹配";
+ public static final String UNEXPECTED_GROUP_WAIT_RELEASE =
+ "共识组 ID %s 与 TWaitReleaseAllRegionRelatedResourceRes 请求不匹配";
+ public static final String FAILED_CREATE_CONSENSUS_PIPE =
+ "创建到目标 peer 的 consensus pipe 失败,请求 {}";
+ public static final String FAILED_DROP_CONSENSUS_PIPE =
+ "删除到目标 peer 的 consensus pipe 失败,请求 {}";
+ public static final String FAILED_CHECK_CONSENSUS_PIPE =
+ "检查 consensus pipe 完成状态失败,请求 {},将完成状态设为 {}";
+}
diff --git a/iotdb-core/consensus/src/main/i18n/zh/org/apache/iotdb/consensus/i18n/RatisMessages.java b/iotdb-core/consensus/src/main/i18n/zh/org/apache/iotdb/consensus/i18n/RatisMessages.java
new file mode 100644
index 0000000000000..12f840186d347
--- /dev/null
+++ b/iotdb-core/consensus/src/main/i18n/zh/org/apache/iotdb/consensus/i18n/RatisMessages.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.consensus.i18n;
+
+/**
+ * Ratis 共识特有消息。日志消息使用 SLF4J {@code {}} 占位符;异常消息使用 {@code %s}(String.format)或纯字符串。
+ */
+public final class RatisMessages {
+
+ private RatisMessages() {}
+
+ // ===================== RatisConsensus =====================
+
+ public static final String INTERRUPTED_RETRYING_WRITE =
+ "{}:重试写入请求 {} 时被中断";
+ public static final String NULL_REPLY_IN_WRITE_WITH_RETRY =
+ "writeWithRetry 中收到空回复,请求为 ";
+ public static final String LEADER_READ_ONLY_STEP_DOWN_FAILED =
+ "leader {} 处于只读模式,强制降级失败,原因 ";
+ public static final String TRY_ADD_CONFLICTING_PEER =
+ "{}:尝试添加 ID 或地址冲突的 peer {} 到 {}";
+ public static final String IS_LEADER_REQUEST_FAILED =
+ "isLeader 请求失败,异常:";
+ public static final String IS_LEADER_READY_REQUEST_FAILED =
+ "isLeaderReady 请求失败,异常:";
+ public static final String GET_LOGICAL_CLOCK_REQUEST_FAILED =
+ "getLogicalClock 请求失败,异常:";
+ public static final String IS_LEADER_READY_CHECKING_FAILED =
+ "isLeaderReady 检查失败,异常:";
+ public static final String LEADER_STILL_NOT_READY =
+ "{}:leader 在 {} 毫秒后仍未就绪";
+ public static final String UNEXPECTED_INTERRUPTION_WAIT_LEADER_READY =
+ "等待 leader 就绪时发生意外中断";
+ public static final String FETCH_DIVISION_INFO_FAILED =
+ "获取共识组 {} 的 division 信息失败,原因:";
+ public static final String TRIGGER_SNAPSHOT_SUCCESS =
+ "{} 共识组 {}:已在 index {} 成功创建快照,force = {}";
+ public static final String GET_GROUP_FAILED =
+ "获取共识组 {} 失败 ";
+ public static final String BORROW_CLIENT_FROM_POOL_FAILED =
+ "从连接池借用共识组 {} 的客户端失败。";
+ public static final String TRANSFER_LEADER_FAILED_TIMEOUT =
+ "共识组 %s 向 %s 切换 leader 失败。这可能是由于超时引起的,"
+ + "特别是在磁盘使用率较高时。请考虑增大 "
+ + "'ratis_transfer_leader_timeout_ms' 配置项。";
+ public static final String TRANSFER_LEADER_FAILED_STARTUP =
+ "共识组 %s 向 %s 切换 leader 失败。这可能是由于超时引起的,"
+ + "特别是在初始启动期间。请考虑增大 "
+ + "'ratis_rpc_transfer_leader_timeout_ms' 配置项。";
+
+ // ===================== ApplicationStateMachineProxy =====================
+
+ public static final String STATEMACHINE_RUNTIME_EXCEPTION =
+ "应用状态机抛出运行时异常:";
+ public static final String INTERRUPTED_WAITING_SYSTEM_READY =
+ "{}:等待系统就绪时被中断:";
+ public static final String REQUEST_MESSAGE_REQUIRED =
+ "需要 RequestMessage 但收到 {}";
+ public static final String UNABLE_TO_CREATE_TEMP_SNAPSHOT_DIR =
+ "无法在 {} 创建临时快照目录";
+ public static final String ATOMIC_RENAME_FAILED =
+ "{} 将 {} 原子重命名为 {} 失败,异常 {}";
+ public static final String SNAPSHOT_DIR_INCOMPLETE_DELETING =
+ "快照目录不完整,正在删除 {}";
+
+ // ===================== RatisClient =====================
+
+ public static final String CANNOT_CLOSE_RAFT_CLIENT =
+ "无法关闭 Raft 客户端 ";
+ public static final String RAFT_CLIENT_REQUEST_FAILED =
+ "{}:Raft 客户端请求失败并捕获异常:";
+
+ // ===================== DiskGuardian =====================
+
+ public static final String ERROR_LISTING_FILES =
+ "{}:列出共识组 {} 的文件时出错:";
+ public static final String CLEAR_SNAPSHOT_FLAG_FAILED =
+ "{}:清除共识组 {} 的快照标志失败,请检查相关实现";
+ public static final String TAKE_SNAPSHOT_FAILED =
+ "{} 共识组 {} 创建快照失败,原因 {}。磁盘文件状态 {}";
+
+ // ===================== SnapshotStorage =====================
+
+ public static final String CANNOT_CONSTRUCT_SNAPSHOT_DIR_STREAM =
+ "无法构建快照目录流 ";
+ public static final String CANNOT_RESOLVE_REAL_PATH =
+ "{} 无法解析 {} 的真实路径,原因 ";
+
+ // ===================== ResponseMessage =====================
+
+ public static final String SERIALIZE_TSSTATUS_FAILED =
+ "序列化 TSStatus 失败 {}";
+
+ // ===================== MetricRegistryManager =====================
+
+ public static final String REPORTER_DISABLED =
+ "Reporter 在 RatisMetricRegistries 中已禁用";
+ public static final String JMX_REPORTER_DISABLED =
+ "JMX Reporter 在 RatisMetricRegistries 中已禁用";
+ public static final String CONSOLE_REPORTER_DISABLED =
+ "Console Reporter 在 RatisMetricRegistries 中已禁用";
+}
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ConsensusFactory.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ConsensusFactory.java
index d24955aba7396..26763033abada 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ConsensusFactory.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ConsensusFactory.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.consensus.iotv2.container.IoTV2GlobalComponentContainer;
import org.apache.iotdb.consensus.config.ConsensusConfig;
+import org.apache.iotdb.consensus.i18n.ConsensusMessages;
import org.apache.iotdb.consensus.pipe.metric.IoTConsensusV2SyncLagManager;
import org.slf4j.Logger;
@@ -31,8 +32,7 @@
import java.util.Optional;
public class ConsensusFactory {
- public static final String CONSTRUCT_FAILED_MSG =
- "Construct consensusImpl failed, Please check your consensus className %s";
+ public static final String CONSTRUCT_FAILED_MSG = ConsensusMessages.CONSTRUCT_FAILED_MSG;
public static final String SIMPLE_CONSENSUS = "org.apache.iotdb.consensus.simple.SimpleConsensus";
public static final String RATIS_CONSENSUS = "org.apache.iotdb.consensus.ratis.RatisConsensus";
@@ -50,7 +50,7 @@ public class ConsensusFactory {
private static final Logger logger = LoggerFactory.getLogger(ConsensusFactory.class);
private ConsensusFactory() {
- throw new IllegalStateException("Utility class ConsensusFactory");
+ throw new IllegalStateException(ConsensusMessages.UTILITY_CLASS_CONSENSUS_FACTORY);
}
// Downstream code compares against IOT_CONSENSUS_V2 directly, so persisted legacy names must be
@@ -87,7 +87,7 @@ public static Optional getConsensusImpl(
| InstantiationException
| IllegalAccessException
| InvocationTargetException e) {
- logger.error("Couldn't Construct IConsensus class: {}", className, e);
+ logger.error(ConsensusMessages.COULD_NOT_CONSTRUCT_ICONSENSUS, className, e);
}
return Optional.empty();
}
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/IStateMachine.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/IStateMachine.java
index 6d115cb81618a..3354c83699b54 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/IStateMachine.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/IStateMachine.java
@@ -25,6 +25,7 @@
import org.apache.iotdb.consensus.common.DataSet;
import org.apache.iotdb.consensus.common.Peer;
import org.apache.iotdb.consensus.common.Utils;
+import org.apache.iotdb.consensus.i18n.ConsensusMessages;
import javax.annotation.concurrent.ThreadSafe;
@@ -104,7 +105,7 @@ default boolean takeSnapshot(File snapshotDir, String snapshotTmpId, String snap
* @return true if all snapshot dir delete successfully
*/
default boolean clearSnapshot() {
- throw new UnsupportedOperationException("not implemented yet");
+ throw new UnsupportedOperationException(ConsensusMessages.NOT_IMPLEMENTED_YET);
}
/**
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/common/Peer.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/common/Peer.java
index bc6ec923aaf9e..c3ee1d0b7d2e1 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/common/Peer.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/common/Peer.java
@@ -28,6 +28,7 @@
import org.apache.iotdb.commons.consensus.SchemaRegionId;
import org.apache.iotdb.commons.utils.BasicStructureSerDeUtil;
import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils;
+import org.apache.iotdb.consensus.i18n.ConsensusMessages;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -70,7 +71,7 @@ public void serialize(DataOutputStream stream) {
BasicStructureSerDeUtil.write(nodeId, stream);
ThriftCommonsSerDeUtils.serializeTEndPoint(endpoint, stream);
} catch (IOException e) {
- logger.error("Failed to serialize Peer", e);
+ logger.error(ConsensusMessages.FAILED_TO_SERIALIZE_PEER, e);
}
}
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/common/Utils.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/common/Utils.java
index 106157c8f66ae..2d8e1452076a8 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/common/Utils.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/common/Utils.java
@@ -18,6 +18,8 @@
*/
package org.apache.iotdb.consensus.common;
+import org.apache.iotdb.consensus.i18n.ConsensusMessages;
+
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -58,7 +60,7 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
@Override
public FileVisitResult visitFileFailed(Path file, IOException exc) {
- logger.info("visit file {} failed due to {}", file.toAbsolutePath(), exc);
+ logger.info(ConsensusMessages.VISIT_FILE_FAILED, file.toAbsolutePath(), exc);
return FileVisitResult.TERMINATE;
}
@@ -68,7 +70,7 @@ public FileVisitResult postVisitDirectory(Path dir, IOException exc) {
}
});
} catch (IOException ioException) {
- logger.error("IOException occurred during listing snapshot directory: ", ioException);
+ logger.error(ConsensusMessages.IO_EXCEPTION_LISTING_SNAPSHOT_DIR, ioException);
return Collections.emptyList();
}
return allFiles;
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/ConsensusGroupAlreadyExistException.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/ConsensusGroupAlreadyExistException.java
index 52f3c5ce439c8..b7bc440f225fa 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/ConsensusGroupAlreadyExistException.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/ConsensusGroupAlreadyExistException.java
@@ -20,13 +20,14 @@
package org.apache.iotdb.consensus.exception;
import org.apache.iotdb.commons.consensus.ConsensusGroupId;
+import org.apache.iotdb.consensus.i18n.ConsensusMessages;
public class ConsensusGroupAlreadyExistException extends ConsensusException {
private final transient ConsensusGroupId groupId;
public ConsensusGroupAlreadyExistException(ConsensusGroupId groupId) {
- super(String.format("The consensus group %d already exists", groupId.getId()));
+ super(String.format(ConsensusMessages.CONSENSUS_GROUP_ALREADY_EXIST, groupId.getId()));
this.groupId = groupId;
}
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/ConsensusGroupNotExistException.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/ConsensusGroupNotExistException.java
index d80df6a615c5f..aa580d4cbedf4 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/ConsensusGroupNotExistException.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/ConsensusGroupNotExistException.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.consensus.exception;
import org.apache.iotdb.commons.consensus.ConsensusGroupId;
+import org.apache.iotdb.consensus.i18n.ConsensusMessages;
public class ConsensusGroupNotExistException extends ConsensusException {
@@ -31,7 +32,7 @@ public ConsensusGroupNotExistException(String cause) {
}
public ConsensusGroupNotExistException(ConsensusGroupId groupId) {
- super(String.format("The consensus group %s doesn't exist", groupId));
+ super(String.format(ConsensusMessages.CONSENSUS_GROUP_NOT_EXIST, groupId));
this.groupId = groupId;
}
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/IllegalPeerEndpointException.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/IllegalPeerEndpointException.java
index 969ed3e38e0fc..1d3728ac23b57 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/IllegalPeerEndpointException.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/IllegalPeerEndpointException.java
@@ -21,15 +21,13 @@
import org.apache.iotdb.common.rpc.thrift.TEndPoint;
import org.apache.iotdb.consensus.common.Peer;
+import org.apache.iotdb.consensus.i18n.ConsensusMessages;
import java.util.List;
public class IllegalPeerEndpointException extends ConsensusException {
public IllegalPeerEndpointException(TEndPoint currentNode, List peers) {
- super(
- String.format(
- "Illegal addConsensusGroup because currentNode %s is not in consensusGroup %s",
- currentNode, peers));
+ super(String.format(ConsensusMessages.ILLEGAL_PEER_ENDPOINT, currentNode, peers));
}
}
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/IllegalPeerNumException.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/IllegalPeerNumException.java
index c08ef050576b5..95f78d3d5d1b3 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/IllegalPeerNumException.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/IllegalPeerNumException.java
@@ -19,9 +19,11 @@
package org.apache.iotdb.consensus.exception;
+import org.apache.iotdb.consensus.i18n.ConsensusMessages;
+
public class IllegalPeerNumException extends ConsensusException {
public IllegalPeerNumException(int size) {
- super(String.format("Illegal peer num %d when adding consensus group", size));
+ super(String.format(ConsensusMessages.ILLEGAL_PEER_NUM, size));
}
}
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/PeerAlreadyInConsensusGroupException.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/PeerAlreadyInConsensusGroupException.java
index b9142d56492a8..53e42e4e93aee 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/PeerAlreadyInConsensusGroupException.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/PeerAlreadyInConsensusGroupException.java
@@ -21,12 +21,15 @@
import org.apache.iotdb.commons.consensus.ConsensusGroupId;
import org.apache.iotdb.consensus.common.Peer;
+import org.apache.iotdb.consensus.i18n.ConsensusMessages;
public class PeerAlreadyInConsensusGroupException extends ConsensusException {
public PeerAlreadyInConsensusGroupException(ConsensusGroupId groupId, Peer peer) {
super(
String.format(
- "Peer %s:%d is already in group %d",
- peer.getEndpoint().getIp(), peer.getEndpoint().getPort(), groupId.getId()));
+ ConsensusMessages.PEER_ALREADY_IN_GROUP,
+ peer.getEndpoint().getIp(),
+ peer.getEndpoint().getPort(),
+ groupId.getId()));
}
}
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/PeerNotInConsensusGroupException.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/PeerNotInConsensusGroupException.java
index 64bc461fa91f5..a2b5f4cb1e2aa 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/PeerNotInConsensusGroupException.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/exception/PeerNotInConsensusGroupException.java
@@ -20,9 +20,10 @@
package org.apache.iotdb.consensus.exception;
import org.apache.iotdb.commons.consensus.ConsensusGroupId;
+import org.apache.iotdb.consensus.i18n.ConsensusMessages;
public class PeerNotInConsensusGroupException extends ConsensusException {
public PeerNotInConsensusGroupException(ConsensusGroupId groupId, String peer) {
- super(String.format("Peer %s is not in group %d", peer, groupId.getId()));
+ super(String.format(ConsensusMessages.PEER_NOT_IN_GROUP, peer, groupId.getId()));
}
}
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/IoTConsensus.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/IoTConsensus.java
index 256b918c02cb0..f3895d3edaf5d 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/IoTConsensus.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/IoTConsensus.java
@@ -50,6 +50,8 @@
import org.apache.iotdb.consensus.exception.IllegalPeerNumException;
import org.apache.iotdb.consensus.exception.PeerAlreadyInConsensusGroupException;
import org.apache.iotdb.consensus.exception.PeerNotInConsensusGroupException;
+import org.apache.iotdb.consensus.i18n.ConsensusMessages;
+import org.apache.iotdb.consensus.i18n.IoTConsensusMessages;
import org.apache.iotdb.consensus.iot.client.AsyncIoTConsensusServiceClient;
import org.apache.iotdb.consensus.iot.client.IoTConsensusClientPool.AsyncIoTConsensusServiceClientPoolFactory;
import org.apache.iotdb.consensus.iot.client.IoTConsensusClientPool.SyncIoTConsensusServiceClientPoolFactory;
@@ -162,7 +164,8 @@ public synchronized void start() throws IOException {
private void initAndRecover() throws IOException {
if (!storageDir.exists()) {
if (!storageDir.mkdirs()) {
- throw new IOException(String.format("Unable to create consensus dir at %s", storageDir));
+ throw new IOException(
+ String.format(ConsensusMessages.UNABLE_TO_CREATE_CONSENSUS_DIR_FMT, storageDir));
}
} else {
try (DirectoryStream stream = Files.newDirectoryStream(storageDir.toPath())) {
@@ -193,7 +196,7 @@ private void initAndRecover() throws IOException {
} catch (ConsensusGroupNotExistException ignore) {
} catch (Exception e) {
- logger.warn("Failed to reset peer list while start", e);
+ logger.warn(ConsensusMessages.FAILED_TO_RESET_PEER_LIST_WHILE_START, e);
}
};
// make peers which are in list correct
@@ -221,7 +224,7 @@ public synchronized void stop() {
try {
backgroundTaskService.awaitTermination(5, TimeUnit.SECONDS);
} catch (InterruptedException e) {
- logger.warn("{}: interrupted when shutting down add Executor with exception {}", this, e);
+ logger.warn(ConsensusMessages.INTERRUPTED_WHEN_SHUTTING_DOWN_EXECUTOR, this, e);
Thread.currentThread().interrupt();
}
}
@@ -237,8 +240,9 @@ public TSStatus write(ConsensusGroupId groupId, IConsensusRequest request)
} else if (!impl.isActive()) {
String message =
String.format(
- "Peer is inactive and not ready to write request, %s, DataNode Id: %s",
- groupId.toString(), impl.getThisNode().getNodeId());
+ ConsensusMessages.PEER_INACTIVE_NOT_READY_WRITE,
+ groupId.toString(),
+ impl.getThisNode().getNodeId());
return RpcUtils.getStatus(TSStatusCode.WRITE_PROCESS_REJECT, message);
} else {
return impl.write(request);
@@ -274,7 +278,8 @@ public void createLocalPeer(ConsensusGroupId groupId, List peers)
String path = buildPeerDir(storageDir, groupId);
File file = new File(path);
if (!file.exists() && !file.mkdirs()) {
- logger.warn("Unable to create consensus dir for group {} at {}", groupId, path);
+ logger.warn(
+ ConsensusMessages.UNABLE_TO_CREATE_CONSENSUS_DIR_FOR_GROUP, groupId, path);
return null;
}
@@ -294,7 +299,8 @@ public void createLocalPeer(ConsensusGroupId groupId, List peers)
.orElseThrow(
() ->
new ConsensusException(
- String.format("Unable to create consensus dir for group %s", groupId)));
+ String.format(
+ ConsensusMessages.UNABLE_TO_CREATE_CONSENSUS_DIR_FOR_GROUP_FMT, groupId)));
KillPoint.setKillPoint(DataNodeKillPoints.DESTINATION_CREATE_LOCAL_PEER);
if (exist.get()) {
throw new ConsensusGroupAlreadyExistException(groupId);
@@ -330,47 +336,47 @@ public void addRemotePeer(ConsensusGroupId groupId, Peer peer) throws ConsensusE
}
try {
// step 1: inactive new Peer to prepare for following steps
- logger.info("[IoTConsensus] inactivate new peer: {}", peer);
+ logger.info(IoTConsensusMessages.INACTIVATE_NEW_PEER, peer);
impl.inactivatePeer(peer, false);
// step 2: notify all the other Peers to build the sync connection to newPeer
- logger.info("[IoTConsensus] notify current peers to build sync log...");
+ logger.info(IoTConsensusMessages.NOTIFY_PEERS_BUILD_SYNC_LOG);
impl.notifyPeersToBuildSyncLogChannel(peer);
// step 3: take snapshot
- logger.info("[IoTConsensus] start to take snapshot...");
+ logger.info(IoTConsensusMessages.START_TAKE_SNAPSHOT);
impl.takeSnapshot();
// step 4: transit snapshot
- logger.info("[IoTConsensus] start to transmit snapshot...");
+ logger.info(IoTConsensusMessages.START_TRANSMIT_SNAPSHOT);
impl.transmitSnapshot(peer);
// step 5: let the new peer load snapshot
- logger.info("[IoTConsensus] trigger new peer to load snapshot...");
+ logger.info(IoTConsensusMessages.TRIGGER_LOAD_SNAPSHOT);
impl.triggerSnapshotLoad(peer);
KillPoint.setKillPoint(DataNodeKillPoints.COORDINATOR_ADD_PEER_TRANSITION);
// step 6: active new Peer
- logger.info("[IoTConsensus] activate new peer...");
+ logger.info(IoTConsensusMessages.ACTIVATE_NEW_PEER);
impl.activePeer(peer);
// step 7: notify remote peer to clean up transferred snapshot
- logger.info("[IoTConsensus] clean up remote snapshot...");
+ logger.info(IoTConsensusMessages.CLEANUP_REMOTE_SNAPSHOT);
try {
impl.cleanupRemoteSnapshot(peer);
} catch (ConsensusGroupModifyPeerException e) {
- logger.warn("[IoTConsensus] failed to cleanup remote snapshot", e);
+ logger.warn(IoTConsensusMessages.FAILED_CLEANUP_REMOTE_SNAPSHOT, e);
}
KillPoint.setKillPoint(DataNodeKillPoints.COORDINATOR_ADD_PEER_DONE);
} catch (ConsensusGroupModifyPeerException e) {
- logger.info("[IoTConsensus] add remote peer failed, automatic cleanup side effects...");
+ logger.info(IoTConsensusMessages.ADD_REMOTE_PEER_FAILED_CLEANUP);
// try to clean up the sync log channel
impl.notifyPeersToRemoveSyncLogChannel(peer);
throw new ConsensusException(e);
} finally {
- logger.info("[IoTConsensus] clean up local snapshot...");
+ logger.info(IoTConsensusMessages.CLEANUP_LOCAL_SNAPSHOT);
impl.cleanupLocalSnapshot();
}
}
@@ -412,7 +418,7 @@ public void removeRemotePeer(ConsensusGroupId groupId, Peer peer) throws Consens
@Override
public void transferLeader(ConsensusGroupId groupId, Peer newLeader) throws ConsensusException {
- throw new ConsensusException("IoTConsensus does not support leader transfer");
+ throw new ConsensusException(IoTConsensusMessages.NOT_SUPPORT_LEADER_TRANSFER);
}
@Override
@@ -484,7 +490,7 @@ public void reloadConsensusConfig(ConsensusConfig consensusConfig) {
@Override
public void recordCorrectPeerListBeforeStarting(
Map> correctPeerList) {
- logger.info("Record correct peer list: {}", correctPeerList);
+ logger.info(ConsensusMessages.RECORD_CORRECT_PEER_LIST, correctPeerList);
this.correctPeerListBeforeStart = correctPeerList;
}
@@ -503,9 +509,7 @@ private void resetPeerListImpl(
Peer localPeer = new Peer(groupId, thisNodeId, thisNode);
if (!correctPeers.contains(localPeer)) {
- logger.info(
- "[RESET PEER LIST] {} Local peer is not in the correct configuration, delete it.",
- groupId);
+ logger.info(ConsensusMessages.RESET_PEER_LIST_NOT_IN_CORRECT, groupId);
deleteLocalPeer(groupId);
return;
}
@@ -518,9 +522,9 @@ private void resetPeerListImpl(
if (!correctPeers.contains(peer)) {
if (!impl.removeSyncLogChannel(peer)) {
logger.error(
- "[RESET PEER LIST] {} Failed to remove sync channel with: {}", groupId, peer);
+ ConsensusMessages.RESET_PEER_LIST_FAILED_TO_REMOVE_SYNC_CHANNEL, groupId, peer);
} else {
- logger.info("[RESET PEER LIST] {} Remove sync channel with: {}", groupId, peer);
+ logger.info(ConsensusMessages.RESET_PEER_LIST_REMOVE_SYNC_CHANNEL, groupId, peer);
}
}
}
@@ -528,22 +532,20 @@ private void resetPeerListImpl(
for (Peer peer : correctPeers) {
if (!impl.getConfiguration().contains(peer)) {
impl.buildSyncLogChannel(peer, startNow);
- logger.info("[RESET PEER LIST] {} Build sync channel with: {}", groupId, peer);
+ logger.info(ConsensusMessages.RESET_PEER_LIST_BUILD_SYNC_CHANNEL, groupId, peer);
}
}
// show result
String newPeerListStr = impl.getConfiguration().toString();
if (!previousPeerListStr.equals(newPeerListStr)) {
logger.info(
- "[RESET PEER LIST] {} Local peer list has been reset: {} -> {}",
+ ConsensusMessages.RESET_PEER_LIST_RESET_RESULT,
groupId,
previousPeerListStr,
newPeerListStr);
} else {
logger.info(
- "[RESET PEER LIST] {} The current peer list is correct, nothing need to be reset: {}",
- groupId,
- previousPeerListStr);
+ ConsensusMessages.RESET_PEER_LIST_NOTHING_TO_RESET, groupId, previousPeerListStr);
}
}
}
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/IoTConsensusServerImpl.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/IoTConsensusServerImpl.java
index d8fa9fc8f635b..74ba871643b9c 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/IoTConsensusServerImpl.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/IoTConsensusServerImpl.java
@@ -39,6 +39,8 @@
import org.apache.iotdb.consensus.common.request.IndexedConsensusRequest;
import org.apache.iotdb.consensus.config.IoTConsensusConfig;
import org.apache.iotdb.consensus.exception.ConsensusGroupModifyPeerException;
+import org.apache.iotdb.consensus.i18n.ConsensusMessages;
+import org.apache.iotdb.consensus.i18n.IoTConsensusMessages;
import org.apache.iotdb.consensus.iot.client.AsyncIoTConsensusServiceClient;
import org.apache.iotdb.consensus.iot.client.SyncIoTConsensusServiceClient;
import org.apache.iotdb.consensus.iot.log.ConsensusReqReader;
@@ -184,7 +186,7 @@ public TSStatus write(IConsensusRequest request) {
ioTConsensusServerMetrics.recordGetStateMachineLockTime(
getStateMachineLockTime - consensusWriteStartTime);
if (needBlockWrite()) {
- logger.info("[Throttle Down] index:{}, safeIndex:{}", getSearchIndex(), getMinSyncIndex());
+ logger.info(IoTConsensusMessages.THROTTLE_DOWN, getSearchIndex(), getMinSyncIndex());
try {
boolean timeout =
!stateMachineCondition.await(
@@ -199,7 +201,7 @@ public TSStatus write(IConsensusRequest request) {
config.getReplication().getWalThrottleThreshold()));
}
} catch (InterruptedException e) {
- logger.error("Failed to throttle down because ", e);
+ logger.error(IoTConsensusMessages.FAILED_TO_THROTTLE_DOWN, e);
Thread.currentThread().interrupt();
}
}
@@ -212,7 +214,7 @@ public TSStatus write(IConsensusRequest request) {
lastConsensusRequest = indexedConsensusRequest;
if (indexedConsensusRequest.getSearchIndex() % 100000 == 0) {
logger.info(
- "DataRegion[{}]: index after build: safeIndex:{}, searchIndex: {}, lastConsensusRequest: {}",
+ IoTConsensusMessages.DATA_REGION_INDEX_AFTER_BUILD,
thisNode.getGroupId(),
getMinSyncIndex(),
indexedConsensusRequest.getSearchIndex(),
@@ -243,7 +245,7 @@ public TSStatus write(IConsensusRequest request) {
System.nanoTime() - writeToStateMachineEndTime);
} else {
logger.debug(
- "{}: write operation failed. searchIndex: {}. Code: {}",
+ IoTConsensusMessages.WRITE_OPERATION_FAILED,
thisNode.getGroupId(),
indexedConsensusRequest.getSearchIndex(),
result.getCode());
@@ -272,14 +274,15 @@ public void takeSnapshot() throws ConsensusGroupModifyPeerException {
}
if (!snapshotDir.mkdirs()) {
throw new ConsensusGroupModifyPeerException(
- String.format("%s: cannot mkdir for snapshot", thisNode.getGroupId()));
+ String.format(IoTConsensusMessages.CANNOT_MKDIR_FOR_SNAPSHOT, thisNode.getGroupId()));
}
if (!stateMachine.takeSnapshot(snapshotDir)) {
- throw new ConsensusGroupModifyPeerException("unknown error when taking snapshot");
+ throw new ConsensusGroupModifyPeerException(
+ IoTConsensusMessages.UNKNOWN_ERROR_TAKING_SNAPSHOT);
}
clearOldSnapshot();
} catch (IOException e) {
- throw new ConsensusGroupModifyPeerException("error when taking snapshot", e);
+ throw new ConsensusGroupModifyPeerException(IoTConsensusMessages.ERROR_TAKING_SNAPSHOT, e);
}
}
@@ -303,12 +306,11 @@ public void transmitSnapshot(Peer targetPeer) throws ConsensusGroupModifyPeerExc
long transitedFilesNum = 0;
long startTime = System.nanoTime();
logger.info(
- "[SNAPSHOT TRANSMISSION] Start to transmit snapshots ({} files, total size {}) from dir {}",
+ IoTConsensusMessages.SNAPSHOT_TRANSMISSION_START,
snapshotPaths.size(),
humanReadableByteCountSI(snapshotSizeSum),
snapshotDir);
- logger.info(
- "[SNAPSHOT TRANSMISSION] All the files below shell be transmitted: {}", allFilesStr);
+ logger.info(IoTConsensusMessages.SNAPSHOT_TRANSMISSION_ALL_FILES, allFilesStr);
try (SyncIoTConsensusServiceClient client =
syncClientManager.borrowClient(targetPeer.getEndpoint())) {
for (File file : snapshotPaths) {
@@ -323,15 +325,13 @@ public void transmitSnapshot(Peer targetPeer) throws ConsensusGroupModifyPeerExc
TSendSnapshotFragmentRes res = client.sendSnapshotFragment(req);
if (!isSuccess(res.getStatus())) {
throw new ConsensusGroupModifyPeerException(
- String.format(
- "[SNAPSHOT TRANSMISSION] Error when transmitting snapshot fragment to %s",
- targetPeer));
+ String.format(IoTConsensusMessages.SNAPSHOT_TRANSMISSION_ERROR, targetPeer));
}
}
transitedSnapshotSizeSum += reader.getTotalReadSize();
transitedFilesNum++;
logger.info(
- "[SNAPSHOT TRANSMISSION] The overall progress for dir {}: files {}/{} done, size {}/{} done, time {} passed. File {} done.",
+ IoTConsensusMessages.SNAPSHOT_TRANSMISSION_PROGRESS,
newSnapshotDirName,
transitedFilesNum,
snapshotPaths.size(),
@@ -346,11 +346,10 @@ public void transmitSnapshot(Peer targetPeer) throws ConsensusGroupModifyPeerExc
}
} catch (Exception e) {
throw new ConsensusGroupModifyPeerException(
- String.format("[SNAPSHOT TRANSMISSION] Error when send snapshot file to %s", targetPeer),
- e);
+ String.format(IoTConsensusMessages.SNAPSHOT_TRANSMISSION_SEND_ERROR, targetPeer), e);
}
logger.info(
- "[SNAPSHOT TRANSMISSION] After {}, successfully transmit all snapshots from dir {}",
+ IoTConsensusMessages.SNAPSHOT_TRANSMISSION_COMPLETE,
CommonDateTimeUtils.convertMillisecondToDurationStr(
(System.nanoTime() - startTime) / 1_000_000),
snapshotDir);
@@ -372,7 +371,7 @@ public void receiveSnapshotFragment(
}
} catch (IOException e) {
throw new ConsensusGroupModifyPeerException(
- String.format("error when receiving snapshot %s", snapshotId), e);
+ String.format(IoTConsensusMessages.ERROR_RECEIVING_SNAPSHOT, snapshotId), e);
}
}
@@ -380,8 +379,7 @@ private String calculateSnapshotPath(String snapshotId, String originalFilePath)
throws ConsensusGroupModifyPeerException {
if (!originalFilePath.contains(snapshotId)) {
throw new ConsensusGroupModifyPeerException(
- String.format(
- "invalid snapshot file. snapshotId: %s, filePath: %s", snapshotId, originalFilePath));
+ String.format(IoTConsensusMessages.INVALID_SNAPSHOT_FILE, snapshotId, originalFilePath));
}
return originalFilePath.substring(originalFilePath.indexOf(snapshotId));
}
@@ -390,9 +388,7 @@ private void clearOldSnapshot() {
File directory = new File(storageDir);
File[] versionFiles = directory.listFiles((dir, name) -> name.startsWith(SNAPSHOT_DIR_NAME));
if (versionFiles == null || versionFiles.length == 0) {
- logger.error(
- "Can not find any snapshot dir after build a new snapshot for group {}",
- thisNode.getGroupId());
+ logger.error(IoTConsensusMessages.CANNOT_FIND_SNAPSHOT_DIR, thisNode.getGroupId());
return;
}
for (File file : versionFiles) {
@@ -400,7 +396,7 @@ private void clearOldSnapshot() {
try {
FileUtils.deleteDirectory(file);
} catch (IOException e) {
- logger.error("Delete old snapshot dir {} failed", file.getAbsolutePath(), e);
+ logger.error(IoTConsensusMessages.DELETE_OLD_SNAPSHOT_FAILED, file.getAbsolutePath(), e);
}
}
}
@@ -419,7 +415,8 @@ private File getSnapshotPath(String snapshotRelativePath) {
.getCanonicalFile()
.toPath()
.startsWith(storageDirFile.getCanonicalFile().toPath())) {
- throw new IllegalArgumentException("Invalid snapshotRelativePath: " + snapshotRelativePath);
+ throw new IllegalArgumentException(
+ IoTConsensusMessages.INVALID_SNAPSHOT_RELATIVE_PATH + snapshotRelativePath);
}
} catch (IOException e) {
throw new IllegalArgumentException(e);
@@ -453,11 +450,12 @@ public void inactivatePeer(Peer peer, boolean forDeletionPurpose)
}
lastException =
new ConsensusGroupModifyPeerException(
- String.format("error when inactivating %s. %s", peer, res.getStatus()));
+ String.format(
+ IoTConsensusMessages.ERROR_INACTIVATING_PEER, peer, res.getStatus()));
} catch (Exception e) {
lastException =
new ConsensusGroupModifyPeerException(
- String.format("error when inactivating %s", peer), e);
+ String.format(IoTConsensusMessages.ERROR_INACTIVATING_PEER_SHORT, peer), e);
}
} catch (ClientManagerException e) {
lastException = new ConsensusGroupModifyPeerException(e);
@@ -475,11 +473,12 @@ public void triggerSnapshotLoad(Peer peer) throws ConsensusGroupModifyPeerExcept
thisNode.getGroupId().convertToTConsensusGroupId(), newSnapshotDirName));
if (!isSuccess(res.status)) {
throw new ConsensusGroupModifyPeerException(
- String.format("error when triggering snapshot load %s. %s", peer, res.getStatus()));
+ String.format(
+ IoTConsensusMessages.ERROR_TRIGGERING_SNAPSHOT_LOAD, peer, res.getStatus()));
}
} catch (Exception e) {
throw new ConsensusGroupModifyPeerException(
- String.format("error when activating %s", peer), e);
+ String.format(IoTConsensusMessages.ERROR_ACTIVATING_PEER_SHORT, peer), e);
}
}
@@ -490,11 +489,11 @@ public void activePeer(Peer peer) throws ConsensusGroupModifyPeerException {
client.activatePeer(new TActivatePeerReq(peer.getGroupId().convertToTConsensusGroupId()));
if (!isSuccess(res.status)) {
throw new ConsensusGroupModifyPeerException(
- String.format("error when activating %s. %s", peer, res.getStatus()));
+ String.format(IoTConsensusMessages.ERROR_ACTIVATING_PEER, peer, res.getStatus()));
}
} catch (Exception e) {
throw new ConsensusGroupModifyPeerException(
- String.format("error when activating %s", peer), e);
+ String.format(IoTConsensusMessages.ERROR_ACTIVATING_PEER_SHORT, peer), e);
}
}
@@ -504,11 +503,9 @@ public void notifyPeersToBuildSyncLogChannel(Peer targetPeer)
// configuration
List currentMembers = new ArrayList<>(this.configuration);
logger.info(
- "[IoTConsensus] notify current peers to build sync log. group member: {}, target: {}",
- currentMembers,
- targetPeer);
+ IoTConsensusMessages.NOTIFY_PEERS_BUILD_SYNC_LOG_DETAIL, currentMembers, targetPeer);
for (Peer peer : currentMembers) {
- logger.info("[IoTConsensus] build sync log channel from {}", peer);
+ logger.info(IoTConsensusMessages.BUILD_SYNC_LOG_CHANNEL_FROM, peer);
if (peer.equals(thisNode)) {
// use searchIndex for thisNode as the initialSyncIndex because targetPeer will load the
// snapshot produced by thisNode
@@ -525,7 +522,8 @@ public void notifyPeersToBuildSyncLogChannel(Peer targetPeer)
targetPeer.getNodeId()));
if (!isSuccess(res.status)) {
throw new ConsensusGroupModifyPeerException(
- String.format("build sync log channel failed from %s to %s", peer, targetPeer));
+ String.format(
+ IoTConsensusMessages.BUILD_SYNC_LOG_CHANNEL_FAILED, peer, targetPeer));
}
} catch (Exception e) {
// We use a simple way to deal with the connection issue when notifying other nodes to
@@ -566,14 +564,11 @@ public void notifyPeersToRemoveSyncLogChannel(Peer targetPeer) {
targetPeer.getEndpoint(),
targetPeer.getNodeId()));
if (!isSuccess(res.status)) {
- logger.warn("removing sync log channel failed from {} to {}", peer, targetPeer);
+ logger.warn(IoTConsensusMessages.REMOVING_SYNC_LOG_CHANNEL_FAILED, peer, targetPeer);
}
} catch (Exception e) {
logger.warn(
- "Exception happened during removing sync log channel from {} to {}",
- peer,
- targetPeer,
- e);
+ IoTConsensusMessages.EXCEPTION_REMOVING_SYNC_LOG_CHANNEL, peer, targetPeer, e);
}
}
}
@@ -590,14 +585,14 @@ public void waitTargetPeerUntilSyncLogCompleted(Peer targetPeer)
new TWaitSyncLogCompleteReq(targetPeer.getGroupId().convertToTConsensusGroupId()));
if (res.complete) {
logger.info(
- "[WAIT LOG SYNC] {} SyncLog is completed. TargetIndex: {}, CurrentSyncIndex: {}",
+ IoTConsensusMessages.WAIT_SYNC_LOG_COMPLETED,
targetPeer,
res.searchIndex,
res.safeIndex);
return;
}
logger.info(
- "[WAIT LOG SYNC] {} SyncLog is still in progress. TargetIndex: {}, CurrentSyncIndex: {}",
+ IoTConsensusMessages.WAIT_SYNC_LOG_IN_PROGRESS,
targetPeer,
res.searchIndex,
res.safeIndex);
@@ -606,14 +601,13 @@ public void waitTargetPeerUntilSyncLogCompleted(Peer targetPeer)
} catch (ClientManagerException | TException e) {
throw new ConsensusGroupModifyPeerException(
String.format(
- "error when waiting %s to complete SyncLog. %s", targetPeer, e.getMessage()),
+ IoTConsensusMessages.ERROR_WAITING_SYNC_LOG_COMPLETE, targetPeer, e.getMessage()),
e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new ConsensusGroupModifyPeerException(
String.format(
- "thread interrupted when waiting %s to complete SyncLog. %s",
- targetPeer, e.getMessage()),
+ IoTConsensusMessages.THREAD_INTERRUPTED_WAITING_SYNC_LOG, targetPeer, e.getMessage()),
e);
}
}
@@ -633,24 +627,24 @@ public void waitReleaseAllRegionRelatedResource(Peer targetPeer)
new TWaitReleaseAllRegionRelatedResourceReq(
targetPeer.getGroupId().convertToTConsensusGroupId()));
if (res.releaseAllResource) {
- logger.info("[WAIT RELEASE] {} has released all region related resource", targetPeer);
+ logger.info(ConsensusMessages.WAIT_RELEASE_HAS_RELEASED, targetPeer);
return;
}
- logger.info("[WAIT RELEASE] {} is still releasing all region related resource", targetPeer);
+ logger.info(ConsensusMessages.WAIT_RELEASE_STILL_RELEASING, targetPeer);
Thread.sleep(checkIntervalInMs);
}
} catch (ClientManagerException | TException e) {
throw new ConsensusGroupModifyPeerException(
String.format(
- "error when waiting %s to release all region related resource. %s",
- targetPeer, e.getMessage()),
+ ConsensusMessages.ERROR_WAITING_RELEASE_RESOURCE, targetPeer, e.getMessage()),
e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new ConsensusGroupModifyPeerException(
String.format(
- "thread interrupted when waiting %s to release all region related resource. %s",
- targetPeer, e.getMessage()),
+ ConsensusMessages.THREAD_INTERRUPTED_WAITING_RELEASE_RESOURCE,
+ targetPeer,
+ e.getMessage()),
e);
}
}
@@ -672,10 +666,12 @@ public void buildSyncLogChannel(Peer targetPeer, long initialSyncIndex, boolean
}
logDispatcher.addLogDispatcherThread(targetPeer, initialSyncIndex, startNow);
logger.info(
- "[IoTConsensus] Successfully build sync log channel to {} with initialSyncIndex {}. {}",
+ IoTConsensusMessages.BUILD_SYNC_LOG_CHANNEL_SUCCESS,
targetPeer,
initialSyncIndex,
- startNow ? "Sync log channel has started." : "Sync log channel maybe start later.");
+ startNow
+ ? IoTConsensusMessages.SYNC_LOG_CHANNEL_STARTED
+ : IoTConsensusMessages.SYNC_LOG_CHANNEL_START_LATER);
}
/**
@@ -687,25 +683,19 @@ public boolean removeSyncLogChannel(Peer targetPeer) {
String suggestion = "";
try {
logDispatcher.removeLogDispatcherThread(targetPeer);
- logger.info("[IoTConsensus] log dispatcher to {} removed and cleanup", targetPeer);
+ logger.info(IoTConsensusMessages.LOG_DISPATCHER_REMOVED_CLEANUP, targetPeer);
} catch (Exception e) {
- logger.warn(
- "[IoTConsensus] Exception happened during removing log dispatcher thread, but configuration.dat will still be removed.",
- e);
- suggestion = "It's suggested restart the DataNode to remove log dispatcher thread.";
+ logger.warn(IoTConsensusMessages.EXCEPTION_REMOVING_LOG_DISPATCHER, e);
+ suggestion = IoTConsensusMessages.SUGGEST_RESTART_DATANODE;
exceptionHappened = true;
}
if (!exceptionHappened) {
- logger.info(
- "[IoTConsensus] Log dispatcher thread to {} has been removed and cleanup", targetPeer);
+ logger.info(IoTConsensusMessages.LOG_DISPATCHER_REMOVED_AND_CLEANUP, targetPeer);
}
// step 2, update configuration
configuration.remove(targetPeer);
checkAndUpdateSafeDeletedSearchIndex();
- logger.info(
- "[IoTConsensus Configuration] Configuration updated to {}. {}",
- this.configuration,
- suggestion);
+ logger.info(IoTConsensusMessages.CONFIGURATION_UPDATED, this.configuration, suggestion);
return !exceptionHappened;
}
@@ -816,7 +806,7 @@ public boolean isActive() {
}
public void setActive(boolean active) {
- logger.info("set {} active status to {}", this.thisNode, active);
+ logger.info(ConsensusMessages.SET_ACTIVE_STATUS, this.thisNode, active);
this.active = active;
}
@@ -830,11 +820,11 @@ public void cleanupRemoteSnapshot(Peer targetPeer) throws ConsensusGroupModifyPe
if (!isSuccess(res.getStatus())) {
throw new ConsensusGroupModifyPeerException(
String.format(
- "cleanup remote snapshot failed of %s ,status is %s", targetPeer, res.getStatus()));
+ IoTConsensusMessages.CLEANUP_REMOTE_SNAPSHOT_FAILED, targetPeer, res.getStatus()));
}
} catch (Exception e) {
throw new ConsensusGroupModifyPeerException(
- String.format("cleanup remote snapshot failed of %s", targetPeer), e);
+ String.format(IoTConsensusMessages.CLEANUP_REMOTE_SNAPSHOT_FAILED_SHORT, targetPeer), e);
}
}
@@ -847,7 +837,7 @@ public void cleanupSnapshot(String snapshotId) throws ConsensusGroupModifyPeerEx
throw new ConsensusGroupModifyPeerException(e);
}
} else {
- logger.info("File not exist: {}", snapshotDir);
+ logger.info(IoTConsensusMessages.FILE_NOT_EXIST, snapshotDir);
}
}
@@ -856,8 +846,7 @@ public void cleanupLocalSnapshot() {
cleanupSnapshot(newSnapshotDirName);
stateMachine.clearSnapshot();
} catch (ConsensusGroupModifyPeerException e) {
- logger.warn(
- "Cleanup local snapshot fail. You may manually delete {}.", newSnapshotDirName, e);
+ logger.warn(IoTConsensusMessages.CLEANUP_LOCAL_SNAPSHOT_FAIL, newSnapshotDirName, e);
}
}
@@ -877,8 +866,7 @@ void checkAndUpdateIndex() {
*/
void checkAndUpdateSafeDeletedSearchIndex() {
if (configuration.isEmpty()) {
- logger.error(
- "Configuration is empty, which is unexpected. Safe deleted search index won't be updated this time.");
+ logger.error(IoTConsensusMessages.CONFIGURATION_EMPTY_UNEXPECTED);
} else if (configuration.size() == 1) {
consensusReqReader.setSafelyDeletedSearchIndex(Long.MAX_VALUE);
} else {
@@ -891,13 +879,7 @@ public void checkAndUpdateSearchIndex() {
long safelyDeletedSearchIndex = getMinFlushedSyncIndex();
if (currentSearchIndex < safelyDeletedSearchIndex) {
logger.warn(
- "The searchIndex for this region({}) is smaller than the safelyDeletedSearchIndex when "
- + "the node is restarted, which means that the data of the current region is not flushed "
- + "by the wal, but has been synchronized to other nodes. At this point, "
- + "different replicas have been inconsistent and cannot be automatically recovered. "
- + "To prevent subsequent logs from marking smaller searchIndex and exacerbating the "
- + "inconsistency, we manually set the searchIndex({}) to safelyDeletedSearchIndex({}) "
- + "here to reduce the impact of this problem in the future",
+ IoTConsensusMessages.SEARCH_INDEX_SMALLER_THAN_SAFELY_DELETED,
consensusGroupId,
currentSearchIndex,
safelyDeletedSearchIndex);
@@ -945,7 +927,7 @@ public SyncLogCacheQueue(int sourcePeerId) {
*/
private TSStatus cacheAndInsertLatestNode(DeserializedBatchIndexedConsensusRequest request) {
logger.debug(
- "cacheAndInsert start: source = {}, region = {}, queue size {}, startSyncIndex = {}, endSyncIndex = {}",
+ IoTConsensusMessages.CACHE_AND_INSERT_START,
sourcePeerId,
consensusGroupId,
requestCache.size(),
@@ -996,7 +978,7 @@ private TSStatus cacheAndInsertLatestNode(DeserializedBatchIndexedConsensusReque
&& requestCache.peek().getStartSyncIndex() == request.getStartSyncIndex()) {
// current thread hold the peek request thus it can write the peek immediately.
logger.info(
- "waiting target request timeout. current index: {}, target index: {}",
+ IoTConsensusMessages.WAITING_TARGET_REQUEST_TIMEOUT,
request.getStartSyncIndex(),
nextSyncIndex);
requestCache.remove(request);
@@ -1005,9 +987,7 @@ private TSStatus cacheAndInsertLatestNode(DeserializedBatchIndexedConsensusReque
}
} catch (InterruptedException e) {
logger.warn(
- "current waiting is interrupted. SyncIndex: {}. Exception: ",
- request.getStartSyncIndex(),
- e);
+ IoTConsensusMessages.CURRENT_WAITING_INTERRUPTED, request.getStartSyncIndex(), e);
Thread.currentThread().interrupt();
break;
}
@@ -1023,7 +1003,7 @@ private TSStatus cacheAndInsertLatestNode(DeserializedBatchIndexedConsensusReque
ioTConsensusServerMetrics.recordApplyCost(applyTime - sortTime);
queueSortCondition.signalAll();
logger.debug(
- "cacheAndInsert end: source = {}, region = {}, queue size {}, startSyncIndex = {}, endSyncIndex = {}, sortTime = {}ms, applyTime = {}ms",
+ IoTConsensusMessages.CACHE_AND_INSERT_END,
sourcePeerId,
consensusGroupId,
requestCache.size(),
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/AsyncIoTConsensusServiceClient.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/AsyncIoTConsensusServiceClient.java
index e2b5887826ade..ad0bcc0d9a25a 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/AsyncIoTConsensusServiceClient.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/AsyncIoTConsensusServiceClient.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.commons.client.property.ThriftClientProperty;
import org.apache.iotdb.commons.conf.CommonConfig;
import org.apache.iotdb.commons.conf.CommonDescriptor;
+import org.apache.iotdb.consensus.i18n.IoTConsensusMessages;
import org.apache.iotdb.consensus.iot.thrift.IoTConsensusIService;
import org.apache.iotdb.rpc.TNonblockingTransportWrapper;
@@ -91,7 +92,7 @@ public void onError(Exception e) {
@Override
public void invalidate() {
if (!hasError()) {
- super.onError(new Exception("This client has been invalidated"));
+ super.onError(new Exception(IoTConsensusMessages.CLIENT_INVALIDATED));
}
}
@@ -124,7 +125,7 @@ public boolean isReady() {
return true;
} catch (Exception e) {
logger.info(
- "Unexpected exception occurs in {}, error msg is {}",
+ IoTConsensusMessages.UNEXPECTED_EXCEPTION_IN_CLIENT,
this,
ExceptionUtils.getRootCause(e).toString());
return false;
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/DispatchLogHandler.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/DispatchLogHandler.java
index bb0326d7473e7..02c1cedde0970 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/DispatchLogHandler.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/client/DispatchLogHandler.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.commons.utils.RetryUtils;
+import org.apache.iotdb.consensus.i18n.IoTConsensusMessages;
import org.apache.iotdb.consensus.iot.logdispatcher.Batch;
import org.apache.iotdb.consensus.iot.logdispatcher.LogDispatcher;
import org.apache.iotdb.consensus.iot.logdispatcher.LogDispatcher.LogDispatcherThread;
@@ -72,7 +73,7 @@ public void onComplete(TSyncLogEntriesRes response) {
String messages = String.join(", ", retryStatusMessages);
logger.warn(
- "Can not send {} to peer {} for {} times because {}",
+ IoTConsensusMessages.CANNOT_SEND_TO_PEER,
batch,
thread.getPeer(),
++retryCount,
@@ -86,7 +87,7 @@ public void onComplete(TSyncLogEntriesRes response) {
status -> status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode());
if (containsError) {
logger.debug(
- "Send {} to peer {} complete but contains unsuccessful status: {}",
+ IoTConsensusMessages.SEND_COMPLETE_BUT_CONTAINS_ERROR,
batch,
thread.getPeer(),
response.getStatuses());
@@ -106,7 +107,7 @@ public void onError(Exception exception) {
++retryCount;
Throwable rootCause = ExceptionUtils.getRootCause(exception);
logger.warn(
- "Can not send {} to peer for {} times {} because {}",
+ IoTConsensusMessages.CANNOT_SEND_TO_PEER_ON_ERROR,
batch,
thread.getPeer(),
retryCount,
@@ -114,7 +115,7 @@ public void onError(Exception exception) {
// skip TApplicationException caused by follower
if (rootCause instanceof TApplicationException) {
completeBatch(batch);
- logger.warn("Skip retrying this Batch {} because of TApplicationException.", batch);
+ logger.warn(IoTConsensusMessages.SKIP_RETRY_TAPPLICATION_EXCEPTION, batch);
logDispatcherThreadMetrics.recordSyncLogTimePerRequest(System.nanoTime() - createTime);
return;
}
@@ -134,8 +135,7 @@ private void sleepCorrespondingTimeAndRetryAsynchronous() {
() -> {
if (thread.isStopped()) {
logger.debug(
- "LogDispatcherThread {} has been stopped, "
- + "we will not retrying this Batch {} after {} times",
+ IoTConsensusMessages.LOG_DISPATCHER_STOPPED_NO_RETRY,
thread.getPeer(),
batch,
retryCount);
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/logdispatcher/IndexController.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/logdispatcher/IndexController.java
index 8c482a921852c..910a054bea360 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/logdispatcher/IndexController.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/logdispatcher/IndexController.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.consensus.iot.logdispatcher;
import org.apache.iotdb.consensus.common.Peer;
+import org.apache.iotdb.consensus.i18n.IoTConsensusMessages;
import org.apache.iotdb.consensus.ratis.utils.Utils;
import org.apache.tsfile.external.commons.io.FileUtils;
@@ -69,11 +70,7 @@ public IndexController(String storageDir, Peer peer, long initialIndex, long che
public synchronized void update(long index, boolean forcePersist) {
long newCurrentIndex = Math.max(currentIndex, index);
logger.debug(
- "update index from currentIndex {} to {} for file prefix {} in {}",
- currentIndex,
- newCurrentIndex,
- prefix,
- storageDir);
+ IoTConsensusMessages.UPDATE_INDEX, currentIndex, newCurrentIndex, prefix, storageDir);
currentIndex = newCurrentIndex;
checkPersist(forcePersist);
}
@@ -103,7 +100,7 @@ private void persist() {
if (oldFile.exists()) {
FileUtils.moveFile(oldFile, newFile);
logger.debug(
- "version file updated, previous: {}, current: {}",
+ IoTConsensusMessages.VERSION_FILE_UPDATED,
oldFile.getAbsolutePath(),
newFile.getAbsolutePath());
} else {
@@ -112,16 +109,14 @@ private void persist() {
// before all the async operations returns. We needn't add some sync operation here
// because it won't infect the correctness
logger.info(
- "failed to flush sync index because previous version file {} does not exists. "
- + "It may be caused by the target Peer is removed from current group. "
- + "target file is {}",
+ IoTConsensusMessages.FAILED_FLUSH_SYNC_INDEX,
oldFile.getAbsolutePath(),
newFile.getAbsolutePath());
}
lastFlushedIndex = flushIndex;
} catch (IOException e) {
- logger.error("Error occurred when flushing next version", e);
+ logger.error(IoTConsensusMessages.ERROR_FLUSHING_NEXT_VERSION, e);
}
}
@@ -139,12 +134,12 @@ private void upgrade() {
File newFile = new File(storageDir, prefix + fileVersion);
try {
logger.info(
- "version file upgrade, previous: {}, current: {}",
+ IoTConsensusMessages.VERSION_FILE_UPGRADE,
oldFile.getAbsolutePath(),
newFile.getAbsolutePath());
FileUtils.moveFile(oldFile, newFile);
} catch (IOException e) {
- logger.error("Error occurred when upgrading version file", e);
+ logger.error(IoTConsensusMessages.ERROR_UPGRADING_VERSION_FILE, e);
}
}));
}
@@ -179,7 +174,9 @@ private void deleteVersionFiles(File[] versionFiles, int maxVersionIndex) {
Files.delete(versionFiles[i].toPath());
} catch (IOException e) {
logger.error(
- "Delete outdated version file {} failed", versionFiles[i].getAbsolutePath(), e);
+ IoTConsensusMessages.DELETE_OUTDATED_VERSION_FILE_FAILED,
+ versionFiles[i].getAbsolutePath(),
+ e);
}
}
}
@@ -203,7 +200,8 @@ private void restore() {
} catch (IOException e) {
// TODO: (xingtanzjr) we need to handle the situation that file creation failed.
// Or the dispatcher won't run correctly
- logger.error("Error occurred when creating new file {}", versionFile.getAbsolutePath(), e);
+ logger.error(
+ IoTConsensusMessages.ERROR_CREATING_NEW_FILE, versionFile.getAbsolutePath(), e);
}
}
}
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/logdispatcher/IoTConsensusMemoryManager.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/logdispatcher/IoTConsensusMemoryManager.java
index 17074d5e66c69..9bc5486f7e4d2 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/logdispatcher/IoTConsensusMemoryManager.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/logdispatcher/IoTConsensusMemoryManager.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.commons.service.metric.MetricService;
import org.apache.iotdb.commons.utils.TestOnly;
import org.apache.iotdb.consensus.common.request.IndexedConsensusRequest;
+import org.apache.iotdb.consensus.i18n.IoTConsensusMessages;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -49,7 +50,7 @@ public boolean reserve(IndexedConsensusRequest request) {
if (reserved) {
if (logger.isDebugEnabled()) {
logger.debug(
- "Reserving {} bytes for request {} succeeds, current total usage {}",
+ IoTConsensusMessages.RESERVING_BYTES_FOR_REQUEST_SUCCEEDS,
request.getMemorySize(),
request.getSearchIndex(),
memoryBlock.getUsedMemoryInBytes());
@@ -58,7 +59,7 @@ public boolean reserve(IndexedConsensusRequest request) {
request.decRef();
if (logger.isDebugEnabled()) {
logger.debug(
- "Reserving {} bytes for request {} fails, current total usage {}",
+ IoTConsensusMessages.RESERVING_BYTES_FOR_REQUEST_FAILS,
request.getMemorySize(),
request.getSearchIndex(),
memoryBlock.getUsedMemoryInBytes());
@@ -66,9 +67,7 @@ public boolean reserve(IndexedConsensusRequest request) {
}
return reserved;
} else if (logger.isDebugEnabled()) {
- logger.debug(
- "Skip memory reservation for {} because its ref count is not 0",
- request.getSearchIndex());
+ logger.debug(IoTConsensusMessages.SKIP_MEMORY_RESERVATION, request.getSearchIndex());
}
return true;
}
@@ -77,14 +76,14 @@ public boolean reserve(Batch batch) {
boolean reserved = reserve(batch.getMemorySize(), false);
if (reserved && logger.isDebugEnabled()) {
logger.debug(
- "Reserving {} bytes for batch {}-{} succeeds, current total usage {}",
+ IoTConsensusMessages.RESERVING_BYTES_FOR_BATCH_SUCCEEDS,
batch.getMemorySize(),
batch.getStartIndex(),
batch.getEndIndex(),
memoryBlock.getUsedMemoryInBytes());
} else if (logger.isDebugEnabled()) {
logger.debug(
- "Reserving {} bytes for batch {}-{} fails, current total usage {}",
+ IoTConsensusMessages.RESERVING_BYTES_FOR_BATCH_FAILS,
batch.getMemorySize(),
batch.getStartIndex(),
batch.getEndIndex(),
@@ -114,7 +113,7 @@ public void free(IndexedConsensusRequest request) {
free(request.getMemorySize(), true);
if (logger.isDebugEnabled()) {
logger.debug(
- "Freed {} bytes for request {}, current total usage {}",
+ IoTConsensusMessages.FREED_BYTES_FOR_REQUEST,
request.getMemorySize(),
request.getSearchIndex(),
memoryBlock.getUsedMemoryInBytes());
@@ -126,7 +125,7 @@ public void free(Batch batch) {
free(batch.getMemorySize(), false);
if (logger.isDebugEnabled()) {
logger.debug(
- "Freed {} bytes for batch {}-{}, current total usage {}",
+ IoTConsensusMessages.FREED_BYTES_FOR_BATCH,
batch.getMemorySize(),
batch.getStartIndex(),
batch.getEndIndex(),
@@ -142,7 +141,7 @@ private void free(long size, boolean fromQueue) {
syncMemorySizeInByte.addAndGet(-size);
}
logger.debug(
- "{} free {} bytes, total memory size: {} bytes.",
+ IoTConsensusMessages.FREE_MEMORY,
Thread.currentThread().getName(),
size,
currentUsedMemory);
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/logdispatcher/LogDispatcher.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/logdispatcher/LogDispatcher.java
index 374691bf38bf1..d8212011415f5 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/logdispatcher/LogDispatcher.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/logdispatcher/LogDispatcher.java
@@ -27,6 +27,7 @@
import org.apache.iotdb.consensus.common.Peer;
import org.apache.iotdb.consensus.common.request.IndexedConsensusRequest;
import org.apache.iotdb.consensus.config.IoTConsensusConfig;
+import org.apache.iotdb.consensus.i18n.IoTConsensusMessages;
import org.apache.iotdb.consensus.iot.IoTConsensusServerImpl;
import org.apache.iotdb.consensus.iot.client.AsyncIoTConsensusServiceClient;
import org.apache.iotdb.consensus.iot.client.DispatchLogHandler;
@@ -113,11 +114,11 @@ public synchronized void stop() {
int timeout = 10;
try {
if (!executorService.awaitTermination(timeout, TimeUnit.SECONDS)) {
- logger.error("Unable to shutdown LogDispatcher service after {} seconds", timeout);
+ logger.error(IoTConsensusMessages.UNABLE_TO_SHUTDOWN_LOG_DISPATCHER, timeout);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- logger.error("Unexpected Interruption when closing LogDispatcher service ");
+ logger.error(IoTConsensusMessages.UNEXPECTED_INTERRUPTION_CLOSING_LOG_DISPATCHER);
}
}
stopped = true;
@@ -187,13 +188,13 @@ public void offer(IndexedConsensusRequest request) {
threads.forEach(
thread -> {
logger.debug(
- "{}->{}: Push a log to the queue, where the queue length is {}",
+ IoTConsensusMessages.PUSH_LOG_TO_QUEUE,
impl.getThisNode().getGroupId(),
thread.getPeer().getEndpoint().getIp(),
thread.getPendingEntriesSize());
if (!thread.offer(request)) {
logger.debug(
- "{}: Log queue of {} is full, ignore the log to this node, searchIndex: {}",
+ IoTConsensusMessages.LOG_QUEUE_FULL,
impl.getThisNode().getGroupId(),
thread.getPeer(),
request.getSearchIndex());
@@ -318,7 +319,7 @@ private void setStopped() {
private void processStopped() {
try {
if (!runFinished.await(30, TimeUnit.SECONDS)) {
- logger.info("{}: Dispatcher for {} didn't stop after 30s.", impl.getThisNode(), peer);
+ logger.info(IoTConsensusMessages.DISPATCHER_DID_NOT_STOP, impl.getThisNode(), peer);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
@@ -348,7 +349,7 @@ public IoTConsensusServerImpl getImpl() {
@Override
public void run() {
- logger.info("{}: Dispatcher for {} starts", impl.getThisNode(), peer);
+ logger.info(IoTConsensusMessages.DISPATCHER_STARTS, impl.getThisNode(), peer);
try {
Batch batch;
while (!Thread.interrupted() && !stopped) {
@@ -367,12 +368,13 @@ public void run() {
}
// Immediately check for interrupts after poll and sleep
if (Thread.interrupted() || stopped) {
- throw new InterruptedException("Interrupted after polling and sleeping");
+ throw new InterruptedException(
+ IoTConsensusMessages.INTERRUPTED_AFTER_POLLING_AND_SLEEPING);
}
}
// Immediately check for interrupts after a time-consuming getBatch() operation
if (Thread.interrupted() || stopped) {
- throw new InterruptedException("Interrupted after getting a batch");
+ throw new InterruptedException(IoTConsensusMessages.INTERRUPTED_AFTER_GETTING_A_BATCH);
}
logDispatcherThreadMetrics.recordConstructBatchTime(System.nanoTime() - startTime);
// we may block here if the synchronization pipeline is full
@@ -386,10 +388,10 @@ public void run() {
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} catch (Exception e) {
- logger.error("Unexpected error in logDispatcher for peer {}", peer, e);
+ logger.error(IoTConsensusMessages.UNEXPECTED_ERROR_IN_LOG_DISPATCHER, peer, e);
}
runFinished.countDown();
- logger.info("{}: Dispatcher for {} exits", impl.getThisNode(), peer);
+ logger.info(IoTConsensusMessages.DISPATCHER_EXITS, impl.getThisNode(), peer);
}
public void updateSafelyDeletedSearchIndex() {
@@ -411,7 +413,7 @@ public Batch getBatch() {
synchronized (impl.getIndexObject()) {
maxIndex = impl.getSearchIndex() + 1;
logger.debug(
- "{}: startIndex: {}, maxIndex: {}, pendingEntries size: {}, bufferedEntries size: {}",
+ IoTConsensusMessages.GET_BATCH_START_INDEX,
impl.getThisNode().getGroupId(),
startIndex,
maxIndex,
@@ -444,7 +446,9 @@ public Batch getBatch() {
constructBatchFromWAL(startIndex, maxIndex, batches);
batches.buildIndex();
logger.debug(
- "{} : accumulated a {} from wal when empty", impl.getThisNode().getGroupId(), batches);
+ IoTConsensusMessages.ACCUMULATED_FROM_WAL_WHEN_EMPTY,
+ impl.getThisNode().getGroupId(),
+ batches);
} else {
// Notice that prev searchIndex >= startIndex
iterator = bufferedEntries.iterator();
@@ -458,7 +462,9 @@ public Batch getBatch() {
if (hasCorruptedData || !batches.canAccumulate()) {
batches.buildIndex();
logger.debug(
- "{} : accumulated a {} from wal", impl.getThisNode().getGroupId(), batches);
+ IoTConsensusMessages.ACCUMULATED_FROM_WAL,
+ impl.getThisNode().getGroupId(),
+ batches);
return batches;
}
}
@@ -469,7 +475,9 @@ public Batch getBatch() {
if (!batches.canAccumulate()) {
batches.buildIndex();
logger.debug(
- "{} : accumulated a {} from queue", impl.getThisNode().getGroupId(), batches);
+ IoTConsensusMessages.ACCUMULATED_FROM_QUEUE,
+ impl.getThisNode().getGroupId(),
+ batches);
return batches;
}
@@ -483,7 +491,7 @@ public Batch getBatch() {
if (hasCorruptedData || !batches.canAccumulate()) {
batches.buildIndex();
logger.debug(
- "gap {} : accumulated a {} from queue and wal when gap",
+ IoTConsensusMessages.ACCUMULATED_FROM_QUEUE_AND_WAL_GAP,
impl.getThisNode().getGroupId(),
batches);
return batches;
@@ -499,7 +507,9 @@ public Batch getBatch() {
}
batches.buildIndex();
logger.debug(
- "{} : accumulated a {} from queue and wal", impl.getThisNode().getGroupId(), batches);
+ IoTConsensusMessages.ACCUMULATED_FROM_QUEUE_AND_WAL,
+ impl.getThisNode().getGroupId(),
+ batches);
}
return batches;
}
@@ -511,13 +521,13 @@ public void sendBatchAsync(Batch batch, DispatchLogHandler handler) {
new TSyncLogEntriesReq(
selfPeerId, peer.getGroupId().convertToTConsensusGroupId(), batch.getLogEntries());
logger.debug(
- "Send Batch[startIndex:{}, endIndex:{}] to ConsensusGroup:{}",
+ IoTConsensusMessages.SEND_BATCH,
batch.getStartIndex(),
batch.getEndIndex(),
peer.getGroupId().convertToTConsensusGroupId());
client.syncLogEntries(req, handler);
} catch (Exception e) {
- logger.error("Can not sync logs to peer {} because", peer, e);
+ logger.error(IoTConsensusMessages.CANNOT_SYNC_LOGS_TO_PEER, peer, e);
handler.onError(e);
}
}
@@ -528,7 +538,7 @@ public SyncStatus getSyncStatus() {
private boolean constructBatchFromWAL(long currentIndex, long maxIndex, Batch logBatches) {
logger.debug(
- "DataRegion[{}]->{}: currentIndex: {}, maxIndex: {}",
+ IoTConsensusMessages.DATA_REGION_CONSTRUCT_FROM_WAL,
peer.getGroupId().getId(),
peer.getEndpoint().getIp(),
currentIndex,
@@ -539,27 +549,22 @@ private boolean constructBatchFromWAL(long currentIndex, long maxIndex, Batch lo
// Even if there is no WAL files, these code won't produce error.
walEntryIterator.skipTo(targetIndex);
while (targetIndex < maxIndex && logBatches.canAccumulate()) {
- logger.debug("construct from WAL for one Entry, index : {}", targetIndex);
+ logger.debug(IoTConsensusMessages.CONSTRUCT_FROM_WAL, targetIndex);
try {
walEntryIterator.waitForNextReady();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- logger.warn("wait for next WAL entry is interrupted");
+ logger.warn(IoTConsensusMessages.WAIT_NEXT_WAL_INTERRUPTED);
}
IndexedConsensusRequest data = walEntryIterator.next();
if (data.getSearchIndex() < targetIndex) {
// if the index of request is smaller than currentIndex, then continue
logger.warn(
- "search for one Entry which index is {}, but find a smaller one, index : {}",
- targetIndex,
- data.getSearchIndex());
+ IoTConsensusMessages.SEARCH_ENTRY_FOUND_SMALLER, targetIndex, data.getSearchIndex());
continue;
} else if (data.getSearchIndex() > targetIndex) {
logger.warn(
- "search for one Entry which index is {}, but find a larger one, index : {}."
- + "Perhaps the wal file is corrupted, in which case we skip it and choose a larger index to replicate",
- targetIndex,
- data.getSearchIndex());
+ IoTConsensusMessages.SEARCH_ENTRY_FOUND_LARGER, targetIndex, data.getSearchIndex());
hasCorruptedData = true;
}
targetIndex = data.getSearchIndex() + 1;
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/service/IoTConsensusRPCServiceProcessor.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/service/IoTConsensusRPCServiceProcessor.java
index 9038753063b6e..c5babea464d2a 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/service/IoTConsensusRPCServiceProcessor.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/service/IoTConsensusRPCServiceProcessor.java
@@ -30,6 +30,9 @@
import org.apache.iotdb.consensus.common.request.ByteBufferConsensusRequest;
import org.apache.iotdb.consensus.common.request.IoTConsensusRequest;
import org.apache.iotdb.consensus.exception.ConsensusGroupModifyPeerException;
+import org.apache.iotdb.consensus.i18n.ConsensusMessages;
+import org.apache.iotdb.consensus.i18n.IoTConsensusMessages;
+import org.apache.iotdb.consensus.i18n.IoTConsensusV2Messages;
import org.apache.iotdb.consensus.iot.IoTConsensus;
import org.apache.iotdb.consensus.iot.IoTConsensusServerImpl;
import org.apache.iotdb.consensus.iot.thrift.IoTConsensusIService;
@@ -82,15 +85,16 @@ public TSyncLogEntriesRes syncLogEntries(TSyncLogEntriesReq req) {
if (impl == null) {
String message =
String.format(
- "unexpected consensusGroupId %s for TSyncLogEntriesReq which size is %s",
- groupId, req.getLogEntries().size());
+ ConsensusMessages.UNEXPECTED_CONSENSUS_GROUP_ID_FOR_SYNC_LOG,
+ groupId,
+ req.getLogEntries().size());
LOGGER.error(message);
TSStatus status = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
status.setMessage(message);
return new TSyncLogEntriesRes(Collections.singletonList(status));
}
if (impl.isReadOnly()) {
- String message = "fail to sync logEntries because system is read-only.";
+ String message = ConsensusMessages.SYNC_LOG_SYSTEM_READ_ONLY;
LOGGER.error(message);
TSStatus status = new TSStatus(TSStatusCode.SYSTEM_READ_ONLY.getStatusCode());
status.setMessage(message);
@@ -99,8 +103,7 @@ public TSyncLogEntriesRes syncLogEntries(TSyncLogEntriesReq req) {
if (!impl.isActive()) {
String message =
String.format(
- "Peer is inactive and not ready to receive sync log request, %s, DataNode Id: %s",
- groupId, impl.getThisNode().getNodeId());
+ ConsensusMessages.PEER_INACTIVE_NOT_READY, groupId, impl.getThisNode().getNodeId());
TSStatus status = new TSStatus(TSStatusCode.WRITE_PROCESS_REJECT.getStatusCode());
status.setMessage(message);
return new TSyncLogEntriesRes(Collections.singletonList(status));
@@ -126,9 +129,7 @@ public TSyncLogEntriesRes syncLogEntries(TSyncLogEntriesReq req) {
TSStatus writeStatus =
impl.syncLog(logEntriesInThisBatch.getSourcePeerId(), deserializedRequest);
LOGGER.debug(
- "execute TSyncLogEntriesReq for {} with result {}",
- req.consensusGroupId,
- writeStatus.subStatus);
+ IoTConsensusMessages.EXECUTE_SYNC_LOG_ENTRIES, req.consensusGroupId, writeStatus.subStatus);
return new TSyncLogEntriesRes(writeStatus.subStatus)
.setReceiverMemSize(deserializedRequest.getMemorySize());
}
@@ -144,7 +145,10 @@ public TInactivatePeerRes inactivatePeer(TInactivatePeerReq req) throws TExcepti
if (impl == null) {
String message =
- String.format("unexpected consensusGroupId %s for inactivatePeer request", groupId);
+ String.format(
+ ConsensusMessages.UNEXPECTED_CONSENSUS_GROUP_ID_FOR_REQUEST,
+ groupId,
+ "inactivatePeer");
LOGGER.error(message);
TSStatus status = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
status.setMessage(message);
@@ -164,7 +168,10 @@ public TActivatePeerRes activatePeer(TActivatePeerReq req) throws TException {
IoTConsensusServerImpl impl = consensus.getImpl(groupId);
if (impl == null) {
String message =
- String.format("unexpected consensusGroupId %s for inactivatePeer request", groupId);
+ String.format(
+ ConsensusMessages.UNEXPECTED_CONSENSUS_GROUP_ID_FOR_REQUEST,
+ groupId,
+ "activatePeer");
LOGGER.error(message);
TSStatus status = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
status.setMessage(message);
@@ -183,7 +190,10 @@ public TBuildSyncLogChannelRes buildSyncLogChannel(TBuildSyncLogChannelReq req)
IoTConsensusServerImpl impl = consensus.getImpl(groupId);
if (impl == null) {
String message =
- String.format("unexpected consensusGroupId %s for buildSyncLogChannel request", groupId);
+ String.format(
+ ConsensusMessages.UNEXPECTED_CONSENSUS_GROUP_ID_FOR_REQUEST,
+ groupId,
+ "buildSyncLogChannel");
LOGGER.error(message);
TSStatus status = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
status.setMessage(message);
@@ -203,7 +213,10 @@ public TRemoveSyncLogChannelRes removeSyncLogChannel(TRemoveSyncLogChannelReq re
IoTConsensusServerImpl impl = consensus.getImpl(groupId);
if (impl == null) {
String message =
- String.format("unexpected consensusGroupId %s for buildSyncLogChannel request", groupId);
+ String.format(
+ ConsensusMessages.UNEXPECTED_CONSENSUS_GROUP_ID_FOR_REQUEST,
+ groupId,
+ "removeSyncLogChannel");
LOGGER.error(message);
TSStatus status = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
status.setMessage(message);
@@ -214,7 +227,7 @@ public TRemoveSyncLogChannelRes removeSyncLogChannel(TRemoveSyncLogChannelReq re
responseStatus = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
} else {
responseStatus = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
- responseStatus.setMessage("remove sync log channel failed");
+ responseStatus.setMessage(ConsensusMessages.REMOVE_SYNC_LOG_CHANNEL_FAILED);
}
return new TRemoveSyncLogChannelRes(responseStatus);
}
@@ -227,7 +240,10 @@ public TWaitSyncLogCompleteRes waitSyncLogComplete(TWaitSyncLogCompleteReq req)
IoTConsensusServerImpl impl = consensus.getImpl(groupId);
if (impl == null) {
String message =
- String.format("unexpected consensusGroupId %s for waitSyncLogComplete request", groupId);
+ String.format(
+ ConsensusMessages.UNEXPECTED_CONSENSUS_GROUP_ID_FOR_REQUEST,
+ groupId,
+ "waitSyncLogComplete");
LOGGER.error(message);
return new TWaitSyncLogCompleteRes(true, 0, 0);
}
@@ -243,10 +259,7 @@ public TWaitReleaseAllRegionRelatedResourceRes waitReleaseAllRegionRelatedResour
ConsensusGroupId.Factory.createFromTConsensusGroupId(req.getConsensusGroupId());
IoTConsensusServerImpl impl = consensus.getImpl(groupId);
if (impl == null) {
- String message =
- String.format(
- "unexpected consensusGroupId %s for TWaitReleaseAllRegionRelatedResourceRes request",
- groupId);
+ String message = String.format(IoTConsensusV2Messages.UNEXPECTED_GROUP_WAIT_RELEASE, groupId);
LOGGER.error(message);
return new TWaitReleaseAllRegionRelatedResourceRes(true);
}
@@ -262,7 +275,10 @@ public TSendSnapshotFragmentRes sendSnapshotFragment(TSendSnapshotFragmentReq re
IoTConsensusServerImpl impl = consensus.getImpl(groupId);
if (impl == null) {
String message =
- String.format("unexpected consensusGroupId %s for buildSyncLogChannel request", groupId);
+ String.format(
+ ConsensusMessages.UNEXPECTED_CONSENSUS_GROUP_ID_FOR_REQUEST,
+ groupId,
+ "buildSyncLogChannel");
LOGGER.error(message);
TSStatus status = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
status.setMessage(message);
@@ -287,7 +303,10 @@ public TTriggerSnapshotLoadRes triggerSnapshotLoad(TTriggerSnapshotLoadReq req)
IoTConsensusServerImpl impl = consensus.getImpl(groupId);
if (impl == null) {
String message =
- String.format("unexpected consensusGroupId %s for buildSyncLogChannel request", groupId);
+ String.format(
+ ConsensusMessages.UNEXPECTED_CONSENSUS_GROUP_ID_FOR_REQUEST,
+ groupId,
+ "buildSyncLogChannel");
LOGGER.error(message);
TSStatus status = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
status.setMessage(message);
@@ -306,7 +325,10 @@ public TCleanupTransferredSnapshotRes cleanupTransferredSnapshot(
IoTConsensusServerImpl impl = consensus.getImpl(groupId);
if (impl == null) {
String message =
- String.format("unexpected consensusGroupId %s for buildSyncLogChannel request", groupId);
+ String.format(
+ ConsensusMessages.UNEXPECTED_CONSENSUS_GROUP_ID_FOR_REQUEST,
+ groupId,
+ "buildSyncLogChannel");
LOGGER.error(message);
TSStatus status = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
status.setMessage(message);
@@ -317,7 +339,7 @@ public TCleanupTransferredSnapshotRes cleanupTransferredSnapshot(
impl.cleanupSnapshot(req.snapshotId);
responseStatus = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
} catch (ConsensusGroupModifyPeerException e) {
- LOGGER.error("failed to cleanup transferred snapshot {}", req.snapshotId, e);
+ LOGGER.error(ConsensusMessages.FAILED_TO_CLEANUP_TRANSFERRED_SNAPSHOT, req.snapshotId, e);
responseStatus = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
responseStatus.setMessage(e.getMessage());
}
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/IoTConsensusV2.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/IoTConsensusV2.java
index 11d53f9c56410..806f499116d5a 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/IoTConsensusV2.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/IoTConsensusV2.java
@@ -49,6 +49,8 @@
import org.apache.iotdb.consensus.exception.IllegalPeerNumException;
import org.apache.iotdb.consensus.exception.PeerAlreadyInConsensusGroupException;
import org.apache.iotdb.consensus.exception.PeerNotInConsensusGroupException;
+import org.apache.iotdb.consensus.i18n.ConsensusMessages;
+import org.apache.iotdb.consensus.i18n.IoTConsensusV2Messages;
import org.apache.iotdb.consensus.pipe.service.IoTConsensusV2RPCService;
import org.apache.iotdb.consensus.pipe.service.IoTConsensusV2RPCServiceProcessor;
import org.apache.iotdb.rpc.RpcUtils;
@@ -125,12 +127,12 @@ public synchronized void start() throws IOException {
try {
recoverFuture.get();
} catch (CancellationException ce) {
- LOGGER.info("IoTV2 Recover Task is cancelled", ce);
+ LOGGER.info(IoTConsensusV2Messages.RECOVER_TASK_CANCELLED, ce);
} catch (ExecutionException ee) {
- LOGGER.error("Exception while waiting for recover future completion", ee);
+ LOGGER.error(IoTConsensusV2Messages.RECOVER_FUTURE_EXCEPTION, ee);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
- LOGGER.warn("IoTV2 Recover Task is interrupted", ie);
+ LOGGER.warn(IoTConsensusV2Messages.RECOVER_TASK_INTERRUPTED, ie);
}
}
@@ -138,8 +140,9 @@ private Future initAndRecover() throws IOException {
if (!storageDir.exists()) {
// init
if (!storageDir.mkdirs()) {
- LOGGER.warn("Unable to create consensus dir at {}", storageDir);
- throw new IOException(String.format("Unable to create consensus dir at %s", storageDir));
+ LOGGER.warn(ConsensusMessages.UNABLE_TO_CREATE_CONSENSUS_DIR, storageDir);
+ throw new IOException(
+ String.format(ConsensusMessages.UNABLE_TO_CREATE_CONSENSUS_DIR_FMT, storageDir));
}
return CompletableFuture.completedFuture(null);
} else {
@@ -162,7 +165,7 @@ private Future initAndRecover() throws IOException {
checkPeerListAndStartIfEligible(consensusGroupId, consensus);
} catch (Exception e) {
LOGGER.error(
- "Failed to recover consensus from {} for {}, ignore it and continue recover other group, async backend checker thread will automatically deregister related pipe side effects for this failed consensus group.",
+ IoTConsensusV2Messages.FAILED_RECOVER_CONSENSUS,
storageDir,
consensusGroupId,
e);
@@ -170,12 +173,12 @@ private Future initAndRecover() throws IOException {
}
} catch (IOException e) {
LOGGER.error(
- "Failed to recover consensus from {} because read dir failed", storageDir, e);
+ IoTConsensusV2Messages.FAILED_RECOVER_CONSENSUS_READ_DIR, storageDir, e);
}
})
.exceptionally(
e -> {
- LOGGER.error("Failed to recover consensus from {}", storageDir, e);
+ LOGGER.error(IoTConsensusV2Messages.FAILED_RECOVER_CONSENSUS_SHORT, storageDir, e);
return null;
});
}
@@ -190,7 +193,7 @@ private void checkPeerListAndStartIfEligible(
} catch (ConsensusGroupNotExistException ignore) {
} catch (Exception e) {
- LOGGER.warn("Failed to reset peer list while start", e);
+ LOGGER.warn(ConsensusMessages.FAILED_TO_RESET_PEER_LIST_WHILE_START, e);
}
};
@@ -229,8 +232,7 @@ public TSStatus write(ConsensusGroupId groupId, IConsensusRequest request)
return StatusUtils.getStatus(TSStatusCode.SYSTEM_READ_ONLY);
} else if (!impl.isActive()) {
return RpcUtils.getStatus(
- TSStatusCode.WRITE_PROCESS_REJECT,
- "current node is not active and is not ready to receive user write.");
+ TSStatusCode.WRITE_PROCESS_REJECT, ConsensusMessages.NODE_NOT_ACTIVE_REJECT_WRITE);
} else {
return impl.write(request);
}
@@ -277,9 +279,10 @@ public void createLocalPeer(ConsensusGroupId groupId, List peers)
final String path = getPeerDir(groupId);
File consensusDir = new File(path);
if (!consensusDir.exists() && !consensusDir.mkdirs()) {
- LOGGER.warn("Unable to create consensus dir for group {} at {}", groupId, path);
+ LOGGER.warn(ConsensusMessages.UNABLE_TO_CREATE_CONSENSUS_DIR_FOR_GROUP, groupId, path);
throw new ConsensusException(
- String.format("Unable to create consensus dir for group %s", groupId));
+ String.format(
+ ConsensusMessages.UNABLE_TO_CREATE_CONSENSUS_DIR_FOR_GROUP_FMT, groupId));
}
IoTConsensusV2ServerImpl consensus =
@@ -293,7 +296,7 @@ public void createLocalPeer(ConsensusGroupId groupId, List peers)
consensus.start();
KillPoint.setKillPoint(DataNodeKillPoints.DESTINATION_CREATE_LOCAL_PEER);
} catch (IOException e) {
- LOGGER.warn("Cannot create local peer for group {} with peers {}", groupId, peers, e);
+ LOGGER.warn(ConsensusMessages.CANNOT_CREATE_LOCAL_PEER, groupId, peers, e);
throw new ConsensusException(e);
} finally {
stateMachineMapLock.readLock().unlock();
@@ -315,14 +318,14 @@ public void deleteLocalPeer(ConsensusGroupId groupId) throws ConsensusException
if (!stateMachineMap.containsKey(groupId)) {
throw new ConsensusGroupNotExistException(groupId);
}
- LOGGER.info("[{}] start to delete local peer for group {}", CLASS_NAME, groupId);
+ LOGGER.info(IoTConsensusV2Messages.START_DELETE_LOCAL_PEER, CLASS_NAME, groupId);
final IoTConsensusV2ServerImpl consensus = stateMachineMap.get(groupId);
consensus.clear();
stateMachineMap.remove(groupId);
FileUtils.deleteFileOrDirectory(new File(getPeerDir(groupId)));
KillPoint.setKillPoint(IoTConsensusDeleteLocalPeerKillPoints.AFTER_DELETE);
- LOGGER.info("[{}] finish deleting local peer for group {}", CLASS_NAME, groupId);
+ LOGGER.info(IoTConsensusV2Messages.FINISH_DELETE_LOCAL_PEER, CLASS_NAME, groupId);
} finally {
stateMachineMapLock.readLock().unlock();
}
@@ -342,34 +345,32 @@ public void addRemotePeer(ConsensusGroupId groupId, Peer peer) throws ConsensusE
}
try {
// step 1: inactive new Peer to prepare for following steps
- LOGGER.info("[{}] inactivate new peer: {}", CLASS_NAME, peer);
+ LOGGER.info(IoTConsensusV2Messages.INACTIVATE_NEW_PEER, CLASS_NAME, peer);
impl.setRemotePeerActive(peer, false, false);
// step 2: notify all the other Peers to create consensus pipes to newPeer
// NOTE: For this step, all the other peers will try to transfer its user write data to target
- LOGGER.info("[{}] notify current peers to create consensus pipes...", CLASS_NAME);
+ LOGGER.info(IoTConsensusV2Messages.NOTIFY_CREATE_CONSENSUS_PIPES, CLASS_NAME);
impl.notifyPeersToCreateConsensusPipes(peer);
KillPoint.setKillPoint(DataNodeKillPoints.COORDINATOR_ADD_PEER_TRANSITION);
// step 3: wait until all other Peers finish transferring
- LOGGER.info("[{}] wait until all the other peers finish transferring...", CLASS_NAME);
+ LOGGER.info(IoTConsensusV2Messages.WAIT_PEERS_FINISH_TRANSFER, CLASS_NAME);
impl.waitPeersToTargetPeerTransmissionCompleted(peer);
// step 4: active new Peer to let new Peer receive client requests
- LOGGER.info("[{}] activate new peer...", CLASS_NAME);
+ LOGGER.info(IoTConsensusV2Messages.ACTIVATE_NEW_PEER, CLASS_NAME);
impl.setRemotePeerActive(peer, true, false);
KillPoint.setKillPoint(DataNodeKillPoints.COORDINATOR_ADD_PEER_DONE);
} catch (ConsensusGroupModifyPeerException e) {
try {
- LOGGER.warn(
- "[{}] add remote peer failed, automatic cleanup side effects...", CLASS_NAME, e);
+ LOGGER.warn(IoTConsensusV2Messages.ADD_REMOTE_PEER_FAILED_CLEANUP, CLASS_NAME, e);
// roll back
impl.notifyPeersToDropConsensusPipe(peer);
} catch (ConsensusGroupModifyPeerException mpe) {
- LOGGER.error(
- "[{}] failed to cleanup side effects after failed to add remote peer", CLASS_NAME, mpe);
+ LOGGER.error(IoTConsensusV2Messages.FAILED_CLEANUP_SIDE_EFFECTS, CLASS_NAME, mpe);
}
throw new ConsensusException(e);
}
@@ -387,23 +388,23 @@ public void removeRemotePeer(ConsensusGroupId groupId, Peer peer) throws Consens
try {
// let other peers to drop consensus pipes to target
- LOGGER.info("[{}] notify other peers to drop consensus pipes...", CLASS_NAME);
+ LOGGER.info(IoTConsensusV2Messages.NOTIFY_DROP_CONSENSUS_PIPES, CLASS_NAME);
impl.notifyPeersToDropConsensusPipe(peer);
KillPoint.setKillPoint(
IoTConsensusRemovePeerCoordinatorKillPoints
.AFTER_NOTIFY_PEERS_TO_REMOVE_REPLICATE_CHANNEL);
// let target peer reject new write
- LOGGER.info("[{}] inactivate peer {}", CLASS_NAME, peer);
+ LOGGER.info(IoTConsensusV2Messages.INACTIVATE_PEER, CLASS_NAME, peer);
impl.setRemotePeerActive(peer, false, true);
KillPoint.setKillPoint(IoTConsensusRemovePeerCoordinatorKillPoints.AFTER_INACTIVE_PEER);
// wait its consensus pipes to complete
- LOGGER.info("[{}] wait target peer{} complete transfer...", CLASS_NAME, peer);
+ LOGGER.info(IoTConsensusV2Messages.WAIT_TARGET_PEER_COMPLETE_TRANSFER, CLASS_NAME, peer);
impl.waitTargetPeerToPeersTransmissionCompleted(peer);
// wait target peer to release all resource
- LOGGER.info("[{}] wait {} to release all resource...", CLASS_NAME, peer);
+ LOGGER.info(IoTConsensusV2Messages.WAIT_PEER_RELEASE_RESOURCE, CLASS_NAME, peer);
impl.waitReleaseAllRegionRelatedResource(peer);
} catch (ConsensusGroupModifyPeerException e) {
throw new ConsensusException(e);
@@ -414,7 +415,7 @@ public void removeRemotePeer(ConsensusGroupId groupId, Peer peer) throws Consens
@Override
public void recordCorrectPeerListBeforeStarting(
Map> correctPeerList) {
- LOGGER.info("Record correct peer list: {}", correctPeerList);
+ LOGGER.info(ConsensusMessages.RECORD_CORRECT_PEER_LIST, correctPeerList);
this.correctPeerListBeforeStart = correctPeerList;
}
@@ -426,9 +427,7 @@ public void resetPeerList(ConsensusGroupId groupId, List correctPeers)
.orElseThrow(() -> new ConsensusGroupNotExistException(groupId));
if (!correctPeers.contains(new Peer(groupId, thisNodeId, thisNode))) {
- LOGGER.warn(
- "[RESET PEER LIST] {} Local peer is not in the correct configuration, delete it.",
- groupId);
+ LOGGER.warn(ConsensusMessages.RESET_PEER_LIST_NOT_IN_CORRECT, groupId);
deleteLocalPeer(groupId);
return;
}
@@ -440,10 +439,10 @@ public void resetPeerList(ConsensusGroupId groupId, List correctPeers)
if (!correctPeers.contains(peer)) {
try {
impl.dropConsensusPipeToTargetPeer(peer);
- LOGGER.info("[RESET PEER LIST] {} Remove sync channel with: {}", groupId, peer);
+ LOGGER.info(ConsensusMessages.RESET_PEER_LIST_REMOVE_SYNC_CHANNEL, groupId, peer);
} catch (ConsensusGroupModifyPeerException e) {
LOGGER.error(
- "[RESET PEER LIST] {} Failed to remove sync channel with: {}", groupId, peer, e);
+ ConsensusMessages.RESET_PEER_LIST_FAILED_TO_REMOVE_SYNC_CHANNEL, groupId, peer, e);
}
}
}
@@ -452,10 +451,10 @@ public void resetPeerList(ConsensusGroupId groupId, List correctPeers)
if (!impl.containsPeer(peer) && peer.getNodeId() != this.thisNodeId) {
try {
impl.createConsensusPipeToTargetPeer(peer);
- LOGGER.info("[RESET PEER LIST] {} Build sync channel with: {}", groupId, peer);
+ LOGGER.info(ConsensusMessages.RESET_PEER_LIST_BUILD_SYNC_CHANNEL, groupId, peer);
} catch (ConsensusGroupModifyPeerException e) {
LOGGER.warn(
- "[RESET PEER LIST] {} Failed to build sync channel with: {}", groupId, peer, e);
+ ConsensusMessages.RESET_PEER_LIST_FAILED_TO_BUILD_SYNC_CHANNEL, groupId, peer, e);
}
}
}
@@ -463,21 +462,19 @@ public void resetPeerList(ConsensusGroupId groupId, List correctPeers)
String currentPeerListStr = impl.getPeers().toString();
if (!previousPeerListStr.equals(currentPeerListStr)) {
LOGGER.info(
- "[RESET PEER LIST] {} Local peer list has been reset: {} -> {}",
+ ConsensusMessages.RESET_PEER_LIST_RESET_RESULT,
groupId,
previousPeerListStr,
impl.getPeers());
} else {
- LOGGER.info(
- "[RESET PEER LIST] {} The current peer list is correct, nothing need to be reset: {}",
- groupId,
- previousPeerListStr);
+ LOGGER.info(ConsensusMessages.RESET_PEER_LIST_NOTHING_TO_RESET, groupId, previousPeerListStr);
}
}
@Override
public void transferLeader(ConsensusGroupId groupId, Peer newLeader) throws ConsensusException {
- throw new ConsensusException(String.format("%s does not support leader transfer", CLASS_NAME));
+ throw new ConsensusException(
+ String.format(IoTConsensusV2Messages.NOT_SUPPORT_LEADER_TRANSFER, CLASS_NAME));
}
@Override
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/IoTConsensusV2PeerManager.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/IoTConsensusV2PeerManager.java
index b5206de814e26..9aab2cb1b40b0 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/IoTConsensusV2PeerManager.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/IoTConsensusV2PeerManager.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.consensus.pipe;
import org.apache.iotdb.consensus.common.Peer;
+import org.apache.iotdb.consensus.i18n.ConsensusMessages;
import com.google.common.collect.ImmutableList;
import org.slf4j.Logger;
@@ -41,7 +42,7 @@ public IoTConsensusV2PeerManager(List peers) {
this.peers.addAll(peers);
if (this.peers.size() != peers.size()) {
- LOGGER.warn("Duplicate peers in the input list, ignore the duplicates.");
+ LOGGER.warn(ConsensusMessages.DUPLICATE_PEERS_IGNORED);
}
}
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/IoTConsensusV2ServerImpl.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/IoTConsensusV2ServerImpl.java
index 778ea51213306..415326cbf5248 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/IoTConsensusV2ServerImpl.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/IoTConsensusV2ServerImpl.java
@@ -39,6 +39,8 @@
import org.apache.iotdb.consensus.config.IoTConsensusV2Config;
import org.apache.iotdb.consensus.config.IoTConsensusV2Config.ReplicateMode;
import org.apache.iotdb.consensus.exception.ConsensusGroupModifyPeerException;
+import org.apache.iotdb.consensus.i18n.ConsensusMessages;
+import org.apache.iotdb.consensus.i18n.IoTConsensusV2Messages;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TCheckConsensusPipeCompletedReq;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TCheckConsensusPipeCompletedResp;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TNotifyPeerToCreateConsensusPipeReq;
@@ -200,18 +202,16 @@ public void setRemotePeerActive(Peer peer, boolean isActive, boolean isForDeleti
if (!RpcUtils.SUCCESS_STATUS.equals(res.getStatus())) {
throw new ConsensusGroupModifyPeerException(
String.format(
- "error when set peer %s to active %s. result status: %s",
- peer, isActive, res.getStatus()));
+ IoTConsensusV2Messages.ERROR_SET_PEER_ACTIVE, peer, isActive, res.getStatus()));
}
} catch (Exception e) {
throw new ConsensusGroupModifyPeerException(
- String.format("error when set peer %s to active %s", peer, isActive), e);
+ String.format(IoTConsensusV2Messages.ERROR_SET_PEER_ACTIVE_SHORT, peer, isActive), e);
}
} catch (ClientManagerException e) {
if (isForDeletionPurpose) {
// for remove peer, if target peer is already down, we can skip this step.
- LOGGER.warn(
- "target peer may be down, error when set peer {} to active {}", peer, isActive, e);
+ LOGGER.warn(IoTConsensusV2Messages.TARGET_PEER_MAY_BE_DOWN, peer, isActive, e);
} else {
// for add peer, if target peer is down, we need to throw exception to identify the failure
// of this addPeerProcedure.
@@ -237,14 +237,10 @@ public void notifyPeersToCreateConsensusPipes(Peer targetPeer)
targetPeer.getNodeId()));
if (!RpcUtils.SUCCESS_STATUS.equals(resp.getStatus())) {
throw new ConsensusGroupModifyPeerException(
- String.format("error when notify peer %s to create consensus pipe", peer));
+ String.format(IoTConsensusV2Messages.ERROR_NOTIFY_PEER_CREATE_PIPE, peer));
}
} catch (Exception e) {
- LOGGER.warn(
- "{} cannot notify peer {} to create consensus pipe, may because that peer is unknown currently, please manually check!",
- thisNode,
- peer,
- e);
+ LOGGER.warn(IoTConsensusV2Messages.CANNOT_NOTIFY_PEER_CREATE_PIPE, thisNode, peer, e);
}
}
@@ -253,11 +249,7 @@ public void notifyPeersToCreateConsensusPipes(Peer targetPeer)
// target.
createConsensusPipeToTargetPeer(targetPeer);
} catch (Exception e) {
- LOGGER.warn(
- "{} cannot create consensus pipe to {}, may because target peer is unknown currently, please manually check!",
- thisNode,
- targetPeer,
- e);
+ LOGGER.warn(IoTConsensusV2Messages.CANNOT_CREATE_CONSENSUS_PIPE, thisNode, targetPeer, e);
throw new ConsensusGroupModifyPeerException(e);
}
}
@@ -286,25 +278,17 @@ public void notifyPeersToDropConsensusPipe(Peer targetPeer)
targetPeer.getNodeId()));
if (!RpcUtils.SUCCESS_STATUS.equals(resp.getStatus())) {
throw new ConsensusGroupModifyPeerException(
- String.format("error when notify peer %s to drop consensus pipe", peer));
+ String.format(IoTConsensusV2Messages.ERROR_NOTIFY_PEER_DROP_PIPE, peer));
}
} catch (Exception e) {
- LOGGER.warn(
- "{} cannot notify peer {} to drop consensus pipe, may because that peer is unknown currently, please manually check!",
- thisNode,
- peer,
- e);
+ LOGGER.warn(IoTConsensusV2Messages.CANNOT_NOTIFY_PEER_DROP_PIPE, thisNode, peer, e);
}
}
try {
dropConsensusPipeToTargetPeer(targetPeer);
} catch (Exception e) {
- LOGGER.warn(
- "{} cannot drop consensus pipe to {}, may because target peer is unknown currently, please manually check!",
- thisNode,
- targetPeer,
- e);
+ LOGGER.warn(IoTConsensusV2Messages.CANNOT_DROP_CONSENSUS_PIPE, thisNode, targetPeer, e);
throw new ConsensusGroupModifyPeerException(e);
}
}
@@ -346,10 +330,10 @@ public void waitPeersToTargetPeerTransmissionCompleted(Peer targetPeer)
isFirstCheckForCurrentPeer = false;
}
} catch (InterruptedException e) {
- LOGGER.warn("{} is interrupted when waiting for transfer completed", thisNode, e);
+ LOGGER.warn(IoTConsensusV2Messages.INTERRUPTED_WAITING_TRANSFER, thisNode, e);
Thread.currentThread().interrupt();
throw new ConsensusGroupModifyPeerException(
- String.format("%s is interrupted when waiting for transfer completed", thisNode), e);
+ String.format(IoTConsensusV2Messages.INTERRUPTED_WAITING_TRANSFER_FMT, thisNode), e);
}
}
@@ -375,10 +359,10 @@ public void waitTargetPeerToPeersTransmissionCompleted(Peer targetPeer)
isFirstCheck = false;
}
} catch (InterruptedException e) {
- LOGGER.warn("{} is interrupted when waiting for transfer completed", thisNode, e);
+ LOGGER.warn(IoTConsensusV2Messages.INTERRUPTED_WAITING_TRANSFER, thisNode, e);
Thread.currentThread().interrupt();
throw new ConsensusGroupModifyPeerException(
- String.format("%s is interrupted when waiting for transfer completed", thisNode), e);
+ String.format(IoTConsensusV2Messages.INTERRUPTED_WAITING_TRANSFER_FMT, thisNode), e);
}
}
@@ -393,17 +377,13 @@ private boolean isRemotePeerConsensusPipesTransmissionCompleted(
consensusPipeNames,
refreshCachedProgressIndex));
if (!RpcUtils.SUCCESS_STATUS.equals(resp.getStatus())) {
- LOGGER.warn(
- "{} cannot check consensus pipes transmission completed to peer {}",
- thisNode,
- targetPeer);
+ LOGGER.warn(IoTConsensusV2Messages.CANNOT_CHECK_PIPE_TRANSMISSION, thisNode, targetPeer);
throw new ConsensusGroupModifyPeerException(
- String.format(
- "error when check consensus pipes transmission completed to peer %s", targetPeer));
+ String.format(IoTConsensusV2Messages.ERROR_CHECK_PIPE_TRANSMISSION, targetPeer));
}
return resp.isCompleted;
} catch (Exception e) {
- LOGGER.warn("{} cannot check consensus pipes transmission completed", thisNode, e);
+ LOGGER.warn(IoTConsensusV2Messages.CANNOT_CHECK_PIPE_TRANSMISSION_SHORT, thisNode, e);
return true;
}
}
@@ -448,25 +428,25 @@ public void waitReleaseAllRegionRelatedResource(Peer targetPeer)
new TWaitReleaseAllRegionRelatedResourceReq(
targetPeer.getGroupId().convertToTConsensusGroupId()));
if (res.releaseAllResource) {
- LOGGER.info("[WAIT RELEASE] {} has released all region related resource", targetPeer);
+ LOGGER.info(ConsensusMessages.WAIT_RELEASE_HAS_RELEASED, targetPeer);
return;
}
- LOGGER.info("[WAIT RELEASE] {} is still releasing all region related resource", targetPeer);
+ LOGGER.info(ConsensusMessages.WAIT_RELEASE_STILL_RELEASING, targetPeer);
Thread.sleep(checkIntervalInMs);
}
} catch (ClientManagerException | TException e) {
// in case of target peer is down or can not serve, we simply skip it.
LOGGER.warn(
String.format(
- "error when waiting %s to release all region related resource. %s",
- targetPeer, e.getMessage()),
+ ConsensusMessages.ERROR_WAITING_RELEASE_RESOURCE, targetPeer, e.getMessage()),
e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new ConsensusGroupModifyPeerException(
String.format(
- "thread interrupted when waiting %s to release all region related resource. %s",
- targetPeer, e.getMessage()),
+ ConsensusMessages.THREAD_INTERRUPTED_WAITING_RELEASE_RESOURCE,
+ targetPeer,
+ e.getMessage()),
e);
}
}
@@ -484,7 +464,7 @@ public boolean isActive() {
}
public void setActive(boolean active) {
- LOGGER.info("set {} active status to {}", this.thisNode, active);
+ LOGGER.info(ConsensusMessages.SET_ACTIVE_STATUS, this.thisNode, active);
this.active.set(active);
}
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/consensuspipe/ConsensusPipeName.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/consensuspipe/ConsensusPipeName.java
index 1beabf2b3bd93..ac4bf154ccba8 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/consensuspipe/ConsensusPipeName.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/consensuspipe/ConsensusPipeName.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.commons.consensus.ConsensusGroupId;
import org.apache.iotdb.commons.pipe.agent.task.meta.PipeStaticMeta;
import org.apache.iotdb.consensus.common.Peer;
+import org.apache.iotdb.consensus.i18n.ConsensusMessages;
import java.util.Objects;
@@ -46,14 +47,14 @@ public ConsensusPipeName(
public ConsensusPipeName(String pipeName) throws IllegalArgumentException {
if (!pipeName.startsWith(PipeStaticMeta.CONSENSUS_PIPE_PREFIX)) {
- throw new IllegalArgumentException("Invalid pipe name: " + pipeName);
+ throw new IllegalArgumentException(ConsensusMessages.INVALID_PIPE_NAME + pipeName);
}
String[] pipeNameParts =
pipeName
.substring(PipeStaticMeta.CONSENSUS_PIPE_PREFIX.length())
.split(CONSENSUS_PIPE_NAME_SPLITTER_CHAR);
if (pipeNameParts.length != 3) {
- throw new IllegalArgumentException("Invalid pipe name: " + pipeName);
+ throw new IllegalArgumentException(ConsensusMessages.INVALID_PIPE_NAME + pipeName);
}
this.consensusGroupId = ConsensusGroupId.Factory.createFromString(pipeNameParts[0]);
this.senderDataNodeId = Integer.parseInt(pipeNameParts[1]);
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/service/IoTConsensusV2RPCServiceProcessor.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/service/IoTConsensusV2RPCServiceProcessor.java
index 701fceef967ef..e71aa1d419fe5 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/service/IoTConsensusV2RPCServiceProcessor.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/pipe/service/IoTConsensusV2RPCServiceProcessor.java
@@ -27,6 +27,7 @@
import org.apache.iotdb.consensus.common.Peer;
import org.apache.iotdb.consensus.config.IoTConsensusV2Config;
import org.apache.iotdb.consensus.exception.ConsensusGroupModifyPeerException;
+import org.apache.iotdb.consensus.i18n.IoTConsensusV2Messages;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.IoTConsensusV2IService;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TCheckConsensusPipeCompletedReq;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TCheckConsensusPipeCompletedResp;
@@ -86,7 +87,7 @@ public TSetActiveResp setActive(TSetActiveReq req) throws TException {
IoTConsensusV2ServerImpl impl = iotConsensusV2.getImpl(groupId);
if (impl == null) {
String message =
- String.format("unexpected consensusGroupId %s for set active request %s", groupId, req);
+ String.format(IoTConsensusV2Messages.UNEXPECTED_GROUP_SET_ACTIVE, groupId, req);
LOGGER.error(message);
TSStatus status = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
status.setMessage(message);
@@ -110,8 +111,7 @@ public TNotifyPeerToCreateConsensusPipeResp notifyPeerToCreateConsensusPipe(
IoTConsensusV2ServerImpl impl = iotConsensusV2.getImpl(groupId);
if (impl == null) {
String message =
- String.format(
- "unexpected consensusGroupId %s for create consensus pipe request %s", groupId, req);
+ String.format(IoTConsensusV2Messages.UNEXPECTED_GROUP_CREATE_PIPE, groupId, req);
LOGGER.error(message);
TSStatus status = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
status.setMessage(message);
@@ -130,7 +130,7 @@ public TNotifyPeerToCreateConsensusPipeResp notifyPeerToCreateConsensusPipe(
} catch (ConsensusGroupModifyPeerException e) {
responseStatus = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
responseStatus.setMessage(e.getMessage());
- LOGGER.warn("Failed to create consensus pipe to target peer with req {}", req, e);
+ LOGGER.warn(IoTConsensusV2Messages.FAILED_CREATE_CONSENSUS_PIPE, req, e);
}
return new TNotifyPeerToCreateConsensusPipeResp(responseStatus);
}
@@ -143,8 +143,7 @@ public TNotifyPeerToDropConsensusPipeResp notifyPeerToDropConsensusPipe(
IoTConsensusV2ServerImpl impl = iotConsensusV2.getImpl(groupId);
if (impl == null) {
String message =
- String.format(
- "unexpected consensusGroupId %s for drop consensus pipe request %s", groupId, req);
+ String.format(IoTConsensusV2Messages.UNEXPECTED_GROUP_DROP_PIPE, groupId, req);
LOGGER.error(message);
TSStatus status = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
status.setMessage(message);
@@ -161,7 +160,7 @@ public TNotifyPeerToDropConsensusPipeResp notifyPeerToDropConsensusPipe(
} catch (ConsensusGroupModifyPeerException e) {
responseStatus = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
responseStatus.setMessage(e.getMessage());
- LOGGER.warn("Failed to drop consensus pipe to target peer with req {}", req, e);
+ LOGGER.warn(IoTConsensusV2Messages.FAILED_DROP_CONSENSUS_PIPE, req, e);
}
return new TNotifyPeerToDropConsensusPipeResp(responseStatus);
}
@@ -174,9 +173,7 @@ public TCheckConsensusPipeCompletedResp checkConsensusPipeCompleted(
IoTConsensusV2ServerImpl impl = iotConsensusV2.getImpl(groupId);
if (impl == null) {
String message =
- String.format(
- "unexpected consensusGroupId %s for check transfer completed request %s",
- groupId, req);
+ String.format(IoTConsensusV2Messages.UNEXPECTED_GROUP_CHECK_TRANSFER, groupId, req);
LOGGER.error(message);
TSStatus status = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
status.setMessage(message);
@@ -193,11 +190,7 @@ public TCheckConsensusPipeCompletedResp checkConsensusPipeCompleted(
responseStatus = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode());
responseStatus.setMessage(e.getMessage());
isCompleted = true;
- LOGGER.warn(
- "Failed to check consensus pipe completed with req {}, set is completed to {}",
- req,
- true,
- e);
+ LOGGER.warn(IoTConsensusV2Messages.FAILED_CHECK_CONSENSUS_PIPE, req, true, e);
}
return new TCheckConsensusPipeCompletedResp(responseStatus, isCompleted);
}
@@ -209,10 +202,7 @@ public TWaitReleaseAllRegionRelatedResourceResp waitReleaseAllRegionRelatedResou
ConsensusGroupId.Factory.createFromTConsensusGroupId(req.getConsensusGroupId());
IoTConsensusV2ServerImpl impl = iotConsensusV2.getImpl(groupId);
if (impl == null) {
- String message =
- String.format(
- "unexpected consensusGroupId %s for TWaitReleaseAllRegionRelatedResourceRes request",
- groupId);
+ String message = String.format(IoTConsensusV2Messages.UNEXPECTED_GROUP_WAIT_RELEASE, groupId);
LOGGER.error(message);
return new TWaitReleaseAllRegionRelatedResourceResp(true);
}
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/ApplicationStateMachineProxy.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/ApplicationStateMachineProxy.java
index 63c8bbf0c7b01..dea3efbc0bad3 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/ApplicationStateMachineProxy.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/ApplicationStateMachineProxy.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.consensus.IStateMachine;
import org.apache.iotdb.consensus.common.DataSet;
import org.apache.iotdb.consensus.common.request.ByteBufferConsensusRequest;
+import org.apache.iotdb.consensus.i18n.RatisMessages;
import org.apache.iotdb.consensus.ratis.metrics.RatisMetricsManager;
import org.apache.iotdb.consensus.ratis.utils.Retriable;
import org.apache.iotdb.consensus.ratis.utils.Utils;
@@ -154,7 +155,7 @@ public CompletableFuture applyTransaction(TransactionContext trx) {
ret = new ResponseMessage(result);
break;
} catch (Throwable rte) {
- logger.error("application statemachine throws a runtime exception: ", rte);
+ logger.error(RatisMessages.STATEMACHINE_RUNTIME_EXCEPTION, rte);
ret =
new ResponseMessage(
new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode())
@@ -189,7 +190,7 @@ private void waitUntilSystemAllowApply() {
"waitUntilSystemAllowApply",
logger);
} catch (InterruptedException e) {
- logger.warn("{}: interrupted when waiting until system ready: ", this, e);
+ logger.warn(RatisMessages.INTERRUPTED_WAITING_SYSTEM_READY, this, e);
Thread.currentThread().interrupt();
}
}
@@ -198,7 +199,7 @@ private void waitUntilSystemAllowApply() {
public CompletableFuture query(Message request) {
if (!(request instanceof RequestMessage)) {
// return null dataset to indicate an error
- logger.error("An RequestMessage is required but got {}", request);
+ logger.error(RatisMessages.REQUEST_MESSAGE_REQUIRED, request);
return CompletableFuture.completedFuture(new ResponseMessage(null));
}
RequestMessage requestMessage = (RequestMessage) request;
@@ -222,7 +223,7 @@ public long takeSnapshot() throws IOException {
snapshotTmpDir.mkdirs();
if (!snapshotTmpDir.isDirectory()) {
- logger.error("Unable to create temp snapshotDir at {}", snapshotTmpDir);
+ logger.error(RatisMessages.UNABLE_TO_CREATE_TEMP_SNAPSHOT_DIR, snapshotTmpDir);
return RaftLog.INVALID_LOG_INDEX;
}
@@ -240,12 +241,7 @@ public long takeSnapshot() throws IOException {
try {
Files.move(snapshotTmpDir.toPath(), snapshotDir.toPath(), StandardCopyOption.ATOMIC_MOVE);
} catch (IOException e) {
- logger.error(
- "{} atomic rename {} to {} failed with exception {}",
- this,
- snapshotTmpDir,
- snapshotDir,
- e);
+ logger.error(RatisMessages.ATOMIC_RENAME_FAILED, this, snapshotTmpDir, snapshotDir, e);
deleteIncompleteSnapshot(snapshotTmpDir);
return RaftLog.INVALID_LOG_INDEX;
}
@@ -260,7 +256,7 @@ private void deleteIncompleteSnapshot(File snapshotDir) throws IOException {
// statemachine is supposed to clear snapshotDir on failure
boolean isEmpty = snapshotDir.delete();
if (!isEmpty) {
- logger.info("Snapshot directory is incomplete, deleting {}", snapshotDir.getAbsolutePath());
+ logger.info(RatisMessages.SNAPSHOT_DIR_INCOMPLETE_DELETING, snapshotDir.getAbsolutePath());
FileUtils.deleteFully(snapshotDir);
}
}
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/DiskGuardian.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/DiskGuardian.java
index ae6e597aafb1f..ed3869587d640 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/DiskGuardian.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/DiskGuardian.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.commons.concurrent.threadpool.ScheduledExecutorUtil;
import org.apache.iotdb.consensus.config.RatisConfig;
import org.apache.iotdb.consensus.exception.ConsensusException;
+import org.apache.iotdb.consensus.i18n.RatisMessages;
import org.apache.iotdb.consensus.ratis.utils.Utils;
import org.apache.ratis.protocol.RaftGroupId;
@@ -95,7 +96,7 @@ void updateNow() {
this.logFiles = latest;
} catch (IOException e) {
// keep the files unchanged
- logger.warn("{}: Error caught when listing files for {} at {}:", this, gid, e);
+ logger.warn(RatisMessages.ERROR_LISTING_FILES, this, gid, e);
}
}
@@ -184,14 +185,11 @@ private void snapshotDaemon() {
.triggerSnapshot(Utils.fromRaftGroupIdToConsensusGroupId(groupId), false);
final boolean flagCleared = snapshotFlag.get(groupId).compareAndSet(true, false);
if (!flagCleared) {
- logger.info(
- "{}: clear snapshot flag failed for group {}, please check the related implementation",
- this,
- groupId);
+ logger.info(RatisMessages.CLEAR_SNAPSHOT_FLAG_FAILED, this, groupId);
}
} catch (ConsensusException e) {
logger.info(
- "{} take snapshot failed for group {} due to {}. Disk file status {}",
+ RatisMessages.TAKE_SNAPSHOT_FAILED,
this,
groupId,
e,
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisClient.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisClient.java
index 7b674493e1b05..a1adf72712922 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisClient.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisClient.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.commons.client.ClientManager;
import org.apache.iotdb.commons.client.factory.BaseClientFactory;
import org.apache.iotdb.consensus.config.RatisConfig;
+import org.apache.iotdb.consensus.i18n.RatisMessages;
import org.apache.commons.pool2.PooledObject;
import org.apache.commons.pool2.impl.DefaultPooledObject;
@@ -72,7 +73,7 @@ private void invalidate() {
try {
raftClient.close();
} catch (IOException e) {
- logger.warn("cannot close raft client ", e);
+ logger.warn(RatisMessages.CANNOT_CLOSE_RAFT_CLIENT, e);
}
}
@@ -216,8 +217,7 @@ public Action handleAttemptFailure(Event event) {
.filter(StatusRuntimeException.class::isInstance);
if (unexpectedCause.isPresent()) {
- logger.info(
- "{}: raft client request failed and caught exception: ", this, unexpectedCause.get());
+ logger.info(RatisMessages.RAFT_CLIENT_REQUEST_FAILED, this, unexpectedCause.get());
return NO_RETRY_ACTION;
}
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisConsensus.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisConsensus.java
index fc2515484b2f8..372b45b8afc6f 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisConsensus.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisConsensus.java
@@ -46,6 +46,8 @@
import org.apache.iotdb.consensus.exception.PeerNotInConsensusGroupException;
import org.apache.iotdb.consensus.exception.RatisReadUnavailableException;
import org.apache.iotdb.consensus.exception.RatisRequestFailedException;
+import org.apache.iotdb.consensus.i18n.ConsensusMessages;
+import org.apache.iotdb.consensus.i18n.RatisMessages;
import org.apache.iotdb.consensus.ratis.metrics.RatisMetricSet;
import org.apache.iotdb.consensus.ratis.metrics.RatisMetricsManager;
import org.apache.iotdb.consensus.ratis.utils.Retriable;
@@ -250,7 +252,7 @@ public synchronized void start() throws IOException {
} catch (ConsensusGroupNotExistException ignore) {
} catch (Exception e) {
- logger.warn("Failed to reset peer list while start", e);
+ logger.warn(ConsensusMessages.FAILED_TO_RESET_PEER_LIST_WHILE_START, e);
}
};
// make peers which are in list correct
@@ -269,7 +271,7 @@ public synchronized void stop() throws IOException {
try {
diskGuardian.stop();
} catch (InterruptedException e) {
- logger.warn("{}: interrupted when shutting down add Executor with exception ", this, e);
+ logger.warn(ConsensusMessages.INTERRUPTED_WHEN_SHUTTING_DOWN_EXECUTOR_RATIS, this, e);
Thread.currentThread().interrupt();
} finally {
clientManager.close();
@@ -288,7 +290,7 @@ private RaftClientReply writeWithRetry(
reply = Retriable.attempt(caller, writeRetryPolicy, () -> caller, logger);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- logger.debug("{}: interrupted when retrying for write request {}", this, caller);
+ logger.debug(RatisMessages.INTERRUPTED_RETRYING_WRITE, this, caller);
}
if (reply == null) {
@@ -297,8 +299,7 @@ private RaftClientReply writeWithRetry(
.setServerId(server.get().getId())
.setGroupId(groupId)
.setSuccess(false)
- .setException(
- new RaftException("null reply received in writeWithRetry for request " + caller))
+ .setException(new RaftException(RatisMessages.NULL_REPLY_IN_WRITE_WITH_RETRY + caller))
.build();
}
return reply;
@@ -335,7 +336,7 @@ public TSStatus write(ConsensusGroupId groupId, IConsensusRequest request)
try {
forceStepDownLeader(raftGroup);
} catch (Exception e) {
- logger.warn("leader {} read only, force step down failed due to, ", myself, e);
+ logger.warn(RatisMessages.LEADER_READ_ONLY_STEP_DOWN_FAILED, myself, e);
}
return StatusUtils.getStatus(TSStatusCode.SYSTEM_READ_ONLY);
}
@@ -590,11 +591,7 @@ public void addRemotePeer(ConsensusGroupId groupId, Peer peer) throws ConsensusE
p ->
p.getId().equals(peerToAdd.getId())
|| p.getAddress().equals(peerToAdd.getAddress()))) {
- logger.warn(
- "{}: try to add a peer {} with conflicting id or address in {}",
- this,
- peerToAdd,
- group.getPeers());
+ logger.warn(RatisMessages.TRY_ADD_CONFLICTING_PEER, this, peerToAdd, group.getPeers());
throw new PeerAlreadyInConsensusGroupException(groupId, peer);
}
@@ -637,7 +634,7 @@ public void removeRemotePeer(ConsensusGroupId groupId, Peer peer) throws Consens
@Override
public void recordCorrectPeerListBeforeStarting(
Map> correctPeerList) {
- logger.info("Record correct peer list: {}", correctPeerList);
+ logger.info(ConsensusMessages.RECORD_CORRECT_PEER_LIST, correctPeerList);
this.correctPeerListBeforeStart = correctPeerList;
}
@@ -662,9 +659,7 @@ public void resetPeerList(ConsensusGroupId groupId, List correctPeers)
myself.getId().equals(raftPeer.getId())
&& myself.getAddress().equals(raftPeer.getAddress()));
if (!myselfInCorrectPeers) {
- logger.info(
- "[RESET PEER LIST] Local peer is not in the correct peer list, delete local peer {}",
- groupId);
+ logger.info(ConsensusMessages.RESET_PEER_LIST_DELETE_LOCAL_PEER, groupId);
deleteLocalPeer(groupId);
return;
}
@@ -683,16 +678,12 @@ public void resetPeerList(ConsensusGroupId groupId, List correctPeers)
return;
}
- logger.info(
- "[RESET PEER LIST] Peer list will be reset from {} to {}",
- localRaftPeerSet,
- correctRaftPeerSet);
+ logger.info(ConsensusMessages.RESET_PEER_LIST_WILL_RESET, localRaftPeerSet, correctRaftPeerSet);
RaftClientReply reply = sendReconfiguration(newGroup);
if (reply.isSuccess()) {
- logger.info("[RESET PEER LIST] Peer list has been reset to {}", newGroupPeers);
+ logger.info(ConsensusMessages.RESET_PEER_LIST_RESET_SUCCESS, newGroupPeers);
} else {
- logger.warn(
- "[RESET PEER LIST] Peer list failed to reset to {}, reply is {}", newGroup, reply);
+ logger.warn(ConsensusMessages.RESET_PEER_LIST_RESET_FAILED, newGroup, reply);
}
}
@@ -751,7 +742,7 @@ public boolean isLeader(ConsensusGroupId groupId) {
return server.get().getDivision(raftGroupId).getInfo().isLeader();
} catch (IOException exception) {
// if the read fails, simply return not leader
- logger.info("isLeader request failed with exception: ", exception);
+ logger.info(RatisMessages.IS_LEADER_REQUEST_FAILED, exception);
return false;
}
}
@@ -763,7 +754,7 @@ public boolean isLeaderReady(ConsensusGroupId groupId) {
return server.get().getDivision(raftGroupId).getInfo().isLeaderReady();
} catch (IOException exception) {
// if the read fails, simply return not ready
- logger.info("isLeaderReady request failed with exception: ", exception);
+ logger.info(RatisMessages.IS_LEADER_READY_REQUEST_FAILED, exception);
return false;
}
}
@@ -775,7 +766,7 @@ public long getLogicalClock(ConsensusGroupId groupId) {
return server.get().getDivision(raftGroupId).getInfo().getCurrentTerm();
} catch (IOException exception) {
// if the read fails, simply return 0
- logger.info("getLogicalClock request failed with exception: ", exception);
+ logger.info(RatisMessages.GET_LOGICAL_CLOCK_REQUEST_FAILED, exception);
return 0;
}
}
@@ -786,7 +777,7 @@ private boolean waitUntilLeaderReady(RaftGroupId groupId) {
divisionInfo = server.get().getDivision(groupId).getInfo();
} catch (IOException e) {
// if the read fails, simply return not leader
- logger.info("isLeaderReady checking failed with exception: ", e);
+ logger.info(RatisMessages.IS_LEADER_READY_CHECKING_FAILED, e);
return false;
}
@@ -809,11 +800,11 @@ private boolean waitUntilLeaderReady(RaftGroupId groupId) {
logger);
if (divisionInfo.isLeader() && !divisionInfo.isLeaderReady()) {
logger.warn(
- "{}: leader is still not ready after {}ms", groupId, DEFAULT_WAIT_LEADER_READY_TIMEOUT);
+ RatisMessages.LEADER_STILL_NOT_READY, groupId, DEFAULT_WAIT_LEADER_READY_TIMEOUT);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- logger.warn("Unexpected interruption when waitUntilLeaderReady", e);
+ logger.warn(RatisMessages.UNEXPECTED_INTERRUPTION_WAIT_LEADER_READY, e);
return false;
}
return divisionInfo.isLeader();
@@ -833,7 +824,7 @@ public Peer getLeader(ConsensusGroupId groupId) {
try {
leaderId = server.get().getDivision(raftGroupId).getInfo().getLeaderId();
} catch (IOException e) {
- logger.warn("fetch division info for group {} failed due to: ", groupId, e);
+ logger.warn(RatisMessages.FETCH_DIVISION_INFO_FAILED, groupId, e);
return null;
}
if (leaderId == null) {
@@ -903,11 +894,7 @@ public void triggerSnapshot(ConsensusGroupId groupId, boolean force) throws Cons
throw new RatisRequestFailedException(reply.getException());
}
logger.info(
- "{} group {}: successfully taken snapshot at index {} with force = {}",
- this,
- raftGroupId,
- reply.getLogIndex(),
- force);
+ RatisMessages.TRIGGER_SNAPSHOT_SUCCESS, this, raftGroupId, reply.getLogIndex(), force);
} catch (IOException ioException) {
throw new RatisRequestFailedException(ioException);
}
@@ -956,7 +943,7 @@ private RaftGroup getGroupInfo(RaftGroupId raftGroupId) {
lastSeen.put(raftGroupId, raftGroup);
}
} catch (IOException e) {
- logger.debug("get group {} failed ", raftGroupId, e);
+ logger.debug(RatisMessages.GET_GROUP_FAILED, raftGroupId, e);
}
return raftGroup;
}
@@ -971,7 +958,7 @@ private RatisClient getRaftClient(RaftGroup group) throws ClientManagerException
try {
return clientManager.borrowClient(group);
} catch (ClientManagerException e) {
- logger.error("Borrow client from pool for group {} failed.", group, e);
+ logger.error(RatisMessages.BORROW_CLIENT_FROM_POOL_FAILED, group, e);
// rethrow the exception
throw e;
}
@@ -981,7 +968,7 @@ private RatisClient getConfigurationRaftClient(RaftGroup group) throws ClientMan
try {
return reconfigurationClientManager.borrowClient(group);
} catch (ClientManagerException e) {
- logger.error("Borrow client from pool for group {} failed.", group, e);
+ logger.error(RatisMessages.BORROW_CLIENT_FROM_POOL_FAILED, group, e);
// rethrow the exception
throw e;
}
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/ResponseMessage.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/ResponseMessage.java
index ce1b7c14593ce..8bfda01b5a6a8 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/ResponseMessage.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/ResponseMessage.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.consensus.ratis;
import org.apache.iotdb.common.rpc.thrift.TSStatus;
+import org.apache.iotdb.consensus.i18n.RatisMessages;
import org.apache.iotdb.consensus.ratis.utils.Utils;
import org.apache.ratis.protocol.Message;
@@ -59,7 +60,7 @@ public ByteString getContent() {
try {
serializedData.set(ByteString.copyFrom(Utils.serializeTSStatus(status)));
} catch (TException e) {
- logger.warn("serialize TSStatus failed {}", status);
+ logger.warn(RatisMessages.SERIALIZE_TSSTATUS_FAILED, status);
}
}
}
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/SnapshotStorage.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/SnapshotStorage.java
index e663d97ea8c1e..81ff85c791b3c 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/SnapshotStorage.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/SnapshotStorage.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.consensus.ratis;
import org.apache.iotdb.consensus.IStateMachine;
+import org.apache.iotdb.consensus.i18n.RatisMessages;
import org.apache.iotdb.consensus.ratis.utils.Utils;
import org.apache.ratis.protocol.RaftGroupId;
@@ -124,7 +125,7 @@ private Path[] getSortedSnapshotDirPaths() {
}
}
} catch (IOException exception) {
- logger.warn("Cannot construct snapshot directory stream ", exception);
+ logger.warn(RatisMessages.CANNOT_CONSTRUCT_SNAPSHOT_DIR_STREAM, exception);
return null;
}
@@ -175,7 +176,7 @@ SnapshotInfo findLatestSnapshot() {
fileInfo = new FileInfo(file.toPath().toRealPath(), null);
}
} catch (IOException e) {
- logger.warn("{} cannot resolve real path of {} due to ", this, file, e);
+ logger.warn(RatisMessages.CANNOT_RESOLVE_REAL_PATH, this, file, e);
return null;
}
fileInfos.add(fileInfo);
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/metrics/MetricRegistryManager.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/metrics/MetricRegistryManager.java
index dcb31f352f1f3..2a8ca243295ad 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/metrics/MetricRegistryManager.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/metrics/MetricRegistryManager.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.consensus.ratis.metrics;
import org.apache.iotdb.commons.service.metric.MetricService;
+import org.apache.iotdb.consensus.i18n.RatisMessages;
import org.apache.iotdb.metrics.AbstractMetricService;
import org.apache.ratis.metrics.MetricRegistries;
@@ -81,19 +82,18 @@ public Collection getMetricRegistries() {
public void addReporterRegistration(
Consumer reporterRegistration,
Consumer stopReporter) {
- throw new UnsupportedOperationException("Reporter is disabled from RatisMetricRegistries");
+ throw new UnsupportedOperationException(RatisMessages.REPORTER_DISABLED);
}
@Override
public void enableJmxReporter() {
// We shall disable the JMX reporter since we already have one in MetricService
- throw new UnsupportedOperationException("JMX Reporter is disabled from RatisMetricRegistries");
+ throw new UnsupportedOperationException(RatisMessages.JMX_REPORTER_DISABLED);
}
@Override
public void enableConsoleReporter(TimeDuration timeDuration) {
// We shall disable the Console reporter since we already have one in MetricService
- throw new UnsupportedOperationException(
- "Console Reporter is disabled from RatisMetricRegistries");
+ throw new UnsupportedOperationException(RatisMessages.CONSOLE_REPORTER_DISABLED);
}
}
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/utils/Utils.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/utils/Utils.java
index adaf820572f86..2c24ff12b6d36 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/utils/Utils.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/ratis/utils/Utils.java
@@ -27,6 +27,7 @@
import org.apache.iotdb.commons.consensus.ConsensusGroupId;
import org.apache.iotdb.consensus.common.Peer;
import org.apache.iotdb.consensus.config.RatisConfig;
+import org.apache.iotdb.consensus.i18n.ConsensusMessages;
import org.apache.iotdb.rpc.AutoScalingBufferWriteTransport;
import org.apache.ratis.client.RaftClientConfigKeys;
@@ -397,11 +398,11 @@ public static Parameters initRatisConfig(RaftProperties properties, RatisConfig
new NoHostnameVerificationTrustManager((X509TrustManager) originalTrustManager);
GrpcConfigKeys.TLS.setConf(parameters, new GrpcTlsConfig(keyManager, trustManager, true));
} catch (AccessDeniedException e) {
- LOGGER.error("Failed or truststore to load keystore file");
+ LOGGER.error(ConsensusMessages.FAILED_TO_LOAD_KEYSTORE);
} catch (FileNotFoundException e) {
- LOGGER.error("keystore or truststore file not found");
+ LOGGER.error(ConsensusMessages.KEYSTORE_FILE_NOT_FOUND);
} catch (Exception e) {
- LOGGER.error("Failed to read key store or trust store.", e);
+ LOGGER.error(ConsensusMessages.FAILED_TO_READ_KEYSTORE, e);
}
}
return parameters;
diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/simple/SimpleConsensus.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/simple/SimpleConsensus.java
index c0f006c5aacd1..273411a1b57a8 100644
--- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/simple/SimpleConsensus.java
+++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/simple/SimpleConsensus.java
@@ -38,6 +38,7 @@
import org.apache.iotdb.consensus.exception.ConsensusGroupNotExistException;
import org.apache.iotdb.consensus.exception.IllegalPeerEndpointException;
import org.apache.iotdb.consensus.exception.IllegalPeerNumException;
+import org.apache.iotdb.consensus.i18n.ConsensusMessages;
import org.apache.iotdb.rpc.TSStatusCode;
import org.slf4j.Logger;
@@ -88,7 +89,8 @@ public synchronized void start() throws IOException {
private void initAndRecover() throws IOException {
if (!storageDir.exists()) {
if (!storageDir.mkdirs()) {
- throw new IOException(String.format("Unable to create consensus dir at %s", storageDir));
+ throw new IOException(
+ String.format(ConsensusMessages.UNABLE_TO_CREATE_CONSENSUS_DIR_FMT, storageDir));
}
} else {
try (DirectoryStream stream = Files.newDirectoryStream(storageDir.toPath())) {
@@ -165,7 +167,8 @@ public void createLocalPeer(ConsensusGroupId groupId, List peers)
String path = buildPeerDir(groupId);
File file = new File(path);
if (!file.mkdirs()) {
- logger.warn("Unable to create consensus dir for group {} at {}", groupId, path);
+ logger.warn(
+ ConsensusMessages.UNABLE_TO_CREATE_CONSENSUS_DIR_FOR_GROUP, groupId, path);
return null;
}
@@ -179,7 +182,8 @@ public void createLocalPeer(ConsensusGroupId groupId, List peers)
.orElseThrow(
() ->
new ConsensusException(
- String.format("Unable to create consensus dir for group %s", groupId)));
+ String.format(
+ ConsensusMessages.UNABLE_TO_CREATE_CONSENSUS_DIR_FOR_GROUP_FMT, groupId)));
if (exist.get()) {
throw new ConsensusGroupAlreadyExistException(groupId);
}
@@ -203,28 +207,28 @@ public void deleteLocalPeer(ConsensusGroupId groupId) throws ConsensusException
@Override
public void addRemotePeer(ConsensusGroupId groupId, Peer peer) throws ConsensusException {
- throw new ConsensusException("SimpleConsensus does not support membership changes");
+ throw new ConsensusException(ConsensusMessages.SIMPLE_CONSENSUS_NOT_SUPPORT_MEMBERSHIP_CHANGES);
}
@Override
public void removeRemotePeer(ConsensusGroupId groupId, Peer peer) throws ConsensusException {
- throw new ConsensusException("SimpleConsensus does not support membership changes");
+ throw new ConsensusException(ConsensusMessages.SIMPLE_CONSENSUS_NOT_SUPPORT_MEMBERSHIP_CHANGES);
}
@Override
public void recordCorrectPeerListBeforeStarting(
Map> correctPeerList) {
- logger.info("SimpleConsensus will do nothing when calling recordCorrectPeerListBeforeStarting");
+ logger.info(ConsensusMessages.SIMPLE_CONSENSUS_NOOP_RECORD_PEER_LIST);
}
@Override
public void transferLeader(ConsensusGroupId groupId, Peer newLeader) throws ConsensusException {
- throw new ConsensusException("SimpleConsensus does not support leader transfer");
+ throw new ConsensusException(ConsensusMessages.SIMPLE_CONSENSUS_NOT_SUPPORT_LEADER_TRANSFER);
}
@Override
public void triggerSnapshot(ConsensusGroupId groupId, boolean force) throws ConsensusException {
- throw new ConsensusException("SimpleConsensus does not support snapshot trigger currently");
+ throw new ConsensusException(ConsensusMessages.SIMPLE_CONSENSUS_NOT_SUPPORT_SNAPSHOT_TRIGGER);
}
@Override
@@ -273,7 +277,7 @@ public void reloadConsensusConfig(ConsensusConfig consensusConfig) {
@Override
public void resetPeerList(ConsensusGroupId groupId, List correctPeers)
throws ConsensusException {
- throw new ConsensusException("SimpleConsensus does not support reset peer list");
+ throw new ConsensusException(ConsensusMessages.SIMPLE_CONSENSUS_NOT_SUPPORT_RESET_PEER_LIST);
}
private String buildPeerDir(ConsensusGroupId groupId) {
diff --git a/iotdb-core/datanode/pom.xml b/iotdb-core/datanode/pom.xml
index 40fc3f7f614be..97a296c9a9e53 100644
--- a/iotdb-core/datanode/pom.xml
+++ b/iotdb-core/datanode/pom.xml
@@ -451,6 +451,7 @@
${project.build.directory}/generated-sources/freemarker
+ ${project.basedir}/src/main/i18n/${i18n.locale}
diff --git a/iotdb-core/datanode/src/main/i18n/en/org/apache/iotdb/db/i18n/DataNodeMiscMessages.java b/iotdb-core/datanode/src/main/i18n/en/org/apache/iotdb/db/i18n/DataNodeMiscMessages.java
new file mode 100644
index 0000000000000..9aeea59084dff
--- /dev/null
+++ b/iotdb-core/datanode/src/main/i18n/en/org/apache/iotdb/db/i18n/DataNodeMiscMessages.java
@@ -0,0 +1,915 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.i18n;
+
+/** Compile-time i18n constants for DataNode misc subsystems (English). */
+public final class DataNodeMiscMessages {
+
+ private DataNodeMiscMessages() {}
+
+ // ---------------------------------------------------------------------------
+ // protocol – BaseServerContextHandler
+ // ---------------------------------------------------------------------------
+ public static final String MULTIPLE_SERVER_CONTEXT_FACTORY =
+ "There are more than one ServerContextFactory implementation. pls check.";
+ public static final String SET_SERVER_CONTEXT_FACTORY =
+ "Will set ServerContextFactory from {} ";
+
+ // ---------------------------------------------------------------------------
+ // protocol – ConfigNodeInfo
+ // ---------------------------------------------------------------------------
+ public static final String UPDATE_CONFIG_NODE_SUCCESSFULLY =
+ "Update ConfigNode Successfully: {}, which takes {} ms.";
+ public static final String UPDATE_CONFIG_NODE_FAILED = "Update ConfigNode failed.";
+ public static final String SYSTEM_PROPERTIES_NOT_EXIST =
+ "System properties file not exist, not necessary to store ConfigNode list";
+ public static final String LOAD_CONFIG_NODE_SUCCESSFULLY =
+ "Load ConfigNode successfully: {}, which takes {} ms.";
+ public static final String CANNOT_PARSE_CONFIG_NODE_LIST =
+ "Cannot parse config node list in system.properties";
+
+ // ---------------------------------------------------------------------------
+ // protocol – ConfigNodeClient
+ // ---------------------------------------------------------------------------
+ public static final String NODE_LEADER_MAY_DOWN_TRY_NEXT =
+ "The current node leader may have been down {}, try next node";
+ public static final String UNEXPECTED_INTERRUPTION_CONNECT_CONFIG_NODE =
+ "Unexpected interruption when waiting to try to connect to ConfigNode";
+ public static final String NODE_MAY_DOWN_TRY_NEXT =
+ "The current node may have been down {},try next node";
+ public static final String FAILED_CONNECT_CONFIG_NODE_NOT_LEADER =
+ "Failed to connect to ConfigNode {} from DataNode {}, because the current node is not "
+ + "leader or not ready yet, will try again later";
+ public static final String UNEXPECTED_INTERRUPTION_CONNECT_CONFIG_NODE_BREAK =
+ "Unexpected interruption when waiting to try to connect to ConfigNode, may because current node has been down. Will break current execution process to avoid meaningless wait.";
+
+ // ---------------------------------------------------------------------------
+ // protocol – DataNodeInternalClient
+ // ---------------------------------------------------------------------------
+ public static final String USER_OPENS_INTERNAL_SESSION =
+ "User: {}, opens internal Session-{}.";
+ public static final String USER_OPENS_INTERNAL_SESSION_FAILED =
+ "User {} opens internal Session failed.";
+ public static final String USER_OPENS_INTERNAL_SESSION_FAILED_FMT =
+ "User %s opens internal Session failed.";
+
+ // ---------------------------------------------------------------------------
+ // protocol – AsyncTSStatusRPCHandler / AsyncConfigNodeTSStatusRPCHandler
+ // ---------------------------------------------------------------------------
+ public static final String SUCCESSFULLY_ON_DATANODE =
+ "Successfully {} on DataNode: {}";
+ public static final String FAILED_ON_DATANODE =
+ "Failed to {} on DataNode: {}, response: {}";
+ public static final String SUCCESSFULLY_ON_CONFIG_NODE =
+ "Successfully {} on ConfigNode: {}";
+ public static final String FAILED_ON_CONFIG_NODE =
+ "Failed to {} on ConfigNode: {}, response: {}";
+
+ // ---------------------------------------------------------------------------
+ // protocol – AINodeClient
+ // ---------------------------------------------------------------------------
+ public static final String AINODE_MAY_DOWN =
+ "The current AINode may have been down {}, because";
+ public static final String CANNOT_CONNECT_ANY_AINODE =
+ "Cannot connect to any AINode due to there are no available ones.";
+ public static final String UNEXPECTED_INTERRUPTION_CONNECT_AINODE =
+ "Unexpected interruption when waiting to try to connect to AINode, may because current node has been down. Will break current execution process to avoid meaningless wait.";
+
+ // ---------------------------------------------------------------------------
+ // protocol – SessionManager
+ // ---------------------------------------------------------------------------
+ public static final String LOGIN_STATUS =
+ "{}: Login status: {}. User : {}, opens Session-{}";
+ public static final String CLIENT_TRYING_CLOSE_ANOTHER_SESSION =
+ "The client-%s is trying to close another session %s, pls check if it's a bug";
+ public static final String SESSION_CLOSING = "Session-%s is closing";
+ public static final String FAILED_RELEASE_PREPARED_STATEMENT =
+ "Failed to release PreparedStatement resources for session {}: {}";
+ public static final String FAILED_RELEASE_PREPARED_STATEMENT_CLOSE =
+ "Failed to release PreparedStatement '{}' resources when closing statement {} for session {}: {}";
+ public static final String NOT_LOGIN = "{}: Not login. ";
+ public static final String CLIENT_SESSION_REGISTERED_REPEATEDLY =
+ "the client session is registered repeatedly, pls check whether this is a bug.";
+
+ // ---------------------------------------------------------------------------
+ // protocol – DataNodeRegionManager
+ // ---------------------------------------------------------------------------
+ public static final String CREATE_SCHEMA_REGION_FAILED_ILLEGAL_PATH =
+ "Create Schema Region {} failed because path is illegal.";
+ public static final String CREATE_SCHEMA_REGION_FAILED =
+ "Create Schema Region {} failed because {}";
+ public static final String CREATE_SCHEMA_REGION_FAILED_FMT =
+ "Create Schema Region failed because of %s";
+ public static final String SCHEMA_REGION_ALREADY_EXISTS_FMT =
+ "SchemaRegion %d already exists.";
+ public static final String CREATE_DATA_REGION_FAILED =
+ "Create Data Region {} failed because {}";
+ public static final String CREATE_DATA_REGION_FAILED_FMT =
+ "Create Data Region failed because of %s";
+ public static final String DATA_REGION_ALREADY_EXISTS_FMT = "DataRegion %d already exists.";
+ public static final String START_CREATE_NEW_REGION = "start to create new region {}";
+ public static final String CREATE_NEW_REGION_ERROR = "create new region {} error";
+ public static final String SUCCEED_CREATE_NEW_REGION = "succeed to create new region {}";
+ public static final String METADATA_ERROR = "{}: MetaData error: ";
+ public static final String CREATE_SCHEMA_REGION_FAILED_ILLEGAL_PATH_MSG =
+ "Create Schema Region failed because storageGroup path is illegal.";
+
+ // ---------------------------------------------------------------------------
+ // protocol – DataNodeInternalRPCServiceImpl
+ // ---------------------------------------------------------------------------
+ // Log/error templates used by the internal (node-to-node) RPC handler;
+ // "{}" entries are SLF4J-style log placeholders.
+ public static final String CONSENSUS_NOT_STARTED =
+ "Consensus has not been started after {} seconds, rejecting region request";
+ public static final String RECEIVE_FRAGMENT_INSTANCE =
+ "receive FragmentInstance to group[{}]";
+ public static final String DESERIALIZE_CONSENSUS_GROUP_ID_FAILED =
+ "Deserialize ConsensusGroupId failed. ";
+ public static final String DESERIALIZE_FRAGMENT_INSTANCE_FAILED =
+ "Deserialize FragmentInstance failed.";
+ public static final String RECEIVE_LOAD_NODE = "Receive load node from uuid {}.";
+ public static final String SCHEMA_CACHE_INVALIDATED =
+ "Schema cache of {} has been invalidated";
+ public static final String ERROR_PUSHING_PIPE_META =
+ "Error occurred when pushing pipe meta";
+ public static final String ERROR_PUSHING_SINGLE_PIPE_META =
+ "Error occurred when pushing single pipe meta";
+ public static final String ERROR_PUSHING_MULTI_PIPE_META =
+ "Error occurred when pushing multi pipe meta";
+ public static final String ERROR_PUSHING_TOPIC_META =
+ "Error occurred when pushing topic meta";
+ public static final String ERROR_PUSHING_SINGLE_TOPIC_META =
+ "Error occurred when pushing single topic meta";
+ public static final String ERROR_PUSHING_MULTI_TOPIC_META =
+ "Error occurred when pushing multi topic meta";
+ public static final String ERROR_PUSHING_CONSUMER_GROUP_META =
+ "Error occurred when pushing consumer group meta";
+ public static final String ERROR_PUSHING_SINGLE_CONSUMER_GROUP_META =
+ "Error occurred when pushing single consumer group meta";
+ public static final String EXCEPTION_EXECUTING_INTERNAL_SCHEMA_TASK =
+ "Exception occurs when executing internal schema task: ";
+ public static final String UNSUPPORTED_TYPE_UPDATING_TABLE =
+ "Unsupported type {} when updating table";
+ public static final String UNSUPPORTED_TYPE_UPDATING_TEMPLATE =
+ "Unsupported type {} when updating template";
+ public static final String FAILED_GET_MEMORY_FROM_METRIC =
+ "Failed to get memory from metric because: ";
+ public static final String CHANGE_REGION_LEADER = "[ChangeRegionLeader] {}";
+ public static final String REGION_TYPE_ILLEGAL = "region {} type is illegal";
+ public static final String START_DISABLE_DATA_NODE =
+ "start disable data node in the request: {}";
+ public static final String EXECUTE_STOP_AND_CLEAR = "Execute stopAndClearDataNode RPC method";
+ public static final String INTERRUPTED_STOP_AND_CLEAR =
+ "Meets InterruptedException in stopAndClearDataNode RPC method";
+ public static final String STOP_AND_CLEAR_ERROR = "Stop And Clear Data Node error";
+ public static final String RETRIEVED_EARLIEST_TIMESLOTS =
+ "Retrieved earliest timeslots for {} databases";
+ public static final String FAILED_GET_EARLIEST_TIMESLOTS = "Failed to get earliest timeslots";
+ public static final String FAILED_GENERATE_DATA_PARTITION_TABLE =
+ "Failed to generate DataPartitionTable";
+ public static final String FAILED_CHECK_DATA_PARTITION_TABLE_STATUS =
+ "Failed to check DataPartitionTable generation status";
+ public static final String DATA_PARTITION_TABLE_COMPLETED =
+ "DataPartitionTable generation completed with task ID: {}";
+ public static final String DATA_PARTITION_TABLE_FAILED =
+ "DataPartitionTable generation failed with task ID: {}";
+ public static final String PROCESS_DATA_DIR_COMPLETED =
+ "Process data directory for earliestTimeslots completed successfully";
+ public static final String ERROR_EXECUTING_BATCH_STATEMENT =
+ "Error occurred when executing executeBatchStatement: ";
+
+ // ---------------------------------------------------------------------------
+ // protocol – ClientRPCServiceImpl
+ // ---------------------------------------------------------------------------
+ // Client-facing RPC handler messages. The tree-model and table-model batch
+ // execution templates below are deliberately parallel pairs — keep their
+ // placeholder order in sync when editing either variant.
+ public static final String IOTDB_SERVER_VERSION = "IoTDB server version: {}";
+ public static final String TEST_INSERT_BATCH_RECEIVE = "Test insert batch request receive.";
+ public static final String TEST_INSERT_ROW_RECEIVE = "Test insert row request receive.";
+ public static final String TEST_INSERT_STRING_RECORD_RECEIVE =
+ "Test insert string record request receive.";
+ public static final String TEST_INSERT_ROW_IN_BATCH_RECEIVE =
+ "Test insert row in batch request receive.";
+ public static final String TEST_INSERT_ROWS_IN_BATCH_RECEIVE =
+ "Test insert rows in batch request receive.";
+ public static final String TEST_INSERT_STRING_RECORDS_RECEIVE =
+ "Test insert string records request receive.";
+ public static final String START_BATCH_EXECUTING_TREE =
+ "Start batch executing {} sub-statement(s) in tree model, queryId: {}";
+ public static final String EXECUTING_SUB_STATEMENT_TREE =
+ "Executing sub-statement {}/{} in tree model, queryId: {}";
+ public static final String FAILED_EXECUTE_SUB_STATEMENT_TREE =
+ "Failed to execute sub-statement {}/{} in tree model, queryId: {}, completed: {}, remaining: {}, progress: {}%, error: {}";
+ public static final String SUCCESSFULLY_EXECUTED_SUB_STATEMENT_TREE =
+ "Successfully executed sub-statement {}/{} in tree model, queryId: {}";
+ public static final String COMPLETED_BATCH_EXECUTING_TREE =
+ "Completed batch executing all {} sub-statement(s) in tree model, queryId: {}";
+ public static final String START_BATCH_EXECUTING_TABLE =
+ "Start batch executing {} sub-statement(s) in table model, queryId: {}";
+ public static final String EXECUTING_SUB_STATEMENT_TABLE =
+ "Executing sub-statement {}/{} in table model, queryId: {}";
+ public static final String FAILED_EXECUTE_SUB_STATEMENT_TABLE =
+ "Failed to execute sub-statement {}/{} in table model, queryId: {}, completed: {}, remaining: {}, progress: {}%, error: {}";
+ public static final String SUCCESSFULLY_EXECUTED_SUB_STATEMENT_TABLE =
+ "Successfully executed sub-statement {}/{} in table model, queryId: {}";
+ public static final String COMPLETED_BATCH_EXECUTING_TABLE =
+ "Completed batch executing all {} sub-statement(s) in table model, queryId: {}";
+
+ // ---------------------------------------------------------------------------
+ // service – DataNode
+ // ---------------------------------------------------------------------------
+ // Startup / registration / shutdown lifecycle messages for the DataNode
+ // process; "{}" entries are SLF4J-style log placeholders.
+ public static final String DATANODE_ENV_VARS =
+ "IoTDB-DataNode environment variables: {}";
+ public static final String DATANODE_DEFAULT_CHARSET =
+ "IoTDB-DataNode default charset is: {}";
+ public static final String STARTING_DATANODE = "Starting DataNode...";
+ public static final String DATANODE_FIRST_START =
+ "DataNode is starting for the first time...";
+ public static final String DATANODE_RESTARTING = "DataNode is restarting...";
+ public static final String IOTDB_CONFIGURATION = "IoTDB configuration: {}";
+ public static final String DATANODE_SETUP_SUCCESSFULLY =
+ "Congratulations, IoTDB DataNode is set up successfully. Now, enjoy yourself!";
+ public static final String FAIL_TO_START_SERVER = "Fail to start server";
+ public static final String DATANODE_STARTED = "DataNode started";
+ public static final String DATANODE_PREPARED_SUCCESSFULLY =
+ "The DataNode is prepared successfully, which takes {} ms";
+ public static final String PULLING_SYSTEM_CONFIGURATIONS =
+ "Pulling system configurations from the ConfigNode-leader...";
+ public static final String CANNOT_PULL_SYSTEM_CONFIGURATIONS =
+ "Cannot pull system configurations from ConfigNode-leader";
+ public static final String SENDING_REGISTER_REQUEST =
+ "Sending register request to ConfigNode-leader...";
+ public static final String CANNOT_REGISTER_TO_CLUSTER =
+ "Cannot register to the cluster, because: {}";
+ public static final String CANNOT_REGISTER_AFTER_RETRIES =
+ "Cannot register into cluster after {} retries.";
+ public static final String PRECHECK_PASSED =
+ "Successfully pass the precheck, will do the formal registration soon.";
+ public static final String DELETE_SUCCEED = "delete {} succeed.";
+ public static final String DELETE_FAILED_NOT_EXIST =
+ "delete {} failed, because it does not exist.";
+ public static final String SENDING_RESTART_REQUEST =
+ "Sending restart request to ConfigNode-leader...";
+ public static final String CLEANED_SORT_TEMP_DIR =
+ "Cleaned up stale sort temp directory: {}";
+ public static final String MEET_ERROR_STARTING_UP = "Meet error while starting up.";
+ public static final String IOTDB_DATANODE_HAS_STARTED = "IoTDB DataNode has started.";
+ public static final String SETTING_UP_DATANODE = "Setting up IoTDB DataNode...";
+ public static final String RECOVER_SCHEMA = "Recover the schema...";
+ public static final String DATANODE_FAILED_SETUP = "IoTDB DataNode failed to set up.";
+ public static final String WAIT_DATABASES_READY =
+ "Wait for all databases ready, which takes {} ms.";
+ public static final String PREPARE_PIPE_RESOURCES =
+ "Prepare pipe resources successfully, which takes {} ms.";
+ public static final String RECOVER_SCHEMA_SUCCESSFULLY =
+ "Recover schema successfully, which takes {} ms.";
+ public static final String LOAD_CLASS_ERROR = "load class error: ";
+ public static final String EXCEPTION_SCHEMA_REGION_CONSENSUS_STOPPING =
+ "Exception during SchemaRegionConsensusImpl stopping";
+ public static final String EXCEPTION_DATA_REGION_CONSENSUS_STOPPING =
+ "Exception during DataRegionConsensusImpl stopping";
+
+ // ---------------------------------------------------------------------------
+ // service – DataNodeShutdownHook
+ // ---------------------------------------------------------------------------
+ // Messages for the JVM shutdown hook and auxiliary DataNode services;
+ // "{}" entries are SLF4J-style log placeholders.
+ public static final String DATANODE_EXITING = "DataNode exiting...";
+ public static final String INTERRUPTED_WAITING_PIPE_FINISH =
+ "Interrupted when waiting for pipe to finish";
+ public static final String TIMED_OUT_WAITING_PIPES =
+ "Timed out when waiting for pipes to finish, will break";
+ public static final String FAILED_BORROW_CONFIG_NODE_CLIENT =
+ "Failed to borrow ConfigNodeClient";
+ public static final String FAILED_REPORT_SHUTDOWN = "Failed to report shutdown";
+
+ // ---------------------------------------------------------------------------
+ // service – RegionMigrateService
+ // ---------------------------------------------------------------------------
+ public static final String REGION_BEGIN_MIGRATING =
+ "Region {} is notified to begin migrating";
+ public static final String REGION_FINISH_MIGRATING =
+ "Region {} is notified to finish migrating";
+ public static final String RESET_PEER_LIST_FAIL = "reset peer list fail";
+ public static final String REGION_MIGRATE_SERVICE_START = "Region migrate service start";
+ public static final String REGION_MIGRATE_SERVICE_STOP = "Region migrate service stop";
+
+ // ---------------------------------------------------------------------------
+ // service – SettleService
+ // ---------------------------------------------------------------------------
+ public static final String START_ERROR = "Start error";
+ public static final String WAITING_SETTLE_POOL_SHUTDOWN =
+ "Waiting for settle task pool to shut down";
+ public static final String SETTLE_SERVICE_STOPPED = "Settle service stopped";
+
+ // ---------------------------------------------------------------------------
+ // service – IoTDBInternalLocalReporter
+ // ---------------------------------------------------------------------------
+ public static final String CHECK_OR_CREATE_DATABASE_FAILED =
+ "IoTDBSessionReporter checkOrCreateDatabase failed.";
+ public static final String CHECK_OR_CREATE_DATABASE_FAILED_BECAUSE =
+ "IoTDBSessionReporter checkOrCreateDatabase failed because ";
+ // Grammar fix in the message text: "already start" -> "already started".
+ public static final String INTERNAL_REPORTER_ALREADY_STARTED =
+ "IoTDB Internal Reporter already started";
+ public static final String INTERNAL_REPORTER_START = "IoTDBInternalReporter start!";
+ public static final String INTERNAL_REPORTER_STOP = "IoTDBInternalReporter stop!";
+ public static final String FAILED_UPDATE_METRIC_VALUE =
+ "Failed to update the value of metric with status {}";
+ public static final String FAILED_AUTO_CREATE_TIMESERIES =
+ "Failed to auto create timeseries for {} with status {}";
+
+ // ---------------------------------------------------------------------------
+ // service – ExternalService
+ // ---------------------------------------------------------------------------
+ public static final String FAILED_MAKE_EXTERNAL_SERVICE_DIR =
+ "Failed to make external service dir";
+ public static final String EXTERNAL_SERVICE_LIB_ROOT = "External Service lib root: {}";
+ public static final String FAILED_GET_OPEN_FILE_NUMBER =
+ "Failed to get open file number, because ";
+ public static final String UNEXPECTED_ERROR_GETTING_TSFILE_NAME =
+ "Unexpected error occurred when getting tsfile name";
+
+ // ---------------------------------------------------------------------------
+ // service – metrics
+ // ---------------------------------------------------------------------------
+ public static final String FAILED_GET_PROCESS_RESIDENT_MEMORY =
+ "Failed to get process resident memory for pid {}";
+ public static final String DATANODE_PORT_CHECK_SUCCESSFUL = "DataNode port check successful.";
+
+ // ---------------------------------------------------------------------------
+ // tools – WalChecker
+ // ---------------------------------------------------------------------------
+ // CLI output of the WAL integrity checker; "{}" are SLF4J placeholders.
+ public static final String CHECKING_FOLDER = "Checking folder: {}";
+ public static final String NO_SUB_DIRECTORIES =
+ "No sub-directories under the given directory, check ends";
+ public static final String CHECKING_DIRECTORY = "Checking the No.{} directory {}";
+ public static final String WAL_FILE_NOT_EXIST = "Wal file doesn't exist, skipping";
+ public static final String WAL_CHECK_FAILED = "{} fails the check because";
+ public static final String CHECK_FINISHED_NO_DAMAGED =
+ "Check finished. There is no damaged file";
+ public static final String FAILED_FILES_FOUND =
+ "There are {} failed files. They are {}";
+ public static final String NO_ENOUGH_ARGS =
+ "No enough args: require the walRootDirectory";
+
+ // ---------------------------------------------------------------------------
+ // tools – TsFileSketchTool
+ // ---------------------------------------------------------------------------
+ public static final String FAIL_INIT_SKETCH_TOOL = "Fail to init TsFileSketchTool, {}";
+ public static final String FAIL_PARSE_TSFILE_METADATA = "Fail to parse TsFileMetadata, {}";
+ public static final String FAIL_PRINT_FILE_INFO = "Fail to printFileInfo, {}";
+ public static final String FAIL_PARSE_CHUNK = "Fail to parse chunk, {}";
+ public static final String FAIL_PRINT_TIMESERIES_INDEX = "Fail to printTimeseriesIndex, {}";
+
+ // ---------------------------------------------------------------------------
+ // tools – TsFileSplitTool
+ // ---------------------------------------------------------------------------
+ public static final String SPLITTING_TSFILE = "Splitting TsFile {} ...";
+ public static final String UNSUPPORTED_SPLIT_WITH_MODIFICATION =
+ "Unsupported to split TsFile with modification currently.";
+ public static final String UNSUPPORTED_SPLIT_WITH_ALIGNED =
+ "Unsupported to split TsFile with aligned timeseries currently.";
+
+ // ---------------------------------------------------------------------------
+ // tools – TsFileSplitByPartitionTool
+ // ---------------------------------------------------------------------------
+ // Spelling fix in the message text: "uncomplated" -> "uncompleted".
+ public static final String DELETE_UNCOMPLETED_FILE = "delete uncompleted file {}";
+ // Messages for splitting a TsFile by time partition; "{}" are SLF4J
+ // placeholders.
+ public static final String CREATE_TSFILE_FAILED_EXISTS =
+ "Create new TsFile {} failed because it exists";
+ public static final String CREATE_TSFILE_FAILED = "Create new TsFile {} failed ";
+ public static final String INCORRECT_MAGIC_STRING =
+ "the file's MAGIC STRING is incorrect, file path: {}";
+ public static final String INCORRECT_VERSION_NUMBER =
+ "the file's Version Number is incorrect, file path: {}";
+ public static final String FILE_NOT_CLOSED_CORRECTLY =
+ "the file is not closed correctly, file path: {}";
+
+ // ---------------------------------------------------------------------------
+ // tools – TsFileSelfCheckTool
+ // ---------------------------------------------------------------------------
+ public static final String ERROR_GETTING_TIMESERIES_METADATA =
+ "Error occurred while getting all TimeseriesMetadata with offset in TsFile.";
+ public static final String FILE_PATH = "file path: {}";
+
+ // ---------------------------------------------------------------------------
+ // tools – TsFileValidationTool
+ // ---------------------------------------------------------------------------
+ public static final String NOT_DIRECTORY_OR_NOT_EXIST =
+ "{} is not a directory or does not exist, skip it.";
+
+ // ---------------------------------------------------------------------------
+ // tools – TsFileValidationScan / TsFileStatisticScan
+ // ---------------------------------------------------------------------------
+ public static final String MEET_ERRORS_READING_FILE =
+ "Meet errors in reading file {} , skip it.";
+ public static final String MEET_ERROR = "meet error.";
+
+ // ---------------------------------------------------------------------------
+ // tools – MLogParser / PBTreeFileSketchTool
+ // ---------------------------------------------------------------------------
+ public static final String TOO_FEW_PARAMS =
+ "Too few params input, please check the following hint.";
+ public static final String PARSE_ERROR = "Parse error: {}";
+ public static final String ENCOUNTER_ERROR = "Encounter an error, because: {} ";
+ public static final String USE_HELP = "Use -help for more information";
+
+ // ---------------------------------------------------------------------------
+ // tools – SchemaRegionSnapshotParser
+ // ---------------------------------------------------------------------------
+ // Grammar/casing fix in the message text:
+ // "ioexception when get" -> "IOException when getting".
+ public static final String IOEXCEPTION_GET_FOLDER =
+ "IOException when getting {}'s folder";
+
+ // ---------------------------------------------------------------------------
+ // tools – SRStatementGenerator
+ // ---------------------------------------------------------------------------
+ // Grammar fix in the message text: "when parser" -> "when parsing".
+ public static final String ERROR_PARSER_TAG_ATTRIBUTES =
+ "Error when parsing tag and attributes files";
+ public static final String MEASUREMENT_ATTRIBUTES_NO_SNAPSHOT =
+ "Measurement has set attributes or tags, but not find snapshot files";
+
+ // ---------------------------------------------------------------------------
+ // tools – TsFileAndModSettleTool
+ // ---------------------------------------------------------------------------
+ // Settle-tool and pipe-statistics-tool messages; "{}" are SLF4J placeholders.
+ public static final String CANNOT_FIND_TSFILE = "Cannot find TsFile : {}";
+ public static final String NOT_DIRECTORY_PATH = "It's not a directory path : {}";
+ public static final String CANNOT_FIND_DIRECTORY = "Cannot find Directory : {}";
+ public static final String START_SETTLING_TSFILE =
+ "Start settling for tsFile : {}";
+ public static final String FINISH_SETTLING_ALL =
+ "Finish settling all tsfiles Successfully!";
+ public static final String FAIL_SERIALIZE_TSFILE_RESOURCE =
+ "fail to serialize new tsfile resource.";
+ public static final String FAILED_DELETE_SETTLE_LOG =
+ "failed to delete settle log, log path:{}";
+
+ // ---------------------------------------------------------------------------
+ // tools – TsFileSettleByCompactionTool
+ // ---------------------------------------------------------------------------
+ public static final String PARSE_COMMAND_LINE_FAILED =
+ "Parse command line args failed: {}";
+ public static final String ADD_SETTLE_COMPACTION_TASK_SUCCESS =
+ "Add Settle Compaction Task Successfully";
+ public static final String ADD_SETTLE_COMPACTION_TASK_FAILED =
+ "Add settle compaction task failed with status code: {}";
+
+ // ---------------------------------------------------------------------------
+ // tools – TsFileResourcePipeStatisticsSetTool
+ // ---------------------------------------------------------------------------
+ public static final String UNKNOWN_ARGUMENT = "Unknown argument: {}";
+ public static final String NO_DATA_DIRS_PROVIDED =
+ "No data directories provided. Please specify with --dirs ...";
+ public static final String VALIDATION_REPAIR_COMPLETED =
+ "Validation and repair completed. Statistics:";
+ public static final String SEPARATOR_LINE = "------------------------------------------------------";
+ public static final String IS_GENERATED_BY_PIPE_MARK = "isGeneratedByPipe mark: {}";
+ public static final String RESET_PROGRESS_INDEX = "resetProgressIndex: {}";
+ public static final String DATA_DIRECTORIES = "Data directories: ";
+ public static final String INDENT_PATH = " {}";
+ public static final String ERROR_VALIDATING_REPAIRING_RESOURCE = "Error validating or repairing resource {}: {}";
+ public static final String ERROR_LOADING_RESOURCES_FROM_PARTITION = "Error loading resources from partition {}: {}";
+ public static final String TIME_PARTITION_PROCESS_COMPLETED = "TimePartition {} has {} total resources, {} to set isGeneratedByPipe resources, {} to reset progressIndex resources, {} changed resources. Process completed.";
+ // Grammar fix in the message text: "is not exist" -> "does not exist".
+ public static final String SKIPPED_RESOURCE_FILE_NOT_EXIST = "{} is skipped because resource file does not exist.";
+ public static final String REPAIRING_TSFILE_RESOURCE = "Repairing TsFileResource: {}, isGeneratedByPipe mark: {}, actual mark: {}";
+ public static final String RESETTING_PROGRESS_INDEX_TO_MINIMUM = "Resetting TsFileResource:{} 's progressIndex to minimum, original progressIndex: {}";
+ public static final String MARKED_TSFILE_RESOURCE_AS = "Marked TsFileResource as {} in resource: {}";
+ public static final String RESET_PROGRESS_INDEX_TO_MINIMUM = "Reset TsFileResource:{} 's progressIndex to minimum.";
+ public static final String FAILED_TO_REPAIR_TSFILE_RESOURCE = "ERROR: Failed to repair TsFileResource: {}";
+ public static final String TOTAL_TIME_TAKEN = "Total time taken: {} ms, total TsFile resources: {}, set isGeneratedByPipe resources: {}, reset progressIndex resources: {}, changed resources: {}";
+
+ // ---------------------------------------------------------------------------
+ // tools – DelayAnalyzer
+ // ---------------------------------------------------------------------------
+ public static final String DELAY_ANALYZER_RESET = "[DelayAnalyzer] DelayAnalyzer has been reset";
+
+ // ---------------------------------------------------------------------------
+ // utils – DataNodeObjectFileService
+ // ---------------------------------------------------------------------------
+ public static final String FAILED_REMOVE_OBJECT_FILE =
+ "Failed to remove object file {}";
+ public static final String FAILED_REMOVE_EMPTY_OBJECT_DIR =
+ "Failed to remove empty object dir {}";
+ public static final String REMOVE_OBJECT_FILE =
+ "Remove object file {}, size is {}(byte)";
+
+ // ---------------------------------------------------------------------------
+ // utils – OpenFileNumUtil
+ // ---------------------------------------------------------------------------
+ public static final String CANNOT_GET_PID =
+ "Cannot get PID of IoTDB process because ";
+ public static final String UNSUPPORTED_OS_GET_PID =
+ "Unsupported OS {} for OpenFileNumUtil to get the PID of IoTDB.";
+ public static final String CANNOT_GET_OPEN_FILE_NUMBER =
+ "Cannot get open file number of IoTDB process because ";
+
+ // ---------------------------------------------------------------------------
+ // utils – MemUtils
+ // ---------------------------------------------------------------------------
+ public static final String UNSUPPORTED_DATA_POINT_TYPE = "Unsupported data point type";
+
+ // ---------------------------------------------------------------------------
+ // utils – ErrorHandlingUtils
+ // ---------------------------------------------------------------------------
+ public static final String ERROR_OPERATION_LOG =
+ "Status code: {}, operation: {} failed";
+
+ // ---------------------------------------------------------------------------
+ // utils – CommonUtils
+ // ---------------------------------------------------------------------------
+ // Mixed exception/format messages. _FMT constants use %s (String.format);
+ // the others are either plain text or prefixes to be concatenated.
+ public static final String INPUT_FLOAT_INFINITY = "The input float value is Infinity";
+ public static final String INPUT_DOUBLE_INFINITY = "The input double value is Infinity";
+ public static final String BOOLEAN_PARSE_ERROR =
+ "The BOOLEAN should be true/TRUE, false/FALSE or 0/1";
+ // NOTE(review): no space after ':' here, unlike UNSUPPORTED_DATA_TYPE below —
+ // confirm whether "Unsupported data type: %s" was intended.
+ public static final String UNSUPPORTED_DATA_TYPE_FMT = "Unsupported data type:%s";
+ public static final String UNSUPPORTED_DATA_TYPE = "Unsupported data type: ";
+ public static final String AGGREGATE_FUNCTION_NAME_NULL =
+ "AggregateFunction Name must not be null";
+ public static final String INVALID_AGGREGATION_FUNCTION =
+ "Invalid Aggregation function: ";
+ public static final String INVALID_AGGREGATION_FUNCTION_FMT =
+ "Invalid Aggregation function: %s";
+ public static final String SCALAR_FUNCTION_NAME_NULL =
+ "ScalarFunction Name must not be null.";
+ public static final String DELETE_CURSOR_SIZE_ERROR =
+ "deleteCursor should be an array whose size is 1";
+
+ // ---------------------------------------------------------------------------
+ // utils – ThreadUtils
+ // ---------------------------------------------------------------------------
+ // Thread-pool shutdown and configuration-loading messages; "{}" are SLF4J
+ // placeholders, _FMT constants use %s (String.format).
+ public static final String WAITING_TERMINATED_TIMEOUT =
+ "Waiting {} to be terminated is timeout";
+ public static final String POOL_NOT_EXIT_AFTER_TIMEOUT =
+ "{} still doesn't exit after 60s";
+
+ // ---------------------------------------------------------------------------
+ // utils – WindowEvaluationTaskPoolManager
+ // ---------------------------------------------------------------------------
+ public static final String WINDOW_EVAL_POOL_INIT =
+ "WindowEvaluationTaskPoolManager is initializing, thread number: {}";
+
+ // ---------------------------------------------------------------------------
+ // utils – LogWriter
+ // ---------------------------------------------------------------------------
+ public static final String INTERRUPTED_NO_WRITE =
+ "someone interrupt current thread, so no need to do write for io safety";
+
+ // ---------------------------------------------------------------------------
+ // conf – IoTDBStartCheck
+ // ---------------------------------------------------------------------------
+ public static final String STARTING_IOTDB = "Starting IoTDB {}";
+ public static final String CANNOT_CREATE_SCHEMA_DIR = "Can not create schema dir: {}";
+ public static final String SCHEMA_DIR_CREATED = " {} dir has been created.";
+ public static final String IOTDB_VERSION_TOO_OLD = "IoTDB version is too old";
+ public static final String REPAIR_SYSTEM_PROPERTIES = "repair system.properties, lack {}";
+ public static final String UNEXPECTED_CONSENSUS_GROUP_TYPE =
+ "Unexpected consensus group type";
+ public static final String ENCRYPT_MAGIC_STRING_NOT_MATCHED =
+ "encrypt_magic_string is not matched";
+
+ // ---------------------------------------------------------------------------
+ // conf – IoTDBDescriptor
+ // ---------------------------------------------------------------------------
+ public static final String FAILED_UPDATE_CONFIG_FILE = "Failed to update config file";
+ public static final String WILL_RELOAD_PROPERTIES = "Will reload properties from {} ";
+ public static final String GET_URL_FAILED = "get url failed";
+ public static final String START_READ_CONFIG_FILE = "Start to read config file {}";
+ public static final String FAIL_FIND_CONFIG_FILE =
+ "Fail to find config file {}, reject DataNode startup.";
+ public static final String CANNOT_LOAD_CONFIG_FILE =
+ "Cannot load config file, reject DataNode startup.";
+ public static final String INCORRECT_FORMAT_CONFIG_FILE =
+ "Incorrect format in config file, reject DataNode startup.";
+ public static final String COULD_NOT_LOAD_CONFIG =
+ "Couldn't load the configuration from any of the known sources.";
+ public static final String START_RELOAD_CONFIG_FILE = "Start to reload config file {}";
+ public static final String FAIL_RELOAD_CONFIG_FILE = "Fail to reload config file {}";
+ public static final String RELOAD_METRIC_SERVICE = "Reload metric service in level {}";
+ public static final String PAGE_SIZE_GREATER_THAN_GROUP_SIZE =
+ "page_size is greater than group size, will set it as the same with group size";
+ public static final String MQTT_HOST_NOT_CONFIGURED =
+ "MQTT host is not configured, will use dn_rpc_address.";
+ public static final String FAILED_PARSE_TRUSTED_URI =
+ "Failed to parse trusted_uri_pattern {}";
+ public static final String FAILED_GET_FILE_SIZE = "Failed to get file size of {}, because";
+ public static final String SET_DELAY_ANALYZER_WINDOW_SIZE =
+ "[DelayAnalyzer] Set delay_analyzer_window_size to {}";
+ public static final String FAIL_RELOAD_CONFIGURATION_FMT =
+ "Fail to reload configuration because %s";
+
+ // ---------------------------------------------------------------------------
+ // conf – IoTDBConfig
+ // ---------------------------------------------------------------------------
+ public static final String FAIL_GET_CANONICAL_PATH = "Fail to get canonical path of {}";
+ public static final String NO_DATA_DIR_SET =
+ "No data directory is set. loadTsFileDirs is kept as the default value.";
+ public static final String FAILED_GET_FIELD = "Failed to get field {}";
+ public static final String SKIP_FAILED_TABLE_SCHEMA_CHECK =
+ "skipFailedTableSchemaCheck is set to {}.";
+ public static final String DIR_REMOVED_FROM_DATA_DIRS =
+ "%s is removed from data_dirs parameter, please add it back.";
+
+ // ---------------------------------------------------------------------------
+ // conf – DataNodeMemoryConfig
+ // ---------------------------------------------------------------------------
+ // NOTE(review): identical text to FAIL_RELOAD_CONFIGURATION_FMT above —
+ // consider consolidating if both call sites can share one constant.
+ public static final String FAIL_RELOAD_MEMORY_CONFIG_FMT =
+ "Fail to reload configuration because %s";
+
+ // ---------------------------------------------------------------------------
+ // conf – DataNodeStartupCheck
+ // ---------------------------------------------------------------------------
+ // Startup-check, REST-service, and subscription-module messages; "{}" are
+ // SLF4J-style log placeholders.
+ public static final String PORTS_HAVE_REPEAT =
+ "ports used in datanode have repeat.";
+
+ // ---------------------------------------------------------------------------
+ // conf – REST service
+ // ---------------------------------------------------------------------------
+ public static final String REST_COULD_NOT_LOAD_CONFIG =
+ "Couldn't load the REST Service configuration from any of the known sources.";
+ public static final String REST_START_READ_CONFIG = "Start to read config file {}";
+ public static final String REST_FAIL_FIND_CONFIG =
+ "REST service fail to find config file {}";
+ public static final String REST_CANNOT_LOAD_CONFIG =
+ "REST service cannot load config file, use default configuration";
+ public static final String REST_INCORRECT_FORMAT =
+ "REST service Incorrect format in config file, use default configuration";
+
+ // ---------------------------------------------------------------------------
+ // subscription – SubscriptionBroker
+ // ---------------------------------------------------------------------------
+ public static final String SUBSCRIPTION_PREFETCHING_QUEUE_STATE =
+ "Subscription: SubscriptionPrefetchingQueue state {}";
+ public static final String SUBSCRIPTION_UNEXPECTED_EXCEPTION =
+ "Subscription: unexpected exception (broken invariant) {}";
+
+ // ---------------------------------------------------------------------------
+ // subscription – SubscriptionReceiverV1
+ // ---------------------------------------------------------------------------
+ public static final String SUBSCRIPTION_UNKNOWN_REQUEST_TYPE =
+ "Subscription: Unknown PipeSubscribeRequestType, response status = {}.";
+ public static final String SUBSCRIPTION_CONSUMER_HEARTBEAT_SUCCESS =
+ "Subscription: consumer {} heartbeat successfully";
+ public static final String SUBSCRIPTION_CONSUMER_SUBSCRIBE_SUCCESS =
+ "Subscription: consumer {} subscribe {} successfully";
+ public static final String SUBSCRIPTION_CONSUMER_CLOSE_SUCCESS =
+ "Subscription: consumer {} close successfully";
+ public static final String SUBSCRIPTION_EXCEPTION_HANDSHAKING =
+ "Exception occurred when handshaking with request {}";
+ public static final String SUBSCRIPTION_EXCEPTION_HEARTBEAT =
+ "Exception occurred when heartbeat with request {}";
+ public static final String SUBSCRIPTION_EXCEPTION_SUBSCRIBING =
+ "Exception occurred when subscribing with request {}";
+ public static final String SUBSCRIPTION_EXCEPTION_UNSUBSCRIBING =
+ "Exception occurred when unsubscribing with request {}";
+ public static final String SUBSCRIPTION_EXCEPTION_POLLING =
+ "Exception occurred when polling with request {}";
+ public static final String SUBSCRIPTION_EXCEPTION_COMMITTING =
+ "Exception occurred when committing with request {}";
+ public static final String SUBSCRIPTION_EXCEPTION_CLOSING =
+ "Exception occurred when closing with request {}";
+ public static final String SUBSCRIPTION_EXCEPTION_CREATING_CONSUMER =
+ "Exception occurred when creating consumer {} in config node";
+ public static final String SUBSCRIPTION_EXCEPTION_CLOSING_CONSUMER =
+ "Exception occurred when closing consumer {} in config node";
+
+ // ---------------------------------------------------------------------------
+ // subscription – SubscriptionBrokerAgent
+ // ---------------------------------------------------------------------------
+ public static final String SUBSCRIPTION_CREATE_BROKER =
+ "Subscription: create broker bound to consumer group [{}]";
+ public static final String SUBSCRIPTION_DROP_BROKER =
+ "Subscription: drop broker bound to consumer group [{}]";
+
+ // ---------------------------------------------------------------------------
+ // subscription – SubscriptionConsumerAgent
+ // ---------------------------------------------------------------------------
+ public static final String EXCEPTION_DROPPING_CONSUMER_GROUP =
+ "Exception occurred when dropping consumer group {}";
+
+ // ---------------------------------------------------------------------------
+ // subscription – SubscriptionTopicAgent
+ // ---------------------------------------------------------------------------
+ public static final String EXCEPTION_DROPPING_TOPIC =
+ "Exception occurred when dropping topic {}";
+
+ // ---------------------------------------------------------------------------
+ // subscription – SubscriptionEvent
+ // ---------------------------------------------------------------------------
+ public static final String EVENT_NACKED_TIMES = "{} has been nacked {} times";
+
+ // ---------------------------------------------------------------------------
+ // subscription – SubscriptionPollResponseCache
+ // ---------------------------------------------------------------------------
+ public static final String NULL_RESPONSE_INVALIDATING =
+ "null response when invalidating, skip it";
+
+ // ---------------------------------------------------------------------------
+ // subscription – SubscriptionEventTsFileResponse
+ // ---------------------------------------------------------------------------
+ public static final String UNEXPECTED_RESPONSE_TYPE = "unexpected response type: {}";
+ public static final String UNEXPECTED_MESSAGE_TYPE = "unexpected message type: {}";
+ public static final String UNEXPECTED_RESPONSE_TYPE_FMT = "unexpected response type: %s";
+
+ // ---------------------------------------------------------------------------
+ // subscription – SubscriptionPipeEventBatches
+ // ---------------------------------------------------------------------------
+ public static final String EXCEPTION_SEALING_EVENTS =
+ "Exception occurred when sealing events from batch {}";
+ public static final String EXCEPTION_CONSTRUCT_NEW_BATCH =
+ "Exception occurred when construct new batch";
+
+ // ---------------------------------------------------------------------------
+ // subscription – SubscriptionPrefetchingQueue
+ // ---------------------------------------------------------------------------
+ public static final String EXCEPTION_EXECUTE_RECEIVER_SUBTASK =
+ "Exception {} occurred when {} execute receiver subtask";
+ public static final String EXCEPTION_CONSTRUCT_TABLET_ITERATOR =
+ "Exception {} occurred when {} construct ToTabletIterator";
+
+ // ---------------------------------------------------------------------------
+ // consensus – BaseStateMachine
+ // ---------------------------------------------------------------------------
+ public static final String UNEXPECTED_CONSENSUS_REQUEST =
+ "Unexpected IConsensusRequest : {}";
+ public static final String UNEXPECTED_CONSENSUS_REQUEST_EXCEPTION =
+ "Unexpected IConsensusRequest!";
+
+ // ---------------------------------------------------------------------------
+ // consensus – SchemaExecutionVisitor
+ // ---------------------------------------------------------------------------
+ public static final String IO_ERROR = "{}: IO error: ";
+ public static final String OPENED_PIPE_LISTENING_QUEUE =
+ "Opened pipe listening queue on schema region {}";
+ public static final String CLOSED_PIPE_LISTENING_QUEUE =
+ "Closed pipe listening queue on schema region {}";
+
+ // ---------------------------------------------------------------------------
+ // consensus – SchemaRegionStateMachine
+ // ---------------------------------------------------------------------------
+ public static final String FAIL_LOAD_SNAPSHOT = "Fail to load snapshot from {}";
+
+ // ---------------------------------------------------------------------------
+ // consensus – DataExecutionVisitor
+ // ---------------------------------------------------------------------------
+ public static final String ERROR_EXECUTING_PLAN_NODE =
+ "Error in executing plan node: {}";
+ public static final String ERROR_EXECUTING_PLAN_NODE_CAUSED =
+ "Error in executing plan node: {}, caused by {}";
+ public static final String REJECT_EXECUTING_PLAN_NODE =
+ "Reject in executing plan node: {}, caused by {}";
+ public static final String BATCH_FAILURE_INSERT_ROWS =
+ "Batch failure in executing a InsertRowsNode.";
+ public static final String BATCH_FAILURE_INSERT_MULTI_TABLETS =
+ "Batch failure in executing a InsertMultiTabletsNode.";
+ public static final String BATCH_FAILURE_INSERT_ROWS_ONE_DEVICE =
+ "Batch failure in executing a InsertRowsOfOneDeviceNode.";
+
+ // ---------------------------------------------------------------------------
+ // consensus – DataRegionStateMachine
+ // ---------------------------------------------------------------------------
+ public static final String EXCEPTION_REPLACING_DATA_REGION =
+ "Exception occurs when replacing data region in storage engine.";
+ public static final String UNEXPECTED_PLAN_NODE_TYPE =
+ "Unexpected PlanNode type {}, which is not SearchNode";
+ public static final String TABLE_NOT_EXISTS_OR_LOST =
+ "table is not exists or lost, result code is {}";
+ public static final String GET_FRAGMENT_INSTANCE_FAILED = "Get fragment instance failed";
+ public static final String CANNOT_GET_CANONICAL_FILE =
+ "{}: cannot get the canonical file of {} due to {}";
+
+ // ---------------------------------------------------------------------------
+ // auth – LoginLockManager
+ // ---------------------------------------------------------------------------
+ public static final String IP_LOGIN_ATTEMPTS_DISABLED =
+ "IP-level login attempts disabled (set to {})";
+ public static final String USER_LOGIN_ATTEMPTS_DISABLED =
+ "User-level login attempts disabled (set to {})";
+ public static final String IP_LOCKED = "IP '{}' locked for user ID '{}'";
+ public static final String USER_UNLOCKED_MANUAL = "User ID '{}' unlocked (manual)";
+ public static final String IP_UNLOCKED_MANUAL =
+ "IP '{}' for user ID '{}' unlocked (manual)";
+ public static final String USER_UNLOCKED_EXPIRED = "User ID '{}' unlocked (expired)";
+ public static final String IP_UNLOCKED_EXPIRED =
+ "IP '{}' for user ID '{}' unlocked (expired)";
+ public static final String IP_LOCKED_MULTIPLE_USERS =
+ "IP '{}' locked by {} different users → potential attack";
+ public static final String USER_MULTIPLE_IP_LOCKS =
+ "User ID '{}' has {} IP locks → potential attack";
+ public static final String FAILED_CHECK_IP_UP =
+ "Failed to check if IP address={} is up";
+
+ // ---------------------------------------------------------------------------
+ // auth – ClusterAuthorityFetcher
+ // ---------------------------------------------------------------------------
+ public static final String CACHE_USER_PATH_PRIVILEGES_ERROR =
+ "cache user's path privileges error";
+ public static final String CACHE_ROLE_PATH_PRIVILEGES_ERROR =
+ "cache role's path privileges error";
+
+ // ---------------------------------------------------------------------------
+ // auth – BasicAuthorityCache
+ // ---------------------------------------------------------------------------
+ public static final String DATANODE_CACHE_INIT_FAILED =
+ "datanode cache initialization failed";
+
+ // ---------------------------------------------------------------------------
+ // trigger – TriggerExecutor
+ // ---------------------------------------------------------------------------
+ public static final String TRIGGER_FIRE_ERROR =
+ "Error occurred when firing trigger, trigger: {}, cause: {}";
+
+ // ---------------------------------------------------------------------------
+ // trigger – TriggerInformationUpdater
+ // ---------------------------------------------------------------------------
+ public static final String TRIGGER_INFO_UPDATER_STARTED =
+ "Stateful-Trigger-Information-Updater is successfully started.";
+ public static final String TRIGGER_INFO_UPDATER_STOPPED =
+ "Stateful-Trigger-Information-Updater is successfully stopped.";
+ public static final String ERROR_UPDATING_TRIGGER_INFO =
+ "Meet error when updating trigger information:";
+
+ // ---------------------------------------------------------------------------
+ // trigger – TriggerFireVisitor
+ // ---------------------------------------------------------------------------
+ public static final String TRIGGER_INTERRUPTED_SLEEP =
+ "{} interrupted when sleep";
+
+ // ---------------------------------------------------------------------------
+ // trigger – TriggerClassLoaderManager / TriggerClassLoader
+ // ---------------------------------------------------------------------------
+ public static final String TRIGGER_LIB_ROOT = "Trigger lib root: {}";
+
+ // ---------------------------------------------------------------------------
+ // trigger – TriggerManagementService
+ // ---------------------------------------------------------------------------
+ public static final String ERROR_READING_MD5 =
+ "Error occurred when trying to read md5 of {}";
+
+ // ---------------------------------------------------------------------------
+ // partition – DataPartitionTableGenerator
+ // ---------------------------------------------------------------------------
+ public static final String TASK_ALREADY_STARTED =
+ "Task is already started or completed";
+
+ public static final String FROM_CONFIG_NODE = "' from config node.'";
+ public static final String IS_NOT_SUPPORTED = "' is not supported'";
+ public static final String CANNOT_SSL_HANDSHAKE_WITH_CN_LEADER = "Cannot SSL Handshake with ConfigNode-leader.";
+ public static final String CANNOT_CONNECT_TO_CN_LEADER = "Cannot connect to ConfigNode-leader.";
+ public static final String CAPACITY_LARGER_THAN_INITIAL_PERMITS = "Capacity should be larger than initial permits.";
+ public static final String CURRENT_TV_LIST_NOT_SORTED = "Current TVList is not sorted";
+ public static final String DN_CLIENT_NOT_SUPPORT_ADD_CONSENSUS_GROUP = "DataNode to ConfigNode client doesn't support addConsensusGroup.";
+ public static final String DN_CLIENT_NOT_SUPPORT_GET_HEARTBEAT = "DataNode to ConfigNode client doesn't support getConfigNodeHeartBeat.";
+ public static final String DN_CLIENT_NOT_SUPPORT_NOTIFY_REGISTER = "DataNode to ConfigNode client doesn't support notifyRegisterSuccess.";
+ public static final String DN_CLIENT_NOT_SUPPORT_REGISTER_CN = "DataNode to ConfigNode client doesn't support registerConfigNode.";
+ public static final String DN_CLIENT_NOT_SUPPORT_REMOVE_CONSENSUS_GROUP = "DataNode to ConfigNode client doesn't support removeConsensusGroup.";
+ public static final String DN_CLIENT_NOT_SUPPORT_REPORT_SHUTDOWN = "DataNode to ConfigNode client doesn't support reportConfigNodeShutdown.";
+ public static final String DN_CLIENT_NOT_SUPPORT_SET_STATUS = "DataNode to ConfigNode client doesn't support setDataNodeStatus.";
+ public static final String DN_CLIENT_NOT_SUPPORT_STOP_AND_CLEAR = "DataNode to ConfigNode client doesn't support stopAndClearConfigNode.";
+ public static final String ERROR_OCCURRED_DURING_CREATING_DIR = "Error occurred during creating directory ";
+ public static final String EXPECTING_NON_EMPTY_STRING_FOR = "Expecting a non-empty string for ";
+ public static final String FAILED_TO_CONSTRUCT_PIPE_SINK = "Failed to construct PipeSink, because of ";
+ public static final String FAILED_TO_GET_UDF_JAR = "Failed to get UDF jar from config node.";
+ public static final String FAILED_TO_GET_CONSUMER_GROUP_META = "Failed to get consumer group meta from config node.";
+ public static final String FAILED_TO_GET_TOPIC_META = "Failed to get topic meta from config node.";
+ public static final String FAILED_TO_GET_TRIGGER_JAR = "Failed to get trigger jar from config node.";
+ public static final String FETCH_SCHEMA_FAILED = "Fetch Schema failed. ";
+ public static final String INDEX_BELOW_START_POSITION = "Index below startPosition: ";
+ public static final String INDEX_EXCEEDS_END_POSITION = "Index exceeds endPosition: ";
+ public static final String INDEX_OUT_OF_BOUND_ERROR = "Index out of bound error!";
+ public static final String INVALID_PUSH_MULTI_PIPE_META_REQ = "Invalid TPushMultiPipeMetaReq";
+ public static final String INVALID_PUSH_MULTI_TOPIC_META_REQ = "Invalid TPushMultiTopicMetaReq";
+ public static final String INVALID_PUSH_SINGLE_PIPE_META_REQ = "Invalid TPushSinglePipeMetaReq";
+ public static final String INVALID_PARAM = "Invalid param";
+ public static final String INVALID_PARAMETERS_CHECK_USER_GUIDE = "Invalid parameters. Please check the user guide.";
+ public static final String INVALID_REQUEST = "Invalid request ";
+ public static final String PREPARED_STMT_NOT_SUPPORTED_FOR_TREE = "PreparedStatement is not supported for Tree model";
+ public static final String FILE_LENGTH_LARGER_THAN_MAX = "The file length is larger than max_object_file_size_in_bytes";
+ public static final String UNKNOWN_CONSENSUS_GROUP_TYPE = "Unknown consensus group type: ";
+ public static final String UNKNOWN_DATA_TYPE = "Unknown data type: ";
+ public static final String UNKNOWN_PARAMETER_TYPE = "Unknown parameter type: ";
+ public static final String UNKNOWN_SQL_DIALECT = "Unknown sql_dialect: ";
+ public static final String UNRECOGNIZED_MNODE_TYPE = "Unrecognized MNode type";
+ public static final String UNRECOGNIZED_DATATYPE = "Unrecognized datatype: ";
+ public static final String UNSUPPORTED_COLUMN_GENERATOR_TYPE = "Unsupported ColumnGeneratorType: ";
+ public static final String UNSUPPORTED_TRIGGER_FIRE_RESULT_TYPE = "Unsupported TriggerFireResult Type";
+ public static final String UTILITY_CLASS = "Utility class";
+ public static final String APPEND_SIZE_MUST_BE_POSITIVE = "appendSize must be positive";
+ public static final String BLOCKS_SHOULD_NEVER_BE_ZERO = "blocks should never be zero.";
+ public static final String END_INDEX_MUST_BE_GE_START_INDEX = "endIndex must be >= startIndex";
+ public static final String ERROR_CODE = "error code: ";
+ public static final String NULL_RESPONSE_WHEN_SERIALIZING = "null response when serializing";
+ public static final String OBJECT_STORAGE_NOT_SUPPORTED_YET = "object storage is not supported yet";
+ public static final String REGISTERED_TASK_COUNT_LT_ZERO = "registeredTaskCount < 0";
+ public static final String REGISTERED_TASK_COUNT_LE_ZERO = "registeredTaskCount <= 0";
+ public static final String REQUEST_TYPE_NOT_SUPPORTED = "request type is not supported: ";
+ public static final String UNEXPECTED_REQUEST_TYPE = "unexpected request type: %s";
+}
diff --git a/iotdb-core/datanode/src/main/i18n/en/org/apache/iotdb/db/i18n/DataNodePipeMessages.java b/iotdb-core/datanode/src/main/i18n/en/org/apache/iotdb/db/i18n/DataNodePipeMessages.java
new file mode 100644
index 0000000000000..b506d832b4e42
--- /dev/null
+++ b/iotdb-core/datanode/src/main/i18n/en/org/apache/iotdb/db/i18n/DataNodePipeMessages.java
@@ -0,0 +1,1372 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.i18n;
+
+public final class DataNodePipeMessages {
+
+ // ===================== CONSENSUS =====================
+
+ public static final String CLOSING_DELETION_RESOURCE_MANAGER_FOR =
+ "Closing deletion resource manager for {}...";
+ public static final String DAL_THREAD_STILL_DOESN_T_EXIT_AFTER =
+ "DAL Thread {} still doesn't exit after 30s";
+ public static final String DELETIONMANAGER_CURRENT_DAL_DIR_IS_DELETED_SUCCESSFULLY =
+ "DeletionManager-{}: current DAL dir {} is deleted successfully";
+ public static final String DELETIONMANAGER_CURRENT_DAL_DIR_IS_NOT_INITIALIZED =
+ "DeletionManager-{}: current DAL dir {} is not initialized, no need to delete.";
+ public static final String DELETIONMANAGER_CURRENT_WAITING_IS_INTERRUPTED_MAY_BECAUSE =
+ "DeletionManager-{}: current waiting is interrupted. May because current application is "
+ + "down. ";
+ public static final String DELETIONMANAGER_DELETE_DELETION_FILE_IN_DIR =
+ "DeletionManager-{} delete deletion file in {} dir...";
+ public static final String DELETIONMANAGER_FAILED_TO_DELETE_FILE_IN_DIR =
+ "DeletionManager-{} failed to delete file in {} dir, please manually check!";
+ public static final String DELETIONRESOURCE_HAS_BEEN_RELEASED_TRIGGER_A_REMOVE =
+ "DeletionResource {} has been released, trigger a remove of DAL...";
+ public static final String DELETION_PERSIST_CANNOT_CREATE_FILE_PLEASE_CHECK =
+ "Deletion persist: Cannot create file {}, please check your file system manually.";
+ public static final String DELETION_PERSIST_CANNOT_WRITE_TO_MAY_CAUSE =
+ "Deletion persist: Cannot write to {}, may cause data inconsistency.";
+ public static final String DELETION_PERSIST_CURRENT_BATCH_FSYNC_DUE_TO =
+ "Deletion persist-{}: current batch fsync due to timeout";
+ public static final String DELETION_PERSIST_CURRENT_FILE_HAS_BEEN_CLOSED =
+ "Deletion persist-{}: current file has been closed";
+ public static final String DELETION_PERSIST_SERIALIZE_DELETION_RESOURCE =
+ "Deletion persist-{}: serialize deletion resource {}";
+ public static final String DELETION_PERSIST_STARTING_TO_PERSIST_CURRENT_WRITING =
+ "Deletion persist-{}: starting to persist, current writing: {}";
+ public static final String DELETION_PERSIST_SWITCHING_TO_A_NEW_FILE =
+ "Deletion persist-{}: switching to a new file, current writing: {}";
+ public static final String DELETION_RESOURCE_MANAGER_FOR_HAS_BEEN_SUCCESSFULLY =
+ "Deletion resource manager for {} has been successfully closed!";
+ public static final String DETECT_FILE_CORRUPTED_WHEN_RECOVER_DAL_DISCARD =
+ "Detect file corrupted when recover DAL-{}, discard all subsequent DALs...";
+ public static final String FAILED_TO_INITIALIZE_DELETIONRESOURCEMANAGER =
+ "Failed to initialize DeletionResourceManager";
+ public static final String FAILED_TO_READ_DELETION_FILE_MAY_BECAUSE =
+ "Failed to read deletion file {}, may because this file corrupted when writing it.";
+ public static final String FAILED_TO_RECOVER_DELETIONRESOURCEMANAGER =
+ "Failed to recover DeletionResourceManager";
+ public static final String FAIL_TO_ALLOCATE_DELETIONBUFFER_GROUP_S_BUFFER =
+ "Fail to allocate deletionBuffer-group-{}'s buffer because out of memory.";
+ public static final String FAIL_TO_CLOSE_CURRENT_LOGGING_FILE_WHEN =
+ "Fail to close current logging file when closing";
+ public static final String FAIL_TO_REGISTER_DELETIONRESOURCE_INTO_DELETIONBUFFER_BECAUSE =
+ "Fail to register DeletionResource into deletionBuffer-{} because this buffer is closed.";
+ public static final String INTERRUPTED_WHEN_WAITING_FOR_ALL_DELETIONS_FLUSHED =
+ "Interrupted when waiting for all deletions flushed.";
+ public static final String INTERRUPTED_WHEN_WAITING_FOR_RESULT =
+ "Interrupted when waiting for result.";
+ public static final String INTERRUPTED_WHEN_WAITING_FOR_TAKING_DELETIONRESOURCE_FROM =
+ "Interrupted when waiting for taking DeletionResource from blocking queue to serialize.";
+ public static final String INTERRUPTED_WHEN_WAITING_FOR_TAKING_WALENTRY_FROM =
+ "Interrupted when waiting for taking WALEntry from blocking queue to serialize.";
+ public static final String INVALID_DELETION_PROGRESS_INDEX = "Invalid deletion progress index: ";
+ public static final String PERSISTTHREAD_DID_NOT_TERMINATE_WITHIN_S =
+ "persistThread did not terminate within {}s";
+ public static final String READ_DELETION_FILE_MAGIC_VERSION =
+ "Read deletion file-{} magic version: {}";
+ public static final String READ_DELETION_FROM_FILE = "Read deletion: {} from file {}";
+ public static final String UNABLE_TO_CREATE_IOTCONSENSUSV2_DELETION_DIR_AT =
+ "Unable to create iotConsensusV2 deletion dir at {}";
+
+ // ===================== AGENT =====================
+
+ public static final String ATTEMPT_TO_REPORT_PIPE_EXCEPTION_TO_A =
+ "Attempt to report pipe exception to a null PipeTaskMeta.";
+ public static final String CANNOT_PARSE_REBOOT_TIMES_FROM_FILE_SET =
+ "Cannot parse reboot times from file {}, set the current time in seconds ({}) as the "
+ + "reboot times";
+ public static final String CANNOT_RECORD_REBOOT_TIMES_TO_FILE_THE =
+ "Cannot record reboot times {} to file {}, the reboot times will not be updated";
+ public static final String CANNOT_START_SIMPLEPROGRESSINDEXASSIGNER_BECAUSE_OF =
+ "Cannot start SimpleProgressIndexAssigner because of {}";
+ public static final String CREATE_PIPE_DN_TASK_SUCCESSFULLY_WITHIN_MS =
+ "Create pipe DN task {} successfully within {} ms";
+ public static final String DEREGISTER_SUBTASK_RUNNINGTASKCOUNT_REGISTEREDTASKCOUNT =
+ "Deregister subtask {}. runningTaskCount: {}, registeredTaskCount: {}";
+ public static final String DROP_PIPE_DN_TASK_SUCCESSFULLY_WITHIN_MS =
+ "Drop pipe DN task {} successfully within {} ms";
+ public static final String ERROR_OCCURRED_WHEN_COLLECTING_EVENTS_FROM_PROCESSOR =
+ "Error occurred when collecting events from processor.";
+ public static final String EXCEPTION_IN_PIPE_EVENT_PROCESSING_IGNORED_BECAUSE =
+ "Exception in pipe event processing, ignored because pipe is dropped.{}";
+ public static final String EXCEPTION_OCCURRED_WHEN_CLOSING_PIPE_CONNECTOR_SUBTASK =
+ "Exception occurred when closing pipe connector subtask {}, root cause: {}";
+ public static final String EXCEPTION_OCCURRED_WHEN_CLOSING_PIPE_PROCESSOR_SUBTASK =
+ "Exception occurred when closing pipe processor subtask {}, root cause: {}";
+ public static final String EXCEPTION_OCCURS_WHEN_EXECUTING_PIPE_TASK =
+ "Exception occurs when executing pipe task: ";
+ public static final String FAILED_TO_CHECK_IF_PIPE_HAS_RELEASE =
+ "Failed to check if pipe has release region related resource with consensus group id: {}.";
+ public static final String FAILED_TO_CLEAR_CLOSE_THE_SCHEMA_REGION =
+ "Failed to clear/close the schema region listening queue, because {}. Will wait until "
+ + "success or the region's state machine is stopped.";
+ public static final String FAILED_TO_CLOSE_CONNECTOR_AFTER_FAILED_TO =
+ "Failed to close connector after failed to initialize connector. Ignore this exception.";
+ public static final String FAILED_TO_CLOSE_LISTENING_QUEUE_FOR_SCHEMAREGION =
+ "Failed to close listening queue for SchemaRegion ";
+ public static final String FAILED_TO_CLOSE_SOURCE_AFTER_FAILED_TO =
+ "Failed to close source after failed to initialize source. Ignore this exception.";
+ public static final String FAILED_TO_CONSTRUCT_PIPECONNECTOR_BECAUSE_OF =
+ "Failed to construct PipeConnector, because of ";
+ public static final String FAILED_TO_DECREASE_REFERENCE_COUNT_FOR_EVENT =
+ "Failed to decrease reference count for event {} in PipeRealtimePriorityBlockingQueue";
+ public static final String FAILED_TO_GET_PENDINGQUEUE_NO_SUCH_SUBTASK =
+ "Failed to get PendingQueue. No such subtask: ";
+ public static final String FAILED_TO_GET_PIPE_METAS_WILL_BE =
+ "Failed to get pipe metas, will be synced by configNode later...";
+ public static final String FAILED_TO_GET_PIPE_PLUGIN_JAR_FROM =
+ "Failed to get pipe plugin jar from config node.";
+ public static final String FAILED_TO_GET_PIPE_TASK_META_FROM =
+ "Failed to get pipe task meta from config node. Ignore the exception, because config "
+ + "node may not be ready yet, and meta will be pushed by config node later.";
+ public static final String FAILED_TO_PERSIST_PROGRESS_INDEX_TO_CONFIGNODE =
+ "Failed to persist progress index to configNode, status: {}";
+ public static final String FAILURE_WHEN_REGISTER_PIPE_PLUGIN_SKIP_THIS =
+ "Failure when register pipe plugin {}. Skip this plugin and continue startup.";
+ public static final String PIPECONNECTOR = "PipeConnector: ";
+ public static final String PIPEDATANODETASKBUILDER_FAILED_TO_PARSE_INCLUSION_AND_EXCLUSION =
+ "PipeDataNodeTaskBuilder failed to parse 'inclusion' and 'exclusion' parameters: {}";
+ public static final String PIPEDATANODETASKBUILDER_WHEN_INCLUSION_CONTAINS_DATA_DELETE_REALTIME =
+ "PipeDataNodeTaskBuilder: When 'inclusion' contains 'data.delete', 'realtime-first' is "
+ + "defaulted to 'false' to prevent sync issues after deletion.";
+ public static final String PIPEDATANODETASKBUILDER_WHEN_INCLUSION_INCLUDES_DATA_DELETE_REALTIME =
+ "PipeDataNodeTaskBuilder: When 'inclusion' includes 'data.delete', 'realtime-first' set "
+ + "to 'true' may result in data synchronization issues after deletion.";
+ public static final String PIPEDATANODETASKBUILDER_WHEN_SOURCE_USES_SNAPSHOT_MODEL_REALTIME =
+ "PipeDataNodeTaskBuilder: When source uses snapshot model, 'realtime-first' is defaulted "
+ + "to 'false' to prevent premature halt before transfer completion.";
+ public static final String PIPEDATANODETASKBUILDER_WHEN_SOURCE_USES_SNAPSHOT_MODEL_REALTIME_1 =
+ "PipeDataNodeTaskBuilder: When source uses snapshot model, 'realtime-first' set to "
+ + "'true' may cause prevent premature halt before transfer completion.";
+ public static final String PIPEDATANODETASKBUILDER_WHEN_THE_REALTIME_SYNC_IS_ENABLED =
+ "PipeDataNodeTaskBuilder: When the realtime sync is enabled, not enabling the rate "
+ + "limiter in sending tsfile may introduce delay for realtime sending.";
+ public static final String PIPEDATANODETASKBUILDER_WHEN_THE_REALTIME_SYNC_IS_ENABLED_1 =
+ "PipeDataNodeTaskBuilder: When the realtime sync is enabled, we enable rate limiter in "
+ + "sending tsfile by default to reserve disk and network IO for realtime sending.";
+ public static final String PIPEEVENTCOLLECTOR_THE_EVENT_IS_ALREADY_RELEASED_SKIPPING =
+ "PipeEventCollector: The event {} is already released, skipping it.";
+ public static final String PIPE_CONNECTOR_SUBTASK_WAS_CLOSED_WITHIN_MS =
+ "Pipe: connector subtask {} ({}) was closed within {} ms";
+ public static final String PIPE_META_NOT_FOUND = "Pipe meta not found: ";
+ public static final String PIPE_SINK_SUBTASKS_WITH_ATTRIBUTES_IS_BOUNDED =
+ "Pipe sink subtasks with attributes {} is bounded with sinkExecutor {} and "
+ + "callbackExecutor {}.";
+ public static final String PIPE_SKIPPING_TEMPORARY_TSFILE_WHICH_SHOULDN_T =
+ "Pipe skipping temporary TsFile which shouldn't be transferred: {}";
+ public static final String PULLED_PIPE_META_FROM_CONFIG_NODE_RECOVERING =
+ "Pulled pipe meta from config node: {}, recovering ...";
+ public static final String RECEIVED_PIPE_HEARTBEAT_REQUEST_FROM_CONFIG_NODE =
+ "Received pipe heartbeat request {} from config node.";
+ public static final String REGION_NO_TSFILEINSERTIONEVENTS_TO_REPLACE_FOR_SOURCE =
+ "Region {}: No TsFileInsertionEvents to replace for source files {}";
+ public static final String REGION_REPLACED_TSFILEINSERTIONEVENTS_WITH =
+ "Region {}: Replaced TsFileInsertionEvents {} with {}";
+ public static final String REGISTEREDTASKCOUNT_0 = "registeredTaskCount < 0";
+ public static final String REGISTEREDTASKCOUNT_0_1 = "registeredTaskCount <= 0";
+ public static final String REGISTER_SUBTASK_RUNNINGTASKCOUNT_REGISTEREDTASKCOUNT =
+ "Register subtask {}. runningTaskCount: {}, registeredTaskCount: {}";
+ public static final String REPORT_PIPERUNTIMEEXCEPTION_TO_LOCAL_PIPETASKMETA_EXCEPTION_MESSAGE =
+ "Report PipeRuntimeException to local PipeTaskMeta({}), exception message: {}";
+ public static final String RUNNINGTASKCOUNT_0 = "runningTaskCount < 0";
+ public static final String RUNNINGTASKCOUNT_0_1 = "runningTaskCount <= 0";
+ public static final String SIMPLEPROGRESSINDEXASSIGNER_STARTED_SUCCESSFULLY_ISSIMPLECONSENSUSENABLE_R =
+ "SimpleProgressIndexAssigner started successfully. isSimpleConsensusEnable: {}, "
+ + "rebootTimes: {}";
+ public static final String STARTING_SIMPLEPROGRESSINDEXASSIGNER =
+ "Starting SimpleProgressIndexAssigner ...";
+ public static final String START_PIPE_DN_TASK_SUCCESSFULLY_WITHIN_MS =
+ "Start pipe DN task {} successfully within {} ms";
+ public static final String START_SUBTASK_RUNNINGTASKCOUNT_REGISTEREDTASKCOUNT =
+ "Start subtask {}. runningTaskCount: {}, registeredTaskCount: {}";
+ public static final String STOP_PIPE_DN_TASK_SUCCESSFULLY_WITHIN_MS =
+ "Stop pipe DN task {} successfully within {} ms";
+ public static final String STOP_SUBTASK_RUNNINGTASKCOUNT_REGISTEREDTASKCOUNT =
+ "Stop subtask {}. runningTaskCount: {}, registeredTaskCount: {}";
+ public static final String SUBTASK_IS_CLOSED_IGNORE_EXCEPTION =
+ "subtask {} is closed, ignore exception";
+ public static final String SUBTASK_WORKER_IS_INTERRUPTED = "subtask worker is interrupted";
+ public static final String SUCCESSFULLY_PERSISTED_ALL_PIPE_S_INFO_TO =
+ "Successfully persisted all pipe's info to configNode.";
+ public static final String THE_EXECUTOR_AND_HAS_BEEN_SUCCESSFULLY_SHUTDOWN =
+ "The executor {} and {} has been successfully shutdown.";
+
+ // ===================== EVENT =====================
+
+ public static final String DATABASENAMEFROMDATAREGION_IS_NULL =
+ "databaseNameFromDataRegion is null";
+ public static final String DECREASE_REFERENCE_COUNT_ERROR = "Decrease reference count error.";
+ public static final String DECREASE_REFERENCE_COUNT_FOR_MTREE_SNAPSHOT_OR =
+ "Decrease reference count for mTree snapshot {} or tLog {} or attribute snapshot {} error.";
+ public static final String DECREASE_REFERENCE_COUNT_FOR_TSFILE_ERROR =
+ "Decrease reference count for TsFile {} error.";
+ public static final String DO_NOT_HAS_A_COMPLETE_PAGE_BODY =
+ "do not has a complete page body. Expected:";
+ public static final String ERROR_WHILE_PARSING_TSFILE_INSERTION_EVENT =
+ "Error while parsing tsfile insertion event";
+ public static final String EXCEPTION_OCCURRED_WHEN_DETERMINING_THE_EVENT_TIME =
+ "Exception occurred when determining the event time of "
+ + "PipeInsertNodeTabletInsertionEvent({}) overlaps with the time range: [{}, {}]. "
+ + "Returning true to ensure data integrity.";
+ public static final String FAILED_TO_ALLOCATE_MEMORY_FOR_PARSING_TSFILE =
+ "{}: failed to allocate memory for parsing TsFile {}, tablet event no. {}, retry count "
+ + "is {}, will keep retrying.";
+ public static final String FAILED_TO_BUILD_TABLET = "Failed to build tablet";
+ public static final String FAILED_TO_CHECK_NEXT = "Failed to check next";
+ public static final String FAILED_TO_CLOSE_TSFILEREADER = "Failed to close TsFileReader";
+ public static final String FAILED_TO_CLOSE_TSFILESEQUENCEREADER =
+ "Failed to close TsFileSequenceReader";
+ public static final String FAILED_TO_CREATE_TSFILEINSERTIONDATATABLETITERATOR =
+ "failed to create TsFileInsertionDataTabletIterator";
+ public static final String FAILED_TO_GET_NEXT_TABLET_INSERTION_EVENT =
+ "Failed to get next tablet insertion event.";
+ public static final String FAILED_TO_LOAD_MODIFICATIONS_FROM_TSFILE =
+ "Failed to load modifications from TsFile: ";
+ public static final String FAILED_TO_READ_METADATA_FOR_DEVICEID_MEASUREMENT =
+ "Failed to read metadata for deviceId: {}, measurement: {}, removing";
+ public static final String FAILED_TO_RECORD_PARSE_END_TIME_FOR =
+ "Failed to record parse end time for pipe {}";
+ public static final String FAILED_TO_RECORD_TABLET_METRICS_FOR_PIPE =
+ "Failed to record tablet metrics for pipe {}";
+ public static final String FOUND_NULL_DEVICEID_REMOVING_ENTRY =
+ "Found null deviceId, removing entry";
+ public static final String INITIALIZE_DATA_CONTAINER_ERROR = "Initialize data container error.";
+ public static final String INSERTNODE_HAS_BEEN_RELEASED = "InsertNode has been released";
+ public static final String INSERTROWNODE_IS_PARSED_TO_ZERO_ROWS_ACCORDING =
+ "InsertRowNode({}) is parsed to zero rows according to the pattern({}) and time range "
+ + "[{}, {}], the corresponding source event({}) will be ignored.";
+ public static final String INSERTTABLETNODE_IS_PARSED_TO_ZERO_ROWS_ACCORDING =
+ "InsertTabletNode({}) is parsed to zero rows according to the pattern({}) and time range "
+ + "[{}, {}], the corresponding source event({}) will be ignored.";
+ public static final String INVALID_EVENT_TYPE = "Invalid event type: ";
+ public static final String INVALID_INPUT = "Invalid input: ";
+ public static final String ISGENERATEDBYPIPE_IS_NOT_SUPPORTED =
+ "isGeneratedByPipe() is not supported!";
+ public static final String MAYEVENTPATHSOVERLAPPEDWITHPATTERN_IS_NOT_SUPPORTED =
+ "mayEventPathsOverlappedWithPattern() is not supported!";
+ public static final String MAYEVENTTIMEOVERLAPPEDWITHTIMERANGE_IS_NOT_SUPPORTED =
+ "mayEventTimeOverlappedWithTimeRange() is not supported!";
+ public static final String NO_COMMIT_IDS_FOUND_IN_PIPECOMPACTEDTSFILEINSERTIONEVENT =
+ "No commit IDs found in PipeCompactedTsFileInsertionEvent.";
+ public static final String PIPECOMPACTEDTSFILEINSERTIONEVENT_DOES_NOT_SUPPORT_EQUALSINIOTCONSENSUSV2 =
+ "PipeCompactedTsFileInsertionEvent does not support equalsInIoTConsensusV2.";
+ public static final String PIPECOMPACTEDTSFILEINSERTIONEVENT_DOES_NOT_SUPPORT_GETREBOOTTIMES =
+ "PipeCompactedTsFileInsertionEvent does not support getRebootTimes.";
+ public static final String PIPE_FAILED_TO_GET_DEVICES_FROM_TSFILE =
+ "Pipe {}: failed to get devices from TsFile {}, extract it anyway";
+ public static final String PIPE_SKIPPING_TEMPORARY_TSFILE_S_PARSING_WHICH =
+ "Pipe skipping temporary TsFile's parsing which shouldn't be transferred: {}";
+ public static final String ROW_CAN_NOT_BE_CUSTOMIZED = "Row can not be customized";
+ public static final String SHALLOWCOPYSELFANDBINDPIPETASKMETAFORPROGRESSREPORT_IS_NOT_SUPPORTED =
+ "shallowCopySelfAndBindPipeTaskMetaForProgressReport() is not supported!";
+ public static final String SKIPPING_TEMPORARY_TSFILE_S_PROGRESSINDEX_WILL_REPORT =
+ "Skipping temporary TsFile {}'s progressIndex, will report MinimumProgressIndex";
+ public static final String TABLEPATTERNPARSER_DOES_NOT_SUPPORT_ROW_BY_ROW =
+ "TablePatternParser does not support row by row processing";
+ public static final String TABLEPATTERNPARSER_DOES_NOT_SUPPORT_TABLET_PROCESSING =
+ "TablePatternParser does not support tablet processing";
+ public static final String TABLEPATTERNPARSER_DOES_NOT_SUPPORT_TABLET_PROCESSING_WITH =
+ "TablePatternParser does not support tablet processing with collect";
+ public static final String TABLET_IS_PARSED_TO_ZERO_ROWS_ACCORDING =
+ "Tablet({}) is parsed to zero rows according to the pattern({}) and time range [{}, {}], "
+ + "the corresponding source event({}) will be ignored.";
+ public static final String TABLE_MODEL_TSFILE_PARSING_DOES_NOT_SUPPORT =
+ "Table model tsfile parsing does not support this type of ChunkMeta";
+ public static final String TEMPORARY_TSFILE_DETECTED_WILL_SKIP_ITS_TRANSFER =
+ "Temporary tsFile {} detected, will skip its transfer.";
+ public static final String TSFILE_HAS_INITIALIZED_PIPENAME_CREATION_TIME_PATTERN =
+ "TsFile {} has initialized {}, pipeName: {}, creation time: {}, pattern: {}, startTime: "
+ + "{}, endTime: {}, withMod: {}";
+ public static final String UNCOMPRESS_ERROR_UNCOMPRESS_SIZE =
+ "Uncompress error! uncompress size: ";
+ public static final String UNSUPPORTED = "UnSupported";
+ public static final String UNSUPPORTED_NODE_TYPE = "Unsupported node type ";
+ public static final String WAIT_FOR_MEMORY_ENOUGH_FOR_PARSING_FOR =
+ "Wait for memory enough for parsing {} for {} seconds.";
+
+ // ===================== PROCESSOR ===================== (sampling processors, two-stage aggregation and combiner messages)
+
+ public static final String ABSTRACTSAMETYPENUMERICOPERATOR_DOES_NOT_SUPPORT_BINARY_INPUT =
+ "AbstractSameTypeNumericOperator does not support binary input";
+ public static final String ABSTRACTSAMETYPENUMERICOPERATOR_DOES_NOT_SUPPORT_BOOLEAN_INPUT =
+ "AbstractSameTypeNumericOperator does not support boolean input";
+ public static final String ABSTRACTSAMETYPENUMERICOPERATOR_DOES_NOT_SUPPORT_DATE_INPUT =
+ "AbstractSameTypeNumericOperator does not support date input";
+ public static final String ABSTRACTSAMETYPENUMERICOPERATOR_DOES_NOT_SUPPORT_STRING_INPUT =
+ "AbstractSameTypeNumericOperator does not support string input";
+ public static final String CHANGINGVALUESAMPLINGPROCESSOR_IN_IS_INITIALIZED_WITH =
+ "ChangingValueSamplingProcessor in {} is initialized with {}: {}, {}: {}, {}: {}.";
+ public static final String CLEAN_OUTDATED_INCOMPLETE_COMBINER_PIPENAME_CREATIONTIME_COMBINEID =
+ "Clean outdated incomplete combiner: pipeName={}, creationTime={}, combineId={}";
+ public static final String COMBINEHANDLER_NOT_FOUND_FOR_PIPEID =
+ "CombineHandler not found for pipeId = ";
+ public static final String COMBINER_COMBINE_COMPLETED_REGIONID_STATE_RECEIVEDREGIONIDSET_EX =
+ "Combiner combine completed: regionId: {}, state: {}, receivedRegionIdSet: {}, "
+ + "expectedRegionIdSet: {}";
+ public static final String COMBINER_COMBINE_REGIONID_STATE_RECEIVEDREGIONIDSET_EXPECTEDREGI =
+ "Combiner combine: regionId: {}, state: {}, receivedRegionIdSet: {}, expectedRegionIdSet: {}";
+ public static final String DATA_NODES_ENDPOINTS_FOR_TWO_STAGE_AGGREGATION =
+ "Data nodes' endpoints for two-stage aggregation: {}";
+ public static final String DIFFERENT_DATA_TYPE_ENCOUNTERED_IN_ONE_WINDOW =
+ "Different data type encountered in one window, will purge. Previous type: {}, now type: {}";
+ public static final String ENCOUNTERED_EXCEPTION_WHEN_DESERIALIZING_FROM_PIPETASKMETA =
+ "Encountered exception when deserializing from PipeTaskMeta";
+ public static final String END_POINTS_FOR_TWO_STAGE_AGGREGATION_PIPE =
+ "End points for two-stage aggregation pipe (pipeName={}, creationTime={}) were updated to {}";
+ public static final String ERROR_OCCURRED_WHEN_CLOSING_COMBINEHANDLER_ID =
+ "Error occurred when closing CombineHandler(id = {})";
+ public static final String ERROR_OCCURS_WHEN_RECEIVING_REQUEST =
+ "Error occurs when receiving request: {}.";
+ public static final String FAILED_TO_CLOSE_IOTDBSYNCCLIENT = "Failed to close IoTDBSyncClient";
+ public static final String FAILED_TO_CLOSE_OLD_IOTDBSYNCCLIENT =
+ "Failed to close old IoTDBSyncClient";
+ public static final String FAILED_TO_COMBINE_COUNT = "Failed to combine count: ";
+ public static final String FAILED_TO_CONSTRUCT_IOTDBSYNCCLIENT =
+ "Failed to construct IoTDBSyncClient";
+ public static final String FAILED_TO_FETCH_COMBINE_RESULT = "Failed to fetch combine result: ";
+ public static final String FAILED_TO_FETCH_DATA_NODES = "Failed to fetch data nodes";
+ public static final String FAILED_TO_FETCH_DATA_REGION_IDS = "Failed to fetch data region ids";
+ public static final String FAILED_TO_RECONSTRUCT_IOTDBSYNCCLIENT_AFTER_FAILURE_TO =
+ "Failed to reconstruct IoTDBSyncClient {} after failure to send request {} (watermark = {})";
+ public static final String FAILED_TO_SEND_REQUEST_WATERMARK_TO =
+ "Failed to send request {} (watermark = {}) to {}";
+ public static final String FAILED_TO_TRIGGER_COMBINE_WATERMARK_COUNT_PROGRESSINDEX =
+ "Failed to trigger combine. watermark={}, count={}, progressIndex={}";
+ public static final String FAILURE_OCCURRED_WHEN_TRYING_TO_COMMIT_PROGRESS =
+ "Failure occurred when trying to commit progress index. timestamp={}, count={}, "
+ + "progressIndex={}";
+ public static final String FETCHED_DATA_REGION_IDS_AT = "Fetched data region ids {} at {}";
+ public static final String FRACTIONPOWEREDSUMOPERATOR_DOES_NOT_SUPPORT_BINARY_INPUT =
+ "FractionPoweredSumOperator does not support binary input";
+ public static final String FRACTIONPOWEREDSUMOPERATOR_DOES_NOT_SUPPORT_BOOLEAN_INPUT =
+ "FractionPoweredSumOperator does not support boolean input";
+ public static final String FRACTIONPOWEREDSUMOPERATOR_DOES_NOT_SUPPORT_DATE_INPUT =
+ "FractionPoweredSumOperator does not support date input";
+ public static final String FRACTIONPOWEREDSUMOPERATOR_DOES_NOT_SUPPORT_STRING_INPUT =
+ "FractionPoweredSumOperator does not support string input";
+ public static final String GLOBAL_COUNT_IS_LESS_THAN_THE_LAST =
+ "Global count is less than the last collected count: timestamp={}, count={}";
+ public static final String IGNORED_TABLETINSERTIONEVENT_IS_NOT_AN_INSTANCE_OF =
+ "Ignored TabletInsertionEvent is not an instance of PipeInsertNodeTabletInsertionEvent "
+ + "or PipeRawTabletInsertionEvent: {}";
+ public static final String IGNORED_TSFILEINSERTIONEVENT_IS_EMPTY =
+ "Ignored TsFileInsertionEvent is empty: {}";
+ public static final String IGNORED_TSFILEINSERTIONEVENT_IS_NOT_AN_INSTANCE_OF =
+ "Ignored TsFileInsertionEvent is not an instance of PipeTsFileInsertionEvent: {}";
+ public static final String ILLEGAL_OUTPUT_SERIES_PATH = "Illegal output series path: ";
+ public static final String NO_DATA_NODES_ENDPOINTS_FETCHED = "No data nodes' endpoints fetched";
+ public static final String NO_EXPECTED_REGION_ID_SET_FETCHED =
+ "No expected region id set fetched";
+ public static final String PARTIALPATHLASTOBJECTCACHE_ALLOCATEDMEMORYBLOCK_HAS_EXPANDED_FROM_TO =
+ "PartialPathLastObjectCache.allocatedMemoryBlock has expanded from {} to {}.";
+ public static final String PARTIALPATHLASTOBJECTCACHE_ALLOCATEDMEMORYBLOCK_HAS_SHRUNK_FROM_TO =
+ "PartialPathLastObjectCache.allocatedMemoryBlock has shrunk from {} to {}.";
+ public static final String SENDING_REQUEST_WATERMARK_TO =
+ "Sending request {} (watermark = {}) to {}";
+ public static final String SWINGINGDOORTRENDINGSAMPLINGPROCESSOR_IN_IS_INITIALIZED_WITH =
+ "SwingingDoorTrendingSamplingProcessor in {} is initialized with {}: {}, {}: {}, {}: {}.";
+ public static final String THE_ABSTRACT_FORMAL_PROCESSOR_DOES_NOT_SUPPORT =
+ "The abstract formal processor does not support process events";
+ public static final String TUMBLINGTIMESAMPLINGPROCESSOR_IN_IS_INITIALIZED_WITH_S =
+ "TumblingTimeSamplingProcessor in {} is initialized with {}: {}s, {}: {}, {}: {}.";
+ public static final String TWOSTAGECOUNTPROCESSOR_CUSTOMIZED_BY_THREAD_PIPENAME_CREATIONTIME_RE =
+ "TwoStageCountProcessor customized by thread {}: pipeName={}, creationTime={}, "
+ + "regionId={}, outputSeries={}, localCommitProgressIndex={}, localCount={}";
+ public static final String TWO_STAGE_AGGREGATE_PIPE_PIPENAME_CREATIONTIME_RELATED =
+ "Two stage aggregate pipe (pipeName={}, creationTime={}) related region ids {}";
+ public static final String TWO_STAGE_AGGREGATE_RECEIVER_IS_EXITING =
+ "Two stage aggregate receiver is exiting.";
+ public static final String TWO_STAGE_COMBINE_REGION_ID_COMBINE_ID =
+ "Two stage combine (region id = {}, combine id = {}) incomplete: timestamp={}, count={}, "
+ + "progressIndex={}";
+ public static final String TWO_STAGE_COMBINE_REGION_ID_COMBINE_ID_1 =
+ "Two stage combine (region id = {}, combine id = {}) outdated: timestamp={}, count={}, "
+ + "progressIndex={}";
+ public static final String TWO_STAGE_COMBINE_REGION_ID_COMBINE_ID_2 =
+ "Two stage combine (region id = {}, combine id = {}) success: timestamp={}, count={}, "
+ + "progressIndex={}, committed progressIndex={}";
+ public static final String UNEXPECTED_STATE_CLASS = "Unexpected state class: ";
+ public static final String UNKNOWN_COMBINE_RESULT_TYPE = "Unknown combine result type: ";
+ public static final String UNKNOWN_REQUEST_TYPE = "Unknown request type {}: {}.";
+
+ // ===================== SOURCE ===================== (historical/realtime data-region and schema-region extraction messages)
+
+ public static final String ALL_DATA_IN_TSFILEEPOCH_WAS_EXTRACTED =
+ "All data in TsFileEpoch {} was extracted";
+ public static final String BUFFERSIZE_MUST_BE_A_POWER_OF_2 = "bufferSize must be a power of 2";
+ public static final String BUFFERSIZE_MUST_NOT_BE_LESS_THAN_1 =
+ "bufferSize must not be less than 1";
+ public static final String CAPTURE_TREE_AND_CAPTURE_TABLE_CAN_NOT =
+ "capture.tree and capture.table can not both be specified as false";
+ public static final String DATABASE_NAME_IS_NULL_WHEN_MATCHING_SOURCES =
+ "Database name is null when matching sources for table model event.";
+ public static final String DATA_REGION_INJECTED_WATERMARK_EVENT_WITH_TIMESTAMP =
+ "Data region {}: Injected watermark event with timestamp: {}";
+ public static final String DISCARD_TABLET_EVENT_BECAUSE_IT_IS_NOT =
+ "Discard tablet event {} because it is not reliable anymore. Change the state of "
+ + "TsFileEpoch to USING_BOTH.";
+ public static final String DISRUPTOR_ALREADY_STARTED = "Disruptor already started";
+ public static final String DISRUPTOR_SHUTDOWN_COMPLETED = "Disruptor shutdown completed";
+ public static final String DISRUPTOR_STARTED_WITH_BUFFER_SIZE =
+ "Disruptor started with buffer size: {}";
+ public static final String EXCEPTION_DURING_ONSHUTDOWN = "Exception during onShutdown()";
+ public static final String EXCEPTION_DURING_ONSTART = "Exception during onStart()";
+ public static final String EXCEPTION_ENCOUNTERED_WHEN_TRIGGERING_SCHEMA_REGION_SNAPSHOT =
+ "Exception encountered when triggering schema region snapshot.";
+ public static final String EXCEPTION_PROCESSING = "Exception processing: {} {}";
+ public static final String FAILED_TO_LOAD_SNAPSHOT = "Failed to load snapshot {}";
+ public static final String FAILED_TO_LOAD_SNAPSHOT_FROM_BYTEBUFFER =
+ "Failed to load snapshot from byteBuffer {}.";
+ public static final String FAILED_TO_START_SOURCES = "failed to start sources.";
+ public static final String HEARTBEAT_EVENT_CAN_NOT_BE_SUPPLIED_BECAUSE =
+ "Heartbeat Event {} can not be supplied because the reference count can not be increased";
+ public static final String INTERRUPTED_WAITING_FOR_PROCESSOR_TO_STOP =
+ "Interrupted waiting for processor to stop";
+ public static final String IOTDBSCHEMAREGIONSOURCE_DOES_NOT_SUPPORT_TRANSFERRING_EVENTS_UNDER =
+ "IoTDBSchemaRegionSource does not support transferring events under simple consensus";
+ public static final String NOT_HAS_PRIVILEGE_TO_TRANSFER_EVENT =
+ "Not has privilege to transfer event: ";
+ public static final String NOT_HAS_PRIVILEGE_TO_TRANSFER_PLAN =
+ "Not has privilege to transfer plan: ";
+ public static final String NO_EVENT_HANDLER_CONFIGURED = "No event handler configured";
+ public static final String N_MUST_BE_0 = "n must be > 0";
+ public static final String PIPEREALTIMEDATAREGIONEXTRACTOR_OBSERVED_DATA_REGION_TIME_PARTITION_GROWT =
+ "PipeRealtimeDataRegionExtractor({}) observed data region {} time partition growth, "
+ + "recording time partition id bound: {}.";
+ public static final String PIPE_AND_IS_NOT_SET_USE_HYBRID =
+ "Pipe: '{}' ('{}') and '{}' ('{}') is not set, use hybrid mode by default.";
+ public static final String PIPE_ASSIGNER_ON_DATA_REGION_SHUTDOWN_INTERNAL =
+ "Pipe: Assigner on data region {} shutdown internal disruptor within {} ms";
+ public static final String PIPE_FAILED_TO_GET_DEVICES_FROM_TSFILE_1 =
+ "Pipe {}@{}: failed to get devices from TsFile {}, extract it anyway";
+ public static final String PIPE_FAILED_TO_INCREASE_REFERENCE_COUNT_FOR =
+ "Pipe {}@{}: failed to increase reference count for historical deletion event {}, will "
+ + "discard it";
+ public static final String PIPE_FAILED_TO_INCREASE_REFERENCE_COUNT_FOR_1 =
+ "Pipe {}@{}: failed to increase reference count for historical tsfile event {}, will "
+ + "discard it";
+ public static final String PIPE_FAILED_TO_INCREASE_REFERENCE_COUNT_FOR_2 =
+ "Pipe {}@{}: failed to increase reference count for terminate event, will resend it";
+ public static final String PIPE_FAILED_TO_PIN_TSFILERESOURCE =
+ "Pipe: failed to pin TsFileResource {}";
+ public static final String PIPE_FAILED_TO_START_TO_EXTRACT_HISTORICAL =
+ "Pipe {}@{}: failed to start to extract historical TsFile, storage engine is not ready. "
+ + "Will retry later.";
+ public static final String PIPE_FAILED_TO_UNPIN_SKIPPED_HISTORICAL_TSFILERESOURCE =
+ "Pipe {}@{}: failed to unpin skipped historical TsFileResource, original path: {}";
+ public static final String PIPE_FAILED_TO_UNPIN_TSFILERESOURCE_AFTER_CREATING =
+ "Pipe {}@{}: failed to unpin TsFileResource after creating event, original path: {}";
+ public static final String PIPE_FAILED_TO_UNPIN_TSFILERESOURCE_AFTER_DROPPING =
+ "Pipe {}@{}: failed to unpin TsFileResource after dropping pipe, original path: {}";
+ public static final String PIPE_FINISH_TO_EXTRACT_DELETIONS_EXTRACT_DELETIONS =
+ "Pipe {}@{}: finish to extract deletions, extract deletions count {}/{}, took {} ms";
+ public static final String PIPE_FINISH_TO_EXTRACT_HISTORICAL_TSFILE_EXTRACTED =
+ "Pipe {}@{}: finish to extract historical TsFile, extracted sequence file count {}/{}, "
+ + "extracted unsequence file count {}/{}, extracted file count {}/{}, took {} ms";
+ public static final String PIPE_FINISH_TO_SORT_ALL_EXTRACTED_RESOURCES =
+ "Pipe {}@{}: finish to sort all extracted resources, took {} ms";
+ public static final String PIPE_HISTORICAL_DATA_EXTRACTION_TIME_RANGE_START =
+ "Pipe {}@{}: historical data extraction time range, start time {}({}), end time {}({}), "
+ + "sloppy pattern {}, sloppy time range {}, should transfer mod file {}, username: {}, "
+ + "skip if no privileges: {}, is forwarding pipe requests: {}";
+ public static final String PIPE_IS_SET_TO_FALSE_USE_HEARTBEAT =
+ "Pipe: '{}' ('{}') is set to false, use heartbeat realtime source.";
+ public static final String PIPE_ON_DATA_REGION_SKIP_COMMIT_OF =
+ "Pipe {} on data region {} skip commit of event {} because it was flushed prematurely.";
+ public static final String PIPE_REALTIME_DATA_REGION_SOURCE_IS_INITIALIZED =
+ "Pipe {}@{}: realtime data region source is initialized with parameters: {}.";
+ public static final String PIPE_RESOURCE_MEETS_MAYTSFILECONTAINUNPROCESSEDDATA_CONDITION_EXTRACT =
+ "Pipe {}@{}: resource {} meets mayTsFileContainUnprocessedData condition, extractor "
+ + "progressIndex: {}, resource ProgressIndex: {}";
+ public static final String PIPE_SET_WATERMARK_INJECTOR_WITH_INTERVAL_MS =
+ "Pipe {}@{}: Set watermark injector with interval {} ms.";
+ public static final String PIPE_SKIP_HISTORICAL_TSFILE_BECAUSE_REALTIME_SOURCE =
+ "Pipe {}@{}: skip historical tsfile {} because realtime source in current task {} has "
+ + "already captured it.";
+ public static final String PIPE_SNAPSHOT_MODE_IS_ENABLED_USE_HEARTBEAT =
+ "Pipe: snapshot mode is enabled, use heartbeat realtime source.";
+ public static final String PIPE_STARTED_HISTORICAL_SOURCE_AND_REALTIME_SOURCE =
+ "Pipe {}@{}: Started historical source {} and realtime source {} successfully within {} ms.";
+ public static final String PIPE_STARTING_HISTORICAL_SOURCE_AND_REALTIME_SOURCE =
+ "Pipe {}@{}: Starting historical source {} and realtime source {}.";
+ public static final String PIPE_START_HISTORICAL_SOURCE_AND_REALTIME_SOURCE =
+ "Pipe {}@{}: Start historical source {} and realtime source {} error.";
+ public static final String PIPE_START_TO_EXTRACT_DELETIONS =
+ "Pipe {}@{}: start to extract deletions";
+ public static final String PIPE_START_TO_EXTRACT_HISTORICAL_TSFILE_ORIGINAL =
+ "Pipe {}@{}: start to extract historical TsFile, original sequence file count {}, "
+ + "original unSequence file count {}, start progress index {}";
+ public static final String PIPE_START_TO_FLUSH_DATA_REGION =
+ "Pipe {}@{}: start to flush data region";
+ public static final String PIPE_START_TO_SORT_ALL_EXTRACTED_RESOURCES =
+ "Pipe {}@{}: start to sort all extracted resources";
+ public static final String PIPE_TASK_CANNOTUSETABLETANYMORE_FOR_TSFILE_THE_MEMORY =
+ "Pipe task {}@{} canNotUseTabletAnyMore for tsFile {}: The memory usage of the insert "
+ + "node {} has reached the dangerous threshold of single pipe {}, event count: {}";
+ public static final String PIPE_UNEXPECTED_PROGRESSINDEX_TYPE_FALLBACK_TO_ORIGIN =
+ "Pipe {}@{}: unexpected ProgressIndex type {}, fallback to origin {}.";
+ public static final String PIPE_UNSUPPORTED_SOURCE_REALTIME_MODE_CREATE_A =
+ "Pipe: Unsupported source realtime mode: {}, create a hybrid source.";
+ public static final String PROCESSOR_INTERRUPTED = "Processor interrupted";
+ public static final String PROCESSOR_STOPPED = "Processor stopped";
+ public static final String SET_FOR_HISTORICAL_DELETION_EVENT =
+ "[{}]Set {} for historical deletion event {}";
+ public static final String SET_FOR_HISTORICAL_EVENT = "[{}]Set {} for historical event {}";
+ public static final String SET_FOR_REALTIME_EVENT = "[{}]Set {} for realtime event {}";
+ public static final String SOURCES_FILTERED_BY_DATABASE_AND_TABLE_IS =
+ "Sources filtered by database and table is null when matching sources for table model event.";
+ public static final String SOURCES_FILTERED_BY_DEVICE_IS_NULL_WHEN =
+ "Sources filtered by device is null when matching sources for tree model event.";
+ public static final String TAKE_SNAPSHOT_ERROR = "Take snapshot error: {}";
+ public static final String THE_ASSIGNER_QUEUE_CONTENT_HAS_EXCEEDED_HALF =
+ "The assigner queue content has exceeded half, it may be stuck and may block insertion. "
+ + "regionId: {}, capacity: {}, bufferSize: {}";
+ public static final String THE_PIPE_CANNOT_EXTRACT_TABLE_MODEL_DATA =
+ "The pipe cannot extract table model data when sql dialect is set to tree.";
+ public static final String THE_PIPE_CANNOT_EXTRACT_TREE_MODEL_DATA =
+ "The pipe cannot extract tree model data when sql dialect is set to table.";
+ public static final String THE_PIPE_CANNOT_TRANSFER_DATA_WHEN_DATA =
+ "The pipe cannot transfer data when data region is using ratis consensus.";
+ public static final String THE_REFERENCE_COUNT_OF_THE_EVENT_CANNOT =
+ "The reference count of the event {} cannot be increased, skipping it.";
+ public static final String THE_REFERENCE_COUNT_OF_THE_REALTIME_EVENT =
+ "The reference count of the realtime event {} cannot be increased, skipping it.";
+ public static final String TIMED_OUT_WAITING_FOR_PROCESSOR_TO_STOP =
+ "Timed out waiting for processor to stop";
+ public static final String TSFILEEPOCH_NOT_FOUND_FOR_TSFILE_CREATING_A =
+ "TsFileEpoch not found for TsFile {}, creating a new one";
+ public static final String WHEN_IS_SET_TO_FALSE_SPECIFYING_AND =
+ "When '{}' ('{}') is set to false, specifying {} and {} is invalid.";
+ public static final String WHEN_IS_SET_TO_TRUE_SPECIFYING_AND =
+ "When '{}' ('{}', '{}', '{}') is set to true, specifying {} and {} is invalid.";
+ public static final String WHEN_OR_IS_SPECIFIED_SPECIFYING_AND_IS =
+ "When {}, {}, {} or {} is specified, specifying {}, {}, {}, {}, {} and {} is invalid.";
+
+ // ===================== SINK ===================== (connector transfer, tsfile batching, client-pool and OPC UA/DA messages)
+
+ public static final String ACQUIRE_IOPCITEMMGT_SUCCESSFULLY_INTERFACE_ADDRESS =
+ "Acquire IOPCItemMgt successfully! Interface address: {}";
+ public static final String ACQUIRE_IOPCSYNCIO_SUCCESSFULLY_INTERFACE_ADDRESS =
+ "Acquire IOPCSyncIO successfully! Interface address: {}";
+ public static final String ADDED_EVENT_TO_RETRY_QUEUE = "Added event {} to retry queue.";
+ public static final String BATCH_ID_CREATE_BATCH_DIR_SUCCESSFULLY_BATCH =
+ "Batch id = {}: Create batch dir successfully, batch file dir = {}.";
+ public static final String BATCH_ID_DELETE_THE_TSFILE_AFTER_FAILED =
+ "Batch id = {}: {} delete the tsfile {} after failed to write tablets into {}. {}";
+ public static final String BATCH_ID_FAILED_TO_BUILD_THE_TABLE =
+ "Batch id = {}: Failed to build the table model TSFile. Please check whether the written "
+ + "Tablet has time overlap and whether the Table Schema is correct.";
+ public static final String BATCH_ID_FAILED_TO_CLOSE_THE_TSFILE =
+ "Batch id = {}: Failed to close the tsfile {} after failed to write tablets into, because {}";
+ public static final String BATCH_ID_FAILED_TO_CLOSE_THE_TSFILE_1 =
+ "Batch id = {}: Failed to close the tsfile {} when trying to close batch, because {}";
+ public static final String BATCH_ID_FAILED_TO_CREATE_BATCH_FILE =
+ "Batch id = {}: Failed to create batch file dir {}.";
+ public static final String BATCH_ID_FAILED_TO_DELETE_THE_TSFILE =
+ "Batch id = {}: Failed to delete the tsfile {} when trying to close batch, because {}";
+ public static final String BATCH_ID_FAILED_TO_WRITE_TABLETS_INTO =
+ "Batch id = {}: Failed to write tablets into tsfile, because {}";
+ public static final String BATCH_ID_SEAL_TSFILE_SUCCESSFULLY =
+ "Batch id = {}: Seal tsfile {} successfully.";
+ public static final String BATCH_ID_UNSUPPORTED_EVENT_TYPE_WHEN_CONSTRUCTING =
+ "Batch id = {}: Unsupported event {} type {} when constructing tsfile batch";
+ public static final String CANNOT_INCREASE_REFERENCE_COUNT_FOR_EVENT_IGNORE =
+ "Cannot increase reference count for event: {}, ignore it in batch.";
+ public static final String CANNOT_SERIALIZE_BOTH_TABLET_AND_STATEMENT_ARE =
+ "Cannot serialize: both tablet and statement are null";
+ public static final String CERTIFICATE_DIRECTORY_IS_PLEASE_MOVE_CERTIFICATES_FROM =
+ "Certificate directory is: {}, Please move certificates from the reject dir to the "
+ + "trusted directory to allow encrypted access";
+ public static final String CLIENT_HAS_BEEN_RETURNED_TO_THE_POOL =
+ "Client has been returned to the pool. Current handler status is {}. Will not transfer {}.";
+ public static final String CLOSED_ASYNCPIPEDATATRANSFERSERVICECLIENTMANAGER_FOR_RECEIVER_ATTRIBUTES =
+ "Closed AsyncPipeDataTransferServiceClientManager for receiver attributes: {}";
+ public static final String CREATE_GROUP_SUCCESSFULLY_SERVER_HANDLE_UPDATE_RATE =
+ "Create group successfully! Server handle: {}, update rate: {} ms";
+ public static final String DELETENODETRANSFER_NO_EVENT_SUCCESSFULLY_PROCESSED =
+ "DeleteNodeTransfer: no.{} event successfully processed!";
+ public static final String DESERIALIZE_PIPEDATA_ERROR_BECAUSE_UNKNOWN_TYPE =
+ "Deserialize PipeData error because Unknown type ";
+ public static final String DESERIALIZE_PIPEDATA_ERROR_BECAUSE_UNKNOWN_TYPE_1 =
+ "Deserialize PipeData error because Unknown type {}.";
+ public static final String ERROR_GETTING_OPC_CLIENT = "Error getting opc client: ";
+ public static final String ERROR_PROGID_IS_INVALID_OR_UNREGISTERED_HRESULT =
+ "Error: ProgID is invalid or unregistered, (HRESULT=0x";
+ public static final String ERROR_RUNNING_OPC_CLIENT = "Error running opc client: ";
+ public static final String EXCEPTION_OCCURRED_WHEN_PIPETABLEMODELTSFILEBUILDERV2_WRITING_TABLETS_TO =
+ "Exception occurred when PipeTableModelTsFileBuilderV2 writing tablets to tsfile, use "
+ + "fallback tsfile builder: {}";
+ public static final String EXCEPTION_OCCURRED_WHEN_PIPETREEMODELTSFILEBUILDERV2_WRITING_TABLETS_TO =
+ "Exception occurred when PipeTreeModelTsFileBuilderV2 writing tablets to tsfile, use "
+ + "fallback tsfile builder: {}";
+ public static final String EXECUTE_STATEMENT_TO_DATABASE_SKIP_BECAUSE_NO =
+ "Execute statement {} to database {}, skip because no permission.";
+ public static final String FAILED_TO_ACQUIRE_IOPCITEMMGT_ERROR_CODE_0X =
+ "Failed to acquire IOPCItemMgt, error code: 0x";
+ public static final String FAILED_TO_ACQUIRE_IOPCSYNCIO_ERROR_CODE_0X =
+ "Failed to acquire IOPCSyncIO, error code: 0x";
+ public static final String FAILED_TO_ADD_ITEM = "Failed to add item ";
+ public static final String FAILED_TO_ADD_ITEM_WIN_ERROR_CODE =
+ "Failed to add item, win error code: 0x";
+ public static final String FAILED_TO_ADJUST_TIMEOUT_WHEN_FAILED_TO =
+ "Failed to adjust timeout when failed to transfer file.";
+ public static final String FAILED_TO_BORROW_CLIENT_FOR_CACHED_LEADER =
+ "failed to borrow client {}:{} for cached leader.";
+ public static final String FAILED_TO_BUILD_AND_STARTUP_OPCUASERVER =
+ "Failed to build and startup OpcUaServer";
+ public static final String FAILED_TO_CLOSE_ASYNCPIPEDATATRANSFERSERVICECLIENTMANAGER_FOR_RECEIVER_ATTRIBUTE =
+ "Failed to close AsyncPipeDataTransferServiceClientManager for receiver attributes: {}";
+ public static final String FAILED_TO_CLOSE_CLIENT_AFTER_HANDSHAKE_FAILURE =
+ "Failed to close client {}:{} after handshake failure when the manager is closed.";
+ public static final String FAILED_TO_CLOSE_CLIENT_MANAGER = "Failed to close client manager.";
+ public static final String FAILED_TO_CLOSE_FILE_READER_OR_DELETE =
+ "Failed to close file reader or delete tsFile when failed to transfer file.";
+ public static final String FAILED_TO_CLOSE_FILE_READER_OR_DELETE_1 =
+ "Failed to close file reader or delete tsFile when successfully transferred file.";
+ public static final String FAILED_TO_CLOSE_FILE_READER_WHEN_SUCCESSFULLY =
+ "Failed to close file reader when successfully transferred mod file.";
+ public static final String FAILED_TO_CLOSE_OR_INVALIDATE_CLIENT_WHEN =
+ "Failed to close or invalidate client when connector is closed. Client: {}, Exception: {}";
+ // --- Pipe connector / OPC transfer failure messages ---
+ // SLF4J-style '{}' placeholders are filled in at the log call site. Several
+ // messages deliberately end with a trailing space or "0x" because the caller
+ // appends a status or hex error code verbatim; do not strip that trailing text.
+ // Wording (including spacing quirks such as "group,error") is presumably
+ // extracted verbatim from the original inline strings — do not reword here
+ // without updating the call sites that match on these messages.
+ public static final String FAILED_TO_CLOSE_TRUSTLISTMANAGER_BECAUSE =
+ "Failed to close trustListManager, because {}.";
+ public static final String FAILED_TO_CONNECT_TO_SERVER_ERROR_CODE =
+ "Failed to connect to server, error code: 0x";
+ public static final String FAILED_TO_CONVERT_STATEMENT_TO_TABLET =
+ "Failed to convert statement to tablet.";
+ public static final String FAILED_TO_CONVERT_STATEMENT_TO_TABLET_FOR =
+ "Failed to convert statement to tablet for serialization";
+ public static final String FAILED_TO_CREATE_GROUP_ERROR_CODE_0X =
+ "Failed to create group,error code: 0x";
+ public static final String FAILED_TO_CREATE_NODES_AFTER_TRANSFER_DATA =
+ "Failed to create nodes after transfer data value, creation status: ";
+ public static final String FAILED_TO_DELETE_BATCH_FILE_THIS_FILE =
+ "Failed to delete batch file {}, this file should be deleted manually later";
+ public static final String FAILED_TO_GET_THE_SIZE_OF_PIPETRANSFERBATCHREQBUILDER =
+ "Failed to get the size of PipeTransferBatchReqBuilder, return 0. Exception: {}";
+ public static final String FAILED_TO_HANDSHAKE = "Failed to handshake.";
+ public static final String FAILED_TO_LOG_ERROR_WHEN_FAILED_TO =
+ "Failed to log error when failed to transfer file.";
+ public static final String FAILED_TO_PUSH_VALUE_CHANGE_TO_CLIENT =
+ "Failed to push value change to client, nodeId={}";
+ public static final String FAILED_TO_SEND_INITIAL_VALUE_TO_NEW =
+ "Failed to send initial value to new subscription, nodeId={}";
+ public static final String FAILED_TO_SERIALIZE_PROGRESS_INDEX =
+ "Failed to serialize progress index {}";
+ public static final String FAILED_TO_SHUTDOWN_EXECUTOR = "Failed to shutdown executor {}.";
+ public static final String FAILED_TO_TRANSFER_DATAVALUE = "Failed to transfer dataValue";
+ public static final String FAILED_TO_TRANSFER_DATAVALUE_AFTER_SUCCESSFULLY_CREATED =
+ "Failed to transfer dataValue after successfully created nodes";
+ public static final String FAILED_TO_TRANSFER_PIPEDELETENODEEVENT_COMMITTER_KEY_REPLICATE =
+ "Failed to transfer PipeDeleteNodeEvent {} (committer key={}, replicate index={}).";
+ public static final String FAILED_TO_TRANSFER_TABLETINSERTIONEVENT_COMMITTER_KEY_REPLICATE =
+ "Failed to transfer TabletInsertionEvent {} (committer key={}, replicate index={}).";
+ public static final String FAILED_TO_TRANSFER_TSFILE_BATCH =
+ "Failed to transfer tsfile batch ({}).";
+ public static final String FAILED_TO_TRANSFER_TSFILE_EVENT_ASYNCHRONOUSLY =
+ "Failed to transfer tsfile event {} asynchronously.";
+ public static final String FAILED_TO_UPDATE_LEADER_CACHE_FOR_DEVICE =
+ "Failed to update leader cache for device {} with endpoint {}:{}.";
+ public static final String FAILED_TO_WRITE = "Failed to write ";
+ public static final String FAILED_TO_WRITE_WIN_ERROR_CODE_0X =
+ "Failed to write, win error code: 0x";
+ public static final String GENERATE_STATEMENT_FROM_TABLET_ERROR =
+ "Generate Statement from tablet {} error.";
+ public static final String GOT_AN_ERROR_FROM = "Got an error \\\"{}\\\" from {}:{}.";
+ public static final String GOT_AN_ERROR_FROM_AN_UNKNOWN_CLIENT =
+ "Got an error \\\"{}\\\" from an unknown client.";
+ // --- Connector status / IoTConsensusV2 / websocket / OPC messages ---
+ // Messages are presumably extracted verbatim from the original inline log and
+ // exception strings, including typos ("UnSupported", "closed!The") and spacing
+ // quirks; do not reword here without updating the matching call sites.
+ // A numeric suffix (_1, _2, ...) distinguishes near-identical messages that
+ // were extracted from different call sites.
+ public static final String HANDSHAKE_SUCCESSFULLY_WITH_RECEIVER =
+ "Handshake successfully with receiver {}:{}.";
+ public static final String ILLEGAL_STATE_WHEN_RETURN_THE_CLIENT_TO =
+ "Illegal state when return the client to object pool, maybe the pool is already cleared. "
+ + "Will ignore.";
+ public static final String INSERTNODETRANSFER_NO_EVENT_SUCCESSFULLY_PROCESSED =
+ "InsertNodeTransfer: no.{} event successfully processed!";
+ public static final String INTERRUPTED_WHILE_WAITING_FOR_HANDSHAKE_RESPONSE =
+ "Interrupted while waiting for handshake response.";
+ public static final String IOTCONSENSUSV2ASYNCCONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT =
+ "IoTConsensusV2AsyncConnector does not support transferring generic event: {}.";
+ public static final String IOTCONSENSUSV2ASYNCCONNECTOR_DOES_NOT_SUPPORT_TRANSFER_GENERIC_EVENT =
+ "IoTConsensusV2AsyncConnector does not support transfer generic event: {}.";
+ public static final String IOTCONSENSUSV2ASYNCCONNECTOR_ONLY_SUPPORT_PIPETSFILEINSERTIONEVENT_CURRENT_EVEN =
+ "IoTConsensusV2AsyncConnector only support PipeTsFileInsertionEvent. Current event: {}.";
+ public static final String IOTCONSENSUSV2CONNECTOR_TRANSFERBUFFER_QUEUE_OFFER_IS_INTERRUPTED =
+ "IoTConsensusV2Connector transferBuffer queue offer is interrupted.";
+ public static final String IOTCONSENSUSV2TRANSFERBATCHREQBUILDER_THE_MAX_BATCH_SIZE_IS_ADJUSTED =
+ "IoTConsensusV2TransferBatchReqBuilder: the max batch size is adjusted from {} to {} due "
+ + "to the memory restriction";
+ public static final String IOTCONSENSUSV2_CONSENSUSGROUP_EVENT_NOT_FOUND_IN_TRANSFERBUFFER =
+ "IoTConsensusV2-ConsensusGroup-{}: event-{} not found in transferBuffer, skip removing. "
+ + "queue size = {}";
+ public static final String IOTCONSENSUSV2_CONSENSUSGROUP_EVENT_REPLICATE_INDEX_TRANSFER_FAILED =
+ "IoTConsensusV2-ConsensusGroup-{}: Event {} replicate index {} transfer failed, added to "
+ + "retry queue failed, this event will be ignored.";
+ public static final String IOTCONSENSUSV2_CONSENSUSGROUP_EVENT_REPLICATE_INDEX_TRANSFER_FAILED_1 =
+ "IoTConsensusV2-ConsensusGroup-{}: Event {} replicate index {} transfer failed, will be "
+ + "added to retry queue.";
+ public static final String IOTCONSENSUSV2_CONSENSUSGROUP_NO_EVENT_ADDED_TO_CONNECTOR =
+ "IoTConsensusV2-ConsensusGroup-{}: no.{} event-{} added to connector buffer";
+ public static final String IOTCONSENSUSV2_CONSENSUSGROUP_ONE_EVENT_SUCCESSFULLY_RECEIVED_BY =
+ "IoTConsensusV2-ConsensusGroup-{}: one event-{} successfully received by the follower, "
+ + "will be removed from queue, queue size = {}, limit size = {}";
+ public static final String IOTCONSENSUSV2_CONSENSUSGROUP_RETRYEVENTQUEUE_IS_NOT_EMPTY_AFTER =
+ "IoTConsensusV2-ConsensusGroup-{}: retryEventQueue is not empty after 20 seconds. "
+ + "retryQueue size: {}";
+ public static final String IOTCONSENSUSV2_CONSENSUSGROUP_RETRY_WITH_INTERVAL_FOR_INDEX =
+ "IoTConsensusV2-ConsensusGroup-{}: retry with interval {} for index {} {}";
+ public static final String IOTCONSENSUSV2_CONSENSUSGROUP_TRY_TO_REMOVE_EVENT_AFTER =
+ "IoTConsensusV2-ConsensusGroup-{}: try to remove event-{} after "
+ + "iotConsensusV2AsyncConnector being closed. Ignore it.";
+ public static final String IOTCONSENSUSV2_FAILED_TO_CLOSE_FILE_READER_WHEN =
+ "IoTConsensusV2-{}: Failed to close file reader when failed to transfer file.";
+ public static final String IOTCONSENSUSV2_FAILED_TO_CLOSE_FILE_READER_WHEN_1 =
+ "IoTConsensusV2-{}: Failed to close file reader when successfully transferred file.";
+ public static final String IOTCONSENSUSV2_FAILED_TO_CLOSE_FILE_READER_WHEN_2 =
+ "IoTConsensusV2-{}: Failed to close file reader when successfully transferred mod file.";
+ public static final String IOTCONSENSUSV2_FAILED_TO_TRANSFER_TABLETINSERTIONEVENT_BATCH_TOTAL =
+ "IoTConsensusV2: Failed to transfer TabletInsertionEvent batch. Total failed events: {}, "
+ + "related pipe names: {}";
+ public static final String IOTCONSENSUSV2_FAILED_TO_TRANSFER_TSFILEINSERTIONEVENT_COMMITTER_KEY =
+ "IoTConsensusV2-{}: Failed to transfer TsFileInsertionEvent {} (committer key {}, "
+ + "replicate index {}).";
+ public static final String IOTCONSENSUSV2_REDIRECT_FILE_POSITION_TO =
+ "IoTConsensusV2-{}: Redirect file position to {}.";
+ public static final String IOTCONSENSUSV2_SUCCESSFULLY_TRANSFERRED_FILE_COMMITTER_KEY_REPLICATE =
+ "IoTConsensusV2-{}: Successfully transferred file {} (committer key={}, replicate index={}).";
+ public static final String IOTDBCDCCONNECTOR_ONLY_SUPPORT_PIPEINSERTNODETABLETINSERTIONEVENT_AND_PIPERAWTAB =
+ "IoTDBCDCConnector only support PipeInsertNodeTabletInsertionEvent and "
+ + "PipeRawTabletInsertionEvent.";
+ public static final String IOTDBDATAREGIONAIRGAPCONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT =
+ "IoTDBDataRegionAirGapConnector does not support transferring generic event: {}.";
+ public static final String IOTDBDATAREGIONAIRGAPCONNECTOR_ONLY_SUPPORT_PIPEINSERTNODETABLETINSERTIONEVENT_A =
+ "IoTDBDataRegionAirGapConnector only support PipeInsertNodeTabletInsertionEvent and "
+ + "PipeRawTabletInsertionEvent. Ignore {}.";
+ public static final String IOTDBDATAREGIONAIRGAPCONNECTOR_ONLY_SUPPORT_PIPETSFILEINSERTIONEVENT_IGNORE =
+ "IoTDBDataRegionAirGapConnector only support PipeTsFileInsertionEvent. Ignore {}.";
+ public static final String IOTDBLEGACYPIPECONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT =
+ "IoTDBLegacyPipeConnector does not support transferring generic event: {}.";
+ public static final String IOTDBLEGACYPIPECONNECTOR_ONLY_SUPPORT_PIPEINSERTNODEINSERTIONEVENT_AND_PIPETABLE =
+ "IoTDBLegacyPipeConnector only support PipeInsertNodeInsertionEvent and "
+ + "PipeTabletInsertionEvent.";
+ public static final String IOTDBLEGACYPIPECONNECTOR_ONLY_SUPPORT_PIPETSFILEINSERTIONEVENT =
+ "IoTDBLegacyPipeConnector only support PipeTsFileInsertionEvent.";
+ public static final String IOTDBSCHEMAREGIONAIRGAPSINK_CAN_T_TRANSFER_TABLETINSERTIONEVENT =
+ "IoTDBSchemaRegionAirGapSink can't transfer TabletInsertionEvent.";
+ public static final String IOTDBSCHEMAREGIONAIRGAPSINK_CAN_T_TRANSFER_TSFILEINSERTIONEVENT =
+ "IoTDBSchemaRegionAirGapSink can't transfer TsFileInsertionEvent.";
+ public static final String IOTDBSCHEMAREGIONAIRGAPSINK_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT =
+ "IoTDBSchemaRegionAirGapSink does not support transferring generic event: {}.";
+ public static final String IOTDBSCHEMAREGIONCONNECTOR_CAN_T_TRANSFER_TABLETINSERTIONEVENT =
+ "IoTDBSchemaRegionConnector can't transfer TabletInsertionEvent.";
+ public static final String IOTDBSCHEMAREGIONCONNECTOR_CAN_T_TRANSFER_TSFILEINSERTIONEVENT =
+ "IoTDBSchemaRegionConnector can't transfer TsFileInsertionEvent.";
+ public static final String IOTDBSCHEMAREGIONCONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT =
+ "IoTDBSchemaRegionConnector does not support transferring generic event: {}.";
+ public static final String IOTDBTHRIFTASYNCCONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT =
+ "IoTDBThriftAsyncConnector does not support transferring generic event: {}.";
+ public static final String IOTDBTHRIFTASYNCCONNECTOR_DOES_NOT_SUPPORT_TRANSFER_GENERIC_EVENT =
+ "IoTDBThriftAsyncConnector does not support transfer generic event: {}.";
+ public static final String IOTDBTHRIFTASYNCCONNECTOR_ONLY_SUPPORT_PIPEINSERTNODETABLETINSERTIONEVENT_AND_PI =
+ "IoTDBThriftAsyncConnector only support PipeInsertNodeTabletInsertionEvent and "
+ + "PipeRawTabletInsertionEvent. Current event: {}.";
+ public static final String IOTDBTHRIFTASYNCCONNECTOR_ONLY_SUPPORT_PIPETSFILEINSERTIONEVENT_CURRENT_EVENT =
+ "IoTDBThriftAsyncConnector only support PipeTsFileInsertionEvent. Current event: {}.";
+ public static final String IOTDBTHRIFTSYNCCONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT =
+ "IoTDBThriftSyncConnector does not support transferring generic event: {}.";
+ public static final String IOTDBTHRIFTSYNCCONNECTOR_ONLY_SUPPORT_PIPEINSERTNODETABLETINSERTIONEVENT_AND_PIP =
+ "IoTDBThriftSyncConnector only support PipeInsertNodeTabletInsertionEvent and "
+ + "PipeRawTabletInsertionEvent. Ignore {}.";
+ public static final String IOTDBTHRIFTSYNCCONNECTOR_ONLY_SUPPORT_PIPETSFILEINSERTIONEVENT_IGNORE =
+ "IoTDBThriftSyncConnector only support PipeTsFileInsertionEvent. Ignore {}.";
+ public static final String LEADERCACHEMANAGER_ALLOCATEDMEMORYBLOCK_HAS_EXPANDED_FROM_TO =
+ "LeaderCacheManager.allocatedMemoryBlock has expanded from {} to {}.";
+ public static final String LEADERCACHEMANAGER_ALLOCATEDMEMORYBLOCK_HAS_SHRUNK_FROM_TO =
+ "LeaderCacheManager.allocatedMemoryBlock has shrunk from {} to {}.";
+ // NOTE(review): the next two constants differ only by the trailing period — both
+ // variants appear to come from distinct call sites; confirm neither is redundant.
+ public static final String LOADING_KEYSTORE_AT = "Loading KeyStore at {}";
+ public static final String LOADING_KEYSTORE_AT_1 = "Loading KeyStore at {}.";
+ public static final String LOAD_KEYSTORE_FAILED_THE_EXISTING_KEYSTORE_MAY =
+ "Load keyStore failed, the existing keyStore may be stale, re-constructing...";
+ public static final String NO_OPC_CLIENT_OR_SERVER_IS_SPECIFIED =
+ "No OPC client or server is specified when transferring tablet";
+ public static final String OPC_DA_SINK_MUST_RUN_ON_WINDOWS =
+ "opc-da-sink must run on windows system.";
+ public static final String PIPETABLEMODETSFILEBUILDERV2_DOES_NOT_SUPPORT_TREE_MODEL_TABLET =
+ "PipeTableModeTsFileBuilderV2 does not support tree model tablet to build TSFile";
+ public static final String PIPETABLEMODETSFILEBUILDER_DOES_NOT_SUPPORT_TREE_MODEL_TABLET =
+ "PipeTableModeTsFileBuilder does not support tree model tablet to build TSFile";
+ public static final String PIPETREEMODELTSFILEBUILDERV2_DOES_NOT_SUPPORT_TABLE_MODEL_TABLET =
+ "PipeTreeModelTsFileBuilderV2 does not support table model tablet to build TSFile";
+ public static final String PIPETREEMODELTSFILEBUILDER_DOES_NOT_SUPPORT_TABLE_MODEL_TABLET =
+ "PipeTreeModelTsFileBuilder does not support table model tablet to build TSFile";
+ public static final String POLLED_EVENT_FROM_RETRY_QUEUE = "Polled event {} from retry queue.";
+ public static final String RECEIVED_AN_ERROR_MESSAGE_FROM =
+ "Received an error message {} from {}:{}";
+ public static final String RECEIVED_AN_UNKNOWN_MESSAGE_FROM =
+ "Received an unknown message {} from {}:{}";
+ public static final String RECEIVED_A_ACK_MESSAGE_FROM = "Received a ack message from {}:{}";
+ public static final String RECEIVED_A_BIND_MESSAGE_FROM = "Received a bind message from {}:{}";
+ public static final String REDIRECT_FILE_POSITION_TO = "Redirect file position to {}.";
+ public static final String REDIRECT_TO_POSITION_IN_TRANSFERRING_TSFILE =
+ "Redirect to position {} in transferring tsFile {}.";
+ public static final String SECURITY_DIR = "security dir: {}";
+ public static final String SECURITY_PKI_DIR = "security pki dir: {}";
+ public static final String SUCCESSFULLY_ADDED_ITEM = "Successfully added item {}.";
+ // "{{}}" renders as a '{}' placeholder wrapped in literal braces, i.e. "CLSID: {value}".
+ public static final String SUCCESSFULLY_CONVERTED_PROGID_TO_CLSID =
+ "Successfully converted progID {} to CLSID: {{}}";
+ public static final String SUCCESSFULLY_SHUTDOWN_EXECUTOR = "Successfully shutdown executor {}.";
+ public static final String SUCCESSFULLY_TRANSFERRED_DELETION_EVENT =
+ "Successfully transferred deletion event {}.";
+ public static final String SUCCESSFULLY_TRANSFERRED_FILE = "Successfully transferred file {}.";
+ public static final String SUCCESSFULLY_TRANSFERRED_FILE_AND =
+ "Successfully transferred file {}, {} and {}.";
+ public static final String SUCCESSFULLY_TRANSFERRED_FILE_BATCHED_TABLEINSERTIONEVENTS_REFERENCE_COUNT =
+ "Successfully transferred file {} (batched TableInsertionEvents, reference count={}).";
+ public static final String SUCCESSFULLY_TRANSFERRED_FILE_COMMITTER_KEY_COMMIT_ID =
+ "Successfully transferred file {} (committer key={}, commit id={}, reference count={}).";
+ public static final String SUCCESSFULLY_TRANSFERRED_SCHEMA_EVENT =
+ "Successfully transferred schema event {}.";
+ public static final String SUCCESSFULLY_TRANSFERRED_SCHEMA_REGION_SNAPSHOT_AND =
+ "Successfully transferred schema region snapshot {}, {} and {}.";
+ public static final String THE_BATCH_SIZE_LIMIT_HAS_EXPANDED_FROM =
+ "The batch size limit has expanded from {} to {}.";
+ public static final String THE_BATCH_SIZE_LIMIT_HAS_SHRUNK_FROM =
+ "The batch size limit has shrunk from {} to {}.";
+ public static final String THE_DEFAULT_QUALITY_CAN_ONLY_BE_GOOD =
+ "The default quality can only be 'GOOD', 'BAD' or 'UNCERTAIN'.";
+ public static final String THE_EVENT_ACK_IS_NOT_FOUND = "The event ack {} is not found.";
+ public static final String THE_EVENT_CAN_T_BE_TRANSFERRED_TO =
+ "The event {} can't be transferred to client, it will be retried later.";
+ public static final String THE_EVENT_IN_ERROR_IS_NOT_FOUND =
+ "The event in error {} is not found.";
+ public static final String THE_EVENT_POLLED_FROM_THE_QUEUE_IS =
+ "The event polled from the queue is not the same as the event peeked from the queue. "
+ + "Peeked event: {}, polled event: {}.";
+ public static final String THE_FILE_IS_NOT_FOUND_MAY_ALREADY =
+ "The file {} is not found, may already be deleted.";
+ public static final String THE_PIPE_WAS_DROPPED_SO_THE_EVENT =
+ "The pipe {} was dropped so the event ack {} will be ignored.";
+ public static final String THE_PIPE_WAS_DROPPED_SO_THE_EVENT_1 =
+ "The pipe {} was dropped so the event in error {} will be ignored.";
+ public static final String THE_PIPE_WAS_DROPPED_SO_THE_EVENT_2 =
+ "The pipe {} was dropped so the event {} will be dropped.";
+ public static final String THE_QUALITY_VALUE_ONLY_SUPPORTS_BOOLEAN_TYPE =
+ "The quality value only supports boolean type, while true == GOOD and false == BAD.";
+ public static final String THE_SCHEMA_REGION_AIR_GAP_CONNECTOR_DOES =
+ "The schema region air gap connector does not support transferring single file piece bytes.";
+ public static final String THE_SCHEMA_REGION_CONNECTOR_DOES_NOT_SUPPORT =
+ "The schema region connector does not support transferring single file piece req.";
+ public static final String THE_SECURITY_POLICY_CANNOT_BE_EMPTY =
+ "The security policy cannot be empty.";
+ public static final String THE_SECURITY_POLICY_CAN_ONLY_BE_NONE =
+ "The security policy can only be 'None', 'Basic128Rsa15', 'Basic256', 'Basic256Sha256', "
+ + "'Aes128_Sha256_RsaOaep' or 'Aes256_Sha256_RsaPss'.";
+ public static final String THE_SEGMENTS_OF_TABLETS_MUST_EXIST =
+ "The segments of tablets must exist";
+ public static final String THE_TABLET_OF_COMMITID_CAN_T_BE =
+ "The tablet of commitId: {} can't be parsed by client, it will be retried later.";
+ public static final String THE_TRANSFER_THREAD_IS_INTERRUPTED =
+ "The transfer thread is interrupted.";
+ public static final String THE_WEBSOCKET_CONNECTION_FROM_CLIENT_HAS_BEEN =
+ "The websocket connection from client has been closed!The code is {}. The reason is {}. "
+ + "Is it closed by remote? {}";
+ public static final String THE_WEBSOCKET_CONNECTION_FROM_CLIENT_HAS_BEEN_1 =
+ "The websocket connection from client {}:{} has been closed! The code is {}. The reason "
+ + "is {}. Is it closed by remote? {}";
+ public static final String THE_WEBSOCKET_CONNECTION_FROM_CLIENT_HAS_BEEN_2 =
+ "The websocket connection from client {}:{} has been opened!";
+ public static final String THE_WEBSOCKET_CONNECTION_FROM_HAS_BEEN_CLOSED =
+ "The websocket connection from {}:{} has been closed, but the ack message of commitId: "
+ + "{} is received.";
+ public static final String THE_WEBSOCKET_CONNECTION_FROM_HAS_BEEN_CLOSED_1 =
+ "The websocket connection from {}:{} has been closed, but the error message of commitId: "
+ + "{} is received.";
+ public static final String THE_WEBSOCKET_SERVER_HAS_BEEN_STARTED =
+ "The websocket server {}:{} has been started!";
+ public static final String THE_WRITTEN_TABLET_TIME_MAY_OVERLAP_OR =
+ "The written Tablet time may overlap or the Schema may be incorrect";
+ public static final String THIS_CONNECTOR_ONLY_SUPPORT_PIPEINSERTNODETABLETINSERTIONEVENT_AND_PIPERAWTABLET =
+ "This Connector only support PipeInsertNodeTabletInsertionEvent and "
+ + "PipeRawTabletInsertionEvent. Ignore {}.";
+ public static final String TIMED_OUT_WHEN_WAITING_FOR_CLIENT_HANDSHAKE =
+ "Timed out when waiting for client handshake finish.";
+ public static final String TIOTCONSENSUSV2BATCHTRANSFERRESP_IS_NULL =
+ "TIoTConsensusV2BatchTransferResp is null";
+ public static final String TIOTCONSENSUSV2TRANSFERRESP_IS_NULL =
+ "TIoTConsensusV2TransferResp is null";
+ public static final String TPIPETRANSFERRESP_IS_NULL = "TPipeTransferResp is null";
+ public static final String TRANSFER_TSFILE_EVENT_ASYNCHRONOUSLY_WAS_INTERRUPTED =
+ "Transfer tsfile event {} asynchronously was interrupted.";
+ public static final String UNABLE_TO_CREATE_SECURITY_DIR = "unable to create security dir: ";
+ public static final String UNKNOWN_LOAD_BALANCE_STRATEGY_USE_ROUND_ROBIN =
+ "Unknown load balance strategy: {}, use round-robin strategy instead.";
+ public static final String UNSUPPORTED_BATCH_TYPE = "Unsupported batch type {}.";
+ public static final String UNSUPPORTED_BATCH_TYPE_WHEN_TRANSFERRING_TABLET_INSERTION =
+ "Unsupported batch type {} when transferring tablet insertion event.";
+ public static final String UNSUPPORTED_DATATYPE = "UnSupported dataType ";
+ public static final String UNSUPPORTED_EVENT_TYPE_WHEN_BUILDING_TRANSFER_REQUEST =
+ "Unsupported event {} type {} when building transfer request";
+ public static final String WAIT_FOR_RESOURCE_ENOUGH_FOR_SLICING_TSFILE =
+ "Wait for resource enough for slicing tsfile {} for {} seconds.";
+ public static final String WEBSOCKETCONNECTOR_FAILED_TO_INCREASE_THE_REFERENCE_COUNT =
+ "WebsocketConnector failed to increase the reference count of the event. Ignore it. "
+ + "Current event: {}.";
+ public static final String WEBSOCKETCONNECTOR_ONLY_SUPPORT_PIPEINSERTNODETABLETINSERTIONEVENT_AND_PIPERAWTA =
+ "WebsocketConnector only support PipeInsertNodeTabletInsertionEvent and "
+ + "PipeRawTabletInsertionEvent. Current event: {}.";
+ public static final String WEBSOCKETCONNECTOR_ONLY_SUPPORT_PIPETSFILEINSERTIONEVENT_CURRENT_EVENT =
+ "WebsocketConnector only support PipeTsFileInsertionEvent. Current event: {}.";
+ public static final String WHEN_THE_OPC_UA_SINK_POINTS_TO =
+ "When the OPC UA sink points to an outer server, the table model data is not supported.";
+ public static final String WHEN_THE_OPC_UA_SINK_SETS_WITH =
+ "When the OPC UA sink sets 'with-quality' to true, the table model data is not supported.";
+ public static final String WRITEBACKSINK_ONLY_SUPPORT_PIPEINSERTNODETABLETINSERTIONEVENT_AND_PIPERAWTABLETI =
+ "WriteBackSink only support PipeInsertNodeTabletInsertionEvent and "
+ + "PipeRawTabletInsertionEvent. Ignore {}.";
+
+ // ===================== RECEIVER =====================
+
+ // Receiver-side messages (air-gap / thrift / IoTConsensusV2 pipe receivers).
+ // Numeric suffixes (_1, _2, ...) distinguish near-identical messages extracted
+ // from different call sites; wording is presumably preserved verbatim from the
+ // original inline strings — do not reword here without updating the call sites.
+ public static final String ALL_RECEIVERS_RELATED_TO_ARE_RELEASED =
+ "All Receivers related to {} are released.";
+ public static final String AUTO_CREATE_DATABASE_FAILED_BECAUSE =
+ "Auto create database failed because: ";
+ public static final String CREATE_DATABASE_ERROR_STATEMENT_RESULT_STATUS =
+ "Create Database error, statement: {}, result status : {}.";
+ public static final String DATABASE_NAME_IS_UNEXPECTEDLY_NULL_FOR_LOADTSFILESTATEMENT =
+ "Database name is unexpectedly null for LoadTsFileStatement: {}. Skip data type conversion.";
+ public static final String DATABASE_NAME_IS_UNEXPECTEDLY_NULL_FOR_STATEMENT =
+ "Database name is unexpectedly null for statement: {}. Skip data type conversion.";
+ public static final String DATA_TYPE_CONVERSION_FOR_LOADTSFILESTATEMENT_IS_SUCCESSFUL =
+ "Data type conversion for LoadTsFileStatement {} is successful.";
+ public static final String DATA_TYPE_MISMATCH_DETECTED_TSSTATUS_FOR_LOADTSFILESTATEMENT =
+ "Data type mismatch detected (TSStatus: {}) for LoadTsFileStatement: {}. Start data type "
+ + "conversion.";
+ public static final String DELETE_ERROR_STATEMENT = "Delete {} error, statement: {}.";
+ public static final String DELETE_RESULT_STATUS = "Delete result status : {}.";
+ public static final String FAILED_TO_CLOSE_IOTDBAIRGAPRECEIVERAGENT_S_SERVER_SOCKET =
+ "Failed to close IoTDBAirGapReceiverAgent's server socket";
+ public static final String FAILED_TO_CONVERT_DATA_TYPE_FOR_LOADTSFILESTATEMENT =
+ "Failed to convert data type for LoadTsFileStatement: {}.";
+ public static final String FAILED_TO_EXECUTE_STATEMENT_AFTER_DATA_TYPE =
+ "Failed to execute statement after data type conversion.";
+ public static final String FAILED_TO_HANDLE_CONFIG_CLIENT_ID_EXIT =
+ "Failed to handle config client (id = {}) exit";
+ public static final String FAIL_TO_CREATE_IOTCONSENSUSV2_RECEIVER_FILE_FOLDERS =
+ "Fail to create iotConsensusV2 receiver file folders allocation strategy because all "
+ + "disks of folders are full.";
+ public static final String FAIL_TO_CREATE_PIPE_RECEIVER_FILE_FOLDERS =
+ "Fail to create pipe receiver file folders allocation strategy because all disks of "
+ + "folders are full.";
+ public static final String FAIL_TO_INITIATE_FILE_BUFFER_FOLDER_ERROR =
+ "Fail to initiate file buffer folder, Error msg: {}";
+ public static final String FAIL_TO_LOAD_PIPEDATA_BECAUSE = "Fail to load pipeData because {}.";
+ public static final String FAIL_TO_RENAME_FILE_TO = "Fail to rename file {} to {}";
+ public static final String INVOKE_HANDSHAKE_METHOD_FROM_CLIENT_IP =
+ "Invoke handshake method from client ip = {}";
+ public static final String INVOKE_TRANSPORTDATA_METHOD_FROM_CLIENT_IP =
+ "Invoke transportData method from client ip = {}";
+ public static final String INVOKE_TRANSPORTPIPEDATA_METHOD_FROM_CLIENT_IP =
+ "Invoke transportPipeData method from client ip = {}";
+ public static final String IOTCONSENSUSV2RECEIVER_THREAD_IS_INTERRUPTED_WHEN_WAITING_FOR =
+ "IoTConsensusV2Receiver thread is interrupted when waiting for receiver get initiated, "
+ + "may because system exit.";
+ public static final String IOTCONSENSUSV2_PIPENAME = "IoTConsensusV2-PipeName-{}: {}";
+ public static final String IOTCONSENSUSV2_PIPENAME_CURRENT_WAITING_IS_INTERRUPTED_ONSYNCEDCOMMITINDEX =
+ "IoTConsensusV2-PipeName-{}: current waiting is interrupted. onSyncedCommitIndex: {}. "
+ + "Exception: ";
+ public static final String IOTCONSENSUSV2_PIPENAME_CURRENT_WRITING_FILE_WRITER_IS =
+ "IoTConsensusV2-PipeName-{}: Current writing file writer is null. No need to close.";
+ public static final String IOTCONSENSUSV2_PIPENAME_CURRENT_WRITING_FILE_WRITER_WAS =
+ "IoTConsensusV2-PipeName-{}: Current writing file writer {} was closed.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_CLOSE_CURRENT_WRITING =
+ "IoTConsensusV2-PipeName-{}: Failed to close current writing file writer {}, because {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_CREATE_RECEIVER_FILE =
+ "IoTConsensusV2-PipeName-{}: Failed to create receiver file dir {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_CREATE_RECEIVER_FILE_1 =
+ "IoTConsensusV2-PipeName-{}: Failed to create receiver file dir {}. Because parent "
+ + "system dir have been deleted due to system concurrently exit.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_CREATE_RECEIVER_FILE_2 =
+ "IoTConsensusV2-PipeName-{}: Failed to create receiver file dir {}. May because "
+ + "authority or dir already exists etc.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_CREATE_RECEIVER_TSFILEWRITER =
+ "IoTConsensusV2-PipeName-{}: Failed to create receiver tsFileWriter-{} file dir {}";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_DELETE_BECAUSE =
+ "IoTConsensusV2-PipeName-{}: {} Failed to delete {}, because {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_GET_BASE_DIRECTORY =
+ "IoTConsensusV2-PipeName-{}: Failed to get base directory";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_LOAD_FILE_FROM =
+ "IoTConsensusV2-PipeName-{}: Failed to load file {} from req {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_READ_TSFILE_WHEN =
+ "IoTConsensusV2-PipeName-{}: Failed to read TsFile when counting points: {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_RETURN_TSFILEWRITER =
+ "IoTConsensusV2-PipeName-{}: Failed to return tsFileWriter {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_SEAL_FILE_BECAUSE =
+ "IoTConsensusV2-PipeName-{}: Failed to seal file {}, because the file does not exist.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_SEAL_FILE_BECAUSE_1 =
+ "IoTConsensusV2-PipeName-{}: Failed to seal file {}, because writing file is {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_SEAL_FILE_BECAUSE_2 =
+ "IoTConsensusV2-PipeName-{}: Failed to seal file {}, because {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_SEAL_FILE_FROM =
+ "IoTConsensusV2-PipeName-{}: Failed to seal file {} from req {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_SEAL_FILE_STATUS =
+ "IoTConsensusV2-PipeName-{}: Failed to seal file {}, status is {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_SEAL_FILE_WHEN =
+ "IoTConsensusV2-PipeName-{}: Failed to seal file {} when check final seal file, because "
+ + "the length of file is not correct. The original file has length {}, but receiver file "
+ + "has length {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_SEAL_FILE_WHEN_1 =
+ "IoTConsensusV2-PipeName-{}: Failed to seal file {} when check non final seal, because "
+ + "the length of file is not correct. The original file has length {}, but receiver file "
+ + "has length {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_WRITE_FILE_PIECE =
+ "IoTConsensusV2-PipeName-{}: Failed to write file piece from req {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FILE_OFFSET_RESET_REQUESTED_BY =
+ "IoTConsensusV2-PipeName-{}: File offset reset requested by receiver, response status = {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_ILLEGAL_FILE_NAME_WHEN_CHECKING =
+ "IoTConsensusV2-PipeName-{}: Illegal file name {} when checking writing file.";
+ public static final String IOTCONSENSUSV2_PIPENAME_IS_NOT_EXISTED_NO_NEED =
+ "IoTConsensusV2-PipeName-{}: {} {} is not existed. No need to delete.";
+ public static final String IOTCONSENSUSV2_PIPENAME_NO_EVENT_GET_EXECUTED_AFTER =
+ "IoTConsensusV2-PipeName-{}: no.{} event get executed after awaiting timeout, current "
+ + "receiver syncIndex: {}";
+ public static final String IOTCONSENSUSV2_PIPENAME_NO_EVENT_GET_EXECUTED_BECAUSE =
+ "IoTConsensusV2-PipeName-{}: no.{} event get executed because receiver buffer's len >= "
+ + "pipeline, current receiver syncIndex {}, current buffer len {}";
+ public static final String IOTCONSENSUSV2_PIPENAME_PATH_TRAVERSAL_ATTEMPT_DETECTED_FILENAME =
+ "IoTConsensusV2-PipeName-{}: Path traversal attempt detected! Filename: {}";
+ public static final String IOTCONSENSUSV2_PIPENAME_PROCESS_NO_EVENT_SUCCESSFULLY =
+ "IoTConsensusV2-PipeName-{}: process no.{} event successfully!";
+ public static final String IOTCONSENSUSV2_PIPENAME_RECEIVED_A_DEPRECATED_REQUEST_WHICH =
+ "IoTConsensusV2-PipeName-{}: received a deprecated request-{}, which may because {}. ";
+ public static final String IOTCONSENSUSV2_PIPENAME_RECEIVER_DETECTED_AN_NEWER_PIPETASKRESTARTTIMES =
+ "IoTConsensusV2-PipeName-{}: receiver detected an newer pipeTaskRestartTimes, which "
+ + "indicates the pipe task has restarted. receiver will reset all its data.";
+ public static final String IOTCONSENSUSV2_PIPENAME_RECEIVER_DETECTED_AN_NEWER_REBOOTTIMES =
+ "IoTConsensusV2-PipeName-{}: receiver detected an newer rebootTimes, which indicates the "
+ + "leader has rebooted. receiver will reset all its data.";
+ public static final String IOTCONSENSUSV2_PIPENAME_RECEIVER_FILE_DIR_WAS_CREATED =
+ "IoTConsensusV2-PipeName-{}: Receiver file dir {} was created.";
+ public static final String IOTCONSENSUSV2_PIPENAME_RECEIVER_THREAD_GET_INTERRUPTED_WHEN =
+ "IoTConsensusV2-PipeName-{}: receiver thread get interrupted when exiting.";
+ public static final String IOTCONSENSUSV2_PIPENAME_SEAL_FILE_SUCCESSFULLY =
+ "IoTConsensusV2-PipeName-{}: Seal file {} successfully.";
+ public static final String IOTCONSENSUSV2_PIPENAME_SEAL_FILE_WITH_MODS_SUCCESSFULLY =
+ "IoTConsensusV2-PipeName-{}: Seal file with mods {} successfully.";
+ public static final String IOTCONSENSUSV2_PIPENAME_SKIP_LOAD_TSFILE_WHEN_SEALING =
+ "IoTConsensusV2-PipeName-{}: skip load tsfile-{} when sealing, because this region has "
+ + "been removed or migrated.";
+ public static final String IOTCONSENSUSV2_PIPENAME_STARTING_TO_RECEIVE_TSFILE_PIECES =
+ "IoTConsensusV2-PipeName-{}: starting to receive tsFile pieces";
+ public static final String IOTCONSENSUSV2_PIPENAME_STARTING_TO_RECEIVE_TSFILE_SEAL =
+ "IoTConsensusV2-PipeName-{}: starting to receive tsFile seal";
+ public static final String IOTCONSENSUSV2_PIPENAME_STARTING_TO_RECEIVE_TSFILE_SEAL_1 =
+ "IoTConsensusV2-PipeName-{}: starting to receive tsFile seal with mods";
+ public static final String IOTCONSENSUSV2_PIPENAME_START_TO_RECEIVE_NO_EVENT =
+ "IoTConsensusV2-PipeName-{}: start to receive no.{} event";
+ public static final String IOTCONSENSUSV2_PIPENAME_THE_POINT_COUNT_OF_TSFILE =
+ "IoTConsensusV2-PipeName-{}: The point count of TsFile {} is not given by sender, will "
+ + "read actual point count from TsFile.";
+ public static final String IOTCONSENSUSV2_PIPENAME_TSFILEWRITER_RETURNED_SELF =
+ "IoTConsensusV2-PipeName-{}: tsFileWriter-{} returned self";
+ public static final String IOTCONSENSUSV2_PIPENAME_TSFILEWRITER_ROLL_TO_WRITING_PATH =
+ "IoTConsensusV2-PipeName-{}: tsfileWriter-{} roll to writing path {}";
+ public static final String IOTCONSENSUSV2_PIPENAME_TSFILE_WRITER_IS_CLEANED_UP =
+ "IoTConsensusV2-PipeName-{}: tsfile writer-{} is cleaned up because no new requests were "
+ + "received for too long.";
+ public static final String IOTCONSENSUSV2_PIPENAME_UNKNOWN_PIPEREQUESTTYPE_RESPONSE_STATUS =
+ "IoTConsensusV2-PipeName-{}: Unknown PipeRequestType, response status = {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_WAS_DELETED =
+ "IoTConsensusV2-PipeName-{}: {} {} was deleted.";
+ public static final String IOTCONSENSUSV2_PIPENAME_WRITING_FILE_IS_NOT_AVAILABLE =
+ "IoTConsensusV2-PipeName-{}: Writing file {} is not available. Writing file is null: {}, "
+ + "writing file exists: {}, writing file writer is null: {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_WRITING_FILE_IS_NOT_EXISTED =
+ "IoTConsensusV2-PipeName-{}: Writing file {} is not existed or name is not correct, try "
+ + "to create it. Current writing file is {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_WRITING_FILE_S_OFFSET_IS =
+ "IoTConsensusV2-PipeName-{}: Writing file {}'s offset is {}, but request sender's offset "
+ + "is {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_WRITING_FILE_WAS_CREATED_READY =
+ "IoTConsensusV2-PipeName-{}: Writing file {} was created. Ready to write file pieces.";
+ public static final String IOTCONSENSUSV2_RECEIVE_ON_THE_FLY_NO_EVENT =
+ "IoTConsensusV2-{}: receive on-the-fly no.{} event after data region was deleted, discard it";
+ public static final String IOTCONSENSUSV2_TRANSFER_BATCH_HASN_T_BEEN_IMPLEMENTED =
+ "IoTConsensusV2 transfer batch hasn't been implemented yet.";
+ public static final String IOTCONSENSUSV2_TSFILEWRITER_SET_NULL_WRITING_FILE =
+ "IoTConsensusV2-{}: TsFileWriter-{} set null writing file";
+ public static final String IOTCONSENSUSV2_TSFILEWRITER_SET_NULL_WRITING_FILE_WRITER =
+ "IoTConsensusV2-{}: TsFileWriter-{} set null writing file writer";
+ public static final String IOTCONSENSUSV2_UNKNOWN_IOTCONSENSUSV2REQUESTVERSION_RESPONSE_STATUS =
+ "IoTConsensusV2: Unknown IoTConsensusV2RequestVersion, response status = {}.";
+ public static final String IOTCONSENSUSV2_UNKNOWN_PIPEREQUESTTYPE_RESPONSE_STATUS =
+ "IoTConsensusV2 Unknown PipeRequestType, response status = {}.";
+ public static final String IOTCONSENSUSV2_WAITING_FOR_THE_PREVIOUS_EVENT_TIMES =
+ "IoTConsensusV2-{}: Waiting for the previous event times out, current peek {}, current id {}";
+ public static final String IOTDBAIRGAPRECEIVERAGENT_STARTED =
+ "IoTDBAirGapReceiverAgent {} started.";
+ public static final String IOTDBAIRGAPRECEIVERAGENT_STOPPED =
+ "IoTDBAirGapReceiverAgent {} stopped.";
+ public static final String LOAD_ACTIVE_LISTENING_PIPE_DIR_IS_NOT =
+ "Load active listening pipe dir is not set.";
+ public static final String LOAD_PIPEDATA_WITH_SERIALIZE_NUMBER_SUCCESSFULLY =
+ "Load pipeData with serialize number {} successfully.";
+ public static final String LOAD_TSFILE_ERROR_STATEMENT = "Load TsFile {} error, statement: {}.";
+ public static final String LOAD_TSFILE_RESULT_STATUS = "Load TsFile result status : {}.";
+ public static final String PARSE_DATABASE_PARTIALPATH_ERROR =
+ "Parse database PartialPath {} error";
+ public static final String PIPE_AIR_GAP_RECEIVER_CHECKSUM_FAILED_EXPECTED =
+ "Pipe air gap receiver {}: checksum failed, expected: {}, actual: {}";
+ public static final String PIPE_AIR_GAP_RECEIVER_CLOSED_BECAUSE_OF =
+ "Pipe air gap receiver {} closed because of checksum failed. Socket: {}";
+ public static final String PIPE_AIR_GAP_RECEIVER_CLOSED_BECAUSE_OF_1 =
+ "Pipe air gap receiver {} closed because of exception. Socket: {}";
+ public static final String PIPE_AIR_GAP_RECEIVER_CLOSED_BECAUSE_SOCKET =
+ "Pipe air gap receiver {} closed because socket is closed. Socket: {}";
+ public static final String PIPE_AIR_GAP_RECEIVER_EXCEPTION_DURING_HANDLING =
+ "Pipe air gap receiver {}: Exception during handling receiving. Socket: {}";
+ public static final String PIPE_AIR_GAP_RECEIVER_HANDLE_DATA_FAILED =
+ "Pipe air gap receiver {}: Handle data failed, status: {}, req: {}";
+ public static final String PIPE_AIR_GAP_RECEIVER_SOCKET_CLOSED_WHEN =
+ "Pipe air gap receiver {}: Socket {} closed when listening to data. Because: {}";
+ public static final String PIPE_AIR_GAP_RECEIVER_STARTED_SOCKET =
+ "Pipe air gap receiver {} started. Socket: {}";
+ public static final String PIPE_AIR_GAP_RECEIVER_TEMPORARY_UNAVAILABLE_RETRY =
+ "Pipe air gap receiver {}: Temporary unavailable retry timed out, returning FAIL to sender.";
+ public static final String PIPE_AIR_GAP_RECEIVER_TSSTATUS_IS_ENCOUNTERED =
+ "Pipe air gap receiver {}: TSStatus {} is encountered at the air gap receiver, will ignore.";
+ public static final String PIPE_DATA_TRANSPORT_ERROR = "Pipe data transport error, {}";
+ public static final String PIPE_INSERTING_TABLET_TO_CASTING_TYPE_FROM =
+ "Pipe: Inserting tablet to {}.{}. Casting type from {} to {}.";
+ public static final String RECEIVERS_EXECUTOR_IS_CLOSED = "Receivers-{}' executor is closed.";
+ public static final String RECEIVER_EXIT_SUCCESSFULLY = "Receiver-{} exit successfully.";
+ public static final String RECEIVER_ID = "Receiver id = {}: {}";
+ public static final String RECEIVER_ID_THE_NUMBER_OF_DEVICE_PATHS =
+ "Receiver id = {}: The number of device paths is not equal to sub-status in statement "
+ + "{}: {}.";
+ public static final String RECEIVER_ID_UNKNOWN_PIPEREQUESTTYPE_RESPONSE_STATUS =
+ "Receiver id = {}: Unknown PipeRequestType, response status = {}.";
+ public static final String RECEIVER_ID_UNSUPPORTED_STATEMENT_TYPE_FOR_REDIRECTION =
+ "Receiver id = {}: Unsupported statement type {} for redirection.";
+ public static final String RECEIVER_IS_READY = "Receiver-{} is ready";
+ public static final String REGISTER_WITH_INTERVAL_IN_SECONDS_SUCCESSFULLY =
+ "Register {} with interval in seconds {} successfully.";
+ public static final String SOCKET_CLOSED_WHEN_EXECUTING_READTILLFULL =
+ "Socket closed when executing readTillFull.";
+ public static final String SOCKET_CLOSED_WHEN_EXECUTING_SKIPTILLENOUGH =
+ "Socket closed when executing skipTillEnough.";
+ public static final String START_LOAD_PIPEDATA_WITH_SERIALIZE_NUMBER_AND =
+ "Start load pipeData with serialize number {} and type {},value={}";
+ public static final String STORAGE_ENGINE_READONLY = "storage engine readonly";
+ public static final String SYNC_START_AT_TO_IS_DONE = "Sync {} start at {} to {} is done.";
+ public static final String TEMPORARY_UNAVAILABLE_EXCEPTION_ENCOUNTERED_AT_AIR_GAP =
+ "Temporary unavailable exception encountered at air gap receiver, will retry locally.";
+ public static final String THE_IOTCONSENSUSV2_REQUEST_VERSION_IS_DIFFERENT_FROM =
+ "The iotConsensusV2 request version {} is different from the sender request version {}, "
+ + "the receiver will be reset to the sender request version.";
+ public static final String THE_START_INDEX_OF_DATA_SYNC_IS =
+ "The start index {} of data sync is not valid. The file is not exist and start index "
+ + "should equal to 0).";
+ public static final String THE_START_INDEX_OF_DATA_SYNC_IS_1 =
+ "The start index {} of data sync is not valid. The start index of the file should equal "
+ + "to {}.";
+ public static final String THRIFT_CONNECTION_IS_NOT_ALIVE = "Thrift connection is not alive.";
+ public static final String TSFILECHECKER_DID_NOT_TERMINATE_WITHIN_S =
+ "TsFileChecker did not terminate within {}s";
+ public static final String TSFILECHECKER_THREAD_STILL_DOESN_T_EXIT_AFTER =
+ "TsFileChecker Thread {} still doesn't exit after 30s";
+ public static final String UNHANDLED_EXCEPTION_DURING_PIPE_AIR_GAP_RECEIVER =
+ "Unhandled exception during pipe air gap receiver listening";
+ public static final String UNSUPPORTED_DATA_TYPE = "Unsupported data type: ";
+
+ // ===================== RESOURCE =====================
+
+ public static final String CANNOT_GET_DATA_REGION_IDS_USE_DEFAULT =
+ "Cannot get data region ids, use default lock segment size: {}";
+ public static final String EXPAND_CALLBACK_IS_NOT_SUPPORTED_IN_PIPEFIXEDMEMORYBLOCK =
+ "Expand callback is not supported in PipeFixedMemoryBlock";
+ public static final String EXPAND_METHOD_IS_NOT_SUPPORTED_IN_PIPEFIXEDMEMORYBLOCK =
+ "Expand method is not supported in PipeFixedMemoryBlock";
+ public static final String FAILED_TO_CACHEDEVICEISALIGNEDMAPIFABSENT_FOR_TSFILE_BECAUSE_MEMORY =
+ "Failed to cacheDeviceIsAlignedMapIfAbsent for tsfile {}, because memory usage is high";
+ public static final String FAILED_TO_CACHEOBJECTSIFABSENT_FOR_TSFILE_BECAUSE_MEMORY =
+ "Failed to cacheObjectsIfAbsent for tsfile {}, because memory usage is high";
+ public static final String FAILED_TO_ESTIMATE_SIZE_FOR_INSERTNODE =
+ "Failed to estimate size for InsertNode: {}";
+ public static final String FAILED_TO_EXECUTE_THE_EXPAND_CALLBACK =
+ "Failed to execute the expand callback.";
+ public static final String FAILED_TO_EXECUTE_THE_SHRINK_CALLBACK =
+ "Failed to execute the shrink callback.";
+ public static final String FAILED_TO_GET_FILE_SIZE_OF_LINKED =
+ "failed to get file size of linked TsFile {}: ";
+ public static final String FORCEALLOCATEWITHRETRY_INTERRUPTED_WHILE_WAITING_FOR_AVAILABLE_MEMORY =
+ "forceAllocateWithRetry: interrupted while waiting for available memory";
+ public static final String FORCEALLOCATE_INTERRUPTED_WHILE_WAITING_FOR_AVAILABLE_MEMORY =
+ "forceAllocate: interrupted while waiting for available memory";
+ public static final String FORCERESIZE_CANNOT_RESIZE_A_NULL_OR_RELEASED =
+ "forceResize: cannot resize a null or released memory block";
+ public static final String FORCERESIZE_INTERRUPTED_WHILE_WAITING_FOR_AVAILABLE_MEMORY =
+ "forceResize: interrupted while waiting for available memory";
+ public static final String INTERRUPTED_WHILE_WAITING_FOR_THE_LOCK =
+ "Interrupted while waiting for the lock.";
+ public static final String IS_RELEASED_AFTER_THREAD_INTERRUPTION =
+ "{} is released after thread interruption.";
+ public static final String PIPEPERIODICALLOGREDUCER_IS_ALLOCATED_TO_BYTES =
+ "PipePeriodicalLogReducer is allocated to {} bytes.";
+ public static final String PIPETSFILERESOURCE_CACHED_DEVICEISALIGNEDMAP_FOR_TSFILE =
+ "PipeTsFileResource: Cached deviceIsAlignedMap for tsfile {}.";
+ public static final String PIPETSFILERESOURCE_CACHED_OBJECTS_FOR_TSFILE =
+ "PipeTsFileResource: Cached objects for tsfile {}.";
+ public static final String PIPETSFILERESOURCE_CLOSED_TSFILE_AND_CLEANED_UP =
+ "PipeTsFileResource: Closed tsfile {} and cleaned up.";
+ public static final String PIPETSFILERESOURCE_FAILED_TO_CACHE_OBJECTS_FOR_TSFILE =
+ "PipeTsFileResource: Failed to cache objects for tsfile {} in cache, because memory "
+ + "usage is high";
+ public static final String PIPETSFILERESOURCE_FAILED_TO_DELETE_TSFILE_WHEN_CLOSING =
+ "PipeTsFileResource: Failed to delete tsfile {} when closing, because {}. Please "
+ + "MANUALLY delete it.";
+ public static final String PIPETSFILERESOURCE_S_REFERENCE_COUNT_IS_DECREASED_TO =
+ "PipeTsFileResource's reference count is decreased to below 0.";
+ public static final String PIPE_HARDLINK_DIR_FOUND_DELETING_IT_RESULT =
+ "Pipe hardlink dir found, deleting it: {}, result: {}";
+ public static final String PIPE_SNAPSHOT_DIR_FOUND_DELETING_IT =
+ "Pipe snapshot dir found, deleting it: {},";
+ public static final String SHRINK_CALLBACK_IS_NOT_SUPPORTED_IN_PIPEFIXEDMEMORYBLOCK =
+ "Shrink callback is not supported in PipeFixedMemoryBlock";
+ public static final String SHRINK_METHOD_IS_NOT_SUPPORTED_IN_PIPEFIXEDMEMORYBLOCK =
+ "Shrink method is not supported in PipeFixedMemoryBlock";
+ public static final String THE_MEMORY_BLOCK_HAS_BEEN_RELEASED =
+ "The memory block has been released";
+ public static final String THE_MULTIPLE_N_MUST_BE_GREATER_THAN =
+ "The multiple n must be greater than 0";
+ public static final String TRYALLOCATE_ALLOCATED_MEMORY_TOTAL_MEMORY_SIZE_BYTES =
+ "tryAllocate: allocated memory, total memory size {} bytes, used memory size {} bytes, "
+ + "original requested memory size {} bytes, actual requested memory size {} bytes";
+ public static final String TRYALLOCATE_FAILED_TO_ALLOCATE_MEMORY_TOTAL_MEMORY =
+ "tryAllocate: failed to allocate memory, total memory size {} bytes, used memory size {} "
+ + "bytes, requested memory size {} bytes";
+ public static final String TRYEXPANDALLANDCHECKCONSISTENCY_MEMORY_USAGE_IS_NOT_CONSISTENT_WITH =
+ "tryExpandAllAndCheckConsistency: memory usage is not consistent with allocated blocks, "
+ + "usedMemorySizeInBytes is {} but sum of all blocks is {}";
+ public static final String TRYEXPANDALLANDCHECKCONSISTENCY_MEMORY_USAGE_OF_TABLETS_IS_NOT =
+ "tryExpandAllAndCheckConsistency: memory usage of tablets is not consistent with "
+ + "allocated blocks, usedMemorySizeInBytesOfTablets is {} but sum of all tablet blocks is "
+ + "{}";
+ public static final String TRYEXPANDALLANDCHECKCONSISTENCY_MEMORY_USAGE_OF_TSFILES_IS_NOT =
+ "tryExpandAllAndCheckConsistency: memory usage of tsfiles is not consistent with "
+ + "allocated blocks, usedMemorySizeInBytesOfTsFiles is {} but sum of all tsfile blocks is "
+ + "{}";
+
+ // ===================== METRIC =====================
+
+ public static final String FAILED_TO_DEREGISTER_PIPE_ASSIGNER_METRICS_PIPEDATAREGIONASSIGNER =
+ "Failed to deregister pipe assigner metrics, PipeDataRegionAssigner({}) does not exist";
+ public static final String FAILED_TO_DEREGISTER_PIPE_DATA_REGION_EXTRACTOR =
+ "Failed to deregister pipe data region extractor metrics, IoTDBDataRegionExtractor({}) "
+ + "does not exist";
+ public static final String FAILED_TO_DEREGISTER_PIPE_DATA_REGION_SINK =
+ "Failed to deregister pipe data region sink metrics, PipeSinkSubtask({}) does not exist";
+ public static final String FAILED_TO_DEREGISTER_PIPE_REMAINING_EVENT_AND =
+ "Failed to deregister pipe remaining event and time metrics, "
+ + "RemainingEventAndTimeOperator({}) does not exist";
+ public static final String FAILED_TO_DEREGISTER_PIPE_SCHEMA_REGION_CONNECTOR =
+ "Failed to deregister pipe schema region connector metrics, PipeConnectorSubtask({}) "
+ + "does not exist";
+ public static final String FAILED_TO_DEREGISTER_PIPE_SCHEMA_REGION_SOURCE =
+ "Failed to deregister pipe schema region source metrics, IoTDBSchemaRegionSource({}) "
+ + "does not exist";
+ public static final String FAILED_TO_DEREGISTER_PIPE_TSFILE_TO_TABLETS =
+ "Failed to deregister pipe tsfile to tablets metrics, pipeID({}) does not exist";
+ public static final String FAILED_TO_DEREGISTER_SCHEMA_REGION_LISTENER_METRICS =
+ "Failed to deregister schema region listener metrics, SchemaRegionListeningQueue({}) "
+ + "does not exist";
+ public static final String FAILED_TO_MARK_PIPE_DATA_REGION_EXTRACTOR =
+ "Failed to mark pipe data region extractor heartbeat event, IoTDBDataRegionExtractor({}) "
+ + "does not exist";
+ public static final String FAILED_TO_MARK_PIPE_DATA_REGION_EXTRACTOR_1 =
+ "Failed to mark pipe data region extractor tablet event, IoTDBDataRegionExtractor({}) "
+ + "does not exist";
+ public static final String FAILED_TO_MARK_PIPE_DATA_REGION_EXTRACTOR_2 =
+ "Failed to mark pipe data region extractor tsfile event, IoTDBDataRegionExtractor({}) "
+ + "does not exist";
+ public static final String FAILED_TO_MARK_PIPE_DATA_REGION_SINK =
+ "Failed to mark pipe data region sink tablet event, PipeSinkSubtask({}) does not exist";
+ public static final String FAILED_TO_MARK_PIPE_DATA_REGION_SINK_1 =
+ "Failed to mark pipe data region sink tsfile event, PipeSinkSubtask({}) does not exist";
+ public static final String FAILED_TO_MARK_PIPE_PROCESSOR_HEARTBEAT_EVENT =
+ "Failed to mark pipe processor heartbeat event, PipeProcessorSubtask({}) does not exist";
+ public static final String FAILED_TO_MARK_PIPE_PROCESSOR_TABLET_EVENT =
+ "Failed to mark pipe processor tablet event, PipeProcessorSubtask({}) does not exist";
+ public static final String FAILED_TO_MARK_PIPE_PROCESSOR_TSFILE_EVENT =
+ "Failed to mark pipe processor tsfile event, PipeProcessorSubtask({}) does not exist";
+ public static final String FAILED_TO_MARK_PIPE_REGION_COMMIT_REMAININGEVENTANDTIMEOPERATOR =
+ "Failed to mark pipe region commit, RemainingEventAndTimeOperator({}) does not exist";
+ public static final String FAILED_TO_MARK_PIPE_SCHEMA_REGION_WRITE =
+ "Failed to mark pipe schema region write plan event, PipeConnectorSubtask({}) does not exist";
+ public static final String FAILED_TO_MARK_PIPE_TSFILE_TO_TABLETS =
+ "Failed to mark pipe tsfile to tablets invocation, pipeID({}) does not exist";
+ public static final String FAILED_TO_RECORD_PIPE_TSFILE_TO_TABLETS =
+ "Failed to record pipe tsfile to tablets time, pipeID({}) does not exist";
+ public static final String FAILED_TO_RECORD_TABLET_GENERATED_PIPEID_DOES =
+ "Failed to record tablet generated, pipeID({}) does not exist";
+ public static final String FAILED_TO_SET_RECENT_PROCESSED_TSFILE_EPOCH =
+ "Failed to set recent processed tsfile epoch state, PipeRealtimeDataRegionExtractor({}) "
+ + "does not exist";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_ASSIGNER_METRICS =
+ "Failed to unbind from pipe assigner metrics, assigner map not empty";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_DATA_REGION =
+ "Failed to unbind from pipe data region sink metrics, sink map not empty";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_EXTRACTOR_METRICS =
+ "Failed to unbind from pipe extractor metrics, extractor map not empty";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_PROCESSOR_METRICS =
+ "Failed to unbind from pipe processor metrics, processor map not empty";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_REMAINING_EVENT =
+ "Failed to unbind from pipe remaining event and time metrics, "
+ + "RemainingEventAndTimeOperator map not empty";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_SCHEMA_REGION =
+ "Failed to unbind from pipe schema region connector metrics, connector map not empty";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_SCHEMA_REGION_1 =
+ "Failed to unbind from pipe schema region extractor metrics, extractor map not empty";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_SCHEMA_REGION_2 =
+ "Failed to unbind from pipe schema region listener metrics, listening queue map not empty";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_TSFILE_TO =
+ "Failed to unbind from pipe tsfile to tablets metrics, pipe map is not empty, pipe: {}";
+
+ // ---------------------------------------------------------------------------
+ // pipe – AbstractSameTypeNumericOperator
+ // ---------------------------------------------------------------------------
+ public static final String UNSUPPORTED_OUTPUT_DATATYPE_FMT = "Unsupported output datatype %s";
+
+ // ---------------------------------------------------------------------------
+ // pipe – IoTDBDataRegionSource
+ // ---------------------------------------------------------------------------
+ public static final String ILLEGAL_TREE_PATTERN_FMT = "Pattern \"%s\" is illegal.";
+
+ // ---------------------------------------------------------------------------
+ // pipe – OpcUaServerBuilder
+ // ---------------------------------------------------------------------------
+ public static final String UNABLE_CREATE_SECURITY_DIR = "Unable to create security dir: ";
+
+ // ---------------------------------------------------------------------------
+ // pipe – PipeDataNodePluginAgent
+ // ---------------------------------------------------------------------------
+ public static final String PLUGIN_NOT_REGISTERED_FMT = "plugin %s is not registered.";
+
+ private DataNodePipeMessages() {}
+}
diff --git a/iotdb-core/datanode/src/main/i18n/en/org/apache/iotdb/db/i18n/DataNodeQueryMessages.java b/iotdb-core/datanode/src/main/i18n/en/org/apache/iotdb/db/i18n/DataNodeQueryMessages.java
new file mode 100644
index 0000000000000..4f5c8ee0bd520
--- /dev/null
+++ b/iotdb-core/datanode/src/main/i18n/en/org/apache/iotdb/db/i18n/DataNodeQueryMessages.java
@@ -0,0 +1,1393 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.i18n;
+
+public final class DataNodeQueryMessages {
+
+ // --- Common ---
+
+ public static final String NO_MATCHED_DATABASE_PLEASE_CHECK_THE_PATH =
+ "No matched database. Please check the path ";
+ public static final String THIS_NODE_ISN_T_INSTANCE_OF_SCHEMAENTITYNODE =
+ "This node isn't instance of SchemaEntityNode.";
+ public static final String THIS_NODE_ISN_T_INSTANCE_OF_SCHEMAMEASUREMENTNODE =
+ "This node isn't instance of SchemaMeasurementNode.";
+
+ // --- Execution ---
+
+ public static final String ERROR_SETTING_FUTURE_STATE_FOR =
+ "Error setting future state for {}";
+ public static final String ERROR_NOTIFYING_STATE_CHANGE_LISTENER_FOR =
+ "Error notifying state change listener for {}";
+ public static final String SERVER_IS_SHUTTING_DOWN =
+ "Server is shutting down";
+
+ // --- Execution / Aggregation ---
+
+ public static final String INVALID_AGGREGATION_FUNCTION =
+ "Invalid Aggregation function: ";
+ public static final String UNKNOWN_DATA_TYPE =
+ "Unknown data type: ";
+ public static final String COUNT_IF_WITH_SLIDINGWINDOW_IS_NOT_SUPPORTED_NOW =
+ "COUNT_IF with slidingWindow is not supported now";
+ public static final String TIME_DURATION_WITH_SLIDINGWINDOW_IS_NOT_SUPPORTED_NOW =
+ "TIME_DURATION with slidingWindow is not supported now";
+ public static final String MODE_WITH_SLIDINGWINDOW_IS_NOT_SUPPORTED_NOW =
+ "MODE with slidingWindow is not supported now";
+ public static final String INVALID_AGGREGATION_TYPE =
+ "Invalid Aggregation Type: ";
+
+ // --- Execution / Driver ---
+
+ public static final String QUERYDATASOURCE_SHOULD_NEVER_BE_NULL =
+ "QueryDataSource should never be null!";
+
+ // --- Execution / Exchange ---
+
+ public static final String SOURCE_HANDLE_FAILED_DUE_TO =
+ "Source handle failed due to: ";
+ public static final String SINK_FAILED_DUE_TO =
+ "Sink failed due to";
+ public static final String ISINKCHANNEL_FAILED_DUE_TO =
+ "ISinkChannel failed due to";
+ public static final String SINK_HANDLE_FAILED_DUE_TO =
+ "Sink handle failed due to";
+ public static final String MPPDATAEXCHANGEMANAGER_INIT_SUCCESSFULLY =
+ "MPPDataExchangeManager init successfully";
+ public static final String QUEUE_HAS_BEEN_DESTROYED =
+ "queue has been destroyed";
+ public static final String SINK_HANDLE_IS_BLOCKED =
+ "Sink handle is blocked.";
+ public static final String LOCALSINKCHANNEL_IS_ABORTED =
+ "LocalSinkChannel is ABORTED.";
+ public static final String ERROR_OCCURRED_WHEN_TRY_TO_ABORT_CHANNEL =
+ "Error occurred when try to abort channel.";
+ public static final String ERROR_OCCURRED_WHEN_TRY_TO_CLOSE_CHANNEL =
+ "Error occurred when try to close channel.";
+ public static final String SHUFFLESINKHANDLE_IS_ABORTED =
+ "ShuffleSinkHandle is aborted.";
+ public static final String UNSUPPORTED_TYPE_OF_SHUFFLE_STRATEGY =
+ "Unsupported type of shuffle strategy";
+ public static final String SINKCHANNEL_IS_ABORTED_OR_CLOSED =
+ "SinkChannel is aborted or closed. ";
+ public static final String THE_DATA_BLOCK_DOESN_T_EXIST_SEQUENCE_ID =
+ "The data block doesn't exist. Sequence ID: ";
+ public static final String THE_TSBLOCK_DOESNT_EXIST_SEQUENCE_ID_REMAINING =
+ "The TsBlock doesn't exist. Sequence ID is {}, remaining map is {}";
+ public static final String SINKCHANNEL_IS_ABORTED =
+ "SinkChannel is aborted.";
+ public static final String FAILED_TO_SEND_NEW_DATA_BLOCK_EVENT_ATTEMPT =
+ "Failed to send new data block event, attempt times: {}";
+ public static final String FAILED_TO_SEND_END_OF_DATA_BLOCK_EVENT =
+ "Failed to send end of data block event, attempt times: {}";
+ public static final String FAILED_TO_SEND_END_OF_DATA_BLOCK_EVENT_2 =
+ "Failed to send end of data block event after all retry";
+ public static final String SOURCE_HANDLE_IS_BLOCKED =
+ "Source handle is blocked.";
+ public static final String RESERVED_DATA_BLOCK_SIZE_IS_NULL =
+ "Reserved data block size is null.";
+ public static final String DATA_BLOCK_SIZE_IS_NULL =
+ "Data block size is null.";
+ public static final String SOURCE_HANDLE_IS_ABORTED =
+ "Source handle is aborted.";
+ public static final String SOURCEHANDLE_IS_CLOSED =
+ "SourceHandle is closed.";
+
+ // --- Execution / Executor ---
+
+ public static final String EXECUTE_FRAGMENTINSTANCE_IN_CONSENSUSGROUP_FAILED =
+ "Execute FragmentInstance in ConsensusGroup {} failed.";
+ public static final String EXECUTE_FRAGMENTINSTANCE_IN_QUERYEXECUTOR_FAILED =
+ "Execute FragmentInstance in QueryExecutor failed.";
+ public static final String FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS =
+ "Failed in the write API executing the consensus layer due to: ";
+
+ // --- Execution / Fragment ---
+
+ public static final String UNKNOWN_EXCEPTION =
+ "[Unknown exception]: ";
+ public static final String WAIT_MS_FOR_ALL_DRIVERS_CLOSED =
+ "Wait {}ms for all Drivers closed";
+ public static final String EXCEPTION_HAPPENED_WHEN_EXECUTING_UDTF =
+ "Exception happened when executing UDTF: ";
+ public static final String ERROR_WHEN_CREATE_FRAGMENTINSTANCEEXECUTION =
+ "error when create FragmentInstanceExecution.";
+ public static final String EXECUTE_ERROR_CAUSED_BY =
+ "Execute error caused by ";
+
+ // --- Execution / Memory ---
+
+ public static final String FREE_MORE_MEMORY_THAN_HAS_BEEN_RESERVED =
+ "Free more memory than has been reserved.";
+
+ // --- Execution / Operator ---
+
+ public static final String UNKNOWN_DATA_TYPE_2 =
+ "Unknown data type ";
+ public static final String ERROR_OCCURRED_WHEN_LOGGING_INTERMEDIATE_RESULT_OF_ANALYZE =
+ "Error occurred when logging intermediate result of analyze.";
+
+ // --- Execution / Operator / Process ---
+
+ public static final String GETWRITTENCOUNT_MEASUREMENT_IS_NOT_SUPPORTED =
+ "getWrittenCount(measurement) is not supported";
+ public static final String GETWRITTENCOUNT_IS_NOT_SUPPORTED =
+ "getWrittenCount() is not supported";
+ public static final String THE_MEMORY_THRESHOLD_MUST_BE_GREATER_THAN_0 =
+ "The memory threshold must be greater than 0.";
+ public static final String FAILED_TO_CREATE_DIRECTORIES =
+ "Failed to create directories: ";
+ public static final String TARGET_FILE_ALREADY_EXISTS =
+ "Target file already exists: ";
+ public static final String FAILED_TO_CREATE_FILE =
+ "Failed to create file: ";
+ public static final String DATA_TYPE_OF_TARGET_TIME_COLUMN_IS_NOT =
+ "Data type of target time column is not TIMESTAMP";
+ public static final String DUPLICATE_COLUMN_NAMES_IN_QUERY_DATASET =
+ "Duplicate column names in query dataset.";
+ public static final String SOME_SPECIFIED_TAG_COLUMNS_ARE_NOT_EXIST_IN =
+ "Some specified tag columns are not exist in query dataset.";
+ public static final String NUMBER_OF_FIELD_COLUMNS_SHOULD_BE_LARGER_THAN =
+ "Number of field columns should be larger than 0.";
+ public static final String ALL_CHILD_SHOULD_HAVE_SAME_TIME_COLUMN_RESULT =
+ "All child should have same time column result!";
+ public static final String LAST_READ_RESULT_SHOULD_ONLY_HAVE_ONE_RECORD =
+ "last read result should only have one record";
+
+ // --- Execution / Operator / Schema ---
+
+ public static final String FAILED_TO_CONVERT_NODE_PATH_TO_PARTIALPATH =
+ "Failed to convert node path to PartialPath {}";
+
+ // --- Execution / Operator / Source ---
+
+ public static final String ERROR_OCCURS_WHEN_SCANNING_ACTIVE_TIME_SERIES =
+ "Error occurs when scanning active time series.";
+ public static final String ERROR_WHILE_SCANNING_THE_FILE =
+ "Error while scanning the file";
+ public static final String ERROR_HAPPENED_WHILE_SCANNING_THE_FILE =
+ "Error happened while scanning the file";
+ public static final String ALL_CACHED_CHUNKS_SHOULD_BE_CONSUMED_FIRST =
+ "all cached chunks should be consumed first";
+ public static final String OVERLAPPED_DATA_SHOULD_BE_CONSUMED_FIRST =
+ "overlapped data should be consumed first";
+ public static final String NO_MORE_BATCH_DATA =
+ "No more batch data";
+ public static final String GETALLSATISFIEDPAGEDATA_SHOULDN_T_BE_CALLED_HERE =
+ "getAllSatisfiedPageData() shouldn't be called here";
+ public static final String GETPAGEREADER_SHOULDN_T_BE_CALLED_HERE =
+ "getPageReader() shouldn't be called here";
+ public static final String UNSUPPORTED_COLUMN_TYPE =
+ "Unsupported column type: ";
+ public static final String FAIL_TO_CLOSE_CTEDATAREADER =
+ "Fail to close CteDataReader";
+ public static final String UNKNOWN_TABLE =
+ "Unknown table: ";
+ public static final String FAILED_TO_CLOSE_READER_IN_TABLEDISKUSAGESUPPLIER =
+ "Failed to close reader in TableDiskUsageSupplier";
+ public static final String UNSUPPORTED_CATEGORY =
+ "Unsupported category: ";
+
+ // --- Execution / Operator / Window ---
+
+ public static final String UNSUPPORTED_INFERENCE_WINDOW_TYPE =
+ "Unsupported inference window type: ";
+
+ // --- Execution / Schedule ---
+
+ public static final String EXECUTOR_FAILED_TO_POLL_DRIVER_TASK_FROM_QUEUE =
+ "Executor {} failed to poll driver task from queue";
+ public static final String DRIVERTASK_SHOULD_NEVER_BE_NULL =
+ "DriverTask should never be null";
+ public static final String EXECUTEFAILED =
+ "[ExecuteFailed]";
+ public static final String EXECUTOR_EXITS_BECAUSE_IT_IS_CLOSED =
+ "Executor {} exits because it is closed.";
+ public static final String CLEAR_DRIVERTASK_FAILED =
+ "Clear DriverTask failed";
+ public static final String PUSHED_ELEMENT_IS_NULL =
+ "pushed element is null";
+
+ // --- Execution / Warnings ---
+
+ public static final String CODE_IS_NEGATIVE =
+ "code is negative";
+
+ // --- Metric ---
+
+ public static final String UNSUPPORTED_STAGE_IN_TREE_MODEL =
+ "Unsupported stage in tree model: ";
+ public static final String UNSUPPORTED_STAGE_IN_TABLE_MODEL =
+ "Unsupported stage in table model: ";
+
+ // --- Plan ---
+
+ public static final String TOPOLOGY_LATEST_VIEW_FROM_CONFIG_NODE =
+ "[Topology] latest view from config-node: {}";
+ public static final String EXPIRED_QUERIES_INFO_CLEAR_THREAD_IS_SUCCESSFULLY_STARTED =
+ "Expired-Queries-Info-Clear thread is successfully started.";
+ public static final String COST_MS =
+ "Cost: {} ms, {}";
+
+ // --- Plan / Analyze ---
+
+ public static final String COMPUTEDATAPARTITIONPARAMS_FOR =
+ "computeDataPartitionParams for ";
+ public static final String UNSUPPORTED_OPERATOR =
+ "Unsupported operator: ";
+ public static final String UNSUPPORTED_EXPRESSION =
+ "Unsupported expression: ";
+ public static final String ONLY_SUPPORT_AND_OPERATOR_IN_DELETION =
+ "Only support AND operator in deletion";
+ public static final String LEFT_HAND_EXPRESSION_IS_NOT_AN_IDENTIFIER =
+ "Left hand expression is not an identifier: ";
+ public static final String THE_LEFT_HAND_VALUE_MUST_BE_AN_IDENTIFIER =
+ "The left hand value must be an identifier: ";
+ public static final String THE_OPERATOR_OF_TAG_PREDICATE_MUST_BE_FOR =
+ "The operator of tag predicate must be '=' for ";
+ public static final String ONLY_TIME_FILTERS_ARE_SUPPORTED_IN_LAST_QUERY =
+ "Only time filters are supported in LAST query";
+ public static final String VIEWS_CANNOT_BE_USED_IN_GROUP_BY_TAGS =
+ "Views cannot be used in GROUP BY TAGS query yet.";
+ public static final String ONLY_TIME_FILTERS_ARE_SUPPORTED_IN_GROUP_BY =
+ "Only time filters are supported in GROUP BY TAGS query";
+ public static final String UNSUPPORTED_WINDOW_TYPE =
+ "Unsupported window type";
+ public static final String AGGREGATION_EXPRESSION_SHOULDN_T_EXIST_IN_GROUP_BY =
+ "Aggregation expression shouldn't exist in group by clause";
+ public static final String ONLY_SUPPORT_NUMERIC_TYPE_WHEN_DELTA_0 =
+ "Only support numeric type when delta != 0";
+ public static final String ONLY_SUPPORT_BOOLEAN_TYPE_IN_PREDICT_OF_GROUP =
+ "Only support boolean type in predict of group by series";
+ public static final String GROUP_BY_MONTH_DOESN_T_SUPPORT_ORDER_BY =
+ "Group by month doesn't support order by time desc now.";
+ public static final String NO_RUNNING_DATANODES =
+ "no Running DataNodes";
+ public static final String AN_ERROR_OCCURRED_WHEN_SERIALIZING_PATTERN_TREE =
+ "An error occurred when serializing pattern tree";
+ public static final String EXPRESSION_IN_GROUP_BY_SHOULD_INDICATE_ONE_VALUE =
+ "Expression in group by should indicate one value";
+ public static final String EXPRESSION_IN_ORDER_BY_SHOULD_INDICATE_ONE_VALUE =
+ "Expression in order by should indicate one value";
+ public static final String SHOULDN_T_ATTACH_HERE =
+ "shouldn't attach here";
+ public static final String SELECT_INTO_THE_I_OF_SHOULD_BE_AN =
+ "select into: the i of ${i} should be an integer.";
+ public static final String FAILED_TO_GET_DATABASE_MAP =
+ "Failed to get database Map";
+ public static final String LOAD_ANALYSIS_STAGE_ALL_TSFILES_HAVE_BEEN_ANALYZED =
+ "Load - Analysis Stage: all tsfiles have been analyzed.";
+ public static final String ASYNC_LOAD_HAS_FAILED_AND_IS_NOW_TRYING =
+ "Async Load has failed, and is now trying to load sync";
+ public static final String TSFILE_IS_EMPTY =
+ "TsFile {} is empty.";
+ public static final String THE_ENCRYPTION_WAY_OF_THE_TSFILE_IS_NOT =
+ "The encryption way of the TsFile is not supported.";
+ public static final String EMPTY_FILE_DETECTED_WILL_SKIP_LOADING_THIS_FILE =
+ "Empty file detected, will skip loading this file: {}";
+ public static final String AUTO_CREATE_OR_VERIFY_SCHEMA_ERROR =
+ "Auto create or verify schema error.";
+ public static final String FAILED_TO_FIND_TAG_COLUMN_MAPPING_FOR_TABLE =
+ "Failed to find tag column mapping for table {}";
+ public static final String AUTO_CREATE_DATABASE_FAILED_BECAUSE =
+ "Auto create database failed because: ";
+
+ // --- Plan / Execution ---
+
+ public static final String REACHMAXRETRYCOUNT =
+ "[ReachMaxRetryCount]";
+ public static final String ERROR_WHEN_EXECUTING_QUERY =
+ "error when executing query. {}";
+ public static final String WAITBEFORERETRY_WAIT_MS =
+ "[WaitBeforeRetry] wait {}ms.";
+ public static final String INTERRUPTED_WHEN_WAITING_RETRY =
+ "interrupted when waiting retry";
+ public static final String RETRY_RETRY_COUNT_IS =
+ "[Retry] retry count is: {}";
+ public static final String RESULTHANDLEABORTED =
+ "[ResultHandleAborted]";
+ public static final String UNSUPPORTED_DATABASE_PROPERTY_KEY =
+ "Unsupported database property key: ";
+ public static final String A_TABLE_CANNOT_HAVE_MORE_THAN_ONE_TIME =
+ "A table cannot have more than one time column";
+ public static final String THE_TIME_COLUMN_S_TYPE_SHALL_BE_TIMESTAMP =
+ "The time column's type shall be 'timestamp'.";
+ public static final String THE_TABLE_S_OLD_NAME_SHALL_NOT_BE =
+ "The table's old name shall not be equal to the new one.";
+ public static final String ADDING_TIME_COLUMN_IS_NOT_SUPPORTED =
+ "Adding TIME column is not supported.";
+ public static final String THE_COLUMN_S_OLD_NAME_SHALL_NOT_BE =
+ "The column's old name shall not be equal to the new one.";
+ public static final String DUPLICATED_PROPERTY =
+ "Duplicated property: ";
+ public static final String TABLE_PROPERTY =
+ "Table property '";
+ public static final String UNKNOWN_TYPE =
+ "Unknown type: %s";
+ public static final String FAILED_TO_CHECK_CONFIG_ITEM_PERMISSION =
+ "Failed to check config item permission";
+ public static final String CONFIGTASK_IS_NOT_IMPLEMENTED_FOR =
+ "ConfigTask is not implemented for: ";
+ public static final String FAILED_TO_GET_EXECUTABLE_FOR_UDF_USING_URI =
+ "Failed to get executable for UDF({}) using URI: {}.";
+ public static final String FAILED_TO_DROP_FUNCTION =
+ "[{}] Failed to drop function {}.";
+ public static final String FAILED_TO_DROP_TRIGGER =
+ "[{}] Failed to drop trigger {}.";
+ public static final String CANNOT_REMOVE_INVALID_NODEIDS =
+ "Cannot remove invalid nodeIds:{}";
+ public static final String STARTING_TO_REMOVE_DATANODE_WITH_NODEIDS =
+ "Starting to remove DataNode with nodeIds: {}";
+ public static final String START_TO_REMOVE_DATANODE_REMOVED_DATANODES_ENDPOINT =
+ "Start to remove datanode, removed DataNodes endpoint: {}";
+ public static final String SUBMIT_REMOVE_DATANODES_RESULT =
+ "Submit Remove DataNodes result {} ";
+ public static final String STARTING_TO_REMOVE_CONFIGNODE_WITH_NODE_ID =
+ "Starting to remove ConfigNode with node-id {}";
+ public static final String CONFIGNODE_IS_REMOVED =
+ "ConfigNode: {} is removed.";
+ public static final String STARTING_TO_REMOVE_AINODE =
+ "Starting to remove AINode";
+ public static final String REMOVE_AINODE_FAILED_BECAUSE_THERE_IS_NO_AINODE =
+ "Remove AINode failed because there is no AINode in the cluster.";
+ public static final String AINODE_IN_THE_CLUSTER_IS_REMOVED =
+ "AINode in the cluster is removed.";
+ public static final String FAILED_TO_HANDLETRANSFERCONFIGPLAN_STATUS_IS =
+ "Failed to handleTransferConfigPlan, status is {}.";
+ public static final String FAILED_TO_FETCHTABLES_STATUS_IS =
+ "Failed to fetchTables, status is {}.";
+ public static final String FAILED_TO_HANDLEPIPECONFIGCLIENTEXIT_STATUS_IS =
+ "Failed to handlePipeConfigClientExit, status is {}.";
+ public static final String FAILED_TO_HANDLEPIPECONFIGCLIENTEXIT =
+ "Failed to handlePipeConfigClientExit.";
+ public static final String NOT_SUPPORT_CURRENT_STATEMENT =
+ "Not support current statement";
+ public static final String WRONG_REQUEST_TYPE =
+ "Wrong request type";
+ public static final String WRONG_UNIT_TYPE =
+ "Wrong unit type";
+
+ // --- Plan / Expression ---
+
+ public static final String INVALID_EXPRESSION_TYPE =
+ "Invalid expression type: ";
+ public static final String UNSUPPORTED_EXPRESSION_TYPE =
+ "Unsupported expression type: ";
+ public static final String FUNCTION_CAST_MUST_SPECIFY_A_TARGET_DATA_TYPE =
+ "Function Cast must specify a target data type.";
+ public static final String FUNCTION_REPLACE_MUST_SPECIFY_FROM_AND_TO_COMPONENT =
+ "Function REPLACE must specify from and to component.";
+ public static final String PLEASE_ENSURE_INPUT_IS_CORRECT =
+ "please ensure input[%s] is correct";
+ public static final String CASE_EXPRESSION_CANNOT_BE_USED_WITH_NON_MAPPABLE =
+ "CASE expression cannot be used with non-mappable UDF";
+ public static final String UNSUPPORTED_TRANSFORMER_ACCESS_STRATEGY =
+ "Unsupported transformer access strategy";
+ public static final String AGGREGATE_FUNCTIONS_ARE_NOT_SUPPORTED_IN_WHERE_CLAUSE =
+ "aggregate functions are not supported in WHERE clause";
+ public static final String IS_NULL_CANNOT_BE_PUSHED_DOWN =
+ "IS NULL cannot be pushed down";
+ public static final String TIMESTAMP_DOES_NOT_SUPPORT_IS_NULL_IS_NOT =
+ "TIMESTAMP does not support IS NULL/IS NOT NULL";
+ public static final String TIMESTAMP_DOES_NOT_SUPPORT_LIKE_NOT_LIKE =
+ "TIMESTAMP does not support LIKE/NOT LIKE";
+ public static final String TIMESTAMP_DOES_NOT_SUPPORT_REGEXP_NOT_REGEXP =
+ "TIMESTAMP does not support REGEXP/NOT REGEXP";
+ public static final String GROUPBYTIME_FILTER_CANNOT_EXIST_IN_VALUE_FILTER =
+ "GroupByTime filter cannot exist in value filter.";
+ public static final String IS_NULL_CAN_BE_PUSHED_DOWN =
+ "IS NULL can be pushed down";
+ public static final String GROUP_BY_TIME_CANNOT_BE_REVERSED =
+ "GROUP BY TIME cannot be reversed";
+
+ // --- Plan / Optimization ---
+
+ public static final String UNEXPECTED_PLAN_NODE =
+ "Unexpected plan node: ";
+ public static final String UNEXPECTED_PATH_TYPE =
+ "unexpected path type";
+ public static final String SOURCEPATH_MUST_BE_MEASUREMENTPATH_OR_ALIGNEDPATH =
+ "sourcePath must be MeasurementPath or AlignedPath";
+
+ // --- Plan / Parser ---
+
+ public static final String DATATYPE_MUST_BE_DECLARED =
+ "datatype must be declared";
+ public static final String UNSUPPORTED_ENCODING =
+ "Unsupported encoding: %s";
+ public static final String UNSUPPORTED_COMPRESSION =
+ "Unsupported compression: %s";
+ public static final String UNSUPPORTED_ENCODING_2 =
+ "unsupported encoding: %s";
+ public static final String UNSUPPORTED_COMPRESSOR =
+ "unsupported compressor: %s";
+ public static final String CREATE_ALIGNED_TIMESERIES_PROPERTY_IS_NOT_SUPPORTED_YET =
+ "create aligned timeseries: property is not supported yet.";
+ public static final String UNSUPPORTED_COMPRESSOR_2 =
+ "Unsupported compressor: %s";
+ public static final String PROPERTY_IS_UNSUPPORTED_YET =
+ "property %s is unsupported yet.";
+ public static final String THE_TIMESERIES_SHALL_NOT_BE_ROOT =
+ "The timeSeries shall not be root.";
+ public static final String UNSUPPORTED_DATATYPE =
+ "unsupported datatype: %s";
+ public static final String UNEXPECTED_FILTER_KEY =
+ "unexpected filter key";
+ public static final String URI_IS_EMPTY_PLEASE_SPECIFY_THE_URI =
+ "URI is empty, please specify the URI.";
+ public static final String INVALID_URI =
+ "Invalid URI: %s";
+ public static final String TRIGGER_DOES_NOT_SUPPORT_DELETE_AS_TRIGGER_EVENT =
+ "Trigger does not support DELETE as TRIGGER_EVENT for now.";
+ public static final String PLEASE_SPECIFY_TRIGGER_TYPE_STATELESS_OR_STATEFUL =
+ "Please specify trigger type: STATELESS or STATEFUL.";
+ public static final String RENAMING_VIEW_IS_NOT_SUPPORTED =
+ "Renaming view is not supported.";
+ public static final String VIEW_DOESN_T_SUPPORT_ALIAS =
+ "View doesn't support alias.";
+ public static final String MODELID_SHOULD_BE_2_64_CHARACTERS =
+ "ModelId should be 2-64 characters";
+ public static final String MODELID_SHOULD_NOT_START_WITH =
+ "ModelId should not start with '_'";
+ public static final String MODELID_CAN_ONLY_CONTAIN_LETTERS_NUMBERS_AND_UNDERSCORES =
+ "ModelId can only contain letters, numbers, and underscores";
+ public static final String DEVICE_ID_SHOULD_BE_CPU_OR_INTEGER =
+ "Device id should be 'cpu' or integer";
+ public static final String DATA_SHOULD_NOT_BE_SET_FOR_MODEL_TRAINING =
+ "data should not be set for model training";
+ public static final String DUPLICATED_GROUP_BY_KEY_LEVEL =
+ "duplicated group by key: LEVEL";
+ public static final String DUPLICATED_GROUP_BY_KEY_TAGS =
+ "duplicated group by key: TAGS";
+ public static final String UNKNOWN_GROUP_BY_TYPE =
+ "Unknown GROUP BY type.";
+ public static final String DUPLICATE_ALIAS_IN_SELECT_CLAUSE =
+ "duplicate alias in select clause";
+ public static final String CONSTANT_OPERAND_IS_NOT_ALLOWED =
+ "Constant operand is not allowed: ";
+ public static final String THE_TIME_WINDOWS_MAY_EXCEED_10000_PLEASE_ENSURE =
+ "The time windows may exceed 10000, please ensure your input.";
+ public static final String START_TIME_SHOULD_BE_SMALLER_THAN_ENDTIME_IN =
+ "Start time should be smaller than endTime in GroupBy";
+ public static final String KEEP_THRESHOLD_IN_GROUP_BY_CONDITION_SHOULD_BE =
+ "Keep threshold in group by condition should be set";
+ public static final String DUPLICATED_KEY_IN_GROUP_BY_TAGS =
+ "duplicated key in GROUP BY TAGS: ";
+ public static final String UNKNOWN_FILL_TYPE =
+ "Unknown FILL type.";
+ public static final String UNSUPPORTED_CONSTANT_VALUE_IN_FILL =
+ "Unsupported constant value in FILL: ";
+ public static final String OUT_OF_RANGE_LIMIT_N_N_SHOULD_BE =
+ "Out of range. LIMIT : N should be Int64.";
+ public static final String LIMIT_N_N_SHOULD_BE_GREATER_THAN_0 =
+ "LIMIT : N should be greater than 0.";
+ public static final String OFFSET_OFFSETVALUE_OFFSETVALUE_SHOULD_0 =
+ "OFFSET : OFFSETValue should >= 0.";
+ public static final String OUT_OF_RANGE_SLIMIT_SN_SN_SHOULD_BE =
+ "Out of range. SLIMIT : SN should be Int32.";
+ public static final String SLIMIT_SN_SN_SHOULD_BE_GREATER_THAN_0 =
+ "SLIMIT : SN should be greater than 0.";
+ public static final String SOFFSET_SOFFSETVALUE_SOFFSETVALUE_SHOULD_0 =
+ "SOFFSET : SOFFSETValue should >= 0.";
+ public static final String ONE_ROW_SHOULD_ONLY_HAVE_ONE_TIME_VALUE =
+ "One row should only have one time value";
+ public static final String INSERTSTATEMENT_SHOULD_CONTAIN_AT_LEAST_ONE_MEASUREMENT =
+ "InsertStatement should contain at least one measurement";
+ public static final String NEED_TIMESTAMPS_WHEN_INSERT_MULTI_ROWS =
+ "need timestamps when insert multi rows";
+ public static final String CAN_NOT_PARSE_TO_TIME =
+ "Can not parse %s to time";
+ public static final String PATH_CAN_NOT_START_WITH_ROOT_IN_SELECT =
+ "Path can not start with root in select clause.";
+ public static final String INPUT_TIMESTAMP_CANNOT_BE_EMPTY =
+ "input timestamp cannot be empty";
+ public static final String NOT_SUPPORT_FOR_THIS_ALIAS_PLEASE_ENCLOSE_IN =
+ "Not support for this alias, Please enclose in back quotes.";
+ public static final String STATEMENT_NEEDS_TARGET_PATHS =
+ "Statement needs target paths";
+ public static final String THE_DATATYPE_OF_TIMESTAMP_SHOULD_BE_LONG =
+ "The datatype of timestamp should be LONG.";
+ public static final String ATTRIBUTES_OF_FUNCTIONS_SHOULD_BE_QUOTED_WITH_OR =
+ "Attributes of functions should be quoted with '' or \"\"";
+ public static final String UNSUPPORTED_CONSTANT_VALUE =
+ "Unsupported constant value: ";
+ public static final String UNSUPPORTED_CONSTANT_OPERAND =
+ "Unsupported constant operand: ";
+ public static final String UNKNOWN_SYSTEM_STATUS_IN_SET_SYSTEM_COMMAND =
+ "Unknown system status in set system command.";
+ public static final String DEVICE_TEMPLATE_ALIAS_IS_NOT_SUPPORTED_YET =
+ "Device Template: alias is not supported yet.";
+ public static final String DEVICE_TEMPLATE_PROPERTY_IS_NOT_SUPPORTED_YET =
+ "Device Template: property is not supported yet.";
+ public static final String DEVICE_TEMPLATE_TAG_IS_NOT_SUPPORTED_YET =
+ "Device Template: tag is not supported yet.";
+ public static final String DEVICE_TEMPLATE_ATTRIBUTE_IS_NOT_SUPPORTED_YET =
+ "Device Template: attribute is not supported yet.";
+ public static final String EXPECTING_DATATYPE =
+ "Expecting datatype";
+ public static final String NOT_SUPPORT_FOR_THIS_SQL_IN_DROP_PIPE =
+ "Not support for this sql in DROP PIPE, please enter pipename.";
+ public static final String NOT_SUPPORT_FOR_THIS_SQL_IN_START_PIPE =
+ "Not support for this sql in START PIPE, please enter pipename.";
+ public static final String NOT_SUPPORT_FOR_THIS_SQL_IN_STOP_PIPE =
+ "Not support for this sql in STOP PIPE, please enter pipename.";
+  public static final String GET_REGION_ID_STATEMENT_EXPRESSION_MUST_BE_A =
+      "Get region id statement's expression must be a time expression";
+ public static final String WRONG_SPACE_QUOTA_TYPE =
+ "Wrong space quota type: ";
+ public static final String PLEASE_SET_THE_NUMBER_OF_DEVICES_GREATER_THAN =
+ "Please set the number of devices greater than 0";
+ public static final String PLEASE_SET_THE_NUMBER_OF_TIMESERIES_GREATER_THAN =
+ "Please set the number of timeseries greater than 0";
+ public static final String CANNOT_SET_THROTTLE_QUOTA_FOR_USER_ROOT =
+ "Cannot set throttle quota for user root.";
+ public static final String PLEASE_SET_THE_NUMBER_OF_REQUESTS_GREATER_THAN =
+ "Please set the number of requests greater than 0";
+ public static final String PLEASE_SET_THE_NUMBER_OF_CPU_GREATER_THAN =
+ "Please set the number of cpu greater than 0";
+ public static final String PLEASE_SET_THE_SIZE_GREATER_THAN_0 =
+ "Please set the size greater than 0";
+ public static final String PLEASE_SET_THE_DISK_SIZE_GREATER_THAN_0 =
+ "Please set the disk size greater than 0";
+ public static final String THERE_SHOULD_BE_ONLY_ONE_WINDOW_IN_CALL =
+ "There should be only one window in CALL INFERENCE.";
+ public static final String THE_CREATETABLEVIEW_IS_UNSUPPORTED_IN_TREE_SQL_DIALECT =
+ "The 'CreateTableView' is unsupported in tree sql-dialect.";
+ public static final String CURRENTLY_OTHER_EXPRESSIONS_ARE_NOT_SUPPORTED =
+ "Currently other expressions are not supported";
+ public static final String ALIGN_DESIGNATION_INCORRECT_AT =
+ "Align designation incorrect at: ";
+
+ // --- Plan / Relational / Analyzer ---
+
+ public static final String COLUMN_NOT_IN_GROUP_BY_CLAUSE =
+ "Column %s not in GROUP BY clause";
+ public static final String DATABASE_IS_NOT_SPECIFIED_FOR_INSERT =
+ "database is not specified for insert:";
+ public static final String IDENTIFIER_NOT_ALLOWED_IN_THIS_CONTEXT =
+ ".* not allowed in this context";
+ public static final String UNKNOWN_SIGN =
+ "Unknown sign: ";
+ public static final String DECIMALLITERAL_IS_NOT_SUPPORTED_YET =
+ "DecimalLiteral is not supported yet.";
+ public static final String GENERICLITERAL_IS_NOT_SUPPORTED_YET =
+ "GenericLiteral is not supported yet.";
+ public static final String DISTINCT_IS_NOT_SUPPORTED_FOR_NON_AGGREGATION_FUNCTIONS =
+ "DISTINCT is not supported for non-aggregation functions";
+ public static final String UNEXPECTED_PATTERN_RECOGNITION_FUNCTION =
+ "unexpected pattern recognition function ";
+ public static final String THE_INPUT_ARGUMENT_DOES_NOT_EXIST =
+ "the input argument does not exist";
+ public static final String MATCH_NUMBER_PATTERN_RECOGNITION_FUNCTION_TAKES_NO_ARGUMENTS =
+ "MATCH_NUMBER pattern recognition function takes no arguments";
+ public static final String UNEXPECTED_NAVIGATION_ANCHOR =
+ "Unexpected navigation anchor: ";
+ public static final String UNEXPECTED_MODE =
+ "Unexpected mode: ";
+ public static final String QUERY_TAKES_NO_PARAMETERS =
+ "Query takes no parameters";
+ public static final String NO_VALUE_PROVIDED_FOR_PARAMETER =
+ "No value provided for parameter";
+ public static final String CANNOT_EXTRACT_FROM =
+ "Cannot extract from %s";
+ public static final String UNKNOWN_IS_NOT_A_VALID_TYPE =
+ "UNKNOWN is not a valid type";
+ public static final String CANNOT_CAST_TO =
+ "Cannot cast %s to %s";
+ public static final String WINDOW_FRAME_START_CANNOT_BE_UNBOUNDED_FOLLOWING =
+ "Window frame start cannot be UNBOUNDED FOLLOWING";
+ public static final String WINDOW_FRAME_END_CANNOT_BE_UNBOUNDED_PRECEDING =
+ "Window frame end cannot be UNBOUNDED PRECEDING";
+ public static final String UNSUPPORTED_FRAME_TYPE =
+ "Unsupported frame type: ";
+ public static final String COLUMNS_ONLY_SUPPORT_TO_BE_USED_IN_SELECT =
+ "Columns only support to be used in SELECT and WHERE clause";
+ public static final String VS =
+ "%s: %s vs %s";
+ public static final String UNKNOWN_PATTERN_RECOGNITION_FUNCTION =
+ "Unknown pattern recognition function: ";
+ public static final String CANNOT_ACCESS_PREANALYZED_TYPES =
+ "Cannot access preanalyzed types";
+ public static final String CANNOT_ACCESS_RESOLVED_WINDOWS =
+ "Cannot access resolved windows";
+ public static final String REFERENCE_IS_AMBIGUOUS =
+ "Reference '%s' is ambiguous";
+ public static final String COLUMN_IS_AMBIGUOUS =
+ "Column '%s' is ambiguous";
+ public static final String UNSUPPORTED_NODE_TYPE =
+ "Unsupported node type: ";
+ public static final String CREATE_DATABASE_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "Create Database statement is not supported yet.";
+ public static final String ALTER_DATABASE_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "Alter Database statement is not supported yet.";
+ public static final String DROP_DATABASE_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "Drop Database statement is not supported yet.";
+ public static final String SHOW_DATABASE_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "Show Database statement is not supported yet.";
+ public static final String SHOW_TABLES_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "Show Tables statement is not supported yet.";
+ public static final String DESCRIBE_TABLE_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "Describe Table statement is not supported yet.";
+ public static final String ADD_COLUMN_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "Add Column statement is not supported yet.";
+ public static final String CREATE_INDEX_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "Create Index statement is not supported yet.";
+ public static final String DROP_INDEX_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "Drop Index statement is not supported yet.";
+ public static final String SHOW_INDEX_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "Show Index statement is not supported yet.";
+ public static final String UPDATE_CAN_ONLY_SPECIFY_ATTRIBUTE_COLUMNS =
+ "Update can only specify attribute columns.";
+ public static final String DROP_FUNCTION_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "Drop Function statement is not supported yet.";
+ public static final String SHOW_FUNCTION_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "Show Function statement is not supported yet.";
+ public static final String USE_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "USE statement is not supported yet.";
+ public static final String TARGET_TABLE_SCHEMA_MISSES_A_TIME_CATEGORY_COLUMN =
+ "Target table schema misses a TIME category column";
+ public static final String TIME_COLUMN_CAN_NOT_BE_NULL =
+ "time column can not be null";
+ public static final String NO_FIELD_COLUMN_PRESENT =
+ "No Field column present";
+ public static final String FETCH_FIRST_WITH_TIES_CLAUSE_REQUIRES_ORDER_BY =
+ "FETCH FIRST WITH TIES clause requires ORDER BY";
+ public static final String RECURSIVE_CTE_IS_NOT_SUPPORTED_YET =
+ "recursive cte is not supported yet.";
+ public static final String MISSING_COLUMN_ALIASES_IN_RECURSIVE_WITH_QUERY =
+ "missing column aliases in recursive WITH query";
+ public static final String NESTED_RECURSIVE_WITH_QUERY =
+ "nested recursive WITH query";
+ public static final String THERE_IS_AT_LEAST_ONE_RESULT_OF_EXPANDED =
+ "There is at least one result of expanded";
+ public static final String UNSUPPORTED_EXPRESSION_2 =
+ "UnSupported Expression: ";
+ public static final String RELATION_NOT_FOUND_OR_NOT_ALLOWED =
+ "Relation not found or not allowed";
+ public static final String COLUMNS_NOT_ALLOWED_FOR_RELATION_THAT_HAS_NO =
+ "COLUMNS not allowed for relation that has no columns";
+ public static final String UNKNOWN_COLUMNNAME =
+ "Unknown ColumnName: ";
+ public static final String INVALID_REGEX =
+ "Invalid regex '%s'";
+ public static final String COLUMNS_ARE_NOT_SUPPORTED_IN_DEREFERENCEEXPRESSION =
+ "Columns are not supported in DereferenceExpression";
+ public static final String SELECT_NOT_ALLOWED_FROM_RELATION_THAT_HAS_NO =
+ "SELECT * not allowed from relation that has no columns";
+ public static final String COLUMN_ALIASES_NOT_SUPPORTED =
+ "Column aliases not supported";
+ public static final String SELECT_NOT_ALLOWED_IN_QUERIES_WITHOUT_FROM_CLAUSE =
+ "SELECT * not allowed in queries without FROM clause";
+ public static final String MULTIPLE_DATE_BIN_GAPFILL_CALLS_NOT_ALLOWED =
+ "multiple date_bin_gapfill calls not allowed";
+ public static final String PATTERN_RECOGNITION_OUTPUT_TABLE_HAS_NO_COLUMNS =
+ "pattern recognition output table has no columns";
+ public static final String NATURAL_JOIN_NOT_SUPPORTED =
+ "Natural join not supported";
+ public static final String UNKNOWN_FILL_METHOD =
+ "Unknown fill method: ";
+ public static final String RECURSIVE_REFERENCE_IN_INTERSECT_ALL =
+ "recursive reference in INTERSECT ALL";
+ public static final String TABLE_PROPERTY_2 =
+ "Table property ";
+ public static final String THE_DATABASE_MUST_BE_SET =
+ "The database must be set.";
+ public static final String AT_MOST_ONE_TABLE_ARGUMENT_CAN_BE_PASSED =
+ "At most one table argument can be passed to a table function";
+ public static final String DUPLICATE_ARGUMENT_NAME =
+ "Duplicate argument name: %s";
+ public static final String SETTING_MONTHLY_INTERVALS_IS_NOT_SUPPORTED =
+ "Setting monthly intervals is not supported.";
+ public static final String FILTER_PUSH_DOWN_DOES_NOT_SUPPORT_CASE_WHEN =
+ "Filter push down does not support CASE WHEN";
+ public static final String FILTER_PUSH_DOWN_DOES_NOT_SUPPORT_IF =
+ "Filter push down does not support IF";
+ public static final String FILTER_PUSH_DOWN_DOES_NOT_SUPPORT_NULLIF =
+ "Filter push down does not support NULLIF";
+ public static final String EXPRESSION_SHOULD_BE_NUMERIC_ACTUAL_IS =
+ "expression should be numeric, actual is ";
+ public static final String TIMESTAMP_DOES_NOT_SUPPORT_IS_NULL =
+ "TIMESTAMP does not support IS NULL";
+ public static final String TIMESTAMP_DOES_NOT_SUPPORT_IS_NOT_NULL =
+ "TIMESTAMP does not support IS NOT NULL";
+ public static final String TIMESTAMP_DOES_NOT_SUPPORT_LIKE =
+ "TIMESTAMP does not support LIKE";
+ public static final String TIMESTAMP_DOES_NOT_CASE_WHEN =
+ "TIMESTAMP does not CASE WHEN";
+ public static final String TIMESTAMP_DOES_NOT_IF =
+ "TIMESTAMP does not IF";
+ public static final String TIMESTAMP_DOES_NOT_NULLIF =
+ "TIMESTAMP does not NULLIF";
+ public static final String SHOULD_NEVER_RETURN_NULL =
+ "Should never return null.";
+ public static final String IS_NULL_EXPRESSION_CAN_T_BE_PUSHED_DOWN =
+ "IS NULL Expression can't be pushed down";
+ public static final String NOT_EXPRESSION_CAN_T_BE_PUSHED_DOWN =
+ "Not Expression can't be pushed down";
+ public static final String UNSUPPORTED_OPERATOR_2 =
+ "Unsupported operator ";
+ public static final String THE_LOGICAL_EXPRESSION_HAS_NO_BOUNDED_COLUMN =
+ "The logical expression has no bounded column";
+ public static final String THE_NOT_EXPRESSION_HAS_NO_BOUNDED_COLUMN =
+ "The not expression has no bounded column";
+
+ // --- Plan / Relational / Metadata ---
+
+ public static final String TOO_MANY_DOTS_IN_TABLE_NAME =
+ "Too many dots in table name: %s";
+ public static final String OBJECT_TYPE_IS_NOT_SUPPORTED_AS_RETURN_TYPE =
+ "OBJECT type is not supported as return type";
+ public static final String INVALID_FUNCTION_PARAMETERS =
+ "Invalid function parameters: ";
+ public static final String UNKNOWN_FUNCTION =
+ "Unknown function: ";
+ public static final String THE_OBJECT_TYPE_COLUMN_IS_NOT_SUPPORTED =
+ "The object type column is not supported.";
+ public static final String NO_COLUMN_OTHER_THAN_TIME_PRESENT_PLEASE_CHECK =
+ "No column other than Time present, please check the request";
+ public static final String NO_FIELD_COLUMN_PRESENT_PLEASE_CHECK_THE_REQUEST =
+ "No Field column present, please check the request";
+ public static final String AUTO_ADD_TABLE_COLUMN_FAILED =
+ "Auto add table column failed.";
+ public static final String TAG_COLUMN_ONLY_SUPPORT_DATA_TYPE_STRING =
+ "Tag column only support data type STRING.";
+ public static final String ATTRIBUTE_COLUMN_ONLY_SUPPORT_DATA_TYPE_STRING =
+ "Attribute column only support data type STRING.";
+
+ // --- Plan / Relational / Planner ---
+
+ public static final String FAIL_TO_MATERIALIZE_CTE_BECAUSE =
+ "Fail to materialize CTE because {}";
+ public static final String BOTH_OBJECT_MUST_BE_TYPE_OF_NUMBER =
+ "Both object must be type of number";
+ public static final String NOT_YET_IMPLEMENTED =
+ "not yet implemented: ";
+ public static final String UNSUPPORTED_TYPE_IN_GENERICLITERAL =
+ "Unsupported type in GenericLiteral: ";
+ public static final String CANNOT_COERCE_TYPE =
+ "Cannot coerce type ";
+ public static final String UNKNOWN_TYPE_2 =
+ "Unknown type: ";
+ public static final String NODE_MUST_BE_A_LITERAL =
+ "node must be a Literal";
+ public static final String UNHANDLED_LITERAL_TYPE =
+ "Unhandled literal type: ";
+ public static final String NO_LITERAL_FORM_FOR_TYPE =
+ "No literal form for type %s";
+ public static final String WINDOW_FRAME_OFFSET_VALUE_MUST_NOT_BE_NEGATIVE =
+ "Window frame offset value must not be negative or null";
+ public static final String UNEXPECTED_TYPE =
+ "unexpected type: ";
+ public static final String FROM_CLAUSE_MUST_NOT_BE_EMPTY =
+ "From clause must not be empty";
+ public static final String COERCION_RESULT_IN_ANALYSIS_ONLY_CAN_BE_EMPTY =
+ "Coercion result in analysis only can be empty";
+ public static final String UNEXPECTED_RECURSIVE_CTE =
+ "unexpected recursive cte";
+ public static final String TABLE =
+ "Table ";
+ public static final String UNEXPECTED_JOIN_TYPE =
+ "Unexpected Join Type: ";
+ public static final String UNEXPECTED_ROWS_PER_MATCH =
+ "Unexpected rows per match: ";
+ public static final String UNEXPECTED_SKIP_TO_POSITION =
+ "Unexpected skip to position: ";
+ public static final String VALUES_IS_NOT_SUPPORTED_IN_CURRENT_VERSION =
+ "Values is not supported in current version.";
+ public static final String SUBSCRIPT_IS_NOT_SUPPORTED_IN_CURRENT_VERSION =
+ "Subscript is not supported in current version";
+
+ // --- Plan / Relational / Planner / IR ---
+
+ public static final String ILLEGAL_STATE_IN_VISITLOGICALEXPRESSION =
+ "Illegal state in visitLogicalExpression";
+ public static final String UNSUPPORTED_LOGICALEXPRESSION_OPERATOR =
+ "Unsupported LogicalExpression operator";
+ public static final String UNEXPECTED_EXPRESSION =
+ "Unexpected expression: ";
+ public static final String FAILED_TO_FETCH_SUBQUERY_RESULT =
+ "Failed to Fetch Subquery Result.";
+
+ // --- Plan / Relational / Planner / Iterative ---
+
+ public static final String UNEXPECTED_PATTERN =
+ "Unexpected Pattern: ";
+ public static final String TABLE_FUNCTION_DOES_NOT_SUPPORT_MULTIPLE_SOURCE_NOW =
+ "table function does not support multiple source now.";
+
+ // --- Plan / Relational / Planner / Node ---
+
+ public static final String SHOULD_NEVER_PUSH_DOWN_LIMIT_TO_AGGREGATIONTABLESCANNODE =
+ "Should never push down limit to AggregationTableScanNode.";
+ public static final String SHOULD_NEVER_PUSH_DOWN_OFFSET_TO_AGGREGATIONTABLESCANNODE =
+ "Should never push down offset to AggregationTableScanNode.";
+ public static final String NOT_SUPPORTED_YET =
+ "Not supported yet.";
+ public static final String COPYTONODE_SHOULD_NOT_BE_SERIALIZED =
+ "CopyToNode should not be serialized";
+
+ // --- Plan / Relational / Planner / Optimizations ---
+
+ public static final String LIST_PLANNODE_SIZE_SHOULD_1_BUT_NOW_IS =
+ "List.size should >= 1, but now is 0";
+ public static final String UNSUPPORTED_JOIN_TYPE =
+ "Unsupported Join Type: ";
+ public static final String TOPK_IS_NOT_SUPPORTED_IN_CORRELATED_SUBQUERY_FOR =
+ "TopK is not supported in correlated subquery for now";
+ public static final String UNEXPECTED_VALUE =
+ "Unexpected value: ";
+
+ // --- Plan / Relational / Security ---
+
+ public static final String USER_NOT_EXISTS =
+ "User not exists";
+ public static final String ONLY_THE_SUPERUSER_CAN_ALTER_HIM_HERSELF =
+ "Only the superuser can alter him/herself.";
+ public static final String DATABASE =
+ "DATABASE ";
+ public static final String TABLE_2 =
+ "TABLE ";
+ public static final String UNEXPECTED_VALUE_2 =
+ "Unexpected value:";
+ public static final String EACH_OPERATION_SHOULD_HAVE_PERMISSION_CHECK =
+ "Each operation should have permission check.";
+ public static final String UNKNOWN_AUTHORTYPE =
+ "Unknown authorType: ";
+
+ // --- Plan / Relational / SQL ---
+
+ public static final String UNKNOWN_AUTHORTYPE_2 =
+ "Unknown authorType:";
+ public static final String THE_RENAMING_FOR_BASE_TABLE_COLUMN_IS_CURRENTLY =
+ "The renaming for base table column is currently unsupported";
+ public static final String THE_RENAMING_FOR_BASE_TABLE_IS_CURRENTLY_UNSUPPORTED =
+ "The renaming for base table is currently unsupported";
+ public static final String UNEXPECTED_EXPRESSION_2 =
+ "unexpected expression: ";
+ public static final String THE_TABLE_SHOULD_ONLY_HAVE_ONE_COLUMN_FOUND =
+ "the table should only have one column found with TIME category";
+ public static final String TIMESTAMP_CANNOT_BE_NULL =
+ "Timestamp cannot be null";
+ public static final String SHOW_REGION_ID_IS_NOT_SUPPORTED_YET =
+ "SHOW REGION ID is not supported yet.";
+ public static final String SHOW_TIME_SLOT_IS_NOT_SUPPORTED_YET =
+ "SHOW TIME SLOT is not supported yet.";
+ public static final String COUNT_TIME_SLOT_IS_NOT_SUPPORTED_YET =
+ "COUNT TIME SLOT is not supported yet.";
+ public static final String SHOW_SERIES_SLOT_IS_NOT_SUPPORTED_YET =
+ "SHOW SERIES SLOT is not supported yet.";
+ public static final String MISSING_LIMIT_VALUE =
+ "Missing LIMIT value";
+ public static final String DATABASE_IS_NOT_SET_YET =
+ "Database is not set yet.";
+ public static final String AUTHOR_STATEMENT_PARSER_ERROR =
+ "author statement parser error";
+ public static final String UNSUPPORTED_SET_OPERATION =
+ "Unsupported set operation: ";
+ public static final String UNSUPPORTED_JOIN_CRITERIA =
+ "Unsupported join criteria";
+ public static final String TOLERANCE_IN_ASOF_JOIN_ONLY_SUPPORTS_INNER_TYPE =
+ "Tolerance in ASOF JOIN only supports INNER type now";
+ public static final String UNSUPPORTED_SIGN =
+ "Unsupported sign: ";
+ public static final String UNSUPPORTED_WINDOW_FRAME_TYPE =
+ "Unsupported window frame type: ";
+ public static final String UNSUPPORTED_BOUNDED_TYPE =
+ "Unsupported bounded type: ";
+ public static final String UNSUPPORTED_TRIM_SPECIFICATION =
+ "Unsupported trim specification: ";
+ public static final String TARGET_DATA_IN_SQL_SHOULD_BE_SET_IN =
+ "Target data in sql should be set in CREATE MODEL";
+ public static final String THE_TREE_MODEL_DATABASE_SHALL_NOT_BE_SPECIFIED =
+ "The tree model database shall not be specified in table model.";
+ public static final String UNSUPPORTED_SPECIAL_FUNCTION =
+ "Unsupported special function: ";
+ public static final String UNSUPPORTED_ORDERING =
+ "Unsupported ordering: ";
+ public static final String UNSUPPORTED_QUANTIFIER =
+ "Unsupported quantifier: ";
+ public static final String NOT_YET_IMPLEMENTED_WILDCARD_TRANSITION =
+ "not yet implemented: wildcard transition";
+ public static final String UNKNOWN_TABLE_ELEMENT =
+ "unknown table element: ";
+
+ // --- Plan / Scheduler ---
+
+ public static final String ERROR_HAPPENED_WHILE_FETCHING_QUERY_STATE =
+ "error happened while fetching query state";
+ public static final String INTERRUPTED_WHEN_DISPATCHING_READ_ASYNC =
+ "Interrupted when dispatching read async";
+ public static final String INTERRUPTED_WHEN_DISPATCHING_WRITE_ASYNC =
+ "Interrupted when dispatching write async";
+ public static final String DESERIALIZE_CONSENSUSGROUPID_FAILED =
+ "Deserialize ConsensusGroupId failed. ";
+ public static final String CAN_T_CONNECT_TO_NODE =
+ "can't connect to node {}";
+ public static final String CANCEL_QUERY_ON_NODE_FAILED =
+ "cancel query {} on node {} failed.";
+ public static final String CANNOT_DISPATCH_FI_FOR_LOAD_OPERATION =
+ "cannot dispatch FI for load operation";
+ public static final String RECEIVE_LOAD_NODE_FROM_UUID =
+ "Receive load node from uuid {}.";
+ public static final String LOAD_TSFILE_NODE_ERROR =
+ "Load TsFile Node {} error.";
+ public static final String SERIALIZE_TSFILERESOURCE_ERROR =
+ "Serialize TsFileResource {} error.";
+ public static final String LOAD_SKIP_TSFILE_BECAUSE_IT_HAS_NO_DATA =
+ "Load skip TsFile {}, because it has no data.";
+ public static final String LOADTSFILESCHEDULER_LOADS_TSFILE_ERROR =
+ "LoadTsFileScheduler loads TsFile {} error";
+ public static final String INTERRUPT_OR_EXECUTION_ERROR =
+ "Interrupt or Execution error.";
+ public static final String START_DISPATCHING_LOAD_COMMAND_FOR_UUID =
+ "Start dispatching Load command for uuid {}";
+ public static final String EXCEPTION_OCCURRED_DURING_SECOND_PHASE_OF_LOADING_TSFILE =
+ "Exception occurred during second phase of loading TsFile {}.";
+ public static final String START_LOAD_TSFILE_LOCALLY =
+ "Start load TsFile {} locally.";
+ public static final String LOAD_ALL_FAILED_TSFILES_ARE_CONVERTED_TO_TABLETS =
+ "Load: all failed TsFiles are converted to tablets and inserted.";
+
+ // --- Plan / Statement ---
+
+ public static final String METHOD_NOT_IMPLEMENTED_YET =
+ "Method not implemented yet";
+ public static final String INSERTION_CONTAINS_DUPLICATED_MEASUREMENT =
+ "Insertion contains duplicated measurement: ";
+ public static final String UNSUPPORTED_DATA_TYPE =
+ "Unsupported data type:";
+ public static final String FAILED_TO_CONVERT_INSERTTABLETSTATEMENT_TO_TABLET =
+ "Failed to convert InsertTabletStatement to Tablet";
+ public static final String MODEL_INFERENCE_DOES_NOT_SUPPORT_ALIGN_BY_DEVICE =
+ "Model inference does not support align by device now.";
+ public static final String MODEL_INFERENCE_DOES_NOT_SUPPORT_SELECT_INTO_NOW =
+ "Model inference does not support select into now.";
+ public static final String GROUP_BY_CLAUSES_DOESN_T_SUPPORT_GROUP_BY =
+ "GROUP BY CLAUSES doesn't support GROUP BY LEVEL now.";
+ public static final String GROUP_BY_LEVEL_DOES_NOT_SUPPORT_ALIGN_BY =
+ "GROUP BY LEVEL does not support align by device now.";
+ public static final String GROUP_BY_TAGS_DOES_NOT_SUPPORT_ALIGN_BY =
+ "GROUP BY TAGS does not support align by device now.";
+ public static final String HAVING_CLAUSE_IS_NOT_SUPPORTED_YET_IN_GROUP =
+ "Having clause is not supported yet in GROUP BY TAGS query";
+ public static final String OUTPUT_COLUMN_IS_DUPLICATED_WITH_THE_TAG_KEY =
+ "Output column is duplicated with the tag key: ";
+ public static final String LIMIT_OR_SLIMIT_ARE_NOT_SUPPORTED_YET_IN =
+ "Limit or slimit are not supported yet in GROUP BY TAGS";
+ public static final String EXPRESSION_OF_HAVING_CLAUSE_MUST_TO_BE_AN =
+ "Expression of HAVING clause must be an Aggregation"; // grammar fix: was "must to be" — update any tests asserting the old text
+ public static final String WHEN_HAVING_USED_WITH_GROUPBYLEVEL =
+ "When Having used with GroupByLevel: ";
+ public static final String ALIGN_BY_DEVICE =
+ "ALIGN BY DEVICE: ";
+ public static final String SORTING_BY_TIMESERIES_IS_ONLY_SUPPORTED_IN_LAST =
+ "Sorting by timeseries is only supported in last queries.";
+ public static final String LAST_QUERY_DOESN_T_SUPPORT_ALIGN_BY_DEVICE =
+ "Last query doesn't support align by device.";
+ public static final String LAST_QUERIES_CAN_ONLY_BE_APPLIED_ON_RAW =
+ "Last queries can only be applied on raw time series.";
+ public static final String SLIMIT_AND_SOFFSET_CAN_NOT_BE_USED_IN =
+ "SLIMIT and SOFFSET can not be used in LastQuery.";
+ public static final String SELECT_INTO_SLIMIT_CLAUSES_ARE_NOT_SUPPORTED =
+ "select into: slimit clauses are not supported.";
+ public static final String SELECT_INTO_SOFFSET_CLAUSES_ARE_NOT_SUPPORTED =
+ "select into: soffset clauses are not supported.";
+ public static final String SELECT_INTO_LAST_CLAUSES_ARE_NOT_SUPPORTED =
+ "select into: last clauses are not supported.";
+ public static final String SELECT_INTO_GROUP_BY_TAGS_CLAUSE_ARE_NOT =
+ "select into: GROUP BY TAGS clause are not supported.";
+ public static final String UNKNOWN_LITERAL_TYPE =
+ "Unknown literal type: %s";
+ public static final String ILLEGAL_PATH =
+ "illegal path: {}";
+ public static final String CQ_THE_START_TIME_OFFSET_SHOULD_BE_GREATER =
+ "CQ: The start time offset should be greater than 0.";
+ public static final String CQ_THE_END_TIME_OFFSET_SHOULD_BE_GREATER =
+ "CQ: The end time offset should be greater than or equal to 0.";
+ public static final String CQ_THE_QUERY_BODY_MISSES_AN_INTO_CLAUSE =
+ "CQ: The query body misses an INTO clause.";
+ public static final String CQ_SPECIFYING_TIME_FILTERS_IN_THE_QUERY_BODY =
+ "CQ: Specifying time filters in the query body is prohibited.";
+ public static final String IS_NOT_A_LEGAL_PATH =
+ "{} is not a legal path";
+
+ // --- Plan / Tree Planner ---
+
+ public static final String VALID_TREEDEVICEVIEWSCANNODE_IS_NOT_EXPECTED_HERE =
+ "Valid TreeDeviceViewScanNode is not expected here.";
+ public static final String MULTIPLE_COLUMNS_WITH_TIME_CATEGORY_FOUND =
+ "Multiple columns with TIME category found";
+ public static final String MISSING_TIME_CATEGORY_COLUMN =
+ "Missing TIME category column";
+ public static final String UNKNOWN_SQL_DIALECT =
+ "Unknown sql dialect: %s";
+ public static final String UNEXPECTED_PATH_TYPE_2 =
+ "Unexpected path type";
+ public static final String SHOULD_CALL_THE_CONCRETE_VISITXX_METHOD =
+ "should call the concrete visitXX() method";
+ public static final String OUTPUTCOLUMTYPES_SHOULD_NOT_BE_NULL_EMPTY =
+ "OutputColumnTypes should not be null/empty"; // typo fix: message said "OutputColumTypes" (missing 'n'); constant name kept for API stability
+ public static final String UNKNOWN_FILL_POLICY =
+ "Unknown fill policy: ";
+ public static final String FILTER_CAN_NOT_CONTAIN_NON_MAPPABLE_UDF =
+ "Filter can not contain Non-Mappable UDF";
+ public static final String GROUPBYVARIATIONEXPRESSION_CAN_T_BE_NULL =
+ "groupByVariationExpression can't be null";
+ public static final String GROUPBYCONDITIONEXPRESSION_CAN_T_BE_NULL =
+ "groupByConditionExpression can't be null";
+ public static final String GROUPBYCOUNTEXPRESSION_CAN_T_BE_NULL =
+ "groupByCountExpression can't be null";
+ public static final String UNKNOWN_NODE_TYPE =
+ "Unknown node type: ";
+ public static final String UNSUPPORTED_COLUMN_GENERATOR_TYPE =
+ "Unsupported column generator type: ";
+ public static final String ROOT_NODE_MUST_RETURN_ONLY_ONE =
+ "root node must return only one";
+ public static final String SINGLEDEVICEVIEWNODE_HAVE_ONLY_ONE_CHILD =
+ "SingleDeviceViewNode have only one child";
+ public static final String AVAILABLE_REPLICAS =
+ "available replicas: {}";
+ public static final String UNEXPECTED_ERROR_OCCURS_WHEN_SERIALIZING_THIS_FRAGMENTINSTANCE =
+ "Unexpected error occurs when serializing this FragmentInstance.";
+ public static final String INVALID_NODE_TYPE =
+ "Invalid node type: ";
+ public static final String THIS_LASTQUERYSCANNODE_IS_DEPRECATED =
+ "This LastQueryScanNode is deprecated";
+ public static final String EXPLAINANALYZENODE_SHOULD_NOT_BE_SERIALIZED =
+ "ExplainAnalyzeNode should not be serialized";
+ public static final String EXPLAINANALYZENODE_SHOULD_NOT_BE_DESERIALIZED =
+ "ExplainAnalyzeNode should not be deserialized";
+ public static final String CLONE_OF_LOAD_SINGLE_TSFILE_IS_NOT_IMPLEMENTED =
+ "clone of load single TsFile is not implemented";
+ public static final String SPLIT_LOAD_SINGLE_TSFILE_IS_NOT_IMPLEMENTED =
+ "split load single TsFile is not implemented";
+ public static final String DELETE_AFTER_LOADING_ERROR =
+ "Delete After Loading {} error.";
+ public static final String CLONE_OF_LOAD_TSFILE_IS_NOT_IMPLEMENTED =
+ "clone of load TsFile is not implemented";
+ public static final String LOADTSFILE_STATEMENT_IS_NULL_DURING_TABLE_MODEL_SPLIT =
+ "LoadTsFile statement is null during table model split.";
+ public static final String CLONE_OF_LOAD_PIECE_TSFILE_IS_NOT_IMPLEMENTED =
+ "clone of load piece TsFile is not implemented";
+ public static final String SERIALIZE_TO_BYTEBUFFER_ERROR =
+ "Serialize to ByteBuffer error.";
+ public static final String SPLIT_LOAD_PIECE_TSFILE_IS_NOT_IMPLEMENTED =
+ "split load piece TsFile is not implemented";
+ public static final String DESERIALIZE_ERROR =
+ "Deserialize {} error.";
+ public static final String INVALID_LENGTH_FOR_SLICING =
+ "Invalid length for slicing: ";
+ public static final String CANNOT_DESERIALIZE_DEVICESSCHEMASCANNODE =
+ "Cannot deserialize DevicesSchemaScanNode";
+ public static final String CANNOT_DESERIALIZE_TIMESERIESSCHEMASCANNODE =
+ "Cannot deserialize TimeSeriesSchemaScanNode";
+ public static final String CLONE_OF_ALTERTIMESERIESNODE_IS_NOT_IMPLEMENTED =
+ "Clone of AlterTimeSeriesNode is not implemented";
+ public static final String CAN_NOT_DESERIALIZE_ALTERTIMESERIESNODE =
+ "Can not deserialize AlterTimeSeriesNode";
+ public static final String CLONE_OF_CREATEALIGNEDTIMESERIESNODE_IS_NOT_IMPLEMENTED =
+ "Clone of CreateAlignedTimeSeriesNode is not implemented";
+ public static final String CAN_NOT_DESERIALIZE_CREATEALIGNEDTIMESERIESNODE =
+ "Can not deserialize CreateAlignedTimeSeriesNode";
+ public static final String CLONE_OF_CREATEMULTITIMESERIESNODE_IS_NOT_IMPLEMENTED =
+ "Clone of CreateMultiTimeSeriesNode is not implemented";
+ public static final String CLONE_OF_CREATETIMESERIESNODE_IS_NOT_IMPLEMENTED =
+ "Clone of CreateTimeSeriesNode is not implemented";
+ public static final String CANNOT_DESERIALIZE_CREATETIMESERIESNODE =
+ "Cannot deserialize CreateTimeSeriesNode";
+ public static final String CLONE_OF_INTERNALCREATETIMESERIESNODE_IS_NOT_IMPLEMENTED =
+ "Clone of InternalCreateTimeSeriesNode is not implemented";
+ public static final String CLONE_OF_ALTERLOGICALNODE_IS_NOT_IMPLEMENTED =
+ "Clone of AlterLogicalNode is not implemented";
+ public static final String UNEXPECTED_DESCRIPTORTYPE =
+ "Unexpected descriptorType: ";
+ public static final String NO_CHILD_IS_ALLOWED_FOR_ALIGNEDSERIESSCANNODE =
+ "no child is allowed for AlignedSeriesScanNode";
+ public static final String DEVICEREGIONSCANNODE_HAS_NO_CHILDREN =
+ "DeviceRegionScanNode has no children";
+ public static final String NO_CHILD_IS_ALLOWED_FOR_SERIESSCANNODE =
+ "no child is allowed for SeriesScanNode";
+ public static final String NO_CHILD_IS_ALLOWED_FOR_SERIESAGGREGATESCANNODE =
+ "no child is allowed for SeriesAggregateScanNode";
+ public static final String NO_CHILD_IS_ALLOWED_FOR_SERIESSCANSOURCENODE =
+ "no child is allowed for SeriesScanSourceNode";
+ public static final String NO_CHILD_IS_ALLOWED_FOR_SHOWDISKUSAGENODE =
+ "no child is allowed for ShowDiskUsageNode";
+ public static final String NO_CHILD_IS_ALLOWED_FOR_SHOWQUERIESNODE =
+ "no child is allowed for ShowQueriesNode";
+ public static final String TIMESERIESREGIONSCANNODE_DOES_NOT_SUPPORT_ADDCHILD =
+ "TimeseriesRegionScanNode does not support addChild";
+ public static final String NOT_SUPPORTED =
+ "Not supported.";
+ public static final String CANNOT_DESERIALIZE_INSERTROWNODE =
+ "Cannot deserialize InsertRowNode";
+ public static final String UNEXPECTED_ERROR_OCCURS_WHEN_SERIALIZING_DELETEDATANODE =
+ "Unexpected error occurs when serializing deleteDataNode.";
+ public static final String DELETEDATANODES_IS_EMPTY =
+ "deleteDataNodes is empty";
+ public static final String INSERTMULTITABLETSNODE_NOT_SUPPORT_MERGE =
+ "InsertMultiTabletsNode not support merge";
+ public static final String CLONE_OF_INSERT_IS_NOT_IMPLEMENTED =
+ "clone of Insert is not implemented";
+ public static final String INSERTNODES_SHOULD_NEVER_BE_EMPTY =
+ "insertNodes should never be empty";
+ public static final String SERIALIZEATTRIBUTES_OF_INSERTNODE_IS_NOT_IMPLEMENTED =
+ "serializeAttributes of InsertNode is not implemented";
+ public static final String INSERTROWSOFONEDEVICENODE_NOT_SUPPORT_MERGE =
+ "InsertRowsOfOneDeviceNode not support merge";
+ public static final String CANNOT_DESERIALIZE_INSERTROWSOFONEDEVICENODE =
+ "Cannot deserialize InsertRowsOfOneDeviceNode";
+ public static final String CANNOT_DESERIALIZE_INSERTTABLETNODE =
+ "Cannot deserialize InsertTabletNode";
+ public static final String MERGE_IS_NOT_SUPPORTED =
+ "Merge is not supported";
+ public static final String FAILED_TO_SERIALIZE_MODENTRY_TO_WAL =
+ "Failed to serialize modEntry to WAL";
+ public static final String ALL_DATABASE_NAME_NEED_TO_BE_SAME =
+ "All database name need to be same";
+ public static final String INVALID_AGGREGATIONSTEP_TYPE =
+ "Invalid AggregationStep type: ";
+
+ // --- Transformation ---
+
+ public static final String SIZE_IS_0 =
+ "Size is 0";
+ public static final String CAN_NOT_CALL_NEXT_ON_EMPTYROWITERATOR =
+ "Can not call next on EmptyRowIterator";
+ public static final String THE_EXPRESSION_CANNOT_BE_NULL =
+ "The expression cannot be null";
+ public static final String UNSUPPORTED_TYPE =
+ "Unsupported type: ";
+ public static final String UNSUPPORTED_DATA_TYPE_2 =
+ "Unsupported data type: ";
+ public static final String UNSUPPORTED_DATA_TYPE_3 =
+ "unsupported data type: ";
+ public static final String ERROR_OCCURRED_DURING_INFERRING_UDF_DATA_TYPE =
+ "Error occurred during inferring UDF data type";
+ public static final String ERROR_OCCURRED_DURING_GETTING_UDF_ACCESS_STRATEGY =
+ "Error occurred during getting UDF access strategy";
+ public static final String TRANSFORMUTILS_SHOULD_NOT_BE_INSTANTIATED =
+ "TransformUtils should not be instantiated.";
+
+ // --- Execution / Exchange (additional) ---
+
+ public static final String ACK_TSBLOCK_FAILED =
+ "ack TsBlock [{}, {}) failed.";
+ public static final String CLOSE_CHANNEL_OF_SHUFFLESINKHANDLE_FAILED =
+ "Close channel of ShuffleSinkHandle {}, index {} failed.";
+ public static final String SHUFFLESINKHANDLE_ALREADY_IN_MAP =
+ "ShuffleSinkHandle for ";
+ public static final String IS_IN_THE_MAP =
+ " is in the map.";
+ public static final String SOURCE_HANDLE_FOR_PLAN_NODE =
+ "Source handle for plan node ";
+ public static final String OF =
+ " of ";
+ public static final String EXISTS =
+ " exists.";
+ public static final String FAILED_TO_PULL_TSBLOCKS =
+ "{} failed to pull TsBlocks [{}] to [{}] from SinkHandle {}, channel index {},";
+ public static final String FAILED_TO_GET_DATA_BLOCK =
+ "failed to get data block [{}, {}), attempt times: {}";
+ public static final String FAILED_TO_SEND_ACK_DATA_BLOCK_EVENT =
+ "failed to send ack data block event [{}, {}), attempt times: {}";
+ public static final String SEND_CLOSE_SINK_CHANNEL_EVENT_FAILED =
+ "[SendCloseSinkChannelEvent] to [ShuffleSinkHandle: {}, index: {}] failed.)."; // NOTE(review): trailing ")." looks accidental — confirm against the original log call before changing
+ public static final String LOCAL_SINK_CHANNEL_STATE_IS =
+ "LocalSinkChannel state is ."; // NOTE(review): message appears to be missing a "{}" placeholder before "." — verify against the caller's log arguments
+ public static final String SCH_LISTENER_ON_FINISH =
+ "[ScHListenerOnFinish]";
+ public static final String SCH_LISTENER_ALREADY_RELEASED =
+ "[ScHListenerAlreadyReleased]";
+ public static final String SCH_LISTENER_ON_ABORT =
+ "[ScHListenerOnAbort]";
+ public static final String SHUFFLE_SINK_HANDLE_LISTENER_ON_FINISH =
+ "[ShuffleSinkHandleListenerOnFinish]";
+ public static final String SHUFFLE_SINK_HANDLE_LISTENER_ON_END_OF_TSBLOCKS =
+ "[ShuffleSinkHandleListenerOnEndOfTsBlocks]";
+ public static final String SHUFFLE_SINK_HANDLE_LISTENER_ON_ABORT =
+ "[ShuffleSinkHandleListenerOnAbort]";
+ public static final String SKH_LISTENER_ON_FINISH =
+ "[SkHListenerOnFinish]";
+ public static final String SKH_LISTENER_ON_END_OF_TSBLOCKS =
+ "[SkHListenerOnEndOfTsBlocks]";
+ public static final String SKH_LISTENER_ON_ABORT =
+ "[SkHListenerOnAbort]";
+ public static final String CLOSE_SHUFFLE_SINK_HANDLE =
+ "Close ShuffleSinkHandle: {}";
+ public static final String GET_SHARED_TSBLOCK_QUEUE_FROM_LOCAL_SOURCE_HANDLE =
+ "Get SharedTsBlockQueue from local source handle";
+ public static final String CREATE_SHARED_TSBLOCK_QUEUE =
+ "Create SharedTsBlockQueue";
+ public static final String CREATE_LOCAL_SINK_HANDLE_FOR =
+ "Create local sink handle for {}";
+ public static final String CREATE_LOCAL_SOURCE_HANDLE_FOR =
+ "Create local source handle for {}";
+ public static final String GET_SHARED_TSBLOCK_QUEUE_FROM_LOCAL_SINK_HANDLE =
+ "Get SharedTsBlockQueue from local sink handle";
+ public static final String START_FORCE_RELEASE_FI_DATA_EXCHANGE_RESOURCE =
+ "[StartForceReleaseFIDataExchangeResource]";
+ public static final String CLOSE_SOURCE_HANDLE =
+ "[CloseSourceHandle] {}";
+ public static final String END_FORCE_RELEASE_FI_DATA_EXCHANGE_RESOURCE =
+ "[EndForceReleaseFIDataExchangeResource]";
+ public static final String CREATE_LOCAL_SINK_HANDLE_TO_PLAN_NODE =
+ "Create local sink handle to plan node {} of {} for {}";
+ public static final String CREATE_SINK_HANDLE_TO_PLAN_NODE =
+ "Create sink handle to plan node {} of {} for {}";
+ public static final String CREATE_LOCAL_SOURCE_HANDLE_FROM =
+ "Create local source handle from {} for plan node {} of {}";
+ public static final String GET_SERIALIZED_TSBLOCK =
+ "[GetSerializedTsBlock] TsBlock:{}";
+ public static final String START_ABORT_LOCAL_SOURCE_HANDLE =
+ "[StartAbortLocalSourceHandle]";
+ public static final String END_ABORT_LOCAL_SOURCE_HANDLE =
+ "[EndAbortLocalSourceHandle]";
+ public static final String START_CLOSE_LOCAL_SOURCE_HANDLE =
+ "[StartCloseLocalSourceHandle]";
+ public static final String END_CLOSE_LOCAL_SOURCE_HANDLE =
+ "[EndCloseLocalSourceHandle]";
+ public static final String START_SET_NO_MORE_TSBLOCKS =
+ "[StartSetNoMoreTsBlocks]";
+ public static final String START_ABORT_SINK_CHANNEL =
+ "[StartAbortSinkChannel]";
+ public static final String END_ABORT_SINK_CHANNEL =
+ "[EndAbortSinkChannel]";
+ public static final String START_CLOSE_SINK_CHANNEL =
+ "[StartCloseSinkChannel]";
+ public static final String END_CLOSE_SINK_CHANNEL =
+ "[EndCloseSinkChannel]";
+ public static final String ACK_TSBLOCK =
+ "[ACKTsBlock] {}.";
+ public static final String NOTIFY_NO_MORE_TSBLOCK =
+ "[NotifyNoMoreTsBlock]";
+ public static final String START_SEND_TSBLOCK_ON_LOCAL =
+ "[StartSendTsBlockOnLocal]";
+ public static final String START_SET_NO_MORE_TSBLOCKS_ON_LOCAL =
+ "[StartSetNoMoreTsBlocksOnLocal]";
+ public static final String END_SET_NO_MORE_TSBLOCKS_ON_LOCAL =
+ "[EndSetNoMoreTsBlocksOnLocal]";
+ public static final String START_ABORT_LOCAL_SINK_CHANNEL =
+ "[StartAbortLocalSinkChannel]";
+ public static final String END_ABORT_LOCAL_SINK_CHANNEL =
+ "[EndAbortLocalSinkChannel]";
+ public static final String START_CLOSE_LOCAL_SINK_CHANNEL =
+ "[StartCloseLocalSinkChannel]";
+ public static final String END_CLOSE_LOCAL_SINK_CHANNEL =
+ "[EndCloseLocalSinkChannel]";
+ public static final String GET_TSBLOCK_FROM_BUFFER =
+ "[GetTsBlockFromBuffer] sequenceId:{}, size:{}";
+ public static final String WAIT_FOR_MORE_TSBLOCK =
+ "[WaitForMoreTsBlock]";
+ public static final String RECEIVE_NO_MORE_TSBLOCK_EVENT =
+ "[ReceiveNoMoreTsBlockEvent]";
+ public static final String END_PULL_TSBLOCKS_FROM_REMOTE =
+ "[EndPullTsBlocksFromRemote] Count:{}";
+ public static final String PUT_TSBLOCKS_INTO_BUFFER =
+ "[PutTsBlocksIntoBuffer]";
+ public static final String SEND_ACK_TSBLOCK =
+ "[SendACKTsBlock] [{}, {}).";
+ public static final String START_ABORT_SHUFFLE_SINK_HANDLE =
+ "[StartAbortShuffleSinkHandle]";
+ public static final String END_ABORT_SHUFFLE_SINK_HANDLE =
+ "[EndAbortShuffleSinkHandle]";
+ public static final String START_CLOSE_SHUFFLE_SINK_HANDLE =
+ "[StartCloseShuffleSinkHandle]";
+ public static final String END_CLOSE_SHUFFLE_SINK_HANDLE =
+ "[EndCloseShuffleSinkHandle]";
+ public static final String SIGNAL_NO_MORE_TSBLOCK_ON_QUEUE =
+ "[SignalNoMoreTsBlockOnQueue]";
+ public static final String QUEUE_DESTROYED_WHEN_SET_NO_MORE_TSBLOCKS =
+ "The queue has been destroyed when calling setNoMoreTsBlocks.";
+ public static final String ADD_TSBLOCK =
+ "[addTsBlock] TsBlock:{}";
+
+ // --- Plan (additional debug) ---
+
+ public static final String QUERY_START_SQL =
+ "[QueryStart] sql: {}";
+ public static final String CLEAN_UP_QUERY =
+ "[CleanUpQuery]"; // fix: extracted message had a doubled closing bracket "[CleanUpQuery]]"
+ public static final String RELEASE_QUERY_RESOURCE_STATE =
+ "[ReleaseQueryResource] state is: {}";
+ public static final String SKIP_EXECUTE =
+ "[SkipExecute]";
+ public static final String SKIP_EXECUTE_AFTER_LOGICAL_PLAN =
+ "[SkipExecute After LogicalPlan]";
+ public static final String RESULT_HANDLE_FINISHED =
+ "[ResultHandleFinished]";
+
+ // --- Execution / Operator / Source (additional debug) ---
+
+ public static final String SERIES_SCAN_UTIL_PAGE_READER_IS_MODIFIED =
+ "[SeriesScanUtil] pageReader.isModified() is {}";
+ public static final String GET_ALL_SATISFIED_PAGE_DATA_TSBLOCK =
+ "[getAllSatisfiedPageData] TsBlock:{}";
+
+ // --- Plan / Relational / Metadata (additional debug) ---
+
+ public static final String DEVICES_ARE_MISSING =
+ "{} devices are missing";
+
+ // --- Execution / Fragment (additional debug) ---
+
+ public static final String STATE_CHANGED_TO =
+ "[StateChanged] To {}";
+ public static final String ENTER_THE_STATE_CHANGE_LISTENER =
+ "Enter the stateChangeListener";
+
+ // --- Execution / Fragment (additional) ---
+
+ public static final String ERRORS_RELEASING_SINK =
+ "Errors occurred while attempting to release sink, potentially leading to resource leakage.";
+ public static final String ERRORS_DELETING_TMP_FILES =
+ "Errors occurred while attempting to delete tmp files, potentially leading to resource leakage.";
+ public static final String ERRORS_DEREGISTER_FI_FROM_MEMORY_POOL =
+ "Errors occurred while attempting to deRegister FI from Memory Pool, potentially leading to resource leakage, status is {}.";
+ public static final String ERRORS_RELEASING_MEMORY =
+ "Errors occurred while attempting to release memory, potentially leading to resource leakage.";
+ public static final String ERRORS_FINISHING_FI_PROCESS =
+ "Errors occurred while attempting to finish the FI process, potentially leading to resource leakage.";
+
+ // --- Plan (additional) ---
+
+ public static final String CLEANING_UP_STALE_QUERY =
+ "Cleaning up stale query with id {}, which has been running for {} ms, timeout duration is: {}ms";
+
+ // --- Plan / Tree Planner (additional) ---
+
+ public static final String ERROR_WHEN_READ_OBJECT_FILE =
+ "Error when read object file {}.";
+
+ // --- Additional Edge Cases ---
+
+ public static final String JOIN_TYPE_IS_NOT_SUPPORTED =
+ " Join type is not supported";
+ public static final String COLON_S_VS_S =
+ ": %s vs %s";
+ public static final String S_IS_NULL =
+ "%s is null";
+ public static final String IS_TOO_LARGE_STACK_OVERFLOW_WHILE_PARSING =
+ " is too large (stack overflow while parsing)";
+
+ public static final String ENTER_STATE_CHANGE_LISTENER = "Enter the stateChangeListener"; // NOTE(review): exact duplicate of ENTER_THE_STATE_CHANGE_LISTENER above — keep one and migrate callers
+
+ private DataNodeQueryMessages() {} // constants holder: private ctor prevents instantiation
+}
diff --git a/iotdb-core/datanode/src/main/i18n/en/org/apache/iotdb/db/i18n/DataNodeSchemaMessages.java b/iotdb-core/datanode/src/main/i18n/en/org/apache/iotdb/db/i18n/DataNodeSchemaMessages.java
new file mode 100644
index 0000000000000..9574ca9bc8aad
--- /dev/null
+++ b/iotdb-core/datanode/src/main/i18n/en/org/apache/iotdb/db/i18n/DataNodeSchemaMessages.java
@@ -0,0 +1,608 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.i18n;
+
+public final class DataNodeSchemaMessages {
+
+ // ======================== SchemaEngine ========================
+
+ public static final String USED_SCHEMA_ENGINE_MODE = "used schema engine mode: {}.";
+ public static final String SCHEMA_REGION_RECOVERY_ERROR =
+ "Something wrong happened during SchemaRegion recovery";
+ public static final String CLEAR_SCHEMA_REGION_MAP = "clear schema region map.";
+ public static final String FAILED_TO_UPDATE_SUBTREE_MEASUREMENT_COUNT =
+ "Failed to update subtree measurement count for template {} in schemaRegion {}";
+ public static final String RECOVER_SPEND = "Recover [{}] spend: {} ms";
+ public static final String SCHEMA_REGION_FAILED_TO_RECOVER =
+ "SchemaRegion [%d] in StorageGroup [%s] failed to recover.";
+ public static final String SCHEMA_REGION_ALREADY_DELETED =
+ "SchemaRegion(id = {}) has been deleted, skipped"; // typo fix: was "skiped"
+ public static final String FAILED_TO_GET_TABLE_FOR_TIMESERIES_COUNT =
+ "Failed to get table {}.{} when calculating the time series number. Maybe the cluster is restarting or the table is being dropped.";
+ public static final String PEER_IS_SHUTTING_DOWN = "Peer is shutting down now.";
+ public static final String SCHEMA_REGION_DUPLICATED =
+ "SchemaRegion [%s] is duplicated between [%s] and [%s], and the former one has been recovered.";
+
+ // ======================== MemSchemaEngineStatistics ========================
+
+ public static final String CURRENT_SERIES_MEMORY_TOO_LARGE =
+ "Current series memory {} is too large...";
+ public static final String CURRENT_SERIES_MEMORY_BACK_TO_NORMAL =
+ "Current series memory {} come back to normal level, total series number is {}.";
+ public static final String WRONG_SCHEMA_ENGINE_STATISTICS_TYPE =
+ "Wrong SchemaEngineStatistics Type";
+
+ // ======================== MemSchemaRegionStatistics ========================
+
+ public static final String WRONG_SCHEMA_REGION_STATISTICS_TYPE =
+ "Wrong SchemaRegionStatistics Type";
+
+ // ======================== SchemaRegionUtils ========================
+
+ public static final String CANNOT_GET_FILES_IN_SCHEMA_REGION_DIR =
+ "Can't get files in schema region dir %s";
+ public static final String DELETE_SCHEMA_REGION_FILE = "Delete schema region file {}";
+ public static final String DELETE_SCHEMA_REGION_FILE_FAILED =
+ "Delete schema region file {} failed.";
+ public static final String FAILED_TO_DELETE_SCHEMA_REGION_FILE =
+ "Failed to delete schema region file %s";
+ public static final String DELETE_SCHEMA_REGION_FOLDER = "Delete schema region folder {}";
+ public static final String DELETE_SCHEMA_REGION_FOLDER_FAILED =
+ "Delete schema region folder {} failed.";
+ public static final String FAILED_TO_DELETE_SCHEMA_REGION_FOLDER =
+ "Failed to delete schema region folder %s";
+ public static final String DELETE_DATABASE_SCHEMA_FOLDER = "Delete database schema folder {}";
+ public static final String DELETE_DATABASE_SCHEMA_FOLDER_FAILED =
+ "Delete database schema folder {} failed";
+
+ // ======================== SchemaRegionLoader ========================
+
+ public static final String CLASS_NOT_SUBCLASS_OF_ISCHEMAREGION =
+ "Class %s is not a subclass of ISchemaRegion.";
+ public static final String DUPLICATED_SCHEMA_REGION_IMPL =
+ "Duplicated SchemaRegion implementation, {} and {}, with same mode name [{}]";
+ public static final String NO_SCHEMA_REGION_IMPL_WITH_TARGET_MODE =
+ "There's no SchemaRegion implementation with target mode {}. Use default mode {}";
+ public static final String SCHEMA_REGION_LOADER_INFO =
+ "[SchemaRegionLoader], schemaEngineMode:{}, currentMode:{}";
+
+ // ======================== SchemaRegionPlanType ========================
+
+ public static final String UNRECOGNIZED_SCHEMA_REGION_PLAN_TYPE =
+ "Unrecognized SchemaRegionPlanType of ";
+
+ // ======================== SchemaRegion Init/Dir ========================
+
+ public static final String CREATE_DATABASE_SCHEMA_FOLDER = "create database schema folder {}";
+ public static final String CREATE_DATABASE_SCHEMA_FOLDER_FAILED =
+ "create database schema folder {} failed.";
+ public static final String CREATE_SCHEMA_REGION_FOLDER = "create schema region folder {}";
+ public static final String CREATE_SCHEMA_REGION_FOLDER_FAILED =
+ "create schema region folder {} failed.";
+ public static final String CANNOT_RECOVER_ALL_SCHEMA_INFO =
+ "Cannot recover all schema info from {}, we try to recover as possible as we can";
+ public static final String CANNOT_RECOVER_ALL_MTREE =
+ "Cannot recover all MTree from {} file, we try to recover as possible as we can";
+
+ // ======================== SchemaRegion MLog ========================
+
+ public static final String CANNOT_FORCE_MLOG = "Cannot force {} mlog to the schema region";
+ public static final String SPEND_TIME_DESERIALIZE_MTREE =
+ "spend {} ms to deserialize {} mtree from mlog.bin";
+ public static final String FAILED_TO_PARSE_MLOG = "Failed to parse ";
+ public static final String MLOG_BIN_SUFFIX = " mlog.bin";
+ public static final String PARSE_MLOG_ERROR = "Parse mlog error at lineNumber {} because:";
+ public static final String CANNOT_OPERATE_CMD = "Can not operate cmd {} for err:";
+ public static final String MLOG_BIN_CORRUPTED =
+ "The mlog.bin has been corrupted. Please remove it or fix it, and then restart IoTDB";
+ public static final String CANNOT_CLOSE_METADATA_LOG_WRITER =
+ "Cannot close metadata log writer, because:";
+ public static final String MLOG_RECOVERY_CHECK_POINT = "MLog recovery check point: {}";
+ public static final String CANNOT_GET_MLOG_CHECKPOINT =
+ "Can not get check point in MLogDescription file because {}, use default value 0.";
+ public static final String FAILED_TO_SKIP_MLOG = "Failed to skip {} from {}";
+ public static final String UPDATE_MLOG_DESCRIPTION_FAILED = "Update {} failed because {}";
+ public static final String DIRECT_BUFFER_MEMORY_EXCEEDED =
+ "Total allocated memory for direct buffer will be ";
+ public static final String DIRECT_BUFFER_MEMORY_LIMIT = ", which is greater than limit mem cost: ";
+
+ // ======================== SchemaRegion Snapshot ========================
+
+ public static final String FAILED_TO_CREATE_SNAPSHOT_NOT_INITIALIZED =
+ "Failed to create snapshot of schemaRegion {}, because the schemaRegion has not been initialized.";
+ public static final String START_CREATE_SNAPSHOT = "Start create snapshot of schemaRegion {}";
+ public static final String MTREE_SNAPSHOT_CREATION_COST =
+ "MTree snapshot creation of schemaRegion {} costs {}ms.";
+ public static final String MTREE_SNAPSHOT_CREATION_COST_WITH_STATUS =
+ "MTree snapshot creation of schemaRegion {} costs {}ms. Status: {}";
+ public static final String TAG_SNAPSHOT_CREATION_COST =
+ "Tag snapshot creation of schemaRegion {} costs {}ms.";
+ public static final String TAG_SNAPSHOT_CREATION_COST_WITH_STATUS =
+ "Tag snapshot creation of schemaRegion {} costs {}ms. Status: {}";
+ public static final String DEVICE_ATTR_SNAPSHOT_CREATION_COST =
+ "Device attribute snapshot creation of schemaRegion {} costs {}ms. Status: {}";
+ public static final String DEVICE_ATTR_UPDATER_SNAPSHOT_CREATION_COST =
+ "Device attribute remote updater snapshot creation of schemaRegion {} costs {}ms. Status: {}";
+ public static final String SNAPSHOT_CREATION_COST =
+ "Snapshot creation of schemaRegion {} costs {}ms.";
+ public static final String SUCCESSFULLY_CREATE_SNAPSHOT =
+ "Successfully create snapshot of schemaRegion {}";
+ public static final String START_LOADING_SNAPSHOT =
+ "Start loading snapshot of schemaRegion {}";
+ public static final String DEVICE_ATTR_SNAPSHOT_LOADING_COST =
+ "Device attribute snapshot loading of schemaRegion {} costs {}ms.";
+ public static final String DEVICE_ATTR_UPDATER_SNAPSHOT_LOADING_COST =
+ "Device attribute remote updater snapshot loading of schemaRegion {} costs {}ms.";
+ public static final String TAG_SNAPSHOT_LOADING_COST =
+ "Tag snapshot loading of schemaRegion {} costs {}ms.";
+ public static final String MTREE_SNAPSHOT_LOADING_COST =
+ "MTree snapshot loading of schemaRegion {} costs {}ms.";
+ public static final String SNAPSHOT_LOADING_COST =
+ "Snapshot loading of schemaRegion {} costs {}ms.";
+ public static final String SUCCESSFULLY_LOAD_SNAPSHOT =
+ "Successfully load snapshot of schemaRegion {}";
+ public static final String FAILED_TO_LOAD_SNAPSHOT =
+ "Failed to load snapshot for schemaRegion {} due to {}. Use empty schemaRegion";
+ public static final String ERROR_DURING_INIT_SCHEMA_REGION =
+ "Error occurred during initializing schemaRegion {}";
+ public static final String FAILED_TO_RECOVER_TAG_INDEX =
+ "Failed to recover tagIndex for {} in schemaRegion {}.";
+ public static final String FAILED_TO_READ_TAG_ATTRIBUTE =
+ "Failed to read tag and attribute info because {}";
+
+ // ======================== DeviceAttributeStore ========================
+
+ public static final String FAILED_TO_DELETE_OLD_SNAPSHOT_DEVICE_ATTR =
+ "Failed to delete old snapshot {} while creating device attribute snapshot.";
+ public static final String FAILED_TO_RENAME_SNAPSHOT_DEVICE_ATTR =
+ "Failed to rename {} to {} while creating device attribute snapshot.";
+ public static final String FAILED_TO_CREATE_DEVICE_ATTR_SNAPSHOT =
+ "Failed to create device attribute snapshot due to {}";
+ public static final String DEVICE_ATTR_SNAPSHOT_NOT_FOUND =
+ "Device attribute snapshot {} not found, consider it as upgraded from the older version, use empty attributes";
+ public static final String LOAD_DEVICE_ATTR_SNAPSHOT_FAILED =
+ "Load device attribute snapshot from {} failed";
+
+ // ======================== DeviceAttributeCacheUpdater ========================
+
+ public static final String FAILED_TO_DELETE_OLD_SNAPSHOT_UPDATER =
+ "Failed to delete old snapshot {} while creating device attribute remote updater snapshot.";
+ public static final String FAILED_TO_RENAME_SNAPSHOT_UPDATER =
+ "Failed to rename {} to {} while creating device attribute remote updater snapshot.";
+ public static final String FAILED_TO_CREATE_UPDATER_SNAPSHOT =
+ "Failed to create device attribute remote updater snapshot due to {}";
+ public static final String UPDATER_SNAPSHOT_NOT_FOUND =
+ "Device attribute remote updater snapshot {} not found, consider it as upgraded from the older version, will not update remote";
+ public static final String LOAD_UPDATER_SNAPSHOT_FAILED =
+ "Load device attribute remote updater snapshot from {} failed, continue...";
+ public static final String REQUEST_MEMORY_SIZE_NEGATIVE =
+ "requestMemory size must not be negative";
+ public static final String RELEASE_MEMORY_SIZE_NEGATIVE =
+ "releaseMemory size must not be negative";
+
+ // ======================== MetaFormatUtils ========================
+
+ public static final String ILLEGAL_NAME = "%s is an illegal name.";
+ public static final String NAME_CONTAINS_UNSUPPORTED_CHAR =
+ "The name, %s, contains unsupported character.";
+ public static final String DATABASE_NAME_ILLEGAL_CHARS =
+ "The database name can only contain english or chinese characters, numbers, backticks and underscores. %s";
+ public static final String SDT_COMPRESSION_DEVIATION_REQUIRED =
+ "SDT compression deviation is required";
+ public static final String SDT_COMPRESSION_DEVIATION_NEGATIVE =
+ "SDT compression deviation cannot be negative";
+ public static final String SDT_COMPRESSION_DEVIATION_FORMAT_ERROR =
+ "SDT compression deviation formatting error";
+ public static final String SDT_COMPRESSION_MAX_GREATER_THAN_MIN =
+ "SDT compression maximum time needs to be greater than compression minimum time";
+ public static final String SDT_COMPRESSION_TIME_NEGATIVE =
+ "SDT compression %s time cannot be negative";
+ public static final String SDT_COMPRESSION_TIME_FORMAT_ERROR =
+ "SDT compression %s time formatting error";
+ public static final String SDT_ENABLED_NO_COMPRESSION_TIME =
+ "{} enabled SDT but did not set compression {} time";
+
+ // ======================== Tag/Attribute ========================
+
+ public static final String TIMESERIES_NO_TAG_ATTRIBUTE =
+ "TimeSeries [%s] does not have any tag/attribute.";
+ public static final String TIMESERIES_NO_SPECIFIC_TAG_ATTRIBUTE =
+ "TimeSeries [%s] does not have [%s] tag/attribute.";
+ public static final String TIMESERIES_ALREADY_HAS_ATTRIBUTE =
+ "TimeSeries [%s] already has the attribute [%s].";
+ public static final String TIMESERIES_ALREADY_HAS_TAG =
+ "TimeSeries [%s] already has the tag [%s].";
+ public static final String TIMESERIES_NO_TAG_ATTRIBUTE_LOG =
+ "TimeSeries [{}] does not have tag/attribute [{}]";
+ public static final String TIMESERIES_NO_SPECIFIC_TAG_ATTRIBUTE_FMT =
+ "TimeSeries [%s] does not have tag/attribute [%s].";
+
+ // ======================== TagManager Snapshot ========================
+
+ public static final String FAILED_TO_DELETE_OLD_TAG_SNAPSHOT =
+ "Failed to delete old snapshot {} while creating tagManager snapshot.";
+ public static final String FAILED_TO_RENAME_TAG_SNAPSHOT =
+ "Failed to rename {} to {} while creating tagManager snapshot.";
+ public static final String FAILED_TO_DELETE_AFTER_RENAME_FAILURE =
+ "Failed to delete {} after renaming failure.";
+ public static final String FAILED_TO_CREATE_TAG_SNAPSHOT =
+ "Failed to create tagManager snapshot due to {}";
+ public static final String FAILED_TO_DELETE_AFTER_TAG_SNAPSHOT_FAILURE =
+ "Failed to delete {} after creating tagManager snapshot failure.";
+ public static final String FAILED_TO_DELETE_FILE = "Failed to delete {}.";
+ public static final String FAILED_TO_DELETE_EXISTING_WHEN_LOADING =
+ "Failed to delete existing {} when loading snapshot.";
+ public static final String FAILED_TO_DELETE_EXISTING_WHEN_COPY_FAILURE =
+ "Failed to delete existing {} when copying snapshot failure.";
+
+ // ======================== TagLogFile ========================
+
+ public static final String CREATE_SCHEMA_FOLDER = "create schema folder {}.";
+ public static final String CREATE_SCHEMA_FOLDER_FAILED = "create schema folder {} failed.";
+
+ // ======================== MemMTreeSnapshotUtil ========================
+
+ public static final String FAILED_TO_DELETE_OLD_MTREE_SNAPSHOT =
+ "Failed to delete old snapshot {} while creating mTree snapshot.";
+ public static final String FAILED_TO_RENAME_MTREE_SNAPSHOT =
+ "Failed to rename {} to {} while creating mTree snapshot.";
+ public static final String FAILED_TO_CREATE_MTREE_SNAPSHOT =
+ "Failed to create mTree snapshot due to {}";
+ public static final String SERIALIZE_ERROR_INFO =
+ "Error occurred during serializing MemMTree.";
+ public static final String UNRECOGNIZED_MNODE_TYPE = "Unrecognized MNode type ";
+
+ // ======================== View ========================
+
+ public static final String IS_NO_VIEW = "[%s] is no view.";
+ public static final String VIEW_NOT_SUPPORTED = "View is not supported.";
+ public static final String VIEW_DOES_NOT_SUPPORT_ALIAS = "View doesn't support alias";
+ public static final String CANNOT_CONSTRUCT_ABSTRACT_CLASS =
+ "Can not construct abstract class.";
+
+ // ======================== PBTree ========================
+
+ public static final String TABLE_MODEL_NOT_SUPPORT_PBTREE =
+ "TableModel does not support PBTree yet.";
+ public static final String PBTREE_NOT_SUPPORT_ALTER_ENCODING =
+ "PBTree does not support altering encoding and compressor yet.";
+ public static final String NOT_IMPLEMENTED = "Not implemented";
+ public static final String PBTREE_FILE_OVERWRITTEN =
+ "PBTree File [{}] will be overwritten since already exists.";
+ public static final String SCHEMA_FILE_WRONG_VERSION =
+ "SchemaFile with wrong version, please check or upgrade.";
+ public static final String NODE_NO_CHILD_IN_PBTREE =
+ "Node [%s] has no child in pbtree file.";
+ public static final String SCHEMA_FILE_INSPECTED = "SchemaFile[%s] had been inspected.";
+ public static final String FAILED_TO_CREATE_SCHEMA_FILE_SNAPSHOT =
+ "Failed to create SchemaFile snapshot due to {}";
+ public static final String FAILED_TO_DELETE_OLD_PBTREE_SNAPSHOT =
+ "Failed to delete old snapshot {} while creating pbtree file snapshot.";
+
+ // ======================== PBTree Segment/Page ========================
+
+ public static final String FAILED_TO_INSERT_RELOCATED_SEGMENT =
+ "failed to insert buffer into relocated segment";
+ public static final String FAILED_TO_UPDATE_RELOCATED_SEGMENT =
+ "failed to update buffer upon relocated segment";
+ public static final String ALIAS_INDEX_PAGE_EXTEND_CAPACITY =
+ "AliasIndexPage can only extend to buffer with same capacity.";
+ public static final String SEGMENTS_SPLIT_SAME_CAPACITY =
+ "Segments only splits with same capacity.";
+ public static final String SEGMENT_SPLIT_NO_RECORDS =
+ "Segment can not be split with no records.";
+ public static final String SEGMENT_SPLIT_ONLY_ONE_RECORD =
+ "Segment can not be split with only one record.";
+ public static final String INTERNAL_PAGE_EXTEND_CAPACITY =
+ "InternalPage can only extend to buffer with same capacity.";
+ public static final String INTERNAL_SEGMENT_SPLIT_NO_KEY =
+ "Internal Segment cannot split without insert key";
+ public static final String INTERNAL_SEGMENT_LESS_THAN_2_POINTERS =
+ "Segment has less than 2 pointers can not be split.";
+ public static final String LEAF_SEGMENT_EXTEND_SMALLER =
+ "Leaf Segment cannot extend to a smaller buffer.";
+ public static final String RECORD_CONFLICT_NAME_WITH_ALIAS =
+ "Record [%s] has conflict name with alias of its siblings.";
+ public static final String RECORD_CONFLICT_ALIAS =
+ "Record [%s] has conflict alias [%s] with its siblings.";
+ public static final String RECORD_NOT_EXISTED = "Record[key:%s] Not Existed.";
+ public static final String SEGMENT_CACHE_MAP_INCONSISTENT =
+ "Segment cache map inconsistent with segment list in page %d.";
+ public static final String UNRECOGNIZED_NODE_TYPE = "Unrecognized node type: ";
+
+ // ======================== PBTree PageManager ========================
+
+ public static final String CHILD_SHALL_NOT_HAVE_SEGMENT_ADDRESS =
+ "A child in newChildBuffer shall not have segmentAddress.";
+ public static final String PAGE_INDEX_OUT_OF_RANGE = "Page index %d out of range.";
+ public static final String ROOT_PAGE_SHALL_NOT_BE_MIGRATED =
+ "Root page shall not be migrated.";
+ public static final String SUBORDINATE_INDEX_NOT_ON_SINGLE_PAGE =
+ "Subordinate index shall not build upon single page segment.";
+ public static final String SUBORDINATE_INDEX_BROKEN =
+ "File may be corrupted that subordinate index has broken.";
+ public static final String DUPLICATE_PAGE_INSTANCES =
+ "Duplicate page instances with identical index: {}";
+ public static final String PAGE_LOCKED_TIMES = "Page [{}] had been locked {} times.";
+ public static final String REENTRANT_WRITE_LOCKS_DETAIL =
+ "Reentrant write locks on page {}, content detail:{}";
+ public static final String REENTRANT_WRITE_LOCKS = "Reentrant write locks on page:{}";
+
+ // ======================== PBTree Flush ========================
+
+ public static final String IO_EXCEPTION_UPDATING_SG_MNODE =
+ "IOException occurred during updating StorageGroupMNode {}";
+ public static final String ERROR_DURING_MTREE_FLUSH =
+ "Error occurred during MTree flush, current node is {}";
+
+ // ======================== PBTree ReleaseFlushMonitor ========================
+
+ public static final String RELEASE_TASK_MONITOR_INTERRUPTED =
+ "ReleaseTaskMonitor thread is interrupted.";
+ public static final String RELEASE_FLUSH_TASK_TIMEOUT =
+ "Interrupt because the release task and flush task did not finish within {} milliseconds.";
+
+ // ======================== PBTree PagePool ========================
+
+ public static final String PAGE_CACHE_EVICTION_INTERRUPTED =
+ "Interrupted during page cache eviction. Consider increasing cache size, reducing concurrency, or extending timeout";
+
+ // ======================== ReadOnly MTreeStore ========================
+
+ public static final String READ_ONLY_REENTRANT_MTREE_STORE = "ReadOnlyReentrantMTreeStore";
+
+ // ======================== MNode ========================
+
+ public static final String WRONG_MNODE_TYPE = "Wrong MNode Type";
+ public static final String WRONG_NODE_TYPE = "Wrong node type";
+ public static final String SHOULD_CALL_EXACT_SUB_CLASS = "Should call exact sub class!";
+ public static final String VIEW_TABLE_NOT_ALLOWED = "View table is not allowed.";
+ public static final String TABLE_DEVICE_NOT_UNDER_TREE_MODEL =
+ "Table device shall not create under tree model";
+ public static final String NO_SATISFIED_MNODE_FACTORY = "No satisfied MNodeFactory found";
+
+ // ======================== Logfile ========================
+
+ public static final String READ_LOG_LENGTH_NEGATIVE = "Read log length %s is negative.";
+ public static final String PLAN_NOT_SUPPORT_DESERIALIZATION =
+ "%s plan doesn't support deserialization.";
+ public static final String PLAN_NOT_SUPPORT_SERIALIZATION =
+ "%s plan doesn't support serialization.";
+ public static final String SCHEMA_FILE_LOG_INCOMPLETE_ENTRY = "incomplete entry.";
+
+ // ======================== Template ========================
+
+ public static final String UNKNOWN_TEMPLATE_UPDATE_OPERATION_TYPE =
+ "Unknown template update operation type";
+
+ // ======================== InformationSchema ========================
+
+ public static final String SYSTEM_VIEW_NOT_SUPPORT_SHOW_CREATE =
+ "The system view does not support show create.";
+
+ // ======================== Traverser ========================
+
+ // (uses e.getMessage(), no string literal needed)
+
+ // ======================== BTreePageManager ========================
+
+ // (uses e.getMessage(), no string literal needed)
+
+ // ======================== Additional SchemaRegion ========================
+
+ public static final String SCHEMA_REGION_PLAN_NOT_SUPPORT_EMPTY =
+ "SchemaRegionPlan of type %s doesn't support creating empty plan.";
+ public static final String SCHEMA_REGION_PLAN_NOT_SUPPORT_RECOVER_MEMORY =
+ "SchemaRegionPlan of type %s doesn't support recover operation in SchemaRegionMemoryImpl.";
+ public static final String SCHEMA_REGION_PLAN_NOT_SUPPORT_RECOVER_PBTREE =
+ "SchemaRegionPlan of type %s doesn't support recover operation in SchemaRegionPBTreeImpl.";
+ public static final String PBTREE_NOT_SUPPORT_ALTER_DATA_TYPE =
+ "PBTree does not support altering timeseries data type.";
+
+ // ======================== Additional MTree ========================
+
+ public static final String DEVICE_NUM_UPPER_LIMIT =
+ "The number of devices has reached the upper limit";
+ public static final String TIMESERIES_TYPE_NOT_COMPATIBLE =
+ "The timeseries %s used new type %s is not compatible with the existing one %s";
+ public static final String ALIAS_DUPLICATED =
+ "The alias is duplicated with the name or alias of other measurement, alias: ";
+ public static final String LOGICAL_VIEW_NODE_TYPE_ERROR =
+ "Type of newMNode is not LogicalViewMNode! It's ";
+ public static final String TEMPLATE_SHOULD_MOUNTED_ON_ANCESTOR =
+ "There should be a template mounted on any ancestor of the node [%s] usingTemplate.";
+ public static final String DESCENDANT_SHOULD_NOT_EXIST =
+ "There should not exist descendant under this node %s";
+
+ // ======================== Additional SchemaFile/Page ========================
+
+ public static final String ADDING_CHILDREN_UNDER_TEMPLATE_NOT_ALLOWED =
+ "Adding or updating children of device using template [%s] is NOT allowed.";
+ public static final String CANNOT_FLUSH_NODE_NEGATIVE_ADDRESS =
+ "Cannot flush any node with negative address [%s] except for DatabaseNode.";
+ public static final String SEGMENTED_PAGE_SHARE_BUFFER =
+ "SegmentedPage can share entire buffer slice only when it contains one MAX SIZE segment.";
+ public static final String BYTEBUFFER_CORRUPTED_FOR_SCHEMA_PAGE =
+ "ByteBuffer is corrupted or set to a wrong position to load as a SchemaPage.";
+ public static final String NODE_NO_CHILD_IN_PBTREE_WITH_NAME =
+ "Node[%s] has no child[%s] in pbtree file.";
+ public static final String SINGLE_RECORD_TOO_LARGE =
+ "Single record larger than half page is not supported in SchemaFile now.";
+ public static final String PAGE_REPLACEMENT_ERROR =
+ "Page[%d] replacement error: Different ref count or lock object.";
+ public static final String NODE_NO_VALID_SEGMENT_ADDRESS =
+ "Node [%s] has no valid segment address in pbtree file.";
+
+ // ======================== Additional SchemaFileLog ========================
+
+ public static final String COMMIT_MARK_WITHOUT_PREPARE = "COMMIT_MARK without PREPARE_MARK";
+ public static final String EXTRANEOUS_BYTE_AFTER_PREPARE =
+ "an extraneous byte rather than COMMIT_MARK after PREPARE_MARK";
+ public static final String NOT_ENDED_BY_MARK =
+ "not ended by COMMIT_MARK nor PREPARE_MARK.";
+
+ // ======================== Additional MNodeContainer ========================
+
+ public static final String DUPLICATE_NODE_IN_BUFFERS =
+ "There shall not exist two node with the same name separately in newChildBuffer and updateChildBuffer";
+
+ // ======================== Additional Logfile ========================
+
+ public static final String FAILED_TO_CREATE_FILE_ALREADY_EXISTS =
+ "Failed to create file %s because the named file already exists";
+
+ // ======================== Additional View ========================
+
+ public static final String VISIT_EXPRESSION_NOT_SUPPORTED =
+ "visitExpression in TransformToExpressionVisitor is not supported.";
+
+ // ======================== Additional Tag ========================
+
+ public static final String BYTEBUFFER_SMALLER_THAN_TAG_SIZE =
+ "ByteBuffer capacity is smaller than tagAttributeTotalSize, which is not allowed.";
+ public static final String TIMESERIES_ALREADY_HAS_TAG_ATTRIBUTE_NAMED =
+ "TimeSeries [%s] already has a tag/attribute named [%s].";
+
+ // ======================== Additional Template ========================
+
+ public static final String FAILED_TO_CREATE_TEMPLATE =
+ "Failed to execute create device template {} in config node, status is {}.";
+ public static final String CREATE_TEMPLATE_ERROR_PREFIX = "create template error -";
+ public static final String CREATE_TEMPLATE_ERROR = "create template error.";
+ public static final String GET_ALL_TEMPLATE_ERROR = "get all template error.";
+ public static final String GET_TEMPLATE_INFO_ERROR = "get template info error.";
+ public static final String FAILED_TO_SET_TEMPLATE =
+ "Failed to execute set device template {} on path {} in config node, status is {}.";
+
+ // ======================== Additional InformationSchema ========================
+
+ public static final String INFORMATION_SCHEMA_READ_ONLY =
+ "The database 'information_schema' can only be queried";
+
+ // ======================== Additional GRASS/Updater ========================
+
+ public static final String FAILED_TO_WRITE_ATTR_COMMIT =
+ "Failed to write attribute commit message to region {}.";
+ public static final String FAILED_TO_FETCH_DATANODE_LOCATIONS =
+ "Failed to fetch dataNodeLocations, will retry.";
+
+ // ======================== Additional ResourceByPathUtils ========================
+
+ public static final String FAILED_TO_RESERVE_MEMORY_TVLIST =
+ "Failed to reserve memory for TVList: ramSize {}, timestampsSize {}, arrayMemCost {}, rowCount {}, dataTypes {}";
+
+ // ======================== Additional CachedMTreeStore ========================
+
+ public static final String ERROR_DURING_PBTREE_CLEAR =
+ "Error occurred during PBTree clear, {}";
+ public static final String ERROR_DURING_MTREE_FLUSH_SCHEMA_REGION =
+ "Error occurred during MTree flush, current SchemaRegionId is {}";
+ public static final String ERROR_DURING_MTREE_FLUSH_SCHEMA_REGION_BECAUSE =
+ "Error occurred during MTree flush, current SchemaRegionId is {} because {}";
+
+ // ======================== Additional MemMTreeSnapshotUtil ========================
+
+ public static final String DESERIALIZE_ERROR_INFO =
+ "Error occurred during deserializing MemMTree.";
+
+ // ======================== Additional MetaUtils ========================
+
+ public static final String PATH_NO_LONGER_THAN_SG_LEVEL =
+ "it is no longer than default sg level: ";
+ public static final String PATH_DOES_NOT_START_WITH_ROOT = "it does not start with ";
+
+ // ======================== FakeCRC32Deserializer ========================
+
+ public static final String READ_LOG_LENGTH_NEGATIVE_LOG =
+ "Read log length {} is negative.";
+
+ // ======================== SchemaLogReader ========================
+
+ public static final String FILE_CORRUPTED =
+ "File {} is corrupted. The uncorrupted size is {}.";
+ public static final String LOG_FILE_END_CORRUPTED_TRUNCATE =
+ "The end of log file {} is corrupted. Start truncate it. The unbroken size is {}. The file size is {}.";
+ public static final String FAIL_TO_TRUNCATE_LOG_FILE =
+ "Fail to truncate log file to size {}";
+
+ // ======================== SchemaRegionPlanDeserializer ========================
+
+ public static final String CANNOT_DESERIALIZE_SCHEMA_REGION_PLAN =
+ "Cannot deserialize SchemaRegionPlan from buffer";
+
+ // ======================== MTreeBelowSGMemoryImpl ========================
+
+ public static final String TIMESERIES_NUM_UPPER_LIMIT =
+ "The number of timeseries has reached the upper limit";
+ public static final String ALIAS_DUPLICATED_DETAIL =
+ ", fullPath: ";
+ public static final String ALIAS_DUPLICATED_OTHER_MEASUREMENT =
+ ", otherMeasurement: ";
+ public static final String START_CREATE_TABLE_DEVICE =
+ "Start to create table device {}.{}";
+ public static final String TABLE_DEVICE_ALREADY_EXISTS =
+ "Table device {}.{} already exists";
+ public static final String TABLE_DEVICE_CREATED =
+ "Table device {}.{} created";
+
+ // ======================== CachedMTreeStore / Scheduler ========================
+
+ public static final String MTREE_FLUSH_COST =
+ "It takes {}ms to flush MTree in SchemaRegion {}";
+
+ // ======================== DataNodeTableCache ========================
+
+ public static final String INIT_TABLE_CACHE_SUCCESS =
+ "Init DataNodeTableCache successfully";
+ public static final String PRE_UPDATE_TABLE_SUCCESS =
+ "Pre-update table {}.{} successfully";
+ public static final String PRE_RENAME_OLD_TABLE_SUCCESS =
+ "Pre-rename old table {}.{} successfully";
+ public static final String ROLLBACK_UPDATE_TABLE_SUCCESS =
+ "Rollback-update table {}.{} successfully";
+ public static final String ROLLBACK_RENAME_OLD_TABLE_SUCCESS =
+ "Rollback renaming old table {}.{} successfully.";
+ public static final String COMMIT_UPDATE_TABLE_SUCCESS_WITH_DETAIL =
+ "Commit-update table {}.{} successfully, {}";
+ public static final String COMMIT_UPDATE_TABLE_SUCCESS =
+ "Commit-update table {}.{} successfully.";
+ public static final String RENAME_OLD_TABLE_SUCCESS =
+ "Rename old table {}.{} successfully.";
+ public static final String INTERRUPTED_ACQUIRE_SEMAPHORE_GET_TABLES =
+ "Interrupted when trying to acquire semaphore when trying to get tables from configNode, ignore.";
+ public static final String UPDATE_TABLE_BY_FETCH_WITH_DETAIL =
+ "Update table {}.{} by table fetch, {}";
+ public static final String UPDATE_TABLE_BY_FETCH =
+ "Update table {}.{} by table fetch.";
+ public static final String COMPARE_TABLE_ADDED = "Added table: ";
+ public static final String COMPARE_TABLE_REMOVED = "Removed table: ";
+ public static final String COMPARE_TABLE_NAME = "Table name: ";
+ public static final String COMPARE_TABLE_REMOVED_PROPS = " Removed props: ";
+ public static final String COMPARE_TABLE_ADDED_PROPS = " Added props: ";
+ public static final String COMPARE_TABLE_REMOVED_COLUMNS = " Removed column(s): ";
+ public static final String COMPARE_TABLE_ADDED_COLUMNS = " Added column(s): ";
+ public static final String COMPARE_TABLE_NOT_MODIFIED = " Not modified";
+
+ // ======================== ClusterTemplateManager ========================
+
+ public static final String ILLEGAL_PATH_LOG = "illegal path {}";
+
+ private DataNodeSchemaMessages() {} // constants-only holder; private ctor prevents instantiation
+}
diff --git a/iotdb-core/datanode/src/main/i18n/en/org/apache/iotdb/db/i18n/StorageEngineMessages.java b/iotdb-core/datanode/src/main/i18n/en/org/apache/iotdb/db/i18n/StorageEngineMessages.java
new file mode 100644
index 0000000000000..c2956d087e878
--- /dev/null
+++ b/iotdb-core/datanode/src/main/i18n/en/org/apache/iotdb/db/i18n/StorageEngineMessages.java
@@ -0,0 +1,509 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.i18n;
+
+public final class StorageEngineMessages {
+
+ private StorageEngineMessages() {}
+
+ // ======================== StorageEngine ========================
+
+ public static final String FAIL_TO_RECOVER_WAL = "Fail to recover wal.";
+ public static final String STORAGE_ENGINE_FAILED_TO_SET_UP = "Storage engine failed to set up.";
+ public static final String SEQ_MEMTABLE_FLUSH_CHECK_THREAD_STARTED = "start sequence memtable timed flush check thread successfully.";
+ public static final String UNSEQ_MEMTABLE_FLUSH_CHECK_THREAD_STARTED = "start unsequence memtable timed flush check thread successfully.";
+ public static final String STILL_NOT_EXIT_AFTER_30S = "{} still doesn't exit after 30s";
+ public static final String START_CLOSING_ALL_DB_PROCESSOR = "Start closing all database processor";
+ public static final String START_FORCE_CLOSING_ALL_DB_PROCESSOR = "Start force closing all database processor";
+ public static final String SYSTEM_READ_ONLY_NO_MERGE = "Current system mode is read only, does not support merge";
+ public static final String START_REPAIR_DATA = "start repair data";
+ public static final String STOP_REPAIR_DATA = "stop repair data";
+ public static final String REMOVING_DATA_REGION = "Removing data region {}";
+ public static final String FAILED_TO_DELETE_SNAPSHOT_DIR = "Failed to delete snapshot dir {}";
+ public static final String REMOVED_DATA_REGION = "Removed data region {}";
+ public static final String EXECUTE_LOAD_COMMAND_ERROR = "Execute load command {} error.";
+ public static final String START_REBOOTING_ALL_TIMED_SERVICE = "Start rebooting all timed service.";
+ public static final String STOP_ALL_TIMED_SERVICE_AND_RESTART = "Stop all timed service successfully, and now restart them.";
+ public static final String REBOOT_ALL_TIMED_SERVICE_SUCCESSFULLY = "Reboot all timed service successfully";
+ public static final String FAILED_TO_DELETE = "Failed to delete: {} -> {}";
+ public static final String FAILED_TO_CHECK_OBJECT_FILES = "Failed to check Object Files: {}";
+
+ // ======================== Buffer Cache ========================
+
+ public static final String BLOOM_FILTER_CACHE_SIZE = "BloomFilterCache size = {}";
+ public static final String GET_BLOOM_FILTER_FROM_CACHE = "get bloomFilter from cache where filePath is: {}";
+ public static final String STOP_SERVICE = "{}: stop {}...";
+ public static final String CHUNK_CACHE_SIZE = "ChunkCache size = {}";
+ public static final String GET_CHUNK_FROM_CACHE = "get chunk from cache whose key is: {}";
+ public static final String CACHE_MISS_IN_FILE = "Cache miss: {}.{} in file: {}";
+ public static final String DEVICE_ALL_SENSORS = "Device: {}, all sensors: {}";
+ public static final String TS_METADATA_FILTERED_BY_BLOOM_FILTER = "TimeSeries meta data {} is filter by bloomFilter!";
+ public static final String FILE_NO_SUCH_TIME_SERIES = "The file doesn't have this time series {}.";
+
+ // ======================== Resource Control - Disk ========================
+
+ public static final String FAILED_TO_DEREGISTER_FILE_LOCK = "Failed to deregister file lock because {}";
+ public static final String ALL_FOLDERS_FULL_CHANGE_TO_READ_ONLY = "All folders are full, change system mode to read-only.";
+ public static final String FAILED_TO_PROCESS_FOLDER = "Failed to process folder '";
+ public static final String FAIL_TO_GET_CANONICAL_PATH = "Fail to get canonical path of data dir {}";
+ public static final String ALL_DISKS_OF_TIER_FULL = "All disks of tier {} are full.";
+ public static final String FOLDERS_RESET_SUCCESSFULLY = "The folders is reset successfully, which takes {} ms.";
+ public static final String FOLDER_NOT_EXIST_CREATE_IT = "folder {} doesn't exist, create it";
+ public static final String FAILED_TO_STATISTIC_SIZE = "Failed to statistic the size of {}, because";
+ public static final String DISK_SPACE_INSUFFICIENT_READ_ONLY = "Disk space is insufficient, change system mode to read-only";
+ public static final String CANNOT_CALC_OCCUPIED_SPACE = "Cannot calculate occupied space for path {}.";
+
+ // ======================== Resource Control - Memory ========================
+
+ public static final String WAITING_FOR_THREAD_POOL_SHUTDOWN = "Waiting for {} thread pool to shut down.";
+ public static final String THREAD_POOL_NOT_EXIT_AFTER_MS = "{} thread pool doesn't exit after {}ms.";
+ public static final String INTERRUPTED_WAITING_THREAD_POOL_EXIT = "Interrupted while waiting {} thread pool to exit. ";
+ public static final String BUFFERED_ARRAY_SIZE_THRESHOLD = "BufferedArraySizeThreshold is {}";
+ public static final String CURRENT_SG_COST = "Current Sg cost is {}";
+ public static final String FORCE_DEGRADE_TSFILE_RESOURCE = "Force degrade tsfile resource {}";
+ public static final String CANNOT_DEGRADE_TIME_INDEX_ALL_FILE_LEVEL = "Can't degrade time index any more because all time index are file level.";
+ public static final String DEGRADE_TSFILE_RESOURCE = "Degrade tsfile resource {}";
+
+ // ======================== Resource Control - Quotas ========================
+
+ public static final String SPACE_QUOTA_RESTORE_SUCCEEDED = "Space quota limit restore succeeded, limit: {}.";
+ public static final String SPACE_QUOTA_RESTORE_FAILED = "Space quota limit restore failed, limit: {}.";
+ public static final String THROTTLE_QUOTA_RESTORED_SUCCESSFULLY = "Throttle quota limit restored successfully. ";
+ public static final String THROTTLE_QUOTA_RESTORED_FAILED = "Throttle quota limit restored failed. ";
+ public static final String INVALID_STATEMENT_TYPE = "Invalid statement type: ";
+
+ // ======================== DataRegion ========================
+
+ public static final String CREATE_DB_SYSTEM_DIR_FAILED = "create database system Directory {} failed";
+ public static final String CREATE_DATA_REGION_DIR_FAILED = "create data region directory {} failed";
+ public static final String IS_NOT_A_DIRECTORY = "{} is not a directory.";
+ public static final String FAIL_TO_CLOSE_TSFILE_WHEN_RECOVERING = "Fail to close TsFile {} when recovering";
+ public static final String FAIL_TO_RECOVER_SEALED_TSFILE_SKIP = "Fail to recover sealed TsFile {}, skip it.";
+ public static final String DATA_INCONSISTENT_NOT_TRIGGER_TWICE = "Data inconsistent exception is not supposed to be triggered twice";
+ public static final String INSERT_TO_TSFILE_PROCESSOR_REJECTED = "insert to TsFileProcessor rejected, {}";
+ public static final String INSERT_TO_TSFILE_PROCESSOR_ERROR = "insert to TsFileProcessor error ";
+ public static final String IOEXCEPTION_CREATING_TSFILE_PROCESSOR_RETRY = "meet IOException when creating TsFileProcessor, retry it again";
+ public static final String CANNOT_CLOSE_TSFILE_RESOURCE = "Cannot close a TsFileResource {}";
+ public static final String CANNOT_REMOVE_MOD_FILE = "Cannot remove mod file {}";
+ public static final String FAIL_TO_DELETE_DATA_REGION_FOLDER = "Fail to delete data region folder {}";
+ public static final String FAIL_TO_DELETE_DATA_REGION_OBJECT_FOLDER = "Fail to delete data region object folder {}";
+ public static final String FILES_WERE_CLOSED = "{} files were closed";
+ public static final String FAIL_TO_LOG_DELETE_TO_WAL = "Fail to log delete to wal.";
+ public static final String DELETION_EXECUTING_TABLE_DELETION = "[Deletion] Executing table deletion {}";
+ public static final String DELETION_UNSEALED_FILES_FOR = "[Deletion] unsealed files for {}: {}";
+ public static final String DELETION_SEALED_FILES_FOR = "[Deletion] sealed files for {}: {}";
+ public static final String WRITING_NO_FILE_RELATED_DELETION_TO_WAL = "Writing no-file-related deletion to WAL {}";
+ public static final String DELETION_SKIPPED_FILE_TIME = "[Deletion] {} skipped {}, file time {}";
+ public static final String EXPECT_IS_ACTUAL_IS = "expect is {}, actual is {}";
+ public static final String DELETION_DOES_NOT_INVOLVE_ANY_FILE = "[Deletion] Deletion {} does not involve any file";
+ public static final String FAIL_TO_WRITE_MOD_ENTRY_TO_FILES = "Fail to write modEntry {} to files";
+ public static final String REMOVE_TSFILE_DIRECTLY_WHEN_DELETE_DATA = "Remove tsfile {} directly when delete data";
+ public static final String MEET_ERROR_IN_COMPACTION_SCHEDULE = "Meet error in compaction schedule.";
+ public static final String MEET_ERROR_IN_TTL_CHECK = "Meet error in ttl check.";
+ public static final String FAILED_TO_EXECUTE_OBJECT_TTL_CHECK = "Failed to execute object ttl check";
+ public static final String MEET_ERROR_IN_INSERTION_COMPACTION_SCHEDULE = "Meet error in insertion compaction schedule.";
+ public static final String EXCEPTION_MOVE_NEW_TSFILE_IN_SETTLING = "Exception to move new tsfile in settling";
+ public static final String TSFILE_LOADED_IN_UNSEQ_LIST = "TsFile {} is successfully loaded in unsequence list.";
+ public static final String CANNOT_CLOSE_LAST_READER_AFTER_LOAD = "Cannot close last reader after loading TsFile {}";
+ public static final String FILE_ALREADY_LOADED_IN_UNSEQ_LIST = "The file {} has already been loaded in unsequence list";
+ public static final String CANNOT_DELETE_LOCAL_MOD_FILE = "Cannot delete localModFile {}";
+ public static final String REMOVE_TSFILE_SUCCESSFULLY = "Remove tsfile {} successfully.";
+ public static final String THREAD_INTERRUPTED_WAITING_COMPACTION = "Thread get interrupted when waiting compaction to finish";
+ public static final String PARTIAL_FAILED_INSERTING_ROWS_ONE_DEVICE = "Partial failed inserting rows of one device";
+ public static final String PARTIAL_FAILED_INSERTING_ROWS = "Partial failed inserting rows";
+ public static final String REJECTED_INSERTING_MULTI_TABLETS = "Rejected inserting multi tablets";
+ public static final String PARTIAL_FAILED_INSERTING_MULTI_TABLETS = "Partial failed inserting multi tablets";
+ public static final String INTERRUPTED_WAITING_DATA_REGION_DELETED = "Interrupted When waiting for data region deleted.";
+ public static final String FAILED_TO_RENAME = "Failed to rename {} to {},";
+
+ // ======================== Compaction ========================
+
+ public static final String SELECTOR_NOT_FOR_INNER_SPACE = "This kind of selector cannot be used to select inner space task";
+ public static final String SELECTOR_NOT_FOR_CROSS_SPACE = "This kind of selector cannot be used to select cross space task";
+ public static final String SELECTOR_NOT_FOR_SETTLE = "This kind of selector cannot be used to select settle task";
+ public static final String UNSEQ_FILE_NO_OVERLAP_WITH_SEQ = "Unseq file {} does not overlap with any seq files.";
+ public static final String CANNOT_SELECT_FILE_FOR_CROSS_COMPACTION = "{} cannot select file for cross space compaction";
+ public static final String CURRENT_FILE_SIZE = "Current File is {}, size is {}";
+ public static final String EXCEPTION_SELECTING_FILES = "Exception occurs while selecting files";
+ public static final String UNIMPLEMENTED = "unimplemented";
+ public static final String ILLEGAL_CROSS_COMPACTION_SELECTOR = "Illegal Cross Compaction Selector ";
+ public static final String ILLEGAL_COMPACTION_SELECTOR = "Illegal Compaction Selector ";
+ public static final String COMPACTION_SCHEDULE_TASK_MANAGER_STARTED = "Compaction schedule task manager started.";
+ public static final String WAITING_COMPACTION_SCHEDULE_POOL_SHUTDOWN = "Waiting for compaction schedule task thread pool to shut down";
+ public static final String COMPACTION_SCHEDULE_MANAGER_WAIT_TO_STOP = "CompactionScheduleTaskManager has wait for {} seconds to stop";
+ public static final String COMPACTION_SCHEDULE_TASK_MANAGER_STOPPED = "CompactionScheduleTaskManager stopped";
+ public static final String REPAIR_FAILED_RENAME_PROGRESS_FILE = "[RepairTaskManager] Failed to rename repair data progress file";
+ public static final String REPAIR_SKIP_TASK_STOPPING = "[RepairTaskManager] skip current task because repair task is stopping";
+ public static final String REPAIR_SCAN_TASK_CANCELLED = "[RepairScheduler] scan task is cancelled";
+ public static final String REPAIR_ERROR_SCAN_TIME_PARTITION = "[RepairScheduler] Meet errors when scan time partition files";
+ public static final String COMPACTION_TASK_MANAGER_STARTED = "Compaction task manager started.";
+ public static final String WAITING_TASK_EXECUTION_POOL_SHUTDOWN = "Waiting for task taskExecutionPool to shut down";
+ public static final String WAITING_TASK_EXECUTION_POOL_SHUTDOWN_MS = "Waiting for task taskExecutionPool to shut down in {} ms";
+ public static final String INTERRUPTED_WAITING_ALL_TASK_FINISH = "Interrupted when waiting all task finish";
+ public static final String ALL_COMPACTION_TASK_FINISH = "All compaction task finish";
+ public static final String COMPACTION_MANAGER_WAIT_TO_STOP = "CompactionManager has wait for {} seconds to stop";
+ public static final String COMPACTION_MANAGER_STOPPED = "CompactionManager stopped";
+ public static final String COMPACTION_THREAD_POOL_CANNOT_CLOSE = "CompactionThreadPool can not be closed in {} ms";
+ public static final String TIMEOUT_WAITING_TASK_FUTURE = "Timeout when waiting for task future";
+ public static final String COMPACTION_THREAD_TERMINATES = "CompactionThread-{} terminates because interruption";
+ public static final String EXCEPTION_EXECUTING_COMPACTION_TASK = "Exception occurred when executing compaction task. {}";
+ public static final String TIMEOUT_GET_COMPACTION_TASK_SUMMARY = "Timeout when trying to get compaction task summary";
+ public static final String TTL_CHECK_TASK_FAILED = "[TTLCheckTask-{}] Failed to execute ttl check";
+ public static final String ERROR_CREATING_SETTLE_LOG = "meet error when creating settle log, file path:{}";
+ public static final String WRITE_SETTLE_LOG_FAILED = "write settle log file failed, the log file:{}";
+ public static final String CLOSE_UPGRADE_LOG_FAILED = "close upgrade log file failed, the log file:{}";
+ public static final String FIND_SETTLED_FILE = "find settled file for {}";
+ public static final String GENERATE_SETTLED_FILE = "generate settled file for {}";
+ public static final String ALL_FILES_SETTLED_SUCCESSFULLY = "All files settled successfully! ";
+ public static final String SUB_COMPACTION_TASK_MEET_ERRORS = "[Compaction] SubCompactionTask meet errors ";
+ public static final String TASK_TYPE_NO_TMP_FILE_SUFFIX = "Current task type {} does not have tmp file suffix.";
+ public static final String CANNOT_GET_MOD_FILE = "Can not get mod file of {}";
+ public static final String COMPACTION_START_DELETE_REAL_FILE = "{} [Compaction] Compaction starts to delete real file ";
+ public static final String COMPACTION_START_DELETE_SOURCE_MODS = "{} [Compaction] Start to delete modifications of source files";
+ public static final String COMPACTION_DELETE_FILE = "[Compaction] delete file: {}";
+ public static final String FAILED_TO_READ_FILE_ATTRIBUTES = "Failed to read file attributes: {}";
+ public static final String FAILED_TO_CHECK_TABLE_DIR = "Failed to check table dir: {}";
+ public static final String REMOVE_OBJECT_FILE_SIZE = "Remove object file {}, size is {}(byte)";
+ public static final String FAILED_TO_DELETE_EXPIRED_OBJECT_FILE = "Failed to delete expired object file: {}";
+ public static final String SHOULD_CALL_EXACT_SUB_CLASS = "Should call exact sub class!";
+ public static final String NO_NEXT_BLOCK = "no next block";
+ public static final String METHOD_NOT_SUPPORTED_FAST_CROSS_WRITER = "Does not support this method in FastCrossCompactionWriter";
+ public static final String DEVICE_SHOULD_EXIST_IN_SEQ_FILE = "The device should exist in current seq file";
+ public static final String METHOD_NOT_SUPPORTED_FAST_INNER_WRITER = "Does not support this method in FastInnerCompactionWriter";
+ public static final String METHOD_NOT_SUPPORTED_READ_POINT_WRITER = "Does not support this method in ReadPointInnerCompactionWriter";
+ public static final String UNKNOWN_DATA_TYPE = "Unknown data type ";
+ public static final String FAILED_TO_DELETE_TARGET_FILE = "failed to delete target file %s";
+ public static final String SOURCE_FILES_CANNOT_BE_DELETED = "source files cannot be deleted successfully";
+ public static final String FAIL_TO_GET_TSFILE_NAME = "Fail to get the tsfile name of {}";
+ public static final String ERROR_ESTIMATE_INNER_COMPACTION_MEMORY = "Meet error when estimate inner compaction memory";
+ public static final String CANNOT_RECOVER_INSERTION_CROSS_TASK = "Can not recover InsertionCrossSpaceCompactionTask";
+ public static final String FAILED_TO_REPAIR_FILE = "Failed to repair file {}";
+ public static final String FAILED_DELETE_FULLY_DIRTY_SOURCE = "Failed to delete fully_dirty source file.";
+ public static final String RECOVER_MODS_FILE_ERROR = "recover mods file error on list files:{}";
+ public static final String UNKNOWN_COMPACTION_TASK_TYPE = "Unknown compaction task type {}";
+ public static final String RECOVER_COMPACTION_ERROR = "Recover compaction error";
+ public static final String COMPACTION_RECOVER_FAILED = "{} [Compaction][Recover] Failed to recover compaction";
+ public static final String MEET_ERROR_WHEN_READ_TSFILE = "Meet error when read tsfile {}";
+ public static final String UNKNOWN_REPAIR_LOG_FORMAT = "Unknown format of repair log";
+ public static final String REPAIR_START_CHECK_TSFILE = "[RepairScheduler] start check tsfile: {}";
+ public static final String REPAIR_SKIPPED_BROKEN_FILE = "[RepairScheduler] {} is skipped because it is broken";
+ public static final String REPAIR_FAILED_CREATE_LOGGER = "[RepairScheduler] Failed to create repair logger";
+ public static final String REPAIR_FAILED_CLOSE_LOGGER = "[RepairScheduler] Failed to close repair logger";
+ public static final String REPAIR_WAIT_COMPACTION_FINISH = "[RepairScheduler] Wait compaction schedule task finish";
+ public static final String REPAIR_WAIT_ALL_RUNNING_TASK_FINISH = "[RepairScheduler] Wait all running compaction task finish";
+ public static final String REPAIR_TASK_FINISHED = "[RepairScheduler] Repair task finished";
+ public static final String REPAIR_SCHEDULE_TASK_ERROR = "[RepairScheduler] Meet error when execute repair schedule task";
+ public static final String REPAIR_FAILED_INIT_SCHEDULE_TASK = "[RepairScheduler] Failed to init repair schedule task";
+ public static final String REPAIR_ALL_PARTITIONS_DONE_SKIP = "[RepairScheduler] All time partitions have been repaired, skip repair task";
+ public static final String END_MUST_GREATER_THAN_START = "end must greater than start";
+ public static final String DATA_DIRS_MUST_NOT_BE_EMPTY = "data_dirs must not be empty";
+ public static final String DOES_NOT_EXIST = "{} doesn't exist.";
+ public static final String CHECK_FAILED = "check {} failed.";
+ public static final String FAILED_TO_DEAL_WITH = "failed to deal with {}";
+ public static final String ERROR_OCCURRED = "error occurred";
+
+ // ======================== MemTable ========================
+
+ public static final String CANNOT_DESERIALIZE_OLD_MEMTABLE_SNAPSHOT = "Cannot deserialize OldMemTableSnapshot";
+ public static final String DEVICE_ID_LENGTH_SHOULD_BE_POSITIVE = "DeviceID's length should be larger than 0.";
+ public static final String CREATE_NEW_TSFILE_PROCESSOR = "create a new tsfile processor {}";
+ public static final String REOPEN_TSFILE_PROCESSOR = "reopen a tsfile processor {}";
+ public static final String EXCEPTION_DURING_WAL_FLUSH = "Exception during wal flush";
+ public static final String DELETION_IN_FLUSHING_MEMTABLE = "[Deletion] Deletion with {} in flushingMemTable";
+ public static final String START_WAIT_UNTIL_FILE_CLOSED = "Start to wait until file {} is closed";
+ public static final String FILE_CLOSED_SYNCHRONOUSLY = "File {} is closed synchronously";
+ public static final String DATAREGION_TSFILE_ERROR = "{}: {}";
+ public static final String DELETION_WRITTEN_WHEN_FLUSH = "[Deletion] Deletion : {} written when flush memtable";
+ public static final String FSYNC_MEMTABLE_TO_DISK_ERROR = "fsync memTable data to disk error,";
+ public static final String FLUSHING_MEMTABLES_CLEAR = "{} flushingMemtables is clear";
+ public static final String START_TO_END_FILE = "Start to end file {}";
+ public static final String ENDED_FILE = "Ended file {}";
+ public static final String START_TO_END_EMPTY_FILE = "Start to end empty file {}";
+ public static final String TIME_CHUNK_METADATA_SHOULD_NOT_BE_EMPTY = "TimeChunkMetadata in aligned device should not be empty";
+ public static final String WRITABLE_MEM_CHUNK_UNSUPPORTED_TYPE = "WritableMemChunk does not support data type: {}";
+
+ // ======================== Modification ========================
+
+ public static final String UNRECOGNIZED_PREDICATE_TYPE = "Unrecognized predicate type: ";
+ public static final String UNSUPPORTED_MOD_TYPE = "Unsupported mod type: ";
+ public static final String UNKNOWN_MOD_TYPE = "Unknown ModType: ";
+ public static final String CANNOT_CLOSE_MOD_FILE_INPUT_STREAM = "Cannot close mod file input stream of {}";
+ public static final String CANNOT_READ_MOD_FILE_INPUT_STREAM = "Cannot read mod file input stream of {}";
+ public static final String COMPACT_MODS_FILE_EXCEPTION = "compact mods file exception of {}";
+ public static final String SETTLE_SUCCESSFUL = "{} settle successful";
+ public static final String REMOVE_ORIGIN_OR_RENAME_MODS_ERROR = "remove origin file or rename new mods file error.";
+ public static final String DELETE_MODIFICATION_FILE_FAILED = "Delete ModificationFile {} failed.";
+ public static final String CANNOT_CREATE_HARDLINK = "Cannot create hardlink for {}";
+ public static final String ERROR_READING_MODIFICATIONS = "An error occurred when reading modifications";
+ public static final String ERROR_DECODE_LINE_TO_MODIFICATION = "An error occurred when decode line-[{}] to modification";
+ public static final String MODIFICATIONS_WILL_BE_TRUNCATED = "The modifications[{}] will be truncated to size {}.";
+ public static final String LAST_LINE_OF_MODS_INCOMPLETE = "The last line of Mods is incomplete, will be truncated";
+ public static final String UNKNOWN_MODIFICATION_TYPE = "Unknown modification type: ";
+ public static final String INCORRECT_DELETION_FIELDS_NUMBER = "Incorrect deletion fields number: ";
+ public static final String INVALID_TIMESTAMP = "Invalid timestamp: ";
+ public static final String INVALID_SERIES_PATH = "Invalid series path: ";
+
+ // ======================== WAL ========================
+
+ public static final String START_REBOOTING_WAL_DELETE_THREAD = "Start rebooting wal delete thread.";
+ public static final String STOP_WAL_DELETE_THREAD_AND_RESTART = "Stop wal delete thread successfully, and now restart it.";
+ public static final String TIMED_WAL_DELETE_THREAD_INTERRUPTED = "Timed wal delete thread is interrupted.";
+ public static final String INTERRUPTED_WAITING_WAL_FLUSHED = "Interrupted when waiting for all write-ahead logs flushed.";
+ public static final String STOPPING_WAL_MANAGER = "Stopping WALManager";
+ public static final String DELETING_OUTDATED_FILES_BEFORE_EXIT = "Deleting outdated files before exiting";
+ public static final String WAL_MANAGER_STOPPED = "WALManager stopped";
+ public static final String WAITING_THREAD_TERMINATED_TIMEOUT = "Waiting thread {} to be terminated is timeout";
+ public static final String THREAD_NOT_EXIT_AFTER_30S = "Thread {} still doesn't exit after 30s";
+ public static final String FAILED_TO_DELETE_OUTDATED_WAL_FILE = "Failed to delete outdated wal file";
+ public static final String UNRECOGNIZED_CHECKPOINT_TYPE = "unrecognized checkpoint type ";
+ public static final String CREATE_FOLDER_FOR_WAL_BUFFER = "create folder {} for wal buffer-{}.";
+ public static final String FAIL_TO_LOG_MAX_MEMTABLE_ID = "Fail to log max memTable id: {}";
+ public static final String FAIL_TO_MAKE_CHECKPOINT = "Fail to make checkpoint: {}";
+ public static final String MEMTABLE_ID_NOT_FOUND_IN_MAP = "memtableId {} not found in MemTableId2Info";
+ public static final String FAIL_TO_CLOSE_WAL_CHECKPOINT_WRITER = "Fail to close wal node-{}'s checkpoint writer.";
+ public static final String CANNOT_WRITE_TO = "Cannot write to {}";
+ public static final String REACH_END_OFFSET_OF_WAL_FILE = "Reach the end offset of wal file";
+ public static final String UNEXPECTED_END_OF_FILE = "Unexpected end of file";
+ public static final String WAL_SEGMENT_V1_FAILED_V2_SUCCESS = "Failed to load WAL segment in V1 way, try in V2 way successfully.";
+ public static final String UNEXPECTED_EXCEPTION = "Unexpected exception";
+ public static final String FAIL_TO_READ_WAL_ENTRY_SKIP_BROKEN = "Fail to read WALEntry from wal file {}, skip broken WALEntries.";
+ public static final String INVALID_CHECKPOINT_FILE_NAME = "Invalid checkpoint file name: ";
+ public static final String INVALID_WAL_FILE_NAME = "Invalid wal file name: ";
+ public static final String INTERRUPTED_WAITING_FOR_RESULT = "Interrupted when waiting for result.";
+ public static final String CANNOT_WRITE_WAL_INTO_FAKE_NODE = "Cannot write wal into a fake node. ";
+ public static final String CREATE_FOLDER_FOR_WAL_NODE = "create folder {} for wal node-{}.";
+ public static final String FAIL_TO_DELETE_WAL_NODE_OUTDATED_FILES = "Fail to delete wal node-{}'s outdated files.";
+ public static final String FAIL_TO_GET_DATA_REGION_PROCESSOR = "Fail to get data region processor for {}";
+ public static final String WAITING_TOO_LONG_FOR_MEMTABLE_FLUSH = "Waiting too long for memTable flush to be done.";
+ public static final String INTERRUPTED_WAITING_MEMTABLE_FLUSH = "Interrupted when waiting for memTable flush to be done.";
+ public static final String FAIL_TO_ROLL_WAL_LOG_WRITER = "Fail to roll wal log writer.";
+ public static final String FAIL_TO_SNAPSHOT_MEMTABLE = "Fail to snapshot memTable of {}";
+ public static final String START_RECOVERING_WAL_NODE_IN_DIR = "Start recovering WAL node in the directory {}";
+ public static final String ERROR_DELETE_CHECKPOINT_FILE = "error when delete checkpoint file. {}";
+ public static final String FAIL_TO_READ_WAL_LOGS_SKIP = "Fail to read wal logs from {}, skip them";
+ public static final String FAIL_TO_RENAME_FILE = "Fail to rename file {} to {}";
+ public static final String FAIL_TO_RECOVER_WAL_METADATA = "Fail to recover metadata of wal file {}";
+ public static final String START_RECOVERING_WAL = "Start recovering wal.";
+ public static final String SUCCESSFULLY_RECOVER_ALL_WAL_NODES = "Successfully recover all wal nodes.";
+ public static final String STORAGE_ENGINE_FAILED_TO_RECOVER = "StorageEngine failed to recover.";
+ public static final String CANNOT_RECOVER_TSFILE_WAL_ALREADY_STARTED = "Cannot recover tsfile from wal because wal recovery has already started";
+ public static final String FAIL_TO_REMOVE_RECOVER_PERFORMER = "Fail to remove recover performer for file {}";
+ public static final String TSFILE_MISSING_SKIP_RECOVERY = "TsFile {} is missing, will skip its recovery.";
+ public static final String UNSUPPORTED_TYPE = "Unsupported type ";
+ public static final String ERROR_REDO_WAL = "meet error when redo wal of {}";
+ public static final String CREATE_FOLDER_FOR_WAL_NODE_BUFFER = "Create folder {} for wal node-{}'s buffer.";
+ public static final String OPEN_NEW_WAL_FILE_FOR_BUFFER = "Open new wal file {} for wal node-{}'s buffer.";
+ public static final String FAIL_TO_ALLOCATE_WAL_BUFFER_OOM = "Fail to allocate wal node-{}'s buffer because out of memory.";
+ public static final String INTERRUPTED_WAITING_ADD_WAL_ENTRY = "Interrupted when waiting for adding WALEntry to buffer.";
+ public static final String HANDLE_ROLL_LOG_WRITER_SIGNAL = "Handle roll log writer signal for wal node-{}.";
+ public static final String INTERRUPTED_WAITING_WORKING_BUFFER = "Interrupted When waiting for available working buffer.";
+ public static final String FAIL_TO_PUT_CLOSE_SIGNAL = "Fail to put CLOSE_SIGNAL to walEntries.";
+ public static final String FAIL_TO_CLOSE_WAL_LOG_WRITER = "Fail to close wal node-{}'s log writer.";
+ public static final String UNKNOWN_WAL_ENTRY_TYPE = "Unknown WALEntry type";
+ public static final String UNKNOWN_WAL_ENTRY_TYPE_WITH_VALUE = "Unknown WALEntry type ";
+ public static final String INVALID_WAL_ENTRY_TYPE_CODE = "Invalid WALEntryType code: ";
+ public static final String CANNOT_SERIALIZE_CHECKPOINT_TO_WAL = "Cannot serialize checkpoint to wal files.";
+ public static final String UNSUPPORTED_WAL_ENTRY_TYPE = "Unsupported wal entry type ";
+ public static final String CANNOT_USE_WAL_INFO_AS_SIGNAL_TYPE = "Cannot use wal info type as wal signal type";
+ public static final String FAIL_TO_CREATE_WAL_NODE_DISKS_FULL = "Fail to create wal node because all disks of wal folders are full.";
+ public static final String FAILED_TO_CREATE_WAL_NODE_AFTER_RETRIES = "Failed to create WAL node after retries for identifier: ";
+ public static final String FAIL_TO_CREATE_WAL_NODE = "Fail to create wal node";
+
+ // ======================== Flush ========================
+
+ public static final String RESTORE_FILE_ERROR = "restore file error caused by ";
+ public static final String CANNOT_DELETE_OLD_COMPRESSION_FILE = "Can't delete old data region compression file {}";
+ public static final String CANNOT_DELETE_RATIO_FILE = "Cannot delete ratio file {}";
+ public static final String TAKE_TASK_INTO_IO_QUEUE_INTERRUPTED = "Take task into ioTaskQueue Interrupted";
+ public static final String PUT_TASK_INTO_IO_QUEUE_INTERRUPTED = "Put task into ioTaskQueue Interrupted";
+ public static final String TAKE_TASK_FROM_IO_QUEUE_INTERRUPTED = "take task from ioTaskQueue Interrupted";
+ public static final String FLUSH_SUB_TASK_MANAGER_STARTED = "Flush sub task manager started.";
+ public static final String FLUSH_SUB_TASK_MANAGER_STOPPED = "Flush sub task manager stopped";
+ public static final String FLUSH_TASK_MANAGER_STARTED = "Flush task manager started.";
+ public static final String FLUSH_TASK_MANAGER_STOPPED = "Flush task manager stopped";
+
+ // ======================== Read ========================
+
+ public static final String MEM_CHUNK_READER_NOT_SUPPORT_METHOD = "mem chunk reader does not support this method";
+ public static final String MEM_ALIGNED_PAGE_READER_TSBLOCK = "[memAlignedPageReader] TsBlock:{}";
+ public static final String AFTER_FILTER_CHUNK_METADATA_LIST = "After removed by filter Chunk meta data list is: ";
+ public static final String AFTER_MODIFICATION_CHUNK_METADATA_LIST = "After modification Chunk meta data list is: ";
+ public static final String TIME_DATA_SIZE_NOT_MATCH = "Time data size not match";
+ public static final String QUERY_OPENED_FILES = "Query has opened {} files !";
+ public static final String CANNOT_CLOSE_TSFILE_SEQUENCE_READER = "Can not close TsFileSequenceReader {} !";
+ public static final String QUERY_SEALED_FILE_INFO = "[Query Sealed File Info]\n";
+ public static final String QUERY_ID_FORMAT = "\t[queryId: {}]\n";
+ public static final String QUERY_FILE_PATH_FORMAT = "\t\t{}\n";
+ public static final String QUERY_UNSEALED_FILE_INFO = "[Query Unsealed File Info]\n";
+
+ // ======================== Snapshot ========================
+
+ public static final String EXCEPTION_LOAD_SNAPSHOT = "Exception occurs while load snapshot from {}";
+ public static final String READING_SNAPSHOT_LOG_FILE = "Reading snapshot log file {}";
+ public static final String REMOVE_ALL_DATA_FILES_IN_ORIGINAL_DIR = "Remove all data files in original data dir";
+ public static final String FAILED_TO_REMOVE_ORIGIN_DATA_FILES = "Failed to remove origin data files";
+ public static final String MOVING_SNAPSHOT_FILE_TO_DATA_DIRS = "Moving snapshot file to data dirs";
+ public static final String NO_COMPRESSION_RATIO_FILE_IN_DIR = "No compression ratio file in dir {}";
+ public static final String CANNOT_LOAD_COMPRESSION_RATIO = "Cannot load compression ratio from {}";
+ public static final String LOADED_COMPRESSION_RATIO = "Loaded compression ratio from {}";
+ public static final String EXCEPTION_READING_SNAPSHOT_FILE = "Exception occurs when reading snapshot file";
+ public static final String SNAPSHOT_NOT_COMPLETE_CANNOT_LOAD = "This snapshot is not complete, cannot load it";
+ public static final String CREATED_HARD_LINK = "Created hard link from {} to {}";
+ public static final String EXCEPTION_CLOSING_LOG_ANALYZER = "Exception occurs when closing log analyzer";
+ public static final String CANNOT_CREATE_PARENT_FOLDER = "Cannot create parent folder for ";
+ public static final String CANNOT_CREATE_FILE = "Cannot create file ";
+ public static final String FAILED_TO_CLOSE_SNAPSHOT_LOGGER = "Failed to close snapshot logger";
+ public static final String SNAPSHOTTING_COMPRESSION_RATIO = "Snapshotting compression ratio {}.";
+ public static final String CATCH_IO_EXCEPTION_CREATING_SNAPSHOT = "Catch IOException when creating snapshot";
+ public static final String HARD_LINK_TARGET_DIR_NOT_EXIST = "Hard link target dir {} doesn't exist";
+ public static final String HARD_LINK_SOURCE_FILE_NOT_EXIST = "Hard link source file {} doesn't exist, this file will be ignored.";
+ public static final String COPY_TARGET_DIR_NOT_EXIST = "Copy target dir {} doesn't exist";
+ public static final String COPY_SOURCE_FILE_NOT_EXIST = "Copy source file {} doesn't exist";
+ public static final String CANNOT_CREATE_DIRECTORY = "Cannot create directory ";
+ public static final String CLEANING_UP_SNAPSHOT_DIR = "Cleaning up snapshot dir for {}";
+ public static final String FAILED_TO_CREATE_DIR = "Failed to create directory %s";
+ public static final String FAILED_TO_TAKE_SNAPSHOT_CLEAN_UP = "Failed to take snapshot for {}-{}, clean up";
+ public static final String SUCCESSFULLY_TAKE_SNAPSHOT = "Successfully take snapshot for {}-{}, snapshot directory is {}";
+ public static final String EXCEPTION_TAKING_SNAPSHOT = "Exception occurs when taking snapshot for {}-{}";
+ public static final String SNAPSHOT_COMPRESSION_RATIO_IN_DIR = "Snapshot compression ratio {} in {}.";
+ public static final String CANNOT_SNAPSHOT_COMPRESSION_RATIO = "Cannot snapshot compression ratio {} in {}.";
+ public static final String CLEAR_SNAPSHOT_DIR_FAIL = "Clear snapshot dir fail, you should manually delete this dir before do region migration again: {}";
+ public static final String HARD_LINK_SOURCE_FILE_RETRY = "Hard link source file {} doesn't exist, will retry for {} times...";
+ public static final String TRY_SHOW_FILES_IN_PARENT_DIR = "Try to show all files in parent dir...";
+ public static final String CANNOT_SHOW_FILES_PARENT_DIR_NULL = "Cannot show files because parent dir is null";
+ public static final String FAILED_DELETE_FOLDER_CLEANING_UP = "Failed to delete folder {} when cleaning up";
+
+ // ======================== TsFile Resource ========================
+
+ public static final String FAILED_TO_SERIALIZE_SHARED_MOD_FILE = "Failed to serialize shared mod file";
+ public static final String FAILED_TO_GET_SHARED_MOD_FILE = "Failed to get shared mod file";
+ public static final String UPGRADING_MOD_FILE_INTERRUPTED = "Upgrading mod file interrupted";
+ public static final String CANNOT_UPGRADE_MOD_FILE = "Cannot upgrade mod file";
+ public static final String TIME_INDEX_VALUE = "TimeIndex = {}";
+ public static final String RESOURCE_FILE_NOT_FOUND = "resource file not found";
+ public static final String CANNOT_BUILD_DEVICE_TIME_INDEX = "cannot build DeviceTimeIndex from resource ";
+ public static final String TSFILE_CANNOT_BE_DELETED = "TsFile {} cannot be deleted: {}";
+ public static final String MODIFICATION_FILE_CANNOT_BE_DELETED = "ModificationFile {} cannot be deleted: {}";
+ public static final String TSFILE_RESOURCE_CANNOT_BE_DELETED = "TsFileResource {} cannot be deleted: {}";
+ public static final String FILE_NAME_NOT_STANDARD = "File name may not meet the standard naming specifications.";
+ public static final String FAILED_TO_READ_MODS = "Failed to read mods from {} for {}";
+ public static final String INVALID_INPUT = "Invalid input: ";
+ public static final String ALL_DISKS_FULL_CANNOT_CREATE_TSFILE_DIR = "All disks are full, cannot create tsfile directory";
+ public static final String DISK_SPACE_INSUFFICIENT = "Disk space insufficient";
+ public static final String FAILED_TO_CREATE_TSFILE_DIR_AFTER_RETRIES = "Failed to create tsfile directory after retries";
+ public static final String FAILED_TO_CREATE_DIR_AFTER_RETRIES = "Failed to create directory after retries";
+ public static final String TSFILE_NAME_FORMAT_INCORRECT = "tsfile file name format is incorrect:";
+ public static final String WRONG_TIME_INDEX_TYPE_LOG = "Wrong timeIndex type {}";
+ public static final String WRONG_TIME_INDEX_TYPE = "Wrong timeIndex type ";
+ public static final String ERROR_RECORD_FILE_TIME_INDEX_CACHE = "Meet error when record FileTimeIndexCache: {}";
+ public static final String ERROR_RECORD_FILE_TIME_INDEX_CACHE_NO_DETAIL = "Meet error when record FileTimeIndexCache";
+ public static final String ERROR_COMPACT_FILE_TIME_INDEX_CACHE = "Meet error when compact FileTimeIndexCache: {}";
+ public static final String ERROR_COMPACT_FILE_TIME_INDEX_CACHE_NO_DETAIL = "Meet error when compact FileTimeIndexCache";
+ public static final String FILE_TIME_INDEX_FILE_ALREADY_EXISTS = "FileTimeIndex file has existed,filePath:{}";
+ public static final String ERROR_CLOSE_FILE_TIME_INDEX_CACHE = "Meet error when close FileTimeIndexCache: {}";
+ public static final String END_OF_STREAM_REACHED = "The end of stream has been reached";
+ public static final String V012_FILE_TIME_INDEX_SHOULD_NEVER_APPEAR = "V012_FILE_TIME_INDEX should never appear";
+ public static final String INVALID_ORDINAL = "Invalid ordinal";
+
+ // ======================== DataRegion Utils ========================
+
+ public static final String FAILED_TO_SCAN_FILE = "Failed to scan file {}";
+ public static final String DEVICE_LEVEL_METADATA_INDEX_NOT_SUPPORTED = "device level metadata index node is not supported";
+ public static final String NO_MORE_DATA_IN_SHARED_TIME_BUFFER = "No more data in SharedTimeDataBuffer";
+ public static final String FAILED_TO_CALC_TSFILE_TABLE_SIZES = "Failed to calculate tsfile table sizes";
+ public static final String TIME_INDEX_IS_NULL = "{} {} time index is null";
+ public static final String EMPTY_RESOURCE = "{} {} empty resource";
+ public static final String ERROR_VALIDATE_RESOURCE_FILE = "meet error when validate .resource file:{},e";
+ public static final String ILLEGAL_TSFILE = "{} {} illegal tsfile";
+ public static final String ERROR_VALIDATING_TSFILE = "Meets error when validating TsFile {}, ";
+ public static final String EXCEPTION_APPLY_TABLE_DISK_USAGE_INDEX = "Meet exception when apply TableDiskUsageIndex operation.";
+ public static final String FAILED_RECOVER_TABLE_DISK_USAGE_INDEX = "Failed to recover TableDiskUsageIndex";
+ public static final String FAILED_SYNC_TABLE_SIZE_INDEX = "Failed to sync tsfile table size index.";
+ public static final String WRITE_OBJECT_DELTA = "writeObjectDelta";
+ public static final String EXCEPTION_REMOVE_TABLE_DISK_USAGE_INDEX = "Meet exception when remove TableDiskUsageIndex.";
+ public static final String INTERRUPTED_ADDING_OP_TO_QUEUE = "Interrupted while adding operation {} to queue.";
+ public static final String FAILED_TO_MOVE_FILE = "Failed to move {} to {}";
+ public static final String FAILED_TO_READ_KEY_FILE_DURING_COMPACTION = "Failed to read key file during compaction";
+ public static final String FAILED_COMPACTION_TABLE_SIZE_INDEX = "Failed to execute compaction for tsfile table size index file";
+ public static final String FAILED_TO_READ_TABLE_SIZE_INDEX = "Failed to read table tsfile size index file {}";
+ public static final String TABLE_NUM_SHOULD_BE_POSITIVE = "tableNum should be greater than 0";
+ public static final String BACKWARD_SEEK_NOT_SUPPORTED = "Backward seek is not supported";
+ public static final String THREAD_INTERRUPTED_SKIP_WRITE_FOR_IO_SAFETY = "someone interrupt current thread, so no need to do write for io safety";
+ public static final String PARTITION_LOG_FILE_ALREADY_EXISTS = "Partition log file has existed,filePath:{}";
+
+ // ======================== Load TsFile ========================
+
+ public static final String UNSUPPORTED_TSFILE_DATA_TYPE = "Unsupported TsFileData type: ";
+ public static final String DELETE_AFTER_LOADING_ERROR = "Delete After Loading {} error.";
+ public static final String LOAD_TSFILE_DIR_CREATED = "Load TsFile dir {} is created.";
+ public static final String CANNOT_CREATE_TSFILE_FOR_WRITING = "Can not create TsFile {} for writing.";
+ public static final String CLOSE_TSFILE_IO_WRITER_ERROR = "Close TsFileIOWriter {} error.";
+ public static final String CLOSE_MODIFICATION_FILE_ERROR = "Close ModificationFile {} error.";
+ public static final String TASK_DIR_NOT_EMPTY_SKIP_DELETE = "Task dir {} is not empty, skip deleting.";
+ public static final String LOAD_CLEANUP_TASK_CANCELED = "Load cleanup task {} is canceled.";
+ public static final String LOAD_CLEANUP_TASK_STARTS = "Load cleanup task {} starts.";
+ public static final String LOAD_CLEANUP_TASK_ERROR = "Load cleanup task {} error.";
+ public static final String FAILED_UPDATE_FILE_COUNTER_DIR_NOT_EXIST = "Failed to update file counter, dir({}) does not exist";
+ public static final String UNSUPPORTED_STAGE = "Unsupported stage: ";
+ public static final String RELEASE_MEMORY_BLOCK_FAILED = "Release memory block {} failed";
+ public static final String EXCEED_TOTAL_MEMORY_SIZE = "{} has exceed total memory size";
+ public static final String REDUCE_MEMORY_USAGE_TO_NEGATIVE = "{} has reduce memory usage to negative";
+ public static final String FORCE_ALLOCATE_INTERRUPTED = "forceAllocate: interrupted while waiting for available memory";
+ public static final String LOAD_ALLOCATED_MEMORY_BLOCK = "Load: Allocated MemoryBlock from query engine, size: {}";
+ public static final String RELEASE_DATA_CACHE_MEMORY_BLOCK = "Release Data Cache Memory Block {}";
+ public static final String START_DATA_TYPE_CONVERSION_DOT = "Start data type conversion for LoadTsFileStatement: {}.";
+ public static final String START_DATA_TYPE_CONVERSION = "Start data type conversion for LoadTsFileStatement: {}";
+ public static final String FAIL_TO_LOAD_TSFILE_TO_ACTIVE_DIR = "Fail to load tsfile to Active dir";
+ public static final String FAIL_TO_LOAD_DISK_SPACE = "Fail to load disk space of file {}";
+ public static final String LOAD_ACTIVE_LISTENING_DIR_NOT_SET = "Load active listening dir is not set.";
+ public static final String FAILED_TO_CREATE_TARGET_DIR = "Failed to create target directory ";
+ public static final String FAILED_LOAD_ACTIVE_LISTENING_DIRS = "Failed to load active listening dirs";
+ public static final String INVALID_PARAMETER = "Invalid parameter '";
+ public static final String UTILITY_CLASS = "Utility class";
+ public static final String TSFILE_DATA_BYTE_ARRAY_SIZE_MISMATCH = "TsFileData byte array read error, size mismatch.";
+ public static final String UNKNOWN_TSFILE_DATA_TYPE = "Unknown TsFileData type: ";
+ public static final String FILE_MAGIC_STRING_INCORRECT = "the file's MAGIC STRING is incorrect, file path: {}";
+ public static final String FILE_VERSION_TOO_OLD = "the file's Version Number is too old, file path: {}";
+ public static final String FILE_NOT_CLOSED_CORRECTLY = "the file is not closed correctly, file path: {}";
+ public static final String MINIO_SELECTOR_REQUIRES_ONE_DIR = "MinIO selector requires at least one directory";
+ public static final String ADD_MOUNT_POINT = "Add {}'s mount point {}";
+ public static final String FAILED_TO_CHECK_DIRECTORY = "Failed to check directory: {}";
+ public static final String FAILED_TO_LIST_FILES_IN_DIR = "Failed to list files in directory: {}";
+ public static final String FAILED_TO_DELETE_FILE_OR_DIR = "Failed to delete file or directory: {}";
+ public static final String FAILED_TO_CLEANUP_DIRECTORY = "Failed to cleanup directory: {}";
+ public static final String CLEANED_UP_ACTIVE_LOAD_DIRS = "Cleaned up active load listening directories";
+ public static final String UNEXPECTED_ERROR_CLEANUP_ACTIVE_DIRS = "Unexpected error during cleanup of active load listening directories";
+ public static final String ACTIVE_LOAD_DIR_SCANNER_REGISTERED = "Active load dir scanner periodical job registered";
+ public static final String ERROR_ACTIVE_LOAD_DIR_SCANNING = "Error occurred during active load dir scanning.";
+ public static final String SYSTEM_READ_ONLY_SKIP_ACTIVE_SCAN = "Current system is read-only mode. Skip active load dir scanning.";
+ public static final String FILE_DELETED_IGNORE_EXCEPTION = "The file has been deleted. Ignore this exception.";
+ public static final String EXCEPTION_SCANNING_DIR = "Exception occurred during scanning dir: {}";
+ public static final String ERROR_CREATING_DIR_FOR_ACTIVE_LOAD = "Error occurred during creating directory {} for active load.";
+ public static final String FAILED_COUNT_ACTIVE_DIRS_FILE_NUMBER = "Failed to count active listening dirs file number.";
+ public static final String ACTIVE_LOAD_METRIC_COLLECTOR_REGISTERED = "Active load metric collector periodical jobs registered";
+ public static final String DATABASE_NAME_MUST_NOT_BE_EMPTY = "Database name must not be empty.";
+ public static final String ERROR_EXECUTING_ACTIVE_LOAD_JOB = "Error occurred when executing active load periodical job.";
+ public static final String ACTIVE_LOAD_EXECUTOR_STARTED = "Active load periodical jobs executor is started successfully.";
+ public static final String ACTIVE_LOAD_EXECUTOR_STOPPED = "Active load periodical jobs executor is stopped successfully.";
+ public static final String ERROR_MOVING_FILE_TO_FAIL_DIR = "Error occurred during moving file {} to fail directory.";
+ public static final String FAILED_COUNT_FILES_IN_FAIL_DIR = "Failed to count failed files in fail directory.";
+
+ public static final String STRING_NOT_LEGAL_REPAIR_LOG = "String '%s' is not a legal repair log";
+}
diff --git a/iotdb-core/datanode/src/main/i18n/zh/org/apache/iotdb/db/i18n/DataNodeMiscMessages.java b/iotdb-core/datanode/src/main/i18n/zh/org/apache/iotdb/db/i18n/DataNodeMiscMessages.java
new file mode 100644
index 0000000000000..6213624518ce1
--- /dev/null
+++ b/iotdb-core/datanode/src/main/i18n/zh/org/apache/iotdb/db/i18n/DataNodeMiscMessages.java
@@ -0,0 +1,914 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.i18n;
+
+/** 编译时国际化常量 - DataNode 杂项子系统(中文)。 */
+public final class DataNodeMiscMessages {
+
+ private DataNodeMiscMessages() {}
+
+ // ---------------------------------------------------------------------------
+ // protocol – BaseServerContextHandler
+ // ---------------------------------------------------------------------------
+ public static final String MULTIPLE_SERVER_CONTEXT_FACTORY =
+ "存在多个 ServerContextFactory 实现,请检查。";
+ public static final String SET_SERVER_CONTEXT_FACTORY =
+ "将从 {} 设置 ServerContextFactory";
+
+ // ---------------------------------------------------------------------------
+ // protocol – ConfigNodeInfo
+ // ---------------------------------------------------------------------------
+ public static final String UPDATE_CONFIG_NODE_SUCCESSFULLY =
+ "成功更新 ConfigNode:{},耗时 {} 毫秒。";
+ public static final String UPDATE_CONFIG_NODE_FAILED = "更新 ConfigNode 失败。";
+ public static final String SYSTEM_PROPERTIES_NOT_EXIST =
+ "系统属性文件不存在,无需存储 ConfigNode 列表";
+ public static final String LOAD_CONFIG_NODE_SUCCESSFULLY =
+ "成功加载 ConfigNode:{},耗时 {} 毫秒。";
+ public static final String CANNOT_PARSE_CONFIG_NODE_LIST =
+ "无法解析 system.properties 中的 ConfigNode 列表";
+
+ // ---------------------------------------------------------------------------
+ // protocol – ConfigNodeClient
+ // ---------------------------------------------------------------------------
+ public static final String NODE_LEADER_MAY_DOWN_TRY_NEXT =
+ "当前节点 Leader {} 可能已宕机,尝试下一个节点";
+ public static final String UNEXPECTED_INTERRUPTION_CONNECT_CONFIG_NODE =
+ "等待连接 ConfigNode 时发生意外中断";
+ public static final String NODE_MAY_DOWN_TRY_NEXT =
+ "当前节点 {} 可能已宕机,尝试下一个节点";
+ public static final String FAILED_CONNECT_CONFIG_NODE_NOT_LEADER =
+ "从 DataNode {} 连接 ConfigNode {} 失败,因为当前节点不是 Leader 或尚未就绪,稍后将重试";
+ public static final String UNEXPECTED_INTERRUPTION_CONNECT_CONFIG_NODE_BREAK =
+ "等待连接 ConfigNode 时发生意外中断,可能是当前节点已宕机,将中断当前执行流程以避免无意义等待。";
+
+ // ---------------------------------------------------------------------------
+ // protocol – DataNodeInternalClient
+ // ---------------------------------------------------------------------------
+ public static final String USER_OPENS_INTERNAL_SESSION =
+ "用户 {} 打开了内部 Session-{}。";
+ public static final String USER_OPENS_INTERNAL_SESSION_FAILED =
+ "用户 {} 打开内部 Session 失败。";
+ public static final String USER_OPENS_INTERNAL_SESSION_FAILED_FMT =
+ "用户 %s 打开内部 Session 失败。";
+
+ // ---------------------------------------------------------------------------
+ // protocol – AsyncTSStatusRPCHandler / AsyncConfigNodeTSStatusRPCHandler
+ // ---------------------------------------------------------------------------
+ public static final String SUCCESSFULLY_ON_DATANODE =
+ "在 DataNode {} 上成功执行 {}";
+ public static final String FAILED_ON_DATANODE =
+ "在 DataNode {} 上执行 {} 失败,响应:{}";
+ public static final String SUCCESSFULLY_ON_CONFIG_NODE =
+ "在 ConfigNode {} 上成功执行 {}";
+ public static final String FAILED_ON_CONFIG_NODE =
+ "在 ConfigNode {} 上执行 {} 失败,响应:{}";
+
+ // ---------------------------------------------------------------------------
+ // protocol – AINodeClient
+ // ---------------------------------------------------------------------------
+ public static final String AINODE_MAY_DOWN =
+ "当前 AINode {} 可能已宕机,原因:";
+ public static final String CANNOT_CONNECT_ANY_AINODE =
+ "无法连接任何 AINode,因为没有可用的节点。";
+ public static final String UNEXPECTED_INTERRUPTION_CONNECT_AINODE =
+ "等待连接 AINode 时发生意外中断,可能是当前节点已宕机,将中断当前执行流程以避免无意义等待。";
+
+ // ---------------------------------------------------------------------------
+ // protocol – SessionManager
+ // ---------------------------------------------------------------------------
+ public static final String LOGIN_STATUS =
+ "{}: 登录状态:{}。用户:{},打开 Session-{}";
+ public static final String CLIENT_TRYING_CLOSE_ANOTHER_SESSION =
+ "客户端 %s 正在尝试关闭另一个会话 %s,请检查是否存在 Bug";
+ public static final String SESSION_CLOSING = "Session-%s 正在关闭";
+ public static final String FAILED_RELEASE_PREPARED_STATEMENT =
+ "释放会话 {} 的 PreparedStatement 资源失败:{}";
+ public static final String FAILED_RELEASE_PREPARED_STATEMENT_CLOSE =
+ "关闭语句 {} 时释放会话 {} 的 PreparedStatement '{}' 资源失败:{}";
+ public static final String NOT_LOGIN = "{}: 未登录。";
+ public static final String CLIENT_SESSION_REGISTERED_REPEATEDLY =
+ "客户端会话被重复注册,请检查是否存在 Bug。";
+
+ // ---------------------------------------------------------------------------
+ // protocol – DataNodeRegionManager
+ // ---------------------------------------------------------------------------
+ public static final String CREATE_SCHEMA_REGION_FAILED_ILLEGAL_PATH =
+ "创建 Schema Region {} 失败,因为路径非法。";
+ public static final String CREATE_SCHEMA_REGION_FAILED =
+ "创建 Schema Region {} 失败,原因:{}";
+ public static final String CREATE_SCHEMA_REGION_FAILED_FMT =
+ "创建 Schema Region 失败,原因:%s";
+ public static final String SCHEMA_REGION_ALREADY_EXISTS_FMT =
+ "SchemaRegion %d 已存在。";
+ public static final String CREATE_DATA_REGION_FAILED =
+ "创建 Data Region {} 失败,原因:{}";
+ public static final String CREATE_DATA_REGION_FAILED_FMT =
+ "创建 Data Region 失败,原因:%s";
+ public static final String DATA_REGION_ALREADY_EXISTS_FMT = "DataRegion %d 已存在。";
+ public static final String START_CREATE_NEW_REGION = "开始创建新 Region {}";
+ public static final String CREATE_NEW_REGION_ERROR = "创建新 Region {} 失败";
+ public static final String SUCCEED_CREATE_NEW_REGION = "成功创建新 Region {}";
+ public static final String METADATA_ERROR = "{}: 元数据错误:";
+ public static final String CREATE_SCHEMA_REGION_FAILED_ILLEGAL_PATH_MSG =
+ "创建 Schema Region 失败,因为存储组路径非法。";
+
+ // ---------------------------------------------------------------------------
+ // protocol – DataNodeInternalRPCServiceImpl
+ // ---------------------------------------------------------------------------
+ public static final String CONSENSUS_NOT_STARTED =
+ "共识协议在 {} 秒后仍未启动,拒绝 Region 请求";
+ public static final String RECEIVE_FRAGMENT_INSTANCE =
+ "收到 FragmentInstance,目标组 [{}]";
+ public static final String DESERIALIZE_CONSENSUS_GROUP_ID_FAILED =
+ "反序列化 ConsensusGroupId 失败。";
+ public static final String DESERIALIZE_FRAGMENT_INSTANCE_FAILED =
+ "反序列化 FragmentInstance 失败。";
+ public static final String RECEIVE_LOAD_NODE = "收到 UUID 为 {} 的加载节点请求。";
+ public static final String SCHEMA_CACHE_INVALIDATED =
+ "{} 的 Schema 缓存已失效";
+ public static final String ERROR_PUSHING_PIPE_META =
+ "推送 Pipe 元数据时发生错误";
+ public static final String ERROR_PUSHING_SINGLE_PIPE_META =
+ "推送单个 Pipe 元数据时发生错误";
+ public static final String ERROR_PUSHING_MULTI_PIPE_META =
+ "推送多个 Pipe 元数据时发生错误";
+ public static final String ERROR_PUSHING_TOPIC_META =
+ "推送 Topic 元数据时发生错误";
+ public static final String ERROR_PUSHING_SINGLE_TOPIC_META =
+ "推送单个 Topic 元数据时发生错误";
+ public static final String ERROR_PUSHING_MULTI_TOPIC_META =
+ "推送多个 Topic 元数据时发生错误";
+ public static final String ERROR_PUSHING_CONSUMER_GROUP_META =
+ "推送消费者组元数据时发生错误";
+ public static final String ERROR_PUSHING_SINGLE_CONSUMER_GROUP_META =
+ "推送单个消费者组元数据时发生错误";
+ public static final String EXCEPTION_EXECUTING_INTERNAL_SCHEMA_TASK =
+ "执行内部 Schema 任务时发生异常:";
+ public static final String UNSUPPORTED_TYPE_UPDATING_TABLE =
+ "更新表时遇到不支持的类型 {}";
+ public static final String UNSUPPORTED_TYPE_UPDATING_TEMPLATE =
+ "更新模板时遇到不支持的类型 {}";
+ public static final String FAILED_GET_MEMORY_FROM_METRIC =
+ "从指标获取内存信息失败,原因:";
+ public static final String CHANGE_REGION_LEADER = "[变更 Region Leader] {}";
+ public static final String REGION_TYPE_ILLEGAL = "Region {} 的类型非法";
+ public static final String START_DISABLE_DATA_NODE =
+ "开始在请求中禁用 DataNode:{}";
+ public static final String EXECUTE_STOP_AND_CLEAR = "执行 stopAndClearDataNode RPC 方法";
+ public static final String INTERRUPTED_STOP_AND_CLEAR =
+ "在 stopAndClearDataNode RPC 方法中遇到中断异常";
+ public static final String STOP_AND_CLEAR_ERROR = "停止并清理 DataNode 时发生错误";
+ public static final String RETRIEVED_EARLIEST_TIMESLOTS =
+ "已获取 {} 个数据库的最早时间槽";
+ public static final String FAILED_GET_EARLIEST_TIMESLOTS = "获取最早时间槽失败";
+ public static final String FAILED_GENERATE_DATA_PARTITION_TABLE =
+ "生成数据分区表失败";
+ public static final String FAILED_CHECK_DATA_PARTITION_TABLE_STATUS =
+ "检查数据分区表生成状态失败";
+ public static final String DATA_PARTITION_TABLE_COMPLETED =
+ "数据分区表生成完成,任务 ID:{}";
+ public static final String DATA_PARTITION_TABLE_FAILED =
+ "数据分区表生成失败,任务 ID:{}";
+ public static final String PROCESS_DATA_DIR_COMPLETED =
+ "处理数据目录以获取最早时间槽已成功完成";
+ public static final String ERROR_EXECUTING_BATCH_STATEMENT =
+ "执行批量语句时发生错误:";
+
+ // ---------------------------------------------------------------------------
+ // protocol – ClientRPCServiceImpl
+ // ---------------------------------------------------------------------------
+ public static final String IOTDB_SERVER_VERSION = "IoTDB 服务器版本:{}";
+ public static final String TEST_INSERT_BATCH_RECEIVE = "收到测试批量插入请求。";
+ public static final String TEST_INSERT_ROW_RECEIVE = "收到测试行插入请求。";
+ public static final String TEST_INSERT_STRING_RECORD_RECEIVE =
+ "收到测试字符串记录插入请求。";
+ public static final String TEST_INSERT_ROW_IN_BATCH_RECEIVE =
+ "收到测试批量行插入请求。";
+ public static final String TEST_INSERT_ROWS_IN_BATCH_RECEIVE =
+ "收到测试批量多行插入请求。";
+ public static final String TEST_INSERT_STRING_RECORDS_RECEIVE =
+ "收到测试字符串记录批量插入请求。";
+ public static final String START_BATCH_EXECUTING_TREE =
+ "开始在树模型中批量执行 {} 个子语句,queryId:{}";
+ public static final String EXECUTING_SUB_STATEMENT_TREE =
+ "正在树模型中执行第 {}/{} 个子语句,queryId:{}";
+ public static final String FAILED_EXECUTE_SUB_STATEMENT_TREE =
+ "树模型中第 {}/{} 个子语句执行失败,queryId:{},已完成:{},剩余:{},进度:{}%,错误:{}";
+ public static final String SUCCESSFULLY_EXECUTED_SUB_STATEMENT_TREE =
+ "树模型中第 {}/{} 个子语句执行成功,queryId:{}";
+ public static final String COMPLETED_BATCH_EXECUTING_TREE =
+ "树模型中全部 {} 个子语句批量执行完成,queryId:{}";
+ public static final String START_BATCH_EXECUTING_TABLE =
+ "开始在表模型中批量执行 {} 个子语句,queryId:{}";
+ public static final String EXECUTING_SUB_STATEMENT_TABLE =
+ "正在表模型中执行第 {}/{} 个子语句,queryId:{}";
+ public static final String FAILED_EXECUTE_SUB_STATEMENT_TABLE =
+ "表模型中第 {}/{} 个子语句执行失败,queryId:{},已完成:{},剩余:{},进度:{}%,错误:{}";
+ public static final String SUCCESSFULLY_EXECUTED_SUB_STATEMENT_TABLE =
+ "表模型中第 {}/{} 个子语句执行成功,queryId:{}";
+ public static final String COMPLETED_BATCH_EXECUTING_TABLE =
+ "表模型中全部 {} 个子语句批量执行完成,queryId:{}";
+
+ // ---------------------------------------------------------------------------
+ // service – DataNode
+ // ---------------------------------------------------------------------------
+ public static final String DATANODE_ENV_VARS =
+ "IoTDB-DataNode 环境变量:{}";
+ public static final String DATANODE_DEFAULT_CHARSET =
+ "IoTDB-DataNode 默认字符集:{}";
+ public static final String STARTING_DATANODE = "正在启动 DataNode...";
+ public static final String DATANODE_FIRST_START =
+ "DataNode 首次启动中...";
+ public static final String DATANODE_RESTARTING = "DataNode 正在重启...";
+ public static final String IOTDB_CONFIGURATION = "IoTDB 配置信息:{}";
+ public static final String DATANODE_SETUP_SUCCESSFULLY =
+ "恭喜,IoTDB DataNode 已成功启动,祝使用愉快!";
+ public static final String FAIL_TO_START_SERVER = "启动服务器失败";
+ public static final String DATANODE_STARTED = "DataNode 已启动";
+ public static final String DATANODE_PREPARED_SUCCESSFULLY =
+ "DataNode 准备就绪,耗时 {} 毫秒";
+ public static final String PULLING_SYSTEM_CONFIGURATIONS =
+ "正在从 ConfigNode Leader 拉取系统配置...";
+ public static final String CANNOT_PULL_SYSTEM_CONFIGURATIONS =
+ "无法从 ConfigNode Leader 拉取系统配置";
+ public static final String SENDING_REGISTER_REQUEST =
+ "正在向 ConfigNode Leader 发送注册请求...";
+ public static final String CANNOT_REGISTER_TO_CLUSTER =
+ "无法注册到集群,原因:{}";
+ public static final String CANNOT_REGISTER_AFTER_RETRIES =
+ "重试 {} 次后仍无法注册到集群。";
+ public static final String PRECHECK_PASSED =
+ "预检查通过,即将进行正式注册。";
+ public static final String DELETE_SUCCEED = "删除 {} 成功。";
+ public static final String DELETE_FAILED_NOT_EXIST =
+ "删除 {} 失败,因为目录不存在。";
+ public static final String SENDING_RESTART_REQUEST =
+ "正在向 ConfigNode Leader 发送重启请求...";
+ public static final String CLEANED_SORT_TEMP_DIR =
+ "已清理过期的排序临时目录:{}";
+ public static final String MEET_ERROR_STARTING_UP = "启动过程中遇到错误。";
+ public static final String IOTDB_DATANODE_HAS_STARTED = "IoTDB DataNode 已启动。";
+ public static final String SETTING_UP_DATANODE = "正在配置 IoTDB DataNode...";
+ public static final String RECOVER_SCHEMA = "正在恢复 Schema...";
+ public static final String DATANODE_FAILED_SETUP = "IoTDB DataNode 启动失败。";
+ public static final String WAIT_DATABASES_READY =
+ "等待所有数据库就绪,耗时 {} 毫秒。";
+ public static final String PREPARE_PIPE_RESOURCES =
+ "Pipe 资源准备完成,耗时 {} 毫秒。";
+ public static final String RECOVER_SCHEMA_SUCCESSFULLY =
+ "Schema 恢复完成,耗时 {} 毫秒。";
+ public static final String LOAD_CLASS_ERROR = "加载类失败:";
+ public static final String EXCEPTION_SCHEMA_REGION_CONSENSUS_STOPPING =
+ "停止 SchemaRegionConsensusImpl 时发生异常";
+ public static final String EXCEPTION_DATA_REGION_CONSENSUS_STOPPING =
+ "停止 DataRegionConsensusImpl 时发生异常";
+
+ // ---------------------------------------------------------------------------
+ // service – DataNodeShutdownHook
+ // ---------------------------------------------------------------------------
+ public static final String DATANODE_EXITING = "DataNode 正在退出...";
+ public static final String INTERRUPTED_WAITING_PIPE_FINISH =
+ "等待 Pipe 完成时被中断";
+ public static final String TIMED_OUT_WAITING_PIPES =
+ "等待 Pipe 完成超时,将终止等待";
+ public static final String FAILED_BORROW_CONFIG_NODE_CLIENT =
+ "借用 ConfigNodeClient 失败";
+ public static final String FAILED_REPORT_SHUTDOWN = "上报关闭状态失败";
+
+ // ---------------------------------------------------------------------------
+ // service – RegionMigrateService
+ // ---------------------------------------------------------------------------
+ public static final String REGION_BEGIN_MIGRATING =
+ "Region {} 收到开始迁移通知";
+ public static final String REGION_FINISH_MIGRATING =
+ "Region {} 收到完成迁移通知";
+ public static final String RESET_PEER_LIST_FAIL = "重置对等节点列表失败";
+ public static final String REGION_MIGRATE_SERVICE_START = "Region 迁移服务已启动";
+ public static final String REGION_MIGRATE_SERVICE_STOP = "Region 迁移服务已停止";
+
+ // ---------------------------------------------------------------------------
+ // service – SettleService
+ // ---------------------------------------------------------------------------
+ public static final String START_ERROR = "启动错误";
+ public static final String WAITING_SETTLE_POOL_SHUTDOWN =
+ "正在等待 Settle 任务池关闭";
+ public static final String SETTLE_SERVICE_STOPPED = "Settle 服务已停止";
+
+ // ---------------------------------------------------------------------------
+ // service – IoTDBInternalLocalReporter
+ // ---------------------------------------------------------------------------
+ public static final String CHECK_OR_CREATE_DATABASE_FAILED =
+ "IoTDBSessionReporter 检查或创建数据库失败。";
+ public static final String CHECK_OR_CREATE_DATABASE_FAILED_BECAUSE =
+ "IoTDBSessionReporter 检查或创建数据库失败,原因:";
+ public static final String INTERNAL_REPORTER_ALREADY_STARTED =
+ "IoTDB 内部 Reporter 已经启动";
+ public static final String INTERNAL_REPORTER_START = "IoTDB 内部 Reporter 启动!";
+ public static final String INTERNAL_REPORTER_STOP = "IoTDB 内部 Reporter 停止!";
+ public static final String FAILED_UPDATE_METRIC_VALUE =
+ "更新指标值失败,状态:{}";
+ public static final String FAILED_AUTO_CREATE_TIMESERIES =
+ "自动创建时间序列 {} 失败,状态:{}";
+
+ // ---------------------------------------------------------------------------
+ // service – ExternalService
+ // ---------------------------------------------------------------------------
+ public static final String FAILED_MAKE_EXTERNAL_SERVICE_DIR =
+ "创建外部服务目录失败";
+ public static final String EXTERNAL_SERVICE_LIB_ROOT = "外部服务库根目录:{}";
+ public static final String FAILED_GET_OPEN_FILE_NUMBER =
+ "获取打开文件数失败,原因:";
+ public static final String UNEXPECTED_ERROR_GETTING_TSFILE_NAME =
+ "获取 TsFile 名称时发生意外错误";
+
+ // ---------------------------------------------------------------------------
+ // service – metrics
+ // ---------------------------------------------------------------------------
+ public static final String FAILED_GET_PROCESS_RESIDENT_MEMORY =
+ "获取进程 {} 的常驻内存失败";
+ public static final String DATANODE_PORT_CHECK_SUCCESSFUL = "DataNode 端口检查通过。";
+
+ // ---------------------------------------------------------------------------
+ // tools – WalChecker
+ // ---------------------------------------------------------------------------
+ public static final String CHECKING_FOLDER = "正在检查目录:{}";
+ public static final String NO_SUB_DIRECTORIES =
+ "指定目录下无子目录,检查结束";
+ public static final String CHECKING_DIRECTORY = "正在检查第 {} 个目录 {}";
+ public static final String WAL_FILE_NOT_EXIST = "WAL 文件不存在,跳过";
+ public static final String WAL_CHECK_FAILED = "{} 检查未通过,原因:";
+ public static final String CHECK_FINISHED_NO_DAMAGED =
+ "检查完成,没有损坏的文件";
+ public static final String FAILED_FILES_FOUND =
+ "共有 {} 个文件检查失败,文件列表:{}";
+ public static final String NO_ENOUGH_ARGS =
+ "参数不足:需要提供 WAL 根目录路径";
+
+ // ---------------------------------------------------------------------------
+ // tools – TsFileSketchTool
+ // ---------------------------------------------------------------------------
+ public static final String FAIL_INIT_SKETCH_TOOL = "初始化 TsFileSketchTool 失败,{}";
+ public static final String FAIL_PARSE_TSFILE_METADATA = "解析 TsFileMetadata 失败,{}";
+ public static final String FAIL_PRINT_FILE_INFO = "输出文件信息失败,{}";
+ public static final String FAIL_PARSE_CHUNK = "解析 Chunk 失败,{}";
+ public static final String FAIL_PRINT_TIMESERIES_INDEX = "输出时间序列索引失败,{}";
+
+ // ---------------------------------------------------------------------------
+ // tools – TsFileSplitTool
+ // ---------------------------------------------------------------------------
+ public static final String SPLITTING_TSFILE = "正在拆分 TsFile {}...";
+ public static final String UNSUPPORTED_SPLIT_WITH_MODIFICATION =
+ "暂不支持拆分带有修改记录的 TsFile。";
+ public static final String UNSUPPORTED_SPLIT_WITH_ALIGNED =
+ "暂不支持拆分包含对齐时间序列的 TsFile。";
+
+ // ---------------------------------------------------------------------------
+ // tools – TsFileSplitByPartitionTool
+ // ---------------------------------------------------------------------------
+ public static final String DELETE_UNCOMPLETED_FILE = "删除未完成的文件 {}";
+ public static final String CREATE_TSFILE_FAILED_EXISTS =
+ "创建新 TsFile {} 失败,因为文件已存在";
+ public static final String CREATE_TSFILE_FAILED = "创建新 TsFile {} 失败";
+ public static final String INCORRECT_MAGIC_STRING =
+ "文件的 MAGIC STRING 不正确,文件路径:{}";
+ public static final String INCORRECT_VERSION_NUMBER =
+ "文件的版本号不正确,文件路径:{}";
+ public static final String FILE_NOT_CLOSED_CORRECTLY =
+ "文件未正确关闭,文件路径:{}";
+
+ // ---------------------------------------------------------------------------
+ // tools – TsFileSelfCheckTool
+ // ---------------------------------------------------------------------------
+ public static final String ERROR_GETTING_TIMESERIES_METADATA =
+ "获取 TsFile 中所有带偏移量的时间序列元数据时发生错误。";
+ public static final String FILE_PATH = "文件路径:{}";
+
+ // ---------------------------------------------------------------------------
+ // tools – TsFileValidationTool
+ // ---------------------------------------------------------------------------
+ public static final String NOT_DIRECTORY_OR_NOT_EXIST =
+ "{} 不是目录或不存在,跳过。";
+
+ // ---------------------------------------------------------------------------
+ // tools – TsFileValidationScan / TsFileStatisticScan
+ // ---------------------------------------------------------------------------
+ public static final String MEET_ERRORS_READING_FILE =
+ "读取文件 {} 时遇到错误,跳过。";
+ public static final String MEET_ERROR = "遇到错误。";
+
+ // ---------------------------------------------------------------------------
+ // tools – MLogParser / PBTreeFileSketchTool
+ // ---------------------------------------------------------------------------
+ public static final String TOO_FEW_PARAMS =
+ "输入参数过少,请参考以下提示。";
+ public static final String PARSE_ERROR = "解析错误:{}";
+ public static final String ENCOUNTER_ERROR = "遇到错误,原因:{}";
+ public static final String USE_HELP = "使用 -help 获取更多信息";
+
+ // ---------------------------------------------------------------------------
+ // tools – SchemaRegionSnapshotParser
+ // ---------------------------------------------------------------------------
+ public static final String IOEXCEPTION_GET_FOLDER =
+ "获取 {} 的目录时发生 IO 异常";
+
+ // ---------------------------------------------------------------------------
+ // tools – SRStatementGenerator
+ // ---------------------------------------------------------------------------
+ public static final String ERROR_PARSER_TAG_ATTRIBUTES =
+ "解析标签和属性文件时发生错误";
+ public static final String MEASUREMENT_ATTRIBUTES_NO_SNAPSHOT =
+ "测量值已设置属性或标签,但未找到快照文件";
+
+ // ---------------------------------------------------------------------------
+ // tools – TsFileAndModSettleTool
+ // ---------------------------------------------------------------------------
+ public static final String CANNOT_FIND_TSFILE = "找不到 TsFile:{}";
+ public static final String NOT_DIRECTORY_PATH = "不是目录路径:{}";
+ public static final String CANNOT_FIND_DIRECTORY = "找不到目录:{}";
+ public static final String START_SETTLING_TSFILE =
+ "开始整理 TsFile:{}";
+ public static final String FINISH_SETTLING_ALL =
+ "所有 TsFile 整理完成!";
+ public static final String FAIL_SERIALIZE_TSFILE_RESOURCE =
+ "序列化新的 TsFile 资源失败。";
+ public static final String FAILED_DELETE_SETTLE_LOG =
+ "删除整理日志失败,日志路径:{}";
+
+ // ---------------------------------------------------------------------------
+ // tools – TsFileSettleByCompactionTool
+ // ---------------------------------------------------------------------------
+ public static final String PARSE_COMMAND_LINE_FAILED =
+ "解析命令行参数失败:{}";
+ public static final String ADD_SETTLE_COMPACTION_TASK_SUCCESS =
+ "添加整理合并任务成功";
+ public static final String ADD_SETTLE_COMPACTION_TASK_FAILED =
+ "添加整理合并任务失败,状态码:{}";
+
+ // ---------------------------------------------------------------------------
+ // tools – TsFileResourcePipeStatisticsSetTool
+ // ---------------------------------------------------------------------------
+ public static final String UNKNOWN_ARGUMENT = "未知参数:{}";
+ public static final String NO_DATA_DIRS_PROVIDED =
+ "未提供数据目录,请使用 --dirs ... 指定。";
+ public static final String VALIDATION_REPAIR_COMPLETED =
+ "校验和修复已完成。统计信息:";
+ public static final String SEPARATOR_LINE = "------------------------------------------------------";
+ public static final String IS_GENERATED_BY_PIPE_MARK = "isGeneratedByPipe 标记: {}";
+ public static final String RESET_PROGRESS_INDEX = "resetProgressIndex: {}";
+ public static final String DATA_DIRECTORIES = "数据目录: ";
+ public static final String INDENT_PATH = " {}";
+ public static final String ERROR_VALIDATING_REPAIRING_RESOURCE = "校验或修复资源 {} 时出错: {}";
+ public static final String ERROR_LOADING_RESOURCES_FROM_PARTITION = "从分区 {} 加载资源时出错: {}";
+ public static final String TIME_PARTITION_PROCESS_COMPLETED = "时间分区 {} 共有 {} 个资源,{} 个需要设置 isGeneratedByPipe,{} 个需要重置 progressIndex,{} 个已更改。处理完成。";
+ public static final String SKIPPED_RESOURCE_FILE_NOT_EXIST = "{} 已跳过,因为资源文件不存在。";
+ public static final String REPAIRING_TSFILE_RESOURCE = "正在修复 TsFileResource: {},isGeneratedByPipe 标记: {},实际标记: {}";
+ public static final String RESETTING_PROGRESS_INDEX_TO_MINIMUM = "正在将 TsFileResource:{} 的 progressIndex 重置为最小值,原始 progressIndex: {}";
+ public static final String MARKED_TSFILE_RESOURCE_AS = "已将 TsFileResource 标记为 {} ,资源: {}";
+ public static final String RESET_PROGRESS_INDEX_TO_MINIMUM = "已将 TsFileResource:{} 的 progressIndex 重置为最小值。";
+ public static final String FAILED_TO_REPAIR_TSFILE_RESOURCE = "错误: 修复 TsFileResource 失败: {}";
+ public static final String TOTAL_TIME_TAKEN = "总耗时: {} 毫秒,TsFile 资源总数: {},设置 isGeneratedByPipe 的资源数: {},重置 progressIndex 的资源数: {},已更改的资源数: {}";
+
+ // ---------------------------------------------------------------------------
+ // tools – DelayAnalyzer
+ // ---------------------------------------------------------------------------
+ public static final String DELAY_ANALYZER_RESET = "[延迟分析器] 延迟分析器已重置";
+
+ // ---------------------------------------------------------------------------
+ // utils – DataNodeObjectFileService
+ // ---------------------------------------------------------------------------
+ public static final String FAILED_REMOVE_OBJECT_FILE =
+ "删除对象文件 {} 失败";
+ public static final String FAILED_REMOVE_EMPTY_OBJECT_DIR =
+ "删除空对象目录 {} 失败";
+ public static final String REMOVE_OBJECT_FILE =
+ "删除对象文件 {},大小为 {}(字节)";
+
+ // ---------------------------------------------------------------------------
+ // utils – OpenFileNumUtil
+ // ---------------------------------------------------------------------------
+ public static final String CANNOT_GET_PID =
+ "无法获取 IoTDB 进程的 PID,原因:";
+ public static final String UNSUPPORTED_OS_GET_PID =
+ "不支持的操作系统 {},无法通过 OpenFileNumUtil 获取 IoTDB 的 PID。";
+ public static final String CANNOT_GET_OPEN_FILE_NUMBER =
+ "无法获取 IoTDB 进程的打开文件数,原因:";
+
+ // ---------------------------------------------------------------------------
+ // utils – MemUtils
+ // ---------------------------------------------------------------------------
+ public static final String UNSUPPORTED_DATA_POINT_TYPE = "不支持的数据点类型";
+
+ // ---------------------------------------------------------------------------
+ // utils – ErrorHandlingUtils
+ // ---------------------------------------------------------------------------
+ public static final String ERROR_OPERATION_LOG =
+ "状态码:{},操作 {} 失败";
+
+ // ---------------------------------------------------------------------------
+ // utils – CommonUtils
+ // ---------------------------------------------------------------------------
+ public static final String INPUT_FLOAT_INFINITY = "输入的浮点数值为 Infinity";
+ public static final String INPUT_DOUBLE_INFINITY = "输入的双精度数值为 Infinity";
+ public static final String BOOLEAN_PARSE_ERROR =
+ "BOOLEAN 值应为 true/TRUE、false/FALSE 或 0/1";
+ public static final String UNSUPPORTED_DATA_TYPE_FMT = "不支持的数据类型:%s";
+ public static final String UNSUPPORTED_DATA_TYPE = "不支持的数据类型:";
+ public static final String AGGREGATE_FUNCTION_NAME_NULL =
+ "聚合函数名称不能为空";
+ public static final String INVALID_AGGREGATION_FUNCTION =
+ "无效的聚合函数:";
+ public static final String INVALID_AGGREGATION_FUNCTION_FMT =
+ "无效的聚合函数:%s";
+ public static final String SCALAR_FUNCTION_NAME_NULL =
+ "标量函数名称不能为空。";
+ public static final String DELETE_CURSOR_SIZE_ERROR =
+ "deleteCursor 应为大小为 1 的数组";
+
+ // ---------------------------------------------------------------------------
+ // utils – ThreadUtils
+ // ---------------------------------------------------------------------------
+ public static final String WAITING_TERMINATED_TIMEOUT =
+ "等待 {} 终止超时";
+ public static final String POOL_NOT_EXIT_AFTER_TIMEOUT =
+ "{} 在 60 秒后仍未退出";
+
+ // ---------------------------------------------------------------------------
+ // utils – WindowEvaluationTaskPoolManager
+ // ---------------------------------------------------------------------------
+ public static final String WINDOW_EVAL_POOL_INIT =
+ "WindowEvaluationTaskPoolManager 正在初始化,线程数:{}";
+
+ // ---------------------------------------------------------------------------
+ // utils – LogWriter
+ // ---------------------------------------------------------------------------
+ public static final String INTERRUPTED_NO_WRITE =
+ "当前线程被中断,为保证 IO 安全跳过写入操作";
+
+ // ---------------------------------------------------------------------------
+ // conf – IoTDBStartCheck
+ // ---------------------------------------------------------------------------
+ public static final String STARTING_IOTDB = "正在启动 IoTDB {}";
+ public static final String CANNOT_CREATE_SCHEMA_DIR = "无法创建 Schema 目录:{}";
+ public static final String SCHEMA_DIR_CREATED = " {} 目录已创建。";
+ public static final String IOTDB_VERSION_TOO_OLD = "IoTDB 版本过旧";
+ public static final String REPAIR_SYSTEM_PROPERTIES = "修复 system.properties,缺少 {}";
+ public static final String UNEXPECTED_CONSENSUS_GROUP_TYPE =
+ "未预期的共识组类型";
+ public static final String ENCRYPT_MAGIC_STRING_NOT_MATCHED =
+ "加密魔术字符串不匹配";
+
+ // ---------------------------------------------------------------------------
+ // conf – IoTDBDescriptor
+ // ---------------------------------------------------------------------------
+ public static final String FAILED_UPDATE_CONFIG_FILE = "更新配置文件失败";
+ public static final String WILL_RELOAD_PROPERTIES = "将从 {} 重新加载配置";
+ public static final String GET_URL_FAILED = "获取 URL 失败";
+ public static final String START_READ_CONFIG_FILE = "开始读取配置文件 {}";
+ public static final String FAIL_FIND_CONFIG_FILE =
+ "找不到配置文件 {},拒绝启动 DataNode。";
+ public static final String CANNOT_LOAD_CONFIG_FILE =
+ "无法加载配置文件,拒绝启动 DataNode。";
+ public static final String INCORRECT_FORMAT_CONFIG_FILE =
+ "配置文件格式不正确,拒绝启动 DataNode。";
+ public static final String COULD_NOT_LOAD_CONFIG =
+ "无法从任何已知来源加载配置。";
+ public static final String START_RELOAD_CONFIG_FILE = "开始重新加载配置文件 {}";
+ public static final String FAIL_RELOAD_CONFIG_FILE = "重新加载配置文件 {} 失败";
+ public static final String RELOAD_METRIC_SERVICE = "重新加载指标服务,级别 {}";
+ public static final String PAGE_SIZE_GREATER_THAN_GROUP_SIZE =
+ "page_size 大于 group size,将设置为与 group size 相同";
+ public static final String MQTT_HOST_NOT_CONFIGURED =
+ "MQTT 主机未配置,将使用 dn_rpc_address。";
+ public static final String FAILED_PARSE_TRUSTED_URI =
+ "解析 trusted_uri_pattern {} 失败";
+ public static final String FAILED_GET_FILE_SIZE = "获取 {} 的文件大小失败,原因:";
+ public static final String SET_DELAY_ANALYZER_WINDOW_SIZE =
+ "[延迟分析器] 设置 delay_analyzer_window_size 为 {}";
+ public static final String FAIL_RELOAD_CONFIGURATION_FMT =
+ "重新加载配置失败,原因:%s";
+
+ // ---------------------------------------------------------------------------
+ // conf – IoTDBConfig
+ // ---------------------------------------------------------------------------
+ public static final String FAIL_GET_CANONICAL_PATH = "获取 {} 的规范路径失败";
+ public static final String NO_DATA_DIR_SET =
+ "未设置数据目录,loadTsFileDirs 保持默认值。";
+ public static final String FAILED_GET_FIELD = "获取字段 {} 失败";
+ public static final String SKIP_FAILED_TABLE_SCHEMA_CHECK =
+ "skipFailedTableSchemaCheck 已设置为 {}。";
+ public static final String DIR_REMOVED_FROM_DATA_DIRS =
+ "%s 已从 data_dirs 参数中移除,请将其添加回去。";
+
+ // ---------------------------------------------------------------------------
+ // conf – DataNodeMemoryConfig
+ // ---------------------------------------------------------------------------
+ public static final String FAIL_RELOAD_MEMORY_CONFIG_FMT =
+ "重新加载配置失败,原因:%s";
+
+ // ---------------------------------------------------------------------------
+ // conf – DataNodeStartupCheck
+ // ---------------------------------------------------------------------------
+ public static final String PORTS_HAVE_REPEAT =
+ "DataNode 使用的端口存在重复。";
+
+ // ---------------------------------------------------------------------------
+ // conf – REST service
+ // ---------------------------------------------------------------------------
+ public static final String REST_COULD_NOT_LOAD_CONFIG =
+ "无法从任何已知来源加载 REST 服务配置。";
+ public static final String REST_START_READ_CONFIG = "开始读取配置文件 {}";
+ public static final String REST_FAIL_FIND_CONFIG =
+ "REST 服务找不到配置文件 {}";
+ public static final String REST_CANNOT_LOAD_CONFIG =
+ "REST 服务无法加载配置文件,使用默认配置";
+ public static final String REST_INCORRECT_FORMAT =
+ "REST 服务配置文件格式不正确,使用默认配置";
+
+ // ---------------------------------------------------------------------------
+ // subscription – SubscriptionBroker
+ // ---------------------------------------------------------------------------
+ public static final String SUBSCRIPTION_PREFETCHING_QUEUE_STATE =
+ "订阅:SubscriptionPrefetchingQueue 状态 {}";
+ public static final String SUBSCRIPTION_UNEXPECTED_EXCEPTION =
+ "订阅:意外异常(不变量被破坏){}";
+
+ // ---------------------------------------------------------------------------
+ // subscription – SubscriptionReceiverV1
+ // ---------------------------------------------------------------------------
+ public static final String SUBSCRIPTION_UNKNOWN_REQUEST_TYPE =
+ "订阅:未知的 PipeSubscribeRequestType,响应状态 = {}。";
+ public static final String SUBSCRIPTION_CONSUMER_HEARTBEAT_SUCCESS =
+ "订阅:消费者 {} 心跳成功";
+ public static final String SUBSCRIPTION_CONSUMER_SUBSCRIBE_SUCCESS =
+ "订阅:消费者 {} 订阅 {} 成功";
+ public static final String SUBSCRIPTION_CONSUMER_CLOSE_SUCCESS =
+ "订阅:消费者 {} 关闭成功";
+ public static final String SUBSCRIPTION_EXCEPTION_HANDSHAKING =
+ "握手请求 {} 时发生异常";
+ public static final String SUBSCRIPTION_EXCEPTION_HEARTBEAT =
+ "心跳请求 {} 时发生异常";
+ public static final String SUBSCRIPTION_EXCEPTION_SUBSCRIBING =
+ "订阅请求 {} 时发生异常";
+ public static final String SUBSCRIPTION_EXCEPTION_UNSUBSCRIBING =
+ "取消订阅请求 {} 时发生异常";
+ public static final String SUBSCRIPTION_EXCEPTION_POLLING =
+ "拉取请求 {} 时发生异常";
+ public static final String SUBSCRIPTION_EXCEPTION_COMMITTING =
+ "提交请求 {} 时发生异常";
+ public static final String SUBSCRIPTION_EXCEPTION_CLOSING =
+ "关闭请求 {} 时发生异常";
+ public static final String SUBSCRIPTION_EXCEPTION_CREATING_CONSUMER =
+ "在 ConfigNode 上创建消费者 {} 时发生异常";
+ public static final String SUBSCRIPTION_EXCEPTION_CLOSING_CONSUMER =
+ "在 ConfigNode 上关闭消费者 {} 时发生异常";
+
+ // ---------------------------------------------------------------------------
+ // subscription – SubscriptionBrokerAgent
+ // ---------------------------------------------------------------------------
+ public static final String SUBSCRIPTION_CREATE_BROKER =
+ "订阅:创建绑定到消费者组 [{}] 的 Broker";
+ public static final String SUBSCRIPTION_DROP_BROKER =
+ "订阅:删除绑定到消费者组 [{}] 的 Broker";
+
+ // ---------------------------------------------------------------------------
+ // subscription – SubscriptionConsumerAgent
+ // ---------------------------------------------------------------------------
+ public static final String EXCEPTION_DROPPING_CONSUMER_GROUP =
+ "删除消费者组 {} 时发生异常";
+
+ // ---------------------------------------------------------------------------
+ // subscription – SubscriptionTopicAgent
+ // ---------------------------------------------------------------------------
+ public static final String EXCEPTION_DROPPING_TOPIC =
+ "删除 Topic {} 时发生异常";
+
+ // ---------------------------------------------------------------------------
+ // subscription – SubscriptionEvent
+ // ---------------------------------------------------------------------------
+ public static final String EVENT_NACKED_TIMES = "{} 已被否定确认 {} 次";
+
+ // ---------------------------------------------------------------------------
+ // subscription – SubscriptionPollResponseCache
+ // ---------------------------------------------------------------------------
+ public static final String NULL_RESPONSE_INVALIDATING =
+ "使缓存失效时响应为空,跳过";
+
+ // ---------------------------------------------------------------------------
+ // subscription – SubscriptionEventTsFileResponse
+ // ---------------------------------------------------------------------------
+ public static final String UNEXPECTED_RESPONSE_TYPE = "意外的响应类型:{}";
+ public static final String UNEXPECTED_MESSAGE_TYPE = "意外的消息类型:{}";
+ public static final String UNEXPECTED_RESPONSE_TYPE_FMT = "意外的响应类型:%s";
+
+ // ---------------------------------------------------------------------------
+ // subscription – SubscriptionPipeEventBatches
+ // ---------------------------------------------------------------------------
+ public static final String EXCEPTION_SEALING_EVENTS =
+ "从批次 {} 封存事件时发生异常";
+ public static final String EXCEPTION_CONSTRUCT_NEW_BATCH =
+ "构造新批次时发生异常";
+
+ // ---------------------------------------------------------------------------
+ // subscription – SubscriptionPrefetchingQueue
+ // ---------------------------------------------------------------------------
+ public static final String EXCEPTION_EXECUTE_RECEIVER_SUBTASK =
+ "异常 {} 在 {} 执行接收子任务时发生";
+ public static final String EXCEPTION_CONSTRUCT_TABLET_ITERATOR =
+ "异常 {} 在 {} 构造 ToTabletIterator 时发生";
+
+ // ---------------------------------------------------------------------------
+ // consensus – BaseStateMachine
+ // ---------------------------------------------------------------------------
+ public static final String UNEXPECTED_CONSENSUS_REQUEST =
+ "未预期的 IConsensusRequest:{}";
+ public static final String UNEXPECTED_CONSENSUS_REQUEST_EXCEPTION =
+ "未预期的 IConsensusRequest!";
+
+ // ---------------------------------------------------------------------------
+ // consensus – SchemaExecutionVisitor
+ // ---------------------------------------------------------------------------
+ public static final String IO_ERROR = "{}: IO 错误:";
+ public static final String OPENED_PIPE_LISTENING_QUEUE =
+ "已在 Schema Region {} 上打开 Pipe 监听队列";
+ public static final String CLOSED_PIPE_LISTENING_QUEUE =
+ "已在 Schema Region {} 上关闭 Pipe 监听队列";
+
+ // ---------------------------------------------------------------------------
+ // consensus – SchemaRegionStateMachine
+ // ---------------------------------------------------------------------------
+ public static final String FAIL_LOAD_SNAPSHOT = "从 {} 加载快照失败";
+
+ // ---------------------------------------------------------------------------
+ // consensus – DataExecutionVisitor
+ // ---------------------------------------------------------------------------
+ public static final String ERROR_EXECUTING_PLAN_NODE =
+ "执行计划节点 {} 时发生错误";
+ public static final String ERROR_EXECUTING_PLAN_NODE_CAUSED =
+ "执行计划节点 {} 时发生错误,原因:{}";
+ public static final String REJECT_EXECUTING_PLAN_NODE =
+ "拒绝执行计划节点 {},原因:{}";
+ public static final String BATCH_FAILURE_INSERT_ROWS =
+ "执行 InsertRowsNode 时出现批量失败。";
+ public static final String BATCH_FAILURE_INSERT_MULTI_TABLETS =
+ "执行 InsertMultiTabletsNode 时出现批量失败。";
+ public static final String BATCH_FAILURE_INSERT_ROWS_ONE_DEVICE =
+ "执行 InsertRowsOfOneDeviceNode 时出现批量失败。";
+
+ // ---------------------------------------------------------------------------
+ // consensus – DataRegionStateMachine
+ // ---------------------------------------------------------------------------
+ public static final String EXCEPTION_REPLACING_DATA_REGION =
+ "替换存储引擎中的数据 Region 时发生异常。";
+ public static final String UNEXPECTED_PLAN_NODE_TYPE =
+ "未预期的 PlanNode 类型 {},不是 SearchNode";
+ public static final String TABLE_NOT_EXISTS_OR_LOST =
+ "表不存在或已丢失,结果码为 {}";
+ public static final String GET_FRAGMENT_INSTANCE_FAILED = "获取 FragmentInstance 失败";
+ public static final String CANNOT_GET_CANONICAL_FILE =
+ "{}: 无法获取 {} 的规范文件路径,原因:{}";
+
+ // ---------------------------------------------------------------------------
+ // auth – LoginLockManager
+ // ---------------------------------------------------------------------------
+ public static final String IP_LOGIN_ATTEMPTS_DISABLED =
+ "IP 级别登录尝试已禁用(设置为 {})";
+ public static final String USER_LOGIN_ATTEMPTS_DISABLED =
+ "用户级别登录尝试已禁用(设置为 {})";
+ public static final String IP_LOCKED = "IP '{}' 已锁定,用户 ID '{}'";
+ public static final String USER_UNLOCKED_MANUAL = "用户 ID '{}' 已解锁(手动)";
+ public static final String IP_UNLOCKED_MANUAL =
+ "IP '{}' 已为用户 ID '{}' 解锁(手动)";
+ public static final String USER_UNLOCKED_EXPIRED = "用户 ID '{}' 已解锁(过期)";
+ public static final String IP_UNLOCKED_EXPIRED =
+ "IP '{}' 已为用户 ID '{}' 解锁(过期)";
+ public static final String IP_LOCKED_MULTIPLE_USERS =
+ "IP '{}' 被 {} 个不同用户锁定 → 可能遭受攻击";
+ public static final String USER_MULTIPLE_IP_LOCKS =
+ "用户 ID '{}' 有 {} 个 IP 锁定 → 可能遭受攻击";
+ public static final String FAILED_CHECK_IP_UP =
+ "检查 IP 地址 {} 是否可达失败";
+
+ // ---------------------------------------------------------------------------
+ // auth – ClusterAuthorityFetcher
+ // ---------------------------------------------------------------------------
+ public static final String CACHE_USER_PATH_PRIVILEGES_ERROR =
+ "缓存用户路径权限时发生错误";
+ public static final String CACHE_ROLE_PATH_PRIVILEGES_ERROR =
+ "缓存角色路径权限时发生错误";
+
+ // ---------------------------------------------------------------------------
+ // auth – BasicAuthorityCache
+ // ---------------------------------------------------------------------------
+ public static final String DATANODE_CACHE_INIT_FAILED =
+ "DataNode 缓存初始化失败";
+
+ // ---------------------------------------------------------------------------
+ // trigger – TriggerExecutor
+ // ---------------------------------------------------------------------------
+ public static final String TRIGGER_FIRE_ERROR =
+ "触发器触发时发生错误,触发器:{},原因:{}";
+
+ // ---------------------------------------------------------------------------
+ // trigger – TriggerInformationUpdater
+ // ---------------------------------------------------------------------------
+ public static final String TRIGGER_INFO_UPDATER_STARTED =
+ "有状态触发器信息更新器已成功启动。";
+ public static final String TRIGGER_INFO_UPDATER_STOPPED =
+ "有状态触发器信息更新器已成功停止。";
+ public static final String ERROR_UPDATING_TRIGGER_INFO =
+ "更新触发器信息时遇到错误:";
+
+ // ---------------------------------------------------------------------------
+ // trigger – TriggerFireVisitor
+ // ---------------------------------------------------------------------------
+ public static final String TRIGGER_INTERRUPTED_SLEEP =
+ "{} 在休眠时被中断";
+
+ // ---------------------------------------------------------------------------
+ // trigger – TriggerClassLoaderManager / TriggerClassLoader
+ // ---------------------------------------------------------------------------
+ public static final String TRIGGER_LIB_ROOT = "触发器库根目录:{}";
+
+ // ---------------------------------------------------------------------------
+ // trigger – TriggerManagementService
+ // ---------------------------------------------------------------------------
+ public static final String ERROR_READING_MD5 =
+ "尝试读取 {} 的 MD5 时发生错误";
+
+ // ---------------------------------------------------------------------------
+ // partition – DataPartitionTableGenerator
+ // ---------------------------------------------------------------------------
+ public static final String TASK_ALREADY_STARTED =
+ "任务已启动或已完成";
+
+ public static final String FROM_CONFIG_NODE = " 从 ConfigNode 获取失败。";
+ public static final String IS_NOT_SUPPORTED = " 不受支持";
+ public static final String CANNOT_SSL_HANDSHAKE_WITH_CN_LEADER = "无法与 ConfigNode-leader 进行 SSL 握手。";
+ public static final String CANNOT_CONNECT_TO_CN_LEADER = "无法连接到 ConfigNode-leader。";
+ public static final String CAPACITY_LARGER_THAN_INITIAL_PERMITS = "容量应大于初始许可数。";
+ public static final String CURRENT_TV_LIST_NOT_SORTED = "当前 TVList 未排序";
+ public static final String DN_CLIENT_NOT_SUPPORT_ADD_CONSENSUS_GROUP = "DataNode 到 ConfigNode 客户端不支持 addConsensusGroup。";
+ public static final String DN_CLIENT_NOT_SUPPORT_GET_HEARTBEAT = "DataNode 到 ConfigNode 客户端不支持 getConfigNodeHeartBeat。";
+ public static final String DN_CLIENT_NOT_SUPPORT_NOTIFY_REGISTER = "DataNode 到 ConfigNode 客户端不支持 notifyRegisterSuccess。";
+ public static final String DN_CLIENT_NOT_SUPPORT_REGISTER_CN = "DataNode 到 ConfigNode 客户端不支持 registerConfigNode。";
+ public static final String DN_CLIENT_NOT_SUPPORT_REMOVE_CONSENSUS_GROUP = "DataNode 到 ConfigNode 客户端不支持 removeConsensusGroup。";
+ public static final String DN_CLIENT_NOT_SUPPORT_REPORT_SHUTDOWN = "DataNode 到 ConfigNode 客户端不支持 reportConfigNodeShutdown。";
+ public static final String DN_CLIENT_NOT_SUPPORT_SET_STATUS = "DataNode 到 ConfigNode 客户端不支持 setDataNodeStatus。";
+ public static final String DN_CLIENT_NOT_SUPPORT_STOP_AND_CLEAR = "DataNode 到 ConfigNode 客户端不支持 stopAndClearConfigNode。";
+ public static final String ERROR_OCCURRED_DURING_CREATING_DIR = "创建目录时出错:";
+ public static final String EXPECTING_NON_EMPTY_STRING_FOR = "期望非空字符串:";
+ public static final String FAILED_TO_CONSTRUCT_PIPE_SINK = "构造 PipeSink 失败,原因:";
+ public static final String FAILED_TO_GET_UDF_JAR = "从 ConfigNode 获取 UDF jar 失败。";
+ public static final String FAILED_TO_GET_CONSUMER_GROUP_META = "从 ConfigNode 获取消费者组元数据失败。";
+ public static final String FAILED_TO_GET_TOPIC_META = "从 ConfigNode 获取 Topic 元数据失败。";
+ public static final String FAILED_TO_GET_TRIGGER_JAR = "从 ConfigNode 获取 Trigger jar 失败。";
+ public static final String FETCH_SCHEMA_FAILED = "获取 Schema 失败。";
+ public static final String INDEX_BELOW_START_POSITION = "索引低于起始位置:";
+ public static final String INDEX_EXCEEDS_END_POSITION = "索引超过结束位置:";
+ public static final String INDEX_OUT_OF_BOUND_ERROR = "索引越界错误!";
+ public static final String INVALID_PUSH_MULTI_PIPE_META_REQ = "无效的 TPushMultiPipeMetaReq";
+ public static final String INVALID_PUSH_MULTI_TOPIC_META_REQ = "无效的 TPushMultiTopicMetaReq";
+ public static final String INVALID_PUSH_SINGLE_PIPE_META_REQ = "无效的 TPushSinglePipeMetaReq";
+ public static final String INVALID_PARAM = "无效参数";
+ public static final String INVALID_PARAMETERS_CHECK_USER_GUIDE = "参数无效,请查看用户指南。";
+ public static final String INVALID_REQUEST = "无效请求 ";
+ public static final String PREPARED_STMT_NOT_SUPPORTED_FOR_TREE = "Tree 模型不支持 PreparedStatement";
+ public static final String FILE_LENGTH_LARGER_THAN_MAX = "文件长度超过 max_object_file_size_in_bytes";
+ public static final String UNKNOWN_CONSENSUS_GROUP_TYPE = "未知共识组类型:";
+ public static final String UNKNOWN_DATA_TYPE = "未知数据类型:";
+ public static final String UNKNOWN_PARAMETER_TYPE = "未知参数类型:";
+ public static final String UNKNOWN_SQL_DIALECT = "未知 sql_dialect:";
+ public static final String UNRECOGNIZED_MNODE_TYPE = "无法识别的 MNode 类型";
+ public static final String UNRECOGNIZED_DATATYPE = "无法识别的数据类型:";
+ public static final String UNSUPPORTED_COLUMN_GENERATOR_TYPE = "不支持的 ColumnGeneratorType:";
+ public static final String UNSUPPORTED_TRIGGER_FIRE_RESULT_TYPE = "不支持的 TriggerFireResult 类型";
+ public static final String UTILITY_CLASS = "工具类";
+ public static final String APPEND_SIZE_MUST_BE_POSITIVE = "appendSize 必须为正";
+ public static final String BLOCKS_SHOULD_NEVER_BE_ZERO = "blocks 不应为零。";
+ public static final String END_INDEX_MUST_BE_GE_START_INDEX = "endIndex 必须 >= startIndex";
+ public static final String ERROR_CODE = "错误码:";
+ public static final String NULL_RESPONSE_WHEN_SERIALIZING = "序列化时响应为空";
+ public static final String OBJECT_STORAGE_NOT_SUPPORTED_YET = "暂不支持对象存储";
+ public static final String REGISTERED_TASK_COUNT_LT_ZERO = "registeredTaskCount < 0";
+ public static final String REGISTERED_TASK_COUNT_LE_ZERO = "registeredTaskCount <= 0";
+ public static final String REQUEST_TYPE_NOT_SUPPORTED = "不支持的请求类型:";
+ public static final String UNEXPECTED_REQUEST_TYPE = "意外的请求类型:%s";
+}
diff --git a/iotdb-core/datanode/src/main/i18n/zh/org/apache/iotdb/db/i18n/DataNodePipeMessages.java b/iotdb-core/datanode/src/main/i18n/zh/org/apache/iotdb/db/i18n/DataNodePipeMessages.java
new file mode 100644
index 0000000000000..5313bf6e63ac7
--- /dev/null
+++ b/iotdb-core/datanode/src/main/i18n/zh/org/apache/iotdb/db/i18n/DataNodePipeMessages.java
@@ -0,0 +1,1323 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.i18n;
+
+public final class DataNodePipeMessages {
+
+ // ===================== CONSENSUS =====================
+
+ public static final String CLOSING_DELETION_RESOURCE_MANAGER_FOR = "正在关闭 {} 的删除资源管理器...";
+ public static final String DAL_THREAD_STILL_DOESN_T_EXIT_AFTER = "DAL 线程 {} 在 30 秒后仍未退出";
+ public static final String DELETIONMANAGER_CURRENT_DAL_DIR_IS_DELETED_SUCCESSFULLY =
+ "DeletionManager-{}:current DAL dir {} 已成功删除";
+ public static final String DELETIONMANAGER_CURRENT_DAL_DIR_IS_NOT_INITIALIZED =
+ "DeletionManager-{}:current DAL dir {} 未初始化,无需删除。";
+ public static final String DELETIONMANAGER_CURRENT_WAITING_IS_INTERRUPTED_MAY_BECAUSE =
+ "DeletionManager-{}:current waiting is interrupted. May because current application is "
+ + "down. ";
+ public static final String DELETIONMANAGER_DELETE_DELETION_FILE_IN_DIR =
+ "DeletionManager-{} delete deletion file in {} dir...";
+ public static final String DELETIONMANAGER_FAILED_TO_DELETE_FILE_IN_DIR =
+ "DeletionManager-{} 删除 {} 目录中的文件失败,请手动检查!";
+ public static final String DELETIONRESOURCE_HAS_BEEN_RELEASED_TRIGGER_A_REMOVE =
+ "DeletionResource {} 已释放,触发移除 DAL...";
+ public static final String DELETION_PERSIST_CANNOT_CREATE_FILE_PLEASE_CHECK =
+ "Deletion persist: Cannot create file {}, please check your file system manually.";
+ public static final String DELETION_PERSIST_CANNOT_WRITE_TO_MAY_CAUSE =
+ "Deletion persist: Cannot write to {}, may cause data inconsistency.";
+ public static final String DELETION_PERSIST_CURRENT_BATCH_FSYNC_DUE_TO =
+ "Deletion persist-{}:current batch fsync due to timeout";
+ public static final String DELETION_PERSIST_CURRENT_FILE_HAS_BEEN_CLOSED =
+ "Deletion persist-{}:current file 已关闭";
+ public static final String DELETION_PERSIST_SERIALIZE_DELETION_RESOURCE =
+ "Deletion persist-{}:serialize deletion resource {}";
+ public static final String DELETION_PERSIST_STARTING_TO_PERSIST_CURRENT_WRITING =
+ "Deletion persist-{}:starting to persist, current writing: {}";
+ public static final String DELETION_PERSIST_SWITCHING_TO_A_NEW_FILE =
+ "Deletion persist-{}:switching to a new file, current writing: {}";
+ public static final String DELETION_RESOURCE_MANAGER_FOR_HAS_BEEN_SUCCESSFULLY =
+ "{} 的删除资源管理器已成功关闭!";
+ public static final String DETECT_FILE_CORRUPTED_WHEN_RECOVER_DAL_DISCARD =
+ "恢复 DAL-{} 时检测到文件损坏,丢弃所有后续 DAL...";
+ public static final String FAILED_TO_INITIALIZE_DELETIONRESOURCEMANAGER =
+ "初始化 DeletionResourceManager 失败";
+ public static final String FAILED_TO_READ_DELETION_FILE_MAY_BECAUSE =
+ "读取 deletion file {} 失败,可能因为写入时该文件已损坏。";
+ public static final String FAILED_TO_RECOVER_DELETIONRESOURCEMANAGER =
+ "恢复 DeletionResourceManager 失败";
+ public static final String FAIL_TO_ALLOCATE_DELETIONBUFFER_GROUP_S_BUFFER =
+ "分配 deletionBuffer-group-{} 的 buffer 失败,原因:内存不足。";
+ public static final String FAIL_TO_CLOSE_CURRENT_LOGGING_FILE_WHEN = "关闭时无法关闭当前日志文件";
+ public static final String FAIL_TO_REGISTER_DELETIONRESOURCE_INTO_DELETIONBUFFER_BECAUSE =
+ "注册 DeletionResource 到 deletionBuffer-{} 失败,原因:该 buffer 已关闭。";
+ public static final String INTERRUPTED_WHEN_WAITING_FOR_ALL_DELETIONS_FLUSHED = "等待所有删除操作刷盘时被中断。";
+ public static final String INTERRUPTED_WHEN_WAITING_FOR_RESULT = "等待结果时被中断。";
+ public static final String INTERRUPTED_WHEN_WAITING_FOR_TAKING_DELETIONRESOURCE_FROM =
+ "等待从阻塞队列中取出 DeletionResource 进行序列化时被中断。";
+ public static final String INTERRUPTED_WHEN_WAITING_FOR_TAKING_WALENTRY_FROM =
+ "等待从阻塞队列中取出 WALEntry 进行序列化时被中断。";
+ public static final String INVALID_DELETION_PROGRESS_INDEX = "无效的删除进度索引:";
+ public static final String PERSISTTHREAD_DID_NOT_TERMINATE_WITHIN_S = "persistThread 在 {} 秒内未终止";
+ public static final String READ_DELETION_FILE_MAGIC_VERSION =
+ "读取 deletion file-{} magic version: {}";
+ public static final String READ_DELETION_FROM_FILE = "从 file {} 读取 deletion: {}";
+ public static final String UNABLE_TO_CREATE_IOTCONSENSUSV2_DELETION_DIR_AT =
+ "无法在 {} 创建 iotConsensusV2 删除目录";
+
+ // ===================== AGENT =====================
+
+ public static final String ATTEMPT_TO_REPORT_PIPE_EXCEPTION_TO_A =
+ "Attempt to report pipe exception to a null PipeTaskMeta.";
+ public static final String CANNOT_PARSE_REBOOT_TIMES_FROM_FILE_SET =
+ "无法解析 reboot times from file {}, set the current time in seconds ({}) as the reboot times";
+ public static final String CANNOT_RECORD_REBOOT_TIMES_TO_FILE_THE =
+ "无法记录 reboot times {} to file {}, the reboot times will not be updated";
+ public static final String CANNOT_START_SIMPLEPROGRESSINDEXASSIGNER_BECAUSE_OF =
+ "无法启动 SimpleProgressIndexAssigner because of {}";
+ public static final String CREATE_PIPE_DN_TASK_SUCCESSFULLY_WITHIN_MS =
+ "创建 pipe DN task {} successfully within {} ms";
+ public static final String DEREGISTER_SUBTASK_RUNNINGTASKCOUNT_REGISTEREDTASKCOUNT =
+ "Deregister subtask {}. runningTaskCount: {}, registeredTaskCount: {}";
+ public static final String DROP_PIPE_DN_TASK_SUCCESSFULLY_WITHIN_MS =
+ "Drop pipe DN task {} successfully within {} ms";
+ public static final String ERROR_OCCURRED_WHEN_COLLECTING_EVENTS_FROM_PROCESSOR =
+ "collecting events from processor 时发生错误";
+ public static final String EXCEPTION_IN_PIPE_EVENT_PROCESSING_IGNORED_BECAUSE =
+ "pipe event processing 中发生异常,因 pipe 已被删除而忽略。{}";
+ public static final String EXCEPTION_OCCURRED_WHEN_CLOSING_PIPE_CONNECTOR_SUBTASK =
+ "关闭 pipe connector subtask {} 时发生异常,根本原因:{}";
+ public static final String EXCEPTION_OCCURRED_WHEN_CLOSING_PIPE_PROCESSOR_SUBTASK =
+ "关闭 pipe processor subtask {} 时发生异常,根本原因:{}";
+ public static final String EXCEPTION_OCCURS_WHEN_EXECUTING_PIPE_TASK =
+ "执行 pipe task 时发生异常:";
+ public static final String FAILED_TO_CHECK_IF_PIPE_HAS_RELEASE =
+ "检查 pipe 是否已释放 consensus group id 为 {} 的 region 相关资源失败。";
+ public static final String FAILED_TO_CLEAR_CLOSE_THE_SCHEMA_REGION =
+ "Failed to clear/close the schema region listening queue, because {}. Will wait until "
+ + "success or the region's state machine is stopped.";
+ public static final String FAILED_TO_CLOSE_CONNECTOR_AFTER_FAILED_TO =
+ "初始化 connector 失败后,关闭 connector 也失败。忽略此异常。";
+ public static final String FAILED_TO_CLOSE_LISTENING_QUEUE_FOR_SCHEMAREGION =
+ "关闭 listening queue for SchemaRegion 失败";
+ public static final String FAILED_TO_CLOSE_SOURCE_AFTER_FAILED_TO =
+ "初始化 source 失败后,关闭 source 也失败。忽略此异常。";
+ public static final String FAILED_TO_CONSTRUCT_PIPECONNECTOR_BECAUSE_OF =
+ "构造 PipeConnector 失败,原因:";
+ public static final String FAILED_TO_DECREASE_REFERENCE_COUNT_FOR_EVENT =
+ "减少 reference count for event {} in PipeRealtimePriorityBlockingQueue 失败";
+ public static final String FAILED_TO_GET_PENDINGQUEUE_NO_SUCH_SUBTASK =
+ "获取 PendingQueue 失败。无此 subtask:";
+ public static final String FAILED_TO_GET_PIPE_METAS_WILL_BE =
+ "获取 pipe metas 失败,稍后将由 configNode 同步。";
+ public static final String FAILED_TO_GET_PIPE_PLUGIN_JAR_FROM =
+ "获取 pipe plugin jar from config node 失败。";
+ public static final String FAILED_TO_GET_PIPE_TASK_META_FROM =
+ "从 config node 获取 pipe task meta 失败。忽略该异常,原因:config node 可能尚未就绪,"
+ + "meta 稍后将由 config node 推送。";
+ public static final String FAILED_TO_PERSIST_PROGRESS_INDEX_TO_CONFIGNODE =
+ "持久化 progress index 到 configNode 失败,status: {}";
+ public static final String FAILURE_WHEN_REGISTER_PIPE_PLUGIN_SKIP_THIS =
+ "Failure when register pipe plugin {}. Skip this plugin and continue startup.";
+ public static final String PIPECONNECTOR = "PipeConnector: ";
+ public static final String PIPEDATANODETASKBUILDER_FAILED_TO_PARSE_INCLUSION_AND_EXCLUSION =
+ "PipeDataNodeTaskBuilder failed to parse 'inclusion' and 'exclusion' parameters: {}";
+ public static final String PIPEDATANODETASKBUILDER_WHEN_INCLUSION_CONTAINS_DATA_DELETE_REALTIME =
+ "PipeDataNodeTaskBuilder: When 'inclusion' contains 'data.delete', 'realtime-first' is "
+ + "defaulted to 'false' to prevent sync issues after deletion.";
+ public static final String PIPEDATANODETASKBUILDER_WHEN_INCLUSION_INCLUDES_DATA_DELETE_REALTIME =
+ "PipeDataNodeTaskBuilder: When 'inclusion' includes 'data.delete', 'realtime-first' set "
+ + "to 'true' may result in data synchronization issues after deletion.";
+ public static final String PIPEDATANODETASKBUILDER_WHEN_SOURCE_USES_SNAPSHOT_MODEL_REALTIME =
+ "PipeDataNodeTaskBuilder: When source uses snapshot model, 'realtime-first' is defaulted "
+ + "to 'false' to prevent premature halt before transfer completion.";
+ public static final String PIPEDATANODETASKBUILDER_WHEN_SOURCE_USES_SNAPSHOT_MODEL_REALTIME_1 =
+ "PipeDataNodeTaskBuilder: When source uses snapshot model, 'realtime-first' set to "
+ + "'true' may cause prevent premature halt before transfer completion.";
+ public static final String PIPEDATANODETASKBUILDER_WHEN_THE_REALTIME_SYNC_IS_ENABLED =
+ "PipeDataNodeTaskBuilder: When the realtime sync is enabled, not enabling the rate "
+ + "limiter in sending tsfile may introduce delay for realtime sending.";
+ public static final String PIPEDATANODETASKBUILDER_WHEN_THE_REALTIME_SYNC_IS_ENABLED_1 =
+ "PipeDataNodeTaskBuilder: When the realtime sync is enabled, we enable rate limiter in "
+ + "sending tsfile by default to reserve disk and network IO for realtime sending.";
+ public static final String PIPEEVENTCOLLECTOR_THE_EVENT_IS_ALREADY_RELEASED_SKIPPING =
+ "PipeEventCollector: The event {} is already released, skipping it.";
+ public static final String PIPE_CONNECTOR_SUBTASK_WAS_CLOSED_WITHIN_MS =
+ "Pipe:connector subtask {} ({}) 已在 {} ms 内关闭";
+ public static final String PIPE_META_NOT_FOUND = "Pipe meta not found: ";
+ public static final String PIPE_SINK_SUBTASKS_WITH_ATTRIBUTES_IS_BOUNDED =
+ "Pipe sink subtasks with attributes {} is bounded with sinkExecutor {} and "
+ + "callbackExecutor {}.";
+ public static final String PIPE_SKIPPING_TEMPORARY_TSFILE_WHICH_SHOULDN_T =
+ "Pipe skipping temporary TsFile which shouldn't be transferred: {}";
+ public static final String PULLED_PIPE_META_FROM_CONFIG_NODE_RECOVERING =
+ "Pulled pipe meta from config node: {}, recovering ...";
+ public static final String RECEIVED_PIPE_HEARTBEAT_REQUEST_FROM_CONFIG_NODE =
+ "Received pipe heartbeat request {} from config node.";
+ public static final String REGION_NO_TSFILEINSERTIONEVENTS_TO_REPLACE_FOR_SOURCE =
+ "Region {}: No TsFileInsertionEvents to replace for source files {}";
+ public static final String REGION_REPLACED_TSFILEINSERTIONEVENTS_WITH =
+ "Region {}: Replaced TsFileInsertionEvents {} with {}";
+ public static final String REGISTEREDTASKCOUNT_0 = "registeredTaskCount < 0";
+ public static final String REGISTEREDTASKCOUNT_0_1 = "registeredTaskCount <= 0";
+ public static final String REGISTER_SUBTASK_RUNNINGTASKCOUNT_REGISTEREDTASKCOUNT =
+ "Register subtask {}. runningTaskCount: {}, registeredTaskCount: {}";
+ public static final String REPORT_PIPERUNTIMEEXCEPTION_TO_LOCAL_PIPETASKMETA_EXCEPTION_MESSAGE =
+ "Report PipeRuntimeException to local PipeTaskMeta({}), exception message: {}";
+ public static final String RUNNINGTASKCOUNT_0 = "runningTaskCount < 0";
+ public static final String RUNNINGTASKCOUNT_0_1 = "runningTaskCount <= 0";
+ public static final String SIMPLEPROGRESSINDEXASSIGNER_STARTED_SUCCESSFULLY_ISSIMPLECONSENSUSENABLE_R =
+ "SimpleProgressIndexAssigner started successfully. isSimpleConsensusEnable: {}, "
+ + "rebootTimes: {}";
+ public static final String STARTING_SIMPLEPROGRESSINDEXASSIGNER =
+ "Starting SimpleProgressIndexAssigner ...";
+ public static final String START_PIPE_DN_TASK_SUCCESSFULLY_WITHIN_MS =
+ "Start pipe DN task {} successfully within {} ms";
+ public static final String START_SUBTASK_RUNNINGTASKCOUNT_REGISTEREDTASKCOUNT =
+ "Start subtask {}. runningTaskCount: {}, registeredTaskCount: {}";
+ public static final String STOP_PIPE_DN_TASK_SUCCESSFULLY_WITHIN_MS =
+ "Stop pipe DN task {} successfully within {} ms";
+ public static final String STOP_SUBTASK_RUNNINGTASKCOUNT_REGISTEREDTASKCOUNT =
+ "Stop subtask {}. runningTaskCount: {}, registeredTaskCount: {}";
+ public static final String SUBTASK_IS_CLOSED_IGNORE_EXCEPTION =
+ "subtask {} 已关闭, ignore exception";
+ public static final String SUBTASK_WORKER_IS_INTERRUPTED = "subtask worker is interrupted";
+ public static final String SUCCESSFULLY_PERSISTED_ALL_PIPE_S_INFO_TO =
+ "已成功将所有 pipe 的信息持久化到 configNode。";
+ public static final String THE_EXECUTOR_AND_HAS_BEEN_SUCCESSFULLY_SHUTDOWN =
+ "The executor {} and {} has been successfully shutdown.";
+
+ // ===================== EVENT =====================
+
+ public static final String DATABASENAMEFROMDATAREGION_IS_NULL = "databaseNameFromDataRegion 为空";
+ public static final String DECREASE_REFERENCE_COUNT_ERROR = "减少引用计数出错。";
+ public static final String DECREASE_REFERENCE_COUNT_FOR_MTREE_SNAPSHOT_OR =
+ "Decrease reference count for mTree snapshot {} or tLog {} or attribute snapshot {} 出错。";
+ public static final String DECREASE_REFERENCE_COUNT_FOR_TSFILE_ERROR =
+ "Decrease reference count for TsFile {} 出错。";
+ public static final String DO_NOT_HAS_A_COMPLETE_PAGE_BODY =
+ "do not has a complete page body. Expected:";
+ public static final String ERROR_WHILE_PARSING_TSFILE_INSERTION_EVENT =
+ "Error while parsing tsfile insertion event";
+ public static final String EXCEPTION_OCCURRED_WHEN_DETERMINING_THE_EVENT_TIME =
+ "判断 PipeInsertNodeTabletInsertionEvent({}) 的事件时间是否与时间范围 [{}, {}] 重叠时"
+ + "发生异常。返回 true 以确保数据完整性";
+ public static final String FAILED_TO_ALLOCATE_MEMORY_FOR_PARSING_TSFILE =
+ "{}: failed to allocate memory for parsing TsFile {}, tablet event no. {}, retry count "
+ + "is {}, will keep retrying.";
+ public static final String FAILED_TO_BUILD_TABLET = "构建 tablet 失败";
+ public static final String FAILED_TO_CHECK_NEXT = "check next 失败";
+ public static final String FAILED_TO_CLOSE_TSFILEREADER = "关闭 TsFileReader 失败";
+ public static final String FAILED_TO_CLOSE_TSFILESEQUENCEREADER = "关闭 TsFileSequenceReader 失败";
+ public static final String FAILED_TO_CREATE_TSFILEINSERTIONDATATABLETITERATOR =
+ "创建 TsFileInsertionDataTabletIterator 失败";
+ public static final String FAILED_TO_GET_NEXT_TABLET_INSERTION_EVENT =
+ "获取 next tablet insertion event 失败。";
+ public static final String FAILED_TO_LOAD_MODIFICATIONS_FROM_TSFILE =
+ "加载 modifications from TsFile: 失败";
+ public static final String FAILED_TO_READ_METADATA_FOR_DEVICEID_MEASUREMENT =
+ "读取 metadata for deviceId: {}, measurement: {}, removing 失败";
+ public static final String FAILED_TO_RECORD_PARSE_END_TIME_FOR =
+ "记录 parse end time for pipe {} 失败";
+ public static final String FAILED_TO_RECORD_TABLET_METRICS_FOR_PIPE =
+ "记录 tablet metrics for pipe {} 失败";
+ public static final String FOUND_NULL_DEVICEID_REMOVING_ENTRY =
+ "Found null deviceId, removing entry";
+ public static final String INITIALIZE_DATA_CONTAINER_ERROR = "Initialize data container 出错。";
+ public static final String INSERTNODE_HAS_BEEN_RELEASED = "InsertNode 已被释放";
+ public static final String INSERTROWNODE_IS_PARSED_TO_ZERO_ROWS_ACCORDING =
+ "InsertRowNode({}) is parsed to zero rows according to the pattern({}) and time range "
+ + "[{}, {}], the corresponding source event({}) will be ignored.";
+ public static final String INSERTTABLETNODE_IS_PARSED_TO_ZERO_ROWS_ACCORDING =
+ "InsertTabletNode({}) is parsed to zero rows according to the pattern({}) and time range "
+ + "[{}, {}], the corresponding source event({}) will be ignored.";
+ public static final String INVALID_EVENT_TYPE = "无效的 event type: ";
+ public static final String INVALID_INPUT = "无效的 input: ";
+ public static final String ISGENERATEDBYPIPE_IS_NOT_SUPPORTED =
+ "isGeneratedByPipe() is not supported!";
+ public static final String MAYEVENTPATHSOVERLAPPEDWITHPATTERN_IS_NOT_SUPPORTED =
+ "mayEventPathsOverlappedWithPattern() is not supported!";
+ public static final String MAYEVENTTIMEOVERLAPPEDWITHTIMERANGE_IS_NOT_SUPPORTED =
+ "mayEventTimeOverlappedWithTimeRange() is not supported!";
+ public static final String NO_COMMIT_IDS_FOUND_IN_PIPECOMPACTEDTSFILEINSERTIONEVENT =
+ "No commit IDs found in PipeCompactedTsFileInsertionEvent.";
+ public static final String PIPECOMPACTEDTSFILEINSERTIONEVENT_DOES_NOT_SUPPORT_EQUALSINIOTCONSENSUSV2 =
+ "PipeCompactedTsFileInsertionEvent 不支持 equalsInIoTConsensusV2.";
+ public static final String PIPECOMPACTEDTSFILEINSERTIONEVENT_DOES_NOT_SUPPORT_GETREBOOTTIMES =
+ "PipeCompactedTsFileInsertionEvent 不支持 getRebootTimes.";
+ public static final String PIPE_FAILED_TO_GET_DEVICES_FROM_TSFILE =
+ "Pipe {}:从 TsFile {} 获取 devices 失败,仍将提取";
+ public static final String PIPE_SKIPPING_TEMPORARY_TSFILE_S_PARSING_WHICH =
+ "Pipe skipping temporary TsFile's parsing which shouldn't be transferred: {}";
+ public static final String ROW_CAN_NOT_BE_CUSTOMIZED = "Row can not be customized";
+ public static final String SHALLOWCOPYSELFANDBINDPIPETASKMETAFORPROGRESSREPORT_IS_NOT_SUPPORTED =
+ "shallowCopySelfAndBindPipeTaskMetaForProgressReport() is not supported!";
+ public static final String SKIPPING_TEMPORARY_TSFILE_S_PROGRESSINDEX_WILL_REPORT =
+ "跳过 temporary TsFile {}'s progressIndex, will report MinimumProgressIndex";
+ public static final String TABLEPATTERNPARSER_DOES_NOT_SUPPORT_ROW_BY_ROW =
+ "TablePatternParser 不支持 row by row processing";
+ public static final String TABLEPATTERNPARSER_DOES_NOT_SUPPORT_TABLET_PROCESSING =
+ "TablePatternParser 不支持 tablet processing";
+ public static final String TABLEPATTERNPARSER_DOES_NOT_SUPPORT_TABLET_PROCESSING_WITH =
+ "TablePatternParser 不支持 tablet processing with collect";
+ public static final String TABLET_IS_PARSED_TO_ZERO_ROWS_ACCORDING =
+ "Tablet({}) is parsed to zero rows according to the pattern({}) and time range [{}, {}], "
+ + "the corresponding source event({}) will be ignored.";
+ public static final String TABLE_MODEL_TSFILE_PARSING_DOES_NOT_SUPPORT =
+ "Table model tsfile parsing 不支持 this type of ChunkMeta";
+ public static final String TEMPORARY_TSFILE_DETECTED_WILL_SKIP_ITS_TRANSFER =
+ "Temporary tsFile {} detected, will skip its transfer.";
+ public static final String TSFILE_HAS_INITIALIZED_PIPENAME_CREATION_TIME_PATTERN =
+ "TsFile {} has initialized {}, pipeName: {}, creation time: {}, pattern: {}, startTime: "
+ + "{}, endTime: {}, withMod: {}";
+ public static final String UNCOMPRESS_ERROR_UNCOMPRESS_SIZE =
+ "Uncompress error! uncompress size: ";
+ public static final String UNSUPPORTED = "不支持";
+ public static final String UNSUPPORTED_NODE_TYPE = "不支持的 node type ";
+ public static final String WAIT_FOR_MEMORY_ENOUGH_FOR_PARSING_FOR =
+ "等待足够内存以解析 {},已等待 {} 秒。";
+
+ // ===================== PROCESSOR =====================
+
+ public static final String ABSTRACTSAMETYPENUMERICOPERATOR_DOES_NOT_SUPPORT_BINARY_INPUT =
+ "AbstractSameTypeNumericOperator 不支持 binary input";
+ public static final String ABSTRACTSAMETYPENUMERICOPERATOR_DOES_NOT_SUPPORT_BOOLEAN_INPUT =
+ "AbstractSameTypeNumericOperator 不支持 boolean input";
+ public static final String ABSTRACTSAMETYPENUMERICOPERATOR_DOES_NOT_SUPPORT_DATE_INPUT =
+ "AbstractSameTypeNumericOperator 不支持 date input";
+ public static final String ABSTRACTSAMETYPENUMERICOPERATOR_DOES_NOT_SUPPORT_STRING_INPUT =
+ "AbstractSameTypeNumericOperator 不支持 string input";
+ public static final String CHANGINGVALUESAMPLINGPROCESSOR_IN_IS_INITIALIZED_WITH =
+ "ChangingValueSamplingProcessor in {} is initialized with {}: {}, {}: {}, {}: {}.";
+ public static final String CLEAN_OUTDATED_INCOMPLETE_COMBINER_PIPENAME_CREATIONTIME_COMBINEID =
+ "清理 outdated incomplete combiner: pipeName={}, creationTime={}, combineId={}";
+ public static final String COMBINEHANDLER_NOT_FOUND_FOR_PIPEID =
+ "CombineHandler not found for pipeId = ";
+ public static final String COMBINER_COMBINE_COMPLETED_REGIONID_STATE_RECEIVEDREGIONIDSET_EX =
+ "Combiner combine completed: regionId: {}, state: {}, receivedRegionIdSet: {}, "
+ + "expectedRegionIdSet: {}";
+ public static final String COMBINER_COMBINE_REGIONID_STATE_RECEIVEDREGIONIDSET_EXPECTEDREGI =
+ "Combiner combine: regionId: {}, state: {}, receivedRegionIdSet: {}, expectedRegionIdSet: {}";
+ public static final String DATA_NODES_ENDPOINTS_FOR_TWO_STAGE_AGGREGATION =
+ "Data nodes' endpoints for two-stage aggregation: {}";
+ public static final String DIFFERENT_DATA_TYPE_ENCOUNTERED_IN_ONE_WINDOW =
+ "Different data type encountered in one window, will purge. Previous type: {}, now type: {}";
+ public static final String ENCOUNTERED_EXCEPTION_WHEN_DESERIALIZING_FROM_PIPETASKMETA =
+ "Encountered exception when deserializing from PipeTaskMeta";
+ public static final String END_POINTS_FOR_TWO_STAGE_AGGREGATION_PIPE =
+ "End points for two-stage aggregation pipe (pipeName={}, creationTime={}) were updated to {}";
+ public static final String ERROR_OCCURRED_WHEN_CLOSING_COMBINEHANDLER_ID =
+ "closing CombineHandler(id = {}) 时发生错误";
+ public static final String ERROR_OCCURS_WHEN_RECEIVING_REQUEST = "receiving request: {} 时发生错误";
+ public static final String FAILED_TO_CLOSE_IOTDBSYNCCLIENT = "关闭 IoTDBSyncClient 失败";
+ public static final String FAILED_TO_CLOSE_OLD_IOTDBSYNCCLIENT = "关闭 old IoTDBSyncClient 失败";
+ public static final String FAILED_TO_COMBINE_COUNT = "combine count 失败:";
+ public static final String FAILED_TO_CONSTRUCT_IOTDBSYNCCLIENT = "构造 IoTDBSyncClient 失败";
+ public static final String FAILED_TO_FETCH_COMBINE_RESULT = "获取 combine result 失败:";
+ public static final String FAILED_TO_FETCH_DATA_NODES = "获取 data nodes 失败";
+ public static final String FAILED_TO_FETCH_DATA_REGION_IDS = "获取 data region ids 失败";
+ public static final String FAILED_TO_RECONSTRUCT_IOTDBSYNCCLIENT_AFTER_FAILURE_TO =
+ "重建 IoTDBSyncClient {} 失败(此前发送 request {} (watermark = {}) 失败)";
+ public static final String FAILED_TO_SEND_REQUEST_WATERMARK_TO =
+ "发送 request {} (watermark = {}) to {} 失败";
+ public static final String FAILED_TO_TRIGGER_COMBINE_WATERMARK_COUNT_PROGRESSINDEX =
+ "触发 combine 失败。watermark={}, count={}, progressIndex={}";
+ public static final String FAILURE_OCCURRED_WHEN_TRYING_TO_COMMIT_PROGRESS =
+ "Failure occurred when trying to commit progress index. timestamp={}, count={}, "
+ + "progressIndex={}";
+ public static final String FETCHED_DATA_REGION_IDS_AT = "Fetched data region ids {} at {}";
+ public static final String FRACTIONPOWEREDSUMOPERATOR_DOES_NOT_SUPPORT_BINARY_INPUT =
+ "FractionPoweredSumOperator 不支持 binary input";
+ public static final String FRACTIONPOWEREDSUMOPERATOR_DOES_NOT_SUPPORT_BOOLEAN_INPUT =
+ "FractionPoweredSumOperator 不支持 boolean input";
+ public static final String FRACTIONPOWEREDSUMOPERATOR_DOES_NOT_SUPPORT_DATE_INPUT =
+ "FractionPoweredSumOperator 不支持 date input";
+ public static final String FRACTIONPOWEREDSUMOPERATOR_DOES_NOT_SUPPORT_STRING_INPUT =
+ "FractionPoweredSumOperator 不支持 string input";
+ public static final String GLOBAL_COUNT_IS_LESS_THAN_THE_LAST =
+ "Global count is less than the last collected count: timestamp={}, count={}";
+ public static final String IGNORED_TABLETINSERTIONEVENT_IS_NOT_AN_INSTANCE_OF =
+ "已忽略不是 PipeInsertNodeTabletInsertionEvent 或 PipeRawTabletInsertionEvent 实例的 "
+ + "TabletInsertionEvent: {}";
+ public static final String IGNORED_TSFILEINSERTIONEVENT_IS_EMPTY =
+ "已忽略空的 TsFileInsertionEvent: {}";
+ public static final String IGNORED_TSFILEINSERTIONEVENT_IS_NOT_AN_INSTANCE_OF =
+ "已忽略不是 PipeTsFileInsertionEvent 实例的 TsFileInsertionEvent: {}";
+ public static final String ILLEGAL_OUTPUT_SERIES_PATH = "非法的 output series path: ";
+ public static final String NO_DATA_NODES_ENDPOINTS_FETCHED = "No data nodes' endpoints fetched";
+ public static final String NO_EXPECTED_REGION_ID_SET_FETCHED =
+ "No expected region id set fetched";
+ public static final String PARTIALPATHLASTOBJECTCACHE_ALLOCATEDMEMORYBLOCK_HAS_EXPANDED_FROM_TO =
+ "PartialPathLastObjectCache.allocatedMemoryBlock has expanded from {} to {}.";
+ public static final String PARTIALPATHLASTOBJECTCACHE_ALLOCATEDMEMORYBLOCK_HAS_SHRUNK_FROM_TO =
+ "PartialPathLastObjectCache.allocatedMemoryBlock has shrunk from {} to {}.";
+ public static final String SENDING_REQUEST_WATERMARK_TO = "正在发送 request {} (watermark = {}) 到 {}";
+ public static final String SWINGINGDOORTRENDINGSAMPLINGPROCESSOR_IN_IS_INITIALIZED_WITH =
+ "SwingingDoorTrendingSamplingProcessor in {} is initialized with {}: {}, {}: {}, {}: {}.";
+ public static final String THE_ABSTRACT_FORMAL_PROCESSOR_DOES_NOT_SUPPORT = "抽象形式处理器不支持处理事件";
+ public static final String TUMBLINGTIMESAMPLINGPROCESSOR_IN_IS_INITIALIZED_WITH_S =
+ "TumblingTimeSamplingProcessor in {} is initialized with {}: {}s, {}: {}, {}: {}.";
+ public static final String TWOSTAGECOUNTPROCESSOR_CUSTOMIZED_BY_THREAD_PIPENAME_CREATIONTIME_RE =
+ "TwoStageCountProcessor customized by thread {}: pipeName={}, creationTime={}, "
+ + "regionId={}, outputSeries={}, localCommitProgressIndex={}, localCount={}";
+ public static final String TWO_STAGE_AGGREGATE_PIPE_PIPENAME_CREATIONTIME_RELATED =
+ "Two stage aggregate pipe (pipeName={}, creationTime={}) related region ids {}";
+ public static final String TWO_STAGE_AGGREGATE_RECEIVER_IS_EXITING =
+ "Two stage aggregate receiver is exiting.";
+ public static final String TWO_STAGE_COMBINE_REGION_ID_COMBINE_ID =
+ "Two stage combine (region id = {}, combine id = {}) incomplete: timestamp={}, count={}, "
+ + "progressIndex={}";
+ public static final String TWO_STAGE_COMBINE_REGION_ID_COMBINE_ID_1 =
+ "Two stage combine (region id = {}, combine id = {}) outdated: timestamp={}, count={}, "
+ + "progressIndex={}";
+ public static final String TWO_STAGE_COMBINE_REGION_ID_COMBINE_ID_2 =
+ "Two stage combine (region id = {}, combine id = {}) success: timestamp={}, count={}, "
+ + "progressIndex={}, committed progressIndex={}";
+ public static final String UNEXPECTED_STATE_CLASS = "Unexpected state class: ";
+ public static final String UNKNOWN_COMBINE_RESULT_TYPE = "未知的 combine result type: ";
+ public static final String UNKNOWN_REQUEST_TYPE = "未知的 request type {}: {}。";
+
+ // ===================== SOURCE =====================
+
+ public static final String ALL_DATA_IN_TSFILEEPOCH_WAS_EXTRACTED =
+ "TsFileEpoch {} 中的所有数据已提取";
+ public static final String BUFFERSIZE_MUST_BE_A_POWER_OF_2 = "bufferSize must be a power of 2";
+ public static final String BUFFERSIZE_MUST_NOT_BE_LESS_THAN_1 =
+ "bufferSize must not be less than 1";
+ public static final String CAPTURE_TREE_AND_CAPTURE_TABLE_CAN_NOT =
+ "capture.tree 和 capture.table 不能同时设为 false";
+ public static final String DATABASE_NAME_IS_NULL_WHEN_MATCHING_SOURCES =
+ "匹配表模型事件的 source 时数据库名称为空。";
+ public static final String DATA_REGION_INJECTED_WATERMARK_EVENT_WITH_TIMESTAMP =
+ "Data region {}: Injected watermark event with timestamp: {}";
+ public static final String DISCARD_TABLET_EVENT_BECAUSE_IT_IS_NOT =
+ "Discard tablet event {} because it is not reliable anymore. Change the state of "
+ + "TsFileEpoch to USING_BOTH.";
+ public static final String DISRUPTOR_ALREADY_STARTED = "Disruptor already started";
+ public static final String DISRUPTOR_SHUTDOWN_COMPLETED = "Disruptor 关闭完成";
+ public static final String DISRUPTOR_STARTED_WITH_BUFFER_SIZE = "Disruptor 已启动,缓冲区大小:{}";
+ public static final String EXCEPTION_DURING_ONSHUTDOWN = "onShutdown() 期间发生异常";
+ public static final String EXCEPTION_DURING_ONSTART = "onStart() 期间发生异常";
+ public static final String EXCEPTION_ENCOUNTERED_WHEN_TRIGGERING_SCHEMA_REGION_SNAPSHOT =
+ "Exception encountered when triggering schema region snapshot.";
+ public static final String EXCEPTION_PROCESSING = "处理时发生异常:{} {}";
+ public static final String FAILED_TO_LOAD_SNAPSHOT = "加载 snapshot {} 失败";
+ public static final String FAILED_TO_LOAD_SNAPSHOT_FROM_BYTEBUFFER =
+ "加载 snapshot from byteBuffer {} 失败。";
+ public static final String FAILED_TO_START_SOURCES = "启动 sources 失败。";
+ public static final String HEARTBEAT_EVENT_CAN_NOT_BE_SUPPLIED_BECAUSE =
+ "Heartbeat Event {} can not be supplied because the reference count can not be increased";
+ public static final String INTERRUPTED_WAITING_FOR_PROCESSOR_TO_STOP =
+ "Interrupted waiting for processor to stop";
+ public static final String IOTDBSCHEMAREGIONSOURCE_DOES_NOT_SUPPORT_TRANSFERRING_EVENTS_UNDER =
+ "IoTDBSchemaRegionSource 不支持 transferring events under simple consensus";
+ public static final String NOT_HAS_PRIVILEGE_TO_TRANSFER_EVENT = "没有权限 transfer event: ";
+ public static final String NOT_HAS_PRIVILEGE_TO_TRANSFER_PLAN = "没有权限传输计划:";
+ public static final String NO_EVENT_HANDLER_CONFIGURED = "No event handler configured";
+ public static final String N_MUST_BE_0 = "n must be > 0";
+ public static final String PIPEREALTIMEDATAREGIONEXTRACTOR_OBSERVED_DATA_REGION_TIME_PARTITION_GROWT =
+ "PipeRealtimeDataRegionExtractor({}) observed data region {} time partition growth, "
+ + "recording time partition id bound: {}.";
+ public static final String PIPE_AND_IS_NOT_SET_USE_HYBRID =
+ "Pipe:'{}' ('{}') and '{}' ('{}') is not set, use hybrid mode by default.";
+ public static final String PIPE_ASSIGNER_ON_DATA_REGION_SHUTDOWN_INTERNAL =
+ "Pipe:Assigner on data region {} shutdown internal disruptor within {} ms";
+ public static final String PIPE_FAILED_TO_GET_DEVICES_FROM_TSFILE_1 =
+ "Pipe {}@{}:从 TsFile {} 获取 devices 失败,仍将提取";
+ public static final String PIPE_FAILED_TO_INCREASE_REFERENCE_COUNT_FOR =
+ "Pipe {}@{}:增加 historical deletion event {} 的引用计数失败,将丢弃该事件";
+ public static final String PIPE_FAILED_TO_INCREASE_REFERENCE_COUNT_FOR_1 =
+ "Pipe {}@{}:增加 historical tsfile event {} 的引用计数失败,将丢弃该事件";
+ public static final String PIPE_FAILED_TO_INCREASE_REFERENCE_COUNT_FOR_2 =
+ "Pipe {}@{}:增加 terminate event 的引用计数失败,将重新发送";
+ public static final String PIPE_FAILED_TO_PIN_TSFILERESOURCE = "Pipe:固定 TsFileResource {} 失败";
+ public static final String PIPE_FAILED_TO_START_TO_EXTRACT_HISTORICAL =
+ "Pipe {}@{}:启动提取 historical TsFile 失败,storage engine 尚未就绪。"
+ + "稍后将重试。";
+ public static final String PIPE_FAILED_TO_UNPIN_SKIPPED_HISTORICAL_TSFILERESOURCE =
+ "Pipe {}@{}:unpin 已跳过的 historical TsFileResource 失败,原始路径:{}";
+ public static final String PIPE_FAILED_TO_UNPIN_TSFILERESOURCE_AFTER_CREATING =
+ "Pipe {}@{}:创建 event 后 unpin TsFileResource 失败,原始路径:{}";
+ public static final String PIPE_FAILED_TO_UNPIN_TSFILERESOURCE_AFTER_DROPPING =
+ "Pipe {}@{}:删除 pipe 后 unpin TsFileResource 失败,原始路径:{}";
+ public static final String PIPE_FINISH_TO_EXTRACT_DELETIONS_EXTRACT_DELETIONS =
+ "Pipe {}@{}:finish to extract deletions, extract deletions count {}/{}, took {} ms";
+ public static final String PIPE_FINISH_TO_EXTRACT_HISTORICAL_TSFILE_EXTRACTED =
+ "Pipe {}@{}:finish to extract historical TsFile, extracted sequence file count {}/{}, "
+ + "extracted unsequence file count {}/{}, extracted file count {}/{}, took {} ms";
+ public static final String PIPE_FINISH_TO_SORT_ALL_EXTRACTED_RESOURCES =
+ "Pipe {}@{}:finish to sort all extracted resources, took {} ms";
+ public static final String PIPE_HISTORICAL_DATA_EXTRACTION_TIME_RANGE_START =
+ "Pipe {}@{}:historical data extraction time range, start time {}({}), end time {}({}), "
+ + "sloppy pattern {}, sloppy time range {}, should transfer mod file {}, username: {}, "
+ + "skip if no privileges: {}, is forwarding pipe requests: {}";
+ public static final String PIPE_IS_SET_TO_FALSE_USE_HEARTBEAT =
+ "Pipe:'{}' ('{}') is set to false, use heartbeat realtime source.";
+ public static final String PIPE_ON_DATA_REGION_SKIP_COMMIT_OF =
+ "Pipe {} on data region {} skip commit of event {} because it was flushed prematurely.";
+ public static final String PIPE_REALTIME_DATA_REGION_SOURCE_IS_INITIALIZED =
+ "Pipe {}@{}:realtime data region source is initialized with parameters: {}.";
+ public static final String PIPE_RESOURCE_MEETS_MAYTSFILECONTAINUNPROCESSEDDATA_CONDITION_EXTRACT =
+ "Pipe {}@{}:resource {} meets mayTsFileContainUnprocessedData condition, extractor "
+ + "progressIndex: {}, resource ProgressIndex: {}";
+ public static final String PIPE_SET_WATERMARK_INJECTOR_WITH_INTERVAL_MS =
+ "Pipe {}@{}:Set watermark injector with interval {} ms.";
+ public static final String PIPE_SKIP_HISTORICAL_TSFILE_BECAUSE_REALTIME_SOURCE =
+ "Pipe {}@{}:skip historical tsfile {} because realtime source in current task {} has "
+ + "already captured it.";
+ public static final String PIPE_SNAPSHOT_MODE_IS_ENABLED_USE_HEARTBEAT =
+ "Pipe:快照模式已启用,使用 heartbeat 实时 source。";
+ public static final String PIPE_STARTED_HISTORICAL_SOURCE_AND_REALTIME_SOURCE =
+ "Pipe {}@{}:在 {} ms 内成功启动 historical source {} and realtime source {}。";
+ public static final String PIPE_STARTING_HISTORICAL_SOURCE_AND_REALTIME_SOURCE =
+ "Pipe {}@{}:Starting historical source {} and realtime source {}.";
+ public static final String PIPE_START_HISTORICAL_SOURCE_AND_REALTIME_SOURCE =
+ "Pipe {}@{}:Start historical source {} and realtime source {} 出错。";
+ public static final String PIPE_START_TO_EXTRACT_DELETIONS = "Pipe {}@{}:开始提取 deletions";
+ public static final String PIPE_START_TO_EXTRACT_HISTORICAL_TSFILE_ORIGINAL =
+ "Pipe {}@{}:开始提取 historical TsFile, original sequence file count {}, original unSequence "
+ + "file count {}, start progress index {}";
+ public static final String PIPE_START_TO_FLUSH_DATA_REGION = "Pipe {}@{}:开始刷新 data region";
+ public static final String PIPE_START_TO_SORT_ALL_EXTRACTED_RESOURCES =
+ "Pipe {}@{}:开始排序 all extracted resources";
+ public static final String PIPE_TASK_CANNOTUSETABLETANYMORE_FOR_TSFILE_THE_MEMORY =
+ "Pipe task {}@{} canNotUseTabletAnyMore for tsFile {}: The memory usage of the insert "
+ + "node {} has reached the dangerous threshold of single pipe {}, event count: {}";
+ public static final String PIPE_UNEXPECTED_PROGRESSINDEX_TYPE_FALLBACK_TO_ORIGIN =
+ "Pipe {}@{}:unexpected ProgressIndex type {}, fallback to origin {}.";
+ public static final String PIPE_UNSUPPORTED_SOURCE_REALTIME_MODE_CREATE_A =
+ "Pipe:不支持的 source realtime mode: {}, create a hybrid source。";
+ public static final String PROCESSOR_INTERRUPTED = "处理器被中断";
+ public static final String PROCESSOR_STOPPED = "处理器已停止";
+ public static final String SET_FOR_HISTORICAL_DELETION_EVENT =
+ "[{}]Set {} for historical deletion event {}";
+ public static final String SET_FOR_HISTORICAL_EVENT = "[{}]Set {} for historical event {}";
+ public static final String SET_FOR_REALTIME_EVENT = "[{}]Set {} for realtime event {}";
+ public static final String SOURCES_FILTERED_BY_DATABASE_AND_TABLE_IS =
+ "Sources filtered by database and table 为空 when matching sources for table model event.";
+ public static final String SOURCES_FILTERED_BY_DEVICE_IS_NULL_WHEN =
+ "Sources filtered by device 为空 when matching sources for tree model event.";
+ public static final String TAKE_SNAPSHOT_ERROR = "Take snapshot error: {}";
+ public static final String THE_ASSIGNER_QUEUE_CONTENT_HAS_EXCEEDED_HALF =
+ "The assigner queue content has exceeded half, it may be stuck and may block insertion. "
+ + "regionId: {}, capacity: {}, bufferSize: {}";
+ public static final String THE_PIPE_CANNOT_EXTRACT_TABLE_MODEL_DATA =
+ "The pipe cannot extract table model data when sql dialect is set to tree.";
+ public static final String THE_PIPE_CANNOT_EXTRACT_TREE_MODEL_DATA =
+ "The pipe cannot extract tree model data when sql dialect is set to table.";
+ public static final String THE_PIPE_CANNOT_TRANSFER_DATA_WHEN_DATA =
+ "The pipe cannot transfer data when data region is using ratis consensus.";
+ public static final String THE_REFERENCE_COUNT_OF_THE_EVENT_CANNOT =
+ "The reference count of the event {} cannot be increased, skipping it.";
+ public static final String THE_REFERENCE_COUNT_OF_THE_REALTIME_EVENT =
+ "The reference count of the realtime event {} cannot be increased, skipping it.";
+ public static final String TIMED_OUT_WAITING_FOR_PROCESSOR_TO_STOP =
+ "Timed out waiting for processor to stop";
+ public static final String TSFILEEPOCH_NOT_FOUND_FOR_TSFILE_CREATING_A =
+ "TsFileEpoch not found for TsFile {}, creating a new one";
+ public static final String WHEN_IS_SET_TO_FALSE_SPECIFYING_AND =
+ "When '{}' ('{}') is set to false, specifying {} and {} is invalid.";
+ public static final String WHEN_IS_SET_TO_TRUE_SPECIFYING_AND =
+ "When '{}' ('{}', '{}', '{}') is set to true, specifying {} and {} is invalid.";
+ public static final String WHEN_OR_IS_SPECIFIED_SPECIFYING_AND_IS =
+ "When {}, {}, {} or {} is specified, specifying {}, {}, {}, {}, {} and {} is invalid.";
+
+ // ===================== SINK =====================
+
+ public static final String ACQUIRE_IOPCITEMMGT_SUCCESSFULLY_INTERFACE_ADDRESS =
+ "成功获取 IOPCItemMgt! Interface address: {}";
+ public static final String ACQUIRE_IOPCSYNCIO_SUCCESSFULLY_INTERFACE_ADDRESS =
+ "成功获取 IOPCSyncIO! Interface address: {}";
+ public static final String ADDED_EVENT_TO_RETRY_QUEUE = "已将 event {} 添加到 retry queue";
+ public static final String BATCH_ID_CREATE_BATCH_DIR_SUCCESSFULLY_BATCH =
+ "批次 id = {}:创建 batch dir successfully, batch file dir = {}.";
+ public static final String BATCH_ID_DELETE_THE_TSFILE_AFTER_FAILED =
+ "批次 id = {}:{} delete the tsfile {} after failed to write tablets into {}. {}";
+ public static final String BATCH_ID_FAILED_TO_BUILD_THE_TABLE =
+ "批次 id = {}:构建 the table model TSFile. Please check whether the written Tablet has time "
+ + "overlap and whether the Table Schema is correct 失败。";
+ public static final String BATCH_ID_FAILED_TO_CLOSE_THE_TSFILE =
+ "批次 id = {}:关闭 the tsfile {} after failed to write tablets into 失败,原因:{}";
+ public static final String BATCH_ID_FAILED_TO_CLOSE_THE_TSFILE_1 =
+ "批次 id = {}:关闭 the tsfile {} when trying to close batch 失败,原因:{}";
+ public static final String BATCH_ID_FAILED_TO_CREATE_BATCH_FILE =
+ "批次 id = {}:创建 batch file dir {} 失败。";
+ public static final String BATCH_ID_FAILED_TO_DELETE_THE_TSFILE =
+ "批次 id = {}:删除 the tsfile {} when trying to close batch 失败,原因:{}";
+ public static final String BATCH_ID_FAILED_TO_WRITE_TABLETS_INTO =
+ "批次 id = {}:写入 tablets into tsfile 失败,原因:{}";
+ public static final String BATCH_ID_SEAL_TSFILE_SUCCESSFULLY = "批次 id = {}:成功封存 tsfile {}。";
+ public static final String BATCH_ID_UNSUPPORTED_EVENT_TYPE_WHEN_CONSTRUCTING =
+ "批次 id = {}:不支持的 event {} type {} when constructing tsfile batch";
+ public static final String CANNOT_INCREASE_REFERENCE_COUNT_FOR_EVENT_IGNORE =
+ "无法增加 reference count for event: {}, ignore it in batch";
+ public static final String CANNOT_SERIALIZE_BOTH_TABLET_AND_STATEMENT_ARE =
+ "Cannot serialize: both tablet and statement are null";
+ public static final String CERTIFICATE_DIRECTORY_IS_PLEASE_MOVE_CERTIFICATES_FROM =
+ "Certificate directory is: {}, Please move certificates from the reject dir to the "
+ + "trusted directory to allow encrypted access";
+ public static final String CLIENT_HAS_BEEN_RETURNED_TO_THE_POOL =
+ "Client has been returned to the pool. Current handler status is {}. Will not transfer {}.";
+ public static final String CLOSED_ASYNCPIPEDATATRANSFERSERVICECLIENTMANAGER_FOR_RECEIVER_ATTRIBUTES =
+ "已关闭 AsyncPipeDataTransferServiceClientManager for receiver attributes: {}";
+ public static final String CREATE_GROUP_SUCCESSFULLY_SERVER_HANDLE_UPDATE_RATE =
+ "创建 group successfully! Server handle: {}, update rate: {} ms";
+ public static final String DELETENODETRANSFER_NO_EVENT_SUCCESSFULLY_PROCESSED =
+ "DeleteNodeTransfer: no.{} event successfully processed!";
+ public static final String DESERIALIZE_PIPEDATA_ERROR_BECAUSE_UNKNOWN_TYPE =
+ "Deserialize PipeData error because Unknown type ";
+ public static final String DESERIALIZE_PIPEDATA_ERROR_BECAUSE_UNKNOWN_TYPE_1 =
+ "Deserialize PipeData error because Unknown type {}.";
+ public static final String ERROR_GETTING_OPC_CLIENT = "Error getting opc client: ";
+ public static final String ERROR_PROGID_IS_INVALID_OR_UNREGISTERED_HRESULT =
+ "Error: ProgID is invalid or unregistered, (HRESULT=0x";
+ public static final String ERROR_RUNNING_OPC_CLIENT = "Error running opc client: ";
+ public static final String EXCEPTION_OCCURRED_WHEN_PIPETABLEMODELTSFILEBUILDERV2_WRITING_TABLETS_TO =
+ "PipeTableModelTsFileBuilderV2 writing tablets to tsfile, use fallback tsfile builder: "
+ + "{} 时发生异常";
+ public static final String EXCEPTION_OCCURRED_WHEN_PIPETREEMODELTSFILEBUILDERV2_WRITING_TABLETS_TO =
+ "PipeTreeModelTsFileBuilderV2 writing tablets to tsfile, use fallback tsfile builder: {} "
+ + "时发生异常";
+ public static final String EXECUTE_STATEMENT_TO_DATABASE_SKIP_BECAUSE_NO =
+ "Execute statement {} to database {}, skip because no permission.";
+ public static final String FAILED_TO_ACQUIRE_IOPCITEMMGT_ERROR_CODE_0X =
+ "获取 IOPCItemMgt, error code: 0x 失败";
+ public static final String FAILED_TO_ACQUIRE_IOPCSYNCIO_ERROR_CODE_0X =
+ "获取 IOPCSyncIO, error code: 0x 失败";
+ public static final String FAILED_TO_ADD_ITEM = "add item 失败";
+ public static final String FAILED_TO_ADD_ITEM_WIN_ERROR_CODE = "add item, win error code: 0x 失败";
+ public static final String FAILED_TO_ADJUST_TIMEOUT_WHEN_FAILED_TO =
+ "adjust timeout when failed to transfer file 失败。";
+ public static final String FAILED_TO_BORROW_CLIENT_FOR_CACHED_LEADER =
+ "borrow client {}:{} for cached leader 失败。";
+ public static final String FAILED_TO_BUILD_AND_STARTUP_OPCUASERVER =
+ "构建 and startup OpcUaServer 失败";
+ public static final String FAILED_TO_CLOSE_ASYNCPIPEDATATRANSFERSERVICECLIENTMANAGER_FOR_RECEIVER_ATTRIBUTE =
+ "关闭 AsyncPipeDataTransferServiceClientManager for receiver attributes: {} 失败";
+ public static final String FAILED_TO_CLOSE_CLIENT_AFTER_HANDSHAKE_FAILURE =
+ "关闭 client {}:{} after handshake failure when the manager is closed 失败。";
+ public static final String FAILED_TO_CLOSE_CLIENT_MANAGER = "关闭 client manager 失败。";
+ public static final String FAILED_TO_CLOSE_FILE_READER_OR_DELETE =
+ "关闭 file reader or delete tsFile when failed to transfer file 失败。";
+ public static final String FAILED_TO_CLOSE_FILE_READER_OR_DELETE_1 =
+ "关闭 file reader or delete tsFile when successfully transferred file 失败。";
+ public static final String FAILED_TO_CLOSE_FILE_READER_WHEN_SUCCESSFULLY =
+ "关闭 file reader when successfully transferred mod file 失败。";
+ public static final String FAILED_TO_CLOSE_OR_INVALIDATE_CLIENT_WHEN =
+ "关闭 or invalidate client when connector is closed. Client: {}, Exception: {} 失败";
+ public static final String FAILED_TO_CLOSE_TRUSTLISTMANAGER_BECAUSE =
+ "关闭 trustListManager 失败,原因:{}.";
+ public static final String FAILED_TO_CONNECT_TO_SERVER_ERROR_CODE =
+ "连接 to server, error code: 0x 失败";
+ public static final String FAILED_TO_CONVERT_STATEMENT_TO_TABLET = "转换 statement to tablet 失败。";
+ public static final String FAILED_TO_CONVERT_STATEMENT_TO_TABLET_FOR =
+ "转换 statement to tablet for serialization 失败";
+ public static final String FAILED_TO_CREATE_GROUP_ERROR_CODE_0X = "创建 group,error code: 0x 失败";
+ public static final String FAILED_TO_CREATE_NODES_AFTER_TRANSFER_DATA =
+ "创建 nodes after transfer data value, creation status: 失败";
+ public static final String FAILED_TO_DELETE_BATCH_FILE_THIS_FILE =
+ "删除 batch file {}, this file should be deleted manually later 失败";
+ public static final String FAILED_TO_GET_THE_SIZE_OF_PIPETRANSFERBATCHREQBUILDER =
+ "获取 the size of PipeTransferBatchReqBuilder, return 0. Exception: {} 失败";
+ public static final String FAILED_TO_HANDSHAKE = "Failed to handshake.";
+ public static final String FAILED_TO_LOG_ERROR_WHEN_FAILED_TO =
+ "log error when failed to transfer file 失败。";
+ public static final String FAILED_TO_PUSH_VALUE_CHANGE_TO_CLIENT =
+ "push value change to client, nodeId={} 失败";
+ public static final String FAILED_TO_SEND_INITIAL_VALUE_TO_NEW =
+ "发送 initial value to new subscription, nodeId={} 失败";
+ public static final String FAILED_TO_SERIALIZE_PROGRESS_INDEX = "序列化 progress index {} 失败";
+ public static final String FAILED_TO_SHUTDOWN_EXECUTOR = "关闭 executor {} 失败。";
+ public static final String FAILED_TO_TRANSFER_DATAVALUE = "传输 dataValue 失败";
+ public static final String FAILED_TO_TRANSFER_DATAVALUE_AFTER_SUCCESSFULLY_CREATED =
+ "传输 dataValue after successfully created nodes 失败";
+ public static final String FAILED_TO_TRANSFER_PIPEDELETENODEEVENT_COMMITTER_KEY_REPLICATE =
+ "传输 PipeDeleteNodeEvent {} (committer key={}, replicate index={}) 失败。";
+ public static final String FAILED_TO_TRANSFER_TABLETINSERTIONEVENT_COMMITTER_KEY_REPLICATE =
+ "传输 TabletInsertionEvent {} (committer key={}, replicate index={}) 失败。";
+ public static final String FAILED_TO_TRANSFER_TSFILE_BATCH = "传输 tsfile batch ({}) 失败。";
+ public static final String FAILED_TO_TRANSFER_TSFILE_EVENT_ASYNCHRONOUSLY =
+ "传输 tsfile event {} asynchronously 失败。";
+ public static final String FAILED_TO_UPDATE_LEADER_CACHE_FOR_DEVICE =
+ "更新 leader cache for device {} with endpoint {}:{} 失败。";
+ public static final String FAILED_TO_WRITE = "Failed to write ";
+ public static final String FAILED_TO_WRITE_WIN_ERROR_CODE_0X =
+ "Failed to write, win error code: 0x";
+ public static final String GENERATE_STATEMENT_FROM_TABLET_ERROR = "从 tablet {} 生成 Statement 出错。";
+ public static final String GOT_AN_ERROR_FROM = "Got an error \"{}\" from {}:{}.";
+ public static final String GOT_AN_ERROR_FROM_AN_UNKNOWN_CLIENT =
+ "Got an error \"{}\" from an unknown client.";
+ public static final String HANDSHAKE_SUCCESSFULLY_WITH_RECEIVER =
+ "握手 successfully with receiver {}:{}.";
+ public static final String ILLEGAL_STATE_WHEN_RETURN_THE_CLIENT_TO =
+ "非法的 state when return the client to object pool, maybe the pool is already cleared. "
+ + "Will ignore。";
+ public static final String INSERTNODETRANSFER_NO_EVENT_SUCCESSFULLY_PROCESSED =
+ "InsertNodeTransfer: no.{} event successfully processed!";
+ public static final String INTERRUPTED_WHILE_WAITING_FOR_HANDSHAKE_RESPONSE =
+ "waiting for handshake response 时被中断。";
+ public static final String IOTCONSENSUSV2ASYNCCONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT =
+ "IoTConsensusV2AsyncConnector 不支持 transferring generic event: {}.";
+ public static final String IOTCONSENSUSV2ASYNCCONNECTOR_DOES_NOT_SUPPORT_TRANSFER_GENERIC_EVENT =
+ "IoTConsensusV2AsyncConnector 不支持 transfer generic event: {}.";
+ public static final String IOTCONSENSUSV2ASYNCCONNECTOR_ONLY_SUPPORT_PIPETSFILEINSERTIONEVENT_CURRENT_EVEN =
+ "IoTConsensusV2AsyncConnector only support PipeTsFileInsertionEvent. Current event: {}.";
+ public static final String IOTCONSENSUSV2CONNECTOR_TRANSFERBUFFER_QUEUE_OFFER_IS_INTERRUPTED =
+ "IoTConsensusV2Connector transferBuffer queue offer is interrupted.";
+ public static final String IOTCONSENSUSV2TRANSFERBATCHREQBUILDER_THE_MAX_BATCH_SIZE_IS_ADJUSTED =
+ "IoTConsensusV2TransferBatchReqBuilder: the max batch size is adjusted from {} to {} due "
+ + "to the memory restriction";
+ public static final String IOTCONSENSUSV2_CONSENSUSGROUP_EVENT_NOT_FOUND_IN_TRANSFERBUFFER =
+ "IoTConsensusV2-ConsensusGroup-{}: event-{} not found in transferBuffer, skip removing. "
+ + "queue size = {}";
+ public static final String IOTCONSENSUSV2_CONSENSUSGROUP_EVENT_REPLICATE_INDEX_TRANSFER_FAILED =
+ "IoTConsensusV2-ConsensusGroup-{}: Event {} replicate index {} transfer failed, added to "
+ + "retry queue failed, this event will be ignored.";
+ public static final String IOTCONSENSUSV2_CONSENSUSGROUP_EVENT_REPLICATE_INDEX_TRANSFER_FAILED_1 =
+ "IoTConsensusV2-ConsensusGroup-{}: Event {} replicate index {} transfer failed, will be "
+ + "added to retry queue.";
+ public static final String IOTCONSENSUSV2_CONSENSUSGROUP_NO_EVENT_ADDED_TO_CONNECTOR =
+ "IoTConsensusV2-ConsensusGroup-{}: no.{} event-{} added to connector buffer";
+ public static final String IOTCONSENSUSV2_CONSENSUSGROUP_ONE_EVENT_SUCCESSFULLY_RECEIVED_BY =
+ "IoTConsensusV2-ConsensusGroup-{}: one event-{} successfully received by the follower, "
+ + "will be removed from queue, queue size = {}, limit size = {}";
+ public static final String IOTCONSENSUSV2_CONSENSUSGROUP_RETRYEVENTQUEUE_IS_NOT_EMPTY_AFTER =
+ "IoTConsensusV2-ConsensusGroup-{}: retryEventQueue is not empty after 20 seconds. "
+ + "retryQueue size: {}";
+ public static final String IOTCONSENSUSV2_CONSENSUSGROUP_RETRY_WITH_INTERVAL_FOR_INDEX =
+ "IoTConsensusV2-ConsensusGroup-{}: retry with interval {} for index {} {}";
+ public static final String IOTCONSENSUSV2_CONSENSUSGROUP_TRY_TO_REMOVE_EVENT_AFTER =
+ "IoTConsensusV2-ConsensusGroup-{}: try to remove event-{} after "
+ + "iotConsensusV2AsyncConnector being closed. Ignore it.";
+ public static final String IOTCONSENSUSV2_FAILED_TO_CLOSE_FILE_READER_WHEN =
+ "IoTConsensusV2-{}:关闭 file reader when failed to transfer file 失败。";
+ public static final String IOTCONSENSUSV2_FAILED_TO_CLOSE_FILE_READER_WHEN_1 =
+ "IoTConsensusV2-{}:关闭 file reader when successfully transferred file 失败。";
+ public static final String IOTCONSENSUSV2_FAILED_TO_CLOSE_FILE_READER_WHEN_2 =
+ "IoTConsensusV2-{}:关闭 file reader when successfully transferred mod file 失败。";
+ public static final String IOTCONSENSUSV2_FAILED_TO_TRANSFER_TABLETINSERTIONEVENT_BATCH_TOTAL =
+ "IoTConsensusV2:传输 TabletInsertionEvent batch. Total failed events: {}, related pipe "
+ + "names: {} 失败";
+ public static final String IOTCONSENSUSV2_FAILED_TO_TRANSFER_TSFILEINSERTIONEVENT_COMMITTER_KEY =
+ "IoTConsensusV2-{}:传输 TsFileInsertionEvent {} (committer key {}, replicate index {}) 失败。";
+ public static final String IOTCONSENSUSV2_REDIRECT_FILE_POSITION_TO =
+ "IoTConsensusV2-{}:Redirect file position to {}.";
+ public static final String IOTCONSENSUSV2_SUCCESSFULLY_TRANSFERRED_FILE_COMMITTER_KEY_REPLICATE =
+ "IoTConsensusV2-{}:成功 transferred file {} (committer key={}, replicate index={})。";
+ public static final String IOTDBCDCCONNECTOR_ONLY_SUPPORT_PIPEINSERTNODETABLETINSERTIONEVENT_AND_PIPERAWTAB =
+ "IoTDBCDCConnector only support PipeInsertNodeTabletInsertionEvent and "
+ + "PipeRawTabletInsertionEvent.";
+ public static final String IOTDBDATAREGIONAIRGAPCONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT =
+ "IoTDBDataRegionAirGapConnector 不支持 transferring generic event: {}.";
+ public static final String IOTDBDATAREGIONAIRGAPCONNECTOR_ONLY_SUPPORT_PIPEINSERTNODETABLETINSERTIONEVENT_A =
+ "IoTDBDataRegionAirGapConnector only support PipeInsertNodeTabletInsertionEvent and "
+ + "PipeRawTabletInsertionEvent. Ignore {}.";
+ public static final String IOTDBDATAREGIONAIRGAPCONNECTOR_ONLY_SUPPORT_PIPETSFILEINSERTIONEVENT_IGNORE =
+ "IoTDBDataRegionAirGapConnector only support PipeTsFileInsertionEvent. Ignore {}.";
+ public static final String IOTDBLEGACYPIPECONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT =
+ "IoTDBLegacyPipeConnector 不支持 transferring generic event: {}.";
+ public static final String IOTDBLEGACYPIPECONNECTOR_ONLY_SUPPORT_PIPEINSERTNODEINSERTIONEVENT_AND_PIPETABLE =
+ "IoTDBLegacyPipeConnector only support PipeInsertNodeInsertionEvent and "
+ + "PipeTabletInsertionEvent.";
+ public static final String IOTDBLEGACYPIPECONNECTOR_ONLY_SUPPORT_PIPETSFILEINSERTIONEVENT =
+ "IoTDBLegacyPipeConnector only support PipeTsFileInsertionEvent.";
+ public static final String IOTDBSCHEMAREGIONAIRGAPSINK_CAN_T_TRANSFER_TABLETINSERTIONEVENT =
+ "IoTDBSchemaRegionAirGapSink can't transfer TabletInsertionEvent.";
+ public static final String IOTDBSCHEMAREGIONAIRGAPSINK_CAN_T_TRANSFER_TSFILEINSERTIONEVENT =
+ "IoTDBSchemaRegionAirGapSink can't transfer TsFileInsertionEvent.";
+ public static final String IOTDBSCHEMAREGIONAIRGAPSINK_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT =
+ "IoTDBSchemaRegionAirGapSink 不支持 transferring generic event: {}.";
+ public static final String IOTDBSCHEMAREGIONCONNECTOR_CAN_T_TRANSFER_TABLETINSERTIONEVENT =
+ "IoTDBSchemaRegionConnector can't transfer TabletInsertionEvent.";
+ public static final String IOTDBSCHEMAREGIONCONNECTOR_CAN_T_TRANSFER_TSFILEINSERTIONEVENT =
+ "IoTDBSchemaRegionConnector can't transfer TsFileInsertionEvent.";
+ public static final String IOTDBSCHEMAREGIONCONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT =
+ "IoTDBSchemaRegionConnector 不支持 transferring generic event: {}.";
+ public static final String IOTDBTHRIFTASYNCCONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT =
+ "IoTDBThriftAsyncConnector 不支持 transferring generic event: {}.";
+ public static final String IOTDBTHRIFTASYNCCONNECTOR_DOES_NOT_SUPPORT_TRANSFER_GENERIC_EVENT =
+ "IoTDBThriftAsyncConnector 不支持 transfer generic event: {}.";
+ public static final String IOTDBTHRIFTASYNCCONNECTOR_ONLY_SUPPORT_PIPEINSERTNODETABLETINSERTIONEVENT_AND_PI =
+ "IoTDBThriftAsyncConnector only support PipeInsertNodeTabletInsertionEvent and "
+ + "PipeRawTabletInsertionEvent. Current event: {}.";
+ public static final String IOTDBTHRIFTASYNCCONNECTOR_ONLY_SUPPORT_PIPETSFILEINSERTIONEVENT_CURRENT_EVENT =
+ "IoTDBThriftAsyncConnector only support PipeTsFileInsertionEvent. Current event: {}.";
+ public static final String IOTDBTHRIFTSYNCCONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT =
+ "IoTDBThriftSyncConnector 不支持 transferring generic event: {}.";
+ public static final String IOTDBTHRIFTSYNCCONNECTOR_ONLY_SUPPORT_PIPEINSERTNODETABLETINSERTIONEVENT_AND_PIP =
+ "IoTDBThriftSyncConnector only support PipeInsertNodeTabletInsertionEvent and "
+ + "PipeRawTabletInsertionEvent. Ignore {}.";
+ public static final String IOTDBTHRIFTSYNCCONNECTOR_ONLY_SUPPORT_PIPETSFILEINSERTIONEVENT_IGNORE =
+ "IoTDBThriftSyncConnector only support PipeTsFileInsertionEvent. Ignore {}.";
+ public static final String LEADERCACHEMANAGER_ALLOCATEDMEMORYBLOCK_HAS_EXPANDED_FROM_TO =
+ "LeaderCacheManager.allocatedMemoryBlock has expanded from {} to {}.";
+ public static final String LEADERCACHEMANAGER_ALLOCATEDMEMORYBLOCK_HAS_SHRUNK_FROM_TO =
+ "LeaderCacheManager.allocatedMemoryBlock has shrunk from {} to {}.";
+ public static final String LOADING_KEYSTORE_AT = "正在从 {} 加载 KeyStore";
+ public static final String LOADING_KEYSTORE_AT_1 = "正在从 {}. 加载 KeyStore";
+ public static final String LOAD_KEYSTORE_FAILED_THE_EXISTING_KEYSTORE_MAY =
+ "Load keyStore failed, the existing keyStore may be stale, re-constructing...";
+ public static final String NO_OPC_CLIENT_OR_SERVER_IS_SPECIFIED =
+ "No OPC client or server is specified when transferring tablet";
+ public static final String OPC_DA_SINK_MUST_RUN_ON_WINDOWS = "opc-da-sink 必须在 Windows 系统上运行。";
+ public static final String PIPETABLEMODETSFILEBUILDERV2_DOES_NOT_SUPPORT_TREE_MODEL_TABLET =
+ "PipeTableModeTsFileBuilderV2 不支持 tree model tablet to build TSFile";
+ public static final String PIPETABLEMODETSFILEBUILDER_DOES_NOT_SUPPORT_TREE_MODEL_TABLET =
+ "PipeTableModeTsFileBuilder 不支持 tree model tablet to build TSFile";
+ public static final String PIPETREEMODELTSFILEBUILDERV2_DOES_NOT_SUPPORT_TABLE_MODEL_TABLET =
+ "PipeTreeModelTsFileBuilderV2 不支持 table model tablet to build TSFile";
+ public static final String PIPETREEMODELTSFILEBUILDER_DOES_NOT_SUPPORT_TABLE_MODEL_TABLET =
+ "PipeTreeModelTsFileBuilder 不支持 table model tablet to build TSFile";
+ public static final String POLLED_EVENT_FROM_RETRY_QUEUE = "Polled event {} from retry queue.";
+ public static final String RECEIVED_AN_ERROR_MESSAGE_FROM =
+ "Received an error message {} from {}:{}";
+ public static final String RECEIVED_AN_UNKNOWN_MESSAGE_FROM =
+ "Received an unknown message {} from {}:{}";
+ public static final String RECEIVED_A_ACK_MESSAGE_FROM = "Received a ack message from {}:{}";
+ public static final String RECEIVED_A_BIND_MESSAGE_FROM = "Received a bind message from {}:{}";
+ public static final String REDIRECT_FILE_POSITION_TO = "Redirect file position to {}.";
+ public static final String REDIRECT_TO_POSITION_IN_TRANSFERRING_TSFILE =
+ "Redirect to position {} in transferring tsFile {}.";
+ public static final String SECURITY_DIR = "security dir: {}";
+ public static final String SECURITY_PKI_DIR = "security pki dir: {}";
+ public static final String SUCCESSFULLY_ADDED_ITEM = "成功 added item {}。";
+ public static final String SUCCESSFULLY_CONVERTED_PROGID_TO_CLSID =
+ "成功 converted progID {} to CLSID: {{}}";
+ public static final String SUCCESSFULLY_SHUTDOWN_EXECUTOR = "成功 shutdown executor {}。";
+ public static final String SUCCESSFULLY_TRANSFERRED_DELETION_EVENT =
+ "成功 transferred deletion event {}。";
+ public static final String SUCCESSFULLY_TRANSFERRED_FILE = "成功 transferred file {}。";
+ public static final String SUCCESSFULLY_TRANSFERRED_FILE_AND =
+ "成功 transferred file {}, {} and {}。";
+ public static final String SUCCESSFULLY_TRANSFERRED_FILE_BATCHED_TABLEINSERTIONEVENTS_REFERENCE_COUNT =
+ "成功 transferred file {} (batched TableInsertionEvents, reference count={})。";
+ public static final String SUCCESSFULLY_TRANSFERRED_FILE_COMMITTER_KEY_COMMIT_ID =
+ "成功 transferred file {} (committer key={}, commit id={}, reference count={})。";
+ public static final String SUCCESSFULLY_TRANSFERRED_SCHEMA_EVENT =
+ "成功 transferred schema event {}。";
+ public static final String SUCCESSFULLY_TRANSFERRED_SCHEMA_REGION_SNAPSHOT_AND =
+ "成功 transferred schema region snapshot {}, {} and {}。";
+ public static final String THE_BATCH_SIZE_LIMIT_HAS_EXPANDED_FROM =
+ "The batch size limit has expanded from {} to {}.";
+ public static final String THE_BATCH_SIZE_LIMIT_HAS_SHRUNK_FROM =
+ "The batch size limit has shrunk from {} to {}.";
+ public static final String THE_DEFAULT_QUALITY_CAN_ONLY_BE_GOOD =
+ "The default quality can only be 'GOOD', 'BAD' or 'UNCERTAIN'.";
+ public static final String THE_EVENT_ACK_IS_NOT_FOUND = "The event ack {} is not found.";
+ public static final String THE_EVENT_CAN_T_BE_TRANSFERRED_TO =
+ "The event {} can't be transferred to client, it will be retried later.";
+ public static final String THE_EVENT_IN_ERROR_IS_NOT_FOUND =
+ "The event in error {} is not found.";
+ public static final String THE_EVENT_POLLED_FROM_THE_QUEUE_IS =
+ "The event polled from the queue is not the same as the event peeked from the queue. "
+ + "Peeked event: {}, polled event: {}.";
+ public static final String THE_FILE_IS_NOT_FOUND_MAY_ALREADY =
+ "The file {} is not found, may already be deleted.";
+ public static final String THE_PIPE_WAS_DROPPED_SO_THE_EVENT =
+ "The pipe {} was dropped so the event ack {} will be ignored.";
+ public static final String THE_PIPE_WAS_DROPPED_SO_THE_EVENT_1 =
+ "The pipe {} was dropped so the event in error {} will be ignored.";
+ public static final String THE_PIPE_WAS_DROPPED_SO_THE_EVENT_2 =
+ "The pipe {} was dropped so the event {} will be dropped.";
+ public static final String THE_QUALITY_VALUE_ONLY_SUPPORTS_BOOLEAN_TYPE =
+ "The quality value only supports boolean type, while true == GOOD and false == BAD.";
+ public static final String THE_SCHEMA_REGION_AIR_GAP_CONNECTOR_DOES =
+ "The schema region air gap connector 不支持 transferring single file piece bytes.";
+ public static final String THE_SCHEMA_REGION_CONNECTOR_DOES_NOT_SUPPORT =
+ "The schema region connector 不支持 transferring single file piece req.";
+ public static final String THE_SECURITY_POLICY_CANNOT_BE_EMPTY =
+ "The security policy cannot be empty.";
+ public static final String THE_SECURITY_POLICY_CAN_ONLY_BE_NONE =
+ "The security policy can only be 'None', 'Basic128Rsa15', 'Basic256', 'Basic256Sha256', "
+ + "'Aes128_Sha256_RsaOaep' or 'Aes256_Sha256_RsaPss'.";
+ public static final String THE_SEGMENTS_OF_TABLETS_MUST_EXIST =
+ "The segments of tablets must exist";
+ public static final String THE_TABLET_OF_COMMITID_CAN_T_BE =
+ "The tablet of commitId: {} can't be parsed by client, it will be retried later.";
+ public static final String THE_TRANSFER_THREAD_IS_INTERRUPTED = "传输线程被中断。";
+ public static final String THE_WEBSOCKET_CONNECTION_FROM_CLIENT_HAS_BEEN =
+ "The websocket connection from client 已关闭!The code is {}. The reason is {}. Is it closed "
+ + "by remote? {}";
+ public static final String THE_WEBSOCKET_CONNECTION_FROM_CLIENT_HAS_BEEN_1 =
+ "The websocket connection from client {}:{} 已关闭! The code is {}. The reason is {}. Is it "
+ + "closed by remote? {}";
+ public static final String THE_WEBSOCKET_CONNECTION_FROM_CLIENT_HAS_BEEN_2 =
+ "The websocket connection from client {}:{} has been opened!";
+ public static final String THE_WEBSOCKET_CONNECTION_FROM_HAS_BEEN_CLOSED =
+ "The websocket connection from {}:{} 已关闭, but the ack message of commitId: {} is received.";
+ public static final String THE_WEBSOCKET_CONNECTION_FROM_HAS_BEEN_CLOSED_1 =
+ "The websocket connection from {}:{} 已关闭, but the error message of commitId: {} is received.";
+ public static final String THE_WEBSOCKET_SERVER_HAS_BEEN_STARTED =
+ "The websocket server {}:{} 已启动!";
+ public static final String THE_WRITTEN_TABLET_TIME_MAY_OVERLAP_OR =
+ "The written Tablet time may overlap or the Schema may be incorrect";
+ public static final String THIS_CONNECTOR_ONLY_SUPPORT_PIPEINSERTNODETABLETINSERTIONEVENT_AND_PIPERAWTABLET =
+ "This Connector only support PipeInsertNodeTabletInsertionEvent and "
+ + "PipeRawTabletInsertionEvent. Ignore {}.";
+ public static final String TIMED_OUT_WHEN_WAITING_FOR_CLIENT_HANDSHAKE =
+ "Timed out when waiting for client handshake finish.";
+ public static final String TIOTCONSENSUSV2BATCHTRANSFERRESP_IS_NULL =
+ "TIoTConsensusV2BatchTransferResp 为空";
+ public static final String TIOTCONSENSUSV2TRANSFERRESP_IS_NULL = "TIoTConsensusV2TransferResp 为空";
+ public static final String TPIPETRANSFERRESP_IS_NULL = "TPipeTransferResp 为空";
+ public static final String TRANSFER_TSFILE_EVENT_ASYNCHRONOUSLY_WAS_INTERRUPTED =
+ "Transfer tsfile event {} asynchronously was interrupted.";
+ public static final String UNABLE_TO_CREATE_SECURITY_DIR = "无法创建 security dir: ";
+ public static final String UNKNOWN_LOAD_BALANCE_STRATEGY_USE_ROUND_ROBIN =
+ "未知的 load balance strategy: {}, use round-robin strategy instead。";
+ public static final String UNSUPPORTED_BATCH_TYPE = "不支持的 batch type {}。";
+ public static final String UNSUPPORTED_BATCH_TYPE_WHEN_TRANSFERRING_TABLET_INSERTION =
+ "不支持的 batch type {} when transferring tablet insertion event。";
+ public static final String UNSUPPORTED_DATATYPE = "不支持的 dataType ";
+ public static final String UNSUPPORTED_EVENT_TYPE_WHEN_BUILDING_TRANSFER_REQUEST =
+ "不支持的 event {} type {} when building transfer request";
+ public static final String WAIT_FOR_RESOURCE_ENOUGH_FOR_SLICING_TSFILE =
+ "等待 resource enough,已等待 slicing tsfile {} for {} 秒。";
+ public static final String WEBSOCKETCONNECTOR_FAILED_TO_INCREASE_THE_REFERENCE_COUNT =
+ "WebsocketConnector failed to increase the reference count of the event. Ignore it. "
+ + "Current event: {}.";
+ public static final String WEBSOCKETCONNECTOR_ONLY_SUPPORT_PIPEINSERTNODETABLETINSERTIONEVENT_AND_PIPERAWTA =
+ "WebsocketConnector only support PipeInsertNodeTabletInsertionEvent and "
+ + "PipeRawTabletInsertionEvent. Current event: {}.";
+ public static final String WEBSOCKETCONNECTOR_ONLY_SUPPORT_PIPETSFILEINSERTIONEVENT_CURRENT_EVENT =
+ "WebsocketConnector only support PipeTsFileInsertionEvent. Current event: {}.";
+ public static final String WHEN_THE_OPC_UA_SINK_POINTS_TO =
+ "When the OPC UA sink points to an outer server, the table model data is not supported.";
+ public static final String WHEN_THE_OPC_UA_SINK_SETS_WITH =
+ "When the OPC UA sink sets 'with-quality' to true, the table model data is not supported.";
+ public static final String WRITEBACKSINK_ONLY_SUPPORT_PIPEINSERTNODETABLETINSERTIONEVENT_AND_PIPERAWTABLETI =
+ "WriteBackSink only support PipeInsertNodeTabletInsertionEvent and "
+ + "PipeRawTabletInsertionEvent. Ignore {}.";
+
+ // ===================== RECEIVER =====================
+
+ public static final String ALL_RECEIVERS_RELATED_TO_ARE_RELEASED =
+ "All Receivers related to {} are released.";
+ public static final String AUTO_CREATE_DATABASE_FAILED_BECAUSE = "自动创建 database failed because: ";
+ public static final String CREATE_DATABASE_ERROR_STATEMENT_RESULT_STATUS =
+ "创建 Database error, statement: {}, result status : {}.";
+ public static final String DATABASE_NAME_IS_UNEXPECTEDLY_NULL_FOR_LOADTSFILESTATEMENT =
+ "Database name is unexpectedly null for LoadTsFileStatement: {}. Skip data type conversion.";
+ public static final String DATABASE_NAME_IS_UNEXPECTEDLY_NULL_FOR_STATEMENT =
+ "Database name is unexpectedly null for statement: {}. Skip data type conversion.";
+ public static final String DATA_TYPE_CONVERSION_FOR_LOADTSFILESTATEMENT_IS_SUCCESSFUL =
+ "Data type conversion for LoadTsFileStatement {} is successful.";
+ public static final String DATA_TYPE_MISMATCH_DETECTED_TSSTATUS_FOR_LOADTSFILESTATEMENT =
+ "Data type mismatch detected (TSStatus: {}) for LoadTsFileStatement: {}. Start data type "
+ + "conversion.";
+ public static final String DELETE_ERROR_STATEMENT = "Delete {} error, statement: {}.";
+ public static final String DELETE_RESULT_STATUS = "Delete result status : {}.";
+ public static final String FAILED_TO_CLOSE_IOTDBAIRGAPRECEIVERAGENT_S_SERVER_SOCKET =
+ "关闭 IoTDBAirGapReceiverAgent's server socket 失败";
+ public static final String FAILED_TO_CONVERT_DATA_TYPE_FOR_LOADTSFILESTATEMENT =
+ "转换 data type for LoadTsFileStatement: {} 失败。";
+ public static final String FAILED_TO_EXECUTE_STATEMENT_AFTER_DATA_TYPE =
+ "execute statement after data type conversion 失败。";
+ public static final String FAILED_TO_HANDLE_CONFIG_CLIENT_ID_EXIT =
+ "处理 config client (id = {}) exit 失败";
+ public static final String FAIL_TO_CREATE_IOTCONSENSUSV2_RECEIVER_FILE_FOLDERS =
+ "创建 iotConsensusV2 receiver file folders allocation strategy 失败,原因:all disks of folders "
+ + "are full.";
+ public static final String FAIL_TO_CREATE_PIPE_RECEIVER_FILE_FOLDERS =
+ "创建 pipe receiver file folders allocation strategy 失败,原因:all disks of folders are full.";
+ public static final String FAIL_TO_INITIATE_FILE_BUFFER_FOLDER_ERROR =
+ "初始化 file buffer folder, Error msg: {} 失败";
+ public static final String FAIL_TO_LOAD_PIPEDATA_BECAUSE = "加载 pipeData 失败,原因:{}.";
+ public static final String FAIL_TO_RENAME_FILE_TO = "rename file {} to {} 失败";
+ public static final String INVOKE_HANDSHAKE_METHOD_FROM_CLIENT_IP =
+ "Invoke handshake method from client ip = {}";
+ public static final String INVOKE_TRANSPORTDATA_METHOD_FROM_CLIENT_IP =
+ "Invoke transportData method from client ip = {}";
+ public static final String INVOKE_TRANSPORTPIPEDATA_METHOD_FROM_CLIENT_IP =
+ "Invoke transportPipeData method from client ip = {}";
+ public static final String IOTCONSENSUSV2RECEIVER_THREAD_IS_INTERRUPTED_WHEN_WAITING_FOR =
+ "IoTConsensusV2Receiver thread is interrupted when waiting for receiver get initiated, "
+ + "may because system exit.";
+ public static final String IOTCONSENSUSV2_PIPENAME = "IoTConsensusV2-PipeName-{}:{}";
+ public static final String IOTCONSENSUSV2_PIPENAME_CURRENT_WAITING_IS_INTERRUPTED_ONSYNCEDCOMMITINDEX =
+ "IoTConsensusV2-PipeName-{}:current waiting is interrupted. onSyncedCommitIndex: {}. "
+ + "Exception: ";
+ public static final String IOTCONSENSUSV2_PIPENAME_CURRENT_WRITING_FILE_WRITER_IS =
+ "IoTConsensusV2-PipeName-{}:Current writing file writer 为空,无需关闭。";
+ public static final String IOTCONSENSUSV2_PIPENAME_CURRENT_WRITING_FILE_WRITER_WAS =
+ "IoTConsensusV2-PipeName-{}:Current writing file writer {} 已关闭.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_CLOSE_CURRENT_WRITING =
+ "IoTConsensusV2-PipeName-{}:关闭 current writing file writer {} 失败,原因:{}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_CREATE_RECEIVER_FILE =
+ "IoTConsensusV2-PipeName-{}:创建 receiver file dir {} 失败。";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_CREATE_RECEIVER_FILE_1 =
+ "IoTConsensusV2-PipeName-{}:创建 receiver file dir {}. Because parent system dir have been "
+ + "deleted due to system concurrently exit 失败。";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_CREATE_RECEIVER_FILE_2 =
+ "IoTConsensusV2-PipeName-{}:创建 receiver file dir {}. May 失败,原因:authority or dir already "
+ + "exists etc.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_CREATE_RECEIVER_TSFILEWRITER =
+ "IoTConsensusV2-PipeName-{}:创建 receiver tsFileWriter-{} file dir {} 失败";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_DELETE_BECAUSE =
+ "IoTConsensusV2-PipeName-{}:{} Failed to delete {}, because {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_GET_BASE_DIRECTORY =
+ "IoTConsensusV2-PipeName-{}:获取 base directory 失败";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_LOAD_FILE_FROM =
+ "IoTConsensusV2-PipeName-{}:加载 file {} from req {} 失败。";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_READ_TSFILE_WHEN =
+ "IoTConsensusV2-PipeName-{}:读取 TsFile when counting points: {} 失败。";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_RETURN_TSFILEWRITER =
+ "IoTConsensusV2-PipeName-{}:return tsFileWriter {} 失败。";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_SEAL_FILE_BECAUSE =
+ "IoTConsensusV2-PipeName-{}:封存 file {} 失败,原因:the file does not exist.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_SEAL_FILE_BECAUSE_1 =
+ "IoTConsensusV2-PipeName-{}:封存 file {} 失败,原因:writing file is {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_SEAL_FILE_BECAUSE_2 =
+ "IoTConsensusV2-PipeName-{}:封存 file {} 失败,原因:{}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_SEAL_FILE_FROM =
+ "IoTConsensusV2-PipeName-{}:封存 file {} from req {} 失败。";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_SEAL_FILE_STATUS =
+ "IoTConsensusV2-PipeName-{}:封存 file {}, status is {} 失败。";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_SEAL_FILE_WHEN =
+ "IoTConsensusV2-PipeName-{}:封存 file {} when check final seal file 失败,原因:the length of "
+ + "file is not correct. The original file has length {}, but receiver file has length {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_SEAL_FILE_WHEN_1 =
+ "IoTConsensusV2-PipeName-{}:封存 file {} when check non final seal 失败,原因:the length of "
+ + "file is not correct. The original file has length {}, but receiver file has length {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_FAILED_TO_WRITE_FILE_PIECE =
+ "IoTConsensusV2-PipeName-{}:写入 file piece from req {} 失败。";
+ public static final String IOTCONSENSUSV2_PIPENAME_FILE_OFFSET_RESET_REQUESTED_BY =
+ "IoTConsensusV2-PipeName-{}:File offset reset requested by receiver, response status = {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_ILLEGAL_FILE_NAME_WHEN_CHECKING =
+ "IoTConsensusV2-PipeName-{}:非法的 file name {} when checking writing file。";
+ public static final String IOTCONSENSUSV2_PIPENAME_IS_NOT_EXISTED_NO_NEED =
+ "IoTConsensusV2-PipeName-{}:{} {} 不存在,无需删除。";
+ public static final String IOTCONSENSUSV2_PIPENAME_NO_EVENT_GET_EXECUTED_AFTER =
+ "IoTConsensusV2-PipeName-{}:no.{} event get executed after awaiting timeout, current "
+ + "receiver syncIndex: {}";
+ public static final String IOTCONSENSUSV2_PIPENAME_NO_EVENT_GET_EXECUTED_BECAUSE =
+ "IoTConsensusV2-PipeName-{}:no.{} event get executed because receiver buffer's len >= "
+ + "pipeline, current receiver syncIndex {}, current buffer len {}";
+ public static final String IOTCONSENSUSV2_PIPENAME_PATH_TRAVERSAL_ATTEMPT_DETECTED_FILENAME =
+ "IoTConsensusV2-PipeName-{}:Path traversal attempt detected! Filename: {}";
+ public static final String IOTCONSENSUSV2_PIPENAME_PROCESS_NO_EVENT_SUCCESSFULLY =
+ "IoTConsensusV2-PipeName-{}:process no.{} event successfully!";
+ public static final String IOTCONSENSUSV2_PIPENAME_RECEIVED_A_DEPRECATED_REQUEST_WHICH =
+ "IoTConsensusV2-PipeName-{}:received a deprecated request-{}, which may because {}. ";
+ public static final String IOTCONSENSUSV2_PIPENAME_RECEIVER_DETECTED_AN_NEWER_PIPETASKRESTARTTIMES =
+ "IoTConsensusV2-PipeName-{}:receiver detected an newer pipeTaskRestartTimes, which "
+ + "indicates the pipe task has restarted. receiver will reset all its data.";
+ public static final String IOTCONSENSUSV2_PIPENAME_RECEIVER_DETECTED_AN_NEWER_REBOOTTIMES =
+ "IoTConsensusV2-PipeName-{}:receiver detected an newer rebootTimes, which indicates the "
+ + "leader has rebooted. receiver will reset all its data.";
+ public static final String IOTCONSENSUSV2_PIPENAME_RECEIVER_FILE_DIR_WAS_CREATED =
+ "IoTConsensusV2-PipeName-{}:Receiver file dir {} 已创建.";
+ public static final String IOTCONSENSUSV2_PIPENAME_RECEIVER_THREAD_GET_INTERRUPTED_WHEN =
+ "IoTConsensusV2-PipeName-{}:receiver thread get interrupted when exiting.";
+ public static final String IOTCONSENSUSV2_PIPENAME_SEAL_FILE_SUCCESSFULLY =
+ "IoTConsensusV2-PipeName-{}:成功封存 file {}。";
+ public static final String IOTCONSENSUSV2_PIPENAME_SEAL_FILE_WITH_MODS_SUCCESSFULLY =
+ "IoTConsensusV2-PipeName-{}:成功封存 file with mods {}。";
+ public static final String IOTCONSENSUSV2_PIPENAME_SKIP_LOAD_TSFILE_WHEN_SEALING =
+ "IoTConsensusV2-PipeName-{}:skip load tsfile-{} when sealing, because this region has "
+ + "been removed or migrated.";
+ public static final String IOTCONSENSUSV2_PIPENAME_STARTING_TO_RECEIVE_TSFILE_PIECES =
+ "IoTConsensusV2-PipeName-{}:开始接收 tsFile pieces";
+ public static final String IOTCONSENSUSV2_PIPENAME_STARTING_TO_RECEIVE_TSFILE_SEAL =
+ "IoTConsensusV2-PipeName-{}:开始接收 tsFile seal";
+ public static final String IOTCONSENSUSV2_PIPENAME_STARTING_TO_RECEIVE_TSFILE_SEAL_1 =
+ "IoTConsensusV2-PipeName-{}:开始接收 tsFile seal with mods";
+ public static final String IOTCONSENSUSV2_PIPENAME_START_TO_RECEIVE_NO_EVENT =
+ "IoTConsensusV2-PipeName-{}:开始接收 no.{} event";
+ public static final String IOTCONSENSUSV2_PIPENAME_THE_POINT_COUNT_OF_TSFILE =
+ "IoTConsensusV2-PipeName-{}:The point count of TsFile {} is not given by sender, will "
+ + "read actual point count from TsFile.";
+ public static final String IOTCONSENSUSV2_PIPENAME_TSFILEWRITER_RETURNED_SELF =
+ "IoTConsensusV2-PipeName-{}:tsFileWriter-{} returned self";
+ public static final String IOTCONSENSUSV2_PIPENAME_TSFILEWRITER_ROLL_TO_WRITING_PATH =
+ "IoTConsensusV2-PipeName-{}:tsfileWriter-{} roll to writing path {}";
+ public static final String IOTCONSENSUSV2_PIPENAME_TSFILE_WRITER_IS_CLEANED_UP =
+ "IoTConsensusV2-PipeName-{}:tsfile writer-{} is cleaned up because no new requests were "
+ + "received for too long.";
+ public static final String IOTCONSENSUSV2_PIPENAME_UNKNOWN_PIPEREQUESTTYPE_RESPONSE_STATUS =
+ "IoTConsensusV2-PipeName-{}:未知的 PipeRequestType, response status = {}。";
+ public static final String IOTCONSENSUSV2_PIPENAME_WAS_DELETED =
+ "IoTConsensusV2-PipeName-{}:{} {} 已删除.";
+ public static final String IOTCONSENSUSV2_PIPENAME_WRITING_FILE_IS_NOT_AVAILABLE =
+ "IoTConsensusV2-PipeName-{}:Writing file {} 不可用. Writing file is null: {}, writing file "
+ + "exists: {}, writing file writer is null: {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_WRITING_FILE_IS_NOT_EXISTED =
+ "IoTConsensusV2-PipeName-{}:Writing file {} 不存在或名称不正确,尝试创建。Current writing file is {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_WRITING_FILE_S_OFFSET_IS =
+ "IoTConsensusV2-PipeName-{}:Writing file {}'s offset is {}, but request sender's offset "
+ + "is {}.";
+ public static final String IOTCONSENSUSV2_PIPENAME_WRITING_FILE_WAS_CREATED_READY =
+ "IoTConsensusV2-PipeName-{}:Writing file {} 已创建. Ready to write file pieces.";
+ public static final String IOTCONSENSUSV2_RECEIVE_ON_THE_FLY_NO_EVENT =
+ "IoTConsensusV2-{}:receive on-the-fly no.{} event after data region 已删除, discard it";
+ public static final String IOTCONSENSUSV2_TRANSFER_BATCH_HASN_T_BEEN_IMPLEMENTED =
+ "IoTConsensusV2 transfer batch hasn't been implemented yet.";
+ public static final String IOTCONSENSUSV2_TSFILEWRITER_SET_NULL_WRITING_FILE =
+ "IoTConsensusV2-{}:TsFileWriter-{} set null writing file";
+ public static final String IOTCONSENSUSV2_TSFILEWRITER_SET_NULL_WRITING_FILE_WRITER =
+ "IoTConsensusV2-{}:TsFileWriter-{} set null writing file writer";
+ public static final String IOTCONSENSUSV2_UNKNOWN_IOTCONSENSUSV2REQUESTVERSION_RESPONSE_STATUS =
+ "IoTConsensusV2:未知的 IoTConsensusV2RequestVersion, response status = {}。";
+ public static final String IOTCONSENSUSV2_UNKNOWN_PIPEREQUESTTYPE_RESPONSE_STATUS =
+ "IoTConsensusV2 Unknown PipeRequestType, response status = {}.";
+ public static final String IOTCONSENSUSV2_WAITING_FOR_THE_PREVIOUS_EVENT_TIMES =
+ "IoTConsensusV2-{}:等待 the previous event times out, current peek {}, current id {}";
+ public static final String IOTDBAIRGAPRECEIVERAGENT_STARTED =
+ "IoTDBAirGapReceiverAgent {} started.";
+ public static final String IOTDBAIRGAPRECEIVERAGENT_STOPPED =
+ "IoTDBAirGapReceiverAgent {} stopped.";
+ public static final String LOAD_ACTIVE_LISTENING_PIPE_DIR_IS_NOT =
+ "Load active listening pipe dir is not set.";
+ public static final String LOAD_PIPEDATA_WITH_SERIALIZE_NUMBER_SUCCESSFULLY =
+ "Load pipeData with serialize number {} successfully.";
+ public static final String LOAD_TSFILE_ERROR_STATEMENT = "Load TsFile {} error, statement: {}.";
+ public static final String LOAD_TSFILE_RESULT_STATUS = "Load TsFile result status : {}.";
+ public static final String PARSE_DATABASE_PARTIALPATH_ERROR = "Parse database PartialPath {} 出错。";
+ public static final String PIPE_AIR_GAP_RECEIVER_CHECKSUM_FAILED_EXPECTED =
+ "Pipe air gap receiver {}: checksum failed, expected: {}, actual: {}";
+ public static final String PIPE_AIR_GAP_RECEIVER_CLOSED_BECAUSE_OF =
+ "Pipe air gap receiver {} closed because of checksum failed. Socket: {}";
+ public static final String PIPE_AIR_GAP_RECEIVER_CLOSED_BECAUSE_OF_1 =
+ "Pipe air gap receiver {} closed because of exception. Socket: {}";
+ public static final String PIPE_AIR_GAP_RECEIVER_CLOSED_BECAUSE_SOCKET =
+ "Pipe air gap receiver {} closed because socket 已关闭. Socket: {}";
+ public static final String PIPE_AIR_GAP_RECEIVER_EXCEPTION_DURING_HANDLING =
+ "Pipe air gap receiver {}: Exception during handling receiving. Socket: {}";
+ public static final String PIPE_AIR_GAP_RECEIVER_HANDLE_DATA_FAILED =
+ "Pipe air gap receiver {}: Handle data failed, status: {}, req: {}";
+ public static final String PIPE_AIR_GAP_RECEIVER_SOCKET_CLOSED_WHEN =
+ "Pipe air gap receiver {}: Socket {} closed when listening to data. Because: {}";
+ public static final String PIPE_AIR_GAP_RECEIVER_STARTED_SOCKET =
+ "Pipe air gap receiver {} started. Socket: {}";
+ public static final String PIPE_AIR_GAP_RECEIVER_TEMPORARY_UNAVAILABLE_RETRY =
+ "Pipe air gap receiver {}: Temporary unavailable retry timed out, returning FAIL to sender.";
+ public static final String PIPE_AIR_GAP_RECEIVER_TSSTATUS_IS_ENCOUNTERED =
+ "Pipe air gap receiver {}: TSStatus {} is encountered at the air gap receiver, will ignore.";
+ public static final String PIPE_DATA_TRANSPORT_ERROR = "Pipe data transport error, {}";
+ public static final String PIPE_INSERTING_TABLET_TO_CASTING_TYPE_FROM =
+ "Pipe:Inserting tablet to {}.{}. Casting type from {} to {}.";
+ public static final String RECEIVERS_EXECUTOR_IS_CLOSED = "Receivers-{}' executor 已关闭.";
+ public static final String RECEIVER_EXIT_SUCCESSFULLY = "Receiver-{} exit successfully.";
+ public static final String RECEIVER_ID = "接收器 id = {}:{}";
+ public static final String RECEIVER_ID_THE_NUMBER_OF_DEVICE_PATHS =
+ "接收器 id = {}:The number of device paths is not equal to sub-status in statement {}: {}.";
+ public static final String RECEIVER_ID_UNKNOWN_PIPEREQUESTTYPE_RESPONSE_STATUS =
+ "接收器 id = {}:未知的 PipeRequestType, response status = {}。";
+ public static final String RECEIVER_ID_UNSUPPORTED_STATEMENT_TYPE_FOR_REDIRECTION =
+ "接收器 id = {}:不支持的 statement type {} for redirection。";
+ public static final String RECEIVER_IS_READY = "Receiver-{} is ready";
+ public static final String REGISTER_WITH_INTERVAL_IN_SECONDS_SUCCESSFULLY =
+ "Register {} with interval in seconds {} successfully.";
+ public static final String SOCKET_CLOSED_WHEN_EXECUTING_READTILLFULL =
+ "Socket closed when executing readTillFull.";
+ public static final String SOCKET_CLOSED_WHEN_EXECUTING_SKIPTILLENOUGH =
+ "Socket closed when executing skipTillEnough.";
+ public static final String START_LOAD_PIPEDATA_WITH_SERIALIZE_NUMBER_AND =
+ "Start load pipeData with serialize number {} and type {},value={}";
+ public static final String STORAGE_ENGINE_READONLY = "storage engine readonly";
+ public static final String SYNC_START_AT_TO_IS_DONE = "Sync {} start at {} to {} is done.";
+ public static final String TEMPORARY_UNAVAILABLE_EXCEPTION_ENCOUNTERED_AT_AIR_GAP =
+ "Temporary unavailable exception encountered at air gap receiver, will retry locally.";
+ public static final String THE_IOTCONSENSUSV2_REQUEST_VERSION_IS_DIFFERENT_FROM =
+ "The iotConsensusV2 request version {} is different from the sender request version {}, "
+ + "the receiver will be reset to the sender request version.";
+ public static final String THE_START_INDEX_OF_DATA_SYNC_IS =
+ "The start index {} of data sync is not valid. The file is not exist and start index "
+ + "should equal to 0).";
+ public static final String THE_START_INDEX_OF_DATA_SYNC_IS_1 =
+ "The start index {} of data sync is not valid. The start index of the file should equal "
+ + "to {}.";
+ public static final String THRIFT_CONNECTION_IS_NOT_ALIVE = "Thrift 连接已断开。";
+ public static final String TSFILECHECKER_DID_NOT_TERMINATE_WITHIN_S =
+ "TsFileChecker did not terminate within {}s";
+ public static final String TSFILECHECKER_THREAD_STILL_DOESN_T_EXIT_AFTER =
+ "TsFileChecker Thread {} still doesn't exit after 30s";
+ public static final String UNHANDLED_EXCEPTION_DURING_PIPE_AIR_GAP_RECEIVER =
+ "Unhandled exception during pipe air gap receiver listening";
+ public static final String UNSUPPORTED_DATA_TYPE = "不支持的 data type: ";
+
+ // ===================== RESOURCE =====================
+
+ public static final String CANNOT_GET_DATA_REGION_IDS_USE_DEFAULT =
+ "无法获取 data region ids, use default lock segment size: {}";
+ public static final String EXPAND_CALLBACK_IS_NOT_SUPPORTED_IN_PIPEFIXEDMEMORYBLOCK =
+ "Expand callback is not supported in PipeFixedMemoryBlock";
+ public static final String EXPAND_METHOD_IS_NOT_SUPPORTED_IN_PIPEFIXEDMEMORYBLOCK =
+ "Expand method is not supported in PipeFixedMemoryBlock";
+ public static final String FAILED_TO_CACHEDEVICEISALIGNEDMAPIFABSENT_FOR_TSFILE_BECAUSE_MEMORY =
+ "cacheDeviceIsAlignedMapIfAbsent for tsfile {} 失败,原因:memory usage is high";
+ public static final String FAILED_TO_CACHEOBJECTSIFABSENT_FOR_TSFILE_BECAUSE_MEMORY =
+ "cacheObjectsIfAbsent for tsfile {} 失败,原因:memory usage is high";
+ public static final String FAILED_TO_ESTIMATE_SIZE_FOR_INSERTNODE =
+ "estimate size for InsertNode: {} 失败";
+ public static final String FAILED_TO_EXECUTE_THE_EXPAND_CALLBACK =
+ "execute the expand callback 失败。";
+ public static final String FAILED_TO_EXECUTE_THE_SHRINK_CALLBACK =
+ "execute the shrink callback 失败。";
+ public static final String FAILED_TO_GET_FILE_SIZE_OF_LINKED =
+ "获取 file size of linked TsFile {}: 失败";
+ public static final String FORCEALLOCATEWITHRETRY_INTERRUPTED_WHILE_WAITING_FOR_AVAILABLE_MEMORY =
+ "forceAllocateWithRetry:等待可用内存时被中断";
+ public static final String FORCEALLOCATE_INTERRUPTED_WHILE_WAITING_FOR_AVAILABLE_MEMORY =
+ "forceAllocate: interrupted while waiting for available memory";
+ public static final String FORCERESIZE_CANNOT_RESIZE_A_NULL_OR_RELEASED =
+ "forceResize: cannot resize a null or released memory block";
+ public static final String FORCERESIZE_INTERRUPTED_WHILE_WAITING_FOR_AVAILABLE_MEMORY =
+ "forceResize: interrupted while waiting for available memory";
+ public static final String INTERRUPTED_WHILE_WAITING_FOR_THE_LOCK = "waiting for the lock 时被中断。";
+ public static final String IS_RELEASED_AFTER_THREAD_INTERRUPTION =
+ "{} is released after thread interruption.";
+ public static final String PIPEPERIODICALLOGREDUCER_IS_ALLOCATED_TO_BYTES =
+ "PipePeriodicalLogReducer is allocated to {} bytes.";
+ public static final String PIPETSFILERESOURCE_CACHED_DEVICEISALIGNEDMAP_FOR_TSFILE =
+ "PipeTsFileResource: Cached deviceIsAlignedMap for tsfile {}.";
+ public static final String PIPETSFILERESOURCE_CACHED_OBJECTS_FOR_TSFILE =
+ "PipeTsFileResource: Cached objects for tsfile {}.";
+ public static final String PIPETSFILERESOURCE_CLOSED_TSFILE_AND_CLEANED_UP =
+ "PipeTsFileResource: Closed tsfile {} and cleaned up.";
+ public static final String PIPETSFILERESOURCE_FAILED_TO_CACHE_OBJECTS_FOR_TSFILE =
+ "PipeTsFileResource: Failed to cache objects for tsfile {} in cache, because memory "
+ + "usage is high";
+ public static final String PIPETSFILERESOURCE_FAILED_TO_DELETE_TSFILE_WHEN_CLOSING =
+ "PipeTsFileResource: Failed to delete tsfile {} when closing, because {}. Please "
+ + "MANUALLY delete it.";
+ public static final String PIPETSFILERESOURCE_S_REFERENCE_COUNT_IS_DECREASED_TO =
+ "PipeTsFileResource's reference count is decreased to below 0.";
+ public static final String PIPE_HARDLINK_DIR_FOUND_DELETING_IT_RESULT =
+ "Pipe hardlink dir found, deleting it: {}, result: {}";
+ public static final String PIPE_SNAPSHOT_DIR_FOUND_DELETING_IT =
+ "Pipe snapshot dir found, deleting it: {},";
+ public static final String SHRINK_CALLBACK_IS_NOT_SUPPORTED_IN_PIPEFIXEDMEMORYBLOCK =
+ "Shrink callback is not supported in PipeFixedMemoryBlock";
+ public static final String SHRINK_METHOD_IS_NOT_SUPPORTED_IN_PIPEFIXEDMEMORYBLOCK =
+ "Shrink method is not supported in PipeFixedMemoryBlock";
+ public static final String THE_MEMORY_BLOCK_HAS_BEEN_RELEASED = "内存块已被释放";
+ public static final String THE_MULTIPLE_N_MUST_BE_GREATER_THAN =
+ "The multiple n must be greater than 0";
+ public static final String TRYALLOCATE_ALLOCATED_MEMORY_TOTAL_MEMORY_SIZE_BYTES =
+ "tryAllocate: allocated memory, total memory size {} bytes, used memory size {} bytes, "
+ + "original requested memory size {} bytes, actual requested memory size {} bytes";
+ public static final String TRYALLOCATE_FAILED_TO_ALLOCATE_MEMORY_TOTAL_MEMORY =
+ "tryAllocate: failed to allocate memory, total memory size {} bytes, used memory size {} "
+ + "bytes, requested memory size {} bytes";
+ public static final String TRYEXPANDALLANDCHECKCONSISTENCY_MEMORY_USAGE_IS_NOT_CONSISTENT_WITH =
+ "tryExpandAllAndCheckConsistency: memory usage is not consistent with allocated blocks, "
+ + "usedMemorySizeInBytes is {} but sum of all blocks is {}";
+ public static final String TRYEXPANDALLANDCHECKCONSISTENCY_MEMORY_USAGE_OF_TABLETS_IS_NOT =
+ "tryExpandAllAndCheckConsistency: memory usage of tablets is not consistent with "
+ + "allocated blocks, usedMemorySizeInBytesOfTablets is {} but sum of all tablet blocks is "
+ + "{}";
+ public static final String TRYEXPANDALLANDCHECKCONSISTENCY_MEMORY_USAGE_OF_TSFILES_IS_NOT =
+ "tryExpandAllAndCheckConsistency: memory usage of tsfiles is not consistent with "
+ + "allocated blocks, usedMemorySizeInBytesOfTsFiles is {} but sum of all tsfile blocks is "
+ + "{}";
+
+ // ===================== METRIC =====================
+
+ public static final String FAILED_TO_DEREGISTER_PIPE_ASSIGNER_METRICS_PIPEDATAREGIONASSIGNER =
+ "注销 pipe assigner metrics, PipeDataRegionAssigner({}) does not exist 失败";
+ public static final String FAILED_TO_DEREGISTER_PIPE_DATA_REGION_EXTRACTOR =
+ "注销 pipe data region extractor metrics, IoTDBDataRegionExtractor({}) does not exist 失败";
+ public static final String FAILED_TO_DEREGISTER_PIPE_DATA_REGION_SINK =
+ "注销 pipe data region sink metrics, PipeSinkSubtask({}) does not exist 失败";
+ public static final String FAILED_TO_DEREGISTER_PIPE_REMAINING_EVENT_AND =
+ "注销 pipe remaining event and time metrics, RemainingEventAndTimeOperator({}) does not "
+ + "exist 失败";
+ public static final String FAILED_TO_DEREGISTER_PIPE_SCHEMA_REGION_CONNECTOR =
+ "注销 pipe schema region connector metrics, PipeConnectorSubtask({}) does not exist 失败";
+ public static final String FAILED_TO_DEREGISTER_PIPE_SCHEMA_REGION_SOURCE =
+ "注销 pipe schema region source metrics, IoTDBSchemaRegionSource({}) does not exist 失败";
+ public static final String FAILED_TO_DEREGISTER_PIPE_TSFILE_TO_TABLETS =
+ "注销 pipe tsfile to tablets metrics, pipeID({}) does not exist 失败";
+ public static final String FAILED_TO_DEREGISTER_SCHEMA_REGION_LISTENER_METRICS =
+ "注销 schema region listener metrics, SchemaRegionListeningQueue({}) does not exist 失败";
+ public static final String FAILED_TO_MARK_PIPE_DATA_REGION_EXTRACTOR =
+ "mark pipe data region extractor heartbeat event, IoTDBDataRegionExtractor({}) does not "
+ + "exist 失败";
+ public static final String FAILED_TO_MARK_PIPE_DATA_REGION_EXTRACTOR_1 =
+ "mark pipe data region extractor tablet event, IoTDBDataRegionExtractor({}) does not "
+ + "exist 失败";
+ public static final String FAILED_TO_MARK_PIPE_DATA_REGION_EXTRACTOR_2 =
+ "mark pipe data region extractor tsfile event, IoTDBDataRegionExtractor({}) does not "
+ + "exist 失败";
+ public static final String FAILED_TO_MARK_PIPE_DATA_REGION_SINK =
+ "mark pipe data region sink tablet event, PipeSinkSubtask({}) does not exist 失败";
+ public static final String FAILED_TO_MARK_PIPE_DATA_REGION_SINK_1 =
+ "mark pipe data region sink tsfile event, PipeSinkSubtask({}) does not exist 失败";
+ public static final String FAILED_TO_MARK_PIPE_PROCESSOR_HEARTBEAT_EVENT =
+ "mark pipe processor heartbeat event, PipeProcessorSubtask({}) does not exist 失败";
+ public static final String FAILED_TO_MARK_PIPE_PROCESSOR_TABLET_EVENT =
+ "mark pipe processor tablet event, PipeProcessorSubtask({}) does not exist 失败";
+ public static final String FAILED_TO_MARK_PIPE_PROCESSOR_TSFILE_EVENT =
+ "mark pipe processor tsfile event, PipeProcessorSubtask({}) does not exist 失败";
+ public static final String FAILED_TO_MARK_PIPE_REGION_COMMIT_REMAININGEVENTANDTIMEOPERATOR =
+ "mark pipe region commit, RemainingEventAndTimeOperator({}) does not exist 失败";
+ public static final String FAILED_TO_MARK_PIPE_SCHEMA_REGION_WRITE =
+ "mark pipe schema region write plan event, PipeConnectorSubtask({}) does not exist 失败";
+ public static final String FAILED_TO_MARK_PIPE_TSFILE_TO_TABLETS =
+ "mark pipe tsfile to tablets invocation, pipeID({}) does not exist 失败";
+ public static final String FAILED_TO_RECORD_PIPE_TSFILE_TO_TABLETS =
+ "记录 pipe tsfile to tablets time, pipeID({}) does not exist 失败";
+ public static final String FAILED_TO_RECORD_TABLET_GENERATED_PIPEID_DOES =
+ "记录 tablet generated, pipeID({}) does not exist 失败";
+ public static final String FAILED_TO_SET_RECENT_PROCESSED_TSFILE_EPOCH =
+ "设置 recent processed tsfile epoch state, PipeRealtimeDataRegionExtractor({}) does not "
+ + "exist 失败";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_ASSIGNER_METRICS =
+ "解绑 from pipe assigner metrics, assigner map not empty 失败";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_DATA_REGION =
+ "解绑 from pipe data region sink metrics, sink map not empty 失败";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_EXTRACTOR_METRICS =
+ "解绑 from pipe extractor metrics, extractor map not empty 失败";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_PROCESSOR_METRICS =
+ "解绑 from pipe processor metrics, processor map not empty 失败";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_REMAINING_EVENT =
+ "解绑 from pipe remaining event and time metrics, RemainingEventAndTimeOperator map not "
+ + "empty 失败";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_SCHEMA_REGION =
+ "解绑 from pipe schema region connector metrics, connector map not empty 失败";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_SCHEMA_REGION_1 =
+ "解绑 from pipe schema region extractor metrics, extractor map not empty 失败";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_SCHEMA_REGION_2 =
+ "解绑 from pipe schema region listener metrics, listening queue map not empty 失败";
+ public static final String FAILED_TO_UNBIND_FROM_PIPE_TSFILE_TO =
+ "解绑 from pipe tsfile to tablets metrics, pipe map is not empty, pipe: {} 失败";
+
+ // ---------------------------------------------------------------------------
+ // pipe – AbstractSameTypeNumericOperator
+ // ---------------------------------------------------------------------------
+ public static final String UNSUPPORTED_OUTPUT_DATATYPE_FMT = "不支持的输出数据类型 %s";
+
+ // ---------------------------------------------------------------------------
+ // pipe – IoTDBDataRegionSource
+ // ---------------------------------------------------------------------------
+ public static final String ILLEGAL_TREE_PATTERN_FMT = "Pattern \"%s\" 非法。";
+
+ // ---------------------------------------------------------------------------
+ // pipe – OpcUaServerBuilder
+ // ---------------------------------------------------------------------------
+ public static final String UNABLE_CREATE_SECURITY_DIR = "无法创建安全目录:";
+
+ // ---------------------------------------------------------------------------
+ // pipe – PipeDataNodePluginAgent
+ // ---------------------------------------------------------------------------
+ public static final String PLUGIN_NOT_REGISTERED_FMT = "插件 %s 未注册。";
+
+ private DataNodePipeMessages() {} // Constants holder: private ctor enforces noninstantiability (Effective Java Item 4).
+}
diff --git a/iotdb-core/datanode/src/main/i18n/zh/org/apache/iotdb/db/i18n/DataNodeQueryMessages.java b/iotdb-core/datanode/src/main/i18n/zh/org/apache/iotdb/db/i18n/DataNodeQueryMessages.java
new file mode 100644
index 0000000000000..1f0be57b678e8
--- /dev/null
+++ b/iotdb-core/datanode/src/main/i18n/zh/org/apache/iotdb/db/i18n/DataNodeQueryMessages.java
@@ -0,0 +1,1393 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.i18n;
+
+public final class DataNodeQueryMessages {
+
+ // --- Common ---
+
+ public static final String NO_MATCHED_DATABASE_PLEASE_CHECK_THE_PATH =
+ "未找到匹配的数据库,请检查路径 ";
+ public static final String THIS_NODE_ISN_T_INSTANCE_OF_SCHEMAENTITYNODE =
+ "该节点不是 SchemaEntityNode 实例。";
+ public static final String THIS_NODE_ISN_T_INSTANCE_OF_SCHEMAMEASUREMENTNODE =
+ "该节点不是 SchemaMeasurementNode 实例。";
+
+ // --- Execution ---
+
+ public static final String ERROR_SETTING_FUTURE_STATE_FOR =
+ "为 {} 设置 future 状态时出错";
+ public static final String ERROR_NOTIFYING_STATE_CHANGE_LISTENER_FOR =
+ "通知 {} 的状态变更监听器时出错";
+ public static final String SERVER_IS_SHUTTING_DOWN =
+ "服务器正在关闭";
+
+ // --- Execution / Aggregation ---
+
+ public static final String INVALID_AGGREGATION_FUNCTION =
+ "无效的聚合函数:";
+ public static final String UNKNOWN_DATA_TYPE =
+ "未知的数据类型:";
+ public static final String COUNT_IF_WITH_SLIDINGWINDOW_IS_NOT_SUPPORTED_NOW =
+ "目前不支持 COUNT_IF 与滑动窗口组合使用";
+ public static final String TIME_DURATION_WITH_SLIDINGWINDOW_IS_NOT_SUPPORTED_NOW =
+ "目前不支持 TIME_DURATION 与滑动窗口组合使用";
+ public static final String MODE_WITH_SLIDINGWINDOW_IS_NOT_SUPPORTED_NOW =
+ "目前不支持 MODE 与滑动窗口组合使用";
+ public static final String INVALID_AGGREGATION_TYPE =
+ "无效的聚合类型:";
+
+ // --- Execution / Driver ---
+
+ public static final String QUERYDATASOURCE_SHOULD_NEVER_BE_NULL =
+ "QueryDataSource 不应为 null!";
+
+ // --- Execution / Exchange ---
+
+ public static final String SOURCE_HANDLE_FAILED_DUE_TO =
+ "Source handle 失败,原因:";
+ public static final String SINK_FAILED_DUE_TO =
+ "Sink 失败,原因";
+ public static final String ISINKCHANNEL_FAILED_DUE_TO =
+ "ISinkChannel 失败,原因";
+ public static final String SINK_HANDLE_FAILED_DUE_TO =
+ "Sink handle 失败,原因";
+ public static final String MPPDATAEXCHANGEMANAGER_INIT_SUCCESSFULLY =
+ "MPPDataExchangeManager 初始化成功";
+ public static final String QUEUE_HAS_BEEN_DESTROYED =
+ "队列已被销毁";
+ public static final String SINK_HANDLE_IS_BLOCKED =
+ "Sink handle 已被阻塞。";
+ public static final String LOCALSINKCHANNEL_IS_ABORTED =
+ "LocalSinkChannel 已中止。";
+ public static final String ERROR_OCCURRED_WHEN_TRY_TO_ABORT_CHANNEL =
+ "尝试中止通道时发生错误。";
+ public static final String ERROR_OCCURRED_WHEN_TRY_TO_CLOSE_CHANNEL =
+ "尝试关闭通道时发生错误。";
+ public static final String SHUFFLESINKHANDLE_IS_ABORTED =
+ "ShuffleSinkHandle 已中止。";
+ public static final String UNSUPPORTED_TYPE_OF_SHUFFLE_STRATEGY =
+ "不支持的 shuffle 策略类型";
+ public static final String SINKCHANNEL_IS_ABORTED_OR_CLOSED =
+ "SinkChannel 已中止或关闭。";
+ public static final String THE_DATA_BLOCK_DOESN_T_EXIST_SEQUENCE_ID =
+ "数据块不存在。序列 ID:";
+ public static final String THE_TSBLOCK_DOESNT_EXIST_SEQUENCE_ID_REMAINING =
+ "TsBlock 不存在。序列 ID 为 {},剩余映射为 {}";
+ public static final String SINKCHANNEL_IS_ABORTED =
+ "SinkChannel 已中止。";
+ public static final String FAILED_TO_SEND_NEW_DATA_BLOCK_EVENT_ATTEMPT =
+ "发送新数据块事件失败,尝试次数:{}";
+ public static final String FAILED_TO_SEND_END_OF_DATA_BLOCK_EVENT =
+ "发送数据块结束事件失败,尝试次数:{}";
+ public static final String FAILED_TO_SEND_END_OF_DATA_BLOCK_EVENT_2 =
+ "所有重试后仍无法发送数据块结束事件";
+ public static final String SOURCE_HANDLE_IS_BLOCKED =
+ "Source handle 已被阻塞。";
+ public static final String RESERVED_DATA_BLOCK_SIZE_IS_NULL =
+ "预留的数据块大小为 null。";
+ public static final String DATA_BLOCK_SIZE_IS_NULL =
+ "数据块大小为 null。";
+ public static final String SOURCE_HANDLE_IS_ABORTED =
+ "Source handle 已中止。";
+ public static final String SOURCEHANDLE_IS_CLOSED =
+ "SourceHandle 已关闭。";
+
+ // --- Execution / Executor ---
+
+ public static final String EXECUTE_FRAGMENTINSTANCE_IN_CONSENSUSGROUP_FAILED =
+ "在共识组 {} 中执行 FragmentInstance 失败。";
+ public static final String EXECUTE_FRAGMENTINSTANCE_IN_QUERYEXECUTOR_FAILED =
+ "在 QueryExecutor 中执行 FragmentInstance 失败。";
+ public static final String FAILED_IN_THE_WRITE_API_EXECUTING_THE_CONSENSUS =
+ "写入 API 执行共识层时失败,原因:";
+
+ // --- Execution / Fragment ---
+
+ public static final String UNKNOWN_EXCEPTION =
+ "[未知异常]:";
+ public static final String WAIT_MS_FOR_ALL_DRIVERS_CLOSED =
+ "等待 {} 毫秒以关闭所有 Driver";
+ public static final String EXCEPTION_HAPPENED_WHEN_EXECUTING_UDTF =
+ "执行 UDTF 时发生异常:";
+ public static final String ERROR_WHEN_CREATE_FRAGMENTINSTANCEEXECUTION =
+ "创建 FragmentInstanceExecution 时出错。";
+ public static final String EXECUTE_ERROR_CAUSED_BY =
+ "执行错误,原因:";
+
+ // --- Execution / Memory ---
+
+ public static final String FREE_MORE_MEMORY_THAN_HAS_BEEN_RESERVED =
+ "释放的内存超过已预留的量。";
+
+ // --- Execution / Operator ---
+
+ public static final String UNKNOWN_DATA_TYPE_2 =
+ "未知的数据类型 ";
+ public static final String ERROR_OCCURRED_WHEN_LOGGING_INTERMEDIATE_RESULT_OF_ANALYZE =
+ "记录分析中间结果时发生错误。";
+
+ // --- Execution / Operator / Process ---
+
+ public static final String GETWRITTENCOUNT_MEASUREMENT_IS_NOT_SUPPORTED =
+ "不支持 getWrittenCount(measurement) 操作";
+ public static final String GETWRITTENCOUNT_IS_NOT_SUPPORTED =
+ "不支持 getWrittenCount() 操作";
+ public static final String THE_MEMORY_THRESHOLD_MUST_BE_GREATER_THAN_0 =
+ "内存阈值必须大于 0。";
+ public static final String FAILED_TO_CREATE_DIRECTORIES =
+ "创建目录失败:";
+ public static final String TARGET_FILE_ALREADY_EXISTS =
+ "目标文件已存在:";
+ public static final String FAILED_TO_CREATE_FILE =
+ "创建文件失败:";
+ public static final String DATA_TYPE_OF_TARGET_TIME_COLUMN_IS_NOT =
+ "目标时间列的数据类型不是 TIMESTAMP";
+ public static final String DUPLICATE_COLUMN_NAMES_IN_QUERY_DATASET =
+ "查询数据集中存在重复的列名。";
+ public static final String SOME_SPECIFIED_TAG_COLUMNS_ARE_NOT_EXIST_IN =
+ "部分指定的标签列在查询数据集中不存在。";
+ public static final String NUMBER_OF_FIELD_COLUMNS_SHOULD_BE_LARGER_THAN =
+ "字段列的数量应大于 0。";
+ public static final String ALL_CHILD_SHOULD_HAVE_SAME_TIME_COLUMN_RESULT =
+ "所有子节点应具有相同的时间列结果!";
+ public static final String LAST_READ_RESULT_SHOULD_ONLY_HAVE_ONE_RECORD =
+ "Last 读取结果应只有一条记录";
+
+ // --- Execution / Operator / Schema ---
+
+ public static final String FAILED_TO_CONVERT_NODE_PATH_TO_PARTIALPATH =
+ "将节点路径转换为 PartialPath {} 失败";
+
+ // --- Execution / Operator / Source ---
+
+ public static final String ERROR_OCCURS_WHEN_SCANNING_ACTIVE_TIME_SERIES =
+ "扫描活跃时间序列时发生错误。";
+ public static final String ERROR_WHILE_SCANNING_THE_FILE =
+ "扫描文件时发生错误";
+ public static final String ERROR_HAPPENED_WHILE_SCANNING_THE_FILE =
+ "扫描文件时发生错误";
+ public static final String ALL_CACHED_CHUNKS_SHOULD_BE_CONSUMED_FIRST =
+ "所有缓存的 chunk 应先被消费";
+ public static final String OVERLAPPED_DATA_SHOULD_BE_CONSUMED_FIRST =
+ "重叠数据应先被消费";
+ public static final String NO_MORE_BATCH_DATA =
+ "没有更多的批次数据";
+ public static final String GETALLSATISFIEDPAGEDATA_SHOULDN_T_BE_CALLED_HERE =
+ "此处不应调用 getAllSatisfiedPageData()";
+ public static final String GETPAGEREADER_SHOULDN_T_BE_CALLED_HERE =
+ "此处不应调用 getPageReader()";
+ public static final String UNSUPPORTED_COLUMN_TYPE =
+ "不支持的列类型:";
+ public static final String FAIL_TO_CLOSE_CTEDATAREADER =
+ "关闭 CteDataReader 失败";
+ public static final String UNKNOWN_TABLE =
+ "未知的表:";
+ public static final String FAILED_TO_CLOSE_READER_IN_TABLEDISKUSAGESUPPLIER =
+ "在 TableDiskUsageSupplier 中关闭 reader 失败";
+ public static final String UNSUPPORTED_CATEGORY =
+ "不支持的列类别:";
+
+ // --- Execution / Operator / Window ---
+
+ public static final String UNSUPPORTED_INFERENCE_WINDOW_TYPE =
+ "不支持的推理窗口类型:";
+
+ // --- Execution / Schedule ---
+
+ public static final String EXECUTOR_FAILED_TO_POLL_DRIVER_TASK_FROM_QUEUE =
+ "执行器 {} 从队列中获取驱动任务失败";
+ public static final String DRIVERTASK_SHOULD_NEVER_BE_NULL =
+ "DriverTask 不应为 null";
+ public static final String EXECUTEFAILED =
+ "[执行失败]";
+ public static final String EXECUTOR_EXITS_BECAUSE_IT_IS_CLOSED =
+ "执行器 {} 因已关闭而退出。";
+ public static final String CLEAR_DRIVERTASK_FAILED =
+ "清除 DriverTask 失败";
+ public static final String PUSHED_ELEMENT_IS_NULL =
+ "推入的元素为 null";
+
+ // --- Execution / Warnings ---
+
+ public static final String CODE_IS_NEGATIVE =
+ "code 为负数";
+
+ // --- Metric ---
+
+ public static final String UNSUPPORTED_STAGE_IN_TREE_MODEL =
+ "树模型中不支持的阶段:";
+ public static final String UNSUPPORTED_STAGE_IN_TABLE_MODEL =
+ "表模型中不支持的阶段:";
+
+ // --- Plan ---
+
+ public static final String TOPOLOGY_LATEST_VIEW_FROM_CONFIG_NODE =
+ "[拓扑] 来自 ConfigNode 的最新视图:{}";
+ public static final String EXPIRED_QUERIES_INFO_CLEAR_THREAD_IS_SUCCESSFULLY_STARTED =
+ "过期查询信息清理线程已成功启动。";
+ public static final String COST_MS =
+ "耗时:{} 毫秒,{}";
+
+ // --- Plan / Analyze ---
+
+ public static final String COMPUTEDATAPARTITIONPARAMS_FOR =
+ "计算数据分区参数,目标:";
+ public static final String UNSUPPORTED_OPERATOR =
+ "不支持的运算符:";
+ public static final String UNSUPPORTED_EXPRESSION =
+ "不支持的表达式:";
+ public static final String ONLY_SUPPORT_AND_OPERATOR_IN_DELETION =
+ "删除操作仅支持 AND 运算符";
+ public static final String LEFT_HAND_EXPRESSION_IS_NOT_AN_IDENTIFIER =
+ "左侧表达式不是标识符:";
+ public static final String THE_LEFT_HAND_VALUE_MUST_BE_AN_IDENTIFIER =
+ "左侧值必须是标识符:";
+ public static final String THE_OPERATOR_OF_TAG_PREDICATE_MUST_BE_FOR =
+ "标签谓词的运算符必须为 '=',目标:";
+ public static final String ONLY_TIME_FILTERS_ARE_SUPPORTED_IN_LAST_QUERY =
+ "LAST 查询中仅支持时间过滤器";
+ public static final String VIEWS_CANNOT_BE_USED_IN_GROUP_BY_TAGS =
+ "视图暂不支持在 GROUP BY TAGS 查询中使用。";
+ public static final String ONLY_TIME_FILTERS_ARE_SUPPORTED_IN_GROUP_BY =
+ "GROUP BY TAGS 查询中仅支持时间过滤器";
+ public static final String UNSUPPORTED_WINDOW_TYPE =
+ "不支持的窗口类型";
+ public static final String AGGREGATION_EXPRESSION_SHOULDN_T_EXIST_IN_GROUP_BY =
+ "GROUP BY 子句中不应包含聚合表达式";
+ public static final String ONLY_SUPPORT_NUMERIC_TYPE_WHEN_DELTA_0 =
+ "当 delta != 0 时仅支持数值类型";
+ public static final String ONLY_SUPPORT_BOOLEAN_TYPE_IN_PREDICT_OF_GROUP =
+ "GROUP BY SERIES 的谓词中仅支持布尔类型";
+ public static final String GROUP_BY_MONTH_DOESN_T_SUPPORT_ORDER_BY =
+ "按月分组目前不支持按时间降序排列。";
+ public static final String NO_RUNNING_DATANODES =
+ "没有运行中的 DataNode";
+ public static final String AN_ERROR_OCCURRED_WHEN_SERIALIZING_PATTERN_TREE =
+ "序列化模式树时发生错误";
+ public static final String EXPRESSION_IN_GROUP_BY_SHOULD_INDICATE_ONE_VALUE =
+ "GROUP BY 中的表达式应指定一个值";
+ public static final String EXPRESSION_IN_ORDER_BY_SHOULD_INDICATE_ONE_VALUE =
+ "ORDER BY 中的表达式应指定一个值";
+ public static final String SHOULDN_T_ATTACH_HERE =
+ "不应在此处附加";
+ public static final String SELECT_INTO_THE_I_OF_SHOULD_BE_AN =
+ "SELECT INTO:${i} 中的 i 应为整数。";
+ public static final String FAILED_TO_GET_DATABASE_MAP =
+ "获取数据库映射失败";
+ public static final String LOAD_ANALYSIS_STAGE_ALL_TSFILES_HAVE_BEEN_ANALYZED =
+ "加载 - 分析阶段:所有 TsFile 已分析完毕。";
+ public static final String ASYNC_LOAD_HAS_FAILED_AND_IS_NOW_TRYING =
+ "异步加载失败,正在尝试同步加载";
+ public static final String TSFILE_IS_EMPTY =
+ "TsFile {} 为空。";
+ public static final String THE_ENCRYPTION_WAY_OF_THE_TSFILE_IS_NOT =
+ "不支持该 TsFile 的加密方式。";
+ public static final String EMPTY_FILE_DETECTED_WILL_SKIP_LOADING_THIS_FILE =
+ "检测到空文件,将跳过加载此文件:{}";
+ public static final String AUTO_CREATE_OR_VERIFY_SCHEMA_ERROR =
+ "自动创建或验证 schema 出错。";
+ public static final String FAILED_TO_FIND_TAG_COLUMN_MAPPING_FOR_TABLE =
+ "未找到表 {} 的标签列映射";
+ public static final String AUTO_CREATE_DATABASE_FAILED_BECAUSE =
+ "自动创建数据库失败,原因:";
+
+ // --- Plan / Execution ---
+
+ public static final String REACHMAXRETRYCOUNT =
+ "[已达最大重试次数]";
+ public static final String ERROR_WHEN_EXECUTING_QUERY =
+ "执行查询时出错。{}";
+ public static final String WAITBEFORERETRY_WAIT_MS =
+ "[重试前等待] 等待 {} 毫秒。";
+ public static final String INTERRUPTED_WHEN_WAITING_RETRY =
+ "等待重试时被中断";
+ public static final String RETRY_RETRY_COUNT_IS =
+ "[重试] 重试次数:{}";
+ public static final String RESULTHANDLEABORTED =
+ "[结果句柄已中止]";
+ public static final String UNSUPPORTED_DATABASE_PROPERTY_KEY =
+ "不支持的数据库属性键:";
+ public static final String A_TABLE_CANNOT_HAVE_MORE_THAN_ONE_TIME =
+ "一个表不能有多于一个时间列";
+ public static final String THE_TIME_COLUMN_S_TYPE_SHALL_BE_TIMESTAMP =
+ "时间列的类型应为 'timestamp'。";
+ public static final String THE_TABLE_S_OLD_NAME_SHALL_NOT_BE =
+ "表的旧名称不应与新名称相同。";
+ public static final String ADDING_TIME_COLUMN_IS_NOT_SUPPORTED =
+ "不支持添加 TIME 列。";
+ public static final String THE_COLUMN_S_OLD_NAME_SHALL_NOT_BE =
+ "列的旧名称不应与新名称相同。";
+ public static final String DUPLICATED_PROPERTY =
+ "重复的属性:";
+ public static final String TABLE_PROPERTY =
+ "表属性 '";
+ public static final String UNKNOWN_TYPE =
+ "未知的类型:%s";
+ public static final String FAILED_TO_CHECK_CONFIG_ITEM_PERMISSION =
+ "检查配置项权限失败";
+ public static final String CONFIGTASK_IS_NOT_IMPLEMENTED_FOR =
+ "ConfigTask 未针对以下内容实现:";
+ public static final String FAILED_TO_GET_EXECUTABLE_FOR_UDF_USING_URI =
+ "无法使用 URI {} 获取 UDF({}) 的可执行文件。";
+ public static final String FAILED_TO_DROP_FUNCTION =
+ "[{}] 删除函数 {} 失败。";
+ public static final String FAILED_TO_DROP_TRIGGER =
+ "[{}] 删除触发器 {} 失败。";
+ public static final String CANNOT_REMOVE_INVALID_NODEIDS =
+ "无法移除无效的节点 ID:{}";
+ public static final String STARTING_TO_REMOVE_DATANODE_WITH_NODEIDS =
+ "开始移除 DataNode,节点 ID:{}";
+ public static final String START_TO_REMOVE_DATANODE_REMOVED_DATANODES_ENDPOINT =
+ "开始移除 DataNode,已移除的 DataNode 端点:{}";
+ public static final String SUBMIT_REMOVE_DATANODES_RESULT =
+ "提交移除 DataNode 结果 {} ";
+ public static final String STARTING_TO_REMOVE_CONFIGNODE_WITH_NODE_ID =
+ "开始移除 ConfigNode,节点 ID:{}";
+ public static final String CONFIGNODE_IS_REMOVED =
+ "ConfigNode {} 已移除。";
+ public static final String STARTING_TO_REMOVE_AINODE =
+ "开始移除 AINode";
+ public static final String REMOVE_AINODE_FAILED_BECAUSE_THERE_IS_NO_AINODE =
+ "移除 AINode 失败,因为集群中没有 AINode。";
+ public static final String AINODE_IN_THE_CLUSTER_IS_REMOVED =
+ "集群中的 AINode 已移除。";
+ public static final String FAILED_TO_HANDLETRANSFERCONFIGPLAN_STATUS_IS =
+ "handleTransferConfigPlan 失败,状态为 {}。";
+ public static final String FAILED_TO_FETCHTABLES_STATUS_IS =
+ "fetchTables 失败,状态为 {}。";
+ public static final String FAILED_TO_HANDLEPIPECONFIGCLIENTEXIT_STATUS_IS =
+ "handlePipeConfigClientExit 失败,状态为 {}。";
+ public static final String FAILED_TO_HANDLEPIPECONFIGCLIENTEXIT =
+ "handlePipeConfigClientExit 失败。";
+ public static final String NOT_SUPPORT_CURRENT_STATEMENT =
+ "不支持当前语句";
+ public static final String WRONG_REQUEST_TYPE =
+ "错误的请求类型";
+ public static final String WRONG_UNIT_TYPE =
+ "错误的单位类型";
+
+ // --- Plan / Expression ---
+
+ public static final String INVALID_EXPRESSION_TYPE =
+ "无效的表达式类型:";
+ public static final String UNSUPPORTED_EXPRESSION_TYPE =
+ "不支持的表达式类型:";
+ public static final String FUNCTION_CAST_MUST_SPECIFY_A_TARGET_DATA_TYPE =
+ "CAST 函数必须指定目标数据类型。";
+ public static final String FUNCTION_REPLACE_MUST_SPECIFY_FROM_AND_TO_COMPONENT =
+ "REPLACE 函数必须指定 from 和 to 参数。";
+ public static final String PLEASE_ENSURE_INPUT_IS_CORRECT =
+ "请确保输入 [%s] 正确";
+ public static final String CASE_EXPRESSION_CANNOT_BE_USED_WITH_NON_MAPPABLE =
+ "CASE 表达式不能与非映射型 UDF 一起使用";
+ public static final String UNSUPPORTED_TRANSFORMER_ACCESS_STRATEGY =
+ "不支持的转换器访问策略";
+ public static final String AGGREGATE_FUNCTIONS_ARE_NOT_SUPPORTED_IN_WHERE_CLAUSE =
+ "WHERE 子句中不支持聚合函数";
+ public static final String IS_NULL_CANNOT_BE_PUSHED_DOWN =
+ "IS NULL 不能下推";
+ public static final String TIMESTAMP_DOES_NOT_SUPPORT_IS_NULL_IS_NOT =
+ "TIMESTAMP 不支持 IS NULL/IS NOT NULL";
+ public static final String TIMESTAMP_DOES_NOT_SUPPORT_LIKE_NOT_LIKE =
+ "TIMESTAMP 不支持 LIKE/NOT LIKE";
+ public static final String TIMESTAMP_DOES_NOT_SUPPORT_REGEXP_NOT_REGEXP =
+ "TIMESTAMP 不支持 REGEXP/NOT REGEXP";
+ public static final String GROUPBYTIME_FILTER_CANNOT_EXIST_IN_VALUE_FILTER =
+ "GroupByTime 过滤器不能存在于值过滤器中。";
+ public static final String IS_NULL_CAN_BE_PUSHED_DOWN =
+ "IS NULL 可以下推";
+ public static final String GROUP_BY_TIME_CANNOT_BE_REVERSED =
+ "GROUP BY TIME 不能反转";
+
+ // --- Plan / Optimization ---
+
+ public static final String UNEXPECTED_PLAN_NODE =
+ "意外的计划节点:";
+ public static final String UNEXPECTED_PATH_TYPE =
+ "意外的路径类型";
+ public static final String SOURCEPATH_MUST_BE_MEASUREMENTPATH_OR_ALIGNEDPATH =
+ "sourcePath 必须为 MeasurementPath 或 AlignedPath";
+
+ // --- Plan / Parser ---
+
+ public static final String DATATYPE_MUST_BE_DECLARED =
+ "必须声明数据类型";
+ public static final String UNSUPPORTED_ENCODING =
+ "不支持的编码:%s";
+ public static final String UNSUPPORTED_COMPRESSION =
+ "不支持的压缩方式:%s";
+ public static final String UNSUPPORTED_ENCODING_2 =
+ "不支持的编码:%s";
+ public static final String UNSUPPORTED_COMPRESSOR =
+ "不支持的压缩器:%s";
+ public static final String CREATE_ALIGNED_TIMESERIES_PROPERTY_IS_NOT_SUPPORTED_YET =
+ "创建对齐时间序列:暂不支持 property。";
+ public static final String UNSUPPORTED_COMPRESSOR_2 =
+ "不支持的压缩器:%s";
+ public static final String PROPERTY_IS_UNSUPPORTED_YET =
+ "暂不支持属性 %s。";
+ public static final String THE_TIMESERIES_SHALL_NOT_BE_ROOT =
+ "时间序列不应为 root。";
+ public static final String UNSUPPORTED_DATATYPE =
+ "不支持的数据类型:%s";
+ public static final String UNEXPECTED_FILTER_KEY =
+ "意外的过滤键";
+ public static final String URI_IS_EMPTY_PLEASE_SPECIFY_THE_URI =
+ "URI 为空,请指定 URI。";
+ public static final String INVALID_URI =
+ "无效的 URI:%s";
+ public static final String TRIGGER_DOES_NOT_SUPPORT_DELETE_AS_TRIGGER_EVENT =
+ "触发器目前不支持 DELETE 作为触发事件。";
+ public static final String PLEASE_SPECIFY_TRIGGER_TYPE_STATELESS_OR_STATEFUL =
+ "请指定触发器类型:STATELESS 或 STATEFUL。";
+ public static final String RENAMING_VIEW_IS_NOT_SUPPORTED =
+ "不支持重命名视图。";
+ public static final String VIEW_DOESN_T_SUPPORT_ALIAS =
+ "视图不支持别名。";
+ public static final String MODELID_SHOULD_BE_2_64_CHARACTERS =
+ "ModelId 应为 2-64 个字符";
+ public static final String MODELID_SHOULD_NOT_START_WITH =
+ "ModelId 不应以 '_' 开头";
+ public static final String MODELID_CAN_ONLY_CONTAIN_LETTERS_NUMBERS_AND_UNDERSCORES =
+ "ModelId 只能包含字母、数字和下划线";
+ public static final String DEVICE_ID_SHOULD_BE_CPU_OR_INTEGER =
+ "设备 ID 应为 'cpu' 或整数";
+ public static final String DATA_SHOULD_NOT_BE_SET_FOR_MODEL_TRAINING =
+ "模型训练时不应设置数据";
+ public static final String DUPLICATED_GROUP_BY_KEY_LEVEL =
+ "重复的 GROUP BY 键:LEVEL";
+ public static final String DUPLICATED_GROUP_BY_KEY_TAGS =
+ "重复的 GROUP BY 键:TAGS";
+ public static final String UNKNOWN_GROUP_BY_TYPE =
+ "未知的 GROUP BY 类型。";
+ public static final String DUPLICATE_ALIAS_IN_SELECT_CLAUSE =
+ "SELECT 子句中存在重复的别名";
+ public static final String CONSTANT_OPERAND_IS_NOT_ALLOWED =
+ "不允许使用常量操作数:";
+ public static final String THE_TIME_WINDOWS_MAY_EXCEED_10000_PLEASE_ENSURE =
+ "时间窗口可能超过 10000 个,请确认输入。";
+ public static final String START_TIME_SHOULD_BE_SMALLER_THAN_ENDTIME_IN =
+ "GroupBy 中的起始时间应小于结束时间";
+ public static final String KEEP_THRESHOLD_IN_GROUP_BY_CONDITION_SHOULD_BE =
+ "应设置 GROUP BY 条件中的保持阈值";
+ public static final String DUPLICATED_KEY_IN_GROUP_BY_TAGS =
+ "GROUP BY TAGS 中存在重复的键:";
+ public static final String UNKNOWN_FILL_TYPE =
+ "未知的 FILL 类型。";
+ public static final String UNSUPPORTED_CONSTANT_VALUE_IN_FILL =
+ "FILL 中不支持的常量值:";
+ public static final String OUT_OF_RANGE_LIMIT_N_N_SHOULD_BE =
+ "超出范围。LIMIT :N 应为 Int64。";
+ public static final String LIMIT_N_N_SHOULD_BE_GREATER_THAN_0 =
+ "LIMIT :N 应大于 0。";
+ public static final String OFFSET_OFFSETVALUE_OFFSETVALUE_SHOULD_0 =
+ "OFFSET :OFFSETValue 应 >= 0。";
+ public static final String OUT_OF_RANGE_SLIMIT_SN_SN_SHOULD_BE =
+ "超出范围。SLIMIT :SN 应为 Int32。";
+ public static final String SLIMIT_SN_SN_SHOULD_BE_GREATER_THAN_0 =
+ "SLIMIT :SN 应大于 0。";
+ public static final String SOFFSET_SOFFSETVALUE_SOFFSETVALUE_SHOULD_0 =
+ "SOFFSET :SOFFSETValue 应 >= 0。";
+ public static final String ONE_ROW_SHOULD_ONLY_HAVE_ONE_TIME_VALUE =
+ "一行数据应只有一个时间值";
+ public static final String INSERTSTATEMENT_SHOULD_CONTAIN_AT_LEAST_ONE_MEASUREMENT =
+ "InsertStatement 应至少包含一个测量值";
+ public static final String NEED_TIMESTAMPS_WHEN_INSERT_MULTI_ROWS =
+ "插入多行时需要时间戳";
+ public static final String CAN_NOT_PARSE_TO_TIME =
+ "无法将 %s 解析为时间";
+ public static final String PATH_CAN_NOT_START_WITH_ROOT_IN_SELECT =
+ "SELECT 子句中的路径不能以 root 开头。";
+ public static final String INPUT_TIMESTAMP_CANNOT_BE_EMPTY =
+ "输入时间戳不能为空";
+ public static final String NOT_SUPPORT_FOR_THIS_ALIAS_PLEASE_ENCLOSE_IN =
+ "不支持此别名,请使用反引号括起来。";
+ public static final String STATEMENT_NEEDS_TARGET_PATHS =
+ "语句需要目标路径";
+ public static final String THE_DATATYPE_OF_TIMESTAMP_SHOULD_BE_LONG =
+ "时间戳的数据类型应为 LONG。";
+ public static final String ATTRIBUTES_OF_FUNCTIONS_SHOULD_BE_QUOTED_WITH_OR =
+ "函数的属性应使用 '' 或 \"\" 引起来";
+ public static final String UNSUPPORTED_CONSTANT_VALUE =
+ "不支持的常量值:";
+ public static final String UNSUPPORTED_CONSTANT_OPERAND =
+ "不支持的常量操作数:";
+ public static final String UNKNOWN_SYSTEM_STATUS_IN_SET_SYSTEM_COMMAND =
+ "SET SYSTEM 命令中的系统状态未知。";
+ public static final String DEVICE_TEMPLATE_ALIAS_IS_NOT_SUPPORTED_YET =
+ "设备模板:暂不支持别名。";
+ public static final String DEVICE_TEMPLATE_PROPERTY_IS_NOT_SUPPORTED_YET =
+ "设备模板:暂不支持属性。";
+ public static final String DEVICE_TEMPLATE_TAG_IS_NOT_SUPPORTED_YET =
+ "设备模板:暂不支持标签。";
+ public static final String DEVICE_TEMPLATE_ATTRIBUTE_IS_NOT_SUPPORTED_YET =
+ "设备模板:暂不支持 attribute。";
+ public static final String EXPECTING_DATATYPE =
+ "需要数据类型";
+ public static final String NOT_SUPPORT_FOR_THIS_SQL_IN_DROP_PIPE =
+ "DROP PIPE 不支持此 SQL,请输入管道名。";
+ public static final String NOT_SUPPORT_FOR_THIS_SQL_IN_START_PIPE =
+ "START PIPE 不支持此 SQL,请输入管道名。";
+ public static final String NOT_SUPPORT_FOR_THIS_SQL_IN_STOP_PIPE =
+ "STOP PIPE 不支持此 SQL,请输入管道名。";
+ public static final String GET_REGION_ID_STATEMENT_EXPRESSION_MUST_BE_A =
+ "GET REGION ID 语句的表达式必须是时间表达式";
+ public static final String WRONG_SPACE_QUOTA_TYPE =
+ "错误的空间配额类型:";
+ public static final String PLEASE_SET_THE_NUMBER_OF_DEVICES_GREATER_THAN =
+ "请将设备数设置为大于 0";
+ public static final String PLEASE_SET_THE_NUMBER_OF_TIMESERIES_GREATER_THAN =
+ "请将时间序列数设置为大于 0";
+ public static final String CANNOT_SET_THROTTLE_QUOTA_FOR_USER_ROOT =
+ "不能为 root 用户设置限流配额。";
+ public static final String PLEASE_SET_THE_NUMBER_OF_REQUESTS_GREATER_THAN =
+ "请将请求数设置为大于 0";
+ public static final String PLEASE_SET_THE_NUMBER_OF_CPU_GREATER_THAN =
+ "请将 CPU 数量设置为大于 0";
+ public static final String PLEASE_SET_THE_SIZE_GREATER_THAN_0 =
+ "请将大小设置为大于 0";
+ public static final String PLEASE_SET_THE_DISK_SIZE_GREATER_THAN_0 =
+ "请将磁盘大小设置为大于 0";
+ public static final String THERE_SHOULD_BE_ONLY_ONE_WINDOW_IN_CALL =
+ "CALL INFERENCE 中应只有一个窗口。";
+ public static final String THE_CREATETABLEVIEW_IS_UNSUPPORTED_IN_TREE_SQL_DIALECT =
+ "树模型 SQL 方言中不支持 'CreateTableView'。";
+ public static final String CURRENTLY_OTHER_EXPRESSIONS_ARE_NOT_SUPPORTED =
+ "目前不支持其他表达式";
+ public static final String ALIGN_DESIGNATION_INCORRECT_AT =
+ "对齐指定不正确,位于:";
+
+ // --- Plan / Relational / Analyzer ---
+
+ public static final String COLUMN_NOT_IN_GROUP_BY_CLAUSE =
+ "列 %s 不在 GROUP BY 子句中";
+ public static final String DATABASE_IS_NOT_SPECIFIED_FOR_INSERT =
+ "未指定插入操作的数据库:";
+ public static final String IDENTIFIER_NOT_ALLOWED_IN_THIS_CONTEXT =
+ "此上下文中不允许 .*";
+ public static final String UNKNOWN_SIGN =
+ "未知的符号:";
+ public static final String DECIMALLITERAL_IS_NOT_SUPPORTED_YET =
+ "暂不支持 DecimalLiteral。";
+ public static final String GENERICLITERAL_IS_NOT_SUPPORTED_YET =
+ "暂不支持 GenericLiteral。";
+ public static final String DISTINCT_IS_NOT_SUPPORTED_FOR_NON_AGGREGATION_FUNCTIONS =
+ "非聚合函数不支持 DISTINCT";
+ public static final String UNEXPECTED_PATTERN_RECOGNITION_FUNCTION =
+ "意外的模式识别函数 ";
+ public static final String THE_INPUT_ARGUMENT_DOES_NOT_EXIST =
+ "输入参数不存在";
+ public static final String MATCH_NUMBER_PATTERN_RECOGNITION_FUNCTION_TAKES_NO_ARGUMENTS =
+ "MATCH_NUMBER 模式识别函数不接受参数";
+ public static final String UNEXPECTED_NAVIGATION_ANCHOR =
+ "意外的导航锚点:";
+ public static final String UNEXPECTED_MODE =
+ "意外的模式:";
+ public static final String QUERY_TAKES_NO_PARAMETERS =
+ "查询不接受参数";
+ public static final String NO_VALUE_PROVIDED_FOR_PARAMETER =
+ "未提供参数值";
+ public static final String CANNOT_EXTRACT_FROM =
+ "无法从 %s 中提取";
+ public static final String UNKNOWN_IS_NOT_A_VALID_TYPE =
+ "UNKNOWN 不是有效的类型";
+ public static final String CANNOT_CAST_TO =
+ "无法将 %s 转换为 %s";
+ public static final String WINDOW_FRAME_START_CANNOT_BE_UNBOUNDED_FOLLOWING =
+ "窗口帧起始位置不能为 UNBOUNDED FOLLOWING";
+ public static final String WINDOW_FRAME_END_CANNOT_BE_UNBOUNDED_PRECEDING =
+ "窗口帧结束位置不能为 UNBOUNDED PRECEDING";
+ public static final String UNSUPPORTED_FRAME_TYPE =
+ "不支持的帧类型:";
+ public static final String COLUMNS_ONLY_SUPPORT_TO_BE_USED_IN_SELECT =
+ "Columns 仅支持在 SELECT 和 WHERE 子句中使用";
+ public static final String VS =
+ "%s:%s 与 %s";
+ public static final String UNKNOWN_PATTERN_RECOGNITION_FUNCTION =
+ "未知的模式识别函数:";
+ public static final String CANNOT_ACCESS_PREANALYZED_TYPES =
+ "无法访问预分析类型";
+ public static final String CANNOT_ACCESS_RESOLVED_WINDOWS =
+ "无法访问已解析的窗口";
+ public static final String REFERENCE_IS_AMBIGUOUS =
+ "引用 '%s' 有歧义";
+ public static final String COLUMN_IS_AMBIGUOUS =
+ "列 '%s' 有歧义";
+ public static final String UNSUPPORTED_NODE_TYPE =
+ "不支持的节点类型:";
+ public static final String CREATE_DATABASE_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "暂不支持 Create Database 语句。";
+ public static final String ALTER_DATABASE_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "暂不支持 Alter Database 语句。";
+ public static final String DROP_DATABASE_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "暂不支持 Drop Database 语句。";
+ public static final String SHOW_DATABASE_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "暂不支持 Show Database 语句。";
+ public static final String SHOW_TABLES_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "暂不支持 Show Tables 语句。";
+ public static final String DESCRIBE_TABLE_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "暂不支持 Describe Table 语句。";
+ public static final String ADD_COLUMN_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "暂不支持 Add Column 语句。";
+ public static final String CREATE_INDEX_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "暂不支持 Create Index 语句。";
+ public static final String DROP_INDEX_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "暂不支持 Drop Index 语句。";
+ public static final String SHOW_INDEX_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "暂不支持 Show Index 语句。";
+ public static final String UPDATE_CAN_ONLY_SPECIFY_ATTRIBUTE_COLUMNS =
+ "UPDATE 只能指定属性列。";
+ public static final String DROP_FUNCTION_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "暂不支持 Drop Function 语句。";
+ public static final String SHOW_FUNCTION_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "暂不支持 Show Function 语句。";
+ public static final String USE_STATEMENT_IS_NOT_SUPPORTED_YET =
+ "暂不支持 USE 语句。";
+ public static final String TARGET_TABLE_SCHEMA_MISSES_A_TIME_CATEGORY_COLUMN =
+ "目标表结构缺少 TIME 类别的列";
+ public static final String TIME_COLUMN_CAN_NOT_BE_NULL =
+ "时间列不能为 null";
+ public static final String NO_FIELD_COLUMN_PRESENT =
+ "没有 Field 列";
+ public static final String FETCH_FIRST_WITH_TIES_CLAUSE_REQUIRES_ORDER_BY =
+ "FETCH FIRST WITH TIES 子句需要 ORDER BY";
+ public static final String RECURSIVE_CTE_IS_NOT_SUPPORTED_YET =
+ "暂不支持递归 CTE。";
+ public static final String MISSING_COLUMN_ALIASES_IN_RECURSIVE_WITH_QUERY =
+ "递归 WITH 查询中缺少列别名";
+ public static final String NESTED_RECURSIVE_WITH_QUERY =
+ "嵌套的递归 WITH 查询";
+ public static final String THERE_IS_AT_LEAST_ONE_RESULT_OF_EXPANDED =
+ "至少存在一个展开后的结果";
+ public static final String UNSUPPORTED_EXPRESSION_2 =
+ "不支持的表达式:";
+ public static final String RELATION_NOT_FOUND_OR_NOT_ALLOWED =
+ "关系未找到或不允许访问";
+ public static final String COLUMNS_NOT_ALLOWED_FOR_RELATION_THAT_HAS_NO =
+ "无列的关系不允许使用 COLUMNS";
+ public static final String UNKNOWN_COLUMNNAME =
+ "未知的列名:";
+ public static final String INVALID_REGEX =
+ "无效的正则表达式 '%s'";
+ public static final String COLUMNS_ARE_NOT_SUPPORTED_IN_DEREFERENCEEXPRESSION =
+ "DereferenceExpression 中不支持 Columns";
+ public static final String SELECT_NOT_ALLOWED_FROM_RELATION_THAT_HAS_NO =
+ "不允许对无列的关系使用 SELECT *";
+ public static final String COLUMN_ALIASES_NOT_SUPPORTED =
+ "不支持列别名";
+ public static final String SELECT_NOT_ALLOWED_IN_QUERIES_WITHOUT_FROM_CLAUSE =
+ "没有 FROM 子句的查询中不允许使用 SELECT *";
+ public static final String MULTIPLE_DATE_BIN_GAPFILL_CALLS_NOT_ALLOWED =
+ "不允许多次调用 date_bin_gapfill";
+ public static final String PATTERN_RECOGNITION_OUTPUT_TABLE_HAS_NO_COLUMNS =
+ "模式识别输出表没有列";
+ public static final String NATURAL_JOIN_NOT_SUPPORTED =
+ "不支持自然连接";
+ public static final String UNKNOWN_FILL_METHOD =
+ "未知的填充方法:";
+ public static final String RECURSIVE_REFERENCE_IN_INTERSECT_ALL =
+ "INTERSECT ALL 中存在递归引用";
+ public static final String TABLE_PROPERTY_2 =
+ "表属性 ";
+ public static final String THE_DATABASE_MUST_BE_SET =
+ "必须设置数据库。";
+ public static final String AT_MOST_ONE_TABLE_ARGUMENT_CAN_BE_PASSED =
+ "最多只能向表函数传递一个表参数";
+ public static final String DUPLICATE_ARGUMENT_NAME =
+ "重复的参数名:%s";
+ public static final String SETTING_MONTHLY_INTERVALS_IS_NOT_SUPPORTED =
+ "不支持设置按月间隔。";
+ public static final String FILTER_PUSH_DOWN_DOES_NOT_SUPPORT_CASE_WHEN =
+ "过滤下推不支持 CASE WHEN";
+ public static final String FILTER_PUSH_DOWN_DOES_NOT_SUPPORT_IF =
+ "过滤下推不支持 IF";
+ public static final String FILTER_PUSH_DOWN_DOES_NOT_SUPPORT_NULLIF =
+ "过滤下推不支持 NULLIF";
+ public static final String EXPRESSION_SHOULD_BE_NUMERIC_ACTUAL_IS =
+ "表达式应为数值类型,实际为 ";
+ public static final String TIMESTAMP_DOES_NOT_SUPPORT_IS_NULL =
+ "TIMESTAMP 不支持 IS NULL";
+ public static final String TIMESTAMP_DOES_NOT_SUPPORT_IS_NOT_NULL =
+ "TIMESTAMP 不支持 IS NOT NULL";
+ public static final String TIMESTAMP_DOES_NOT_SUPPORT_LIKE =
+ "TIMESTAMP 不支持 LIKE";
+ public static final String TIMESTAMP_DOES_NOT_CASE_WHEN =
+ "TIMESTAMP 不支持 CASE WHEN";
+ public static final String TIMESTAMP_DOES_NOT_IF =
+ "TIMESTAMP 不支持 IF";
+ public static final String TIMESTAMP_DOES_NOT_NULLIF =
+ "TIMESTAMP 不支持 NULLIF";
+ public static final String SHOULD_NEVER_RETURN_NULL =
+ "不应返回 null。";
+ public static final String IS_NULL_EXPRESSION_CAN_T_BE_PUSHED_DOWN =
+ "IS NULL 表达式不能下推";
+ public static final String NOT_EXPRESSION_CAN_T_BE_PUSHED_DOWN =
+ "NOT 表达式不能下推";
+ public static final String UNSUPPORTED_OPERATOR_2 =
+ "不支持的运算符 ";
+ public static final String THE_LOGICAL_EXPRESSION_HAS_NO_BOUNDED_COLUMN =
+ "逻辑表达式没有绑定的列";
+ public static final String THE_NOT_EXPRESSION_HAS_NO_BOUNDED_COLUMN =
+ "NOT 表达式没有绑定的列";
+
+ // --- Plan / Relational / Metadata ---
+
+ public static final String TOO_MANY_DOTS_IN_TABLE_NAME =
+ "表名中含有过多的点号:%s";
+ public static final String OBJECT_TYPE_IS_NOT_SUPPORTED_AS_RETURN_TYPE =
+ "不支持 OBJECT 类型作为返回值类型";
+ public static final String INVALID_FUNCTION_PARAMETERS =
+ "无效的函数参数:";
+ public static final String UNKNOWN_FUNCTION =
+ "未知的函数:";
+ public static final String THE_OBJECT_TYPE_COLUMN_IS_NOT_SUPPORTED =
+ "不支持 object 类型的列。";
+ public static final String NO_COLUMN_OTHER_THAN_TIME_PRESENT_PLEASE_CHECK =
+ "除时间列外没有其他列,请检查请求";
+ public static final String NO_FIELD_COLUMN_PRESENT_PLEASE_CHECK_THE_REQUEST =
+ "没有 Field 列,请检查请求";
+ public static final String AUTO_ADD_TABLE_COLUMN_FAILED =
+ "自动添加表列失败。";
+ public static final String TAG_COLUMN_ONLY_SUPPORT_DATA_TYPE_STRING =
+ "标签列仅支持 STRING 数据类型。";
+ public static final String ATTRIBUTE_COLUMN_ONLY_SUPPORT_DATA_TYPE_STRING =
+ "属性列仅支持 STRING 数据类型。";
+
+ // --- Plan / Relational / Planner ---
+
+ public static final String FAIL_TO_MATERIALIZE_CTE_BECAUSE =
+ "物化 CTE 失败,原因:{}";
+ public static final String BOTH_OBJECT_MUST_BE_TYPE_OF_NUMBER =
+ "两个对象都必须为数值类型";
+ public static final String NOT_YET_IMPLEMENTED =
+ "尚未实现:";
+ public static final String UNSUPPORTED_TYPE_IN_GENERICLITERAL =
+ "GenericLiteral 中不支持的类型:";
+ public static final String CANNOT_COERCE_TYPE =
+ "无法将类型 ";
+ public static final String UNKNOWN_TYPE_2 =
+ "未知的类型:";
+ public static final String NODE_MUST_BE_A_LITERAL =
+ "节点必须为 Literal";
+ public static final String UNHANDLED_LITERAL_TYPE =
+ "未处理的字面量类型:";
+ public static final String NO_LITERAL_FORM_FOR_TYPE =
+ "类型 %s 没有字面量形式";
+ public static final String WINDOW_FRAME_OFFSET_VALUE_MUST_NOT_BE_NEGATIVE =
+ "窗口帧偏移值不能为负数或 null";
+ public static final String UNEXPECTED_TYPE =
+ "意外的类型:";
+ public static final String FROM_CLAUSE_MUST_NOT_BE_EMPTY =
+ "FROM 子句不能为空";
+ public static final String COERCION_RESULT_IN_ANALYSIS_ONLY_CAN_BE_EMPTY =
+ "分析中的类型转换结果只能为空";
+ public static final String UNEXPECTED_RECURSIVE_CTE =
+ "意外的递归 CTE";
+ public static final String TABLE =
+ "表 ";
+ public static final String UNEXPECTED_JOIN_TYPE =
+ "意外的 Join 类型:";
+ public static final String UNEXPECTED_ROWS_PER_MATCH =
+ "意外的 rowsPerMatch:";
+ public static final String UNEXPECTED_SKIP_TO_POSITION =
+ "意外的 skipTo 位置:";
+ public static final String VALUES_IS_NOT_SUPPORTED_IN_CURRENT_VERSION =
+ "当前版本不支持 Values。";
+ public static final String SUBSCRIPT_IS_NOT_SUPPORTED_IN_CURRENT_VERSION =
+ "当前版本不支持下标操作";
+
+ // --- Plan / Relational / Planner / IR ---
+
+ public static final String ILLEGAL_STATE_IN_VISITLOGICALEXPRESSION =
+ "visitLogicalExpression 中的非法状态";
+ public static final String UNSUPPORTED_LOGICALEXPRESSION_OPERATOR =
+ "不支持的逻辑表达式运算符";
+ public static final String UNEXPECTED_EXPRESSION =
+ "意外的表达式:";
+ public static final String FAILED_TO_FETCH_SUBQUERY_RESULT =
+ "获取子查询结果失败。";
+
+ // --- Plan / Relational / Planner / Iterative ---
+
+ public static final String UNEXPECTED_PATTERN =
+ "意外的 Pattern:";
+ public static final String TABLE_FUNCTION_DOES_NOT_SUPPORT_MULTIPLE_SOURCE_NOW =
+ "表函数目前不支持多个数据源。";
+
+ // --- Plan / Relational / Planner / Node ---
+
+ public static final String SHOULD_NEVER_PUSH_DOWN_LIMIT_TO_AGGREGATIONTABLESCANNODE =
+ "不应将 limit 下推到 AggregationTableScanNode。";
+ public static final String SHOULD_NEVER_PUSH_DOWN_OFFSET_TO_AGGREGATIONTABLESCANNODE =
+ "不应将 offset 下推到 AggregationTableScanNode。";
+ public static final String NOT_SUPPORTED_YET =
+ "暂不支持。";
+ public static final String COPYTONODE_SHOULD_NOT_BE_SERIALIZED =
+ "CopyToNode 不应被序列化";
+
+ // --- Plan / Relational / Planner / Optimizations ---
+
+ public static final String LIST_PLANNODE_SIZE_SHOULD_1_BUT_NOW_IS =
+ "List.size 应 >= 1,但当前为 0";
+ public static final String UNSUPPORTED_JOIN_TYPE =
+ "不支持的 Join 类型:";
+ public static final String TOPK_IS_NOT_SUPPORTED_IN_CORRELATED_SUBQUERY_FOR =
+ "目前不支持在关联子查询中使用 TopK";
+ public static final String UNEXPECTED_VALUE =
+ "意外的值:";
+
+ // --- Plan / Relational / Security ---
+
+ public static final String USER_NOT_EXISTS =
+ "用户不存在";
+ public static final String ONLY_THE_SUPERUSER_CAN_ALTER_HIM_HERSELF =
+ "仅超级用户可以修改自身信息。";
+ public static final String DATABASE =
+ "数据库 ";
+ public static final String TABLE_2 =
+ "表 ";
+ public static final String UNEXPECTED_VALUE_2 =
+ "意外的值:";
+ public static final String EACH_OPERATION_SHOULD_HAVE_PERMISSION_CHECK =
+ "每个操作都应进行权限检查。";
+ public static final String UNKNOWN_AUTHORTYPE =
+ "未知的授权类型:";
+
+ // --- Plan / Relational / SQL ---
+
+ public static final String UNKNOWN_AUTHORTYPE_2 =
+ "未知的授权类型:";
+ public static final String THE_RENAMING_FOR_BASE_TABLE_COLUMN_IS_CURRENTLY =
+ "目前不支持重命名基表列";
+ public static final String THE_RENAMING_FOR_BASE_TABLE_IS_CURRENTLY_UNSUPPORTED =
+ "目前不支持重命名基表";
+ public static final String UNEXPECTED_EXPRESSION_2 =
+ "意外的表达式:";
+ public static final String THE_TABLE_SHOULD_ONLY_HAVE_ONE_COLUMN_FOUND =
+ "表中应只有一个 TIME 类别的列";
+ public static final String TIMESTAMP_CANNOT_BE_NULL =
+ "时间戳不能为 null";
+ public static final String SHOW_REGION_ID_IS_NOT_SUPPORTED_YET =
+ "暂不支持 SHOW REGION ID。";
+ public static final String SHOW_TIME_SLOT_IS_NOT_SUPPORTED_YET =
+ "暂不支持 SHOW TIME SLOT。";
+ public static final String COUNT_TIME_SLOT_IS_NOT_SUPPORTED_YET =
+ "暂不支持 COUNT TIME SLOT。";
+ public static final String SHOW_SERIES_SLOT_IS_NOT_SUPPORTED_YET =
+ "暂不支持 SHOW SERIES SLOT。";
+ public static final String MISSING_LIMIT_VALUE =
+ "缺少 LIMIT 值";
+ public static final String DATABASE_IS_NOT_SET_YET =
+ "尚未设置数据库。";
+ public static final String AUTHOR_STATEMENT_PARSER_ERROR =
+ "授权语句解析错误";
+ public static final String UNSUPPORTED_SET_OPERATION =
+ "不支持的集合操作:";
+ public static final String UNSUPPORTED_JOIN_CRITERIA =
+ "不支持的 join 条件";
+ public static final String TOLERANCE_IN_ASOF_JOIN_ONLY_SUPPORTS_INNER_TYPE =
+ "ASOF JOIN 中的容差目前仅支持 INNER 类型";
+ public static final String UNSUPPORTED_SIGN =
+ "不支持的符号:";
+ public static final String UNSUPPORTED_WINDOW_FRAME_TYPE =
+ "不支持的窗口帧类型:";
+ public static final String UNSUPPORTED_BOUNDED_TYPE =
+ "不支持的边界类型:";
+ public static final String UNSUPPORTED_TRIM_SPECIFICATION =
+ "不支持的 TRIM 规范:";
+ public static final String TARGET_DATA_IN_SQL_SHOULD_BE_SET_IN =
+ "SQL 中的目标数据应在 CREATE MODEL 中设置";
+ public static final String THE_TREE_MODEL_DATABASE_SHALL_NOT_BE_SPECIFIED =
+ "表模型中不应指定树模型数据库。";
+ public static final String UNSUPPORTED_SPECIAL_FUNCTION =
+ "不支持的特殊函数:";
+ public static final String UNSUPPORTED_ORDERING =
+ "不支持的排序方式:";
+ public static final String UNSUPPORTED_QUANTIFIER =
+ "不支持的量词:";
+ public static final String NOT_YET_IMPLEMENTED_WILDCARD_TRANSITION =
+ "尚未实现:通配符转换";
+ public static final String UNKNOWN_TABLE_ELEMENT =
+ "未知的表元素:";
+
+ // --- Plan / Scheduler ---
+
+ public static final String ERROR_HAPPENED_WHILE_FETCHING_QUERY_STATE =
+ "获取查询状态时发生错误";
+ public static final String INTERRUPTED_WHEN_DISPATCHING_READ_ASYNC =
+ "异步分发读取操作时被中断";
+ public static final String INTERRUPTED_WHEN_DISPATCHING_WRITE_ASYNC =
+ "异步分发写入操作时被中断";
+ public static final String DESERIALIZE_CONSENSUSGROUPID_FAILED =
+ "反序列化 ConsensusGroupId 失败。";
+ public static final String CAN_T_CONNECT_TO_NODE =
+ "无法连接到节点 {}";
+ public static final String CANCEL_QUERY_ON_NODE_FAILED =
+ "在节点 {} 上取消查询 {} 失败。";
+ public static final String CANNOT_DISPATCH_FI_FOR_LOAD_OPERATION =
+ "无法为加载操作分发 FI";
+ public static final String RECEIVE_LOAD_NODE_FROM_UUID =
+ "接收来自 uuid {} 的加载节点。";
+ public static final String LOAD_TSFILE_NODE_ERROR =
+ "加载 TsFile 节点 {} 出错。";
+ public static final String SERIALIZE_TSFILERESOURCE_ERROR =
+ "序列化 TsFileResource {} 出错。";
+ public static final String LOAD_SKIP_TSFILE_BECAUSE_IT_HAS_NO_DATA =
+ "跳过加载 TsFile {},因为没有数据。";
+ public static final String LOADTSFILESCHEDULER_LOADS_TSFILE_ERROR =
+ "LoadTsFileScheduler 加载 TsFile {} 出错";
+ public static final String INTERRUPT_OR_EXECUTION_ERROR =
+ "中断或执行错误。";
+ public static final String START_DISPATCHING_LOAD_COMMAND_FOR_UUID =
+ "开始分发 uuid {} 的加载命令";
+ public static final String EXCEPTION_OCCURRED_DURING_SECOND_PHASE_OF_LOADING_TSFILE =
+ "加载 TsFile {} 的第二阶段发生异常。";
+ public static final String START_LOAD_TSFILE_LOCALLY =
+ "开始本地加载 TsFile {}。";
+ public static final String LOAD_ALL_FAILED_TSFILES_ARE_CONVERTED_TO_TABLETS =
+ "加载:所有失败的 TsFile 已转换为 Tablet 并插入。";
+
+ // --- Plan / Statement ---
+
+ public static final String METHOD_NOT_IMPLEMENTED_YET =
+ "方法尚未实现";
+ public static final String INSERTION_CONTAINS_DUPLICATED_MEASUREMENT =
+ "插入操作包含重复的测量值:";
+ public static final String UNSUPPORTED_DATA_TYPE =
+ "不支持的数据类型:";
+ public static final String FAILED_TO_CONVERT_INSERTTABLETSTATEMENT_TO_TABLET =
+ "将 InsertTabletStatement 转换为 Tablet 失败";
+ public static final String MODEL_INFERENCE_DOES_NOT_SUPPORT_ALIGN_BY_DEVICE =
+ "模型推理目前不支持按设备对齐。";
+ public static final String MODEL_INFERENCE_DOES_NOT_SUPPORT_SELECT_INTO_NOW =
+ "模型推理目前不支持 SELECT INTO。";
+ public static final String GROUP_BY_CLAUSES_DOESN_T_SUPPORT_GROUP_BY =
+ "GROUP BY 子句目前不支持 GROUP BY LEVEL。";
+ public static final String GROUP_BY_LEVEL_DOES_NOT_SUPPORT_ALIGN_BY =
+ "GROUP BY LEVEL 目前不支持按设备对齐。";
+ public static final String GROUP_BY_TAGS_DOES_NOT_SUPPORT_ALIGN_BY =
+ "GROUP BY TAGS 目前不支持按设备对齐。";
+ public static final String HAVING_CLAUSE_IS_NOT_SUPPORTED_YET_IN_GROUP =
+ "GROUP BY TAGS 查询中暂不支持 HAVING 子句";
+ public static final String OUTPUT_COLUMN_IS_DUPLICATED_WITH_THE_TAG_KEY =
+ "输出列与标签键重复:";
+ public static final String LIMIT_OR_SLIMIT_ARE_NOT_SUPPORTED_YET_IN =
+ "GROUP BY TAGS 中暂不支持 LIMIT 或 SLIMIT";
+ public static final String EXPRESSION_OF_HAVING_CLAUSE_MUST_TO_BE_AN =
+ "HAVING 子句的表达式必须是聚合函数";
+ public static final String WHEN_HAVING_USED_WITH_GROUPBYLEVEL =
+ "当 HAVING 与 GroupByLevel 一起使用时:";
+ public static final String ALIGN_BY_DEVICE =
+ "按设备对齐:";
+ public static final String SORTING_BY_TIMESERIES_IS_ONLY_SUPPORTED_IN_LAST =
+ "按时间序列排序仅在 LAST 查询中支持。";
+ public static final String LAST_QUERY_DOESN_T_SUPPORT_ALIGN_BY_DEVICE =
+ "LAST 查询不支持按设备对齐。";
+ public static final String LAST_QUERIES_CAN_ONLY_BE_APPLIED_ON_RAW =
+ "LAST 查询只能应用于原始时间序列。";
+ public static final String SLIMIT_AND_SOFFSET_CAN_NOT_BE_USED_IN =
+ "LAST 查询中不能使用 SLIMIT 和 SOFFSET。";
+ public static final String SELECT_INTO_SLIMIT_CLAUSES_ARE_NOT_SUPPORTED =
+ "SELECT INTO:不支持 SLIMIT 子句。";
+ public static final String SELECT_INTO_SOFFSET_CLAUSES_ARE_NOT_SUPPORTED =
+ "SELECT INTO:不支持 SOFFSET 子句。";
+ public static final String SELECT_INTO_LAST_CLAUSES_ARE_NOT_SUPPORTED =
+ "SELECT INTO:不支持 LAST 子句。";
+ public static final String SELECT_INTO_GROUP_BY_TAGS_CLAUSE_ARE_NOT =
+ "SELECT INTO:不支持 GROUP BY TAGS 子句。";
+ public static final String UNKNOWN_LITERAL_TYPE =
+ "未知的字面量类型:%s";
+ public static final String ILLEGAL_PATH =
+ "非法路径:{}";
+ public static final String CQ_THE_START_TIME_OFFSET_SHOULD_BE_GREATER =
+ "连续查询:起始时间偏移量应大于 0。";
+ public static final String CQ_THE_END_TIME_OFFSET_SHOULD_BE_GREATER =
+ "连续查询:结束时间偏移量应大于或等于 0。";
+ public static final String CQ_THE_QUERY_BODY_MISSES_AN_INTO_CLAUSE =
+ "连续查询:查询体缺少 INTO 子句。";
+ public static final String CQ_SPECIFYING_TIME_FILTERS_IN_THE_QUERY_BODY =
+ "连续查询:禁止在查询体中指定时间过滤器。";
+ public static final String IS_NOT_A_LEGAL_PATH =
+ "{} 不是合法路径";
+
+ // --- Plan / Tree Planner ---
+
+ public static final String VALID_TREEDEVICEVIEWSCANNODE_IS_NOT_EXPECTED_HERE =
+ "此处不应出现有效的 TreeDeviceViewScanNode。";
+ public static final String MULTIPLE_COLUMNS_WITH_TIME_CATEGORY_FOUND =
+ "发现多个 TIME 类别的列";
+ public static final String MISSING_TIME_CATEGORY_COLUMN =
+ "缺少 TIME 类别的列";
+ public static final String UNKNOWN_SQL_DIALECT =
+ "未知的 SQL 方言:%s";
+ public static final String UNEXPECTED_PATH_TYPE_2 =
+ "意外的路径类型";
+ public static final String SHOULD_CALL_THE_CONCRETE_VISITXX_METHOD =
+ "应调用具体的 visitXX() 方法";
+ public static final String OUTPUTCOLUMTYPES_SHOULD_NOT_BE_NULL_EMPTY =
+ "OutputColumTypes 不应为 null 或空";
+ public static final String UNKNOWN_FILL_POLICY =
+ "未知的填充策略:";
+ public static final String FILTER_CAN_NOT_CONTAIN_NON_MAPPABLE_UDF =
+ "过滤器不能包含非映射型 UDF";
+ public static final String GROUPBYVARIATIONEXPRESSION_CAN_T_BE_NULL =
+ "groupByVariationExpression 不能为 null";
+ public static final String GROUPBYCONDITIONEXPRESSION_CAN_T_BE_NULL =
+ "groupByConditionExpression 不能为 null";
+ public static final String GROUPBYCOUNTEXPRESSION_CAN_T_BE_NULL =
+ "groupByCountExpression 不能为 null";
+ public static final String UNKNOWN_NODE_TYPE =
+ "未知的节点类型:";
+ public static final String UNSUPPORTED_COLUMN_GENERATOR_TYPE =
+ "不支持的列生成器类型:";
+ public static final String ROOT_NODE_MUST_RETURN_ONLY_ONE =
+ "根节点必须只返回一个结果";
+ public static final String SINGLEDEVICEVIEWNODE_HAVE_ONLY_ONE_CHILD =
+ "SingleDeviceViewNode 只有一个子节点";
+ public static final String AVAILABLE_REPLICAS =
+ "可用副本:{}";
+ public static final String UNEXPECTED_ERROR_OCCURS_WHEN_SERIALIZING_THIS_FRAGMENTINSTANCE =
+ "序列化此 FragmentInstance 时发生意外错误。";
+ public static final String INVALID_NODE_TYPE =
+ "无效的节点类型:";
+ public static final String THIS_LASTQUERYSCANNODE_IS_DEPRECATED =
+ "此 LastQueryScanNode 已弃用";
+ public static final String EXPLAINANALYZENODE_SHOULD_NOT_BE_SERIALIZED =
+ "ExplainAnalyzeNode 不应被序列化";
+ public static final String EXPLAINANALYZENODE_SHOULD_NOT_BE_DESERIALIZED =
+ "ExplainAnalyzeNode 不应被反序列化";
+ public static final String CLONE_OF_LOAD_SINGLE_TSFILE_IS_NOT_IMPLEMENTED =
+ "单 TsFile 加载的 clone 未实现";
+ public static final String SPLIT_LOAD_SINGLE_TSFILE_IS_NOT_IMPLEMENTED =
+ "单 TsFile 加载的 split 未实现";
+ public static final String DELETE_AFTER_LOADING_ERROR =
+ "加载后删除 {} 出错。";
+ public static final String CLONE_OF_LOAD_TSFILE_IS_NOT_IMPLEMENTED =
+ "TsFile 加载的 clone 未实现";
+ public static final String LOADTSFILE_STATEMENT_IS_NULL_DURING_TABLE_MODEL_SPLIT =
+ "表模型拆分期间 LoadTsFile 语句为 null。";
+ public static final String CLONE_OF_LOAD_PIECE_TSFILE_IS_NOT_IMPLEMENTED =
+ "TsFile 分片加载的 clone 未实现";
+ public static final String SERIALIZE_TO_BYTEBUFFER_ERROR =
+ "序列化到 ByteBuffer 出错。";
+ public static final String SPLIT_LOAD_PIECE_TSFILE_IS_NOT_IMPLEMENTED =
+ "TsFile 分片加载的 split 未实现";
+ public static final String DESERIALIZE_ERROR =
+ "反序列化 {} 出错。";
+ public static final String INVALID_LENGTH_FOR_SLICING =
+ "无效的切片长度:";
+ public static final String CANNOT_DESERIALIZE_DEVICESSCHEMASCANNODE =
+ "无法反序列化 DevicesSchemaScanNode";
+ public static final String CANNOT_DESERIALIZE_TIMESERIESSCHEMASCANNODE =
+ "无法反序列化 TimeSeriesSchemaScanNode";
+ public static final String CLONE_OF_ALTERTIMESERIESNODE_IS_NOT_IMPLEMENTED =
+ "AlterTimeSeriesNode 的 clone 未实现";
+ public static final String CAN_NOT_DESERIALIZE_ALTERTIMESERIESNODE =
+ "无法反序列化 AlterTimeSeriesNode";
+ public static final String CLONE_OF_CREATEALIGNEDTIMESERIESNODE_IS_NOT_IMPLEMENTED =
+ "CreateAlignedTimeSeriesNode 的 clone 未实现";
+ public static final String CAN_NOT_DESERIALIZE_CREATEALIGNEDTIMESERIESNODE =
+ "无法反序列化 CreateAlignedTimeSeriesNode";
+ public static final String CLONE_OF_CREATEMULTITIMESERIESNODE_IS_NOT_IMPLEMENTED =
+ "CreateMultiTimeSeriesNode 的 clone 未实现";
+ public static final String CLONE_OF_CREATETIMESERIESNODE_IS_NOT_IMPLEMENTED =
+ "CreateTimeSeriesNode 的 clone 未实现";
+ public static final String CANNOT_DESERIALIZE_CREATETIMESERIESNODE =
+ "无法反序列化 CreateTimeSeriesNode";
+ public static final String CLONE_OF_INTERNALCREATETIMESERIESNODE_IS_NOT_IMPLEMENTED =
+ "InternalCreateTimeSeriesNode 的 clone 未实现";
+ public static final String CLONE_OF_ALTERLOGICALNODE_IS_NOT_IMPLEMENTED =
+ "AlterLogicalNode 的 clone 未实现";
+ public static final String UNEXPECTED_DESCRIPTORTYPE =
+ "意外的 descriptorType:";
+ public static final String NO_CHILD_IS_ALLOWED_FOR_ALIGNEDSERIESSCANNODE =
+ "AlignedSeriesScanNode 不允许有子节点";
+ public static final String DEVICEREGIONSCANNODE_HAS_NO_CHILDREN =
+ "DeviceRegionScanNode 没有子节点";
+ public static final String NO_CHILD_IS_ALLOWED_FOR_SERIESSCANNODE =
+ "SeriesScanNode 不允许有子节点";
+ public static final String NO_CHILD_IS_ALLOWED_FOR_SERIESAGGREGATESCANNODE =
+ "SeriesAggregateScanNode 不允许有子节点";
+ public static final String NO_CHILD_IS_ALLOWED_FOR_SERIESSCANSOURCENODE =
+ "SeriesScanSourceNode 不允许有子节点";
+ public static final String NO_CHILD_IS_ALLOWED_FOR_SHOWDISKUSAGENODE =
+ "ShowDiskUsageNode 不允许有子节点";
+ public static final String NO_CHILD_IS_ALLOWED_FOR_SHOWQUERIESNODE =
+ "ShowQueriesNode 不允许有子节点";
+ public static final String TIMESERIESREGIONSCANNODE_DOES_NOT_SUPPORT_ADDCHILD =
+ "TimeseriesRegionScanNode 不支持 addChild";
+ public static final String NOT_SUPPORTED =
+ "不支持。";
+ public static final String CANNOT_DESERIALIZE_INSERTROWNODE =
+ "无法反序列化 InsertRowNode";
+ public static final String UNEXPECTED_ERROR_OCCURS_WHEN_SERIALIZING_DELETEDATANODE =
+ "序列化 deleteDataNode 时发生意外错误。";
+ public static final String DELETEDATANODES_IS_EMPTY =
+ "deleteDataNodes 为空";
+ public static final String INSERTMULTITABLETSNODE_NOT_SUPPORT_MERGE =
+ "InsertMultiTabletsNode 不支持合并";
+ public static final String CLONE_OF_INSERT_IS_NOT_IMPLEMENTED =
+ "Insert 的 clone 未实现";
+ public static final String INSERTNODES_SHOULD_NEVER_BE_EMPTY =
+ "insertNodes 不应为空";
+ public static final String SERIALIZEATTRIBUTES_OF_INSERTNODE_IS_NOT_IMPLEMENTED =
+ "InsertNode 的 serializeAttributes 未实现";
+ public static final String INSERTROWSOFONEDEVICENODE_NOT_SUPPORT_MERGE =
+ "InsertRowsOfOneDeviceNode 不支持合并";
+ public static final String CANNOT_DESERIALIZE_INSERTROWSOFONEDEVICENODE =
+ "无法反序列化 InsertRowsOfOneDeviceNode";
+ public static final String CANNOT_DESERIALIZE_INSERTTABLETNODE =
+ "无法反序列化 InsertTabletNode";
+ public static final String MERGE_IS_NOT_SUPPORTED =
+ "不支持合并";
+ public static final String FAILED_TO_SERIALIZE_MODENTRY_TO_WAL =
+ "将 modEntry 序列化到 WAL 失败";
+ public static final String ALL_DATABASE_NAME_NEED_TO_BE_SAME =
+ "所有数据库名称必须相同";
+ public static final String INVALID_AGGREGATIONSTEP_TYPE =
+ "无效的 AggregationStep 类型:";
+
+ // --- Transformation ---
+
+ public static final String SIZE_IS_0 =
+ "大小为 0";
+ public static final String CAN_NOT_CALL_NEXT_ON_EMPTYROWITERATOR =
+ "不能在 EmptyRowIterator 上调用 next";
+ public static final String THE_EXPRESSION_CANNOT_BE_NULL =
+ "表达式不能为 null";
+ public static final String UNSUPPORTED_TYPE =
+ "不支持的类型:";
+ public static final String UNSUPPORTED_DATA_TYPE_2 =
+ "不支持的数据类型:";
+ public static final String UNSUPPORTED_DATA_TYPE_3 =
+ "不支持的数据类型:";
+ public static final String ERROR_OCCURRED_DURING_INFERRING_UDF_DATA_TYPE =
+ "推断 UDF 数据类型时发生错误";
+ public static final String ERROR_OCCURRED_DURING_GETTING_UDF_ACCESS_STRATEGY =
+ "获取 UDF 访问策略时发生错误";
+ public static final String TRANSFORMUTILS_SHOULD_NOT_BE_INSTANTIATED =
+ "TransformUtils 不应被实例化。";
+
+ // --- Execution / Exchange (additional) ---
+
+ public static final String ACK_TSBLOCK_FAILED =
+ "确认 TsBlock [{}, {}) 失败。";
+ public static final String CLOSE_CHANNEL_OF_SHUFFLESINKHANDLE_FAILED =
+ "关闭 ShuffleSinkHandle {} 的通道(索引 {})失败。";
+ public static final String SHUFFLESINKHANDLE_ALREADY_IN_MAP =
+ "ShuffleSinkHandle ";
+ public static final String IS_IN_THE_MAP =
+ " 已存在于映射中。";
+ public static final String SOURCE_HANDLE_FOR_PLAN_NODE =
+ "计划节点 ";
+ public static final String OF =
+ " 的 ";
+ public static final String EXISTS =
+ " 的 Source handle 已存在。";
+ public static final String FAILED_TO_PULL_TSBLOCKS =
+ "{} 从 SinkHandle {} 的通道索引 {} 拉取 TsBlocks [{}] 到 [{}] 失败,";
+ public static final String FAILED_TO_GET_DATA_BLOCK =
+ "获取数据块 [{}, {}) 失败,尝试次数:{}";
+ public static final String FAILED_TO_SEND_ACK_DATA_BLOCK_EVENT =
+ "发送数据块确认事件 [{}, {}) 失败,尝试次数:{}";
+ public static final String SEND_CLOSE_SINK_CHANNEL_EVENT_FAILED =
+ "[发送关闭SinkChannel事件] 到 [ShuffleSinkHandle: {}, 索引: {}] 失败。";
+ public static final String LOCAL_SINK_CHANNEL_STATE_IS =
+ "LocalSinkChannel 状态为 .";
+ public static final String SCH_LISTENER_ON_FINISH =
+ "[ScH监听器完成]";
+ public static final String SCH_LISTENER_ALREADY_RELEASED =
+ "[ScH监听器已释放]";
+ public static final String SCH_LISTENER_ON_ABORT =
+ "[ScH监听器中止]";
+ public static final String SHUFFLE_SINK_HANDLE_LISTENER_ON_FINISH =
+ "[ShuffleSinkHandle监听器完成]";
+ public static final String SHUFFLE_SINK_HANDLE_LISTENER_ON_END_OF_TSBLOCKS =
+ "[ShuffleSinkHandle监听器TsBlock结束]";
+ public static final String SHUFFLE_SINK_HANDLE_LISTENER_ON_ABORT =
+ "[ShuffleSinkHandle监听器中止]";
+ public static final String SKH_LISTENER_ON_FINISH =
+ "[SkH监听器完成]";
+ public static final String SKH_LISTENER_ON_END_OF_TSBLOCKS =
+ "[SkH监听器TsBlock结束]";
+ public static final String SKH_LISTENER_ON_ABORT =
+ "[SkH监听器中止]";
+ public static final String CLOSE_SHUFFLE_SINK_HANDLE =
+ "关闭 ShuffleSinkHandle: {}";
+ public static final String GET_SHARED_TSBLOCK_QUEUE_FROM_LOCAL_SOURCE_HANDLE =
+ "从本地源句柄获取 SharedTsBlockQueue";
+ public static final String CREATE_SHARED_TSBLOCK_QUEUE =
+ "创建 SharedTsBlockQueue";
+ public static final String CREATE_LOCAL_SINK_HANDLE_FOR =
+ "为 {} 创建本地 Sink 句柄";
+ public static final String CREATE_LOCAL_SOURCE_HANDLE_FOR =
+ "为 {} 创建本地 Source 句柄";
+ public static final String GET_SHARED_TSBLOCK_QUEUE_FROM_LOCAL_SINK_HANDLE =
+ "从本地 Sink 句柄获取 SharedTsBlockQueue";
+ public static final String START_FORCE_RELEASE_FI_DATA_EXCHANGE_RESOURCE =
+ "[开始强制释放FI数据交换资源]";
+ public static final String CLOSE_SOURCE_HANDLE =
+ "[关闭SourceHandle] {}";
+ public static final String END_FORCE_RELEASE_FI_DATA_EXCHANGE_RESOURCE =
+ "[结束强制释放FI数据交换资源]";
+ public static final String CREATE_LOCAL_SINK_HANDLE_TO_PLAN_NODE =
+ "为计划节点 {} 的 {} 创建本地 Sink 句柄,目标 {}";
+ public static final String CREATE_SINK_HANDLE_TO_PLAN_NODE =
+ "为计划节点 {} 的 {} 创建 Sink 句柄,目标 {}";
+ public static final String CREATE_LOCAL_SOURCE_HANDLE_FROM =
+ "从 {} 为计划节点 {} 的 {} 创建本地 Source 句柄";
+ public static final String GET_SERIALIZED_TSBLOCK =
+ "[获取序列化TsBlock] TsBlock:{}";
+ public static final String START_ABORT_LOCAL_SOURCE_HANDLE =
+ "[开始中止LocalSourceHandle]";
+ public static final String END_ABORT_LOCAL_SOURCE_HANDLE =
+ "[结束中止LocalSourceHandle]";
+ public static final String START_CLOSE_LOCAL_SOURCE_HANDLE =
+ "[开始关闭LocalSourceHandle]";
+ public static final String END_CLOSE_LOCAL_SOURCE_HANDLE =
+ "[结束关闭LocalSourceHandle]";
+ public static final String START_SET_NO_MORE_TSBLOCKS =
+ "[开始设置无更多TsBlock]";
+ public static final String START_ABORT_SINK_CHANNEL =
+ "[开始中止SinkChannel]";
+ public static final String END_ABORT_SINK_CHANNEL =
+ "[结束中止SinkChannel]";
+ public static final String START_CLOSE_SINK_CHANNEL =
+ "[开始关闭SinkChannel]";
+ public static final String END_CLOSE_SINK_CHANNEL =
+ "[结束关闭SinkChannel]";
+ public static final String ACK_TSBLOCK =
+ "[确认TsBlock] {}.";
+ public static final String NOTIFY_NO_MORE_TSBLOCK =
+ "[通知无更多TsBlock]";
+ public static final String START_SEND_TSBLOCK_ON_LOCAL =
+ "[开始在本地发送TsBlock]";
+ public static final String START_SET_NO_MORE_TSBLOCKS_ON_LOCAL =
+ "[开始在本地设置无更多TsBlock]";
+ public static final String END_SET_NO_MORE_TSBLOCKS_ON_LOCAL =
+ "[结束在本地设置无更多TsBlock]";
+ public static final String START_ABORT_LOCAL_SINK_CHANNEL =
+ "[开始中止LocalSinkChannel]";
+ public static final String END_ABORT_LOCAL_SINK_CHANNEL =
+ "[结束中止LocalSinkChannel]";
+ public static final String START_CLOSE_LOCAL_SINK_CHANNEL =
+ "[开始关闭LocalSinkChannel]";
+ public static final String END_CLOSE_LOCAL_SINK_CHANNEL =
+ "[结束关闭LocalSinkChannel]";
+ public static final String GET_TSBLOCK_FROM_BUFFER =
+ "[从缓冲区获取TsBlock] sequenceId:{}, size:{}";
+ public static final String WAIT_FOR_MORE_TSBLOCK =
+ "[等待更多TsBlock]";
+ public static final String RECEIVE_NO_MORE_TSBLOCK_EVENT =
+ "[收到无更多TsBlock事件]";
+ public static final String END_PULL_TSBLOCKS_FROM_REMOTE =
+ "[结束从远端拉取TsBlock] 数量:{}";
+ public static final String PUT_TSBLOCKS_INTO_BUFFER =
+ "[将TsBlock放入缓冲区]";
+ public static final String SEND_ACK_TSBLOCK =
+ "[发送确认TsBlock] [{}, {}).";
+ public static final String START_ABORT_SHUFFLE_SINK_HANDLE =
+ "[开始中止ShuffleSinkHandle]";
+ public static final String END_ABORT_SHUFFLE_SINK_HANDLE =
+ "[结束中止ShuffleSinkHandle]";
+ public static final String START_CLOSE_SHUFFLE_SINK_HANDLE =
+ "[开始关闭ShuffleSinkHandle]";
+ public static final String END_CLOSE_SHUFFLE_SINK_HANDLE =
+ "[结束关闭ShuffleSinkHandle]";
+ public static final String SIGNAL_NO_MORE_TSBLOCK_ON_QUEUE =
+ "[队列信号无更多TsBlock]";
+ public static final String QUEUE_DESTROYED_WHEN_SET_NO_MORE_TSBLOCKS =
+ "调用 setNoMoreTsBlocks 时队列已被销毁。";
+ public static final String ADD_TSBLOCK =
+ "[添加TsBlock] TsBlock:{}";
+
+ // --- Plan (additional debug) ---
+
+ public static final String QUERY_START_SQL =
+ "[查询开始] sql: {}";
+ public static final String CLEAN_UP_QUERY =
+ "[清理查询]";
+ public static final String RELEASE_QUERY_RESOURCE_STATE =
+ "[释放查询资源] 状态为: {}";
+ public static final String SKIP_EXECUTE =
+ "[跳过执行]";
+ public static final String SKIP_EXECUTE_AFTER_LOGICAL_PLAN =
+ "[逻辑计划后跳过执行]";
+ public static final String RESULT_HANDLE_FINISHED =
+ "[结果句柄已完成]";
+
+ // --- Execution / Operator / Source (additional debug) ---
+
+ public static final String SERIES_SCAN_UTIL_PAGE_READER_IS_MODIFIED =
+ "[SeriesScanUtil] pageReader.isModified() 为 {}";
+ public static final String GET_ALL_SATISFIED_PAGE_DATA_TSBLOCK =
+ "[getAllSatisfiedPageData] TsBlock:{}";
+
+ // --- Plan / Relational / Metadata (additional debug) ---
+
+ public static final String DEVICES_ARE_MISSING =
+ "{} 个设备缺失";
+
+ // --- Execution / Fragment (additional debug) ---
+
+ public static final String STATE_CHANGED_TO =
+ "[状态变更] 变更为 {}";
+ public static final String ENTER_THE_STATE_CHANGE_LISTENER =
+ "进入状态变更监听器";
+
+ // --- Execution / Fragment (additional) ---
+
+ public static final String ERRORS_RELEASING_SINK =
+ "尝试释放 Sink 时发生错误,可能导致资源泄漏。";
+ public static final String ERRORS_DELETING_TMP_FILES =
+ "尝试删除临时文件时发生错误,可能导致资源泄漏。";
+ public static final String ERRORS_DEREGISTER_FI_FROM_MEMORY_POOL =
+ "尝试从内存池注销分片实例时发生错误,可能导致资源泄漏,状态为 {}。";
+ public static final String ERRORS_RELEASING_MEMORY =
+ "尝试释放内存时发生错误,可能导致资源泄漏。";
+ public static final String ERRORS_FINISHING_FI_PROCESS =
+ "尝试完成分片实例流程时发生错误,可能导致资源泄漏。";
+
+ // --- Plan (additional) ---
+
+ public static final String CLEANING_UP_STALE_QUERY =
+ "正在清理过期查询,ID: {},已运行 {} 毫秒,超时时间:{} 毫秒";
+
+ // --- Plan / Tree Planner (additional) ---
+
+ public static final String ERROR_WHEN_READ_OBJECT_FILE =
+ "读取对象文件 {} 时出错。";
+
+ // --- Additional Edge Cases ---
+
+ public static final String JOIN_TYPE_IS_NOT_SUPPORTED =
+ " Join 类型不受支持";
+ public static final String COLON_S_VS_S =
+ ":%s 与 %s";
+ public static final String S_IS_NULL =
+ "%s 为 null";
+ public static final String IS_TOO_LARGE_STACK_OVERFLOW_WHILE_PARSING =
+ " 过大(解析时发生栈溢出)";
+
+ public static final String ENTER_STATE_CHANGE_LISTENER = "进入状态变更监听器";
+
+ private DataNodeQueryMessages() {} // Constants-only i18n message holder; private ctor prevents instantiation.
+}
diff --git a/iotdb-core/datanode/src/main/i18n/zh/org/apache/iotdb/db/i18n/DataNodeSchemaMessages.java b/iotdb-core/datanode/src/main/i18n/zh/org/apache/iotdb/db/i18n/DataNodeSchemaMessages.java
new file mode 100644
index 0000000000000..6d1d7c190ac91
--- /dev/null
+++ b/iotdb-core/datanode/src/main/i18n/zh/org/apache/iotdb/db/i18n/DataNodeSchemaMessages.java
@@ -0,0 +1,599 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.i18n;
+
+public final class DataNodeSchemaMessages {
+
+ // ======================== SchemaEngine 相关消息 ========================
+
+ public static final String USED_SCHEMA_ENGINE_MODE = "使用的 schema 引擎模式:{}。";
+ public static final String SCHEMA_REGION_RECOVERY_ERROR = "SchemaRegion 恢复过程中发生异常";
+ public static final String CLEAR_SCHEMA_REGION_MAP = "已清空 schema region 映射表。";
+ public static final String FAILED_TO_UPDATE_SUBTREE_MEASUREMENT_COUNT =
+ "更新模板 {} 在 schemaRegion {} 中的子树测点计数失败";
+ public static final String RECOVER_SPEND = "恢复 [{}] 耗时:{} ms";
+ public static final String SCHEMA_REGION_FAILED_TO_RECOVER =
+ "StorageGroup [%s] 中的 SchemaRegion [%d] 恢复失败。";
+ public static final String SCHEMA_REGION_ALREADY_DELETED =
+ "SchemaRegion(id = {}) 已被删除,已跳过";
+ public static final String FAILED_TO_GET_TABLE_FOR_TIMESERIES_COUNT =
+ "计算时间序列数量时获取表 {}.{} 失败,可能是集群正在重启或表正在被删除。";
+ public static final String PEER_IS_SHUTTING_DOWN = "节点正在关闭中。";
+ public static final String SCHEMA_REGION_DUPLICATED =
+ "SchemaRegion [%s] 在 [%s] 和 [%s] 之间重复,前者已被恢复。";
+
+ // ======================== MemSchemaEngineStatistics 相关消息 ========================
+
+ public static final String CURRENT_SERIES_MEMORY_TOO_LARGE =
+ "当前时间序列内存 {} 过大...";
+ public static final String CURRENT_SERIES_MEMORY_BACK_TO_NORMAL =
+ "当前时间序列内存 {} 已恢复正常水平,总时间序列数量为 {}。";
+ public static final String WRONG_SCHEMA_ENGINE_STATISTICS_TYPE =
+ "SchemaEngineStatistics 类型错误";
+
+ // ======================== MemSchemaRegionStatistics 相关消息 ========================
+
+ public static final String WRONG_SCHEMA_REGION_STATISTICS_TYPE =
+ "SchemaRegionStatistics 类型错误";
+
+ // ======================== SchemaRegionUtils 相关消息 ========================
+
+ public static final String CANNOT_GET_FILES_IN_SCHEMA_REGION_DIR =
+ "无法获取 schema region 目录 %s 中的文件";
+ public static final String DELETE_SCHEMA_REGION_FILE = "删除 schema region 文件 {}";
+ public static final String DELETE_SCHEMA_REGION_FILE_FAILED =
+ "删除 schema region 文件 {} 失败。";
+ public static final String FAILED_TO_DELETE_SCHEMA_REGION_FILE =
+ "删除 schema region 文件 %s 失败";
+ public static final String DELETE_SCHEMA_REGION_FOLDER = "删除 schema region 目录 {}";
+ public static final String DELETE_SCHEMA_REGION_FOLDER_FAILED =
+ "删除 schema region 目录 {} 失败。";
+ public static final String FAILED_TO_DELETE_SCHEMA_REGION_FOLDER =
+ "删除 schema region 目录 %s 失败";
+ public static final String DELETE_DATABASE_SCHEMA_FOLDER = "删除数据库 schema 目录 {}";
+ public static final String DELETE_DATABASE_SCHEMA_FOLDER_FAILED =
+ "删除数据库 schema 目录 {} 失败";
+
+ // ======================== SchemaRegionLoader 相关消息 ========================
+
+ public static final String CLASS_NOT_SUBCLASS_OF_ISCHEMAREGION =
+ "类 %s 不是 ISchemaRegion 的子类。";
+ public static final String DUPLICATED_SCHEMA_REGION_IMPL =
+ "存在重复的 SchemaRegion 实现,{} 和 {} 使用了相同的模式名称 [{}]";
+ public static final String NO_SCHEMA_REGION_IMPL_WITH_TARGET_MODE =
+ "未找到目标模式 {} 的 SchemaRegion 实现,使用默认模式 {}";
+ public static final String SCHEMA_REGION_LOADER_INFO =
+ "[SchemaRegionLoader],schemaEngineMode:{},currentMode:{}";
+
+ // ======================== SchemaRegionPlanType 相关消息 ========================
+
+ public static final String UNRECOGNIZED_SCHEMA_REGION_PLAN_TYPE =
+ "无法识别的 SchemaRegionPlanType:";
+
+ // ======================== SchemaRegion 初始化/目录 相关消息 ========================
+
+ public static final String CREATE_DATABASE_SCHEMA_FOLDER = "创建数据库 schema 目录 {}";
+ public static final String CREATE_DATABASE_SCHEMA_FOLDER_FAILED =
+ "创建数据库 schema 目录 {} 失败。";
+ public static final String CREATE_SCHEMA_REGION_FOLDER = "创建 schema region 目录 {}";
+ public static final String CREATE_SCHEMA_REGION_FOLDER_FAILED =
+ "创建 schema region 目录 {} 失败。";
+ public static final String CANNOT_RECOVER_ALL_SCHEMA_INFO =
+ "无法从 {} 恢复所有 schema 信息,将尽可能恢复";
+ public static final String CANNOT_RECOVER_ALL_MTREE =
+ "无法从 {} 文件恢复所有 MTree,将尽可能恢复";
+
+ // ======================== SchemaRegion MLog 相关消息 ========================
+
+ public static final String CANNOT_FORCE_MLOG = "无法将 {} 的 mlog 强制写入 schema region";
+ public static final String SPEND_TIME_DESERIALIZE_MTREE =
+ "从 mlog.bin 反序列化 {} 的 mtree 耗时 {} ms";
+ public static final String FAILED_TO_PARSE_MLOG = "解析 ";
+ public static final String MLOG_BIN_SUFFIX = " mlog.bin 失败";
+ public static final String PARSE_MLOG_ERROR = "在行号 {} 处解析 mlog 出错:";
+ public static final String CANNOT_OPERATE_CMD = "无法执行命令 {},错误:";
+ public static final String MLOG_BIN_CORRUPTED =
+ "mlog.bin 文件已损坏,请删除或修复该文件,然后重启 IoTDB";
+ public static final String CANNOT_CLOSE_METADATA_LOG_WRITER =
+ "无法关闭元数据日志写入器:";
+ public static final String MLOG_RECOVERY_CHECK_POINT = "MLog 恢复检查点:{}";
+ public static final String CANNOT_GET_MLOG_CHECKPOINT =
+ "无法从 MLogDescription 文件获取检查点,原因:{},使用默认值 0。";
+ public static final String FAILED_TO_SKIP_MLOG = "从 {} 跳过 {} 失败";
+ public static final String UPDATE_MLOG_DESCRIPTION_FAILED = "更新 {} 失败,原因:{}";
+ public static final String DIRECT_BUFFER_MEMORY_EXCEEDED =
+ "直接缓冲区的总分配内存将达到 ";
+ public static final String DIRECT_BUFFER_MEMORY_LIMIT = ",超过内存限制:";
+
+ // ======================== SchemaRegion 快照相关消息 ========================
+
+ public static final String FAILED_TO_CREATE_SNAPSHOT_NOT_INITIALIZED =
+ "创建 schemaRegion {} 的快照失败,因为该 schemaRegion 尚未初始化。";
+ public static final String START_CREATE_SNAPSHOT = "开始创建 schemaRegion {} 的快照";
+ public static final String MTREE_SNAPSHOT_CREATION_COST =
+ "schemaRegion {} 的 MTree 快照创建耗时 {}ms。";
+ public static final String MTREE_SNAPSHOT_CREATION_COST_WITH_STATUS =
+ "schemaRegion {} 的 MTree 快照创建耗时 {}ms,状态:{}";
+ public static final String TAG_SNAPSHOT_CREATION_COST =
+ "schemaRegion {} 的 Tag 快照创建耗时 {}ms。";
+ public static final String TAG_SNAPSHOT_CREATION_COST_WITH_STATUS =
+ "schemaRegion {} 的 Tag 快照创建耗时 {}ms,状态:{}";
+ public static final String DEVICE_ATTR_SNAPSHOT_CREATION_COST =
+ "schemaRegion {} 的设备属性快照创建耗时 {}ms,状态:{}";
+ public static final String DEVICE_ATTR_UPDATER_SNAPSHOT_CREATION_COST =
+ "schemaRegion {} 的设备属性远程更新器快照创建耗时 {}ms,状态:{}";
+ public static final String SNAPSHOT_CREATION_COST =
+ "schemaRegion {} 的快照创建耗时 {}ms。";
+ public static final String SUCCESSFULLY_CREATE_SNAPSHOT =
+ "成功创建 schemaRegion {} 的快照";
+ public static final String START_LOADING_SNAPSHOT =
+ "开始加载 schemaRegion {} 的快照";
+ public static final String DEVICE_ATTR_SNAPSHOT_LOADING_COST =
+ "schemaRegion {} 的设备属性快照加载耗时 {}ms。";
+ public static final String DEVICE_ATTR_UPDATER_SNAPSHOT_LOADING_COST =
+ "schemaRegion {} 的设备属性远程更新器快照加载耗时 {}ms。";
+ public static final String TAG_SNAPSHOT_LOADING_COST =
+ "schemaRegion {} 的 Tag 快照加载耗时 {}ms。";
+ public static final String MTREE_SNAPSHOT_LOADING_COST =
+ "schemaRegion {} 的 MTree 快照加载耗时 {}ms。";
+ public static final String SNAPSHOT_LOADING_COST =
+ "schemaRegion {} 的快照加载耗时 {}ms。";
+ public static final String SUCCESSFULLY_LOAD_SNAPSHOT =
+ "成功加载 schemaRegion {} 的快照";
+ public static final String FAILED_TO_LOAD_SNAPSHOT =
+ "加载 schemaRegion {} 的快照失败,原因:{},将使用空的 schemaRegion";
+ public static final String ERROR_DURING_INIT_SCHEMA_REGION =
+ "初始化 schemaRegion {} 过程中发生错误";
+ public static final String FAILED_TO_RECOVER_TAG_INDEX =
+ "在 schemaRegion {} 中恢复 {} 的 tagIndex 失败。";
+ public static final String FAILED_TO_READ_TAG_ATTRIBUTE =
+ "读取 tag 和 attribute 信息失败:{}";
+
+ // ======================== DeviceAttributeStore 相关消息 ========================
+
+ public static final String FAILED_TO_DELETE_OLD_SNAPSHOT_DEVICE_ATTR =
+ "创建设备属性快照时删除旧快照 {} 失败。";
+ public static final String FAILED_TO_RENAME_SNAPSHOT_DEVICE_ATTR =
+ "创建设备属性快照时将 {} 重命名为 {} 失败。";
+ public static final String FAILED_TO_CREATE_DEVICE_ATTR_SNAPSHOT =
+ "创建设备属性快照失败:{}";
+ public static final String DEVICE_ATTR_SNAPSHOT_NOT_FOUND =
+ "未找到设备属性快照 {},视为从旧版本升级,使用空属性";
+ public static final String LOAD_DEVICE_ATTR_SNAPSHOT_FAILED =
+ "从 {} 加载设备属性快照失败";
+
+ // ======================== DeviceAttributeCacheUpdater 相关消息 ========================
+
+ public static final String FAILED_TO_DELETE_OLD_SNAPSHOT_UPDATER =
+ "创建设备属性远程更新器快照时删除旧快照 {} 失败。";
+ public static final String FAILED_TO_RENAME_SNAPSHOT_UPDATER =
+ "创建设备属性远程更新器快照时将 {} 重命名为 {} 失败。";
+ public static final String FAILED_TO_CREATE_UPDATER_SNAPSHOT =
+ "创建设备属性远程更新器快照失败:{}";
+ public static final String UPDATER_SNAPSHOT_NOT_FOUND =
+ "未找到设备属性远程更新器快照 {},视为从旧版本升级,将不更新远程节点";
+ public static final String LOAD_UPDATER_SNAPSHOT_FAILED =
+ "从 {} 加载设备属性远程更新器快照失败,继续...";
+ public static final String REQUEST_MEMORY_SIZE_NEGATIVE =
+ "requestMemory 大小不能为负数";
+ public static final String RELEASE_MEMORY_SIZE_NEGATIVE =
+ "releaseMemory 大小不能为负数";
+
+ // ======================== MetaFormatUtils 相关消息 ========================
+
+ public static final String ILLEGAL_NAME = "%s 是不合法的名称。";
+ public static final String NAME_CONTAINS_UNSUPPORTED_CHAR =
+ "名称 %s 包含不支持的字符。";
+ public static final String DATABASE_NAME_ILLEGAL_CHARS =
+ "数据库名称只能包含中英文字符、数字、反引号和下划线。%s";
+ public static final String SDT_COMPRESSION_DEVIATION_REQUIRED =
+ "SDT 压缩偏差为必填项";
+ public static final String SDT_COMPRESSION_DEVIATION_NEGATIVE =
+ "SDT 压缩偏差不能为负数";
+ public static final String SDT_COMPRESSION_DEVIATION_FORMAT_ERROR =
+ "SDT 压缩偏差格式错误";
+ public static final String SDT_COMPRESSION_MAX_GREATER_THAN_MIN =
+ "SDT 压缩最大时间必须大于最小时间";
+ public static final String SDT_COMPRESSION_TIME_NEGATIVE =
+ "SDT 压缩 %s 时间不能为负数";
+ public static final String SDT_COMPRESSION_TIME_FORMAT_ERROR =
+ "SDT 压缩 %s 时间格式错误";
+ public static final String SDT_ENABLED_NO_COMPRESSION_TIME =
+ "{} 已启用 SDT 但未设置压缩 {} 时间";
+
+ // ======================== Tag/Attribute 相关消息 ========================
+
+ public static final String TIMESERIES_NO_TAG_ATTRIBUTE =
+ "时间序列 [%s] 没有任何 tag/attribute。";
+ public static final String TIMESERIES_NO_SPECIFIC_TAG_ATTRIBUTE =
+ "时间序列 [%s] 没有 [%s] tag/attribute。";
+ public static final String TIMESERIES_ALREADY_HAS_ATTRIBUTE =
+ "时间序列 [%s] 已有 attribute [%s]。";
+ public static final String TIMESERIES_ALREADY_HAS_TAG =
+ "时间序列 [%s] 已有 tag [%s]。";
+ public static final String TIMESERIES_NO_TAG_ATTRIBUTE_LOG =
+ "时间序列 [{}] 没有 tag/attribute [{}]";
+ public static final String TIMESERIES_NO_SPECIFIC_TAG_ATTRIBUTE_FMT =
+ "时间序列 [%s] 没有 tag/attribute [%s]。";
+
+ // ======================== TagManager 快照相关消息 ========================
+
+ public static final String FAILED_TO_DELETE_OLD_TAG_SNAPSHOT =
+ "创建 tagManager 快照时删除旧快照 {} 失败。";
+ public static final String FAILED_TO_RENAME_TAG_SNAPSHOT =
+ "创建 tagManager 快照时将 {} 重命名为 {} 失败。";
+ public static final String FAILED_TO_DELETE_AFTER_RENAME_FAILURE =
+ "重命名失败后删除 {} 失败。";
+ public static final String FAILED_TO_CREATE_TAG_SNAPSHOT =
+ "创建 tagManager 快照失败:{}";
+ public static final String FAILED_TO_DELETE_AFTER_TAG_SNAPSHOT_FAILURE =
+ "创建 tagManager 快照失败后删除 {} 失败。";
+ public static final String FAILED_TO_DELETE_FILE = "删除 {} 失败。";
+ public static final String FAILED_TO_DELETE_EXISTING_WHEN_LOADING =
+ "加载快照时删除已有的 {} 失败。";
+ public static final String FAILED_TO_DELETE_EXISTING_WHEN_COPY_FAILURE =
+ "复制快照失败时删除已有的 {} 失败。";
+
+ // ======================== TagLogFile 相关消息 ========================
+
+ public static final String CREATE_SCHEMA_FOLDER = "创建 schema 目录 {}。";
+ public static final String CREATE_SCHEMA_FOLDER_FAILED = "创建 schema 目录 {} 失败。";
+
+ // ======================== MemMTreeSnapshotUtil 相关消息 ========================
+
+ public static final String FAILED_TO_DELETE_OLD_MTREE_SNAPSHOT =
+ "创建 mTree 快照时删除旧快照 {} 失败。";
+ public static final String FAILED_TO_RENAME_MTREE_SNAPSHOT =
+ "创建 mTree 快照时将 {} 重命名为 {} 失败。";
+ public static final String FAILED_TO_CREATE_MTREE_SNAPSHOT =
+ "创建 mTree 快照失败:{}";
+ public static final String SERIALIZE_ERROR_INFO =
+ "序列化 MemMTree 过程中发生错误。";
+ public static final String UNRECOGNIZED_MNODE_TYPE = "无法识别的 MNode 类型 ";
+
+ // ======================== View 相关消息 ========================
+
+ public static final String IS_NO_VIEW = "[%s] 不是视图。";
+ public static final String VIEW_NOT_SUPPORTED = "不支持视图。";
+ public static final String VIEW_DOES_NOT_SUPPORT_ALIAS = "视图不支持别名";
+ public static final String CANNOT_CONSTRUCT_ABSTRACT_CLASS =
+ "无法构造抽象类。";
+
+ // ======================== PBTree 相关消息 ========================
+
+ public static final String TABLE_MODEL_NOT_SUPPORT_PBTREE =
+ "表模型尚不支持 PBTree。";
+ public static final String PBTREE_NOT_SUPPORT_ALTER_ENCODING =
+ "PBTree 尚不支持修改编码和压缩方式。";
+ public static final String NOT_IMPLEMENTED = "尚未实现";
+ public static final String PBTREE_FILE_OVERWRITTEN =
+ "PBTree 文件 [{}] 已存在,将被覆盖。";
+ public static final String SCHEMA_FILE_WRONG_VERSION =
+ "SchemaFile 版本错误,请检查或升级。";
+ public static final String NODE_NO_CHILD_IN_PBTREE =
+ "节点 [%s] 在 pbtree 文件中没有子节点。";
+ public static final String SCHEMA_FILE_INSPECTED = "SchemaFile[%s] 已被检查。";
+ public static final String FAILED_TO_CREATE_SCHEMA_FILE_SNAPSHOT =
+ "创建 SchemaFile 快照失败:{}";
+ public static final String FAILED_TO_DELETE_OLD_PBTREE_SNAPSHOT =
+ "创建 pbtree 文件快照时删除旧快照 {} 失败。";
+
+ // ======================== PBTree Segment/Page 相关消息 ========================
+
+ public static final String FAILED_TO_INSERT_RELOCATED_SEGMENT =
+ "向重定位的 segment 插入缓冲区失败";
+ public static final String FAILED_TO_UPDATE_RELOCATED_SEGMENT =
+ "在重定位的 segment 上更新缓冲区失败";
+ public static final String ALIAS_INDEX_PAGE_EXTEND_CAPACITY =
+ "AliasIndexPage 只能扩展到相同容量的缓冲区。";
+ public static final String SEGMENTS_SPLIT_SAME_CAPACITY =
+ "Segment 只能以相同容量进行拆分。";
+ public static final String SEGMENT_SPLIT_NO_RECORDS =
+ "没有记录的 Segment 无法拆分。";
+ public static final String SEGMENT_SPLIT_ONLY_ONE_RECORD =
+ "只有一条记录的 Segment 无法拆分。";
+ public static final String INTERNAL_PAGE_EXTEND_CAPACITY =
+ "InternalPage 只能扩展到相同容量的缓冲区。";
+ public static final String INTERNAL_SEGMENT_SPLIT_NO_KEY =
+ "内部 Segment 没有插入键时无法拆分";
+ public static final String INTERNAL_SEGMENT_LESS_THAN_2_POINTERS =
+ "指针数少于 2 的 Segment 无法拆分。";
+ public static final String LEAF_SEGMENT_EXTEND_SMALLER =
+ "叶子 Segment 无法扩展到更小的缓冲区。";
+ public static final String RECORD_CONFLICT_NAME_WITH_ALIAS =
+ "记录 [%s] 的名称与其兄弟节点的别名冲突。";
+ public static final String RECORD_CONFLICT_ALIAS =
+ "记录 [%s] 的别名 [%s] 与其兄弟节点冲突。";
+ public static final String RECORD_NOT_EXISTED = "记录[key:%s] 不存在。";
+ public static final String SEGMENT_CACHE_MAP_INCONSISTENT =
+ "页面 %d 中的 Segment 缓存映射与 Segment 列表不一致。";
+ public static final String UNRECOGNIZED_NODE_TYPE = "无法识别的节点类型:";
+
+ // ======================== PBTree PageManager 相关消息 ========================
+
+ public static final String CHILD_SHALL_NOT_HAVE_SEGMENT_ADDRESS =
+ "newChildBuffer 中的子节点不应有 segmentAddress。";
+ public static final String PAGE_INDEX_OUT_OF_RANGE = "页面索引 %d 超出范围。";
+ public static final String ROOT_PAGE_SHALL_NOT_BE_MIGRATED =
+ "根页面不应被迁移。";
+ public static final String SUBORDINATE_INDEX_NOT_ON_SINGLE_PAGE =
+ "不应在单页面 segment 上构建从属索引。";
+ public static final String SUBORDINATE_INDEX_BROKEN =
+ "文件可能已损坏,从属索引已断裂。";
+ public static final String DUPLICATE_PAGE_INSTANCES =
+ "存在索引相同的重复页面实例:{}";
+ public static final String PAGE_LOCKED_TIMES = "页面 [{}] 已被锁定 {} 次。";
+ public static final String REENTRANT_WRITE_LOCKS_DETAIL =
+ "页面 {} 上存在可重入写锁,内容详情:{}";
+ public static final String REENTRANT_WRITE_LOCKS = "页面 {} 上存在可重入写锁";
+
+ // ======================== PBTree Flush 相关消息 ========================
+
+ public static final String IO_EXCEPTION_UPDATING_SG_MNODE =
+ "更新 StorageGroupMNode {} 时发生 IO 异常";
+ public static final String ERROR_DURING_MTREE_FLUSH =
+ "MTree 刷写过程中发生错误,当前节点为 {}";
+
+ // ======================== PBTree ReleaseFlushMonitor 相关消息 ========================
+
+ public static final String RELEASE_TASK_MONITOR_INTERRUPTED =
+ "ReleaseTaskMonitor 线程被中断。";
+ public static final String RELEASE_FLUSH_TASK_TIMEOUT =
+ "释放任务和刷写任务未在 {} 毫秒内完成,已中断。";
+
+ // ======================== PBTree PagePool 相关消息 ========================
+
+ public static final String PAGE_CACHE_EVICTION_INTERRUPTED =
+ "页面缓存淘汰过程中被中断,请考虑增加缓存大小、降低并发或延长超时时间";
+
+ // ======================== ReadOnly MTreeStore 相关消息 ========================
+
+ public static final String READ_ONLY_REENTRANT_MTREE_STORE = "ReadOnlyReentrantMTreeStore";
+
+ // ======================== MNode 相关消息 ========================
+
+ public static final String WRONG_MNODE_TYPE = "错误的 MNode 类型";
+ public static final String WRONG_NODE_TYPE = "错误的节点类型";
+ public static final String SHOULD_CALL_EXACT_SUB_CLASS = "应调用具体的子类!";
+ public static final String VIEW_TABLE_NOT_ALLOWED = "不允许使用视图表。";
+ public static final String TABLE_DEVICE_NOT_UNDER_TREE_MODEL =
+ "不应在树模型下创建表设备";
+ public static final String NO_SATISFIED_MNODE_FACTORY = "未找到满足条件的 MNodeFactory";
+
+ // ======================== Logfile 相关消息 ========================
+
+ public static final String READ_LOG_LENGTH_NEGATIVE = "读取的日志长度 %s 为负数。";
+ public static final String PLAN_NOT_SUPPORT_DESERIALIZATION =
+ "%s 计划不支持反序列化。";
+ public static final String PLAN_NOT_SUPPORT_SERIALIZATION =
+ "%s 计划不支持序列化。";
+ public static final String SCHEMA_FILE_LOG_INCOMPLETE_ENTRY = "不完整的日志条目。";
+
+ // ======================== Template 相关消息 ========================
+
+ public static final String UNKNOWN_TEMPLATE_UPDATE_OPERATION_TYPE =
+ "未知的模板更新操作类型";
+
+ // ======================== InformationSchema 相关消息 ========================
+
+ public static final String SYSTEM_VIEW_NOT_SUPPORT_SHOW_CREATE =
+ "系统视图不支持 show create。";
+
+ // ======================== 附加 SchemaRegion 相关消息 ========================
+
+ public static final String SCHEMA_REGION_PLAN_NOT_SUPPORT_EMPTY =
+ "类型为 %s 的 SchemaRegionPlan 不支持创建空计划。";
+ public static final String SCHEMA_REGION_PLAN_NOT_SUPPORT_RECOVER_MEMORY =
+ "类型为 %s 的 SchemaRegionPlan 不支持在 SchemaRegionMemoryImpl 中执行恢复操作。";
+ public static final String SCHEMA_REGION_PLAN_NOT_SUPPORT_RECOVER_PBTREE =
+ "类型为 %s 的 SchemaRegionPlan 不支持在 SchemaRegionPBTreeImpl 中执行恢复操作。";
+ public static final String PBTREE_NOT_SUPPORT_ALTER_DATA_TYPE =
+ "PBTree 尚不支持修改时间序列数据类型。";
+
+ // ======================== 附加 MTree 相关消息 ========================
+
+ public static final String DEVICE_NUM_UPPER_LIMIT =
+ "设备数量已达到上限";
+ public static final String TIMESERIES_TYPE_NOT_COMPATIBLE =
+ "时间序列 %s 使用的新类型 %s 与已有类型 %s 不兼容";
+ public static final String ALIAS_DUPLICATED =
+ "别名与其他测量的名称或别名重复,别名:";
+ public static final String LOGICAL_VIEW_NODE_TYPE_ERROR =
+ "newMNode 的类型不是 LogicalViewMNode!实际类型为 ";
+ public static final String TEMPLATE_SHOULD_MOUNTED_ON_ANCESTOR =
+ "使用模板的节点 [%s] 的祖先节点上应挂载模板。";
+ public static final String DESCENDANT_SHOULD_NOT_EXIST =
+ "节点 %s 下不应存在后代节点";
+
+ // ======================== 附加 SchemaFile/Page 相关消息 ========================
+
+ public static final String ADDING_CHILDREN_UNDER_TEMPLATE_NOT_ALLOWED =
+ "不允许添加或更新使用模板 [%s] 的设备的子节点。";
+ public static final String CANNOT_FLUSH_NODE_NEGATIVE_ADDRESS =
+ "除 DatabaseNode 外,不能刷写地址为负 [%s] 的节点。";
+ public static final String SEGMENTED_PAGE_SHARE_BUFFER =
+ "SegmentedPage 仅在包含一个最大尺寸 segment 时才能共享整个缓冲区切片。";
+ public static final String BYTEBUFFER_CORRUPTED_FOR_SCHEMA_PAGE =
+ "ByteBuffer 已损坏或位置设置错误,无法加载为 SchemaPage。";
+ public static final String NODE_NO_CHILD_IN_PBTREE_WITH_NAME =
+ "节点 [%s] 在 pbtree 文件中没有子节点 [%s]。";
+ public static final String SINGLE_RECORD_TOO_LARGE =
+ "SchemaFile 目前不支持超过半页大小的单条记录。";
+ public static final String PAGE_REPLACEMENT_ERROR =
+ "页面 [%d] 替换错误:引用计数或锁对象不一致。";
+ public static final String NODE_NO_VALID_SEGMENT_ADDRESS =
+ "节点 [%s] 在 pbtree 文件中没有有效的 segment 地址。";
+
+ // ======================== 附加 SchemaFileLog 相关消息 ========================
+
+ public static final String COMMIT_MARK_WITHOUT_PREPARE = "存在 COMMIT_MARK 但缺少 PREPARE_MARK";
+ public static final String EXTRANEOUS_BYTE_AFTER_PREPARE =
+ "PREPARE_MARK 之后出现了非 COMMIT_MARK 的多余字节";
+ public static final String NOT_ENDED_BY_MARK =
+ "未以 COMMIT_MARK 或 PREPARE_MARK 结尾。";
+
+ // ======================== 附加 MNodeContainer 相关消息 ========================
+
+ public static final String DUPLICATE_NODE_IN_BUFFERS =
+ "不应在 newChildBuffer 和 updateChildBuffer 中分别存在同名的两个节点";
+
+ // ======================== 附加 Logfile 相关消息 ========================
+
+ public static final String FAILED_TO_CREATE_FILE_ALREADY_EXISTS =
+ "创建文件 %s 失败,因为同名文件已存在";
+
+ // ======================== 附加 View 相关消息 ========================
+
+ public static final String VISIT_EXPRESSION_NOT_SUPPORTED =
+ "TransformToExpressionVisitor 中不支持 visitExpression。";
+
+ // ======================== 附加 Tag 相关消息 ========================
+
+ public static final String BYTEBUFFER_SMALLER_THAN_TAG_SIZE =
+ "ByteBuffer 容量小于 tagAttributeTotalSize,不允许此操作。";
+ public static final String TIMESERIES_ALREADY_HAS_TAG_ATTRIBUTE_NAMED =
+ "时间序列 [%s] 已有名为 [%s] 的 tag/attribute。";
+
+ // ======================== 附加 Template 相关消息 ========================
+
+ public static final String FAILED_TO_CREATE_TEMPLATE =
+ "在 ConfigNode 中执行创建设备模板 {} 失败,状态为 {}。";
+ public static final String CREATE_TEMPLATE_ERROR_PREFIX = "创建模板出错 - ";
+ public static final String CREATE_TEMPLATE_ERROR = "创建模板出错。";
+ public static final String GET_ALL_TEMPLATE_ERROR = "获取所有模板出错。";
+ public static final String GET_TEMPLATE_INFO_ERROR = "获取模板信息出错。";
+ public static final String FAILED_TO_SET_TEMPLATE =
+ "在 ConfigNode 中执行设置设备模板 {} 到路径 {} 失败,状态为 {}。";
+
+ // ======================== 附加 InformationSchema 相关消息 ========================
+
+ public static final String INFORMATION_SCHEMA_READ_ONLY =
+ "数据库 'information_schema' 仅支持查询操作";
+
+ // ======================== 附加 GRASS/Updater 相关消息 ========================
+
+ public static final String FAILED_TO_WRITE_ATTR_COMMIT =
+ "向 region {} 写入属性提交消息失败。";
+ public static final String FAILED_TO_FETCH_DATANODE_LOCATIONS =
+ "获取 DataNode 位置信息失败,将重试。";
+
+ // ======================== 附加 ResourceByPathUtils 相关消息 ========================
+
+ public static final String FAILED_TO_RESERVE_MEMORY_TVLIST =
+ "为 TVList 预留内存失败:ramSize {},timestampsSize {},arrayMemCost {},rowCount {},dataTypes {}";
+
+ // ======================== 附加 CachedMTreeStore 相关消息 ========================
+
+ public static final String ERROR_DURING_PBTREE_CLEAR =
+ "PBTree 清理过程中发生错误:{}";
+ public static final String ERROR_DURING_MTREE_FLUSH_SCHEMA_REGION =
+ "MTree 刷写过程中发生错误,当前 SchemaRegionId 为 {}";
+ public static final String ERROR_DURING_MTREE_FLUSH_SCHEMA_REGION_BECAUSE =
+ "MTree 刷写过程中发生错误,当前 SchemaRegionId 为 {},原因:{}";
+
+ // ======================== 附加 MemMTreeSnapshotUtil 相关消息 ========================
+
+ public static final String DESERIALIZE_ERROR_INFO =
+ "反序列化 MemMTree 过程中发生错误。";
+
+ // ======================== 附加 MetaUtils 相关消息 ========================
+
+ public static final String PATH_NO_LONGER_THAN_SG_LEVEL =
+ "路径长度不超过默认 sg 层级:";
+ public static final String PATH_DOES_NOT_START_WITH_ROOT = "路径不以 ";
+
+ // ======================== FakeCRC32Deserializer 相关消息 ========================
+
+ public static final String READ_LOG_LENGTH_NEGATIVE_LOG =
+ "读取的日志长度 {} 为负数。";
+
+ // ======================== SchemaLogReader 相关消息 ========================
+
+ public static final String FILE_CORRUPTED =
+ "文件 {} 已损坏,未损坏的大小为 {}。";
+ public static final String LOG_FILE_END_CORRUPTED_TRUNCATE =
+ "日志文件 {} 的末尾已损坏,开始截断。未损坏的大小为 {},文件大小为 {}。";
+ public static final String FAIL_TO_TRUNCATE_LOG_FILE =
+ "截断日志文件到大小 {} 失败";
+
+ // ======================== SchemaRegionPlanDeserializer 相关消息 ========================
+
+ public static final String CANNOT_DESERIALIZE_SCHEMA_REGION_PLAN =
+ "无法从缓冲区反序列化 SchemaRegionPlan";
+
+ // ======================== MTreeBelowSGMemoryImpl 相关消息 ========================
+
+ public static final String TIMESERIES_NUM_UPPER_LIMIT =
+ "时间序列数量已达到上限";
+ public static final String ALIAS_DUPLICATED_DETAIL =
+ ",完整路径:";
+ public static final String ALIAS_DUPLICATED_OTHER_MEASUREMENT =
+ ",其他测量:";
+ public static final String START_CREATE_TABLE_DEVICE =
+ "开始创建表设备 {}.{}";
+ public static final String TABLE_DEVICE_ALREADY_EXISTS =
+ "表设备 {}.{} 已存在";
+ public static final String TABLE_DEVICE_CREATED =
+ "表设备 {}.{} 已创建";
+
+ // ======================== CachedMTreeStore / Scheduler 相关消息 ========================
+
+ public static final String MTREE_FLUSH_COST =
+ "在 SchemaRegion {} 中刷写 MTree 耗时 {}ms";
+
+ // ======================== DataNodeTableCache 相关消息 ========================
+
+ public static final String INIT_TABLE_CACHE_SUCCESS =
+ "DataNodeTableCache 初始化成功";
+ public static final String PRE_UPDATE_TABLE_SUCCESS =
+ "预更新表 {}.{} 成功";
+ public static final String PRE_RENAME_OLD_TABLE_SUCCESS =
+ "预重命名旧表 {}.{} 成功";
+ public static final String ROLLBACK_UPDATE_TABLE_SUCCESS =
+ "回滚更新表 {}.{} 成功";
+ public static final String ROLLBACK_RENAME_OLD_TABLE_SUCCESS =
+ "回滚重命名旧表 {}.{} 成功。";
+ public static final String COMMIT_UPDATE_TABLE_SUCCESS_WITH_DETAIL =
+ "提交更新表 {}.{} 成功,{}";
+ public static final String COMMIT_UPDATE_TABLE_SUCCESS =
+ "提交更新表 {}.{} 成功。";
+ public static final String RENAME_OLD_TABLE_SUCCESS =
+ "重命名旧表 {}.{} 成功。";
+ public static final String INTERRUPTED_ACQUIRE_SEMAPHORE_GET_TABLES =
+ "尝试获取信号量以从 ConfigNode 获取表时被中断,已忽略。";
+ public static final String UPDATE_TABLE_BY_FETCH_WITH_DETAIL =
+ "通过表拉取更新表 {}.{},{}";
+ public static final String UPDATE_TABLE_BY_FETCH =
+ "通过表拉取更新表 {}.{}。";
+ public static final String COMPARE_TABLE_ADDED = "新增表:";
+ public static final String COMPARE_TABLE_REMOVED = "已移除表:";
+ public static final String COMPARE_TABLE_NAME = "表名:";
+ public static final String COMPARE_TABLE_REMOVED_PROPS = " 已移除属性:";
+ public static final String COMPARE_TABLE_ADDED_PROPS = " 新增属性:";
+ public static final String COMPARE_TABLE_REMOVED_COLUMNS = " 已移除列:";
+ public static final String COMPARE_TABLE_ADDED_COLUMNS = " 新增列:";
+ public static final String COMPARE_TABLE_NOT_MODIFIED = " 未修改";
+
+ // ======================== ClusterTemplateManager 相关消息 ========================
+
+ public static final String ILLEGAL_PATH_LOG = "非法路径 {}";
+
+ private DataNodeSchemaMessages() {} // constants-only holder: private ctor prevents instantiation
+}
diff --git a/iotdb-core/datanode/src/main/i18n/zh/org/apache/iotdb/db/i18n/StorageEngineMessages.java b/iotdb-core/datanode/src/main/i18n/zh/org/apache/iotdb/db/i18n/StorageEngineMessages.java
new file mode 100644
index 0000000000000..1db60c47d7c9e
--- /dev/null
+++ b/iotdb-core/datanode/src/main/i18n/zh/org/apache/iotdb/db/i18n/StorageEngineMessages.java
@@ -0,0 +1,509 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.i18n;
+
+public final class StorageEngineMessages {
+
+ private StorageEngineMessages() {} // constants-only holder: private ctor prevents instantiation
+
+ // ======================== StorageEngine ========================
+
+ public static final String FAIL_TO_RECOVER_WAL = "WAL 恢复失败。";
+ public static final String STORAGE_ENGINE_FAILED_TO_SET_UP = "存储引擎启动失败。";
+ public static final String SEQ_MEMTABLE_FLUSH_CHECK_THREAD_STARTED = "顺序 memtable 定时 flush 检查线程启动成功。";
+ public static final String UNSEQ_MEMTABLE_FLUSH_CHECK_THREAD_STARTED = "乱序 memtable 定时 flush 检查线程启动成功。";
+ public static final String STILL_NOT_EXIT_AFTER_30S = "{} 在 30 秒后仍未退出";
+ public static final String START_CLOSING_ALL_DB_PROCESSOR = "开始关闭所有数据库处理器";
+ public static final String START_FORCE_CLOSING_ALL_DB_PROCESSOR = "开始强制关闭所有数据库处理器";
+ public static final String SYSTEM_READ_ONLY_NO_MERGE = "当前系统为只读模式,不支持合并操作";
+ public static final String START_REPAIR_DATA = "开始修复数据";
+ public static final String STOP_REPAIR_DATA = "停止修复数据";
+ public static final String REMOVING_DATA_REGION = "正在移除 DataRegion {}";
+ public static final String FAILED_TO_DELETE_SNAPSHOT_DIR = "删除快照目录 {} 失败";
+ public static final String REMOVED_DATA_REGION = "已移除 DataRegion {}";
+ public static final String EXECUTE_LOAD_COMMAND_ERROR = "执行加载命令 {} 出错。";
+ public static final String START_REBOOTING_ALL_TIMED_SERVICE = "开始重启所有定时服务。";
+ public static final String STOP_ALL_TIMED_SERVICE_AND_RESTART = "所有定时服务已成功停止,正在重启。";
+ public static final String REBOOT_ALL_TIMED_SERVICE_SUCCESSFULLY = "所有定时服务重启成功";
+ public static final String FAILED_TO_DELETE = "删除失败: {} -> {}";
+ public static final String FAILED_TO_CHECK_OBJECT_FILES = "检查对象文件失败: {}";
+
+ // ======================== Buffer Cache ========================
+
+ public static final String BLOOM_FILTER_CACHE_SIZE = "BloomFilterCache 大小 = {}";
+ public static final String GET_BLOOM_FILTER_FROM_CACHE = "从缓存中获取布隆过滤器,文件路径: {}";
+ public static final String STOP_SERVICE = "{}: 正在停止 {}...";
+ public static final String CHUNK_CACHE_SIZE = "ChunkCache 大小 = {}";
+ public static final String GET_CHUNK_FROM_CACHE = "从缓存中获取 Chunk,键为: {}";
+ public static final String CACHE_MISS_IN_FILE = "缓存未命中: {}.{},文件: {}";
+ public static final String DEVICE_ALL_SENSORS = "设备: {},所有传感器: {}";
+ public static final String TS_METADATA_FILTERED_BY_BLOOM_FILTER = "时间序列元数据 {} 已被布隆过滤器过滤!";
+ public static final String FILE_NO_SUCH_TIME_SERIES = "文件中不包含该时间序列 {}。";
+
+ // ======================== Resource Control - Disk ========================
+
+ public static final String FAILED_TO_DEREGISTER_FILE_LOCK = "注销文件锁失败,原因: {}";
+ public static final String ALL_FOLDERS_FULL_CHANGE_TO_READ_ONLY = "所有目录已满,切换系统为只读模式。";
+ public static final String FAILED_TO_PROCESS_FOLDER = "处理目录失败 '";
+ public static final String FAIL_TO_GET_CANONICAL_PATH = "获取数据目录 {} 的规范路径失败";
+ public static final String ALL_DISKS_OF_TIER_FULL = "第 {} 层的所有磁盘已满。";
+ public static final String FOLDERS_RESET_SUCCESSFULLY = "目录重置成功,耗时 {} 毫秒。";
+ public static final String FOLDER_NOT_EXIST_CREATE_IT = "目录 {} 不存在,正在创建";
+ public static final String FAILED_TO_STATISTIC_SIZE = "统计 {} 的大小失败,原因";
+ public static final String DISK_SPACE_INSUFFICIENT_READ_ONLY = "磁盘空间不足,切换系统为只读模式";
+ public static final String CANNOT_CALC_OCCUPIED_SPACE = "无法计算路径 {} 的已用空间。";
+
+ // ======================== Resource Control - Memory ========================
+
+ public static final String WAITING_FOR_THREAD_POOL_SHUTDOWN = "正在等待 {} 线程池关闭。";
+ public static final String THREAD_POOL_NOT_EXIT_AFTER_MS = "{} 线程池在 {} 毫秒后仍未退出。";
+ public static final String INTERRUPTED_WAITING_THREAD_POOL_EXIT = "等待 {} 线程池退出时被中断。 ";
+ public static final String BUFFERED_ARRAY_SIZE_THRESHOLD = "BufferedArraySizeThreshold 为 {}";
+ public static final String CURRENT_SG_COST = "当前存储组内存开销为 {}";
+ public static final String FORCE_DEGRADE_TSFILE_RESOURCE = "强制降级 TsFile 资源 {}";
+ public static final String CANNOT_DEGRADE_TIME_INDEX_ALL_FILE_LEVEL = "无法继续降级时间索引,所有时间索引已为文件级别。";
+ public static final String DEGRADE_TSFILE_RESOURCE = "降级 TsFile 资源 {}";
+
+ // ======================== Resource Control - Quotas ========================
+
+ public static final String SPACE_QUOTA_RESTORE_SUCCEEDED = "空间配额限制恢复成功,限制: {}。";
+ public static final String SPACE_QUOTA_RESTORE_FAILED = "空间配额限制恢复失败,限制: {}。";
+ public static final String THROTTLE_QUOTA_RESTORED_SUCCESSFULLY = "流量配额限制恢复成功。 ";
+ public static final String THROTTLE_QUOTA_RESTORED_FAILED = "流量配额限制恢复失败。 ";
+ public static final String INVALID_STATEMENT_TYPE = "无效的语句类型: ";
+
+ // ======================== DataRegion ========================
+
+ public static final String CREATE_DB_SYSTEM_DIR_FAILED = "创建数据库系统目录 {} 失败";
+ public static final String CREATE_DATA_REGION_DIR_FAILED = "创建 DataRegion 目录 {} 失败";
+ public static final String IS_NOT_A_DIRECTORY = "{} 不是目录。";
+ public static final String FAIL_TO_CLOSE_TSFILE_WHEN_RECOVERING = "恢复过程中关闭 TsFile {} 失败";
+ public static final String FAIL_TO_RECOVER_SEALED_TSFILE_SKIP = "恢复已封闭的 TsFile {} 失败,跳过该文件。";
+ public static final String DATA_INCONSISTENT_NOT_TRIGGER_TWICE = "数据不一致异常不应被触发两次";
+ public static final String INSERT_TO_TSFILE_PROCESSOR_REJECTED = "写入 TsFileProcessor 被拒绝, {}";
+ public static final String INSERT_TO_TSFILE_PROCESSOR_ERROR = "写入 TsFileProcessor 出错 ";
+ public static final String IOEXCEPTION_CREATING_TSFILE_PROCESSOR_RETRY = "创建 TsFileProcessor 时遇到 IOException,正在重试";
+ public static final String CANNOT_CLOSE_TSFILE_RESOURCE = "无法关闭 TsFileResource {}";
+ public static final String CANNOT_REMOVE_MOD_FILE = "无法删除修改文件 {}";
+ public static final String FAIL_TO_DELETE_DATA_REGION_FOLDER = "删除 DataRegion 目录 {} 失败";
+ public static final String FAIL_TO_DELETE_DATA_REGION_OBJECT_FOLDER = "删除 DataRegion 对象目录 {} 失败";
+ public static final String FILES_WERE_CLOSED = "{} 个文件已关闭";
+ public static final String FAIL_TO_LOG_DELETE_TO_WAL = "写入删除日志到 WAL 失败。";
+ public static final String DELETION_EXECUTING_TABLE_DELETION = "[Deletion] 正在执行表删除 {}";
+ public static final String DELETION_UNSEALED_FILES_FOR = "[Deletion] {} 的未封闭文件: {}";
+ public static final String DELETION_SEALED_FILES_FOR = "[Deletion] {} 的已封闭文件: {}";
+ public static final String WRITING_NO_FILE_RELATED_DELETION_TO_WAL = "将无关文件的删除操作写入 WAL {}";
+ public static final String DELETION_SKIPPED_FILE_TIME = "[Deletion] {} 跳过 {},文件时间 {}";
+ public static final String EXPECT_IS_ACTUAL_IS = "期望值为 {},实际值为 {}";
+ public static final String DELETION_DOES_NOT_INVOLVE_ANY_FILE = "[Deletion] 删除操作 {} 不涉及任何文件";
+ public static final String FAIL_TO_WRITE_MOD_ENTRY_TO_FILES = "将修改条目 {} 写入文件失败";
+ public static final String REMOVE_TSFILE_DIRECTLY_WHEN_DELETE_DATA = "删除数据时直接移除 TsFile {}";
+ public static final String MEET_ERROR_IN_COMPACTION_SCHEDULE = "compaction 调度过程中遇到错误。";
+ public static final String MEET_ERROR_IN_TTL_CHECK = "TTL 检查过程中遇到错误。";
+ public static final String FAILED_TO_EXECUTE_OBJECT_TTL_CHECK = "执行对象 TTL 检查失败";
+ public static final String MEET_ERROR_IN_INSERTION_COMPACTION_SCHEDULE = "插入 compaction 调度过程中遇到错误。";
+ public static final String EXCEPTION_MOVE_NEW_TSFILE_IN_SETTLING = "在 settle 过程中移动新 TsFile 时发生异常";
+ public static final String TSFILE_LOADED_IN_UNSEQ_LIST = "TsFile {} 已成功加载到乱序列表中。";
+ public static final String CANNOT_CLOSE_LAST_READER_AFTER_LOAD = "加载 TsFile {} 后无法关闭上一个读取器";
+ public static final String FILE_ALREADY_LOADED_IN_UNSEQ_LIST = "文件 {} 已加载到乱序列表中";
+ public static final String CANNOT_DELETE_LOCAL_MOD_FILE = "无法删除本地修改文件 {}";
+ public static final String REMOVE_TSFILE_SUCCESSFULLY = "成功移除 TsFile {}。";
+ public static final String THREAD_INTERRUPTED_WAITING_COMPACTION = "等待 compaction 完成时线程被中断";
+ public static final String PARTIAL_FAILED_INSERTING_ROWS_ONE_DEVICE = "单设备部分行插入失败";
+ public static final String PARTIAL_FAILED_INSERTING_ROWS = "部分行插入失败";
+ public static final String REJECTED_INSERTING_MULTI_TABLETS = "多 tablet 插入被拒绝";
+ public static final String PARTIAL_FAILED_INSERTING_MULTI_TABLETS = "多 tablet 部分插入失败";
+ public static final String INTERRUPTED_WAITING_DATA_REGION_DELETED = "等待 DataRegion 删除时被中断。";
+ public static final String FAILED_TO_RENAME = "重命名 {} 为 {} 失败,";
+
+ // ======================== Compaction ========================
+
+ public static final String SELECTOR_NOT_FOR_INNER_SPACE = "此选择器不能用于选择内部空间任务";
+ public static final String SELECTOR_NOT_FOR_CROSS_SPACE = "此选择器不能用于选择跨空间任务";
+ public static final String SELECTOR_NOT_FOR_SETTLE = "此选择器不能用于选择 settle 任务";
+ public static final String UNSEQ_FILE_NO_OVERLAP_WITH_SEQ = "乱序文件 {} 与任何顺序文件都不重叠。";
+ public static final String CANNOT_SELECT_FILE_FOR_CROSS_COMPACTION = "{} 无法为跨空间 compaction 选择文件";
+ public static final String CURRENT_FILE_SIZE = "当前文件为 {},大小为 {}";
+ public static final String EXCEPTION_SELECTING_FILES = "选择文件时发生异常";
+ public static final String UNIMPLEMENTED = "未实现";
+ public static final String ILLEGAL_CROSS_COMPACTION_SELECTOR = "非法的跨空间 compaction 选择器 ";
+ public static final String ILLEGAL_COMPACTION_SELECTOR = "非法的 compaction 选择器 ";
+ public static final String COMPACTION_SCHEDULE_TASK_MANAGER_STARTED = "compaction 调度任务管理器已启动。";
+ public static final String WAITING_COMPACTION_SCHEDULE_POOL_SHUTDOWN = "正在等待 compaction 调度任务线程池关闭";
+ public static final String COMPACTION_SCHEDULE_MANAGER_WAIT_TO_STOP = "CompactionScheduleTaskManager 已等待 {} 秒以停止";
+ public static final String COMPACTION_SCHEDULE_TASK_MANAGER_STOPPED = "CompactionScheduleTaskManager 已停止";
+ public static final String REPAIR_FAILED_RENAME_PROGRESS_FILE = "[RepairTaskManager] 重命名修复数据进度文件失败";
+ public static final String REPAIR_SKIP_TASK_STOPPING = "[RepairTaskManager] 修复任务正在停止,跳过当前任务";
+ public static final String REPAIR_SCAN_TASK_CANCELLED = "[RepairScheduler] 扫描任务已取消";
+ public static final String REPAIR_ERROR_SCAN_TIME_PARTITION = "[RepairScheduler] 扫描时间分区文件时遇到错误";
+ public static final String COMPACTION_TASK_MANAGER_STARTED = "compaction 任务管理器已启动。";
+ public static final String WAITING_TASK_EXECUTION_POOL_SHUTDOWN = "正在等待任务执行线程池关闭";
+ public static final String WAITING_TASK_EXECUTION_POOL_SHUTDOWN_MS = "正在等待任务执行线程池关闭,超时 {} 毫秒";
+ public static final String INTERRUPTED_WAITING_ALL_TASK_FINISH = "等待所有任务完成时被中断";
+ public static final String ALL_COMPACTION_TASK_FINISH = "所有 compaction 任务已完成";
+ public static final String COMPACTION_MANAGER_WAIT_TO_STOP = "CompactionManager 已等待 {} 秒以停止";
+ public static final String COMPACTION_MANAGER_STOPPED = "CompactionManager 已停止";
+ public static final String COMPACTION_THREAD_POOL_CANNOT_CLOSE = "compaction 线程池在 {} 毫秒内无法关闭";
+ public static final String TIMEOUT_WAITING_TASK_FUTURE = "等待任务结果超时";
+ public static final String COMPACTION_THREAD_TERMINATES = "CompactionThread-{} 因中断而终止";
+ public static final String EXCEPTION_EXECUTING_COMPACTION_TASK = "执行 compaction 任务时发生异常。{}";
+ public static final String TIMEOUT_GET_COMPACTION_TASK_SUMMARY = "尝试获取 compaction 任务摘要超时";
+ public static final String TTL_CHECK_TASK_FAILED = "[TTLCheckTask-{}] 执行 TTL 检查失败";
+ public static final String ERROR_CREATING_SETTLE_LOG = "创建 settle 日志时出错,文件路径: {}";
+ public static final String WRITE_SETTLE_LOG_FAILED = "写入 settle 日志文件失败,日志文件: {}";
+ public static final String CLOSE_UPGRADE_LOG_FAILED = "关闭升级日志文件失败,日志文件: {}";
+ public static final String FIND_SETTLED_FILE = "找到 {} 的 settle 文件";
+ public static final String GENERATE_SETTLED_FILE = "为 {} 生成 settle 文件";
+ public static final String ALL_FILES_SETTLED_SUCCESSFULLY = "所有文件 settle 成功! ";
+ public static final String SUB_COMPACTION_TASK_MEET_ERRORS = "[Compaction] 子 compaction 任务遇到错误 ";
+ public static final String TASK_TYPE_NO_TMP_FILE_SUFFIX = "当前任务类型 {} 没有临时文件后缀。";
+ public static final String CANNOT_GET_MOD_FILE = "无法获取 {} 的修改文件";
+ public static final String COMPACTION_START_DELETE_REAL_FILE = "{} [Compaction] compaction 开始删除实际文件 ";
+ public static final String COMPACTION_START_DELETE_SOURCE_MODS = "{} [Compaction] 开始删除源文件的修改记录";
+ public static final String COMPACTION_DELETE_FILE = "[Compaction] 删除文件: {}";
+ public static final String FAILED_TO_READ_FILE_ATTRIBUTES = "读取文件属性失败: {}";
+ public static final String FAILED_TO_CHECK_TABLE_DIR = "检查表目录失败: {}";
+ public static final String REMOVE_OBJECT_FILE_SIZE = "移除对象文件 {},大小为 {}(字节)";
+ public static final String FAILED_TO_DELETE_EXPIRED_OBJECT_FILE = "删除过期对象文件失败: {}";
+ public static final String SHOULD_CALL_EXACT_SUB_CLASS = "应调用具体的子类!";
+ public static final String NO_NEXT_BLOCK = "没有下一个块";
+ public static final String METHOD_NOT_SUPPORTED_FAST_CROSS_WRITER = "FastCrossCompactionWriter 不支持此方法";
+ public static final String DEVICE_SHOULD_EXIST_IN_SEQ_FILE = "设备应存在于当前顺序文件中";
+ public static final String METHOD_NOT_SUPPORTED_FAST_INNER_WRITER = "FastInnerCompactionWriter 不支持此方法";
+ public static final String METHOD_NOT_SUPPORTED_READ_POINT_WRITER = "ReadPointInnerCompactionWriter 不支持此方法";
+ public static final String UNKNOWN_DATA_TYPE = "未知的数据类型 ";
+ public static final String FAILED_TO_DELETE_TARGET_FILE = "删除目标文件 %s 失败";
+ public static final String SOURCE_FILES_CANNOT_BE_DELETED = "源文件无法成功删除";
+ public static final String FAIL_TO_GET_TSFILE_NAME = "获取 {} 的 TsFile 名称失败";
+ public static final String ERROR_ESTIMATE_INNER_COMPACTION_MEMORY = "估算内部 compaction 内存时遇到错误";
+ public static final String CANNOT_RECOVER_INSERTION_CROSS_TASK = "无法恢复 InsertionCrossSpaceCompactionTask";
+ public static final String FAILED_TO_REPAIR_FILE = "修复文件 {} 失败";
+ public static final String FAILED_DELETE_FULLY_DIRTY_SOURCE = "删除完全脏的源文件失败。";
+ public static final String RECOVER_MODS_FILE_ERROR = "列出文件时恢复修改文件出错: {}";
+ public static final String UNKNOWN_COMPACTION_TASK_TYPE = "未知的 compaction 任务类型 {}";
+ public static final String RECOVER_COMPACTION_ERROR = "恢复 compaction 出错";
+ public static final String COMPACTION_RECOVER_FAILED = "{} [Compaction][Recover] 恢复 compaction 失败";
+ public static final String MEET_ERROR_WHEN_READ_TSFILE = "读取 TsFile {} 时遇到错误";
+ public static final String UNKNOWN_REPAIR_LOG_FORMAT = "未知的修复日志格式";
+ public static final String REPAIR_START_CHECK_TSFILE = "[RepairScheduler] 开始检查 TsFile: {}";
+ public static final String REPAIR_SKIPPED_BROKEN_FILE = "[RepairScheduler] {} 因损坏而被跳过";
+ public static final String REPAIR_FAILED_CREATE_LOGGER = "[RepairScheduler] 创建修复日志器失败";
+ public static final String REPAIR_FAILED_CLOSE_LOGGER = "[RepairScheduler] 关闭修复日志器失败";
+ public static final String REPAIR_WAIT_COMPACTION_FINISH = "[RepairScheduler] 等待 compaction 调度任务完成";
+ public static final String REPAIR_WAIT_ALL_RUNNING_TASK_FINISH = "[RepairScheduler] 等待所有正在运行的 compaction 任务完成";
+ public static final String REPAIR_TASK_FINISHED = "[RepairScheduler] 修复任务已完成";
+ public static final String REPAIR_SCHEDULE_TASK_ERROR = "[RepairScheduler] 执行修复调度任务时遇到错误";
+ public static final String REPAIR_FAILED_INIT_SCHEDULE_TASK = "[RepairScheduler] 初始化修复调度任务失败";
+ public static final String REPAIR_ALL_PARTITIONS_DONE_SKIP = "[RepairScheduler] 所有时间分区已修复,跳过修复任务";
+ public static final String END_MUST_GREATER_THAN_START = "结束值必须大于起始值";
+ public static final String DATA_DIRS_MUST_NOT_BE_EMPTY = "data_dirs 不能为空";
+ public static final String DOES_NOT_EXIST = "{} 不存在。";
+ public static final String CHECK_FAILED = "检查 {} 失败。";
+ public static final String FAILED_TO_DEAL_WITH = "处理 {} 失败";
+ public static final String ERROR_OCCURRED = "发生错误";
+
+ // ======================== MemTable ========================
+
+ public static final String CANNOT_DESERIALIZE_OLD_MEMTABLE_SNAPSHOT = "无法反序列化旧版 MemTable 快照";
+ public static final String DEVICE_ID_LENGTH_SHOULD_BE_POSITIVE = "DeviceID 的长度应大于 0。";
+ public static final String CREATE_NEW_TSFILE_PROCESSOR = "创建新的 TsFile 处理器 {}";
+ public static final String REOPEN_TSFILE_PROCESSOR = "重新打开 TsFile 处理器 {}";
+ public static final String EXCEPTION_DURING_WAL_FLUSH = "WAL flush 过程中发生异常";
+ public static final String DELETION_IN_FLUSHING_MEMTABLE = "[Deletion] 在 flushing 中的 memtable 中执行删除 {}";
+ public static final String START_WAIT_UNTIL_FILE_CLOSED = "开始等待文件 {} 关闭";
+ public static final String FILE_CLOSED_SYNCHRONOUSLY = "文件 {} 已同步关闭";
+ public static final String DATAREGION_TSFILE_ERROR = "{}: {}";
+ public static final String DELETION_WRITTEN_WHEN_FLUSH = "[Deletion] flush memtable 时写入删除: {}";
+ public static final String FSYNC_MEMTABLE_TO_DISK_ERROR = "将 memtable 数据同步到磁盘出错,";
+ public static final String FLUSHING_MEMTABLES_CLEAR = "{} 的 flushing memtable 已清空";
+ public static final String START_TO_END_FILE = "开始结束文件 {}";
+ public static final String ENDED_FILE = "已结束文件 {}";
+ public static final String START_TO_END_EMPTY_FILE = "开始结束空文件 {}";
+ public static final String TIME_CHUNK_METADATA_SHOULD_NOT_BE_EMPTY = "对齐设备中的 TimeChunkMetadata 不应为空";
+ public static final String WRITABLE_MEM_CHUNK_UNSUPPORTED_TYPE = "WritableMemChunk 不支持数据类型: {}";
+
+ // ======================== Modification ========================
+
+ public static final String UNRECOGNIZED_PREDICATE_TYPE = "无法识别的谓词类型: ";
+ public static final String UNSUPPORTED_MOD_TYPE = "不支持的修改类型: ";
+ public static final String UNKNOWN_MOD_TYPE = "未知的 ModType: ";
+ public static final String CANNOT_CLOSE_MOD_FILE_INPUT_STREAM = "无法关闭 {} 的修改文件输入流";
+ public static final String CANNOT_READ_MOD_FILE_INPUT_STREAM = "无法读取 {} 的修改文件输入流";
+ public static final String COMPACT_MODS_FILE_EXCEPTION = "压缩 {} 的修改文件时发生异常";
+ public static final String SETTLE_SUCCESSFUL = "{} settle 成功";
+ public static final String REMOVE_ORIGIN_OR_RENAME_MODS_ERROR = "删除原始文件或重命名新修改文件出错。";
+ public static final String DELETE_MODIFICATION_FILE_FAILED = "删除修改文件 {} 失败。";
+ public static final String CANNOT_CREATE_HARDLINK = "无法为 {} 创建硬链接";
+ public static final String ERROR_READING_MODIFICATIONS = "读取修改记录时发生错误";
+ public static final String ERROR_DECODE_LINE_TO_MODIFICATION = "将行 [{}] 解码为修改记录时发生错误";
+ public static final String MODIFICATIONS_WILL_BE_TRUNCATED = "修改记录 [{}] 将被截断至大小 {}。";
+ public static final String LAST_LINE_OF_MODS_INCOMPLETE = "修改文件的最后一行不完整,将被截断";
+ public static final String UNKNOWN_MODIFICATION_TYPE = "未知的修改类型: ";
+ public static final String INCORRECT_DELETION_FIELDS_NUMBER = "删除字段数量不正确: ";
+ public static final String INVALID_TIMESTAMP = "无效的时间戳: ";
+ public static final String INVALID_SERIES_PATH = "无效的序列路径: ";
+
+ // ======================== WAL ========================
+
+ public static final String START_REBOOTING_WAL_DELETE_THREAD = "开始重启 WAL 删除线程。";
+ public static final String STOP_WAL_DELETE_THREAD_AND_RESTART = "WAL 删除线程已成功停止,正在重启。";
+ public static final String TIMED_WAL_DELETE_THREAD_INTERRUPTED = "定时 WAL 删除线程被中断。";
+ public static final String INTERRUPTED_WAITING_WAL_FLUSHED = "等待所有 WAL 日志 flush 完成时被中断。";
+ public static final String STOPPING_WAL_MANAGER = "正在停止 WALManager";
+ public static final String DELETING_OUTDATED_FILES_BEFORE_EXIT = "退出前删除过期文件";
+ public static final String WAL_MANAGER_STOPPED = "WALManager 已停止";
+ public static final String WAITING_THREAD_TERMINATED_TIMEOUT = "等待线程 {} 终止超时";
+ public static final String THREAD_NOT_EXIT_AFTER_30S = "线程 {} 在 30 秒后仍未退出";
+ public static final String FAILED_TO_DELETE_OUTDATED_WAL_FILE = "删除过期 WAL 文件失败";
+ public static final String UNRECOGNIZED_CHECKPOINT_TYPE = "无法识别的检查点类型 ";
+ public static final String CREATE_FOLDER_FOR_WAL_BUFFER = "为 WAL buffer-{} 创建目录 {}。";
+ public static final String FAIL_TO_LOG_MAX_MEMTABLE_ID = "记录最大 memtable ID: {} 失败";
+ public static final String FAIL_TO_MAKE_CHECKPOINT = "创建检查点 {} 失败";
+ public static final String MEMTABLE_ID_NOT_FOUND_IN_MAP = "在 MemTableId2Info 中未找到 memtable ID {}";
+ public static final String FAIL_TO_CLOSE_WAL_CHECKPOINT_WRITER = "关闭 WAL 节点 {} 的检查点写入器失败。";
+ public static final String CANNOT_WRITE_TO = "无法写入 {}";
+ public static final String REACH_END_OFFSET_OF_WAL_FILE = "已到达 WAL 文件的末尾偏移量";
+ public static final String UNEXPECTED_END_OF_FILE = "文件意外结束";
+ public static final String WAL_SEGMENT_V1_FAILED_V2_SUCCESS = "以 V1 方式加载 WAL 段失败,以 V2 方式重试成功。";
+ public static final String UNEXPECTED_EXCEPTION = "意外异常";
+ public static final String FAIL_TO_READ_WAL_ENTRY_SKIP_BROKEN = "从 WAL 文件 {} 读取 WALEntry 失败,跳过损坏的 WALEntry。";
+ public static final String INVALID_CHECKPOINT_FILE_NAME = "无效的检查点文件名: ";
+ public static final String INVALID_WAL_FILE_NAME = "无效的 WAL 文件名: ";
+ public static final String INTERRUPTED_WAITING_FOR_RESULT = "等待结果时被中断。";
+ public static final String CANNOT_WRITE_WAL_INTO_FAKE_NODE = "无法将 WAL 写入虚拟节点。 ";
+ public static final String CREATE_FOLDER_FOR_WAL_NODE = "为 WAL 节点 {} 创建目录 {}。";
+ public static final String FAIL_TO_DELETE_WAL_NODE_OUTDATED_FILES = "删除 WAL 节点 {} 的过期文件失败。";
+ public static final String FAIL_TO_GET_DATA_REGION_PROCESSOR = "获取 {} 的 DataRegion 处理器失败";
+ public static final String WAITING_TOO_LONG_FOR_MEMTABLE_FLUSH = "等待 memtable flush 完成时间过长。";
+ public static final String INTERRUPTED_WAITING_MEMTABLE_FLUSH = "等待 memtable flush 完成时被中断。";
+ public static final String FAIL_TO_ROLL_WAL_LOG_WRITER = "滚动 WAL 日志写入器失败。";
+ public static final String FAIL_TO_SNAPSHOT_MEMTABLE = "对 {} 的 memtable 进行快照失败";
+ public static final String START_RECOVERING_WAL_NODE_IN_DIR = "开始恢复目录 {} 中的 WAL 节点";
+ public static final String ERROR_DELETE_CHECKPOINT_FILE = "删除检查点文件 {} 时出错";
+ public static final String FAIL_TO_READ_WAL_LOGS_SKIP = "从 {} 读取 WAL 日志失败,跳过这些日志";
+ public static final String FAIL_TO_RENAME_FILE = "重命名文件 {} 为 {} 失败";
+ public static final String FAIL_TO_RECOVER_WAL_METADATA = "恢复 WAL 文件 {} 的元数据失败";
+ public static final String START_RECOVERING_WAL = "开始恢复 WAL。";
+ public static final String SUCCESSFULLY_RECOVER_ALL_WAL_NODES = "已成功恢复所有 WAL 节点。";
+ public static final String STORAGE_ENGINE_FAILED_TO_RECOVER = "存储引擎恢复失败。";
+ public static final String CANNOT_RECOVER_TSFILE_WAL_ALREADY_STARTED = "无法从 WAL 恢复 TsFile,因为 WAL 恢复已经开始";
+ public static final String FAIL_TO_REMOVE_RECOVER_PERFORMER = "移除文件 {} 的恢复执行器失败";
+ public static final String TSFILE_MISSING_SKIP_RECOVERY = "TsFile {} 缺失,将跳过恢复。";
+ public static final String UNSUPPORTED_TYPE = "不支持的类型 ";
+ public static final String ERROR_REDO_WAL = "重做 {} 的 WAL 时遇到错误";
+ public static final String CREATE_FOLDER_FOR_WAL_NODE_BUFFER = "为 WAL 节点 {} 的 buffer 创建目录 {}。";
+ public static final String OPEN_NEW_WAL_FILE_FOR_BUFFER = "为 WAL 节点 {} 的 buffer 打开新 WAL 文件 {}。";
+ public static final String FAIL_TO_ALLOCATE_WAL_BUFFER_OOM = "由于内存不足,无法为 WAL 节点 {} 分配 buffer。";
+ public static final String INTERRUPTED_WAITING_ADD_WAL_ENTRY = "等待将 WALEntry 添加到 buffer 时被中断。";
+ public static final String HANDLE_ROLL_LOG_WRITER_SIGNAL = "处理 WAL 节点 {} 的滚动日志写入器信号。";
+ public static final String INTERRUPTED_WAITING_WORKING_BUFFER = "等待可用工作 buffer 时被中断。";
+ public static final String FAIL_TO_PUT_CLOSE_SIGNAL = "将 CLOSE_SIGNAL 放入 walEntries 失败。";
+ public static final String FAIL_TO_CLOSE_WAL_LOG_WRITER = "关闭 WAL 节点 {} 的日志写入器失败。";
+ public static final String UNKNOWN_WAL_ENTRY_TYPE = "未知的 WALEntry 类型";
+ public static final String UNKNOWN_WAL_ENTRY_TYPE_WITH_VALUE = "未知的 WALEntry 类型 ";
+ public static final String INVALID_WAL_ENTRY_TYPE_CODE = "无效的 WALEntryType 编码: ";
+ public static final String CANNOT_SERIALIZE_CHECKPOINT_TO_WAL = "无法将检查点序列化到 WAL 文件。";
+ public static final String UNSUPPORTED_WAL_ENTRY_TYPE = "不支持的 WAL 条目类型 ";
+ public static final String CANNOT_USE_WAL_INFO_AS_SIGNAL_TYPE = "不能将 WAL 信息类型用作 WAL 信号类型";
+ public static final String FAIL_TO_CREATE_WAL_NODE_DISKS_FULL = "由于 WAL 目录的所有磁盘已满,无法创建 WAL 节点。";
+ public static final String FAILED_TO_CREATE_WAL_NODE_AFTER_RETRIES = "重试后仍无法创建 WAL 节点,标识符: ";
+ public static final String FAIL_TO_CREATE_WAL_NODE = "创建 WAL 节点失败";
+
+ // ======================== Flush ========================
+
+ public static final String RESTORE_FILE_ERROR = "恢复文件出错,原因 ";
+ public static final String CANNOT_DELETE_OLD_COMPRESSION_FILE = "无法删除旧的 DataRegion 压缩文件 {}";
+ public static final String CANNOT_DELETE_RATIO_FILE = "无法删除压缩率文件 {}";
+ public static final String TAKE_TASK_INTO_IO_QUEUE_INTERRUPTED = "将任务放入 ioTaskQueue 时被中断";
+ public static final String PUT_TASK_INTO_IO_QUEUE_INTERRUPTED = "将任务放入 ioTaskQueue 时被中断";
+ public static final String TAKE_TASK_FROM_IO_QUEUE_INTERRUPTED = "从 ioTaskQueue 取出任务时被中断";
+ public static final String FLUSH_SUB_TASK_MANAGER_STARTED = "flush 子任务管理器已启动。";
+ public static final String FLUSH_SUB_TASK_MANAGER_STOPPED = "flush 子任务管理器已停止";
+ public static final String FLUSH_TASK_MANAGER_STARTED = "flush 任务管理器已启动。";
+ public static final String FLUSH_TASK_MANAGER_STOPPED = "flush 任务管理器已停止";
+
+ // ======================== Read ========================
+
+ public static final String MEM_CHUNK_READER_NOT_SUPPORT_METHOD = "内存 Chunk 读取器不支持此方法";
+ public static final String MEM_ALIGNED_PAGE_READER_TSBLOCK = "[memAlignedPageReader] TsBlock:{}";
+ public static final String AFTER_FILTER_CHUNK_METADATA_LIST = "按过滤器移除后的 Chunk 元数据列表: ";
+ public static final String AFTER_MODIFICATION_CHUNK_METADATA_LIST = "修改后的 Chunk 元数据列表: ";
+ public static final String TIME_DATA_SIZE_NOT_MATCH = "时间数据大小不匹配";
+ public static final String QUERY_OPENED_FILES = "查询已打开 {} 个文件!";
+ public static final String CANNOT_CLOSE_TSFILE_SEQUENCE_READER = "无法关闭 TsFileSequenceReader {}!";
+ public static final String QUERY_SEALED_FILE_INFO = "[Query Sealed File Info]\n";
+ public static final String QUERY_ID_FORMAT = "\t[queryId: {}]\n";
+ public static final String QUERY_FILE_PATH_FORMAT = "\t\t{}\n";
+ public static final String QUERY_UNSEALED_FILE_INFO = "[Query Unsealed File Info]\n";
+
+ // ======================== Snapshot ========================
+
+ public static final String EXCEPTION_LOAD_SNAPSHOT = "从 {} 加载快照时发生异常";
+ public static final String READING_SNAPSHOT_LOG_FILE = "正在读取快照日志文件 {}";
+ public static final String REMOVE_ALL_DATA_FILES_IN_ORIGINAL_DIR = "移除原始数据目录中的所有数据文件";
+ public static final String FAILED_TO_REMOVE_ORIGIN_DATA_FILES = "移除原始数据文件失败";
+ public static final String MOVING_SNAPSHOT_FILE_TO_DATA_DIRS = "正在将快照文件移动到数据目录";
+ public static final String NO_COMPRESSION_RATIO_FILE_IN_DIR = "目录 {} 中没有压缩率文件";
+ public static final String CANNOT_LOAD_COMPRESSION_RATIO = "无法从 {} 加载压缩率";
+ public static final String LOADED_COMPRESSION_RATIO = "已从 {} 加载压缩率";
+ public static final String EXCEPTION_READING_SNAPSHOT_FILE = "读取快照文件时发生异常";
+ public static final String SNAPSHOT_NOT_COMPLETE_CANNOT_LOAD = "此快照不完整,无法加载";
+ public static final String CREATED_HARD_LINK = "已创建从 {} 到 {} 的硬链接";
+ public static final String EXCEPTION_CLOSING_LOG_ANALYZER = "关闭日志分析器时发生异常";
+ public static final String CANNOT_CREATE_PARENT_FOLDER = "无法创建父目录: ";
+ public static final String CANNOT_CREATE_FILE = "无法创建文件: ";
+ public static final String FAILED_TO_CLOSE_SNAPSHOT_LOGGER = "关闭快照日志器失败";
+ public static final String SNAPSHOTTING_COMPRESSION_RATIO = "正在快照压缩率文件 {}。";
+ public static final String CATCH_IO_EXCEPTION_CREATING_SNAPSHOT = "创建快照时捕获到 IOException";
+ public static final String HARD_LINK_TARGET_DIR_NOT_EXIST = "硬链接目标目录 {} 不存在";
+ public static final String HARD_LINK_SOURCE_FILE_NOT_EXIST = "硬链接源文件 {} 不存在,该文件将被忽略。";
+ public static final String COPY_TARGET_DIR_NOT_EXIST = "复制目标目录 {} 不存在";
+ public static final String COPY_SOURCE_FILE_NOT_EXIST = "复制源文件 {} 不存在";
+ public static final String CANNOT_CREATE_DIRECTORY = "无法创建目录: ";
+ public static final String CLEANING_UP_SNAPSHOT_DIR = "正在清理 {} 的快照目录";
+ public static final String FAILED_TO_CREATE_DIR = "创建目录 %s 失败";
+ public static final String FAILED_TO_TAKE_SNAPSHOT_CLEAN_UP = "为 {}-{} 创建快照失败,正在清理";
+ public static final String SUCCESSFULLY_TAKE_SNAPSHOT = "已成功为 {}-{} 创建快照,快照目录为 {}";
+ public static final String EXCEPTION_TAKING_SNAPSHOT = "为 {}-{} 创建快照时发生异常";
+ public static final String SNAPSHOT_COMPRESSION_RATIO_IN_DIR = "快照压缩率文件 {} 已保存到 {}。";
+ public static final String CANNOT_SNAPSHOT_COMPRESSION_RATIO = "无法快照压缩率文件 {} 到 {}。";
+ public static final String CLEAR_SNAPSHOT_DIR_FAIL = "清理快照目录失败,请在再次执行 Region 迁移前手动删除此目录: {}";
+ public static final String HARD_LINK_SOURCE_FILE_RETRY = "硬链接源文件 {} 不存在,将重试 {} 次...";
+ public static final String TRY_SHOW_FILES_IN_PARENT_DIR = "尝试显示父目录中的所有文件...";
+ public static final String CANNOT_SHOW_FILES_PARENT_DIR_NULL = "无法显示文件,因为父目录为空";
+ public static final String FAILED_DELETE_FOLDER_CLEANING_UP = "清理时删除目录 {} 失败";
+
+ // ======================== TsFile Resource ========================
+
+ public static final String FAILED_TO_SERIALIZE_SHARED_MOD_FILE = "序列化共享修改文件失败";
+ public static final String FAILED_TO_GET_SHARED_MOD_FILE = "获取共享修改文件失败";
+ public static final String UPGRADING_MOD_FILE_INTERRUPTED = "升级修改文件被中断";
+ public static final String CANNOT_UPGRADE_MOD_FILE = "无法升级修改文件";
+ public static final String TIME_INDEX_VALUE = "TimeIndex = {}";
+ public static final String RESOURCE_FILE_NOT_FOUND = "资源文件未找到";
+ public static final String CANNOT_BUILD_DEVICE_TIME_INDEX = "无法从资源文件构建 DeviceTimeIndex: ";
+ public static final String TSFILE_CANNOT_BE_DELETED = "TsFile {} 无法删除: {}";
+ public static final String MODIFICATION_FILE_CANNOT_BE_DELETED = "修改文件 {} 无法删除: {}";
+ public static final String TSFILE_RESOURCE_CANNOT_BE_DELETED = "TsFileResource {} 无法删除: {}";
+ public static final String FILE_NAME_NOT_STANDARD = "文件名可能不符合标准命名规范。";
+ public static final String FAILED_TO_READ_MODS = "从 {} 读取 {} 的修改记录失败";
+ public static final String INVALID_INPUT = "无效输入: ";
+ public static final String ALL_DISKS_FULL_CANNOT_CREATE_TSFILE_DIR = "所有磁盘已满,无法创建 TsFile 目录";
+ public static final String DISK_SPACE_INSUFFICIENT = "磁盘空间不足";
+ public static final String FAILED_TO_CREATE_TSFILE_DIR_AFTER_RETRIES = "重试后仍无法创建 TsFile 目录";
+ public static final String FAILED_TO_CREATE_DIR_AFTER_RETRIES = "重试后仍无法创建目录";
+ public static final String TSFILE_NAME_FORMAT_INCORRECT = "TsFile 文件名格式不正确: ";
+ public static final String WRONG_TIME_INDEX_TYPE_LOG = "错误的 timeIndex 类型 {}";
+ public static final String WRONG_TIME_INDEX_TYPE = "错误的 timeIndex 类型 ";
+ public static final String ERROR_RECORD_FILE_TIME_INDEX_CACHE = "记录 FileTimeIndexCache 时遇到错误: {}";
+ public static final String ERROR_RECORD_FILE_TIME_INDEX_CACHE_NO_DETAIL = "记录 FileTimeIndexCache 时遇到错误";
+ public static final String ERROR_COMPACT_FILE_TIME_INDEX_CACHE = "压缩 FileTimeIndexCache 时遇到错误: {}";
+ public static final String ERROR_COMPACT_FILE_TIME_INDEX_CACHE_NO_DETAIL = "压缩 FileTimeIndexCache 时遇到错误";
+ public static final String FILE_TIME_INDEX_FILE_ALREADY_EXISTS = "FileTimeIndex 文件已存在,文件路径: {}";
+ public static final String ERROR_CLOSE_FILE_TIME_INDEX_CACHE = "关闭 FileTimeIndexCache 时遇到错误: {}";
+ public static final String END_OF_STREAM_REACHED = "已到达流的末尾";
+ public static final String V012_FILE_TIME_INDEX_SHOULD_NEVER_APPEAR = "V012_FILE_TIME_INDEX 不应出现";
+ public static final String INVALID_ORDINAL = "无效的序号";
+
+ // ======================== DataRegion Utils ========================
+
+ public static final String FAILED_TO_SCAN_FILE = "扫描文件 {} 失败";
+ public static final String DEVICE_LEVEL_METADATA_INDEX_NOT_SUPPORTED = "不支持设备级别的元数据索引节点";
+ public static final String NO_MORE_DATA_IN_SHARED_TIME_BUFFER = "SharedTimeDataBuffer 中没有更多数据";
+ public static final String FAILED_TO_CALC_TSFILE_TABLE_SIZES = "计算 TsFile 表大小失败";
+ public static final String TIME_INDEX_IS_NULL = "{} {} 时间索引为空";
+ public static final String EMPTY_RESOURCE = "{} {} 资源为空";
+ public static final String ERROR_VALIDATE_RESOURCE_FILE = "验证 .resource 文件 {} 时出错";
+ public static final String ILLEGAL_TSFILE = "{} {} 非法 TsFile";
+ public static final String ERROR_VALIDATING_TSFILE = "验证 TsFile {} 时遇到错误, ";
+ public static final String EXCEPTION_APPLY_TABLE_DISK_USAGE_INDEX = "应用 TableDiskUsageIndex 操作时遇到异常。";
+ public static final String FAILED_RECOVER_TABLE_DISK_USAGE_INDEX = "恢复 TableDiskUsageIndex 失败";
+ public static final String FAILED_SYNC_TABLE_SIZE_INDEX = "同步 TsFile 表大小索引失败。";
+ public static final String WRITE_OBJECT_DELTA = "writeObjectDelta";
+ public static final String EXCEPTION_REMOVE_TABLE_DISK_USAGE_INDEX = "移除 TableDiskUsageIndex 时遇到异常。";
+ public static final String INTERRUPTED_ADDING_OP_TO_QUEUE = "将操作 {} 添加到队列时被中断。";
+ public static final String FAILED_TO_MOVE_FILE = "移动 {} 到 {} 失败";
+ public static final String FAILED_TO_READ_KEY_FILE_DURING_COMPACTION = "compaction 过程中读取键文件失败";
+ public static final String FAILED_COMPACTION_TABLE_SIZE_INDEX = "对 TsFile 表大小索引文件执行 compaction 失败";
+ public static final String FAILED_TO_READ_TABLE_SIZE_INDEX = "读取表 TsFile 大小索引文件 {} 失败";
+ public static final String TABLE_NUM_SHOULD_BE_POSITIVE = "tableNum 应大于 0";
+ public static final String BACKWARD_SEEK_NOT_SUPPORTED = "不支持向后查找";
+ public static final String THREAD_INTERRUPTED_SKIP_WRITE_FOR_IO_SAFETY = "当前线程被中断,为保证 IO 安全无需执行写入";
+ public static final String PARTITION_LOG_FILE_ALREADY_EXISTS = "分区日志文件已存在,文件路径: {}";
+
+ // ======================== Load TsFile ========================
+
+ public static final String UNSUPPORTED_TSFILE_DATA_TYPE = "不支持的 TsFileData 类型: ";
+ public static final String DELETE_AFTER_LOADING_ERROR = "加载后删除 {} 出错。";
+ public static final String LOAD_TSFILE_DIR_CREATED = "已创建加载 TsFile 目录 {}。";
+ public static final String CANNOT_CREATE_TSFILE_FOR_WRITING = "无法创建 TsFile {} 用于写入。";
+ public static final String CLOSE_TSFILE_IO_WRITER_ERROR = "关闭 TsFileIOWriter {} 出错。";
+ public static final String CLOSE_MODIFICATION_FILE_ERROR = "关闭修改文件 {} 出错。";
+ public static final String TASK_DIR_NOT_EMPTY_SKIP_DELETE = "任务目录 {} 非空,跳过删除。";
+ public static final String LOAD_CLEANUP_TASK_CANCELED = "加载清理任务 {} 已取消。";
+ public static final String LOAD_CLEANUP_TASK_STARTS = "加载清理任务 {} 开始。";
+ public static final String LOAD_CLEANUP_TASK_ERROR = "加载清理任务 {} 出错。";
+ public static final String FAILED_UPDATE_FILE_COUNTER_DIR_NOT_EXIST = "更新文件计数器失败,目录 ({}) 不存在";
+ public static final String UNSUPPORTED_STAGE = "不支持的阶段: ";
+ public static final String RELEASE_MEMORY_BLOCK_FAILED = "释放内存块 {} 失败";
+ public static final String EXCEED_TOTAL_MEMORY_SIZE = "{} 已超出总内存大小";
+ public static final String REDUCE_MEMORY_USAGE_TO_NEGATIVE = "{} 的内存使用量已降为负数";
+ public static final String FORCE_ALLOCATE_INTERRUPTED = "forceAllocate: 等待可用内存时被中断";
+ public static final String LOAD_ALLOCATED_MEMORY_BLOCK = "Load: 从查询引擎分配内存块,大小: {}";
+ public static final String RELEASE_DATA_CACHE_MEMORY_BLOCK = "释放数据缓存内存块 {}";
+ public static final String START_DATA_TYPE_CONVERSION_DOT = "开始对 LoadTsFileStatement: {} 进行数据类型转换。";
+ public static final String START_DATA_TYPE_CONVERSION = "开始对 LoadTsFileStatement: {} 进行数据类型转换";
+ public static final String FAIL_TO_LOAD_TSFILE_TO_ACTIVE_DIR = "加载 TsFile 到 Active 目录失败";
+ public static final String FAIL_TO_LOAD_DISK_SPACE = "获取文件 {} 的磁盘空间失败";
+ public static final String LOAD_ACTIVE_LISTENING_DIR_NOT_SET = "未设置加载 Active 监听目录。";
+ public static final String FAILED_TO_CREATE_TARGET_DIR = "创建目标目录失败: ";
+ public static final String FAILED_LOAD_ACTIVE_LISTENING_DIRS = "加载 Active 监听目录失败";
+ public static final String INVALID_PARAMETER = "无效的参数 '";
+ public static final String UTILITY_CLASS = "工具类";
+ public static final String TSFILE_DATA_BYTE_ARRAY_SIZE_MISMATCH = "TsFileData 字节数组读取错误,大小不匹配。";
+ public static final String UNKNOWN_TSFILE_DATA_TYPE = "未知的 TsFileData 类型: ";
+ public static final String FILE_MAGIC_STRING_INCORRECT = "文件的 MAGIC STRING 不正确,文件路径: {}";
+ public static final String FILE_VERSION_TOO_OLD = "文件的版本号过旧,文件路径: {}";
+ public static final String FILE_NOT_CLOSED_CORRECTLY = "文件未正确关闭,文件路径: {}";
+ public static final String MINIO_SELECTOR_REQUIRES_ONE_DIR = "MinIO 选择器至少需要一个目录";
+ public static final String ADD_MOUNT_POINT = "添加 {} 的挂载点 {}";
+ public static final String FAILED_TO_CHECK_DIRECTORY = "检查目录失败: {}";
+ public static final String FAILED_TO_LIST_FILES_IN_DIR = "列出目录 {} 中的文件失败";
+ public static final String FAILED_TO_DELETE_FILE_OR_DIR = "删除文件或目录 {} 失败";
+ public static final String FAILED_TO_CLEANUP_DIRECTORY = "清理目录 {} 失败";
+ public static final String CLEANED_UP_ACTIVE_LOAD_DIRS = "已清理 Active 加载监听目录";
+ public static final String UNEXPECTED_ERROR_CLEANUP_ACTIVE_DIRS = "清理 Active 加载监听目录时发生意外错误";
+ public static final String ACTIVE_LOAD_DIR_SCANNER_REGISTERED = "Active 加载目录扫描定期任务已注册";
+ public static final String ERROR_ACTIVE_LOAD_DIR_SCANNING = "Active 加载目录扫描过程中发生错误。";
+ public static final String SYSTEM_READ_ONLY_SKIP_ACTIVE_SCAN = "当前系统为只读模式,跳过 Active 加载目录扫描。";
+ public static final String FILE_DELETED_IGNORE_EXCEPTION = "文件已被删除,忽略此异常。";
+ public static final String EXCEPTION_SCANNING_DIR = "扫描目录 {} 时发生异常";
+ public static final String ERROR_CREATING_DIR_FOR_ACTIVE_LOAD = "为 Active 加载创建目录 {} 时发生错误。";
+ public static final String FAILED_COUNT_ACTIVE_DIRS_FILE_NUMBER = "统计 Active 监听目录文件数量失败。";
+ public static final String ACTIVE_LOAD_METRIC_COLLECTOR_REGISTERED = "Active 加载指标收集定期任务已注册";
+ public static final String DATABASE_NAME_MUST_NOT_BE_EMPTY = "数据库名称不能为空。";
+ public static final String ERROR_EXECUTING_ACTIVE_LOAD_JOB = "执行 Active 加载定期任务时发生错误。";
+ public static final String ACTIVE_LOAD_EXECUTOR_STARTED = "Active 加载定期任务执行器已成功启动。";
+ public static final String ACTIVE_LOAD_EXECUTOR_STOPPED = "Active 加载定期任务执行器已成功停止。";
+ public static final String ERROR_MOVING_FILE_TO_FAIL_DIR = "将文件 {} 移动到失败目录时发生错误。";
+ public static final String FAILED_COUNT_FILES_IN_FAIL_DIR = "统计失败目录中的失败文件数量失败。";
+
+ public static final String STRING_NOT_LEGAL_REPAIR_LOG = "字符串 '%s' 不是合法的修复日志";
+}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/BasicAuthorityCache.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/BasicAuthorityCache.java
index 1de4f0b0da89a..1056475f5c0a2 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/BasicAuthorityCache.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/BasicAuthorityCache.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.commons.auth.entity.Role;
import org.apache.iotdb.commons.auth.entity.User;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.i18n.DataNodeMiscMessages;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
@@ -90,7 +91,7 @@ public boolean invalidateCache(final String userName, final String roleName) {
userCache.invalidate(userName);
}
if (userCache.getIfPresent(userName) != null) {
- LOGGER.error("datanode cache initialization failed");
+ LOGGER.error(DataNodeMiscMessages.DATANODE_CACHE_INIT_FAILED);
return false;
}
}
@@ -99,7 +100,7 @@ public boolean invalidateCache(final String userName, final String roleName) {
roleCache.invalidate(roleName);
}
if (roleCache.getIfPresent(roleName) != null) {
- LOGGER.error("datanode cache initialization failed");
+ LOGGER.error(DataNodeMiscMessages.DATANODE_CACHE_INIT_FAILED);
return false;
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/ClusterAuthorityFetcher.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/ClusterAuthorityFetcher.java
index f255d30385b6b..9ee232c921b83 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/ClusterAuthorityFetcher.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/ClusterAuthorityFetcher.java
@@ -48,6 +48,7 @@
import org.apache.iotdb.confignode.rpc.thrift.TPathPrivilege;
import org.apache.iotdb.confignode.rpc.thrift.TPermissionInfoResp;
import org.apache.iotdb.confignode.rpc.thrift.TRoleResp;
+import org.apache.iotdb.db.i18n.DataNodeMiscMessages;
import org.apache.iotdb.db.protocol.client.ConfigNodeClient;
import org.apache.iotdb.db.protocol.client.ConfigNodeClientManager;
import org.apache.iotdb.db.protocol.client.ConfigNodeInfo;
@@ -717,7 +718,7 @@ public User cacheUser(TPermissionInfoResp tPermissionInfoResp) {
try {
user.loadTreePrivilegeInfo(privilegeList);
} catch (MetadataException e) {
- LOGGER.error("cache user's path privileges error", e);
+ LOGGER.error(DataNodeMiscMessages.CACHE_USER_PATH_PRIVILEGES_ERROR, e);
}
if (tPermissionInfoResp.isSetRoleInfo()) {
for (String roleName : tPermissionInfoResp.getRoleInfo().keySet()) {
@@ -740,7 +741,7 @@ public Role cacheRole(String roleName, TPermissionInfoResp tPermissionInfoResp)
try {
role.loadTreePrivilegeInfo(resp.getPrivilegeList());
} catch (MetadataException e) {
- LOGGER.error("cache role's path privileges error", e);
+ LOGGER.error(DataNodeMiscMessages.CACHE_ROLE_PATH_PRIVILEGES_ERROR, e);
}
return role;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/LoginLockManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/LoginLockManager.java
index f81306d2e62e9..aa49484e8b923 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/LoginLockManager.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/auth/LoginLockManager.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.auth.entity.User;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.i18n.DataNodeMiscMessages;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -70,7 +71,7 @@ public LoginLockManager(
// Set and validate failedLoginAttempts (IP level)
if (failedLoginAttempts <= 0) {
this.failedLoginAttempts = -1; // Completely disable IP-level restrictions
- LOGGER.info("IP-level login attempts disabled (set to {})", failedLoginAttempts);
+ LOGGER.info(DataNodeMiscMessages.IP_LOGIN_ATTEMPTS_DISABLED, failedLoginAttempts);
} else {
this.failedLoginAttempts = failedLoginAttempts;
}
@@ -78,7 +79,7 @@ public LoginLockManager(
// Set and validate failedLoginAttemptsPerUser (user level)
if (failedLoginAttemptsPerUser <= 0) {
this.failedLoginAttemptsPerUser = -1; // Disable user-level restrictions
- LOGGER.info("User-level login attempts disabled (set to {})", failedLoginAttemptsPerUser);
+ LOGGER.info(DataNodeMiscMessages.USER_LOGIN_ATTEMPTS_DISABLED, failedLoginAttemptsPerUser);
// Additional check: if IP-level is enabled (>1), enable user-level with default 1000
if (this.failedLoginAttempts > 1) {
@@ -214,7 +215,7 @@ public void recordFailure(long userId, String ip) {
// Check if threshold reached (log only when it just reaches)
int failCountIp = existing.getFailureCount();
if (failCountIp >= failedLoginAttempts) {
- LOGGER.info("IP '{}' locked for user ID '{}'", ip, userId);
+ LOGGER.info(DataNodeMiscMessages.IP_LOCKED, ip, userId);
}
return existing;
});
@@ -275,12 +276,12 @@ public void unlock(long userId, String ip) {
userLocks.remove(userId);
// Also remove all IP locks for this user
userIpLocks.keySet().removeIf(key -> key.startsWith(userId + "@"));
- LOGGER.info("User ID '{}' unlocked (manual)", userId);
+ LOGGER.info(DataNodeMiscMessages.USER_UNLOCKED_MANUAL, userId);
} else {
// Unlock specific user@ip lock
String userIpKey = buildUserIpKey(userId, ip);
userIpLocks.remove(userIpKey);
- LOGGER.info("IP '{}' for user ID '{}' unlocked (manual)", ip, userId);
+ LOGGER.info(DataNodeMiscMessages.IP_UNLOCKED_MANUAL, ip, userId);
}
}
@@ -298,7 +299,7 @@ public void cleanExpiredLocks() {
// Remove outdated failures
info.removeOldFailures(cutoffTime);
if (info.getFailureCount() == 0) {
- LOGGER.info("User ID '{}' unlocked (expired)", entry.getKey());
+ LOGGER.info(DataNodeMiscMessages.USER_UNLOCKED_EXPIRED, entry.getKey());
return true;
}
return false;
@@ -314,7 +315,7 @@ public void cleanExpiredLocks() {
info.removeOldFailures(cutoffTime);
if (info.getFailureCount() == 0) {
String[] parts = entry.getKey().split("@");
- LOGGER.info("IP '{}' for user ID '{}' unlocked (expired)", parts[1], parts[0]);
+ LOGGER.info(DataNodeMiscMessages.IP_UNLOCKED_EXPIRED, parts[1], parts[0]);
return true;
}
return false;
@@ -336,7 +337,7 @@ private void checkForPotentialAttacks(long userId, String ip) {
}
if (usersForIp.size() > 50) {
- LOGGER.warn("IP '{}' locked by {} different users → potential attack", ip, usersForIp.size());
+ LOGGER.warn(DataNodeMiscMessages.IP_LOCKED_MULTIPLE_USERS, ip, usersForIp.size());
}
// Check if user has many IP locks
@@ -348,7 +349,7 @@ private void checkForPotentialAttacks(long userId, String ip) {
}
if (ipsForUser.size() > 100) {
- LOGGER.warn("User ID '{}' has {} IP locks → potential attack", userId, ipsForUser.size());
+ LOGGER.warn(DataNodeMiscMessages.USER_MULTIPLE_IP_LOCKS, userId, ipsForUser.size());
}
}
@@ -397,7 +398,7 @@ private boolean isFromLocalhost(String ip) {
}
}
} catch (Exception e) {
- LOGGER.warn("Failed to check if IP address={} is up", ip, e);
+ LOGGER.warn(DataNodeMiscMessages.FAILED_CHECK_IP_UP, ip, e);
return false; // In case of error, assume non-local
}
return false;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/DataNodeMemoryConfig.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/DataNodeMemoryConfig.java
index b2673ba9f5f9b..7982eff927b87 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/DataNodeMemoryConfig.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/DataNodeMemoryConfig.java
@@ -23,6 +23,7 @@
import org.apache.iotdb.commons.conf.TrimProperties;
import org.apache.iotdb.commons.memory.MemoryConfig;
import org.apache.iotdb.commons.memory.MemoryManager;
+import org.apache.iotdb.db.i18n.DataNodeMiscMessages;
import org.apache.iotdb.db.storageengine.dataregion.compaction.selector.estimator.AbstractCompactionEstimator;
import org.apache.iotdb.db.utils.MemUtils;
@@ -422,7 +423,7 @@ private void initQueryEngineMemoryAllocate(
"enable_query_memory_estimation"))));
} catch (Exception e) {
- LOGGER.error(String.format("Fail to reload configuration because %s", e));
+ LOGGER.error(String.format(DataNodeMiscMessages.FAIL_RELOAD_CONFIGURATION_FMT, e));
}
String queryMemoryAllocateProportion =
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/DataNodeStartupCheck.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/DataNodeStartupCheck.java
index 7291469f67619..e1306b4595cd1 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/DataNodeStartupCheck.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/DataNodeStartupCheck.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.exception.StartupException;
import org.apache.iotdb.commons.service.StartupChecks;
+import org.apache.iotdb.db.i18n.DataNodeMiscMessages;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -51,9 +52,9 @@ private void checkDataNodePortUnique() throws StartupException {
portSet.add(config.getDataRegionConsensusPort());
portSet.add(config.getSchemaRegionConsensusPort());
if (portSet.size() != DATANODE_PORTS) {
- throw new StartupException("ports used in datanode have repeat.");
+ throw new StartupException(DataNodeMiscMessages.PORTS_HAVE_REPEAT);
} else {
- LOGGER.info("DataNode port check successful.");
+ LOGGER.info(DataNodeMiscMessages.DATANODE_PORT_CHECK_SUCCESSFUL);
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
index 9b177cffcfa96..fa0c802bfdcaa 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
@@ -29,6 +29,7 @@
import org.apache.iotdb.commons.utils.FileUtils;
import org.apache.iotdb.consensus.ConsensusFactory;
import org.apache.iotdb.db.exception.LoadConfigurationException;
+import org.apache.iotdb.db.i18n.DataNodeMiscMessages;
import org.apache.iotdb.db.protocol.thrift.impl.ClientRPCServiceImpl;
import org.apache.iotdb.db.queryengine.plan.relational.metadata.fetcher.cache.LastCacheLoadStrategy;
import org.apache.iotdb.db.storageengine.dataregion.compaction.execute.performer.constant.CrossCompactionPerformer;
@@ -1420,8 +1421,7 @@ void reloadDataDirs(String[][] newTierDataDirs) throws LoadConfigurationExceptio
newDir ->
Objects.equals(
new File(newDir).getAbsolutePath(), new File(oldDir).getAbsolutePath()))) {
- String msg =
- String.format("%s is removed from data_dirs parameter, please add it back.", oldDir);
+ String msg = String.format(DataNodeMiscMessages.DIR_REMOVED_FROM_DATA_DIRS, oldDir);
logger.error(msg);
throw new LoadConfigurationException(msg);
}
@@ -1457,7 +1457,7 @@ public static String addDataHomeDir(final String dir) {
try {
dataHomeDir = dataHomeFile.getCanonicalPath();
} catch (IOException e) {
- logger.error("Fail to get canonical path of {}", dataHomeFile, e);
+ logger.error(DataNodeMiscMessages.FAIL_GET_CANONICAL_PATH, dataHomeFile, e);
}
return FileUtils.addPrefix2FilePath(dataHomeDir, dir);
}
@@ -1562,7 +1562,7 @@ public String[] getLoadTsFileDirs() {
public void formulateLoadTsFileDirs(String[][] tierDataDirs) {
if (tierDataDirs.length < 1) {
- logger.warn("No data directory is set. loadTsFileDirs is kept as the default value.");
+ logger.warn(DataNodeMiscMessages.NO_DATA_DIR_SET);
return;
}
@@ -3533,7 +3533,7 @@ public String getConfigMessage() {
.append(configContent)
.append(";");
} catch (Exception e) {
- logger.warn("Failed to get field {}", configField, e);
+ logger.warn(DataNodeMiscMessages.FAILED_GET_FIELD, configField, e);
}
}
return configMessage.toString();
@@ -4023,7 +4023,7 @@ public void setSkipFailedTableSchemaCheck(boolean skipFailedTableSchemaCheck) {
return;
}
this.skipFailedTableSchemaCheck = skipFailedTableSchemaCheck;
- logger.info("skipFailedTableSchemaCheck is set to {}.", skipFailedTableSchemaCheck);
+ logger.info(DataNodeMiscMessages.SKIP_FAILED_TABLE_SCHEMA_CHECK, skipFailedTableSchemaCheck);
}
public long getLoadActiveListeningCheckIntervalSeconds() {
@@ -4216,7 +4216,7 @@ public String getSortTmpDir() {
}
public String getObjectStorageBucket() {
- throw new UnsupportedOperationException("object storage is not supported yet");
+ throw new UnsupportedOperationException(DataNodeMiscMessages.OBJECT_STORAGE_NOT_SUPPORTED_YET);
}
public long getDataRatisPeriodicSnapshotInterval() {
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
index 7193fd27c582d..523205bd8ba05 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
@@ -36,6 +36,7 @@
import org.apache.iotdb.confignode.rpc.thrift.TRatisConfig;
import org.apache.iotdb.consensus.config.IoTConsensusV2Config;
import org.apache.iotdb.db.consensus.DataRegionConsensusImpl;
+import org.apache.iotdb.db.i18n.DataNodeMiscMessages;
import org.apache.iotdb.db.pipe.resource.log.PipePeriodicalLogReducer;
import org.apache.iotdb.db.queryengine.plan.relational.metadata.fetcher.cache.LastCacheLoadStrategy;
import org.apache.iotdb.db.service.metrics.IoTDBInternalLocalReporter;
@@ -130,7 +131,7 @@ public class IoTDBDescriptor {
if (e instanceof InterruptedException) {
Thread.currentThread().interrupt();
}
- LOGGER.error("Failed to update config file", e);
+ LOGGER.error(DataNodeMiscMessages.FAILED_UPDATE_CONFIG_FILE, e);
}
}
@@ -140,7 +141,7 @@ protected IoTDBDescriptor() {
ServiceLoader.load(IPropertiesLoader.class);
boolean hasProperties = false;
for (IPropertiesLoader loader : propertiesLoaderServiceLoader) {
- LOGGER.info("Will reload properties from {} ", loader.getClass().getName());
+ LOGGER.info(DataNodeMiscMessages.WILL_RELOAD_PROPERTIES, loader.getClass().getName());
hasProperties = true;
Properties properties = loader.loadProperties();
TrimProperties trimProperties = new TrimProperties();
@@ -213,7 +214,7 @@ else if (!urlString.endsWith(".properties")) {
try {
return new URL(urlString);
} catch (MalformedURLException e) {
- LOGGER.warn("get url failed", e);
+ LOGGER.warn(DataNodeMiscMessages.GET_URL_FAILED, e);
return null;
}
}
@@ -226,19 +227,19 @@ private void loadProps() {
URL url = getPropsUrl(CommonConfig.SYSTEM_CONFIG_NAME);
if (url != null) {
try (InputStream inputStream = url.openStream()) {
- LOGGER.info("Start to read config file {}", url);
+ LOGGER.info(DataNodeMiscMessages.START_READ_CONFIG_FILE, url);
Properties properties = new Properties();
properties.load(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
commonProperties.putAll(properties);
loadProperties(commonProperties);
} catch (FileNotFoundException e) {
- LOGGER.error("Fail to find config file {}, reject DataNode startup.", url, e);
+ LOGGER.error(DataNodeMiscMessages.FAIL_FIND_CONFIG_FILE, url, e);
System.exit(-1);
} catch (IOException e) {
- LOGGER.error("Cannot load config file, reject DataNode startup.", e);
+ LOGGER.error(DataNodeMiscMessages.CANNOT_LOAD_CONFIG_FILE, e);
System.exit(-1);
} catch (Exception e) {
- LOGGER.error("Incorrect format in config file, reject DataNode startup.", e);
+ LOGGER.error(DataNodeMiscMessages.INCORRECT_FORMAT_CONFIG_FILE, e);
System.exit(-1);
} finally {
// update all data seriesPath
@@ -1187,7 +1188,7 @@ public void loadProperties(TrimProperties properties) throws BadNodeUrlException
"delay_analyzer_window_size",
ConfigurationFileUtils.getConfigurationDefaultValue("delay_analyzer_window_size")));
if (delayAnalyzerWindowSize > 0) {
- LOGGER.info("[DelayAnalyzer] Set delay_analyzer_window_size to {}", delayAnalyzerWindowSize);
+ LOGGER.info(DataNodeMiscMessages.SET_DELAY_ANALYZER_WINDOW_SIZE, delayAnalyzerWindowSize);
conf.setDelayAnalyzerWindowSize(delayAnalyzerWindowSize);
}
@@ -1806,7 +1807,7 @@ public long getThrottleThresholdWithDirs() {
try {
newThrottleThreshold = Math.min(newThrottleThreshold, fileStore.getUsableSpace());
} catch (IOException e) {
- LOGGER.error("Failed to get file size of {}, because", fileStore, e);
+ LOGGER.error(DataNodeMiscMessages.FAILED_GET_FILE_SIZE, fileStore, e);
}
}
newThrottleThreshold = (long) (newThrottleThreshold * dirUseProportion * walFileStores.size());
@@ -1898,7 +1899,7 @@ private void loadTsFileProps(TrimProperties properties) throws IOException {
ConfigurationFileUtils.getConfigurationDefaultValue("page_size_in_byte"))));
if (TSFileDescriptor.getInstance().getConfig().getPageSizeInByte()
> TSFileDescriptor.getInstance().getConfig().getGroupSizeInByte()) {
- LOGGER.warn("page_size is greater than group size, will set it as the same with group size");
+ LOGGER.warn(DataNodeMiscMessages.PAGE_SIZE_GREATER_THAN_GROUP_SIZE);
TSFileDescriptor.getInstance()
.getConfig()
.setPageSizeInByte(TSFileDescriptor.getInstance().getConfig().getGroupSizeInByte());
@@ -1993,7 +1994,7 @@ private void loadMqttProps(TrimProperties properties) {
if (properties.getProperty(IoTDBConstant.MQTT_HOST_NAME) != null) {
conf.setMqttHost(properties.getProperty(IoTDBConstant.MQTT_HOST_NAME).trim());
} else {
- LOGGER.info("MQTT host is not configured, will use dn_rpc_address.");
+ LOGGER.info(DataNodeMiscMessages.MQTT_HOST_NOT_CONFIGURED);
conf.setMqttHost(properties.getProperty(IoTDBConstant.DN_RPC_ADDRESS, conf.getRpcAddress()));
}
@@ -2312,7 +2313,8 @@ public synchronized void loadHotModifiedProps(TrimProperties properties)
if (e instanceof InterruptedException) {
Thread.currentThread().interrupt();
}
- throw new QueryProcessException(String.format("Fail to reload configuration because %s", e));
+ throw new QueryProcessException(
+ String.format(DataNodeMiscMessages.FAIL_RELOAD_CONFIGURATION_FMT, e));
}
}
@@ -2344,7 +2346,7 @@ private void loadTrustedUriPattern(TrimProperties properties) throws IOException
try {
pattern = Pattern.compile(trustedUriPattern);
} catch (Exception e) {
- LOGGER.warn("Failed to parse trusted_uri_pattern {}", trustedUriPattern);
+ LOGGER.warn(DataNodeMiscMessages.FAILED_PARSE_TRUSTED_URI, trustedUriPattern);
pattern = commonDescriptor.getConfig().getTrustedUriPattern();
}
} else {
@@ -2356,18 +2358,18 @@ private void loadTrustedUriPattern(TrimProperties properties) throws IOException
public synchronized void loadHotModifiedProps() throws QueryProcessException {
URL url = getPropsUrl(CommonConfig.SYSTEM_CONFIG_NAME);
if (url == null) {
- LOGGER.warn("Couldn't load the configuration from any of the known sources.");
+ LOGGER.warn(DataNodeMiscMessages.COULD_NOT_LOAD_CONFIG);
return;
}
TrimProperties commonProperties = new TrimProperties();
try (InputStream inputStream = url.openStream()) {
- LOGGER.info("Start to reload config file {}", url);
+ LOGGER.info(DataNodeMiscMessages.START_RELOAD_CONFIG_FILE, url);
commonProperties.load(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
ConfigurationFileUtils.loadConfigurationDefaultValueFromTemplate();
loadHotModifiedProps(commonProperties);
} catch (Exception e) {
- LOGGER.warn("Fail to reload config file {}", url, e);
+ LOGGER.warn(DataNodeMiscMessages.FAIL_RELOAD_CONFIG_FILE, url, e);
throw new QueryProcessException(
String.format("Fail to reload config file %s because %s", url, e.getMessage()));
} finally {
@@ -2378,7 +2380,7 @@ public synchronized void loadHotModifiedProps() throws QueryProcessException {
public void reloadMetricProperties(TrimProperties properties) {
ReloadLevel reloadLevel = MetricConfigDescriptor.getInstance().loadHotProps(properties, false);
- LOGGER.info("Reload metric service in level {}", reloadLevel);
+ LOGGER.info(DataNodeMiscMessages.RELOAD_METRIC_SERVICE, reloadLevel);
if (reloadLevel == ReloadLevel.RESTART_INTERNAL_REPORTER) {
IoTDBInternalReporter internalReporter;
if (MetricConfigDescriptor.getInstance().getMetricConfig().getInternalReportType()
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBStartCheck.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBStartCheck.java
index d85444a56d454..89acffa553863 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBStartCheck.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBStartCheck.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.commons.file.SystemFileFactory;
import org.apache.iotdb.commons.file.SystemPropertiesHandler;
import org.apache.iotdb.consensus.ConsensusFactory;
+import org.apache.iotdb.db.i18n.DataNodeMiscMessages;
import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALMode;
import org.apache.iotdb.db.storageengine.rescon.disk.DirectoryChecker;
@@ -136,16 +137,16 @@ private String getVal(String paramName) {
}
private IoTDBStartCheck() {
- logger.info("Starting IoTDB {}", IoTDBConstant.VERSION_WITH_BUILD);
+ logger.info(DataNodeMiscMessages.STARTING_IOTDB, IoTDBConstant.VERSION_WITH_BUILD);
// check whether SCHEMA_DIR exists, create if not exists
File dir = SystemFileFactory.INSTANCE.getFile(SCHEMA_DIR);
if (!dir.exists()) {
if (!dir.mkdirs()) {
- logger.error("Can not create schema dir: {}", SCHEMA_DIR);
+ logger.error(DataNodeMiscMessages.CANNOT_CREATE_SCHEMA_DIR, SCHEMA_DIR);
System.exit(-1);
} else {
- logger.info(" {} dir has been created.", SCHEMA_DIR);
+ logger.info(DataNodeMiscMessages.SCHEMA_DIR_CREATED, SCHEMA_DIR);
}
}
@@ -246,7 +247,7 @@ public void checkSystemConfig() throws ConfigurationException, IOException {
}
String versionString = properties.getProperty(IOTDB_VERSION_STRING);
if (versionString.startsWith("0.")) {
- logger.error("IoTDB version is too old");
+ logger.error(DataNodeMiscMessages.IOTDB_VERSION_TOO_OLD);
System.exit(-1);
}
checkImmutableSystemProperties();
@@ -271,7 +272,7 @@ private void checkImmutableSystemProperties() throws IOException {
for (Entry> entry : systemProperties.entrySet()) {
if (!properties.containsKey(entry.getKey())) {
upgradePropertiesFileFromBrokenFile();
- logger.info("repair system.properties, lack {}", entry.getKey());
+ logger.info(DataNodeMiscMessages.REPAIR_SYSTEM_PROPERTIES, entry.getKey());
}
}
@@ -374,7 +375,7 @@ public boolean checkConsensusProtocolExists(TConsensusGroupType type) {
return properties.containsKey(SCHEMA_REGION_CONSENSUS_PROTOCOL);
}
- logger.error("Unexpected consensus group type");
+ logger.error(DataNodeMiscMessages.UNEXPECTED_CONSENSUS_GROUP_TYPE);
return false;
}
@@ -428,7 +429,7 @@ public void checkEncryptMagicString() throws IOException, ConfigurationException
EncryptUtils.getEncrypt().getDecryptor().decrypt(magicStringBytes),
TSFileConfig.STRING_CHARSET);
if (!Objects.equals(decryptedMagicString, magicString)) {
- logger.error("encrypt_magic_string is not matched");
+ logger.error(DataNodeMiscMessages.ENCRYPT_MAGIC_STRING_NOT_MATCHED);
throw new ConfigurationException(
"Changing encrypt type or key for tsfile encryption after first start is not permitted. Here is your token hint info: "
+ CommonDescriptor.getInstance().getConfig().getUserEncryptTokenHint());
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/rest/IoTDBRestServiceDescriptor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/rest/IoTDBRestServiceDescriptor.java
index c4f9d131de958..2e4ed0c28a6d0 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/rest/IoTDBRestServiceDescriptor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/rest/IoTDBRestServiceDescriptor.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.commons.conf.IoTDBConstant;
import org.apache.iotdb.commons.conf.TrimProperties;
import org.apache.iotdb.db.conf.IoTDBConfig;
+import org.apache.iotdb.db.i18n.DataNodeMiscMessages;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -59,20 +60,20 @@ public static IoTDBRestServiceDescriptor getInstance() {
private TrimProperties loadProps(String configName) {
URL url = getPropsUrl(configName);
if (url == null) {
- logger.warn("Couldn't load the REST Service configuration from any of the known sources.");
+ logger.warn(DataNodeMiscMessages.REST_COULD_NOT_LOAD_CONFIG);
return null;
}
try (InputStream inputStream = url.openStream()) {
- logger.info("Start to read config file {}", url);
+ logger.info(DataNodeMiscMessages.START_READ_CONFIG_FILE, url);
TrimProperties trimProperties = new TrimProperties();
trimProperties.load(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
return trimProperties;
} catch (FileNotFoundException e) {
- logger.warn("REST service fail to find config file {}", url, e);
+ logger.warn(DataNodeMiscMessages.REST_FAIL_FIND_CONFIG, url, e);
} catch (IOException e) {
- logger.warn("REST service cannot load config file, use default configuration", e);
+ logger.warn(DataNodeMiscMessages.REST_CANNOT_LOAD_CONFIG, e);
} catch (Exception e) {
- logger.warn("REST service Incorrect format in config file, use default configuration", e);
+ logger.warn(DataNodeMiscMessages.REST_INCORRECT_FORMAT, e);
}
return null;
}
@@ -154,7 +155,7 @@ else if (!urlString.endsWith(".properties")) {
try {
return new URL(urlString);
} catch (MalformedURLException e) {
- logger.warn("get url failed", e);
+ logger.warn(DataNodeMiscMessages.GET_URL_FAILED, e);
return null;
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/BaseStateMachine.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/BaseStateMachine.java
index 73f99f8b269cb..bed0d16a049cb 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/BaseStateMachine.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/BaseStateMachine.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.commons.request.IConsensusRequest;
import org.apache.iotdb.consensus.IStateMachine;
import org.apache.iotdb.consensus.common.request.ByteBufferConsensusRequest;
+import org.apache.iotdb.db.i18n.DataNodeMiscMessages;
import org.apache.iotdb.db.queryengine.plan.planner.plan.FragmentInstance;
import org.slf4j.Logger;
@@ -40,8 +41,9 @@ protected FragmentInstance getFragmentInstance(IConsensusRequest request) {
} else if (request instanceof FragmentInstance) {
instance = (FragmentInstance) request;
} else {
- logger.error("Unexpected IConsensusRequest : {}", request);
- throw new IllegalArgumentException("Unexpected IConsensusRequest!");
+ logger.error(DataNodeMiscMessages.UNEXPECTED_CONSENSUS_REQUEST, request);
+ throw new IllegalArgumentException(
+ DataNodeMiscMessages.UNEXPECTED_CONSENSUS_REQUEST_EXCEPTION);
}
return instance;
}
@@ -58,8 +60,9 @@ protected PlanNode getPlanNode(IConsensusRequest request) {
} else if (request instanceof PlanNode) {
node = (PlanNode) request;
} else {
- logger.error("Unexpected IConsensusRequest : {}", request);
- throw new IllegalArgumentException("Unexpected IConsensusRequest!");
+ logger.error(DataNodeMiscMessages.UNEXPECTED_CONSENSUS_REQUEST, request);
+ throw new IllegalArgumentException(
+ DataNodeMiscMessages.UNEXPECTED_CONSENSUS_REQUEST_EXCEPTION);
}
return node;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataExecutionVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataExecutionVisitor.java
index 5de040eeba28a..88d23360b54f4 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataExecutionVisitor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataExecutionVisitor.java
@@ -30,6 +30,7 @@
import org.apache.iotdb.db.exception.WriteProcessRejectException;
import org.apache.iotdb.db.exception.query.OutOfTTLException;
import org.apache.iotdb.db.exception.runtime.TableLostRuntimeException;
+import org.apache.iotdb.db.i18n.DataNodeMiscMessages;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanVisitor;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedDeleteDataNode;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeEnrichedInsertNode;
@@ -78,13 +79,13 @@ public TSStatus visitInsertRow(InsertRowNode node, DataRegion dataRegion) {
dataRegion.insertSeparatorToWAL();
return StatusUtils.OK;
} catch (OutOfTTLException e) {
- LOGGER.warn("Error in executing plan node: {}, caused by {}", node, e.getMessage());
+ LOGGER.warn(DataNodeMiscMessages.ERROR_EXECUTING_PLAN_NODE_CAUSED, node, e.getMessage());
return RpcUtils.getStatus(e.getErrorCode(), e.getMessage());
} catch (WriteProcessRejectException e) {
- LOGGER.warn("Reject in executing plan node: {}, caused by {}", node, e.getMessage());
+ LOGGER.warn(DataNodeMiscMessages.REJECT_EXECUTING_PLAN_NODE, node, e.getMessage());
return RpcUtils.getStatus(e.getErrorCode(), e.getMessage());
} catch (WriteProcessException e) {
- LOGGER.error("Error in executing plan node: {}", node, e);
+ LOGGER.error(DataNodeMiscMessages.ERROR_EXECUTING_PLAN_NODE, node, e);
return RpcUtils.getStatus(e.getErrorCode(), e.getMessage());
}
}
@@ -102,13 +103,13 @@ public TSStatus visitInsertTablet(final InsertTabletNode node, final DataRegion
dataRegion.insertSeparatorToWAL();
return StatusUtils.OK;
} catch (final OutOfTTLException e) {
- LOGGER.debug("Error in executing plan node: {}, caused by {}", node, e.getMessage());
+ LOGGER.debug(DataNodeMiscMessages.ERROR_EXECUTING_PLAN_NODE_CAUSED, node, e.getMessage());
return RpcUtils.getStatus(e.getErrorCode(), e.getMessage());
} catch (final WriteProcessRejectException e) {
- LOGGER.warn("Reject in executing plan node: {}, caused by {}", node, e.getMessage());
+ LOGGER.warn(DataNodeMiscMessages.REJECT_EXECUTING_PLAN_NODE, node, e.getMessage());
return RpcUtils.getStatus(e.getErrorCode(), e.getMessage());
} catch (final WriteProcessException e) {
- LOGGER.error("Error in executing plan node: {}", node, e);
+ LOGGER.error(DataNodeMiscMessages.ERROR_EXECUTING_PLAN_NODE, node, e);
return RpcUtils.getStatus(e.getErrorCode(), e.getMessage());
} catch (final BatchProcessException e) {
LOGGER.warn(
@@ -139,10 +140,10 @@ public TSStatus visitInsertRows(InsertRowsNode node, DataRegion dataRegion) {
dataRegion.insertSeparatorToWAL();
return StatusUtils.OK;
} catch (WriteProcessRejectException e) {
- LOGGER.warn("Reject in executing plan node: {}, caused by {}", node, e.getMessage());
+ LOGGER.warn(DataNodeMiscMessages.REJECT_EXECUTING_PLAN_NODE, node, e.getMessage());
return RpcUtils.getStatus(e.getErrorCode(), e.getMessage());
} catch (BatchProcessException e) {
- LOGGER.warn("Batch failure in executing a InsertRowsNode.");
+ LOGGER.warn(DataNodeMiscMessages.BATCH_FAILURE_INSERT_ROWS);
TSStatus firstStatus = null;
// for each error
for (Map.Entry failedEntry : node.getResults().entrySet()) {
@@ -164,7 +165,7 @@ public TSStatus visitInsertRows(InsertRowsNode node, DataRegion dataRegion) {
}
return firstStatus;
} catch (SemanticException | TableLostRuntimeException e) {
- LOGGER.error("Error in executing plan node: {}, caused by {}", node, e.getMessage());
+ LOGGER.error(DataNodeMiscMessages.ERROR_EXECUTING_PLAN_NODE_CAUSED, node, e.getMessage());
return RpcUtils.getStatus(e.getErrorCode(), e.getMessage());
}
}
@@ -176,10 +177,10 @@ public TSStatus visitInsertMultiTablets(InsertMultiTabletsNode node, DataRegion
dataRegion.insertSeparatorToWAL();
return StatusUtils.OK;
} catch (WriteProcessRejectException e) {
- LOGGER.warn("Reject in executing plan node: {}, caused by {}", node, e.getMessage());
+ LOGGER.warn(DataNodeMiscMessages.REJECT_EXECUTING_PLAN_NODE, node, e.getMessage());
return RpcUtils.getStatus(e.getErrorCode(), e.getMessage());
} catch (BatchProcessException e) {
- LOGGER.warn("Batch failure in executing a InsertMultiTabletsNode.");
+ LOGGER.warn(DataNodeMiscMessages.BATCH_FAILURE_INSERT_MULTI_TABLETS);
TSStatus firstStatus = null;
for (Map.Entry failedEntry : node.getResults().entrySet()) {
InsertTabletNode insertTabletNode =
@@ -211,13 +212,13 @@ public TSStatus visitInsertRowsOfOneDevice(
dataRegion.insertSeparatorToWAL();
return StatusUtils.OK;
} catch (WriteProcessRejectException e) {
- LOGGER.warn("Reject in executing plan node: {}, caused by {}", node, e.getMessage());
+ LOGGER.warn(DataNodeMiscMessages.REJECT_EXECUTING_PLAN_NODE, node, e.getMessage());
return RpcUtils.getStatus(e.getErrorCode(), e.getMessage());
} catch (WriteProcessException e) {
- LOGGER.error("Error in executing plan node: {}", node, e);
+ LOGGER.error(DataNodeMiscMessages.ERROR_EXECUTING_PLAN_NODE, node, e);
return RpcUtils.getStatus(e.getErrorCode(), e.getMessage());
} catch (BatchProcessException e) {
- LOGGER.warn("Batch failure in executing a InsertRowsOfOneDeviceNode.");
+ LOGGER.warn(DataNodeMiscMessages.BATCH_FAILURE_INSERT_ROWS_ONE_DEVICE);
TSStatus firstStatus = null;
for (Map.Entry failedEntry : node.getResults().entrySet()) {
InsertRowNode insertRowNode = node.getInsertRowNodeList().get(failedEntry.getKey());
@@ -267,7 +268,7 @@ public TSStatus visitDeleteData(DeleteDataNode node, DataRegion dataRegion) {
dataRegion.insertSeparatorToWAL();
return StatusUtils.OK;
} catch (IOException | IllegalPathException e) {
- LOGGER.error("Error in executing plan node: {}", node, e);
+ LOGGER.error(DataNodeMiscMessages.ERROR_EXECUTING_PLAN_NODE, node, e);
return new TSStatus(TSStatusCode.WRITE_PROCESS_ERROR.getStatusCode());
} finally {
dataRegion.writeUnlock();
@@ -282,7 +283,7 @@ public TSStatus visitDeleteData(
dataRegion.insertSeparatorToWAL();
return StatusUtils.OK;
} catch (final IOException e) {
- LOGGER.error("Error in executing plan node: {}", node, e);
+ LOGGER.error(DataNodeMiscMessages.ERROR_EXECUTING_PLAN_NODE, node, e);
return new TSStatus(TSStatusCode.WRITE_PROCESS_ERROR.getStatusCode());
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataRegionStateMachine.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataRegionStateMachine.java
index 5225f33971874..d6bb21e77ba37 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataRegionStateMachine.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/DataRegionStateMachine.java
@@ -30,6 +30,7 @@
import org.apache.iotdb.consensus.iot.log.GetConsensusReqReaderPlan;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.consensus.statemachine.BaseStateMachine;
+import org.apache.iotdb.db.i18n.DataNodeMiscMessages;
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.queryengine.execution.fragment.FragmentInstanceManager;
import org.apache.iotdb.db.queryengine.plan.planner.plan.FragmentInstance;
@@ -130,7 +131,7 @@ public void loadSnapshot(File latestSnapshotRootDir) {
region.getDataRegionIdString())
.loadSnapshotForStateMachine();
if (newRegion == null) {
- logger.error("Fail to load snapshot from {}", latestSnapshotRootDir);
+ logger.error(DataNodeMiscMessages.FAIL_LOAD_SNAPSHOT, latestSnapshotRootDir);
return;
}
this.region = newRegion;
@@ -142,7 +143,7 @@ public void loadSnapshot(File latestSnapshotRootDir) {
TimeSeriesMetadataCache.getInstance().clear();
BloomFilterCache.getInstance().clear();
} catch (Exception e) {
- logger.error("Exception occurs when replacing data region in storage engine.", e);
+ logger.error(DataNodeMiscMessages.EXCEPTION_REPLACING_DATA_REGION, e);
}
}
@@ -156,7 +157,7 @@ protected PlanNode grabPlanNode(IndexedConsensusRequest indexedRequest) {
((SearchNode) planNode).setSearchIndex(indexedRequest.getSearchIndex());
searchNodes.add((SearchNode) planNode);
} else {
- logger.warn("Unexpected PlanNode type {}, which is not SearchNode", planNode.getClass());
+ logger.warn(DataNodeMiscMessages.UNEXPECTED_PLAN_NODE_TYPE, planNode.getClass());
if (onlyOne == null) {
onlyOne = planNode;
} else {
@@ -233,7 +234,7 @@ protected TSStatus write(PlanNode planNode) {
} else {
if (TSStatusCode.TABLE_NOT_EXISTS.getStatusCode() == result.getCode()
|| TSStatusCode.TABLE_IS_LOST.getStatusCode() == result.getCode()) {
- logger.info("table is not exists or lost, result code is {}", result.getCode());
+ logger.info(DataNodeMiscMessages.TABLE_NOT_EXISTS_OR_LOST, result.getCode());
}
break;
}
@@ -250,7 +251,7 @@ public DataSet read(IConsensusRequest request) {
try {
fragmentInstance = getFragmentInstance(request);
} catch (IllegalArgumentException e) {
- logger.error("Get fragment instance failed", e);
+ logger.error(DataNodeMiscMessages.GET_FRAGMENT_INSTANCE_FAILED, e);
return null;
}
return QUERY_INSTANCE_MANAGER.execDataQueryFragmentInstance(fragmentInstance, region);
@@ -280,7 +281,7 @@ public File getSnapshotRoot() {
+ region.getDataRegionIdString();
return new File(snapshotDir).getCanonicalFile();
} catch (IOException | NullPointerException e) {
- logger.warn("{}: cannot get the canonical file of {} due to {}", this, snapshotDir, e);
+ logger.warn(DataNodeMiscMessages.CANNOT_GET_CANONICAL_FILE, this, snapshotDir, e);
return null;
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/IoTConsensusDataRegionStateMachine.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/IoTConsensusDataRegionStateMachine.java
index 562c9bbf999de..41d1ddf2de1de 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/IoTConsensusDataRegionStateMachine.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/dataregion/IoTConsensusDataRegionStateMachine.java
@@ -30,6 +30,7 @@
import org.apache.iotdb.consensus.common.request.DeserializedBatchIndexedConsensusRequest;
import org.apache.iotdb.consensus.common.request.IndexedConsensusRequest;
import org.apache.iotdb.consensus.common.request.IoTConsensusRequest;
+import org.apache.iotdb.db.i18n.DataNodeMiscMessages;
import org.apache.iotdb.db.storageengine.dataregion.DataRegion;
import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.WALEntry;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -113,8 +114,9 @@ protected PlanNode getPlanNode(IConsensusRequest request) {
} else if (request instanceof PlanNode) {
node = (PlanNode) request;
} else {
- LOGGER.error("Unexpected IConsensusRequest : {}", request);
- throw new IllegalArgumentException("Unexpected IConsensusRequest!");
+ LOGGER.error(DataNodeMiscMessages.UNEXPECTED_CONSENSUS_REQUEST, request);
+ throw new IllegalArgumentException(
+ DataNodeMiscMessages.UNEXPECTED_CONSENSUS_REQUEST_EXCEPTION);
}
return node;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/schemaregion/SchemaExecutionVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/schemaregion/SchemaExecutionVisitor.java
index f3ddfbab7b292..9e512a9c64541 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/schemaregion/SchemaExecutionVisitor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/consensus/statemachine/schemaregion/SchemaExecutionVisitor.java
@@ -32,6 +32,7 @@
import org.apache.iotdb.commons.utils.TestOnly;
import org.apache.iotdb.db.exception.metadata.MeasurementAlreadyExistException;
import org.apache.iotdb.db.exception.metadata.template.TemplateIsInUseException;
+import org.apache.iotdb.db.i18n.DataNodeMiscMessages;
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.pipe.source.schemaregion.SchemaRegionListeningQueue;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanVisitor;
@@ -396,7 +397,7 @@ private void executeInternalCreateAlignedTimeSeries(
}
} catch (final MetadataException e) {
- logger.warn("{}: MetaData error: ", e.getMessage(), e);
+ logger.warn(DataNodeMiscMessages.METADATA_ERROR, e.getMessage(), e);
failingStatus.add(RpcUtils.getStatus(e.getErrorCode(), e.getMessage()));
shouldRetry = false;
}
@@ -452,7 +453,7 @@ public TSStatus visitAlterTimeSeries(
logMetaDataException(String.format("%s: MetaData error: ", IoTDBConstant.GLOBAL_DB_NAME), e);
return RpcUtils.getStatus(e.getErrorCode(), e.getMessage());
} catch (IOException e) {
- logger.error("{}: IO error: ", IoTDBConstant.GLOBAL_DB_NAME, e);
+ logger.error(DataNodeMiscMessages.IO_ERROR, IoTDBConstant.GLOBAL_DB_NAME, e);
return RpcUtils.getStatus(TSStatusCode.INTERNAL_SERVER_ERROR, e.getMessage());
}
return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS, "Execute successfully");
@@ -648,7 +649,7 @@ public TSStatus visitCreateLogicalView(
SchemaRegionWritePlanFactory.getCreateLogicalViewPlan(
entry.getKey(), entry.getValue()));
} catch (final MetadataException e) {
- logger.error("{}: MetaData error: ", IoTDBConstant.GLOBAL_DB_NAME, e);
+ logger.error(DataNodeMiscMessages.METADATA_ERROR, IoTDBConstant.GLOBAL_DB_NAME, e);
failingStatus.add(RpcUtils.getStatus(e.getErrorCode(), e.getMessage()));
}
}
@@ -668,7 +669,7 @@ public TSStatus visitAlterLogicalView(
schemaRegion.alterLogicalView(
SchemaRegionWritePlanFactory.getAlterLogicalViewPlan(entry.getKey(), entry.getValue()));
} catch (final MetadataException e) {
- logger.warn("{}: MetaData error: ", IoTDBConstant.GLOBAL_DB_NAME, e);
+ logger.warn(DataNodeMiscMessages.METADATA_ERROR, IoTDBConstant.GLOBAL_DB_NAME, e);
failingStatus.add(RpcUtils.getStatus(e.getErrorCode(), e.getMessage()));
}
}
@@ -845,10 +846,10 @@ public TSStatus visitPipeOperateSchemaQueueNode(
final SchemaRegionId id = schemaRegion.getSchemaRegionId();
final SchemaRegionListeningQueue queue = PipeDataNodeAgent.runtime().schemaListener(id);
if (node.isOpen() && !queue.isOpened()) {
- logger.info("Opened pipe listening queue on schema region {}", id);
+ logger.info(DataNodeMiscMessages.OPENED_PIPE_LISTENING_QUEUE, id);
queue.open();
} else if (!node.isOpen() && queue.isOpened()) {
- logger.info("Closed pipe listening queue on schema region {}", id);
+ logger.info(DataNodeMiscMessages.CLOSED_PIPE_LISTENING_QUEUE, id);
queue.close();
}
return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/partition/DataPartitionTableGenerator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/partition/DataPartitionTableGenerator.java
index 1b43cb2bc5446..bc4b47e2a14ca 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/partition/DataPartitionTableGenerator.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/partition/DataPartitionTableGenerator.java
@@ -28,6 +28,7 @@
import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor;
import org.apache.iotdb.commons.utils.TimePartitionUtils;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.i18n.DataNodeMiscMessages;
import org.apache.iotdb.db.storageengine.StorageEngine;
import org.apache.iotdb.db.storageengine.dataregion.DataRegion;
import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileManager;
@@ -115,7 +116,7 @@ public enum TaskStatus {
/** Start generating DataPartitionTable asynchronously. */
public CompletableFuture startGeneration() {
if (status != TaskStatus.NOT_STARTED) {
- throw new IllegalStateException("Task is already started or completed");
+ throw new IllegalStateException(DataNodeMiscMessages.TASK_ALREADY_STARTED);
}
status = TaskStatus.IN_PROGRESS;
@@ -201,7 +202,7 @@ private void generateDataPartitionTableByMemory() {
processedTimePartitions.get(),
failedTimePartitions.get());
} catch (Exception e) {
- LOG.error("Failed to generate DataPartitionTable", e);
+ LOG.error(DataNodeMiscMessages.FAILED_GENERATE_DATA_PARTITION_TABLE, e);
status = TaskStatus.FAILED;
errorMessage = "Generation failed: " + e.getMessage();
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/PipeDataNodePluginAgent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/PipeDataNodePluginAgent.java
index 567893c2cbe09..70226b35dc708 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/PipeDataNodePluginAgent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/plugin/PipeDataNodePluginAgent.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.commons.pipe.agent.plugin.service.PipePluginExecutableManager;
import org.apache.iotdb.commons.pipe.datastructure.visibility.Visibility;
import org.apache.iotdb.commons.pipe.datastructure.visibility.VisibilityUtils;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.agent.plugin.dataregion.PipeDataRegionPluginAgent;
import org.apache.iotdb.db.pipe.agent.plugin.schemaregion.PipeSchemaRegionPluginAgent;
import org.apache.iotdb.db.pipe.source.schemaregion.SchemaRegionListeningFilter;
@@ -253,10 +254,12 @@ public boolean checkIfPluginSameType(final String oldPluginName, final String ne
final PipePluginMeta newPipePluginMeta = pipePluginMetaKeeper.getPipePluginMeta(newPluginName);
if (oldPipePluginMeta == null) {
- throw new PipeException(String.format("plugin %s is not registered.", oldPluginName));
+ throw new PipeException(
+ String.format(DataNodePipeMessages.PLUGIN_NOT_REGISTERED_FMT, oldPluginName));
}
if (newPipePluginMeta == null) {
- throw new PipeException(String.format("plugin %s is not registered.", newPluginName));
+ throw new PipeException(
+ String.format(DataNodePipeMessages.PLUGIN_NOT_REGISTERED_FMT, newPluginName));
}
return Objects.equals(oldPipePluginMeta.getClassName(), (newPipePluginMeta.getClassName()));
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeAgentLauncher.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeAgentLauncher.java
index 286c8a5eaebfe..4f573fc60ac50 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeAgentLauncher.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeAgentLauncher.java
@@ -30,6 +30,7 @@
import org.apache.iotdb.confignode.rpc.thrift.TGetJarInListResp;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.protocol.client.ConfigNodeClient;
import org.apache.iotdb.db.protocol.client.ConfigNodeClientManager;
@@ -93,7 +94,7 @@ public static synchronized void launchPipePluginAgent(
PipeDataNodeAgent.plugin().markPluginLoadFailure(meta, e);
// Ignore a single broken plugin and continue startup.
LOGGER.warn(
- "Failure when register pipe plugin {}. Skip this plugin and continue startup.",
+ DataNodePipeMessages.FAILURE_WHEN_REGISTER_PIPE_PLUGIN_SKIP_THIS,
meta.getPluginName(),
e);
}
@@ -145,7 +146,7 @@ private static void fetchAndSavePipePluginJars(List pipePluginMe
final TGetJarInListResp resp =
configNodeClient.getPipePluginJar(new TGetJarInListReq(jarNameList));
if (resp.getStatus().getCode() == TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()) {
- throw new StartupException("Failed to get pipe plugin jar from config node.");
+ throw new StartupException(DataNodePipeMessages.FAILED_TO_GET_PIPE_PLUGIN_JAR_FROM);
}
final List jarList = resp.getJarList();
for (int i = 0; i < pipePluginMetaList.size(); i++) {
@@ -165,7 +166,7 @@ public static synchronized void launchPipeTaskAgent() {
ConfigNodeClientManager.getInstance().borrowClient(ConfigNodeInfo.CONFIG_REGION_ID)) {
final TGetAllPipeInfoResp getAllPipeInfoResp = configNodeClient.getAllPipeInfo();
if (getAllPipeInfoResp.getStatus().getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.warn("Failed to get pipe metas, will be synced by configNode later...");
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_GET_PIPE_METAS_WILL_BE);
}
PipeDataNodeAgent.task()
@@ -175,7 +176,8 @@ public static synchronized void launchPipeTaskAgent() {
byteBuffer -> {
final PipeMeta pipeMeta = PipeMeta.deserialize4TaskAgent(byteBuffer);
LOGGER.info(
- "Pulled pipe meta from config node: {}, recovering ...", pipeMeta);
+ DataNodePipeMessages.PULLED_PIPE_META_FROM_CONFIG_NODE_RECOVERING,
+ pipeMeta);
return pipeMeta;
})
.collect(Collectors.toList()));
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeDataNodeRuntimeAgent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeDataNodeRuntimeAgent.java
index 8f75b7b188541..5e75199e38899 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeDataNodeRuntimeAgent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeDataNodeRuntimeAgent.java
@@ -39,6 +39,7 @@
import org.apache.iotdb.commons.service.ServiceType;
import org.apache.iotdb.commons.utils.TestOnly;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeHardlinkOrCopiedFileDirStartupCleaner;
import org.apache.iotdb.db.pipe.resource.log.PipePeriodicalLogReducer;
@@ -213,13 +214,13 @@ public void report(EnrichedEvent event, PipeRuntimeException pipeRuntimeExceptio
if (event.getPipeTaskMeta() != null) {
report(event.getPipeTaskMeta(), pipeRuntimeException);
} else {
- LOGGER.warn("Attempt to report pipe exception to a null PipeTaskMeta.", pipeRuntimeException);
+ LOGGER.warn(DataNodePipeMessages.ATTEMPT_TO_REPORT_PIPE_EXCEPTION_TO_A, pipeRuntimeException);
}
}
public void report(PipeTaskMeta pipeTaskMeta, PipeRuntimeException pipeRuntimeException) {
LOGGER.warn(
- "Report PipeRuntimeException to local PipeTaskMeta({}), exception message: {}",
+ DataNodePipeMessages.REPORT_PIPERUNTIMEEXCEPTION_TO_LOCAL_PIPETASKMETA_EXCEPTION_MESSAGE,
pipeTaskMeta,
pipeRuntimeException.getMessage(),
pipeRuntimeException);
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/SimpleProgressIndexAssigner.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/SimpleProgressIndexAssigner.java
index 9ebd7df0ed9dc..e8fd45bf06752 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/SimpleProgressIndexAssigner.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/SimpleProgressIndexAssigner.java
@@ -23,6 +23,7 @@
import org.apache.iotdb.commons.file.SystemFileFactory;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode;
import org.apache.tsfile.external.commons.io.FileUtils;
@@ -58,18 +59,22 @@ public class SimpleProgressIndexAssigner {
public void start() {
isSimpleConsensusEnable =
IOTDB_CONFIG.getDataRegionConsensusProtocolClass().equals(SIMPLE_CONSENSUS);
- LOGGER.info("Starting SimpleProgressIndexAssigner ...");
+ LOGGER.info(DataNodePipeMessages.STARTING_SIMPLEPROGRESSINDEXASSIGNER);
try {
makeDirIfNecessary();
parseRebootTimes();
recordRebootTimes();
LOGGER.info(
- "SimpleProgressIndexAssigner started successfully. isSimpleConsensusEnable: {}, rebootTimes: {}",
+ DataNodePipeMessages
+ .SIMPLEPROGRESSINDEXASSIGNER_STARTED_SUCCESSFULLY_ISSIMPLECONSENSUSENABLE_R,
isSimpleConsensusEnable,
rebootTimes);
} catch (Exception e) {
- LOGGER.error("Cannot start SimpleProgressIndexAssigner because of {}", e.getMessage(), e);
+ LOGGER.error(
+ DataNodePipeMessages.CANNOT_START_SIMPLEPROGRESSINDEXASSIGNER_BECAUSE_OF,
+ e.getMessage(),
+ e);
}
}
@@ -93,7 +98,7 @@ private void parseRebootTimes() {
} catch (final Exception e) {
rebootTimes = (int) (System.currentTimeMillis() / 1000);
LOGGER.error(
- "Cannot parse reboot times from file {}, set the current time in seconds ({}) as the reboot times",
+ DataNodePipeMessages.CANNOT_PARSE_REBOOT_TIMES_FROM_FILE_SET,
file.getAbsolutePath(),
rebootTimes);
}
@@ -107,7 +112,7 @@ private void recordRebootTimes() {
fos.getFD().sync();
} catch (final Exception e) {
LOGGER.error(
- "Cannot record reboot times {} to file {}, the reboot times will not be updated",
+ DataNodePipeMessages.CANNOT_RECORD_REBOOT_TIMES_TO_FILE_THE,
rebootTimes,
file.getAbsolutePath());
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTask.java
index 0d0b955c2109d..0d4e50cabef1f 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTask.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTask.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.pipe.agent.task.PipeTask;
import org.apache.iotdb.commons.pipe.agent.task.stage.PipeTaskStage;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -59,7 +60,7 @@ public void create() {
processorStage.create();
sinkStage.create();
LOGGER.info(
- "Create pipe DN task {} successfully within {} ms",
+ DataNodePipeMessages.CREATE_PIPE_DN_TASK_SUCCESSFULLY_WITHIN_MS,
this,
System.currentTimeMillis() - startTime);
}
@@ -71,7 +72,7 @@ public void drop() {
processorStage.drop();
sinkStage.drop();
LOGGER.info(
- "Drop pipe DN task {} successfully within {} ms",
+ DataNodePipeMessages.DROP_PIPE_DN_TASK_SUCCESSFULLY_WITHIN_MS,
this,
System.currentTimeMillis() - startTime);
}
@@ -83,7 +84,7 @@ public void start() {
processorStage.start();
sinkStage.start();
LOGGER.info(
- "Start pipe DN task {} successfully within {} ms",
+ DataNodePipeMessages.START_PIPE_DN_TASK_SUCCESSFULLY_WITHIN_MS,
this,
System.currentTimeMillis() - startTime);
}
@@ -95,7 +96,7 @@ public void stop() {
processorStage.stop();
sinkStage.stop();
LOGGER.info(
- "Stop pipe DN task {} successfully within {} ms",
+ DataNodePipeMessages.STOP_PIPE_DN_TASK_SUCCESSFULLY_WITHIN_MS,
this,
System.currentTimeMillis() - startTime);
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTaskAgent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTaskAgent.java
index 06d6a512b30a4..7c829bfbf2b44 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTaskAgent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTaskAgent.java
@@ -49,6 +49,7 @@
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.consensus.SchemaRegionConsensusImpl;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.pipe.agent.task.builder.PipeDataNodeBuilder;
import org.apache.iotdb.db.pipe.agent.task.builder.PipeDataNodeTaskBuilder;
@@ -217,9 +218,7 @@ public List handlePipeMetaChangesInternal(
clearSchemaRegionListeningQueueIfNecessary(pipeMetaListFromCoordinator);
closeSchemaRegionListeningQueueIfNecessary(validSchemaRegionIds, exceptionMessages);
} catch (final Exception e) {
- LOGGER.warn(
- "Failed to clear/close the schema region listening queue, because {}. Will wait until success or the region's state machine is stopped.",
- e.getMessage());
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_CLEAR_CLOSE_THE_SCHEMA_REGION, e.getMessage());
// Do not use null pipe name to retain the field "required" to be compatible with the lower
// versions
exceptionMessages.add(
@@ -295,7 +294,7 @@ private void closeSchemaRegionListeningQueueIfNecessary(
schemaRegionId, new PipeOperateSchemaQueueNode(new PlanNodeId(""), false));
} catch (final ConsensusException e) {
throw new PipeException(
- "Failed to close listening queue for SchemaRegion "
+ DataNodePipeMessages.FAILED_TO_CLOSE_LISTENING_QUEUE_FOR_SCHEMAREGION
+ schemaRegionId
+ ", because "
+ e.getMessage(),
@@ -482,7 +481,8 @@ protected void collectPipeMetaListInternal(
PipeConfig.getInstance().getPipeMetaReportMaxLogNumPerRound(),
PipeConfig.getInstance().getPipeMetaReportMaxLogIntervalRounds(),
pipeMetaKeeper.getPipeMetaCount());
- LOGGER.debug("Received pipe heartbeat request {} from config node.", req.heartbeatId);
+ LOGGER.debug(
+ DataNodePipeMessages.RECEIVED_PIPE_HEARTBEAT_REQUEST_FROM_CONFIG_NODE, req.heartbeatId);
final Set dataRegionIds =
StorageEngine.getInstance().getAllDataRegionIds().stream()
@@ -571,9 +571,7 @@ public Set getPipeTaskRegionIdSet(final String pipeName, final long cre
public boolean hasPipeReleaseRegionRelatedResource(final int consensusGroupId) {
if (!tryReadLockWithTimeOut(10)) {
- LOGGER.warn(
- "Failed to check if pipe has release region related resource with consensus group id: {}.",
- consensusGroupId);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_CHECK_IF_PIPE_HAS_RELEASE, consensusGroupId);
return false;
}
@@ -616,7 +614,7 @@ public void runPipeTasks(
try {
future.get();
} catch (final ExecutionException | InterruptedException e) {
- LOGGER.warn("Exception occurs when executing pipe task: ", e);
+ LOGGER.warn(DataNodePipeMessages.EXCEPTION_OCCURS_WHEN_EXECUTING_PIPE_TASK, e);
throw new PipeException(e.toString());
}
}
@@ -637,9 +635,9 @@ public void persistAllProgressIndex() {
configNodeClient.pushHeartbeat(
IoTDBDescriptor.getInstance().getConfig().getDataNodeId(), resp);
if (TSStatusCode.SUCCESS_STATUS.getStatusCode() != result.getCode()) {
- LOGGER.warn("Failed to persist progress index to configNode, status: {}", result);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_PERSIST_PROGRESS_INDEX_TO_CONFIGNODE, result);
} else {
- LOGGER.info("Successfully persisted all pipe's info to configNode.");
+ LOGGER.info(DataNodePipeMessages.SUCCESSFULLY_PERSISTED_ALL_PIPE_S_INFO_TO);
}
} catch (final Exception e) {
LOGGER.warn(e.getMessage());
@@ -658,7 +656,7 @@ public ProgressIndex getPipeTaskProgressIndex(final String pipeName, final int c
try {
if (!pipeMetaKeeper.containsPipeMeta(pipeName)) {
- throw new PipeException("Pipe meta not found: " + pipeName);
+ throw new PipeException(DataNodePipeMessages.PIPE_META_NOT_FOUND + pipeName);
}
return pipeMetaKeeper
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/builder/PipeDataNodeTaskBuilder.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/builder/PipeDataNodeTaskBuilder.java
index d66a4f14a7961..8747c12d076b6 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/builder/PipeDataNodeTaskBuilder.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/builder/PipeDataNodeTaskBuilder.java
@@ -29,6 +29,7 @@
import org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant;
import org.apache.iotdb.commons.pipe.config.constant.PipeSourceConstant;
import org.apache.iotdb.commons.pipe.config.constant.SystemConstant;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.agent.task.PipeDataNodeTask;
import org.apache.iotdb.db.pipe.agent.task.execution.PipeProcessorSubtaskExecutor;
import org.apache.iotdb.db.pipe.agent.task.execution.PipeSubtaskExecutorManager;
@@ -169,7 +170,7 @@ private void checkConflict(
PipeTaskAgent.isSnapshotMode(sourceParameters);
} catch (final IllegalPathException e) {
LOGGER.warn(
- "PipeDataNodeTaskBuilder failed to parse 'inclusion' and 'exclusion' parameters: {}",
+ DataNodePipeMessages.PIPEDATANODETASKBUILDER_FAILED_TO_PARSE_INCLUSION_AND_EXCLUSION,
e.getMessage(),
e);
return;
@@ -185,18 +186,22 @@ private void checkConflict(
sinkParameters.addAttribute(PipeSinkConstant.CONNECTOR_REALTIME_FIRST_KEY, "false");
if (insertionDeletionListeningOptionPair.right) {
LOGGER.info(
- "PipeDataNodeTaskBuilder: When 'inclusion' contains 'data.delete', 'realtime-first' is defaulted to 'false' to prevent sync issues after deletion.");
+ DataNodePipeMessages
+ .PIPEDATANODETASKBUILDER_WHEN_INCLUSION_CONTAINS_DATA_DELETE_REALTIME);
} else {
LOGGER.info(
- "PipeDataNodeTaskBuilder: When source uses snapshot model, 'realtime-first' is defaulted to 'false' to prevent premature halt before transfer completion.");
+ DataNodePipeMessages
+ .PIPEDATANODETASKBUILDER_WHEN_SOURCE_USES_SNAPSHOT_MODEL_REALTIME);
}
} else if (isRealtime) {
if (insertionDeletionListeningOptionPair.right) {
LOGGER.warn(
- "PipeDataNodeTaskBuilder: When 'inclusion' includes 'data.delete', 'realtime-first' set to 'true' may result in data synchronization issues after deletion.");
+ DataNodePipeMessages
+ .PIPEDATANODETASKBUILDER_WHEN_INCLUSION_INCLUDES_DATA_DELETE_REALTIME);
} else {
LOGGER.warn(
- "PipeDataNodeTaskBuilder: When source uses snapshot model, 'realtime-first' set to 'true' may cause prevent premature halt before transfer completion.");
+ DataNodePipeMessages
+ .PIPEDATANODETASKBUILDER_WHEN_SOURCE_USES_SNAPSHOT_MODEL_REALTIME_1);
}
}
}
@@ -216,10 +221,9 @@ private void checkConflict(
sinkParameters.addAttribute(
PipeSinkConstant.SINK_ENABLE_SEND_TSFILE_LIMIT, Boolean.TRUE.toString());
LOGGER.info(
- "PipeDataNodeTaskBuilder: When the realtime sync is enabled, we enable rate limiter in sending tsfile by default to reserve disk and network IO for realtime sending.");
+ DataNodePipeMessages.PIPEDATANODETASKBUILDER_WHEN_THE_REALTIME_SYNC_IS_ENABLED_1);
} else if (!enableSendTsFileLimit) {
- LOGGER.warn(
- "PipeDataNodeTaskBuilder: When the realtime sync is enabled, not enabling the rate limiter in sending tsfile may introduce delay for realtime sending.");
+ LOGGER.warn(DataNodePipeMessages.PIPEDATANODETASKBUILDER_WHEN_THE_REALTIME_SYNC_IS_ENABLED);
}
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/connection/PipeEventCollector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/connection/PipeEventCollector.java
index b000c5d2366fc..ad44b78042ac3 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/connection/PipeEventCollector.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/connection/PipeEventCollector.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.commons.pipe.datastructure.pattern.IoTDBTreePatternOperations;
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
import org.apache.iotdb.commons.pipe.event.ProgressReportEvent;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.pipe.event.common.deletion.PipeDeleteDataNodeEvent;
import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent;
@@ -97,7 +98,8 @@ public void collect(final Event event) {
} catch (final PipeException e) {
throw e;
} catch (final Exception e) {
- throw new PipeException("Error occurred when collecting events from processor.", e);
+ throw new PipeException(
+ DataNodePipeMessages.ERROR_OCCURRED_WHEN_COLLECTING_EVENTS_FROM_PROCESSOR, e);
}
}
@@ -129,7 +131,7 @@ private void parseAndCollectEvent(final PipeRawTabletInsertionEvent sourceEvent)
private void parseAndCollectEvent(final PipeTsFileInsertionEvent sourceEvent) throws Exception {
if (!sourceEvent.waitForTsFileClose()) {
LOGGER.warn(
- "Pipe skipping temporary TsFile which shouldn't be transferred: {}",
+ DataNodePipeMessages.PIPE_SKIPPING_TEMPORARY_TSFILE_WHICH_SHOULDN_T,
sourceEvent.getTsFile());
return;
}
@@ -222,7 +224,8 @@ private void collectEvent(final Event event) {
if (event instanceof EnrichedEvent) {
final EnrichedEvent enrichedEvent = (EnrichedEvent) event;
if (!enrichedEvent.increaseReferenceCount(PipeEventCollector.class.getName())) {
- LOGGER.warn("PipeEventCollector: The event {} is already released, skipping it.", event);
+ LOGGER.warn(
+ DataNodePipeMessages.PIPEEVENTCOLLECTOR_THE_EVENT_IS_ALREADY_RELEASED_SKIPPING, event);
isFailedToIncreaseReferenceCount = true;
return;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/processor/PipeProcessorSubtask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/processor/PipeProcessorSubtask.java
index 1163f8e4e8ed4..c40b37b31e556 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/processor/PipeProcessorSubtask.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/processor/PipeProcessorSubtask.java
@@ -30,6 +30,7 @@
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
import org.apache.iotdb.commons.pipe.resource.log.PipeLogger;
import org.apache.iotdb.commons.utils.ErrorHandlingCommonUtils;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.pipe.agent.task.connection.PipeEventCollector;
import org.apache.iotdb.db.pipe.event.UserDefinedEnrichedEvent;
@@ -260,7 +261,7 @@ protected boolean executeOnce() throws Exception {
e);
} else {
LOGGER.info(
- "Exception in pipe event processing, ignored because pipe is dropped.{}",
+ DataNodePipeMessages.EXCEPTION_IN_PIPE_EVENT_PROCESSING_IGNORED_BECAUSE,
e.getMessage() != null ? " Message: " + e.getMessage() : "");
clearReferenceCountAndReleaseLastEvent(event);
}
@@ -291,7 +292,7 @@ public void close() {
// closed, the execution thread may still deliver events downstream.
} catch (final Exception e) {
LOGGER.info(
- "Exception occurred when closing pipe processor subtask {}, root cause: {}",
+ DataNodePipeMessages.EXCEPTION_OCCURRED_WHEN_CLOSING_PIPE_PROCESSOR_SUBTASK,
taskID,
ErrorHandlingCommonUtils.getRootCause(e).getMessage(),
e);
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/processor/PipeProcessorSubtaskWorker.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/processor/PipeProcessorSubtaskWorker.java
index b9584d2c586b3..2bb4acc123245 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/processor/PipeProcessorSubtaskWorker.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/processor/PipeProcessorSubtaskWorker.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.db.pipe.agent.task.subtask.processor;
import org.apache.iotdb.commons.concurrent.WrappedRunnable;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -73,7 +74,7 @@ private boolean runSubtasks() {
subtask.onSuccess(hasAtLeastOneEventProcessed);
} catch (final Exception e) {
if (subtask.isClosed()) {
- LOGGER.warn("subtask {} is closed, ignore exception", subtask, e);
+ LOGGER.warn(DataNodePipeMessages.SUBTASK_IS_CLOSED_IGNORE_EXCEPTION, subtask, e);
} else {
subtask.onFailure(e);
}
@@ -88,7 +89,7 @@ private void sleepIfNecessary(final boolean canSleepBeforeNextRound) {
try {
Thread.sleep(sleepingTimeInMilliSecond);
} catch (final InterruptedException e) {
- LOGGER.warn("subtask worker is interrupted", e);
+ LOGGER.warn(DataNodePipeMessages.SUBTASK_WORKER_IS_INTERRUPTED, e);
Thread.currentThread().interrupt();
}
} else {
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeRealtimePriorityBlockingQueue.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeRealtimePriorityBlockingQueue.java
index f972bba0e6ede..4b65746b3abed 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeRealtimePriorityBlockingQueue.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeRealtimePriorityBlockingQueue.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.commons.pipe.agent.task.progress.CommitterKey;
import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.agent.task.connection.PipeEventCollector;
import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent;
import org.apache.iotdb.db.pipe.event.common.tsfile.PipeCompactedTsFileInsertionEvent;
@@ -219,7 +220,7 @@ public synchronized void replace(
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
if (eventsToBeRemovedGroupByCommitterKey.isEmpty()) {
LOGGER.info(
- "Region {}: No TsFileInsertionEvents to replace for source files {}",
+ DataNodePipeMessages.REGION_NO_TSFILEINSERTIONEVENTS_TO_REPLACE_FOR_SOURCE,
regionId,
sourceFiles.stream()
.map(TsFileResource::getTsFilePath)
@@ -277,10 +278,7 @@ public synchronized void replace(
try {
event.decreaseReferenceCount(PipeRealtimePriorityBlockingQueue.class.getName(), false);
} catch (final Exception e) {
- LOGGER.warn(
- "Failed to decrease reference count for event {} in PipeRealtimePriorityBlockingQueue",
- event,
- e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_DECREASE_REFERENCE_COUNT_FOR_EVENT, event, e);
}
}
return; // Exit early if any event failed to increase the reference count
@@ -302,9 +300,7 @@ public synchronized void replace(
event.decreaseReferenceCount(PipeRealtimePriorityBlockingQueue.class.getName(), false);
} catch (final Exception e) {
LOGGER.warn(
- "Failed to decrease reference count for event {} in PipeRealtimePriorityBlockingQueue",
- event,
- e);
+ DataNodePipeMessages.FAILED_TO_DECREASE_REFERENCE_COUNT_FOR_EVENT, event, e);
}
eventCounter.decreaseEventCount(event);
}
@@ -318,7 +314,7 @@ public synchronized void replace(
tsfileInsertEventDeque.removeIf(eventsToRemove::contains);
LOGGER.info(
- "Region {}: Replaced TsFileInsertionEvents {} with {}",
+ DataNodePipeMessages.REGION_REPLACED_TSFILEINSERTIONEVENTS_WITH,
regionId,
eventsToBeRemovedGroupByCommitterKey.values().stream()
.flatMap(Set::stream)
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeSinkSubtask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeSinkSubtask.java
index a11a1a68f0ce4..ec6038a8748ee 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeSinkSubtask.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeSinkSubtask.java
@@ -28,6 +28,7 @@
import org.apache.iotdb.commons.pipe.sink.protocol.PipeConnectorWithEventDiscard;
import org.apache.iotdb.commons.queryengine.plan.planner.plan.node.PlanNodeType;
import org.apache.iotdb.commons.utils.ErrorHandlingCommonUtils;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.pipe.event.UserDefinedEnrichedEvent;
import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent;
@@ -150,7 +151,7 @@ private void transferHeartbeatEvent(final PipeHeartbeatEvent event) {
outputPipeSink.transfer(event);
} catch (final Exception e) {
throw new PipeConnectionException(
- "PipeConnector: "
+ DataNodePipeMessages.PIPECONNECTOR
+ outputPipeSink.getClass().getName()
+ "(id: "
+ taskID
@@ -177,13 +178,13 @@ public void close() {
final long startTime = System.currentTimeMillis();
outputPipeSink.close();
LOGGER.info(
- "Pipe: connector subtask {} ({}) was closed within {} ms",
+ DataNodePipeMessages.PIPE_CONNECTOR_SUBTASK_WAS_CLOSED_WITHIN_MS,
taskID,
outputPipeSink,
System.currentTimeMillis() - startTime);
} catch (final Exception e) {
LOGGER.info(
- "Exception occurred when closing pipe connector subtask {}, root cause: {}",
+ DataNodePipeMessages.EXCEPTION_OCCURRED_WHEN_CLOSING_PIPE_CONNECTOR_SUBTASK,
taskID,
ErrorHandlingCommonUtils.getRootCause(e).getMessage(),
e);
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeSinkSubtaskLifeCycle.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeSinkSubtaskLifeCycle.java
index 1780f5a87efa8..85634277627c4 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeSinkSubtaskLifeCycle.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeSinkSubtaskLifeCycle.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.db.pipe.agent.task.subtask.sink;
import org.apache.iotdb.commons.pipe.agent.task.connection.UnboundedBlockingPendingQueue;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.agent.task.execution.PipeSinkSubtaskExecutor;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
import org.apache.iotdb.pipe.api.event.Event;
@@ -60,7 +61,7 @@ public UnboundedBlockingPendingQueue getPendingQueue() {
public synchronized void register() {
if (registeredTaskCount < 0) {
- throw new IllegalStateException("registeredTaskCount < 0");
+ throw new IllegalStateException(DataNodePipeMessages.REGISTEREDTASKCOUNT_0);
}
if (registeredTaskCount == 0) {
@@ -72,7 +73,7 @@ public synchronized void register() {
registeredTaskCount++;
LOGGER.info(
- "Register subtask {}. runningTaskCount: {}, registeredTaskCount: {}",
+ DataNodePipeMessages.REGISTER_SUBTASK_RUNNINGTASKCOUNT_REGISTEREDTASKCOUNT,
subtask,
runningTaskCount,
registeredTaskCount);
@@ -95,7 +96,7 @@ public synchronized void register() {
public synchronized boolean deregister(
final String pipeNameToDeregister, final long creationTimeToDeregister, final int regionId) {
if (registeredTaskCount <= 0) {
- throw new IllegalStateException("registeredTaskCount <= 0");
+ throw new IllegalStateException(DataNodePipeMessages.REGISTEREDTASKCOUNT_0_1);
}
subtask.discardEventsOfPipe(pipeNameToDeregister, creationTimeToDeregister, regionId);
@@ -111,7 +112,7 @@ public synchronized boolean deregister(
} finally {
registeredTaskCount--;
LOGGER.info(
- "Deregister subtask {}. runningTaskCount: {}, registeredTaskCount: {}",
+ DataNodePipeMessages.DEREGISTER_SUBTASK_RUNNINGTASKCOUNT_REGISTEREDTASKCOUNT,
subtask,
runningTaskCount,
registeredTaskCount);
@@ -120,7 +121,7 @@ public synchronized boolean deregister(
public synchronized void start() {
if (runningTaskCount < 0) {
- throw new IllegalStateException("runningTaskCount < 0");
+ throw new IllegalStateException(DataNodePipeMessages.RUNNINGTASKCOUNT_0);
}
if (runningTaskCount == 0) {
@@ -134,7 +135,7 @@ public synchronized void start() {
runningTaskCount++;
LOGGER.info(
- "Start subtask {}. runningTaskCount: {}, registeredTaskCount: {}",
+ DataNodePipeMessages.START_SUBTASK_RUNNINGTASKCOUNT_REGISTEREDTASKCOUNT,
subtask,
runningTaskCount,
registeredTaskCount);
@@ -142,7 +143,7 @@ public synchronized void start() {
public synchronized void stop() {
if (runningTaskCount <= 0) {
- throw new IllegalStateException("runningTaskCount <= 0");
+ throw new IllegalStateException(DataNodePipeMessages.RUNNINGTASKCOUNT_0_1);
}
if (runningTaskCount == 1) {
@@ -151,7 +152,7 @@ public synchronized void stop() {
runningTaskCount--;
LOGGER.info(
- "Stop subtask {}. runningTaskCount: {}, registeredTaskCount: {}",
+ DataNodePipeMessages.STOP_SUBTASK_RUNNINGTASKCOUNT_REGISTEREDTASKCOUNT,
subtask,
runningTaskCount,
registeredTaskCount);
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeSinkSubtaskManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeSinkSubtaskManager.java
index 4faa42db004a1..367b92104062d 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeSinkSubtaskManager.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/sink/PipeSinkSubtaskManager.java
@@ -28,6 +28,7 @@
import org.apache.iotdb.commons.pipe.config.constant.SystemConstant;
import org.apache.iotdb.commons.pipe.config.plugin.configuraion.PipeTaskRuntimeConfiguration;
import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskSinkRuntimeEnvironment;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.pipe.agent.task.execution.PipeSinkSubtaskExecutor;
import org.apache.iotdb.db.pipe.consensus.ReplicateProgressDataNodeManager;
@@ -159,7 +160,8 @@ public synchronized String register(
closeException);
}
throw new PipeException(
- "Failed to construct PipeConnector, because of " + e.getMessage(), e);
+ DataNodePipeMessages.FAILED_TO_CONSTRUCT_PIPECONNECTOR_BECAUSE_OF + e.getMessage(),
+ e);
}
// 2. Construct PipeConnectorSubtaskLifeCycle to manage PipeConnectorSubtask's life cycle
@@ -178,7 +180,7 @@ public synchronized String register(
}
LOGGER.info(
- "Pipe sink subtasks with attributes {} is bounded with sinkExecutor {} and callbackExecutor {}.",
+ DataNodePipeMessages.PIPE_SINK_SUBTASKS_WITH_ATTRIBUTES_IS_BOUNDED,
attributeSortedString,
executor.getWorkingThreadName(),
executor.getCallbackThreadName());
@@ -215,7 +217,7 @@ public synchronized void deregister(
attributeSortedString2SubtaskLifeCycleMap.remove(attributeSortedString);
executor.shutdown();
LOGGER.info(
- "The executor {} and {} has been successfully shutdown.",
+ DataNodePipeMessages.THE_EXECUTOR_AND_HAS_BEEN_SUCCESSFULLY_SHUTDOWN,
executor.getWorkingThreadName(),
executor.getCallbackThreadName());
}
@@ -252,7 +254,7 @@ public UnboundedBlockingPendingQueue getPipeSinkPendingQueue(
final String attributeSortedString) {
if (!attributeSortedString2SubtaskLifeCycleMap.containsKey(attributeSortedString)) {
throw new PipeException(
- "Failed to get PendingQueue. No such subtask: " + attributeSortedString);
+ DataNodePipeMessages.FAILED_TO_GET_PENDINGQUEUE_NO_SUCH_SUBTASK + attributeSortedString);
}
// All subtasks share the same pending queue
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/deletion/DeletionResource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/deletion/DeletionResource.java
index 59faeebb81ad0..051f224890cc5 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/deletion/DeletionResource.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/deletion/DeletionResource.java
@@ -27,6 +27,7 @@
import org.apache.iotdb.commons.utils.TestOnly;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.consensus.DataRegionConsensusImpl;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.deletion.PipeDeleteDataNodeEvent;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.AbstractDeleteDataNode;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.DeleteNodeType;
@@ -76,7 +77,7 @@ public synchronized void decreaseReference() {
}
public void removeSelf() {
- LOGGER.info("DeletionResource {} has been released, trigger a remove of DAL...", this);
+ LOGGER.info(DataNodePipeMessages.DELETIONRESOURCE_HAS_BEEN_RELEASED_TRIGGER_A_REMOVE, this);
removeHook.accept(this);
}
@@ -103,7 +104,7 @@ public synchronized Status waitForResult() {
try {
this.wait();
} catch (InterruptedException e) {
- LOGGER.warn("Interrupted when waiting for result.", e);
+ LOGGER.warn(DataNodePipeMessages.INTERRUPTED_WHEN_WAITING_FOR_RESULT, e);
Thread.currentThread().interrupt();
currentStatus = Status.FAILURE;
break;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/deletion/DeletionResourceManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/deletion/DeletionResourceManager.java
index 5ba810608ebe1..d2292dae0ad27 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/deletion/DeletionResourceManager.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/deletion/DeletionResourceManager.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.consensus.pipe.IoTConsensusV2;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.consensus.DataRegionConsensusImpl;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.consensus.ReplicateProgressDataNodeManager;
import org.apache.iotdb.db.pipe.consensus.deletion.persist.DeletionBuffer;
import org.apache.iotdb.db.pipe.consensus.deletion.persist.PageCacheDeletionBuffer;
@@ -89,7 +90,8 @@ private void initAndRecover() throws IOException {
if (!storageDir.exists()) {
// Init
if (!storageDir.mkdirs()) {
- LOGGER.warn("Unable to create iotConsensusV2 deletion dir at {}", storageDir);
+ LOGGER.warn(
+ DataNodePipeMessages.UNABLE_TO_CREATE_IOTCONSENSUSV2_DELETION_DIR_AT, storageDir);
throw new IOException(
String.format("Unable to create iotConsensusV2 deletion dir at %s", storageDir));
}
@@ -112,7 +114,7 @@ private void initAndRecover() throws IOException {
deletion.getDeleteDataNode(), key -> deletion));
} catch (IOException e) {
LOGGER.warn(
- "Detect file corrupted when recover DAL-{}, discard all subsequent DALs...",
+ DataNodePipeMessages.DETECT_FILE_CORRUPTED_WHEN_RECOVER_DAL_DISCARD,
path.getFileName());
break;
}
@@ -127,10 +129,11 @@ private void initAndRecover() throws IOException {
@Override
public void close() {
- LOGGER.info("Closing deletion resource manager for {}...", dataRegionId);
+ LOGGER.info(DataNodePipeMessages.CLOSING_DELETION_RESOURCE_MANAGER_FOR, dataRegionId);
this.deleteNode2ResourcesMap.clear();
this.deletionBuffer.close();
- LOGGER.info("Deletion resource manager for {} has been successfully closed!", dataRegionId);
+ LOGGER.info(
+ DataNodePipeMessages.DELETION_RESOURCE_MANAGER_FOR_HAS_BEEN_SUCCESSFULLY, dataRegionId);
}
public DeletionResource registerDeletionResource(AbstractDeleteDataNode deleteDataNode) {
@@ -158,7 +161,7 @@ public List getAllDeletionResources() {
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
LOGGER.warn(
- "DeletionManager-{}: current waiting is interrupted. May because current application is down. ",
+ DataNodePipeMessages.DELETIONMANAGER_CURRENT_WAITING_IS_INTERRUPTED_MAY_BECAUSE,
dataRegionId,
e);
return deleteNode2ResourcesMap.values().stream().collect(ImmutableList.toImmutableList());
@@ -171,12 +174,12 @@ public void removeDAL() {
if (storageDir.exists()) {
FileUtils.deleteFileOrDirectory(storageDir);
LOGGER.info(
- "DeletionManager-{}: current DAL dir {} is deleted successfully",
+ DataNodePipeMessages.DELETIONMANAGER_CURRENT_DAL_DIR_IS_DELETED_SUCCESSFULLY,
dataRegionId,
storageDir);
} else {
LOGGER.info(
- "DeletionManager-{}: current DAL dir {} is not initialized, no need to delete.",
+ DataNodePipeMessages.DELETIONMANAGER_CURRENT_DAL_DIR_IS_NOT_INITIALIZED,
dataRegionId,
storageDir);
}
@@ -214,11 +217,13 @@ private synchronized void removeDeletionResource(DeletionResource deletionResour
File fileToDelete = deletionPaths[i].toFile();
FileUtils.deleteFileOrDirectory(fileToDelete);
LOGGER.info(
- "DeletionManager-{} delete deletion file in {} dir...", dataRegionId, fileToDelete);
+ DataNodePipeMessages.DELETIONMANAGER_DELETE_DELETION_FILE_IN_DIR,
+ dataRegionId,
+ fileToDelete);
}
} catch (IOException e) {
LOGGER.warn(
- "DeletionManager-{} failed to delete file in {} dir, please manually check!",
+ DataNodePipeMessages.DELETIONMANAGER_FAILED_TO_DELETE_FILE_IN_DIR,
dataRegionId,
storageDir);
}
@@ -291,7 +296,7 @@ public static DeletionResourceManager getInstance(int groupId) {
try {
return new DeletionResourceManager(groupId);
} catch (IOException e) {
- LOGGER.error("Failed to initialize DeletionResourceManager", e);
+ LOGGER.error(DataNodePipeMessages.FAILED_TO_INITIALIZE_DELETIONRESOURCEMANAGER, e);
throw new RuntimeException(e);
}
});
@@ -340,7 +345,7 @@ public void recoverForTest() {
}
}
} catch (IOException e) {
- LOGGER.error("Failed to recover DeletionResourceManager", e);
+ LOGGER.error(DataNodePipeMessages.FAILED_TO_RECOVER_DELETIONRESOURCEMANAGER, e);
}
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/deletion/persist/PageCacheDeletionBuffer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/deletion/persist/PageCacheDeletionBuffer.java
index bae0b8fb98862..8b7946998ac1a 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/deletion/persist/PageCacheDeletionBuffer.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/deletion/persist/PageCacheDeletionBuffer.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.commons.utils.TestOnly;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.consensus.ReplicateProgressDataNodeManager;
import org.apache.iotdb.db.pipe.consensus.deletion.DeletionResource;
import org.apache.iotdb.db.pipe.consensus.deletion.DeletionResourceManager;
@@ -129,12 +130,12 @@ public void start() {
DeletionResourceManager.MAGIC_VERSION_V1.getBytes(StandardCharsets.UTF_8)));
}
LOGGER.info(
- "Deletion persist-{}: starting to persist, current writing: {}", dataRegionId, logFile);
+ DataNodePipeMessages.DELETION_PERSIST_STARTING_TO_PERSIST_CURRENT_WRITING,
+ dataRegionId,
+ logFile);
} catch (IOException e) {
LOGGER.warn(
- "Deletion persist: Cannot create file {}, please check your file system manually.",
- logFile,
- e);
+ DataNodePipeMessages.DELETION_PERSIST_CANNOT_CREATE_FILE_PLEASE_CHECK, logFile, e);
throw new RuntimeException(e);
}
}
@@ -155,9 +156,7 @@ private void allocateBuffers() {
serializeBuffer = ByteBuffer.allocateDirect(DAL_BUFFER_SIZE);
} catch (OutOfMemoryError e) {
LOGGER.error(
- "Fail to allocate deletionBuffer-group-{}'s buffer because out of memory.",
- dataRegionId,
- e);
+ DataNodePipeMessages.FAIL_TO_ALLOCATE_DELETIONBUFFER_GROUP_S_BUFFER, dataRegionId, e);
close();
throw e;
}
@@ -166,7 +165,7 @@ private void allocateBuffers() {
public void registerDeletionResource(DeletionResource deletionResource) {
if (isClosed) {
LOGGER.error(
- "Fail to register DeletionResource into deletionBuffer-{} because this buffer is closed.",
+ DataNodePipeMessages.FAIL_TO_REGISTER_DELETIONRESOURCE_INTO_DELETIONBUFFER_BECAUSE,
dataRegionId);
return;
}
@@ -179,13 +178,13 @@ private void appendCurrentBatch() throws IOException {
}
private void fsyncCurrentLoggingFile() throws IOException {
- LOGGER.info("Deletion persist-{}: current batch fsync due to timeout", dataRegionId);
+ LOGGER.info(DataNodePipeMessages.DELETION_PERSIST_CURRENT_BATCH_FSYNC_DUE_TO, dataRegionId);
this.logChannel.force(false);
pendingDeletionsInOneTask.forEach(DeletionResource::onPersistSucceed);
}
private void closeCurrentLoggingFile(boolean notifySuccess) throws IOException {
- LOGGER.info("Deletion persist-{}: current file has been closed", dataRegionId);
+ LOGGER.info(DataNodePipeMessages.DELETION_PERSIST_CURRENT_FILE_HAS_BEEN_CLOSED, dataRegionId);
// Close old resource to fsync.
if (this.logStream != null) {
this.logStream.close();
@@ -230,7 +229,8 @@ private void switchLoggingFile() throws IOException {
maxProgressIndexInCurrentFile);
// IoTConsensusV2 ensures that deleteDataNodes use recoverProgressIndex.
if (!(curProgressIndex instanceof SimpleProgressIndex)) {
- throw new IOException("Invalid deletion progress index: " + curProgressIndex);
+ throw new IOException(
+ DataNodePipeMessages.INVALID_DELETION_PROGRESS_INDEX + curProgressIndex);
}
SimpleProgressIndex progressIndex = (SimpleProgressIndex) curProgressIndex;
// Deletion file name format:
@@ -252,9 +252,7 @@ private void switchLoggingFile() throws IOException {
DeletionResourceManager.MAGIC_VERSION_V1.getBytes(StandardCharsets.UTF_8)));
}
LOGGER.info(
- "Deletion persist-{}: switching to a new file, current writing: {}",
- dataRegionId,
- logFile);
+ DataNodePipeMessages.DELETION_PERSIST_SWITCHING_TO_A_NEW_FILE, dataRegionId, logFile);
} finally {
resetFileAttribute();
}
@@ -270,10 +268,10 @@ public void close() {
persistThread.shutdownNow();
try {
if (!persistThread.awaitTermination(30, TimeUnit.SECONDS)) {
- LOGGER.warn("persistThread did not terminate within {}s", 30);
+ LOGGER.warn(DataNodePipeMessages.PERSISTTHREAD_DID_NOT_TERMINATE_WITHIN_S, 30);
}
} catch (InterruptedException e) {
- LOGGER.warn("DAL Thread {} still doesn't exit after 30s", dataRegionId);
+ LOGGER.warn(DataNodePipeMessages.DAL_THREAD_STILL_DOESN_T_EXIT_AFTER, dataRegionId);
Thread.currentThread().interrupt();
}
}
@@ -281,7 +279,7 @@ public void close() {
try {
closeCurrentLoggingFile(false);
} catch (IOException e) {
- LOGGER.error("Fail to close current logging file when closing", e);
+ LOGGER.error(DataNodePipeMessages.FAIL_TO_CLOSE_CURRENT_LOGGING_FILE_WHEN, e);
}
// clean buffer
MmapUtil.clean(serializeBuffer);
@@ -295,7 +293,7 @@ private void waitUntilFlushAllDeletionsOrTimeOut() {
try {
Thread.sleep(50);
} catch (InterruptedException e) {
- LOGGER.error("Interrupted when waiting for all deletions flushed.");
+ LOGGER.error(DataNodePipeMessages.INTERRUPTED_WHEN_WAITING_FOR_ALL_DELETIONS_FLUSHED);
Thread.currentThread().interrupt();
}
}
@@ -310,8 +308,7 @@ public void run() {
try {
persistDeletion();
} catch (IOException e) {
- LOGGER.warn(
- "Deletion persist: Cannot write to {}, may cause data inconsistency.", logFile, e);
+ LOGGER.warn(DataNodePipeMessages.DELETION_PERSIST_CANNOT_WRITE_TO_MAY_CAUSE, logFile, e);
// if any exception occurred, this batch will not be written to disk and lost.
pendingDeletionsInOneTask.forEach(deletionResource -> deletionResource.onPersistFailed(e));
rollbackFileAttribute(currentTaskBatchSize.get());
@@ -324,7 +321,9 @@ public void run() {
private boolean serializeDeletionToBatchBuffer(DeletionResource deletionResource) {
LOGGER.debug(
- "Deletion persist-{}: serialize deletion resource {}", dataRegionId, deletionResource);
+ DataNodePipeMessages.DELETION_PERSIST_SERIALIZE_DELETION_RESOURCE,
+ dataRegionId,
+ deletionResource);
ByteBuffer buffer = deletionResource.serialize();
// if working buffer doesn't have enough space
if (buffer.position() > serializeBuffer.remaining()) {
@@ -348,8 +347,7 @@ private void persistDeletion() throws IOException {
maxProgressIndexInCurrentFile.updateToMinimumEqualOrIsAfterProgressIndex(
firstDeletionResource.getProgressIndex());
} catch (InterruptedException e) {
- LOGGER.warn(
- "Interrupted when waiting for taking DeletionResource from blocking queue to serialize.");
+ LOGGER.warn(DataNodePipeMessages.INTERRUPTED_WHEN_WAITING_FOR_TAKING_DELETIONRESOURCE_FROM);
Thread.currentThread().interrupt();
return;
}
@@ -363,8 +361,7 @@ private void persistDeletion() throws IOException {
deletionResource =
deletionResources.poll(config.getWalAsyncModeFsyncDelayInMs(), TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
- LOGGER.warn(
- "Interrupted when waiting for taking WALEntry from blocking queue to serialize.");
+ LOGGER.warn(DataNodePipeMessages.INTERRUPTED_WHEN_WAITING_FOR_TAKING_WALENTRY_FROM);
Thread.currentThread().interrupt();
}
// If timeout, flush deletions to disk.
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/deletion/recover/DeletionReader.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/deletion/recover/DeletionReader.java
index 1fa00cd0a0e65..0bd6f48d51f79 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/deletion/recover/DeletionReader.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/deletion/recover/DeletionReader.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.db.pipe.consensus.deletion.recover;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.consensus.deletion.DeletionResource;
import org.apache.iotdb.db.pipe.consensus.deletion.DeletionResourceManager;
@@ -63,7 +64,7 @@ public List readAllDeletions() throws IOException {
magicStringBuffer.flip();
String magicVersion = new String(magicStringBuffer.array(), StandardCharsets.UTF_8);
if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("Read deletion file-{} magic version: {}", logFile, magicVersion);
+ LOGGER.debug(DataNodePipeMessages.READ_DELETION_FILE_MAGIC_VERSION, logFile, magicVersion);
}
// Read deletions
@@ -79,16 +80,13 @@ public List readAllDeletions() throws IOException {
DeletionResource.deserialize(byteBuffer, regionId, removeHook);
deletions.add(deletionResource);
if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("Read deletion: {} from file {}", deletionResource, logFile);
+ LOGGER.debug(DataNodePipeMessages.READ_DELETION_FROM_FILE, deletionResource, logFile);
}
}
return deletions;
} catch (IOException e) {
// if file is corrupted, throw an exception and skip subsequence DAL.
- LOGGER.warn(
- "Failed to read deletion file {}, may because this file corrupted when writing it.",
- logFile,
- e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_READ_DELETION_FILE_MAY_BECAUSE, logFile, e);
throw e;
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/PipeInsertionEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/PipeInsertionEvent.java
index 3e1f4b476ae5e..7884e9dafcbb9 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/PipeInsertionEvent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/PipeInsertionEvent.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.commons.pipe.datastructure.pattern.TreePattern;
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
import org.apache.iotdb.commons.utils.PathUtils;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent;
import javax.validation.constraints.NotNull;
@@ -129,7 +130,7 @@ protected PipeInsertionEvent(
public boolean isTableModelEvent() {
if (isTableModelEvent == null) {
if (sourceDatabaseNameFromDataRegion == null) {
- throw new IllegalStateException("databaseNameFromDataRegion is null");
+ throw new IllegalStateException(DataNodePipeMessages.DATABASENAMEFROMDATAREGION_IS_NULL);
}
return isTableModelEvent = PathUtils.isTableModelDatabase(sourceDatabaseNameFromDataRegion);
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/row/PipeDataTypeTransformer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/row/PipeDataTypeTransformer.java
index caa98cdad6ace..ece35eed0f61a 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/row/PipeDataTypeTransformer.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/row/PipeDataTypeTransformer.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.db.pipe.event.common.row;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.pipe.api.type.Type;
import org.apache.tsfile.enums.TSDataType;
@@ -64,7 +65,7 @@ private static Type getPipeDataType(final byte type) {
case 11:
return Type.STRING;
default:
- throw new IllegalArgumentException("Invalid input: " + type);
+ throw new IllegalArgumentException(DataNodePipeMessages.INVALID_INPUT + type);
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/row/PipeRowCollector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/row/PipeRowCollector.java
index 822e7fad4bc6a..97b21695c1742 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/row/PipeRowCollector.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/row/PipeRowCollector.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta;
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.PipeInsertionEvent;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletEventConverter;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent;
@@ -57,7 +58,7 @@ public PipeRowCollector(
@Override
public void collectRow(Row row) {
if (!(row instanceof PipeRow)) {
- throw new PipeException("Row can not be customized");
+ throw new PipeException(DataNodePipeMessages.ROW_CAN_NOT_BE_CUSTOMIZED);
}
final PipeRow pipeRow = (PipeRow) row;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/schema/PipeSchemaRegionSnapshotEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/schema/PipeSchemaRegionSnapshotEvent.java
index 14d8c6b69476d..71aae4c3614d2 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/schema/PipeSchemaRegionSnapshotEvent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/schema/PipeSchemaRegionSnapshotEvent.java
@@ -27,6 +27,7 @@
import org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager.PipeEventResource;
import org.apache.iotdb.commons.pipe.resource.snapshot.PipeSnapshotResourceManager;
import org.apache.iotdb.commons.queryengine.plan.planner.plan.node.PlanNodeType;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.ReferenceTrackableEvent;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
import org.apache.iotdb.db.queryengine.plan.statement.StatementType;
@@ -355,7 +356,7 @@ protected void finalizeResource() {
}
} catch (final Exception e) {
LOGGER.warn(
- "Decrease reference count for mTree snapshot {} or tLog {} or attribute snapshot {} error.",
+ DataNodePipeMessages.DECREASE_REFERENCE_COUNT_FOR_MTREE_SNAPSHOT_OR,
mTreeSnapshotPath,
tagLogSnapshotPath,
attributeSnapshotPath,
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/schema/PipeSchemaSerializableEventType.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/schema/PipeSchemaSerializableEventType.java
index da73865c2a6f7..9164ea3337e7a 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/schema/PipeSchemaSerializableEventType.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/schema/PipeSchemaSerializableEventType.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.db.pipe.event.common.schema;
import org.apache.iotdb.commons.pipe.event.SerializableEvent;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import java.io.IOException;
import java.nio.ByteBuffer;
@@ -72,7 +73,7 @@ public static SerializableEvent deserialize(final ByteBuffer buffer, final byte
event = new PipeSchemaRegionSnapshotEvent(2);
break;
default:
- throw new IllegalArgumentException("Invalid event type: " + eventType);
+ throw new IllegalArgumentException(DataNodePipeMessages.INVALID_EVENT_TYPE + eventType);
}
event.deserializeFromByteBuffer(buffer);
return event;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/statement/PipeStatementInsertionEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/statement/PipeStatementInsertionEvent.java
index 42afe2e5b6c96..9f812d90b3c8b 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/statement/PipeStatementInsertionEvent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/statement/PipeStatementInsertionEvent.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.commons.pipe.datastructure.pattern.TreePattern;
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
import org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.ReferenceTrackableEvent;
import org.apache.iotdb.db.pipe.event.common.PipeInsertionEvent;
import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics;
@@ -141,7 +142,7 @@ public EnrichedEvent shallowCopySelfAndBindPipeTaskMetaForProgressReport(
long startTime,
long endTime) {
throw new UnsupportedOperationException(
- "shallowCopySelfAndBindPipeTaskMetaForProgressReport() is not supported!");
+ DataNodePipeMessages.SHALLOWCOPYSELFANDBINDPIPETASKMETAFORPROGRESSREPORT_IS_NOT_SUPPORTED);
}
@Override
@@ -152,13 +153,13 @@ public boolean isGeneratedByPipe() {
@Override
public boolean mayEventTimeOverlappedWithTimeRange() {
throw new UnsupportedOperationException(
- "mayEventTimeOverlappedWithTimeRange() is not supported!");
+ DataNodePipeMessages.MAYEVENTTIMEOVERLAPPEDWITHTIMERANGE_IS_NOT_SUPPORTED);
}
@Override
public boolean mayEventPathsOverlappedWithPattern() {
throw new UnsupportedOperationException(
- "mayEventPathsOverlappedWithPattern() is not supported!");
+ DataNodePipeMessages.MAYEVENTPATHSOVERLAPPEDWITHPATTERN_IS_NOT_SUPPORTED);
}
public void markAsNeedToReport() {
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeInsertNodeTabletInsertionEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeInsertNodeTabletInsertionEvent.java
index 6841cee70c9bc..86579bb2aeb12 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeInsertNodeTabletInsertionEvent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeInsertNodeTabletInsertionEvent.java
@@ -33,6 +33,7 @@
import org.apache.iotdb.commons.pipe.datastructure.pattern.TreePattern;
import org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager.PipeEventResource;
import org.apache.iotdb.db.auth.AuthorityChecker;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.pipe.event.ReferenceTrackableEvent;
import org.apache.iotdb.db.pipe.event.common.PipeInsertionEvent;
@@ -250,7 +251,7 @@ public PipeInsertNodeTabletInsertionEvent shallowCopySelfAndBindPipeTaskMetaForP
final long endTime) {
final InsertNode node = insertNode;
if (Objects.isNull(node)) {
- throw new PipeException("InsertNode has been released");
+ throw new PipeException(DataNodePipeMessages.INSERTNODE_HAS_BEEN_RELEASED);
}
return new PipeInsertNodeTabletInsertionEvent(
getRawIsTableModelEvent(),
@@ -273,7 +274,7 @@ public PipeInsertNodeTabletInsertionEvent shallowCopySelfAndBindPipeTaskMetaForP
public boolean isGeneratedByPipe() {
final InsertNode node = insertNode;
if (Objects.isNull(node)) {
- throw new PipeException("InsertNode has been released");
+ throw new PipeException(DataNodePipeMessages.INSERTNODE_HAS_BEEN_RELEASED);
}
return node.isGeneratedByPipe();
}
@@ -383,7 +384,7 @@ public boolean mayEventTimeOverlappedWithTimeRange() {
return true;
} catch (final Exception e) {
LOGGER.warn(
- "Exception occurred when determining the event time of PipeInsertNodeTabletInsertionEvent({}) overlaps with the time range: [{}, {}]. Returning true to ensure data integrity.",
+ DataNodePipeMessages.EXCEPTION_OCCURRED_WHEN_DETERMINING_THE_EVENT_TIME,
this,
startTime,
endTime,
@@ -422,7 +423,7 @@ public boolean mayEventPathsOverlappedWithPattern() {
return true;
} catch (final Exception e) {
LOGGER.warn(
- "Exception occurred when determining the event time of PipeInsertNodeTabletInsertionEvent({}) overlaps with the time range: [{}, {}]. Returning true to ensure data integrity.",
+ DataNodePipeMessages.EXCEPTION_OCCURRED_WHEN_DETERMINING_THE_EVENT_TIME,
this,
startTime,
endTime,
@@ -498,7 +499,7 @@ private List initEventParsers() {
eventParsers = new ArrayList<>();
final InsertNode node = getInsertNode();
if (Objects.isNull(node)) {
- throw new PipeException("InsertNode has been released");
+ throw new PipeException(DataNodePipeMessages.INSERTNODE_HAS_BEEN_RELEASED);
}
switch (node.getType()) {
case INSERT_ROW:
@@ -540,7 +541,8 @@ private List initEventParsers() {
}
break;
default:
- throw new UnSupportedDataTypeException("Unsupported node type " + node.getType());
+ throw new UnSupportedDataTypeException(
+ DataNodePipeMessages.UNSUPPORTED_NODE_TYPE + node.getType());
}
final int size = eventParsers.size();
@@ -550,7 +552,7 @@ private List initEventParsers() {
return eventParsers;
} catch (final Exception e) {
- throw new PipeException("Initialize data container error.", e);
+ throw new PipeException(DataNodePipeMessages.INITIALIZE_DATA_CONTAINER_ERROR, e);
}
}
@@ -673,7 +675,7 @@ protected void finalizeResource() {
return null;
});
} catch (final Exception e) {
- LOGGER.warn("Decrease reference count error.", e);
+ LOGGER.warn(DataNodePipeMessages.DECREASE_REFERENCE_COUNT_ERROR, e);
}
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeRawTabletInsertionEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeRawTabletInsertionEvent.java
index 7e85106af346e..59a1a87b25fa8 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeRawTabletInsertionEvent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeRawTabletInsertionEvent.java
@@ -29,6 +29,7 @@
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
import org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager.PipeEventResource;
import org.apache.iotdb.commons.utils.TestOnly;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.ReferenceTrackableEvent;
import org.apache.iotdb.db.pipe.event.common.PipeInsertionEvent;
import org.apache.iotdb.db.pipe.event.common.tablet.parser.TabletInsertionEventParser;
@@ -368,7 +369,8 @@ public EnrichedEvent shallowCopySelfAndBindPipeTaskMetaForProgressReport(
@Override
public boolean isGeneratedByPipe() {
- throw new UnsupportedOperationException("isGeneratedByPipe() is not supported!");
+ throw new UnsupportedOperationException(
+ DataNodePipeMessages.ISGENERATEDBYPIPE_IS_NOT_SUPPORTED);
}
@Override
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/parser/TabletInsertionEventParser.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/parser/TabletInsertionEventParser.java
index 1091d0cd728ca..15fca0ef0b785 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/parser/TabletInsertionEventParser.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/parser/TabletInsertionEventParser.java
@@ -23,6 +23,7 @@
import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta;
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
import org.apache.iotdb.commons.schema.table.column.TsTableColumnCategory;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowNode;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertTabletNode;
import org.apache.iotdb.pipe.api.access.Row;
@@ -182,7 +183,7 @@ protected void parse(final InsertRowNode insertRowNode) {
this.rowCount = this.timestampColumn.length;
if (this.rowCount == 0 && LOGGER.isDebugEnabled()) {
LOGGER.debug(
- "InsertRowNode({}) is parsed to zero rows according to the pattern({}) and time range [{}, {}], the corresponding source event({}) will be ignored.",
+ DataNodePipeMessages.INSERTROWNODE_IS_PARSED_TO_ZERO_ROWS_ACCORDING,
insertRowNode,
getPattern(),
this.sourceEvent.getStartTime(),
@@ -276,7 +277,7 @@ protected void parse(final InsertTabletNode insertTabletNode) throws IllegalPath
this.rowCount = this.timestampColumn.length;
if (rowCount == 0 && LOGGER.isDebugEnabled()) {
LOGGER.debug(
- "InsertTabletNode({}) is parsed to zero rows according to the pattern({}) and time range [{}, {}], the corresponding source event({}) will be ignored.",
+ DataNodePipeMessages.INSERTTABLETNODE_IS_PARSED_TO_ZERO_ROWS_ACCORDING,
insertTabletNode,
getPattern(),
sourceEvent.getStartTime(),
@@ -383,7 +384,7 @@ protected void parse(final Tablet tablet, final boolean isAligned) {
this.rowCount = this.timestampColumn.length;
if (this.rowCount == 0 && LOGGER.isDebugEnabled()) {
LOGGER.debug(
- "Tablet({}) is parsed to zero rows according to the pattern({}) and time range [{}, {}], the corresponding source event({}) will be ignored.",
+ DataNodePipeMessages.TABLET_IS_PARSED_TO_ZERO_ROWS_ACCORDING,
tablet,
getPattern(),
this.sourceEvent.getStartTime(),
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/parser/TabletInsertionEventTablePatternParser.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/parser/TabletInsertionEventTablePatternParser.java
index 2a967ef38cd98..f234045007aaa 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/parser/TabletInsertionEventTablePatternParser.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/parser/TabletInsertionEventTablePatternParser.java
@@ -23,6 +23,7 @@
import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta;
import org.apache.iotdb.commons.pipe.datastructure.pattern.TablePattern;
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.RelationalInsertRowNode;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.RelationalInsertTabletNode;
@@ -103,7 +104,7 @@ protected void generateColumnIndexMapper(
@Override
public List processRowByRow(BiConsumer consumer) {
if (LOGGER.isWarnEnabled()) {
- LOGGER.warn("TablePatternParser does not support row by row processing");
+ LOGGER.warn(DataNodePipeMessages.TABLEPATTERNPARSER_DOES_NOT_SUPPORT_ROW_BY_ROW);
}
return Collections.emptyList();
}
@@ -111,7 +112,7 @@ public List processRowByRow(BiConsumer
@Override
public List processTablet(BiConsumer consumer) {
if (LOGGER.isWarnEnabled()) {
- LOGGER.warn("TablePatternParser does not support tablet processing");
+ LOGGER.warn(DataNodePipeMessages.TABLEPATTERNPARSER_DOES_NOT_SUPPORT_TABLET_PROCESSING);
}
return Collections.emptyList();
}
@@ -120,7 +121,7 @@ public List processTablet(BiConsumer
public List processTabletWithCollect(
BiConsumer consumer) {
if (LOGGER.isWarnEnabled()) {
- LOGGER.warn("TablePatternParser does not support tablet processing with collect");
+ LOGGER.warn(DataNodePipeMessages.TABLEPATTERNPARSER_DOES_NOT_SUPPORT_TABLET_PROCESSING_WITH);
}
return Collections.emptyList();
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeCompactedTsFileInsertionEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeCompactedTsFileInsertionEvent.java
index 95ff0a25373fe..a0f1d52b967ba 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeCompactedTsFileInsertionEvent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeCompactedTsFileInsertionEvent.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.commons.pipe.datastructure.pattern.TablePattern;
import org.apache.iotdb.commons.pipe.datastructure.pattern.TreePattern;
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.source.dataregion.realtime.assigner.PipeTsFileEpochProgressIndexKeeper;
import org.apache.iotdb.db.storageengine.dataregion.memtable.TsFileProcessor;
import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource;
@@ -144,7 +145,7 @@ private ProgressIndex bindOverridingProgressIndex(Set
@Override
public int getRebootTimes() {
throw new UnsupportedOperationException(
- "PipeCompactedTsFileInsertionEvent does not support getRebootTimes.");
+ DataNodePipeMessages.PIPECOMPACTEDTSFILEINSERTIONEVENT_DOES_NOT_SUPPORT_GETREBOOTTIMES);
}
@Override
@@ -160,7 +161,7 @@ public long getCommitId() {
.orElseThrow(
() ->
new IllegalStateException(
- "No commit IDs found in PipeCompactedTsFileInsertionEvent."));
+ DataNodePipeMessages.NO_COMMIT_IDS_FOUND_IN_PIPECOMPACTEDTSFILEINSERTIONEVENT));
}
// return dummy events for each commit ID (except the max one)
@@ -180,7 +181,8 @@ public List getCommitIds() {
@Override
public boolean equalsInIoTConsensusV2(final Object o) {
throw new UnsupportedOperationException(
- "PipeCompactedTsFileInsertionEvent does not support equalsInIoTConsensusV2.");
+ DataNodePipeMessages
+ .PIPECOMPACTEDTSFILEINSERTIONEVENT_DOES_NOT_SUPPORT_EQUALSINIOTCONSENSUSV2);
}
@Override
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeTsFileInsertionEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeTsFileInsertionEvent.java
index 8bd84ebb7d685..c4fdabc55bedf 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeTsFileInsertionEvent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeTsFileInsertionEvent.java
@@ -36,6 +36,7 @@
import org.apache.iotdb.commons.pipe.resource.log.PipeLogger;
import org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager.PipeEventResource;
import org.apache.iotdb.db.auth.AuthorityChecker;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.ReferenceTrackableEvent;
import org.apache.iotdb.db.pipe.event.common.PipeInsertionEvent;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent;
@@ -390,7 +391,7 @@ public ProgressIndex getProgressIndex() {
public ProgressIndex forceGetProgressIndex() {
if (resource.isEmpty()) {
LOGGER.warn(
- "Skipping temporary TsFile {}'s progressIndex, will report MinimumProgressIndex", tsFile);
+ DataNodePipeMessages.SKIPPING_TEMPORARY_TSFILE_S_PROGRESSINDEX_WILL_REPORT, tsFile);
return MinimumProgressIndex.INSTANCE;
}
if (Objects.nonNull(overridingProgressIndex)) {
@@ -468,7 +469,7 @@ public void throwIfNoPrivilege() {
return;
}
if (!waitForTsFileClose()) {
- LOGGER.info("Temporary tsFile {} detected, will skip its transfer.", tsFile);
+ LOGGER.info(DataNodePipeMessages.TEMPORARY_TSFILE_DETECTED_WILL_SKIP_ITS_TRANSFER, tsFile);
return;
}
if (isTableModelEvent()) {
@@ -598,7 +599,7 @@ public boolean mayEventPathsOverlappedWithPattern() {
return getDeviceSet().stream().anyMatch(treePattern::mayOverlapWithDevice);
} catch (final Exception e) {
LOGGER.info(
- "Pipe {}: failed to get devices from TsFile {}, extract it anyway",
+ DataNodePipeMessages.PIPE_FAILED_TO_GET_DEVICES_FROM_TSFILE,
pipeName,
resource.getTsFilePath(),
e);
@@ -669,14 +670,14 @@ public void consumeTabletInsertionEventsWithRetry(
} catch (final PipeRuntimeOutOfMemoryCriticalException e) {
if (retryCount++ % 100 == 0) {
LOGGER.warn(
- "{}: failed to allocate memory for parsing TsFile {}, tablet event no. {}, retry count is {}, will keep retrying.",
+ DataNodePipeMessages.FAILED_TO_ALLOCATE_MEMORY_FOR_PARSING_TSFILE,
callerName,
getTsFile(),
tabletEventCount,
retryCount);
} else if (LOGGER.isDebugEnabled()) {
LOGGER.debug(
- "{}: failed to allocate memory for parsing TsFile {}, tablet event no. {}, retry count is {}, will keep retrying.",
+ DataNodePipeMessages.FAILED_TO_ALLOCATE_MEMORY_FOR_PARSING_TSFILE,
callerName,
getTsFile(),
tabletEventCount,
@@ -699,8 +700,7 @@ public Iterable toTabletInsertionEvents(final long timeout
throws PipeException {
try {
if (!waitForTsFileClose()) {
- LOGGER.warn(
- "Pipe skipping temporary TsFile's parsing which shouldn't be transferred: {}", tsFile);
+ LOGGER.warn(DataNodePipeMessages.PIPE_SKIPPING_TEMPORARY_TSFILE_S_PARSING_WHICH, tsFile);
return Collections.emptyList();
}
waitForResourceEnough4Parsing(timeoutMs);
@@ -747,13 +747,13 @@ private void waitForResourceEnough4Parsing(final long timeoutMs) throws Interrup
final double waitTimeSeconds = (currentTime - startTime) / 1000.0;
if (elapsedRecordTimeSeconds > 10.0) {
LOGGER.info(
- "Wait for memory enough for parsing {} for {} seconds.",
+ DataNodePipeMessages.WAIT_FOR_MEMORY_ENOUGH_FOR_PARSING_FOR,
resource != null ? resource.getTsFilePath() : "tsfile",
waitTimeSeconds);
lastRecordTime = currentTime;
} else if (LOGGER.isDebugEnabled()) {
LOGGER.debug(
- "Wait for memory enough for parsing {} for {} seconds.",
+ DataNodePipeMessages.WAIT_FOR_MEMORY_ENOUGH_FOR_PARSING_FOR,
resource != null ? resource.getTsFilePath() : "tsfile",
waitTimeSeconds);
}
@@ -769,7 +769,7 @@ private void waitForResourceEnough4Parsing(final long timeoutMs) throws Interrup
final long currentTime = System.currentTimeMillis();
final double waitTimeSeconds = (currentTime - startTime) / 1000.0;
LOGGER.info(
- "Wait for memory enough for parsing {} for {} seconds.",
+ DataNodePipeMessages.WAIT_FOR_MEMORY_ENOUGH_FOR_PARSING_FOR,
resource != null ? resource.getTsFilePath() : "tsfile",
waitTimeSeconds);
}
@@ -935,7 +935,8 @@ protected void finalizeResource() {
return null;
});
} catch (final Exception e) {
- LOGGER.warn("Decrease reference count for TsFile {} error.", tsFile.getPath(), e);
+ LOGGER.warn(
+ DataNodePipeMessages.DECREASE_REFERENCE_COUNT_FOR_TSFILE_ERROR, tsFile.getPath(), e);
}
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/aggregator/TsFileInsertionPointCounter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/aggregator/TsFileInsertionPointCounter.java
index 05cc76186b61b..c797277ee948a 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/aggregator/TsFileInsertionPointCounter.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/aggregator/TsFileInsertionPointCounter.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.db.pipe.event.common.tsfile.aggregator;
import org.apache.iotdb.commons.pipe.datastructure.pattern.TreePattern;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.tsfile.file.metadata.IDeviceID;
import org.apache.tsfile.file.metadata.TimeseriesMetadata;
@@ -169,7 +170,7 @@ public void close() {
tsFileSequenceReader.close();
}
} catch (final IOException e) {
- LOGGER.warn("Failed to close TsFileSequenceReader", e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_CLOSE_TSFILESEQUENCEREADER, e);
}
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/TsFileInsertionEventParser.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/TsFileInsertionEventParser.java
index a723adce0dfda..6c4d390c6c2d8 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/TsFileInsertionEventParser.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/TsFileInsertionEventParser.java
@@ -25,6 +25,7 @@
import org.apache.iotdb.commons.pipe.datastructure.pattern.TablePattern;
import org.apache.iotdb.commons.pipe.datastructure.pattern.TreePattern;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.PipeInsertionEvent;
import org.apache.iotdb.db.pipe.event.common.tsfile.parser.table.TsFileInsertionEventTableParser;
import org.apache.iotdb.db.pipe.metric.overview.PipeTsFileToTabletsMetrics;
@@ -113,7 +114,7 @@ protected TsFileInsertionEventParser(
IoTDBDescriptor.getInstance().getConfig().getPipeDataStructureTabletSizeInBytes());
LOGGER.info(
- "TsFile {} has initialized {}, pipeName: {}, creation time: {}, pattern: {}, startTime: {}, endTime: {}, withMod: {}",
+ DataNodePipeMessages.TSFILE_HAS_INITIALIZED_PIPENAME_CREATION_TIME_PATTERN,
tsFile,
getClass().getSimpleName(),
pipeName,
@@ -156,7 +157,7 @@ protected void recordParseEndTime() {
PipeTsFileToTabletsMetrics.getInstance().recordTsFileToTabletTime(taskID, totalTimeNanos);
parseEndTimeRecorded = true;
} catch (final Exception e) {
- LOGGER.warn("Failed to record parse end time for pipe {}", pipeName, e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_RECORD_PARSE_END_TIME_FOR, pipeName, e);
}
}
@@ -175,7 +176,7 @@ protected void recordTabletMetrics(final Tablet tablet) {
final long tabletMemorySize = PipeMemoryWeightUtil.calculateTabletSizeInBytes(tablet);
PipeTsFileToTabletsMetrics.getInstance().recordTabletGenerated(taskID, tabletMemorySize);
} catch (final Exception e) {
- LOGGER.warn("Failed to record tablet metrics for pipe {}", pipeName, e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_RECORD_TABLET_METRICS_FOR_PIPE, pipeName, e);
}
}
@@ -190,7 +191,7 @@ public void close() {
tsFileSequenceReader.close();
}
} catch (final IOException e) {
- LOGGER.warn("Failed to close TsFileSequenceReader", e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_CLOSE_TSFILESEQUENCEREADER, e);
}
if (allocatedMemoryBlockForTablet != null) {
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/query/TsFileInsertionEventQueryParser.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/query/TsFileInsertionEventQueryParser.java
index 7e7226a61656f..9069a99cbd733 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/query/TsFileInsertionEventQueryParser.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/query/TsFileInsertionEventQueryParser.java
@@ -30,6 +30,7 @@
import org.apache.iotdb.commons.pipe.datastructure.pattern.TreePattern;
import org.apache.iotdb.commons.utils.TestOnly;
import org.apache.iotdb.db.auth.AuthorityChecker;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.PipeInsertionEvent;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent;
import org.apache.iotdb.db.pipe.event.common.tsfile.parser.TsFileInsertionEventParser;
@@ -206,7 +207,7 @@ public TsFileInsertionEventQueryParser(
// Check if deviceId is deleted
if (deviceId == null) {
- LOGGER.warn("Found null deviceId, removing entry");
+ LOGGER.warn(DataNodePipeMessages.FOUND_NULL_DEVICEID_REMOVING_ENTRY);
iterator.remove();
continue;
}
@@ -236,7 +237,7 @@ public TsFileInsertionEventQueryParser(
currentModifications);
} catch (IOException e) {
LOGGER.warn(
- "Failed to read metadata for deviceId: {}, measurement: {}, removing",
+ DataNodePipeMessages.FAILED_TO_READ_METADATA_FOR_DEVICEID_MEASUREMENT,
deviceId,
measurement,
e);
@@ -421,7 +422,8 @@ public boolean hasNext() {
} catch (final Exception e) {
close();
throw new PipeException(
- "failed to create TsFileInsertionDataTabletIterator", e);
+ DataNodePipeMessages.FAILED_TO_CREATE_TSFILEINSERTIONDATATABLETITERATOR,
+ e);
}
}
@@ -519,7 +521,7 @@ public void close() {
tsFileReader.close();
}
} catch (final IOException e) {
- LOGGER.warn("Failed to close TsFileReader", e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_CLOSE_TSFILEREADER, e);
}
super.close();
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/query/TsFileInsertionEventQueryParserTabletIterator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/query/TsFileInsertionEventQueryParserTabletIterator.java
index 776b5e1e6fac1..20ba62496ca79 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/query/TsFileInsertionEventQueryParserTabletIterator.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/query/TsFileInsertionEventQueryParserTabletIterator.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.db.pipe.event.common.tsfile.parser.query;
import org.apache.iotdb.commons.path.PatternTreeMap;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.tsfile.parser.util.ModsOperationUtil;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlock;
@@ -117,7 +118,7 @@ public boolean hasNext() {
try {
return queryDataSet.hasNext();
} catch (final IOException e) {
- throw new PipeException("Failed to check next", e);
+ throw new PipeException(DataNodePipeMessages.FAILED_TO_CHECK_NEXT, e);
}
}
@@ -130,7 +131,7 @@ public Tablet next() {
try {
return buildNextTablet();
} catch (final IOException e) {
- throw new PipeException("Failed to build tablet", e);
+ throw new PipeException(DataNodePipeMessages.FAILED_TO_BUILD_TABLET, e);
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/scan/SinglePageWholeChunkReader.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/scan/SinglePageWholeChunkReader.java
index 2b2743bad18f1..511bf4ecb6d88 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/scan/SinglePageWholeChunkReader.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/scan/SinglePageWholeChunkReader.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.db.pipe.event.common.tsfile.parser.scan;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
+
import org.apache.tsfile.compress.IUnCompressor;
import org.apache.tsfile.encoding.decoder.Decoder;
import org.apache.tsfile.encrypt.EncryptParameter;
@@ -83,7 +85,7 @@ public static ByteBuffer readCompressedPageData(PageHeader pageHeader, ByteBuffe
// doesn't have a complete page body
if (compressedPageBodyLength > chunkBuffer.remaining()) {
throw new IOException(
- "do not has a complete page body. Expected:"
+ DataNodePipeMessages.DO_NOT_HAS_A_COMPLETE_PAGE_BODY
+ compressedPageBodyLength
+ ". Actual:"
+ chunkBuffer.remaining());
@@ -105,7 +107,7 @@ public static ByteBuffer uncompressPageData(
compressedPageData.array(), 0, compressedPageBodyLength, uncompressedPageData.array(), 0);
} catch (Exception e) {
throw new IOException(
- "Uncompress error! uncompress size: "
+ DataNodePipeMessages.UNCOMPRESS_ERROR_UNCOMPRESS_SIZE
+ pageHeader.getUncompressedSize()
+ "compressed size: "
+ pageHeader.getCompressedSize()
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/scan/TsFileInsertionEventScanParser.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/scan/TsFileInsertionEventScanParser.java
index 32823459fcfd3..dc4b348c6b51c 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/scan/TsFileInsertionEventScanParser.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/scan/TsFileInsertionEventScanParser.java
@@ -30,6 +30,7 @@
import org.apache.iotdb.commons.pipe.datastructure.pattern.TreePattern;
import org.apache.iotdb.db.auth.AuthorityChecker;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.PipeInsertionEvent;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent;
import org.apache.iotdb.db.pipe.event.common.tsfile.parser.TsFileInsertionEventParser;
@@ -356,7 +357,7 @@ private Tablet getNextTablet() {
return tablet;
} catch (final Exception e) {
close();
- throw new PipeException("Failed to get next tablet insertion event.", e);
+ throw new PipeException(DataNodePipeMessages.FAILED_TO_GET_NEXT_TABLET_INSERTION_EVENT, e);
}
}
@@ -425,7 +426,8 @@ private boolean putValueToColumns(final BatchData data, final Tablet tablet, fin
tablet.addValue(rowIndex, i, primitiveType.getBinary().getValues());
break;
default:
- throw new UnSupportedDataTypeException("UnSupported" + primitiveType.getDataType());
+ throw new UnSupportedDataTypeException(
+ DataNodePipeMessages.UNSUPPORTED + primitiveType.getDataType());
}
}
} else {
@@ -461,7 +463,8 @@ private boolean putValueToColumns(final BatchData data, final Tablet tablet, fin
tablet.addValue(rowIndex, 0, data.getBinary().getValues());
break;
default:
- throw new UnSupportedDataTypeException("UnSupported" + data.getDataType());
+ throw new UnSupportedDataTypeException(
+ DataNodePipeMessages.UNSUPPORTED + data.getDataType());
}
}
return isNeedFillTime;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/table/TsFileInsertionEventTableParser.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/table/TsFileInsertionEventTableParser.java
index 87cf374aeb266..ab1cc38a72797 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/table/TsFileInsertionEventTableParser.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/table/TsFileInsertionEventTableParser.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.commons.pipe.datastructure.pattern.TablePattern;
import org.apache.iotdb.db.auth.AuthorityChecker;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.PipeInsertionEvent;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent;
import org.apache.iotdb.db.pipe.event.common.tsfile.parser.TsFileInsertionEventParser;
@@ -182,7 +183,8 @@ && hasTablePrivilege(entry.getKey()),
return hasNext;
} catch (Exception e) {
close();
- throw new PipeException("Error while parsing tsfile insertion event", e);
+ throw new PipeException(
+ DataNodePipeMessages.ERROR_WHILE_PARSING_TSFILE_INSERTION_EVENT, e);
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/table/TsFileInsertionEventTableParserTabletIterator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/table/TsFileInsertionEventTableParserTabletIterator.java
index f05cf872c798b..95d3cc066552e 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/table/TsFileInsertionEventTableParserTabletIterator.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/table/TsFileInsertionEventTableParserTabletIterator.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.db.pipe.event.common.tsfile.parser.table;
import org.apache.iotdb.commons.path.PatternTreeMap;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.tsfile.parser.util.ModsOperationUtil;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlock;
@@ -203,7 +204,7 @@ public boolean hasNext() {
chunkMetadataIterator.next();
if (alignedChunkMetadata == null) {
throw new PipeException(
- "Table model tsfile parsing does not support this type of ChunkMeta");
+ DataNodePipeMessages.TABLE_MODEL_TSFILE_PARSING_DOES_NOT_SUPPORT);
}
// Reduce the number of times Chunks are read
@@ -477,7 +478,8 @@ private boolean fillMeasurementValueColumns(
binary.getValues() == null ? Binary.EMPTY_VALUE.getValues() : binary.getValues());
break;
default:
- throw new UnSupportedDataTypeException("UnSupported" + primitiveType.getDataType());
+ throw new UnSupportedDataTypeException(
+ DataNodePipeMessages.UNSUPPORTED + primitiveType.getDataType());
}
}
return needFillTime;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/util/ModsOperationUtil.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/util/ModsOperationUtil.java
index 66fed43feade6..e2b65b5415ccc 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/util/ModsOperationUtil.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/util/ModsOperationUtil.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.db.pipe.event.common.tsfile.parser.util;
import org.apache.iotdb.commons.path.PatternTreeMap;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.storageengine.dataregion.modification.ModEntry;
import org.apache.iotdb.db.storageengine.dataregion.modification.ModificationFile;
import org.apache.iotdb.db.utils.ModificationUtils;
@@ -61,7 +62,8 @@ private ModsOperationUtil() {
.forEach(
modification -> modifications.append(modification.keyOfPatternTree(), modification));
} catch (Exception e) {
- throw new PipeException("Failed to load modifications from TsFile: " + tsFile.getPath(), e);
+ throw new PipeException(
+ DataNodePipeMessages.FAILED_TO_LOAD_MODIFICATIONS_FROM_TSFILE + tsFile.getPath(), e);
}
return modifications;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeSinglePipeMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeSinglePipeMetrics.java
index 6535d371a915c..540e15e29bfd8 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeSinglePipeMetrics.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeSinglePipeMetrics.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.commons.pipe.agent.task.progress.PipeEventCommitManager;
import org.apache.iotdb.commons.service.metric.enums.Metric;
import org.apache.iotdb.commons.service.metric.enums.Tag;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
import org.apache.iotdb.db.pipe.source.dataregion.IoTDBDataRegionSource;
@@ -133,8 +134,7 @@ private void createAutoGauge(final String pipeID) {
public void unbindFrom(final AbstractMetricService metricService) {
ImmutableSet.copyOf(remainingEventAndTimeOperatorMap.keySet()).forEach(this::deregister);
if (!remainingEventAndTimeOperatorMap.isEmpty()) {
- LOGGER.warn(
- "Failed to unbind from pipe remaining event and time metrics, RemainingEventAndTimeOperator map not empty");
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_UNBIND_FROM_PIPE_REMAINING_EVENT);
}
}
@@ -345,9 +345,7 @@ public void freezeRate(final String pipeID) {
public void deregister(final String pipeID) {
if (!remainingEventAndTimeOperatorMap.containsKey(pipeID)) {
- LOGGER.warn(
- "Failed to deregister pipe remaining event and time metrics, RemainingEventAndTimeOperator({}) does not exist",
- pipeID);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_DEREGISTER_PIPE_REMAINING_EVENT_AND, pipeID);
return;
}
if (Objects.nonNull(metricService)) {
@@ -363,7 +361,7 @@ public void markRegionCommit(final String pipeID, final boolean isDataRegion) {
remainingEventAndTimeOperatorMap.get(pipeID);
if (Objects.isNull(operator)) {
LOGGER.warn(
- "Failed to mark pipe region commit, RemainingEventAndTimeOperator({}) does not exist",
+ DataNodePipeMessages.FAILED_TO_MARK_PIPE_REGION_COMMIT_REMAININGEVENTANDTIMEOPERATOR,
pipeID);
return;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeTsFileToTabletsMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeTsFileToTabletsMetrics.java
index f9436377bb3c6..fa67c6663d143 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeTsFileToTabletsMetrics.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeTsFileToTabletsMetrics.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.service.metric.enums.Metric;
import org.apache.iotdb.commons.service.metric.enums.Tag;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.source.dataregion.IoTDBDataRegionSource;
import org.apache.iotdb.metrics.AbstractMetricService;
import org.apache.iotdb.metrics.metricsets.IMetricSet;
@@ -103,9 +104,7 @@ private void createMetrics(final String pipeID) {
public void unbindFrom(final AbstractMetricService metricService) {
ImmutableSet.copyOf(pipe).forEach(this::deregister);
if (!pipe.isEmpty()) {
- LOGGER.warn(
- "Failed to unbind from pipe tsfile to tablets metrics, pipe map is not empty, pipe: {}",
- pipe);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_UNBIND_FROM_PIPE_TSFILE_TO, pipe);
}
}
@@ -158,8 +157,7 @@ public void register(final IoTDBDataRegionSource extractor) {
public void deregister(final String pipeID) {
if (!pipe.contains(pipeID)) {
- LOGGER.warn(
- "Failed to deregister pipe tsfile to tablets metrics, pipeID({}) does not exist", pipeID);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_DEREGISTER_PIPE_TSFILE_TO_TABLETS, pipeID);
return;
}
try {
@@ -179,8 +177,7 @@ public void markTsFileToTabletInvocation(final String taskID) {
}
final Rate rate = pipeRateMap.get(taskID);
if (rate == null) {
- LOGGER.info(
- "Failed to mark pipe tsfile to tablets invocation, pipeID({}) does not exist", taskID);
+ LOGGER.info(DataNodePipeMessages.FAILED_TO_MARK_PIPE_TSFILE_TO_TABLETS, taskID);
return;
}
rate.mark();
@@ -192,8 +189,7 @@ public void recordTsFileToTabletTime(final String taskID, long costTimeInNanos)
}
final Timer timer = pipeTimerMap.get(taskID);
if (timer == null) {
- LOGGER.info(
- "Failed to record pipe tsfile to tablets time, pipeID({}) does not exist", taskID);
+ LOGGER.info(DataNodePipeMessages.FAILED_TO_RECORD_PIPE_TSFILE_TO_TABLETS, taskID);
return;
}
timer.updateNanos(costTimeInNanos);
@@ -210,7 +206,7 @@ public void recordTabletGenerated(final String taskID, long tabletMemorySize) {
}
final Counter tabletCount = pipeTabletCountMap.get(taskID);
if (tabletCount == null) {
- LOGGER.info("Failed to record tablet generated, pipeID({}) does not exist", taskID);
+ LOGGER.info(DataNodePipeMessages.FAILED_TO_RECORD_TABLET_GENERATED_PIPEID_DOES, taskID);
return;
}
tabletCount.inc();
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/processor/PipeProcessorMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/processor/PipeProcessorMetrics.java
index 1e0dd86b41ab7..4fb94c9bc13cf 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/processor/PipeProcessorMetrics.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/processor/PipeProcessorMetrics.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.service.metric.enums.Metric;
import org.apache.iotdb.commons.service.metric.enums.Tag;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.agent.task.subtask.processor.PipeProcessorSubtask;
import org.apache.iotdb.metrics.AbstractMetricService;
import org.apache.iotdb.metrics.metricsets.IMetricSet;
@@ -112,7 +113,7 @@ public void unbindFrom(final AbstractMetricService metricService) {
deregister(taskID);
}
if (!processorMap.isEmpty()) {
- LOGGER.warn("Failed to unbind from pipe processor metrics, processor map not empty");
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_UNBIND_FROM_PIPE_PROCESSOR_METRICS);
}
}
@@ -182,9 +183,7 @@ public void markTabletEvent(final String taskID) {
}
final Rate rate = tabletRateMap.get(taskID);
if (rate == null) {
- LOGGER.info(
- "Failed to mark pipe processor tablet event, PipeProcessorSubtask({}) does not exist",
- taskID);
+ LOGGER.info(DataNodePipeMessages.FAILED_TO_MARK_PIPE_PROCESSOR_TABLET_EVENT, taskID);
return;
}
rate.mark();
@@ -196,9 +195,7 @@ public void markTsFileEvent(final String taskID) {
}
final Rate rate = tsFileRateMap.get(taskID);
if (rate == null) {
- LOGGER.info(
- "Failed to mark pipe processor tsfile event, PipeProcessorSubtask({}) does not exist",
- taskID);
+ LOGGER.info(DataNodePipeMessages.FAILED_TO_MARK_PIPE_PROCESSOR_TSFILE_EVENT, taskID);
return;
}
rate.mark();
@@ -210,9 +207,7 @@ public void markPipeHeartbeatEvent(final String taskID) {
}
final Rate rate = pipeHeartbeatRateMap.get(taskID);
if (rate == null) {
- LOGGER.info(
- "Failed to mark pipe processor heartbeat event, PipeProcessorSubtask({}) does not exist",
- taskID);
+ LOGGER.info(DataNodePipeMessages.FAILED_TO_MARK_PIPE_PROCESSOR_HEARTBEAT_EVENT, taskID);
return;
}
rate.mark();
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/schema/PipeSchemaRegionListenerMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/schema/PipeSchemaRegionListenerMetrics.java
index 0e0ac1455661b..526ec7df281d1 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/schema/PipeSchemaRegionListenerMetrics.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/schema/PipeSchemaRegionListenerMetrics.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.service.metric.enums.Metric;
import org.apache.iotdb.commons.service.metric.enums.Tag;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.source.schemaregion.SchemaRegionListeningQueue;
import org.apache.iotdb.metrics.AbstractMetricService;
import org.apache.iotdb.metrics.metricsets.IMetricSet;
@@ -72,8 +73,7 @@ private void createAutoGauge(final Integer schemaRegionId) {
public void unbindFrom(final AbstractMetricService metricService) {
ImmutableSet.copyOf(listeningQueueMap.keySet()).forEach(this::deregister);
if (!listeningQueueMap.isEmpty()) {
- LOGGER.warn(
- "Failed to unbind from pipe schema region listener metrics, listening queue map not empty");
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_UNBIND_FROM_PIPE_SCHEMA_REGION_2);
}
}
@@ -102,8 +102,7 @@ public void register(
public void deregister(final Integer schemaRegionId) {
if (!listeningQueueMap.containsKey(schemaRegionId)) {
LOGGER.warn(
- "Failed to deregister schema region listener metrics, SchemaRegionListeningQueue({}) does not exist",
- schemaRegionId);
+ DataNodePipeMessages.FAILED_TO_DEREGISTER_SCHEMA_REGION_LISTENER_METRICS, schemaRegionId);
return;
}
if (Objects.nonNull(metricService)) {
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/schema/PipeSchemaRegionSinkMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/schema/PipeSchemaRegionSinkMetrics.java
index c6e8ad5295070..d9b79fdda8e82 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/schema/PipeSchemaRegionSinkMetrics.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/schema/PipeSchemaRegionSinkMetrics.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.service.metric.enums.Metric;
import org.apache.iotdb.commons.service.metric.enums.Tag;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.agent.task.subtask.sink.PipeSinkSubtask;
import org.apache.iotdb.metrics.AbstractMetricService;
import org.apache.iotdb.metrics.metricsets.IMetricSet;
@@ -76,8 +77,7 @@ private void createRate(final String taskID) {
public void unbindFrom(final AbstractMetricService metricService) {
ImmutableSet.copyOf(connectorMap.keySet()).forEach(this::deregister);
if (!connectorMap.isEmpty()) {
- LOGGER.warn(
- "Failed to unbind from pipe schema region connector metrics, connector map not empty");
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_UNBIND_FROM_PIPE_SCHEMA_REGION);
}
}
@@ -110,9 +110,7 @@ public void register(final PipeSinkSubtask pipeSinkSubtask) {
public void deregister(final String taskID) {
if (!connectorMap.containsKey(taskID)) {
- LOGGER.warn(
- "Failed to deregister pipe schema region connector metrics, PipeConnectorSubtask({}) does not exist",
- taskID);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_DEREGISTER_PIPE_SCHEMA_REGION_CONNECTOR, taskID);
return;
}
if (Objects.nonNull(metricService)) {
@@ -127,9 +125,7 @@ public void markSchemaEvent(final String taskID) {
}
final Rate rate = schemaRateMap.get(taskID);
if (rate == null) {
- LOGGER.info(
- "Failed to mark pipe schema region write plan event, PipeConnectorSubtask({}) does not exist",
- taskID);
+ LOGGER.info(DataNodePipeMessages.FAILED_TO_MARK_PIPE_SCHEMA_REGION_WRITE, taskID);
return;
}
rate.mark();
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/schema/PipeSchemaRegionSourceMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/schema/PipeSchemaRegionSourceMetrics.java
index ce5d60449fdc4..459fa9fcaa3cf 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/schema/PipeSchemaRegionSourceMetrics.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/schema/PipeSchemaRegionSourceMetrics.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.service.metric.enums.Metric;
import org.apache.iotdb.commons.service.metric.enums.Tag;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.source.schemaregion.IoTDBSchemaRegionSource;
import org.apache.iotdb.metrics.AbstractMetricService;
import org.apache.iotdb.metrics.metricsets.IMetricSet;
@@ -75,8 +76,7 @@ private void createAutoGauge(final String taskID) {
public void unbindFrom(final AbstractMetricService metricService) {
ImmutableSet.copyOf(sourceMap.keySet()).forEach(this::deregister);
if (!sourceMap.isEmpty()) {
- LOGGER.warn(
- "Failed to unbind from pipe schema region extractor metrics, extractor map not empty");
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_UNBIND_FROM_PIPE_SCHEMA_REGION_1);
}
}
@@ -110,9 +110,7 @@ public void register(final IoTDBSchemaRegionSource source) {
public void deregister(final String taskID) {
if (!sourceMap.containsKey(taskID)) {
- LOGGER.warn(
- "Failed to deregister pipe schema region source metrics, IoTDBSchemaRegionSource({}) does not exist",
- taskID);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_DEREGISTER_PIPE_SCHEMA_REGION_SOURCE, taskID);
return;
}
if (Objects.nonNull(metricService)) {
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/sink/PipeDataRegionSinkMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/sink/PipeDataRegionSinkMetrics.java
index 2502c385e5c03..dd7707d1b9685 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/sink/PipeDataRegionSinkMetrics.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/sink/PipeDataRegionSinkMetrics.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.service.metric.enums.Metric;
import org.apache.iotdb.commons.service.metric.enums.Tag;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.agent.task.subtask.sink.PipeSinkSubtask;
import org.apache.iotdb.metrics.AbstractMetricService;
import org.apache.iotdb.metrics.metricsets.IMetricSet;
@@ -268,7 +269,7 @@ public void unbindFrom(final AbstractMetricService metricService) {
deregister(taskID);
}
if (!sinkMap.isEmpty()) {
- LOGGER.warn("Failed to unbind from pipe data region sink metrics, sink map not empty");
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_UNBIND_FROM_PIPE_DATA_REGION);
}
}
@@ -446,9 +447,7 @@ public void register(final PipeSinkSubtask pipeSinkSubtask) {
public void deregister(final String taskID) {
if (!sinkMap.containsKey(taskID)) {
- LOGGER.warn(
- "Failed to deregister pipe data region sink metrics, PipeSinkSubtask({}) does not exist",
- taskID);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_DEREGISTER_PIPE_DATA_REGION_SINK, taskID);
return;
}
if (Objects.nonNull(metricService)) {
@@ -463,9 +462,7 @@ public void markTabletEvent(final String taskID) {
}
final Rate rate = tabletRateMap.get(taskID);
if (rate == null) {
- LOGGER.info(
- "Failed to mark pipe data region sink tablet event, PipeSinkSubtask({}) does not exist",
- taskID);
+ LOGGER.info(DataNodePipeMessages.FAILED_TO_MARK_PIPE_DATA_REGION_SINK, taskID);
return;
}
rate.mark();
@@ -477,9 +474,7 @@ public void markTsFileEvent(final String taskID) {
}
final Rate rate = tsFileRateMap.get(taskID);
if (rate == null) {
- LOGGER.info(
- "Failed to mark pipe data region sink tsfile event, PipeSinkSubtask({}) does not exist",
- taskID);
+ LOGGER.info(DataNodePipeMessages.FAILED_TO_MARK_PIPE_DATA_REGION_SINK_1, taskID);
return;
}
rate.mark();
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/source/PipeAssignerMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/source/PipeAssignerMetrics.java
index 59e4e892d8e51..3665ec2429497 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/source/PipeAssignerMetrics.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/source/PipeAssignerMetrics.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.service.metric.enums.Metric;
import org.apache.iotdb.commons.service.metric.enums.Tag;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.source.dataregion.realtime.assigner.PipeDataRegionAssigner;
import org.apache.iotdb.metrics.AbstractMetricService;
import org.apache.iotdb.metrics.metricsets.IMetricSet;
@@ -90,7 +91,7 @@ public void unbindFrom(AbstractMetricService metricService) {
deregister(dataRegionId);
}
if (!assignerMap.isEmpty()) {
- LOGGER.warn("Failed to unbind from pipe assigner metrics, assigner map not empty");
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_UNBIND_FROM_PIPE_ASSIGNER_METRICS);
}
}
@@ -132,7 +133,7 @@ public void deregister(final int dataRegionId) {
synchronized (this) {
if (!assignerMap.containsKey(dataRegionId)) {
LOGGER.warn(
- "Failed to deregister pipe assigner metrics, PipeDataRegionAssigner({}) does not exist",
+ DataNodePipeMessages.FAILED_TO_DEREGISTER_PIPE_ASSIGNER_METRICS_PIPEDATAREGIONASSIGNER,
dataRegionId);
return;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/source/PipeDataRegionSourceMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/source/PipeDataRegionSourceMetrics.java
index 918445b57e7f2..f44799e055097 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/source/PipeDataRegionSourceMetrics.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/source/PipeDataRegionSourceMetrics.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.service.metric.enums.Metric;
import org.apache.iotdb.commons.service.metric.enums.Tag;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.source.dataregion.IoTDBDataRegionSource;
import org.apache.iotdb.db.pipe.source.dataregion.realtime.epoch.TsFileEpoch;
import org.apache.iotdb.metrics.AbstractMetricService;
@@ -186,7 +187,7 @@ public void unbindFrom(final AbstractMetricService metricService) {
deregister(taskID);
}
if (!extractorMap.isEmpty()) {
- LOGGER.warn("Failed to unbind from pipe extractor metrics, extractor map not empty");
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_UNBIND_FROM_PIPE_EXTRACTOR_METRICS);
}
}
@@ -298,9 +299,7 @@ public void register(final IoTDBDataRegionSource extractor) {
public void deregister(final String taskID) {
if (!extractorMap.containsKey(taskID)) {
- LOGGER.warn(
- "Failed to deregister pipe data region extractor metrics, IoTDBDataRegionExtractor({}) does not exist",
- taskID);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_DEREGISTER_PIPE_DATA_REGION_EXTRACTOR, taskID);
return;
}
if (Objects.nonNull(metricService)) {
@@ -315,9 +314,7 @@ public void markTabletEvent(final String taskID) {
}
final Rate rate = tabletRateMap.get(taskID);
if (rate == null) {
- LOGGER.info(
- "Failed to mark pipe data region extractor tablet event, IoTDBDataRegionExtractor({}) does not exist",
- taskID);
+ LOGGER.info(DataNodePipeMessages.FAILED_TO_MARK_PIPE_DATA_REGION_EXTRACTOR_1, taskID);
return;
}
rate.mark();
@@ -329,9 +326,7 @@ public void markTsFileEvent(final String taskID) {
}
final Rate rate = tsFileRateMap.get(taskID);
if (rate == null) {
- LOGGER.info(
- "Failed to mark pipe data region extractor tsfile event, IoTDBDataRegionExtractor({}) does not exist",
- taskID);
+ LOGGER.info(DataNodePipeMessages.FAILED_TO_MARK_PIPE_DATA_REGION_EXTRACTOR_2, taskID);
return;
}
rate.mark();
@@ -343,9 +338,7 @@ public void markPipeHeartbeatEvent(final String taskID) {
}
final Rate rate = pipeHeartbeatRateMap.get(taskID);
if (rate == null) {
- LOGGER.info(
- "Failed to mark pipe data region extractor heartbeat event, IoTDBDataRegionExtractor({}) does not exist",
- taskID);
+ LOGGER.info(DataNodePipeMessages.FAILED_TO_MARK_PIPE_DATA_REGION_EXTRACTOR, taskID);
return;
}
rate.mark();
@@ -358,9 +351,7 @@ public void setRecentProcessedTsFileEpochState(
}
final Gauge gauge = recentProcessedTsFileEpochStateMap.get(taskID);
if (gauge == null) {
- LOGGER.info(
- "Failed to set recent processed tsfile epoch state, PipeRealtimeDataRegionExtractor({}) does not exist",
- taskID);
+ LOGGER.info(DataNodePipeMessages.FAILED_TO_SET_RECENT_PROCESSED_TSFILE_EPOCH, taskID);
return;
}
gauge.set(state.getId());
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/AbstractFormalProcessor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/AbstractFormalProcessor.java
index b74227c16aec7..c523547d9f1d8 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/AbstractFormalProcessor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/AbstractFormalProcessor.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.db.pipe.processor.aggregate;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.pipe.api.PipePlugin;
import org.apache.iotdb.pipe.api.PipeProcessor;
import org.apache.iotdb.pipe.api.collector.EventCollector;
@@ -37,7 +38,7 @@ public final void process(
final TabletInsertionEvent tabletInsertionEvent, final EventCollector eventCollector)
throws Exception {
throw new UnsupportedOperationException(
- "The abstract formal processor does not support process events");
+ DataNodePipeMessages.THE_ABSTRACT_FORMAL_PROCESSOR_DOES_NOT_SUPPORT);
}
@Override
@@ -45,13 +46,13 @@ public final void process(
final TsFileInsertionEvent tsFileInsertionEvent, final EventCollector eventCollector)
throws Exception {
throw new UnsupportedOperationException(
- "The abstract formal processor does not support process events");
+ DataNodePipeMessages.THE_ABSTRACT_FORMAL_PROCESSOR_DOES_NOT_SUPPORT);
}
@Override
public final void process(final Event event, final EventCollector eventCollector)
throws Exception {
throw new UnsupportedOperationException(
- "The abstract formal processor does not support process events");
+ DataNodePipeMessages.THE_ABSTRACT_FORMAL_PROCESSOR_DOES_NOT_SUPPORT);
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/AggregateProcessor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/AggregateProcessor.java
index 63bdbd1daec39..b584db153d0ee 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/AggregateProcessor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/AggregateProcessor.java
@@ -30,6 +30,7 @@
import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskProcessorRuntimeEnvironment;
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
import org.apache.iotdb.commons.utils.PathUtils;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.pipe.agent.plugin.dataregion.PipeDataRegionPluginAgent;
import org.apache.iotdb.db.pipe.event.common.row.PipeResetTabletRow;
@@ -370,7 +371,8 @@ public void customize(
try {
stateReference.get().restoreTimestampAndWindows(entry.getValue());
} catch (final IOException e) {
- throw new PipeException("Encountered exception when deserializing from PipeTaskMeta", e);
+ throw new PipeException(
+ DataNodePipeMessages.ENCOUNTERED_EXCEPTION_WHEN_DESERIALIZING_FROM_PIPETASKMETA, e);
}
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/sametype/numeric/AbstractSameTypeNumericOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/sametype/numeric/AbstractSameTypeNumericOperator.java
index 2e31365647681..f3a609ee4905e 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/sametype/numeric/AbstractSameTypeNumericOperator.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/sametype/numeric/AbstractSameTypeNumericOperator.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.db.pipe.processor.aggregate.operator.intermediateresult.sametype.numeric;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.processor.aggregate.operator.intermediateresult.IntermediateResultOperator;
import org.apache.iotdb.pipe.api.type.Binary;
@@ -96,25 +97,25 @@ public boolean initAndGetIsSupport(final Binary initialInput, final long initial
@Override
public void updateValue(final boolean input, final long timestamp) {
throw new UnsupportedOperationException(
- "AbstractSameTypeNumericOperator does not support boolean input");
+ DataNodePipeMessages.ABSTRACTSAMETYPENUMERICOPERATOR_DOES_NOT_SUPPORT_BOOLEAN_INPUT);
}
@Override
public void updateValue(final LocalDate input, final long timestamp) {
throw new UnsupportedOperationException(
- "AbstractSameTypeNumericOperator does not support date input");
+ DataNodePipeMessages.ABSTRACTSAMETYPENUMERICOPERATOR_DOES_NOT_SUPPORT_DATE_INPUT);
}
@Override
public void updateValue(final String input, final long timestamp) {
throw new UnsupportedOperationException(
- "AbstractSameTypeNumericOperator does not support string input");
+ DataNodePipeMessages.ABSTRACTSAMETYPENUMERICOPERATOR_DOES_NOT_SUPPORT_STRING_INPUT);
}
@Override
public void updateValue(final Binary input, final long timestamp) {
throw new UnsupportedOperationException(
- "AbstractSameTypeNumericOperator does not support binary input");
+ DataNodePipeMessages.ABSTRACTSAMETYPENUMERICOPERATOR_DOES_NOT_SUPPORT_BINARY_INPUT);
}
@Override
@@ -162,7 +163,8 @@ public void serialize(final DataOutputStream outputStream) throws IOException {
case TEXT:
case BLOB:
default:
- throw new IOException(String.format("Unsupported output datatype %s", outPutDataType));
+ throw new IOException(
+ String.format(DataNodePipeMessages.UNSUPPORTED_OUTPUT_DATATYPE_FMT, outPutDataType));
}
}
@@ -189,7 +191,8 @@ public void deserialize(final ByteBuffer byteBuffer) throws IOException {
case DATE:
case TIMESTAMP:
default:
- throw new IOException(String.format("Unsupported output datatype %s", outPutDataType));
+ throw new IOException(
+ String.format(DataNodePipeMessages.UNSUPPORTED_OUTPUT_DATATYPE_FMT, outPutDataType));
}
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/specifictype/doubletype/FractionPoweredSumOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/specifictype/doubletype/FractionPoweredSumOperator.java
index a49d90c6ede7b..0e84372fa3865 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/specifictype/doubletype/FractionPoweredSumOperator.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/operator/intermediateresult/specifictype/doubletype/FractionPoweredSumOperator.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.db.pipe.processor.aggregate.operator.intermediateresult.specifictype.doubletype;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.processor.aggregate.operator.intermediateresult.IntermediateResultOperator;
import org.apache.iotdb.pipe.api.type.Binary;
@@ -104,7 +105,7 @@ public boolean initAndGetIsSupport(final Binary initialInput, final long initial
@Override
public void updateValue(final boolean input, final long timestamp) {
throw new UnsupportedOperationException(
- "FractionPoweredSumOperator does not support boolean input");
+ DataNodePipeMessages.FRACTIONPOWEREDSUMOPERATOR_DOES_NOT_SUPPORT_BOOLEAN_INPUT);
}
@Override
@@ -115,7 +116,7 @@ public void updateValue(final int input, final long timestamp) {
@Override
public void updateValue(final LocalDate input, final long timestamp) {
throw new UnsupportedOperationException(
- "FractionPoweredSumOperator does not support date input");
+ DataNodePipeMessages.FRACTIONPOWEREDSUMOPERATOR_DOES_NOT_SUPPORT_DATE_INPUT);
}
@Override
@@ -136,13 +137,13 @@ public void updateValue(final double input, final long timestamp) {
@Override
public void updateValue(final String input, final long timestamp) {
throw new UnsupportedOperationException(
- "FractionPoweredSumOperator does not support string input");
+ DataNodePipeMessages.FRACTIONPOWEREDSUMOPERATOR_DOES_NOT_SUPPORT_STRING_INPUT);
}
@Override
public void updateValue(final Binary initialInput, final long initialTimestamp) {
throw new UnsupportedOperationException(
- "FractionPoweredSumOperator does not support binary input");
+ DataNodePipeMessages.FRACTIONPOWEREDSUMOPERATOR_DOES_NOT_SUPPORT_BINARY_INPUT);
}
@Override
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/window/datastructure/TimeSeriesWindow.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/window/datastructure/TimeSeriesWindow.java
index 6cd046fb594fc..ca636fd789889 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/window/datastructure/TimeSeriesWindow.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/aggregate/window/datastructure/TimeSeriesWindow.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.db.pipe.processor.aggregate.window.datastructure;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.processor.aggregate.operator.aggregatedresult.AggregatedResultOperator;
import org.apache.iotdb.db.pipe.processor.aggregate.operator.intermediateresult.CustomizedReadableIntermediateResults;
import org.apache.iotdb.db.pipe.processor.aggregate.operator.intermediateresult.IntermediateResultOperator;
@@ -144,7 +145,7 @@ public Pair updateIntermediateResult(
entry.getValue().setLeft(TSDataType.BOOLEAN);
} else if (entry.getValue().getLeft() != TSDataType.BOOLEAN) {
LOGGER.warn(
- "Different data type encountered in one window, will purge. Previous type: {}, now type: {}",
+ DataNodePipeMessages.DIFFERENT_DATA_TYPE_ENCOUNTERED_IN_ONE_WINDOW,
entry.getValue().getLeft(),
TSDataType.BOOLEAN);
return new Pair<>(WindowState.PURGE, null);
@@ -200,7 +201,7 @@ public Pair updateIntermediateResult(
entry.getValue().setLeft(TSDataType.INT32);
} else if (entry.getValue().getLeft() != TSDataType.INT32) {
LOGGER.warn(
- "Different data type encountered in one window, will purge. Previous type: {}, now type: {}",
+ DataNodePipeMessages.DIFFERENT_DATA_TYPE_ENCOUNTERED_IN_ONE_WINDOW,
entry.getValue().getLeft(),
TSDataType.INT32);
return new Pair<>(WindowState.PURGE, null);
@@ -255,7 +256,7 @@ public Pair updateIntermediateResult(
entry.getValue().setLeft(TSDataType.DATE);
} else if (entry.getValue().getLeft() != TSDataType.DATE) {
LOGGER.warn(
- "Different data type encountered in one window, will purge. Previous type: {}, now type: {}",
+ DataNodePipeMessages.DIFFERENT_DATA_TYPE_ENCOUNTERED_IN_ONE_WINDOW,
entry.getValue().getLeft(),
TSDataType.DATE);
return new Pair<>(WindowState.PURGE, null);
@@ -310,7 +311,7 @@ public Pair updateIntermediateResult(
entry.getValue().setLeft(TSDataType.INT64);
} else if (entry.getValue().getLeft() != TSDataType.INT64) {
LOGGER.warn(
- "Different data type encountered in one window, will purge. Previous type: {}, now type: {}",
+ DataNodePipeMessages.DIFFERENT_DATA_TYPE_ENCOUNTERED_IN_ONE_WINDOW,
entry.getValue().getLeft(),
TSDataType.INT64);
return new Pair<>(WindowState.PURGE, null);
@@ -365,7 +366,7 @@ public Pair updateIntermediateResult(
entry.getValue().setLeft(TSDataType.FLOAT);
} else if (entry.getValue().getLeft() != TSDataType.FLOAT) {
LOGGER.warn(
- "Different data type encountered in one window, will purge. Previous type: {}, now type: {}",
+ DataNodePipeMessages.DIFFERENT_DATA_TYPE_ENCOUNTERED_IN_ONE_WINDOW,
entry.getValue().getLeft(),
TSDataType.FLOAT);
return new Pair<>(WindowState.PURGE, null);
@@ -419,7 +420,7 @@ public Pair updateIntermediateResult(
entry.getValue().setLeft(TSDataType.DOUBLE);
} else if (entry.getValue().getLeft() != TSDataType.DOUBLE) {
LOGGER.warn(
- "Different data type encountered in one window, will purge. Previous type: {}, now type: {}",
+ DataNodePipeMessages.DIFFERENT_DATA_TYPE_ENCOUNTERED_IN_ONE_WINDOW,
entry.getValue().getLeft(),
TSDataType.DOUBLE);
return new Pair<>(WindowState.PURGE, null);
@@ -473,7 +474,7 @@ public Pair updateIntermediateResult(
entry.getValue().setLeft(TSDataType.TEXT);
} else if (entry.getValue().getLeft() != TSDataType.TEXT) {
LOGGER.warn(
- "Different data type encountered in one window, will purge. Previous type: {}, now type: {}",
+ DataNodePipeMessages.DIFFERENT_DATA_TYPE_ENCOUNTERED_IN_ONE_WINDOW,
entry.getValue().getLeft(),
TSDataType.TEXT);
return new Pair<>(WindowState.PURGE, null);
@@ -527,7 +528,7 @@ public Pair updateIntermediateResult(
entry.getValue().setLeft(TSDataType.BLOB);
} else if (entry.getValue().getLeft() != TSDataType.BLOB) {
LOGGER.warn(
- "Different data type encountered in one window, will purge. Previous type: {}, now type: {}",
+ DataNodePipeMessages.DIFFERENT_DATA_TYPE_ENCOUNTERED_IN_ONE_WINDOW,
entry.getValue().getLeft(),
TSDataType.BLOB);
return new Pair<>(WindowState.PURGE, null);
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/downsampling/PartialPathLastObjectCache.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/downsampling/PartialPathLastObjectCache.java
index e51aed46dc747..225e0c3e86eb8 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/downsampling/PartialPathLastObjectCache.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/downsampling/PartialPathLastObjectCache.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.db.pipe.processor.downsampling;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlock;
import org.apache.iotdb.db.utils.MemUtils;
@@ -65,7 +66,8 @@ protected PartialPathLastObjectCache(final long memoryLimitInBytes) {
.eviction()
.ifPresent(eviction -> eviction.setMaximum(newMemory));
LOGGER.info(
- "PartialPathLastObjectCache.allocatedMemoryBlock has shrunk from {} to {}.",
+ DataNodePipeMessages
+ .PARTIALPATHLASTOBJECTCACHE_ALLOCATEDMEMORYBLOCK_HAS_SHRUNK_FROM_TO,
oldMemory,
newMemory);
})
@@ -77,7 +79,8 @@ protected PartialPathLastObjectCache(final long memoryLimitInBytes) {
.eviction()
.ifPresent(eviction -> eviction.setMaximum(newMemory));
LOGGER.info(
- "PartialPathLastObjectCache.allocatedMemoryBlock has expanded from {} to {}.",
+ DataNodePipeMessages
+ .PARTIALPATHLASTOBJECTCACHE_ALLOCATEDMEMORYBLOCK_HAS_EXPANDED_FROM_TO,
oldMemory,
newMemory);
});
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/downsampling/changing/ChangingValueSamplingProcessor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/downsampling/changing/ChangingValueSamplingProcessor.java
index 0ab08ecb403a9..255fc78bb5df4 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/downsampling/changing/ChangingValueSamplingProcessor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/downsampling/changing/ChangingValueSamplingProcessor.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.db.pipe.processor.downsampling.changing;
import org.apache.iotdb.commons.pipe.config.constant.PipeProcessorConstant;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.row.PipeRemarkableRow;
import org.apache.iotdb.db.pipe.event.common.row.PipeRow;
import org.apache.iotdb.db.pipe.processor.downsampling.DownSamplingProcessor;
@@ -124,7 +125,7 @@ public void customize(
super.customize(parameters, configuration);
LOGGER.info(
- "ChangingValueSamplingProcessor in {} is initialized with {}: {}, {}: {}, {}: {}.",
+ DataNodePipeMessages.CHANGINGVALUESAMPLINGPROCESSOR_IN_IS_INITIALIZED_WITH,
dataBaseNameWithPathSeparator,
PipeProcessorConstant.PROCESSOR_CHANGING_VALUE_COMPRESSION_DEVIATION,
compressionDeviation,
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/downsampling/sdt/SwingingDoorTrendingSamplingProcessor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/downsampling/sdt/SwingingDoorTrendingSamplingProcessor.java
index f7e90f749c8d4..33cdbc3623757 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/downsampling/sdt/SwingingDoorTrendingSamplingProcessor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/downsampling/sdt/SwingingDoorTrendingSamplingProcessor.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.db.pipe.processor.downsampling.sdt;
import org.apache.iotdb.commons.pipe.config.constant.PipeProcessorConstant;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.row.PipeRemarkableRow;
import org.apache.iotdb.db.pipe.event.common.row.PipeRow;
import org.apache.iotdb.db.pipe.processor.downsampling.DownSamplingProcessor;
@@ -125,7 +126,7 @@ public void customize(
super.customize(parameters, configuration);
LOGGER.info(
- "SwingingDoorTrendingSamplingProcessor in {} is initialized with {}: {}, {}: {}, {}: {}.",
+ DataNodePipeMessages.SWINGINGDOORTRENDINGSAMPLINGPROCESSOR_IN_IS_INITIALIZED_WITH,
dataBaseNameWithPathSeparator,
PipeProcessorConstant.PROCESSOR_SDT_COMPRESSION_DEVIATION_KEY,
compressionDeviation,
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/downsampling/tumbling/TumblingTimeSamplingProcessor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/downsampling/tumbling/TumblingTimeSamplingProcessor.java
index a9c735414d62c..36fa0b8382f7b 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/downsampling/tumbling/TumblingTimeSamplingProcessor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/downsampling/tumbling/TumblingTimeSamplingProcessor.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.db.pipe.processor.downsampling.tumbling;
import org.apache.iotdb.commons.queryengine.utils.TimestampPrecisionUtils;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.processor.downsampling.DownSamplingProcessor;
import org.apache.iotdb.db.pipe.processor.downsampling.PartialPathLastObjectCache;
import org.apache.iotdb.pipe.api.access.Row;
@@ -76,7 +77,7 @@ public void customize(
super.customize(parameters, configuration);
LOGGER.info(
- "TumblingTimeSamplingProcessor in {} is initialized with {}: {}s, {}: {}, {}: {}.",
+ DataNodePipeMessages.TUMBLINGTIMESAMPLINGPROCESSOR_IN_IS_INITIALIZED_WITH_S,
dataBaseNameWithPathSeparator,
PROCESSOR_TUMBLING_TIME_INTERVAL_SECONDS_KEY,
intervalInCurrentPrecision,
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/combiner/Combiner.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/combiner/Combiner.java
index 2ad490b1d032c..69d0c855570a4 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/combiner/Combiner.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/combiner/Combiner.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.commons.pipe.config.PipeConfig;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.processor.twostage.operator.Operator;
import org.apache.iotdb.db.pipe.processor.twostage.state.State;
import org.apache.iotdb.rpc.RpcUtils;
@@ -71,7 +72,7 @@ public TSStatus combine(int regionId, State state) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(
- "Combiner combine: regionId: {}, state: {}, receivedRegionIdSet: {}, expectedRegionIdSet: {}",
+ DataNodePipeMessages.COMBINER_COMBINE_REGIONID_STATE_RECEIVEDREGIONIDSET_EXPECTEDREGI,
regionId,
state,
receivedRegionIdSet,
@@ -84,7 +85,7 @@ public TSStatus combine(int regionId, State state) {
if (LOGGER.isInfoEnabled()) {
LOGGER.info(
- "Combiner combine completed: regionId: {}, state: {}, receivedRegionIdSet: {}, expectedRegionIdSet: {}",
+ DataNodePipeMessages.COMBINER_COMBINE_COMPLETED_REGIONID_STATE_RECEIVEDREGIONIDSET_EX,
regionId,
state,
receivedRegionIdSet,
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/combiner/PipeCombineHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/combiner/PipeCombineHandler.java
index afa4e2cd64a1a..a831078bbe2be 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/combiner/PipeCombineHandler.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/combiner/PipeCombineHandler.java
@@ -27,6 +27,7 @@
import org.apache.iotdb.confignode.rpc.thrift.TRegionInfo;
import org.apache.iotdb.confignode.rpc.thrift.TShowRegionReq;
import org.apache.iotdb.confignode.rpc.thrift.TShowRegionResp;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.pipe.processor.twostage.exchange.payload.FetchCombineResultResponse;
import org.apache.iotdb.db.pipe.processor.twostage.operator.Operator;
@@ -125,7 +126,7 @@ private Map fetchExpectedRegionId2DataNodeIdMap() {
configNodeClient.showRegion(
new TShowRegionReq().setConsensusGroupType(TConsensusGroupType.DataRegion));
if (showRegionResp == null || !showRegionResp.isSetRegionInfoList()) {
- throw new PipeException("Failed to fetch data region ids");
+ throw new PipeException(DataNodePipeMessages.FAILED_TO_FETCH_DATA_REGION_IDS);
}
for (final TRegionInfo regionInfo : showRegionResp.getRegionInfoList()) {
if (!RegionRoleType.Leader.getRoleType().equals(regionInfo.getRoleType())) {
@@ -135,13 +136,13 @@ private Map fetchExpectedRegionId2DataNodeIdMap() {
regionInfo.getConsensusGroupId().getId(), regionInfo.getDataNodeId());
}
} catch (ClientManagerException | TException e) {
- throw new PipeException("Failed to fetch data nodes", e);
+ throw new PipeException(DataNodePipeMessages.FAILED_TO_FETCH_DATA_NODES, e);
}
ALL_REGION_ID_2_DATANODE_ID_MAP_LAST_UPDATE_TIME.set(System.currentTimeMillis());
LOGGER.info(
- "Fetched data region ids {} at {}",
+ DataNodePipeMessages.FETCHED_DATA_REGION_IDS_AT,
ALL_REGION_ID_2_DATANODE_ID_MAP,
ALL_REGION_ID_2_DATANODE_ID_MAP_LAST_UPDATE_TIME.get());
}
@@ -152,7 +153,7 @@ private Map fetchExpectedRegionId2DataNodeIdMap() {
regionId -> !ALL_REGION_ID_2_DATANODE_ID_MAP.containsKey(regionId));
if (LOGGER.isInfoEnabled()) {
LOGGER.info(
- "Two stage aggregate pipe (pipeName={}, creationTime={}) related region ids {}",
+ DataNodePipeMessages.TWO_STAGE_AGGREGATE_PIPE_PIPENAME_CREATIONTIME_RELATED,
pipeName,
creationTime,
pipeRelatedRegionIdSet);
@@ -183,7 +184,8 @@ public synchronized void cleanOutdatedCombiner() {
entry -> {
if (!entry.getValue().isComplete()) {
LOGGER.info(
- "Clean outdated incomplete combiner: pipeName={}, creationTime={}, combineId={}",
+ DataNodePipeMessages
+ .CLEAN_OUTDATED_INCOMPLETE_COMBINER_PIPENAME_CREATIONTIME_COMBINEID,
pipeName,
creationTime,
entry.getKey());
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/combiner/PipeCombineHandlerManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/combiner/PipeCombineHandlerManager.java
index 1b8342af77e97..b3d018a54662d 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/combiner/PipeCombineHandlerManager.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/combiner/PipeCombineHandlerManager.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.db.pipe.processor.twostage.combiner;
import org.apache.iotdb.commons.pipe.config.PipeConfig;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.pipe.processor.twostage.exchange.payload.CombineRequest;
import org.apache.iotdb.db.pipe.processor.twostage.exchange.payload.FetchCombineResultRequest;
@@ -75,7 +76,7 @@ public synchronized void deregister(String pipeName, long creationTime) {
try {
pipeId2CombineHandler.remove(pipeId).close();
} catch (Exception e) {
- LOGGER.warn("Error occurred when closing CombineHandler(id = {})", pipeId, e);
+ LOGGER.warn(DataNodePipeMessages.ERROR_OCCURRED_WHEN_CLOSING_COMBINEHANDLER_ID, pipeId, e);
}
}
}
@@ -101,7 +102,7 @@ public TPipeTransferResp handle(CombineRequest combineRequest) {
final PipeCombineHandler handler = pipeId2CombineHandler.get(pipeId);
if (Objects.isNull(handler)) {
- throw new PipeException("CombineHandler not found for pipeId = " + pipeId);
+ throw new PipeException(DataNodePipeMessages.COMBINEHANDLER_NOT_FOUND_FOR_PIPEID + pipeId);
}
return new TPipeTransferResp()
@@ -120,7 +121,7 @@ public FetchCombineResultResponse handle(FetchCombineResultRequest fetchCombineR
final PipeCombineHandler handler = pipeId2CombineHandler.get(pipeId);
if (Objects.isNull(handler)) {
- throw new PipeException("CombineHandler not found for pipeId = " + pipeId);
+ throw new PipeException(DataNodePipeMessages.COMBINEHANDLER_NOT_FOUND_FOR_PIPEID + pipeId);
}
return handler.fetchCombineResult(fetchCombineResultRequest.getCombineIdList());
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/CombineRequest.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/CombineRequest.java
index ae6f7f9a5fbd4..5a91f9f196eec 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/CombineRequest.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/CombineRequest.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.db.pipe.processor.twostage.exchange.payload;
import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.IoTDBSinkRequestVersion;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.processor.twostage.state.CountState;
import org.apache.iotdb.db.pipe.processor.twostage.state.State;
import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
@@ -123,7 +124,8 @@ private State instantiateState(final String stateClassName) throws Exception {
if (CountState.class.getName().equals(stateClassName)) {
return new CountState();
}
- throw new IllegalArgumentException("Unexpected state class: " + stateClassName);
+ throw new IllegalArgumentException(
+ DataNodePipeMessages.UNEXPECTED_STATE_CLASS + stateClassName);
}
@Override
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/receiver/TwoStageAggregateReceiver.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/receiver/TwoStageAggregateReceiver.java
index 240f7a89e5fca..4ab24850cf9e0 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/receiver/TwoStageAggregateReceiver.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/receiver/TwoStageAggregateReceiver.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.pipe.receiver.IoTDBReceiver;
import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.IoTDBSinkRequestVersion;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.processor.twostage.combiner.PipeCombineHandlerManager;
import org.apache.iotdb.db.pipe.processor.twostage.exchange.payload.CombineRequest;
import org.apache.iotdb.db.pipe.processor.twostage.exchange.payload.FetchCombineResultRequest;
@@ -59,13 +60,13 @@ public TPipeTransferResp receive(TPipeTransferReq req) {
}
}
- LOGGER.warn("Unknown request type {}: {}.", rawRequestType, req);
+ LOGGER.warn(DataNodePipeMessages.UNKNOWN_REQUEST_TYPE, rawRequestType, req);
return new TPipeTransferResp(
RpcUtils.getStatus(
TSStatusCode.PIPE_TYPE_ERROR,
String.format("Unknown request type %s.", rawRequestType)));
} catch (Exception e) {
- LOGGER.warn("Error occurs when receiving request: {}.", req, e);
+ LOGGER.warn(DataNodePipeMessages.ERROR_OCCURS_WHEN_RECEIVING_REQUEST, req, e);
return new TPipeTransferResp(
RpcUtils.getStatus(
TSStatusCode.PIPE_ERROR,
@@ -76,7 +77,7 @@ public TPipeTransferResp receive(TPipeTransferReq req) {
@Override
public void handleExit() {
if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("Two stage aggregate receiver is exiting.");
+ LOGGER.debug(DataNodePipeMessages.TWO_STAGE_AGGREGATE_RECEIVER_IS_EXITING);
}
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/sender/TwoStageAggregateSender.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/sender/TwoStageAggregateSender.java
index 3c36559a300e6..9b6e17e6e4a76 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/sender/TwoStageAggregateSender.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/sender/TwoStageAggregateSender.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.commons.pipe.sink.client.IoTDBSyncClient;
import org.apache.iotdb.confignode.rpc.thrift.TDataNodeInfo;
import org.apache.iotdb.confignode.rpc.thrift.TShowDataNodesResp;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.processor.twostage.combiner.PipeCombineHandlerManager;
import org.apache.iotdb.db.protocol.client.ConfigNodeClient;
import org.apache.iotdb.db.protocol.client.ConfigNodeClientManager;
@@ -81,17 +82,18 @@ public synchronized TPipeTransferResp request(long watermark, TPipeTransferReq r
client = reconstructIoTDBSyncClient(endPoint);
}
- LOGGER.info("Sending request {} (watermark = {}) to {}", req, watermark, endPoint);
+ LOGGER.info(DataNodePipeMessages.SENDING_REQUEST_WATERMARK_TO, req, watermark, endPoint);
try {
return client.pipeTransfer(req);
} catch (Exception e) {
- LOGGER.warn("Failed to send request {} (watermark = {}) to {}", req, watermark, endPoint, e);
+ LOGGER.warn(
+ DataNodePipeMessages.FAILED_TO_SEND_REQUEST_WATERMARK_TO, req, watermark, endPoint, e);
try {
reconstructIoTDBSyncClient(endPoint);
} catch (Exception ex) {
LOGGER.warn(
- "Failed to reconstruct IoTDBSyncClient {} after failure to send request {} (watermark = {})",
+ DataNodePipeMessages.FAILED_TO_RECONSTRUCT_IOTDBSYNCCLIENT_AFTER_FAILURE_TO,
endPoint,
req,
watermark,
@@ -122,7 +124,7 @@ private static boolean tryFetchEndPointsIfNecessary() {
ConfigNodeClientManager.getInstance().borrowClient(ConfigNodeInfo.CONFIG_REGION_ID)) {
final TShowDataNodesResp showDataNodesResp = configNodeClient.showDataNodes();
if (showDataNodesResp == null || showDataNodesResp.getDataNodesInfoList() == null) {
- throw new PipeException("Failed to fetch data nodes");
+ throw new PipeException(DataNodePipeMessages.FAILED_TO_FETCH_DATA_NODES);
}
for (final TDataNodeInfo dataNodeInfo : showDataNodesResp.getDataNodesInfoList()) {
dataNodeId2EndPointMap.put(
@@ -130,18 +132,20 @@ private static boolean tryFetchEndPointsIfNecessary() {
new TEndPoint(dataNodeInfo.getRpcAddresss(), dataNodeInfo.getRpcPort()));
}
} catch (ClientManagerException | TException e) {
- throw new PipeException("Failed to fetch data nodes", e);
+ throw new PipeException(DataNodePipeMessages.FAILED_TO_FETCH_DATA_NODES, e);
}
if (dataNodeId2EndPointMap.isEmpty()) {
- throw new PipeException("No data nodes' endpoints fetched");
+ throw new PipeException(DataNodePipeMessages.NO_DATA_NODES_ENDPOINTS_FETCHED);
}
DATANODE_ID_2_END_POINTS.set(dataNodeId2EndPointMap);
DATANODE_ID_2_END_POINTS_LAST_UPDATE_TIME.set(currentTime);
}
- LOGGER.info("Data nodes' endpoints for two-stage aggregation: {}", DATANODE_ID_2_END_POINTS);
+ LOGGER.info(
+ DataNodePipeMessages.DATA_NODES_ENDPOINTS_FOR_TWO_STAGE_AGGREGATION,
+ DATANODE_ID_2_END_POINTS);
return true;
}
@@ -153,7 +157,7 @@ private void tryConstructClients(boolean endPointsChanged) {
final Set expectedDataNodeIdSet =
PipeCombineHandlerManager.getInstance().getExpectedDataNodeIdSet(pipeName, creationTime);
if (expectedDataNodeIdSet.isEmpty()) {
- throw new PipeException("No expected region id set fetched");
+ throw new PipeException(DataNodePipeMessages.NO_EXPECTED_REGION_ID_SET_FETCHED);
}
endPoints =
@@ -162,7 +166,7 @@ private void tryConstructClients(boolean endPointsChanged) {
.map(Map.Entry::getValue)
.toArray(TEndPoint[]::new);
LOGGER.info(
- "End points for two-stage aggregation pipe (pipeName={}, creationTime={}) were updated to {}",
+ DataNodePipeMessages.END_POINTS_FOR_TWO_STAGE_AGGREGATION_PIPE,
pipeName,
creationTime,
endPoints);
@@ -175,7 +179,7 @@ private void tryConstructClients(boolean endPointsChanged) {
try {
endPointIoTDBSyncClientMap.put(endPoint, constructIoTDBSyncClient(endPoint));
} catch (TTransportException e) {
- LOGGER.warn("Failed to construct IoTDBSyncClient", e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_CONSTRUCT_IOTDBSYNCCLIENT, e);
}
}
@@ -184,7 +188,7 @@ private void tryConstructClients(boolean endPointsChanged) {
try {
endPointIoTDBSyncClientMap.remove(endPoint).close();
} catch (Exception e) {
- LOGGER.warn("Failed to close IoTDBSyncClient", e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_CLOSE_IOTDBSYNCCLIENT, e);
}
}
}
@@ -197,7 +201,7 @@ private IoTDBSyncClient reconstructIoTDBSyncClient(TEndPoint endPoint)
try {
oldClient.close();
} catch (Exception e) {
- LOGGER.warn("Failed to close old IoTDBSyncClient", e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_CLOSE_OLD_IOTDBSYNCCLIENT, e);
}
}
final IoTDBSyncClient newClient = constructIoTDBSyncClient(endPoint);
@@ -224,7 +228,7 @@ public void close() {
try {
client.close();
} catch (Exception e) {
- LOGGER.warn("Failed to close IoTDBSyncClient", e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_CLOSE_IOTDBSYNCCLIENT, e);
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/plugin/TwoStageCountProcessor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/plugin/TwoStageCountProcessor.java
index 31871968351fb..d9e2ed8bc17af 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/plugin/TwoStageCountProcessor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/plugin/TwoStageCountProcessor.java
@@ -28,6 +28,7 @@
import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskProcessorRuntimeEnvironment;
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
import org.apache.iotdb.commons.utils.PathUtils;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent;
@@ -115,7 +116,8 @@ public void validate(PipeParameterValidator validator) throws Exception {
try {
PathUtils.isLegalPath(Objects.requireNonNull(rawOutputSeries));
} catch (Exception e) {
- throw new PipeParameterNotValidException("Illegal output series path: " + rawOutputSeries);
+ throw new PipeParameterNotValidException(
+ DataNodePipeMessages.ILLEGAL_OUTPUT_SERIES_PATH + rawOutputSeries);
}
}
@@ -182,7 +184,7 @@ public void process(TabletInsertionEvent tabletInsertionEvent, EventCollector ev
if (!(tabletInsertionEvent instanceof PipeInsertNodeTabletInsertionEvent)
&& !(tabletInsertionEvent instanceof PipeRawTabletInsertionEvent)) {
LOGGER.warn(
- "Ignored TabletInsertionEvent is not an instance of PipeInsertNodeTabletInsertionEvent or PipeRawTabletInsertionEvent: {}",
+ DataNodePipeMessages.IGNORED_TABLETINSERTIONEVENT_IS_NOT_AN_INSTANCE_OF,
tabletInsertionEvent);
return;
}
@@ -205,7 +207,7 @@ public void process(TsFileInsertionEvent tsFileInsertionEvent, EventCollector ev
throws Exception {
if (!(tsFileInsertionEvent instanceof PipeTsFileInsertionEvent)) {
LOGGER.warn(
- "Ignored TsFileInsertionEvent is not an instance of PipeTsFileInsertionEvent: {}",
+ DataNodePipeMessages.IGNORED_TSFILEINSERTIONEVENT_IS_NOT_AN_INSTANCE_OF,
tsFileInsertionEvent);
return;
}
@@ -214,7 +216,7 @@ public void process(TsFileInsertionEvent tsFileInsertionEvent, EventCollector ev
event.skipReportOnCommit();
if (!event.waitForTsFileClose()) {
- LOGGER.warn("Ignored TsFileInsertionEvent is empty: {}", event);
+ LOGGER.warn(DataNodePipeMessages.IGNORED_TSFILEINSERTIONEVENT_IS_EMPTY, event);
return;
}
@@ -257,7 +259,7 @@ private void collectGlobalCountIfNecessary(EventCollector eventCollector) throws
if (timestampCountPair.right < lastCollectedTimestampCountPair.right) {
timestampCountPair.right = lastCollectedTimestampCountPair.right;
LOGGER.warn(
- "Global count is less than the last collected count: timestamp={}, count={}",
+ DataNodePipeMessages.GLOBAL_COUNT_IS_LESS_THAN_THE_LAST,
timestampCountPair.left,
timestampCountPair.right);
}
@@ -303,7 +305,7 @@ private void commitLocalProgressIndexIfNecessary() {
if (fetchCombineResultResponse.getStatus().getCode()
!= TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
throw new PipeException(
- "Failed to fetch combine result: "
+ DataNodePipeMessages.FAILED_TO_FETCH_COMBINE_RESULT
+ fetchCombineResultResponse.getStatus().getMessage());
}
@@ -315,7 +317,7 @@ private void commitLocalProgressIndexIfNecessary() {
switch (resultType) {
case OUTDATED:
LOGGER.warn(
- "Two stage combine (region id = {}, combine id = {}) outdated: timestamp={}, count={}, progressIndex={}",
+ DataNodePipeMessages.TWO_STAGE_COMBINE_REGION_ID_COMBINE_ID_1,
regionId,
combineId,
pair.left[0],
@@ -324,7 +326,7 @@ private void commitLocalProgressIndexIfNecessary() {
continue;
case INCOMPLETE:
LOGGER.info(
- "Two stage combine (region id = {}, combine id = {}) incomplete: timestamp={}, count={}, progressIndex={}",
+ DataNodePipeMessages.TWO_STAGE_COMBINE_REGION_ID_COMBINE_ID,
regionId,
combineId,
pair.left[0],
@@ -338,7 +340,7 @@ private void commitLocalProgressIndexIfNecessary() {
pipeTaskMeta.updateProgressIndex(
new StateProgressIndex(pair.left[0], state, pair.right));
LOGGER.info(
- "Two stage combine (region id = {}, combine id = {}) success: timestamp={}, count={}, progressIndex={}, committed progressIndex={}",
+ DataNodePipeMessages.TWO_STAGE_COMBINE_REGION_ID_COMBINE_ID_2,
regionId,
combineId,
pair.left[0],
@@ -347,13 +349,14 @@ private void commitLocalProgressIndexIfNecessary() {
pipeTaskMeta.getProgressIndex());
continue;
default:
- throw new PipeException("Unknown combine result type: " + resultType);
+ throw new PipeException(
+ DataNodePipeMessages.UNKNOWN_COMBINE_RESULT_TYPE + resultType);
}
}
} catch (Exception e) {
localCommitQueue.add(pair);
LOGGER.warn(
- "Failure occurred when trying to commit progress index. timestamp={}, count={}, progressIndex={}",
+ DataNodePipeMessages.FAILURE_OCCURRED_WHEN_TRYING_TO_COMMIT_PROGRESS,
pair.left[0],
pair.left[1],
pair.right,
@@ -386,14 +389,15 @@ private boolean triggerCombine(Pair pair) {
Long.toString(watermark),
new CountState(count)));
if (resp.getStatus().getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- throw new PipeException("Failed to combine count: " + resp.getStatus().getMessage());
+ throw new PipeException(
+ DataNodePipeMessages.FAILED_TO_COMBINE_COUNT + resp.getStatus().getMessage());
}
localCommitQueue.add(pair);
return true;
} catch (Exception e) {
localRequestQueue.add(pair);
LOGGER.warn(
- "Failed to trigger combine. watermark={}, count={}, progressIndex={}",
+ DataNodePipeMessages.FAILED_TO_TRIGGER_COMBINE_WATERMARK_COUNT_PROGRESSINDEX,
watermark,
count,
progressIndex,
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/airgap/IoTDBAirGapReceiver.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/airgap/IoTDBAirGapReceiver.java
index 278c1ccaaefc5..825ff4c5eac2c 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/airgap/IoTDBAirGapReceiver.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/airgap/IoTDBAirGapReceiver.java
@@ -25,6 +25,7 @@
import org.apache.iotdb.commons.pipe.sink.payload.airgap.AirGapELanguageConstant;
import org.apache.iotdb.commons.pipe.sink.payload.airgap.AirGapOneByteResponse;
import org.apache.iotdb.commons.pipe.sink.payload.airgap.AirGapPseudoTPipeTransferRequest;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.pipe.receiver.protocol.thrift.IoTDBDataNodeReceiverAgent;
import org.apache.iotdb.db.protocol.session.ClientSession;
@@ -73,7 +74,7 @@ public void runMayThrow() throws Throwable {
socket.setSoTimeout(PipeConfig.getInstance().getPipeSinkTransferTimeoutMs());
socket.setKeepAlive(true);
- LOGGER.info("Pipe air gap receiver {} started. Socket: {}", receiverId, socket);
+ LOGGER.info(DataNodePipeMessages.PIPE_AIR_GAP_RECEIVER_STARTED_SOCKET, receiverId, socket);
SessionManager.getInstance().registerSession(new ClientSession(socket));
@@ -83,15 +84,10 @@ public void runMayThrow() throws Throwable {
receive();
}
LOGGER.info(
- "Pipe air gap receiver {} closed because socket is closed. Socket: {}",
- receiverId,
- socket);
+ DataNodePipeMessages.PIPE_AIR_GAP_RECEIVER_CLOSED_BECAUSE_SOCKET, receiverId, socket);
} catch (final Exception e) {
LOGGER.warn(
- "Pipe air gap receiver {} closed because of exception. Socket: {}",
- receiverId,
- socket,
- e);
+ DataNodePipeMessages.PIPE_AIR_GAP_RECEIVER_CLOSED_BECAUSE_OF_1, receiverId, socket, e);
throw e;
} finally {
// session will be closed and removed here
@@ -112,9 +108,7 @@ private void receive() throws IOException {
// We directly close the socket here.
if (!checkSum(data)) {
LOGGER.warn(
- "Pipe air gap receiver {} closed because of checksum failed. Socket: {}",
- receiverId,
- socket);
+ DataNodePipeMessages.PIPE_AIR_GAP_RECEIVER_CLOSED_BECAUSE_OF, receiverId, socket);
try {
fail();
} finally {
@@ -136,14 +130,14 @@ private void receive() throws IOException {
handleReq(req, System.currentTimeMillis());
} catch (final PipeConnectionException e) {
LOGGER.info(
- "Pipe air gap receiver {}: Socket {} closed when listening to data. Because: {}",
+ DataNodePipeMessages.PIPE_AIR_GAP_RECEIVER_SOCKET_CLOSED_WHEN,
receiverId,
socket,
e.getMessage());
socket.close();
} catch (final Exception e) {
LOGGER.warn(
- "Pipe air gap receiver {}: Exception during handling receiving. Socket: {}",
+ DataNodePipeMessages.PIPE_AIR_GAP_RECEIVER_EXCEPTION_DURING_HANDLING,
receiverId,
socket,
e);
@@ -162,7 +156,7 @@ private void handleReq(final AirGapPseudoTPipeTransferRequest req, final long st
|| status.getCode()
== TSStatusCode.PIPE_RECEIVER_IDEMPOTENT_CONFLICT_EXCEPTION.getStatusCode()) {
LOGGER.info(
- "Pipe air gap receiver {}: TSStatus {} is encountered at the air gap receiver, will ignore.",
+ DataNodePipeMessages.PIPE_AIR_GAP_RECEIVER_TSSTATUS_IS_ENCOUNTERED,
receiverId,
resp.getStatus());
ok();
@@ -173,20 +167,18 @@ private void handleReq(final AirGapPseudoTPipeTransferRequest req, final long st
} catch (final InterruptedException e) {
Thread.currentThread().interrupt();
}
- LOGGER.info(
- "Temporary unavailable exception encountered at air gap receiver, will retry locally.");
+ LOGGER.info(DataNodePipeMessages.TEMPORARY_UNAVAILABLE_EXCEPTION_ENCOUNTERED_AT_AIR_GAP);
if (System.currentTimeMillis() - startTime
< PipeConfig.getInstance().getPipeAirGapRetryMaxMs()) {
handleReq(req, startTime);
} else {
LOGGER.warn(
- "Pipe air gap receiver {}: Temporary unavailable retry timed out, returning FAIL to sender.",
- receiverId);
+ DataNodePipeMessages.PIPE_AIR_GAP_RECEIVER_TEMPORARY_UNAVAILABLE_RETRY, receiverId);
fail();
}
} else {
LOGGER.warn(
- "Pipe air gap receiver {}: Handle data failed, status: {}, req: {}",
+ DataNodePipeMessages.PIPE_AIR_GAP_RECEIVER_HANDLE_DATA_FAILED,
receiverId,
resp.getStatus(),
req);
@@ -215,7 +207,7 @@ private boolean checkSum(byte[] bytes) {
final long actualChecksum = crc32.getValue();
if (expectedChecksum != actualChecksum) {
LOGGER.warn(
- "Pipe air gap receiver {}: checksum failed, expected: {}, actual: {}",
+ DataNodePipeMessages.PIPE_AIR_GAP_RECEIVER_CHECKSUM_FAILED_EXPECTED,
receiverId,
expectedChecksum,
actualChecksum);
@@ -306,7 +298,8 @@ private void readTillFull(final InputStream inputStream, final byte[] readBuffer
// In socket input stream readBytes == -1 indicates EOF, namely the
// socket is closed
if (readBytes == -1) {
- throw new PipeConnectionException("Socket closed when executing readTillFull.");
+ throw new PipeConnectionException(
+ DataNodePipeMessages.SOCKET_CLOSED_WHEN_EXECUTING_READTILLFULL);
}
alreadyReadBytes += readBytes;
}
@@ -328,7 +321,8 @@ private void skipTillEnough(final InputStream inputStream, final long length)
// In socket input stream skippedBytes == 0 indicates EOF, namely the
// socket is closed
if (skippedBytes == 0) {
- throw new PipeConnectionException("Socket closed when executing skipTillEnough.");
+ throw new PipeConnectionException(
+ DataNodePipeMessages.SOCKET_CLOSED_WHEN_EXECUTING_SKIPTILLENOUGH);
}
currentSkippedBytes += skippedBytes;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/airgap/IoTDBAirGapReceiverAgent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/airgap/IoTDBAirGapReceiverAgent.java
index 1aa828a4f8c01..c14ce2054b7e9 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/airgap/IoTDBAirGapReceiverAgent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/airgap/IoTDBAirGapReceiverAgent.java
@@ -25,6 +25,7 @@
import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.commons.service.IService;
import org.apache.iotdb.commons.service.ServiceType;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -60,7 +61,7 @@ public void listen() {
ThreadName.PIPE_AIR_GAP_RECEIVER.getName() + "-" + airGapReceiverId);
airGapReceiverThread.start();
} catch (final IOException e) {
- LOGGER.warn("Unhandled exception during pipe air gap receiver listening", e);
+ LOGGER.warn(DataNodePipeMessages.UNHANDLED_EXCEPTION_DURING_PIPE_AIR_GAP_RECEIVER, e);
}
if (allowSubmitListen.get()) {
@@ -79,7 +80,7 @@ public void start() throws StartupException {
allowSubmitListen.set(true);
listenExecutor.submit(this::listen);
- LOGGER.info("IoTDBAirGapReceiverAgent {} started.", serverSocket);
+ LOGGER.info(DataNodePipeMessages.IOTDBAIRGAPRECEIVERAGENT_STARTED, serverSocket);
}
@Override
@@ -89,13 +90,13 @@ public void stop() {
serverSocket.close();
}
} catch (final IOException e) {
- LOGGER.warn("Failed to close IoTDBAirGapReceiverAgent's server socket", e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_CLOSE_IOTDBAIRGAPRECEIVERAGENT_S_SERVER_SOCKET, e);
}
allowSubmitListen.set(false);
listenExecutor.shutdown();
- LOGGER.info("IoTDBAirGapReceiverAgent {} stopped.", serverSocket);
+ LOGGER.info(DataNodePipeMessages.IOTDBAIRGAPRECEIVERAGENT_STOPPED, serverSocket);
}
@Override
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/iotconsensusv2/IoTConsensusV2Receiver.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/iotconsensusv2/IoTConsensusV2Receiver.java
index 27ce077252ef0..59905e54dadbf 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/iotconsensusv2/IoTConsensusV2Receiver.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/iotconsensusv2/IoTConsensusV2Receiver.java
@@ -47,6 +47,7 @@
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.exception.DiskSpaceInsufficientException;
import org.apache.iotdb.db.exception.load.LoadFileException;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.consensus.metric.IoTConsensusV2ReceiverMetrics;
import org.apache.iotdb.db.pipe.event.common.tsfile.aggregator.TsFileInsertionPointCounter;
import org.apache.iotdb.db.pipe.sink.protocol.iotconsensusv2.payload.request.IoTConsensusV2DeleteNodeReq;
@@ -142,7 +143,7 @@ public IoTConsensusV2Receiver(
try {
initiateTsFileBufferFolder(receiverBaseDirsName);
} catch (Exception e) {
- LOGGER.error("Fail to initiate file buffer folder, Error msg: {}", e.getMessage());
+ LOGGER.error(DataNodePipeMessages.FAIL_TO_INITIATE_FILE_BUFFER_FOLDER_ERROR, e.getMessage());
throw new RuntimeException(e);
}
@@ -150,9 +151,7 @@ public IoTConsensusV2Receiver(
this.folderManager = new FolderManager(receiveDirs, DirectoryStrategyType.SEQUENCE_STRATEGY);
this.iotConsensusV2TsFileWriterPool = new IoTConsensusV2TsFileWriterPool(consensusPipeName);
} catch (Exception e) {
- LOGGER.error(
- "Fail to create iotConsensusV2 receiver file folders allocation strategy because all disks of folders are full.",
- e);
+ LOGGER.error(DataNodePipeMessages.FAIL_TO_CREATE_IOTCONSENSUSV2_RECEIVER_FILE_FOLDERS, e);
throw new RuntimeException(e);
}
@@ -211,7 +210,8 @@ public TIoTConsensusV2TransferResp receive(final TIoTConsensusV2TransferReq req)
TSStatusCode.PIPE_TYPE_ERROR,
String.format("IoTConsensusV2 Unknown PipeRequestType %s.", rawRequestType));
if (LOGGER.isWarnEnabled()) {
- LOGGER.warn("IoTConsensusV2 Unknown PipeRequestType, response status = {}.", status);
+ LOGGER.warn(
+ DataNodePipeMessages.IOTCONSENSUSV2_UNKNOWN_PIPEREQUESTTYPE_RESPONSE_STATUS, status);
}
return new TIoTConsensusV2TransferResp(status);
}
@@ -279,7 +279,7 @@ private TIoTConsensusV2TransferResp loadEvent(final TIoTConsensusV2TransferReq r
return handleTransferFileSealWithMods(
IoTConsensusV2TsFileSealWithModReq.fromTIoTConsensusV2TransferReq(req));
case TRANSFER_TABLET_BATCH:
- LOGGER.info("IoTConsensusV2 transfer batch hasn't been implemented yet.");
+ LOGGER.info(DataNodePipeMessages.IOTCONSENSUSV2_TRANSFER_BATCH_HASN_T_BEEN_IMPLEMENTED);
default:
break;
}
@@ -291,13 +291,13 @@ private TIoTConsensusV2TransferResp loadEvent(final TIoTConsensusV2TransferReq r
TSStatusCode.IOT_CONSENSUS_V2_TYPE_ERROR,
String.format("Unknown IoTConsensusV2RequestType %s.", rawRequestType));
LOGGER.warn(
- "IoTConsensusV2-PipeName-{}: Unknown PipeRequestType, response status = {}.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_UNKNOWN_PIPEREQUESTTYPE_RESPONSE_STATUS,
consensusPipeName,
status);
return new TIoTConsensusV2TransferResp(status);
} catch (Exception e) {
final String error = String.format("Serialization error during pipe receiving, %s", e);
- LOGGER.warn("IoTConsensusV2-PipeName-{}: {}", consensusPipeName, error, e);
+ LOGGER.warn(DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME, consensusPipeName, error, e);
return new TIoTConsensusV2TransferResp(RpcUtils.getStatus(TSStatusCode.PIPE_ERROR, error));
}
}
@@ -344,7 +344,8 @@ private TIoTConsensusV2TransferResp handleTransferFilePiece(
try {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(
- "IoTConsensusV2-PipeName-{}: starting to receive tsFile pieces", consensusPipeName);
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_STARTING_TO_RECEIVE_TSFILE_PIECES,
+ consensusPipeName);
}
long startBorrowTsFileWriterNanos = System.nanoTime();
IoTConsensusV2TsFileWriter tsFileWriter =
@@ -374,7 +375,7 @@ private TIoTConsensusV2TransferResp handleTransferFilePiece(
"Request sender to reset file reader's offset from %s to %s.",
req.getStartWritingOffset(), writingFileWriter.length()));
LOGGER.warn(
- "IoTConsensusV2-PipeName-{}: File offset reset requested by receiver, response status = {}.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_FILE_OFFSET_RESET_REQUESTED_BY,
consensusPipeName,
status);
return IoTConsensusV2TransferFilePieceResp.toTIoTConsensusV2TransferResp(
@@ -391,7 +392,7 @@ private TIoTConsensusV2TransferResp handleTransferFilePiece(
RpcUtils.SUCCESS_STATUS, writingFileWriter.length());
} catch (Exception e) {
LOGGER.warn(
- "IoTConsensusV2-PipeName-{}: Failed to write file piece from req {}.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_FAILED_TO_WRITE_FILE_PIECE,
consensusPipeName,
req,
e);
@@ -418,7 +419,9 @@ private TIoTConsensusV2TransferResp handleTransferFilePiece(
private TIoTConsensusV2TransferResp handleTransferFileSeal(
final IoTConsensusV2TsFileSealReq req) {
- LOGGER.info("IoTConsensusV2-PipeName-{}: starting to receive tsFile seal", consensusPipeName);
+ LOGGER.info(
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_STARTING_TO_RECEIVE_TSFILE_SEAL,
+ consensusPipeName);
long startBorrowTsFileWriterNanos = System.nanoTime();
IoTConsensusV2TsFileWriter tsFileWriter =
iotConsensusV2TsFileWriterPool.borrowCorrespondingWriter(req.getCommitId());
@@ -468,12 +471,12 @@ private TIoTConsensusV2TransferResp handleTransferFileSeal(
if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.info(
- "IoTConsensusV2-PipeName-{}: Seal file {} successfully.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_SEAL_FILE_SUCCESSFULLY,
consensusPipeName,
fileAbsolutePath);
} else {
LOGGER.warn(
- "IoTConsensusV2-PipeName-{}: Failed to seal file {}, because {}.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_FAILED_TO_SEAL_FILE_BECAUSE_2,
consensusPipeName,
fileAbsolutePath,
status.getMessage());
@@ -481,7 +484,7 @@ private TIoTConsensusV2TransferResp handleTransferFileSeal(
return new TIoTConsensusV2TransferResp(status);
} catch (IOException e) {
LOGGER.warn(
- "IoTConsensusV2-PipeName-{}: Failed to seal file {} from req {}.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_FAILED_TO_SEAL_FILE_FROM,
consensusPipeName,
writingFile,
req,
@@ -492,7 +495,7 @@ private TIoTConsensusV2TransferResp handleTransferFileSeal(
String.format("Failed to seal file %s because %s", writingFile, e.getMessage())));
} catch (LoadFileException e) {
LOGGER.warn(
- "IoTConsensusV2-PipeName-{}: Failed to load file {} from req {}.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_FAILED_TO_LOAD_FILE_FROM,
consensusPipeName,
writingFile,
req,
@@ -512,7 +515,8 @@ private TIoTConsensusV2TransferResp handleTransferFileSeal(
private TIoTConsensusV2TransferResp handleTransferFileSealWithMods(
final IoTConsensusV2TsFileSealWithModReq req) {
LOGGER.info(
- "IoTConsensusV2-PipeName-{}: starting to receive tsFile seal with mods", consensusPipeName);
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_STARTING_TO_RECEIVE_TSFILE_SEAL_1,
+ consensusPipeName);
long startBorrowTsFileWriterNanos = System.nanoTime();
IoTConsensusV2TsFileWriter tsFileWriter =
iotConsensusV2TsFileWriterPool.borrowCorrespondingWriter(req.getCommitId());
@@ -598,12 +602,12 @@ private TIoTConsensusV2TransferResp handleTransferFileSealWithMods(
if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.info(
- "IoTConsensusV2-PipeName-{}: Seal file with mods {} successfully.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_SEAL_FILE_WITH_MODS_SUCCESSFULLY,
consensusPipeName,
fileAbsolutePaths);
} else {
LOGGER.warn(
- "IoTConsensusV2-PipeName-{}: Failed to seal file {}, status is {}.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_FAILED_TO_SEAL_FILE_STATUS,
consensusPipeName,
fileAbsolutePaths,
status);
@@ -612,7 +616,7 @@ private TIoTConsensusV2TransferResp handleTransferFileSealWithMods(
} catch (Exception e) {
final Throwable rootCause = e instanceof IllegalArgumentException ? e.getCause() : e;
LOGGER.warn(
- "IoTConsensusV2-PipeName-{}: Failed to seal file {} from req {}.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_FAILED_TO_SEAL_FILE_FROM,
consensusPipeName,
req.getFileNames(),
req,
@@ -649,7 +653,7 @@ private TIoTConsensusV2TransferResp checkNonFinalFileSeal(
TSStatusCode.IOT_CONSENSUS_V2_TRANSFER_FILE_ERROR,
String.format("Failed to seal file %s, the file does not exist.", fileName));
LOGGER.warn(
- "IoTConsensusV2-PipeName-{}: Failed to seal file {}, because the file does not exist.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_FAILED_TO_SEAL_FILE_BECAUSE,
consensusPipeName,
fileName);
return new TIoTConsensusV2TransferResp(status);
@@ -694,7 +698,7 @@ private TSStatus loadFileToDataRegion(String filePath, ProgressIndex progressInd
// Data region is null indicates that dr has been removed or migrated. In those cases, there
// is no need to replicate data. we just return success to avoid leader keeping retry
LOGGER.info(
- "IoTConsensusV2-PipeName-{}: skip load tsfile-{} when sealing, because this region has been removed or migrated.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_SKIP_LOAD_TSFILE_WHEN_SEALING,
consensusPipeName,
filePath);
}
@@ -723,7 +727,7 @@ private void updateWritePointCountMetrics(
updateWritePointCountMetrics(counter.count());
} catch (IOException e) {
LOGGER.warn(
- "IoTConsensusV2-PipeName-{}: Failed to read TsFile when counting points: {}.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_FAILED_TO_READ_TSFILE_WHEN,
consensusPipeName,
tsFileAbsolutePath,
e);
@@ -791,7 +795,7 @@ private TIoTConsensusV2TransferResp checkFinalFileSeal(
String.format(
"Failed to seal file %s, because writing file is %s.", fileName, writingFile));
LOGGER.warn(
- "IoTConsensusV2-PipeName-{}: Failed to seal file {}, because writing file is {}.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_FAILED_TO_SEAL_FILE_BECAUSE_1,
consensusPipeName,
fileName,
writingFile);
@@ -832,7 +836,7 @@ private boolean isFileExistedAndNameCorrect(
.equals(resolveWritingFilePath(tsFileWriter, fileName));
} catch (final IOException e) {
LOGGER.warn(
- "IoTConsensusV2-PipeName-{}: Illegal file name {} when checking writing file.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_ILLEGAL_FILE_NAME_WHEN_CHECKING,
consensusPipeName,
fileName,
e);
@@ -848,7 +852,7 @@ private boolean isWritingFileOffsetNonCorrect(
final boolean offsetCorrect = writingFileWriter.length() == offset;
if (!offsetCorrect) {
LOGGER.warn(
- "IoTConsensusV2-PipeName-{}: Writing file {}'s offset is {}, but request sender's offset is {}.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_WRITING_FILE_S_OFFSET_IS,
consensusPipeName,
writingFile.getPath(),
writingFileWriter.length(),
@@ -890,12 +894,12 @@ private void updateWritingFileIfNeeded(
if (!tsFileWriter.getLocalWritingDir().exists()) {
if (tsFileWriter.getLocalWritingDir().mkdirs()) {
LOGGER.info(
- "IoTConsensusV2-PipeName-{}: Receiver file dir {} was created.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_RECEIVER_FILE_DIR_WAS_CREATED,
consensusPipeName,
tsFileWriter.getLocalWritingDir().getPath());
} else {
LOGGER.error(
- "IoTConsensusV2-PipeName-{}: Failed to create receiver file dir {}.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_FAILED_TO_CREATE_RECEIVER_FILE,
consensusPipeName,
tsFileWriter.getLocalWritingDir().getPath());
}
@@ -905,7 +909,7 @@ private void updateWritingFileIfNeeded(
tsFileWriter.setWritingFile(resolveWritingFilePath(tsFileWriter, fileName).toFile());
tsFileWriter.setWritingFileWriter(new RandomAccessFile(tsFileWriter.getWritingFile(), "rw"));
LOGGER.info(
- "IoTConsensusV2-PipeName-{}: Writing file {} was created. Ready to write file pieces.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_WRITING_FILE_WAS_CREATED_READY,
consensusPipeName,
tsFileWriter.getWritingFile().getPath());
}
@@ -917,7 +921,7 @@ private Path resolveWritingFilePath(
tsFileWriter.getLocalWritingDir().toPath(), fileName);
} catch (final IOException e) {
LOGGER.error(
- "IoTConsensusV2-PipeName-{}: Path traversal attempt detected! Filename: {}",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_PATH_TRAVERSAL_ATTEMPT_DETECTED_FILENAME,
consensusPipeName,
fileName);
throw e;
@@ -934,7 +938,7 @@ private void initiateTsFileBufferFolder(List receiverBaseDirsName) throw
final File systemDir = new File(IoTDBDescriptor.getInstance().getConfig().getSystemDir());
if (!systemDir.exists()) {
LOGGER.warn(
- "IoTConsensusV2-PipeName-{}: Failed to create receiver file dir {}. Because parent system dir have been deleted due to system concurrently exit.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_FAILED_TO_CREATE_RECEIVER_FILE_1,
consensusPipeName,
newReceiverDir.getPath());
throw new IOException(
@@ -948,7 +952,7 @@ private void initiateTsFileBufferFolder(List receiverBaseDirsName) throw
if (!newReceiverDir.mkdirs()) {
LOGGER.warn(
- "IoTConsensusV2-PipeName-{}: Failed to create receiver file dir {}. May because authority or dir already exists etc.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_FAILED_TO_CREATE_RECEIVER_FILE_2,
consensusPipeName,
newReceiverDir.getPath());
throw new IOException(
@@ -986,15 +990,16 @@ public synchronized void handleExit() {
scheduledTsFileWriterCheckerPool.shutdownNow();
try {
if (!scheduledTsFileWriterCheckerPool.awaitTermination(30, TimeUnit.SECONDS)) {
- LOGGER.warn("TsFileChecker did not terminate within {}s", 30);
+ LOGGER.warn(DataNodePipeMessages.TSFILECHECKER_DID_NOT_TERMINATE_WITHIN_S, 30);
}
} catch (InterruptedException e) {
- LOGGER.warn("TsFileChecker Thread {} still doesn't exit after 30s", consensusPipeName);
+ LOGGER.warn(
+ DataNodePipeMessages.TSFILECHECKER_THREAD_STILL_DOESN_T_EXIT_AFTER, consensusPipeName);
Thread.currentThread().interrupt();
}
// Clear the tsFileWriters, receiverBuffer and receiver base dirs
requestExecutor.clear(false, true);
- LOGGER.info("Receiver-{} exit successfully.", consensusPipeName.toString());
+ LOGGER.info(DataNodePipeMessages.RECEIVER_EXIT_SUCCESSFULLY, consensusPipeName.toString());
}
public void closeExecutor() {
@@ -1027,7 +1032,7 @@ public IoTConsensusV2TsFileWriterPool(ConsensusPipeName consensusPipeName)
IOTDB_CONFIG.getTsFileWriterCheckInterval(),
TimeUnit.MILLISECONDS);
LOGGER.info(
- "Register {} with interval in seconds {} successfully.",
+ DataNodePipeMessages.REGISTER_WITH_INTERVAL_IN_SECONDS_SUCCESSFULLY,
ThreadName.IOT_CONSENSUS_V2_TSFILE_WRITER_CHECKER.getName(),
IOTDB_CONFIG.getTsFileWriterCheckInterval());
}
@@ -1093,7 +1098,7 @@ private void checkZombieTsFileWriter() {
>= IOTDB_CONFIG.getTsFileWriterZombieThreshold()) {
releaseTsFileWriter(writer, false);
LOGGER.info(
- "IoTConsensusV2-PipeName-{}: tsfile writer-{} is cleaned up because no new requests were received for too long.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_TSFILE_WRITER_IS_CLEANED_UP,
consensusPipeName,
writer.index);
}
@@ -1113,7 +1118,8 @@ public void releaseAllWriters(ConsensusPipeName consensusPipeName) {
} catch (final InterruptedException e) {
Thread.currentThread().interrupt();
LOGGER.warn(
- "IoTConsensusV2-PipeName-{}: receiver thread get interrupted when exiting.",
+ DataNodePipeMessages
+ .IOTCONSENSUSV2_PIPENAME_RECEIVER_THREAD_GET_INTERRUPTED_WHEN,
consensusPipeName.toString());
// avoid infinite loop
break;
@@ -1154,7 +1160,7 @@ public void rollToNextWritingPath() throws IOException, DiskSpaceInsufficientExc
receiverBasePath -> {
if (receiverBasePath == null) {
LOGGER.warn(
- "IoTConsensusV2-PipeName-{}: Failed to get base directory",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_FAILED_TO_GET_BASE_DIRECTORY,
consensusPipeName);
return null;
}
@@ -1167,14 +1173,16 @@ public void rollToNextWritingPath() throws IOException, DiskSpaceInsufficientExc
if (writingDir.mkdirs()) {
LOGGER.info(
- "IoTConsensusV2-PipeName-{}: tsfileWriter-{} roll to writing path {}",
+ DataNodePipeMessages
+ .IOTCONSENSUSV2_PIPENAME_TSFILEWRITER_ROLL_TO_WRITING_PATH,
consensusPipeName,
index,
writingDir.getPath());
return writingDir;
}
LOGGER.warn(
- "IoTConsensusV2-PipeName-{}: Failed to create receiver tsFileWriter-{} file dir {}",
+ DataNodePipeMessages
+ .IOTCONSENSUSV2_PIPENAME_FAILED_TO_CREATE_RECEIVER_TSFILEWRITER,
consensusPipeName,
index,
writingDir.getPath());
@@ -1201,7 +1209,7 @@ public void setWritingFile(File writingFile) {
this.writingFile = writingFile;
if (writingFile == null) {
LOGGER.info(
- "IoTConsensusV2-{}: TsFileWriter-{} set null writing file",
+ DataNodePipeMessages.IOTCONSENSUSV2_TSFILEWRITER_SET_NULL_WRITING_FILE,
consensusPipeName.toString(),
index);
}
@@ -1215,7 +1223,7 @@ public void setWritingFileWriter(RandomAccessFile writingFileWriter) throws IOEx
this.writingFileWriter = writingFileWriter;
if (writingFileWriter == null) {
LOGGER.info(
- "IoTConsensusV2-{}: TsFileWriter-{} set null writing file writer",
+ DataNodePipeMessages.IOTCONSENSUSV2_TSFILEWRITER_SET_NULL_WRITING_FILE_WRITER,
consensusPipeName.toString(),
index);
} else {
@@ -1263,7 +1271,7 @@ public void returnSelf(ConsensusPipeName consensusPipeName)
this.commitIdOfCorrespondingHolderEvent = null;
this.isUsed = false;
LOGGER.info(
- "IoTConsensusV2-PipeName-{}: tsFileWriter-{} returned self",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_TSFILEWRITER_RETURNED_SELF,
consensusPipeName.toString(),
index);
}
@@ -1278,7 +1286,7 @@ private void closeCurrentWritingFileWriter(
}
tsFileWriter.getWritingFileWriter().close();
LOGGER.info(
- "IoTConsensusV2-PipeName-{}: Current writing file writer {} was closed.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_CURRENT_WRITING_FILE_WRITER_WAS,
consensusPipeName,
tsFileWriter.getWritingFile() == null
? "null"
@@ -1286,7 +1294,7 @@ private void closeCurrentWritingFileWriter(
tsFileWriter.setWritingFileWriter(null);
} catch (IOException e) {
LOGGER.warn(
- "IoTConsensusV2-PipeName-{}: Failed to close current writing file writer {}, because {}.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_FAILED_TO_CLOSE_CURRENT_WRITING,
consensusPipeName,
tsFileWriter.getWritingFile() == null
? "null"
@@ -1297,7 +1305,7 @@ private void closeCurrentWritingFileWriter(
} else {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(
- "IoTConsensusV2-PipeName-{}: Current writing file writer is null. No need to close.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_CURRENT_WRITING_FILE_WRITER_IS,
consensusPipeName.toString());
}
}
@@ -1328,13 +1336,13 @@ private void deleteFileOrDirectoryIfExists(File file, boolean deleteDir, String
RetryUtils.retryOnException(() -> FileUtils.delete(file));
}
LOGGER.info(
- "IoTConsensusV2-PipeName-{}: {} {} was deleted.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_WAS_DELETED,
consensusPipeName,
reason,
file.getPath());
} catch (IOException e) {
LOGGER.warn(
- "IoTConsensusV2-PipeName-{}: {} Failed to delete {}, because {}.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_FAILED_TO_DELETE_BECAUSE,
consensusPipeName,
reason,
file.getPath(),
@@ -1344,7 +1352,7 @@ private void deleteFileOrDirectoryIfExists(File file, boolean deleteDir, String
} else {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(
- "IoTConsensusV2-PipeName-{}: {} {} is not existed. No need to delete.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_IS_NOT_EXISTED_NO_NEED,
consensusPipeName,
reason,
file.getPath());
@@ -1367,7 +1375,7 @@ private void releaseTsFileWriter(
tsFileWriter.returnSelf(consensusPipeName);
} catch (IOException | DiskSpaceInsufficientException e) {
LOGGER.warn(
- "IoTConsensusV2-PipeName-{}: Failed to return tsFileWriter {}.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_FAILED_TO_RETURN_TSFILEWRITER,
consensusPipeName,
tsFileWriter,
e);
@@ -1462,7 +1470,7 @@ private TIoTConsensusV2TransferResp onRequest(
}
LOGGER.info(
- "IoTConsensusV2-PipeName-{}: start to receive no.{} event",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_START_TO_RECEIVE_NO_EVENT,
consensusPipeName,
tCommitId);
// Judge whether connector has rebooted or not, if the rebootTimes increases compared to
@@ -1527,7 +1535,7 @@ private TIoTConsensusV2TransferResp onRequest(
if (reqExecutionOrderBuffer.size() >= IOTDB_CONFIG.getIotConsensusV2PipelineSize()
&& reqExecutionOrderBuffer.first().equals(requestMeta)) {
LOGGER.info(
- "IoTConsensusV2-PipeName-{}: no.{} event get executed because receiver buffer's len >= pipeline, current receiver syncIndex {}, current buffer len {}",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_NO_EVENT_GET_EXECUTED_BECAUSE,
consensusPipeName,
tCommitId,
onSyncedReplicateIndex,
@@ -1573,7 +1581,7 @@ private TIoTConsensusV2TransferResp onRequest(
// if current event is the first event in reqBuffer, we can process it.
if (reqExecutionOrderBuffer.first().equals(requestMeta)) {
LOGGER.info(
- "IoTConsensusV2-PipeName-{}: no.{} event get executed after awaiting timeout, current receiver syncIndex: {}",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_NO_EVENT_GET_EXECUTED_AFTER,
consensusPipeName,
tCommitId,
onSyncedReplicateIndex);
@@ -1600,7 +1608,7 @@ private TIoTConsensusV2TransferResp onRequest(
"Waiting for the previous event times out, returns an error to let the sender retry and continue scheduling."));
// TODO: Turn it to debug after GA
LOGGER.info(
- "IoTConsensusV2-{}: Waiting for the previous event times out, current peek {}, current id {}",
+ DataNodePipeMessages.IOTCONSENSUSV2_WAITING_FOR_THE_PREVIOUS_EVENT_TIMES,
consensusPipeName,
reqExecutionOrderBuffer.first().commitId,
tCommitId);
@@ -1610,7 +1618,8 @@ private TIoTConsensusV2TransferResp onRequest(
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
LOGGER.warn(
- "IoTConsensusV2-PipeName-{}: current waiting is interrupted. onSyncedCommitIndex: {}. Exception: ",
+ DataNodePipeMessages
+ .IOTCONSENSUSV2_PIPENAME_CURRENT_WAITING_IS_INTERRUPTED_ONSYNCEDCOMMITINDEX,
consensusPipeName,
tCommitId.getReplicateIndex(),
e);
@@ -1636,7 +1645,7 @@ private TIoTConsensusV2TransferResp onRequest(
*/
private void resetWithNewestRebootTime(int connectorRebootTimes) {
LOGGER.info(
- "IoTConsensusV2-PipeName-{}: receiver detected an newer rebootTimes, which indicates the leader has rebooted. receiver will reset all its data.",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_RECEIVER_DETECTED_AN_NEWER_REBOOTTIMES,
consensusPipeName);
// since pipe task will resend all data that hasn't synchronized after dataNode reboots, it's
// safe to clear all events in buffer.
@@ -1648,7 +1657,8 @@ private void resetWithNewestRebootTime(int connectorRebootTimes) {
private void resetWithNewestRestartTime(int pipeTaskRestartTimes) {
LOGGER.info(
- "IoTConsensusV2-PipeName-{}: receiver detected an newer pipeTaskRestartTimes, which indicates the pipe task has restarted. receiver will reset all its data.",
+ DataNodePipeMessages
+ .IOTCONSENSUSV2_PIPENAME_RECEIVER_DETECTED_AN_NEWER_PIPETASKRESTARTTIMES,
consensusPipeName);
// since pipe task will resend all data that hasn't synchronized after restarts, it's safe to
// clear all events in buffer.
@@ -1658,7 +1668,7 @@ private void resetWithNewestRestartTime(int pipeTaskRestartTimes) {
private void onSuccess(TCommitId commitId, boolean isTransferTsFileSeal) {
LOGGER.info(
- "IoTConsensusV2-PipeName-{}: process no.{} event successfully!",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_PROCESS_NO_EVENT_SUCCESSFULLY,
consensusPipeName,
commitId);
RequestMeta curMeta = reqExecutionOrderBuffer.pollFirst();
@@ -1715,7 +1725,7 @@ private TIoTConsensusV2TransferResp deprecatedResp(String msg, TCommitId tCommit
"IoTConsensusV2 receiver received a deprecated request, which may because %s. Consider to discard it.",
msg)));
LOGGER.info(
- "IoTConsensusV2-PipeName-{}: received a deprecated request-{}, which may because {}. ",
+ DataNodePipeMessages.IOTCONSENSUSV2_PIPENAME_RECEIVED_A_DEPRECATED_REQUEST_WHICH,
consensusPipeName,
tCommitId,
msg);
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/iotconsensusv2/IoTConsensusV2ReceiverAgent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/iotconsensusv2/IoTConsensusV2ReceiverAgent.java
index be2659c1cf4ac..35cc8a5957357 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/iotconsensusv2/IoTConsensusV2ReceiverAgent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/iotconsensusv2/IoTConsensusV2ReceiverAgent.java
@@ -33,6 +33,7 @@
import org.apache.iotdb.consensus.pipe.consensuspipe.ConsensusPipeReceiver;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.consensus.DataRegionConsensusImpl;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -101,9 +102,7 @@ public static TIoTConsensusV2TransferResp closedResp(String consensusInfo, TComm
TSStatusCode.IOT_CONSENSUS_V2_CLOSE_ERROR,
"IoTConsensusV2 receiver received a request after it was closed."));
LOGGER.info(
- "IoTConsensusV2-{}: receive on-the-fly no.{} event after data region was deleted, discard it",
- consensusInfo,
- tCommitId);
+ DataNodePipeMessages.IOTCONSENSUSV2_RECEIVE_ON_THE_FLY_NO_EVENT, consensusInfo, tCommitId);
return new TIoTConsensusV2TransferResp(status);
}
@@ -126,7 +125,8 @@ public TIoTConsensusV2TransferResp receive(TIoTConsensusV2TransferReq req) {
TSStatusCode.IOT_CONSENSUS_V2_VERSION_ERROR,
String.format("Unknown IoTConsensusV2RequestVersion %s.", reqVersion));
LOGGER.warn(
- "IoTConsensusV2: Unknown IoTConsensusV2RequestVersion, response status = {}.", status);
+ DataNodePipeMessages.IOTCONSENSUSV2_UNKNOWN_IOTCONSENSUSV2REQUESTVERSION_RESPONSE_STATUS,
+ status);
return new TIoTConsensusV2TransferResp(status);
}
}
@@ -196,7 +196,7 @@ private IoTConsensusV2Receiver internalSetAndGetReceiver(
RECEIVER_CONSTRUCTORS
.get(reqVersion)
.apply(iotConsensusV2, consensusGroupId, consensusPipeName));
- LOGGER.info("Receiver-{} is ready", consensusPipeName);
+ LOGGER.info(DataNodePipeMessages.RECEIVER_IS_READY, consensusPipeName);
} else {
throw new UnsupportedOperationException(
String.format("Unsupported iotConsensusV2 request version %d", reqVersion));
@@ -216,8 +216,7 @@ private void waitUntilReceiverGetInitiated(
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
LOGGER.warn(
- "IoTConsensusV2Receiver thread is interrupted when waiting for receiver get initiated, may because system exit.",
- e);
+ DataNodePipeMessages.IOTCONSENSUSV2RECEIVER_THREAD_IS_INTERRUPTED_WHEN_WAITING_FOR, e);
}
}
@@ -252,7 +251,7 @@ public final void releaseReceiverResource(DataRegionId dataRegionId) {
this.replicaReceiverMap.remove(dataRegionId);
// 4. GC receiver map
consensusPipe2ReciverMap.clear();
- LOGGER.info("All Receivers related to {} are released.", dataRegionId);
+ LOGGER.info(DataNodePipeMessages.ALL_RECEIVERS_RELATED_TO_ARE_RELEASED, dataRegionId);
} finally {
receiverLifeCircleLock.writeLock().unlock();
}
@@ -265,7 +264,7 @@ public final void closeReceiverExecutor() {
(consensusPipeName, receiverReference) -> {
if (receiverReference != null) {
receiverReference.get().closeExecutor();
- LOGGER.info("Receivers-{}' executor is closed.", consensusPipeName);
+ LOGGER.info(DataNodePipeMessages.RECEIVERS_EXECUTOR_IS_CLOSED, consensusPipeName);
}
});
});
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/IoTDBLegacyPipeReceiverAgent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/IoTDBLegacyPipeReceiverAgent.java
index 073286cab81ae..bc7b2e1a2eba0 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/IoTDBLegacyPipeReceiverAgent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/IoTDBLegacyPipeReceiverAgent.java
@@ -29,6 +29,7 @@
import org.apache.iotdb.commons.utils.FileUtils;
import org.apache.iotdb.db.auth.AuthorityChecker;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.sink.payload.legacy.PipeData;
import org.apache.iotdb.db.pipe.sink.payload.legacy.TsFilePipeData;
import org.apache.iotdb.db.protocol.session.SessionManager;
@@ -111,7 +112,9 @@ public TSStatus handshake(
}
final SyncIdentityInfo identityInfo = new SyncIdentityInfo(syncIdentityInfo, remoteAddress);
- LOGGER.info("Invoke handshake method from client ip = {}", identityInfo.getRemoteAddress());
+ LOGGER.info(
+ DataNodePipeMessages.INVOKE_HANDSHAKE_METHOD_FROM_CLIENT_IP,
+ identityInfo.getRemoteAddress());
if (!new File(getFileDataDir(identityInfo)).exists()) {
new File(getFileDataDir(identityInfo)).mkdirs();
@@ -171,11 +174,13 @@ private boolean registerDatabase(
&& result.status.code != TSStatusCode.DATABASE_ALREADY_EXISTS.getStatusCode()
&& result.status.code != TSStatusCode.DATABASE_CONFLICT.getStatusCode()) {
LOGGER.error(
- "Create Database error, statement: {}, result status : {}.", statement, result.status);
+ DataNodePipeMessages.CREATE_DATABASE_ERROR_STATEMENT_RESULT_STATUS,
+ statement,
+ result.status);
return false;
}
} catch (final IllegalPathException e) {
- LOGGER.error("Parse database PartialPath {} error", database, e);
+ LOGGER.error(DataNodePipeMessages.PARSE_DATABASE_PARTIALPATH_ERROR, database, e);
return false;
}
@@ -195,10 +200,11 @@ public TSStatus transportPipeData(final ByteBuffer buff) throws TException {
// step1. check connection
final SyncIdentityInfo identityInfo = getCurrentSyncIdentityInfo();
if (identityInfo == null) {
- throw new TException("Thrift connection is not alive.");
+ throw new TException(DataNodePipeMessages.THRIFT_CONNECTION_IS_NOT_ALIVE);
}
LOGGER.debug(
- "Invoke transportPipeData method from client ip = {}", identityInfo.getRemoteAddress());
+ DataNodePipeMessages.INVOKE_TRANSPORTPIPEDATA_METHOD_FROM_CLIENT_IP,
+ identityInfo.getRemoteAddress());
final String fileDir = getFileDataDir(identityInfo);
// step2. deserialize PipeData
@@ -214,23 +220,24 @@ public TSStatus transportPipeData(final ByteBuffer buff) throws TException {
handleTsFilePipeData(tsFilePipeData, fileDir);
}
} catch (final IOException e) {
- LOGGER.error("Pipe data transport error, {}", e.getMessage());
+ LOGGER.error(DataNodePipeMessages.PIPE_DATA_TRANSPORT_ERROR, e.getMessage());
return RpcUtils.getStatus(
TSStatusCode.PIPESERVER_ERROR, "Pipe data transport error, " + e.getMessage());
}
// step3. load PipeData
LOGGER.info(
- "Start load pipeData with serialize number {} and type {},value={}",
+ DataNodePipeMessages.START_LOAD_PIPEDATA_WITH_SERIALIZE_NUMBER_AND,
pipeData.getSerialNumber(),
pipeData.getPipeDataType(),
pipeData);
try {
pipeData.createLoader().load();
LOGGER.info(
- "Load pipeData with serialize number {} successfully.", pipeData.getSerialNumber());
+ DataNodePipeMessages.LOAD_PIPEDATA_WITH_SERIALIZE_NUMBER_SUCCESSFULLY,
+ pipeData.getSerialNumber());
} catch (final PipeException e) {
- LOGGER.error("Fail to load pipeData because {}.", e.getMessage());
+ LOGGER.error(DataNodePipeMessages.FAIL_TO_LOAD_PIPEDATA_BECAUSE, e.getMessage());
return RpcUtils.getStatus(
TSStatusCode.PIPESERVER_ERROR, "Fail to load pipeData because " + e.getMessage());
}
@@ -273,7 +280,7 @@ private void handleTsFilePipeData(final TsFilePipeData tsFilePipeData, final Str
.getName()
.substring(0, targetFile.getName().length() - PATCH_SUFFIX.length()));
if (!targetFile.renameTo(newFile)) {
- LOGGER.error("Fail to rename file {} to {}", targetFile, newFile);
+ LOGGER.error(DataNodePipeMessages.FAIL_TO_RENAME_FILE_TO, targetFile, newFile);
}
}
}
@@ -294,10 +301,11 @@ public TSStatus transportFile(final TSyncTransportMetaInfo metaInfo, final ByteB
// step1. check connection
final SyncIdentityInfo identityInfo = getCurrentSyncIdentityInfo();
if (identityInfo == null) {
- throw new TException("Thrift connection is not alive.");
+ throw new TException(DataNodePipeMessages.THRIFT_CONNECTION_IS_NOT_ALIVE);
}
LOGGER.debug(
- "Invoke transportData method from client ip = {}", identityInfo.getRemoteAddress());
+ DataNodePipeMessages.INVOKE_TRANSPORTDATA_METHOD_FROM_CLIENT_IP,
+ identityInfo.getRemoteAddress());
final String fileDir = getFileDataDir(identityInfo);
final String fileName = metaInfo.fileName;
@@ -318,7 +326,8 @@ public TSStatus transportFile(final TSyncTransportMetaInfo metaInfo, final ByteB
buff.get(byteArray);
randomAccessFile.write(byteArray);
recordStartIndex(new File(fileDir, fileName), startIndex + length);
- LOGGER.debug("Sync {} start at {} to {} is done.", fileName, startIndex, startIndex + length);
+ LOGGER.debug(
+ DataNodePipeMessages.SYNC_START_AT_TO_IS_DONE, fileName, startIndex, startIndex + length);
} catch (final IOException e) {
LOGGER.error(e.getMessage());
return RpcUtils.getStatus(TSStatusCode.SYNC_FILE_ERROR, e.getMessage());
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/loader/DeletionLoader.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/loader/DeletionLoader.java
index ccb91ceca964f..ad991483e45bc 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/loader/DeletionLoader.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/loader/DeletionLoader.java
@@ -25,6 +25,7 @@
import org.apache.iotdb.db.auth.AuthorityChecker;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.exception.load.LoadFileException;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.protocol.session.SessionManager;
import org.apache.iotdb.db.queryengine.plan.Coordinator;
import org.apache.iotdb.db.queryengine.plan.execution.ExecutionResult;
@@ -55,7 +56,7 @@ public DeletionLoader(Deletion deletion) {
@Override
public void load() throws PipeException {
if (CommonDescriptor.getInstance().getConfig().isReadOnly()) {
- throw new PipeException("storage engine readonly");
+ throw new PipeException(DataNodePipeMessages.STORAGE_ENGINE_READONLY);
}
try {
Statement statement = generateStatement();
@@ -79,8 +80,8 @@ public void load() throws PipeException {
false,
statement.isDebug());
if (result.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.error("Delete {} error, statement: {}.", deletion, statement);
- LOGGER.error("Delete result status : {}.", result.status);
+ LOGGER.error(DataNodePipeMessages.DELETE_ERROR_STATEMENT, deletion, statement);
+ LOGGER.error(DataNodePipeMessages.DELETE_RESULT_STATUS, result.status);
throw new LoadFileException(
String.format("Can not execute delete statement: %s", statement));
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/loader/TsFileLoader.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/loader/TsFileLoader.java
index 1c95b574b54c7..e32a8bbf8d629 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/loader/TsFileLoader.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/legacy/loader/TsFileLoader.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.db.auth.AuthorityChecker;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.exception.load.LoadFileException;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.protocol.session.SessionManager;
import org.apache.iotdb.db.queryengine.plan.Coordinator;
import org.apache.iotdb.db.queryengine.plan.execution.ExecutionResult;
@@ -82,8 +83,8 @@ public void load() {
false,
statement.isDebug());
if (result.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- LOGGER.error("Load TsFile {} error, statement: {}.", tsFile.getPath(), statement);
- LOGGER.error("Load TsFile result status : {}.", result.status);
+ LOGGER.error(DataNodePipeMessages.LOAD_TSFILE_ERROR_STATEMENT, tsFile.getPath(), statement);
+ LOGGER.error(DataNodePipeMessages.LOAD_TSFILE_RESULT_STATUS, result.status);
throw new LoadFileException(
String.format("Can not execute load TsFile statement: %s", statement));
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java
index c10fdbc4f6720..e1f296fb81b2d 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java
@@ -46,6 +46,7 @@
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.exception.DiskSpaceInsufficientException;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionSnapshotEvent;
import org.apache.iotdb.db.pipe.metric.receiver.PipeDataNodeReceiverMetrics;
@@ -189,9 +190,7 @@ public class IoTDBDataNodeReceiver extends IoTDBFileReceiver {
new FolderManager(
Arrays.asList(RECEIVER_FILE_BASE_DIRS), DirectoryStrategyType.SEQUENCE_STRATEGY);
} catch (final DiskSpaceInsufficientException e) {
- LOGGER.error(
- "Fail to create pipe receiver file folders allocation strategy because all disks of folders are full.",
- e);
+ LOGGER.error(DataNodePipeMessages.FAIL_TO_CREATE_PIPE_RECEIVER_FILE_FOLDERS, e);
}
}
@@ -445,7 +444,7 @@ public synchronized TPipeTransferResp receive(final TPipeTransferReq req) {
TSStatusCode.PIPE_TYPE_ERROR,
String.format("Unknown PipeRequestType %s.", rawRequestType));
LOGGER.warn(
- "Receiver id = {}: Unknown PipeRequestType, response status = {}.",
+ DataNodePipeMessages.RECEIVER_ID_UNKNOWN_PIPEREQUESTTYPE_RESPONSE_STATUS,
receiverId.get(),
status);
return new TPipeTransferResp(status);
@@ -578,7 +577,7 @@ private TSStatus loadTsFileAsync(final String dataBaseName, final List a
shouldMarkAsPipeRequest.get());
if (!LoadUtil.loadFilesToActiveDir(loadAttributes, absolutePaths, true)) {
- throw new PipeException("Load active listening pipe dir is not set.");
+ throw new PipeException(DataNodePipeMessages.LOAD_ACTIVE_LISTENING_PIPE_DIR_IS_NOT);
}
return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
}
@@ -761,7 +760,7 @@ private TSStatus executeBatchStatementAndAddRedirectInfo(final InsertBaseStateme
devicePaths = ((InsertMultiTabletsStatement) statement).getDevicePaths();
} else {
LOGGER.warn(
- "Receiver id = {}: Unsupported statement type {} for redirection.",
+ DataNodePipeMessages.RECEIVER_ID_UNSUPPORTED_STATEMENT_TYPE_FOR_REDIRECTION,
receiverId.get(),
statement);
return result;
@@ -775,7 +774,7 @@ private TSStatus executeBatchStatementAndAddRedirectInfo(final InsertBaseStateme
}
} else {
LOGGER.warn(
- "Receiver id = {}: The number of device paths is not equal to sub-status in statement {}: {}.",
+ DataNodePipeMessages.RECEIVER_ID_THE_NUMBER_OF_DEVICE_PATHS,
receiverId.get(),
statement,
result);
@@ -816,7 +815,7 @@ private TSStatus executeStatementAndClassifyExceptions(
PipeDataNodeResourceManager.memory().getFreeMemorySizeInBytes(),
PipeDataNodeResourceManager.memory().getTotalNonFloatingMemorySizeInBytes());
if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("Receiver id = {}: {}", receiverId.get(), message, e);
+ LOGGER.debug(DataNodePipeMessages.RECEIVER_ID, receiverId.get(), message, e);
}
return new TSStatus(
TSStatusCode.PIPE_RECEIVER_TEMPORARY_UNAVAILABLE_EXCEPTION.getStatusCode())
@@ -1031,7 +1030,8 @@ private void autoCreateDatabaseIfNecessary(final String database) {
if (e instanceof InterruptedException) {
Thread.currentThread().interrupt();
}
- throw new PipeException("Auto create database failed because: " + e.getMessage());
+ throw new PipeException(
+ DataNodePipeMessages.AUTO_CREATE_DATABASE_FAILED_BECAUSE + e.getMessage());
}
}
@@ -1105,7 +1105,8 @@ public synchronized void handleExit() {
try {
ClusterConfigTaskExecutor.getInstance().handlePipeConfigClientExit(configReceiverId.get());
} catch (final Exception e) {
- LOGGER.warn("Failed to handle config client (id = {}) exit", configReceiverId.get(), e);
+ LOGGER.warn(
+ DataNodePipeMessages.FAILED_TO_HANDLE_CONFIG_CLIENT_ID_EXIT, configReceiverId.get(), e);
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/transform/converter/ValueConverter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/transform/converter/ValueConverter.java
index 2f1f219c8ed22..db81d583aa335 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/transform/converter/ValueConverter.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/transform/converter/ValueConverter.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.db.pipe.receiver.transform.converter;
import org.apache.iotdb.commons.queryengine.utils.DateTimeUtils;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.utils.DataNodeDateTimeUtils;
import org.apache.iotdb.db.utils.TypeInferenceUtils;
@@ -751,7 +752,8 @@ public static Object parse(final String value, final TSDataType dataType) {
case STRING:
return parseString(value);
default:
- throw new UnsupportedOperationException("Unsupported data type: " + dataType);
+ throw new UnsupportedOperationException(
+ DataNodePipeMessages.UNSUPPORTED_DATA_TYPE + dataType);
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/transform/statement/PipeConvertedInsertTabletStatement.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/transform/statement/PipeConvertedInsertTabletStatement.java
index cf03c7c0c94b9..24f8a793c1c15 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/transform/statement/PipeConvertedInsertTabletStatement.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/transform/statement/PipeConvertedInsertTabletStatement.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.db.pipe.receiver.transform.statement;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.receiver.transform.converter.ArrayConverter;
import org.apache.iotdb.db.queryengine.plan.statement.crud.InsertTabletStatement;
@@ -98,7 +99,7 @@ public PipeConvertedInsertTabletStatement(final InsertTabletStatement insertTabl
@Override
protected boolean checkAndCastDataType(int columnIndex, TSDataType dataType) {
LOGGER.info(
- "Pipe: Inserting tablet to {}.{}. Casting type from {} to {}.",
+ DataNodePipeMessages.PIPE_INSERTING_TABLET_TO_CASTING_TYPE_FROM,
devicePath,
measurements[columnIndex],
dataTypes[columnIndex],
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeTableStatementDataTypeConvertExecutionVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeTableStatementDataTypeConvertExecutionVisitor.java
index 10d0423e6fe93..908b0055afe54 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeTableStatementDataTypeConvertExecutionVisitor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeTableStatementDataTypeConvertExecutionVisitor.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.commons.pipe.datastructure.pattern.TablePattern;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent;
import org.apache.iotdb.db.pipe.event.common.tsfile.parser.table.TsFileInsertionEventTableParser;
import org.apache.iotdb.db.pipe.receiver.protocol.thrift.IoTDBDataNodeReceiver;
@@ -77,14 +78,13 @@ private Optional tryExecute(final Statement statement, final String da
try {
if (Objects.isNull(databaseName)) {
LOGGER.warn(
- "Database name is unexpectedly null for statement: {}. Skip data type conversion.",
- statement);
+ DataNodePipeMessages.DATABASE_NAME_IS_UNEXPECTEDLY_NULL_FOR_STATEMENT, statement);
return Optional.empty();
}
return Optional.of(statementExecutor.execute(statement, databaseName));
} catch (final Exception e) {
- LOGGER.warn("Failed to execute statement after data type conversion.", e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_EXECUTE_STATEMENT_AFTER_DATA_TYPE, e);
return Optional.empty();
}
}
@@ -104,7 +104,7 @@ public Optional visitLoadFile(
if (Objects.isNull(databaseName)) {
LOGGER.warn(
- "Database name is unexpectedly null for LoadTsFileStatement: {}. Skip data type conversion.",
+ DataNodePipeMessages.DATABASE_NAME_IS_UNEXPECTEDLY_NULL_FOR_LOADTSFILESTATEMENT,
loadTsFileStatement);
return Optional.empty();
}
@@ -120,7 +120,7 @@ public Optional visitLoadFile(
}
LOGGER.warn(
- "Data type mismatch detected (TSStatus: {}) for LoadTsFileStatement: {}. Start data type conversion.",
+ DataNodePipeMessages.DATA_TYPE_MISMATCH_DETECTED_TSSTATUS_FOR_LOADTSFILESTATEMENT,
status,
loadTsFileStatement);
@@ -185,7 +185,9 @@ public Optional visitLoadFile(
}
} catch (final Exception e) {
LOGGER.warn(
- "Failed to convert data type for LoadTsFileStatement: {}.", loadTsFileStatement, e);
+ DataNodePipeMessages.FAILED_TO_CONVERT_DATA_TYPE_FOR_LOADTSFILESTATEMENT,
+ loadTsFileStatement,
+ e);
return Optional.empty();
}
}
@@ -195,7 +197,8 @@ public Optional visitLoadFile(
}
LOGGER.warn(
- "Data type conversion for LoadTsFileStatement {} is successful.", loadTsFileStatement);
+ DataNodePipeMessages.DATA_TYPE_CONVERSION_FOR_LOADTSFILESTATEMENT_IS_SUCCESSFUL,
+ loadTsFileStatement);
return Optional.of(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()));
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeTreeStatementDataTypeConvertExecutionVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeTreeStatementDataTypeConvertExecutionVisitor.java
index ae60b87450aa8..e78e273fc0c0f 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeTreeStatementDataTypeConvertExecutionVisitor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/visitor/PipeTreeStatementDataTypeConvertExecutionVisitor.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.commons.pipe.datastructure.pattern.IoTDBTreePattern;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.tsfile.parser.scan.TsFileInsertionEventScanParser;
import org.apache.iotdb.db.pipe.receiver.protocol.thrift.IoTDBDataNodeReceiver;
import org.apache.iotdb.db.pipe.receiver.transform.statement.PipeConvertedInsertRowStatement;
@@ -74,7 +75,7 @@ private Optional tryExecute(final Statement statement) {
try {
return Optional.of(statementExecutor.execute(statement));
} catch (final Exception e) {
- LOGGER.warn("Failed to execute statement after data type conversion.", e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_EXECUTE_STATEMENT_AFTER_DATA_TYPE, e);
return Optional.empty();
}
}
@@ -95,7 +96,7 @@ public Optional visitLoadFile(
}
LOGGER.warn(
- "Data type mismatch detected (TSStatus: {}) for LoadTsFileStatement: {}. Start data type conversion.",
+ DataNodePipeMessages.DATA_TYPE_MISMATCH_DETECTED_TSSTATUS_FOR_LOADTSFILESTATEMENT,
status,
loadTsFileStatement);
@@ -145,7 +146,9 @@ file, new IoTDBTreePattern(null), Long.MIN_VALUE, Long.MAX_VALUE, null, null, tr
}
} catch (final Exception e) {
LOGGER.warn(
- "Failed to convert data type for LoadTsFileStatement: {}.", loadTsFileStatement, e);
+ DataNodePipeMessages.FAILED_TO_CONVERT_DATA_TYPE_FOR_LOADTSFILESTATEMENT,
+ loadTsFileStatement,
+ e);
return Optional.empty();
}
}
@@ -155,7 +158,8 @@ file, new IoTDBTreePattern(null), Long.MIN_VALUE, Long.MAX_VALUE, null, null, tr
}
LOGGER.warn(
- "Data type conversion for LoadTsFileStatement {} is successful.", loadTsFileStatement);
+ DataNodePipeMessages.DATA_TYPE_CONVERSION_FOR_LOADTSFILESTATEMENT_IS_SUCCESSFUL,
+ loadTsFileStatement);
return Optional.of(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()));
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeHardlinkOrCopiedFileDirStartupCleaner.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeHardlinkOrCopiedFileDirStartupCleaner.java
index 26c2190e7e60b..0ee642e7d52e7 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeHardlinkOrCopiedFileDirStartupCleaner.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeHardlinkOrCopiedFileDirStartupCleaner.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.commons.pipe.resource.snapshot.PipeSnapshotResourceManager;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.tsfile.external.commons.io.FileUtils;
import org.slf4j.Logger;
@@ -50,7 +51,7 @@ private static void cleanTsFileDir() {
dataDir + File.separator + PipeConfig.getInstance().getPipeHardlinkBaseDirName());
if (pipeHardLinkDir.isDirectory()) {
LOGGER.info(
- "Pipe hardlink dir found, deleting it: {}, result: {}",
+ DataNodePipeMessages.PIPE_HARDLINK_DIR_FOUND_DELETING_IT_RESULT,
pipeHardLinkDir,
FileUtils.deleteQuietly(pipeHardLinkDir));
}
@@ -64,7 +65,7 @@ private static void cleanSnapshotDir() {
+ File.separator
+ PipeSnapshotResourceManager.PIPE_SNAPSHOT_DIR_NAME);
if (iotConsensusV2Dir.isDirectory()) {
- LOGGER.info("Pipe snapshot dir found, deleting it: {},", iotConsensusV2Dir);
+ LOGGER.info(DataNodePipeMessages.PIPE_SNAPSHOT_DIR_FOUND_DELETING_IT, iotConsensusV2Dir);
org.apache.iotdb.commons.utils.FileUtils.deleteFileOrDirectory(iotConsensusV2Dir);
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/log/PipePeriodicalLogReducer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/log/PipePeriodicalLogReducer.java
index 3f5a013320d68..946450192c5f7 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/log/PipePeriodicalLogReducer.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/log/PipePeriodicalLogReducer.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.db.pipe.resource.log;
import org.apache.iotdb.commons.pipe.config.PipeConfig;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlock;
@@ -78,7 +79,8 @@ public static void update() {
PipeDataNodeResourceManager.memory()
.resize(block, PipeConfig.getInstance().getPipeLoggerCacheMaxSizeInBytes(), false);
LOGGER.info(
- "PipePeriodicalLogReducer is allocated to {} bytes.", block.getMemoryUsageInBytes());
+ DataNodePipeMessages.PIPEPERIODICALLOGREDUCER_IS_ALLOCATED_TO_BYTES,
+ block.getMemoryUsageInBytes());
loggerCache
.policy()
.eviction()
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/InsertNodeMemoryEstimator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/InsertNodeMemoryEstimator.java
index 7f1d7357b02e4..db6b48a7d38b5 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/InsertNodeMemoryEstimator.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/InsertNodeMemoryEstimator.java
@@ -27,6 +27,7 @@
import org.apache.iotdb.commons.consensus.index.ProgressIndex;
import org.apache.iotdb.commons.path.PartialPath;
import org.apache.iotdb.commons.schema.table.column.TsTableColumnCategory;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertMultiTabletsNode;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowNode;
@@ -175,7 +176,7 @@ public static long sizeOf(final InsertNode insertNode) {
return 0L;
}
} catch (Exception e) {
- LOGGER.warn("Failed to estimate size for InsertNode: {}", e.getMessage(), e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_ESTIMATE_SIZE_FOR_INSERTNODE, e.getMessage(), e);
return 0L;
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeFixedMemoryBlock.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeFixedMemoryBlock.java
index 5b2eaa7c09e4d..47073fbbedd13 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeFixedMemoryBlock.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeFixedMemoryBlock.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.db.pipe.resource.memory;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
+
import java.util.function.BiConsumer;
import java.util.function.LongUnaryOperator;
@@ -41,24 +43,24 @@ boolean expand() {
@Override
public PipeMemoryBlock setShrinkMethod(LongUnaryOperator shrinkMethod) {
throw new UnsupportedOperationException(
- "Shrink method is not supported in PipeFixedMemoryBlock");
+ DataNodePipeMessages.SHRINK_METHOD_IS_NOT_SUPPORTED_IN_PIPEFIXEDMEMORYBLOCK);
}
@Override
public PipeMemoryBlock setShrinkCallback(BiConsumer shrinkCallback) {
throw new UnsupportedOperationException(
- "Shrink callback is not supported in PipeFixedMemoryBlock");
+ DataNodePipeMessages.SHRINK_CALLBACK_IS_NOT_SUPPORTED_IN_PIPEFIXEDMEMORYBLOCK);
}
@Override
public PipeMemoryBlock setExpandMethod(LongUnaryOperator extendMethod) {
throw new UnsupportedOperationException(
- "Expand method is not supported in PipeFixedMemoryBlock");
+ DataNodePipeMessages.EXPAND_METHOD_IS_NOT_SUPPORTED_IN_PIPEFIXEDMEMORYBLOCK);
}
@Override
public PipeMemoryBlock setExpandCallback(BiConsumer expandCallback) {
throw new UnsupportedOperationException(
- "Expand callback is not supported in PipeFixedMemoryBlock");
+ DataNodePipeMessages.EXPAND_CALLBACK_IS_NOT_SUPPORTED_IN_PIPEFIXEDMEMORYBLOCK);
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryBlock.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryBlock.java
index 4218cc8f754cd..72ba8e35ea0f4 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryBlock.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryBlock.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.db.pipe.resource.memory;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
import org.slf4j.Logger;
@@ -112,7 +113,7 @@ private boolean doShrink() {
try {
shrinkCallback.get().accept(oldMemorySizeInBytes, newMemorySizeInBytes);
} catch (Exception e) {
- LOGGER.warn("Failed to execute the shrink callback.", e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_EXECUTE_THE_SHRINK_CALLBACK, e);
}
}
return true;
@@ -147,7 +148,7 @@ private boolean doExpand() {
try {
expandCallback.get().accept(oldMemorySizeInBytes, newMemorySizeInBytes);
} catch (Exception e) {
- LOGGER.warn("Failed to execute the expand callback.", e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_EXECUTE_THE_EXPAND_CALLBACK, e);
}
}
return true;
@@ -187,7 +188,7 @@ public void close() {
pipeMemoryManager.removeExpandableBlock(this);
}
if (isInterrupted) {
- LOGGER.warn("{} is released after thread interruption.", this);
+ LOGGER.warn(DataNodePipeMessages.IS_RELEASED_AFTER_THREAD_INTERRUPTION, this);
}
break;
} finally {
@@ -198,7 +199,7 @@ public void close() {
// Each time the close task is run, it means that the interrupt status left by the previous
// tryLock does not need to be retained. Otherwise, it will lead to an infinite loop.
isInterrupted = true;
- LOGGER.warn("Interrupted while waiting for the lock.", e);
+ LOGGER.warn(DataNodePipeMessages.INTERRUPTED_WHILE_WAITING_FOR_THE_LOCK, e);
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryManager.java
index d806a7450dc6e..c99efe5e3da3e 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryManager.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryManager.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.commons.memory.MemoryBlockType;
import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.pipe.resource.memory.strategy.ThresholdAllocationStrategy;
@@ -160,7 +161,10 @@ public PipeTabletMemoryBlock forceAllocateForTabletWithRetry(long tabletSizeInBy
Thread.sleep(PIPE_CONFIG.getPipeMemoryAllocateRetryIntervalInMs());
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
- LOGGER.warn("forceAllocateWithRetry: interrupted while waiting for available memory", ex);
+ LOGGER.warn(
+ DataNodePipeMessages
+ .FORCEALLOCATEWITHRETRY_INTERRUPTED_WHILE_WAITING_FOR_AVAILABLE_MEMORY,
+ ex);
}
}
@@ -202,7 +206,10 @@ public PipeTsFileMemoryBlock forceAllocateForTsFileWithRetry(long tsFileSizeInBy
Thread.sleep(PIPE_CONFIG.getPipeMemoryAllocateRetryIntervalInMs());
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
- LOGGER.warn("forceAllocateWithRetry: interrupted while waiting for available memory", ex);
+ LOGGER.warn(
+ DataNodePipeMessages
+ .FORCEALLOCATEWITHRETRY_INTERRUPTED_WHILE_WAITING_FOR_AVAILABLE_MEMORY,
+ ex);
}
}
@@ -245,7 +252,10 @@ public PipeModelFixedMemoryBlock forceAllocateForModelFixedMemoryBlock(
Thread.sleep(PIPE_CONFIG.getPipeMemoryAllocateRetryIntervalInMs());
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
- LOGGER.warn("forceAllocateWithRetry: interrupted while waiting for available memory", ex);
+ LOGGER.warn(
+ DataNodePipeMessages
+ .FORCEALLOCATEWITHRETRY_INTERRUPTED_WHILE_WAITING_FOR_AVAILABLE_MEMORY,
+ ex);
}
}
@@ -286,7 +296,8 @@ private PipeMemoryBlock forceAllocateWithRetry(long sizeInBytes, PipeMemoryBlock
this.wait(PIPE_CONFIG.getPipeMemoryAllocateRetryIntervalInMs());
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- LOGGER.warn("forceAllocate: interrupted while waiting for available memory", e);
+ LOGGER.warn(
+ DataNodePipeMessages.FORCEALLOCATE_INTERRUPTED_WHILE_WAITING_FOR_AVAILABLE_MEMORY, e);
}
}
@@ -308,7 +319,7 @@ public void forceResize(final PipeMemoryBlock block, final long targetSize) {
public synchronized void resize(
final PipeMemoryBlock block, final long targetSize, final boolean force) {
if (block == null || block.isReleased()) {
- LOGGER.warn("forceResize: cannot resize a null or released memory block");
+ LOGGER.warn(DataNodePipeMessages.FORCERESIZE_CANNOT_RESIZE_A_NULL_OR_RELEASED);
return;
}
@@ -363,7 +374,8 @@ public synchronized void resize(
this.wait(PIPE_CONFIG.getPipeMemoryAllocateRetryIntervalInMs());
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- LOGGER.warn("forceResize: interrupted while waiting for available memory", e);
+ LOGGER.warn(
+ DataNodePipeMessages.FORCERESIZE_INTERRUPTED_WHILE_WAITING_FOR_AVAILABLE_MEMORY, e);
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryWeightUtil.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryWeightUtil.java
index 833bd3577eb68..7693c8ff51250 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryWeightUtil.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryWeightUtil.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.row.PipeRow;
import org.apache.iotdb.db.utils.MemUtils;
@@ -316,7 +317,7 @@ public static long calculateAlignedChunkMetaBytesUsed(
*/
private static int roundUpToMultiple(int num, int n) {
if (n == 0) {
- throw new IllegalArgumentException("The multiple n must be greater than 0");
+ throw new IllegalArgumentException(DataNodePipeMessages.THE_MULTIPLE_N_MUST_BE_GREATER_THAN);
}
// Calculate the rounded up value to the nearest multiple of n
return ((num + n - 1) / n) * n;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeModelFixedMemoryBlock.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeModelFixedMemoryBlock.java
index 647fb81a4b91c..90b3d0329f153 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeModelFixedMemoryBlock.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeModelFixedMemoryBlock.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.db.pipe.resource.memory;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.resource.memory.strategy.DynamicMemoryAllocationStrategy;
import java.util.Collections;
@@ -83,7 +84,7 @@ synchronized void releaseMemory(final PipeDynamicMemoryBlock memoryBlock) {
synchronized void dynamicallyAdjustMemory(final PipeDynamicMemoryBlock block) {
if (this.isReleased() || block.isReleased() || !memoryBlocks.contains(block)) {
- throw new IllegalStateException("The memory block has been released");
+ throw new IllegalStateException(DataNodePipeMessages.THE_MEMORY_BLOCK_HAS_BEEN_RELEASED);
}
allocationStrategy.dynamicallyAdjustMemory(block);
}
@@ -91,7 +92,7 @@ synchronized void dynamicallyAdjustMemory(final PipeDynamicMemoryBlock block) {
synchronized void resetMemoryBlockSize(
final PipeDynamicMemoryBlock block, final long memorySizeInBytes) {
if (this.isReleased() || block.isReleased() || !memoryBlocks.contains(block)) {
- throw new IllegalStateException("The memory block has been released");
+ throw new IllegalStateException(DataNodePipeMessages.THE_MEMORY_BLOCK_HAS_BEEN_RELEASED);
}
final long diff = memorySizeInBytes - block.getMemoryUsageInBytes();
@@ -112,7 +113,7 @@ synchronized void resetMemoryBlockSize(
Stream getMemoryBlocksStream() {
if (isReleased()) {
- throw new IllegalStateException("The memory block has been released");
+ throw new IllegalStateException(DataNodePipeMessages.THE_MEMORY_BLOCK_HAS_BEEN_RELEASED);
}
return memoryBlocks.stream();
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFilePublicResource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFilePublicResource.java
index 47134fe117c95..fe54ee48b547a 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFilePublicResource.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFilePublicResource.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.db.pipe.resource.tsfile;
import org.apache.iotdb.commons.pipe.config.PipeConfig;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlock;
import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryWeightUtil;
@@ -120,7 +121,7 @@ synchronized boolean cacheDeviceIsAlignedMapIfAbsent(final File tsFile) throws I
MEMORY_SUFFICIENT_THRESHOLD);
if (allocatedMemoryBlock == null) {
LOGGER.info(
- "Failed to cacheDeviceIsAlignedMapIfAbsent for tsfile {}, because memory usage is high",
+ DataNodePipeMessages.FAILED_TO_CACHEDEVICEISALIGNEDMAPIFABSENT_FOR_TSFILE_BECAUSE_MEMORY,
tsFile.getPath());
return false;
}
@@ -147,13 +148,15 @@ synchronized boolean cacheDeviceIsAlignedMapIfAbsent(final File tsFile) throws I
.forceAllocateIfSufficient(memoryRequiredInBytes, MEMORY_SUFFICIENT_THRESHOLD);
if (allocatedMemoryBlock == null) {
LOGGER.info(
- "PipeTsFileResource: Failed to cache objects for tsfile {} in cache, because memory usage is high",
+ DataNodePipeMessages.PIPETSFILERESOURCE_FAILED_TO_CACHE_OBJECTS_FOR_TSFILE,
tsFile.getPath());
deviceIsAlignedMap = null;
return false;
}
- LOGGER.info("PipeTsFileResource: Cached deviceIsAlignedMap for tsfile {}.", tsFile.getPath());
+ LOGGER.info(
+ DataNodePipeMessages.PIPETSFILERESOURCE_CACHED_DEVICEISALIGNEDMAP_FOR_TSFILE,
+ tsFile.getPath());
return true;
}
@@ -178,7 +181,7 @@ synchronized boolean cacheObjectsIfAbsent(final File tsFile) throws IOException
MEMORY_SUFFICIENT_THRESHOLD);
if (allocatedMemoryBlock == null) {
LOGGER.info(
- "Failed to cacheObjectsIfAbsent for tsfile {}, because memory usage is high",
+ DataNodePipeMessages.FAILED_TO_CACHEOBJECTSIFABSENT_FOR_TSFILE_BECAUSE_MEMORY,
tsFile.getPath());
return false;
}
@@ -214,7 +217,7 @@ synchronized boolean cacheObjectsIfAbsent(final File tsFile) throws IOException
.forceAllocateIfSufficient(memoryRequiredInBytes, MEMORY_SUFFICIENT_THRESHOLD);
if (allocatedMemoryBlock == null) {
LOGGER.info(
- "PipeTsFileResource: Failed to cache objects for tsfile {} in cache, because memory usage is high",
+ DataNodePipeMessages.PIPETSFILERESOURCE_FAILED_TO_CACHE_OBJECTS_FOR_TSFILE,
tsFile.getPath());
deviceIsAlignedMap = null;
deviceMeasurementsMap = null;
@@ -222,7 +225,8 @@ synchronized boolean cacheObjectsIfAbsent(final File tsFile) throws IOException
return false;
}
- LOGGER.info("PipeTsFileResource: Cached objects for tsfile {}.", tsFile.getPath());
+ LOGGER.info(
+ DataNodePipeMessages.PIPETSFILERESOURCE_CACHED_OBJECTS_FOR_TSFILE, tsFile.getPath());
return true;
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResource.java
index 8b37f87709447..51421cdcec8da 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResource.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResource.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.db.pipe.resource.tsfile;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
+
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -74,7 +76,7 @@ public boolean decreaseReferenceCount() {
return true;
}
if (finalReferenceCount < 0) {
- LOGGER.warn("PipeTsFileResource's reference count is decreased to below 0.");
+ LOGGER.warn(DataNodePipeMessages.PIPETSFILERESOURCE_S_REFERENCE_COUNT_IS_DECREASED_TO);
}
return false;
}
@@ -86,14 +88,16 @@ public synchronized void close() {
successful = Files.deleteIfExists(hardlinkOrCopiedFile.toPath());
} catch (final Exception e) {
LOGGER.error(
- "PipeTsFileResource: Failed to delete tsfile {} when closing, because {}. Please MANUALLY delete it.",
+ DataNodePipeMessages.PIPETSFILERESOURCE_FAILED_TO_DELETE_TSFILE_WHEN_CLOSING,
hardlinkOrCopiedFile,
e.getMessage(),
e);
}
if (successful) {
- LOGGER.info("PipeTsFileResource: Closed tsfile {} and cleaned up.", hardlinkOrCopiedFile);
+ LOGGER.info(
+ DataNodePipeMessages.PIPETSFILERESOURCE_CLOSED_TSFILE_AND_CLEANED_UP,
+ hardlinkOrCopiedFile);
}
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceManager.java
index c84504fa52b7e..90cd17539f67b 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceManager.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceManager.java
@@ -23,6 +23,7 @@
import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.commons.utils.FileUtils;
import org.apache.iotdb.commons.utils.TestOnly;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource;
import org.apache.tsfile.enums.TSDataType;
@@ -386,7 +387,7 @@ public long getTotalLinkedTsFileSize(final @Nonnull String pipeName) {
try {
return resource.getFileSize();
} catch (Exception e) {
- LOGGER.warn("failed to get file size of linked TsFile {}: ", resource, e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_GET_FILE_SIZE_OF_LINKED, resource, e);
return 0;
}
})
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceSegmentLock.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceSegmentLock.java
index cd1be83e55fd6..aaf5ce4db5a5f 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceSegmentLock.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceSegmentLock.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.db.pipe.resource.tsfile;
import org.apache.iotdb.commons.pipe.config.PipeConfig;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.storageengine.StorageEngine;
import org.slf4j.Logger;
@@ -51,7 +52,7 @@ private void initIfNecessary() {
SEGMENT_LOCK_MAX_SIZE);
} catch (final Exception e) {
LOGGER.warn(
- "Cannot get data region ids, use default lock segment size: {}", lockSegmentSize);
+ DataNodePipeMessages.CANNOT_GET_DATA_REGION_IDS_USE_DEFAULT, lockSegmentSize);
lockSegmentSize = SEGMENT_LOCK_MIN_SIZE;
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/client/IoTDBDataNodeAsyncClientManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/client/IoTDBDataNodeAsyncClientManager.java
index 09580fec279f7..39a5a19a52913 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/client/IoTDBDataNodeAsyncClientManager.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/client/IoTDBDataNodeAsyncClientManager.java
@@ -33,6 +33,7 @@
import org.apache.iotdb.commons.pipe.sink.client.IoTDBClientManager;
import org.apache.iotdb.commons.pipe.sink.payload.thrift.common.PipeTransferHandshakeConstant;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferDataNodeHandshakeV1Req;
import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferDataNodeHandshakeV2Req;
import org.apache.iotdb.pipe.api.exception.PipeConnectionException;
@@ -168,7 +169,7 @@ public IoTDBDataNodeAsyncClientManager(
break;
default:
LOGGER.warn(
- "Unknown load balance strategy: {}, use round-robin strategy instead.",
+ DataNodePipeMessages.UNKNOWN_LOAD_BALANCE_STRATEGY_USE_ROUND_ROBIN,
loadBalanceStrategy);
loadBalancer = new RoundRobinLoadBalancer();
}
@@ -199,7 +200,7 @@ public AsyncPipeDataTransferServiceClient borrowClient(final TEndPoint endPoint)
}
} catch (final Exception e) {
LOGGER.warn(
- "failed to borrow client {}:{} for cached leader.",
+ DataNodePipeMessages.FAILED_TO_BORROW_CLIENT_FOR_CACHED_LEADER,
endPoint.getIp(),
endPoint.getPort(),
e);
@@ -251,7 +252,7 @@ public void onComplete(final TPipeTransferResp response) {
response.getStatus().getMessage())));
} else {
LOGGER.info(
- "Handshake successfully with receiver {}:{}.",
+ DataNodePipeMessages.HANDSHAKE_SUCCESSFULLY_WITH_RECEIVER,
targetNodeUrl.getIp(),
targetNodeUrl.getPort());
client.markHandshakeFinished();
@@ -341,7 +342,8 @@ public void onError(final Exception e) {
}
if (exception.get() != null) {
markUnhealthy(targetNodeUrl);
- throw new PipeConnectionException("Failed to handshake.", exception.get());
+ throw new PipeConnectionException(
+ DataNodePipeMessages.FAILED_TO_HANDSHAKE, exception.get());
} else {
markHealthy(targetNodeUrl);
}
@@ -356,7 +358,7 @@ public void onError(final Exception e) {
client.invalidateAll();
} catch (final Exception e) {
LOGGER.warn(
- "Failed to close client {}:{} after handshake failure when the manager is closed.",
+ DataNodePipeMessages.FAILED_TO_CLOSE_CLIENT_AFTER_HANDSHAKE_FAILURE,
targetNodeUrl.getIp(),
targetNodeUrl.getPort(),
e);
@@ -373,7 +375,8 @@ private void waitHandshakeFinished(final AtomicBoolean isHandshakeFinished) {
try {
while (!isHandshakeFinished.get()) {
if (isClosed) {
- throw new PipeConnectionException("Timed out when waiting for client handshake finish.");
+ throw new PipeConnectionException(
+ DataNodePipeMessages.TIMED_OUT_WHEN_WAITING_FOR_CLIENT_HANDSHAKE);
}
synchronized (isHandshakeFinished) {
isHandshakeFinished.wait(1);
@@ -381,7 +384,8 @@ private void waitHandshakeFinished(final AtomicBoolean isHandshakeFinished) {
}
} catch (final InterruptedException e) {
Thread.currentThread().interrupt();
- throw new PipeException("Interrupted while waiting for handshake response.", e);
+ throw new PipeException(
+ DataNodePipeMessages.INTERRUPTED_WHILE_WAITING_FOR_HANDSHAKE_RESPONSE, e);
}
}
@@ -415,11 +419,13 @@ public void close() {
try {
clientManager.close();
LOGGER.info(
- "Closed AsyncPipeDataTransferServiceClientManager for receiver attributes: {}",
+ DataNodePipeMessages
+ .CLOSED_ASYNCPIPEDATATRANSFERSERVICECLIENTMANAGER_FOR_RECEIVER_ATTRIBUTES,
receiverAttributes);
} catch (final Exception e) {
LOGGER.warn(
- "Failed to close AsyncPipeDataTransferServiceClientManager for receiver attributes: {}",
+ DataNodePipeMessages
+ .FAILED_TO_CLOSE_ASYNCPIPEDATATRANSFERSERVICECLIENTMANAGER_FOR_RECEIVER_ATTRIBUTE,
receiverAttributes,
e);
}
@@ -430,9 +436,9 @@ public void close() {
if (executor != null) {
try {
executor.shutdown();
- LOGGER.info("Successfully shutdown executor {}.", executor);
+ LOGGER.info(DataNodePipeMessages.SUCCESSFULLY_SHUTDOWN_EXECUTOR, executor);
} catch (final Exception e) {
- LOGGER.warn("Failed to shutdown executor {}.", executor);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_SHUTDOWN_EXECUTOR, executor);
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/client/IoTDBDataNodeCacheLeaderClientManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/client/IoTDBDataNodeCacheLeaderClientManager.java
index a9dbd43544e73..f32c8cb72bbd5 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/client/IoTDBDataNodeCacheLeaderClientManager.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/client/IoTDBDataNodeCacheLeaderClientManager.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.common.rpc.thrift.TEndPoint;
import org.apache.iotdb.commons.pipe.config.PipeConfig;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlock;
@@ -70,7 +71,7 @@ public LeaderCacheManager() {
.eviction()
.ifPresent(eviction -> eviction.setMaximum(newMemory));
LOGGER.info(
- "LeaderCacheManager.allocatedMemoryBlock has shrunk from {} to {}.",
+ DataNodePipeMessages.LEADERCACHEMANAGER_ALLOCATEDMEMORYBLOCK_HAS_SHRUNK_FROM_TO,
oldMemory,
newMemory);
})
@@ -89,7 +90,8 @@ public LeaderCacheManager() {
.eviction()
.ifPresent(eviction -> eviction.setMaximum(newMemory));
LOGGER.info(
- "LeaderCacheManager.allocatedMemoryBlock has expanded from {} to {}.",
+ DataNodePipeMessages
+ .LEADERCACHEMANAGER_ALLOCATEDMEMORYBLOCK_HAS_EXPANDED_FROM_TO,
oldMemory,
newMemory);
});
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/client/IoTDBDataNodeSyncClientManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/client/IoTDBDataNodeSyncClientManager.java
index b8eff41a9eab1..841982accd3e6 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/client/IoTDBDataNodeSyncClientManager.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/client/IoTDBDataNodeSyncClientManager.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.commons.pipe.sink.client.IoTDBSyncClientManager;
import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferHandshakeV2Req;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferDataNodeHandshakeV1Req;
import org.apache.iotdb.db.pipe.sink.payload.evolvable.request.PipeTransferDataNodeHandshakeV2Req;
@@ -128,7 +129,7 @@ public void updateLeaderCache(final String deviceId, final TEndPoint endPoint) {
LEADER_CACHE_MANAGER.updateLeaderEndPoint(deviceId, endPoint);
} catch (final Exception e) {
LOGGER.warn(
- "Failed to update leader cache for device {} with endpoint {}:{}.",
+ DataNodePipeMessages.FAILED_TO_UPDATE_LEADER_CACHE_FOR_DEVICE,
deviceId,
endPoint.getIp(),
endPoint.getPort(),
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTabletEventBatch.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTabletEventBatch.java
index c44e12a4bbf20..8bf69e6e6b01a 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTabletEventBatch.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTabletEventBatch.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.db.pipe.sink.payload.evolvable.batch;
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlock;
import org.apache.iotdb.db.pipe.sink.protocol.thrift.async.IoTDBDataRegionAsyncSink;
@@ -106,7 +107,7 @@ public synchronized boolean onEvent(final TabletInsertionEvent event)
firstEventProcessingTime = System.currentTimeMillis();
}
} else {
- LOGGER.warn("Cannot increase reference count for event: {}, ignore it in batch.", event);
+ LOGGER.warn(DataNodePipeMessages.CANNOT_INCREASE_REFERENCE_COUNT_FOR_EVENT_IGNORE, event);
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTabletEventTsFileBatch.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTabletEventTsFileBatch.java
index 9c4c2fe495327..7b511e23fc6c9 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTabletEventTsFileBatch.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTabletEventTsFileBatch.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.db.pipe.sink.payload.evolvable.batch;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent;
import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryWeightUtil;
@@ -130,7 +131,7 @@ protected boolean constructBatch(final TabletInsertionEvent event) {
}
} else {
LOGGER.warn(
- "Batch id = {}: Unsupported event {} type {} when constructing tsfile batch",
+ DataNodePipeMessages.BATCH_ID_UNSUPPORTED_EVENT_TYPE_WHEN_CONSTRUCTING,
currentBatchId.get(),
event,
event.getClass());
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTransferBatchReqBuilder.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTransferBatchReqBuilder.java
index 49d9d8cea09be..3bec537614c60 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTransferBatchReqBuilder.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/batch/PipeTransferBatchReqBuilder.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.common.rpc.thrift.TEndPoint;
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent;
import org.apache.iotdb.db.pipe.sink.client.IoTDBDataNodeCacheLeaderClientManager;
@@ -138,7 +139,9 @@ public synchronized void onEvent(final TabletInsertionEvent event)
throws IOException, WALPipeException {
if (!(event instanceof EnrichedEvent)) {
LOGGER.warn(
- "Unsupported event {} type {} when building transfer request", event, event.getClass());
+ DataNodePipeMessages.UNSUPPORTED_EVENT_TYPE_WHEN_BUILDING_TRANSFER_REQUEST,
+ event,
+ event.getClass());
return;
}
@@ -212,7 +215,7 @@ public int size() {
.reduce(0, Integer::sum);
} catch (final Exception e) {
LOGGER.warn(
- "Failed to get the size of PipeTransferBatchReqBuilder, return 0. Exception: {}",
+ DataNodePipeMessages.FAILED_TO_GET_THE_SIZE_OF_PIPETRANSFERBATCHREQBUILDER,
e.getMessage(),
e);
return 0;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTabletRawReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTabletRawReq.java
index af9b37edbf6ff..1504b3eadb9b9 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTabletRawReq.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTabletRawReq.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.commons.exception.MetadataException;
import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.IoTDBSinkRequestVersion;
import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeRequestType;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.sink.util.TabletStatementConverter;
import org.apache.iotdb.db.pipe.sink.util.sorter.PipeTreeModelTabletEventSorter;
import org.apache.iotdb.db.queryengine.plan.statement.crud.InsertTabletStatement;
@@ -59,7 +60,7 @@ public Tablet getTablet() {
try {
tablet = statement.convertToTablet();
} catch (final MetadataException e) {
- LOGGER.warn("Failed to convert statement to tablet.", e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_CONVERT_STATEMENT_TO_TABLET, e);
return null;
}
}
@@ -94,7 +95,7 @@ public InsertTabletStatement constructStatement() {
statement = new InsertTabletStatement(tablet, isAligned, null);
return statement;
} catch (final MetadataException e) {
- LOGGER.warn("Generate Statement from tablet {} error.", tablet, e);
+ LOGGER.warn(DataNodePipeMessages.GENERATE_STATEMENT_FROM_TABLET_ERROR, tablet, e);
return null;
}
}
@@ -175,12 +176,12 @@ public byte[] toTPipeTransferBytes() throws IOException {
tabletToSerialize = statement.convertToTablet();
isAlignedToSerialize = statement.isAligned();
} catch (final MetadataException e) {
- throw new IOException("Failed to convert statement to tablet for serialization", e);
+ throw new IOException(DataNodePipeMessages.FAILED_TO_CONVERT_STATEMENT_TO_TABLET_FOR, e);
}
}
if (tabletToSerialize == null) {
- throw new IOException("Cannot serialize: both tablet and statement are null");
+ throw new IOException(DataNodePipeMessages.CANNOT_SERIALIZE_BOTH_TABLET_AND_STATEMENT_ARE);
}
try (final PublicBAOS byteArrayOutputStream = new PublicBAOS();
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTabletRawReqV2.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTabletRawReqV2.java
index 3c5f420a317fd..f6b910a8844ed 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTabletRawReqV2.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/evolvable/request/PipeTransferTabletRawReqV2.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.commons.exception.MetadataException;
import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.IoTDBSinkRequestVersion;
import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeRequestType;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.sink.util.TabletStatementConverter;
import org.apache.iotdb.db.pipe.sink.util.sorter.PipeTableModelTabletEventSorter;
import org.apache.iotdb.db.pipe.sink.util.sorter.PipeTreeModelTabletEventSorter;
@@ -77,7 +78,7 @@ public InsertTabletStatement constructStatement() {
return new InsertTabletStatement(tablet, isAligned, dataBaseName);
} catch (final MetadataException e) {
- LOGGER.warn("Generate Statement from tablet {} error.", tablet, e);
+ LOGGER.warn(DataNodePipeMessages.GENERATE_STATEMENT_FROM_TABLET_ERROR, tablet, e);
return null;
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/legacy/PipeData.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/legacy/PipeData.java
index 643ee849096a3..486ffafc08da0 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/legacy/PipeData.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/payload/legacy/PipeData.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.db.pipe.sink.payload.legacy;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.receiver.protocol.legacy.loader.ILoader;
import org.slf4j.Logger;
@@ -81,9 +82,9 @@ public static PipeData createPipeData(DataInputStream stream) throws IOException
pipeData = new DeletionPipeData();
break;
default:
- LOGGER.error("Deserialize PipeData error because Unknown type {}.", type);
+ LOGGER.error(DataNodePipeMessages.DESERIALIZE_PIPEDATA_ERROR_BECAUSE_UNKNOWN_TYPE_1, type);
throw new UnsupportedOperationException(
- "Deserialize PipeData error because Unknown type " + type);
+ DataNodePipeMessages.DESERIALIZE_PIPEDATA_ERROR_BECAUSE_UNKNOWN_TYPE + type);
}
pipeData.deserialize(stream);
return pipeData;
@@ -116,7 +117,7 @@ public static PipeDataType getPipeDataType(byte type) {
case 1:
return PipeDataType.DELETION;
default:
- throw new IllegalArgumentException("Invalid input: " + type);
+ throw new IllegalArgumentException(DataNodePipeMessages.INVALID_INPUT + type);
}
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/airgap/IoTDBDataRegionAirGapSink.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/airgap/IoTDBDataRegionAirGapSink.java
index 7f904324bbbcd..ee1a1c5e6292e 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/airgap/IoTDBDataRegionAirGapSink.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/airgap/IoTDBDataRegionAirGapSink.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
import org.apache.iotdb.commons.pipe.sink.limiter.TsFileSendRateLimiter;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.deletion.PipeDeleteDataNodeEvent;
import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent;
@@ -124,14 +125,15 @@ public void transfer(final TsFileInsertionEvent tsFileInsertionEvent) throws Exc
// PipeProcessor can change the type of tsFileInsertionEvent
if (!(tsFileInsertionEvent instanceof PipeTsFileInsertionEvent)) {
LOGGER.warn(
- "IoTDBDataRegionAirGapConnector only support PipeTsFileInsertionEvent. Ignore {}.",
+ DataNodePipeMessages
+ .IOTDBDATAREGIONAIRGAPCONNECTOR_ONLY_SUPPORT_PIPETSFILEINSERTIONEVENT_IGNORE,
tsFileInsertionEvent);
return;
}
if (!((PipeTsFileInsertionEvent) tsFileInsertionEvent).waitForTsFileClose()) {
LOGGER.warn(
- "Pipe skipping temporary TsFile which shouldn't be transferred: {}",
+ DataNodePipeMessages.PIPE_SKIPPING_TEMPORARY_TSFILE_WHICH_SHOULDN_T,
((PipeTsFileInsertionEvent) tsFileInsertionEvent).getTsFile());
return;
}
@@ -163,7 +165,8 @@ public void transfer(final Event event) throws Exception {
doTransferWrapper(socket, (PipeDeleteDataNodeEvent) event);
} else if (!(event instanceof PipeHeartbeatEvent || event instanceof PipeTerminateEvent)) {
LOGGER.warn(
- "IoTDBDataRegionAirGapConnector does not support transferring generic event: {}.",
+ DataNodePipeMessages
+ .IOTDBDATAREGIONAIRGAPCONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT,
event);
}
} catch (final IOException e) {
@@ -348,7 +351,7 @@ private void doTransfer(
errorMessage,
pipeTsFileInsertionEvent.toString());
} else {
- LOGGER.info("Successfully transferred file {}.", tsFile);
+ LOGGER.info(DataNodePipeMessages.SUCCESSFULLY_TRANSFERRED_FILE, tsFile);
}
} else {
transferFilePieces(pipeName, creationTime, tsFile, socket, false);
@@ -369,7 +372,7 @@ private void doTransfer(
errorMessage,
pipeTsFileInsertionEvent.toString());
} else {
- LOGGER.info("Successfully transferred file {}.", tsFile);
+ LOGGER.info(DataNodePipeMessages.SUCCESSFULLY_TRANSFERRED_FILE, tsFile);
}
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/airgap/IoTDBSchemaRegionAirGapSink.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/airgap/IoTDBSchemaRegionAirGapSink.java
index bc056857c17c1..a67654ea9d8c5 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/airgap/IoTDBSchemaRegionAirGapSink.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/airgap/IoTDBSchemaRegionAirGapSink.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent;
import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionSnapshotEvent;
import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent;
@@ -52,13 +53,13 @@ public class IoTDBSchemaRegionAirGapSink extends IoTDBDataNodeAirGapSink {
@Override
public void transfer(final TabletInsertionEvent tabletInsertionEvent) throws Exception {
throw new UnsupportedOperationException(
- "IoTDBSchemaRegionAirGapSink can't transfer TabletInsertionEvent.");
+ DataNodePipeMessages.IOTDBSCHEMAREGIONAIRGAPSINK_CAN_T_TRANSFER_TABLETINSERTIONEVENT);
}
@Override
public void transfer(final TsFileInsertionEvent tsFileInsertionEvent) throws Exception {
throw new UnsupportedOperationException(
- "IoTDBSchemaRegionAirGapSink can't transfer TsFileInsertionEvent.");
+ DataNodePipeMessages.IOTDBSCHEMAREGIONAIRGAPSINK_CAN_T_TRANSFER_TSFILEINSERTIONEVENT);
}
@Override
@@ -73,7 +74,9 @@ public void transfer(final Event event) throws Exception {
doTransferWrapper(socket, (PipeSchemaRegionSnapshotEvent) event);
} else if (!(event instanceof PipeHeartbeatEvent)) {
LOGGER.warn(
- "IoTDBSchemaRegionAirGapSink does not support transferring generic event: {}.", event);
+ DataNodePipeMessages
+ .IOTDBSCHEMAREGIONAIRGAPSINK_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT,
+ event);
}
} catch (final IOException e) {
isSocketAlive.set(socketIndex, false);
@@ -191,7 +194,7 @@ private void doTransfer(
true);
} else {
LOGGER.info(
- "Successfully transferred schema region snapshot {}, {} and {}.",
+ DataNodePipeMessages.SUCCESSFULLY_TRANSFERRED_SCHEMA_REGION_SNAPSHOT_AND,
mtreeSnapshotFile,
tagLogSnapshotFile,
attributeSnapshotFile);
@@ -207,7 +210,7 @@ protected void mayLimitRateAndRecordIO(final long requiredBytes) {
protected byte[] getTransferSingleFilePieceBytes(
final String fileName, final long position, final byte[] payLoad) {
throw new UnsupportedOperationException(
- "The schema region air gap connector does not support transferring single file piece bytes.");
+ DataNodePipeMessages.THE_SCHEMA_REGION_AIR_GAP_CONNECTOR_DOES);
}
@Override
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/IoTConsensusV2AsyncSink.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/IoTConsensusV2AsyncSink.java
index 6912290bb2bbb..a4dba48b7e98b 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/IoTConsensusV2AsyncSink.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/IoTConsensusV2AsyncSink.java
@@ -38,6 +38,7 @@
import org.apache.iotdb.consensus.pipe.metric.IoTConsensusV2SyncLagManager;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.consensus.ReplicateProgressDataNodeManager;
import org.apache.iotdb.db.pipe.consensus.metric.IoTConsensusV2SinkMetrics;
import org.apache.iotdb.db.pipe.event.common.PipeInsertionEvent;
@@ -176,7 +177,7 @@ private boolean addEvent2Buffer(EnrichedEvent event) {
try {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(
- "IoTConsensusV2-ConsensusGroup-{}: no.{} event-{} added to connector buffer",
+ DataNodePipeMessages.IOTCONSENSUSV2_CONSENSUSGROUP_NO_EVENT_ADDED_TO_CONNECTOR,
consensusGroupId,
event.getReplicateIndexForIoTV2(),
event);
@@ -203,7 +204,9 @@ private boolean addEvent2Buffer(EnrichedEvent event) {
}
return result;
} catch (InterruptedException e) {
- LOGGER.info("IoTConsensusV2Connector transferBuffer queue offer is interrupted.", e);
+ LOGGER.info(
+ DataNodePipeMessages.IOTCONSENSUSV2CONNECTOR_TRANSFERBUFFER_QUEUE_OFFER_IS_INTERRUPTED,
+ e);
Thread.currentThread().interrupt();
return false;
}
@@ -216,7 +219,7 @@ private boolean addEvent2Buffer(EnrichedEvent event) {
public synchronized void removeEventFromBuffer(EnrichedEvent event) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(
- "IoTConsensusV2-ConsensusGroup-{}: one event-{} successfully received by the follower, will be removed from queue, queue size = {}, limit size = {}",
+ DataNodePipeMessages.IOTCONSENSUSV2_CONSENSUSGROUP_ONE_EVENT_SUCCESSFULLY_RECEIVED_BY,
consensusGroupId,
event,
transferBuffer.size(),
@@ -224,7 +227,7 @@ public synchronized void removeEventFromBuffer(EnrichedEvent event) {
}
if (transferBuffer.isEmpty()) {
LOGGER.info(
- "IoTConsensusV2-ConsensusGroup-{}: try to remove event-{} after iotConsensusV2AsyncConnector being closed. Ignore it.",
+ DataNodePipeMessages.IOTCONSENSUSV2_CONSENSUSGROUP_TRY_TO_REMOVE_EVENT_AFTER,
consensusGroupId,
event);
return;
@@ -238,7 +241,7 @@ public synchronized void removeEventFromBuffer(EnrichedEvent event) {
iterator.remove();
} else {
LOGGER.warn(
- "IoTConsensusV2-ConsensusGroup-{}: event-{} not found in transferBuffer, skip removing. queue size = {}",
+ DataNodePipeMessages.IOTCONSENSUSV2_CONSENSUSGROUP_EVENT_NOT_FOUND_IN_TRANSFERBUFFER,
consensusGroupId,
event,
transferBuffer.size());
@@ -354,7 +357,8 @@ public void transfer(TsFileInsertionEvent tsFileInsertionEvent) throws Exception
if (!(tsFileInsertionEvent instanceof PipeTsFileInsertionEvent)) {
LOGGER.warn(
- "IoTConsensusV2AsyncConnector only support PipeTsFileInsertionEvent. Current event: {}.",
+ DataNodePipeMessages
+ .IOTCONSENSUSV2ASYNCCONNECTOR_ONLY_SUPPORT_PIPETSFILEINSERTIONEVENT_CURRENT_EVEN,
tsFileInsertionEvent);
return;
}
@@ -446,7 +450,9 @@ public void transfer(Event event) throws Exception {
if (!(event instanceof PipeHeartbeatEvent)) {
LOGGER.warn(
- "IoTConsensusV2AsyncConnector does not support transferring generic event: {}.", event);
+ DataNodePipeMessages
+ .IOTCONSENSUSV2ASYNCCONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT,
+ event);
}
}
@@ -515,7 +521,7 @@ private void asyncTransferQueuedEventsIfNecessary() {
// just in case that some events are polled and re-added into queue again and again,
// causing this loop to run forever.
LOGGER.warn(
- "IoTConsensusV2-ConsensusGroup-{}: retryEventQueue is not empty after 20 seconds. retryQueue size: {}",
+ DataNodePipeMessages.IOTCONSENSUSV2_CONSENSUSGROUP_RETRYEVENTQUEUE_IS_NOT_EMPTY_AFTER,
consensusGroupId,
retryEventQueue.size());
return;
@@ -530,7 +536,7 @@ private void asyncTransferQueuedEventsIfNecessary() {
? peekedEvent.getRetryInterval()
: 0L;
LOGGER.info(
- "IoTConsensusV2-ConsensusGroup-{}: retry with interval {} for index {} {}",
+ DataNodePipeMessages.IOTCONSENSUSV2_CONSENSUSGROUP_RETRY_WITH_INTERVAL_FOR_INDEX,
consensusGroupId,
retryInterval,
peekedEvent.getReplicateIndexForIoTV2(),
@@ -549,7 +555,8 @@ private void asyncTransferQueuedEventsIfNecessary() {
} else {
if (LOGGER.isWarnEnabled()) {
LOGGER.warn(
- "IoTConsensusV2AsyncConnector does not support transfer generic event: {}.",
+ DataNodePipeMessages
+ .IOTCONSENSUSV2ASYNCCONNECTOR_DOES_NOT_SUPPORT_TRANSFER_GENERIC_EVENT,
peekedEvent);
}
}
@@ -623,13 +630,14 @@ public synchronized void addFailureEventToRetryQueue(final EnrichedEvent event)
boolean res = retryEventQueue.offer(event);
if (res) {
LOGGER.info(
- "IoTConsensusV2-ConsensusGroup-{}: Event {} replicate index {} transfer failed, will be added to retry queue.",
+ DataNodePipeMessages
+ .IOTCONSENSUSV2_CONSENSUSGROUP_EVENT_REPLICATE_INDEX_TRANSFER_FAILED_1,
consensusGroupId,
event,
event.getReplicateIndexForIoTV2());
} else {
LOGGER.warn(
- "IoTConsensusV2-ConsensusGroup-{}: Event {} replicate index {} transfer failed, added to retry queue failed, this event will be ignored.",
+ DataNodePipeMessages.IOTCONSENSUSV2_CONSENSUSGROUP_EVENT_REPLICATE_INDEX_TRANSFER_FAILED,
consensusGroupId,
event,
event.getReplicateIndexForIoTV2());
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/IoTConsensusV2SyncSink.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/IoTConsensusV2SyncSink.java
index e6fe44b346742..481e340a739f3 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/IoTConsensusV2SyncSink.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/IoTConsensusV2SyncSink.java
@@ -34,6 +34,7 @@
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TCommitId;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TIoTConsensusV2BatchTransferResp;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TIoTConsensusV2TransferResp;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.consensus.metric.IoTConsensusV2SinkMetrics;
import org.apache.iotdb.db.pipe.event.common.deletion.PipeDeleteDataNodeEvent;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent;
@@ -286,7 +287,7 @@ private void doTransfer(final PipeDeleteDataNodeEvent pipeDeleteDataNodeEvent)
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(
- "Successfully transferred deletion event {}.",
+ DataNodePipeMessages.SUCCESSFULLY_TRANSFERRED_DELETION_EVENT,
pipeDeleteDataNodeEvent.getDeletionResource());
}
}
@@ -424,7 +425,7 @@ private void doTransfer(final PipeTsFileInsertionEvent pipeTsFileInsertionEvent)
tsFile.getName());
}
- LOGGER.info("Successfully transferred file {}.", tsFile);
+ LOGGER.info(DataNodePipeMessages.SUCCESSFULLY_TRANSFERRED_FILE, tsFile);
}
protected void transferFilePieces(
@@ -484,7 +485,7 @@ protected void transferFilePieces(
== TSStatusCode.IOT_CONSENSUS_V2_TRANSFER_FILE_OFFSET_RESET.getStatusCode()) {
position = resp.getEndWritingOffset();
reader.seek(position);
- LOGGER.info("Redirect file position to {}.", position);
+ LOGGER.info(DataNodePipeMessages.REDIRECT_FILE_POSITION_TO, position);
continue;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/handler/IoTConsensusV2DeleteEventHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/handler/IoTConsensusV2DeleteEventHandler.java
index 2cb19e798f279..35ba6a89f7a56 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/handler/IoTConsensusV2DeleteEventHandler.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/handler/IoTConsensusV2DeleteEventHandler.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.commons.utils.RetryUtils;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TIoTConsensusV2TransferReq;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TIoTConsensusV2TransferResp;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.consensus.metric.IoTConsensusV2SinkMetrics;
import org.apache.iotdb.db.pipe.event.common.deletion.PipeDeleteDataNodeEvent;
import org.apache.iotdb.db.pipe.sink.protocol.iotconsensusv2.IoTConsensusV2AsyncSink;
@@ -70,7 +71,7 @@ public void transfer(AsyncIoTConsensusV2ServiceClient client) throws TException
public void onComplete(TIoTConsensusV2TransferResp response) {
// Just in case
if (response == null) {
- onError(new PipeException("TIoTConsensusV2TransferResp is null"));
+ onError(new PipeException(DataNodePipeMessages.TIOTCONSENSUSV2TRANSFERRESP_IS_NULL));
return;
}
@@ -85,7 +86,7 @@ public void onComplete(TIoTConsensusV2TransferResp response) {
if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.info(
- "DeleteNodeTransfer: no.{} event successfully processed!",
+ DataNodePipeMessages.DELETENODETRANSFER_NO_EVENT_SUCCESSFULLY_PROCESSED,
event.getReplicateIndexForIoTV2());
}
// if code flow reach here, meaning the file will not be resent and will be ignored.
@@ -102,7 +103,7 @@ public void onComplete(TIoTConsensusV2TransferResp response) {
@Override
public void onError(Exception e) {
LOGGER.warn(
- "Failed to transfer PipeDeleteNodeEvent {} (committer key={}, replicate index={}).",
+ DataNodePipeMessages.FAILED_TO_TRANSFER_PIPEDELETENODEEVENT_COMMITTER_KEY_REPLICATE,
event.coreReportMessage(),
event.getCommitterKey(),
event.getReplicateIndexForIoTV2(),
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/handler/IoTConsensusV2TabletBatchEventHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/handler/IoTConsensusV2TabletBatchEventHandler.java
index 4bdede5944ac8..b2026c809dd3c 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/handler/IoTConsensusV2TabletBatchEventHandler.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/handler/IoTConsensusV2TabletBatchEventHandler.java
@@ -25,6 +25,7 @@
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TIoTConsensusV2BatchTransferReq;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TIoTConsensusV2BatchTransferResp;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TIoTConsensusV2TransferResp;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.consensus.metric.IoTConsensusV2SinkMetrics;
import org.apache.iotdb.db.pipe.sink.protocol.iotconsensusv2.IoTConsensusV2AsyncSink;
import org.apache.iotdb.db.pipe.sink.protocol.iotconsensusv2.payload.builder.IoTConsensusV2AsyncBatchReqBuilder;
@@ -73,7 +74,7 @@ public void transfer(final AsyncIoTConsensusV2ServiceClient client) throws TExce
public void onComplete(final TIoTConsensusV2BatchTransferResp response) {
// Just in case
if (response == null) {
- onError(new PipeException("TIoTConsensusV2BatchTransferResp is null"));
+ onError(new PipeException(DataNodePipeMessages.TIOTCONSENSUSV2BATCHTRANSFERRESP_IS_NULL));
return;
}
@@ -117,7 +118,7 @@ public void onComplete(final TIoTConsensusV2BatchTransferResp response) {
@Override
public void onError(final Exception exception) {
LOGGER.warn(
- "IoTConsensusV2: Failed to transfer TabletInsertionEvent batch. Total failed events: {}, related pipe names: {}",
+ DataNodePipeMessages.IOTCONSENSUSV2_FAILED_TO_TRANSFER_TABLETINSERTIONEVENT_BATCH_TOTAL,
events.size(),
events.stream()
.map(
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/handler/IoTConsensusV2TabletInsertionEventHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/handler/IoTConsensusV2TabletInsertionEventHandler.java
index 971a41a295637..4c31942692d1d 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/handler/IoTConsensusV2TabletInsertionEventHandler.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/handler/IoTConsensusV2TabletInsertionEventHandler.java
@@ -25,6 +25,7 @@
import org.apache.iotdb.commons.utils.RetryUtils;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TIoTConsensusV2TransferReq;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TIoTConsensusV2TransferResp;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.consensus.metric.IoTConsensusV2SinkMetrics;
import org.apache.iotdb.db.pipe.sink.protocol.iotconsensusv2.IoTConsensusV2AsyncSink;
import org.apache.iotdb.db.pipe.sink.protocol.thrift.async.handler.PipeTransferTabletInsertionEventHandler;
@@ -76,7 +77,7 @@ protected abstract void doTransfer(
public void onComplete(TIoTConsensusV2TransferResp response) {
// Just in case
if (response == null) {
- onError(new PipeException("TIoTConsensusV2TransferResp is null"));
+ onError(new PipeException(DataNodePipeMessages.TIOTCONSENSUSV2TRANSFERRESP_IS_NULL));
return;
}
@@ -94,7 +95,7 @@ public void onComplete(TIoTConsensusV2TransferResp response) {
if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
LOGGER.info(
- "InsertNodeTransfer: no.{} event successfully processed!",
+ DataNodePipeMessages.INSERTNODETRANSFER_NO_EVENT_SUCCESSFULLY_PROCESSED,
((EnrichedEvent) event).getReplicateIndexForIoTV2());
}
@@ -113,7 +114,7 @@ public void onComplete(TIoTConsensusV2TransferResp response) {
public void onError(Exception exception) {
EnrichedEvent event = (EnrichedEvent) this.event;
LOGGER.warn(
- "Failed to transfer TabletInsertionEvent {} (committer key={}, replicate index={}).",
+ DataNodePipeMessages.FAILED_TO_TRANSFER_TABLETINSERTIONEVENT_COMMITTER_KEY_REPLICATE,
event.coreReportMessage(),
event.getCommitterKey(),
event.getReplicateIndexForIoTV2(),
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/handler/IoTConsensusV2TsFileInsertionEventHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/handler/IoTConsensusV2TsFileInsertionEventHandler.java
index 28850bc543dd9..490806cfddb27 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/handler/IoTConsensusV2TsFileInsertionEventHandler.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/handler/IoTConsensusV2TsFileInsertionEventHandler.java
@@ -27,6 +27,7 @@
import org.apache.iotdb.commons.utils.RetryUtils;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TCommitId;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TIoTConsensusV2TransferResp;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.consensus.metric.IoTConsensusV2SinkMetrics;
import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent;
import org.apache.iotdb.db.pipe.sink.protocol.iotconsensusv2.IoTConsensusV2AsyncSink;
@@ -135,7 +136,7 @@ public void transfer(final AsyncIoTConsensusV2ServiceClient client)
reader.close();
} catch (final IOException e) {
LOGGER.warn(
- "IoTConsensusV2-{}: Failed to close file reader when successfully transferred mod file.",
+ DataNodePipeMessages.IOTCONSENSUSV2_FAILED_TO_CLOSE_FILE_READER_WHEN_2,
consensusPipeName,
e);
}
@@ -224,7 +225,7 @@ public void onComplete(final TIoTConsensusV2TransferResp response) {
}
} catch (final IOException e) {
LOGGER.warn(
- "IoTConsensusV2-{}: Failed to close file reader when successfully transferred file.",
+ DataNodePipeMessages.IOTCONSENSUSV2_FAILED_TO_CLOSE_FILE_READER_WHEN_1,
consensusPipeName,
e);
} finally {
@@ -232,7 +233,8 @@ public void onComplete(final TIoTConsensusV2TransferResp response) {
IoTConsensusV2TsFileInsertionEventHandler.class.getName(), true);
LOGGER.info(
- "IoTConsensusV2-{}: Successfully transferred file {} (committer key={}, replicate index={}).",
+ DataNodePipeMessages
+ .IOTCONSENSUSV2_SUCCESSFULLY_TRANSFERRED_FILE_COMMITTER_KEY_REPLICATE,
consensusPipeName,
tsFile,
event.getCommitterKey(),
@@ -263,7 +265,9 @@ public void onComplete(final TIoTConsensusV2TransferResp response) {
position = resp.getEndWritingOffset();
reader.seek(position);
LOGGER.info(
- "IoTConsensusV2-{}: Redirect file position to {}.", consensusPipeName, position);
+ DataNodePipeMessages.IOTCONSENSUSV2_REDIRECT_FILE_POSITION_TO,
+ consensusPipeName,
+ position);
} else {
final TSStatus status = response.getStatus();
// Only handle the failed statuses to avoid string format performance overhead
@@ -287,7 +291,7 @@ public void onComplete(final TIoTConsensusV2TransferResp response) {
@Override
public void onError(final Exception exception) {
LOGGER.warn(
- "IoTConsensusV2-{}: Failed to transfer TsFileInsertionEvent {} (committer key {}, replicate index {}).",
+ DataNodePipeMessages.IOTCONSENSUSV2_FAILED_TO_TRANSFER_TSFILEINSERTIONEVENT_COMMITTER_KEY,
consensusPipeName,
tsFile,
event.getCommitterKey(),
@@ -309,7 +313,7 @@ public void onError(final Exception exception) {
}
} catch (final IOException e) {
LOGGER.warn(
- "IoTConsensusV2-{}: Failed to close file reader when failed to transfer file.",
+ DataNodePipeMessages.IOTCONSENSUSV2_FAILED_TO_CLOSE_FILE_READER_WHEN,
consensusPipeName,
e);
} finally {
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/payload/builder/IoTConsensusV2TransferBatchReqBuilder.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/payload/builder/IoTConsensusV2TransferBatchReqBuilder.java
index 20ba2e0552e47..677c77e0540f5 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/payload/builder/IoTConsensusV2TransferBatchReqBuilder.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/payload/builder/IoTConsensusV2TransferBatchReqBuilder.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TCommitId;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TIoTConsensusV2TransferReq;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlock;
@@ -103,13 +104,17 @@ protected IoTConsensusV2TransferBatchReqBuilder(
.setShrinkCallback(
(oldMemory, newMemory) ->
LOGGER.info(
- "The batch size limit has shrunk from {} to {}.", oldMemory, newMemory))
+ DataNodePipeMessages.THE_BATCH_SIZE_LIMIT_HAS_SHRUNK_FROM,
+ oldMemory,
+ newMemory))
.setExpandMethod(
oldMemory -> Math.min(Math.max(oldMemory, 1) * 2, requestMaxBatchSizeInBytes))
.setExpandCallback(
(oldMemory, newMemory) ->
LOGGER.info(
- "The batch size limit has expanded from {} to {}.", oldMemory, newMemory));
+ DataNodePipeMessages.THE_BATCH_SIZE_LIMIT_HAS_EXPANDED_FROM,
+ oldMemory,
+ newMemory));
if (getMaxBatchSizeInBytes() != requestMaxBatchSizeInBytes) {
LOGGER.info(
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/payload/request/IoTConsensusV2DeleteNodeReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/payload/request/IoTConsensusV2DeleteNodeReq.java
index a0558417b3960..c91393aa26391 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/payload/request/IoTConsensusV2DeleteNodeReq.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/payload/request/IoTConsensusV2DeleteNodeReq.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.commons.queryengine.plan.planner.plan.node.PlanNodeType;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TCommitId;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TIoTConsensusV2TransferReq;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.AbstractDeleteDataNode;
import org.apache.tsfile.utils.PublicBAOS;
@@ -74,7 +75,7 @@ public static IoTConsensusV2DeleteNodeReq toTIoTConsensusV2TransferReq(
req.progressIndex =
ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size());
} catch (IOException e) {
- LOGGER.warn("Failed to serialize progress index {}", progressIndex, e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_SERIALIZE_PROGRESS_INDEX, progressIndex, e);
}
return req;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/payload/request/IoTConsensusV2TabletBinaryReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/payload/request/IoTConsensusV2TabletBinaryReq.java
index ae2901c3f039e..d48121018c854 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/payload/request/IoTConsensusV2TabletBinaryReq.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/payload/request/IoTConsensusV2TabletBinaryReq.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.commons.queryengine.plan.planner.plan.node.PlanNode;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TCommitId;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TIoTConsensusV2TransferReq;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode;
import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.WALEntry;
@@ -75,7 +76,7 @@ public static IoTConsensusV2TabletBinaryReq toTIoTConsensusV2TransferReq(
req.progressIndex =
ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size());
} catch (IOException e) {
- LOGGER.warn("Failed to serialize progress index {}", progressIndex, e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_SERIALIZE_PROGRESS_INDEX, progressIndex, e);
}
return req;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/payload/request/IoTConsensusV2TabletInsertNodeReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/payload/request/IoTConsensusV2TabletInsertNodeReq.java
index bd89f23a63ddc..5f076b68ec387 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/payload/request/IoTConsensusV2TabletInsertNodeReq.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/iotconsensusv2/payload/request/IoTConsensusV2TabletInsertNodeReq.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.commons.queryengine.plan.planner.plan.node.PlanNodeType;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TCommitId;
import org.apache.iotdb.consensus.iotconsensusv2.thrift.TIoTConsensusV2TransferReq;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode;
import org.apache.tsfile.utils.PublicBAOS;
@@ -71,7 +72,7 @@ public static IoTConsensusV2TabletInsertNodeReq toTIoTConsensusV2TransferRawReq(
req.progressIndex =
ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size());
} catch (IOException e) {
- LOGGER.warn("Failed to serialize progress index {}", progressIndex, e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_SERIALIZE_PROGRESS_INDEX, progressIndex, e);
}
return req;
@@ -102,7 +103,7 @@ public static IoTConsensusV2TabletInsertNodeReq toTIoTConsensusV2TransferReq(
req.progressIndex =
ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size());
} catch (IOException e) {
- LOGGER.warn("Failed to serialize progress index {}", progressIndex, e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_SERIALIZE_PROGRESS_INDEX, progressIndex, e);
}
return req;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/legacy/IoTDBLegacyPipeSink.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/legacy/IoTDBLegacyPipeSink.java
index e296a7e0faa6e..829c9aed6b988 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/legacy/IoTDBLegacyPipeSink.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/legacy/IoTDBLegacyPipeSink.java
@@ -30,6 +30,7 @@
import org.apache.iotdb.commons.pipe.sink.client.IoTDBSyncClient;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent;
@@ -280,12 +281,12 @@ public void transfer(final TabletInsertionEvent tabletInsertionEvent) throws Exc
public void transfer(final TsFileInsertionEvent tsFileInsertionEvent) throws Exception {
if (!(tsFileInsertionEvent instanceof PipeTsFileInsertionEvent)) {
throw new NotImplementedException(
- "IoTDBLegacyPipeConnector only support PipeTsFileInsertionEvent.");
+ DataNodePipeMessages.IOTDBLEGACYPIPECONNECTOR_ONLY_SUPPORT_PIPETSFILEINSERTIONEVENT);
}
if (!((PipeTsFileInsertionEvent) tsFileInsertionEvent).waitForTsFileClose()) {
LOGGER.warn(
- "Pipe skipping temporary TsFile which shouldn't be transferred: {}",
+ DataNodePipeMessages.PIPE_SKIPPING_TEMPORARY_TSFILE_WHICH_SHOULDN_T,
((PipeTsFileInsertionEvent) tsFileInsertionEvent).getTsFile());
return;
}
@@ -305,7 +306,8 @@ public void transfer(final TsFileInsertionEvent tsFileInsertionEvent) throws Exc
public void transfer(final Event event) throws Exception {
if (!(event instanceof PipeHeartbeatEvent || event instanceof PipeTerminateEvent)) {
LOGGER.warn(
- "IoTDBLegacyPipeConnector does not support transferring generic event: {}.", event);
+ DataNodePipeMessages.IOTDBLEGACYPIPECONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT,
+ event);
}
}
@@ -409,7 +411,8 @@ private void transportSingleFilePieceByPiece(final File file) throws IOException
} else if (status.code == TSStatusCode.SYNC_FILE_REDIRECTION_ERROR.getStatusCode()) {
position = Long.parseLong(status.message);
randomAccessFile.seek(position);
- LOGGER.info("Redirect to position {} in transferring tsFile {}.", position, file);
+ LOGGER.info(
+ DataNodePipeMessages.REDIRECT_TO_POSITION_IN_TRANSFERRING_TSFILE, position, file);
} else if (status.code == TSStatusCode.SYNC_FILE_ERROR.getStatusCode()) {
final String errorMsg =
String.format("Network failed to receive tsFile %s, status: %s", file, status);
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcda/OpcDaServerHandle.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcda/OpcDaServerHandle.java
index 816d757391b9b..d5d62f915f2ff 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcda/OpcDaServerHandle.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcda/OpcDaServerHandle.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.db.pipe.sink.protocol.opcda;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.sink.util.sorter.PipeTreeModelTabletEventSorter;
import org.apache.iotdb.pipe.api.exception.PipeException;
@@ -85,7 +86,8 @@ public class OpcDaServerHandle implements Closeable {
if (hr.intValue() != WinError.S_OK.intValue()) {
throw new PipeException(
- "Failed to connect to server, error code: 0x" + Integer.toHexString(hr.intValue()));
+ DataNodePipeMessages.FAILED_TO_CONNECT_TO_SERVER_ERROR_CODE
+ + Integer.toHexString(hr.intValue()));
}
opcServer = new OpcDaHeader.IOPCServer(ppvServer.getValue());
@@ -110,12 +112,13 @@ public class OpcDaServerHandle implements Closeable {
if (hr2 == WinError.S_OK.intValue()) {
LOGGER.info(
- "Create group successfully! Server handle: {}, update rate: {} ms",
+ DataNodePipeMessages.CREATE_GROUP_SUCCESSFULLY_SERVER_HANDLE_UPDATE_RATE,
phServerGroup.getValue(),
pRevisedUpdateRate.getValue());
} else {
throw new PipeException(
- "Failed to create group,error code: 0x" + Integer.toHexString(hr.intValue()));
+ DataNodePipeMessages.FAILED_TO_CREATE_GROUP_ERROR_CODE_0X
+ + Integer.toHexString(hr.intValue()));
}
final IUnknown groupUnknown = new Unknown(phOPCGroup.getValue());
@@ -126,10 +129,13 @@ public class OpcDaServerHandle implements Closeable {
groupUnknown.QueryInterface(
new Guid.REFIID(new Guid.GUID.ByReference(IID_IOPCItemMgt).getPointer()), ppvItemMgt);
if (hr.intValue() == WinError.S_OK.intValue()) {
- LOGGER.info("Acquire IOPCItemMgt successfully! Interface address: {}", ppvItemMgt.getValue());
+ LOGGER.info(
+ DataNodePipeMessages.ACQUIRE_IOPCITEMMGT_SUCCESSFULLY_INTERFACE_ADDRESS,
+ ppvItemMgt.getValue());
} else {
throw new PipeException(
- "Failed to acquire IOPCItemMgt, error code: 0x" + Integer.toHexString(hr.intValue()));
+ DataNodePipeMessages.FAILED_TO_ACQUIRE_IOPCITEMMGT_ERROR_CODE_0X
+ + Integer.toHexString(hr.intValue()));
}
itemMgt = new OpcDaHeader.IOPCItemMgt(ppvItemMgt.getValue());
@@ -140,10 +146,13 @@ public class OpcDaServerHandle implements Closeable {
groupUnknown.QueryInterface(
new Guid.REFIID(new Guid.GUID.ByReference(IID_IOPCSyncIO).getPointer()), ppvSyncIO);
if (hr.intValue() == WinError.S_OK.intValue()) {
- LOGGER.info("Acquire IOPCSyncIO successfully! Interface address: {}", ppvSyncIO.getValue());
+ LOGGER.info(
+ DataNodePipeMessages.ACQUIRE_IOPCSYNCIO_SUCCESSFULLY_INTERFACE_ADDRESS,
+ ppvSyncIO.getValue());
} else {
throw new PipeException(
- "Failed to acquire IOPCSyncIO, error code: 0x" + Integer.toHexString(hr.intValue()));
+ DataNodePipeMessages.FAILED_TO_ACQUIRE_IOPCSYNCIO_ERROR_CODE_0X
+ + Integer.toHexString(hr.intValue()));
}
syncIO = new OpcDaHeader.IOPCSyncIO(ppvSyncIO.getValue());
}
@@ -170,11 +179,11 @@ static String getClsIDFromProgID(final String progID) {
pclsid.Data4[5],
pclsid.Data4[6],
pclsid.Data4[7]);
- LOGGER.info("Successfully converted progID {} to CLSID: {{}}", progID, clsidStr);
+ LOGGER.info(DataNodePipeMessages.SUCCESSFULLY_CONVERTED_PROGID_TO_CLSID, progID, clsidStr);
return clsidStr;
} else {
throw new PipeException(
- "Error: ProgID is invalid or unregistered, (HRESULT=0x"
+ DataNodePipeMessages.ERROR_PROGID_IS_INVALID_OR_UNREGISTERED_HRESULT
+ Integer.toHexString(hr.intValue())
+ ")");
}
@@ -232,10 +241,10 @@ private void addItem(final String itemId, final TSDataType type) {
try {
if (itemError == WinError.S_OK.intValue()) {
- LOGGER.debug("Successfully added item {}.", itemId);
+ LOGGER.debug(DataNodePipeMessages.SUCCESSFULLY_ADDED_ITEM, itemId);
} else {
throw new PipeException(
- "Failed to add item "
+ DataNodePipeMessages.FAILED_TO_ADD_ITEM
+ itemId
+ ", opc error code: 0x"
+ Integer.toHexString(itemError));
@@ -246,7 +255,8 @@ private void addItem(final String itemId, final TSDataType type) {
}
if (hr != WinError.S_OK.intValue()) {
- throw new PipeException("Failed to add item, win error code: 0x" + Integer.toHexString(hr));
+ throw new PipeException(
+ DataNodePipeMessages.FAILED_TO_ADD_ITEM_WIN_ERROR_CODE + Integer.toHexString(hr));
}
final Pointer pItemResults = ppItemResults.getValue();
@@ -281,7 +291,7 @@ private void writeData(final String itemId, final Variant.VARIANT value) {
try {
if (itemError != WinError.S_OK.intValue()) {
throw new PipeException(
- "Failed to write "
+ DataNodePipeMessages.FAILED_TO_WRITE
+ itemId
+ ", value: "
+ value
@@ -294,7 +304,8 @@ private void writeData(final String itemId, final Variant.VARIANT value) {
}
if (hr != WinError.S_OK.intValue()) {
- throw new PipeException("Failed to write, win error code: 0x" + Integer.toHexString(hr));
+ throw new PipeException(
+ DataNodePipeMessages.FAILED_TO_WRITE_WIN_ERROR_CODE_0X + Integer.toHexString(hr));
}
}
@@ -321,7 +332,8 @@ private short convertTsDataType2VariantType(final TSDataType dataType) {
case OBJECT:
return Variant.VT_BSTR;
default:
- throw new UnSupportedDataTypeException("UnSupported dataType " + dataType);
+ throw new UnSupportedDataTypeException(
+ DataNodePipeMessages.UNSUPPORTED_DATATYPE + dataType);
}
}
@@ -360,7 +372,7 @@ private Variant.VARIANT getTabletObjectValue4Opc(
value.setValue(Variant.VT_BSTR, bstr);
break;
default:
- throw new UnSupportedDataTypeException("UnSupported dataType " + type);
+ throw new UnSupportedDataTypeException(DataNodePipeMessages.UNSUPPORTED_DATATYPE + type);
}
return value;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcda/OpcDaSink.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcda/OpcDaSink.java
index 322943de402ea..d13e743bfc9a6 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcda/OpcDaSink.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcda/OpcDaSink.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.db.pipe.sink.protocol.opcda;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.sink.protocol.opcua.OpcUaSink;
import org.apache.iotdb.pipe.api.PipeConnector;
import org.apache.iotdb.pipe.api.annotation.TreeModel;
@@ -72,7 +73,8 @@ public void validate(final PipeParameterValidator validator) throws Exception {
validator.getParameters().hasAttribute(CONNECTOR_OPC_DA_PROGID_KEY));
if (!System.getProperty("os.name").toLowerCase().startsWith("windows")) {
- throw new PipeParameterNotValidException("opc-da-sink must run on windows system.");
+ throw new PipeParameterNotValidException(
+ DataNodePipeMessages.OPC_DA_SINK_MUST_RUN_ON_WINDOWS);
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/OpcUaSink.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/OpcUaSink.java
index b5f383619e153..a4daf16610ce6 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/OpcUaSink.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/OpcUaSink.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.commons.consensus.DataRegionId;
import org.apache.iotdb.commons.utils.PathUtils;
import org.apache.iotdb.db.conf.IoTDBConfig;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent;
import org.apache.iotdb.db.pipe.sink.protocol.opcua.client.ClientRunner;
@@ -243,8 +244,7 @@ public void customize(
databaseName = Objects.nonNull(region) ? region.getDatabaseName() : "root.__temp_db";
if (withQuality && PathUtils.isTableModelDatabase(databaseName)) {
- throw new PipeException(
- "When the OPC UA sink sets 'with-quality' to true, the table model data is not supported.");
+ throw new PipeException(DataNodePipeMessages.WHEN_THE_OPC_UA_SINK_SETS_WITH);
}
nodeUrl = parameters.getStringByKeys(CONNECTOR_OPC_UA_NODE_URL_KEY, SINK_OPC_UA_NODE_URL_KEY);
@@ -252,8 +252,7 @@ public void customize(
customizeServer(parameters);
} else {
if (PathUtils.isTableModelDatabase(databaseName)) {
- throw new PipeException(
- "When the OPC UA sink points to an outer server, the table model data is not supported.");
+ throw new PipeException(DataNodePipeMessages.WHEN_THE_OPC_UA_SINK_POINTS_TO);
}
customizeClient(parameters);
}
@@ -309,7 +308,7 @@ private void customizeServer(final PipeParameters parameters) {
.map(this::getSecurityPolicy)
.collect(Collectors.toSet());
if (securityPolicies.isEmpty()) {
- throw new PipeException("The security policy cannot be empty.");
+ throw new PipeException(DataNodePipeMessages.THE_SECURITY_POLICY_CANNOT_BE_EMPTY);
}
final long debounceTimeMs =
parameters.getLongOrDefault(
@@ -356,7 +355,8 @@ private void customizeServer(final PipeParameters parameters) {
} catch (final PipeException e) {
throw e;
} catch (final Exception e) {
- throw new PipeException("Failed to build and startup OpcUaServer", e);
+ throw new PipeException(
+ DataNodePipeMessages.FAILED_TO_BUILD_AND_STARTUP_OPCUASERVER, e);
}
})
.getRight();
@@ -444,8 +444,7 @@ private SecurityPolicy getSecurityPolicy(final String securityPolicy) {
case CONNECTOR_OPC_UA_SECURITY_POLICY_AES256_SHA256_RSAPSS_VALUE:
return SecurityPolicy.Aes256_Sha256_RsaPss;
default:
- throw new PipeException(
- "The security policy can only be 'None', 'Basic128Rsa15', 'Basic256', 'Basic256Sha256', 'Aes128_Sha256_RsaOaep' or 'Aes256_Sha256_RsaPss'.");
+ throw new PipeException(DataNodePipeMessages.THE_SECURITY_POLICY_CAN_ONLY_BE_NONE);
}
}
@@ -458,7 +457,7 @@ private StatusCode getQuality(final String quality) {
case CONNECTOR_OPC_UA_DEFAULT_QUALITY_UNCERTAIN_VALUE:
return StatusCode.UNCERTAIN;
default:
- throw new PipeException("The default quality can only be 'GOOD', 'BAD' or 'UNCERTAIN'.");
+ throw new PipeException(DataNodePipeMessages.THE_DEFAULT_QUALITY_CAN_ONLY_BE_GOOD);
}
}
@@ -488,8 +487,7 @@ public void transfer(final TabletInsertionEvent tabletInsertionEvent) throws Exc
} else if (Objects.nonNull(client)) {
client.transfer(tablet, this);
} else {
- throw new PipeException(
- "No OPC client or server is specified when transferring tablet");
+ throw new PipeException(DataNodePipeMessages.NO_OPC_CLIENT_OR_SERVER_IS_SPECIFIED);
}
});
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/client/ClientRunner.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/client/ClientRunner.java
index 69fe16f1aaa55..2047e4fbe5919 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/client/ClientRunner.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/client/ClientRunner.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.db.pipe.sink.protocol.opcua.client;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.pipe.api.exception.PipeException;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
@@ -74,13 +75,13 @@ public ClientRunner(
private OpcUaClient createClient() throws Exception {
Files.createDirectories(securityDir);
if (!Files.exists(securityDir)) {
- throw new Exception("unable to create security dir: " + securityDir);
+ throw new Exception(DataNodePipeMessages.UNABLE_TO_CREATE_SECURITY_DIR + securityDir);
}
final File pkiDir = securityDir.resolve("pki").toFile();
- logger.info("security dir: {}", securityDir.toAbsolutePath());
- logger.info("security pki dir: {}", pkiDir.getAbsolutePath());
+ logger.info(DataNodePipeMessages.SECURITY_DIR, securityDir.toAbsolutePath());
+ logger.info(DataNodePipeMessages.SECURITY_PKI_DIR, pkiDir.getAbsolutePath());
final IoTDBKeyStoreLoaderClient loader =
new IoTDBKeyStoreLoaderClient().load(securityDir, password.toCharArray());
@@ -116,11 +117,19 @@ public void run() {
configurableUaClient.run(client);
} catch (final Exception e) {
throw new PipeException(
- "Error running opc client: " + e.getClass().getSimpleName() + ": " + e.getMessage(), e);
+ DataNodePipeMessages.ERROR_RUNNING_OPC_CLIENT
+ + e.getClass().getSimpleName()
+ + ": "
+ + e.getMessage(),
+ e);
}
} catch (final Exception e) {
throw new PipeException(
- "Error getting opc client: " + e.getClass().getSimpleName() + ": " + e.getMessage(), e);
+ DataNodePipeMessages.ERROR_GETTING_OPC_CLIENT
+ + e.getClass().getSimpleName()
+ + ": "
+ + e.getMessage(),
+ e);
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/client/IoTDBKeyStoreLoaderClient.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/client/IoTDBKeyStoreLoaderClient.java
index bfaf378822c31..455018695dc27 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/client/IoTDBKeyStoreLoaderClient.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/client/IoTDBKeyStoreLoaderClient.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.db.pipe.sink.protocol.opcua.client;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
+
import org.eclipse.milo.opcua.sdk.server.util.HostnameUtil;
import org.eclipse.milo.opcua.stack.core.util.SelfSignedCertificateBuilder;
import org.eclipse.milo.opcua.stack.core.util.SelfSignedCertificateGenerator;
@@ -56,7 +58,7 @@ IoTDBKeyStoreLoaderClient load(final Path baseDir, final char[] password) throws
final Path serverKeyStore = baseDir.resolve("iotdb-client.pfx");
- logger.info("Loading KeyStore at {}.", serverKeyStore);
+ logger.info(DataNodePipeMessages.LOADING_KEYSTORE_AT_1, serverKeyStore);
if (!Files.exists(serverKeyStore)) {
keyStore.load(null, password);
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/client/IoTDBOpcUaClient.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/client/IoTDBOpcUaClient.java
index a9aa95cacdf2a..9aea22ab3e37d 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/client/IoTDBOpcUaClient.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/client/IoTDBOpcUaClient.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.pipe.resource.log.PipeLogger;
import org.apache.iotdb.commons.utils.TestOnly;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.sink.protocol.opcua.OpcUaSink;
import org.apache.iotdb.db.pipe.sink.protocol.opcua.server.OpcUaNameSpace;
import org.apache.iotdb.pipe.api.exception.PipeException;
@@ -143,7 +144,7 @@ private void transferTabletRowForClientServerModel(
if (Objects.nonNull(sink.getQualityName()) && sink.getQualityName().equals(name)) {
if (!type.equals(TSDataType.BOOLEAN)) {
throw new UnsupportedOperationException(
- "The quality value only supports boolean type, while true == GOOD and false == BAD.");
+ DataNodePipeMessages.THE_QUALITY_VALUE_ONLY_SUPPORTS_BOOLEAN_TYPE);
}
currentQuality = values.get(i) == Boolean.TRUE ? StatusCode.GOOD : StatusCode.BAD;
continue;
@@ -203,7 +204,7 @@ private void writeValue(
if (!result.getStatusCode().equals(StatusCode.GOOD)
&& !(result.getStatusCode().getValue() == StatusCodes.Bad_NodeIdExists)) {
throw new PipeException(
- "Failed to create nodes after transfer data value, creation status: "
+ DataNodePipeMessages.FAILED_TO_CREATE_NODES_AFTER_TRANSFER_DATA
+ addStatus
+ getErrorString(segments, name, opcDataType, value, writeStatus));
}
@@ -211,12 +212,12 @@ private void writeValue(
writeStatus = client.writeValue(nodeId, dataValue).get();
if (writeStatus.getValue() != StatusCode.GOOD.getValue()) {
throw new PipeException(
- "Failed to transfer dataValue after successfully created nodes"
+ DataNodePipeMessages.FAILED_TO_TRANSFER_DATAVALUE_AFTER_SUCCESSFULLY_CREATED
+ getErrorString(segments, name, opcDataType, value, writeStatus));
}
} else if (writeStatus.getValue() != StatusCode.GOOD.getValue()) {
throw new PipeException(
- "Failed to transfer dataValue"
+ DataNodePipeMessages.FAILED_TO_TRANSFER_DATAVALUE
+ getErrorString(segments, name, opcDataType, value, writeStatus));
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/server/OpcUaKeyStoreLoader.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/server/OpcUaKeyStoreLoader.java
index 16f176a3c5370..5fd6e5501cabf 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/server/OpcUaKeyStoreLoader.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/server/OpcUaKeyStoreLoader.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.db.pipe.sink.protocol.opcua.server;
import org.apache.iotdb.commons.utils.FileUtils;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import com.google.common.collect.Sets;
import org.eclipse.milo.opcua.sdk.server.util.HostnameUtil;
@@ -60,13 +61,13 @@ OpcUaKeyStoreLoader load(final Path baseDir, final char[] password) throws Excep
final File serverKeyStore = baseDir.resolve("iotdb-server.pfx").toFile();
- LOGGER.info("Loading KeyStore at {}", serverKeyStore);
+ LOGGER.info(DataNodePipeMessages.LOADING_KEYSTORE_AT, serverKeyStore);
if (serverKeyStore.exists()) {
try (InputStream is = Files.newInputStream(serverKeyStore.toPath())) {
keyStore.load(is, password);
} catch (final IOException e) {
- LOGGER.warn("Load keyStore failed, the existing keyStore may be stale, re-constructing...");
+ LOGGER.warn(DataNodePipeMessages.LOAD_KEYSTORE_FAILED_THE_EXISTING_KEYSTORE_MAY);
FileUtils.deleteFileOrDirectory(serverKeyStore);
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/server/OpcUaNameSpace.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/server/OpcUaNameSpace.java
index 816532ffbd8d4..917720220bf2d 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/server/OpcUaNameSpace.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/server/OpcUaNameSpace.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.commons.pipe.resource.log.PipeLogger;
import org.apache.iotdb.commons.queryengine.utils.DateTimeUtils;
import org.apache.iotdb.commons.queryengine.utils.TimestampPrecisionUtils;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.sink.protocol.opcua.OpcUaSink;
import org.apache.iotdb.db.pipe.sink.util.sorter.PipeTableModelTabletEventSorter;
import org.apache.iotdb.db.pipe.sink.util.sorter.PipeTreeModelTabletEventSorter;
@@ -213,7 +214,8 @@ private void transferTabletRowForClientServerModel(
final List values,
final OpcUaSink sink) {
if (segments.length == 0) {
- throw new PipeRuntimeCriticalException("The segments of tablets must exist");
+ throw new PipeRuntimeCriticalException(
+ DataNodePipeMessages.THE_SEGMENTS_OF_TABLETS_MUST_EXIST);
}
final StringBuilder currentStr = new StringBuilder();
UaNode folderNode = null;
@@ -281,7 +283,7 @@ private void transferTabletRowForClientServerModel(
if (Objects.nonNull(sink.getQualityName()) && sink.getQualityName().equals(name)) {
if (!type.equals(TSDataType.BOOLEAN)) {
throw new UnsupportedOperationException(
- "The quality value only supports boolean type, while true == GOOD and false == BAD.");
+ DataNodePipeMessages.THE_QUALITY_VALUE_ONLY_SUPPORTS_BOOLEAN_TYPE);
}
currentQuality = values.get(i) == Boolean.TRUE ? StatusCode.GOOD : StatusCode.BAD;
continue;
@@ -401,7 +403,7 @@ private static Object getTabletObjectValue4Opc(
case STRING:
return ((Binary[]) column)[rowIndex].toString();
default:
- throw new UnSupportedDataTypeException("UnSupported dataType " + type);
+ throw new UnSupportedDataTypeException(DataNodePipeMessages.UNSUPPORTED_DATATYPE + type);
}
}
@@ -526,7 +528,8 @@ private void transferTabletForPubSubModel(
case UNKNOWN:
default:
throw new PipeRuntimeNonCriticalException(
- "Unsupported data type: " + tablet.getSchemas().get(columnIndex).getType());
+ DataNodePipeMessages.UNSUPPORTED_DATA_TYPE
+ + tablet.getSchemas().get(columnIndex).getType());
}
// Send the event
@@ -559,7 +562,8 @@ public static NodeId convertToOpcDataType(final TSDataType type) {
case OBJECT:
case UNKNOWN:
default:
- throw new PipeRuntimeNonCriticalException("Unsupported data type: " + type);
+ throw new PipeRuntimeNonCriticalException(
+ DataNodePipeMessages.UNSUPPORTED_DATA_TYPE + type);
}
}
@@ -608,7 +612,9 @@ public void notifyNodeValueChange(
} catch (Exception e) {
// Single client push failure does not affect other clients
LOGGER.warn(
- "Failed to push value change to client, nodeId={}", nodeId, e);
+ DataNodePipeMessages.FAILED_TO_PUSH_VALUE_CHANGE_TO_CLIENT,
+ nodeId,
+ e);
}
}
} finally {
@@ -653,7 +659,7 @@ public void onDataItemsCreated(final List dataItems) {
item.setValue(node.getValue());
}
} catch (Exception e) {
- LOGGER.warn("Failed to send initial value to new subscription, nodeId={}", nodeId, e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_SEND_INITIAL_VALUE_TO_NEW, nodeId, e);
}
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/server/OpcUaServerBuilder.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/server/OpcUaServerBuilder.java
index 281d6eae77edd..297805162a914 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/server/OpcUaServerBuilder.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/opcua/server/OpcUaServerBuilder.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.db.pipe.sink.protocol.opcua.server;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.pipe.api.exception.PipeException;
import org.eclipse.milo.opcua.sdk.server.OpcUaServer;
@@ -136,7 +137,7 @@ public long getDebounceTimeMs() {
public OpcUaServer build() throws Exception {
Files.createDirectories(securityDir);
if (!Files.exists(securityDir)) {
- throw new PipeException("Unable to create security dir: " + securityDir);
+ throw new PipeException(DataNodePipeMessages.UNABLE_CREATE_SECURITY_DIR + securityDir);
}
final File pkiDir = securityDir.resolve("pki").toFile();
@@ -157,7 +158,7 @@ public OpcUaServer build() throws Exception {
trustListManager = new DefaultTrustListManager(pkiDir);
LOGGER.info(
- "Certificate directory is: {}, Please move certificates from the reject dir to the trusted directory to allow encrypted access",
+ DataNodePipeMessages.CERTIFICATE_DIRECTORY_IS_PLEASE_MOVE_CERTIFICATES_FROM,
pkiDir.getAbsolutePath());
final KeyPair httpsKeyPair = SelfSignedCertificateGenerator.generateRsaKeyPair(2048);
@@ -356,7 +357,7 @@ public void close() {
try {
trustListManager.close();
} catch (final IOException e) {
- LOGGER.warn("Failed to close trustListManager, because {}.", e.getMessage());
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_CLOSE_TRUSTLISTMANAGER_BECAUSE, e.getMessage());
}
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/IoTDBDataRegionAsyncSink.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/IoTDBDataRegionAsyncSink.java
index 554c6c43dfbb0..5756b9e3f80b3 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/IoTDBDataRegionAsyncSink.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/IoTDBDataRegionAsyncSink.java
@@ -28,6 +28,7 @@
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
import org.apache.iotdb.commons.pipe.resource.log.PipeLogger;
import org.apache.iotdb.commons.pipe.sink.protocol.IoTDBSink;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.deletion.PipeDeleteDataNodeEvent;
import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent;
@@ -270,14 +271,15 @@ private void transferInBatchWithoutCheck(
sealedFile.left));
}
} catch (final Exception e) {
- LOGGER.warn("Failed to transfer tsfile batch ({}).", dbTsFilePairs, e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_TRANSFER_TSFILE_BATCH, dbTsFilePairs, e);
if (eventsHadBeenAddedToRetryQueue.compareAndSet(false, true)) {
addFailureEventsToRetryQueue(events, e);
}
}
} else {
LOGGER.warn(
- "Unsupported batch type {} when transferring tablet insertion event.", batch.getClass());
+ DataNodePipeMessages.UNSUPPORTED_BATCH_TYPE_WHEN_TRANSFERRING_TABLET_INSERTION,
+ batch.getClass());
}
endPointAndBatch.getRight().onSuccess();
@@ -381,7 +383,8 @@ public void transfer(final TsFileInsertionEvent tsFileInsertionEvent) throws Exc
if (!(tsFileInsertionEvent instanceof PipeTsFileInsertionEvent)) {
LOGGER.warn(
- "IoTDBThriftAsyncConnector only support PipeTsFileInsertionEvent. Current event: {}.",
+ DataNodePipeMessages
+ .IOTDBTHRIFTASYNCCONNECTOR_ONLY_SUPPORT_PIPETSFILEINSERTIONEVENT_CURRENT_EVENT,
tsFileInsertionEvent);
return;
}
@@ -461,14 +464,14 @@ private void transfer(final PipeTransferTsFileHandler pipeTransferTsFileHandler)
if (e instanceof InterruptedException) {
Thread.currentThread().interrupt();
LOGGER.warn(
- "Transfer tsfile event {} asynchronously was interrupted.",
+ DataNodePipeMessages.TRANSFER_TSFILE_EVENT_ASYNCHRONOUSLY_WAS_INTERRUPTED,
pipeTransferTsFileHandler.getTsFile(),
e);
}
pipeTransferTsFileHandler.onError(e);
LOGGER.warn(
- "Failed to transfer tsfile event {} asynchronously.",
+ DataNodePipeMessages.FAILED_TO_TRANSFER_TSFILE_EVENT_ASYNCHRONOUSLY,
pipeTransferTsFileHandler.getTsFile(),
e);
}
@@ -484,7 +487,9 @@ public void transfer(final Event event) throws Exception {
|| event instanceof PipeDeleteDataNodeEvent
|| event instanceof PipeTerminateEvent)) {
LOGGER.warn(
- "IoTDBThriftAsyncConnector does not support transferring generic event: {}.", event);
+ DataNodePipeMessages
+ .IOTDBTHRIFTASYNCCONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT,
+ event);
return;
}
@@ -574,7 +579,8 @@ private void transferQueuedEventsIfNecessary(final boolean forced) {
retryTransfer((PipeRawTabletInsertionEvent) peekedEvent);
} else {
LOGGER.warn(
- "IoTDBThriftAsyncConnector does not support transfer generic event: {}.",
+ DataNodePipeMessages
+ .IOTDBTHRIFTASYNCCONNECTOR_DOES_NOT_SUPPORT_TRANSFER_GENERIC_EVENT,
peekedEvent);
}
@@ -598,7 +604,7 @@ private void transferQueuedEventsIfNecessary(final boolean forced) {
polledEvent);
}
if (polledEvent != null && LOGGER.isDebugEnabled()) {
- LOGGER.debug("Polled event {} from retry queue.", polledEvent);
+ LOGGER.debug(DataNodePipeMessages.POLLED_EVENT_FROM_RETRY_QUEUE, polledEvent);
}
}
@@ -714,7 +720,7 @@ public void addFailureEventToRetryQueue(final Event event, final Exception e) {
}
if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("Added event {} to retry queue.", event);
+ LOGGER.debug(DataNodePipeMessages.ADDED_EVENT_TO_RETRY_QUEUE, event);
}
if (isClosed.get()) {
@@ -803,7 +809,7 @@ public synchronized void close() {
transferTsFileClientManager.close();
}
} catch (final Exception e) {
- LOGGER.warn("Failed to close client manager.", e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_CLOSE_CLIENT_MANAGER, e);
}
// clear reference count of events in retry queue after closing async client
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTabletBatchEventHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTabletBatchEventHandler.java
index 52c52b1038e9d..dc6eae943b8b1 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTabletBatchEventHandler.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTabletBatchEventHandler.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.commons.client.async.AsyncPipeDataTransferServiceClient;
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
import org.apache.iotdb.commons.pipe.resource.log.PipeLogger;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.sink.payload.evolvable.batch.PipeTabletEventPlainBatch;
import org.apache.iotdb.db.pipe.sink.protocol.thrift.async.IoTDBDataRegionAsyncSink;
import org.apache.iotdb.db.pipe.sink.util.cacher.LeaderCacheUtils;
@@ -83,7 +84,7 @@ public void transfer(final AsyncPipeDataTransferServiceClient client) throws TEx
protected boolean onCompleteInternal(final TPipeTransferResp response) {
// Just in case
if (response == null) {
- onError(new PipeException("TPipeTransferResp is null"));
+ onError(new PipeException(DataNodePipeMessages.TPIPETRANSFERRESP_IS_NULL));
return false;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTabletInsertionEventHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTabletInsertionEventHandler.java
index ac252818c14d3..1500be2789430 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTabletInsertionEventHandler.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTabletInsertionEventHandler.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.commons.client.async.AsyncPipeDataTransferServiceClient;
import org.apache.iotdb.commons.pipe.resource.log.PipeLogger;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.PipeInsertionEvent;
import org.apache.iotdb.db.pipe.sink.protocol.thrift.async.IoTDBDataRegionAsyncSink;
import org.apache.iotdb.pipe.api.exception.PipeException;
@@ -62,7 +63,7 @@ public void transfer(final AsyncPipeDataTransferServiceClient client) throws TEx
protected boolean onCompleteInternal(final TPipeTransferResp response) {
// Just in case
if (response == null) {
- onError(new PipeException("TPipeTransferResp is null"));
+ onError(new PipeException(DataNodePipeMessages.TPIPETRANSFERRESP_IS_NULL));
return false;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTrackableHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTrackableHandler.java
index a8b4a3b7a79a7..1825c6290bcb6 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTrackableHandler.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTrackableHandler.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.client.ThriftClient;
import org.apache.iotdb.commons.client.async.AsyncPipeDataTransferServiceClient;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.sink.protocol.thrift.async.IoTDBDataRegionAsyncSink;
import org.apache.iotdb.service.rpc.thrift.TPipeTransferReq;
import org.apache.iotdb.service.rpc.thrift.TPipeTransferResp;
@@ -101,8 +102,7 @@ protected boolean tryTransfer(
client.returnSelf(
(e) -> {
if (e instanceof IllegalStateException) {
- LOGGER.info(
- "Illegal state when return the client to object pool, maybe the pool is already cleared. Will ignore.");
+ LOGGER.info(DataNodePipeMessages.ILLEGAL_STATE_WHEN_RETURN_THE_CLIENT_TO);
return true;
}
return false;
@@ -137,7 +137,7 @@ public void closeClient() {
client.invalidateAll();
} catch (final Exception e) {
LOGGER.warn(
- "Failed to close or invalidate client when connector is closed. Client: {}, Exception: {}",
+ DataNodePipeMessages.FAILED_TO_CLOSE_OR_INVALIDATE_CLIENT_WHEN,
client,
e.getMessage(),
e);
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTsFileHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTsFileHandler.java
index 35a28d1413a5d..474fc21a1a586 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTsFileHandler.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/async/handler/PipeTransferTsFileHandler.java
@@ -27,6 +27,7 @@
import org.apache.iotdb.commons.pipe.sink.limiter.TsFileSendRateLimiter;
import org.apache.iotdb.commons.pipe.sink.payload.thrift.response.PipeTransferFilePieceResp;
import org.apache.iotdb.commons.utils.RetryUtils;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent;
import org.apache.iotdb.db.pipe.metric.overview.PipeResourceMetrics;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
@@ -161,7 +162,7 @@ public void transfer(
if (client == null) {
LOGGER.warn(
- "Client has been returned to the pool. Current handler status is {}. Will not transfer {}.",
+ DataNodePipeMessages.CLIENT_HAS_BEEN_RETURNED_TO_THE_POOL,
sink.isClosed() ? "CLOSED" : "NOT CLOSED",
tsFile);
return;
@@ -183,7 +184,7 @@ public void transfer(
try {
reader.close();
} catch (final IOException e) {
- LOGGER.warn("Failed to close file reader when successfully transferred mod file.", e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_CLOSE_FILE_READER_WHEN_SUCCESSFULLY, e);
}
reader = new RandomAccessFile(tsFile, "r");
transfer(clientManager, client);
@@ -289,8 +290,7 @@ protected boolean onCompleteInternal(final TPipeTransferResp response) {
});
}
} catch (final IOException e) {
- LOGGER.warn(
- "Failed to close file reader or delete tsFile when successfully transferred file.", e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_CLOSE_FILE_READER_OR_DELETE_1, e);
} finally {
final int referenceCount = eventsReferenceCount.decrementAndGet();
if (referenceCount <= 0) {
@@ -301,14 +301,15 @@ protected boolean onCompleteInternal(final TPipeTransferResp response) {
if (events.size() <= 1 || LOGGER.isDebugEnabled()) {
LOGGER.info(
- "Successfully transferred file {} (committer key={}, commit id={}, reference count={}).",
+ DataNodePipeMessages.SUCCESSFULLY_TRANSFERRED_FILE_COMMITTER_KEY_COMMIT_ID,
tsFile,
events.stream().map(EnrichedEvent::getCommitterKey).collect(Collectors.toList()),
events.stream().map(EnrichedEvent::getCommitIds).collect(Collectors.toList()),
referenceCount);
} else {
LOGGER.info(
- "Successfully transferred file {} (batched TableInsertionEvents, reference count={}).",
+ DataNodePipeMessages
+ .SUCCESSFULLY_TRANSFERRED_FILE_BATCHED_TABLEINSERTIONEVENTS_REFERENCE_COUNT,
tsFile,
referenceCount);
}
@@ -331,7 +332,7 @@ protected boolean onCompleteInternal(final TPipeTransferResp response) {
if (code == TSStatusCode.PIPE_TRANSFER_FILE_OFFSET_RESET.getStatusCode()) {
position = resp.getEndWritingOffset();
reader.seek(position);
- LOGGER.info("Redirect file position to {}.", position);
+ LOGGER.info(DataNodePipeMessages.REDIRECT_FILE_POSITION_TO, position);
} else {
final TSStatus status = response.getStatus();
// Only handle the failed statuses to avoid string format performance overhead
@@ -378,7 +379,7 @@ protected void onErrorInternal(final Exception exception) {
tsFile);
}
} catch (final Exception e) {
- LOGGER.warn("Failed to log error when failed to transfer file.", e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_LOG_ERROR_WHEN_FAILED_TO, e);
}
try {
@@ -386,7 +387,7 @@ protected void onErrorInternal(final Exception exception) {
clientManager.adjustTimeoutIfNecessary(exception);
}
} catch (final Exception e) {
- LOGGER.warn("Failed to adjust timeout when failed to transfer file.", e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_ADJUST_TIMEOUT_WHEN_FAILED_TO, e);
}
try {
@@ -403,7 +404,7 @@ protected void onErrorInternal(final Exception exception) {
});
}
} catch (final IOException e) {
- LOGGER.warn("Failed to close file reader or delete tsFile when failed to transfer file.", e);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_CLOSE_FILE_READER_OR_DELETE, e);
} finally {
try {
returnClientIfNecessary();
@@ -428,8 +429,7 @@ private void returnClientIfNecessary() {
client.returnSelf(
(e) -> {
if (e instanceof IllegalStateException) {
- LOGGER.info(
- "Illegal state when return the client to object pool, maybe the pool is already cleared. Will ignore.");
+ LOGGER.info(DataNodePipeMessages.ILLEGAL_STATE_WHEN_RETURN_THE_CLIENT_TO);
return true;
}
return false;
@@ -443,7 +443,7 @@ protected void doTransfer(
throws TException {
if (client == null) {
LOGGER.warn(
- "Client has been returned to the pool. Current handler status is {}. Will not transfer {}.",
+ DataNodePipeMessages.CLIENT_HAS_BEEN_RETURNED_TO_THE_POOL,
sink.isClosed() ? "CLOSED" : "NOT CLOSED",
tsFile);
return;
@@ -493,13 +493,13 @@ private void waitForResourceEnough4Slicing(final long timeoutMs) throws Interrup
final double waitTimeSeconds = (currentTime - startTime) / 1000.0;
if (elapsedRecordTimeSeconds > 10.0) {
LOGGER.info(
- "Wait for resource enough for slicing tsfile {} for {} seconds.",
+ DataNodePipeMessages.WAIT_FOR_RESOURCE_ENOUGH_FOR_SLICING_TSFILE,
tsFile,
waitTimeSeconds);
lastRecordTime = currentTime;
} else if (LOGGER.isDebugEnabled()) {
LOGGER.debug(
- "Wait for resource enough for slicing tsfile {} for {} seconds.",
+ DataNodePipeMessages.WAIT_FOR_RESOURCE_ENOUGH_FOR_SLICING_TSFILE,
tsFile,
waitTimeSeconds);
}
@@ -514,6 +514,6 @@ private void waitForResourceEnough4Slicing(final long timeoutMs) throws Interrup
final long currentTime = System.currentTimeMillis();
final double waitTimeSeconds = (currentTime - startTime) / 1000.0;
LOGGER.info(
- "Wait for resource enough for slicing tsfile {} for {} seconds.", tsFile, waitTimeSeconds);
+ DataNodePipeMessages.WAIT_FOR_RESOURCE_ENOUGH_FOR_SLICING_TSFILE, tsFile, waitTimeSeconds);
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/sync/IoTDBDataRegionSyncSink.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/sync/IoTDBDataRegionSyncSink.java
index e8c4420861c59..5f0b5b1fa0ba4 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/sync/IoTDBDataRegionSyncSink.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/sync/IoTDBDataRegionSyncSink.java
@@ -26,6 +26,7 @@
import org.apache.iotdb.commons.pipe.sink.limiter.TsFileSendRateLimiter;
import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferFilePieceReq;
import org.apache.iotdb.commons.utils.RetryUtils;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.deletion.PipeDeleteDataNodeEvent;
import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent;
@@ -164,7 +165,8 @@ public void transfer(final TsFileInsertionEvent tsFileInsertionEvent) throws Exc
// PipeProcessor can change the type of tsFileInsertionEvent
if (!(tsFileInsertionEvent instanceof PipeTsFileInsertionEvent)) {
LOGGER.warn(
- "IoTDBThriftSyncConnector only support PipeTsFileInsertionEvent. Ignore {}.",
+ DataNodePipeMessages
+ .IOTDBTHRIFTSYNCCONNECTOR_ONLY_SUPPORT_PIPETSFILEINSERTIONEVENT_IGNORE,
tsFileInsertionEvent);
return;
}
@@ -200,7 +202,8 @@ public void transfer(final Event event) throws Exception {
if (!(event instanceof PipeHeartbeatEvent || event instanceof PipeTerminateEvent)) {
LOGGER.warn(
- "IoTDBThriftSyncConnector does not support transferring generic event: {}.", event);
+ DataNodePipeMessages.IOTDBTHRIFTSYNCCONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT,
+ event);
}
}
@@ -257,7 +260,8 @@ private void doTransfer(final PipeDeleteDataNodeEvent pipeDeleteDataNodeEvent)
}
if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("Successfully transferred deletion event {}.", pipeDeleteDataNodeEvent);
+ LOGGER.debug(
+ DataNodePipeMessages.SUCCESSFULLY_TRANSFERRED_DELETION_EVENT, pipeDeleteDataNodeEvent);
}
}
@@ -276,7 +280,7 @@ private void doTransferWrapper(final Pair endPo
} else if (batch instanceof PipeTabletEventTsFileBatch) {
doTransfer((PipeTabletEventTsFileBatch) batch);
} else {
- LOGGER.warn("Unsupported batch type {}.", batch.getClass());
+ LOGGER.warn(DataNodePipeMessages.UNSUPPORTED_BATCH_TYPE, batch.getClass());
}
batch.decreaseEventsReferenceCount(IoTDBDataRegionSyncSink.class.getName(), true);
batch.onSuccess();
@@ -343,10 +347,9 @@ private void doTransfer(final PipeTabletEventTsFileBatch batchToTransfer)
return null;
});
} catch (final NoSuchFileException e) {
- LOGGER.info("The file {} is not found, may already be deleted.", dbTsFile);
+ LOGGER.info(DataNodePipeMessages.THE_FILE_IS_NOT_FOUND_MAY_ALREADY, dbTsFile);
} catch (final Exception e) {
- LOGGER.warn(
- "Failed to delete batch file {}, this file should be deleted manually later", dbTsFile);
+ LOGGER.warn(DataNodePipeMessages.FAILED_TO_DELETE_BATCH_FILE_THIS_FILE, dbTsFile);
}
}
}
@@ -586,7 +589,7 @@ private void doTransfer(
tsFile.getName());
}
- LOGGER.info("Successfully transferred file {}.", tsFile);
+ LOGGER.info(DataNodePipeMessages.SUCCESSFULLY_TRANSFERRED_FILE, tsFile);
}
@Override
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/sync/IoTDBSchemaRegionSink.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/sync/IoTDBSchemaRegionSink.java
index 15a72d1193aec..aa84702e6f104 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/sync/IoTDBSchemaRegionSink.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/thrift/sync/IoTDBSchemaRegionSink.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.commons.pipe.sink.client.IoTDBSyncClient;
import org.apache.iotdb.commons.pipe.sink.payload.thrift.request.PipeTransferFilePieceReq;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent;
import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionSnapshotEvent;
import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent;
@@ -57,13 +58,13 @@ public class IoTDBSchemaRegionSink extends IoTDBDataNodeSyncSink {
@Override
public void transfer(final TabletInsertionEvent tabletInsertionEvent) throws Exception {
throw new UnsupportedOperationException(
- "IoTDBSchemaRegionConnector can't transfer TabletInsertionEvent.");
+ DataNodePipeMessages.IOTDBSCHEMAREGIONCONNECTOR_CAN_T_TRANSFER_TABLETINSERTIONEVENT);
}
@Override
public void transfer(final TsFileInsertionEvent tsFileInsertionEvent) throws Exception {
throw new UnsupportedOperationException(
- "IoTDBSchemaRegionConnector can't transfer TsFileInsertionEvent.");
+ DataNodePipeMessages.IOTDBSCHEMAREGIONCONNECTOR_CAN_T_TRANSFER_TSFILEINSERTIONEVENT);
}
@Override
@@ -74,7 +75,9 @@ public void transfer(final Event event) throws Exception {
doTransferWrapper((PipeSchemaRegionSnapshotEvent) event);
} else if (!(event instanceof PipeHeartbeatEvent)) {
LOGGER.warn(
- "IoTDBSchemaRegionConnector does not support transferring generic event: {}.", event);
+ DataNodePipeMessages
+ .IOTDBSCHEMAREGIONCONNECTOR_DOES_NOT_SUPPORT_TRANSFERRING_GENERIC_EVENT,
+ event);
}
}
@@ -131,7 +134,8 @@ private void doTransfer(final PipeSchemaRegionWritePlanEvent pipeSchemaRegionWri
true);
}
- LOGGER.info("Successfully transferred schema event {}.", pipeSchemaRegionWritePlanEvent);
+ LOGGER.info(
+ DataNodePipeMessages.SUCCESSFULLY_TRANSFERRED_SCHEMA_EVENT, pipeSchemaRegionWritePlanEvent);
}
private void doTransferWrapper(final PipeSchemaRegionSnapshotEvent pipeSchemaRegionSnapshotEvent)
@@ -226,7 +230,7 @@ private void doTransfer(final PipeSchemaRegionSnapshotEvent snapshotEvent)
}
LOGGER.info(
- "Successfully transferred file {}, {} and {}.",
+ DataNodePipeMessages.SUCCESSFULLY_TRANSFERRED_FILE_AND,
mTreeSnapshotFile,
tagLogSnapshotFile,
attributeSnapshotFile);
@@ -236,7 +240,7 @@ private void doTransfer(final PipeSchemaRegionSnapshotEvent snapshotEvent)
protected PipeTransferFilePieceReq getTransferSingleFilePieceReq(
final String fileName, final long position, final byte[] payLoad) {
throw new UnsupportedOperationException(
- "The schema region connector does not support transferring single file piece req.");
+ DataNodePipeMessages.THE_SCHEMA_REGION_CONNECTOR_DOES_NOT_SUPPORT);
}
@Override
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/websocket/WebSocketConnectorServer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/websocket/WebSocketConnectorServer.java
index 516f9bbc437dd..bbb4cb9a3a826 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/websocket/WebSocketConnectorServer.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/websocket/WebSocketConnectorServer.java
@@ -23,6 +23,7 @@
import org.apache.iotdb.commons.external.collections4.bidimap.DualTreeBidiMap;
import org.apache.iotdb.commons.pipe.datastructure.Triple;
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent;
import org.apache.iotdb.pipe.api.event.Event;
import org.apache.iotdb.pipe.api.exception.PipeException;
@@ -156,7 +157,9 @@ public void start() {
@Override
public void onStart() {
LOGGER.info(
- "The websocket server {}:{} has been started!", getAddress().getHostName(), getPort());
+ DataNodePipeMessages.THE_WEBSOCKET_SERVER_HAS_BEEN_STARTED,
+ getAddress().getHostName(),
+ getPort());
}
public boolean isStarted() {
@@ -166,7 +169,7 @@ public boolean isStarted() {
@Override
public void onOpen(WebSocket webSocket, ClientHandshake clientHandshake) {
LOGGER.info(
- "The websocket connection from client {}:{} has been opened!",
+ DataNodePipeMessages.THE_WEBSOCKET_CONNECTION_FROM_CLIENT_HAS_BEEN_2,
webSocket.getRemoteSocketAddress().getHostName(),
webSocket.getRemoteSocketAddress().getPort());
}
@@ -175,8 +178,7 @@ public void onOpen(WebSocket webSocket, ClientHandshake clientHandshake) {
public void onClose(WebSocket webSocket, int code, String reason, boolean remote) {
if (webSocket.getRemoteSocketAddress() != null) {
LOGGER.info(
- "The websocket connection from client {}:{} has been closed! "
- + "The code is {}. The reason is {}. Is it closed by remote? {}",
+ DataNodePipeMessages.THE_WEBSOCKET_CONNECTION_FROM_CLIENT_HAS_BEEN_1,
webSocket.getRemoteSocketAddress().getHostName(),
webSocket.getRemoteSocketAddress().getPort(),
code,
@@ -184,11 +186,7 @@ public void onClose(WebSocket webSocket, int code, String reason, boolean remote
remote);
} else {
LOGGER.warn(
- "The websocket connection from client has been closed!"
- + "The code is {}. The reason is {}. Is it closed by remote? {}",
- code,
- reason,
- remote);
+ DataNodePipeMessages.THE_WEBSOCKET_CONNECTION_FROM_CLIENT_HAS_BEEN, code, reason, remote);
}
router.remove(router.getKey(webSocket));
}
@@ -197,7 +195,7 @@ public void onClose(WebSocket webSocket, int code, String reason, boolean remote
public void onMessage(WebSocket webSocket, String s) {
if (s.startsWith("BIND")) {
LOGGER.info(
- "Received a bind message from {}:{}",
+ DataNodePipeMessages.RECEIVED_A_BIND_MESSAGE_FROM,
webSocket.getRemoteSocketAddress().getHostName(),
webSocket.getRemoteSocketAddress().getPort());
@@ -205,7 +203,7 @@ public void onMessage(WebSocket webSocket, String s) {
} else if (s.startsWith("ACK")) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(
- "Received a ack message from {}:{}",
+ DataNodePipeMessages.RECEIVED_A_ACK_MESSAGE_FROM,
webSocket.getRemoteSocketAddress().getHostName(),
webSocket.getRemoteSocketAddress().getPort());
}
@@ -213,7 +211,7 @@ public void onMessage(WebSocket webSocket, String s) {
handleAck(webSocket, Long.parseLong(s.replace("ACK:", "")));
} else if (s.startsWith("ERROR")) {
LOGGER.warn(
- "Received an error message {} from {}:{}",
+ DataNodePipeMessages.RECEIVED_AN_ERROR_MESSAGE_FROM,
s,
webSocket.getRemoteSocketAddress().getHostName(),
webSocket.getRemoteSocketAddress().getPort());
@@ -221,7 +219,7 @@ public void onMessage(WebSocket webSocket, String s) {
handleError(webSocket, Long.parseLong(s.replace("ERROR:", "")));
} else {
LOGGER.warn(
- "Received an unknown message {} from {}:{}",
+ DataNodePipeMessages.RECEIVED_AN_UNKNOWN_MESSAGE_FROM,
s,
webSocket.getRemoteSocketAddress().getHostName(),
webSocket.getRemoteSocketAddress().getPort());
@@ -243,8 +241,7 @@ private void handleAck(WebSocket webSocket, long eventId) {
final String pipeName = router.getKey(webSocket);
if (pipeName == null) {
LOGGER.warn(
- "The websocket connection from {}:{} has been closed, "
- + "but the ack message of commitId: {} is received.",
+ DataNodePipeMessages.THE_WEBSOCKET_CONNECTION_FROM_HAS_BEEN_CLOSED,
webSocket.getRemoteSocketAddress().getHostName(),
webSocket.getRemoteSocketAddress().getPort(),
eventId);
@@ -254,14 +251,13 @@ private void handleAck(WebSocket webSocket, long eventId) {
final ConcurrentHashMap eventId2EventMap =
eventsWaitingForAck.get(pipeName);
if (eventId2EventMap == null) {
- LOGGER.warn(
- "The pipe {} was dropped so the event ack {} will be ignored.", pipeName, eventId);
+ LOGGER.warn(DataNodePipeMessages.THE_PIPE_WAS_DROPPED_SO_THE_EVENT, pipeName, eventId);
return;
}
final EventWaitingForAck eventWrapper = eventId2EventMap.remove(eventId);
if (eventWrapper == null) {
- LOGGER.warn("The event ack {} is not found.", eventId);
+ LOGGER.warn(DataNodePipeMessages.THE_EVENT_ACK_IS_NOT_FOUND, eventId);
return;
}
@@ -274,8 +270,7 @@ private synchronized void handleError(WebSocket webSocket, long eventId) {
final String pipeName = router.getKey(webSocket);
if (pipeName == null) {
LOGGER.warn(
- "The websocket connection from {}:{} has been closed, "
- + "but the error message of commitId: {} is received.",
+ DataNodePipeMessages.THE_WEBSOCKET_CONNECTION_FROM_HAS_BEEN_CLOSED_1,
webSocket.getRemoteSocketAddress().getHostName(),
webSocket.getRemoteSocketAddress().getPort(),
eventId);
@@ -287,19 +282,17 @@ private synchronized void handleError(WebSocket webSocket, long eventId) {
final PriorityBlockingQueue eventTransferQueue =
eventsWaitingForTransfer.get(pipeName);
if (eventId2EventMap == null || eventTransferQueue == null) {
- LOGGER.warn(
- "The pipe {} was dropped so the event in error {} will be ignored.", pipeName, eventId);
+ LOGGER.warn(DataNodePipeMessages.THE_PIPE_WAS_DROPPED_SO_THE_EVENT_1, pipeName, eventId);
return;
}
final EventWaitingForAck eventWrapper = eventId2EventMap.remove(eventId);
if (eventWrapper == null) {
- LOGGER.warn("The event in error {} is not found.", eventId);
+ LOGGER.warn(DataNodePipeMessages.THE_EVENT_IN_ERROR_IS_NOT_FOUND, eventId);
return;
}
- LOGGER.warn(
- "The tablet of commitId: {} can't be parsed by client, it will be retried later.", eventId);
+ LOGGER.warn(DataNodePipeMessages.THE_TABLET_OF_COMMITID_CAN_T_BE, eventId);
synchronized (eventTransferQueue) {
eventTransferQueue.put(
new EventWaitingForTransfer(eventId, eventWrapper.connector, eventWrapper.event));
@@ -310,13 +303,13 @@ private synchronized void handleError(WebSocket webSocket, long eventId) {
public void onError(WebSocket webSocket, Exception e) {
if (webSocket.getRemoteSocketAddress() != null) {
LOGGER.warn(
- "Got an error \"{}\" from {}:{}.",
+ DataNodePipeMessages.GOT_AN_ERROR_FROM,
e.getMessage(),
webSocket.getLocalSocketAddress().getHostName(),
webSocket.getLocalSocketAddress().getPort(),
e);
} else {
- LOGGER.warn("Got an error \"{}\" from an unknown client.", e.getMessage(), e);
+ LOGGER.warn(DataNodePipeMessages.GOT_AN_ERROR_FROM_AN_UNKNOWN_CLIENT, e.getMessage(), e);
// if the remote socket address is null, it means the connection is not established yet.
// we should close the connection manually.
router.remove(router.getKey(webSocket));
@@ -334,7 +327,7 @@ public void addEvent(Event event, WebSocketSink connector) {
eventsWaitingForTransfer.get(pipeName);
if (queue == null) {
- LOGGER.warn("The pipe {} was dropped so the event {} will be dropped.", connector, event);
+ LOGGER.warn(DataNodePipeMessages.THE_PIPE_WAS_DROPPED_SO_THE_EVENT_2, connector, event);
discardEvent(event);
return;
}
@@ -401,7 +394,7 @@ public void run() {
}
transfer(pipeName, queueElement);
} catch (InterruptedException e) {
- LOGGER.warn("The transfer thread is interrupted.", e);
+ LOGGER.warn(DataNodePipeMessages.THE_TRANSFER_THREAD_IS_INTERRUPTED, e);
Thread.currentThread().interrupt();
}
}
@@ -424,8 +417,8 @@ private void transfer(String pipeName, EventWaitingForTransfer element) {
tabletBuffer = ((PipeRawTabletInsertionEvent) event).convertToTablet().serialize();
} else {
throw new NotImplementedException(
- "IoTDBCDCConnector only support "
- + "PipeInsertNodeTabletInsertionEvent and PipeRawTabletInsertionEvent.");
+ DataNodePipeMessages
+ .IOTDBCDCCONNECTOR_ONLY_SUPPORT_PIPEINSERTNODETABLETINSERTIONEVENT_AND_PIPERAWTAB);
}
if (tabletBuffer == null) {
@@ -452,8 +445,7 @@ private void transfer(String pipeName, EventWaitingForTransfer element) {
final ConcurrentHashMap eventId2EventMap =
eventsWaitingForAck.get(pipeName);
if (eventId2EventMap == null) {
- LOGGER.warn(
- "The pipe {} was dropped so the event ack {} will be ignored.", pipeName, eventId);
+ LOGGER.warn(DataNodePipeMessages.THE_PIPE_WAS_DROPPED_SO_THE_EVENT, pipeName, eventId);
discardEvent(event);
return;
}
@@ -464,13 +456,12 @@ private void transfer(String pipeName, EventWaitingForTransfer element) {
eventsWaitingForTransfer.get(pipeName);
if (queue == null || isDroppedPipe(event)) {
LOGGER.warn(
- "The pipe {} was dropped so the event {} will be dropped.", pipeName, eventId);
+ DataNodePipeMessages.THE_PIPE_WAS_DROPPED_SO_THE_EVENT_2, pipeName, eventId);
discardEvent(event);
return;
}
- LOGGER.warn(
- "The event {} can't be transferred to client, it will be retried later.", eventId, e);
+ LOGGER.warn(DataNodePipeMessages.THE_EVENT_CAN_T_BE_TRANSFERRED_TO, eventId, e);
queue.put(new EventWaitingForTransfer(eventId, connector, event));
}
}
@@ -484,7 +475,7 @@ private boolean sleepIfNecessary() {
try {
Thread.sleep(10000);
} catch (InterruptedException e) {
- LOGGER.warn("The transfer thread is interrupted.", e);
+ LOGGER.warn(DataNodePipeMessages.THE_TRANSFER_THREAD_IS_INTERRUPTED, e);
Thread.currentThread().interrupt();
}
return true;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/websocket/WebSocketSink.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/websocket/WebSocketSink.java
index e8a38d63e6e42..3c487ff135680 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/websocket/WebSocketSink.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/websocket/WebSocketSink.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.commons.pipe.config.constant.PipeSinkConstant;
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
import org.apache.iotdb.commons.pipe.sink.protocol.PipeConnectorWithEventDiscard;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent;
import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent;
@@ -124,7 +125,7 @@ public void transfer(final TabletInsertionEvent tabletInsertionEvent) {
if (!((EnrichedEvent) tabletInsertionEvent)
.increaseReferenceCount(WebSocketSink.class.getName())) {
LOGGER.warn(
- "WebsocketConnector failed to increase the reference count of the event. Ignore it. Current event: {}.",
+ DataNodePipeMessages.WEBSOCKETCONNECTOR_FAILED_TO_INCREASE_THE_REFERENCE_COUNT,
tabletInsertionEvent);
return;
}
@@ -136,7 +137,8 @@ public void transfer(final TabletInsertionEvent tabletInsertionEvent) {
public void transfer(TsFileInsertionEvent tsFileInsertionEvent) throws Exception {
if (!(tsFileInsertionEvent instanceof PipeTsFileInsertionEvent)) {
LOGGER.warn(
- "WebsocketConnector only support PipeTsFileInsertionEvent. Current event: {}.",
+ DataNodePipeMessages
+ .WEBSOCKETCONNECTOR_ONLY_SUPPORT_PIPETSFILEINSERTIONEVENT_CURRENT_EVENT,
tsFileInsertionEvent);
return;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/writeback/WriteBackSink.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/writeback/WriteBackSink.java
index 8e02f6af358ad..4c0795f12cdb3 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/writeback/WriteBackSink.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/protocol/writeback/WriteBackSink.java
@@ -30,6 +30,7 @@
import org.apache.iotdb.confignode.rpc.thrift.TDatabaseSchema;
import org.apache.iotdb.db.auth.AuthorityChecker;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.pipe.event.common.statement.PipeStatementInsertionEvent;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent;
@@ -414,7 +415,7 @@ private TSStatus executeStatementForTableModel(
throw e;
}
LOGGER.debug(
- "Execute statement {} to database {}, skip because no permission.",
+ DataNodePipeMessages.EXECUTE_STATEMENT_TO_DATABASE_SKIP_BECAUSE_NO,
statement.getClass().getSimpleName(),
dataBaseName);
return StatusUtils.OK;
@@ -488,7 +489,8 @@ private void autoCreateDatabaseIfNecessary(final String database) {
if (e instanceof InterruptedException) {
Thread.currentThread().interrupt();
}
- throw new PipeException("Auto create database failed because: " + e.getMessage());
+ throw new PipeException(
+ DataNodePipeMessages.AUTO_CREATE_DATABASE_FAILED_BECAUSE + e.getMessage());
}
ALREADY_CREATED_DATABASES.add(database);
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/util/builder/PipeTableModelTsFileBuilder.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/util/builder/PipeTableModelTsFileBuilder.java
index c105a86ff7718..c98c978988d86 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/util/builder/PipeTableModelTsFileBuilder.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/util/builder/PipeTableModelTsFileBuilder.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.db.pipe.sink.util.builder;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.pipe.api.exception.PipeException;
import org.apache.tsfile.enums.ColumnCategory;
@@ -72,7 +73,7 @@ public void bufferTableModelTablet(String dataBase, Tablet tablet) {
@Override
public void bufferTreeModelTablet(Tablet tablet, Boolean isAligned) {
throw new UnsupportedOperationException(
- "PipeTableModeTsFileBuilder does not support tree model tablet to build TSFile");
+ DataNodePipeMessages.PIPETABLEMODETSFILEBUILDER_DOES_NOT_SUPPORT_TREE_MODEL_TABLET);
}
@Override
@@ -155,7 +156,7 @@ List> writeTableModelTabletsToTsFiles(
tryBestToWriteTabletsIntoOneFile(device2TabletsLinkedList);
} catch (final Exception e) {
LOGGER.warn(
- "Batch id = {}: Failed to write tablets into tsfile, because {}",
+ DataNodePipeMessages.BATCH_ID_FAILED_TO_WRITE_TABLETS_INTO,
currentBatchId.get(),
e.getMessage(),
e);
@@ -165,7 +166,7 @@ List> writeTableModelTabletsToTsFiles(
fileWriter.close();
} catch (final Exception closeException) {
LOGGER.warn(
- "Batch id = {}: Failed to close the tsfile {} after failed to write tablets into, because {}",
+ DataNodePipeMessages.BATCH_ID_FAILED_TO_CLOSE_THE_TSFILE,
currentBatchId.get(),
file.getPath(),
closeException.getMessage(),
@@ -178,7 +179,7 @@ List> writeTableModelTabletsToTsFiles(
for (final Pair sealedFile : sealedFiles) {
final boolean deleteSuccess = FileUtils.deleteQuietly(sealedFile.right);
LOGGER.warn(
- "Batch id = {}: {} delete the tsfile {} after failed to write tablets into {}. {}",
+ DataNodePipeMessages.BATCH_ID_DELETE_THE_TSFILE_AFTER_FAILED,
currentBatchId.get(),
deleteSuccess ? "Successfully" : "Failed to",
sealedFile.right.getPath(),
@@ -196,7 +197,7 @@ List> writeTableModelTabletsToTsFiles(
fileWriter.close();
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(
- "Batch id = {}: Seal tsfile {} successfully.",
+ DataNodePipeMessages.BATCH_ID_SEAL_TSFILE_SUCCESSFULLY,
currentBatchId.get(),
sealedFile.getPath());
}
@@ -337,11 +338,8 @@ void tryBestToWriteTabletsIntoOneFile(final Set> device2TabletsLin
fileWriter.writeTable(tablet, pair.right);
} catch (WriteProcessException e) {
LOGGER.warn(
- "Batch id = {}: Failed to build the table model TSFile. Please check whether the written Tablet has time overlap and whether the Table Schema is correct.",
- currentBatchId.get(),
- e);
- throw new PipeException(
- "The written Tablet time may overlap or the Schema may be incorrect");
+ DataNodePipeMessages.BATCH_ID_FAILED_TO_BUILD_THE_TABLE, currentBatchId.get(), e);
+ throw new PipeException(DataNodePipeMessages.THE_WRITTEN_TABLET_TIME_MAY_OVERLAP_OR);
}
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/util/builder/PipeTableModelTsFileBuilderV2.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/util/builder/PipeTableModelTsFileBuilderV2.java
index e88476a27db11..8c89109e1eda3 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/util/builder/PipeTableModelTsFileBuilderV2.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/util/builder/PipeTableModelTsFileBuilderV2.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.commons.path.PartialPath;
import org.apache.iotdb.commons.queryengine.plan.planner.plan.node.PlanNodeId;
import org.apache.iotdb.commons.schema.table.column.TsTableColumnCategory;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.RelationalInsertTabletNode;
import org.apache.iotdb.db.storageengine.dataregion.flush.MemTableFlushTask;
import org.apache.iotdb.db.storageengine.dataregion.memtable.IMemTable;
@@ -82,7 +83,7 @@ public void bufferTableModelTablet(String dataBase, Tablet tablet) {
@Override
public void bufferTreeModelTablet(Tablet tablet, Boolean isAligned) {
throw new UnsupportedOperationException(
- "PipeTableModeTsFileBuilderV2 does not support tree model tablet to build TSFile");
+ DataNodePipeMessages.PIPETABLEMODETSFILEBUILDERV2_DOES_NOT_SUPPORT_TREE_MODEL_TABLET);
}
@Override
@@ -98,7 +99,8 @@ public List> convertTabletToTsFileWithDBInfo() throws IOExcep
return pairList;
} catch (final Exception e) {
LOGGER.warn(
- "Exception occurred when PipeTableModelTsFileBuilderV2 writing tablets to tsfile, use fallback tsfile builder: {}",
+ DataNodePipeMessages
+ .EXCEPTION_OCCURRED_WHEN_PIPETABLEMODELTSFILEBUILDERV2_WRITING_TABLETS_TO,
e.getMessage(),
e);
return fallbackBuilder.convertTabletToTsFileWithDBInfo();
@@ -133,7 +135,7 @@ private List> writeTabletsToTsFiles(final String dataBase)
sealedFiles.add(new Pair<>(dataBase, writer.getFile()));
} catch (final Exception e) {
LOGGER.warn(
- "Batch id = {}: Failed to write tablets into tsfile, because {}",
+ DataNodePipeMessages.BATCH_ID_FAILED_TO_WRITE_TABLETS_INTO,
currentBatchId.get(),
e.getMessage(),
e);
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/util/builder/PipeTreeModelTsFileBuilder.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/util/builder/PipeTreeModelTsFileBuilder.java
index 89827cd842164..34e30c99d4a79 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/util/builder/PipeTreeModelTsFileBuilder.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/sink/util/builder/PipeTreeModelTsFileBuilder.java
@@ -19,6 +19,8 @@
package org.apache.iotdb.db.pipe.sink.util.builder;
+import org.apache.iotdb.db.i18n.DataNodePipeMessages;
+
import org.apache.tsfile.exception.write.WriteProcessException;
import org.apache.tsfile.external.commons.io.FileUtils;
import org.apache.tsfile.file.metadata.IDeviceID;
@@ -64,7 +66,7 @@ public PipeTreeModelTsFileBuilder(
@Override
public void bufferTableModelTablet(final String dataBase, final Tablet tablet) {
throw new UnsupportedOperationException(
- "PipeTreeModelTsFileBuilder does not support table model tablet to build TSFile");
+ DataNodePipeMessages.PIPETREEMODELTSFILEBUILDER_DOES_NOT_SUPPORT_TABLE_MODEL_TABLET);
}
@Override
@@ -150,7 +152,7 @@ private List> writeTabletsToTsFiles()
tryBestToWriteTabletsIntoOneFile(device2TabletsLinkedList, device2Aligned);
} catch (final Exception e) {
LOGGER.warn(
- "Batch id = {}: Failed to write tablets into tsfile, because {}",
+ DataNodePipeMessages.BATCH_ID_FAILED_TO_WRITE_TABLETS_INTO,
currentBatchId.get(),
e.getMessage(),
e);
@@ -160,7 +162,7 @@ private List> writeTabletsToTsFiles()
fileWriter.close();
} catch (final Exception closeException) {
LOGGER.warn(
- "Batch id = {}: Failed to close the tsfile {} after failed to write tablets into, because {}",
+ DataNodePipeMessages.BATCH_ID_FAILED_TO_CLOSE_THE_TSFILE,
currentBatchId.get(),
file.getPath(),
closeException.getMessage(),
@@ -173,7 +175,7 @@ private List> writeTabletsToTsFiles()
for (final Pair sealedFile : sealedFiles) {
final boolean deleteSuccess = FileUtils.deleteQuietly(sealedFile.right);
LOGGER.warn(
- "Batch id = {}: {} delete the tsfile {} after failed to write tablets into {}. {}",
+ DataNodePipeMessages.BATCH_ID_DELETE_THE_TSFILE_AFTER_FAILED,
currentBatchId.get(),
deleteSuccess ? "Successfully" : "Failed to",
sealedFile.right.getPath(),
@@ -191,7 +193,7 @@ private List