Copilot commented on code in PR #7654:
URL: https://github.com/apache/incubator-seata/pull/7654#discussion_r2382569830
##########
core/src/main/java/org/apache/seata/core/rpc/netty/http/BaseHttpChannelHandler.java:
##########
@@ -54,6 +54,6 @@ public abstract class BaseHttpChannelHandler<T> extends SimpleChannelInboundHandler
      */
     protected final void doFilterInternal(HttpFilterContext<?> context) throws HttpRequestFilterException {
         HttpRequestFilterChain filterChain = HttpRequestFilterManager.getFilterChain();
- filterChain.doFilter(context);
+ // filterChain.doFilter(context);
Review Comment:
Commented-out code should be removed rather than left in the codebase. If the filter chain processing is intentionally disabled, this should be documented or handled with a proper configuration flag.
```suggestion
```
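If disabling the filter chain is meant to be switchable rather than permanent, a configuration flag is the cleaner route the comment alludes to. A minimal sketch, assuming a hypothetical `seata.http.filter.enabled` property (the flag name and the system-property lookup are illustrative, not part of this PR):
```java
    protected final void doFilterInternal(HttpFilterContext<?> context) throws HttpRequestFilterException {
        // Hypothetical flag; the property name is an assumption, not an existing Seata key.
        boolean filterEnabled = Boolean.parseBoolean(System.getProperty("seata.http.filter.enabled", "true"));
        if (filterEnabled) {
            HttpRequestFilterChain filterChain = HttpRequestFilterManager.getFilterChain();
            filterChain.doFilter(context);
        }
    }
```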
##########
console/src/main/java/org/apache/seata/mcp/service/impl/BusinessDataSourceServiceImpl.java:
##########
@@ -0,0 +1,336 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.seata.mcp.service.impl;
+
+import org.apache.seata.common.exception.StoreException;
+import org.apache.seata.common.result.PageResult;
+import org.apache.seata.common.util.PageUtil;
+import org.apache.seata.common.util.StringUtils;
+import org.apache.seata.mcp.entity.constant.SqlConstant;
+import org.apache.seata.mcp.entity.param.UndoLogParam;
+import org.apache.seata.mcp.entity.pojo.MCPProperties;
+import org.apache.seata.mcp.entity.vo.UndoLogVO;
+import org.apache.seata.mcp.service.BusinessDataSourceService;
+import org.apache.seata.mcp.store.SqlExecutionTemplate;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Service;
+
+import java.time.LocalDateTime;
+import java.time.ZoneId;
+import java.time.format.DateTimeFormatter;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+@Service
+public class BusinessDataSourceServiceImpl implements BusinessDataSourceService {
+
+ @Autowired
+ private SqlExecutionTemplate sqlExecutionTemplate;
+
+ @Autowired
+ private MCPProperties mcpProperties;
+
+ @Override
+ public List<String> getTableNamesBySchema(String resourceId) {
+ String schema = getSchemaNameByResourceId(resourceId);
+ if (StringUtils.isBlank(schema)) {
+            throw new StoreException("failed to get schema by resourceId: " + resourceId);
+ } else {
+            List<Map<String, Object>> maps =
+                    sqlExecutionTemplate.query(resourceId, SqlConstant.GET_TABLE_NAME_SQL, schema);
+ return maps.stream()
+ .map(map -> {
+                        String tableName = String.valueOf(map.get("TABLE_NAME"));
+                        String tableComment = String.valueOf(map.get("TABLE_COMMENT"));
+ return tableName + " (" + tableComment + ")";
+ })
+ .collect(Collectors.toList());
+ }
+ }
+
+ @Override
+    public List<Map<String, Object>> getTableSchemaByTableName(String resourceId, String tableName) {
+ String schema = getSchemaNameByResourceId(resourceId);
+ if (StringUtils.isBlank(schema)) {
+            throw new StoreException("failed to get schema by resourceId: " + resourceId);
+ } else {
+            return sqlExecutionTemplate.query(resourceId, SqlConstant.GET_SCHEMA_SQL, schema, tableName);
+ }
+ }
+
+ @Override
+ public List<Map<String, Object>> runSql(String sql, String resourceId) {
+ if (sql.contains("undo_log")) {
+ throw new StoreException(
+                    "If you do not use SQL to query undo_log data, use analyzeUndoLog to query and analyze undo_log");
+ }
+ return sqlExecutionTemplate.query(resourceId, sql);
+ }
+
+ @Override
+ public PageResult<UndoLogVO> getUndoLogInfo(UndoLogParam param) {
+ long max_time_duration = mcpProperties.getQueryDuration();
+        DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");
+ String sql = SqlConstant.GET_UNDO_LOG_SQL;
+ List<Object> params = new ArrayList<>();
+ String branchId = param.getBranchId();
+ String xid = param.getXid();
+ String resourceId = param.getResourceId();
+ Integer logStatus = param.getLogStatus();
+ UndoLogParam.CreateTime logCreateTime = param.getLogCreateTime();
+ UndoLogParam.ModifyTime logModifiedTime = param.getLogModifiedTime();
+ int pageNum = param.getPageNum();
+ int pageSize = param.getPageSize();
+ int offset = getOffsetAndValidationPageQuerySql(pageNum, pageSize);
+ if (StringUtils.isBlank(resourceId)) {
+ throw new StoreException("you cannot query without resourceId");
+ }
+ int paramCounts = 0;
+ if (StringUtils.isNotBlank(branchId)) {
+ sql += SqlConstant.PARAM_BRANCH_ID_SQL;
+ params.add(branchId);
+ paramCounts++;
+ }
+ if (StringUtils.isNotBlank(xid)) {
+ sql += SqlConstant.PARAM_XID_SQL;
+ params.add(xid);
+ paramCounts++;
+ }
+ if (logStatus != null) {
+ sql += SqlConstant.UNDO_LOG_STATUS_SQL;
+ params.add(logStatus);
+ paramCounts++;
+ }
+ boolean containsTimeDuration = false;
+ if (logCreateTime != null) {
+ String startTime = logCreateTime.getStartTime();
+ String endTime = logCreateTime.getEndTime();
+ if (startTime != null && endTime != null) {
+ sql += SqlConstant.UNDO_LOG_CREATE_TIME_SQL;
+ containsTimeDuration = true;
+ Long startTimestamp = LocalDateTime.parse(startTime, formatter)
+ .atZone(ZoneId.systemDefault())
+ .toInstant()
+ .toEpochMilli();
+ Long endTimestamp = LocalDateTime.parse(endTime, formatter)
+ .atZone(ZoneId.systemDefault())
+ .toInstant()
+ .toEpochMilli();
+ if (endTimestamp - startTimestamp > max_time_duration) {
+ throw new StoreException(
+                        "The query time span is not allowed to exceed the max query duration(milliseconds): "
+                                + max_time_duration);
+ }
+ }
+ if (startTime != null) {
+ params.add(startTime);
+ }
+ if (endTime != null) {
+ params.add(endTime);
+ }
+ }
+ if (logModifiedTime != null) {
+ String startTime = logModifiedTime.getStartTime();
+ String endTime = logModifiedTime.getEndTime();
+ if (startTime != null && endTime != null) {
+ if (containsTimeDuration) {
+ sql += " AND" + SqlConstant.UNDO_LOG_MODIFY_TIME_SQL;
+ } else {
+ sql += SqlConstant.UNDO_LOG_MODIFY_TIME_SQL;
+ containsTimeDuration = true;
+ }
+ Long startTimestamp = LocalDateTime.parse(startTime, formatter)
+ .atZone(ZoneId.systemDefault())
+ .toInstant()
+ .toEpochMilli();
+ Long endTimestamp = LocalDateTime.parse(endTime, formatter)
+ .atZone(ZoneId.systemDefault())
+ .toInstant()
+ .toEpochMilli();
+ if (endTimestamp - startTimestamp > max_time_duration) {
+ throw new StoreException(
+                            "The query time span is not allowed to exceed the max query duration(milliseconds): "
+                                    + max_time_duration);
+ }
+ }
+ if (startTime != null) {
+ params.add(startTime);
+ }
+ if (endTime != null) {
+ params.add(endTime);
+ }
+ }
+ if (containsTimeDuration) {
+ for (int i = 0; i < paramCounts; i++) {
+ sql = sql.replaceFirst("#", "AND");
+ }
+ } else {
+ for (int i = 1; i < paramCounts; i++) {
+ sql = sql.replaceFirst("#", "AND");
+ }
+ }
+ sql = sql.replaceAll("#", "");
+ sql += SqlConstant.UNDO_LOG_ORDER + SqlConstant.PAGE_QUERY;
+ sql = sql.replaceFirst("%", String.valueOf(pageSize));
+ sql = sql.replaceFirst("%", String.valueOf(offset));
+ Object[] objects = params.toArray();
+        return sqlExecutionTemplate.queryForUndoLogs(resourceId, sql, pageNum, pageSize, objects);
+ }
+
+ @Override
+ public Integer getUndoLogCounts(UndoLogParam param) {
+ long max_time_duration = mcpProperties.getQueryDuration();
+        DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");
+ String sql = SqlConstant.GET_UNDO_LOG_SQL;
+ List<Object> params = new ArrayList<>();
+ String branchId = param.getBranchId();
+ String xid = param.getXid();
+ String resourceId = param.getResourceId();
+ Integer logStatus = param.getLogStatus();
+ UndoLogParam.CreateTime logCreateTime = param.getLogCreateTime();
+ UndoLogParam.ModifyTime logModifiedTime = param.getLogModifiedTime();
+ int pageNum = param.getPageNum();
+ int pageSize = param.getPageSize();
+ int offset = getOffsetAndValidationPageQuerySql(pageNum, pageSize);
+ if (StringUtils.isBlank(resourceId)) {
+ throw new StoreException("you cannot query without resourceId");
+ }
+ int paramCounts = 0;
+ if (StringUtils.isNotBlank(branchId)) {
+ sql += SqlConstant.PARAM_BRANCH_ID_SQL;
+ params.add(branchId);
+ paramCounts++;
+ }
+ if (StringUtils.isNotBlank(xid)) {
+ sql += SqlConstant.PARAM_XID_SQL;
+ params.add(xid);
+ paramCounts++;
+ }
+ if (logStatus != null) {
+ sql += SqlConstant.UNDO_LOG_STATUS_SQL;
+ params.add(logStatus);
+ paramCounts++;
+ }
+ boolean containsTimeDuration = false;
+ if (logCreateTime != null) {
+ String startTime = logCreateTime.getStartTime();
+ String endTime = logCreateTime.getEndTime();
+ if (startTime != null && endTime != null) {
+ sql += SqlConstant.UNDO_LOG_CREATE_TIME_SQL;
+ containsTimeDuration = true;
+ Long startTimestamp = LocalDateTime.parse(startTime, formatter)
+ .atZone(ZoneId.systemDefault())
+ .toInstant()
+ .toEpochMilli();
+ Long endTimestamp = LocalDateTime.parse(endTime, formatter)
+ .atZone(ZoneId.systemDefault())
+ .toInstant()
+ .toEpochMilli();
+ if (endTimestamp - startTimestamp > max_time_duration) {
+ throw new StoreException(
+                            "The query time span is not allowed to exceed the max query duration(milliseconds): "
+                                    + max_time_duration);
+ }
+ }
+ if (startTime != null) {
+ params.add(startTime);
+ }
+ if (endTime != null) {
+ params.add(endTime);
+ }
+ }
+ if (logModifiedTime != null) {
+ String startTime = logModifiedTime.getStartTime();
+ String endTime = logModifiedTime.getEndTime();
+ if (startTime != null && endTime != null) {
+ if (containsTimeDuration) {
+ sql += " AND" + SqlConstant.UNDO_LOG_MODIFY_TIME_SQL;
+ } else {
+ sql += SqlConstant.UNDO_LOG_MODIFY_TIME_SQL;
+ containsTimeDuration = true;
+ }
+ Long startTimestamp = LocalDateTime.parse(startTime, formatter)
+ .atZone(ZoneId.systemDefault())
+ .toInstant()
+ .toEpochMilli();
+ Long endTimestamp = LocalDateTime.parse(endTime, formatter)
+ .atZone(ZoneId.systemDefault())
+ .toInstant()
+ .toEpochMilli();
+ if (endTimestamp - startTimestamp > max_time_duration) {
+ throw new StoreException(
+                            "The query time span is not allowed to exceed the max query duration(milliseconds): "
+                                    + max_time_duration);
+ }
+ }
+ if (startTime != null) {
+ params.add(startTime);
+ }
+ if (endTime != null) {
+ params.add(endTime);
+ }
+ }
+ if (containsTimeDuration) {
+ for (int i = 0; i < paramCounts; i++) {
+ sql = sql.replaceFirst("#", "AND");
+ }
+ } else {
+ for (int i = 1; i < paramCounts; i++) {
+ sql = sql.replaceFirst("#", "AND");
+ }
+ }
+ sql = sql.replaceAll("#", "");
+ sql += SqlConstant.UNDO_LOG_ORDER + SqlConstant.PAGE_QUERY;
+ sql = sql.replaceFirst("%", String.valueOf(pageSize));
+ sql = sql.replaceFirst("%", String.valueOf(offset));
+ Object[] objects = params.toArray();
+        Map<String, Object> mysql =
+                sqlExecutionTemplate.queryForObject(resourceId, PageUtil.countSql(sql, "mysql"), objects);
+ Long o = (Long) mysql.get("count(1)");
+ return o.intValue();
+ }
+
+ public String getSchemaNameByResourceId(String resourceId) {
+ if (StringUtils.isBlank(resourceId)) {
+ return "";
+ }
+ int idx = resourceId.lastIndexOf("/");
+ if (idx != -1 && idx != resourceId.length() - 1) {
+ return resourceId.substring(idx + 1);
+ }
+ return "";
+ }
+
+ public int getOffsetAndValidationPageQuerySql(int pageNum, int pageSize) {
+ int offset = (pageNum - 1) * pageSize;
+ if (pageNum < 1) {
+            throw new IllegalArgumentException("The page number must be greater than 0");
+ }
+ if (pageSize < 0) {
+            throw new IllegalArgumentException("The page number must be greater than 0");
Review Comment:
The error message is incorrect: it should say 'page size' instead of 'page number', since this validation is for the pageSize parameter.
```suggestion
            throw new IllegalArgumentException("The page size must be greater than 0");
```
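For reference, the whole helper with the corrected message might read roughly as follows (a sketch only; it keeps the existing checks and method name, and additionally defers the offset computation until after validation, which goes slightly beyond the one-line fix above):
```java
    public int getOffsetAndValidationPageQuerySql(int pageNum, int pageSize) {
        if (pageNum < 1) {
            throw new IllegalArgumentException("The page number must be greater than 0");
        }
        if (pageSize < 0) {
            throw new IllegalArgumentException("The page size must be greater than 0");
        }
        // Compute the offset only after both arguments have passed validation.
        return (pageNum - 1) * pageSize;
    }
```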
##########
core/src/main/java/org/apache/seata/core/rpc/netty/http/HttpDispatchHandler.java:
##########
@@ -137,6 +133,39 @@ protected void channelRead0(ChannelHandlerContext ctx, HttpRequest httpRequest)
     private void sendResponse(ChannelHandlerContext ctx, boolean keepAlive, Object result)
             throws JsonProcessingException {
         FullHttpResponse response;
+ // Increase the stream transport way
+ if (result instanceof ResponseEntity) {
+ ResponseEntity<?> responseEntity = (ResponseEntity<?>) result;
+ if (responseEntity.getBody() instanceof StreamingResponseBody) {
+                StreamingResponseBody streamingBody = (StreamingResponseBody) responseEntity.getBody();
+
+                DefaultHttpResponse defaultHttpResponse = new DefaultHttpResponse(
+                        HttpVersion.HTTP_1_1, HttpResponseStatus.valueOf(responseEntity.getStatusCodeValue()));
+
+                responseEntity
+                        .getHeaders()
+                        .forEach((key, values) -> defaultHttpResponse.headers().add(key, values));
+
+                defaultHttpResponse.headers().set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED);
+
+ ctx.write(defaultHttpResponse);
+
+ ctx.executor().execute(() -> {
+                    try (OutputStream out = new ChannelOutputStreamAdapter(ctx.channel())) {
+ streamingBody.writeTo(out);
+ } catch (Exception e) {
+ LOGGER.error("Streaming failed", e);
+ } finally {
+                        ChannelFuture lastContentFuture = ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT);
+
+ if (!keepAlive) {
+                            lastContentFuture.addListener(ChannelFutureListener.CLOSE);
+ }
+ }
+ });
+ }
+ }
+
Review Comment:
The streaming response handling is missing a return statement after
processing the StreamingResponseBody. Without a return, the code continues to
execute the regular response handling at line 168, which would cause duplicate
response writing and potentially corrupt the response.
```suggestion
// Prevent duplicate response writing for streaming responses
        if (result instanceof ResponseEntity && ((ResponseEntity<?>) result).getBody() instanceof StreamingResponseBody) {
return;
}
```
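An equivalent fix is to return from inside the streaming branch itself, right after the chunked write has been scheduled; a rough sketch of that placement (abbreviated, not the full method):
```java
        if (result instanceof ResponseEntity) {
            ResponseEntity<?> responseEntity = (ResponseEntity<?>) result;
            if (responseEntity.getBody() instanceof StreamingResponseBody) {
                // ... existing chunked streaming logic from this hunk ...
                return; // stop here so the regular JSON response below is not written as well
            }
        }
```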
##########
console/src/main/java/org/apache/seata/mcp/store/SqlExecutionTemplate.java:
##########
@@ -0,0 +1,378 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.seata.mcp.store;
+
+import org.apache.seata.common.exception.StoreException;
+import org.apache.seata.common.result.PageResult;
+import org.apache.seata.common.util.IOUtil;
+import org.apache.seata.common.util.PageUtil;
+import org.apache.seata.common.util.StringUtils;
+import org.apache.seata.mcp.entity.vo.UndoLogVO;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.stereotype.Service;
+
+import javax.sql.DataSource;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Pattern;
+
+@Service
+public class SqlExecutionTemplate {
+
+ private static final Pattern SELECT_PATTERN =
+            Pattern.compile("^\\s*SELECT\\b.*", Pattern.CASE_INSENSITIVE | Pattern.DOTALL);
+
+ private static final Pattern DML_PATTERN =
+            Pattern.compile("^\\s*(INSERT|UPDATE|DELETE)\\b.*", Pattern.CASE_INSENSITIVE | Pattern.DOTALL);
+
+    private static final Logger LOGGER = LoggerFactory.getLogger(SqlExecutionTemplate.class);
+
+ /**
+ * Obtain the data source with the specified resourceId
+ * @return DataSource instance
+ */
+ private DataSource getDataSource(String resourceId) {
+ try {
+ return DataSourceFactory.getDataSource(resourceId);
+ } catch (Exception e) {
+            LOGGER.error("Failed to get the data source, resourceId: {}", resourceId, e);
+            throw new StoreException("Unable to get the data source: " + resourceId);
+ }
+ }
+
+ private boolean validateQuerySql(String sql) {
+ if (sql == null || StringUtils.isBlank(sql)) {
+ return false;
+ }
+ return SELECT_PATTERN.matcher(sql).matches();
+ }
+
+ private boolean validateUpdateSql(String sql) {
+ if (sql == null || StringUtils.isBlank(sql)) {
+ return false;
+ }
+ return DML_PATTERN.matcher(sql).matches();
+ }
+
+ /**
+     * Execute the query and return the result as a list of maps
+ *
+ * @param resourceId of the data source
+ * @param sql SQL query statement
+ * @param params parameters
+ * @return List of query results
+ */
+    public List<Map<String, Object>> query(String resourceId, String sql, Object... params) {
+ Connection conn = null;
+ PreparedStatement ps = null;
+ ResultSet rs = null;
+
+ try {
+ if (!validateQuerySql(sql)) {
+                throw new StoreException("The query valid failed,Only query operations are allowed:" + sql);
+ }
+ conn = getConnection(resourceId);
+ if (params == null || params.length == 0) {
+ if ((sql.contains("where") || sql.contains("WHERE"))) {
+ sql = sql.replaceAll("(?i)\\bWHERE\\b.*", "").trim();
Review Comment:
Automatically removing WHERE clauses when no parameters are provided could
lead to unintended full table scans and potential data exposure. This behavior
should be reconsidered as it may compromise data security by allowing broader
access than intended.
```suggestion
                    throw new StoreException("Query contains WHERE clause but no parameters were provided. This may lead to unintended full table scans and is not allowed.");
```
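If the goal is to keep a guard here rather than silently rewrite the SQL, another option is to reject queries whose `?` placeholders do not match the supplied parameters. The helper below is a hedged sketch, not part of this PR, and its naive character count would miscount a literal `?` inside a string constant:
```java
    // Hypothetical guard: fail fast on a placeholder/parameter mismatch instead of
    // stripping the WHERE clause from the statement.
    private void checkPlaceholderCount(String sql, Object... params) {
        long placeholders = sql.chars().filter(c -> c == '?').count();
        int supplied = (params == null) ? 0 : params.length;
        if (placeholders != supplied) {
            throw new StoreException("SQL expects " + placeholders + " parameters but got " + supplied + ": " + sql);
        }
    }
```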
##########
console/src/main/java/org/apache/seata/mcp/utils/DateUtils.java:
##########
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.seata.mcp.utils;
+
+import java.time.DateTimeException;
+import java.time.Instant;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.ZoneId;
+import java.time.ZonedDateTime;
+import java.time.format.DateTimeFormatter;
+import java.time.format.DateTimeParseException;
+import java.util.regex.Pattern;
+
+public class DateUtils {
+
+ public static final Long ONE_DAY_TIMESTAMP = 86400000L;
+
+    private static final Pattern DATE_PATTERN =
+            Pattern.compile("^\\d{4}-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])$");
+
+ public static boolean isValidDate(String dateStr) {
+ return DATE_PATTERN.matcher(dateStr).matches();
+ }
+
+ public static long convertToTimestampFromDate(String dateStr) {
+ if (!isValidDate(dateStr)) {
+            throw new DateTimeException("The time format does not match yyyy-mm-dd");
+ }
+ LocalDate date = LocalDate.parse(dateStr);
+        ZonedDateTime zonedDateTime = date.atStartOfDay(ZoneId.systemDefault());
+ return zonedDateTime.toInstant().toEpochMilli();
+ }
+
+ public static long convertToTimeStampFromDateTime(String dateTimeStr) {
+        DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");
+
+ try {
+            LocalDateTime dateTime = LocalDateTime.parse(dateTimeStr, formatter);
+            return dateTime.atZone(ZoneId.systemDefault()).toInstant().toEpochMilli();
+ } catch (Exception e) {
+ return -1;
+ }
+ }
+
+ public static String convertToDateTimeFromTimestamp(Long timestamp) {
+        DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");
+ LocalDateTime dateTime;
+ try {
+ dateTime = Instant.ofEpochMilli(timestamp)
+ .atZone(ZoneId.systemDefault())
+ .toLocalDateTime();
+ } catch (DateTimeParseException e) {
+ return "Parse Failed, please check that the timestamp is correct";
+ }
+ return dateTime.format(formatter);
+ }
+
+    public static boolean judgeExceedTimeDuration(Long startTime, Long endTime, Long maxDuration) {
Review Comment:
This method doesn't handle the case where endTime is less than startTime,
which would result in a negative duration. This could lead to incorrect results
when checking time duration limits.
```suggestion
    public static boolean judgeExceedTimeDuration(Long startTime, Long endTime, Long maxDuration) {
if (endTime < startTime) {
return false;
}
```
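Putting the suggested guard together with the duration check, the whole method might look roughly like this (a sketch; the original method body is not shown in the hunk, so the final comparison is assumed, and whether an inverted range should return false or throw is left to the authors):
```java
    public static boolean judgeExceedTimeDuration(Long startTime, Long endTime, Long maxDuration) {
        // An inverted range cannot exceed the limit; callers may prefer to reject it outright instead.
        if (endTime < startTime) {
            return false;
        }
        // Assumed original behaviour: compare the span against the configured maximum.
        return endTime - startTime > maxDuration;
    }
```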
##########
server/src/main/java/org/apache/seata/server/console/impl/file/ServerLogFileServiceImpl.java:
##########
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.seata.server.console.impl.file;
+
+import org.apache.seata.common.ConfigurationKeys;
+import org.apache.seata.server.console.entity.param.ServerLogParam;
+import org.apache.seata.server.console.service.ServerLogService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.core.env.Environment;
+import org.springframework.http.HttpHeaders;
+import org.springframework.http.HttpStatus;
+import org.springframework.http.MediaType;
+import org.springframework.http.ResponseEntity;
+import org.springframework.stereotype.Service;
+import org.springframework.web.servlet.mvc.method.annotation.StreamingResponseBody;
+
+import java.io.*;
+import java.nio.channels.Channels;
+import java.nio.channels.ClosedChannelException;
+import java.nio.channels.FileChannel;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
+
+@Service
+public class ServerLogFileServiceImpl implements ServerLogService {
+
+ @Autowired
+ private Environment env;
+
+ private static final String DEFAULT_APP_NAME = "seata-server";
+
+    private static final Integer MAX_LOG_FILE_SIZE = 500 * 1024 * 1024; // 500MB
+
+    private final Logger LOGGER = LoggerFactory.getLogger(ServerLogFileServiceImpl.class);
+
+ @Override
+    public ResponseEntity<StreamingResponseBody> getServerLogFile(ServerLogParam serverLogParam) {
+ String logPathString = buildLogFilePath(serverLogParam);
+ Path logPath = Paths.get(logPathString);
+ if (Files.exists(logPath)) {
+ long size = 0;
+ try {
+ size = Files.size(logPath);
+ } catch (IOException e) {
+ LOGGER.warn("Error get log file size: {}", e.getMessage());
+ }
+ if (size > MAX_LOG_FILE_SIZE) {
+ return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR)
+                    .body(out ->
+                            out.write(("Log File exceed the Max Size: " + MAX_LOG_FILE_SIZE + " B").getBytes()));
+ }
+ long finalSize = size;
+ StreamingResponseBody responseBody = outputStream -> {
+                try (FileChannel channel = FileChannel.open(logPath, StandardOpenOption.READ)) {
+ long position = 0;
+ long remaining = finalSize;
+
+ while (remaining > 0) {
+                        long transferred = channel.transferTo(position, remaining, Channels.newChannel(outputStream));
+
+ if (transferred <= 0) {
+ break;
+ }
+
+ position += transferred;
+ remaining -= transferred;
+
+ outputStream.flush();
+ }
+ } catch (IOException e) {
+                    LOGGER.warn("Error streaming log file: {}", e.getMessage());
+ if (e instanceof ClosedChannelException) {
+                        LOGGER.info("Client closed connection during file transfer");
+ }
+ }
+ };
+ return ResponseEntity.ok()
+                    .header(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename=\"" + logPath.getFileName() + "\"")
+                    .header(HttpHeaders.CACHE_CONTROL, "no-cache, no-store, must-revalidate")
+ .header(HttpHeaders.PRAGMA, "no-cache")
+ .header(HttpHeaders.EXPIRES, "0")
+ .contentType(MediaType.APPLICATION_JSON)
Review Comment:
Setting content type to APPLICATION_JSON for a log file attachment is
incorrect. This should be MediaType.APPLICATION_OCTET_STREAM or
MediaType.TEXT_PLAIN since log files are typically text-based, not JSON.
```suggestion
.contentType(MediaType.TEXT_PLAIN)
```
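For a downloadable log attachment, either of the media types mentioned above works; a minimal sketch of the response builder with a plain-text type (APPLICATION_OCTET_STREAM would be the drop-in alternative for a binary-safe download):
```java
            return ResponseEntity.ok()
                    .header(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename=\"" + logPath.getFileName() + "\"")
                    // TEXT_PLAIN keeps logs readable in the browser; APPLICATION_OCTET_STREAM forces a download.
                    .contentType(MediaType.TEXT_PLAIN)
                    .body(responseBody);
```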
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]