GEODE-2420: Enable export logs size estimation and user warning Adds an 'export logs' option, --file-size-limit, that allows the user to set the maximum size of the exported logs zip file.
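(Editor's note: the limit is given as a number with an optional k/m/g/t unit, e.g. `export logs --file-size-limit=250m` in gfsh — the 250m value here is only illustrative; the diff's DUnit test uses `--file-size-limit=10k`. The standalone sketch below, with a hypothetical class name, mirrors the parsing semantics that the patch adds via parseSize/parseByteMultiplier in ExportLogsCommand; it is not the actual Geode class, just an illustration of how a limit string maps to bytes, with a bare number defaulting to megabytes and 0 disabling the check.)

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class FileSizeLimitExample {
  // Mirrors the pattern added in ExportLogsCommand: digits followed by an
  // optional k/m/g/t unit, case-insensitive.
  private static final Pattern LIMIT = Pattern.compile("(\\d+)([kmgtKMGT]?)");

  static long parseFileSizeLimitBytes(String limit) {
    Matcher m = LIMIT.matcher(limit);
    if (!m.matches()) {
      throw new IllegalArgumentException("Invalid file size limit: " + limit);
    }
    long size = Long.parseLong(m.group(1));
    long multiplier;
    switch (m.group(2).toLowerCase()) {
      case "k": multiplier = 1024L; break;
      case "g": multiplier = 1024L * 1024 * 1024; break;
      case "t": multiplier = 1024L * 1024 * 1024 * 1024; break;
      case "m":
      default:  multiplier = 1024L * 1024; break;  // no unit defaults to megabytes
    }
    return size * multiplier;
  }

  public static void main(String[] args) {
    System.out.println(parseFileSizeLimitBytes("10k")); // 10240 bytes
    System.out.println(parseFileSizeLimitBytes("1g"));  // 1073741824 bytes
  }
}
```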
When size checking is enabled (file-limit-size > 0) then the check will also prevent filling up the disk on each member while consolidating and filtering the logs. Project: http://git-wip-us.apache.org/repos/asf/geode/repo Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/3ce3437d Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/3ce3437d Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/3ce3437d Branch: refs/heads/feature/GEODE-2632-17 Commit: 3ce3437ddaf9c3614b11f066011ed33664259e9f Parents: 18db4bf Author: Ken Howe <kh...@pivotal.io> Authored: Fri May 5 14:04:11 2017 -0700 Committer: Ken Howe <kh...@pivotal.io> Committed: Tue May 23 08:16:17 2017 -0700 ---------------------------------------------------------------------- .../membership/InternalDistributedMember.java | 22 +- .../cli/commands/ExportLogsCommand.java | 277 ++++++++++++------- .../cli/functions/SizeExportLogsFunction.java | 4 +- .../management/internal/cli/util/LogSizer.java | 6 +- .../cli/commands/ExportLogsCommandTest.java | 225 ++++++++++++++- .../cli/commands/ExportLogsDUnitTest.java | 7 + .../commands/ExportLogsFileSizeLimitTest.java | 2 +- .../cli/commands/ExportLogsTestSuite.java | 10 +- .../SizeExportLogsFunctionCacheTest.java | 12 +- 9 files changed, 436 insertions(+), 129 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/geode/blob/3ce3437d/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/InternalDistributedMember.java ---------------------------------------------------------------------- diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/InternalDistributedMember.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/InternalDistributedMember.java index 7170f20..6982d31 100755 --- a/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/InternalDistributedMember.java +++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/InternalDistributedMember.java @@ -190,8 +190,8 @@ public class InternalDistributedMember implements DistributedMember, Externaliza * string). * <p> * - * <b> [bruce]THIS METHOD IS FOR TESTING ONLY. DO NOT USE IT TO CREATE IDs FOR USE IN THE PRODUCT. - * IT DOES NOT PROPERLY INITIALIZE ATTRIBUTES NEEDED FOR P2P FUNCTIONALITY. </b> + * <b> THIS METHOD IS FOR TESTING ONLY. DO NOT USE IT TO CREATE IDs FOR USE IN THE PRODUCT. IT + * DOES NOT PROPERLY INITIALIZE ATTRIBUTES NEEDED FOR P2P FUNCTIONALITY. </b> * * * @param i the hostname, must be for the current host @@ -228,8 +228,8 @@ public class InternalDistributedMember implements DistributedMember, Externaliza * string). * <p> * - * <b> [bruce]THIS METHOD IS FOR TESTING ONLY. DO NOT USE IT TO CREATE IDs FOR USE IN THE PRODUCT. - * IT DOES NOT PROPERLY INITIALIZE ATTRIBUTES NEEDED FOR P2P FUNCTIONALITY. </b> + * <b> THIS METHOD IS FOR TESTING ONLY. DO NOT USE IT TO CREATE IDs FOR USE IN THE PRODUCT. IT + * DOES NOT PROPERLY INITIALIZE ATTRIBUTES NEEDED FOR P2P FUNCTIONALITY. </b> * * * @param i the hostname, must be for the current host @@ -264,8 +264,8 @@ public class InternalDistributedMember implements DistributedMember, Externaliza * LonerDistributionManager. * <p> * - * < b> [bruce]DO NOT USE THIS METHOD TO CREATE ANYTHING OTHER THAN A LONER ID WITHOUT TALKING TO - * ME FIRST. IT DOES NOT PROPERLY INITIALIZE THE ID. </b> + * < b> DO NOT USE THIS METHOD TO CREATE ANYTHING OTHER THAN A LONER ID. 
IT DOES NOT PROPERLY + * INITIALIZE THE ID. </b> * * @param host the hostname, must be for the current host * @param p the membership listening port @@ -298,8 +298,8 @@ public class InternalDistributedMember implements DistributedMember, Externaliza * address). * <p> * - * <b> [bruce]THIS METHOD IS FOR TESTING ONLY. DO NOT USE IT TO CREATE IDs FOR USE IN THE PRODUCT. - * IT DOES NOT PROPERLY INITIALIZE ATTRIBUTES NEEDED FOR P2P FUNCTIONALITY. </b> + * <b> THIS METHOD IS FOR TESTING ONLY. DO NOT USE IT TO CREATE IDs FOR USE IN THE PRODUCT. IT + * DOES NOT PROPERLY INITIALIZE ATTRIBUTES NEEDED FOR P2P FUNCTIONALITY. </b> * * * @param i the hostname, must be for the current host @@ -314,8 +314,8 @@ public class InternalDistributedMember implements DistributedMember, Externaliza * Create a InternalDistributedMember as defined by the given address. * <p> * - * <b> [bruce]THIS METHOD IS FOR TESTING ONLY. DO NOT USE IT TO CREATE IDs FOR USE IN THE PRODUCT. - * IT DOES NOT PROPERLY INITIALIZE ATTRIBUTES NEEDED FOR P2P FUNCTIONALITY. </b> + * <b> THIS METHOD IS FOR TESTING ONLY. DO NOT USE IT TO CREATE IDs FOR USE IN THE PRODUCT. IT + * DOES NOT PROPERLY INITIALIZE ATTRIBUTES NEEDED FOR P2P FUNCTIONALITY. </b> * * @param addr address of the server * @param p the listening port of the server @@ -911,7 +911,7 @@ public class InternalDistributedMember implements DistributedMember, Externaliza public void toDataPre_GFE_7_1_0_0(DataOutput out) throws IOException { Assert.assertTrue(netMbr.getVmKind() > 0); - // [bruce] disabled to allow post-connect setting of the port for loner systems + // disabled to allow post-connect setting of the port for loner systems // Assert.assertTrue(getPort() > 0); // if (this.getPort() == 0) { // InternalDistributedSystem.getLoggerI18n().warning(LocalizedStrings.DEBUG, http://git-wip-us.apache.org/repos/asf/geode/blob/3ce3437d/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ExportLogsCommand.java ---------------------------------------------------------------------- diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ExportLogsCommand.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ExportLogsCommand.java index 20ec1f5..af681da 100644 --- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ExportLogsCommand.java +++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ExportLogsCommand.java @@ -34,9 +34,11 @@ import org.springframework.shell.core.annotation.CliOption; import org.apache.geode.cache.CacheFactory; import org.apache.geode.cache.Region; +import org.apache.geode.cache.execute.ResultCollector; import org.apache.geode.distributed.DistributedMember; import org.apache.geode.internal.cache.InternalCache; import org.apache.geode.internal.logging.LogService; +import org.apache.geode.management.ManagementException; import org.apache.geode.management.cli.CliMetaData; import org.apache.geode.management.cli.ConverterHint; import org.apache.geode.management.cli.Result; @@ -47,6 +49,7 @@ import org.apache.geode.management.internal.cli.functions.SizeExportLogsFunction import org.apache.geode.management.internal.cli.i18n.CliStrings; import org.apache.geode.management.internal.cli.result.ResultBuilder; import org.apache.geode.management.internal.cli.util.ExportLogsCacheWriter; +import org.apache.geode.management.internal.cli.util.BytesToString; import org.apache.geode.management.internal.configuration.utils.ZipUtils; import 
org.apache.geode.management.internal.security.ResourceOperation; import org.apache.geode.security.ResourcePermission; @@ -60,7 +63,7 @@ public class ExportLogsCommand implements CommandMarker { public final static String DEFAULT_EXPORT_LOG_LEVEL = "ALL"; - private static final Pattern DISK_SPACE_LIMIT_PATTERN = Pattern.compile("(\\d+)([mgtMGT]?)"); + private static final Pattern DISK_SPACE_LIMIT_PATTERN = Pattern.compile("(\\d+)([kmgtKMGT]?)"); private InternalCache getCache() { return (InternalCache) CacheFactory.getAnyInstance(); @@ -102,104 +105,130 @@ public class ExportLogsCommand implements CommandMarker { help = CliStrings.EXPORT_LOGS__LOGSONLY__HELP) boolean logsOnly, @CliOption(key = CliStrings.EXPORT_LOGS__STATSONLY, unspecifiedDefaultValue = "false", specifiedDefaultValue = "true", - help = CliStrings.EXPORT_LOGS__STATSONLY__HELP) boolean statsOnly) { - // @CliOption(key = CliStrings.EXPORT_LOGS__FILESIZELIMIT, - // unspecifiedDefaultValue = CliStrings.EXPORT_LOGS__FILESIZELIMIT__UNSPECIFIED_DEFAULT, - // specifiedDefaultValue = CliStrings.EXPORT_LOGS__FILESIZELIMIT__SPECIFIED_DEFAULT, - // help = CliStrings.EXPORT_LOGS__FILESIZELIMIT__HELP) String fileSizeLimit) { - Result result = null; + help = CliStrings.EXPORT_LOGS__STATSONLY__HELP) boolean statsOnly, + @CliOption(key = CliStrings.EXPORT_LOGS__FILESIZELIMIT, + unspecifiedDefaultValue = CliStrings.EXPORT_LOGS__FILESIZELIMIT__UNSPECIFIED_DEFAULT, + specifiedDefaultValue = CliStrings.EXPORT_LOGS__FILESIZELIMIT__SPECIFIED_DEFAULT, + help = CliStrings.EXPORT_LOGS__FILESIZELIMIT__HELP) String fileSizeLimit) { + + long totalEstimatedExportSize = 0; + Result result; InternalCache cache = getCache(); try { - Set<DistributedMember> targetMembers = - CliUtil.findMembersIncludingLocators(groups, memberIds); + Set<DistributedMember> targetMembers = getMembers(groups, memberIds); if (targetMembers.isEmpty()) { return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE); } - if (false) { - // TODO: get estimated size of exported logs from all servers first - Map<String, Integer> fileSizesFromMembers = new HashMap<>(); + if (parseFileSizeLimit(fileSizeLimit) > 0) { + // Get estimated size of exported logs from all servers before exporting anything for (DistributedMember server : targetMembers) { SizeExportLogsFunction.Args args = new SizeExportLogsFunction.Args(start, end, logLevel, onlyLogLevel, logsOnly, statsOnly); - List<Object> results = (List<Object>) CliUtil - .executeFunction(new SizeExportLogsFunction(), args, server).getResult(); + List<Object> results = (List<Object>) estimateLogSize(args, server).getResult(); long estimatedSize = 0; long diskAvailable = 0; long diskSize = 0; - List<?> res = (List<?>) results.get(0); - if (res.get(0) instanceof ExportedLogsSizeInfo) { - ExportedLogsSizeInfo sizeInfo = (ExportedLogsSizeInfo) res.get(0); - estimatedSize = sizeInfo.getLogsSize(); - diskAvailable = sizeInfo.getDiskAvailable(); - diskSize = sizeInfo.getDiskSize(); - } else { - estimatedSize = 0; + if (!results.isEmpty()) { + List<?> res = (List<?>) results.get(0); + if (res.get(0) instanceof ExportedLogsSizeInfo) { + ExportedLogsSizeInfo sizeInfo = (ExportedLogsSizeInfo) res.get(0); + estimatedSize = sizeInfo.getLogsSize(); + diskAvailable = sizeInfo.getDiskAvailable(); + diskSize = sizeInfo.getDiskSize(); + } + } + logger.info("Received estimated export size from member {}: {}", server.getId(), + estimatedSize); + totalEstimatedExportSize += estimatedSize; + + // If export size checking is enabled, then estimated 
size on each member shouldn't exceed + // the available disk on that member + try { + isSizeCheckEnabledAndWithinDiskSpaceOfMember(server.getName(), + parseFileSizeLimit(fileSizeLimit), estimatedSize, diskAvailable, diskSize); + } catch (ManagementException e) { + return ResultBuilder.createUserErrorResult(e.getMessage()); } - - - logger.info("Received file size from member {}: {}", server.getId(), estimatedSize); } - // TODO: Check log size limits on the locator + // The sum of the estimated export sizes from each member should not exceed the + // disk availble on the locator + try { + isSizeCheckEnabledAndWithinDiskSpaceOfMember("locator", parseFileSizeLimit(fileSizeLimit), + totalEstimatedExportSize, getLocalDiskAvailable(), getLocalDiskSize()); + } catch (ManagementException e) { + return ResultBuilder.createUserErrorResult(e.getMessage()); + } } - // get zipped files from all servers next - Map<String, Path> zipFilesFromMembers = new HashMap<>(); - for (DistributedMember server : targetMembers) { - Region region = ExportLogsFunction.createOrGetExistingExportLogsRegion(true, cache); + if (testhookSkipExports()) { + result = ResultBuilder.createInfoResult("Estimated size of exported logs is " + + new BytesToString().of(totalEstimatedExportSize)); + } else { + // get zipped files from all servers next + Map<String, Path> zipFilesFromMembers = new HashMap<>(); + for (DistributedMember server : targetMembers) { + Region region = ExportLogsFunction.createOrGetExistingExportLogsRegion(true, cache); - ExportLogsCacheWriter cacheWriter = - (ExportLogsCacheWriter) region.getAttributes().getCacheWriter(); + ExportLogsCacheWriter cacheWriter = + (ExportLogsCacheWriter) region.getAttributes().getCacheWriter(); - cacheWriter.startFile(server.getName()); + cacheWriter.startFile(server.getName()); - CliUtil.executeFunction(new ExportLogsFunction(), - new ExportLogsFunction.Args(start, end, logLevel, onlyLogLevel, logsOnly, statsOnly), - server).getResult(); - Path zipFile = cacheWriter.endFile(); - ExportLogsFunction.destroyExportLogsRegion(cache); + CliUtil.executeFunction(new ExportLogsFunction(), + new ExportLogsFunction.Args(start, end, logLevel, onlyLogLevel, logsOnly, statsOnly), + server).getResult(); + Path zipFile = cacheWriter.endFile(); + ExportLogsFunction.destroyExportLogsRegion(cache); - // only put the zipfile in the map if it is not null - if (zipFile != null) { - logger.info("Received zip file from member {}: {}", server.getId(), zipFile); - zipFilesFromMembers.put(server.getId(), zipFile); + // only put the zipfile in the map if it is not null + if (zipFile != null) { + logger.info("Received zip file from member {}: {}", server.getId(), zipFile); + zipFilesFromMembers.put(server.getId(), zipFile); + } } - } - if (zipFilesFromMembers.isEmpty()) { - return ResultBuilder.createUserErrorResult("No files to be exported."); - } + if (zipFilesFromMembers.isEmpty()) { + return ResultBuilder.createUserErrorResult("No files to be exported."); + } - Path tempDir = Files.createTempDirectory("exportedLogs"); - // make sure the directory is created, so that even if there is no files unzipped to this dir, - // we can still zip it and send an empty zip file back to the client - Path exportedLogsDir = tempDir.resolve("exportedLogs"); - FileUtils.forceMkdir(exportedLogsDir.toFile()); - - for (Path zipFile : zipFilesFromMembers.values()) { - Path unzippedMemberDir = - exportedLogsDir.resolve(zipFile.getFileName().toString().replace(".zip", "")); - ZipUtils.unzip(zipFile.toAbsolutePath().toString(), 
unzippedMemberDir.toString()); - FileUtils.deleteQuietly(zipFile.toFile()); - } + Path tempDir = Files.createTempDirectory("exportedLogs"); + // make sure the directory is created, so that even if there is no files unzipped to this + // dir, we can still zip it and send an empty zip file back to the client + Path exportedLogsDir = tempDir.resolve("exportedLogs"); + FileUtils.forceMkdir(exportedLogsDir.toFile()); + + for (Path zipFile : zipFilesFromMembers.values()) { + Path unzippedMemberDir = + exportedLogsDir.resolve(zipFile.getFileName().toString().replace(".zip", "")); + ZipUtils.unzip(zipFile.toAbsolutePath().toString(), unzippedMemberDir.toString()); + FileUtils.deleteQuietly(zipFile.toFile()); + } - Path dirPath; - if (StringUtils.isBlank(dirName)) { - dirPath = Paths.get(System.getProperty("user.dir")); - } else { - dirPath = Paths.get(dirName); + Path dirPath; + if (StringUtils.isBlank(dirName)) { + dirPath = Paths.get(System.getProperty("user.dir")); + } else { + dirPath = Paths.get(dirName); + } + Path exportedLogsZipFile = + dirPath.resolve("exportedLogs_" + System.currentTimeMillis() + ".zip").toAbsolutePath(); + + logger.info("Zipping into: " + exportedLogsZipFile.toString()); + ZipUtils.zipDirectory(exportedLogsDir, exportedLogsZipFile); + try { + isFileSizeCheckEnabledAndWithinLimit(parseFileSizeLimit(fileSizeLimit), + exportedLogsZipFile.toFile()); + } catch (ManagementException e) { + return ResultBuilder.createUserErrorResult(e.getMessage()); + } finally { + FileUtils.deleteDirectory(tempDir.toFile()); + } + result = ResultBuilder.createInfoResult(exportedLogsZipFile.toString()); } - Path exportedLogsZipFile = - dirPath.resolve("exportedLogs_" + System.currentTimeMillis() + ".zip").toAbsolutePath(); - - logger.info("Zipping into: " + exportedLogsZipFile.toString()); - ZipUtils.zipDirectory(exportedLogsDir, exportedLogsZipFile); - FileUtils.deleteDirectory(tempDir.toFile()); - - result = ResultBuilder.createInfoResult(exportedLogsZipFile.toString()); } catch (Exception ex) { logger.error(ex.getMessage(), ex); result = ResultBuilder.createGemFireErrorResult(ex.getMessage()); @@ -211,43 +240,99 @@ public class ExportLogsCommand implements CommandMarker { } /** + * Test hook for unit testing. To limit scope of test to only estimate size of exports (i.e. skip + * the filtering and exporting logs & stats from cluster members), stub this method to return true + * to skip exporting. 
+ */ + boolean testhookSkipExports() { + return false; + } + + /** + * Wrapper to enable stubbing of static method call for unit testing + */ + Set<DistributedMember> getMembers(String[] groups, String[] memberIds) { + return CliUtil.findMembersIncludingLocators(groups, memberIds); + } + + /** + * Wrapper to enable stubbing of static method call for unit testing + */ + ResultCollector estimateLogSize(SizeExportLogsFunction.Args args, DistributedMember member) { + return CliUtil.executeFunction(new SizeExportLogsFunction(), args, member); + } + + /** + * Wrapper to enable stubbing of static method call for unit testing + */ + long getLocalDiskSize() { + return FileUtils.getUserDirectory().getTotalSpace(); + } + + /** + * Wrapper to enable stubbing of static method call for unit testing + */ + long getLocalDiskAvailable() { + return FileUtils.getUserDirectory().getUsableSpace(); + } + + /** * Returns file size limit in bytes */ - int parseFileSizeLimit(String fileSizeLimit) { + private long parseFileSizeLimit(String fileSizeLimit) { if (StringUtils.isEmpty(fileSizeLimit)) { return 0; } - int sizeLimit = parseSize(fileSizeLimit); - int byteMultiplier = parseByteMultiplier(fileSizeLimit); + long sizeLimit = parseSize(fileSizeLimit); + long byteMultiplier = parseByteMultiplier(fileSizeLimit); return sizeLimit * byteMultiplier; } /** - * Throws IllegalArgumentException if file size is over fileSizeLimitBytes + * Throws ManagementException if file size is over fileSizeLimit bytes + * + * @return false == limit is zero (checking disabled)<br> + * true == file size is less than limit<br> + * exception == file size is over limit */ - void checkOverDiskSpaceThreshold(int fileSizeLimitBytes, File file) { - // TODO:GEODE-2420: warn user if exportedLogsZipFile size > threshold - if (FileUtils.sizeOf(file) > fileSizeLimitBytes) { - throw new IllegalArgumentException("TOO BIG"); // FileTooBigException + boolean isFileSizeCheckEnabledAndWithinLimit(long fileSizeLimitBytes, File file) { + if (fileSizeLimitBytes < 1) { + // size checks disabled + return false; } + if (FileUtils.sizeOf(file) < fileSizeLimitBytes) { + return true; + } + StringBuilder sb = new StringBuilder(); + sb.append("Exported logs zip file size = ").append(FileUtils.sizeOf(file)).append(", ") + .append(CliStrings.EXPORT_LOGS__FILESIZELIMIT).append(" = ").append(fileSizeLimitBytes) + .append(". 
To disable exported logs file size check use option \"--file-size-limit=0\"."); + throw new ManagementException(sb.toString()); // FileTooBigException } /** - * Throws IllegalArgumentException if file size is over fileSizeLimitBytes false == limit is zero - * true == file size is less than limit exception == file size is over limit + * Throws ManagementException if export file size checking is enabled and the space required on a + * cluster member to filter and zip up files to be exported exceeds the disk space available */ - boolean isFileSizeCheckEnabledAndWithinLimit(int fileSizeLimitBytes, File file) { - // TODO:GEODE-2420: warn user if exportedLogsZipFile size > threshold + boolean isSizeCheckEnabledAndWithinDiskSpaceOfMember(String memberName, long fileSizeLimitBytes, + long estimatedSize, long diskAvailable, long diskSize) { + // TODO:GEODE-2420: warn user if exportedLogs filtering will exceed disk available if (fileSizeLimitBytes < 1) { + // size checks disabled return false; } - if (FileUtils.sizeOf(file) < fileSizeLimitBytes) { - return true; + StringBuilder sb = new StringBuilder(); + BytesToString bytesToString = new BytesToString(); + if (estimatedSize > diskAvailable) { + sb.append("Estimated disk space required (").append(bytesToString.of(estimatedSize)) + .append(") to consolidate logs on member ").append(memberName) + .append(" will exceed available disk space (").append(bytesToString.of(diskAvailable)) + .append(")"); + throw new ManagementException(sb.toString()); // FileTooBigException } - throw new IllegalArgumentException("TOO BIG: fileSizeLimit = " + fileSizeLimitBytes - + ", fileSize = " + FileUtils.sizeOf(file)); // FileTooBigException + return true; } static int parseSize(String diskSpaceLimit) { @@ -259,24 +344,26 @@ public class ExportLogsCommand implements CommandMarker { } } - static int parseByteMultiplier(String diskSpaceLimit) { + static long parseByteMultiplier(String diskSpaceLimit) { Matcher matcher = DISK_SPACE_LIMIT_PATTERN.matcher(diskSpaceLimit); if (!matcher.matches()) { throw new IllegalArgumentException(); } switch (matcher.group(2).toLowerCase()) { + case "k": + return KILOBYTE; case "t": - return (int) TERABYTE; + return TERABYTE; case "g": - return (int) GIGABYTE; + return GIGABYTE; case "m": default: - return (int) MEGABYTE; + return MEGABYTE; } } - static final int MEGABYTE = (int) Math.pow(1024, 2); - static final int GIGABYTE = (int) Math.pow(1024, 3); - static final int TERABYTE = (int) Math.pow(1024, 4); - + static final long KILOBYTE = 1024L; + static final long MEGABYTE = KILOBYTE * 1024; + static final long GIGABYTE = MEGABYTE * 1024; + static final long TERABYTE = GIGABYTE * 1024; } http://git-wip-us.apache.org/repos/asf/geode/blob/3ce3437d/geode-core/src/main/java/org/apache/geode/management/internal/cli/functions/SizeExportLogsFunction.java ---------------------------------------------------------------------- diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/functions/SizeExportLogsFunction.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/functions/SizeExportLogsFunction.java index 8d20dc0..3d98fe9 100644 --- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/functions/SizeExportLogsFunction.java +++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/functions/SizeExportLogsFunction.java @@ -59,7 +59,9 @@ public class SizeExportLogsFunction extends ExportLogsFunction implements Functi long estimateLogFileSize(final DistributedMember member, final 
File logFile, final File statArchive, final Args args) throws ParseException, IOException { - LOGGER.info("SizeExportLogsFunction started for member {}", member); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("SizeExportLogsFunction started for member {}", member); + } File baseLogFile = null; File baseStatsFile = null; http://git-wip-us.apache.org/repos/asf/geode/blob/3ce3437d/geode-core/src/main/java/org/apache/geode/management/internal/cli/util/LogSizer.java ---------------------------------------------------------------------- diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/util/LogSizer.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/util/LogSizer.java index 0a799f6..f1adae6 100644 --- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/util/LogSizer.java +++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/util/LogSizer.java @@ -42,8 +42,10 @@ public class LogSizer { /** * @param logFilter the filter that's used to check if we need to accept the file or the logLine - * @param baseLogFile if not null, we will export the logs in that directory - * @param baseStatsFile if not null, we will export stats in that directory + * @param baseLogFile if not null, we will estimate the additional disk space required to filter + * and export the logs in that directory + * @param baseStatsFile if not null, we will estimate the additional disk space required to filter + * and export the stats in that directory */ public LogSizer(LogFilter logFilter, File baseLogFile, File baseStatsFile) throws ParseException { assert logFilter != null; http://git-wip-us.apache.org/repos/asf/geode/blob/3ce3437d/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsCommandTest.java ---------------------------------------------------------------------- diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsCommandTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsCommandTest.java index a02c07f..2b7839a 100644 --- a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsCommandTest.java +++ b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsCommandTest.java @@ -17,25 +17,37 @@ package org.apache.geode.management.internal.cli.commands; import static org.apache.geode.management.internal.cli.commands.ExportLogsCommand.*; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; import org.apache.geode.cache.Cache; import org.apache.geode.cache.execute.Execution; +import org.apache.geode.cache.execute.FunctionException; import org.apache.geode.cache.execute.ResultCollector; import org.apache.geode.distributed.DistributedMember; +import org.apache.geode.distributed.internal.membership.InternalDistributedMember; +import org.apache.geode.internal.cache.GemFireCacheImpl; +import org.apache.geode.management.ManagementException; +import org.apache.geode.management.cli.Result; +import org.apache.geode.management.internal.cli.functions.ExportedLogsSizeInfo; +import org.apache.geode.management.internal.cli.functions.SizeExportLogsFunction; import org.apache.geode.test.junit.categories.UnitTest; -import org.junit.Ignore; import 
org.junit.Test; import org.junit.experimental.categories.Category; +import org.mockito.Matchers; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.TimeUnit; @Category(UnitTest.class) public class ExportLogsCommandTest { - private ExportLogsCommand createExportLogsCommand(final Cache cache, - final DistributedMember distributedMember, final Execution functionExecutor) { - return new TestExportLogsCommand(cache, distributedMember, functionExecutor); - } - @Test public void parseSize_sizeWithUnit_shouldReturnSize() throws Exception { assertThat(ExportLogsCommand.parseSize("1000m")).isEqualTo(1000); @@ -52,6 +64,11 @@ public class ExportLogsCommandTest { } @Test + public void parseByteMultiplier_sizeWith_k_shouldReturnUnit() throws Exception { + assertThat(ExportLogsCommand.parseByteMultiplier("1000k")).isEqualTo(KILOBYTE); + } + + @Test public void parseByteMultiplier_sizeWith_m_shouldReturnUnit() throws Exception { assertThat(ExportLogsCommand.parseByteMultiplier("1000m")).isEqualTo(MEGABYTE); } @@ -67,6 +84,11 @@ public class ExportLogsCommandTest { } @Test + public void parseByteMultiplier_sizeWith_K_shouldReturnUnit() throws Exception { + assertThat(ExportLogsCommand.parseByteMultiplier("1000K")).isEqualTo(KILOBYTE); + } + + @Test public void parseByteMultiplier_sizeWith_M_shouldReturnUnit() throws Exception { assertThat(ExportLogsCommand.parseByteMultiplier("1000M")).isEqualTo(MEGABYTE); } @@ -99,17 +121,170 @@ public class ExportLogsCommandTest { .isInstanceOf(IllegalArgumentException.class); } - @Ignore @Test - public void sizeFromMember_withinLimits() throws Exception { + public void sizeTooBigOnMember_sizeChecksDisabled_returnsFalse() throws Exception { final Cache mockCache = mock(Cache.class); final DistributedMember mockDistributedMember = mock(DistributedMember.class); final Execution mockFuntionExecutor = mock(Execution.class); - final ResultCollector mockResultCollector = mock(ResultCollector.class); + final ExportLogsCommand cmd = + createExportLogsCommand(mockCache, mockDistributedMember, mockFuntionExecutor); + assertThat(cmd.isSizeCheckEnabledAndWithinDiskSpaceOfMember("clusterMember", 0, MEGABYTE + 1024, + MEGABYTE, TERABYTE)).isFalse(); + } + @Test + public void sizeOKOnMember_sizeChecksEnabled_returnsTrue() throws Exception { + final Cache mockCache = mock(Cache.class); + final DistributedMember mockDistributedMember = mock(DistributedMember.class); + final Execution mockFuntionExecutor = mock(Execution.class); final ExportLogsCommand cmd = createExportLogsCommand(mockCache, mockDistributedMember, mockFuntionExecutor); - // cmd.exportLogs(); + assertThat(cmd.isSizeCheckEnabledAndWithinDiskSpaceOfMember("clusterMember", 10 * MEGABYTE, + MEGABYTE - 1024, MEGABYTE, TERABYTE)).isTrue(); + } + + @Test + public void sizeTooBigOnMember_sizeChecksEnabled_shouldThrow() throws Exception { + final Cache mockCache = mock(Cache.class); + final DistributedMember mockDistributedMember = mock(DistributedMember.class); + final Execution mockFuntionExecutor = mock(Execution.class); + final ExportLogsCommand cmd = + createExportLogsCommand(mockCache, mockDistributedMember, mockFuntionExecutor); + assertThatThrownBy(() -> cmd.isSizeCheckEnabledAndWithinDiskSpaceOfMember("clusterMember", + 10 * MEGABYTE, MEGABYTE + 1024, MEGABYTE, TERABYTE)) + .isInstanceOf(ManagementException.class); + } + + @Test + public void sizeFromMember_withinLimits() throws Exception { + final 
GemFireCacheImpl mockCache = mock(GemFireCacheImpl.class); + final ExportLogsCommand realCmd = new ExportLogsCommand(); + + ExportLogsCommand spyCmd = spy(realCmd); + String start = null; + String end = null; + String logLevel = null; + boolean onlyLogLevel = false; + boolean logsOnly = false; + boolean statsOnly = false; + + InternalDistributedMember member1 = new InternalDistributedMember("member1", 12345); + InternalDistributedMember member2 = new InternalDistributedMember("member2", 98765); + member1.getNetMember().setName("member1"); + member2.getNetMember().setName("member2"); + Set<DistributedMember> testMembers = new HashSet<>(); + testMembers.add(member1); + testMembers.add(member2); + + ResultCollector testResults1 = new CustomCollector(); + testResults1.addResult(member1, + Arrays.asList(new ExportedLogsSizeInfo(MEGABYTE, 2 * MEGABYTE, TERABYTE))); + ResultCollector testResults2 = new CustomCollector(); + testResults2.addResult(member2, + Arrays.asList(new ExportedLogsSizeInfo(2 * MEGABYTE, 2 * MEGABYTE, TERABYTE))); + + doReturn(mockCache).when(spyCmd).getCache(); + doReturn(testMembers).when(spyCmd).getMembers(null, null); + doReturn(testResults1).when(spyCmd) + .estimateLogSize(Matchers.any(SizeExportLogsFunction.Args.class), eq(member1)); + doReturn(testResults2).when(spyCmd) + .estimateLogSize(Matchers.any(SizeExportLogsFunction.Args.class), eq(member2)); + doReturn(true).when(spyCmd).testhookSkipExports(); + + Result res = spyCmd.exportLogs("working dir", null, null, logLevel, onlyLogLevel, false, start, + end, logsOnly, statsOnly, "1g"); + assertThat(res.getStatus()).isEqualTo(Result.Status.OK); + } + + @Test + public void sizeFromMember_greaterThanDiskAvailable_shouldReturnErrorResult() throws Exception { + final GemFireCacheImpl mockCache = mock(GemFireCacheImpl.class); + final ExportLogsCommand realCmd = new ExportLogsCommand(); + ExportLogsCommand spyCmd = spy(realCmd); + + String start = null; + String end = null; + String logLevel = null; + boolean onlyLogLevel = false; + boolean logsOnly = false; + boolean statsOnly = false; + + InternalDistributedMember member1 = new InternalDistributedMember("member1", 12345); + InternalDistributedMember member2 = new InternalDistributedMember("member2", 98765); + member1.getNetMember().setName("member1"); + member2.getNetMember().setName("member2"); + Set<DistributedMember> testMembers = new HashSet<>(); + testMembers.add(member1); + testMembers.add(member2); + + ResultCollector testResults1 = new CustomCollector(); + testResults1.addResult(member1, + Arrays.asList(new ExportedLogsSizeInfo(2 * MEGABYTE, MEGABYTE, TERABYTE))); + ResultCollector testResults2 = new CustomCollector(); + testResults2.addResult(member2, + Arrays.asList(new ExportedLogsSizeInfo(2 * MEGABYTE, 2 * MEGABYTE, TERABYTE))); + + doReturn(mockCache).when(spyCmd).getCache(); + doReturn(testMembers).when(spyCmd).getMembers(null, null); + doReturn(testResults1).when(spyCmd) + .estimateLogSize(Matchers.any(SizeExportLogsFunction.Args.class), eq(member1)); + doReturn(testResults2).when(spyCmd) + .estimateLogSize(Matchers.any(SizeExportLogsFunction.Args.class), eq(member2)); + doReturn(true).when(spyCmd).testhookSkipExports(); + + Result res = spyCmd.exportLogs("working dir", null, null, logLevel, onlyLogLevel, false, start, + end, logsOnly, statsOnly, "1g"); + assertThat(res.getStatus()).isEqualTo(Result.Status.ERROR); + } + + @Test + public void sizeFromAllMembers_greaterThanLocalDiskAvailable_shouldReturnErrorResult() + throws Exception { + final GemFireCacheImpl 
mockCache = mock(GemFireCacheImpl.class); + final ExportLogsCommand realCmd = new ExportLogsCommand(); + ExportLogsCommand spyCmd = spy(realCmd); + + String start = null; + String end = null; + String logLevel = null; + boolean onlyLogLevel = false; + boolean logsOnly = false; + boolean statsOnly = false; + + InternalDistributedMember member1 = new InternalDistributedMember("member1", 12345); + InternalDistributedMember member2 = new InternalDistributedMember("member2", 98765); + member1.getNetMember().setName("member1"); + member2.getNetMember().setName("member2"); + Set<DistributedMember> testMembers = new HashSet<>(); + testMembers.add(member1); + testMembers.add(member2); + + ResultCollector testResults1 = new CustomCollector(); + testResults1.addResult(member1, + Arrays.asList(new ExportedLogsSizeInfo(75 * MEGABYTE, GIGABYTE, TERABYTE))); + ResultCollector testResults2 = new CustomCollector(); + testResults2.addResult(member2, + Arrays.asList(new ExportedLogsSizeInfo(60 * MEGABYTE, GIGABYTE, TERABYTE))); + + doReturn(mockCache).when(spyCmd).getCache(); + doReturn(testMembers).when(spyCmd).getMembers(null, null); + doReturn(testResults1).when(spyCmd) + .estimateLogSize(Matchers.any(SizeExportLogsFunction.Args.class), eq(member1)); + doReturn(testResults2).when(spyCmd) + .estimateLogSize(Matchers.any(SizeExportLogsFunction.Args.class), eq(member2)); + doReturn(125 * MEGABYTE).when(spyCmd).getLocalDiskAvailable(); + doReturn(GIGABYTE).when(spyCmd).getLocalDiskSize(); + + doReturn(true).when(spyCmd).testhookSkipExports(); + + Result res = spyCmd.exportLogs("working dir", null, null, logLevel, onlyLogLevel, false, start, + end, logsOnly, statsOnly, "125m"); + assertThat(res.getStatus()).isEqualTo(Result.Status.ERROR); + } + + private ExportLogsCommand createExportLogsCommand(final Cache cache, + final DistributedMember distributedMember, final Execution functionExecutor) { + return new TestExportLogsCommand(cache, distributedMember, functionExecutor); } private static class TestExportLogsCommand extends ExportLogsCommand { @@ -118,7 +293,7 @@ public class ExportLogsCommandTest { private final DistributedMember distributedMember; private final Execution functionExecutor; - public TestExportLogsCommand(final Cache cache, final DistributedMember distributedMember, + TestExportLogsCommand(final Cache cache, final DistributedMember distributedMember, final Execution functionExecutor) { assert cache != null; this.cache = cache; @@ -126,4 +301,32 @@ public class ExportLogsCommandTest { this.functionExecutor = functionExecutor; } } + + public static class CustomCollector implements ResultCollector<Object, List<Object>> { + private ArrayList<Object> results = new ArrayList<>(); + + @Override + public List<Object> getResult() throws FunctionException { + return results; + } + + @Override + public List<Object> getResult(final long timeout, final TimeUnit unit) + throws FunctionException, InterruptedException { + return results; + } + + @Override + public void addResult(final DistributedMember memberID, final Object resultOfSingleExecution) { + results.add(resultOfSingleExecution); + } + + @Override + public void endResults() {} + + @Override + public void clearResults() { + results.clear(); + } + } } http://git-wip-us.apache.org/repos/asf/geode/blob/3ce3437d/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsDUnitTest.java ---------------------------------------------------------------------- diff --git 
a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsDUnitTest.java index 96ac765..0e0f6aa 100644 --- a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsDUnitTest.java +++ b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsDUnitTest.java @@ -28,6 +28,7 @@ import org.apache.geode.cache.Cache; import org.apache.geode.distributed.ConfigurationProperties; import org.apache.geode.internal.cache.GemFireCacheImpl; import org.apache.geode.internal.logging.LogService; +import org.apache.geode.management.cli.Result; import org.apache.geode.management.internal.cli.functions.ExportLogsFunction; import org.apache.geode.management.internal.cli.result.CommandResult; import org.apache.geode.management.internal.cli.util.CommandStringBuilder; @@ -182,6 +183,12 @@ public class ExportLogsDUnitTest { } @Test + public void testExportedZipFileTooBig() throws Exception { + CommandResult result = gfshConnector.executeCommand("export logs --file-size-limit=10k"); + assertThat(result.getStatus()).isEqualTo(Result.Status.ERROR); + } + + @Test public void testExportWithNoFilters() throws Exception { gfshConnector.executeAndVerifyCommand("export logs --log-level=all"); http://git-wip-us.apache.org/repos/asf/geode/blob/3ce3437d/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsFileSizeLimitTest.java ---------------------------------------------------------------------- diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsFileSizeLimitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsFileSizeLimitTest.java index ec2bcfe..1058053 100644 --- a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsFileSizeLimitTest.java +++ b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsFileSizeLimitTest.java @@ -77,7 +77,7 @@ public class ExportLogsFileSizeLimitTest { assertThat(exportLogsCommand.isFileSizeCheckEnabledAndWithinLimit(0, file)).isFalse(); } - private void fillUpFile(File file, int sizeInBytes) throws IOException { + private void fillUpFile(File file, long sizeInBytes) throws IOException { PrintWriter writer = new PrintWriter(file, "UTF-8"); while (FileUtils.sizeOf(file) < sizeInBytes) { writer.println("this is a line of data in the file"); http://git-wip-us.apache.org/repos/asf/geode/blob/3ce3437d/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsTestSuite.java ---------------------------------------------------------------------- diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsTestSuite.java b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsTestSuite.java index 90a92f3..8f8c532 100644 --- a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsTestSuite.java +++ b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsTestSuite.java @@ -14,14 +14,22 @@ */ package org.apache.geode.management.internal.cli.commands; +import org.apache.geode.management.internal.cli.functions.SizeExportLogsFunctionCacheTest; +import org.apache.geode.management.internal.cli.functions.SizeExportLogsFunctionFileTest; +import 
org.apache.geode.management.internal.cli.util.LogSizerTest; +import org.junit.Ignore; import org.junit.runner.RunWith; import org.junit.runners.Suite; /** * All of the JUnit, DUnit and Integration tests for gfsh command export logs. */ + +@Ignore @Suite.SuiteClasses({ExportLogsCommandTest.class, ExportLogsFileSizeLimitTest.class, - ExportLogsIntegrationTest.class, ExportLogsDUnitTest.class,}) + ExportLogsIntegrationTest.class, ExportLogsDUnitTest.class, + SizeExportLogsFunctionCacheTest.class, SizeExportLogsFunctionFileTest.class, + LogSizerTest.class}) @RunWith(Suite.class) public class ExportLogsTestSuite { } http://git-wip-us.apache.org/repos/asf/geode/blob/3ce3437d/geode-core/src/test/java/org/apache/geode/management/internal/cli/functions/SizeExportLogsFunctionCacheTest.java ---------------------------------------------------------------------- diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/cli/functions/SizeExportLogsFunctionCacheTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/cli/functions/SizeExportLogsFunctionCacheTest.java index d8f2f2d..0e8348d 100644 --- a/geode-core/src/test/java/org/apache/geode/management/internal/cli/functions/SizeExportLogsFunctionCacheTest.java +++ b/geode-core/src/test/java/org/apache/geode/management/internal/cli/functions/SizeExportLogsFunctionCacheTest.java @@ -16,8 +16,6 @@ package org.apache.geode.management.internal.cli.functions; import static org.assertj.core.api.Assertions.*; import static org.apache.geode.distributed.ConfigurationProperties.*; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; import org.apache.commons.io.FileUtils; import org.apache.geode.cache.Cache; @@ -50,9 +48,9 @@ public class SizeExportLogsFunctionCacheTest { private FunctionContext functionContext; private File dir; private DistributedMember member; - File logFile; - File statFile; - String name; + private File logFile; + private File statFile; + private String name; @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @@ -142,12 +140,12 @@ public class SizeExportLogsFunctionCacheTest { FunctionContext context = new FunctionContextImpl("functionId", null, resultSender); new SizeExportLogsFunction().execute(context); - assertThatThrownBy(() -> resultSender.getResults()).isInstanceOf(NullPointerException.class); + assertThatThrownBy(resultSender::getResults).isInstanceOf(NullPointerException.class); } private static class TestResultSender implements ResultSender { - private final List<Object> results = new LinkedList<Object>(); + private final List<Object> results = new LinkedList<>(); private Throwable t;