Repository: incubator-hawq
Updated Branches:
  refs/heads/master 1584bb264 -> 833c2f97c
HAWQ-1159. Skip namenode check while namenode not part of hawq cluster Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/833c2f97 Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/833c2f97 Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/833c2f97 Branch: refs/heads/master Commit: 833c2f97c8f0ad90e01791cdad2f5c6e0d05a418 Parents: 1584bb2 Author: rlei <r...@pivotal.io> Authored: Wed Nov 16 13:26:47 2016 +0800 Committer: rlei <r...@pivotal.io> Committed: Wed Nov 16 20:26:32 2016 +0800 ---------------------------------------------------------------------- tools/bin/gpcheck | 101 ++++++++++++++++++++++++++----------------------- 1 file changed, 54 insertions(+), 47 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/833c2f97/tools/bin/gpcheck ---------------------------------------------------------------------- diff --git a/tools/bin/gpcheck b/tools/bin/gpcheck index 3d45f18..d00057d 100755 --- a/tools/bin/gpcheck +++ b/tools/bin/gpcheck @@ -441,7 +441,10 @@ def createHostList(): if HADOOP_HOME: try: namenode_host = getHDFSNamenodeHost() - gpcheck_info.hosts[namenode_host] = GpCheckHost(namenode_host, is_namenode=True) + if namenode_host in hostlist: + gpcheck_info.hosts[namenode_host] = GpCheckHost(namenode_host, is_namenode=True) + else: + logger.warning("utility will skip HDFS namenode check since it's not in current host list.") except Exception, e: checkFailed(None, "utility will SKIP HDFS namenode check: %s" % str(e)) @@ -1296,58 +1299,62 @@ if __name__ == '__main__': if gpcheck_info.is_root: logger.info("gpcheck will perform block device's readahead checks when run as root") - try: - checkPlatform() - parseargs() - readConfigFile() - - except GpCheckError, e: - logger.error(str(e)) - sys.exit(1) + try: + checkPlatform() + parseargs() + readConfigFile() + + 
except GpCheckError, e: + logger.error(str(e)) + sys.exit(1) + if pool: + pool.join() + pool.haltWork() + pool.joinWorkers() - try: - tmpdir = tempfile.mkdtemp(prefix='gpcheck') - except Exception, e: - logger.error("Error creating tmp dir on master: %s" % e) - sys.exit(1) + try: + tmpdir = tempfile.mkdtemp(prefix='gpcheck') + except Exception, e: + logger.error("Error creating tmp dir on master: %s" % e) + sys.exit(1) - try: - # Phase 1: collect input - if options.zipin: - readZip() # load information into gpcheck_info from zip - else: - # read host info into gpcheck_info.hosts from --file or --host - createHostList() - # collect each server's system environment configuration - runCollections() - # read collected data into gpcheck_info - readDataFiles() - # read HAWQ configuration - readHAWQConfiguration() - - # Phase 2: generate output - if options.stdout: - doPrint() - elif options.zipout: - doZip("./gpcheck_%s" % time.time()) - else: - runTests() - if found_errors: - sys.exit(1) + try: + # Phase 1: collect input + if options.zipin: + readZip() # load information into gpcheck_info from zip + else: + # read host info into gpcheck_info.hosts from --file or --host + createHostList() + # collect each server's system environment configuration + runCollections() + # read collected data into gpcheck_info + readDataFiles() + # read HAWQ configuration + readHAWQConfiguration() + + # Phase 2: generate output + if options.stdout: + doPrint() + elif options.zipout: + doZip("./gpcheck_%s" % time.time()) + else: + runTests() + if found_errors: + sys.exit(1) - except GpCheckError, e: - logger.error(str(e)) - sys.exit(1) + except GpCheckError, e: + logger.error(str(e)) + sys.exit(1) + finally: + logger.info("Clean up...") + try: + if tmpdir: + shutil.rmtree(tmpdir) + except Exception, e: + logger.error("error removing tempdir during job cleanup: %s" % e) finally: - logger.info("Clean up...") - try: - if tmpdir: - shutil.rmtree(tmpdir) - except Exception, e: - 
logger.error("error removing tempdir during job cleanup: %s" % e) - if pool: pool.join() pool.haltWork()