KAFKA-2715: Removed previous system_test folder

@ewencp Nothing too complicated here
Author: Geoff Anderson <[email protected]> Reviewers: Ewen Cheslack-Postava, Gwen Shapira Closes #392 from granders/minor-remove-system-test Project: http://git-wip-us.apache.org/repos/asf/kafka/repo Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/d50499a0 Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/d50499a0 Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/d50499a0 Branch: refs/heads/trunk Commit: d50499a0e08586d61445df6e287762333dc22d0a Parents: c001b20 Author: Geoff Anderson <[email protected]> Authored: Fri Oct 30 15:13:16 2015 -0700 Committer: Gwen Shapira <[email protected]> Committed: Fri Oct 30 15:13:16 2015 -0700 ---------------------------------------------------------------------- Vagrantfile | 2 +- build.gradle | 3 +- system_test/README.txt | 83 - system_test/__init__.py | 1 - system_test/broker_failure/README | 72 - .../broker_failure/bin/kafka-run-class.sh | 67 - system_test/broker_failure/bin/run-test.sh | 815 ------ .../broker_failure/config/log4j.properties | 86 - .../config/mirror_producer.properties | 27 - .../config/mirror_producer1.properties | 28 - .../config/mirror_producer2.properties | 28 - .../config/mirror_producer3.properties | 28 - .../config/server_source1.properties | 76 - .../config/server_source2.properties | 76 - .../config/server_source3.properties | 76 - .../config/server_source4.properties | 76 - .../config/server_target1.properties | 79 - .../config/server_target2.properties | 79 - .../config/server_target3.properties | 79 - .../config/whitelisttest.consumer.properties | 29 - .../config/zookeeper_source.properties | 18 - .../config/zookeeper_target.properties | 18 - system_test/cluster_config.json | 58 - system_test/common/util.sh | 182 -- system_test/logging.conf | 56 - system_test/metrics.json | 174 -- system_test/mirror_maker_testsuite/__init__.py | 1 - .../mirror_maker_testsuite/cluster_config.json | 136 - .../config/console_consumer.properties | 0 .../config/consumer.properties | 0 .../config/log4j.properties | 0 .../config/mirror_consumer.properties | 12 - .../config/mirror_producer.properties | 12 - .../config/producer.properties | 0 .../config/producer_performance.properties | 0 .../config/server.properties | 139 - .../config/zookeeper.properties | 23 - .../mirror_maker_testsuite/mirror_maker_test.py | 324 --- .../testcase_15001_properties.json | 158 -- .../testcase_15002_properties.json | 158 -- .../testcase_15003/cluster_config.json | 135 - .../testcase_15003_properties.json | 156 -- .../testcase_15004/cluster_config.json | 135 - .../testcase_15004_properties.json | 156 -- .../testcase_15005/cluster_config.json | 153 -- .../testcase_15005_properties.json | 178 -- .../testcase_15006/cluster_config.json | 153 -- .../testcase_15006_properties.json | 178 -- .../testcase_5001/testcase_5001_properties.json | 160 -- .../testcase_5002/testcase_5002_properties.json | 160 -- .../testcase_5003/cluster_config.json | 135 - .../testcase_5003/testcase_5003_properties.json | 159 -- .../testcase_5004/cluster_config.json | 135 - .../testcase_5004/testcase_5004_properties.json | 159 -- .../testcase_5005/cluster_config.json | 153 -- .../testcase_5005/testcase_5005_properties.json | 182 -- .../testcase_5006/cluster_config.json | 153 -- .../testcase_5006/testcase_5006_properties.json | 182 -- .../cluster_config.json | 103 - .../config/console_consumer.properties | 2 - .../config/producer_performance.properties | 0 .../config/server.properties | 143 - .../config/zookeeper.properties | 23 - .../offset_management_test.py | 299 --- 
.../testcase_7001/testcase_7001_properties.json | 95 - .../config/kafka_server_1.properties | 147 - .../config/kafka_server_2.properties | 147 - .../config/kafka_server_3.properties | 147 - .../config/kafka_server_4.properties | 147 - .../testcase_7002/config/zookeeper_0.properties | 24 - .../testcase_7002/testcase_7002_properties.json | 127 - system_test/producer_perf/README | 9 - system_test/producer_perf/bin/expected.out | 32 - .../producer_perf/bin/run-compression-test.sh | 61 - system_test/producer_perf/bin/run-test.sh | 61 - .../producer_perf/config/server.properties | 78 - .../producer_perf/config/zookeeper.properties | 18 - system_test/replication_testsuite/__init__.py | 1 - .../config/console_consumer.properties | 0 .../config/consumer.properties | 0 .../config/log4j.properties | 0 .../config/producer.properties | 0 .../config/producer_performance.properties | 0 .../config/server.properties | 139 - .../config/zookeeper.properties | 20 - .../replication_testsuite/replica_basic_test.py | 461 ---- .../testcase_0001/testcase_0001_properties.json | 85 - .../testcase_0002/testcase_0002_properties.json | 85 - .../testcase_0003/testcase_0003_properties.json | 85 - .../testcase_0004/testcase_0004_properties.json | 85 - .../testcase_0005/testcase_0005_properties.json | 85 - .../testcase_0006/testcase_0006_properties.json | 85 - .../testcase_0007/testcase_0007_properties.json | 85 - .../testcase_0008/testcase_0008_properties.json | 85 - .../testcase_0009/testcase_0009_properties.json | 85 - .../testcase_0010/testcase_0010_properties.json | 85 - .../testcase_0011/testcase_0011_properties.json | 86 - .../testcase_0021/cluster_config.json | 76 - .../testcase_0021/testcase_0021_properties.json | 105 - .../testcase_0022/cluster_config.json | 76 - .../testcase_0022/testcase_0022_properties.json | 105 - .../testcase_0023/cluster_config.json | 76 - .../testcase_0023/testcase_0023_properties.json | 105 - .../testcase_0024/testcase_0024_properties.json | 87 - .../testcase_0101/testcase_0101_properties.json | 87 - .../testcase_0102/testcase_0102_properties.json | 87 - .../testcase_0103/testcase_0103_properties.json | 87 - .../testcase_0104/testcase_0104_properties.json | 87 - .../testcase_0105/testcase_0105_properties.json | 87 - .../testcase_0106/testcase_0106_properties.json | 87 - .../testcase_0107/testcase_0107_properties.json | 87 - .../testcase_0108/testcase_0108_properties.json | 87 - .../testcase_0109/testcase_0109_properties.json | 87 - .../testcase_0110/testcase_0110_properties.json | 87 - .../testcase_0111/testcase_0111_properties.json | 89 - .../testcase_0112/testcase_0112_properties.json | 89 - .../testcase_0113/testcase_0113_properties.json | 89 - .../testcase_0114/testcase_0114_properties.json | 89 - .../testcase_0115/testcase_0115_properties.json | 89 - .../testcase_0116/testcase_0116_properties.json | 89 - .../testcase_0117/testcase_0117_properties.json | 89 - .../testcase_0118/testcase_0118_properties.json | 89 - .../testcase_0119/testcase_0119_properties.json | 89 - .../testcase_0121/cluster_config.json | 76 - .../testcase_0121/testcase_0121_properties.json | 109 - .../testcase_0122/cluster_config.json | 76 - .../testcase_0122/testcase_0122_properties.json | 109 - .../testcase_0123/cluster_config.json | 76 - .../testcase_0123/testcase_0123_properties.json | 109 - .../testcase_0124/cluster_config.json | 76 - .../testcase_0124/testcase_0124_properties.json | 113 - .../testcase_0125/cluster_config.json | 76 - .../testcase_0125/testcase_0125_properties.json | 113 - 
.../testcase_0126/cluster_config.json | 76 - .../testcase_0126/testcase_0126_properties.json | 113 - .../testcase_0127/cluster_config.json | 76 - .../testcase_0127/testcase_0127_properties.json | 113 - .../testcase_0128/testcase_0128_properties.json | 90 - .../testcase_0131/cluster_config.json | 76 - .../testcase_0131/testcase_0131_properties.json | 112 - .../testcase_0132/cluster_config.json | 76 - .../testcase_0132/testcase_0132_properties.json | 109 - .../testcase_0133/cluster_config.json | 76 - .../testcase_0133/testcase_0133_properties.json | 109 - .../testcase_0134/testcase_0134_properties.json | 93 - .../testcase_0151/testcase_0151_properties.json | 90 - .../testcase_0152/testcase_0152_properties.json | 90 - .../testcase_0153/testcase_0153_properties.json | 90 - .../testcase_0154/testcase_0154_properties.json | 90 - .../testcase_0155/testcase_0155_properties.json | 90 - .../testcase_0156/testcase_0156_properties.json | 90 - .../testcase_0157/testcase_0157_properties.json | 90 - .../testcase_0158/testcase_0158_properties.json | 90 - .../testcase_0159/testcase_0159_properties.json | 92 - .../testcase_0201/testcase_0201_properties.json | 89 - .../testcase_0202/testcase_0202_properties.json | 89 - .../testcase_0203/testcase_0203_properties.json | 89 - .../testcase_0204/testcase_0204_properties.json | 89 - .../testcase_0205/testcase_0205_properties.json | 89 - .../testcase_0206/testcase_0206_properties.json | 89 - .../testcase_0207/testcase_0207_properties.json | 89 - .../testcase_0208/testcase_0208_properties.json | 89 - .../testcase_0209/testcase_0209_properties.json | 91 - .../testcase_0251/testcase_0251_properties.json | 89 - .../testcase_0252/testcase_0252_properties.json | 89 - .../testcase_0253/testcase_0253_properties.json | 89 - .../testcase_0254/testcase_0254_properties.json | 89 - .../testcase_0255/testcase_0255_properties.json | 89 - .../testcase_0256/testcase_0256_properties.json | 89 - .../testcase_0257/testcase_0257_properties.json | 89 - .../testcase_0258/testcase_0258_properties.json | 89 - .../testcase_0259/testcase_0259_properties.json | 91 - .../testcase_0301/testcase_0301_properties.json | 87 - .../testcase_0302/testcase_0302_properties.json | 87 - .../testcase_0303/testcase_0303_properties.json | 87 - .../testcase_0304/testcase_0304_properties.json | 87 - .../testcase_0305/testcase_0305_properties.json | 87 - .../testcase_0306/testcase_0306_properties.json | 87 - .../testcase_0307/testcase_0307_properties.json | 87 - .../testcase_0308/testcase_0308_properties.json | 87 - .../testcase_0309/testcase_0309_properties.json | 89 - .../testcase_1/cluster_config.json | 58 - .../testcase_1/testcase_1_properties.json | 81 - .../testcase_10101_properties.json | 86 - .../testcase_10102_properties.json | 86 - .../testcase_10103_properties.json | 86 - .../testcase_10104_properties.json | 86 - .../testcase_10105_properties.json | 86 - .../testcase_10106_properties.json | 86 - .../testcase_10107_properties.json | 86 - .../testcase_10108_properties.json | 86 - .../testcase_10109_properties.json | 86 - .../testcase_10110_properties.json | 86 - .../testcase_10131/cluster_config.json | 76 - .../testcase_10131_properties.json | 110 - .../testcase_10132/cluster_config.json | 76 - .../testcase_10132_properties.json | 107 - .../testcase_10133/cluster_config.json | 76 - .../testcase_10133_properties.json | 107 - .../testcase_10134_properties.json | 92 - .../testcase_4001/cluster_config.json | 76 - .../testcase_4001/testcase_4001_properties.json | 113 - .../testcase_4002/cluster_config.json 
| 76 - .../testcase_4002/testcase_4002_properties.json | 113 - .../testcase_4003/cluster_config.json | 76 - .../testcase_4003/testcase_4003_properties.json | 113 - .../testcase_4004/cluster_config.json | 76 - .../testcase_4004/testcase_4004_properties.json | 113 - .../testcase_4005/cluster_config.json | 76 - .../testcase_4005/testcase_4005_properties.json | 113 - .../testcase_4006/cluster_config.json | 76 - .../testcase_4006/testcase_4006_properties.json | 113 - .../testcase_4007/cluster_config.json | 76 - .../testcase_4007/testcase_4007_properties.json | 113 - .../testcase_4008/cluster_config.json | 76 - .../testcase_4008/testcase_4008_properties.json | 113 - .../testcase_4011/cluster_config.json | 76 - .../testcase_4011/testcase_4011_properties.json | 113 - .../testcase_4012/cluster_config.json | 76 - .../testcase_4012/testcase_4012_properties.json | 113 - .../testcase_4013/cluster_config.json | 76 - .../testcase_4013/testcase_4013_properties.json | 113 - .../testcase_4014/cluster_config.json | 76 - .../testcase_4014/testcase_4014_properties.json | 113 - .../testcase_4015/cluster_config.json | 76 - .../testcase_4015/testcase_4015_properties.json | 113 - .../testcase_4016/cluster_config.json | 76 - .../testcase_4016/testcase_4016_properties.json | 113 - .../testcase_4017/cluster_config.json | 76 - .../testcase_4017/testcase_4017_properties.json | 113 - .../testcase_4018/cluster_config.json | 76 - .../testcase_4018/testcase_4018_properties.json | 113 - .../testcase_9051/cluster_config.json | 58 - .../testcase_9051/testcase_9051_properties.json | 86 - system_test/run_all.sh | 7 - system_test/run_all_replica.sh | 7 - system_test/run_sanity.sh | 7 - system_test/system_test_env.py | 138 - system_test/system_test_runner.py | 331 --- system_test/testcase_to_run_all.json | 139 - system_test/testcase_to_run_all_replica.json | 123 - system_test/testcase_to_run_sanity.json | 5 - system_test/testcase_to_skip.json | 3 - system_test/utils/__init__.py | 1 - system_test/utils/kafka_system_test_utils.py | 2512 ------------------ system_test/utils/metrics.py | 298 --- system_test/utils/pyh.py | 161 -- system_test/utils/replication_utils.py | 70 - system_test/utils/setup_utils.py | 47 - system_test/utils/system_test_utils.py | 638 ----- system_test/utils/testcase_env.py | 173 -- 251 files changed, 2 insertions(+), 26346 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/Vagrantfile ---------------------------------------------------------------------- diff --git a/Vagrantfile b/Vagrantfile index 31b99b4..caece17 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -148,7 +148,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| end # Exclude some directories that can grow very large from syncing - override.vm.synced_folder ".", "/vagrant", type: "rsync", :rsync_excludes => ['.git', 'core/data/', 'logs/', 'system_test/', 'tests/results/', 'results/'] + override.vm.synced_folder ".", "/vagrant", type: "rsync", :rsync_excludes => ['.git', 'core/data/', 'logs/', 'tests/results/', 'results/'] end def name_node(node, name) http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/build.gradle ---------------------------------------------------------------------- diff --git a/build.gradle b/build.gradle index d177e2e..279c51f 100644 --- a/build.gradle +++ b/build.gradle @@ -75,8 +75,7 @@ rat { 'gradlew', 'gradlew.bat', '**/README.md', - '.reviewboardrc', - 'system_test/**' + '.reviewboardrc' ]) } 
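For context on the Vagrantfile hunk above: each entry in the exclude list becomes an rsync exclude pattern, so after dropping 'system_test/' a sync to the guest behaves roughly like the sketch below (an illustrative invocation only; the exact flags and destination Vagrant builds may differ):

    # rough equivalent of the synced_folder rsync after this change
    rsync -az --exclude '.git' --exclude 'core/data/' --exclude 'logs/' \
          --exclude 'tests/results/' --exclude 'results/' \
          ./ /vagrant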
http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/README.txt ---------------------------------------------------------------------- diff --git a/system_test/README.txt b/system_test/README.txt deleted file mode 100644 index e96d15d..0000000 --- a/system_test/README.txt +++ /dev/null @@ -1,83 +0,0 @@ -# ========================== -# Quick Start -# ========================== - -* Please note that the following commands should be executed after downloading the kafka source code to build all the required binaries: - 1. <kafka install dir>/ $ ./gradlew jar - - Now you are ready to follow the steps below. - 1. Update system_test/cluster_config.json with the "kafka_home" & "java_home" values specific to your environment - 2. Edit system_test/replication_testsuite/testcase_1/testcase_1_properties.json and update "broker-list" to the proper settings of your environment. (If this test is to be run on a single localhost, no change is required.) - 3. Create a testcase_to_run.json file with the tests you wish to run. You can start by just copying one of our preset test suites. For example: - cp testcase_to_run_sanity.json testcase_to_run.json - 4. To run the test, go to <kafka_home>/system_test and run the following command: - $ python -u -B system_test_runner.py 2>&1 | tee system_test_output.log - 5. To turn on debugging, update system_test/logging.conf by changing the level in the handlers section from INFO to DEBUG. - - We also have three built-in test suites you can use after you set up your environment (steps 1 and 2 above): - * run_sanity.sh - runs a single basic replication test - * run_all_replica.sh - runs all replication tests - * run_all.sh - runs all replication and mirror_maker tests - -# ========================== -# Overview -# ========================== - -"system_test" has been transformed into a system regression test framework intended for the automation of system / integration testing of data platform software such as Kafka. The test framework is implemented in Python, a popular scripting language with well-supported features. - -The framework has the following levels: - -1. The first level is generic and does not depend on any product-specific details. - location: system_test - a. system_test_runner.py - It implements the main class RegTest as an entry point. - b. system_test_env.py - It implements the class RegTestEnv, which defines the testing environment of a test session, such as the base directory and environment variables specific to the local machine. - -2. The second level defines a suite of tests, such as Kafka's replication (including basic testing, failure testing, ... etc) - location: system_test/<suite directory name>*. - - * Please note the test framework will look for a specific suffix of the directories under system_test to determine what test suites are available. The suffix of <suite directory name> can be defined in the SystemTestEnv class (system_test_env.py) - - a. replica_basic_test.py - This is a test module file. It implements the test logic for basic replication testing as follows: - - i. start zookeepers - ii. start brokers - iii. create kafka topics - iv. look up which broker is the leader - v. terminate the leader (if defined in the testcase config json file) - vi. start a producer to send n messages - vii. start a consumer to receive messages - viii. check whether there is any data loss - - b. 
config/ - This config directory provides templates for all the properties files needed by zookeeper, brokers, producer and consumer (any changes to the files under this directory will be reflected or overwritten by the settings under testcase_<n>/testcase_<n>_properties.json) - - c. testcase_<n>** - The testcase directory contains the testcase argument definition file: testcase_1_properties.json. This file defines the specific configurations for the testcase, such as the following (e.g. producer-related): - i. no. of producer threads - ii. no. of messages to produce - iii. zkconnect string - - When this test case is run, the test framework will copy and update the template properties files into testcase_<n>/config. The logs of the various components will be saved in testcase_<n>/logs - - ** Please note the test framework will look for a specific prefix of the directories under system_test/<test suite dir>/ to determine what test cases are available. The prefix of <testcase directory name> can be defined in the SystemTestEnv class (system_test_env.py) - -# ========================== -# Adding a Test Case -# ========================== - -To create a new test suite called "broker_testsuite", do the following: - - 1. Copy system_test/replication_testsuite => system_test/broker_testsuite - 2. Rename system_test/broker_testsuite/replica_basic_test.py => system_test/broker_testsuite/broker_basic_test.py - 3. Edit system_test/broker_testsuite/broker_basic_test.py and update all ReplicaBasicTest-related class names to BrokerBasicTest (as an example) - 4. Follow the flow of system_test/broker_testsuite/broker_basic_test.py and modify the necessary test logic accordingly. - - -To create a new test case under "replication_testsuite", do the following: - - 1. Copy system_test/replication_testsuite/testcase_1 => system_test/replication_testsuite/testcase_2 - 2. Rename system_test/replication_testsuite/testcase_2/testcase_1_properties.json => system_test/replication_testsuite/testcase_2/testcase_2_properties.json - 3. Update system_test/replication_testsuite/testcase_2/testcase_2_properties.json with the corresponding settings for testcase 2. - -Note: -The following testcases are for the old producer and the old mirror maker. We can remove them once we phase out the old producer client. - replication_testsuite: testcase_{10101 - 10110} testcase_{10131 - 10134} - mirror_maker_testsuite: testcase_{15001 - 15006} http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/__init__.py ---------------------------------------------------------------------- diff --git a/system_test/__init__.py b/system_test/__init__.py deleted file mode 100644 index 8d1c8b6..0000000 --- a/system_test/__init__.py +++ /dev/null @@ -1 +0,0 @@ - http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/broker_failure/README ---------------------------------------------------------------------- diff --git a/system_test/broker_failure/README b/system_test/broker_failure/README deleted file mode 100644 index e7ff738..0000000 --- a/system_test/broker_failure/README +++ /dev/null @@ -1,72 +0,0 @@ -** Please note that the following commands should be executed - after downloading the kafka source code to build all the - required binaries: - 1. <kafka install dir>/ $ ./sbt update - 2. <kafka install dir>/ $ ./sbt package - - Now you are ready to follow the steps below. 
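For the broker_failure suite, combining the build note above with the Quick Start section further down in this README, an end-to-end run looked roughly like the following shell session (a sketch; the iteration count and bounce pattern are illustrative values, not defaults):

    # build the old sbt-based binaries, then run the test from its suite directory
    $ cd <kafka install dir>
    $ ./sbt update
    $ ./sbt package
    $ cd system_test/broker_failure
    # 10 iterations, bouncing mirror maker (2) and target broker (3) in turn
    $ bin/run-test.sh -n 10 -s 23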
- -This script performs broker failure tests in an environment with -Mirrored Source & Target clusters on a single machine: - -1. Start a cluster of Kafka source brokers -2. Start a cluster of Kafka target brokers -3. Start one or more Mirror Makers to create mirroring between - source and target clusters -4. A producer produces batches of messages to the SOURCE brokers - in the background -5. The Kafka SOURCE brokers, TARGET brokers and Mirror Makers will be - terminated in a round-robin fashion, and the test will wait for the - consumers to catch up. -6. Repeat step 5 as many times as specified in the script -7. An independent ConsoleConsumer in publish/subscribe mode - consumes messages from the SOURCE brokers cluster -8. An independent ConsoleConsumer in publish/subscribe mode - consumes messages from the TARGET brokers cluster - -Expected results: -================== -There should not be any discrepancies when comparing the unique -message checksums from the source ConsoleConsumer and the -target ConsoleConsumer. - -Notes: -================== -The number of Kafka SOURCE brokers can be increased as follows: -1. Update the value of $num_kafka_source_server in this script -2. Make sure that there is a corresponding number of prop files: - $base_dir/config/server_source{1..4}.properties - -The number of Kafka TARGET brokers can be increased as follows: -1. Update the value of $num_kafka_target_server in this script -2. Make sure that there is a corresponding number of prop files: - $base_dir/config/server_target{1..3}.properties - -Quick Start: -================== -In the directory <kafka home>/system_test/broker_failure, -execute this script as follows: - $ bin/run-test.sh -n <num of iterations> -s <servers to bounce> - -num of iterations - the number of iterations that the test runs - -servers to bounce - the servers to be bounced in a round-robin fashion. - - Values to be entered: - 1 - source broker - 2 - mirror maker - 3 - target broker - - Example: - * To bounce only the mirror maker and target broker - in turn, enter the value 23. - * To bounce only the mirror maker, enter the value 2. - * To run the test without bouncing, enter 0. - -At the end of the test, the received message checksums in both -SOURCE & TARGET will be compared. If all checksums match, -the test is PASSED. Otherwise, the test is FAILED. - -In the event of failure, by default the brokers and zookeepers -remain running to make it easier to debug the issue - hit Ctrl-C -to shut them down. http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/broker_failure/bin/kafka-run-class.sh ---------------------------------------------------------------------- diff --git a/system_test/broker_failure/bin/kafka-run-class.sh b/system_test/broker_failure/bin/kafka-run-class.sh deleted file mode 100755 index 05f46b6..0000000 --- a/system_test/broker_failure/bin/kafka-run-class.sh +++ /dev/null @@ -1,67 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -if [ $# -lt 1 ]; -then - echo "USAGE: $0 classname [opts]" - exit 1 -fi - -base_dir=$(dirname $0)/.. -kafka_inst_dir=${base_dir}/../.. - -for file in $kafka_inst_dir/project/boot/scala-2.8.0/lib/*.jar; -do - CLASSPATH=$CLASSPATH:$file -done - -for file in $kafka_inst_dir/core/target/scala_2.8.0/*.jar; -do - CLASSPATH=$CLASSPATH:$file -done - -for file in $kafka_inst_dir/core/lib/*.jar; -do - CLASSPATH=$CLASSPATH:$file -done - -for file in $kafka_inst_dir/perf/target/scala_2.8.0/kafka*.jar; -do - CLASSPATH=$CLASSPATH:$file -done - -for file in $kafka_inst_dir/core/lib_managed/scala_2.8.0/compile/*.jar; -do - if [ ${file##*/} != "sbt-launch.jar" ]; then - CLASSPATH=$CLASSPATH:$file - fi -done -if [ -z "$KAFKA_JMX_OPTS" ]; then - KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false " -fi -if [ -z "$KAFKA_OPTS" ]; then - KAFKA_OPTS="-Xmx512M -server -Dlog4j.configuration=file:$base_dir/config/log4j.properties" -fi -if [ $JMX_PORT ]; then - KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT " -fi -if [ -z "$JAVA_HOME" ]; then - JAVA="java" -else - JAVA="$JAVA_HOME/bin/java" -fi - -$JAVA $KAFKA_OPTS $KAFKA_JMX_OPTS -cp $CLASSPATH $@ http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/broker_failure/bin/run-test.sh ---------------------------------------------------------------------- diff --git a/system_test/broker_failure/bin/run-test.sh b/system_test/broker_failure/bin/run-test.sh deleted file mode 100755 index 549cd1f..0000000 --- a/system_test/broker_failure/bin/run-test.sh +++ /dev/null @@ -1,815 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# =========== -# run-test.sh -# =========== - -# ==================================== -# Do not change the followings -# (keep this section at the beginning -# of this script) -# ==================================== -readonly system_test_root=$(dirname $0)/../.. # path of <kafka install>/system_test -readonly common_dir=${system_test_root}/common # common util scripts for system_test -source ${common_dir}/util.sh # include the util script - -readonly base_dir=$(dirname $0)/.. 
# the base dir of this test suite -readonly test_start_time="$(date +%s)" # time starting this test -readonly bounce_source_id=1 -readonly bounce_mir_mkr_id=2 -readonly bounce_target_id=3 -readonly log4j_prop_file=$base_dir/config/log4j.properties - -iter=1 # init a counter to keep track of iterations -num_iterations=5 # total no. of iterations to run -svr_to_bounce=0 # servers to bounce: 1-source 2-mirror_maker 3-target - # 12 - source & mirror_maker - # 13 - source & target - -# ==================================== -# No need to change the following -# configurations in most cases -# ==================================== -readonly zk_source_port=2181 # source zk port -readonly zk_target_port=2182 # target zk port -readonly test_topic=test01 # topic used in this test -readonly consumer_grp=group1 # consumer group -readonly source_console_consumer_grp=source -readonly target_console_consumer_grp=target -readonly message_size=100 -readonly console_consumer_timeout_ms=15000 -readonly num_kafka_source_server=4 # requires same no. of property files such as: - # $base_dir/config/server_source{1..4}.properties -readonly num_kafka_target_server=3 # requires same no. of property files such as: - # $base_dir/config/server_target{1..3}.properties -readonly num_kafka_mirror_maker=3 # any values greater than 0 -readonly wait_time_after_killing_broker=0 # wait after broker is stopped but before starting again -readonly wait_time_after_restarting_broker=10 - -# ==================================== -# Change the followings as needed -# ==================================== -num_msg_per_batch=500 # no. of msg produced in each calling of ProducerPerformance -num_producer_threads=5 # no. of producer threads to send msg -producer_sleep_min=5 # min & max sleep time (in sec) between each -producer_sleep_max=5 # batch of messages sent from producer - -# ==================================== -# zookeeper -# ==================================== -pid_zk_source= -pid_zk_target= -zk_log4j_log= - -# ==================================== -# kafka source -# ==================================== -kafka_source_pids= -kafka_source_prop_files= -kafka_source_log_files= -kafka_topic_creation_log_file=$base_dir/kafka_topic_creation.log -kafka_log4j_log= - -# ==================================== -# kafka target -# ==================================== -kafka_target_pids= -kafka_target_prop_files= -kafka_target_log_files= - -# ==================================== -# mirror maker -# ==================================== -kafka_mirror_maker_pids= -kafka_mirror_maker_log_files= -consumer_prop_file=$base_dir/config/whitelisttest.consumer.properties -mirror_producer_prop_files= - -# ==================================== -# console consumer source -# ==================================== -console_consumer_source_pid= -console_consumer_source_log=$base_dir/console_consumer_source.log -console_consumer_source_mid_log=$base_dir/console_consumer_source_mid.log -console_consumer_source_mid_sorted_log=$base_dir/console_consumer_source_mid_sorted.log -console_consumer_source_mid_sorted_uniq_log=$base_dir/console_consumer_source_mid_sorted_uniq.log - -# ==================================== -# console consumer target -# ==================================== -console_consumer_target_pid= -console_consumer_target_log=$base_dir/console_consumer_target.log -console_consumer_target_mid_log=$base_dir/console_consumer_target_mid.log -console_consumer_target_mid_sorted_log=$base_dir/console_consumer_target_mid_sorted.log 
-console_consumer_target_mid_sorted_uniq_log=$base_dir/console_consumer_target_mid_sorted_uniq.log - -# ==================================== -# producer -# ==================================== -background_producer_pid= -producer_performance_log=$base_dir/producer_performance.log -producer_performance_mid_log=$base_dir/producer_performance_mid.log -producer_performance_mid_sorted_log=$base_dir/producer_performance_mid_sorted.log -producer_performance_mid_sorted_uniq_log=$base_dir/producer_performance_mid_sorted_uniq.log -tmp_file_to_stop_background_producer=/tmp/tmp_file_to_stop_background_producer - -# ==================================== -# test reports -# ==================================== -checksum_diff_log=$base_dir/checksum_diff.log - - -# ==================================== -# initialize prop and log files -# ==================================== -initialize() { - for ((i=1; i<=$num_kafka_target_server; i++)) - do - kafka_target_prop_files[${i}]=$base_dir/config/server_target${i}.properties - kafka_target_log_files[${i}]=$base_dir/kafka_target${i}.log - kafka_mirror_maker_log_files[${i}]=$base_dir/kafka_mirror_maker${i}.log - done - - for ((i=1; i<=$num_kafka_source_server; i++)) - do - kafka_source_prop_files[${i}]=$base_dir/config/server_source${i}.properties - kafka_source_log_files[${i}]=$base_dir/kafka_source${i}.log - done - - for ((i=1; i<=$num_kafka_mirror_maker; i++)) - do - mirror_producer_prop_files[${i}]=$base_dir/config/mirror_producer${i}.properties - done - - zk_log4j_log=`grep "log4j.appender.zookeeperAppender.File=" $log4j_prop_file | awk -F '=' '{print $2}'` - kafka_log4j_log=`grep "log4j.appender.kafkaAppender.File=" $log4j_prop_file | awk -F '=' '{print $2}'` -} - -# ========================================= -# cleanup -# ========================================= -cleanup() { - info "cleaning up" - - rm -rf $tmp_file_to_stop_background_producer - rm -rf $kafka_topic_creation_log_file - - rm -rf /tmp/zookeeper_source - rm -rf /tmp/zookeeper_target - - rm -rf /tmp/kafka-source{1..4}-logs - rm -rf /tmp/kafka-target{1..3}-logs - - rm -rf $zk_log4j_log - rm -rf $kafka_log4j_log - - for ((i=1; i<=$num_kafka_target_server; i++)) - do - rm -rf ${kafka_target_log_files[${i}]} - rm -rf ${kafka_mirror_maker_log_files[${i}]} - done - - rm -f $base_dir/zookeeper_source.log - rm -f $base_dir/zookeeper_target.log - rm -f $base_dir/kafka_source{1..4}.log - - rm -f $producer_performance_log - rm -f $producer_performance_mid_log - rm -f $producer_performance_mid_sorted_log - rm -f $producer_performance_mid_sorted_uniq_log - - rm -f $console_consumer_target_log - rm -f $console_consumer_source_log - rm -f $console_consumer_target_mid_log - rm -f $console_consumer_source_mid_log - - rm -f $checksum_diff_log - - rm -f $console_consumer_target_mid_sorted_log - rm -f $console_consumer_source_mid_sorted_log - rm -f $console_consumer_target_mid_sorted_uniq_log - rm -f $console_consumer_source_mid_sorted_uniq_log -} - -# ========================================= -# wait_for_zero_consumer_lags -# ========================================= -wait_for_zero_consumer_lags() { - - this_group_name=$1 - this_zk_port=$2 - - # no of times to check for zero lagging - no_of_zero_to_verify=3 - - while [ 'x' == 'x' ] - do - TOTAL_LAG=0 - CONSUMER_LAGS=`$base_dir/bin/kafka-run-class.sh kafka.tools.ConsumerOffsetChecker \ - --group $target_console_consumer_grp \ - --zkconnect localhost:$zk_target_port \ - --topic $test_topic \ - | grep "Consumer lag" | tr -d ' ' | cut -f2 -d '='` - - for lag in 
$CONSUMER_LAGS; - do - TOTAL_LAG=$(($TOTAL_LAG + $lag)) - done - - info "mirror console consumer TOTAL_LAG = $TOTAL_LAG" - if [ $TOTAL_LAG -eq 0 ]; then - if [ $no_of_zero_to_verify -eq 0 ]; then - echo - return 0 - fi - no_of_zero_to_verify=$(($no_of_zero_to_verify - 1)) - fi - sleep 1 - done -} - -# ========================================= -# create_topic -# ========================================= -create_topic() { - this_topic_to_create=$1 - this_zk_conn_str=$2 - this_replica_factor=$3 - - info "creating topic [$this_topic_to_create] on [$this_zk_conn_str]" - $base_dir/../../bin/kafka-create-topic.sh \ - --topic $this_topic_to_create \ - --zookeeper $this_zk_conn_str \ - --replica $this_replica_factor \ - 2> $kafka_topic_creation_log_file -} - -# ========================================= -# start_zk -# ========================================= -start_zk() { - info "starting zookeepers" - - $base_dir/../../bin/zookeeper-server-start.sh \ - $base_dir/config/zookeeper_source.properties \ - 2>&1 > $base_dir/zookeeper_source.log & - pid_zk_source=$! - - $base_dir/../../bin/zookeeper-server-start.sh \ - $base_dir/config/zookeeper_target.properties \ - 2>&1 > $base_dir/zookeeper_target.log & - pid_zk_target=$! -} - -# ========================================= -# start_source_servers_cluster -# ========================================= -start_source_servers_cluster() { - info "starting source cluster" - - for ((i=1; i<=$num_kafka_source_server; i++)) - do - start_source_server $i - done -} - -# ========================================= -# start_source_server -# ========================================= -start_source_server() { - s_idx=$1 - - $base_dir/bin/kafka-run-class.sh kafka.Kafka \ - ${kafka_source_prop_files[$s_idx]} \ - 2>&1 >> ${kafka_source_log_files[$s_idx]} & - kafka_source_pids[${s_idx}]=$! - - info " -> kafka_source_pids[$s_idx]: ${kafka_source_pids[$s_idx]}" -} - -# ========================================= -# start_target_servers_cluster -# ========================================= -start_target_servers_cluster() { - info "starting mirror cluster" - - for ((i=1; i<=$num_kafka_target_server; i++)) - do - start_target_server $i - done -} - -# ========================================= -# start_target_server -# ========================================= -start_target_server() { - s_idx=$1 - - $base_dir/bin/kafka-run-class.sh kafka.Kafka \ - ${kafka_target_prop_files[${s_idx}]} \ - 2>&1 >> ${kafka_target_log_files[${s_idx}]} & - kafka_target_pids[$s_idx]=$! - - info " -> kafka_target_pids[$s_idx]: ${kafka_target_pids[$s_idx]}" -} - -# ========================================= -# start_target_mirror_maker -# ========================================= -start_target_mirror_maker() { - info "starting mirror maker" - - for ((i=1; i<=$num_kafka_mirror_maker; i++)) - do - start_mirror_maker $i - done -} - -# ========================================= -# start_mirror_maker -# ========================================= -start_mirror_maker() { - s_idx=$1 - - $base_dir/bin/kafka-run-class.sh kafka.tools.MirrorMaker \ - --consumer.config $consumer_prop_file \ - --producer.config ${mirror_producer_prop_files[${s_idx}]} \ - --whitelist=\".*\" \ - 2>&1 >> ${kafka_mirror_maker_log_files[$s_idx]} & - kafka_mirror_maker_pids[${s_idx}]=$! 
- - info " -> kafka_mirror_maker_pids[$s_idx]: ${kafka_mirror_maker_pids[$s_idx]}" -} - -# ========================================= -# start_console_consumer -# ========================================= -start_console_consumer() { - - this_consumer_grp=$1 - this_consumer_zk_port=$2 - this_consumer_log=$3 - this_msg_formatter=$4 - - info "starting console consumers for $this_consumer_grp" - - $base_dir/bin/kafka-run-class.sh kafka.tools.ConsoleConsumer \ - --zookeeper localhost:$this_consumer_zk_port \ - --topic $test_topic \ - --group $this_consumer_grp \ - --from-beginning \ - --consumer-timeout-ms $console_consumer_timeout_ms \ - --formatter "kafka.tools.ConsoleConsumer\$${this_msg_formatter}" \ - 2>&1 > ${this_consumer_log} & - console_consumer_pid=$! - - info " -> console consumer pid: $console_consumer_pid" -} - -# ========================================= -# force_shutdown_background_producer -# - to be called when user press Ctrl-C -# ========================================= -force_shutdown_background_producer() { - info "force shutting down producer" - `ps auxw | grep "run\-test\|ProducerPerformance" | grep -v grep | awk '{print $2}' | xargs kill -9` -} - -# ========================================= -# force_shutdown_consumer -# - to be called when user press Ctrl-C -# ========================================= -force_shutdown_consumer() { - info "force shutting down consumer" - `ps auxw | grep ChecksumMessageFormatter | grep -v grep | awk '{print $2}' | xargs kill -9` -} - -# ========================================= -# shutdown_servers -# ========================================= -shutdown_servers() { - - info "shutting down mirror makers" - for ((i=1; i<=$num_kafka_mirror_maker; i++)) - do - #info "stopping mm pid: ${kafka_mirror_maker_pids[$i]}" - if [ "x${kafka_mirror_maker_pids[$i]}" != "x" ]; then - kill_child_processes 0 ${kafka_mirror_maker_pids[$i]}; - fi - done - - info "shutting down target servers" - for ((i=1; i<=$num_kafka_target_server; i++)) - do - if [ "x${kafka_target_pids[$i]}" != "x" ]; then - kill_child_processes 0 ${kafka_target_pids[$i]}; - fi - done - - info "shutting down source servers" - for ((i=1; i<=$num_kafka_source_server; i++)) - do - if [ "x${kafka_source_pids[$i]}" != "x" ]; then - kill_child_processes 0 ${kafka_source_pids[$i]}; - fi - done - - info "shutting down zookeeper servers" - if [ "x${pid_zk_target}" != "x" ]; then kill_child_processes 0 ${pid_zk_target}; fi - if [ "x${pid_zk_source}" != "x" ]; then kill_child_processes 0 ${pid_zk_source}; fi -} - -# ========================================= -# start_background_producer -# ========================================= -start_background_producer() { - - topic=$1 - - batch_no=0 - - while [ ! 
-e $tmp_file_to_stop_background_producer ] - do - sleeptime=$(get_random_range $producer_sleep_min $producer_sleep_max) - - info "producing $num_msg_per_batch messages on topic '$topic'" - $base_dir/bin/kafka-run-class.sh \ - kafka.tools.ProducerPerformance \ - --brokerinfo zk.connect=localhost:2181 \ - --topics $topic \ - --messages $num_msg_per_batch \ - --message-size $message_size \ - --threads $num_producer_threads \ - --initial-message-id $batch_no \ - 2>&1 >> $base_dir/producer_performance.log # appending all producers' msgs - - batch_no=$(($batch_no + $num_msg_per_batch)) - sleep $sleeptime - done -} - -# ========================================= -# cmp_checksum -# ========================================= -cmp_checksum() { - - cmp_result=0 - - grep MessageID $console_consumer_source_log | sed s'/^.*MessageID://g' | awk -F ':' '{print $1}' > $console_consumer_source_mid_log - grep MessageID $console_consumer_target_log | sed s'/^.*MessageID://g' | awk -F ':' '{print $1}' > $console_consumer_target_mid_log - grep MessageID $producer_performance_log | sed s'/^.*MessageID://g' | awk -F ':' '{print $1}' > $producer_performance_mid_log - - sort $console_consumer_target_mid_log > $console_consumer_target_mid_sorted_log - sort $console_consumer_source_mid_log > $console_consumer_source_mid_sorted_log - sort $producer_performance_mid_log > $producer_performance_mid_sorted_log - - sort -u $console_consumer_target_mid_log > $console_consumer_target_mid_sorted_uniq_log - sort -u $console_consumer_source_mid_log > $console_consumer_source_mid_sorted_uniq_log - sort -u $producer_performance_mid_log > $producer_performance_mid_sorted_uniq_log - - msg_count_from_source_consumer=`cat $console_consumer_source_mid_log | wc -l | tr -d ' '` - uniq_msg_count_from_source_consumer=`cat $console_consumer_source_mid_sorted_uniq_log | wc -l | tr -d ' '` - - msg_count_from_mirror_consumer=`cat $console_consumer_target_mid_log | wc -l | tr -d ' '` - uniq_msg_count_from_mirror_consumer=`cat $console_consumer_target_mid_sorted_uniq_log | wc -l | tr -d ' '` - - uniq_msg_count_from_producer=`cat $producer_performance_mid_sorted_uniq_log | wc -l | tr -d ' '` - - total_msg_published=`cat $producer_performance_mid_log | wc -l | tr -d ' '` - - duplicate_msg_in_producer=$(( $total_msg_published - $uniq_msg_count_from_producer )) - - crc_only_in_mirror_consumer=`comm -23 $console_consumer_target_mid_sorted_uniq_log $console_consumer_source_mid_sorted_uniq_log` - crc_only_in_source_consumer=`comm -13 $console_consumer_target_mid_sorted_uniq_log $console_consumer_source_mid_sorted_uniq_log` - crc_common_in_both_consumer=`comm -12 $console_consumer_target_mid_sorted_uniq_log $console_consumer_source_mid_sorted_uniq_log` - - crc_only_in_producer=`comm -23 $producer_performance_mid_sorted_uniq_log $console_consumer_source_mid_sorted_uniq_log` - - duplicate_mirror_mid=`comm -23 $console_consumer_target_mid_sorted_log $console_consumer_target_mid_sorted_uniq_log` - no_of_duplicate_msg=$(( $msg_count_from_mirror_consumer - $uniq_msg_count_from_mirror_consumer \ - + $msg_count_from_source_consumer - $uniq_msg_count_from_source_consumer - \ - 2*$duplicate_msg_in_producer )) - - source_mirror_uniq_msg_diff=$(($uniq_msg_count_from_source_consumer - $uniq_msg_count_from_mirror_consumer)) - - echo "" - echo "========================================================" - echo "no. 
of messages published : $total_msg_published" - echo "producer unique msg rec'd : $uniq_msg_count_from_producer" - echo "source consumer msg rec'd : $msg_count_from_source_consumer" - echo "source consumer unique msg rec'd : $uniq_msg_count_from_source_consumer" - echo "mirror consumer msg rec'd : $msg_count_from_mirror_consumer" - echo "mirror consumer unique msg rec'd : $uniq_msg_count_from_mirror_consumer" - echo "total source/mirror duplicate msg : $no_of_duplicate_msg" - echo "source/mirror uniq msg count diff : $source_mirror_uniq_msg_diff" - echo "========================================================" - echo "(Please refer to $checksum_diff_log for more details)" - echo "" - - echo "========================================================" >> $checksum_diff_log - echo "crc only in producer" >> $checksum_diff_log - echo "========================================================" >> $checksum_diff_log - echo "${crc_only_in_producer}" >> $checksum_diff_log - echo "" >> $checksum_diff_log - echo "========================================================" >> $checksum_diff_log - echo "crc only in source consumer" >> $checksum_diff_log - echo "========================================================" >> $checksum_diff_log - echo "${crc_only_in_source_consumer}" >> $checksum_diff_log - echo "" >> $checksum_diff_log - echo "========================================================" >> $checksum_diff_log - echo "crc only in mirror consumer" >> $checksum_diff_log - echo "========================================================" >> $checksum_diff_log - echo "${crc_only_in_mirror_consumer}" >> $checksum_diff_log - echo "" >> $checksum_diff_log - echo "========================================================" >> $checksum_diff_log - echo "duplicate crc in mirror consumer" >> $checksum_diff_log - echo "========================================================" >> $checksum_diff_log - echo "${duplicate_mirror_mid}" >> $checksum_diff_log - - echo "=================" - if [[ $source_mirror_uniq_msg_diff -eq 0 && $uniq_msg_count_from_source_consumer -gt 0 ]]; then - echo "## Test PASSED" - else - echo "## Test FAILED" - fi - echo "=================" - echo - - return $cmp_result -} - -# ========================================= -# start_test -# ========================================= -start_test() { - - echo - info "===========================================================" - info "#### Starting Kafka Broker / Mirror Maker Failure Test ####" - info "===========================================================" - echo - - start_zk - sleep 2 - - start_source_servers_cluster - sleep 2 - - create_topic $test_topic localhost:$zk_source_port 1 - sleep 2 - - start_target_servers_cluster - sleep 2 - - start_target_mirror_maker - sleep 2 - - start_background_producer $test_topic & - background_producer_pid=$! - - info "Started background producer pid [${background_producer_pid}]" - sleep 5 - - # loop for no. of iterations specified in $num_iterations - while [ $num_iterations -ge $iter ] - do - # if $svr_to_bounce is '0', it means no bouncing - if [[ $num_iterations -ge $iter && $svr_to_bounce -gt 0 ]]; then - idx= - - # check which type of broker bouncing is requested: source, mirror_maker or target - - # $svr_to_bounce contains $bounce_target_id - eg. '3', '123', ... 
etc - svr_idx=`expr index $svr_to_bounce $bounce_target_id` - if [[ $num_iterations -ge $iter && $svr_idx -gt 0 ]]; then - echo - info "==========================================" - info "Iteration $iter of ${num_iterations}" - info "==========================================" - - # bounce target kafka broker - idx=$(get_random_range 1 $num_kafka_target_server) - - if [ "x${kafka_target_pids[$idx]}" != "x" ]; then - echo - info "#### Bouncing Kafka TARGET Broker ####" - - info "terminating kafka target[$idx] with process id ${kafka_target_pids[$idx]}" - kill_child_processes 0 ${kafka_target_pids[$idx]} - - info "sleeping for ${wait_time_after_killing_broker}s" - sleep $wait_time_after_killing_broker - - info "starting kafka target server" - start_target_server $idx - fi - iter=$(($iter+1)) - info "sleeping for ${wait_time_after_restarting_broker}s" - sleep $wait_time_after_restarting_broker - fi - - # $svr_to_bounce contains $bounce_mir_mkr_id - eg. '2', '123', ... etc - svr_idx=`expr index $svr_to_bounce $bounce_mir_mkr_id` - if [[ $num_iterations -ge $iter && $svr_idx -gt 0 ]]; then - echo - info "==========================================" - info "Iteration $iter of ${num_iterations}" - info "==========================================" - - # bounce mirror maker - idx=$(get_random_range 1 $num_kafka_mirror_maker) - - if [ "x${kafka_mirror_maker_pids[$idx]}" != "x" ]; then - echo - info "#### Bouncing Kafka Mirror Maker ####" - - info "terminating kafka mirror maker [$idx] with process id ${kafka_mirror_maker_pids[$idx]}" - kill_child_processes 0 ${kafka_mirror_maker_pids[$idx]} - - info "sleeping for ${wait_time_after_killing_broker}s" - sleep $wait_time_after_killing_broker - - info "starting kafka mirror maker" - start_mirror_maker $idx - fi - iter=$(($iter+1)) - info "sleeping for ${wait_time_after_restarting_broker}s" - sleep $wait_time_after_restarting_broker - fi - - # $svr_to_bounce contains $bounce_source_id - eg. '1', '123', ... etc - svr_idx=`expr index $svr_to_bounce $bounce_source_id` - if [[ $num_iterations -ge $iter && $svr_idx -gt 0 ]]; then - echo - info "==========================================" - info "Iteration $iter of ${num_iterations}" - info "==========================================" - - # bounce source kafka broker - idx=$(get_random_range 1 $num_kafka_source_server) - - if [ "x${kafka_source_pids[$idx]}" != "x" ]; then - echo - info "#### Bouncing Kafka SOURCE Broker ####" - - info "terminating kafka source[$idx] with process id ${kafka_source_pids[$idx]}" - kill_child_processes 0 ${kafka_source_pids[$idx]} - - info "sleeping for ${wait_time_after_killing_broker}s" - sleep $wait_time_after_killing_broker - - info "starting kafka source server" - start_source_server $idx - fi - iter=$(($iter+1)) - info "sleeping for ${wait_time_after_restarting_broker}s" - sleep $wait_time_after_restarting_broker - fi - else - echo - info "==========================================" - info "Iteration $iter of ${num_iterations}" - info "==========================================" - - info "No bouncing performed" - iter=$(($iter+1)) - info "sleeping for ${wait_time_after_restarting_broker}s" - sleep $wait_time_after_restarting_broker - fi - done - - # notify background producer to stop - `touch $tmp_file_to_stop_background_producer` - - echo - info "Tests completed. 
Waiting for consumers to catch up " - - # ======================================================= - # remove the following 'sleep 30' when KAFKA-313 is fixed - # ======================================================= - info "sleeping 30 sec" - sleep 30 -} - -# ========================================= -# print_usage -# ========================================= -print_usage() { - echo - echo "Error : invalid no. of arguments" - echo "Usage : $0 -n <no. of iterations> -s <servers to bounce>" - echo - echo " num of iterations - the number of iterations that the test runs" - echo - echo " servers to bounce - the servers to be bounced in a round-robin fashion" - echo " Values of the servers:" - echo " 0 - no bouncing" - echo " 1 - source broker" - echo " 2 - mirror maker" - echo " 3 - target broker" - echo " Example:" - echo " * To bounce only mirror maker and target broker" - echo " in turns, enter the value 23" - echo " * To bounce only mirror maker, enter the value 2" - echo " * To run the test without bouncing, enter 0" - echo - echo "Usage Example : $0 -n 10 -s 12" - echo " (run 10 iterations and bounce source broker (1) + mirror maker (2) in turn)" - echo -} - - -# ========================================= -# -# Main test begins here -# -# ========================================= - -# get command line arguments -while getopts "hb:i:n:s:x:" opt -do - case $opt in - b) - num_msg_per_batch=$OPTARG - ;; - h) - print_usage - exit - ;; - i) - producer_sleep_min=$OPTARG - ;; - n) - num_iterations=$OPTARG - ;; - s) - svr_to_bounce=$OPTARG - ;; - x) - producer_sleep_max=$OPTARG - ;; - ?) - print_usage - exit - ;; - esac -done - -# initialize and cleanup -initialize -cleanup -sleep 5 - -# Ctrl-c trap. Catches INT signal -trap "shutdown_servers; force_shutdown_consumer; force_shutdown_background_producer; cmp_checksum; exit 0" INT - -# starting the test -start_test - -# starting consumer to consume data in source -start_console_consumer $source_console_consumer_grp $zk_source_port $console_consumer_source_log DecodedMessageFormatter - -# starting consumer to consume data in target -start_console_consumer $target_console_consumer_grp $zk_target_port $console_consumer_target_log DecodedMessageFormatter - -# wait for zero source consumer lags -wait_for_zero_consumer_lags $source_console_consumer_grp $zk_source_port - -# wait for zero target consumer lags -wait_for_zero_consumer_lags $target_console_consumer_grp $zk_target_port - -# ======================================================= -# remove the following 'sleep 30' when KAFKA-313 is fixed -# ======================================================= -info "sleeping 30 sec" -sleep 30 - -shutdown_servers - -cmp_checksum -result=$? 
- -# =============================================== -# Report the time taken -# =============================================== -test_end_time="$(date +%s)" -total_test_time_sec=$(( $test_end_time - $test_start_time )) -total_test_time_min=$(( $total_test_time_sec / 60 )) -info "Total time taken: $total_test_time_min min for $num_iterations iterations" -echo - -exit $result http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/broker_failure/config/log4j.properties ---------------------------------------------------------------------- diff --git a/system_test/broker_failure/config/log4j.properties b/system_test/broker_failure/config/log4j.properties deleted file mode 100644 index 23ece9b..0000000 --- a/system_test/broker_failure/config/log4j.properties +++ /dev/null @@ -1,86 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -log4j.rootLogger=INFO, stdout - -# ==================================== -# messages going to kafkaAppender -# ==================================== -log4j.logger.kafka=DEBUG, kafkaAppender -log4j.logger.org.I0Itec.zkclient.ZkClient=INFO, kafkaAppender -log4j.logger.org.apache.zookeeper=INFO, kafkaAppender - -# ==================================== -# messages going to zookeeperAppender -# ==================================== -# (comment out this line to redirect ZK-related messages to kafkaAppender -# to allow reading both Kafka and ZK debugging messages in a single file) -log4j.logger.org.apache.zookeeper=INFO, zookeeperAppender - -# ==================================== -# stdout -# ==================================== -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n - -# ==================================== -# fileAppender -# ==================================== -log4j.appender.fileAppender=org.apache.log4j.FileAppender -log4j.appender.fileAppender.File=/tmp/kafka_all_request.log -log4j.appender.fileAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.fileAppender.layout.ConversionPattern=[%d] %p %m (%c)%n - -# ==================================== -# kafkaAppender -# ==================================== -log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.kafkaAppender.File=/tmp/kafka.log -log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n -log4j.additivity.kafka=true - -# ==================================== -# zookeeperAppender -# ==================================== -log4j.appender.zookeeperAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.zookeeperAppender.File=/tmp/zookeeper.log 

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/broker_failure/config/mirror_producer.properties
----------------------------------------------------------------------
diff --git a/system_test/broker_failure/config/mirror_producer.properties b/system_test/broker_failure/config/mirror_producer.properties
deleted file mode 100644
index 7f80a1e..0000000
--- a/system_test/broker_failure/config/mirror_producer.properties
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# zk connection string
-# comma separated host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002"
-zk.connect=localhost:2182
-
-# timeout in ms for connecting to zookeeper
-zk.connection.timeout.ms=1000000
-
-producer.type=async
-
-# to avoid dropping events if the queue is full, wait indefinitely
-queue.enqueue.timeout.ms=-1
-

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/broker_failure/config/mirror_producer1.properties
----------------------------------------------------------------------
diff --git a/system_test/broker_failure/config/mirror_producer1.properties b/system_test/broker_failure/config/mirror_producer1.properties
deleted file mode 100644
index 81dae76..0000000
--- a/system_test/broker_failure/config/mirror_producer1.properties
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# zk connection string
-# comma separated host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002"
-#broker.list=0:localhost:9081
-zk.connect=localhost:2182
-
-# timeout in ms for connecting to zookeeper
-zk.connection.timeout.ms=1000000
-
-producer.type=async
-
-# to avoid dropping events if the queue is full, wait indefinitely
-queue.enqueue.timeout.ms=-1
-

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/broker_failure/config/mirror_producer2.properties
----------------------------------------------------------------------
diff --git a/system_test/broker_failure/config/mirror_producer2.properties b/system_test/broker_failure/config/mirror_producer2.properties
deleted file mode 100644
index 714b95d..0000000
--- a/system_test/broker_failure/config/mirror_producer2.properties
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# zk connection string
-# comma separated host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002"
-#broker.list=0:localhost:9082
-zk.connect=localhost:2182
-
-# timeout in ms for connecting to zookeeper
-zk.connection.timeout.ms=1000000
-
-producer.type=async
-
-# to avoid dropping events if the queue is full, wait indefinitely
-queue.enqueue.timeout.ms=-1
-

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/broker_failure/config/mirror_producer3.properties
----------------------------------------------------------------------
diff --git a/system_test/broker_failure/config/mirror_producer3.properties b/system_test/broker_failure/config/mirror_producer3.properties
deleted file mode 100644
index e8fa72d..0000000
--- a/system_test/broker_failure/config/mirror_producer3.properties
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# zk connection string
-# comma separated host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002"
-#broker.list=0:localhost:9083
-zk.connect=localhost:2182
-
-# timeout in ms for connecting to zookeeper
-zk.connection.timeout.ms=1000000
-
-producer.type=async
-
-# to avoid dropping events if the queue is full, wait indefinitely
-queue.enqueue.timeout.ms=-1
-
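The four mirror_producer files are identical apart from the commented-out broker.list hints; each points a mirroring producer at the target cluster's ZooKeeper (port 2182) and, via queue.enqueue.timeout.ms=-1, blocks rather than drops when the async queue fills. Roughly how such a file gets wired in (the exact invocation is an assumption based on the era's kafka.tools.MirrorMaker, not a quote from the deleted run-test.sh):

    $base_dir/bin/kafka-run-class.sh kafka.tools.MirrorMaker \
        --consumer.config config/whitelisttest.consumer.properties \
        --producer.config config/mirror_producer.properties \
        --whitelist=".*" &
    pid_mirror_maker=$!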
"127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" -zk.connect=localhost:2181 - -# timeout in ms for connecting to zookeeper -zk.connection.timeout.ms=1000000 - -# time based topic flush intervals in ms -#log.flush.intervals.ms.per.topic=topic:1000 - -# default time based flush interval in ms -log.flush.interval.ms=1000 - -# time based topic flasher time rate in ms -log.flush.scheduler.interval.ms=1000 - http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/broker_failure/config/server_source2.properties ---------------------------------------------------------------------- diff --git a/system_test/broker_failure/config/server_source2.properties b/system_test/broker_failure/config/server_source2.properties deleted file mode 100644 index 570bafc..0000000 --- a/system_test/broker_failure/config/server_source2.properties +++ /dev/null @@ -1,76 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see kafka.server.KafkaConfig for additional details and defaults - -# the id of the broker -broker.id=2 - -# hostname of broker. If not set, will pick up from the value returned -# from getLocalHost. If there are multiple interfaces getLocalHost -# may not be what you want. -# host.name= - -# number of logical partitions on this broker -num.partitions=1 - -# the port the socket server runs on -port=9092 - -# the number of processor threads the socket server uses. Defaults to the number of cores on the machine -num.threads=8 - -# the directory in which to store log files -log.dir=/tmp/kafka-source2-logs - -# the send buffer used by the socket server -socket.send.buffer.bytes=1048576 - -# the receive buffer used by the socket server -socket.receive.buffer.bytes=1048576 - -# the maximum size of a log segment -log.segment.bytes=10000000 - -# the interval between running cleanup on the logs -log.cleanup.interval.mins=1 - -# the minimum age of a log file to eligible for deletion -log.retention.hours=168 - -#the number of messages to accept without flushing the log to disk -log.flush.interval.messages=600 - -#set the following properties to use zookeeper - -# enable connecting to zookeeper -enable.zookeeper=true - -# zk connection string -# comma separated host:port pairs, each corresponding to a zk -# server. e.g. 
"127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" -zk.connect=localhost:2181 - -# timeout in ms for connecting to zookeeper -zk.connection.timeout.ms=1000000 - -# time based topic flush intervals in ms -#log.flush.intervals.ms.per.topic=topic:1000 - -# default time based flush interval in ms -log.flush.interval.ms=1000 - -# time based topic flasher time rate in ms -log.flush.scheduler.interval.ms=1000 - http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/broker_failure/config/server_source3.properties ---------------------------------------------------------------------- diff --git a/system_test/broker_failure/config/server_source3.properties b/system_test/broker_failure/config/server_source3.properties deleted file mode 100644 index df8ff6a..0000000 --- a/system_test/broker_failure/config/server_source3.properties +++ /dev/null @@ -1,76 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see kafka.server.KafkaConfig for additional details and defaults - -# the id of the broker -broker.id=3 - -# hostname of broker. If not set, will pick up from the value returned -# from getLocalHost. If there are multiple interfaces getLocalHost -# may not be what you want. -# host.name= - -# number of logical partitions on this broker -num.partitions=1 - -# the port the socket server runs on -port=9093 - -# the number of processor threads the socket server uses. Defaults to the number of cores on the machine -num.threads=8 - -# the directory in which to store log files -log.dir=/tmp/kafka-source3-logs - -# the send buffer used by the socket server -socket.send.buffer.bytes=1048576 - -# the receive buffer used by the socket server -socket.receive.buffer.bytes=1048576 - -# the maximum size of a log segment -log.segment.size=10000000 - -# the interval between running cleanup on the logs -log.cleanup.interval.mins=1 - -# the minimum age of a log file to eligible for deletion -log.retention.hours=168 - -#the number of messages to accept without flushing the log to disk -log.flush.interval.messages=600 - -#set the following properties to use zookeeper - -# enable connecting to zookeeper -enable.zookeeper=true - -# zk connection string -# comma separated host:port pairs, each corresponding to a zk -# server. e.g. 
"127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" -zk.connect=localhost:2181 - -# timeout in ms for connecting to zookeeper -zk.connection.timeout.ms=1000000 - -# time based topic flush intervals in ms -#log.flush.intervals.ms.per.topic=topic:1000 - -# default time based flush interval in ms -log.flush.interval.ms=1000 - -# time based topic flasher time rate in ms -log.flush.scheduler.interval.ms=1000 - http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/broker_failure/config/server_source4.properties ---------------------------------------------------------------------- diff --git a/system_test/broker_failure/config/server_source4.properties b/system_test/broker_failure/config/server_source4.properties deleted file mode 100644 index ee9c7fd..0000000 --- a/system_test/broker_failure/config/server_source4.properties +++ /dev/null @@ -1,76 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see kafka.server.KafkaConfig for additional details and defaults - -# the id of the broker -broker.id=4 - -# hostname of broker. If not set, will pick up from the value returned -# from getLocalHost. If there are multiple interfaces getLocalHost -# may not be what you want. -# host.name= - -# number of logical partitions on this broker -num.partitions=1 - -# the port the socket server runs on -port=9094 - -# the number of processor threads the socket server uses. Defaults to the number of cores on the machine -num.threads=8 - -# the directory in which to store log files -log.dir=/tmp/kafka-source4-logs - -# the send buffer used by the socket server -socket.send.buffer.bytes=1048576 - -# the receive buffer used by the socket server -socket.receive.buffer.bytes=1048576 - -# the maximum size of a log segment -log.segment.bytes=10000000 - -# the interval between running cleanup on the logs -log.cleanup.interval.mins=1 - -# the minimum age of a log file to eligible for deletion -log.retention.hours=168 - -#the number of messages to accept without flushing the log to disk -log.flush.interval.messages=600 - -#set the following properties to use zookeeper - -# enable connecting to zookeeper -enable.zookeeper=true - -# zk connection string -# comma separated host:port pairs, each corresponding to a zk -# server. e.g. 
"127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" -zk.connect=localhost:2181 - -# timeout in ms for connecting to zookeeper -zk.connection.timeout.ms=1000000 - -# time based topic flush intervals in ms -#log.flush.intervals.ms.per.topic=topic:1000 - -# default time based flush interval in ms -log.flush.interval.ms=1000 - -# time based topic flasher time rate in ms -log.flush.scheduler.interval.ms=1000 - http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/broker_failure/config/server_target1.properties ---------------------------------------------------------------------- diff --git a/system_test/broker_failure/config/server_target1.properties b/system_test/broker_failure/config/server_target1.properties deleted file mode 100644 index 7f776bd..0000000 --- a/system_test/broker_failure/config/server_target1.properties +++ /dev/null @@ -1,79 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see kafka.server.KafkaConfig for additional details and defaults - -# the id of the broker -broker.id=1 - -# hostname of broker. If not set, will pick up from the value returned -# from getLocalHost. If there are multiple interfaces getLocalHost -# may not be what you want. -# host.name= - -# number of logical partitions on this broker -num.partitions=1 - -# the port the socket server runs on -port=9081 - -# the number of processor threads the socket server uses. Defaults to the number of cores on the machine -num.threads=8 - -# the directory in which to store log files -log.dir=/tmp/kafka-target1-logs - -# the send buffer used by the socket server -socket.send.buffer.bytes=1048576 - -# the receive buffer used by the socket server -socket.receive.buffer.bytes=1048576 - -# the maximum size of a log segment -log.segment.bytes=10000000 - -# the interval between running cleanup on the logs -log.cleanup.interval.mins=1 - -# the minimum age of a log file to eligible for deletion -log.retention.hours=168 - -#the number of messages to accept without flushing the log to disk -log.flush.interval.messages=600 - -#set the following properties to use zookeeper - -# enable connecting to zookeeper -enable.zookeeper=true - -# zk connection string -# comma separated host:port pairs, each corresponding to a zk -# server. e.g. 
"127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" -zk.connect=localhost:2182 - -# timeout in ms for connecting to zookeeper -zk.connection.timeout.ms=1000000 - -# time based topic flush intervals in ms -#log.flush.intervals.ms.per.topic=topic:1000 - -# default time based flush interval in ms -log.flush.interval.ms=1000 - -# time based topic flasher time rate in ms -log.flush.scheduler.interval.ms=1000 - -# topic partition count map -# topic.partition.count.map=topic1:3, topic2:4 - http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/broker_failure/config/server_target2.properties ---------------------------------------------------------------------- diff --git a/system_test/broker_failure/config/server_target2.properties b/system_test/broker_failure/config/server_target2.properties deleted file mode 100644 index 6d997dc..0000000 --- a/system_test/broker_failure/config/server_target2.properties +++ /dev/null @@ -1,79 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see kafka.server.KafkaConfig for additional details and defaults - -# the id of the broker -broker.id=2 - -# hostname of broker. If not set, will pick up from the value returned -# from getLocalHost. If there are multiple interfaces getLocalHost -# may not be what you want. -# host.name= - -# number of logical partitions on this broker -num.partitions=1 - -# the port the socket server runs on -port=9082 - -# the number of processor threads the socket server uses. Defaults to the number of cores on the machine -num.threads=8 - -# the directory in which to store log files -log.dir=/tmp/kafka-target2-logs - -# the send buffer used by the socket server -socket.send.buffer.bytes=1048576 - -# the receive buffer used by the socket server -socket.receive.buffer.bytes=1048576 - -# the maximum size of a log segment -log.segment.bytes=10000000 - -# the interval between running cleanup on the logs -log.cleanup.interval.mins=1 - -# the minimum age of a log file to eligible for deletion -log.retention.hours=168 - -#the number of messages to accept without flushing the log to disk -log.flush.interval.messages=600 - -#set the following properties to use zookeeper - -# enable connecting to zookeeper -enable.zookeeper=true - -# zk connection string -# comma separated host:port pairs, each corresponding to a zk -# server. e.g. 
"127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" -zk.connect=localhost:2182 - -# timeout in ms for connecting to zookeeper -zk.connection.timeout.ms=1000000 - -# time based topic flush intervals in ms -#log.flush.intervals.ms.per.topic=topic:1000 - -# default time based flush interval in ms -log.flush.interval.ms=1000 - -# time based topic flasher time rate in ms -log.flush.scheduler.interval.ms=1000 - -# topic partition count map -# topic.partition.count.map=topic1:3, topic2:4 -
