This is an automated email from the ASF dual-hosted git repository.

dcapwell pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/cassandra.git

commit 16da961f2bbe9a63d97844ecbc54c55838a5964a
Merge: 83e1e9e 6c8c791
Author: David Capwell <dcapw...@apache.org>
AuthorDate: Wed Oct 7 14:34:11 2020 -0700

    Merge branch 'cassandra-3.11' into trunk

 build.xml                                          | 22 +++++++++++++++++++++-
 .../diag/LastEventIdBroadcasterMBean.java          | 18 ++++++++++++++++++
 .../cassandra/hadoop/cql3/CqlClientHelper.java     | 18 ++++++++++++++++++
 .../org/apache/cassandra/hints/InputPosition.java  | 18 ++++++++++++++++++
 .../apache/cassandra/schema/SchemaProvider.java    | 18 ++++++++++++++++++
 src/java/org/apache/cassandra/tools/JMXTool.java   | 18 ++++++++++++++++++
 .../utils/logging/LogbackLoggingSupport.java       | 18 ++++++++++++++++++
 .../cassandra/utils/logging/LoggingSupport.java    | 18 ++++++++++++++++++
 .../utils/logging/LoggingSupportFactory.java       | 18 ++++++++++++++++++
 .../utils/logging/NoOpFallbackLoggingSupport.java  | 18 ++++++++++++++++++
 .../cassandra/distributed/impl/FileLogAction.java  | 18 ++++++++++++++++++
 .../test/FullRepairCoordinatorTimeoutTest.java     | 18 ++++++++++++++++++
 .../IncrementalRepairCoordinatorTimeoutTest.java   | 18 ++++++++++++++++++
 .../test/PreviewRepairCoordinatorTimeoutTest.java  | 18 ++++++++++++++++++
 .../distributed/test/RepairCoordinatorTimeout.java | 18 ++++++++++++++++++
 .../distributed/test/TableEstimatesTest.java       | 18 ++++++++++++++++++
 .../org/apache/cassandra/cql3/ViewComplexTest.java | 18 ++++++++++++++++++
 .../cql3/validation/operations/TTLTest.java        | 18 ++++++++++++++++++
 .../apache/cassandra/db/marshal/EmptyTypeTest.java | 18 ++++++++++++++++++
 .../apache/cassandra/io/DiskSpaceMetricsTest.java  | 18 ++++++++++++++++++
 .../reads/repair/AbstractReadRepairTest.java       | 18 ++++++++++++++++++
 .../cassandra/tools/JMXCompatabilityTest.java      | 18 ++++++++++++++++++
 .../org/apache/cassandra/tools/JMXToolTest.java    | 18 ++++++++++++++++++
 .../apache/cassandra/tools/TopPartitionsTest.java  | 18 ++++++++++++++++++
 .../org/apache/cassandra/utils/AssertUtil.java     | 18 ++++++++++++++++++
 .../apache/cassandra/utils/ByteArrayUtilTest.java  | 18 ++++++++++++++++++
 .../org/apache/cassandra/utils/GeneratorsTest.java | 18 ++++++++++++++++++
 27 files changed, 489 insertions(+), 1 deletion(-)

diff --cc build.xml
index 6961cf9,f078d34..6a3eb1e
--- a/build.xml
+++ b/build.xml
@@@ -999,18 -861,9 +999,18 @@@
              <src path="${build.src.java}"/>
              <src path="${build.src.gen-java}"/>
              <compilerarg value="-XDignore.symbol.file"/>
 -            <compilerarg value="-Xbootclasspath/p:${build.src.jdkoverride}"/>
 -            <classpath refid="cassandra.classpath"/>
 +            <compilerarg line="${jdk11-javac-exports}"/>
 +            <classpath>
 +                <path refid="cassandra.classpath"/>
 +            </classpath>
          </javac>
 +    </target>
 +
-     <target 
depends="init,gen-cql3-grammar,generate-cql-html,generate-jflex-java"
++    <target 
depends="init,gen-cql3-grammar,generate-cql-html,generate-jflex-java,rat-report"
 +            name="build-project">
 +        <echo message="${ant.project.name}: ${ant.file}"/>
 +        <!-- Order matters! -->
 +        <antcall target="_build_java"/>
          <antcall target="createVersionPropFile"/>
          <copy todir="${build.classes.main}">
              <fileset dir="${build.src.resources}" />
@@@ -1345,7 -1199,25 +1345,25 @@@
        </rat:report>
      </target>
  
+     <target name="rat-report" depends="rat-init" description="License checks 
on source" >
+       <rat:report xmlns:rat="antlib:org.apache.rat.anttasks"
+                   reportFile="${build.dir}/src.rat.txt">
+         <fileset dir="src/java"/>
+         <fileset dir="test/unit"/>
+         <fileset dir="test/distributed"/>
+       </rat:report>
+       <fail message="Some files have missing or incorrect license 
information. Check RAT report in ${build.dir}/src.rat.txt for more details!">
+         <condition>
+           <and>
+             <not>
+               <resourcecontains resource="${build.dir}/src.rat.txt" 
substring="0 Unknown Licenses" casesensitive="false" />
+             </not>
+           </and>
+         </condition>
+       </fail>
+     </target>
+ 
 -  <target name="build-jmh" depends="build-test" description="Create JMH uber 
jar">
 +  <target name="build-jmh" depends="build-test, jar" description="Create JMH 
uber jar">
        <jar jarfile="${build.test.dir}/deps.jar">
            <zipgroupfileset dir="${build.dir.lib}/jars">
                <include name="*jmh*.jar"/>
diff --cc src/java/org/apache/cassandra/diag/LastEventIdBroadcasterMBean.java
index 03f05dc,0000000..01c7bbf
mode 100644,000000..100644
--- a/src/java/org/apache/cassandra/diag/LastEventIdBroadcasterMBean.java
+++ b/src/java/org/apache/cassandra/diag/LastEventIdBroadcasterMBean.java
@@@ -1,41 -1,0 +1,59 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
 +package org.apache.cassandra.diag;
 +
 +import java.util.Map;
 +
 +/**
 + * Provides a list of event types and the corresponding highest event IDs. 
Consumers may use these IDs to determine
 + * if new data is available.
 + *
 + * <p>Example result</p>
 + *
 + * <table>
 + *     <tr>
 + *         <th>Event</th>
 + *         <th>Last ID</th>
 + *     </tr>
 + *     <tr>
 + *         <td>BootstrapEvent</td>
 + *         <td>312</td>
 + *     </tr>
 + *     <tr>
 + *         <td>CompactionEvent</td>
 + *         <td>a53f9338-5f24-11e8-9c2d-fa7ae01bbebc</td>
 + *     </tr>
 + * </table>
 + *
 + * <p>Clients may either retrieve the current list of all events IDs, or make 
conditional requests for event IDs
 + * based on the timestamp of the last update (much in the sense of e.g. 
HTTP's If-Modified-Since semantics).</p>
 + */
 +public interface LastEventIdBroadcasterMBean
 +{
 +    /**
 +     * Retrieves a list of all event types and their highest IDs.
 +     */
 +    Map<String, Comparable> getLastEventIds();
 +
 +    /**
 +     * Retrieves a list of all event types and their highest IDs, if updated 
since specified timestamp, or null.
 +     * @param lastUpdate timestamp to use to determine if IDs have been 
updated
 +     */
 +    Map<String, Comparable> getLastEventIdsIfModified(long lastUpdate);
 +}
diff --cc src/java/org/apache/cassandra/hadoop/cql3/CqlClientHelper.java
index d154243,0000000..e0a6384
mode 100644,000000..100644
--- a/src/java/org/apache/cassandra/hadoop/cql3/CqlClientHelper.java
+++ b/src/java/org/apache/cassandra/hadoop/cql3/CqlClientHelper.java
@@@ -1,91 -1,0 +1,109 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
 +package org.apache.cassandra.hadoop.cql3;
 +
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Objects;
 +
 +import com.datastax.driver.core.Host;
 +import com.datastax.driver.core.Metadata;
 +import com.datastax.driver.core.Token;
 +import com.datastax.driver.core.TokenRange;
 +
 +public class CqlClientHelper
 +{
 +    private CqlClientHelper()
 +    {
 +    }
 +
 +    public static Map<TokenRange, List<Host>> 
getLocalPrimaryRangeForDC(String keyspace, Metadata metadata, String targetDC)
 +    {
 +        Objects.requireNonNull(keyspace, "keyspace");
 +        Objects.requireNonNull(metadata, "metadata");
 +        Objects.requireNonNull(targetDC, "targetDC");
 +
 +        // In 2.1 the logic was to have a set of nodes used as a seed, they 
were used to query
 +        // client.describe_local_ring(keyspace) -> List<TokenRange>; this 
should include all nodes in the local dc.
 +        // TokenRange contained the endpoints in order, so .endpoints.get(0) 
is the primary owner
 +        // Client does not have a similar API, instead it returns Set<Host>.  
To replicate this we first need
 +        // to compute the primary owners, then add in the replicas
 +
 +        List<Token> tokens = new ArrayList<>();
 +        Map<Token, Host> tokenToHost = new HashMap<>();
 +        for (Host host : metadata.getAllHosts())
 +        {
 +            if (!targetDC.equals(host.getDatacenter()))
 +                continue;
 +
 +            for (Token token : host.getTokens())
 +            {
 +                Host previous = tokenToHost.putIfAbsent(token, host);
 +                if (previous != null)
 +                    throw new IllegalStateException("Two hosts share the same 
token; hosts " + host.getHostId() + ":"
 +                                                    + host.getTokens() + ", " 
+ previous.getHostId() + ":" + previous.getTokens());
 +                tokens.add(token);
 +            }
 +        }
 +        Collections.sort(tokens);
 +
 +        Map<TokenRange, List<Host>> rangeToReplicas = new HashMap<>();
 +
 +        // The first token in the ring uses the last token as its 'start', 
handle this here to simplify the loop
 +        Token start = tokens.get(tokens.size() - 1);
 +        Token end = tokens.get(0);
 +
 +        addRange(keyspace, metadata, tokenToHost, rangeToReplicas, start, 
end);
 +        for (int i = 1; i < tokens.size(); i++)
 +        {
 +            start = tokens.get(i - 1);
 +            end = tokens.get(i);
 +
 +            addRange(keyspace, metadata, tokenToHost, rangeToReplicas, start, 
end);
 +        }
 +
 +        return rangeToReplicas;
 +    }
 +
 +    private static void addRange(String keyspace,
 +                                 Metadata metadata,
 +                                 Map<Token, Host> tokenToHost,
 +                                 Map<TokenRange, List<Host>> rangeToReplicas,
 +                                 Token start, Token end)
 +    {
 +        Host host = tokenToHost.get(end);
 +        String dc = host.getDatacenter();
 +
 +        TokenRange range = metadata.newTokenRange(start, end);
 +        List<Host> replicas = new ArrayList<>();
 +        replicas.add(host);
 +        // get all the replicas for the specific DC
 +        for (Host replica : metadata.getReplicas(keyspace, range))
 +        {
 +            if (dc.equals(replica.getDatacenter()) && !host.equals(replica))
 +                replicas.add(replica);
 +        }
 +        List<Host> previous = rangeToReplicas.put(range, replicas);
 +        if (previous != null)
 +            throw new IllegalStateException("Two hosts (" + host + ", " + 
previous + ") map to the same token range: " + range);
 +    }
 +}
diff --cc src/java/org/apache/cassandra/schema/SchemaProvider.java
index 51c4e6b,0000000..07324ed
mode 100644,000000..100644
--- a/src/java/org/apache/cassandra/schema/SchemaProvider.java
+++ b/src/java/org/apache/cassandra/schema/SchemaProvider.java
@@@ -1,49 -1,0 +1,67 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
 +package org.apache.cassandra.schema;
 +
 +import javax.annotation.Nullable;
 +
 +import org.apache.cassandra.db.Keyspace;
 +import org.apache.cassandra.exceptions.UnknownTableException;
 +import org.apache.cassandra.io.sstable.Descriptor;
 +
 +public interface SchemaProvider
 +{
 +    @Nullable
 +    Keyspace getKeyspaceInstance(String keyspaceName);
 +
 +    void storeKeyspaceInstance(Keyspace keyspace);
 +
 +    @Nullable
 +    KeyspaceMetadata getKeyspaceMetadata(String keyspaceName);
 +
 +    @Nullable
 +    TableMetadata getTableMetadata(TableId id);
 +
 +    @Nullable
 +    TableMetadata getTableMetadata(String keyspace, String table);
 +
 +    default TableMetadata getExistingTableMetadata(TableId id) throws 
UnknownTableException
 +    {
 +        TableMetadata metadata = getTableMetadata(id);
 +        if (metadata != null)
 +            return metadata;
 +
 +        String message =
 +            String.format("Couldn't find table with id %s. If a table was 
just created, this is likely due to the schema"
 +                          + "not being fully propagated.  Please wait for 
schema agreement on table creation.",
 +                          id);
 +        throw new UnknownTableException(message, id);
 +    }
 +
 +    @Nullable
 +    TableMetadataRef getTableMetadataRef(String keyspace, String table);
 +
 +    @Nullable
 +    TableMetadataRef getTableMetadataRef(TableId id);
 +
 +    @Nullable
 +    default TableMetadataRef getTableMetadataRef(Descriptor descriptor)
 +    {
 +        return getTableMetadataRef(descriptor.ksname, descriptor.cfname);
 +    }
 +}
diff --cc src/java/org/apache/cassandra/tools/JMXTool.java
index 30d3d22,0000000..e917179
mode 100644,000000..100644
--- a/src/java/org/apache/cassandra/tools/JMXTool.java
+++ b/src/java/org/apache/cassandra/tools/JMXTool.java
@@@ -1,848 -1,0 +1,866 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
 +package org.apache.cassandra.tools;
 +
 +import java.io.File;
 +import java.io.FileInputStream;
 +import java.io.IOException;
 +import java.io.InputStream;
 +import java.io.OutputStream;
 +import java.io.OutputStreamWriter;
 +import java.io.PrintStream;
 +import java.io.UnsupportedEncodingException;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Comparator;
 +import java.util.HashSet;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Objects;
 +import java.util.Optional;
 +import java.util.Set;
 +import java.util.TreeMap;
 +import java.util.TreeSet;
 +import java.util.concurrent.Callable;
 +import java.util.function.BiConsumer;
 +import java.util.regex.Pattern;
 +import java.util.stream.Collectors;
 +import java.util.stream.Stream;
 +import javax.inject.Inject;
 +import javax.management.InstanceNotFoundException;
 +import javax.management.IntrospectionException;
 +import javax.management.MBeanAttributeInfo;
 +import javax.management.MBeanFeatureInfo;
 +import javax.management.MBeanInfo;
 +import javax.management.MBeanOperationInfo;
 +import javax.management.MBeanParameterInfo;
 +import javax.management.MBeanServerConnection;
 +import javax.management.MalformedObjectNameException;
 +import javax.management.ObjectName;
 +import javax.management.ReflectionException;
 +import javax.management.remote.JMXConnector;
 +import javax.management.remote.JMXConnectorFactory;
 +import javax.management.remote.JMXServiceURL;
 +
 +import com.google.common.base.Preconditions;
 +import com.google.common.base.Predicate;
 +import com.google.common.collect.Sets;
 +import com.google.common.collect.Sets.SetView;
 +
 +import com.fasterxml.jackson.core.type.TypeReference;
 +import com.fasterxml.jackson.databind.ObjectMapper;
 +import io.airlift.airline.Arguments;
 +import io.airlift.airline.Cli;
 +import io.airlift.airline.Command;
 +import io.airlift.airline.Help;
 +import io.airlift.airline.HelpOption;
 +import io.airlift.airline.Option;
 +import org.yaml.snakeyaml.TypeDescription;
 +import org.yaml.snakeyaml.Yaml;
 +import org.yaml.snakeyaml.constructor.Constructor;
 +import org.yaml.snakeyaml.nodes.MappingNode;
 +import org.yaml.snakeyaml.nodes.Node;
 +import org.yaml.snakeyaml.nodes.Tag;
 +import org.yaml.snakeyaml.representer.Representer;
 +
 +public class JMXTool
 +{
 +    private static final List<String> METRIC_PACKAGES = 
Arrays.asList("org.apache.cassandra.metrics",
 +                                                                      
"org.apache.cassandra.db",
 +                                                                      
"org.apache.cassandra.hints",
 +                                                                      
"org.apache.cassandra.internal",
 +                                                                      
"org.apache.cassandra.net",
 +                                                                      
"org.apache.cassandra.request",
 +                                                                      
"org.apache.cassandra.service");
 +
 +    private static final Comparator<MBeanOperationInfo> OPERATOR_COMPARATOR = 
(a, b) -> {
 +        int rc = a.getName().compareTo(b.getName());
 +        if (rc != 0)
 +            return rc;
 +        String[] aSig = 
Stream.of(a.getSignature()).map(MBeanParameterInfo::getName).toArray(String[]::new);
 +        String[] bSig = 
Stream.of(b.getSignature()).map(MBeanParameterInfo::getName).toArray(String[]::new);
 +        rc = Integer.compare(aSig.length, bSig.length);
 +        if (rc != 0)
 +            return rc;
 +        for (int i = 0; i < aSig.length; i++)
 +        {
 +            rc = aSig[i].compareTo(bSig[i]);
 +            if (rc != 0)
 +                return rc;
 +        }
 +        return rc;
 +    };
 +
 +    @Command(name = "dump", description = "Dump the Apache Cassandra JMX 
objects and metadata.")
 +    public static final class Dump implements Callable<Void>
 +    {
 +        @Inject
 +        private HelpOption helpOption;
 +
 +        @Option(title = "url", name = { "-u", "--url" }, description = "JMX 
url to target")
 +        private String targetUrl = 
"service:jmx:rmi:///jndi/rmi://localhost:7199/jmxrmi";
 +
 +        @Option(title = "format", name = { "-f", "--format" }, description = 
"What format to dump content as; supported values are console (default), json, 
and yaml")
 +        private Format format = Format.console;
 +
 +        public Void call() throws Exception
 +        {
 +            Map<String, Info> map = load(new JMXServiceURL(targetUrl));
 +            format.dump(System.out, map);
 +            return null;
 +        }
 +
 +        public enum Format
 +        {
 +            console
 +            {
 +                void dump(OutputStream output, Map<String, Info> map)
 +                {
 +                    @SuppressWarnings("resource")
 +                    // output should be released by caller
 +                    PrintStream out = toPrintStream(output);
 +                    for (Map.Entry<String, Info> e : map.entrySet())
 +                    {
 +                        String name = e.getKey();
 +                        Info info = e.getValue();
 +
 +                        out.println(name);
 +                        out.println("\tAttributes");
 +                        Stream.of(info.attributes).forEach(a -> printRow(out, 
a.name, a.type, a.access));
 +                        out.println("\tOperations");
 +                        Stream.of(info.operations).forEach(o -> {
 +                            String args = Stream.of(o.parameters)
 +                                                .map(i -> i.name + ": " + 
i.type)
 +                                                
.collect(Collectors.joining(",", "(", ")"));
 +                            printRow(out, o.name, o.returnType, args);
 +                        });
 +                    }
 +                }
 +            },
 +            json
 +            {
 +                void dump(OutputStream output, Map<String, Info> map) throws 
IOException
 +                {
 +                    ObjectMapper mapper = new ObjectMapper();
 +                    mapper.writeValue(output, map);
 +                }
 +            },
 +            yaml
 +            {
 +                void dump(OutputStream output, Map<String, Info> map) throws 
IOException
 +                {
 +                    Representer representer = new Representer();
 +                    representer.addClassTag(Info.class, Tag.MAP); // avoid 
the auto added tag
 +                    Yaml yaml = new Yaml(representer);
 +                    yaml.dump(map, new OutputStreamWriter(output));
 +                }
 +            };
 +
 +            private static PrintStream toPrintStream(OutputStream output)
 +            {
 +                try
 +                {
 +                    return output instanceof PrintStream ? (PrintStream) 
output : new PrintStream(output, true, "UTF-8");
 +                }
 +                catch (UnsupportedEncodingException e)
 +                {
 +                    throw new AssertionError(e); // utf-8 is a required 
charset for the JVM
 +                }
 +            }
 +
 +            abstract void dump(OutputStream output, Map<String, Info> map) 
throws IOException;
 +        }
 +    }
 +
 +    @Command(name = "diff", description = "Diff two jmx dump files and report 
their differences")
 +    public static final class Diff implements Callable<Void>
 +    {
 +        @Inject
 +        private HelpOption helpOption;
 +
 +        @Arguments(title = "files", usage = "<left> <right>", description = 
"Files to diff")
 +        private List<File> files;
 +
 +        @Option(title = "format", name = { "-f", "--format" }, description = 
"What format the files are in; only support json and yaml as format")
 +        private Format format = Format.yaml;
 +
 +        @Option(title = "ignore left", name = { "--ignore-missing-on-left" }, 
description = "Ignore results missing on the left")
 +        private boolean ignoreMissingLeft;
 +
 +        @Option(title = "ignore right", name = { "--ignore-missing-on-right" 
}, description = "Ignore results missing on the right")
 +        private boolean ignoreMissingRight;
 +
 +        @Option(title = "exclude objects", name = "--exclude-object", 
description
 +                                                                      = 
"Ignores processing specific objects. " +
 +                                                                        "Each 
usage should take a single object, " +
 +                                                                        "but 
can use this flag multiple times.")
 +        private List<CliPattern> excludeObjects = new ArrayList<>();
 +
 +        @Option(title = "exclude attributes", name = "--exclude-attribute", 
description
 +                                                                            = 
"Ignores processing specific attributes. " +
 +                                                                              
"Each usage should take a single attribute, " +
 +                                                                              
"but can use this flag multiple times.")
 +        private List<CliPattern> excludeAttributes = new ArrayList<>();
 +
 +        @Option(title = "exclude operations", name = "--exclude-operation", 
description
 +                                                                            = 
"Ignores processing specific operations. " +
 +                                                                              
"Each usage should take a single operation, " +
 +                                                                              
"but can use this flag multiple times.")
 +        private List<CliPattern> excludeOperations = new ArrayList<>();
 +
 +        public Void call() throws Exception
 +        {
 +            Preconditions.checkArgument(files.size() == 2, "files requires 2 
arguments but given %s", files);
 +            Map<String, Info> left;
 +            Map<String, Info> right;
 +            try (FileInputStream leftStream = new 
FileInputStream(files.get(0));
 +                 FileInputStream rightStream = new 
FileInputStream(files.get(1)))
 +            {
 +                left = format.load(leftStream);
 +                right = format.load(rightStream);
 +            }
 +
 +            diff(left, right);
 +            return null;
 +        }
 +
 +        private void diff(Map<String, Info> left, Map<String, Info> right)
 +        {
 +            DiffResult<String> objectNames = diff(left.keySet(), 
right.keySet(), name -> {
 +                for (CliPattern p : excludeObjects)
 +                {
 +                    if (p.pattern.matcher(name).matches())
 +                        return false;
 +                }
 +                return true;
 +            });
 +
 +            if (!ignoreMissingRight && !objectNames.notInRight.isEmpty())
 +            {
 +                System.out.println("Objects not in right:");
 +                printSet(0, objectNames.notInRight);
 +            }
 +            if (!ignoreMissingLeft && !objectNames.notInLeft.isEmpty())
 +            {
 +                System.out.println("Objects not in left: ");
 +                printSet(0, objectNames.notInLeft);
 +            }
 +            Runnable printHeader = new Runnable()
 +            {
 +                boolean printedHeader = false;
 +
 +                public void run()
 +                {
 +                    if (!printedHeader)
 +                    {
 +                        System.out.println("Difference found in attribute or 
operation");
 +                        printedHeader = true;
 +                    }
 +                }
 +            };
 +
 +            for (String key : objectNames.shared)
 +            {
 +                Info leftInfo = left.get(key);
 +                Info rightInfo = right.get(key);
 +                DiffResult<Attribute> attributes = 
diff(leftInfo.attributeSet(), rightInfo.attributeSet(), attribute -> {
 +                    for (CliPattern p : excludeAttributes)
 +                    {
 +                        if (p.pattern.matcher(attribute.name).matches())
 +                            return false;
 +                    }
 +                    return true;
 +                });
 +                if (!ignoreMissingRight && !attributes.notInRight.isEmpty())
 +                {
 +                    printHeader.run();
 +                    System.out.println(key + "\tattribute not in right:");
 +                    printSet(1, attributes.notInRight);
 +                }
 +                if (!ignoreMissingLeft && !attributes.notInLeft.isEmpty())
 +                {
 +                    printHeader.run();
 +                    System.out.println(key + "\tattribute not in left:");
 +                    printSet(1, attributes.notInLeft);
 +                }
 +
 +                DiffResult<Operation> operations = 
diff(leftInfo.operationSet(), rightInfo.operationSet(), operation -> {
 +                    for (CliPattern p : excludeOperations)
 +                    {
 +                        if (p.pattern.matcher(operation.name).matches())
 +                            return false;
 +                    }
 +                    return true;
 +                });
 +                if (!ignoreMissingRight && !operations.notInRight.isEmpty())
 +                {
 +                    printHeader.run();
 +                    System.out.println(key + "\toperation not in right:");
 +                    printSet(1, operations.notInRight, (sb, o) ->
 +                                                       
rightInfo.getOperation(o.name).ifPresent(match ->
 +                                                                              
                  sb.append("\t").append("similar in right: ").append(match)));
 +                }
 +                if (!ignoreMissingLeft && !operations.notInLeft.isEmpty())
 +                {
 +                    printHeader.run();
 +                    System.out.println(key + "\toperation not in left:");
 +                    printSet(1, operations.notInLeft, (sb, o) ->
 +                                                      
leftInfo.getOperation(o.name).ifPresent(match ->
 +                                                                              
                sb.append("\t").append("similar in left: ").append(match)));
 +                }
 +            }
 +        }
 +
 +        private static <T extends Comparable<T>> void printSet(int indent, 
Set<T> set)
 +        {
 +            printSet(indent, set, (i1, i2) -> {});
 +        }
 +
 +        private static <T extends Comparable<T>> void printSet(int indent, 
Set<T> set, BiConsumer<StringBuilder, T> fn)
 +        {
 +            StringBuilder sb = new StringBuilder();
 +            for (T t : new TreeSet<>(set))
 +            {
 +                sb.setLength(0);
 +                for (int i = 0; i < indent; i++)
 +                    sb.append('\t');
 +                sb.append(t);
 +                fn.accept(sb, t);
 +                System.out.println(sb);
 +            }
 +        }
 +
 +        private static <T> DiffResult<T> diff(Set<T> left, Set<T> right, 
Predicate<T> fn)
 +        {
 +            left = Sets.filter(left, fn);
 +            right = Sets.filter(right, fn);
 +            return new DiffResult<>(Sets.difference(left, right), 
Sets.difference(right, left), Sets.intersection(left, right));
 +        }
 +
        /**
         * Result of {@link #diff(Set, Set, Predicate)}: the elements present only on the
         * left side, only on the right side, and on both sides.
         */
        private static final class DiffResult<T>
        {
            private final SetView<T> notInRight; // present in left but missing from right
            private final SetView<T> notInLeft;  // present in right but missing from left
            private final SetView<T> shared;     // present on both sides

            private DiffResult(SetView<T> notInRight, SetView<T> notInLeft, SetView<T> shared)
            {
                this.notInRight = notInRight;
                this.notInLeft = notInLeft;
                this.shared = shared;
            }
        }
 +
        /**
         * Serialization formats a previously dumped set of MBean definitions can be read
         * back from.
         */
        public enum Format
        {
            json
            {
                Map<String, Info> load(InputStream input) throws IOException
                {
                    ObjectMapper mapper = new ObjectMapper();
                    return mapper.readValue(input, new TypeReference<Map<String, Info>>() {});
                }
            },
            yaml
            {
                Map<String, Info> load(InputStream input) throws IOException
                {
                    // CustomConstructor builds the document root as a Map<String, Info>,
                    // so the unchecked cast matches what the constructor produces
                    Yaml yaml = new Yaml(new CustomConstructor());
                    return (Map<String, Info>) yaml.load(input);
                }
            };

            /** Parses {@code input} into a map of MBean name to its attribute/operation info. */
            abstract Map<String, Info> load(InputStream input) throws IOException;
        }
 +
        /**
         * SnakeYAML constructor that materializes the top-level YAML mapping as a
         * {@code Map<String, Info>}: keys are MBean names, values are {@link Info} objects.
         */
        private static final class CustomConstructor extends Constructor
        {
            // synthetic tag assigned to the document root so it can be recognized below
            private static final String ROOT = "__root__";
            private static final TypeDescription INFO_TYPE = new TypeDescription(Info.class);

            public CustomConstructor()
            {
                this.rootTag = new Tag(ROOT);
                this.addTypeDescription(INFO_TYPE);
            }

            protected Object constructObject(Node node)
            {
                if (ROOT.equals(node.getTag().getValue()) && node instanceof MappingNode)
                {
                    // root mapping: construct keys normally, force each value node to Info
                    MappingNode mn = (MappingNode) node;
                    return mn.getValue().stream()
                                .collect(Collectors.toMap(t -> super.constructObject(t.getKeyNode()),
                                                          t -> {
                                                              Node child = t.getValueNode();
                                                              child.setType(INFO_TYPE.getType());
                                                              return super.constructObject(child);
                                                          }));
                }
                else
                {
                    // non-root nodes fall back to default SnakeYAML construction
                    return super.constructObject(node);
                }
            }
        }
 +    }
 +
 +    private static Map<String, Info> load(JMXServiceURL url) throws 
IOException, MalformedObjectNameException, IntrospectionException, 
InstanceNotFoundException, ReflectionException
 +    {
 +        try (JMXConnector jmxc = JMXConnectorFactory.connect(url, null))
 +        {
 +            MBeanServerConnection mbsc = jmxc.getMBeanServerConnection();
 +
 +            Map<String, Info> map = new TreeMap<>();
 +            for (String pkg : new TreeSet<>(METRIC_PACKAGES))
 +            {
 +                Set<ObjectName> metricNames = new 
TreeSet<>(mbsc.queryNames(new ObjectName(pkg + ":*"), null));
 +                for (ObjectName name : metricNames)
 +                {
 +                    if (mbsc.isRegistered(name))
 +                    {
 +                        MBeanInfo info = mbsc.getMBeanInfo(name);
 +                        map.put(name.toString(), Info.from(info));
 +                    }
 +                }
 +            }
 +            return map;
 +        }
 +    }
 +
 +    private static String getAccess(MBeanAttributeInfo a)
 +    {
 +        String access;
 +        if (a.isReadable())
 +        {
 +            if (a.isWritable())
 +                access = "read/write";
 +            else
 +                access = "read-only";
 +        }
 +        else if (a.isWritable())
 +            access = "write-only";
 +        else
 +            access = "no-access";
 +        return access;
 +    }
 +
 +    private static String normalizeType(String type)
 +    {
 +        switch (type)
 +        {
 +            case "[Z":
 +                return "boolean[]";
 +            case "[B":
 +                return "byte[]";
 +            case "[S":
 +                return "short[]";
 +            case "[I":
 +                return "int[]";
 +            case "[J":
 +                return "long[]";
 +            case "[F":
 +                return "float[]";
 +            case "[D":
 +                return "double[]";
 +            case "[C":
 +                return "char[]";
 +        }
 +        if (type.startsWith("[L"))
 +            return type.substring(2, type.length() - 1) + "[]"; // -1 will 
remove the ; at the end
 +        return type;
 +    }
 +
 +    private static final StringBuilder ROW_BUFFER = new StringBuilder();
 +
 +    private static void printRow(PrintStream out, String... args)
 +    {
 +        ROW_BUFFER.setLength(0);
 +        ROW_BUFFER.append("\t\t");
 +        for (String a : args)
 +            ROW_BUFFER.append(a).append("\t");
 +        out.println(ROW_BUFFER);
 +    }
 +
    /**
     * Attributes and operations of a single MBean, either captured from a live server or
     * read back from a json/yaml dump. Mutable, with a no-arg constructor and setters, so
     * the json/yaml {@link Format}s can deserialize it.
     */
    public static final class Info
    {
        private Attribute[] attributes;
        private Operation[] operations;

        // required for deserialization
        public Info()
        {
        }

        public Info(Attribute[] attributes, Operation[] operations)
        {
            this.attributes = attributes;
            this.operations = operations;
        }

        /** Captures a live {@link MBeanInfo}, sorting attributes by name and operations by signature. */
        private static Info from(MBeanInfo info)
        {
            Attribute[] attributes = Stream.of(info.getAttributes())
                                           .sorted(Comparator.comparing(MBeanFeatureInfo::getName))
                                           .map(Attribute::from)
                                           .toArray(Attribute[]::new);

            Operation[] operations = Stream.of(info.getOperations())
                                           .sorted(OPERATOR_COMPARATOR)
                                           .map(Operation::from)
                                           .toArray(Operation[]::new);
            return new Info(attributes, operations);
        }

        public Attribute[] getAttributes()
        {
            return attributes;
        }

        public void setAttributes(Attribute[] attributes)
        {
            this.attributes = attributes;
        }

        /** Names of all attributes; iteration order is unspecified. */
        public Set<String> attributeNames()
        {
            return Stream.of(attributes).map(a -> a.name).collect(Collectors.toSet());
        }

        public Set<Attribute> attributeSet()
        {
            return new HashSet<>(Arrays.asList(attributes));
        }

        public Operation[] getOperations()
        {
            return operations;
        }

        public void setOperations(Operation[] operations)
        {
            this.operations = operations;
        }

        /** Names of all operations; iteration order is unspecified. */
        public Set<String> operationNames()
        {
            return Stream.of(operations).map(o -> o.name).collect(Collectors.toSet());
        }

        public Set<Operation> operationSet()
        {
            return new HashSet<>(Arrays.asList(operations));
        }

        /** First attribute with the given name, if any. */
        public Optional<Attribute> getAttribute(String name)
        {
            return Stream.of(attributes).filter(a -> a.name.equals(name)).findFirst();
        }

        /** Attribute with the given name; throws {@link AssertionError} if absent. */
        public Attribute getAttributePresent(String name)
        {
            return getAttribute(name).orElseThrow(AssertionError::new);
        }

        /** First operation with the given name, if any. */
        public Optional<Operation> getOperation(String name)
        {
            return Stream.of(operations).filter(o -> o.name.equals(name)).findFirst();
        }

        /** Operation with the given name; throws {@link AssertionError} if absent. */
        public Operation getOperationPresent(String name)
        {
            return getOperation(name).orElseThrow(AssertionError::new);
        }

        @Override
        public boolean equals(Object o)
        {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            Info info = (Info) o;
            return Arrays.equals(attributes, info.attributes) &&
                   Arrays.equals(operations, info.operations);
        }

        @Override
        public int hashCode()
        {
            int result = Arrays.hashCode(attributes);
            result = 31 * result + Arrays.hashCode(operations);
            return result;
        }
    }
 +
 +    public static final class Attribute implements Comparable<Attribute>
 +    {
 +        private String name;
 +        private String type;
 +        private String access;
 +
 +        public Attribute()
 +        {
 +        }
 +
 +        public Attribute(String name, String type, String access)
 +        {
 +            this.name = name;
 +            this.type = type;
 +            this.access = access;
 +        }
 +
 +        private static Attribute from(MBeanAttributeInfo info)
 +        {
 +            return new Attribute(info.getName(), 
normalizeType(info.getType()), JMXTool.getAccess(info));
 +        }
 +
 +        public String getName()
 +        {
 +            return name;
 +        }
 +
 +        public void setName(String name)
 +        {
 +            this.name = name;
 +        }
 +
 +        public String getType()
 +        {
 +            return type;
 +        }
 +
 +        public void setType(String type)
 +        {
 +            this.type = type;
 +        }
 +
 +        public String getAccess()
 +        {
 +            return access;
 +        }
 +
 +        public void setAccess(String access)
 +        {
 +            this.access = access;
 +        }
 +
 +        public boolean equals(Object o)
 +        {
 +            if (this == o) return true;
 +            if (o == null || getClass() != o.getClass()) return false;
 +            Attribute attribute = (Attribute) o;
 +            return Objects.equals(name, attribute.name) &&
 +                   Objects.equals(type, attribute.type);
 +        }
 +
 +        public int hashCode()
 +        {
 +            return Objects.hash(name, type);
 +        }
 +
 +        public String toString()
 +        {
 +            return name + ": " + type;
 +        }
 +
 +        public int compareTo(Attribute o)
 +        {
 +            int rc = name.compareTo(o.name);
 +            if (rc != 0)
 +                return rc;
 +            return type.compareTo(o.type);
 +        }
 +    }
 +
 +    public static final class Operation implements Comparable<Operation>
 +    {
 +        private String name;
 +        private Parameter[] parameters;
 +        private String returnType;
 +
 +        public Operation()
 +        {
 +        }
 +
 +        public Operation(String name, Parameter[] parameters, String 
returnType)
 +        {
 +            this.name = name;
 +            this.parameters = parameters;
 +            this.returnType = returnType;
 +        }
 +
 +        private static Operation from(MBeanOperationInfo info)
 +        {
 +            Parameter[] params = 
Stream.of(info.getSignature()).map(Parameter::from).toArray(Parameter[]::new);
 +            return new Operation(info.getName(), params, 
normalizeType(info.getReturnType()));
 +        }
 +
 +        public String getName()
 +        {
 +            return name;
 +        }
 +
 +        public void setName(String name)
 +        {
 +            this.name = name;
 +        }
 +
 +        public Parameter[] getParameters()
 +        {
 +            return parameters;
 +        }
 +
 +        public void setParameters(Parameter[] parameters)
 +        {
 +            this.parameters = parameters;
 +        }
 +
 +        public List<String> parameterTypes()
 +        {
 +            return Stream.of(parameters).map(p -> 
p.type).collect(Collectors.toList());
 +        }
 +
 +        public String getReturnType()
 +        {
 +            return returnType;
 +        }
 +
 +        public void setReturnType(String returnType)
 +        {
 +            this.returnType = returnType;
 +        }
 +
 +        public boolean equals(Object o)
 +        {
 +            if (this == o) return true;
 +            if (o == null || getClass() != o.getClass()) return false;
 +            Operation operation = (Operation) o;
 +            return Objects.equals(name, operation.name) &&
 +                   Arrays.equals(parameters, operation.parameters) &&
 +                   Objects.equals(returnType, operation.returnType);
 +        }
 +
 +        public int hashCode()
 +        {
 +            int result = Objects.hash(name, returnType);
 +            result = 31 * result + Arrays.hashCode(parameters);
 +            return result;
 +        }
 +
 +        public String toString()
 +        {
 +            return name + 
Stream.of(parameters).map(Parameter::toString).collect(Collectors.joining(", ", 
"(", ")")) + ": " + returnType;
 +        }
 +
 +        public int compareTo(Operation o)
 +        {
 +            int rc = name.compareTo(o.name);
 +            if (rc != 0)
 +                return rc;
 +            rc = Integer.compare(parameters.length, o.parameters.length);
 +            if (rc != 0)
 +                return rc;
 +            for (int i = 0; i < parameters.length; i++)
 +            {
 +                rc = parameters[i].type.compareTo(o.parameters[i].type);
 +                if (rc != 0)
 +                    return rc;
 +            }
 +            return returnType.compareTo(o.returnType);
 +        }
 +    }
 +
 +    public static final class Parameter
 +    {
 +        private String name;
 +        private String type;
 +
 +        public Parameter()
 +        {
 +        }
 +
 +        public Parameter(String name, String type)
 +        {
 +            this.name = name;
 +            this.type = type;
 +        }
 +
 +        private static Parameter from(MBeanParameterInfo info)
 +        {
 +            return new Parameter(info.getName(), 
normalizeType(info.getType()));
 +        }
 +
 +        public String getName()
 +        {
 +            return name;
 +        }
 +
 +        public void setName(String name)
 +        {
 +            this.name = name;
 +        }
 +
 +        public String getType()
 +        {
 +            return type;
 +        }
 +
 +        public void setType(String type)
 +        {
 +            this.type = type;
 +        }
 +
 +        public boolean equals(Object o)
 +        {
 +            if (this == o) return true;
 +            if (o == null || getClass() != o.getClass()) return false;
 +            Parameter parameter = (Parameter) o;
 +            return Objects.equals(type, parameter.type);
 +        }
 +
 +        public int hashCode()
 +        {
 +            return Objects.hash(type);
 +        }
 +
 +        public String toString()
 +        {
 +            return name + ": " + type;
 +        }
 +    }
 +
    /**
     * Holder that compiles a CLI-supplied string into a {@link Pattern} when the option is
     * constructed.
     */
    public static final class CliPattern
    {
        private final Pattern pattern;

        public CliPattern(String pattern)
        {
            this.pattern = Pattern.compile(pattern);
        }
    }
 +
    /**
     * Entry point: parses the command line into one of the registered commands
     * (help / dump / diff, defaulting to help) and runs it.
     */
    public static void main(String[] args) throws Exception
    {
        Cli.CliBuilder<Callable<Void>> builder = Cli.builder("jmxtool");
        builder.withDefaultCommand(Help.class);
        builder.withCommands(Help.class, Dump.class, Diff.class);

        Cli<Callable<Void>> parser = builder.build();
        Callable<Void> command = parser.parse(args);
        command.call();
    }
 +}
diff --cc 
test/distributed/org/apache/cassandra/distributed/impl/FileLogAction.java
index 31d509b,0000000..0f48a23
mode 100644,000000..100644
--- a/test/distributed/org/apache/cassandra/distributed/impl/FileLogAction.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/FileLogAction.java
@@@ -1,115 -1,0 +1,133 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
 +package org.apache.cassandra.distributed.impl;
 +
 +import java.io.File;
 +import java.io.FileNotFoundException;
 +import java.io.IOException;
 +import java.io.RandomAccessFile;
 +import java.io.UncheckedIOException;
 +import java.util.Objects;
 +import java.util.function.Predicate;
 +
 +import com.google.common.io.Closeables;
 +
 +import org.apache.cassandra.utils.AbstractIterator;
 +import org.apache.cassandra.distributed.api.LogAction;
 +import org.apache.cassandra.distributed.api.LineIterator;
 +
 +public class FileLogAction implements LogAction
 +{
 +    private final File file;
 +
 +    public FileLogAction(File file)
 +    {
 +        this.file = Objects.requireNonNull(file);
 +    }
 +
 +    @Override
 +    public long mark()
 +    {
 +        return file.length();
 +    }
 +
 +    @Override
 +    public LineIterator match(long startPosition, Predicate<String> fn)
 +    {
 +        RandomAccessFile reader;
 +        try
 +        {
 +            reader = new RandomAccessFile(file, "r");
 +        }
 +        catch (FileNotFoundException e)
 +        {
 +            // if file isn't present, don't return an empty stream as it 
looks the same as no log lines matched
 +            throw new UncheckedIOException(e);
 +        }
 +        if (startPosition > 0) // -1 used to disable, so ignore any negative 
values or 0 (default offset)
 +        {
 +            try
 +            {
 +                reader.seek(startPosition);
 +            }
 +            catch (IOException e)
 +            {
 +                throw new UncheckedIOException("Unable to seek to " + 
startPosition, e);
 +            }
 +        }
 +        return new FileLineIterator(reader, fn);
 +    }
 +
 +    private static final class FileLineIterator extends 
AbstractIterator<String> implements LineIterator
 +    {
 +        private final RandomAccessFile reader;
 +        private final Predicate<String> fn;
 +
 +        private FileLineIterator(RandomAccessFile reader, Predicate<String> 
fn)
 +        {
 +            this.reader = reader;
 +            this.fn = fn;
 +        }
 +
 +        @Override
 +        public long mark()
 +        {
 +            try
 +            {
 +                return reader.getFilePointer();
 +            }
 +            catch (IOException e)
 +            {
 +                throw new UncheckedIOException(e);
 +            }
 +        }
 +
 +        @Override
 +        protected String computeNext()
 +        {
 +            try
 +            {
 +                String s;
 +                while ((s = reader.readLine()) != null)
 +                {
 +                    if (fn.test(s))
 +                        return s;
 +                }
 +                return endOfData();
 +            }
 +            catch (IOException e)
 +            {
 +                throw new UncheckedIOException(e);
 +            }
 +        }
 +
 +        @Override
 +        public void close()
 +        {
 +            try
 +            {
 +                Closeables.close(reader, true);
 +            }
 +            catch (IOException impossible)
 +            {
 +                throw new AssertionError(impossible);
 +            }
 +        }
 +    }
 +}
diff --cc 
test/distributed/org/apache/cassandra/distributed/test/FullRepairCoordinatorTimeoutTest.java
index d91cb5d,0000000..086dd0b
mode 100644,000000..100644
--- 
a/test/distributed/org/apache/cassandra/distributed/test/FullRepairCoordinatorTimeoutTest.java
+++ 
b/test/distributed/org/apache/cassandra/distributed/test/FullRepairCoordinatorTimeoutTest.java
@@@ -1,16 -1,0 +1,34 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
 +package org.apache.cassandra.distributed.test;
 +
 +import org.junit.runner.RunWith;
 +import org.junit.runners.Parameterized;
 +
 +import 
org.apache.cassandra.distributed.test.DistributedRepairUtils.RepairParallelism;
 +import 
org.apache.cassandra.distributed.test.DistributedRepairUtils.RepairType;
 +
@RunWith(Parameterized.class)
public class FullRepairCoordinatorTimeoutTest extends RepairCoordinatorTimeout
{
    /** Runs the shared coordinator-timeout tests with full repair. */
    public FullRepairCoordinatorTimeoutTest(RepairParallelism parallelism, boolean withNotifications)
    {
        super(RepairType.FULL, parallelism, withNotifications);
    }
}
diff --cc 
test/distributed/org/apache/cassandra/distributed/test/IncrementalRepairCoordinatorTimeoutTest.java
index 0fdae57,0000000..a6666d9
mode 100644,000000..100644
--- 
a/test/distributed/org/apache/cassandra/distributed/test/IncrementalRepairCoordinatorTimeoutTest.java
+++ 
b/test/distributed/org/apache/cassandra/distributed/test/IncrementalRepairCoordinatorTimeoutTest.java
@@@ -1,16 -1,0 +1,34 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
 +package org.apache.cassandra.distributed.test;
 +
 +import org.junit.runner.RunWith;
 +import org.junit.runners.Parameterized;
 +
 +import 
org.apache.cassandra.distributed.test.DistributedRepairUtils.RepairParallelism;
 +import 
org.apache.cassandra.distributed.test.DistributedRepairUtils.RepairType;
 +
@RunWith(Parameterized.class)
public class IncrementalRepairCoordinatorTimeoutTest extends RepairCoordinatorTimeout
{
    /** Runs the shared coordinator-timeout tests with incremental repair. */
    public IncrementalRepairCoordinatorTimeoutTest(RepairParallelism parallelism, boolean withNotifications)
    {
        super(RepairType.INCREMENTAL, parallelism, withNotifications);
    }
}
diff --cc 
test/distributed/org/apache/cassandra/distributed/test/PreviewRepairCoordinatorTimeoutTest.java
index 8b90909,0000000..160ccd8
mode 100644,000000..100644
--- 
a/test/distributed/org/apache/cassandra/distributed/test/PreviewRepairCoordinatorTimeoutTest.java
+++ 
b/test/distributed/org/apache/cassandra/distributed/test/PreviewRepairCoordinatorTimeoutTest.java
@@@ -1,16 -1,0 +1,34 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
 +package org.apache.cassandra.distributed.test;
 +
 +import org.junit.runner.RunWith;
 +import org.junit.runners.Parameterized;
 +
 +import 
org.apache.cassandra.distributed.test.DistributedRepairUtils.RepairParallelism;
 +import 
org.apache.cassandra.distributed.test.DistributedRepairUtils.RepairType;
 +
@RunWith(Parameterized.class)
public class PreviewRepairCoordinatorTimeoutTest extends RepairCoordinatorTimeout
{
    /** Runs the shared coordinator-timeout tests with preview repair. */
    public PreviewRepairCoordinatorTimeoutTest(RepairParallelism parallelism, boolean withNotifications)
    {
        super(RepairType.PREVIEW, parallelism, withNotifications);
    }
}
diff --cc 
test/distributed/org/apache/cassandra/distributed/test/RepairCoordinatorTimeout.java
index f523396,0000000..b475e55
mode 100644,000000..100644
--- 
a/test/distributed/org/apache/cassandra/distributed/test/RepairCoordinatorTimeout.java
+++ 
b/test/distributed/org/apache/cassandra/distributed/test/RepairCoordinatorTimeout.java
@@@ -1,67 -1,0 +1,85 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
 +package org.apache.cassandra.distributed.test;
 +
 +import java.time.Duration;
 +
 +import org.junit.Assert;
 +import org.junit.Before;
 +import org.junit.Test;
 +
 +import org.apache.cassandra.distributed.api.NodeToolResult;
 +import 
org.apache.cassandra.distributed.test.DistributedRepairUtils.RepairParallelism;
 +import 
org.apache.cassandra.distributed.test.DistributedRepairUtils.RepairType;
 +import org.apache.cassandra.net.Verb;
 +
 +import static java.lang.String.format;
 +import static 
org.apache.cassandra.distributed.test.DistributedRepairUtils.assertParentRepairFailedWithMessageContains;
 +import static 
org.apache.cassandra.distributed.test.DistributedRepairUtils.assertParentRepairNotExist;
 +import static 
org.apache.cassandra.distributed.test.DistributedRepairUtils.getRepairExceptions;
 +import static org.apache.cassandra.utils.AssertUtil.assertTimeoutPreemptively;
 +
/**
 * Shared tests asserting how the repair coordinator reports timeouts; subclasses fix the
 * repair type (full / incremental / preview) and are parameterized over parallelism and
 * notification mode.
 */
public abstract class RepairCoordinatorTimeout extends RepairCoordinatorBase
{
    public RepairCoordinatorTimeout(RepairType repairType, RepairParallelism parallelism, boolean withNotifications)
    {
        super(repairType, parallelism, withNotifications);
    }

    /** Clears message filters installed by a previous test so dropped verbs don't leak across tests. */
    @Before
    public void beforeTest()
    {
        CLUSTER.filters().reset();
    }

    /**
     * Drops PREPARE messages so the coordinator never gets replies, then verifies the
     * resulting failure surfaces through the nodetool result, JMX notifications (when
     * enabled), the parent-repair state, and the repair-exception count.
     */
    @Test
    public void prepareRPCTimeout()
    {
        String table = tableName("preparerpctimeout");
        assertTimeoutPreemptively(Duration.ofMinutes(1), () -> {
            CLUSTER.schemaChange(format("CREATE TABLE %s.%s (key text, value text, PRIMARY KEY (key))", KEYSPACE, table));
            CLUSTER.verbs(Verb.PREPARE_MSG).drop();

            long repairExceptions = getRepairExceptions(CLUSTER, 1);
            NodeToolResult result = repair(1, KEYSPACE, table);
            result.asserts()
                  .failure()
                  .errorContains("Did not get replies from all endpoints.");
            if (withNotifications)
            {
                result.asserts()
                      .notificationContains(NodeToolResult.ProgressEventType.START, "Starting repair command")
                      .notificationContains(NodeToolResult.ProgressEventType.START, "repairing keyspace " + KEYSPACE + " with repair options")
                      .notificationContains(NodeToolResult.ProgressEventType.ERROR, "Did not get replies from all endpoints.")
                      .notificationContains(NodeToolResult.ProgressEventType.COMPLETE, "finished with error");
            }

            // NOTE(review): preview repair is expected to leave no parent repair record,
            // the other types a failed one — confirm against DistributedRepairUtils
            if (repairType != RepairType.PREVIEW)
            {
                assertParentRepairFailedWithMessageContains(CLUSTER, KEYSPACE, table, "Did not get replies from all endpoints.");
            }
            else
            {
                assertParentRepairNotExist(CLUSTER, KEYSPACE, table);
            }

            // exactly one new repair exception should have been counted
            Assert.assertEquals(repairExceptions + 1, getRepairExceptions(CLUSTER, 1));
        });
    }
}
diff --cc 
test/distributed/org/apache/cassandra/distributed/test/TableEstimatesTest.java
index 0130f28,0000000..5a660ff
mode 100644,000000..100644
--- 
a/test/distributed/org/apache/cassandra/distributed/test/TableEstimatesTest.java
+++ 
b/test/distributed/org/apache/cassandra/distributed/test/TableEstimatesTest.java
@@@ -1,84 -1,0 +1,102 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
 +package org.apache.cassandra.distributed.test;
 +
 +import java.io.IOException;
 +
 +import org.junit.AfterClass;
 +import org.junit.Assume;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +
 +import org.apache.cassandra.distributed.Cluster;
 +import org.apache.cassandra.distributed.api.ConsistencyLevel;
 +import org.apache.cassandra.distributed.api.IInstance;
 +import org.apache.cassandra.distributed.api.QueryResult;
 +import org.assertj.core.api.Assertions;
 +
 +public class TableEstimatesTest extends TestBaseImpl
 +{
 +    private static Cluster CLUSTER;
 +
 +    @BeforeClass
 +    public static void setupCluster() throws IOException
 +    {
 +        CLUSTER = init(Cluster.build(1).start());
 +    }
 +
 +    @AfterClass
 +    public static void teardownCluster()
 +    {
 +        if (CLUSTER != null)
 +            CLUSTER.close();
 +    }
 +
 +    /**
 +     * Replaces Python Dtest: 
nodetool_test.py#test_refresh_size_estimates_clears_invalid_entries
 +     */
 +    @Test
 +    public void refreshTableEstimatesClearsInvalidEntries()
 +    {
 +        String table_estimatesInsert = "INSERT INTO system.table_estimates 
(keyspace_name, table_name, range_type, range_start, range_end, 
mean_partition_size, partitions_count) VALUES (?, ?, ?, ?, ?, ?, ?)";
 +        IInstance node = CLUSTER.get(1);
 +
 +        try
 +        {
 +            node.executeInternal(table_estimatesInsert, "system_auth", 
"bad_table", "local_primary", "-5", "5", 0L, 0L);
 +            node.executeInternal(table_estimatesInsert, "bad_keyspace", 
"bad_table", "local_primary", "-5", "5", 0L, 0L);
 +        }
 +        catch (Exception e)
 +        {
 +            // to make this test portable (with the intent to extract out), 
handle the case where the table_estimates isn't defined
 +            
Assertions.assertThat(e.getClass().getCanonicalName()).isEqualTo("org.apache.cassandra.exceptions.InvalidRequestException");
 +            Assertions.assertThat(e).hasMessageContaining("does not exist");
 +            Assume.assumeTrue("system.table_estimates not present", false);
 +        }
 +
 +        node.nodetoolResult("refreshsizeestimates").asserts().success();
 +
 +        QueryResult qr = CLUSTER.coordinator(1).executeWithResult("SELECT * 
FROM system.table_estimates WHERE keyspace_name=? AND table_name=?", 
ConsistencyLevel.ONE, "system_auth", "bad_table");
 +        Assertions.assertThat(qr).isExhausted();
 +
 +        qr = CLUSTER.coordinator(1).executeWithResult("SELECT * FROM 
system.table_estimates WHERE keyspace_name=?", ConsistencyLevel.ONE, 
"bad_keyspace");
 +        Assertions.assertThat(qr).isExhausted();
 +    }
 +
 +    /**
 +     * Replaces Python Dtest: 
nodetool_test.py#test_refresh_size_estimates_clears_invalid_entries
 +     */
 +    @Test
 +    public void refreshSizeEstimatesClearsInvalidEntries()
 +    {
 +        String size_estimatesInsert = "INSERT INTO system.size_estimates 
(keyspace_name, table_name, range_start, range_end, mean_partition_size, 
partitions_count) VALUES (?, ?, ?, ?, ?, ?)";
 +        IInstance node = CLUSTER.get(1);
 +
 +        node.executeInternal(size_estimatesInsert, "system_auth", 
"bad_table", "-5", "5", 0L, 0L);
 +        node.executeInternal(size_estimatesInsert, "bad_keyspace", 
"bad_table", "-5", "5", 0L, 0L);
 +
 +        node.nodetoolResult("refreshsizeestimates").asserts().success();
 +
 +        QueryResult qr = CLUSTER.coordinator(1).executeWithResult("SELECT * 
FROM system.size_estimates WHERE keyspace_name=? AND table_name=?", 
ConsistencyLevel.ONE, "system_auth", "bad_table");
 +        Assertions.assertThat(qr).isExhausted();
 +
 +        qr = CLUSTER.coordinator(1).executeWithResult("SELECT * FROM 
system.size_estimates WHERE keyspace_name=?", ConsistencyLevel.ONE, 
"bad_keyspace");
 +        Assertions.assertThat(qr).isExhausted();
 +    }
 +}
diff --cc 
test/unit/org/apache/cassandra/service/reads/repair/AbstractReadRepairTest.java
index 765e1fa,0000000..5315060
mode 100644,000000..100644
--- 
a/test/unit/org/apache/cassandra/service/reads/repair/AbstractReadRepairTest.java
+++ 
b/test/unit/org/apache/cassandra/service/reads/repair/AbstractReadRepairTest.java
@@@ -1,350 -1,0 +1,368 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
 +package org.apache.cassandra.service.reads.repair;
 +
 +import java.nio.ByteBuffer;
 +import java.util.List;
 +import java.util.Set;
 +import java.util.UUID;
 +import java.util.concurrent.TimeUnit;
 +import java.util.function.Consumer;
 +
 +import com.google.common.base.Predicates;
 +import com.google.common.collect.ImmutableList;
 +import com.google.common.collect.Iterables;
 +import com.google.common.collect.Lists;
 +import com.google.common.collect.Sets;
 +import com.google.common.primitives.Ints;
 +
 +import org.apache.cassandra.dht.ByteOrderedPartitioner;
 +import org.apache.cassandra.dht.Murmur3Partitioner;
 +import org.apache.cassandra.dht.Token;
 +import org.apache.cassandra.gms.Gossiper;
 +import org.apache.cassandra.locator.EndpointsForToken;
 +import org.apache.cassandra.locator.ReplicaPlan;
 +import org.junit.Assert;
 +import org.junit.Before;
 +import org.junit.Ignore;
 +import org.junit.Test;
 +
 +import org.apache.cassandra.SchemaLoader;
 +import org.apache.cassandra.Util;
 +import org.apache.cassandra.config.DatabaseDescriptor;
 +import org.apache.cassandra.cql3.ColumnIdentifier;
 +import org.apache.cassandra.cql3.statements.schema.CreateTableStatement;
 +import org.apache.cassandra.db.Clustering;
 +import org.apache.cassandra.db.ColumnFamilyStore;
 +import org.apache.cassandra.db.ConsistencyLevel;
 +import org.apache.cassandra.db.DecoratedKey;
 +import org.apache.cassandra.db.Keyspace;
 +import org.apache.cassandra.db.Mutation;
 +import org.apache.cassandra.db.ReadCommand;
 +import org.apache.cassandra.db.ReadResponse;
 +import org.apache.cassandra.db.partitions.PartitionIterator;
 +import org.apache.cassandra.db.partitions.PartitionUpdate;
 +import 
org.apache.cassandra.db.partitions.SingletonUnfilteredPartitionIterator;
 +import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
 +import org.apache.cassandra.db.partitions.UnfilteredPartitionIterators;
 +import org.apache.cassandra.db.rows.BTreeRow;
 +import org.apache.cassandra.db.rows.BufferCell;
 +import org.apache.cassandra.db.rows.Cell;
 +import org.apache.cassandra.db.rows.Row;
 +import org.apache.cassandra.db.rows.RowIterator;
 +import org.apache.cassandra.locator.EndpointsForRange;
 +import org.apache.cassandra.locator.InetAddressAndPort;
 +import org.apache.cassandra.locator.Replica;
 +import org.apache.cassandra.locator.ReplicaPlans;
 +import org.apache.cassandra.locator.ReplicaUtils;
 +import org.apache.cassandra.net.Message;
 +import org.apache.cassandra.schema.KeyspaceMetadata;
 +import org.apache.cassandra.schema.KeyspaceParams;
 +import org.apache.cassandra.schema.MigrationManager;
 +import org.apache.cassandra.schema.TableMetadata;
 +import org.apache.cassandra.schema.Tables;
 +import org.apache.cassandra.service.StorageService;
 +import org.apache.cassandra.utils.ByteBufferUtil;
 +
 +import static org.apache.cassandra.locator.Replica.fullReplica;
 +import static org.apache.cassandra.locator.ReplicaUtils.FULL_RANGE;
 +import static org.apache.cassandra.net.Verb.INTERNAL_RSP;
 +
 +@Ignore
 +public abstract  class AbstractReadRepairTest
 +{
 +    static Keyspace ks;
 +    static ColumnFamilyStore cfs;
 +    static TableMetadata cfm;
 +    static InetAddressAndPort target1;
 +    static InetAddressAndPort target2;
 +    static InetAddressAndPort target3;
 +    static List<InetAddressAndPort> targets;
 +
 +    static Replica replica1;
 +    static Replica replica2;
 +    static Replica replica3;
 +    static EndpointsForRange replicas;
 +    static ReplicaPlan.ForRead<?> replicaPlan;
 +
 +    static long now = TimeUnit.NANOSECONDS.toMicros(System.nanoTime());
 +    static DecoratedKey key;
 +    static Cell<?> cell1;
 +    static Cell<?> cell2;
 +    static Cell<?> cell3;
 +    static Mutation resolved;
 +
 +    static ReadCommand command;
 +
 +    static void assertRowsEqual(Row expected, Row actual)
 +    {
 +        try
 +        {
 +            Assert.assertEquals(expected == null, actual == null);
 +            if (expected == null)
 +                return;
 +            Assert.assertEquals(expected.clustering(), actual.clustering());
 +            Assert.assertEquals(expected.deletion(), actual.deletion());
 +            Assert.assertArrayEquals(Iterables.toArray(expected.cells(), 
Cell.class), Iterables.toArray(expected.cells(), Cell.class));
 +        } catch (Throwable t)
 +        {
 +            throw new AssertionError(String.format("Row comparison failed, 
expected %s got %s", expected, actual), t);
 +        }
 +    }
 +
 +    static void assertRowsEqual(RowIterator expected, RowIterator actual)
 +    {
 +        assertRowsEqual(expected.staticRow(), actual.staticRow());
 +        while (expected.hasNext())
 +        {
 +            assert actual.hasNext();
 +            assertRowsEqual(expected.next(), actual.next());
 +        }
 +        assert !actual.hasNext();
 +    }
 +
 +    static void assertPartitionsEqual(PartitionIterator expected, 
PartitionIterator actual)
 +    {
 +        while (expected.hasNext())
 +        {
 +            assert actual.hasNext();
 +            assertRowsEqual(expected.next(), actual.next());
 +        }
 +
 +        assert !actual.hasNext();
 +    }
 +
 +    static void assertMutationEqual(Mutation expected, Mutation actual)
 +    {
 +        Assert.assertEquals(expected.getKeyspaceName(), 
actual.getKeyspaceName());
 +        Assert.assertEquals(expected.key(), actual.key());
 +        Assert.assertEquals(expected.key(), actual.key());
 +        PartitionUpdate expectedUpdate = 
Iterables.getOnlyElement(expected.getPartitionUpdates());
 +        PartitionUpdate actualUpdate = 
Iterables.getOnlyElement(actual.getPartitionUpdates());
 +        assertRowsEqual(Iterables.getOnlyElement(expectedUpdate), 
Iterables.getOnlyElement(actualUpdate));
 +    }
 +
 +    static DecoratedKey dk(int v)
 +    {
 +        return 
DatabaseDescriptor.getPartitioner().decorateKey(ByteBufferUtil.bytes(v));
 +    }
 +
 +    static Cell<?> cell(String name, String value, long timestamp)
 +    {
 +        return 
BufferCell.live(cfm.getColumn(ColumnIdentifier.getInterned(name, false)), 
timestamp, ByteBufferUtil.bytes(value));
 +    }
 +
 +    static PartitionUpdate update(Cell<?>... cells)
 +    {
 +        Row.Builder builder = BTreeRow.unsortedBuilder();
 +        builder.newRow(Clustering.EMPTY);
 +        for (Cell<?> cell: cells)
 +        {
 +            builder.addCell(cell);
 +        }
 +        return PartitionUpdate.singleRowUpdate(cfm, key, builder.build());
 +    }
 +
 +    static PartitionIterator partition(Cell<?>... cells)
 +    {
 +        UnfilteredPartitionIterator iter = new 
SingletonUnfilteredPartitionIterator(update(cells).unfilteredIterator());
 +        return UnfilteredPartitionIterators.filter(iter, 
Ints.checkedCast(TimeUnit.MICROSECONDS.toSeconds(now)));
 +    }
 +
 +    static Mutation mutation(Cell<?>... cells)
 +    {
 +        return new Mutation(update(cells));
 +    }
 +
 +    @SuppressWarnings("resource")
 +    static Message<ReadResponse> msg(InetAddressAndPort from, Cell<?>... 
cells)
 +    {
 +        UnfilteredPartitionIterator iter = new 
SingletonUnfilteredPartitionIterator(update(cells).unfilteredIterator());
 +        return Message.builder(INTERNAL_RSP, 
ReadResponse.createDataResponse(iter, command))
 +                      .from(from)
 +                      .build();
 +    }
 +
 +    static class ResultConsumer implements Consumer<PartitionIterator>
 +    {
 +
 +        PartitionIterator result = null;
 +
 +        @Override
 +        public void accept(PartitionIterator partitionIterator)
 +        {
 +            Assert.assertNotNull(partitionIterator);
 +            result = partitionIterator;
 +        }
 +    }
 +
 +    private static boolean configured = false;
 +
 +    static void configureClass(ReadRepairStrategy repairStrategy) throws 
Throwable
 +    {
 +        SchemaLoader.loadSchema();
 +        String ksName = "ks";
 +
 +        String ddl = String.format("CREATE TABLE tbl (k int primary key, v 
text) WITH read_repair='%s'",
 +                                   repairStrategy.toString().toLowerCase());
 +
 +        cfm = CreateTableStatement.parse(ddl, ksName).build();
 +        assert cfm.params.readRepair == repairStrategy;
 +        KeyspaceMetadata ksm = KeyspaceMetadata.create(ksName, 
KeyspaceParams.simple(3), Tables.of(cfm));
 +        MigrationManager.announceNewKeyspace(ksm, false);
 +
 +        ks = Keyspace.open(ksName);
 +        cfs = ks.getColumnFamilyStore("tbl");
 +
 +        cfs.sampleReadLatencyNanos = 0;
 +        cfs.additionalWriteLatencyNanos = 0;
 +
 +        target1 = InetAddressAndPort.getByName("127.0.0.255");
 +        target2 = InetAddressAndPort.getByName("127.0.0.254");
 +        target3 = InetAddressAndPort.getByName("127.0.0.253");
 +
 +        targets = ImmutableList.of(target1, target2, target3);
 +
 +        replica1 = fullReplica(target1, FULL_RANGE);
 +        replica2 = fullReplica(target2, FULL_RANGE);
 +        replica3 = fullReplica(target3, FULL_RANGE);
 +        replicas = EndpointsForRange.of(replica1, replica2, replica3);
 +
 +        replicaPlan = replicaPlan(ConsistencyLevel.QUORUM, replicas);
 +
 +        StorageService.instance.getTokenMetadata().clearUnsafe();
 +        
StorageService.instance.getTokenMetadata().updateNormalToken(ByteOrderedPartitioner.instance.getToken(ByteBuffer.wrap(new
 byte[] { 0 })), replica1.endpoint());
 +        
StorageService.instance.getTokenMetadata().updateNormalToken(ByteOrderedPartitioner.instance.getToken(ByteBuffer.wrap(new
 byte[] { 1 })), replica2.endpoint());
 +        
StorageService.instance.getTokenMetadata().updateNormalToken(ByteOrderedPartitioner.instance.getToken(ByteBuffer.wrap(new
 byte[] { 2 })), replica3.endpoint());
 +        Gossiper.instance.initializeNodeUnsafe(replica1.endpoint(), 
UUID.randomUUID(), 1);
 +        Gossiper.instance.initializeNodeUnsafe(replica2.endpoint(), 
UUID.randomUUID(), 1);
 +        Gossiper.instance.initializeNodeUnsafe(replica3.endpoint(), 
UUID.randomUUID(), 1);
 +
 +        // default test values
 +        key  = dk(5);
 +        cell1 = cell("v", "val1", now);
 +        cell2 = cell("v", "val2", now);
 +        cell3 = cell("v", "val3", now);
 +        resolved = mutation(cell1, cell2);
 +
 +        command = Util.cmd(cfs, 1).build();
 +
 +        configured = true;
 +    }
 +
 +    static Set<InetAddressAndPort> epSet(InetAddressAndPort... eps)
 +    {
 +        return Sets.newHashSet(eps);
 +    }
 +
 +    @Before
 +    public void setUp()
 +    {
 +        assert configured : "configureClass must be called in a @BeforeClass 
method";
 +
 +        cfs.sampleReadLatencyNanos = 0;
 +        cfs.additionalWriteLatencyNanos = 0;
 +    }
 +
 +    static ReplicaPlan.ForRangeRead replicaPlan(ConsistencyLevel 
consistencyLevel, EndpointsForRange replicas)
 +    {
 +        return replicaPlan(ks, consistencyLevel, replicas, replicas);
 +    }
 +
 +    static ReplicaPlan.ForTokenWrite repairPlan(ReplicaPlan.ForRangeRead 
readPlan)
 +    {
 +        return repairPlan(readPlan, readPlan.candidates());
 +    }
 +
 +    static ReplicaPlan.ForTokenWrite repairPlan(EndpointsForRange 
liveAndDown, EndpointsForRange targets)
 +    {
 +        return repairPlan(replicaPlan(liveAndDown, targets), liveAndDown);
 +    }
 +
 +    static ReplicaPlan.ForTokenWrite repairPlan(ReplicaPlan.ForRangeRead 
readPlan, EndpointsForRange liveAndDown)
 +    {
 +        Token token = readPlan.range().left.getToken();
 +        EndpointsForToken pending = EndpointsForToken.empty(token);
 +        return ReplicaPlans.forWrite(ks, ConsistencyLevel.TWO, 
liveAndDown.forToken(token), pending, Predicates.alwaysTrue(), 
ReplicaPlans.writeReadRepair(readPlan));
 +    }
 +    static ReplicaPlan.ForRangeRead replicaPlan(EndpointsForRange replicas, 
EndpointsForRange targets)
 +    {
 +        return replicaPlan(ks, ConsistencyLevel.QUORUM, replicas, targets);
 +    }
 +    static ReplicaPlan.ForRangeRead replicaPlan(Keyspace keyspace, 
ConsistencyLevel consistencyLevel, EndpointsForRange replicas)
 +    {
 +        return replicaPlan(keyspace, consistencyLevel, replicas, replicas);
 +    }
 +    static ReplicaPlan.ForRangeRead replicaPlan(Keyspace keyspace, 
ConsistencyLevel consistencyLevel, EndpointsForRange replicas, 
EndpointsForRange targets)
 +    {
 +        return new ReplicaPlan.ForRangeRead(keyspace, consistencyLevel, 
ReplicaUtils.FULL_BOUNDS, replicas, targets, 1);
 +    }
 +
 +    public abstract InstrumentedReadRepair 
createInstrumentedReadRepair(ReadCommand command, ReplicaPlan.Shared<?, ?> 
replicaPlan, long queryStartNanoTime);
 +
 +    public InstrumentedReadRepair 
createInstrumentedReadRepair(ReplicaPlan.Shared<?, ?> replicaPlan)
 +    {
 +        return createInstrumentedReadRepair(command, replicaPlan, 
System.nanoTime());
 +
 +    }
 +
 +    /**
 +     * If we haven't received enough full data responses by the time the 
speculation
 +     * timeout occurs, we should send read requests to additional replicas
 +     */
 +    @Test
 +    public void readSpeculationCycle()
 +    {
 +        InstrumentedReadRepair repair = 
createInstrumentedReadRepair(ReplicaPlan.shared(replicaPlan(replicas, 
EndpointsForRange.of(replica1, replica2))));
 +        ResultConsumer consumer = new ResultConsumer();
 +
 +        Assert.assertEquals(epSet(), repair.getReadRecipients());
 +        repair.startRepair(null, consumer);
 +
 +        Assert.assertEquals(epSet(target1, target2), 
repair.getReadRecipients());
 +        repair.maybeSendAdditionalReads();
 +        Assert.assertEquals(epSet(target1, target2, target3), 
repair.getReadRecipients());
 +        Assert.assertNull(consumer.result);
 +    }
 +
 +    /**
 +     * If we receive enough data responses before the speculation 
timeout
 +     * passes, we shouldn't send additional read requests
 +     */
 +    @Test
 +    public void noSpeculationRequired()
 +    {
 +        InstrumentedReadRepair repair = 
createInstrumentedReadRepair(ReplicaPlan.shared(replicaPlan(replicas, 
EndpointsForRange.of(replica1, replica2))));
 +        ResultConsumer consumer = new ResultConsumer();
 +
 +        Assert.assertEquals(epSet(), repair.getReadRecipients());
 +        repair.startRepair(null, consumer);
 +
 +        Assert.assertEquals(epSet(target1, target2), 
repair.getReadRecipients());
 +        repair.getReadCallback().onResponse(msg(target1, cell1));
 +        repair.getReadCallback().onResponse(msg(target2, cell1));
 +
 +        repair.maybeSendAdditionalReads();
 +        Assert.assertEquals(epSet(target1, target2), 
repair.getReadRecipients());
 +
 +        repair.awaitReads();
 +
 +        assertPartitionsEqual(partition(cell1), consumer.result);
 +    }
 +}
diff --cc test/unit/org/apache/cassandra/tools/JMXCompatabilityTest.java
index 2494dd2,0000000..b7bbe23
mode 100644,000000..100644
--- a/test/unit/org/apache/cassandra/tools/JMXCompatabilityTest.java
+++ b/test/unit/org/apache/cassandra/tools/JMXCompatabilityTest.java
@@@ -1,170 -1,0 +1,188 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
 +package org.apache.cassandra.tools;
 +
 +import java.util.Arrays;
 +import java.util.List;
 +
 +import com.google.common.collect.Lists;
 +import org.junit.BeforeClass;
 +import org.junit.ClassRule;
 +import org.junit.Test;
 +import org.junit.rules.TemporaryFolder;
 +
 +import com.datastax.driver.core.SimpleStatement;
 +import org.apache.cassandra.cql3.CQLTester;
 +import org.apache.cassandra.service.CassandraDaemon;
 +import org.apache.cassandra.service.GCInspector;
 +import org.apache.cassandra.tools.ToolRunner.ToolResult;
 +import org.apache.cassandra.transport.ProtocolVersion;
 +import org.assertj.core.api.Assertions;
 +
 +/**
 + * This class is to monitor the JMX compatibility across different versions, 
and relies on a gold set of metrics which
 + * were generated following the instructions below.  These tests only check 
for breaking changes, so will ignore any
 + * new metrics added in a release.  If the latest release is not finalized 
yet then the latest version might fail
 + * if an unreleased metric gets renamed; if this happens then the gold set 
should be updated for the latest version.
 + *
 + * If a test fails for a previous version, then this means we have a JMX 
compatibility regression; if the metric has
 + * gone through proper deprecation then the metric can be excluded using the 
patterns used in other tests, if the metric
 + * has not gone through proper deprecation then the change should be looked 
at more carefully to avoid breaking users.
 + *
 + * In order to generate the dump for another version, launch a cluster then 
run the following
 + * {@code
 + * create keyspace cql_test_keyspace WITH replication = {'class': 
'SimpleStrategy', 'replication_factor': '1'};
 + * use cql_test_keyspace;
 + * CREATE TABLE table_00 (pk int PRIMARY KEY);
 + * insert into table_00 (pk) values (42);
 + * select * from table_00 where pk=42;
 + * }
 + */
 +public class JMXCompatabilityTest extends CQLTester
 +{
 +    @ClassRule
 +    public static TemporaryFolder TMP = new TemporaryFolder();
 +
 +    private static boolean CREATED_TABLE = false;
 +
 +    @BeforeClass
 +    public static void setup() throws Exception
 +    {
 +        startJMXServer();
 +    }
 +
 +    private void setupStandardTables() throws Throwable
 +    {
 +        if (CREATED_TABLE)
 +            return;
 +
 +        // force loading mbean which CassandraDaemon creates
 +        GCInspector.register();
 +        CassandraDaemon.registerNativeAccess();
 +
 +        String name = KEYSPACE + "." + createTable("CREATE TABLE %s (pk int 
PRIMARY KEY)");
 +
 +        // use net to register everything like storage proxy
 +        executeNet(ProtocolVersion.CURRENT, new SimpleStatement("INSERT INTO 
" + name + " (pk) VALUES (?)", 42));
 +        executeNet(ProtocolVersion.CURRENT, new SimpleStatement("SELECT * 
FROM " + name + " WHERE pk=?", 42));
 +
 +        String script = "tools/bin/jmxtool dump -f yaml --url 
service:jmx:rmi:///jndi/rmi://" + jmxHost + ":" + jmxPort + "/jmxrmi > " + 
TMP.getRoot().getAbsolutePath() + "/out.yaml";
 +        ToolRunner.invoke("bash", "-c", script).assertOnCleanExit();
 +        CREATED_TABLE = true;
 +    }
 +
 +    @Test
 +    public void diff30() throws Throwable
 +    {
 +        List<String> excludeObjects = 
Arrays.asList("org.apache.cassandra.metrics:type=ThreadPools.*",
 +                                                    
"org.apache.cassandra.internal:.*",
 +                                                    
"org.apache.cassandra.metrics:type=DroppedMessage.*",
 +                                                    
"org.apache.cassandra.metrics:type=ClientRequest,scope=CASRead,name=ConditionNotMet",
 +                                                    
"org.apache.cassandra.metrics:type=Client,name=connectedThriftClients", // 
removed in CASSANDRA-11115
 +                                                    
"org.apache.cassandra.request:type=ReadRepairStage", // removed in 
CASSANDRA-13910
 +                                                    
"org.apache.cassandra.db:type=HintedHandoffManager", // removed in 
CASSANDRA-15939
 +
 +                                                    // dropped tables
 +                                                    
"org.apache.cassandra.metrics:type=Table,keyspace=system,scope=(schema_aggregates|schema_columnfamilies|schema_columns|schema_functions|schema_keyspaces|schema_triggers|schema_usertypes),name=.*",
 +                                                    
".*keyspace=system,(scope|table|columnfamily)=views_builds_in_progress.*",
 +                                                    
".*keyspace=system,(scope|table|columnfamily)=range_xfers.*",
 +                                                    
".*keyspace=system,(scope|table|columnfamily)=hints.*",
 +                                                    
".*keyspace=system,(scope|table|columnfamily)=batchlog.*");
 +        List<String> excludeAttributes = Arrays.asList("RPCServerRunning", // 
removed in CASSANDRA-11115
 +                                                       
"MaxNativeProtocolVersion");
 +        List<String> excludeOperations = Arrays.asList("startRPCServer", 
"stopRPCServer", // removed in CASSANDRA-11115
 +                                                       // nodetool apis that 
were changed,
 +                                                       "decommission", // -> 
decommission(boolean)
 +                                                       "forceRepairAsync", // 
-> repairAsync
 +                                                       
"forceRepairRangeAsync", // -> repairAsync
 +                                                       "beginLocalSampling", 
// -> beginLocalSampling(p1: java.lang.String, p2: int, p3: int): void
 +                                                       "finishLocalSampling" 
// -> finishLocalSampling(p1: java.lang.String, p2: int): java.util.List
 +        );
 +
 +        diff(excludeObjects, excludeAttributes, excludeOperations, 
"test/data/jmxdump/cassandra-3.0-jmx.yaml");
 +    }
 +
 +    @Test
 +    public void diff311() throws Throwable
 +    {
 +        List<String> excludeObjects = 
Arrays.asList("org.apache.cassandra.metrics:type=ThreadPools.*",
 +                                                    
"org.apache.cassandra.internal:.*",
 +                                                    
"org.apache.cassandra.metrics:type=DroppedMessage.*",
 +                                                    
"org.apache.cassandra.metrics:type=ClientRequest,scope=CASRead,name=ConditionNotMet",
 +                                                    
"org.apache.cassandra.metrics:type=Client,name=connectedThriftClients", // 
removed in CASSANDRA-11115
 +                                                    
"org.apache.cassandra.request:type=ReadRepairStage", // removed in 
CASSANDRA-13910
 +                                                    
"org.apache.cassandra.db:type=HintedHandoffManager", // removed in 
CASSANDRA-15939
 +
 +                                                    // dropped tables
 +                                                    
"org.apache.cassandra.metrics:type=Table,keyspace=system,scope=(schema_aggregates|schema_columnfamilies|schema_columns|schema_functions|schema_keyspaces|schema_triggers|schema_usertypes),name=.*",
 +                                                    
".*keyspace=system,(scope|table|columnfamily)=views_builds_in_progress.*",
 +                                                    
".*keyspace=system,(scope|table|columnfamily)=range_xfers.*",
 +                                                    
".*keyspace=system,(scope|table|columnfamily)=hints.*",
 +                                                    
".*keyspace=system,(scope|table|columnfamily)=batchlog.*"
 +        );
 +        List<String> excludeAttributes = Arrays.asList("RPCServerRunning", // 
removed in CASSANDRA-11115
 +                                                       
"MaxNativeProtocolVersion",
 +                                                       
"StreamingSocketTimeout");
 +        List<String> excludeOperations = Arrays.asList("startRPCServer", 
"stopRPCServer", // removed in CASSANDRA-11115
 +                                                       // nodetool apis that 
were changed,
 +                                                       "decommission", // -> 
decommission(boolean)
 +                                                       "forceRepairAsync", // 
-> repairAsync
 +                                                       
"forceRepairRangeAsync", // -> repairAsync
 +                                                       "beginLocalSampling", 
// -> beginLocalSampling(p1: java.lang.String, p2: int, p3: int): void
 +                                                       "finishLocalSampling" 
// -> finishLocalSampling(p1: java.lang.String, p2: int): java.util.List
 +        );
 +
 +        diff(excludeObjects, excludeAttributes, excludeOperations, 
"test/data/jmxdump/cassandra-3.11-jmx.yaml");
 +    }
 +
 +    @Test
 +    public void diff40() throws Throwable
 +    {
 +        List<String> excludeObjects = Arrays.asList();
 +        List<String> excludeAttributes = Arrays.asList();
 +        List<String> excludeOperations = Arrays.asList();
 +
 +        diff(excludeObjects, excludeAttributes, excludeOperations, 
"test/data/jmxdump/cassandra-4.0-jmx.yaml");
 +    }
 +
 +    private void diff(List<String> excludeObjects, List<String> 
excludeAttributes, List<String> excludeOperations, String original) throws 
Throwable
 +    {
 +        setupStandardTables();
 +
 +        List<String> args = Lists.newArrayList("tools/bin/jmxtool", "diff",
 +                                               "-f", "yaml",
 +                                               "--ignore-missing-on-left",
 +                                               original, 
TMP.getRoot().getAbsolutePath() + "/out.yaml");
 +        excludeObjects.forEach(a -> {
 +            args.add("--exclude-object");
 +            args.add(a);
 +        });
 +        excludeAttributes.forEach(a -> {
 +            args.add("--exclude-attribute");
 +            args.add(a);
 +        });
 +        excludeOperations.forEach(a -> {
 +            args.add("--exclude-operation");
 +            args.add(a);
 +        });
 +        ToolResult result = ToolRunner.invoke(args);
 +        result.assertOnCleanExit();
 +        Assertions.assertThat(result.getStdout()).isEmpty();
 +    }
 +}
diff --cc test/unit/org/apache/cassandra/tools/JMXToolTest.java
index 4fba8cd,0000000..e6f4615
mode 100644,000000..100644
--- a/test/unit/org/apache/cassandra/tools/JMXToolTest.java
+++ b/test/unit/org/apache/cassandra/tools/JMXToolTest.java
@@@ -1,172 -1,0 +1,190 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
 +package org.apache.cassandra.tools;
 +
 +import java.io.IOException;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.List;
 +import java.util.Map;
 +
 +import org.junit.Test;
 +
 +import org.apache.cassandra.io.util.DataInputBuffer;
 +import org.apache.cassandra.io.util.DataOutputBuffer;
 +import org.apache.cassandra.tools.ToolRunner.ToolResult;
 +import org.apache.cassandra.utils.Generators;
 +import org.assertj.core.api.Assertions;
 +import org.quicktheories.core.Gen;
 +import org.quicktheories.generators.SourceDSL;
 +
 +import static org.apache.cassandra.utils.FailingConsumer.orFail;
 +import static org.quicktheories.QuickTheory.qt;
 +
/**
 * Tests for {@link JMXTool}: property-based serialization round-trips for the
 * dump formats, plus golden-output checks of the command-line help text.
 */
public class JMXToolTest
{
    /** json round-trip: load(dump(map)) must equal map. */
    @Test
    public void jsonSerde()
    {
        serde(JMXTool.Dump.Format.json, JMXTool.Diff.Format.json);
    }

    /** yaml round-trip: load(dump(map)) must equal map. */
    @Test
    public void yamlSerde()
    {
        serde(JMXTool.Dump.Format.yaml, JMXTool.Diff.Format.yaml);
    }

    /** Running the tool with no arguments prints the top-level usage text. */
    @Test
    public void cliHelp()
    {
        ToolResult result = jmxtool();
        result.assertOnCleanExit();

        Assertions.assertThat(result.getStdout())
                  .isEqualTo("usage: jmxtool <command> [<args>]\n" +
                             "\n" +
                             "The most commonly used jmxtool commands are:\n" +
                             "    diff   Diff two jmx dump files and report their differences\n" +
                             "    dump   Dump the Apache Cassandra JMX objects and metadata.\n" +
                             "    help   Display help information\n" +
                             "\n" +
                             "See 'jmxtool help <command>' for more information on a specific command.\n" +
                             "\n");
    }

    /** Golden output for {@code jmxtool help diff}. */
    @Test
    public void cliHelpDiff()
    {
        ToolResult result = jmxtool("help", "diff");
        result.assertOnCleanExit();

        Assertions.assertThat(result.getStdout())
                  .isEqualTo("NAME\n" +
                             "        jmxtool diff - Diff two jmx dump files and report their differences\n" +
                             "\n" +
                             "SYNOPSIS\n" +
                             "        jmxtool diff [--exclude-attribute <exclude attributes>...]\n" +
                             "                [--exclude-object <exclude objects>...]\n" +
                             "                [--exclude-operation <exclude operations>...]\n" +
                             "                [(-f <format> | --format <format>)] [(-h | --help)]\n" +
                             "                [--ignore-missing-on-left] [--ignore-missing-on-right] [--] <left>\n" +
                             "                <right>\n" +
                             "\n" +
                             "OPTIONS\n" +
                             "        --exclude-attribute <exclude attributes>\n" +
                             "            Ignores processing specific attributes. Each usage should take a\n" +
                             "            single attribute, but can use this flag multiple times.\n" +
                             "\n" +
                             "        --exclude-object <exclude objects>\n" +
                             "            Ignores processing specific objects. Each usage should take a single\n" +
                             "            object, but can use this flag multiple times.\n" +
                             "\n" +
                             "        --exclude-operation <exclude operations>\n" +
                             "            Ignores processing specific operations. Each usage should take a\n" +
                             "            single operation, but can use this flag multiple times.\n" +
                             "\n" +
                             "        -f <format>, --format <format>\n" +
                             "            What format the files are in; only support json and yaml as format\n" +
                             "\n" +
                             "        -h, --help\n" +
                             "            Display help information\n" +
                             "\n" +
                             "        --ignore-missing-on-left\n" +
                             "            Ignore results missing on the left\n" +
                             "\n" +
                             "        --ignore-missing-on-right\n" +
                             "            Ignore results missing on the right\n" +
                             "\n" +
                             "        --\n" +
                             "            This option can be used to separate command-line options from the\n" +
                             "            list of argument, (useful when arguments might be mistaken for\n" +
                             "            command-line options\n" +
                             "\n" +
                             "        <left> <right>\n" +
                             "            Files to diff\n" +
                             "\n" +
                             "\n");
    }

    /** Golden output for {@code jmxtool help dump}. */
    @Test
    public void cliHelpDump()
    {
        ToolResult result = jmxtool("help", "dump");
        result.assertOnCleanExit();

        Assertions.assertThat(result.getStdout())
                  .isEqualTo("NAME\n" +
                             "        jmxtool dump - Dump the Apache Cassandra JMX objects and metadata.\n" +
                             "\n" +
                             "SYNOPSIS\n" +
                             "        jmxtool dump [(-f <format> | --format <format>)] [(-h | --help)]\n" +
                             "                [(-u <url> | --url <url>)]\n" +
                             "\n" +
                             "OPTIONS\n" +
                             "        -f <format>, --format <format>\n" +
                             "            What format to dump content as; supported values are console\n" +
                             "            (default), json, and yaml\n" +
                             "\n" +
                             "        -h, --help\n" +
                             "            Display help information\n" +
                             "\n" +
                             "        -u <url>, --url <url>\n" +
                             "            JMX url to target\n" +
                             "\n" +
                             "\n");
    }

    // Invokes tools/bin/jmxtool with the given arguments and captures exit
    // code plus stdout/stderr.
    private static ToolResult jmxtool(String... args)
    {
        List<String> cmd = new ArrayList<>(1 + args.length);
        cmd.add("tools/bin/jmxtool");
        cmd.addAll(Arrays.asList(args));
        return ToolRunner.invoke(cmd);
    }

    // Property driver: for randomly generated JMX metadata maps, assert that a
    // dump followed by a load yields an equal map.
    private void serde(JMXTool.Dump.Format serializer, JMXTool.Diff.Format deserializer)
    {
        DataOutputBuffer buffer = new DataOutputBuffer();
        qt().withShrinkCycles(0).forAll(gen()).checkAssert(orFail(map -> serde(serializer, deserializer, buffer, map)));
    }

    // Single round-trip: serialize map into buffer, read it back, assert equality.
    private void serde(JMXTool.Dump.Format serializer,
                       JMXTool.Diff.Format deserializer,
                       DataOutputBuffer buffer,
                       Map<String, JMXTool.Info> map) throws IOException
    {
        buffer.clear(); // the buffer is reused across generated inputs
        serializer.dump(buffer, map);
        Map<String, JMXTool.Info> read = deserializer.load(new DataInputBuffer(buffer.buffer(), false));
        Assertions.assertThat(read)
                  .as("property deserialize(serialize(value)) == value failed")
                  .isEqualTo(map);
    }

    // Generators for random JMX metadata (identifiers for names/types; arrays
    // of 0-10 attributes/operations/parameters) used by the serde properties.
    private static final Gen<JMXTool.Attribute> attributeGen = Generators.IDENTIFIER_GEN.zip(Generators.IDENTIFIER_GEN, Generators.IDENTIFIER_GEN, JMXTool.Attribute::new);
    private static final Gen<JMXTool.Parameter> parameterGen = Generators.IDENTIFIER_GEN.zip(Generators.IDENTIFIER_GEN, JMXTool.Parameter::new);
    private static final Gen<JMXTool.Operation> operationGen = Generators.IDENTIFIER_GEN.zip(SourceDSL.arrays().ofClass(parameterGen, JMXTool.Parameter.class).withLengthBetween(0, 10), Generators.IDENTIFIER_GEN, JMXTool.Operation::new);
    private static final Gen<JMXTool.Info> infoGen = SourceDSL.arrays().ofClass(attributeGen, JMXTool.Attribute.class).withLengthBetween(0, 10).zip(SourceDSL.arrays().ofClass(operationGen, JMXTool.Operation.class).withLengthBetween(0, 10), JMXTool.Info::new);

    // Random maps of bean name -> metadata, size 0-10.
    private static Gen<Map<String, JMXTool.Info>> gen()
    {
        return SourceDSL.maps().of(Generators.IDENTIFIER_GEN, infoGen).ofSizeBetween(0, 10);
    }
}
diff --cc test/unit/org/apache/cassandra/tools/TopPartitionsTest.java
index d02b4c4,0000000..caa46ea
mode 100644,000000..100644
--- a/test/unit/org/apache/cassandra/tools/TopPartitionsTest.java
+++ b/test/unit/org/apache/cassandra/tools/TopPartitionsTest.java
@@@ -1,67 -1,0 +1,85 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
 +package org.apache.cassandra.tools;
 +
 +import static java.lang.String.format;
 +import static org.apache.cassandra.cql3.QueryProcessor.executeInternal;
 +import static org.junit.Assert.assertEquals;
 +
 +import java.util.List;
 +import java.util.Map;
 +import java.util.concurrent.ArrayBlockingQueue;
 +import java.util.concurrent.BlockingQueue;
 +import java.util.concurrent.Executors;
 +import java.util.concurrent.TimeUnit;
 +
 +import javax.management.openmbean.CompositeData;
 +import javax.management.openmbean.TabularDataSupport;
 +
 +import org.apache.cassandra.SchemaLoader;
 +import org.apache.cassandra.db.ColumnFamilyStore;
 +import org.apache.cassandra.db.SystemKeyspace;
 +import org.apache.cassandra.exceptions.ConfigurationException;
 +import org.apache.cassandra.service.StorageService;
 +import org.junit.BeforeClass;
 +import org.junit.Test;
 +
 +import com.google.common.collect.Lists;
 +
 +public class TopPartitionsTest
 +{
 +    @BeforeClass
 +    public static void loadSchema() throws ConfigurationException
 +    {
 +        SchemaLoader.prepareServer();
 +    }
 +
 +    @Test
 +    public void testServiceTopPartitionsNoArg() throws Exception
 +    {
 +        BlockingQueue<Map<String, List<CompositeData>>> q = new 
ArrayBlockingQueue<>(1);
 +        ColumnFamilyStore.all();
 +        Executors.newCachedThreadPool().execute(() ->
 +        {
 +            try
 +            {
 +                q.put(StorageService.instance.samplePartitions(1000, 100, 10, 
Lists.newArrayList("READS", "WRITES")));
 +            }
 +            catch (Exception e)
 +            {
 +                e.printStackTrace();
 +            }
 +        });
 +        Thread.sleep(100);
 +        SystemKeyspace.persistLocalMetadata();
 +        Map<String, List<CompositeData>> result = q.poll(5, TimeUnit.SECONDS);
 +        List<CompositeData> cd = result.get("WRITES");
 +        assertEquals(1, cd.size());
 +    }
 +
 +    @Test
 +    public void testServiceTopPartitionsSingleTable() throws Exception
 +    {
 +        ColumnFamilyStore.getIfExists("system", 
"local").beginLocalSampling("READS", 5, 100000);
 +        String req = "SELECT * FROM system.%s WHERE key='%s'";
 +        executeInternal(format(req, SystemKeyspace.LOCAL, 
SystemKeyspace.LOCAL));
 +        List<CompositeData> result = ColumnFamilyStore.getIfExists("system", 
"local").finishLocalSampling("READS", 5);
 +        assertEquals(1, result.size());
 +    }
 +}
diff --cc test/unit/org/apache/cassandra/utils/AssertUtil.java
index 4d35ede,0000000..ec1ce5e
mode 100644,000000..100644
--- a/test/unit/org/apache/cassandra/utils/AssertUtil.java
+++ b/test/unit/org/apache/cassandra/utils/AssertUtil.java
@@@ -1,128 -1,0 +1,146 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
 +package org.apache.cassandra.utils;
 +
 +import java.time.Duration;
 +import java.util.Arrays;
 +import java.util.Collection;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.concurrent.ExecutionException;
 +import java.util.concurrent.ExecutorService;
 +import java.util.concurrent.Executors;
 +import java.util.concurrent.Future;
 +import java.util.concurrent.TimeUnit;
 +import java.util.concurrent.TimeoutException;
 +
 +import com.google.common.base.Joiner;
 +import com.google.common.collect.HashMultimap;
 +import com.google.common.collect.Multimap;
 +
 +import org.apache.cassandra.concurrent.NamedThreadFactory;
 +
/**
 * Test assertion helpers; currently utilities for running a computation with a
 * preemptive timeout and producing a thread dump on failure.
 */
public final class AssertUtil
{
    private AssertUtil()
    {

    }

    /**
     * Launch the input in another thread, throws an assert failure if it takes longer than the defined timeout.
     *
     * An attempt to halt the thread uses an interrupt, but only works if the underlying logic respects it.
     *
     * The assert message will contain the stacktrace at the time of the timeout; grouped by common threads.
     */
    public static void assertTimeoutPreemptively(Duration timeout, Executable fn)
    {
        // stack index 2 is this method's caller; used only to name the worker thread
        StackTraceElement caller = Thread.currentThread().getStackTrace()[2];
        assertTimeoutPreemptively(caller, timeout, () -> {
            fn.execute();
            return null;
        });
    }

    /**
     * Launch the input in another thread, throws an assert failure if it takes longer than the defined timeout.
     *
     * An attempt to halt the thread uses an interrupt, but only works if the underlying logic respects it.
     *
     * The assert message will contain the stacktrace at the time of the timeout; grouped by common threads.
     */
    public static <T> T assertTimeoutPreemptively(Duration timeout, ThrowingSupplier<T> supplier)
    {
        // stack index 2 is this method's caller; used only to name the worker thread
        StackTraceElement caller = Thread.currentThread().getStackTrace()[2];
        return assertTimeoutPreemptively(caller, timeout, supplier);
    }

    // Runs the supplier on a dedicated single-thread executor and waits up to
    // the timeout; on timeout throws an AssertionError carrying a grouped
    // thread dump. The executor is always shut down in the finally block.
    private static <T> T assertTimeoutPreemptively(StackTraceElement caller, Duration timeout, ThrowingSupplier<T> supplier)
    {

        String[] split = caller.getClassName().split("\\.");
        String simpleClassName = split[split.length - 1];
        ExecutorService executorService = Executors.newSingleThreadExecutor(new NamedThreadFactory("TimeoutTest-" + simpleClassName + "#" + caller.getMethodName()));
        try
        {
            Future<T> future = executorService.submit(() -> {
                try {
                    return supplier.get();
                }
                catch (Throwable throwable) {
                    // rethrow unchecked so the Future surfaces it as an ExecutionException
                    throw Throwables.throwAsUncheckedException(throwable);
                }
            });

            long timeoutInNanos = timeout.toNanos();
            try
            {
                return future.get(timeoutInNanos, TimeUnit.NANOSECONDS);
            }
            catch (TimeoutException ex)
            {
                // best-effort interrupt of the still-running task
                future.cancel(true);
                // capture all live threads, grouping those with identical stacks
                // so the failure message stays readable
                Map<Thread, StackTraceElement[]> threadDump = Thread.getAllStackTraces();
                StringBuilder sb = new StringBuilder("execution timed out after ").append(TimeUnit.NANOSECONDS.toMillis(timeoutInNanos)).append(" ms\n");
                Multimap<List<StackTraceElement>, Thread> groupCommonThreads = HashMultimap.create();
                for (Map.Entry<Thread, StackTraceElement[]> e : threadDump.entrySet())
                    groupCommonThreads.put(Arrays.asList(e.getValue()), e.getKey());

                for (Map.Entry<List<StackTraceElement>, Collection<Thread>> e : groupCommonThreads.asMap().entrySet())
                {
                    sb.append("Threads: ");
                    Joiner.on(", ").appendTo(sb, e.getValue().stream().map(Thread::getName).iterator());
                    sb.append("\n");
                    for (StackTraceElement elem : e.getKey())
                        sb.append("\t").append(elem.getClassName()).append(".").append(elem.getMethodName()).append("[").append(elem.getLineNumber()).append("]\n");
                    sb.append("\n");
                }
                throw new AssertionError(sb.toString());
            }
            catch (InterruptedException e)
            {
                // restore the interrupt flag before propagating
                Thread.currentThread().interrupt();
                throw Throwables.throwAsUncheckedException(e);
            }
            catch (ExecutionException ex)
            {
                // unwrap: report the task's own failure, not the wrapper
                throw Throwables.throwAsUncheckedException(ex.getCause());
            }
            catch (Throwable ex)
            {
                throw Throwables.throwAsUncheckedException(ex);
            }
        }
        finally
        {
            executorService.shutdownNow();
        }
    }

    /** Like {@link java.util.function.Supplier} but allowed to throw. */
    public interface ThrowingSupplier<T>
    {
        T get() throws Throwable;
    }

    /** A block of code to execute; allowed to throw. */
    public interface Executable
    {
        void execute() throws Throwable;
    }
}
diff --cc test/unit/org/apache/cassandra/utils/ByteArrayUtilTest.java
index 1feaab1,0000000..f03275e
mode 100644,000000..100644
--- a/test/unit/org/apache/cassandra/utils/ByteArrayUtilTest.java
+++ b/test/unit/org/apache/cassandra/utils/ByteArrayUtilTest.java
@@@ -1,209 -1,0 +1,227 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
 +package org.apache.cassandra.utils;
 +
 +import java.nio.ByteBuffer;
 +import java.nio.ByteOrder;
 +import java.util.Arrays;
 +
 +import org.junit.Test;
 +
 +import org.quicktheories.core.Gen;
 +import org.quicktheories.generators.SourceDSL;
 +
 +import static org.assertj.core.api.Assertions.assertThat;
 +import static org.assertj.core.api.Assertions.assertThatThrownBy;
 +import static org.quicktheories.QuickTheory.qt;
 +
 +public class ByteArrayUtilTest
 +{
 +    private static final ByteOrder ORDER = ByteOrder.BIG_ENDIAN;
 +
 +    @Test
 +    public void putGetBoolean()
 +    {
 +        byte[] bytes = new byte[10];
 +        for (int i = 0; i < bytes.length; i++)
 +        {
 +            for (boolean b : Arrays.asList(Boolean.TRUE, Boolean.FALSE))
 +            {
 +                ByteArrayUtil.putBoolean(bytes, i, b);
 +                assertThat(ByteArrayUtil.getBoolean(bytes, i))
 +                          .as("get(put(b)) == b")
 +                          .isEqualTo(b);
 +            }
 +        }
 +    }
 +
 +    @Test
 +    public void putBooleanArrayTooSmall()
 +    {
 +        putArrayToSmall(1, bytes -> ByteArrayUtil.putBoolean(bytes, 0, true));
 +    }
 +
 +    @Test
 +    public void putBooleanArrayOutOfBounds()
 +    {
 +        byte[] bytes = new byte[16];
 +        assertThatThrownBy(() -> ByteArrayUtil.putBoolean(bytes, bytes.length 
+ 10, true))
 +            .isInstanceOf(IndexOutOfBoundsException.class);
 +    }
 +
 +    @Test
 +    public void putGetShort()
 +    {
 +        Gen<Short> gen = SourceDSL.integers().between(Short.MIN_VALUE, 
Short.MAX_VALUE).map(Integer::shortValue);
 +        byte[] bytes = new byte[Short.BYTES + 1];
 +        ByteBuffer buffer = ByteBuffer.wrap(bytes).order(ORDER);
 +        qt().forAll(gen).checkAssert(jnum -> {
 +            short value = jnum.shortValue();
 +            ByteArrayUtil.putShort(bytes, 1, value);
 +            assertThat(ByteArrayUtil.getShort(bytes, 1))
 +                      .as("get(put(b)) == b")
 +                      .isEqualTo(value)
 +                      .isEqualTo(buffer.getShort(1));
 +        });
 +    }
 +
 +    @Test
 +    public void putShortArrayTooSmall()
 +    {
 +        putArrayToSmall(Short.BYTES, bytes -> ByteArrayUtil.putShort(bytes, 
0, (short) 42));
 +    }
 +
 +    @Test
 +    public void putShortArrayOutOfBounds()
 +    {
 +        byte[] bytes = new byte[16];
 +        assertThatThrownBy(() -> ByteArrayUtil.putInt(bytes, bytes.length + 
10, (short) 42))
 +        .isInstanceOf(IndexOutOfBoundsException.class);
 +    }
 +
 +    @Test
 +    public void putGetInt()
 +    {
 +        Gen<Integer> gen = SourceDSL.integers().all();
 +        byte[] bytes = new byte[Integer.BYTES + 1];
 +        ByteBuffer buffer = ByteBuffer.wrap(bytes).order(ORDER);
 +        qt().forAll(gen).checkAssert(jnum -> {
 +            int value = jnum.intValue();
 +            ByteArrayUtil.putInt(bytes, 1, value);
 +            assertThat(ByteArrayUtil.getInt(bytes, 1))
 +                      .as("get(put(b)) == b")
 +                      .isEqualTo(value)
 +                      .isEqualTo(buffer.getInt(1));
 +        });
 +    }
 +
 +    @Test
 +    public void putIntArrayTooSmall()
 +    {
 +        putArrayToSmall(Integer.BYTES, bytes -> ByteArrayUtil.putInt(bytes, 
0, 42));
 +    }
 +
 +    @Test
 +    public void putIntArrayOutOfBounds()
 +    {
 +        byte[] bytes = new byte[16];
 +        assertThatThrownBy(() -> ByteArrayUtil.putInt(bytes, bytes.length + 
10, 42))
 +            .isInstanceOf(IndexOutOfBoundsException.class);
 +    }
 +
 +    @Test
 +    public void putGetLong()
 +    {
 +        Gen<Long> gen = SourceDSL.longs().all();
 +        byte[] bytes = new byte[Long.BYTES + 1];
 +        ByteBuffer buffer = ByteBuffer.wrap(bytes).order(ORDER);
 +        qt().forAll(gen).checkAssert(jnum -> {
 +            long value = jnum.longValue();
 +            ByteArrayUtil.putLong(bytes, 1, value);
 +            assertThat(ByteArrayUtil.getLong(bytes, 1))
 +                      .as("get(put(b)) == b")
 +                      .isEqualTo(value)
 +                      .isEqualTo(buffer.getLong(1));
 +        });
 +    }
 +
 +    @Test
 +    public void putLongArrayTooSmall()
 +    {
 +        putArrayToSmall(Long.BYTES, bytes -> ByteArrayUtil.putLong(bytes, 0, 
42L));
 +    }
 +
 +    @Test
 +    public void putLongArrayOutOfBounds()
 +    {
 +        byte[] bytes = new byte[16];
 +        assertThatThrownBy(() -> ByteArrayUtil.putLong(bytes, bytes.length + 
10, 42))
 +            .isInstanceOf(IndexOutOfBoundsException.class);
 +    }
 +
 +    @Test
 +    public void putGetFloat()
 +    {
 +        Gen<Float> gen = SourceDSL.floats().any();
 +        byte[] bytes = new byte[Float.BYTES + 1];
 +        ByteBuffer buffer = ByteBuffer.wrap(bytes).order(ORDER);
 +        qt().forAll(gen).checkAssert(jnum -> {
 +            float value = jnum.floatValue();
 +            ByteArrayUtil.putFloat(bytes, 1, value);
 +            assertThat(ByteArrayUtil.getFloat(bytes, 1))
 +                      .as("get(put(b)) == b")
 +                      .isEqualTo(value)
 +                      .isEqualTo(buffer.getFloat(1));
 +        });
 +    }
 +
 +    @Test
 +    public void putFloatArrayTooSmall()
 +    {
 +        putArrayToSmall(Float.BYTES, bytes -> ByteArrayUtil.putFloat(bytes, 
0, 42f));
 +    }
 +
 +    @Test
 +    public void putFloatArrayOutOfBounds()
 +    {
 +        byte[] bytes = new byte[16];
 +        assertThatThrownBy(() -> ByteArrayUtil.putFloat(bytes, bytes.length + 
10, 42.0f))
 +            .isInstanceOf(IndexOutOfBoundsException.class);
 +    }
 +
 +    @Test
 +    public void putGetDouble()
 +    {
 +        Gen<Double> gen = SourceDSL.doubles().any();
 +        byte[] bytes = new byte[Double.BYTES + 1];
 +        ByteBuffer buffer = ByteBuffer.wrap(bytes).order(ORDER);
 +        qt().forAll(gen).checkAssert(jnum -> {
 +            double value = jnum.doubleValue();
 +            ByteArrayUtil.putDouble(bytes, 1, value);
 +            assertThat(ByteArrayUtil.getDouble(bytes, 1))
 +                      .as("get(put(b)) == b")
 +                      .isEqualTo(value)
 +                      .isEqualTo(buffer.getDouble(1));
 +        });
 +    }
 +
 +    @Test
 +    public void putDoubleArrayTooSmall()
 +    {
 +        putArrayToSmall(Double.BYTES, bytes -> ByteArrayUtil.putDouble(bytes, 
0, 42.0));
 +    }
 +
 +    @Test
 +    public void putDoubleArrayOutOfBounds()
 +    {
 +        byte[] bytes = new byte[16];
 +        assertThatThrownBy(() -> ByteArrayUtil.putDouble(bytes, bytes.length 
+ 10, 42.0))
 +            .isInstanceOf(IndexOutOfBoundsException.class);
 +    }
 +
 +    private static void putArrayToSmall(int targetBytes, 
FailingConsumer<byte[]> fn)
 +    {
 +        for (int i = 0; i < targetBytes - 1; i++)
 +        {
 +            byte[] bytes = new byte[i];
 +            assertThatThrownBy(() -> 
fn.doAccept(bytes)).isInstanceOf(IndexOutOfBoundsException.class);
 +            assertThat(bytes).isEqualTo(new byte[i]);
 +        }
 +    }
 +}
diff --cc test/unit/org/apache/cassandra/utils/GeneratorsTest.java
index b91421c,0000000..7fa0561
mode 100644,000000..100644
--- a/test/unit/org/apache/cassandra/utils/GeneratorsTest.java
+++ b/test/unit/org/apache/cassandra/utils/GeneratorsTest.java
@@@ -1,30 -1,0 +1,48 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
 +package org.apache.cassandra.utils;
 +
 +import com.google.common.net.InternetDomainName;
 +import org.junit.Test;
 +
 +import org.assertj.core.api.Assertions;
 +
 +import static org.quicktheories.QuickTheory.qt;
 +
 +public class GeneratorsTest
 +{
 +    @Test
 +    public void randomUUID()
 +    {
 +        qt().forAll(Generators.UUID_RANDOM_GEN).checkAssert(uuid -> {
 +            Assertions.assertThat(uuid.version())
 +                      .as("version was not random uuid")
 +                      .isEqualTo(4);
 +            Assertions.assertThat(uuid.variant())
 +                      .as("varient not set to IETF (2)")
 +                      .isEqualTo(2);
 +        });
 +    }
 +
 +    @Test
 +    public void dnsDomainName()
 +    {
 +        
qt().forAll(Generators.DNS_DOMAIN_NAME).checkAssert(InternetDomainName::from);
 +    }
 +}


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@cassandra.apache.org
For additional commands, e-mail: commits-h...@cassandra.apache.org

Reply via email to