http://git-wip-us.apache.org/repos/asf/calcite/blob/5cee486f/avatica/pom.xml ---------------------------------------------------------------------- diff --git a/avatica/pom.xml b/avatica/pom.xml index 3027df0..7a747fe 100644 --- a/avatica/pom.xml +++ b/avatica/pom.xml @@ -18,197 +18,772 @@ limitations under the License. <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> - <groupId>org.apache.calcite</groupId> - <artifactId>calcite</artifactId> - <version>1.7.0-SNAPSHOT</version> + <groupId>org.apache</groupId> + <artifactId>apache</artifactId> + <version>17</version> + <!-- Tell Maven that it's OK that we're not attached to the parent directory --> + <relativePath/> </parent> - <artifactId>calcite-avatica</artifactId> - <packaging>jar</packaging> - <name>Calcite Avatica</name> - <description>JDBC driver framework.</description> + <!-- The basics. --> + <groupId>org.apache.calcite.avatica</groupId> + <artifactId>calcite-avatica-parent</artifactId> + <packaging>pom</packaging> + <version>1.7.0-SNAPSHOT</version> + + <!-- More project information. 
--> + <name>Avatica Project</name> + <description>Avatica is a JDBC driver framework which is a part of Apache Calcite</description> + <url>http://calcite.apache.org/avatica</url> + <inceptionYear>2012</inceptionYear> + + <mailingLists> + <mailingList> + <name>Apache Calcite developers list</name> + <subscribe>[email protected]</subscribe> + <unsubscribe>[email protected]</unsubscribe> + <post>[email protected]</post> + <archive>http://mail-archives.apache.org/mod_mbox/calcite-dev</archive> + </mailingList> + </mailingLists> <properties> - <top.dir>${project.basedir}/..</top.dir> + <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding> + <top.dir>${project.basedir}</top.dir> + <version.major>1</version.major> + <version.minor>6</version.minor> + + <!-- This list is in alphabetical order. --> + <build-helper-maven-plugin.version>1.9</build-helper-maven-plugin.version> + <checksum-maven-plugin.version>1.2</checksum-maven-plugin.version> + <commons-lang3.version>3.2</commons-lang3.version> + <commons-logging.version>1.1.3</commons-logging.version> + <dropwizard-metrics3.version>3.1.2</dropwizard-metrics3.version> + <findbugs.version>1.3.9</findbugs.version> + <freemarker.version>2.3.19</freemarker.version> + <git-commit-id-plugin.version>2.1.9</git-commit-id-plugin.version> + + <!-- We support guava versions as old as 14.0.1 (the version used by Hive) + but prefer more recent versions. 
--> + <guava.version>14.0.1</guava.version> + <h2.version>1.4.185</h2.version> + <hadoop.version>2.6.0</hadoop.version> + <hamcrest.version>1.3</hamcrest.version> + <hsqldb.version>2.3.1</hsqldb.version> + <httpclient.version>4.5.2</httpclient.version> + <httpcore.version>4.4.4</httpcore.version> + <hydromatic-toolbox.version>0.3</hydromatic-toolbox.version> + <jackson.version>2.6.3</jackson.version> + <jcip-annotations.version>1.0-1</jcip-annotations.version> + <jetty.version>9.2.7.v20150116</jetty.version> + <junit.version>4.12</junit.version> + <maven-checkstyle-plugin.version>2.12.1</maven-checkstyle-plugin.version> + <maven-dependency-plugin.version>2.10</maven-dependency-plugin.version> + + <!-- keep failsafe version in sync with surefire; apache 17 has surefire 2.7.2 --> + <maven-failsafe-plugin.version>2.7.2</maven-failsafe-plugin.version> + <maven-scm-provider.version>1.9.1</maven-scm-provider.version> + <maven-shade-plugin.version>2.1</maven-shade-plugin.version> + <maven-source-plugin.version>2.4</maven-source-plugin.version> + <mockito-all.version>1.10.19</mockito-all.version> + <protobuf.version>3.0.0-beta-1</protobuf.version> + <scott-data-hsqldb.version>0.1</scott-data-hsqldb.version> + <servlet.version>3.0.1</servlet.version> + <slf4j.version>1.7.13</slf4j.version> </properties> - <dependencies> - <!-- Make sure that there are no dependencies on other calcite modules, - or on libraries other than Jackson. 
--> - <dependency> - <groupId>org.apache.calcite</groupId> - <artifactId>calcite-avatica-metrics</artifactId> - </dependency> - <dependency> - <groupId>com.fasterxml.jackson.core</groupId> - <artifactId>jackson-core</artifactId> - </dependency> - <dependency> - <groupId>com.fasterxml.jackson.core</groupId> - <artifactId>jackson-annotations</artifactId> - </dependency> - <dependency> - <groupId>com.fasterxml.jackson.core</groupId> - <artifactId>jackson-databind</artifactId> - </dependency> - <dependency> - <groupId>com.google.protobuf</groupId> - <artifactId>protobuf-java</artifactId> - </dependency> - <dependency> - <groupId>org.apache.httpcomponents</groupId> - <artifactId>httpclient</artifactId> - </dependency> - <dependency> - <groupId>org.apache.httpcomponents</groupId> - <artifactId>httpcore</artifactId> - </dependency> - <dependency> - <groupId>org.slf4j</groupId> - <artifactId>slf4j-api</artifactId> - </dependency> - <dependency> - <groupId>junit</groupId> - <artifactId>junit</artifactId> - <scope>test</scope> - </dependency> - <dependency> - <groupId>org.hamcrest</groupId> - <artifactId>hamcrest-core</artifactId> - <scope>test</scope> - </dependency> - <dependency> - <groupId>org.mockito</groupId> - <artifactId>mockito-all</artifactId> - <scope>test</scope> - </dependency> - <dependency> - <groupId>org.slf4j</groupId> - <artifactId>slf4j-log4j12</artifactId> - <scope>test</scope> - </dependency> - </dependencies> + <issueManagement> + <system>Jira</system> + <url>https://issues.apache.org/jira/browse/CALCITE</url> + </issueManagement> + + <scm> + <connection>scm:git:https://git-wip-us.apache.org/repos/asf/calcite.git</connection> + <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/calcite.git</developerConnection> + <url>https://github.com/apache/calcite</url> + <tag>HEAD</tag> + </scm> + + <modules> + <module>core</module> + <module>metrics</module> + <module>metrics-dropwizardmetrics3</module> + <module>noop-driver</module> + 
<module>server</module> + </modules> + + <!-- No dependencies here. Declare dependency VERSIONS in + dependencyManagement, below, and each dependency in the module that uses + it. --> + <dependencies /> + + <dependencyManagement> + <dependencies> + <!-- Sorted by groupId, artifactId; calcite dependencies first. --> + <dependency> + <groupId>org.apache.calcite</groupId> + <artifactId>calcite-avatica</artifactId> + <version>${project.version}</version> + </dependency> + <dependency> + <groupId>org.apache.calcite</groupId> + <artifactId>calcite-avatica-metrics</artifactId> + <version>${project.version}</version> + </dependency> + <dependency> + <groupId>org.apache.calcite</groupId> + <artifactId>calcite-avatica-noop</artifactId> + <version>${project.version}</version> + </dependency> + <dependency> + <groupId>org.apache.calcite</groupId> + <artifactId>calcite-avatica-server</artifactId> + <version>${project.version}</version> + </dependency> + <dependency> + <groupId>org.apache.calcite</groupId> + <artifactId>calcite-avatica</artifactId> + <version>${project.version}</version> + <type>test-jar</type> + </dependency> + + <!-- Now third-party dependencies, sorted by groupId and artifactId. 
--> + <dependency> + <groupId>com.fasterxml.jackson.core</groupId> + <artifactId>jackson-core</artifactId> + <version>${jackson.version}</version> + </dependency> + <dependency> + <groupId>com.fasterxml.jackson.core</groupId> + <artifactId>jackson-annotations</artifactId> + <version>${jackson.version}</version> + </dependency> + <dependency> + <groupId>com.fasterxml.jackson.core</groupId> + <artifactId>jackson-databind</artifactId> + <version>${jackson.version}</version> + </dependency> + <dependency> + <groupId>com.github.stephenc.jcip</groupId> + <artifactId>jcip-annotations</artifactId> + <version>${jcip-annotations.version}</version> + </dependency> + <dependency> + <groupId>com.google.code.findbugs</groupId> + <artifactId>jsr305</artifactId> + <version>${findbugs.version}</version> + </dependency> + <dependency> + <groupId>com.google.guava</groupId> + <artifactId>guava</artifactId> + <version>${guava.version}</version> + </dependency> + <dependency> + <groupId>com.google.protobuf</groupId> + <artifactId>protobuf-java</artifactId> + <version>${protobuf.version}</version> + </dependency> + <dependency> + <groupId>com.h2database</groupId> + <artifactId>h2</artifactId> + <version>${h2.version}</version> + </dependency> + <dependency> + <groupId>javax.servlet</groupId> + <artifactId>javax.servlet-api</artifactId> + <version>${servlet.version}</version> + </dependency> + <dependency> + <groupId>junit</groupId> + <artifactId>junit</artifactId> + <version>${junit.version}</version> + </dependency> + <dependency> + <groupId>net.hydromatic</groupId> + <artifactId>scott-data-hsqldb</artifactId> + <version>${scott-data-hsqldb.version}</version> + </dependency> + <dependency> + <groupId>org.apache.commons</groupId> + <artifactId>commons-lang3</artifactId> + <version>${commons-lang3.version}</version> + </dependency> + <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-common</artifactId> + <version>${hadoop.version}</version> + </dependency> + 
<dependency> + <groupId>org.apache.httpcomponents</groupId> + <artifactId>httpclient</artifactId> + <version>${httpclient.version}</version> + </dependency> + <dependency> + <groupId>org.apache.httpcomponents</groupId> + <artifactId>httpcore</artifactId> + <version>${httpcore.version}</version> + </dependency> + <dependency> + <groupId>org.mockito</groupId> + <artifactId>mockito-all</artifactId> + <version>${mockito-all.version}</version> + </dependency> + <dependency> + <groupId>org.hamcrest</groupId> + <artifactId>hamcrest-core</artifactId> + <version>${hamcrest.version}</version> + </dependency> + <dependency> + <groupId>org.hsqldb</groupId> + <artifactId>hsqldb</artifactId> + <version>${hsqldb.version}</version> + </dependency> + <dependency> + <groupId>org.eclipse.jetty</groupId> + <artifactId>jetty-server</artifactId> + <version>${jetty.version}</version> + </dependency> + <dependency> + <groupId>org.eclipse.jetty</groupId> + <artifactId>jetty-util</artifactId> + <version>${jetty.version}</version> + </dependency> + <dependency> + <groupId>org.slf4j</groupId> + <artifactId>slf4j-api</artifactId> + <version>${slf4j.version}</version> + </dependency> + <dependency> + <groupId>org.slf4j</groupId> + <artifactId>slf4j-log4j12</artifactId> + <version>${slf4j.version}</version> + </dependency> + </dependencies> + </dependencyManagement> <build> - <pluginManagement> - <plugins> - <plugin> - <groupId>org.eclipse.m2e</groupId> - <artifactId>lifecycle-mapping</artifactId> - <version>1.0.0</version> - <configuration> - <lifecycleMappingMetadata> - <pluginExecutions> - <pluginExecution> - <pluginExecutionFilter> - <groupId>org.apache.maven.plugins</groupId> - <artifactId>maven-checkstyle-plugin</artifactId> - <versionRange>[2.12.1,)</versionRange> - <goals> - <goal>check</goal> - </goals> - </pluginExecutionFilter> - <action> - <ignore /> - </action> - </pluginExecution> - </pluginExecutions> - </lifecycleMappingMetadata> - </configuration> - </plugin> - </plugins> - 
</pluginManagement> <plugins> <plugin> - <artifactId>maven-dependency-plugin</artifactId> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-source-plugin</artifactId> <executions> <execution> - <id>analyze</id> + <id>attach-sources</id> + <phase>verify</phase> <goals> - <goal>analyze-only</goal> + <goal>jar-no-fork</goal> + <goal>test-jar-no-fork</goal> </goals> - <configuration> - <failOnWarning>true</failOnWarning> - <!-- ignore "unused but declared" warnings --> - <ignoredUnusedDeclaredDependencies> - <ignoredUnusedDeclaredDependency>org.slf4j:slf4j-log4j12</ignoredUnusedDeclaredDependency> - </ignoredUnusedDeclaredDependencies> - </configuration> </execution> </executions> </plugin> - <!-- Parent module has the same plugin and does the work of - generating -sources.jar for each project. But without the - plugin declared here, IDEs don't know the sources are - available. --> <plugin> <groupId>org.apache.maven.plugins</groupId> - <artifactId>maven-source-plugin</artifactId> + <artifactId>maven-remote-resources-plugin</artifactId> <executions> <execution> - <id>attach-sources</id> - <phase>verify</phase> + <id>root-resources</id> <goals> - <goal>jar-no-fork</goal> - <goal>test-jar-no-fork</goal> + <goal>process</goal> </goals> + <configuration> + <runOnlyAtExecutionRoot>true</runOnlyAtExecutionRoot> + <resourceBundles> + <resourceBundle>org.apache:apache-jar-resource-bundle:1.4</resourceBundle> + </resourceBundles> + </configuration> + </execution> + <execution> + <id>non-root-resources</id> + <configuration> + <resourceBundles> + <resourceBundle>org.apache:apache-jar-resource-bundle:1.4</resourceBundle> + </resourceBundles> + </configuration> </execution> </executions> </plugin> - - <!-- Produce a tests jar so that avatica-server/pom.xml can reference for suite. - TODO: remove after moving over to annotation-based TestSuite definitions. 
--> <plugin> <groupId>org.apache.maven.plugins</groupId> - <artifactId>maven-jar-plugin</artifactId> + <artifactId>maven-compiler-plugin</artifactId> + <configuration> + <source>1.7</source> + <target>1.7</target> + </configuration> + </plugin> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-checkstyle-plugin</artifactId> <executions> <execution> + <id>validate</id> + <phase>validate</phase> + <configuration> + <configLocation>${top.dir}/src/main/config/checkstyle/checker.xml</configLocation> + <suppressionsLocation>${top.dir}/src/main/config/checkstyle/suppressions.xml</suppressionsLocation> + <consoleOutput>true</consoleOutput> + <headerLocation>${top.dir}/src/main/config/checkstyle/header.txt</headerLocation> + <failOnViolation>true</failOnViolation> + <includeTestSourceDirectory>true</includeTestSourceDirectory> + </configuration> <goals> - <goal>test-jar</goal> + <goal>check</goal> </goals> </execution> </executions> + <dependencies> + <dependency> + <groupId>net.hydromatic</groupId> + <artifactId>toolbox</artifactId> + <version>${hydromatic-toolbox.version}</version> + </dependency> + </dependencies> </plugin> <plugin> - <artifactId>maven-remote-resources-plugin</artifactId> + <!-- override default version 2.8 for access to additional config settings --> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-dependency-plugin</artifactId> + <version>${maven-dependency-plugin.version}</version> <executions> <execution> - <id>non-root-resources</id> + <id>analyze</id> <goals> - <goal>process</goal> + <goal>analyze-only</goal> </goals> + <configuration> + <failOnWarning>true</failOnWarning> + </configuration> </execution> </executions> </plugin> <plugin> + <!-- This is the configuration used by "mvn javadoc:javadoc". It is + configured strict, so that it shows errors such as broken links in + javadoc on private methods. The configuration for "mvn site" is + under "reporting", and is more lenient. 
--> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-javadoc-plugin</artifactId> + <configuration> + <links> + <link>http://docs.oracle.com/javase/8/docs/api/</link> + </links> + <tags> + <tag> + <name>sql.92</name> + <placement>a</placement> + <head>SQL 92 spec:</head> + </tag> + <tag> + <name>sql.99</name> + <placement>a</placement> + <head>SQL 99 spec:</head> + </tag> + <tag> + <name>sql.2003</name> + <placement>a</placement> + <head>SQL 2003 spec:</head> + </tag> + <tag> + <name>pre</name> + </tag> + <tag> + <name>post</name> + </tag> + </tags> + <show>private</show> + </configuration> + </plugin> + <plugin> + <!-- Override apache parent POM's definition of release + plugin. If we don't specify gitexe version, git doesn't + commit during release process. --> <groupId>org.apache.maven.plugins</groupId> - <artifactId>maven-shade-plugin</artifactId> + <artifactId>maven-release-plugin</artifactId> + <dependencies> + <dependency> + <groupId>org.apache.maven.scm</groupId> + <artifactId>maven-scm-provider-gitexe</artifactId> + <version>${maven-scm-provider.version}</version> + </dependency> + </dependencies> + </plugin> + <plugin> + <groupId>org.codehaus.mojo</groupId> + <artifactId>build-helper-maven-plugin</artifactId> + <!-- Make sure every sub-project has LICENSE, NOTICE and + git.properties in its jar's META-INF directory. 
--> <executions> <execution> - <phase>package</phase> + <id>add-resource</id> + <phase>generate-resources</phase> <goals> - <goal>shade</goal> + <goal>add-resource</goal> + <goal>add-test-resource</goal> </goals> <configuration> - <relocations> - <relocation> - <pattern>com.google.protobuf</pattern> - <shadedPattern>org.apache.calcite.avatica.com.google.protobuf</shadedPattern> - </relocation> - <relocation> - <pattern>org.apache.http</pattern> - <shadedPattern>org.apache.calcite.avatica.org.apache.http</shadedPattern> - </relocation> - <relocation> - <pattern>org.apache.commons</pattern> - <shadedPattern>org.apache.calcite.avatica.org.apache.commons</shadedPattern> - </relocation> - </relocations> - <createDependencyReducedPom>false</createDependencyReducedPom> + <resources> + <resource> + <directory>${top.dir}</directory> + <targetPath>META-INF</targetPath> + <includes> + <include>LICENSE</include> + <include>NOTICE</include> + </includes> + </resource> + <resource> + <directory>${top.dir}/target</directory> + <targetPath>META-INF</targetPath> + <includes> + <include>git.properties</include> + </includes> + </resource> + </resources> </configuration> </execution> </executions> </plugin> </plugins> + + <pluginManagement> + <plugins> + <!-- Sorted by groupId, artifactId. 
--> + <plugin> + <groupId>com.googlecode.fmpp-maven-plugin</groupId> + <artifactId>fmpp-maven-plugin</artifactId> + <version>${fmpp-maven-plugin.version}</version> + <dependencies> + <dependency> + <groupId>org.freemarker</groupId> + <artifactId>freemarker</artifactId> + <version>${freemarker.version}</version> + </dependency> + </dependencies> + </plugin> + <plugin> + <groupId>net.hydromatic</groupId> + <artifactId>hydromatic-resource-maven-plugin</artifactId> + <version>${hydromatic-resource.version}</version> + </plugin> + <plugin> + <groupId>net.ju-n.maven.plugins</groupId> + <artifactId>checksum-maven-plugin</artifactId> + <version>${checksum-maven-plugin.version}</version> + </plugin> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-checkstyle-plugin</artifactId> + <version>${maven-checkstyle-plugin.version}</version> + </plugin> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-failsafe-plugin</artifactId> + <version>${maven-failsafe-plugin.version}</version> + <executions> + <execution> + <id>failsafe-integration-test</id> + <goals> + <goal>integration-test</goal> + </goals> + <phase>integration-test</phase> + <configuration> + <threadCount>6</threadCount> + <parallel>both</parallel> + <argLine>-Xmx1024m</argLine> + </configuration> + </execution> + <execution> + <id>failsafe-verify</id> + <goals> + <goal>verify</goal> + </goals> + <phase>verify</phase> + </execution> + </executions> + </plugin> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-shade-plugin</artifactId> + <version>${maven-shade-plugin.version}</version> + </plugin> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-source-plugin</artifactId> + <version>${maven-source-plugin.version}</version> + </plugin> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-surefire-plugin</artifactId> + <configuration> + <threadCount>1</threadCount> + 
<perCoreThreadCount>true</perCoreThreadCount> + <parallel>both</parallel> + <argLine>-Xmx1536m -XX:MaxPermSize=256m -Duser.timezone=${user.timezone}</argLine> + </configuration> + </plugin> + <plugin> + <groupId>org.codehaus.mojo</groupId> + <artifactId>build-helper-maven-plugin</artifactId> + <version>${build-helper-maven-plugin.version}</version> + </plugin> + <plugin> + <groupId>org.codehaus.mojo</groupId> + <artifactId>javacc-maven-plugin</artifactId> + <version>${javacc-maven-plugin.version}</version> + </plugin> + <plugin> + <groupId>pl.project13.maven</groupId> + <artifactId>git-commit-id-plugin</artifactId> + <version>${git-commit-id-plugin.version}</version> + </plugin> + </plugins> + </pluginManagement> </build> + + <reporting> + <plugins> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-javadoc-plugin</artifactId> + <configuration> + <links> + <link>http://docs.oracle.com/javase/8/docs/api/</link> + </links> + <tags> + <tag> + <name>sql.92</name> + <placement>a</placement> + <head>SQL 92 spec:</head> + </tag> + <tag> + <name>sql.99</name> + <placement>a</placement> + <head>SQL 99 spec:</head> + </tag> + <tag> + <name>sql.2003</name> + <placement>a</placement> + <head>SQL 2003 spec:</head> + </tag> + </tags> + <additionalparam>-tag sql.2003:a:xxx</additionalparam> + <notimestamp>true</notimestamp> + <windowtitle>Apache Calcite Avatica API</windowtitle> + </configuration> + </plugin> + </plugins> + </reporting> + + <repositories> + <repository> + <id>central</id> + <name>Central Repository</name> + <url>http://repo.maven.apache.org/maven2</url> + <layout>default</layout> + <snapshots> + <enabled>false</enabled> + </snapshots> + </repository> + <repository> + <releases> + <enabled>true</enabled> + <updatePolicy>always</updatePolicy> + <checksumPolicy>warn</checksumPolicy> + </releases> + <id>conjars</id> + <name>Conjars</name> + <url>http://conjars.org/repo</url> + <layout>default</layout> + </repository> + </repositories> + + 
<profiles> + <profile> + <!-- This profile adds/overrides few features of the 'apache-release' + profile in the parent pom. --> + <id>apache-release</id> + <build> + <plugins> + <!-- Apache-RAT checks for files without headers. + If run on a messy developer's sandbox, it will fail. + This serves as a reminder to only build a release in a clean + sandbox! --> + <plugin> + <groupId>org.apache.rat</groupId> + <artifactId>apache-rat-plugin</artifactId> + <configuration> + <excludes> + <!-- The following files have file formats that do not + admit comments, and therefore cannot contain + license notices. --> + <exclude>src/main/resources/META-INF/services/java.sql.Driver</exclude> + <exclude>src/main/resources/META-INF/services/org.apache.calcite.avatica.metrics.MetricsSystemFactory</exclude> + + <!-- Files generated by Jekyll. --> + <exclude>site/.sass-cache/**</exclude> + <exclude>site/_includes/anchor_links.html</exclude> + <exclude>site/_includes/docs_contents.html</exclude> + <exclude>site/_includes/docs_contents_mobile.html</exclude> + <exclude>site/_includes/docs_option.html</exclude> + <exclude>site/_includes/docs_ul.html</exclude> + <exclude>site/_includes/footer.html</exclude> + <exclude>site/_includes/header.html</exclude> + <exclude>site/_includes/news_contents.html</exclude> + <exclude>site/_includes/news_contents_mobile.html</exclude> + <exclude>site/_includes/news_item.html</exclude> + <exclude>site/_includes/primary-nav-items.html</exclude> + <exclude>site/_includes/section_nav.html</exclude> + <exclude>site/_includes/top.html</exclude> + <exclude>site/_layouts/default.html</exclude> + <exclude>site/_layouts/docs.html</exclude> + <exclude>site/_layouts/external.html</exclude> + <exclude>site/_layouts/news.html</exclude> + <exclude>site/_layouts/news_item.html</exclude> + <exclude>site/_layouts/page.html</exclude> + <exclude>site/_sass/**</exclude> + <exclude>site/css/screen.scss</exclude> + <exclude>site/fonts/**</exclude> + <exclude>site/js/**</exclude> 
+ + <!-- Images --> + <exclude>site/img/*.png</exclude> + <exclude>site/favicon.ico</exclude> + </excludes> + </configuration> + <executions> + <execution> + <phase>verify</phase> + <goals> + <goal>check</goal> + </goals> + </execution> + </executions> + </plugin> + <plugin> + <groupId>net.ju-n.maven.plugins</groupId> + <artifactId>checksum-maven-plugin</artifactId> + <executions> + <execution> + <goals> + <goal>artifacts</goal> + </goals> + </execution> + </executions> + <configuration> + <algorithms> + <algorithm>MD5</algorithm> + <algorithm>SHA-1</algorithm> + </algorithms> + <failOnError>false</failOnError> + </configuration> + </plugin> + <!-- Override the parent assembly execution to customize the assembly + descriptor and final name. --> + <plugin> + <artifactId>maven-assembly-plugin</artifactId> + <executions> + <execution> + <id>source-release-assembly</id> + <phase>none</phase> + </execution> + <execution> + <id>source-release-assembly-calcite</id> + <phase>package</phase> + <goals> + <goal>single</goal> + </goals> + <configuration> + <runOnlyAtExecutionRoot>true</runOnlyAtExecutionRoot> + <appendAssemblyId>false</appendAssemblyId> + <descriptor>${top.dir}/src/main/config/assemblies/source-assembly.xml</descriptor> + <finalName>apache-calcite-avatica-${project.version}-src</finalName> + <tarLongFileMode>gnu</tarLongFileMode> + </configuration> + </execution> + </executions> + </plugin> + </plugins> + </build> + </profile> + <profile> + <id>it</id> + <build> + <plugins> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-failsafe-plugin</artifactId> + </plugin> + </plugins> + </build> + </profile> + <profile> + <!-- CALCITE-537: workaround for MRRESOURCES-91 + Avoid overwrite of the destination file if the produced + contents is the same. + Apache pom overwrites NOTICE, DEPENDENCIES, and LICENSE files, however + we do not want recompile the module every time. 
--> + <id>skip-apache-licenses</id> + <activation> + <activeByDefault>true</activeByDefault> + <file><exists>target/maven-shared-archive-resources/META-INF/LICENSE</exists></file> + </activation> + <build> + <plugins> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-remote-resources-plugin</artifactId> + <configuration> + <skip>true</skip> + </configuration> + </plugin> + </plugins> + </build> + </profile> + <profile> + <!-- Generate git.properties on first build --> + <id>generate-git-id</id> + <activation> + <activeByDefault>false</activeByDefault> + <!-- Note: <missing> here does NOT support interpolation, + so technically, this profile is active for sub-modules. + It would be nice to use ${top.dir}/target/... here, but + it is not possible. + However sub-modules lack .git folder, so no git.properties + is generated. --> + <file> + <missing>target/git.properties</missing> + </file> + </activation> + <build> + <plugins> + <plugin> + <groupId>pl.project13.maven</groupId> + <artifactId>git-commit-id-plugin</artifactId> + <inherited>false</inherited> + <executions> + <execution> + <goals> + <goal>revision</goal> + </goals> + </execution> + </executions> + <configuration> + <dateFormat>yyyy-MM-dd'T'HH:mm:ssZ</dateFormat> + <verbose>false</verbose> + <skipPoms>false</skipPoms> + <generateGitPropertiesFile>true</generateGitPropertiesFile> + <generateGitPropertiesFilename>target/git.properties</generateGitPropertiesFilename> + <failOnNoGitDirectory>false</failOnNoGitDirectory> + <gitDescribe> + <skip>false</skip> + <always>false</always> + <abbrev>7</abbrev> + <dirty>-dirty</dirty> + <forceLongFormat>true</forceLongFormat> + </gitDescribe> + </configuration> + </plugin> + </plugins> + </build> + </profile> + </profiles> </project>
http://git-wip-us.apache.org/repos/asf/calcite/blob/5cee486f/avatica/server/pom.xml ---------------------------------------------------------------------- diff --git a/avatica/server/pom.xml b/avatica/server/pom.xml new file mode 100644 index 0000000..63a390d --- /dev/null +++ b/avatica/server/pom.xml @@ -0,0 +1,196 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!-- +Licensed to the Apache Software Foundation (ASF) under one or more +contributor license agreements. See the NOTICE file distributed with +this work for additional information regarding copyright ownership. +The ASF licenses this file to you under the Apache License, Version 2.0 +(the "License"); you may not use this file except in compliance with +the License. You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +--> +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + <modelVersion>4.0.0</modelVersion> + <parent> + <groupId>org.apache.calcite.avatica</groupId> + <artifactId>calcite-avatica-parent</artifactId> + <version>1.7.0-SNAPSHOT</version> + </parent> + + <artifactId>calcite-avatica-server</artifactId> + <packaging>jar</packaging> + <name>Calcite Avatica Server</name> + <description>JDBC server.</description> + + <properties> + <top.dir>${project.basedir}/..</top.dir> + </properties> + + <dependencies> + <!-- Sorted by groupId, artifactId; calcite dependencies first. Put versions + in dependencyManagement in the root POM, not here. 
--> + <dependency> + <groupId>org.apache.calcite</groupId> + <artifactId>calcite-avatica</artifactId> + </dependency> + <dependency> + <groupId>javax.servlet</groupId> + <artifactId>javax.servlet-api</artifactId> + </dependency> + <dependency> + <groupId>com.google.guava</groupId> + <artifactId>guava</artifactId> + </dependency> + <dependency> + <groupId>org.slf4j</groupId> + <artifactId>slf4j-api</artifactId> + </dependency> + <dependency> + <groupId>org.eclipse.jetty</groupId> + <artifactId>jetty-server</artifactId> + </dependency> + <dependency> + <groupId>org.eclipse.jetty</groupId> + <artifactId>jetty-util</artifactId> + </dependency> + + <!-- test dependencies --> + <dependency> + <groupId>org.apache.calcite</groupId> + <artifactId>calcite-avatica</artifactId> + <type>test-jar</type> + <scope>test</scope> + </dependency> + <dependency> + <groupId>junit</groupId> + <artifactId>junit</artifactId> + <scope>test</scope> + </dependency> + <dependency> + <!-- Used in RemoteDriverTest but dependency not detected by maven-dependency-plugin:2.8:analyze --> + <groupId>net.hydromatic</groupId> + <artifactId>scott-data-hsqldb</artifactId> + <scope>test</scope> + </dependency> + <dependency> + <groupId>org.hamcrest</groupId> + <artifactId>hamcrest-core</artifactId> + <scope>test</scope> + </dependency> + <dependency> + <!-- Used in RemoteDriverTest but dependency not detected by maven-dependency-plugin:2.8:analyze --> + <groupId>org.hsqldb</groupId> + <artifactId>hsqldb</artifactId> + <scope>test</scope> + </dependency> + <dependency> + <groupId>org.mockito</groupId> + <artifactId>mockito-all</artifactId> + <scope>test</scope> + </dependency> + <dependency> + <groupId>com.github.stephenc.jcip</groupId> + <artifactId>jcip-annotations</artifactId> + <scope>test</scope> + </dependency> + <dependency> + <groupId>org.slf4j</groupId> + <artifactId>slf4j-log4j12</artifactId> + <scope>test</scope> + </dependency> + </dependencies> + + <build> + <pluginManagement> + <plugins> + 
<plugin> + <groupId>org.eclipse.m2e</groupId> + <artifactId>lifecycle-mapping</artifactId> + <version>1.0.0</version> + <configuration> + <lifecycleMappingMetadata> + <pluginExecutions> + <pluginExecution> + <pluginExecutionFilter> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-checkstyle-plugin</artifactId> + <versionRange>[2.12.1,)</versionRange> + <goals> + <goal>check</goal> + </goals> + </pluginExecutionFilter> + <action> + <ignore /> + </action> + </pluginExecution> + </pluginExecutions> + </lifecycleMappingMetadata> + </configuration> + </plugin> + </plugins> + </pluginManagement> + <plugins> + <!-- Parent module has the same plugin and does the work of + generating -sources.jar for each project. But without the + plugin declared here, IDEs don't know the sources are + available. --> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-source-plugin</artifactId> + <executions> + <execution> + <id>attach-sources</id> + <phase>verify</phase> + <goals> + <goal>jar-no-fork</goal> + <goal>test-jar-no-fork</goal> + </goals> + </execution> + </executions> + </plugin> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-dependency-plugin</artifactId> + <version>${maven-dependency-plugin.version}</version> + <!-- configurations do not cascade, so all of the definition from + ../pom.xml:build:plugin-management:plugins:plugin must be repeated in child poms --> + <executions> + <execution> + <id>analyze</id> + <goals> + <goal>analyze-only</goal> + </goals> + <configuration> + <failOnWarning>true</failOnWarning> + <!-- ignore "unused but declared" warnings --> + <ignoredUnusedDeclaredDependencies> + <ignoredUnusedDeclaredDependency>io.dropwizard.metrics:metrics-core</ignoredUnusedDeclaredDependency> + <ignoredUnusedDeclaredDependency>net.hydromatic:scott-data-hsqldb</ignoredUnusedDeclaredDependency> + <ignoredUnusedDeclaredDependency>org.hsqldb:hsqldb</ignoredUnusedDeclaredDependency> + 
<ignoredUnusedDeclaredDependency>org.slf4j:slf4j-api</ignoredUnusedDeclaredDependency> + <ignoredUnusedDeclaredDependency>org.slf4j:slf4j-log4j12</ignoredUnusedDeclaredDependency> + </ignoredUnusedDeclaredDependencies> + </configuration> + </execution> + </executions> + </plugin> + <plugin> + <artifactId>maven-remote-resources-plugin</artifactId> + <executions> + <execution> + <id>non-root-resources</id> + <goals> + <goal>process</goal> + </goals> + </execution> + </executions> + </plugin> + </plugins> + </build> +</project> http://git-wip-us.apache.org/repos/asf/calcite/blob/5cee486f/avatica/server/src/main/java/org/apache/calcite/avatica/jdbc/JdbcMeta.java ---------------------------------------------------------------------- diff --git a/avatica/server/src/main/java/org/apache/calcite/avatica/jdbc/JdbcMeta.java b/avatica/server/src/main/java/org/apache/calcite/avatica/jdbc/JdbcMeta.java new file mode 100644 index 0000000..dfe7f99 --- /dev/null +++ b/avatica/server/src/main/java/org/apache/calcite/avatica/jdbc/JdbcMeta.java @@ -0,0 +1,976 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.calcite.avatica.jdbc; + +import org.apache.calcite.avatica.AvaticaParameter; +import org.apache.calcite.avatica.AvaticaPreparedStatement; +import org.apache.calcite.avatica.AvaticaUtils; +import org.apache.calcite.avatica.ColumnMetaData; +import org.apache.calcite.avatica.ConnectionPropertiesImpl; +import org.apache.calcite.avatica.Meta; +import org.apache.calcite.avatica.MetaImpl; +import org.apache.calcite.avatica.MissingResultsException; +import org.apache.calcite.avatica.NoSuchConnectionException; +import org.apache.calcite.avatica.NoSuchStatementException; +import org.apache.calcite.avatica.QueryState; +import org.apache.calcite.avatica.SqlType; +import org.apache.calcite.avatica.metrics.Gauge; +import org.apache.calcite.avatica.metrics.MetricsSystem; +import org.apache.calcite.avatica.metrics.noop.NoopMetricsSystem; +import org.apache.calcite.avatica.remote.TypedValue; + +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.RemovalListener; +import com.google.common.cache.RemovalNotification; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static org.apache.calcite.avatica.remote.MetricsHelper.concat; + +import java.lang.reflect.InvocationTargetException; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.DriverManager; +import java.sql.ParameterMetaData; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Properties; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +/** Implementation of {@link Meta} upon an existing JDBC data source. 
*/ +public class JdbcMeta implements Meta { + private static final Logger LOG = LoggerFactory.getLogger(JdbcMeta.class); + + private static final String CONN_CACHE_KEY_BASE = "avatica.connectioncache"; + + private static final String STMT_CACHE_KEY_BASE = "avatica.statementcache"; + + /** Special value for {@code Statement#getLargeMaxRows()} that means fetch + * an unlimited number of rows in a single batch. + * + * <p>Any other negative value will return an unlimited number of rows but + * will do it in the default batch size, namely 100. */ + public static final long UNLIMITED_COUNT = -2L; + + // End of constants, start of member variables + + final Calendar calendar = Calendar.getInstance(); + + /** Generates ids for statements. The ids are unique across all connections + * created by this JdbcMeta. */ + private final AtomicInteger statementIdGenerator = new AtomicInteger(); + + private final String url; + private final Properties info; + private final Cache<String, Connection> connectionCache; + private final Cache<Integer, StatementInfo> statementCache; + private final MetricsSystem metrics; + + /** + * Creates a JdbcMeta. + * + * @param url a database url of the form + * <code>jdbc:<em>subprotocol</em>:<em>subname</em></code> + */ + public JdbcMeta(String url) throws SQLException { + this(url, new Properties()); + } + + /** + * Creates a JdbcMeta. + * + * @param url a database url of the form + * <code>jdbc:<em>subprotocol</em>:<em>subname</em></code> + * @param user the database user on whose behalf the connection is being + * made + * @param password the user's password + */ + public JdbcMeta(final String url, final String user, final String password) + throws SQLException { + this(url, new Properties() { + { + put("user", user); + put("password", password); + } + }); + } + + public JdbcMeta(String url, Properties info) throws SQLException { + this(url, info, NoopMetricsSystem.getInstance()); + } + + /** + * Creates a JdbcMeta. 
+ * + * @param url a database url of the form + * <code> jdbc:<em>subprotocol</em>:<em>subname</em></code> + * @param info a list of arbitrary string tag/value pairs as + * connection arguments; normally at least a "user" and + * "password" property should be included + */ + public JdbcMeta(String url, Properties info, MetricsSystem metrics) + throws SQLException { + this.url = url; + this.info = info; + this.metrics = Objects.requireNonNull(metrics); + + int concurrencyLevel = Integer.parseInt( + info.getProperty(ConnectionCacheSettings.CONCURRENCY_LEVEL.key(), + ConnectionCacheSettings.CONCURRENCY_LEVEL.defaultValue())); + int initialCapacity = Integer.parseInt( + info.getProperty(ConnectionCacheSettings.INITIAL_CAPACITY.key(), + ConnectionCacheSettings.INITIAL_CAPACITY.defaultValue())); + long maxCapacity = Long.parseLong( + info.getProperty(ConnectionCacheSettings.MAX_CAPACITY.key(), + ConnectionCacheSettings.MAX_CAPACITY.defaultValue())); + long connectionExpiryDuration = Long.parseLong( + info.getProperty(ConnectionCacheSettings.EXPIRY_DURATION.key(), + ConnectionCacheSettings.EXPIRY_DURATION.defaultValue())); + TimeUnit connectionExpiryUnit = TimeUnit.valueOf( + info.getProperty(ConnectionCacheSettings.EXPIRY_UNIT.key(), + ConnectionCacheSettings.EXPIRY_UNIT.defaultValue())); + this.connectionCache = CacheBuilder.newBuilder() + .concurrencyLevel(concurrencyLevel) + .initialCapacity(initialCapacity) + .maximumSize(maxCapacity) + .expireAfterAccess(connectionExpiryDuration, connectionExpiryUnit) + .removalListener(new ConnectionExpiryHandler()) + .build(); + LOG.debug("instantiated connection cache: {}", connectionCache.stats()); + + concurrencyLevel = Integer.parseInt( + info.getProperty(StatementCacheSettings.CONCURRENCY_LEVEL.key(), + StatementCacheSettings.CONCURRENCY_LEVEL.defaultValue())); + initialCapacity = Integer.parseInt( + info.getProperty(StatementCacheSettings.INITIAL_CAPACITY.key(), + StatementCacheSettings.INITIAL_CAPACITY.defaultValue())); + 
maxCapacity = Long.parseLong( + info.getProperty(StatementCacheSettings.MAX_CAPACITY.key(), + StatementCacheSettings.MAX_CAPACITY.defaultValue())); + connectionExpiryDuration = Long.parseLong( + info.getProperty(StatementCacheSettings.EXPIRY_DURATION.key(), + StatementCacheSettings.EXPIRY_DURATION.defaultValue())); + connectionExpiryUnit = TimeUnit.valueOf( + info.getProperty(StatementCacheSettings.EXPIRY_UNIT.key(), + StatementCacheSettings.EXPIRY_UNIT.defaultValue())); + this.statementCache = CacheBuilder.newBuilder() + .concurrencyLevel(concurrencyLevel) + .initialCapacity(initialCapacity) + .maximumSize(maxCapacity) + .expireAfterAccess(connectionExpiryDuration, connectionExpiryUnit) + .removalListener(new StatementExpiryHandler()) + .build(); + + LOG.debug("instantiated statement cache: {}", statementCache.stats()); + + // Register some metrics + this.metrics.register(concat(JdbcMeta.class, "ConnectionCacheSize"), new Gauge<Long>() { + @Override public Long getValue() { + return connectionCache.size(); + } + }); + + this.metrics.register(concat(JdbcMeta.class, "StatementCacheSize"), new Gauge<Long>() { + @Override public Long getValue() { + return statementCache.size(); + } + }); + } + + /** + * Converts from JDBC metadata to Avatica columns. 
+ */ + protected static List<ColumnMetaData> + columns(ResultSetMetaData metaData) throws SQLException { + if (metaData == null) { + return Collections.emptyList(); + } + final List<ColumnMetaData> columns = new ArrayList<>(); + for (int i = 1; i <= metaData.getColumnCount(); i++) { + final SqlType sqlType = SqlType.valueOf(metaData.getColumnType(i)); + final ColumnMetaData.Rep rep = ColumnMetaData.Rep.of(sqlType.internal); + final ColumnMetaData.AvaticaType t; + if (sqlType == SqlType.ARRAY || sqlType == SqlType.STRUCT || sqlType == SqlType.MULTISET) { + ColumnMetaData.AvaticaType arrayValueType = ColumnMetaData.scalar(Types.JAVA_OBJECT, + metaData.getColumnTypeName(i), ColumnMetaData.Rep.OBJECT); + t = ColumnMetaData.array(arrayValueType, metaData.getColumnTypeName(i), rep); + } else { + t = ColumnMetaData.scalar(metaData.getColumnType(i), metaData.getColumnTypeName(i), rep); + } + ColumnMetaData md = + new ColumnMetaData(i - 1, metaData.isAutoIncrement(i), + metaData.isCaseSensitive(i), metaData.isSearchable(i), + metaData.isCurrency(i), metaData.isNullable(i), + metaData.isSigned(i), metaData.getColumnDisplaySize(i), + metaData.getColumnLabel(i), metaData.getColumnName(i), + metaData.getSchemaName(i), metaData.getPrecision(i), + metaData.getScale(i), metaData.getTableName(i), + metaData.getCatalogName(i), t, metaData.isReadOnly(i), + metaData.isWritable(i), metaData.isDefinitelyWritable(i), + metaData.getColumnClassName(i)); + columns.add(md); + } + return columns; + } + + /** + * Converts from JDBC metadata to Avatica parameters + */ + protected static List<AvaticaParameter> parameters(ParameterMetaData metaData) + throws SQLException { + if (metaData == null) { + return Collections.emptyList(); + } + final List<AvaticaParameter> params = new ArrayList<>(); + for (int i = 1; i <= metaData.getParameterCount(); i++) { + params.add( + new AvaticaParameter(metaData.isSigned(i), metaData.getPrecision(i), + metaData.getScale(i), metaData.getParameterType(i), + 
metaData.getParameterTypeName(i), + metaData.getParameterClassName(i), "?" + i)); + } + return params; + } + + protected static Signature signature(ResultSetMetaData metaData, + ParameterMetaData parameterMetaData, String sql, + Meta.StatementType statementType) throws SQLException { + final CursorFactory cf = CursorFactory.LIST; // because JdbcResultSet#frame + return new Signature(columns(metaData), sql, parameters(parameterMetaData), + null, cf, statementType); + } + + protected static Signature signature(ResultSetMetaData metaData) + throws SQLException { + return signature(metaData, null, null, null); + } + + public Map<DatabaseProperty, Object> getDatabaseProperties(ConnectionHandle ch) { + try { + final Map<DatabaseProperty, Object> map = new HashMap<>(); + final DatabaseMetaData metaData = getConnection(ch.id).getMetaData(); + for (DatabaseProperty p : DatabaseProperty.values()) { + addProperty(map, metaData, p); + } + return map; + } catch (SQLException e) { + throw new RuntimeException(e); + } + } + + private static Object addProperty(Map<DatabaseProperty, Object> map, + DatabaseMetaData metaData, DatabaseProperty p) throws SQLException { + try { + return map.put(p, p.method.invoke(metaData)); + } catch (IllegalAccessException | InvocationTargetException e) { + throw new RuntimeException(e); + } + } + + public MetaResultSet getTables(ConnectionHandle ch, String catalog, Pat schemaPattern, + Pat tableNamePattern, List<String> typeList) { + try { + final ResultSet rs = + getConnection(ch.id).getMetaData().getTables(catalog, schemaPattern.s, + tableNamePattern.s, toArray(typeList)); + int stmtId = registerMetaStatement(rs); + return JdbcResultSet.create(ch.id, stmtId, rs); + } catch (SQLException e) { + throw new RuntimeException(e); + } + } + + /** + * Registers a StatementInfo for the given ResultSet, returning the id under + * which it is registered. This should be used for metadata ResultSets, which + * have an implicit statement created. 
+ */ + private int registerMetaStatement(ResultSet rs) throws SQLException { + final int id = statementIdGenerator.getAndIncrement(); + StatementInfo statementInfo = new StatementInfo(rs.getStatement()); + statementInfo.setResultSet(rs); + statementCache.put(id, statementInfo); + return id; + } + + public MetaResultSet getColumns(ConnectionHandle ch, String catalog, Pat schemaPattern, + Pat tableNamePattern, Pat columnNamePattern) { + try { + final ResultSet rs = + getConnection(ch.id).getMetaData().getColumns(catalog, schemaPattern.s, + tableNamePattern.s, columnNamePattern.s); + int stmtId = registerMetaStatement(rs); + return JdbcResultSet.create(ch.id, stmtId, rs); + } catch (SQLException e) { + throw new RuntimeException(e); + } + } + + public MetaResultSet getSchemas(ConnectionHandle ch, String catalog, Pat schemaPattern) { + try { + final ResultSet rs = + getConnection(ch.id).getMetaData().getSchemas(catalog, schemaPattern.s); + int stmtId = registerMetaStatement(rs); + return JdbcResultSet.create(ch.id, stmtId, rs); + } catch (SQLException e) { + throw new RuntimeException(e); + } + } + + public MetaResultSet getCatalogs(ConnectionHandle ch) { + try { + final ResultSet rs = getConnection(ch.id).getMetaData().getCatalogs(); + int stmtId = registerMetaStatement(rs); + return JdbcResultSet.create(ch.id, stmtId, rs); + } catch (SQLException e) { + throw new RuntimeException(e); + } + } + + public MetaResultSet getTableTypes(ConnectionHandle ch) { + try { + final ResultSet rs = getConnection(ch.id).getMetaData().getTableTypes(); + int stmtId = registerMetaStatement(rs); + return JdbcResultSet.create(ch.id, stmtId, rs); + } catch (SQLException e) { + throw new RuntimeException(e); + } + } + + public MetaResultSet getProcedures(ConnectionHandle ch, String catalog, Pat schemaPattern, + Pat procedureNamePattern) { + try { + final ResultSet rs = + getConnection(ch.id).getMetaData().getProcedures(catalog, schemaPattern.s, + procedureNamePattern.s); + int stmtId = 
registerMetaStatement(rs); + return JdbcResultSet.create(ch.id, stmtId, rs); + } catch (SQLException e) { + throw new RuntimeException(e); + } + } + + public MetaResultSet getProcedureColumns(ConnectionHandle ch, String catalog, Pat schemaPattern, + Pat procedureNamePattern, Pat columnNamePattern) { + try { + final ResultSet rs = + getConnection(ch.id).getMetaData().getProcedureColumns(catalog, + schemaPattern.s, procedureNamePattern.s, columnNamePattern.s); + int stmtId = registerMetaStatement(rs); + return JdbcResultSet.create(ch.id, stmtId, rs); + } catch (SQLException e) { + throw new RuntimeException(e); + } + } + + public MetaResultSet getColumnPrivileges(ConnectionHandle ch, String catalog, String schema, + String table, Pat columnNamePattern) { + try { + final ResultSet rs = + getConnection(ch.id).getMetaData().getColumnPrivileges(catalog, schema, + table, columnNamePattern.s); + int stmtId = registerMetaStatement(rs); + return JdbcResultSet.create(ch.id, stmtId, rs); + } catch (SQLException e) { + throw new RuntimeException(e); + } + } + + public MetaResultSet getTablePrivileges(ConnectionHandle ch, String catalog, Pat schemaPattern, + Pat tableNamePattern) { + try { + final ResultSet rs = + getConnection(ch.id).getMetaData().getTablePrivileges(catalog, + schemaPattern.s, tableNamePattern.s); + int stmtId = registerMetaStatement(rs); + return JdbcResultSet.create(ch.id, stmtId, rs); + } catch (SQLException e) { + throw new RuntimeException(e); + } + } + + public MetaResultSet getBestRowIdentifier(ConnectionHandle ch, String catalog, String schema, + String table, int scope, boolean nullable) { + LOG.trace("getBestRowIdentifier catalog:{} schema:{} table:{} scope:{} nullable:{}", catalog, + schema, table, scope, nullable); + try { + final ResultSet rs = + getConnection(ch.id).getMetaData().getBestRowIdentifier(catalog, schema, + table, scope, nullable); + int stmtId = registerMetaStatement(rs); + return JdbcResultSet.create(ch.id, stmtId, rs); + } catch 
(SQLException e) { + throw new RuntimeException(e); + } + } + + public MetaResultSet getVersionColumns(ConnectionHandle ch, String catalog, String schema, + String table) { + LOG.trace("getVersionColumns catalog:{} schema:{} table:{}", catalog, schema, table); + try { + final ResultSet rs = + getConnection(ch.id).getMetaData().getVersionColumns(catalog, schema, table); + int stmtId = registerMetaStatement(rs); + return JdbcResultSet.create(ch.id, stmtId, rs); + } catch (SQLException e) { + throw new RuntimeException(e); + } + } + + public MetaResultSet getPrimaryKeys(ConnectionHandle ch, String catalog, String schema, + String table) { + LOG.trace("getPrimaryKeys catalog:{} schema:{} table:{}", catalog, schema, table); + try { + final ResultSet rs = + getConnection(ch.id).getMetaData().getPrimaryKeys(catalog, schema, table); + int stmtId = registerMetaStatement(rs); + return JdbcResultSet.create(ch.id, stmtId, rs); + } catch (SQLException e) { + throw new RuntimeException(e); + } + } + + public MetaResultSet getImportedKeys(ConnectionHandle ch, String catalog, String schema, + String table) { + return null; + } + + public MetaResultSet getExportedKeys(ConnectionHandle ch, String catalog, String schema, + String table) { + return null; + } + + public MetaResultSet getCrossReference(ConnectionHandle ch, String parentCatalog, + String parentSchema, String parentTable, String foreignCatalog, + String foreignSchema, String foreignTable) { + return null; + } + + public MetaResultSet getTypeInfo(ConnectionHandle ch) { + try { + final ResultSet rs = getConnection(ch.id).getMetaData().getTypeInfo(); + int stmtId = registerMetaStatement(rs); + return JdbcResultSet.create(ch.id, stmtId, rs); + } catch (SQLException e) { + throw new RuntimeException(e); + } + } + + public MetaResultSet getIndexInfo(ConnectionHandle ch, String catalog, String schema, + String table, boolean unique, boolean approximate) { + return null; + } + + public MetaResultSet getUDTs(ConnectionHandle ch, 
String catalog, Pat schemaPattern, + Pat typeNamePattern, int[] types) { + return null; + } + + public MetaResultSet getSuperTypes(ConnectionHandle ch, String catalog, Pat schemaPattern, + Pat typeNamePattern) { + return null; + } + + public MetaResultSet getSuperTables(ConnectionHandle ch, String catalog, Pat schemaPattern, + Pat tableNamePattern) { + return null; + } + + public MetaResultSet getAttributes(ConnectionHandle ch, String catalog, Pat schemaPattern, + Pat typeNamePattern, Pat attributeNamePattern) { + return null; + } + + public MetaResultSet getClientInfoProperties(ConnectionHandle ch) { + return null; + } + + public MetaResultSet getFunctions(ConnectionHandle ch, String catalog, Pat schemaPattern, + Pat functionNamePattern) { + return null; + } + + public MetaResultSet getFunctionColumns(ConnectionHandle ch, String catalog, Pat schemaPattern, + Pat functionNamePattern, Pat columnNamePattern) { + return null; + } + + public MetaResultSet getPseudoColumns(ConnectionHandle ch, String catalog, Pat schemaPattern, + Pat tableNamePattern, Pat columnNamePattern) { + return null; + } + + public Iterable<Object> createIterable(StatementHandle handle, QueryState state, + Signature signature, List<TypedValue> parameterValues, Frame firstFrame) { + return null; + } + + protected Connection getConnection(String id) throws SQLException { + if (id == null) { + throw new NullPointerException("Connection id is null."); + } + Connection conn = connectionCache.getIfPresent(id); + if (conn == null) { + throw new NoSuchConnectionException("Connection not found: invalid id, closed, or expired: " + + id); + } + return conn; + } + + public StatementHandle createStatement(ConnectionHandle ch) { + try { + final Connection conn = getConnection(ch.id); + final Statement statement = conn.createStatement(); + final int id = statementIdGenerator.getAndIncrement(); + statementCache.put(id, new StatementInfo(statement)); + StatementHandle h = new StatementHandle(ch.id, id, null); + 
LOG.trace("created statement {}", h); + return h; + } catch (SQLException e) { + throw propagate(e); + } + } + + @Override public void closeStatement(StatementHandle h) { + StatementInfo info = statementCache.getIfPresent(h.id); + if (info == null || info.statement == null) { + LOG.debug("client requested close unknown statement {}", h); + return; + } + LOG.trace("closing statement {}", h); + try { + ResultSet results = info.getResultSet(); + if (info.isResultSetInitialized() && null != results) { + results.close(); + } + info.statement.close(); + } catch (SQLException e) { + throw propagate(e); + } finally { + statementCache.invalidate(h.id); + } + } + + @Override public void openConnection(ConnectionHandle ch, + Map<String, String> info) { + Properties fullInfo = new Properties(); + fullInfo.putAll(this.info); + if (info != null) { + fullInfo.putAll(info); + } + + synchronized (this) { + try { + if (connectionCache.asMap().containsKey(ch.id)) { + throw new RuntimeException("Connection already exists: " + ch.id); + } + Connection conn = DriverManager.getConnection(url, fullInfo); + connectionCache.put(ch.id, conn); + } catch (SQLException e) { + throw new RuntimeException(e); + } + } + } + + @Override public void closeConnection(ConnectionHandle ch) { + Connection conn = connectionCache.getIfPresent(ch.id); + if (conn == null) { + LOG.debug("client requested close unknown connection {}", ch); + return; + } + LOG.trace("closing connection {}", ch); + try { + conn.close(); + } catch (SQLException e) { + throw propagate(e); + } finally { + connectionCache.invalidate(ch.id); + } + } + + protected void apply(Connection conn, ConnectionProperties connProps) + throws SQLException { + if (connProps.isAutoCommit() != null) { + conn.setAutoCommit(connProps.isAutoCommit()); + } + if (connProps.isReadOnly() != null) { + conn.setReadOnly(connProps.isReadOnly()); + } + if (connProps.getTransactionIsolation() != null) { + 
conn.setTransactionIsolation(connProps.getTransactionIsolation()); + } + if (connProps.getCatalog() != null) { + conn.setCatalog(connProps.getCatalog()); + } + if (connProps.getSchema() != null) { + conn.setSchema(connProps.getSchema()); + } + } + + @Override public ConnectionProperties connectionSync(ConnectionHandle ch, + ConnectionProperties connProps) { + LOG.trace("syncing properties for connection {}", ch); + try { + Connection conn = getConnection(ch.id); + ConnectionPropertiesImpl props = new ConnectionPropertiesImpl(conn).merge(connProps); + if (props.isDirty()) { + apply(conn, props); + props.setDirty(false); + } + return props; + } catch (SQLException e) { + throw propagate(e); + } + } + + RuntimeException propagate(Throwable e) { + if (e instanceof RuntimeException) { + throw (RuntimeException) e; + } else if (e instanceof Error) { + throw (Error) e; + } else { + throw new RuntimeException(e); + } + } + + public StatementHandle prepare(ConnectionHandle ch, String sql, + long maxRowCount) { + try { + final Connection conn = getConnection(ch.id); + final PreparedStatement statement = conn.prepareStatement(sql); + final int id = statementIdGenerator.getAndIncrement(); + Meta.StatementType statementType = null; + if (statement.isWrapperFor(AvaticaPreparedStatement.class)) { + final AvaticaPreparedStatement avaticaPreparedStatement; + avaticaPreparedStatement = + statement.unwrap(AvaticaPreparedStatement.class); + statementType = avaticaPreparedStatement.getStatementType(); + } + statementCache.put(id, new StatementInfo(statement)); + StatementHandle h = new StatementHandle(ch.id, id, + signature(statement.getMetaData(), statement.getParameterMetaData(), + sql, statementType)); + LOG.trace("prepared statement {}", h); + return h; + } catch (SQLException e) { + throw propagate(e); + } + } + + public ExecuteResult prepareAndExecute(StatementHandle h, String sql, + long maxRowCount, PrepareCallback callback) throws NoSuchStatementException { + try { + final 
StatementInfo info = statementCache.getIfPresent(h.id); + if (info == null) { + throw new NoSuchStatementException(h); + } + final Statement statement = info.statement; + // Special handling of maxRowCount as JDBC 0 is unlimited, our meta 0 row + if (maxRowCount > 0) { + AvaticaUtils.setLargeMaxRows(statement, maxRowCount); + } else if (maxRowCount < 0) { + statement.setMaxRows(0); + } + boolean ret = statement.execute(sql); + info.setResultSet(statement.getResultSet()); + // Either execute(sql) returned true or the resultSet was null + assert ret || null == info.getResultSet(); + final List<MetaResultSet> resultSets = new ArrayList<>(); + if (null == info.getResultSet()) { + // Create a special result set that just carries update count + resultSets.add( + JdbcResultSet.count(h.connectionId, h.id, + AvaticaUtils.getLargeUpdateCount(statement))); + } else { + resultSets.add( + JdbcResultSet.create(h.connectionId, h.id, info.getResultSet(), maxRowCount)); + } + LOG.trace("prepAndExec statement {}", h); + // TODO: review client to ensure statementId is updated when appropriate + return new ExecuteResult(resultSets); + } catch (SQLException e) { + throw propagate(e); + } + } + + public boolean syncResults(StatementHandle sh, QueryState state, long offset) + throws NoSuchStatementException { + try { + final Connection conn = getConnection(sh.connectionId); + final StatementInfo info = statementCache.getIfPresent(sh.id); + if (null == info) { + throw new NoSuchStatementException(sh); + } + final Statement statement = info.statement; + // Let the state recreate the necessary ResultSet on the Statement + info.setResultSet(state.invoke(conn, statement)); + + if (null != info.getResultSet()) { + // If it is non-null, try to advance to the requested offset. + return info.advanceResultSetToOffset(info.getResultSet(), offset); + } + + // No results, nothing to do. Client can move on. 
+ return false; + } catch (SQLException e) { + throw propagate(e); + } + } + + public Frame fetch(StatementHandle h, long offset, int fetchMaxRowCount) throws + NoSuchStatementException, MissingResultsException { + LOG.trace("fetching {} offset:{} fetchMaxRowCount:{}", h, offset, fetchMaxRowCount); + try { + final StatementInfo statementInfo = statementCache.getIfPresent(h.id); + if (null == statementInfo) { + // Statement might have expired, or never existed on this server. + throw new NoSuchStatementException(h); + } + + if (!statementInfo.isResultSetInitialized()) { + // The Statement exists, but the results are missing. Need to call syncResults(...) + throw new MissingResultsException(h); + } + if (statementInfo.getResultSet() == null) { + return Frame.EMPTY; + } else { + return JdbcResultSet.frame(statementInfo, statementInfo.getResultSet(), offset, + fetchMaxRowCount, calendar); + } + } catch (SQLException e) { + throw propagate(e); + } + } + + private static String[] toArray(List<String> typeList) { + if (typeList == null) { + return null; + } + return typeList.toArray(new String[typeList.size()]); + } + + @Override public ExecuteResult execute(StatementHandle h, + List<TypedValue> parameterValues, long maxRowCount) throws NoSuchStatementException { + try { + if (MetaImpl.checkParameterValueHasNull(parameterValues)) { + throw new SQLException("exception while executing query: unbound parameter"); + } + + final StatementInfo statementInfo = statementCache.getIfPresent(h.id); + if (null == statementInfo) { + throw new NoSuchStatementException(h); + } + final List<MetaResultSet> resultSets; + final PreparedStatement preparedStatement = + (PreparedStatement) statementInfo.statement; + + if (parameterValues != null) { + for (int i = 0; i < parameterValues.size(); i++) { + TypedValue o = parameterValues.get(i); + preparedStatement.setObject(i + 1, o.toJdbc(calendar)); + } + } + + if (preparedStatement.execute()) { + final Meta.Frame frame; + final Signature 
signature2; + if (preparedStatement.isWrapperFor(AvaticaPreparedStatement.class)) { + signature2 = h.signature; + } else { + h.signature = signature(preparedStatement.getMetaData(), + preparedStatement.getParameterMetaData(), h.signature.sql, + Meta.StatementType.SELECT); + signature2 = h.signature; + } + + // Make sure we set this for subsequent fetch()'s to find the result set. + statementInfo.setResultSet(preparedStatement.getResultSet()); + + if (statementInfo.getResultSet() == null) { + frame = Frame.EMPTY; + resultSets = Collections.<MetaResultSet>singletonList( + JdbcResultSet.empty(h.connectionId, h.id, signature2)); + } else { + resultSets = Collections.<MetaResultSet>singletonList( + JdbcResultSet.create(h.connectionId, h.id, statementInfo.getResultSet(), + maxRowCount, signature2)); + } + } else { + resultSets = Collections.<MetaResultSet>singletonList( + JdbcResultSet.count(h.connectionId, h.id, preparedStatement.getUpdateCount())); + } + + return new ExecuteResult(resultSets); + } catch (SQLException e) { + throw propagate(e); + } + } + + @Override public void commit(ConnectionHandle ch) { + try { + final Connection conn = getConnection(ch.id); + conn.commit(); + } catch (SQLException e) { + throw propagate(e); + } + } + + @Override public void rollback(ConnectionHandle ch) { + try { + final Connection conn = getConnection(ch.id); + conn.rollback(); + } catch (SQLException e) { + throw propagate(e); + } + } + + /** Configurable statement cache settings. */ + public enum StatementCacheSettings { + /** JDBC connection property for setting statement cache concurrency level. */ + CONCURRENCY_LEVEL(STMT_CACHE_KEY_BASE + ".concurrency", "100"), + + /** JDBC connection property for setting statement cache initial capacity. */ + INITIAL_CAPACITY(STMT_CACHE_KEY_BASE + ".initialcapacity", "1000"), + + /** JDBC connection property for setting statement cache maximum capacity. 
*/ + MAX_CAPACITY(STMT_CACHE_KEY_BASE + ".maxcapacity", "10000"), + + /** JDBC connection property for setting statement cache expiration duration. + * + * <p>Used in conjunction with {@link #EXPIRY_UNIT}.</p> + */ + EXPIRY_DURATION(STMT_CACHE_KEY_BASE + ".expiryduration", "5"), + + /** JDBC connection property for setting statement cache expiration unit. + * + * <p>Used in conjunction with {@link #EXPIRY_DURATION}.</p> + */ + EXPIRY_UNIT(STMT_CACHE_KEY_BASE + ".expiryunit", TimeUnit.MINUTES.name()); + + private final String key; + private final String defaultValue; + + StatementCacheSettings(String key, String defaultValue) { + this.key = key; + this.defaultValue = defaultValue; + } + + /** The configuration key for specifying this setting. */ + public String key() { + return key; + } + + /** The default value for this setting. */ + public String defaultValue() { + return defaultValue; + } + } + + /** Configurable connection cache settings. */ + public enum ConnectionCacheSettings { + /** JDBC connection property for setting connection cache concurrency level. */ + CONCURRENCY_LEVEL(CONN_CACHE_KEY_BASE + ".concurrency", "10"), + + /** JDBC connection property for setting connection cache initial capacity. */ + INITIAL_CAPACITY(CONN_CACHE_KEY_BASE + ".initialcapacity", "100"), + + /** JDBC connection property for setting connection cache maximum capacity. */ + MAX_CAPACITY(CONN_CACHE_KEY_BASE + ".maxcapacity", "1000"), + + /** JDBC connection property for setting connection cache expiration duration. */ + EXPIRY_DURATION(CONN_CACHE_KEY_BASE + ".expiryduration", "10"), + + /** JDBC connection property for setting connection cache expiration unit. 
*/ + EXPIRY_UNIT(CONN_CACHE_KEY_BASE + ".expiryunit", TimeUnit.MINUTES.name()); + + private final String key; + private final String defaultValue; + + ConnectionCacheSettings(String key, String defaultValue) { + this.key = key; + this.defaultValue = defaultValue; + } + + /** The configuration key for specifying this setting. */ + public String key() { + return key; + } + + /** The default value for this setting. */ + public String defaultValue() { + return defaultValue; + } + } + + /** Callback for {@link #connectionCache} member expiration. */ + private class ConnectionExpiryHandler + implements RemovalListener<String, Connection> { + + public void onRemoval(RemovalNotification<String, Connection> notification) { + String connectionId = notification.getKey(); + Connection doomed = notification.getValue(); + LOG.debug("Expiring connection {} because {}", connectionId, notification.getCause()); + try { + if (doomed != null) { + doomed.close(); + } + } catch (Throwable t) { + LOG.info("Exception thrown while expiring connection {}", connectionId, t); + } + } + } + + /** Callback for {@link #statementCache} member expiration. */ + private class StatementExpiryHandler + implements RemovalListener<Integer, StatementInfo> { + public void onRemoval(RemovalNotification<Integer, StatementInfo> notification) { + Integer stmtId = notification.getKey(); + StatementInfo doomed = notification.getValue(); + if (doomed == null) { + // log/throw? 
+ return; + } + LOG.debug("Expiring statement {} because {}", stmtId, notification.getCause()); + try { + if (doomed.getResultSet() != null) { + doomed.getResultSet().close(); + } + if (doomed.statement != null) { + doomed.statement.close(); + } + } catch (Throwable t) { + LOG.info("Exception thrown while expiring statement {}", stmtId, t); + } + } + } +} + +// End JdbcMeta.java http://git-wip-us.apache.org/repos/asf/calcite/blob/5cee486f/avatica/server/src/main/java/org/apache/calcite/avatica/jdbc/JdbcResultSet.java ---------------------------------------------------------------------- diff --git a/avatica/server/src/main/java/org/apache/calcite/avatica/jdbc/JdbcResultSet.java b/avatica/server/src/main/java/org/apache/calcite/avatica/jdbc/JdbcResultSet.java new file mode 100644 index 0000000..6630124 --- /dev/null +++ b/avatica/server/src/main/java/org/apache/calcite/avatica/jdbc/JdbcResultSet.java @@ -0,0 +1,215 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.calcite.avatica.jdbc; + +import org.apache.calcite.avatica.AvaticaStatement; +import org.apache.calcite.avatica.Meta; +import org.apache.calcite.avatica.util.DateTimeUtils; + +import java.sql.Array; +import java.sql.Date; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Struct; +import java.sql.Time; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.List; +import java.util.TreeMap; + +/** Implementation of {@link org.apache.calcite.avatica.Meta.MetaResultSet} + * upon a JDBC {@link java.sql.ResultSet}. + * + * @see org.apache.calcite.avatica.jdbc.JdbcMeta */ +class JdbcResultSet extends Meta.MetaResultSet { + protected JdbcResultSet(String connectionId, int statementId, + boolean ownStatement, Meta.Signature signature, Meta.Frame firstFrame) { + this(connectionId, statementId, ownStatement, signature, firstFrame, -1L); + } + + protected JdbcResultSet(String connectionId, int statementId, + boolean ownStatement, Meta.Signature signature, Meta.Frame firstFrame, + long updateCount) { + super(connectionId, statementId, ownStatement, signature, firstFrame, updateCount); + } + + /** Creates a result set. */ + public static JdbcResultSet create(String connectionId, int statementId, + ResultSet resultSet) { + // -1 still limits to 100 but -2 does not limit to any number + return create(connectionId, statementId, resultSet, + JdbcMeta.UNLIMITED_COUNT); + } + + /** Creates a result set with maxRowCount. + * + * <p>If {@code maxRowCount} is -2 ({@link JdbcMeta#UNLIMITED_COUNT}), + * returns an unlimited number of rows in a single frame; any other + * negative value (typically -1) returns an unlimited number of rows + * in frames of the default frame size. 
*/ + public static JdbcResultSet create(String connectionId, int statementId, + ResultSet resultSet, long maxRowCount) { + try { + Meta.Signature sig = JdbcMeta.signature(resultSet.getMetaData()); + return create(connectionId, statementId, resultSet, maxRowCount, sig); + } catch (SQLException e) { + throw new RuntimeException(e); + } + } + + public static JdbcResultSet create(String connectionId, int statementId, + ResultSet resultSet, long maxRowCount, Meta.Signature signature) { + try { + final Calendar calendar = Calendar.getInstance(DateTimeUtils.GMT_ZONE); + final int fetchRowCount; + if (maxRowCount == JdbcMeta.UNLIMITED_COUNT) { + fetchRowCount = -1; + } else if (maxRowCount < 0L) { + fetchRowCount = AvaticaStatement.DEFAULT_FETCH_SIZE; + } else if (maxRowCount > AvaticaStatement.DEFAULT_FETCH_SIZE) { + fetchRowCount = AvaticaStatement.DEFAULT_FETCH_SIZE; + } else { + fetchRowCount = (int) maxRowCount; + } + final Meta.Frame firstFrame = frame(null, resultSet, 0, fetchRowCount, calendar); + if (firstFrame.done) { + resultSet.close(); + } + return new JdbcResultSet(connectionId, statementId, true, signature, + firstFrame); + } catch (SQLException e) { + throw new RuntimeException(e); + } + } + + /** Creates a empty result set with empty frame */ + public static JdbcResultSet empty(String connectionId, int statementId, + Meta.Signature signature) { + return new JdbcResultSet(connectionId, statementId, true, signature, + Meta.Frame.EMPTY); + } + + /** Creates a result set that only has an update count. */ + public static JdbcResultSet count(String connectionId, int statementId, + int updateCount) { + return new JdbcResultSet(connectionId, statementId, true, null, null, updateCount); + } + + /** Creates a frame containing a given number or unlimited number of rows + * from a result set. 
*/ + static Meta.Frame frame(StatementInfo info, ResultSet resultSet, long offset, + int fetchMaxRowCount, Calendar calendar) throws SQLException { + final ResultSetMetaData metaData = resultSet.getMetaData(); + final int columnCount = metaData.getColumnCount(); + final int[] types = new int[columnCount]; + for (int i = 0; i < types.length; i++) { + types[i] = metaData.getColumnType(i + 1); + } + final List<Object> rows = new ArrayList<>(); + // Meta prepare/prepareAndExecute 0 return 0 row and done + boolean done = fetchMaxRowCount == 0; + for (int i = 0; fetchMaxRowCount < 0 || i < fetchMaxRowCount; i++) { + final boolean hasRow; + if (null != info) { + hasRow = info.next(); + } else { + hasRow = resultSet.next(); + } + if (!hasRow) { + done = true; + resultSet.close(); + break; + } + Object[] columns = new Object[columnCount]; + for (int j = 0; j < columnCount; j++) { + columns[j] = getValue(resultSet, types[j], j, calendar); + } + rows.add(columns); + } + return new Meta.Frame(offset, done, rows); + } + + private static Object getValue(ResultSet resultSet, int type, int j, + Calendar calendar) throws SQLException { + switch (type) { + case Types.BIGINT: + final long aLong = resultSet.getLong(j + 1); + return aLong == 0 && resultSet.wasNull() ? null : aLong; + case Types.INTEGER: + final int anInt = resultSet.getInt(j + 1); + return anInt == 0 && resultSet.wasNull() ? null : anInt; + case Types.SMALLINT: + final short aShort = resultSet.getShort(j + 1); + return aShort == 0 && resultSet.wasNull() ? null : aShort; + case Types.TINYINT: + final byte aByte = resultSet.getByte(j + 1); + return aByte == 0 && resultSet.wasNull() ? null : aByte; + case Types.DOUBLE: + case Types.FLOAT: + final double aDouble = resultSet.getDouble(j + 1); + return aDouble == 0D && resultSet.wasNull() ? null : aDouble; + case Types.REAL: + final float aFloat = resultSet.getFloat(j + 1); + return aFloat == 0D && resultSet.wasNull() ? 
null : aFloat; + case Types.DATE: + final Date aDate = resultSet.getDate(j + 1, calendar); + return aDate == null + ? null + : (int) (aDate.getTime() / DateTimeUtils.MILLIS_PER_DAY); + case Types.TIME: + final Time aTime = resultSet.getTime(j + 1, calendar); + return aTime == null + ? null + : (int) (aTime.getTime() % DateTimeUtils.MILLIS_PER_DAY); + case Types.TIMESTAMP: + final Timestamp aTimestamp = resultSet.getTimestamp(j + 1, calendar); + return aTimestamp == null ? null : aTimestamp.getTime(); + case Types.ARRAY: + final Array array = resultSet.getArray(j + 1); + if (null == array) { + return null; + } + ResultSet arrayValues = array.getResultSet(); + TreeMap<Integer, Object> map = new TreeMap<>(); + while (arrayValues.next()) { + // column 1 is the index in the array, column 2 is the value. + // Recurse on `getValue` to unwrap nested types correctly. + // `j` is zero-indexed and incremented for us, thus we have `1` being used twice. + map.put(arrayValues.getInt(1), getValue(arrayValues, array.getBaseType(), 1, calendar)); + } + // If the result set is not in the same order as the actual Array, TreeMap fixes that. + // Need to make a concrete list to ensure Jackson serialization. + //return new ListLike<Object>(new ArrayList<>(map.values()), ListLikeType.ARRAY); + return new ArrayList<>(map.values()); + case Types.STRUCT: + Struct struct = resultSet.getObject(j + 1, Struct.class); + Object[] attrs = struct.getAttributes(); + List<Object> list = new ArrayList<>(attrs.length); + for (Object o : attrs) { + list.add(o); + } + return list; + default: + return resultSet.getObject(j + 1); + } + } +} + +// End JdbcResultSet.java
