[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-08-01 Thread karanmehta93
Github user karanmehta93 commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r207063036
  
--- Diff: phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java ---
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.metrics.Counter;
+import org.apache.hadoop.hbase.metrics.Gauge;
+import org.apache.hadoop.hbase.metrics.Histogram;
+import org.apache.hadoop.hbase.metrics.Meter;
+import org.apache.hadoop.hbase.metrics.Metric;
+import org.apache.hadoop.hbase.metrics.MetricRegistry;
+import org.apache.hadoop.hbase.metrics.MetricRegistryInfo;
+import org.apache.hadoop.hbase.metrics.Timer;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.Interns;
+import org.apache.hadoop.metrics2.lib.MutableHistogram;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
+
+/**
+ * Contents mostly copied from GlobalMetricRegistriesAdapter class from hbase-hadoop2-compat.
+ * The adapter attaches HBase's MetricRegistry to Hadoop's DefaultMetricsSystem.
+ * Doesn't handle dynamic attach/detach of registries.
+ */
+public class GlobalMetricRegistriesAdapter {
+
+    private static final Log LOG = LogFactory.getLog(GlobalMetricRegistriesAdapter.class);
+    private static GlobalMetricRegistriesAdapter INSTANCE;
+
+    private GlobalMetricRegistriesAdapter() {
+        DefaultMetricsSystem.initialize("Phoenix");
+        JvmMetrics.initSingleton("Phoenix", "");
+    }
+
+    public static GlobalMetricRegistriesAdapter getInstance() {
+        if (INSTANCE == null) {
--- End diff --

I think I am moving away from lazy initialization altogether. No need for 
premature optimization here, I believe.


---
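
A minimal sketch of the eager initialization described in the comment above, reusing the names and imports from the quoted diff (illustrative only, not the merged patch):

    public class GlobalMetricRegistriesAdapter {

        // Created when the class is loaded; the JVM runs static initializers
        // at most once, so getInstance() needs no null check and no lock.
        private static final GlobalMetricRegistriesAdapter INSTANCE =
                new GlobalMetricRegistriesAdapter();

        private GlobalMetricRegistriesAdapter() {
            DefaultMetricsSystem.initialize("Phoenix");
            JvmMetrics.initSingleton("Phoenix", "");
        }

        public static GlobalMetricRegistriesAdapter getInstance() {
            return INSTANCE;
        }
    }

The trade-off is that DefaultMetricsSystem is initialized on the first active use of the class rather than on the first getInstance() call.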


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-08-01 Thread karanmehta93
Github user karanmehta93 commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r207060201
  
--- Diff: phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java ---
@@ -0,0 +1,167 @@
+    public static GlobalMetricRegistriesAdapter getInstance() {
+        if (INSTANCE == null) {
+            INSTANCE = new GlobalMetricRegistriesAdapter();
+        }
+        return INSTANCE;
+    }
+
+    public void registerMetricRegistry(MetricRegistry registry) {
+        if (registry == null) {
+            LOG.warn("Registry cannot be registered with Hadoop Metrics 2 since it is null.");
+            return;
+        }
+
+        HBaseMetrics2HadoopMetricsAdapter adapter = new HBaseMetrics2HadoopMetricsAdapter(registry);
+        registerToDefaultMetricsSystem(registry, adapter);
+    }
+
+    private void registerToDefaultMetricsSystem(MetricRegistry registry, HBaseMetrics2HadoopMetricsAdapter adapter) {
+        MetricRegistryInfo info = registry.getMetricRegistryInfo();
+        LOG.info("Registering " + info.getMetricsJmxContext() + " " + info.getMetricsDescription() + " into DefaultMetricsSystem");
+        DefaultMetricsSystem.instance().register(info.getMetricsJmxContext(), info.getMetricsDescription(), adapter);
+    }
+
+    /**
+     * Class to convert HBase Metric Objects to Hadoop Metrics2 Metric Objects
+     */
+    private static class HBaseMetrics2HadoopMetricsAdapter implements MetricsSource {
+        private static final Log LOG = LogFactory.getLog(HBaseMetrics2HadoopMetricsAdapter.class);
+        private final MetricRegistry registry;
+
+        private HBaseMetrics2HadoopMetricsAdapter(MetricRegistry registry) {
+            this.registry = registry;
+        }
+
+        public void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsCollector collector) {
+            MetricRegistryInfo info = metricRegistry.getMetricRegistryInfo();
+            MetricsRecordBuilder builder = collector.addRecord(Interns.info(info.getMetricsName(),

[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-08-01 Thread karanmehta93
Github user karanmehta93 commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r207060270
  
--- Diff: phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java ---
@@ -0,0 +1,167 @@
+    public static GlobalMetricRegistriesAdapter getInstance() {
+        if (INSTANCE == null) {
--- End diff --

As of now, this method is called from a static block in 
`GlobalClientMetrics`, which can happen only once, so we don't have any race 
conditions here.
I have two options here: add a lock, or move this initialization to a 
static block of this class. What do you suggest?


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-08-01 Thread karanmehta93
Github user karanmehta93 commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r207054938
  
--- Diff: phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java ---
@@ -0,0 +1,167 @@
+    private GlobalMetricRegistriesAdapter() {
+        DefaultMetricsSystem.initialize("HBase");
+        JvmMetrics.initSingleton("HBase", "");
--- End diff --

Cool. Thanks!


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-08-01 Thread karanmehta93
Github user karanmehta93 commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r207054877
  
--- Diff: phoenix-core/src/test/resources/hadoop-metrics2.properties ---
@@ -32,10 +32,9 @@
 #[prefix].[source|sink].[instance].[options]
 # See javadoc of package-info.java for org.apache.hadoop.metrics2 for 
detail
 
-
-# Don't attempt to start jmx mbeans for all sources.
-#  For right now, all metrics are exported to a phoenix table
-*.source.start_mbeans=false
+phoenix.source.start_mbeans=true

+phoenix.sink.sink0.class=org.apache.phoenix.monitoring.GlobalPhoenixMetricsTestSink
--- End diff --

It is just an identifier and can be anything.


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-08-01 Thread xcangCRM
Github user xcangCRM commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r207052688
  
--- Diff: phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java ---
@@ -0,0 +1,167 @@
+    public static GlobalMetricRegistriesAdapter getInstance() {
+        if (INSTANCE == null) {
--- End diff --

nit: do you need double-checked locking here?
if (INSTANCE == null) {
    synchronized (GlobalMetricRegistriesAdapter.class) { // static context: lock on the class
        if (INSTANCE == null) {
            ...
        }
    }
}


---
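
For reference, a fuller sketch of the double-checked locking suggested above. Since getInstance() is static, the lock has to be the class object rather than this, and INSTANCE must be declared volatile for the idiom to be safe under the Java memory model (illustrative only, not the final patch):

    private static volatile GlobalMetricRegistriesAdapter INSTANCE;

    public static GlobalMetricRegistriesAdapter getInstance() {
        if (INSTANCE == null) {                        // fast path, no lock
            synchronized (GlobalMetricRegistriesAdapter.class) {
                if (INSTANCE == null) {                // re-check under the lock
                    INSTANCE = new GlobalMetricRegistriesAdapter();
                }
            }
        }
        return INSTANCE;
    }

Without volatile, a thread on the fast path could observe a partially constructed instance.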


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-08-01 Thread xcangCRM
Github user xcangCRM commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r207053124
  
--- Diff: phoenix-core/src/test/resources/hadoop-metrics2.properties ---
@@ -32,10 +32,9 @@
+phoenix.sink.sink0.class=org.apache.phoenix.monitoring.GlobalPhoenixMetricsTestSink
--- End diff --

I think it usually starts with sink1 (?), at least in HBase.


---
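
For reference, the instance token in a hadoop-metrics2.properties key follows the [prefix].[source|sink].[instance].[options] pattern quoted in the diff above; a short sketch showing that the token is an arbitrary grouping label (instance names below are illustrative):

    # "sink0" only ties these keys together as one sink instance:
    phoenix.sink.sink0.class=org.apache.phoenix.monitoring.GlobalPhoenixMetricsTestSink
    # an equivalent configuration under any other instance name:
    # phoenix.sink.testsink.class=org.apache.phoenix.monitoring.GlobalPhoenixMetricsTestSink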


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-08-01 Thread xcangCRM
Github user xcangCRM commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r207053000
  
--- Diff: phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java ---
@@ -0,0 +1,167 @@
+        public void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsCollector collector) {
+            MetricRegistryInfo info = metricRegistry.getMetricRegistryInfo();
+            MetricsRecordBuilder builder = collector.addRecord(Interns.info(info.getMetricsName(),
+                info.getMetricsDescription()));

[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-08-01 Thread joshelser
Github user joshelser commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r207051474
  
--- Diff: phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java ---
@@ -0,0 +1,167 @@
+    private GlobalMetricRegistriesAdapter() {
+        DefaultMetricsSystem.initialize("HBase");
+        JvmMetrics.initSingleton("HBase", "");
--- End diff --

I don't have a strong opinion on how you expose this to let PQS say "I am 
PQS" (e.g. whether CSV, list, whatever).

But yeah, modifying `snapshotAllMetrics` to include a Hadoop Metrics2 
MetricsTag should do the trick.


---
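
A hedged sketch of the MetricsTag approach mentioned above, applied to the snapshotAllMetrics from the quoted diff; the tag name "ProcessType" and value "PQS" are illustrative placeholders, not part of the patch:

    public void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsCollector collector) {
        MetricRegistryInfo info = metricRegistry.getMetricRegistryInfo();
        MetricsRecordBuilder builder = collector
                .addRecord(Interns.info(info.getMetricsName(), info.getMetricsDescription()))
                // Metrics2 tags travel with the record, so a sink (openTSDB,
                // JMX, ...) can filter PQS metrics from other Phoenix clients.
                .tag(Interns.info("ProcessType", "Process emitting these metrics"), "PQS");
        // ... snapshot counters, gauges, histograms, meters and timers into
        // the builder, as in the rest of the quoted class ...
    }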


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-08-01 Thread karanmehta93
Github user karanmehta93 commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r207049490
  
--- Diff: phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java ---
@@ -0,0 +1,167 @@
+    private GlobalMetricRegistriesAdapter() {
+        DefaultMetricsSystem.initialize("HBase");
+        JvmMetrics.initSingleton("HBase", "");
--- End diff --

So if I understand this correctly, I will include a new property which 
takes a CSV of tags as input. Later on, I can split on commas and add those tags 
to `HBaseMetrics2HadoopMetricsAdapter#snapshotAllMetrics()`. @joshelser 


---
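
A sketch of the CSV-of-tags idea above; the property name phoenix.client.metrics.tags is hypothetical, not an existing Phoenix config key:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.metrics2.MetricsRecordBuilder;
    import org.apache.hadoop.metrics2.lib.Interns;

    final class MetricsTagUtil {
        // Hypothetical key, e.g. phoenix.client.metrics.tags=processType=PQS,dc=dc1
        static final String TAGS_KEY = "phoenix.client.metrics.tags";

        /** Splits "name=value,name=value" and adds each pair as a Metrics2 tag. */
        static void addConfiguredTags(Configuration conf, MetricsRecordBuilder builder) {
            for (String kv : conf.get(TAGS_KEY, "").split(",")) {
                String[] parts = kv.split("=", 2);
                if (parts.length == 2 && !parts[0].trim().isEmpty()) {
                    builder.tag(Interns.info(parts[0].trim(), "Phoenix metrics tag"),
                            parts[1].trim());
                }
            }
        }
    }

snapshotAllMetrics() would call addConfiguredTags(conf, builder) right after creating the record builder.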


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-08-01 Thread joshelser
Github user joshelser commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r207003788
  
--- Diff: phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java ---
@@ -0,0 +1,167 @@
+    private GlobalMetricRegistriesAdapter() {
+        DefaultMetricsSystem.initialize("HBase");
+        JvmMetrics.initSingleton("HBase", "");
--- End diff --

> I am sorry, I didn't get you on this one.

You're using openTSDB to collect all of this data, and your use case is to 
"understand how PQS runs". To do this, you will need to know which hosts 
PQS is running on just to look at PQS metrics. I was suggesting that having a 
unique name (or a tag) would make an operator's life much simpler -- it 
would be clear (and presumably easier to filter on) how to find just the PQS metrics.

For example, consider the difference between issuing a filter "host in pqs_server1, 
pqs_server2, pqs_server3" as opposed to just "tag=PQS", or similar. 

Now that I'm thinking about this, leaving this as {{Phoenix,sub=CLIENT}} 
may be OK, and you can instead just configure the push of metrics data to Hadoop 
Metrics2 to include a "tag" that indicates this is from PQS. That would make me 
happy.


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-08-01 Thread karanmehta93
Github user karanmehta93 commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r207001759
  
--- Diff: phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java ---
@@ -0,0 +1,167 @@
+    private GlobalMetricRegistriesAdapter() {
+        DefaultMetricsSystem.initialize("HBase");
+        JvmMetrics.initSingleton("HBase", "");
--- End diff --

I am sorry, I didn't get you on this one. Could you elaborate?
Also, I realized that stopping JVM metrics at the Phoenix level is not an 
option here, since Phoenix embeds hbase-client, which instantiates JVM metrics 
by default. Check out this stack trace. 


`
init:57, BaseSourceImpl$DefaultMetricsSystemInitializer (org.apache.hadoop.hbase.metrics)
<init>:112, BaseSourceImpl (org.apache.hadoop.hbase.metrics)
<init>:56, MetricsZooKeeperSourceImpl (org.apache.hadoop.hbase.zookeeper)
<init>:51, MetricsZooKeeperSourceImpl (org.apache.hadoop.hbase.zookeeper)
newInstance0:-1, NativeConstructorAccessorImpl (sun.reflect)
newInstance:62, NativeConstructorAccessorImpl (sun.reflect)
newInstance:45, DelegatingConstructorAccessorImpl (sun.reflect)
newInstance:423, Constructor (java.lang.reflect)
newInstance:442, Class (java.lang)
nextService:380, ServiceLoader$LazyIterator (java.util)
next:404, ServiceLoader$LazyIterator (java.util)
next:480, ServiceLoader$1 (java.util)
getInstance:59, CompatibilitySingletonFactory (org.apache.hadoop.hbase)
<init>:38, MetricsZooKeeper (org.apache.hadoop.hbase.zookeeper)
<init>:130, RecoverableZooKeeper (org.apache.hadoop.hbase.zookeeper)
connect:143, ZKUtil (org.apache.hadoop.hbase.zookeeper)
<init>:181, ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper)
<init>:155, ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper)
<init>:43, ZooKeeperKeepAliveConnection (org.apache.hadoop.hbase.client)
getKeepAliveZooKeeperWatcher:1737, ConnectionManager$HConnectionImplementation (org.apache.hadoop.hbase.client)
getClusterId:104, ZooKeeperRegistry (org.apache.hadoop.hbase.client)
retrieveClusterId:945, ConnectionManager$HConnectionImplementation (org.apache.hadoop.hbase.client)
<init>:721, ConnectionManager$HConnectionImplementation (org.apache.hadoop.hbase.client)
newInstance0:-1, NativeConstructorAccessorImpl (sun.reflect)
newInstance:62, NativeConstructorAccessorImpl 

[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-08-01 Thread joshelser
Github user joshelser commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r206974865
  
--- Diff: phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java ---
@@ -0,0 +1,167 @@
+    private GlobalMetricRegistriesAdapter() {
+        DefaultMetricsSystem.initialize("HBase");
+        JvmMetrics.initSingleton("HBase", "");
--- End diff --

> why do we need to differentiate

As an operator, I'm going to expect very different characteristics coming 
from client applications than I am from PQS. Since PQS is a SPOF for 
(potentially) many users, I would expect a higher level of regularity from PQS 
than I would from a client's app.

> We should get that info from the application JVM itself right

I'm sure there's something you can apply to find a PQS metric compared to 
other clients, but I am just thinking that it would be good to make this as 
easy as possible. I think watching PQS metrics would be a common use case, not 
the exception.


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-08-01 Thread karanmehta93
Github user karanmehta93 commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r206973402
  
--- Diff: phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java ---
@@ -0,0 +1,167 @@
+    private GlobalMetricRegistriesAdapter() {
+        DefaultMetricsSystem.initialize("HBase");
+        JvmMetrics.initSingleton("HBase", "");
--- End diff --

I think it's hard to differentiate, but my concern is: why do we need to 
differentiate at all? We should get that info from the application JVM itself, 
right, whether it's PQS or some app server using the Phoenix client.


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-08-01 Thread joshelser
Github user joshelser commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r206971126
  
--- Diff: phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java ---
@@ -0,0 +1,167 @@
+    private GlobalMetricRegistriesAdapter() {
+        DefaultMetricsSystem.initialize("HBase");
+        JvmMetrics.initSingleton("HBase", "");
--- End diff --

How will you differentiate this "client" (PQS) from other clients?


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-08-01 Thread karanmehta93
Github user karanmehta93 commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r206968534
  
--- Diff: phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java ---
@@ -0,0 +1,167 @@
+    private GlobalMetricRegistriesAdapter() {
+        DefaultMetricsSystem.initialize("HBase");
+        JvmMetrics.initSingleton("HBase", "");
--- End diff --

Let's keep it as client, since these metrics represent the client in the end, 
even though it is PQS that is instantiating it.


---


[GitHub] phoenix pull request #318: Fixed Spelling.

2018-08-01 Thread asfgit
Github user asfgit closed the pull request at:

https://github.com/apache/phoenix/pull/318


---


[GitHub] phoenix pull request #319: PHOENIX-4820

2018-08-01 Thread comnetwork
Github user comnetwork closed the pull request at:

https://github.com/apache/phoenix/pull/319


---


[GitHub] phoenix pull request #321: PHOENIX-4820 V3

2018-07-31 Thread comnetwork
GitHub user comnetwork opened a pull request:

https://github.com/apache/phoenix/pull/321

PHOENIX-4820 V3

This patch is mainly for:
1. Removing CoerceExpression in 
ExpressionUtil.IsColumnConstantExpressionVisitor. Consider the following SQL:
 
 select a.av1 
 from (select pk1 ak1,pk2 ak2,pk3 ak3, substr(v1,1,1) av1 from test_table 
order by pk1,pk2 limit 10) a 
 where cast(a.ak1 as integer)=2 group by a.ak1,a.av1 order by a.av1

We cannot infer that a.ak1 is constant from "where cast(a.ak1 as integer)=2" if 
a.ak1 is a double: many distinct double values (for example, 2.0 and 2.4) map 
to the same integer after the cast.

2. Adding more unit tests and IT tests for CoerceExpression.


You can merge this pull request into a Git repository by running:

$ git pull https://github.com/comnetwork/phoenix 4.x-HBase-1.3

Alternatively you can review and apply these changes as the patch at:

https://github.com/apache/phoenix/pull/321.patch

To close this pull request, make a commit to your master/trunk branch
with (at least) the following in the commit message:

This closes #321


commit ef71627dbf7fd08a3093b7edaf34bc4092100e64
Author: chenglei 
Date:   2018-08-01T04:48:10Z

PHOENIX-4820




---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-31 Thread ankitsinghal
Github user ankitsinghal commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r206741679
  
--- Diff: phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java ---
@@ -355,5 +355,19 @@
 + "," + COLUMN_NAME + "," + COLUMN_FAMILY + "))\n" + HConstants.VERSIONS + "=%s,\n"
 + HColumnDescriptor.KEEP_DELETED_CELLS + "=%s,\n" + PhoenixDatabaseMetaData.TRANSACTIONAL + "="
 + Boolean.FALSE;
+
+    public static final String CREATE_MUTEX_METADTA =
+        "CREATE IMMUTABLE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_MUTEX_TABLE_NAME + "\"(\n" +
+        // Pk columns
+        TENANT_ID + " VARCHAR NULL," +
+        TABLE_SCHEM + " VARCHAR NULL," +
+        TABLE_NAME + " VARCHAR NOT NULL," +
+        COLUMN_NAME + " VARCHAR NULL," + // null for table row
+        COLUMN_FAMILY + " VARCHAR NULL " + // using for CF to uniqueness for columns
+        "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + TENANT_ID + ","
+            + TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + "," + COLUMN_FAMILY + "))\n" +
+        HConstants.VERSIONS + "=%s,\n" +
+        HColumnDescriptor.KEEP_DELETED_CELLS + "=%s,\n" +
+        PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
--- End diff --

Yes, probably our comments have crossed.


---


[GitHub] phoenix pull request #320: PHOENIX-4820

2018-07-31 Thread comnetwork
Github user comnetwork commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/320#discussion_r206739045
  
--- Diff: 
phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java ---
@@ -2951,7 +2951,129 @@ public void testOrderPreservingGroupBy() throws 
Exception {
 }
 }
 }
-
+
+@Test
+public void testOrderPreservingGroupByForNotPkColumns() throws 
Exception {
+
+try (Connection conn= DriverManager.getConnection(getUrl())) {
+
+conn.createStatement().execute("CREATE TABLE test (\n" +
+"pk1 varchar, \n" +
+"pk2 varchar, \n" +
+"pk3 varchar, \n" +
+"pk4 varchar, \n" +
+"v1 varchar, \n" +
+"v2 varchar,\n" +
+"CONSTRAINT pk PRIMARY KEY (\n" +
+"   pk1,\n" +
+"   pk2,\n" +
+"   pk3,\n" +
+"   pk4\n" +
+" )\n" +
+" )");
+String[] queries = new String[] {
+"SELECT pk3 FROM test WHERE v2 = 'a' GROUP BY 
substr(v2,0,1),pk3 ORDER BY pk3",
+"SELECT pk3 FROM test WHERE pk1 = 'c' and v2 = 
substr('abc',1,1) GROUP BY v2,pk3 ORDER BY pk3",
+"SELECT pk3 FROM test WHERE v1 = 'a' and v2 = 'b' 
GROUP BY length(v1)+length(v2),pk3 ORDER BY pk3",
+"SELECT pk3 FROM test WHERE pk1 = 'a' and v2 = 'b' 
GROUP BY length(pk1)+length(v2),pk3 ORDER BY pk3",
+"SELECT pk3 FROM test WHERE v1 = 'a' and v2 = 
substr('abc',2,1) GROUP BY pk4,CASE WHEN v1 > v2 THEN v1 ELSE v2 END,pk3 ORDER 
BY pk4,pk3",
+"SELECT pk3 FROM test WHERE pk1 = 'a' and v2 = 
substr('abc',2,1) GROUP BY pk4,CASE WHEN pk1 > v2 THEN pk1 ELSE v2 END,pk3 
ORDER BY pk4,pk3",
+"SELECT pk3 FROM test WHERE pk1 = 'a' and pk2 = 'b' 
and v1 = 'c' GROUP BY CASE WHEN pk1 > pk2 THEN v1 WHEN pk1 = pk2 THEN pk1 ELSE 
pk2 END,pk3 ORDER BY pk3"
+};
+int index = 0;
+for (String query : queries) {
+QueryPlan plan = getQueryPlan(conn, query);
+assertTrue((index + 1) + ") " + queries[index], 
plan.getOrderBy().getOrderByExpressions().isEmpty());
+index++;
+}
+}
+}
+
+@Test
+public void testOrderPreservingGroupByForClientAggregatePlan() throws 
Exception {
+Connection conn = null;
+ try {
+ conn = DriverManager.getConnection(getUrl());
+ String tableName1 = "test_table";
+ String sql = "create table " + tableName1 + "( "+
+ " pk1 varchar not null , " +
+ " pk2 varchar not null, " +
+ " pk3 varchar not null," +
+ " v1 varchar, " +
+ " v2 varchar, " +
+ " CONSTRAINT TEST_PK PRIMARY KEY ( "+
+"pk1,"+
+"pk2,"+
+"pk3 ))";
+ conn.createStatement().execute(sql);
+
+ String[] queries = new String[] {
+   "select a.ak3 "+
+   "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) 
ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from 
"+tableName1+" order by pk2,pk3 limit 10) a "+
+   "group by a.ak3,a.av1 order by a.ak3,a.av1",
+
+   "select a.ak3 "+
+   "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) 
ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from 
"+tableName1+" order by pk2,pk3 limit 10) a "+
+   "where a.av2 = 'a' GROUP BY substr(a.av2,0,1),ak3 ORDER 
BY ak3",
+
+   //for InListExpression
+   "select a.ak3 "+
+   "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) 
ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from 
"+tableName1+" order by pk2,pk3 limit 10) a "+
+   "where a.av2 in('a') GROUP BY substr(a.av2,0,1),ak3 
ORDER BY ak3",
--- End diff --

Yes, there is already a test for more than a single constant in an IN 
clause, in testNotOrderPreservingGroupByForClientAggregatePlan.


---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-31 Thread ankitsinghal
Github user ankitsinghal commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r206738423
  
--- Diff: phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java ---
@@ -2723,13 +2712,10 @@ private void createOtherSystemTables(PhoenixConnection metaConnection, HBaseAdmi
         try {
             metaConnection.createStatement().executeUpdate(getChildLinkDDL());
         } catch (TableAlreadyExistsException e) {}
-        // Catch the IOException to log the error message and then bubble it up for the client to retry.
         try {
-            createSysMutexTableIfNotExists(hbaseAdmin);
-        } catch (IOException exception) {
-            logger.error("Failed to created SYSMUTEX table. Upgrade or migration is not possible without it. Please retry.");
-            throw exception;
-        }
+            metaConnection.createStatement().executeUpdate(getMutexDDL());
--- End diff --

ok, then we are good.


---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-31 Thread ankitsinghal
Github user ankitsinghal commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r206738342
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java ---
@@ -355,5 +355,19 @@
+ "," + COLUMN_NAME + "," + COLUMN_FAMILY + "))\n" + 
HConstants.VERSIONS + "=%s,\n"
+ HColumnDescriptor.KEEP_DELETED_CELLS + "=%s,\n" + 
PhoenixDatabaseMetaData.TRANSACTIONAL + "="
+ Boolean.FALSE;
+   
+public static final String CREATE_MUTEX_METADTA =
+   "CREATE IMMUTABLE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + 
SYSTEM_MUTEX_TABLE_NAME + "\"(\n" +
+   // Pk columns
+   TENANT_ID + " VARCHAR NULL," +
+   TABLE_SCHEM + " VARCHAR NULL," +
+   TABLE_NAME + " VARCHAR NOT NULL," +
+   COLUMN_NAME + " VARCHAR NULL," + // null for table row
+   COLUMN_FAMILY + " VARCHAR NULL " + // using for CF to 
uniqueness for columns
+   "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + 
TENANT_ID + ","
+   + TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + "," 
+ COLUMN_FAMILY + "))\n" +
+   HConstants.VERSIONS + "=%s,\n" +
+   HColumnDescriptor.KEEP_DELETED_CELLS + "=%s,\n" +
+   PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
--- End diff --

OK, that makes sense. Can you then update QueryServices.writeMutexCell(byte[] 
rowKey) and deleteMutexCell(byte[] rowKey) to accept all the arguments that 
form the table's primary key, for consistency?


---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-31 Thread twdsilva
Github user twdsilva commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r206737818
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java ---
@@ -355,5 +355,19 @@
+ "," + COLUMN_NAME + "," + COLUMN_FAMILY + "))\n" + 
HConstants.VERSIONS + "=%s,\n"
+ HColumnDescriptor.KEEP_DELETED_CELLS + "=%s,\n" + 
PhoenixDatabaseMetaData.TRANSACTIONAL + "="
+ Boolean.FALSE;
+   
+public static final String CREATE_MUTEX_METADTA =
+   "CREATE IMMUTABLE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + 
SYSTEM_MUTEX_TABLE_NAME + "\"(\n" +
+   // Pk columns
+   TENANT_ID + " VARCHAR NULL," +
+   TABLE_SCHEM + " VARCHAR NULL," +
+   TABLE_NAME + " VARCHAR NOT NULL," +
+   COLUMN_NAME + " VARCHAR NULL," + // null for table row
+   COLUMN_FAMILY + " VARCHAR NULL " + // using for CF to 
uniqueness for columns
+   "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + 
TENANT_ID + ","
+   + TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + "," 
+ COLUMN_FAMILY + "))\n" +
+   HConstants.VERSIONS + "=%s,\n" +
+   HColumnDescriptor.KEEP_DELETED_CELLS + "=%s,\n" +
+   PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
--- End diff --


Are you suggesting that writeMutexCell/deleteMutexCell should take (tenant id, 
schema, table name, column name, column family) as arguments instead of a byte[]? 
I will make that change.
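
A minimal sketch of what the reworked signatures might look like (the parameter names and order here are assumptions, not the final API):

```java
// Hypothetical signatures only; each argument is one component of the
// SYSTEM.MUTEX primary key instead of a pre-built byte[] row key.
public boolean writeMutexCell(String tenantId, String schemaName,
        String tableName, String columnName, String familyName)
        throws SQLException;

public void deleteMutexCell(String tenantId, String schemaName,
        String tableName, String columnName, String familyName)
        throws SQLException;
```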


---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-31 Thread twdsilva
Github user twdsilva commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r206737372
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 ---
@@ -2723,13 +2712,10 @@ private void 
createOtherSystemTables(PhoenixConnection metaConnection, HBaseAdmi
 try {
 
metaConnection.createStatement().executeUpdate(getChildLinkDDL());
 } catch (TableAlreadyExistsException e) {}
-// Catch the IOException to log the error message and then bubble 
it up for the client to retry.
 try {
-createSysMutexTableIfNotExists(hbaseAdmin);
-} catch (IOException exception) {
-logger.error("Failed to created SYSMUTEX table. Upgrade or 
migration is not possible without it. Please retry.");
-throw exception;
-}
+metaConnection.createStatement().executeUpdate(getMutexDDL());
--- End diff --

The mutex table would already have been created by 
createSysMutexTableIfNotExists(). We execute the CREATE TABLE statement so 
that the table also exists in SYSTEM.CATALOG, which lets us use GRANT/REVOKE 
to grant permissions on the SYSTEM.MUTEX table.
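
For illustration, once the table is registered in SYSTEM.CATALOG the intended usage would be plain Phoenix GRANT/REVOKE statements; a minimal sketch, where the 'user1' principal, the 'RWX' permission string, and the url variable are placeholders:

```java
// Sketch only: manage access to SYSTEM.MUTEX like any other Phoenix table.
try (Connection conn = DriverManager.getConnection(url);
        Statement stmt = conn.createStatement()) {
    stmt.execute("GRANT 'RWX' ON SYSTEM.\"MUTEX\" TO 'user1'");
    // ... later, to take the permission away again:
    stmt.execute("REVOKE ON SYSTEM.\"MUTEX\" FROM 'user1'");
}
```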


---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-31 Thread twdsilva
Github user twdsilva commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r206737111
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java ---
@@ -355,5 +355,19 @@
+ "," + COLUMN_NAME + "," + COLUMN_FAMILY + "))\n" + 
HConstants.VERSIONS + "=%s,\n"
+ HColumnDescriptor.KEEP_DELETED_CELLS + "=%s,\n" + 
PhoenixDatabaseMetaData.TRANSACTIONAL + "="
+ Boolean.FALSE;
+   
+public static final String CREATE_MUTEX_METADTA =
+   "CREATE IMMUTABLE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + 
SYSTEM_MUTEX_TABLE_NAME + "\"(\n" +
+   // Pk columns
+   TENANT_ID + " VARCHAR NULL," +
+   TABLE_SCHEM + " VARCHAR NULL," +
+   TABLE_NAME + " VARCHAR NOT NULL," +
+   COLUMN_NAME + " VARCHAR NULL," + // null for table row
+   COLUMN_FAMILY + " VARCHAR NULL " + // using for CF to 
uniqueness for columns
+   "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + 
TENANT_ID + ","
+   + TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + "," 
+ COLUMN_FAMILY + "))\n" +
+   HConstants.VERSIONS + "=%s,\n" +
+   HColumnDescriptor.KEEP_DELETED_CELLS + "=%s,\n" +
+   PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
--- End diff --

For clusters that use namespace mapping and that map the phoenix system 
tables to the SYSTEM namespace, we want to be able to use the GRANT/REVOKE 
statements to grant RW access to the SYSTEM:MUTEX table.


---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-31 Thread ankitsinghal
Github user ankitsinghal commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r206735726
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java ---
@@ -355,5 +355,19 @@
+ "," + COLUMN_NAME + "," + COLUMN_FAMILY + "))\n" + 
HConstants.VERSIONS + "=%s,\n"
+ HColumnDescriptor.KEEP_DELETED_CELLS + "=%s,\n" + 
PhoenixDatabaseMetaData.TRANSACTIONAL + "="
+ Boolean.FALSE;
+   
+public static final String CREATE_MUTEX_METADTA =
+   "CREATE IMMUTABLE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + 
SYSTEM_MUTEX_TABLE_NAME + "\"(\n" +
+   // Pk columns
+   TENANT_ID + " VARCHAR NULL," +
+   TABLE_SCHEM + " VARCHAR NULL," +
+   TABLE_NAME + " VARCHAR NOT NULL," +
+   COLUMN_NAME + " VARCHAR NULL," + // null for table row
+   COLUMN_FAMILY + " VARCHAR NULL " + // using for CF to 
uniqueness for columns
+   "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + 
TENANT_ID + ","
+   + TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + "," 
+ COLUMN_FAMILY + "))\n" +
+   HConstants.VERSIONS + "=%s,\n" +
+   HColumnDescriptor.KEEP_DELETED_CELLS + "=%s,\n" +
+   PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
--- End diff --

Why is there a need to create a Phoenix-managed table for the mutex? 
Also, the APIs QueryServices.writeMutexCell(byte[] rowKey) and 
deleteMutexCell(byte[] rowKey) don't enforce that the schema of the table 
will be followed.



---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-31 Thread ankitsinghal
Github user ankitsinghal commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r206736061
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 ---
@@ -2723,13 +2712,10 @@ private void 
createOtherSystemTables(PhoenixConnection metaConnection, HBaseAdmi
 try {
 
metaConnection.createStatement().executeUpdate(getChildLinkDDL());
 } catch (TableAlreadyExistsException e) {}
-// Catch the IOException to log the error message and then bubble 
it up for the client to retry.
 try {
-createSysMutexTableIfNotExists(hbaseAdmin);
-} catch (IOException exception) {
-logger.error("Failed to created SYSMUTEX table. Upgrade or 
migration is not possible without it. Please retry.");
-throw exception;
-}
+metaConnection.createStatement().executeUpdate(getMutexDDL());
--- End diff --

Shouldn't the mutex table already be available, and a mutex already acquired 
for the upgrade, before you call createOtherSystemTables?


---


[GitHub] phoenix pull request #317: PHOENIX-3547 Supporting more number of indices pe...

2018-07-31 Thread m2je
Github user m2je commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/317#discussion_r206717705
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java ---
@@ -1223,7 +1225,9 @@ public void readFields(DataInput input) throws 
IOException {
 boolean hasViewIndexId = encodedIndexedColumnsAndViewId < 0;
 if (hasViewIndexId) {
 // Fixed length
-viewIndexId = new 
byte[MetaDataUtil.getViewIndexIdDataType().getByteSize()];
+//Use leacy viewIndexIdType for clients older than 4.10 release
--- End diff --

done


---


[GitHub] phoenix pull request #320: PHOENIX-4820

2018-07-31 Thread JamesRTaylor
Github user JamesRTaylor commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/320#discussion_r206717157
  
--- Diff: 
phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java ---
@@ -2951,7 +2951,129 @@ public void testOrderPreservingGroupBy() throws 
Exception {
 }
 }
 }
-
+
+@Test
+public void testOrderPreservingGroupByForNotPkColumns() throws 
Exception {
+
+try (Connection conn= DriverManager.getConnection(getUrl())) {
+
+conn.createStatement().execute("CREATE TABLE test (\n" +
+"pk1 varchar, \n" +
+"pk2 varchar, \n" +
+"pk3 varchar, \n" +
+"pk4 varchar, \n" +
+"v1 varchar, \n" +
+"v2 varchar,\n" +
+"CONSTRAINT pk PRIMARY KEY (\n" +
+"   pk1,\n" +
+"   pk2,\n" +
+"   pk3,\n" +
+"   pk4\n" +
+" )\n" +
+" )");
+String[] queries = new String[] {
+"SELECT pk3 FROM test WHERE v2 = 'a' GROUP BY 
substr(v2,0,1),pk3 ORDER BY pk3",
+"SELECT pk3 FROM test WHERE pk1 = 'c' and v2 = 
substr('abc',1,1) GROUP BY v2,pk3 ORDER BY pk3",
+"SELECT pk3 FROM test WHERE v1 = 'a' and v2 = 'b' 
GROUP BY length(v1)+length(v2),pk3 ORDER BY pk3",
+"SELECT pk3 FROM test WHERE pk1 = 'a' and v2 = 'b' 
GROUP BY length(pk1)+length(v2),pk3 ORDER BY pk3",
+"SELECT pk3 FROM test WHERE v1 = 'a' and v2 = 
substr('abc',2,1) GROUP BY pk4,CASE WHEN v1 > v2 THEN v1 ELSE v2 END,pk3 ORDER 
BY pk4,pk3",
+"SELECT pk3 FROM test WHERE pk1 = 'a' and v2 = 
substr('abc',2,1) GROUP BY pk4,CASE WHEN pk1 > v2 THEN pk1 ELSE v2 END,pk3 
ORDER BY pk4,pk3",
+"SELECT pk3 FROM test WHERE pk1 = 'a' and pk2 = 'b' 
and v1 = 'c' GROUP BY CASE WHEN pk1 > pk2 THEN v1 WHEN pk1 = pk2 THEN pk1 ELSE 
pk2 END,pk3 ORDER BY pk3"
+};
+int index = 0;
+for (String query : queries) {
+QueryPlan plan = getQueryPlan(conn, query);
+assertTrue((index + 1) + ") " + queries[index], 
plan.getOrderBy().getOrderByExpressions().isEmpty());
+index++;
+}
+}
+}
+
+@Test
+public void testOrderPreservingGroupByForClientAggregatePlan() throws 
Exception {
+Connection conn = null;
+ try {
+ conn = DriverManager.getConnection(getUrl());
+ String tableName1 = "test_table";
+ String sql = "create table " + tableName1 + "( "+
+ " pk1 varchar not null , " +
+ " pk2 varchar not null, " +
+ " pk3 varchar not null," +
+ " v1 varchar, " +
+ " v2 varchar, " +
+ " CONSTRAINT TEST_PK PRIMARY KEY ( "+
+"pk1,"+
+"pk2,"+
+"pk3 ))";
+ conn.createStatement().execute(sql);
+
+ String[] queries = new String[] {
+   "select a.ak3 "+
+   "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) 
ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from 
"+tableName1+" order by pk2,pk3 limit 10) a "+
+   "group by a.ak3,a.av1 order by a.ak3,a.av1",
+
+   "select a.ak3 "+
+   "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) 
ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from 
"+tableName1+" order by pk2,pk3 limit 10) a "+
+   "where a.av2 = 'a' GROUP BY substr(a.av2,0,1),ak3 ORDER 
BY ak3",
+
+   //for InListExpression
+   "select a.ak3 "+
+   "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) 
ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from 
"+tableName1+" order by pk2,pk3 limit 10) a "+
+   "where a.av2 in('a') GROUP BY substr(a.av2,0,1),ak3 
ORDER BY ak3",
--- End diff --

Never mind - I just saw your comment in the new visitor; you only want to 
optimize this single-constant case. Would it make sense to have a negative test 
for this below, to make sure it doesn't get optimized when there is more than a 
single constant in an IN clause?
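
A sketch of what such a negative test might look like, reusing the subquery shape above; the expected behavior is an assumption (with two constants the IN should no longer collapse to a single equality, so the ORDER BY expressions should survive in the plan):

```java
// Hypothetical negative test: more than one constant in the IN clause,
// so the group-by should no longer be order preserving.
String query = "select a.ak3 from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,"
        + "substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from "
        + tableName1 + " order by pk2,pk3 limit 10) a "
        + "where a.av2 in('a','b') GROUP BY substr(a.av2,0,1),ak3 ORDER BY ak3";
QueryPlan plan = getQueryPlan(conn, query);
assertFalse(plan.getOrderBy().getOrderByExpressions().isEmpty());
```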


---


[GitHub] phoenix pull request #317: PHOENIX-3547 Supporting more number of indices pe...

2018-07-31 Thread m2je
Github user m2je commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/317#discussion_r206715551
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java ---
@@ -204,14 +205,19 @@ private void appendPKColumnValue(StringBuilder buf, 
byte[] range, Boolean isNull
 range = ptr.get();
 }
 if (changeViewIndexId) {
-Short s = (Short) type.toObject(range);
-s = (short) (s + (-Short.MAX_VALUE));
-buf.append(s.toString());
+PDataType viewIndexDataType = 
tableRef.getTable().getViewIndexType();
+buf.append(getViewIndexValue(type, range, 
viewIndexDataType).toString());
 } else {
 Format formatter = context.getConnection().getFormatter(type);
 buf.append(type.toStringLiteral(range, formatter));
 }
 }
+
+private Long getViewIndexValue(PDataType type, byte[] range, PDataType 
viewIndexDataType){
+boolean useLongViewIndex = 
MetaDataUtil.getViewIndexIdDataType().equals(viewIndexDataType);
+Object s =  type.toObject(range);
+return (useLongViewIndex ? (Long) s : (Short) s) - 
(useLongViewIndex ? Long.MAX_VALUE : Short.MAX_VALUE);
--- End diff --

Done


---


[GitHub] phoenix pull request #317: PHOENIX-3547 Supporting more number of indices pe...

2018-07-31 Thread m2je
Github user m2je commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/317#discussion_r206715609
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 ---
@@ -1431,11 +1438,42 @@ private PTable getTable(RegionScanner scanner, long 
clientTimeStamp, long tableT
 // server while holding this lock is a bad idea and likely to 
cause contention.
 return PTableImpl.makePTable(tenantId, schemaName, tableName, 
tableType, indexState, timeStamp, tableSeqNum,
 pkName, saltBucketNum, columns, parentSchemaName, 
parentTableName, indexes, isImmutableRows, physicalTables, defaultFamilyName,
-viewStatement, disableWAL, multiTenant, storeNulls, 
viewType, viewIndexId, indexType,
+viewStatement, disableWAL, multiTenant, storeNulls, 
viewType, viewIndexType, viewIndexId, indexType,
 rowKeyOrderOptimizable, transactionProvider, 
updateCacheFrequency, baseColumnCount,
 indexDisableTimestamp, isNamespaceMapped, 
autoPartitionSeq, isAppendOnlySchema, storageScheme, encodingScheme, cqCounter, 
useStatsForParallelization);
 }
+private Long getViewIndexId(Cell[] tableKeyValues, PDataType 
viewIndexType) {
+Cell viewIndexIdKv = tableKeyValues[VIEW_INDEX_ID_INDEX];
+return viewIndexIdKv == null ? null :
+decodeViewIndexId(viewIndexIdKv, viewIndexType);
+}
 
+/**
+ * check the value for {@value USE_LONG_VIEW_INDEX} and if its present 
consider viewIndexId as long otherwise
+ * read as short and convert it to long
+ *
+ * @param tableKeyValues
+ * @param viewIndexType
+ * @return
+ */
+private Long decodeViewIndexId(Cell viewIndexIdKv,  PDataType 
viewIndexType) {
+boolean useLongViewIndex = 
MetaDataUtil.getViewIndexIdDataType().equals(viewIndexType);
+   return new Long(
+   useLongViewIndex
+   ? 
viewIndexType.getCodec().decodeLong(viewIndexIdKv.getValueArray(),
+   viewIndexIdKv.getValueOffset(), 
SortOrder.getDefault())
+   : 
MetaDataUtil.getLegacyViewIndexIdDataType().getCodec().decodeShort(viewIndexIdKv.getValueArray(),
+   viewIndexIdKv.getValueOffset(), 
SortOrder.getDefault())
+   );
+}
+
+private PDataType getViewIndexType(Cell[] tableKeyValues) {
+Cell useLongViewIndexKv = tableKeyValues[USE_LONG_VIEW_INDEX];
--- End diff --

done


---


[GitHub] phoenix pull request #320: PHOENIX-4820

2018-07-31 Thread JamesRTaylor
Github user JamesRTaylor commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/320#discussion_r206715618
  
--- Diff: 
phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java ---
@@ -2951,7 +2951,129 @@ public void testOrderPreservingGroupBy() throws 
Exception {
 }
 }
 }
-
+
+@Test
+public void testOrderPreservingGroupByForNotPkColumns() throws 
Exception {
+
+try (Connection conn= DriverManager.getConnection(getUrl())) {
+
+conn.createStatement().execute("CREATE TABLE test (\n" +
+"pk1 varchar, \n" +
+"pk2 varchar, \n" +
+"pk3 varchar, \n" +
+"pk4 varchar, \n" +
+"v1 varchar, \n" +
+"v2 varchar,\n" +
+"CONSTRAINT pk PRIMARY KEY (\n" +
+"   pk1,\n" +
+"   pk2,\n" +
+"   pk3,\n" +
+"   pk4\n" +
+" )\n" +
+" )");
+String[] queries = new String[] {
+"SELECT pk3 FROM test WHERE v2 = 'a' GROUP BY 
substr(v2,0,1),pk3 ORDER BY pk3",
+"SELECT pk3 FROM test WHERE pk1 = 'c' and v2 = 
substr('abc',1,1) GROUP BY v2,pk3 ORDER BY pk3",
+"SELECT pk3 FROM test WHERE v1 = 'a' and v2 = 'b' 
GROUP BY length(v1)+length(v2),pk3 ORDER BY pk3",
+"SELECT pk3 FROM test WHERE pk1 = 'a' and v2 = 'b' 
GROUP BY length(pk1)+length(v2),pk3 ORDER BY pk3",
+"SELECT pk3 FROM test WHERE v1 = 'a' and v2 = 
substr('abc',2,1) GROUP BY pk4,CASE WHEN v1 > v2 THEN v1 ELSE v2 END,pk3 ORDER 
BY pk4,pk3",
+"SELECT pk3 FROM test WHERE pk1 = 'a' and v2 = 
substr('abc',2,1) GROUP BY pk4,CASE WHEN pk1 > v2 THEN pk1 ELSE v2 END,pk3 
ORDER BY pk4,pk3",
+"SELECT pk3 FROM test WHERE pk1 = 'a' and pk2 = 'b' 
and v1 = 'c' GROUP BY CASE WHEN pk1 > pk2 THEN v1 WHEN pk1 = pk2 THEN pk1 ELSE 
pk2 END,pk3 ORDER BY pk3"
+};
+int index = 0;
+for (String query : queries) {
+QueryPlan plan = getQueryPlan(conn, query);
+assertTrue((index + 1) + ") " + queries[index], 
plan.getOrderBy().getOrderByExpressions().isEmpty());
+index++;
+}
+}
+}
+
+@Test
+public void testOrderPreservingGroupByForClientAggregatePlan() throws 
Exception {
+Connection conn = null;
+ try {
+ conn = DriverManager.getConnection(getUrl());
+ String tableName1 = "test_table";
+ String sql = "create table " + tableName1 + "( "+
+ " pk1 varchar not null , " +
+ " pk2 varchar not null, " +
+ " pk3 varchar not null," +
+ " v1 varchar, " +
+ " v2 varchar, " +
+ " CONSTRAINT TEST_PK PRIMARY KEY ( "+
+"pk1,"+
+"pk2,"+
+"pk3 ))";
+ conn.createStatement().execute(sql);
+
+ String[] queries = new String[] {
+   "select a.ak3 "+
+   "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) 
ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from 
"+tableName1+" order by pk2,pk3 limit 10) a "+
+   "group by a.ak3,a.av1 order by a.ak3,a.av1",
+
+   "select a.ak3 "+
+   "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) 
ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from 
"+tableName1+" order by pk2,pk3 limit 10) a "+
+   "where a.av2 = 'a' GROUP BY substr(a.av2,0,1),ak3 ORDER 
BY ak3",
+
+   //for InListExpression
+   "select a.ak3 "+
+   "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) 
ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from 
"+tableName1+" order by pk2,pk3 limit 10) a "+
+   "where a.av2 in('a') GROUP BY substr(a.av2,0,1),ak3 
ORDER BY ak3",
--- End diff --

For the IN test, please add more than one constant (or this gets optimized 
to a.av2='a').


---


[GitHub] phoenix pull request #317: PHOENIX-3547 Supporting more number of indices pe...

2018-07-31 Thread m2je
Github user m2je commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/317#discussion_r206715585
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 ---
@@ -2185,7 +2223,7 @@ public void createTable(RpcController controller, 
CreateTableRequest request,
 throw sqlExceptions[0];
 }
 long seqValue = seqValues[0];
-if (seqValue > Short.MAX_VALUE) {
+if (seqValue > Long.MAX_VALUE) {
--- End diff --

done


---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-31 Thread twdsilva
Github user twdsilva commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r206688033
  
--- Diff: 
phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java ---
@@ -82,6 +82,10 @@
 static final String SYSTEM_SEQUENCE_IDENTIFIER =
 QueryConstants.SYSTEM_SCHEMA_NAME + "." + "\"" + 
PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_TABLE+ "\"";
 
+static final String SYSTEM_MUTEX_IDENTIFIER =
+QueryConstants.SYSTEM_SCHEMA_NAME + "." + "\""
++ PhoenixDatabaseMetaData.SYSTEM_MUTEX_TABLE_NAME + 
"\"";
+
 static final Set PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES = new 
HashSet<>(Arrays.asList(
--- End diff --

Done


---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-31 Thread twdsilva
Github user twdsilva commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r206687937
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java ---
@@ -2957,6 +3010,11 @@ MutationState dropTable(String schemaName, String 
tableName, String parentTableN
 Delete linkDelete = new Delete(linkKey, clientTimeStamp);
 tableMetaData.add(linkDelete);
 }
+if (tableType == PTableType.TABLE) {
+// acquire a mutex on the table to prevent creating views 
while concurrently
+// dropping the base table
+acquiredMutex = writeCell(null, schemaName, tableName, 
null);
--- End diff --

Yes, I missed that. I originally had writeCell throw the 
ConcurrentTableMutationException, but we need writeCell to return false if it 
wasn't able to do the checkAndPut, so that we can set the acquiredMutex boolean, 
which is later used to determine whether we have to delete the cell in the 
finally block.
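
A minimal sketch of the acquire/release pattern being described; the helper names follow the discussion above, but the exact signatures and the exception constructor arguments are assumptions:

```java
boolean acquiredMutex = false;
try {
    // writeCell returns false when the checkAndPut loses the race
    acquiredMutex = writeCell(tenantId, schemaName, tableName, columnName);
    if (!acquiredMutex) {
        throw new ConcurrentTableMutationException(schemaName, tableName);
    }
    // ... perform the metadata mutation ...
} finally {
    if (acquiredMutex) {
        // only the winner releases the mutex
        deleteCell(tenantId, schemaName, tableName, columnName);
    }
}
```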


---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-31 Thread twdsilva
Github user twdsilva commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r206687568
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java ---
@@ -1913,6 +1940,21 @@ private PTable 
createTableInternal(CreateTableStatement statement, byte[][] spli
 boolean isLocalIndex = indexType == IndexType.LOCAL;
 QualifierEncodingScheme encodingScheme = 
NON_ENCODED_QUALIFIERS;
 ImmutableStorageScheme immutableStorageScheme = 
ONE_CELL_PER_COLUMN;
+
+if (tableType == PTableType.VIEW) {
--- End diff --

Done


---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-31 Thread twdsilva
Github user twdsilva commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r206687510
  
--- Diff: 
phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java ---
@@ -16,8 +16,29 @@
  */
 package org.apache.phoenix.end2end;
 
-import com.google.common.base.Joiner;
-import com.google.common.base.Throwables;
+import static org.junit.Assert.assertEquals;
--- End diff --

I used the dev/phoenix.importorder format, so this should be fine.


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-07-31 Thread joshelser
Github user joshelser commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r206638567
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java
 ---
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.metrics.Counter;
+import org.apache.hadoop.hbase.metrics.Gauge;
+import org.apache.hadoop.hbase.metrics.Histogram;
+import org.apache.hadoop.hbase.metrics.Meter;
+import org.apache.hadoop.hbase.metrics.Metric;
+import org.apache.hadoop.hbase.metrics.MetricRegistry;
+import org.apache.hadoop.hbase.metrics.MetricRegistryInfo;
+import org.apache.hadoop.hbase.metrics.Timer;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.Interns;
+import org.apache.hadoop.metrics2.lib.MutableHistogram;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
+
+/**
+ * Contents mostly copied from GlobalMetricRegistriesAdapter class from 
hbase-hadoop2-compat
+ * The adapter attaches HBase's MetricRegistry to Hadoop's 
DefaultMetricsSystem
+ * Doesn't handle dynamic attach/detach of registries
+ */
+public class GlobalMetricRegistriesAdapter {
+
+private static final Log LOG = 
LogFactory.getLog(GlobalMetricRegistriesAdapter.class);
+private static GlobalMetricRegistriesAdapter INSTANCE;
+
+private GlobalMetricRegistriesAdapter() {
+DefaultMetricsSystem.initialize("HBase");
+JvmMetrics.initSingleton("HBase", "");
--- End diff --

Oh, right. I keep getting confused over that.

JVM metrics being exported from PQS makes total sense. +1 to that.

I am now wondering: should we be exporting Phoenix metrics out of PQS under 
"Client", or create some specific "sub=PQS" grouping or similar for it?


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-07-31 Thread karanmehta93
Github user karanmehta93 commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r206616222
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java
 ---
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.metrics.Counter;
+import org.apache.hadoop.hbase.metrics.Gauge;
+import org.apache.hadoop.hbase.metrics.Histogram;
+import org.apache.hadoop.hbase.metrics.Meter;
+import org.apache.hadoop.hbase.metrics.Metric;
+import org.apache.hadoop.hbase.metrics.MetricRegistry;
+import org.apache.hadoop.hbase.metrics.MetricRegistryInfo;
+import org.apache.hadoop.hbase.metrics.Timer;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.Interns;
+import org.apache.hadoop.metrics2.lib.MutableHistogram;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
+
+/**
+ * Contents mostly copied from GlobalMetricRegistriesAdapter class from 
hbase-hadoop2-compat
+ * The adapter attaches HBase's MetricRegistry to Hadoop's 
DefaultMetricsSystem
+ * Doesn't handle dynamic attach/detach of registries
+ */
+public class GlobalMetricRegistriesAdapter {
+
+private static final Log LOG = 
LogFactory.getLog(GlobalMetricRegistriesAdapter.class);
+private static GlobalMetricRegistriesAdapter INSTANCE;
+
+private GlobalMetricRegistriesAdapter() {
+DefaultMetricsSystem.initialize("HBase");
+JvmMetrics.initSingleton("HBase", "");
--- End diff --

Actually, I added this since it is going to be part of PQS, where having 
these metrics would help. Should I guard this with a config parameter then?
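
A sketch of what that guard might look like; the property name and the config object are hypothetical:

```java
// Only export JVM metrics when explicitly enabled, e.g. for PQS deployments.
if (config.getBoolean("phoenix.metrics.jvm.enabled", false)) {
    JvmMetrics.initSingleton("Phoenix", "");
}
```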


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-07-31 Thread joshelser
Github user joshelser commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r206597366
  
--- Diff: 
phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java ---
@@ -204,6 +230,45 @@ private static void resetGlobalMetrics() {
 }
 }
 
+// Phoenix Client Metrics are transported via Hadoop-metrics2 sink
+// The test sink is defined at GlobalPhoenixMetricsTestSink
+// Configuration for Hadoop-metrics2 comes from 
hadoop-metrics2.properties file located in test/resources
+private boolean verifyMetricsFromSink() throws InterruptedException {
+Map expectedMetrics = new HashMap<>();
+for (GlobalMetric m : 
PhoenixRuntime.getGlobalPhoenixClientMetrics()) {
+expectedMetrics.put(m.getMetricType().name(), m.getTotalSum());
+}
+
+for (int i = 0; i < MAX_RETRIES; i++) {
+LOG.info("Verifying Global Metrics from Hadoop Sink, Retry: " 
+ (i + 1));
+if (verifyMetricsFromSinkOnce(expectedMetrics)) {
+LOG.info("Values from Hadoop Metrics Sink match actual 
values");
+return true;
+}
+Thread.sleep(1000);
+}
+return false;
+}
+
+private boolean verifyMetricsFromSinkOnce(Map 
expectedMetrics) {
+synchronized (GlobalPhoenixMetricsTestSink.lock) {
+for (AbstractMetric metric : 
GlobalPhoenixMetricsTestSink.metrics) {
+if (expectedMetrics.containsKey(metric.name())) {
--- End diff --

I thought there was an issue with unboxing the value from 
`expectedMetrics.get(metric.name())`.

I think it would be cleaner to do:
```
Long value = expectedMetrics.get(metric.name());
if (value != null) {
  long expectedValue = value.longValue();
  ...
```


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-07-31 Thread joshelser
Github user joshelser commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r206598366
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java
 ---
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.metrics.Counter;
+import org.apache.hadoop.hbase.metrics.Gauge;
+import org.apache.hadoop.hbase.metrics.Histogram;
+import org.apache.hadoop.hbase.metrics.Meter;
+import org.apache.hadoop.hbase.metrics.Metric;
+import org.apache.hadoop.hbase.metrics.MetricRegistry;
+import org.apache.hadoop.hbase.metrics.MetricRegistryInfo;
+import org.apache.hadoop.hbase.metrics.Timer;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.Interns;
+import org.apache.hadoop.metrics2.lib.MutableHistogram;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
+
+/**
+ * Contents mostly copied from GlobalMetricRegistriesAdapter class from 
hbase-hadoop2-compat
+ * The adapter attaches HBase's MetricRegistry to Hadoop's 
DefaultMetricsSystem
+ * Doesn't handle dynamic attach/detach of registries
+ */
+public class GlobalMetricRegistriesAdapter {
+
+private static final Log LOG = 
LogFactory.getLog(GlobalMetricRegistriesAdapter.class);
+private static GlobalMetricRegistriesAdapter INSTANCE;
+
+private GlobalMetricRegistriesAdapter() {
+DefaultMetricsSystem.initialize("HBase");
+JvmMetrics.initSingleton("HBase", "");
--- End diff --

I think collecting JVM metrics from client applications is heavy-handed. 
I'd suggest you drop this.


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-07-31 Thread joshelser
Github user joshelser commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r206598488
  
--- Diff: 
phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java ---
@@ -204,6 +230,45 @@ private static void resetGlobalMetrics() {
 }
 }
 
+// Phoenix Client Metrics are transported via Hadoop-metrics2 sink
+// The test sink is defined at GlobalPhoenixMetricsTestSink
+// Configuration for Hadoop-metrics2 comes from 
hadoop-metrics2.properties file located in test/resources
--- End diff --

Nice!


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-07-31 Thread joshelser
Github user joshelser commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r206598832
  
--- Diff: 
phoenix-core/src/it/java/org/apache/phoenix/monitoring/GlobalPhoenixMetricsTestSink.java
 ---
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+import org.apache.commons.configuration.SubsetConfiguration;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsSink;
+import org.apache.phoenix.util.PhoenixRuntime;
+
+import java.util.Map;
+
+import static junit.framework.TestCase.assertTrue;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+public class GlobalPhoenixMetricsTestSink implements MetricsSink {
+
+public static final String PHOENIX_METRICS_RECORD_NAME = "PHOENIX";
+static Object lock = new Object();
--- End diff --

Can you leave a comment here about the importance of this lock? e.g. that 
the caller should hold it to read `metrics`.
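
For example, the suggested comment might read something like this (the wording is only a sketch):

```java
// Guards 'metrics': the metrics2 sink thread writes it while the test
// thread polls it, so both must synchronize on this lock first.
static Object lock = new Object();
```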


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-07-31 Thread joshelser
Github user joshelser commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r206595985
  
--- Diff: 
phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java ---
@@ -204,6 +230,45 @@ private static void resetGlobalMetrics() {
 }
 }
 
+// Phoenix Client Metrics are transported via Hadoop-metrics2 sink
+// The test sink is defined at GlobalPhoenixMetricsTestSink
+// Configuration for Hadoop-metrics2 comes from 
hadoop-metrics2.properties file located in test/resources
+private boolean verifyMetricsFromSink() throws InterruptedException {
+Map expectedMetrics = new HashMap<>();
+for (GlobalMetric m : 
PhoenixRuntime.getGlobalPhoenixClientMetrics()) {
+expectedMetrics.put(m.getMetricType().name(), m.getTotalSum());
+}
+
+for (int i = 0; i < MAX_RETRIES; i++) {
+LOG.info("Verifying Global Metrics from Hadoop Sink, Retry: " 
+ (i + 1));
+if (verifyMetricsFromSinkOnce(expectedMetrics)) {
+LOG.info("Values from Hadoop Metrics Sink match actual 
values");
+return true;
+}
+Thread.sleep(1000);
--- End diff --

Change this to:
```
try {
  Thread.sleep(1000);
} catch (InterruptedException e) {
  Thread.currentThread().interrupt();
  return false;
}
```
Then, remove the `throws InterruptedException`.
We should be catching the exception, re-setting the interrupted state, and 
returning ASAP. This code likely never gets invoked in a unit-test context, 
but it's good practice.


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-07-31 Thread joshelser
Github user joshelser commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r206598160
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java
 ---
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.metrics.Counter;
+import org.apache.hadoop.hbase.metrics.Gauge;
+import org.apache.hadoop.hbase.metrics.Histogram;
+import org.apache.hadoop.hbase.metrics.Meter;
+import org.apache.hadoop.hbase.metrics.Metric;
+import org.apache.hadoop.hbase.metrics.MetricRegistry;
+import org.apache.hadoop.hbase.metrics.MetricRegistryInfo;
+import org.apache.hadoop.hbase.metrics.Timer;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.Interns;
+import org.apache.hadoop.metrics2.lib.MutableHistogram;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
+
+/**
+ * Contents mostly copied from GlobalMetricRegistriesAdapter class from 
hbase-hadoop2-compat
+ * The adapter attaches HBase's MetricRegistry to Hadoop's 
DefaultMetricsSystem
+ * Doesn't handle dynamic attach/detach of registries
+ */
+public class GlobalMetricRegistriesAdapter {
+
+private static final Log LOG = 
LogFactory.getLog(GlobalMetricRegistriesAdapter.class);
+private static GlobalMetricRegistriesAdapter INSTANCE;
+
+private GlobalMetricRegistriesAdapter() {
+DefaultMetricsSystem.initialize("HBase");
--- End diff --

s/HBase/Phoenix/


---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-31 Thread karanmehta93
Github user karanmehta93 commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r206416912
  
--- Diff: 
phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java ---
@@ -16,8 +16,29 @@
  */
 package org.apache.phoenix.end2end;
 
-import com.google.common.base.Joiner;
-import com.google.common.base.Throwables;
+import static org.junit.Assert.assertEquals;
--- End diff --

nit: this diff is only due to a change in the order of imports


---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-31 Thread karanmehta93
Github user karanmehta93 commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r206415970
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java ---
@@ -2957,6 +3010,11 @@ MutationState dropTable(String schemaName, String 
tableName, String parentTableN
 Delete linkDelete = new Delete(linkKey, clientTimeStamp);
 tableMetaData.add(linkDelete);
 }
+if (tableType == PTableType.TABLE) {
+// acquire a mutex on the table to prevent creating views 
while concurrently
+// dropping the base table
+acquiredMutex = writeCell(null, schemaName, tableName, 
null);
--- End diff --

Nothing is prevented even if `acquiredMutex` is false here. I think you 
should throw `ConcurrentTableMutationException` here as well. You might 
want to push that logic down into the `writeCell()` method.


---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-31 Thread karanmehta93
Github user karanmehta93 commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r206415547
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java ---
@@ -1913,6 +1940,21 @@ private PTable 
createTableInternal(CreateTableStatement statement, byte[][] spli
 boolean isLocalIndex = indexType == IndexType.LOCAL;
 QualifierEncodingScheme encodingScheme = 
NON_ENCODED_QUALIFIERS;
 ImmutableStorageScheme immutableStorageScheme = 
ONE_CELL_PER_COLUMN;
+
+if (tableType == PTableType.VIEW) {
--- End diff --

nit: Would be good to have some logs here for debugging purposes.


---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-31 Thread karanmehta93
Github user karanmehta93 commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r206414686
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 ---
@@ -3410,17 +3405,10 @@ void ensureSystemTablesMigratedToSystemNamespace()
 // No tables exist matching "SYSTEM\..*", they are all already 
in "SYSTEM:.*"
 if (tableNames.size() == 0) { return; }
 // Try to move any remaining tables matching "SYSTEM\..*" into 
"SYSTEM:"
-if (tableNames.size() > 5) {
+if (tableNames.size() > 7) {
 logger.warn("Expected 5 system tables but found " + 
tableNames.size() + ":" + tableNames);
--- End diff --

nit: change the log message appropriately, and extract the constant into a named variable.
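
A sketch of the suggested cleanup (the constant name is hypothetical):

```java
private static final int MAX_EXPECTED_SYSTEM_TABLES = 7;

// ... then, where the table names are checked:
if (tableNames.size() > MAX_EXPECTED_SYSTEM_TABLES) {
    logger.warn("Expected at most " + MAX_EXPECTED_SYSTEM_TABLES
            + " system tables but found " + tableNames.size() + ": " + tableNames);
}
```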


---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-31 Thread karanmehta93
Github user karanmehta93 commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r206414419
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 ---
@@ -2723,6 +2712,9 @@ private void 
createOtherSystemTables(PhoenixConnection metaConnection, HBaseAdmi
 try {
 
metaConnection.createStatement().executeUpdate(getChildLinkDDL());
 } catch (TableAlreadyExistsException e) {}
+try {
+metaConnection.createStatement().executeUpdate(getMutexDDL());
+} catch (TableAlreadyExistsException e) {}
 // Catch the IOException to log the error message and then bubble 
it up for the client to retry.
 try {
 createSysMutexTableIfNotExists(hbaseAdmin);
--- End diff --

Do we still need this part of the code?


---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-31 Thread karanmehta93
Github user karanmehta93 commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r206412733
  
--- Diff: 
phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java ---
@@ -82,6 +82,10 @@
 static final String SYSTEM_SEQUENCE_IDENTIFIER =
 QueryConstants.SYSTEM_SCHEMA_NAME + "." + "\"" + 
PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_TABLE+ "\"";
 
+static final String SYSTEM_MUTEX_IDENTIFIER =
+QueryConstants.SYSTEM_SCHEMA_NAME + "." + "\""
++ PhoenixDatabaseMetaData.SYSTEM_MUTEX_TABLE_NAME + 
"\"";
+
 static final Set PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES = new 
HashSet<>(Arrays.asList(
--- End diff --

Do you need to add SYSTEM:MUTEX over here as well, since it's now visible at 
the Phoenix level too?


---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-31 Thread twdsilva
Github user twdsilva commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r206411967
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java ---
@@ -3604,6 +3681,18 @@ public MutationState addColumn(PTable table, 
List origColumnDefs,
 }
 } finally {
 connection.setAutoCommit(wasAutoCommit);
+if (acquiredMutex && !columns.isEmpty()) {
--- End diff --

@ankitsinghal Sorry, I didn't update the PR with all my latest changes. I have 
fixed this. Can you please review?


---


[GitHub] phoenix pull request #320: PHOENIX-4820

2018-07-30 Thread comnetwork
GitHub user comnetwork opened a pull request:

https://github.com/apache/phoenix/pull/320

PHOENIX-4820

This patch mainly does the following:

1. Isolates the changes made for QueryCompiler into OrderByCompiler.
2. Checks more Expression types besides ComparisonExpression in 
IsColumnConstantExpressionVisitor, and adds tests for InListExpression, 
CoerceExpression, and RVC.
3. I think ExpressionUtil.isConstant(Expression) is not suitable for 
OrderPreservingTracker.IsConstantVisitor: isStateless() checks whether the 
expression depends on server state, and even for RowKeyColumnExpression, 
isStateless() is false. What we want to check is whether an expression is 
constant when all of its children are constants. Consider the following SQL:

select a.ak3 from
(select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from test order by pk2,pk3 limit 10) a
where a.ak1 = 0.0 and a.av2 = length(substr('abc',1,1))
group by a.ak3,CASE WHEN coalesce(a.ak1,1) > coalesce(a.av2,2) THEN coalesce(a.ak1,1) ELSE coalesce(a.av2,2) END,a.av1
order by a.ak3,a.av1

Obviously, because of rand(), the determinism of the expression a.ak1 is 
Determinism.PER_INVOCATION, so for the expression "CASE WHEN coalesce(a.ak1,1) > 
coalesce(a.av2,2) THEN coalesce(a.ak1,1) ELSE coalesce(a.av2,2) END" the 
determinism is also Determinism.PER_INVOCATION and isStateless() is false. But 
because a.ak1 and a.av2 are both constants in the WHERE clause of the outer 
query, we can regard that CASE WHEN expression as constant in IsConstantVisitor.
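
A minimal sketch of the recursion this describes (not the actual IsConstantVisitor; the pinnedByWhere set stands in for whatever mechanism the patch uses to record expressions fixed to a single value by the WHERE clause):

```java
boolean isConstant(Expression e, Set<Expression> pinnedByWhere) {
    // A literal, or an expression pinned by the outer WHERE, is constant.
    if (e instanceof LiteralExpression || pinnedByWhere.contains(e)) {
        return true;
    }
    List<Expression> children = e.getChildren();
    if (children.isEmpty()) {
        return false; // e.g. a bare column reference or rand()
    }
    for (Expression child : children) {
        if (!isConstant(child, pinnedByWhere)) {
            return false;
        }
    }
    return true; // constant regardless of Determinism/isStateless()
}
```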

You can merge this pull request into a Git repository by running:

$ git pull https://github.com/comnetwork/phoenix 4.x-HBase-1.3

Alternatively you can review and apply these changes as the patch at:

https://github.com/apache/phoenix/pull/320.patch

To close this pull request, make a commit to your master/trunk branch
with (at least) the following in the commit message:

This closes #320


commit ae0e6ef4dded2c3a8c3d6fffa200acbe971688f8
Author: chenglei 
Date:   2018-07-31T04:34:25Z

PHOENIX-4820 v2




---


[GitHub] phoenix pull request #314: PHOENIX-4820

2018-07-30 Thread comnetwork
Github user comnetwork commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/314#discussion_r206382301
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java ---
@@ -559,8 +559,16 @@ protected QueryPlan 
compileSingleFlatQuery(StatementContext context, SelectState
 groupBy = groupBy.compile(context, innerPlanTupleProjector);
 context.setResolver(resolver); // recover resolver
 RowProjector projector = ProjectionCompiler.compile(context, 
select, groupBy, asSubquery ? Collections.emptyList() : targetColumns, 
where);
-OrderBy orderBy = OrderByCompiler.compile(context, select, 
groupBy, limit, offset, projector,
-groupBy == GroupBy.EMPTY_GROUP_BY ? 
innerPlanTupleProjector : null, isInRowKeyOrder);
+OrderBy orderBy = OrderByCompiler.compile(
+context,
+select,
+groupBy,
+limit,
+offset,
+projector,
+groupBy == GroupBy.EMPTY_GROUP_BY ? 
innerPlanTupleProjector : null,
+groupBy == GroupBy.EMPTY_GROUP_BY ? isInRowKeyOrder : true,
+where);
--- End diff --

@maryannxue, yes, you are right: eliminating order-by based on the inner 
plan ordering is the ultimate solution. Opening this JIRA is just to optimize the 
OrderBy for ClientAggregatePlan when there is a GroupBy; only the GroupBy of the 
outer query needs to be considered, and the inner query does not need to be taken 
into account. That is much simpler than fully optimizing OrderBy for 
ClientScanPlan, which would require a large modification and refactoring of 
OrderByCompiler and OrderPreservingTracker. I plan to fully optimize OrderBy 
for ClientScanPlan after this JIRA by opening a new one.


---


[GitHub] phoenix pull request #314: PHOENIX-4820

2018-07-30 Thread maryannxue
Github user maryannxue commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/314#discussion_r206377190
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java ---
@@ -559,8 +559,16 @@ protected QueryPlan 
compileSingleFlatQuery(StatementContext context, SelectState
 groupBy = groupBy.compile(context, innerPlanTupleProjector);
 context.setResolver(resolver); // recover resolver
 RowProjector projector = ProjectionCompiler.compile(context, 
select, groupBy, asSubquery ? Collections.emptyList() : targetColumns, 
where);
-OrderBy orderBy = OrderByCompiler.compile(context, select, 
groupBy, limit, offset, projector,
-groupBy == GroupBy.EMPTY_GROUP_BY ? 
innerPlanTupleProjector : null, isInRowKeyOrder);
+OrderBy orderBy = OrderByCompiler.compile(
+context,
+select,
+groupBy,
+limit,
+offset,
+projector,
+groupBy == GroupBy.EMPTY_GROUP_BY ? 
innerPlanTupleProjector : null,
+groupBy == GroupBy.EMPTY_GROUP_BY ? isInRowKeyOrder : true,
+where);
--- End diff --

Eliminating order-by based on the inner plan ordering is definitely the 
right and ultimate solution, and we should get rid of all "dirty fix" flags 
here. The compile result (in our case, the QueryPlan) should have a definite 
ordering, whether it comes from an ORDER BY, a GROUP BY, a natural row 
order, or an ORDER BY in the inner query. However, letting QueryPlan contain 
that information might not be a good idea, so we should see if we can infer the 
ordering from a QueryPlan.


---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-30 Thread ankitsinghal
Github user ankitsinghal commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r206330492
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java ---
@@ -3604,6 +3681,18 @@ public MutationState addColumn(PTable table, 
List origColumnDefs,
 }
 } finally {
 connection.setAutoCommit(wasAutoCommit);
+if (acquiredMutex && !columns.isEmpty()) {
--- End diff --

where is acquiredMutex set to true?




---


[GitHub] phoenix pull request #317: PHOENIX-3547 Supporting more number of indices pe...

2018-07-30 Thread JamesRTaylor
Github user JamesRTaylor commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/317#discussion_r206328431
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java ---
@@ -204,14 +205,19 @@ private void appendPKColumnValue(StringBuilder buf, 
byte[] range, Boolean isNull
 range = ptr.get();
 }
 if (changeViewIndexId) {
-Short s = (Short) type.toObject(range);
-s = (short) (s + (-Short.MAX_VALUE));
-buf.append(s.toString());
+PDataType viewIndexDataType = 
tableRef.getTable().getViewIndexType();
+buf.append(getViewIndexValue(type, range, 
viewIndexDataType).toString());
 } else {
 Format formatter = context.getConnection().getFormatter(type);
 buf.append(type.toStringLiteral(range, formatter));
 }
 }
+
+private Long getViewIndexValue(PDataType type, byte[] range, PDataType 
viewIndexDataType){
+boolean useLongViewIndex = 
MetaDataUtil.getViewIndexIdDataType().equals(viewIndexDataType);
+Object s =  type.toObject(range);
+return (useLongViewIndex ? (Long) s : (Short) s) - 
(useLongViewIndex ? Long.MAX_VALUE : Short.MAX_VALUE);
--- End diff --

Shouldn't the argument be all that's necessary? Why is an additional 
type argument needed? The type as defined in the schema should be the right 
type, no?


---


[GitHub] phoenix pull request #317: PHOENIX-3547 Supporting more number of indices pe...

2018-07-30 Thread JamesRTaylor
Github user JamesRTaylor commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/317#discussion_r206326382
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 ---
@@ -1431,11 +1438,42 @@ private PTable getTable(RegionScanner scanner, long 
clientTimeStamp, long tableT
 // server while holding this lock is a bad idea and likely to 
cause contention.
 return PTableImpl.makePTable(tenantId, schemaName, tableName, 
tableType, indexState, timeStamp, tableSeqNum,
 pkName, saltBucketNum, columns, parentSchemaName, 
parentTableName, indexes, isImmutableRows, physicalTables, defaultFamilyName,
-viewStatement, disableWAL, multiTenant, storeNulls, 
viewType, viewIndexId, indexType,
+viewStatement, disableWAL, multiTenant, storeNulls, 
viewType, viewIndexType, viewIndexId, indexType,
 rowKeyOrderOptimizable, transactionProvider, 
updateCacheFrequency, baseColumnCount,
 indexDisableTimestamp, isNamespaceMapped, 
autoPartitionSeq, isAppendOnlySchema, storageScheme, encodingScheme, cqCounter, 
useStatsForParallelization);
 }
+private Long getViewIndexId(Cell[] tableKeyValues, PDataType 
viewIndexType) {
+Cell viewIndexIdKv = tableKeyValues[VIEW_INDEX_ID_INDEX];
+return viewIndexIdKv == null ? null :
+decodeViewIndexId(viewIndexIdKv, viewIndexType);
+}
 
+/**
+ * check the value for {@value USE_LONG_VIEW_INDEX} and if its present 
consider viewIndexId as long otherwise
+ * read as short and convert it to long
+ *
+ * @param tableKeyValues
+ * @param viewIndexType
+ * @return
+ */
+private Long decodeViewIndexId(Cell viewIndexIdKv,  PDataType 
viewIndexType) {
+boolean useLongViewIndex = 
MetaDataUtil.getViewIndexIdDataType().equals(viewIndexType);
+   return new Long(
+   useLongViewIndex
+   ? 
viewIndexType.getCodec().decodeLong(viewIndexIdKv.getValueArray(),
+   viewIndexIdKv.getValueOffset(), 
SortOrder.getDefault())
+   : 
MetaDataUtil.getLegacyViewIndexIdDataType().getCodec().decodeShort(viewIndexIdKv.getValueArray(),
+   viewIndexIdKv.getValueOffset(), 
SortOrder.getDefault())
+   );
+}
+
+private PDataType getViewIndexType(Cell[] tableKeyValues) {
+Cell useLongViewIndexKv = tableKeyValues[USE_LONG_VIEW_INDEX];
--- End diff --

I think we need to keep the single VIEW_INDEX_ID column and make sure its 
type is defined (through a property) at create time (and not allow it to be 
changed). The issue isn't with the metadata, but with the row key of the rows 
of the table. In old tables it'll be a short, while in new tables it'll be a 
long. We don't want to have to rewrite the data.
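
For illustration, a minimal sketch of the decode that suggestion implies, with 
the VIEW_INDEX_ID type fixed at CREATE time. The table.getViewIndexType() 
accessor and the kv variable are assumed here; PLong/PSmallint and the codec 
calls mirror the quoted diff:

```java
// Sketch only: decode the view index id row-key bytes based on the type
// declared for the table at create time, never on a mutable flag.
PDataType<?> declared = table.getViewIndexType(); // assumed accessor
long viewIndexId = PLong.INSTANCE.equals(declared)
    ? PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(),
          kv.getValueOffset(), SortOrder.getDefault())
    : PSmallint.INSTANCE.getCodec().decodeShort(kv.getValueArray(),
          kv.getValueOffset(), SortOrder.getDefault());
```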


---


[GitHub] phoenix pull request #308: Client-side hash aggregation

2018-07-30 Thread JamesRTaylor
Github user JamesRTaylor commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/308#discussion_r206295564
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java 
---
@@ -135,17 +142,25 @@ public ResultIterator iterator(ParallelScanGrouper 
scanGrouper, Scan scan) throw
 aggResultIterator = new 
ClientUngroupedAggregatingResultIterator(LookAheadResultIterator.wrap(iterator),
 serverAggregators);
 aggResultIterator = new 
UngroupedAggregatingResultIterator(LookAheadResultIterator.wrap(aggResultIterator),
 clientAggregators);
 } else {
-if (!groupBy.isOrderPreserving()) {
-int thresholdBytes = 
context.getConnection().getQueryServices().getProps().getInt(
-QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, 
QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES);
-List<Expression> keyExpressions = 
groupBy.getKeyExpressions();
+List<Expression> keyExpressions = groupBy.getKeyExpressions();
+if (groupBy.isOrderPreserving()) {
+aggResultIterator = new 
ClientGroupedAggregatingResultIterator(LookAheadResultIterator.wrap(iterator), 
serverAggregators, keyExpressions);
+} else {
+int thresholdBytes = 
context.getConnection().getQueryServices().getProps().getInt
+(QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, 
QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES);
List<OrderByExpression> keyExpressionOrderBy = 
Lists.newArrayListWithExpectedSize(keyExpressions.size());
 for (Expression keyExpression : keyExpressions) {
 keyExpressionOrderBy.add(new 
OrderByExpression(keyExpression, false, true));
 }
-iterator = new OrderedResultIterator(iterator, 
keyExpressionOrderBy, thresholdBytes, null, null, 
projector.getEstimatedRowByteSize());
+
+if (useHashAgg) {
+boolean sort = orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY;
+aggResultIterator = new 
ClientHashAggregatingResultIterator(context, iterator, serverAggregators, 
keyExpressions, sort);
--- End diff --

Please add a comment about reverse sort and point to the JIRA (create one if 
there isn't one already). It might be best to have the code here such that 
once reverse sort is supported, the hash aggregate will just work.


---


[GitHub] phoenix pull request #308: Client-side hash aggregation

2018-07-30 Thread JamesRTaylor
Github user JamesRTaylor commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/308#discussion_r206295347
  
--- Diff: 
phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientHashAggregateIT.java 
---
@@ -0,0 +1,191 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.util.Properties;
+
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
+import org.junit.Test;
+
+public class ClientHashAggregateIT extends ParallelStatsDisabledIT {
--- End diff --

Unless I'm missing something, this needs a few more tests (a rough sketch of 
the first one is below):
* verify CLIENT SORTED BY is present in explain plan when sort required for 
hash aggregate
* verify it's not in explain plan when sort not required
* verify query results when sort not required
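
A rough sketch of the first test. The HASH_AGGREGATE/USE_SORT_MERGE_JOIN hints 
and the join shape needed to force client-side aggregation are assumptions 
based on this PR; helpers like generateUniqueName() and getUrl() come from the 
IT base classes. Treat this as a shape, not exact code:

```java
@Test
public void testSortedExplainPlanForHashAggregate() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
        String table = generateUniqueName();
        conn.createStatement().execute(
            "CREATE TABLE " + table + " (K INTEGER PRIMARY KEY, V INTEGER)");
        // the join forces a client-side plan; ORDER BY requires sorted output
        String query = "SELECT /*+ USE_SORT_MERGE_JOIN HASH_AGGREGATE */"
            + " t1.V, COUNT(*) FROM " + table + " t1 JOIN " + table
            + " t2 ON t1.K = t2.K GROUP BY t1.V ORDER BY t1.V";
        ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + query);
        String plan = QueryUtil.getExplainPlan(rs);
        assertTrue(plan.contains("CLIENT HASH AGGREGATE INTO DISTINCT ROWS"));
        assertTrue(plan.contains("CLIENT SORTED BY"));
    }
}
```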


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-07-30 Thread joshelser
Github user joshelser commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r206259317
  
--- Diff: 
phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java ---
@@ -204,6 +219,39 @@ private static void resetGlobalMetrics() {
 }
 }
 
+private boolean verifyMetricsFromSink() throws InterruptedException {
+Map<String, Long> expectedMetrics = new HashMap<>();
+for (GlobalMetric m : 
PhoenixRuntime.getGlobalPhoenixClientMetrics()) {
+expectedMetrics.put(m.getMetricType().name(), m.getTotalSum());
+}
+
+for (int i = 0; i < MAX_RETRIES; i++) {
+LOG.info("Verifying Global Metrics from Hadoop Sink, Retry: " 
+ (i + 1));
+if (verifyMetricsFromSinkOnce(expectedMetrics)) {
+LOG.info("Values from Hadoop Metrics Sink match actual 
values");
+return true;
+}
+Thread.sleep(1000);
+}
+return false;
+}
+
+private boolean verifyMetricsFromSinkOnce(Map<String, Long> 
expectedMetrics) {
+synchronized (GlobalPhoenixMetricsTestSink.lock) {
+for (AbstractMetric metric : 
GlobalPhoenixMetricsTestSink.metrics) {
+if (expectedMetrics.containsKey(metric.name())) {
--- End diff --

> One approach can be to remove each entry from the map and check if the 
size is 0 at the end. How does that sound?

That would work too!
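
For what it's worth, a minimal sketch of that remove-and-check-empty variant 
(names reused from the quoted test, everything else assumed):

```java
private boolean verifyMetricsFromSinkOnce(Map<String, Long> expectedMetrics) {
    // work on a copy so a retry sees the full expectation set again
    Map<String, Long> remaining = new HashMap<>(expectedMetrics);
    synchronized (GlobalPhoenixMetricsTestSink.lock) {
        for (AbstractMetric metric : GlobalPhoenixMetricsTestSink.metrics) {
            Long expected = remaining.remove(metric.name());
            if (expected != null
                    && expected.longValue() != metric.value().longValue()) {
                return false; // sink reported a stale or wrong value
            }
        }
    }
    // catches the empty-sink case too: nothing removed means nothing matched
    return remaining.isEmpty();
}
```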


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-07-30 Thread joshelser
Github user joshelser commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r206259129
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java
 ---
@@ -108,40 +114,88 @@
 GLOBAL_HBASE_COUNT_ROWS_SCANNED(COUNT_ROWS_SCANNED),
 GLOBAL_HBASE_COUNT_ROWS_FILTERED(COUNT_ROWS_FILTERED);
 
-
+private static final Logger LOG = 
LoggerFactory.getLogger(GlobalClientMetrics.class);
 private static final boolean isGlobalMetricsEnabled = 
QueryServicesOptions.withDefaults().isGlobalMetricsEnabled();
+private MetricType metricType;
 private GlobalMetric metric;
 
-public void update(long value) {
+static {
+initPhoenixGlobalClientMetrics();
 if (isGlobalMetricsEnabled) {
-metric.change(value);
+MetricRegistry metricRegistry = createMetricRegistry();
+registerPhoenixMetricsToRegistry(metricRegistry);
+
GlobalMetricRegistriesAdapter.getInstance().registerMetricRegistry(metricRegistry);
+}
+}
+
+private static void initPhoenixGlobalClientMetrics() {
+for (GlobalClientMetrics globalMetric : 
GlobalClientMetrics.values()) {
+globalMetric.metric = isGlobalMetricsEnabled ?
+new GlobalMetricImpl(globalMetric.metricType) : new 
NoOpGlobalMetricImpl();
+}
+}
+
+private static void registerPhoenixMetricsToRegistry(MetricRegistry 
metricRegistry) {
+for (GlobalClientMetrics globalMetric : 
GlobalClientMetrics.values()) {
+metricRegistry.register(globalMetric.metricType.columnName(),
+new PhoenixGlobalMetricGauge(globalMetric.metric));
+}
+}
+
+private static MetricRegistry createMetricRegistry() {
+LOG.info("Creating Metric Registry for Phoenix Global Metrics");
+MetricRegistryInfo registryInfo = new 
MetricRegistryInfo("PHOENIX", "Phoenix Global Metrics",
--- End diff --

> Global here means that it is an aggregation across all clients (or all 
Phoenix Connections).

In my mind, if you show me "Phoenix Client Metrics", I would naturally 
assume that they are for all clients that have metrics enabled (as opposed to 
breakouts by individual clients). As such, the word "Global" to me is just 
filler, but maybe that's just my interpretation of it.


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-07-30 Thread joshelser
Github user joshelser commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r206259200
  
--- Diff: phoenix-core/src/test/resources/hadoop-metrics2.properties ---
@@ -32,10 +32,9 @@
 #[prefix].[source|sink].[instance].[options]
 # See javadoc of package-info.java for org.apache.hadoop.metrics2 for 
detail
 
-
-# Don't attempt to start jmx mbeans for all sources.
-#  For right now, all metrics are exported to a phoenix table
-*.source.start_mbeans=false
+hbase.source.start_mbeans=true

+hbase.sink.sink0.class=org.apache.phoenix.monitoring.GlobalPhoenixMetricsTestSink
--- End diff --

Haha, perfect :)


---


[GitHub] phoenix pull request #319: PHOENIX-4820_V2

2018-07-30 Thread comnetwork
GitHub user comnetwork opened a pull request:

https://github.com/apache/phoenix/pull/319

PHOENIX-4820_V2

This patch mainly does two things:
1. Isolates the changes made for QueryCompiler to OrderByCompiler.
2. Checks more Expressions besides ComparisonExpression in 
IsColumnConstantExpressionVisitor, and adds tests for InListExpression, 
CoerceExpression, and RVC.

You can merge this pull request into a Git repository by running:

$ git pull https://github.com/comnetwork/phoenix 4.x-HBase-1.3

Alternatively you can review and apply these changes as the patch at:

https://github.com/apache/phoenix/pull/319.patch

To close this pull request, make a commit to your master/trunk branch
with (at least) the following in the commit message:

This closes #319


commit 4b8582fbb915d225cc547f1a1786d6e43dbc5ab2
Author: chenglei 
Date:   2018-07-30T15:19:23Z

PHOENIX-4820_V2




---


[GitHub] phoenix pull request #314: PHOENIX-4820

2018-07-29 Thread comnetwork
Github user comnetwork commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/314#discussion_r206014266
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java ---
@@ -559,8 +559,16 @@ protected QueryPlan 
compileSingleFlatQuery(StatementContext context, SelectState
 groupBy = groupBy.compile(context, innerPlanTupleProjector);
 context.setResolver(resolver); // recover resolver
 RowProjector projector = ProjectionCompiler.compile(context, 
select, groupBy, asSubquery ? Collections.emptyList() : targetColumns, 
where);
-OrderBy orderBy = OrderByCompiler.compile(context, select, 
groupBy, limit, offset, projector,
-groupBy == GroupBy.EMPTY_GROUP_BY ? 
innerPlanTupleProjector : null, isInRowKeyOrder);
+OrderBy orderBy = OrderByCompiler.compile(
+context,
+select,
+groupBy,
+limit,
+offset,
+projector,
+groupBy == GroupBy.EMPTY_GROUP_BY ? 
innerPlanTupleProjector : null,
+groupBy == GroupBy.EMPTY_GROUP_BY ? isInRowKeyOrder : true,
+where);
--- End diff --

Yes, that is a nice suggestion; I will make the modification.


---


[GitHub] phoenix pull request #314: PHOENIX-4820

2018-07-29 Thread comnetwork
Github user comnetwork commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/314#discussion_r206013986
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/util/ExpressionUtil.java ---
@@ -68,4 +74,85 @@ public static boolean isPkPositionChanging(TableRef 
tableRef, List p
 return false;
 }
 
+public static boolean isColumnConstant(Expression columnExpression, 
Expression whereExpression) {
+if(whereExpression == null) {
+return false;
+}
+IsColumnConstantExpressionVisitor 
isColumnConstantExpressionVisitor =
+new IsColumnConstantExpressionVisitor(columnExpression);
+whereExpression.accept(isColumnConstantExpressionVisitor);
+return isColumnConstantExpressionVisitor.isConstant();
+}
+
+private static class IsColumnConstantExpressionVisitor extends 
StatelessTraverseNoExpressionVisitor<Void> {
--- End diff --

I did not find an existing visitor which can satisfy the requirement; I 
will check more Expressions besides ComparisonExpression.


---


[GitHub] phoenix pull request #314: PHOENIX-4820

2018-07-29 Thread comnetwork
Github user comnetwork commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/314#discussion_r206012706
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/expression/Expression.java ---
@@ -88,4 +88,10 @@
  * @return
  */
 boolean requiresFinalEvaluation();
+
+/**
+ *
+ * @return
+ */
+boolean isConstantIfChildrenAllConstant();
--- End diff --

I think ExpressionUtil.isConstant(Expression) is not suitable for this 
case. Just as the JIRA comments say, what we want to check in this issue is 
whether an expression is constant when all of its children are constants. 
Consider the following SQL:

 select a.ak3  from
 (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) 
av1,length(v2) av2 from test order by pk2,pk3 limit 10) a 
 where a.ak1 = 0.0 and a.av2 = length(substr('abc',1,1)) 
 group by a.ak3,CASE WHEN coalesce(a.ak1,1) > coalesce(a.av2,2) THEN 
coalesce(a.ak1,1) ELSE coalesce(a.av2,2) END,a.av1
order by a.ak3,a.av1

Obviously, because of rand(), the determinism of expression a.ak1 is 
Determinism.PER_INVOCATION, so the determinism is Determinism.PER_INVOCATION 
and isStateless is false for the expression "CASE WHEN coalesce(a.ak1,1) > 
coalesce(a.av2,2) THEN coalesce(a.ak1,1) ELSE coalesce(a.av2,2) END". But 
because a.ak1 and a.av2 are both constants in the WHERE clause of the outer 
query, we can regard that CASE WHEN expression as constant in IsConstantVisitor.


---


[GitHub] phoenix pull request #318: Fixed Spelling.

2018-07-29 Thread jimmycasey
GitHub user jimmycasey opened a pull request:

https://github.com/apache/phoenix/pull/318

Fixed Spelling.



You can merge this pull request into a Git repository by running:

$ git pull https://github.com/jimmycasey/phoenix-1 master

Alternatively you can review and apply these changes as the patch at:

https://github.com/apache/phoenix/pull/318.patch

To close this pull request, make a commit to your master/trunk branch
with (at least) the following in the commit message:

This closes #318


commit 112d9697ca88a61e301868995aa8162c7605cd10
Author: Jimmy Casey 
Date:   2018-07-29T21:43:55Z

Fixed Spelling.




---


[GitHub] phoenix pull request #314: PHOENIX-4820

2018-07-29 Thread JamesRTaylor
Github user JamesRTaylor commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/314#discussion_r205981788
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/expression/Expression.java ---
@@ -88,4 +88,10 @@
  * @return
  */
 boolean requiresFinalEvaluation();
+
+/**
+ *
+ * @return
+ */
+boolean isConstantIfChildrenAllConstant();
--- End diff --

It's not clear that we need this. We already roll up determinism and 
isStateless for the expression tree. If determinism == 
Determinism.PER_STATEMENT or Determinism.ALWAYS and isStateless is true, then 
we know an expression is a constant. We have a utility for this in 
ExpressionUtil.isConstant(Expression).
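
In code form, that check is roughly the following (a sketch against the 
Expression interface quoted in this thread, not the exact utility body):

```java
public static boolean isConstant(Expression expression) {
    Determinism d = expression.getDeterminism();
    return expression.isStateless()
        && (d == Determinism.ALWAYS || d == Determinism.PER_STATEMENT);
}
```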


---


[GitHub] phoenix pull request #314: PHOENIX-4820

2018-07-29 Thread JamesRTaylor
Github user JamesRTaylor commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/314#discussion_r205981903
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/util/ExpressionUtil.java ---
@@ -68,4 +74,85 @@ public static boolean isPkPositionChanging(TableRef 
tableRef, List p
 return false;
 }
 
+public static boolean isColumnConstant(Expression columnExpression, 
Expression whereExpression) {
+if(whereExpression == null) {
+return false;
+}
+IsColumnConstantExpressionVisitor 
isColumnConstantExpressionVisitor =
+new IsColumnConstantExpressionVisitor(columnExpression);
+whereExpression.accept(isColumnConstantExpressionVisitor);
+return isColumnConstantExpressionVisitor.isConstant();
+}
+
+private static class IsColumnConstantExpressionVisitor extends 
StatelessTraverseNoExpressionVisitor<Void> {
--- End diff --

I'm hoping we don't need a new visitor here. There are probably other cases 
to check for besides ComparisonExpression. For example, InListExpression, maybe 
CoerceExpression, etc.
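
To make the cases concrete, here is a conceptual sketch of the checks such a 
visitor would fold in. The types are from org.apache.phoenix.expression, but 
the accessors used (getFilterOp(), getChildren()) are assumptions about the 
expression API rather than verified signatures:

```java
// Does this WHERE-tree node pin the given column to a constant?
static boolean pinsColumnToConstant(Expression node, Expression column) {
    if (node instanceof AndExpression) {
        for (Expression child : node.getChildren()) {
            if (pinsColumnToConstant(child, column)) return true;
        }
        return false;
    }
    if (node instanceof ComparisonExpression) {
        ComparisonExpression cmp = (ComparisonExpression) node;
        return cmp.getFilterOp() == CompareOp.EQUAL
            && cmp.getChildren().get(0).equals(column)
            && ExpressionUtil.isConstant(cmp.getChildren().get(1));
    }
    if (node instanceof InListExpression) {
        // a single-element IN list behaves like an equality comparison
        List<Expression> children = node.getChildren();
        return children.size() == 2
            && children.get(0).equals(column)
            && ExpressionUtil.isConstant(children.get(1));
    }
    if (node instanceof CoerceExpression) {
        // look through the coercion at its only child
        return pinsColumnToConstant(node.getChildren().get(0), column);
    }
    return false;
}
```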


---


[GitHub] phoenix pull request #314: PHOENIX-4820

2018-07-29 Thread JamesRTaylor
Github user JamesRTaylor commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/314#discussion_r205982004
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java ---
@@ -559,8 +559,16 @@ protected QueryPlan 
compileSingleFlatQuery(StatementContext context, SelectState
 groupBy = groupBy.compile(context, innerPlanTupleProjector);
 context.setResolver(resolver); // recover resolver
 RowProjector projector = ProjectionCompiler.compile(context, 
select, groupBy, asSubquery ? Collections.emptyList() : targetColumns, 
where);
-OrderBy orderBy = OrderByCompiler.compile(context, select, 
groupBy, limit, offset, projector,
-groupBy == GroupBy.EMPTY_GROUP_BY ? 
innerPlanTupleProjector : null, isInRowKeyOrder);
+OrderBy orderBy = OrderByCompiler.compile(
+context,
+select,
+groupBy,
+limit,
+offset,
+projector,
+groupBy == GroupBy.EMPTY_GROUP_BY ? 
innerPlanTupleProjector : null,
+groupBy == GroupBy.EMPTY_GROUP_BY ? isInRowKeyOrder : true,
+where);
--- End diff --

Would be ideal if the change could be isolated to OrderByCompiler being 
aware of its inner plan to determine correctly whether the OrderBy can be 
compiled out. Any ideas, @maryannxue?


---


[GitHub] phoenix pull request #317: PHOENIX-3547 Supporting more number of indices pe...

2018-07-28 Thread twdsilva
Github user twdsilva commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/317#discussion_r205953961
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java ---
@@ -204,14 +205,19 @@ private void appendPKColumnValue(StringBuilder buf, 
byte[] range, Boolean isNull
 range = ptr.get();
 }
 if (changeViewIndexId) {
-Short s = (Short) type.toObject(range);
-s = (short) (s + (-Short.MAX_VALUE));
-buf.append(s.toString());
+PDataType viewIndexDataType = 
tableRef.getTable().getViewIndexType();
+buf.append(getViewIndexValue(type, range, 
viewIndexDataType).toString());
 } else {
 Format formatter = context.getConnection().getFormatter(type);
 buf.append(type.toStringLiteral(range, formatter));
 }
 }
+
+private Long getViewIndexValue(PDataType type, byte[] range, PDataType 
viewIndexDataType){
+boolean useLongViewIndex = 
MetaDataUtil.getViewIndexIdDataType().equals(viewIndexDataType);
+Object s =  type.toObject(range);
+return (useLongViewIndex ? (Long) s : (Short) s) - 
(useLongViewIndex ? Long.MAX_VALUE : Short.MAX_VALUE);
--- End diff --

@JamesRTaylor or @chrajeshbabu, do you know if this change is correct?


---


[GitHub] phoenix pull request #317: PHOENIX-3547 Supporting more number of indices pe...

2018-07-28 Thread twdsilva
Github user twdsilva commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/317#discussion_r205954264
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 ---
@@ -1431,11 +1438,42 @@ private PTable getTable(RegionScanner scanner, long 
clientTimeStamp, long tableT
 // server while holding this lock is a bad idea and likely to 
cause contention.
 return PTableImpl.makePTable(tenantId, schemaName, tableName, 
tableType, indexState, timeStamp, tableSeqNum,
 pkName, saltBucketNum, columns, parentSchemaName, 
parentTableName, indexes, isImmutableRows, physicalTables, defaultFamilyName,
-viewStatement, disableWAL, multiTenant, storeNulls, 
viewType, viewIndexId, indexType,
+viewStatement, disableWAL, multiTenant, storeNulls, 
viewType, viewIndexType, viewIndexId, indexType,
 rowKeyOrderOptimizable, transactionProvider, 
updateCacheFrequency, baseColumnCount,
 indexDisableTimestamp, isNamespaceMapped, 
autoPartitionSeq, isAppendOnlySchema, storageScheme, encodingScheme, cqCounter, 
useStatsForParallelization);
 }
+private Long getViewIndexId(Cell[] tableKeyValues, PDataType 
viewIndexType) {
+Cell viewIndexIdKv = tableKeyValues[VIEW_INDEX_ID_INDEX];
+return viewIndexIdKv == null ? null :
+decodeViewIndexId(viewIndexIdKv, viewIndexType);
+}
 
+/**
+ * check the value for {@value USE_LONG_VIEW_INDEX} and if its present 
consider viewIndexId as long otherwise
+ * read as short and convert it to long
+ *
+ * @param tableKeyValues
+ * @param viewIndexType
+ * @return
+ */
+private Long decodeViewIndexId(Cell viewIndexIdKv,  PDataType 
viewIndexType) {
+boolean useLongViewIndex = 
MetaDataUtil.getViewIndexIdDataType().equals(viewIndexType);
+   return new Long(
+   useLongViewIndex
+   ? 
viewIndexType.getCodec().decodeLong(viewIndexIdKv.getValueArray(),
+   viewIndexIdKv.getValueOffset(), 
SortOrder.getDefault())
+   : 
MetaDataUtil.getLegacyViewIndexIdDataType().getCodec().decodeShort(viewIndexIdKv.getValueArray(),
+   viewIndexIdKv.getValueOffset(), 
SortOrder.getDefault())
+   );
+}
+
+private PDataType getViewIndexType(Cell[] tableKeyValues) {
+Cell useLongViewIndexKv = tableKeyValues[USE_LONG_VIEW_INDEX];
--- End diff --

For existing tables the VIEW_INDEX_ID column will store the index id as a 
short, while for new tables the value will be stored as a long. 
Would it be cleaner to create a new column that stores the view index id as 
a long? In ConnectionQueryServicesImpl.upgradeSystemTables() we would set the 
new column based on the existing value. Finally, we could remove the existing 
VIEW_INDEX_ID column in the next release. 
WDYT @JamesRTaylor ?






---


[GitHub] phoenix pull request #317: PHOENIX-3547 Supporting more number of indices pe...

2018-07-28 Thread twdsilva
Github user twdsilva commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/317#discussion_r205952155
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java ---
@@ -1223,7 +1225,9 @@ public void readFields(DataInput input) throws 
IOException {
 boolean hasViewIndexId = encodedIndexedColumnsAndViewId < 0;
 if (hasViewIndexId) {
 // Fixed length
-viewIndexId = new 
byte[MetaDataUtil.getViewIndexIdDataType().getByteSize()];
+//Use leacy viewIndexIdType for clients older than 4.10 release
--- End diff --

nit: typo 


---


[GitHub] phoenix pull request #317: PHOENIX-3547 Supporting more number of indices pe...

2018-07-28 Thread twdsilva
Github user twdsilva commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/317#discussion_r205952450
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 ---
@@ -2185,7 +2223,7 @@ public void createTable(RpcController controller, 
CreateTableRequest request,
 throw sqlExceptions[0];
 }
 long seqValue = seqValues[0];
-if (seqValue > Short.MAX_VALUE) {
+if (seqValue > Long.MAX_VALUE) {
--- End diff --

This check is no longer needed since the indexId is now a Long. When you 
call incrementSequences, if the current sequence value is Long.MAX_VALUE you 
will get an exception.


---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-28 Thread twdsilva
Github user twdsilva commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r205950165
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java ---
@@ -3604,6 +3675,17 @@ public MutationState addColumn(PTable table, 
List<ColumnDef> origColumnDefs,
 }
 } finally {
 connection.setAutoCommit(wasAutoCommit);
+if (!columns.isEmpty()) {
+for (PColumn pColumn : columns) {
+PName physicalName = table.getPhysicalName();
+String physicalSchemaName =
+
SchemaUtil.getSchemaNameFromFullName(physicalName.getString());
+String physicalTableName =
+
SchemaUtil.getTableNameFromFullName(physicalName.getString());
+deleteCell(null, physicalSchemaName, physicalTableName,
--- End diff --

I changed the code to only delete the cells if we were able to successfully 
do the checkAndPut. 
When we add a column we write a cell per column that we are creating. 
When we drop a base table or create a view, we write a single cell with the 
rowkey of the physical table (to prevent a view being created while we are 
dropping the base table).
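
For readers following along, the acquire/release pairing described here looks 
roughly like this with plain HBase 1.x client calls. The table/row/family/ 
qualifier names are illustrative only, and SchemaUtil.getTableKey is an 
assumed helper:

```java
byte[] row = SchemaUtil.getTableKey(null, physicalSchemaName, physicalTableName);
// checkAndPut with a null expected value succeeds only if the cell is absent,
// which is what makes this usable as a mutex
boolean acquired = sysMutexTable.checkAndPut(row, FAMILY, QUALIFIER,
    null, new Put(row).addColumn(FAMILY, QUALIFIER, MUTEX_VALUE));
try {
    // ... perform the DDL step guarded by the mutex ...
} finally {
    if (acquired) {
        // release only a mutex this thread actually took, per the review comment
        sysMutexTable.delete(new Delete(row).addColumns(FAMILY, QUALIFIER));
    }
}
```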


---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-28 Thread twdsilva
Github user twdsilva commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r205950104
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java ---
@@ -3034,6 +3088,11 @@ MutationState dropTable(String schemaName, String 
tableName, String parentTableN
 return new MutationState(0, 0, connection);
 } finally {
 connection.setAutoCommit(wasAutoCommit);
+// acquire a mutex on the table to prevent creating views 
while concurrently
+// dropping the base table
+if (tableType == PTableType.TABLE) {
+deleteCell(null, schemaName, tableName, null);
+}
--- End diff --

Done.


---


[GitHub] phoenix pull request #317: PHOENIX-3547 Supporting more number of indices pe...

2018-07-28 Thread m2je
GitHub user m2je opened a pull request:

https://github.com/apache/phoenix/pull/317

PHOENIX-3547 Supporting more number of indices per table.

Currently the number of indices per Phoenix table is bound to a maximum of 
65535 (java.lang.Short), which is a limitation for applications requiring an 
unlimited number of indices.
This change will make any new table created in Phoenix support view index 
ids in the range of -9,223,372,036,854,775,808 to 
9,223,372,036,854,775,807 (java.lang.Long), which is undoubtedly big enough to 
cover this requirement.
Any existing Phoenix table will still continue to support only a maximum of 
65535 indices.
A new boolean column (USE_LONG_VIEW_INDEX BOOLEAN DEFAULT FALSE) is added 
to SYSTEM.CATALOG to specify each Phoenix table's support for a large number of 
indices.
On each new Phoenix table creation the value for USE_LONG_VIEW_INDEX will 
be set to `true`, while it will be false for any existing table.

You can merge this pull request into a Git repository by running:

$ git pull https://github.com/m2je/phoenix PHOENIX-3547

Alternatively you can review and apply these changes as the patch at:

https://github.com/apache/phoenix/pull/317.patch

To close this pull request, make a commit to your master/trunk branch
with (at least) the following in the commit message:

This closes #317


commit f1ffcf6b76a0a764df5529a03ba8dc42112dc5a9
Author: Mahdi Salarkia 
Date:   2018-07-28T05:27:46Z

PHOENIX-3547 Supporting more number of indices per table.

Currently the number of indices per Phoenix table is bound to a maximum of 
65535 (java.lang.Short), which is a limitation for applications requiring an 
unlimited number of indices.
This change will make any new table created in Phoenix support view index 
ids in the range of -9,223,372,036,854,775,808 to 
9,223,372,036,854,775,807 (java.lang.Long), which is undoubtedly big enough to 
cover this requirement.
Any existing Phoenix table will still continue to support only a maximum of 
65535 indices.
A new boolean column (USE_LONG_VIEW_INDEX BOOLEAN DEFAULT FALSE) is added 
to SYSTEM.CATALOG to specify each Phoenix table's support for a large number of 
indices.
On each new Phoenix table creation the value for USE_LONG_VIEW_INDEX will 
be set to `true`, while it will be false for any existing table.




---


[GitHub] phoenix pull request #278: PHOENIX-4322 DESC primary key column with variabl...

2018-07-27 Thread maryannxue
Github user maryannxue closed the pull request at:

https://github.com/apache/phoenix/pull/278


---


[GitHub] phoenix pull request #228: PHOENIX-3355 Register Phoenix built-in functions ...

2018-07-27 Thread maryannxue
Github user maryannxue closed the pull request at:

https://github.com/apache/phoenix/pull/228


---


[GitHub] phoenix pull request #281: PHOENIX-4288 Indexes not used when ordering by pr...

2018-07-27 Thread maryannxue
Github user maryannxue closed the pull request at:

https://github.com/apache/phoenix/pull/281


---


[GitHub] phoenix pull request #316: PHOENIX-3547 Supporting more number of indices pe...

2018-07-27 Thread m2je
Github user m2je closed the pull request at:

https://github.com/apache/phoenix/pull/316


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-07-27 Thread karanmehta93
Github user karanmehta93 commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r205901029
  
--- Diff: 
phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java ---
@@ -204,6 +219,39 @@ private static void resetGlobalMetrics() {
 }
 }
 
+private boolean verifyMetricsFromSink() throws InterruptedException {
+Map<String, Long> expectedMetrics = new HashMap<>();
+for (GlobalMetric m : 
PhoenixRuntime.getGlobalPhoenixClientMetrics()) {
+expectedMetrics.put(m.getMetricType().name(), m.getTotalSum());
+}
+
+for (int i = 0; i < MAX_RETRIES; i++) {
+LOG.info("Verifying Global Metrics from Hadoop Sink, Retry: " 
+ (i + 1));
+if (verifyMetricsFromSinkOnce(expectedMetrics)) {
+LOG.info("Values from Hadoop Metrics Sink match actual 
values");
+return true;
+}
+Thread.sleep(1000);
+}
+return false;
+}
+
+private boolean verifyMetricsFromSinkOnce(Map<String, Long> 
expectedMetrics) {
+synchronized (GlobalPhoenixMetricsTestSink.lock) {
+for (AbstractMetric metric : 
GlobalPhoenixMetricsTestSink.metrics) {
+if (expectedMetrics.containsKey(metric.name())) {
--- End diff --

I will soon add a test that disables these metrics as well.
> I'd switch this around to iterate over your expectations, ensuring that 
they all exist.

The reason I didn't do it that way is that I didn't want to iterate over the 
sink metrics for every expected metric. It doesn't really matter since it's a 
test, though. One approach can be to remove each entry from the map and check if 
the size is 0 at the end. How does that sound?


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-07-27 Thread karanmehta93
Github user karanmehta93 commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r205900530
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java
 ---
@@ -108,40 +114,88 @@
 GLOBAL_HBASE_COUNT_ROWS_SCANNED(COUNT_ROWS_SCANNED),
 GLOBAL_HBASE_COUNT_ROWS_FILTERED(COUNT_ROWS_FILTERED);
 
-
+private static final Logger LOG = 
LoggerFactory.getLogger(GlobalClientMetrics.class);
 private static final boolean isGlobalMetricsEnabled = 
QueryServicesOptions.withDefaults().isGlobalMetricsEnabled();
+private MetricType metricType;
 private GlobalMetric metric;
 
-public void update(long value) {
+static {
+initPhoenixGlobalClientMetrics();
 if (isGlobalMetricsEnabled) {
-metric.change(value);
+MetricRegistry metricRegistry = createMetricRegistry();
+registerPhoenixMetricsToRegistry(metricRegistry);
+
GlobalMetricRegistriesAdapter.getInstance().registerMetricRegistry(metricRegistry);
+}
+}
+
+private static void initPhoenixGlobalClientMetrics() {
+for (GlobalClientMetrics globalMetric : 
GlobalClientMetrics.values()) {
+globalMetric.metric = isGlobalMetricsEnabled ?
+new GlobalMetricImpl(globalMetric.metricType) : new 
NoOpGlobalMetricImpl();
+}
+}
+
+private static void registerPhoenixMetricsToRegistry(MetricRegistry 
metricRegistry) {
+for (GlobalClientMetrics globalMetric : 
GlobalClientMetrics.values()) {
+metricRegistry.register(globalMetric.metricType.columnName(),
+new PhoenixGlobalMetricGauge(globalMetric.metric));
+}
+}
+
+private static MetricRegistry createMetricRegistry() {
+LOG.info("Creating Metric Registry for Phoenix Global Metrics");
+MetricRegistryInfo registryInfo = new 
MetricRegistryInfo("PHOENIX", "Phoenix Global Metrics",
--- End diff --

Global here means that it is an aggregation across all clients (or all 
Phoenix Connections). 

> Maybe "Phoenix,sub=CLIENT" down below?

Seems reasonable as well.


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-07-27 Thread karanmehta93
Github user karanmehta93 commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r205899353
  
--- Diff: phoenix-core/src/test/resources/hadoop-metrics2.properties ---
@@ -32,10 +32,9 @@
 #[prefix].[source|sink].[instance].[options]
 # See javadoc of package-info.java for org.apache.hadoop.metrics2 for 
detail
 
-
-# Don't attempt to start jmx mbeans for all sources.
-#  For right now, all metrics are exported to a phoenix table
-*.source.start_mbeans=false
+hbase.source.start_mbeans=true

+hbase.sink.sink0.class=org.apache.phoenix.monitoring.GlobalPhoenixMetricsTestSink
--- End diff --

Sure. It was hard for me as well since things were not working as expected 
in the first place. Took a little while to realize this file was the reason :)


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-07-27 Thread joshelser
Github user joshelser commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r205884608
  
--- Diff: phoenix-core/src/test/resources/hadoop-metrics2.properties ---
@@ -32,10 +32,9 @@
 #[prefix].[source|sink].[instance].[options]
 # See javadoc of package-info.java for org.apache.hadoop.metrics2 for 
detail
 
-
-# Don't attempt to start jmx mbeans for all sources.
-#  For right now, all metrics are exported to a phoenix table
-*.source.start_mbeans=false
+hbase.source.start_mbeans=true

+hbase.sink.sink0.class=org.apache.phoenix.monitoring.GlobalPhoenixMetricsTestSink
--- End diff --

Can you leave a comment in the test noting that this configuration is here? 
Otherwise, it might seem like voodoo.


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-07-27 Thread joshelser
Github user joshelser commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r205884335
  
--- Diff: 
phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java ---
@@ -204,6 +219,39 @@ private static void resetGlobalMetrics() {
 }
 }
 
+private boolean verifyMetricsFromSink() throws InterruptedException {
+Map<String, Long> expectedMetrics = new HashMap<>();
+for (GlobalMetric m : 
PhoenixRuntime.getGlobalPhoenixClientMetrics()) {
+expectedMetrics.put(m.getMetricType().name(), m.getTotalSum());
+}
+
+for (int i = 0; i < MAX_RETRIES; i++) {
+LOG.info("Verifying Global Metrics from Hadoop Sink, Retry: " 
+ (i + 1));
+if (verifyMetricsFromSinkOnce(expectedMetrics)) {
+LOG.info("Values from Hadoop Metrics Sink match actual 
values");
+return true;
+}
+Thread.sleep(1000);
+}
+return false;
+}
+
+private boolean verifyMetricsFromSinkOnce(Map<String, Long> 
expectedMetrics) {
+synchronized (GlobalPhoenixMetricsTestSink.lock) {
+for (AbstractMetric metric : 
GlobalPhoenixMetricsTestSink.metrics) {
+if (expectedMetrics.containsKey(metric.name())) {
--- End diff --

This check doesn't catch the case where you have `expectedMetrics` but 
`GlobalPhoenixMetricsTestSink.metrics` is empty. I'd switch this around to 
iterate over your expectations, ensuring that they all exist.


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-07-27 Thread joshelser
Github user joshelser commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r205884792
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java
 ---
@@ -108,40 +114,88 @@
 GLOBAL_HBASE_COUNT_ROWS_SCANNED(COUNT_ROWS_SCANNED),
 GLOBAL_HBASE_COUNT_ROWS_FILTERED(COUNT_ROWS_FILTERED);
 
-
+private static final Logger LOG = 
LoggerFactory.getLogger(GlobalClientMetrics.class);
 private static final boolean isGlobalMetricsEnabled = 
QueryServicesOptions.withDefaults().isGlobalMetricsEnabled();
+private MetricType metricType;
 private GlobalMetric metric;
 
-public void update(long value) {
+static {
+initPhoenixGlobalClientMetrics();
 if (isGlobalMetricsEnabled) {
-metric.change(value);
+MetricRegistry metricRegistry = createMetricRegistry();
+registerPhoenixMetricsToRegistry(metricRegistry);
+
GlobalMetricRegistriesAdapter.getInstance().registerMetricRegistry(metricRegistry);
+}
+}
+
+private static void initPhoenixGlobalClientMetrics() {
+for (GlobalClientMetrics globalMetric : 
GlobalClientMetrics.values()) {
+globalMetric.metric = isGlobalMetricsEnabled ?
+new GlobalMetricImpl(globalMetric.metricType) : new 
NoOpGlobalMetricImpl();
+}
+}
+
+private static void registerPhoenixMetricsToRegistry(MetricRegistry 
metricRegistry) {
+for (GlobalClientMetrics globalMetric : 
GlobalClientMetrics.values()) {
+metricRegistry.register(globalMetric.metricType.columnName(),
+new PhoenixGlobalMetricGauge(globalMetric.metric));
+}
+}
+
+private static MetricRegistry createMetricRegistry() {
+LOG.info("Creating Metric Registry for Phoenix Global Metrics");
+MetricRegistryInfo registryInfo = new 
MetricRegistryInfo("PHOENIX", "Phoenix Global Metrics",
--- End diff --

What does "Global" mean in this context? Is "Phoenix Client Metrics" more 
reasonable? Maybe "Phoenix,sub=CLIENT" down below?


---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-07-27 Thread joshelser
Github user joshelser commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/315#discussion_r205884048
  
--- Diff: 
phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java ---
@@ -204,6 +219,39 @@ private static void resetGlobalMetrics() {
 }
 }
 
+private boolean verifyMetricsFromSink() throws InterruptedException {
+Map<String, Long> expectedMetrics = new HashMap<>();
+for (GlobalMetric m : 
PhoenixRuntime.getGlobalPhoenixClientMetrics()) {
+expectedMetrics.put(m.getMetricType().name(), m.getTotalSum());
+}
+
+for (int i = 0; i < MAX_RETRIES; i++) {
+LOG.info("Verifying Global Metrics from Hadoop Sink, Retry: " 
+ (i + 1));
+if (verifyMetricsFromSinkOnce(expectedMetrics)) {
+LOG.info("Values from Hadoop Metrics Sink match actual 
values");
+return true;
+}
+Thread.sleep(1000);
+}
+return false;
+}
+
+private boolean verifyMetricsFromSinkOnce(Map<String, Long> 
expectedMetrics) {
+synchronized (GlobalPhoenixMetricsTestSink.lock) {
--- End diff --

If it were me, I would have used a Queue or something to save off the 
metrics, but there's nothing wrong with your approach here :)
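
For reference, the Queue variant could look like the sketch below. The class 
and field names are invented for illustration, but init/putMetrics/flush are 
the real hadoop-metrics2 MetricsSink callbacks:

```java
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import org.apache.commons.configuration.SubsetConfiguration;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsSink;

public class QueueingMetricsTestSink implements MetricsSink {
    // test threads drain this queue; no shared lock needed
    public static final Queue<AbstractMetric> METRICS =
        new ConcurrentLinkedQueue<>();

    @Override
    public void init(SubsetConfiguration conf) { }

    @Override
    public void putMetrics(MetricsRecord record) {
        for (AbstractMetric metric : record.metrics()) {
            METRICS.add(metric);
        }
    }

    @Override
    public void flush() { }
}
```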


---


[GitHub] phoenix pull request #316: PHOENIX-3547 Supporting more number of indices pe...

2018-07-27 Thread m2je
GitHub user m2je opened a pull request:

https://github.com/apache/phoenix/pull/316

PHOENIX-3547 Supporting more number of indices per table.

PHOENIX-3547 Supporting more number of indices per table.

Currently the number of indices per Phoenix table is bound to a maximum of 
65535 (java.lang.Short), which is a limitation for applications requiring an 
unlimited number of indices.
This change will make any new table created in Phoenix support view index 
ids in the range of -9,223,372,036,854,775,808 to 
9,223,372,036,854,775,807 (java.lang.Long), which is undoubtedly big enough to 
cover this requirement.
Any existing Phoenix table will still continue to support only a maximum of 
65535 indices.
A new boolean column (USE_LONG_VIEW_INDEX BOOLEAN DEFAULT FALSE) is added 
to SYSTEM.CATALOG to specify each Phoenix table's support for a large number of 
indices.
On each new Phoenix table creation the value for USE_LONG_VIEW_INDEX will 
be set to `true`, while it will be false for any existing table.

You can merge this pull request into a Git repository by running:

$ git pull https://github.com/m2je/phoenix PHOENIX-3547

Alternatively you can review and apply these changes as the patch at:

https://github.com/apache/phoenix/pull/316.patch

To close this pull request, make a commit to your master/trunk branch
with (at least) the following in the commit message:

This closes #316






---


[GitHub] phoenix pull request #315: PHOENIX-3655 Global Phoenix Client Metrics for PQ...

2018-07-27 Thread karanmehta93
GitHub user karanmehta93 opened a pull request:

https://github.com/apache/phoenix/pull/315

PHOENIX-3655 Global Phoenix Client Metrics for PQS

@joshelser Please review.

You can merge this pull request into a Git repository by running:

$ git pull https://github.com/karanmehta93/phoenix 4.x-HBase-1.4

Alternatively you can review and apply these changes as the patch at:

https://github.com/apache/phoenix/pull/315.patch

To close this pull request, make a commit to your master/trunk branch
with (at least) the following in the commit message:

This closes #315


commit a8f7f25b57e9e7f6abbbfc2c70bb9648ca872761
Author: Karan Mehta 
Date:   2018-07-27T08:57:33Z

PHOENIX-3655 Global Phoenix Client Metrics for PQS




---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-26 Thread ankitsinghal
Github user ankitsinghal commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r205594858
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java ---
@@ -3604,6 +3675,17 @@ public MutationState addColumn(PTable table, 
List<ColumnDef> origColumnDefs,
 }
 } finally {
 connection.setAutoCommit(wasAutoCommit);
+if (!columns.isEmpty()) {
+for (PColumn pColumn : columns) {
+PName physicalName = table.getPhysicalName();
+String physicalSchemaName =
+
SchemaUtil.getSchemaNameFromFullName(physicalName.getString());
+String physicalTableName =
+
SchemaUtil.getTableNameFromFullName(physicalName.getString());
+deleteCell(null, physicalSchemaName, physicalTableName,
--- End diff --

@twdsilva, shouldn't you check whether you have acquired the lock (i.e., 
inserted the cell in the mutex table) before deleting the lock cell? Otherwise 
you might be deleting locks acquired by other threads. And you may need 
to do this per column, I think.


---


[GitHub] phoenix pull request #313: PHOENIX-4799 Write cells using checkAndMutate to ...

2018-07-26 Thread ankitsinghal
Github user ankitsinghal commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/313#discussion_r205595041
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java ---
@@ -3034,6 +3088,11 @@ MutationState dropTable(String schemaName, String 
tableName, String parentTableN
 return new MutationState(0, 0, connection);
 } finally {
 connection.setAutoCommit(wasAutoCommit);
+// acquire a mutex on the table to prevent creating views 
while concurrently
+// dropping the base table
+if (tableType == PTableType.TABLE) {
+deleteCell(null, schemaName, tableName, null);
+}
--- End diff --

nit: Please update the comment here to say that you are releasing the mutex.


---


[GitHub] phoenix pull request #314: PHOENIX-4820

2018-07-25 Thread comnetwork
GitHub user comnetwork opened a pull request:

https://github.com/apache/phoenix/pull/314

PHOENIX-4820

PHOENIX-4820

You can merge this pull request into a Git repository by running:

$ git pull https://github.com/comnetwork/phoenix 4.x-HBase-1.3

Alternatively you can review and apply these changes as the patch at:

https://github.com/apache/phoenix/pull/314.patch

To close this pull request, make a commit to your master/trunk branch
with (at least) the following in the commit message:

This closes #314


commit ff779298e8c521c8a86d78ea7364d8eff8942c1a
Author: chenglei 
Date:   2018-07-26T05:32:13Z

PHOENIX-4820




---


[GitHub] phoenix pull request #306: PHOENIX-4797 file not found or file exist excepti...

2018-07-25 Thread 492066199
Github user 492066199 closed the pull request at:

https://github.com/apache/phoenix/pull/306


---


[GitHub] phoenix pull request #308: Client-side hash aggregation

2018-07-25 Thread geraldss
Github user geraldss commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/308#discussion_r205297065
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java 
---
@@ -135,17 +142,25 @@ public ResultIterator iterator(ParallelScanGrouper 
scanGrouper, Scan scan) throw
 aggResultIterator = new 
ClientUngroupedAggregatingResultIterator(LookAheadResultIterator.wrap(iterator),
 serverAggregators);
 aggResultIterator = new 
UngroupedAggregatingResultIterator(LookAheadResultIterator.wrap(aggResultIterator),
 clientAggregators);
 } else {
-if (!groupBy.isOrderPreserving()) {
-int thresholdBytes = 
context.getConnection().getQueryServices().getProps().getInt(
-QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, 
QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES);
-List<Expression> keyExpressions = 
groupBy.getKeyExpressions();
+List<Expression> keyExpressions = groupBy.getKeyExpressions();
+if (groupBy.isOrderPreserving()) {
+aggResultIterator = new 
ClientGroupedAggregatingResultIterator(LookAheadResultIterator.wrap(iterator), 
serverAggregators, keyExpressions);
+} else {
+int thresholdBytes = 
context.getConnection().getQueryServices().getProps().getInt
+(QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, 
QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES);
List<OrderByExpression> keyExpressionOrderBy = 
Lists.newArrayListWithExpectedSize(keyExpressions.size());
 for (Expression keyExpression : keyExpressions) {
 keyExpressionOrderBy.add(new 
OrderByExpression(keyExpression, false, true));
 }
-iterator = new OrderedResultIterator(iterator, 
keyExpressionOrderBy, thresholdBytes, null, null, 
projector.getEstimatedRowByteSize());
+
+if (useHashAgg) {
+boolean sort = orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY;
+aggResultIterator = new 
ClientHashAggregatingResultIterator(context, iterator, serverAggregators, 
keyExpressions, sort);
--- End diff --

Same as for previous comment. The GROUP BY cannot produce a reverse sort.


---


[GitHub] phoenix pull request #308: Client-side hash aggregation

2018-07-25 Thread geraldss
Github user geraldss commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/308#discussion_r205296815
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java 
---
@@ -135,17 +142,25 @@ public ResultIterator iterator(ParallelScanGrouper 
scanGrouper, Scan scan) throw
 aggResultIterator = new 
ClientUngroupedAggregatingResultIterator(LookAheadResultIterator.wrap(iterator),
 serverAggregators);
 aggResultIterator = new 
UngroupedAggregatingResultIterator(LookAheadResultIterator.wrap(aggResultIterator),
 clientAggregators);
 } else {
-if (!groupBy.isOrderPreserving()) {
-int thresholdBytes = 
context.getConnection().getQueryServices().getProps().getInt(
-QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, 
QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES);
-List<Expression> keyExpressions = 
groupBy.getKeyExpressions();
+List<Expression> keyExpressions = groupBy.getKeyExpressions();
+if (groupBy.isOrderPreserving()) {
+aggResultIterator = new 
ClientGroupedAggregatingResultIterator(LookAheadResultIterator.wrap(iterator), 
serverAggregators, keyExpressions);
+} else {
+int thresholdBytes = 
context.getConnection().getQueryServices().getProps().getInt
+(QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, 
QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES);
List<OrderByExpression> keyExpressionOrderBy = 
Lists.newArrayListWithExpectedSize(keyExpressions.size());
 for (Expression keyExpression : keyExpressions) {
 keyExpressionOrderBy.add(new 
OrderByExpression(keyExpression, false, true));
 }
-iterator = new OrderedResultIterator(iterator, 
keyExpressionOrderBy, thresholdBytes, null, null, 
projector.getEstimatedRowByteSize());
+
+if (useHashAgg) {
+boolean sort = orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY;
--- End diff --

The GROUP BY cannot cause REV_ROW_KEY_ORDER_BY, because the GROUP BY cannot 
specify or produce descending keys. This is a pre-existing assumption and design 
in SORT_MERGE_JOIN. The SORT_MERGE_JOIN creates its own forward sort, and its 
Tracker reports a forward sort.


---


[GitHub] phoenix pull request #308: Client-side hash aggregation

2018-07-25 Thread geraldss
Github user geraldss commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/308#discussion_r205296403
  
--- Diff: 
phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientHashAggregateIT.java 
---
@@ -0,0 +1,191 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.util.Properties;
+
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
+import org.junit.Test;
+
+public class ClientHashAggregateIT extends ParallelStatsDisabledIT {
--- End diff --

The current tests cover when forward sort is required. My comment below 
addresses reverse sort.


---


[GitHub] phoenix pull request #308: Client-side hash aggregation

2018-07-25 Thread geraldss
Github user geraldss commented on a diff in the pull request:

https://github.com/apache/phoenix/pull/308#discussion_r205296128
  
--- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java 
---
@@ -183,13 +198,15 @@ public ExplainPlan getExplainPlan() throws 
SQLException {
 if (where != null) {
 planSteps.add("CLIENT FILTER BY " + where.toString());
 }
-if (!groupBy.isEmpty()) {
-if (!groupBy.isOrderPreserving()) {
-planSteps.add("CLIENT SORTED BY " + 
groupBy.getKeyExpressions().toString());
-}
+if (groupBy.isEmpty()) {
+planSteps.add("CLIENT AGGREGATE INTO SINGLE ROW");
+} else if (groupBy.isOrderPreserving()) {
 planSteps.add("CLIENT AGGREGATE INTO DISTINCT ROWS BY " + 
groupBy.getExpressions().toString());
+} else if (useHashAgg) {
+planSteps.add("CLIENT HASH AGGREGATE INTO DISTINCT ROWS BY " + 
groupBy.getExpressions().toString());
--- End diff --

Done.


---

