kfaraz commented on code in PR #18692:
URL: https://github.com/apache/druid/pull/18692#discussion_r2459140311


##########
sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java:
##########
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.sql.calcite.schema;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.FluentIterable;
+import org.apache.calcite.DataContext;
+import org.apache.calcite.linq4j.Enumerable;
+import org.apache.calcite.linq4j.Linq4j;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.schema.ScannableTable;
+import org.apache.calcite.schema.Schema;
+import org.apache.calcite.schema.impl.AbstractTable;
+import org.apache.druid.discovery.DiscoveryDruidNode;
+import org.apache.druid.discovery.DruidNodeDiscoveryProvider;
+import org.apache.druid.java.util.common.RE;
+import org.apache.druid.java.util.http.client.HttpClient;
+import org.apache.druid.java.util.http.client.Request;
+import 
org.apache.druid.java.util.http.client.response.StringFullResponseHandler;
+import 
org.apache.druid.java.util.http.client.response.StringFullResponseHolder;
+import org.apache.druid.segment.column.ColumnType;
+import org.apache.druid.segment.column.RowSignature;
+import org.apache.druid.server.DruidNode;
+import org.apache.druid.server.security.AuthenticationResult;
+import org.apache.druid.server.security.AuthorizerMapper;
+import org.apache.druid.sql.calcite.planner.PlannerContext;
+import org.apache.druid.sql.calcite.table.RowSignatures;
+import org.jboss.netty.handler.codec.http.HttpMethod;
+
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.stream.Collectors;
+
+public final class SystemPropertiesTable extends AbstractTable implements 
ScannableTable

Review Comment:
   Please add a short javadoc here.



##########
sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java:
##########
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.sql.calcite.schema;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.FluentIterable;
+import org.apache.calcite.DataContext;
+import org.apache.calcite.linq4j.Enumerable;
+import org.apache.calcite.linq4j.Linq4j;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.schema.ScannableTable;
+import org.apache.calcite.schema.Schema;
+import org.apache.calcite.schema.impl.AbstractTable;
+import org.apache.druid.discovery.DiscoveryDruidNode;
+import org.apache.druid.discovery.DruidNodeDiscoveryProvider;
+import org.apache.druid.java.util.common.RE;
+import org.apache.druid.java.util.http.client.HttpClient;
+import org.apache.druid.java.util.http.client.Request;
+import 
org.apache.druid.java.util.http.client.response.StringFullResponseHandler;
+import 
org.apache.druid.java.util.http.client.response.StringFullResponseHolder;
+import org.apache.druid.segment.column.ColumnType;
+import org.apache.druid.segment.column.RowSignature;
+import org.apache.druid.server.DruidNode;
+import org.apache.druid.server.security.AuthenticationResult;
+import org.apache.druid.server.security.AuthorizerMapper;
+import org.apache.druid.sql.calcite.planner.PlannerContext;
+import org.apache.druid.sql.calcite.table.RowSignatures;
+import org.jboss.netty.handler.codec.http.HttpMethod;
+
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.stream.Collectors;
+
+public final class SystemPropertiesTable extends AbstractTable implements 
ScannableTable
+{
+  public static final String PROPERTIES_TABLE = "properties";

Review Comment:
   ```suggestion
     public static final String PROPERTIES_TABLE = "server_properties";
   ```



##########
sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java:
##########
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.sql.calcite.schema;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.FluentIterable;
+import org.apache.calcite.DataContext;
+import org.apache.calcite.linq4j.Enumerable;
+import org.apache.calcite.linq4j.Linq4j;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.schema.ScannableTable;
+import org.apache.calcite.schema.Schema;
+import org.apache.calcite.schema.impl.AbstractTable;
+import org.apache.druid.discovery.DiscoveryDruidNode;
+import org.apache.druid.discovery.DruidNodeDiscoveryProvider;
+import org.apache.druid.java.util.common.RE;
+import org.apache.druid.java.util.http.client.HttpClient;
+import org.apache.druid.java.util.http.client.Request;
+import 
org.apache.druid.java.util.http.client.response.StringFullResponseHandler;
+import 
org.apache.druid.java.util.http.client.response.StringFullResponseHolder;
+import org.apache.druid.segment.column.ColumnType;
+import org.apache.druid.segment.column.RowSignature;
+import org.apache.druid.server.DruidNode;
+import org.apache.druid.server.security.AuthenticationResult;
+import org.apache.druid.server.security.AuthorizerMapper;
+import org.apache.druid.sql.calcite.planner.PlannerContext;
+import org.apache.druid.sql.calcite.table.RowSignatures;
+import org.jboss.netty.handler.codec.http.HttpMethod;
+
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.stream.Collectors;
+
+public final class SystemPropertiesTable extends AbstractTable implements 
ScannableTable
+{
+  public static final String PROPERTIES_TABLE = "properties";
+
+  static final RowSignature PROPERTIES_SIGNATURE = RowSignature
+      .builder()
+      .add("service", ColumnType.STRING)
+      .add("host", ColumnType.STRING)
+      .add("server_type", ColumnType.STRING)
+      .add("property", ColumnType.STRING)
+      .add("value", ColumnType.STRING)
+      .build();
+
+  private final DruidNodeDiscoveryProvider druidNodeDiscoveryProvider;
+  private final AuthorizerMapper authorizerMapper;
+  private final HttpClient httpClient;
+  private final ObjectMapper jsonMapper;
+
+  public SystemPropertiesTable(
+      DruidNodeDiscoveryProvider druidNodeDiscoveryProvider,
+      AuthorizerMapper authorizerMapper,
+      HttpClient httpClient,
+      ObjectMapper jsonMapper
+  )
+  {
+    this.druidNodeDiscoveryProvider = druidNodeDiscoveryProvider;
+    this.authorizerMapper = authorizerMapper;
+    this.httpClient = httpClient;
+    this.jsonMapper = jsonMapper;
+  }
+
+  @Override
+  public RelDataType getRowType(RelDataTypeFactory typeFactory)
+  {
+    return RowSignatures.toRelDataType(PROPERTIES_SIGNATURE, typeFactory);
+  }
+
+  @Override
+  public Schema.TableType getJdbcTableType()
+  {
+    return Schema.TableType.SYSTEM_TABLE;
+  }
+
+  @Override
+  public Enumerable<Object[]> scan(DataContext root)
+  {
+    final AuthenticationResult authenticationResult = (AuthenticationResult) 
Preconditions.checkNotNull(
+        root.get(PlannerContext.DATA_CTX_AUTHENTICATION_RESULT),
+        "authenticationResult in dataContext"
+    );
+    SystemSchema.checkStateReadAccessForServers(authenticationResult, 
authorizerMapper);
+    final Iterator<DiscoveryDruidNode> druidServers = 
SystemSchema.getDruidServers(druidNodeDiscoveryProvider);
+
+    final FluentIterable<Object[]> results = FluentIterable
+        .from(() -> druidServers)
+        .transformAndConcat((DiscoveryDruidNode discoveryDruidNode) -> {
+          final DruidNode druidNode = discoveryDruidNode.getDruidNode();
+          final Map<String, String> propertiesMap = getProperties(druidNode);
+          return propertiesMap.entrySet().stream()
+                              .map(entry -> new Object[]{
+                                  druidNode.getServiceName(),
+                                  druidNode.getHost(),

Review Comment:
   to align with `sys.servers` table and to avoid ambiguity in case multiple 
Druid services are running on the same host.
   ```suggestion
                                     druidNode.getHostAndPortToUse(),
   ```



##########
sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java:
##########
@@ -234,29 +236,29 @@ public SystemSchema(
       final CoordinatorClient coordinatorClient,
       final OverlordClient overlordClient,
       final DruidNodeDiscoveryProvider druidNodeDiscoveryProvider,
-      final ObjectMapper jsonMapper
+      final ObjectMapper jsonMapper,
+      final @EscalatedClient HttpClient httpClient
   )
   {
     Preconditions.checkNotNull(serverView, "serverView");
-    this.tableMap = ImmutableMap.of(
-        SEGMENTS_TABLE,
-        new SegmentsTable(druidSchema, metadataView, jsonMapper, 
authorizerMapper),
-        SERVERS_TABLE,
-        new ServersTable(
-            druidNodeDiscoveryProvider,
-            serverInventoryView,
-            authorizerMapper,
-            overlordClient,
-            coordinatorClient,
-            jsonMapper
-        ),
-        SERVER_SEGMENTS_TABLE,
-        new ServerSegmentsTable(serverView, authorizerMapper),
-        TASKS_TABLE,
-        new TasksTable(overlordClient, authorizerMapper),
-        SUPERVISOR_TABLE,
-        new SupervisorsTable(overlordClient, authorizerMapper)
-    );
+    this.tableMap = ImmutableMap.<String, Table>builder()

Review Comment:
   Please use the original syntax `ImmutableMap.of()` and reduce the diff size 
here.



##########
sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java:
##########
@@ -1448,6 +1463,129 @@ public void testSupervisorTableAuth()
     // verifyTypes(rows, SystemSchema.SUPERVISOR_SIGNATURE);
   }
 
+  @Test
+  public void testPropertiesTable() throws Exception
+  {
+    SystemPropertiesTable propertiesTable = 
EasyMock.createMockBuilder(SystemPropertiesTable.class)
+                                                    
.withConstructor(druidNodeDiscoveryProvider, authMapper, httpClient, MAPPER)
+                                                    .createMock();
+
+    EasyMock.replay(propertiesTable);
+
+    final DruidNodeDiscovery coordinatorNodeDiscovery = 
EasyMock.createMock(DruidNodeDiscovery.class);
+    final DruidNodeDiscovery middleManagerNodeDiscovery = 
EasyMock.createMock(DruidNodeDiscovery.class);
+    final DruidNodeDiscovery brokerNodeDiscovery = 
EasyMock.createMock(DruidNodeDiscovery.class);
+    final DruidNodeDiscovery routerNodeDiscovery = 
EasyMock.createMock(DruidNodeDiscovery.class);
+    final DruidNodeDiscovery historicalNodeDiscovery = 
EasyMock.createMock(DruidNodeDiscovery.class);
+    final DruidNodeDiscovery overlordNodeDiscovery = 
EasyMock.createMock(DruidNodeDiscovery.class);
+    final DruidNodeDiscovery peonNodeDiscovery = 
EasyMock.createMock(DruidNodeDiscovery.class);
+    final DruidNodeDiscovery indexerNodeDiscovery = 
EasyMock.createMock(DruidNodeDiscovery.class);
+
+    
EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.COORDINATOR)).andReturn(coordinatorNodeDiscovery).once();

Review Comment:
   Please group the definition and expectation of each mock together.
   Also, since these snippets will follow the same format, please see if they 
can't be put in a common private method.
   
   Something like:
   
   ```
   final DruidNodeDiscovery coordinatorNodeDiscovery = mockNodeDiscovery(
       // list of nodes to return
       new DruidNode(...),
       new DruidNode(...)
   );
   
   final DruidNodeDiscovery overlordNodeDiscovery = mockNodeDiscovery(
          new DruidNode(...),
       new DruidNode(...)
   );
   ```
   
   ```
   private DruidNodeDiscovery mockNodeDiscovery(DruidNode... nodes)
   {
      // Create the mock
      // Set expectations
      // Replay 
   }
   ```
   
   



##########
sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java:
##########
@@ -1121,6 +1116,14 @@ private static void checkStateReadAccessForServers(
     }
   }
 
+  public static Iterator<DiscoveryDruidNode> 
getDruidServers(DruidNodeDiscoveryProvider druidNodeDiscoveryProvider)

Review Comment:
   Please add a short javadoc to this method.



##########
sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java:
##########
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.sql.calcite.schema;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.FluentIterable;
+import org.apache.calcite.DataContext;
+import org.apache.calcite.linq4j.Enumerable;
+import org.apache.calcite.linq4j.Linq4j;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.schema.ScannableTable;
+import org.apache.calcite.schema.Schema;
+import org.apache.calcite.schema.impl.AbstractTable;
+import org.apache.druid.discovery.DiscoveryDruidNode;
+import org.apache.druid.discovery.DruidNodeDiscoveryProvider;
+import org.apache.druid.java.util.common.RE;
+import org.apache.druid.java.util.http.client.HttpClient;
+import org.apache.druid.java.util.http.client.Request;
+import 
org.apache.druid.java.util.http.client.response.StringFullResponseHandler;
+import 
org.apache.druid.java.util.http.client.response.StringFullResponseHolder;
+import org.apache.druid.segment.column.ColumnType;
+import org.apache.druid.segment.column.RowSignature;
+import org.apache.druid.server.DruidNode;
+import org.apache.druid.server.security.AuthenticationResult;
+import org.apache.druid.server.security.AuthorizerMapper;
+import org.apache.druid.sql.calcite.planner.PlannerContext;
+import org.apache.druid.sql.calcite.table.RowSignatures;
+import org.jboss.netty.handler.codec.http.HttpMethod;
+
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.stream.Collectors;
+
+public final class SystemPropertiesTable extends AbstractTable implements 
ScannableTable
+{
+  public static final String PROPERTIES_TABLE = "properties";
+
+  static final RowSignature PROPERTIES_SIGNATURE = RowSignature
+      .builder()
+      .add("service", ColumnType.STRING)
+      .add("host", ColumnType.STRING)
+      .add("server_type", ColumnType.STRING)
+      .add("property", ColumnType.STRING)
+      .add("value", ColumnType.STRING)
+      .build();
+
+  private final DruidNodeDiscoveryProvider druidNodeDiscoveryProvider;
+  private final AuthorizerMapper authorizerMapper;
+  private final HttpClient httpClient;
+  private final ObjectMapper jsonMapper;
+
+  public SystemPropertiesTable(
+      DruidNodeDiscoveryProvider druidNodeDiscoveryProvider,
+      AuthorizerMapper authorizerMapper,
+      HttpClient httpClient,
+      ObjectMapper jsonMapper
+  )
+  {
+    this.druidNodeDiscoveryProvider = druidNodeDiscoveryProvider;
+    this.authorizerMapper = authorizerMapper;
+    this.httpClient = httpClient;
+    this.jsonMapper = jsonMapper;
+  }
+
+  @Override
+  public RelDataType getRowType(RelDataTypeFactory typeFactory)
+  {
+    return RowSignatures.toRelDataType(PROPERTIES_SIGNATURE, typeFactory);
+  }
+
+  @Override
+  public Schema.TableType getJdbcTableType()
+  {
+    return Schema.TableType.SYSTEM_TABLE;
+  }
+
+  @Override
+  public Enumerable<Object[]> scan(DataContext root)
+  {
+    final AuthenticationResult authenticationResult = (AuthenticationResult) 
Preconditions.checkNotNull(
+        root.get(PlannerContext.DATA_CTX_AUTHENTICATION_RESULT),
+        "authenticationResult in dataContext"
+    );
+    SystemSchema.checkStateReadAccessForServers(authenticationResult, 
authorizerMapper);
+    final Iterator<DiscoveryDruidNode> druidServers = 
SystemSchema.getDruidServers(druidNodeDiscoveryProvider);
+
+    final FluentIterable<Object[]> results = FluentIterable
+        .from(() -> druidServers)
+        .transformAndConcat((DiscoveryDruidNode discoveryDruidNode) -> {
+          final DruidNode druidNode = discoveryDruidNode.getDruidNode();
+          final Map<String, String> propertiesMap = getProperties(druidNode);
+          return propertiesMap.entrySet().stream()
+                              .map(entry -> new Object[]{
+                                  druidNode.getServiceName(),
+                                  druidNode.getHost(),
+                                  
discoveryDruidNode.getNodeRole().getJsonName(),
+                                  entry.getKey(),
+                                  entry.getValue()
+                              })
+                              .collect(Collectors.toList());
+        });
+    return Linq4j.asEnumerable(results);
+  }
+
+  private Map<String, String> getProperties(DruidNode druidNode)
+  {
+    try {
+      final String url = 
druidNode.getUriToUse().resolve("/status/properties").toString();
+      final Request request = new Request(HttpMethod.GET, new URL(url));
+      final StringFullResponseHolder response;
+      try {
+        response = httpClient
+            .go(request, new StringFullResponseHandler(StandardCharsets.UTF_8))
+            .get();
+      }
+      catch (ExecutionException e) {
+        throw new RE(e, "HTTP request to[%s] failed", request.getUrl());
+      }
+
+      if (response.getStatus().getCode() != HttpServletResponse.SC_OK) {
+        throw new RE(
+            "Failed to get properties from node at [%s]. Error code [%d], 
description [%s].",
+            url,
+            response.getStatus().getCode(),
+            response.getStatus().getReasonPhrase()
+        );
+      }
+      return jsonMapper.readValue(
+          response.getContent(), new TypeReference<Map<String, String>>()
+          {
+          }

Review Comment:
   ```suggestion
             response.getContent(),
             new TypeReference<>(){}
   ```



##########
sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java:
##########
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.sql.calcite.schema;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.FluentIterable;
+import org.apache.calcite.DataContext;
+import org.apache.calcite.linq4j.Enumerable;
+import org.apache.calcite.linq4j.Linq4j;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.schema.ScannableTable;
+import org.apache.calcite.schema.Schema;
+import org.apache.calcite.schema.impl.AbstractTable;
+import org.apache.druid.discovery.DiscoveryDruidNode;
+import org.apache.druid.discovery.DruidNodeDiscoveryProvider;
+import org.apache.druid.java.util.common.RE;
+import org.apache.druid.java.util.http.client.HttpClient;
+import org.apache.druid.java.util.http.client.Request;
+import 
org.apache.druid.java.util.http.client.response.StringFullResponseHandler;
+import 
org.apache.druid.java.util.http.client.response.StringFullResponseHolder;
+import org.apache.druid.segment.column.ColumnType;
+import org.apache.druid.segment.column.RowSignature;
+import org.apache.druid.server.DruidNode;
+import org.apache.druid.server.security.AuthenticationResult;
+import org.apache.druid.server.security.AuthorizerMapper;
+import org.apache.druid.sql.calcite.planner.PlannerContext;
+import org.apache.druid.sql.calcite.table.RowSignatures;
+import org.jboss.netty.handler.codec.http.HttpMethod;
+
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.stream.Collectors;
+
+public final class SystemPropertiesTable extends AbstractTable implements 
ScannableTable
+{
+  public static final String PROPERTIES_TABLE = "properties";
+
+  static final RowSignature PROPERTIES_SIGNATURE = RowSignature
+      .builder()
+      .add("service", ColumnType.STRING)
+      .add("host", ColumnType.STRING)
+      .add("server_type", ColumnType.STRING)
+      .add("property", ColumnType.STRING)
+      .add("value", ColumnType.STRING)
+      .build();
+
+  private final DruidNodeDiscoveryProvider druidNodeDiscoveryProvider;
+  private final AuthorizerMapper authorizerMapper;
+  private final HttpClient httpClient;
+  private final ObjectMapper jsonMapper;
+
+  public SystemPropertiesTable(
+      DruidNodeDiscoveryProvider druidNodeDiscoveryProvider,
+      AuthorizerMapper authorizerMapper,
+      HttpClient httpClient,
+      ObjectMapper jsonMapper
+  )
+  {
+    this.druidNodeDiscoveryProvider = druidNodeDiscoveryProvider;
+    this.authorizerMapper = authorizerMapper;
+    this.httpClient = httpClient;
+    this.jsonMapper = jsonMapper;
+  }
+
+  @Override
+  public RelDataType getRowType(RelDataTypeFactory typeFactory)
+  {
+    return RowSignatures.toRelDataType(PROPERTIES_SIGNATURE, typeFactory);
+  }
+
+  @Override
+  public Schema.TableType getJdbcTableType()
+  {
+    return Schema.TableType.SYSTEM_TABLE;
+  }
+
+  @Override
+  public Enumerable<Object[]> scan(DataContext root)
+  {
+    final AuthenticationResult authenticationResult = (AuthenticationResult) 
Preconditions.checkNotNull(
+        root.get(PlannerContext.DATA_CTX_AUTHENTICATION_RESULT),
+        "authenticationResult in dataContext"
+    );
+    SystemSchema.checkStateReadAccessForServers(authenticationResult, 
authorizerMapper);
+    final Iterator<DiscoveryDruidNode> druidServers = 
SystemSchema.getDruidServers(druidNodeDiscoveryProvider);
+
+    final FluentIterable<Object[]> results = FluentIterable
+        .from(() -> druidServers)
+        .transformAndConcat((DiscoveryDruidNode discoveryDruidNode) -> {
+          final DruidNode druidNode = discoveryDruidNode.getDruidNode();
+          final Map<String, String> propertiesMap = getProperties(druidNode);
+          return propertiesMap.entrySet().stream()
+                              .map(entry -> new Object[]{
+                                  druidNode.getServiceName(),
+                                  druidNode.getHost(),
+                                  
discoveryDruidNode.getNodeRole().getJsonName(),
+                                  entry.getKey(),
+                                  entry.getValue()
+                              })
+                              .collect(Collectors.toList());
+        });
+    return Linq4j.asEnumerable(results);
+  }
+
+  private Map<String, String> getProperties(DruidNode druidNode)
+  {
+    try {
+      final String url = 
druidNode.getUriToUse().resolve("/status/properties").toString();
+      final Request request = new Request(HttpMethod.GET, new URL(url));
+      final StringFullResponseHolder response;
+      try {

Review Comment:
   Please don't nest try-catch.



##########
sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java:
##########
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.sql.calcite.schema;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.FluentIterable;
+import org.apache.calcite.DataContext;
+import org.apache.calcite.linq4j.Enumerable;
+import org.apache.calcite.linq4j.Linq4j;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.schema.ScannableTable;
+import org.apache.calcite.schema.Schema;
+import org.apache.calcite.schema.impl.AbstractTable;
+import org.apache.druid.discovery.DiscoveryDruidNode;
+import org.apache.druid.discovery.DruidNodeDiscoveryProvider;
+import org.apache.druid.java.util.common.RE;
+import org.apache.druid.java.util.http.client.HttpClient;
+import org.apache.druid.java.util.http.client.Request;
+import 
org.apache.druid.java.util.http.client.response.StringFullResponseHandler;
+import 
org.apache.druid.java.util.http.client.response.StringFullResponseHolder;
+import org.apache.druid.segment.column.ColumnType;
+import org.apache.druid.segment.column.RowSignature;
+import org.apache.druid.server.DruidNode;
+import org.apache.druid.server.security.AuthenticationResult;
+import org.apache.druid.server.security.AuthorizerMapper;
+import org.apache.druid.sql.calcite.planner.PlannerContext;
+import org.apache.druid.sql.calcite.table.RowSignatures;
+import org.jboss.netty.handler.codec.http.HttpMethod;
+
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.stream.Collectors;
+
+public final class SystemPropertiesTable extends AbstractTable implements 
ScannableTable
+{
+  public static final String PROPERTIES_TABLE = "properties";
+
+  static final RowSignature PROPERTIES_SIGNATURE = RowSignature
+      .builder()
+      .add("service", ColumnType.STRING)
+      .add("host", ColumnType.STRING)
+      .add("server_type", ColumnType.STRING)
+      .add("property", ColumnType.STRING)
+      .add("value", ColumnType.STRING)
+      .build();
+
+  private final DruidNodeDiscoveryProvider druidNodeDiscoveryProvider;
+  private final AuthorizerMapper authorizerMapper;
+  private final HttpClient httpClient;
+  private final ObjectMapper jsonMapper;
+
+  public SystemPropertiesTable(
+      DruidNodeDiscoveryProvider druidNodeDiscoveryProvider,
+      AuthorizerMapper authorizerMapper,
+      HttpClient httpClient,
+      ObjectMapper jsonMapper
+  )
+  {
+    this.druidNodeDiscoveryProvider = druidNodeDiscoveryProvider;
+    this.authorizerMapper = authorizerMapper;
+    this.httpClient = httpClient;
+    this.jsonMapper = jsonMapper;
+  }
+
+  @Override
+  public RelDataType getRowType(RelDataTypeFactory typeFactory)
+  {
+    return RowSignatures.toRelDataType(PROPERTIES_SIGNATURE, typeFactory);
+  }
+
+  @Override
+  public Schema.TableType getJdbcTableType()
+  {
+    return Schema.TableType.SYSTEM_TABLE;
+  }
+
+  @Override
+  public Enumerable<Object[]> scan(DataContext root)
+  {
+    final AuthenticationResult authenticationResult = (AuthenticationResult) 
Preconditions.checkNotNull(
+        root.get(PlannerContext.DATA_CTX_AUTHENTICATION_RESULT),
+        "authenticationResult in dataContext"
+    );
+    SystemSchema.checkStateReadAccessForServers(authenticationResult, 
authorizerMapper);
+    final Iterator<DiscoveryDruidNode> druidServers = 
SystemSchema.getDruidServers(druidNodeDiscoveryProvider);
+
+    final FluentIterable<Object[]> results = FluentIterable
+        .from(() -> druidServers)
+        .transformAndConcat((DiscoveryDruidNode discoveryDruidNode) -> {
+          final DruidNode druidNode = discoveryDruidNode.getDruidNode();
+          final Map<String, String> propertiesMap = getProperties(druidNode);
+          return propertiesMap.entrySet().stream()

Review Comment:
   Possible improvement:
   I think we could club together the entries if the same hostAndPort occurs 
multiple times with different node roles, e.g. this can happen when running a 
combined Coordinator/Overlord. Then instead of `server_type`, we could have a 
multi-valued column `node_roles`. Otherwise, we would be duplicating a bunch of 
properties unnecessarily.
   
   A simple way to do that would be to have `results` be a `Map<String, 
Object[]>` with key as `hostAndPort` (i.e. same value as the column `server`) 
and add or update entries into it as applicable. Then just return 
`Linq4j.asEnumerable(map.values())`.



##########
sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java:
##########
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.sql.calcite.schema;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.FluentIterable;
+import org.apache.calcite.DataContext;
+import org.apache.calcite.linq4j.Enumerable;
+import org.apache.calcite.linq4j.Linq4j;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.schema.ScannableTable;
+import org.apache.calcite.schema.Schema;
+import org.apache.calcite.schema.impl.AbstractTable;
+import org.apache.druid.discovery.DiscoveryDruidNode;
+import org.apache.druid.discovery.DruidNodeDiscoveryProvider;
+import org.apache.druid.java.util.common.RE;
+import org.apache.druid.java.util.http.client.HttpClient;
+import org.apache.druid.java.util.http.client.Request;
+import 
org.apache.druid.java.util.http.client.response.StringFullResponseHandler;
+import 
org.apache.druid.java.util.http.client.response.StringFullResponseHolder;
+import org.apache.druid.segment.column.ColumnType;
+import org.apache.druid.segment.column.RowSignature;
+import org.apache.druid.server.DruidNode;
+import org.apache.druid.server.security.AuthenticationResult;
+import org.apache.druid.server.security.AuthorizerMapper;
+import org.apache.druid.sql.calcite.planner.PlannerContext;
+import org.apache.druid.sql.calcite.table.RowSignatures;
+import org.jboss.netty.handler.codec.http.HttpMethod;
+
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.stream.Collectors;
+
+public final class SystemPropertiesTable extends AbstractTable implements 
ScannableTable
+{
+  public static final String PROPERTIES_TABLE = "properties";
+
+  static final RowSignature PROPERTIES_SIGNATURE = RowSignature
+      .builder()
+      .add("service", ColumnType.STRING)
+      .add("host", ColumnType.STRING)

Review Comment:
   To avoid confusion
   ```suggestion
         .add("service_name", ColumnType.STRING)
         .add("server", ColumnType.STRING)
   ```
   
   the `server` field should take the value `node.getHostAndPortToUse()` as 
several Druid services may be running on the same host.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]


Reply via email to