jon-wei commented on a change in pull request #6094: Introduce SystemSchema tables (#5989)
URL: https://github.com/apache/incubator-druid/pull/6094#discussion_r224946614
 
 

 ##########
 File path: sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java
 ##########
 @@ -0,0 +1,632 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.druid.sql.calcite.schema;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.util.concurrent.SettableFuture;
+import org.apache.calcite.DataContext;
+import org.apache.calcite.adapter.java.JavaTypeFactory;
+import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
+import org.apache.calcite.linq4j.Enumerable;
+import org.apache.calcite.linq4j.Enumerator;
+import org.apache.calcite.linq4j.QueryProvider;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.schema.SchemaPlus;
+import org.apache.calcite.schema.Table;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.druid.client.DirectDruidClient;
+import org.apache.druid.client.DruidServer;
+import org.apache.druid.client.ImmutableDruidServer;
+import org.apache.druid.client.TimelineServerView;
+import org.apache.druid.data.input.InputRow;
+import org.apache.druid.discovery.DruidLeaderClient;
+import org.apache.druid.jackson.DefaultObjectMapper;
+import org.apache.druid.java.util.common.Intervals;
+import org.apache.druid.java.util.common.Pair;
+import org.apache.druid.java.util.common.io.Closer;
+import org.apache.druid.java.util.http.client.HttpClient;
+import org.apache.druid.java.util.http.client.Request;
+import org.apache.druid.java.util.http.client.io.AppendableByteArrayInputStream;
+import org.apache.druid.java.util.http.client.response.FullResponseHolder;
+import org.apache.druid.java.util.http.client.response.HttpResponseHandler;
+import org.apache.druid.query.QueryRunnerFactoryConglomerate;
+import org.apache.druid.query.QueryRunnerTestHelper;
+import org.apache.druid.query.ReflectionQueryToolChestWarehouse;
+import org.apache.druid.query.aggregation.CountAggregatorFactory;
+import org.apache.druid.query.aggregation.DoubleSumAggregatorFactory;
+import org.apache.druid.query.aggregation.LongSumAggregatorFactory;
+import org.apache.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory;
+import org.apache.druid.segment.IndexBuilder;
+import org.apache.druid.segment.QueryableIndex;
+import org.apache.druid.segment.TestHelper;
+import org.apache.druid.segment.incremental.IncrementalIndexSchema;
+import org.apache.druid.segment.writeout.OffHeapMemorySegmentWriteOutMediumFactory;
+import org.apache.druid.server.coordination.DruidServerMetadata;
+import org.apache.druid.server.coordination.ServerType;
+import org.apache.druid.server.coordinator.BytesAccumulatingResponseHandler;
+import org.apache.druid.server.metrics.NoopServiceEmitter;
+import org.apache.druid.server.security.Access;
+import org.apache.druid.server.security.Authorizer;
+import org.apache.druid.server.security.AuthorizerMapper;
+import org.apache.druid.server.security.NoopEscalator;
+import org.apache.druid.sql.calcite.planner.PlannerConfig;
+import org.apache.druid.sql.calcite.util.CalciteTestBase;
+import org.apache.druid.sql.calcite.util.CalciteTests;
+import org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker;
+import org.apache.druid.sql.calcite.util.TestServerInventoryView;
+import org.apache.druid.sql.calcite.view.NoopViewManager;
+import org.apache.druid.timeline.DataSegment;
+import org.easymock.EasyMock;
+import org.jboss.netty.handler.codec.http.HttpMethod;
+import org.jboss.netty.handler.codec.http.HttpResponse;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import javax.servlet.http.HttpServletResponse;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.util.List;
+import java.util.Map;
+
+public class SystemSchemaTest extends CalciteTestBase
+{
+  private static final PlannerConfig PLANNER_CONFIG_DEFAULT = new PlannerConfig();
+
+  private static final List<InputRow> ROWS1 = ImmutableList.of(
+      CalciteTests.createRow(ImmutableMap.of("t", "2000-01-01", "m1", "1.0", "dim1", "")),
+      CalciteTests.createRow(ImmutableMap.of("t", "2000-01-02", "m1", "2.0", "dim1", "10.1")),
+      CalciteTests.createRow(ImmutableMap.of("t", "2000-01-03", "m1", "3.0", "dim1", "2"))
+  );
+
+  private static final List<InputRow> ROWS2 = ImmutableList.of(
+      CalciteTests.createRow(ImmutableMap.of("t", "2001-01-01", "m1", "4.0", "dim2", ImmutableList.of("a"))),
+      CalciteTests.createRow(ImmutableMap.of("t", "2001-01-02", "m1", "5.0", "dim2", ImmutableList.of("abc"))),
+      CalciteTests.createRow(ImmutableMap.of("t", "2001-01-03", "m1", "6.0"))
+  );
+
+  private SystemSchema schema;
+  private SpecificSegmentsQuerySegmentWalker walker;
+  private DruidLeaderClient client;
+  private TimelineServerView serverView;
+  private ObjectMapper mapper;
+  private FullResponseHolder responseHolder;
+  private BytesAccumulatingResponseHandler responseHandler;
+  private Request request;
+  private DruidSchema druidSchema;
+  private AuthorizerMapper authMapper;
+  private static QueryRunnerFactoryConglomerate conglomerate;
+  private static Closer resourceCloser;
+
+  @Rule
+  public TemporaryFolder temporaryFolder = new TemporaryFolder();
+
+  @BeforeClass
+  public static void setUpClass()
+  {
+    final Pair<QueryRunnerFactoryConglomerate, Closer> conglomerateCloserPair = CalciteTests
+        .createQueryRunnerFactoryConglomerate();
+    conglomerate = conglomerateCloserPair.lhs;
+    resourceCloser = conglomerateCloserPair.rhs;
+  }
+
+  @AfterClass
+  public static void tearDownClass() throws IOException
+  {
+    resourceCloser.close();
+  }
+
+  @Before
+  public void setUp() throws Exception
+  {
+    serverView = EasyMock.createNiceMock(TimelineServerView.class);
+    client = EasyMock.createMock(DruidLeaderClient.class);
+    mapper = TestHelper.makeJsonMapper();
+    responseHolder = EasyMock.createMock(FullResponseHolder.class);
+    responseHandler = EasyMock.createMockBuilder(BytesAccumulatingResponseHandler.class)
+                              .withConstructor()
+                              .addMockedMethod(
+                                  "handleResponse",
+                                  HttpResponse.class,
+                                  HttpResponseHandler.TrafficCop.class
+                              )
+                              .addMockedMethod("getStatus")
+                              .createMock();
+    request = EasyMock.createMock(Request.class);
+    authMapper = new AuthorizerMapper(null)
+    {
+      @Override
+      public Authorizer getAuthorizer(String name)
+      {
+        return (authenticationResult, resource, action) -> new Access(true);
+      }
+    };
+
+    final File tmpDir = temporaryFolder.newFolder();
+    final QueryableIndex index1 = IndexBuilder.create()
+                                              .tmpDir(new File(tmpDir, "1"))
+                                              .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
+                                              .schema(
+                                                  new IncrementalIndexSchema.Builder()
+                                                      .withMetrics(
+                                                          new CountAggregatorFactory("cnt"),
+                                                          new DoubleSumAggregatorFactory("m1", "m1"),
+                                                          new HyperUniquesAggregatorFactory("unique_dim1", "dim1")
+                                                      )
+                                                      .withRollup(false)
+                                                      .build()
+                                              )
+                                              .rows(ROWS1)
+                                              .buildMMappedIndex();
+
+    final QueryableIndex index2 = IndexBuilder.create()
+                                              .tmpDir(new File(tmpDir, "2"))
+                                              .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
+                                              .schema(
+                                                  new IncrementalIndexSchema.Builder()
+                                                      .withMetrics(new LongSumAggregatorFactory("m1", "m1"))
+                                                      .withRollup(false)
+                                                      .build()
+                                              )
+                                              .rows(ROWS2)
+                                              .buildMMappedIndex();
+
+    walker = new SpecificSegmentsQuerySegmentWalker(conglomerate)
+        .add(segment1, index1)
+        .add(segment2, index2)
+        .add(segment3, index2);
+
+    druidSchema = new DruidSchema(
+        CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
+        new TestServerInventoryView(walker.getSegments()),
+        PLANNER_CONFIG_DEFAULT,
+        new NoopViewManager(),
+        new NoopEscalator()
+    );
+    druidSchema.start();
+    druidSchema.awaitInitialization();
+    schema = new SystemSchema(
+        druidSchema,
+        serverView,
+        EasyMock.createStrictMock(AuthorizerMapper.class),
+        client,
+        client,
+        mapper
+    );
+  }
+
+  private final DataSegment segment1 = new DataSegment(
+      "test1",
+      Intervals.of("2010/2011"),
+      "version1",
+      null,
+      ImmutableList.of("dim1", "dim2"),
+      ImmutableList.of("met1", "met2"),
+      null,
+      1,
+      100L,
+      DataSegment.PruneLoadSpecHolder.DEFAULT
+  );
+  private final DataSegment segment2 = new DataSegment(
+      "test2",
+      Intervals.of("2011/2012"),
+      "version2",
+      null,
+      ImmutableList.of("dim1", "dim2"),
+      ImmutableList.of("met1", "met2"),
+      null,
+      1,
+      100L,
+      DataSegment.PruneLoadSpecHolder.DEFAULT
+  );
+  private final DataSegment segment3 = new DataSegment(
+      "test3",
+      Intervals.of("2012/2013"),
+      "version3",
+      null,
+      ImmutableList.of("dim1", "dim2"),
+      ImmutableList.of("met1", "met2"),
+      null,
+      1,
+      100L,
+      DataSegment.PruneLoadSpecHolder.DEFAULT
+  );
+  private final DataSegment segment4 = new DataSegment(
+      "test4",
+      Intervals.of("2017/2018"),
+      "version4",
+      null,
+      ImmutableList.of("dim1", "dim2"),
+      ImmutableList.of("met1", "met2"),
+      null,
+      1,
+      100L,
+      DataSegment.PruneLoadSpecHolder.DEFAULT
+  );
+  private final DataSegment segment5 = new DataSegment(
+      "test5",
+      Intervals.of("2017/2018"),
+      "version5",
+      null,
+      ImmutableList.of("dim1", "dim2"),
+      ImmutableList.of("met1", "met2"),
+      null,
+      1,
+      100L,
+      DataSegment.PruneLoadSpecHolder.DEFAULT
+  );
+
+  private final HttpClient httpClient = EasyMock.createMock(HttpClient.class);
+  private final DirectDruidClient client1 = new DirectDruidClient(
+      new ReflectionQueryToolChestWarehouse(),
+      QueryRunnerTestHelper.NOOP_QUERYWATCHER,
+      new DefaultObjectMapper(),
+      httpClient,
+      "http",
+      "foo",
+      new NoopServiceEmitter()
+  );
+  private final DirectDruidClient client2 = new DirectDruidClient(
+      new ReflectionQueryToolChestWarehouse(),
+      QueryRunnerTestHelper.NOOP_QUERYWATCHER,
+      new DefaultObjectMapper(),
+      httpClient,
+      "http",
+      "foo2",
+      new NoopServiceEmitter()
+  );
+  private final ImmutableDruidServer druidServer1 = new ImmutableDruidServer(
+      new DruidServerMetadata("server1", "localhost:0000", null, 5L, 
ServerType.REALTIME, DruidServer.DEFAULT_TIER, 0),
+      1L,
+      null,
+      ImmutableMap.of("segment1", segment1, "segment2", segment2)
+  );
+
+  private final ImmutableDruidServer druidServer2 = new ImmutableDruidServer(
+      new DruidServerMetadata("server2", "server2:1234", null, 5L, 
ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0),
+      1L,
+      null,
+      ImmutableMap.of("segment2", segment2, "segment4", segment4, "segment5", 
segment5)
 
 Review comment:
   hm, I think this suite needs a test for `SystemSchema.ServerSegmentsTable` that checks the expected server/segment mappings; such a test would catch the segment2->segment3 change
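   
   Something along these lines is what I have in mind — a rough sketch only, not compiled against this PR. It assumes `ServerSegmentsTable` takes `(TimelineServerView, AuthorizerMapper)` in its constructor, exposes a Calcite `ScannableTable`-style `scan(DataContext)` that yields one `(server, segment_id)` row per served segment, and reads the authentication result out of the `DataContext`; adjust the names to whatever the actual class exposes:
   
```java
@Test
public void testServerSegmentsTable()
{
  // Sketch: constructor signature and scan() row layout are assumptions
  // about the PR's API, not taken from it.
  final SystemSchema.ServerSegmentsTable serverSegmentsTable =
      new SystemSchema.ServerSegmentsTable(serverView, authMapper);

  EasyMock.expect(serverView.getDruidServers())
          .andReturn(ImmutableList.of(druidServer1, druidServer2))
          .once();
  EasyMock.replay(serverView);

  // Minimal DataContext; assumes the table only pulls the authentication
  // result from the context, so the other accessors can return null.
  final DataContext dataContext = new DataContext()
  {
    @Override
    public SchemaPlus getRootSchema()
    {
      return null;
    }

    @Override
    public JavaTypeFactory getTypeFactory()
    {
      return null;
    }

    @Override
    public QueryProvider getQueryProvider()
    {
      return null;
    }

    @Override
    public Object get(String name)
    {
      return CalciteTests.REGULAR_USER_AUTH_RESULT;
    }
  };

  // druidServer1 serves segment1 and segment2; druidServer2 serves segment2,
  // segment4, and segment5 -> five (server, segment) rows in total.
  final List<Object[]> rows = serverSegmentsTable.scan(dataContext).toList();
  Assert.assertEquals(5, rows.size());

  // Spot-check the server -> segment mappings; a stray segment2->segment3
  // swap in the fixtures above would fail here. Column layout of
  // (server, segment_id) and insertion-order iteration are assumptions.
  Assert.assertEquals("localhost:0000", rows.get(0)[0]);
  Assert.assertEquals(segment1.getIdentifier(), rows.get(0)[1]);
  Assert.assertEquals("server2:1234", rows.get(2)[0]);
  Assert.assertEquals(segment2.getIdentifier(), rows.get(2)[1]);

  EasyMock.verify(serverView);
}
```
   
   The positional assertions are the fragile part; sorting the returned rows by server and segment id before asserting would make the test independent of map iteration order.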

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@druid.apache.org
For additional commands, e-mail: commits-h...@druid.apache.org
