[ https://issues.apache.org/jira/browse/CARBONDATA-2614?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Jacky Li resolved CARBONDATA-2614.
----------------------------------
       Resolution: Fixed
    Fix Version/s: 1.4.1
                   1.5.0

> There are some exceptions when using FG in search mode and the prune result is empty
> -------------------------------------------------------------------------------------
>
>                 Key: CARBONDATA-2614
>                 URL: https://issues.apache.org/jira/browse/CARBONDATA-2614
>             Project: CarbonData
>          Issue Type: Bug
>            Reporter: xubo245
>            Assignee: xubo245
>            Priority: Major
>             Fix For: 1.5.0, 1.4.1
>
>          Time Spent: 2h 50m
>  Remaining Estimate: 0h
>
> test code:
> {code:java}
>   test("test lucene datamap with search mode, two column") {
>     sql("set carbon.search.enabled = true")
>     sql("drop datamap if exists dm3 ON TABLE main")
>     sql("CREATE DATAMAP dm3 ON TABLE main USING 'lucene' 
> DMProperties('INDEX_COLUMNS'='city , id') ")
>     checkAnswer(sql("SELECT * FROM main WHERE TEXT_MATCH('city:city6')"),
>       sql("SELECT * FROM main WHERE city='city6'"))
>     checkAnswer(sql("SELECT * FROM main WHERE TEXT_MATCH('id:100000')"),
>       sql(s"SELECT * FROM main WHERE id='100000'"))
>     sql("DROP DATAMAP if exists dm3 ON TABLE main")
>   }
> {code}
> error:
> {code:java}
> 18/06/15 03:30:45 INFO UnsafeMemoryManager: [dispatcher-event-loop-7][partitionID:main;queryID:33936819040113] Total memory used after task 33936818832698 is 0 Current tasks running now are : [33934154436368]
> 18/06/15 03:30:45 ERROR Inbox: Ignoring error
> java.lang.RuntimeException: java.util.concurrent.ExecutionException: org.apache.carbondata.core.scan.expression.exception.FilterUnsupportedException: TEXT_MATCH is not supported on table main
>       at org.apache.carbondata.core.scan.result.iterator.AbstractSearchModeResultIterator.hasNext(AbstractSearchModeResultIterator.java:75)
>       at org.apache.carbondata.core.scan.result.iterator.ChunkRowIterator.<init>(ChunkRowIterator.java:40)
>       at org.apache.carbondata.hadoop.CarbonRecordReader.initialize(CarbonRecordReader.java:89)
>       at org.apache.carbondata.store.worker.SearchRequestHandler.handleRequest(SearchRequestHandler.java:140)
>       at org.apache.carbondata.store.worker.SearchRequestHandler.handleSearch(SearchRequestHandler.java:71)
>       at org.apache.spark.search.Searcher$$anonfun$receiveAndReply$1.applyOrElse(Searcher.scala:42)
>       at org.apache.spark.rpc.netty.Inbox$$anonfun$process$1.apply$mcV$sp(Inbox.scala:105)
>       at org.apache.spark.rpc.netty.Inbox.safelyCall(Inbox.scala:205)
>       at org.apache.spark.rpc.netty.Inbox.process(Inbox.scala:101)
>       at org.apache.spark.rpc.netty.Dispatcher$MessageLoop.run(Dispatcher.scala:216)
>       at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>       at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>       at java.lang.Thread.run(Thread.java:748)
> Caused by: java.util.concurrent.ExecutionException: org.apache.carbondata.core.scan.expression.exception.FilterUnsupportedException: TEXT_MATCH is not supported on table main
>       at java.util.concurrent.FutureTask.report(FutureTask.java:122)
>       at java.util.concurrent.FutureTask.get(FutureTask.java:192)
>       at org.apache.carbondata.core.scan.result.iterator.AbstractSearchModeResultIterator.hasNext(AbstractSearchModeResultIterator.java:71)
>       ... 12 more
> Caused by: org.apache.carbondata.core.scan.expression.exception.FilterUnsupportedException: TEXT_MATCH is not supported on table main
>       at org.apache.carbondata.core.scan.filter.executer.RowLevelFilterExecuterImpl.applyFilter(RowLevelFilterExecuterImpl.java:199)
>       at org.apache.carbondata.core.scan.scanner.impl.BlockletFilterScanner.executeFilter(BlockletFilterScanner.java:174)
>       at org.apache.carbondata.core.scan.scanner.impl.BlockletFilterScanner.scanBlocklet(BlockletFilterScanner.java:101)
>       at org.apache.carbondata.core.scan.processor.BlockScan.scan(BlockScan.java:70)
>       at org.apache.carbondata.core.scan.result.iterator.AbstractSearchModeResultIterator$1.call(AbstractSearchModeResultIterator.java:59)
>       at org.apache.carbondata.core.scan.result.iterator.AbstractSearchModeResultIterator$1.call(AbstractSearchModeResultIterator.java:53)
>       at java.util.concurrent.FutureTask.run(FutureTask.java:266)
>       ... 3 more
> 18/06/15 03:30:45 ERROR CarbonSession: Exception when executing search mode: Exception thrown in awaitResult: 
> 18/06/15 03:30:45 INFO SearchModeTestCase: ScalaTest-run-running-SearchModeTestCase ===== FINISHED org.apache.carbondata.spark.testsuite.detailquery.SearchModeTestCase: 'test lucene datamap with search mode 2' =====
> Exception thrown in awaitResult: 
> org.apache.spark.SparkException: Exception thrown in awaitResult: 
>       at org.apache.spark.util.ThreadUtils$.awaitResult(ThreadUtils.scala:205)
>       at org.apache.spark.rpc.Master$$anonfun$search$1.apply(Master.scala:239)
>       at org.apache.spark.rpc.Master$$anonfun$search$1.apply(Master.scala:230)
>       at scala.collection.mutable.HashMap$$anonfun$foreach$1.apply(HashMap.scala:99)
>       at scala.collection.mutable.HashMap$$anonfun$foreach$1.apply(HashMap.scala:99)
>       at scala.collection.mutable.HashTable$class.foreachEntry(HashTable.scala:230)
>       at scala.collection.mutable.HashMap.foreachEntry(HashMap.scala:40)
>       at scala.collection.mutable.HashMap.foreach(HashMap.scala:99)
>       at org.apache.spark.rpc.Master.search(Master.scala:230)
>       at org.apache.carbondata.store.SparkCarbonStore.search(SparkCarbonStore.scala:144)
>       at org.apache.spark.sql.CarbonSession.runSearch(CarbonSession.scala:228)
>       at org.apache.spark.sql.CarbonSession.org$apache$spark$sql$CarbonSession$$trySearchMode(CarbonSession.scala:180)
>       at org.apache.spark.sql.CarbonSession$$anonfun$sql$1.apply(CarbonSession.scala:99)
>       at org.apache.spark.sql.CarbonSession$$anonfun$sql$1.apply(CarbonSession.scala:96)
>       at org.apache.spark.sql.CarbonSession.withProfiler(CarbonSession.scala:154)
>       at org.apache.spark.sql.CarbonSession.sql(CarbonSession.scala:94)
>       at org.apache.spark.sql.test.Spark2TestQueryExecutor.sql(Spark2TestQueryExecutor.scala:35)
>       at org.apache.spark.sql.test.util.QueryTest.sql(QueryTest.scala:113)
>       at org.apache.carbondata.spark.testsuite.detailquery.SearchModeTestCase$$anonfun$12.apply$mcV$sp(SearchModeTestCase.scala:129)
>       at org.apache.carbondata.spark.testsuite.detailquery.SearchModeTestCase$$anonfun$12.apply(SearchModeTestCase.scala:122)
>       at org.apache.carbondata.spark.testsuite.detailquery.SearchModeTestCase$$anonfun$12.apply(SearchModeTestCase.scala:122)
>       at org.scalatest.Transformer$$anonfun$apply$1.apply$mcV$sp(Transformer.scala:22)
>       at org.scalatest.OutcomeOf$class.outcomeOf(OutcomeOf.scala:85)
>       at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
>       at org.scalatest.Transformer.apply(Transformer.scala:22)
>       at org.scalatest.Transformer.apply(Transformer.scala:20)
>       at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:166)
>       at org.apache.spark.sql.test.util.CarbonFunSuite.withFixture(CarbonFunSuite.scala:41)
>       at org.scalatest.FunSuiteLike$class.invokeWithFixture$1(FunSuiteLike.scala:163)
>       at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:175)
>       at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:175)
>       at org.scalatest.SuperEngine.runTestImpl(Engine.scala:306)
>       at org.scalatest.FunSuiteLike$class.runTest(FunSuiteLike.scala:175)
>       at org.scalatest.FunSuite.runTest(FunSuite.scala:1555)
>       at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:208)
>       at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:208)
>       at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:413)
>       at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:401)
>       at scala.collection.immutable.List.foreach(List.scala:381)
>       at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:401)
>       at org.scalatest.SuperEngine.org$scalatest$SuperEngine$$runTestsInBranch(Engine.scala:396)
>       at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:483)
>       at org.scalatest.FunSuiteLike$class.runTests(FunSuiteLike.scala:208)
>       at org.scalatest.FunSuite.runTests(FunSuite.scala:1555)
>       at org.scalatest.Suite$class.run(Suite.scala:1424)
>       at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1555)
>       at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:212)
>       at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:212)
>       at org.scalatest.SuperEngine.runImpl(Engine.scala:545)
>       at org.scalatest.FunSuiteLike$class.run(FunSuiteLike.scala:212)
>       at org.apache.carbondata.spark.testsuite.detailquery.SearchModeTestCase.org$scalatest$BeforeAndAfterAll$$super$run(SearchModeTestCase.scala:32)
>       at org.scalatest.BeforeAndAfterAll$class.liftedTree1$1(BeforeAndAfterAll.scala:257)
>       at org.scalatest.BeforeAndAfterAll$class.run(BeforeAndAfterAll.scala:256)
>       at org.apache.carbondata.spark.testsuite.detailquery.SearchModeTestCase.run(SearchModeTestCase.scala:32)
>       at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:55)
>       at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$3.apply(Runner.scala:2563)
>       at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$3.apply(Runner.scala:2557)
>       at scala.collection.immutable.List.foreach(List.scala:381)
>       at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:2557)
>       at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1044)
>       at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1043)
>       at org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:2722)
>       at org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:1043)
>       at org.scalatest.tools.Runner$.run(Runner.scala:883)
>       at org.scalatest.tools.Runner.run(Runner.scala)
>       at org.jetbrains.plugins.scala.testingSupport.scalaTest.ScalaTestRunner.runScalaTest2(ScalaTestRunner.java:131)
>       at org.jetbrains.plugins.scala.testingSupport.scalaTest.ScalaTestRunner.main(ScalaTestRunner.java:28)
> Caused by: java.lang.RuntimeException: java.util.concurrent.ExecutionException: org.apache.carbondata.core.scan.expression.exception.FilterUnsupportedException: TEXT_MATCH is not supported on table main
>       at org.apache.carbondata.core.scan.result.iterator.AbstractSearchModeResultIterator.hasNext(AbstractSearchModeResultIterator.java:75)
>       at org.apache.carbondata.core.scan.result.iterator.ChunkRowIterator.<init>(ChunkRowIterator.java:40)
>       at org.apache.carbondata.hadoop.CarbonRecordReader.initialize(CarbonRecordReader.java:89)
>       at org.apache.carbondata.store.worker.SearchRequestHandler.handleRequest(SearchRequestHandler.java:140)
>       at org.apache.carbondata.store.worker.SearchRequestHandler.handleSearch(SearchRequestHandler.java:71)
>       at org.apache.spark.search.Searcher$$anonfun$receiveAndReply$1.applyOrElse(Searcher.scala:42)
>       at org.apache.spark.rpc.netty.Inbox$$anonfun$process$1.apply$mcV$sp(Inbox.scala:105)
>       at org.apache.spark.rpc.netty.Inbox.safelyCall(Inbox.scala:205)
>       at org.apache.spark.rpc.netty.Inbox.process(Inbox.scala:101)
>       at org.apache.spark.rpc.netty.Dispatcher$MessageLoop.run(Dispatcher.scala:216)
>       at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>       at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>       at java.lang.Thread.run(Thread.java:748)
> Caused by: java.util.concurrent.ExecutionException: org.apache.carbondata.core.scan.expression.exception.FilterUnsupportedException: TEXT_MATCH is not supported on table main
>       at java.util.concurrent.FutureTask.report(FutureTask.java:122)
>       at java.util.concurrent.FutureTask.get(FutureTask.java:192)
>       at org.apache.carbondata.core.scan.result.iterator.AbstractSearchModeResultIterator.hasNext(AbstractSearchModeResultIterator.java:71)
>       ... 12 more
> Caused by: org.apache.carbondata.core.scan.expression.exception.FilterUnsupportedException: TEXT_MATCH is not supported on table main
>       at org.apache.carbondata.core.scan.filter.executer.RowLevelFilterExecuterImpl.applyFilter(RowLevelFilterExecuterImpl.java:199)
>       at org.apache.carbondata.core.scan.scanner.impl.BlockletFilterScanner.executeFilter(BlockletFilterScanner.java:174)
>       at org.apache.carbondata.core.scan.scanner.impl.BlockletFilterScanner.scanBlocklet(BlockletFilterScanner.java:101)
>       at org.apache.carbondata.core.scan.processor.BlockScan.scan(BlockScan.java:70)
>       at org.apache.carbondata.core.scan.result.iterator.AbstractSearchModeResultIterator$1.call(AbstractSearchModeResultIterator.java:59)
>       at org.apache.carbondata.core.scan.result.iterator.AbstractSearchModeResultIterator$1.call(AbstractSearchModeResultIterator.java:53)
>       at java.util.concurrent.FutureTask.run(FutureTask.java:266)
>       ... 3 more
> 18/06/15 03:30:45 INFO CarbonSparkSqlParser: Parsing command: DROP TABLE IF EXISTS main
> 18/06/15 03:30:45 INFO CarbonSession: ScalaTest-run Search service started, but don't support: DROP TABLE IF EXISTS main, and will run it with SparkSQL
> 18/06/15 03:30:45 INFO CarbonLateDecodeRule: ScalaTest-run skip CarbonOptimizer
> {code}



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)
