Copilot commented on code in PR #2085:
URL: https://github.com/apache/auron/pull/2085#discussion_r2922135573


##########
native-engine/datafusion-ext-functions/src/spark_instr.rs:
##########
@@ -0,0 +1,214 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use arrow::array::{Array, ArrayRef, Int32Array, StringArray};
+use datafusion::{
+    common::{
+        Result, ScalarValue,
+        cast::{as_int32_array, as_string_array},
+    },
+    physical_plan::ColumnarValue,
+};
+use datafusion_ext_commons::df_execution_err;
+
+/// instr(str, substr) - Returns the (1-based) index of the first occurrence of
+/// substr in str. Compatible with Spark's instr function.
+/// Returns 0 if substr is not found or if substr is empty.
+/// Returns null if str is null or substr is null.
+pub fn spark_instr(args: &[ColumnarValue]) -> Result<ColumnarValue> {
+    if args.len() != 2 {
+        df_execution_err!("instr requires exactly 2 arguments")?;
+    }
+
+    let is_scalar = args
+        .iter()
+        .all(|arg| matches!(arg, ColumnarValue::Scalar(_)));
+    let len = args
+        .iter()
+        .map(|arg| match arg {
+            ColumnarValue::Array(array) => array.len(),
+            ColumnarValue::Scalar(_) => 1,
+        })
+        .max()
+        .unwrap_or(0);
+
+    let arrays = args
+        .iter()
+        .map(|arg| {
+            Ok(match arg {
+                ColumnarValue::Array(array) => array.clone(),
+                ColumnarValue::Scalar(scalar) => scalar.to_array_of_size(len)?,
+            })
+        })
+        .collect::<Result<Vec<_>>>()?;
+
+    let str_array = as_string_array(&arrays[0])?;
+    let substr_array = as_string_array(&arrays[1])?;
+
+    let result_array: ArrayRef = Arc::new(Int32Array::from_iter(
+        str_array
+            .iter()
+            .zip(substr_array.iter())
+            .map(|(s, substr)| match (s, substr) {
+                (Some(_), None) => None, // substr is null
+                (None, _) => None,       // str is null
+                (Some(s), Some(substr)) => {
+                    if substr.is_empty() {
+                        Some(0)
+                    } else {
+                        Some(s.find(substr).map(|pos| (pos + 1) as i32).unwrap_or(0))
+                    }
+                }
+            }),
+    ));
+
+    if is_scalar {
+        let scalar = as_int32_array(&result_array)?.value(0);
+        Ok(ColumnarValue::Scalar(if result_array.is_null(0) {
+            ScalarValue::Int32(None)
+        } else {
+            ScalarValue::Int32(Some(scalar))
+        }))
+    } else {
+        Ok(ColumnarValue::Array(result_array))
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use std::sync::Arc;
+
+    use arrow::array::{ArrayRef, Int32Array, StringArray};
+    use datafusion::{
+        common::{Result, ScalarValue, cast::as_int32_array},
+        physical_plan::ColumnarValue,
+    };

Review Comment:
   The test module imports `ArrayRef` and `Int32Array` but doesn't use them. Since the repo runs clippy with `-D warnings`, these unused imports will fail CI. Remove the unused imports from the test module.
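
   A minimal sketch of what the trimmed test-module imports could look like (illustrative only; the exact set depends on the test bodies, which aren't shown in this hunk):

   ```rust
   // Hypothetical trimmed imports for `mod test`, keeping only what the tests actually use.
   use std::sync::Arc;

   use arrow::array::StringArray;
   use datafusion::{
       common::{Result, ScalarValue, cast::as_int32_array},
       physical_plan::ColumnarValue,
   };
   ```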



##########
auron-spark-tests/spark33/src/test/scala/org/apache/spark/sql/AuronInstrSuite.scala:
##########
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql
+
+class AuronInstrSuite extends QueryTest with SparkQueryTestsBase {
+
+  test("test instr function - basic functionality") {
+    val data = Seq(
+      ("hello world", "world"),
+      ("hello world", "hello"),
+      ("hello world", "o"),
+      ("hello world", "z"),
+      (null, "test"),
+      ("test", null)
+    )
+
+    val df = spark.createDataFrame(data).toDF("str", "substr")
+    val result = df.selectExpr("instr(str, substr)").collect().map(_.getInt(0))
+
+    assert(result(0) == 7, "instr('hello world', 'world') should return 7")
+    assert(result(1) == 1, "instr('hello world', 'hello') should return 1")
+    assert(result(2) == 5, "instr('hello world', 'o') should return 5")
+    assert(result(3) == 0, "instr('hello world', 'z') should return 0")
+    assert(result(4) == 0, "instr(null, 'test') should return null")
+    assert(result(5) == 0, "instr('test', null) should return null")

Review Comment:
   The test collects `instr(str, substr)` results via `row.getInt(0)` and then asserts `0` for null-input cases, but `instr(null, ...)` / `instr(..., null)` should produce a NULL result. With the native implementation returning NULL for null inputs, `getInt(0)` will throw at runtime. Collect as `Integer`/`java.lang.Integer` (or check `row.isNullAt(0)`) and assert NULLs for these rows instead of `0`.
   ```suggestion
       val result = df.selectExpr("instr(str, substr)").collect()
   
       assert(result(0).getInt(0) == 7, "instr('hello world', 'world') should return 7")
       assert(result(1).getInt(0) == 1, "instr('hello world', 'hello') should return 1")
       assert(result(2).getInt(0) == 5, "instr('hello world', 'o') should return 5")
       assert(result(3).getInt(0) == 0, "instr('hello world', 'z') should return 0")
       assert(result(4).isNullAt(0), "instr(null, 'test') should return null")
       assert(result(5).isNullAt(0), "instr('test', null) should return null")
   ```



##########
native-engine/datafusion-ext-functions/src/spark_instr.rs:
##########
@@ -0,0 +1,214 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use arrow::array::{Array, ArrayRef, Int32Array, StringArray};
+use datafusion::{
+    common::{
+        Result, ScalarValue,
+        cast::{as_int32_array, as_string_array},
+    },
+    physical_plan::ColumnarValue,
+};
+use datafusion_ext_commons::df_execution_err;
+
+/// instr(str, substr) - Returns the (1-based) index of the first occurrence of
+/// substr in str. Compatible with Spark's instr function.
+/// Returns 0 if substr is not found or if substr is empty.
+/// Returns null if str is null or substr is null.
+pub fn spark_instr(args: &[ColumnarValue]) -> Result<ColumnarValue> {
+    if args.len() != 2 {
+        df_execution_err!("instr requires exactly 2 arguments")?;
+    }
+
+    let is_scalar = args
+        .iter()
+        .all(|arg| matches!(arg, ColumnarValue::Scalar(_)));
+    let len = args
+        .iter()
+        .map(|arg| match arg {
+            ColumnarValue::Array(array) => array.len(),
+            ColumnarValue::Scalar(_) => 1,
+        })
+        .max()
+        .unwrap_or(0);
+
+    let arrays = args
+        .iter()
+        .map(|arg| {
+            Ok(match arg {
+                ColumnarValue::Array(array) => array.clone(),
+                ColumnarValue::Scalar(scalar) => scalar.to_array_of_size(len)?,
+            })
+        })
+        .collect::<Result<Vec<_>>>()?;
+
+    let str_array = as_string_array(&arrays[0])?;
+    let substr_array = as_string_array(&arrays[1])?;
+
+    let result_array: ArrayRef = Arc::new(Int32Array::from_iter(
+        str_array
+            .iter()
+            .zip(substr_array.iter())
+            .map(|(s, substr)| match (s, substr) {
+                (Some(_), None) => None, // substr is null
+                (None, _) => None,       // str is null
+                (Some(s), Some(substr)) => {
+                    if substr.is_empty() {
+                        Some(0)
+                    } else {
+                        Some(s.find(substr).map(|pos| (pos + 1) as i32).unwrap_or(0))

Review Comment:
   `(pos + 1) as i32` can overflow if the match position exceeds `i32::MAX - 1`, producing an incorrect (potentially negative) result. Since Spark's return type is Int32, use a checked/try conversion and decide on an explicit behavior (e.g., error or clamp to `i32::MAX`) rather than a lossy cast.
   ```suggestion
                           Some(
                               s.find(substr)
                                   .map(|pos| {
                                       let one_based = pos.saturating_add(1);
                                       if one_based > i32::MAX as usize {
                                           i32::MAX
                                       } else {
                                           one_based as i32
                                       }
                                   })
                                   .unwrap_or(0),
                           )
   ```
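
   If clamping is acceptable, a more compact variant of the same idea (illustrative only, not taken from the PR) is to route the conversion through `i32::try_from`, which falls back to `i32::MAX` instead of silently wrapping:

   ```rust
   // Sketch: checked conversion; clamps to i32::MAX rather than performing a lossy cast.
   Some(
       s.find(substr)
           .map(|pos| i32::try_from(pos + 1).unwrap_or(i32::MAX))
           .unwrap_or(0),
   )
   ```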



##########
auron-spark-tests/spark33/src/test/scala/org/apache/spark/sql/AuronInstrSuite.scala:
##########
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql
+
+class AuronInstrSuite extends QueryTest with SparkQueryTestsBase {
+
+  test("test instr function - basic functionality") {
+    val data = Seq(
+      ("hello world", "world"),
+      ("hello world", "hello"),
+      ("hello world", "o"),
+      ("hello world", "z"),
+      (null, "test"),
+      ("test", null)
+    )
+
+    val df = spark.createDataFrame(data).toDF("str", "substr")
+    val result = df.selectExpr("instr(str, substr)").collect().map(_.getInt(0))
+
+    assert(result(0) == 7, "instr('hello world', 'world') should return 7")
+    assert(result(1) == 1, "instr('hello world', 'hello') should return 1")
+    assert(result(2) == 5, "instr('hello world', 'o') should return 5")
+    assert(result(3) == 0, "instr('hello world', 'z') should return 0")
+    assert(result(4) == 0, "instr(null, 'test') should return null")
+    assert(result(5) == 0, "instr('test', null) should return null")
+  }
+
+  test("test instr function - multiple occurrences") {
+    val data = Seq(
+      ("banana", "a"),
+      ("testtesttest", "test"),
+      ("abcabcabc", "abc")
+    )
+
+    val df = spark.createDataFrame(data).toDF("str", "substr")
+    val result = df.selectExpr("instr(str, substr)").collect().map(_.getInt(0))
+
+    assert(result(0) == 2, "instr('banana', 'a') should return 2")
+    assert(result(1) == 1, "instr('testtesttest', 'test') should return 1")
+    assert(result(2) == 1, "instr('abcabcabc', 'abc') should return 1")
+  }
+
+  test("test instr function - case sensitive") {
+    val data = Seq(
+      ("Hello", "hello"),
+      ("HELLO", "hello"),
+      ("Hello", "Hello"),
+      ("hElLo", "hello")
+    )
+
+    val df = spark.createDataFrame(data).toDF("str", "substr")
+    val result = df.selectExpr("instr(str, substr)").collect().map(_.getInt(0))
+
+    assert(result(0) == 0, "instr('Hello', 'hello') should return 0 (case sensitive)")
+    assert(result(1) == 0, "instr('HELLO', 'hello') should return 0 (case sensitive)")
+    assert(result(2) == 1, "instr('Hello', 'Hello') should return 1")
+    assert(result(3) == 0, "instr('hElLo', 'hello') should return 0 (case sensitive)")
+  }
+
+  test("test instr function - with filter") {
+    val data = Seq(
+      ("hello world", "world", 1),
+      ("hello", "world", 0),
+      ("hello", "hello", 1),
+      ("test", "abc", 0)
+    )
+
+    val df = spark.createDataFrame(data).toDF("str", "substr", "expected")
+    val result = df
+      .filter("instr(str, substr) > 0")
+      .select("str")
+      .collect()
+      .map(_.getString(0))
+
+    assert(result.length == 2, "Should find 2 matching strings")
+    assert(result.contains("hello world"))
+    assert(result.contains("hello"))
+  }
+
+  test("test instr function - in group by") {
+    val data = Seq(
+      ("test1", "test"),
+      ("test2", "test"),
+      ("hello", "world"),
+      ("testing", "test")
+    )
+
+    val df = spark.createDataFrame(data).toDF("str", "substr")
+    val result = df
+      .groupBy("substr")
+      .count()
+      .filter("count > 0")
+      .orderBy("substr")
+      .collect()
+
+    assert(result.length >= 1)

Review Comment:
   This test is titled "in group by" but it never evaluates `instr` (it only groups by `substr` and counts). As written, it doesn't validate that `instr` works correctly in the presence of aggregations / grouping. Consider incorporating `instr(...)` either in the grouping key, an aggregate expression, or a post-aggregation filter so the test actually exercises the new function in a group-by context.
   ```suggestion
       df.createOrReplaceTempView("instr_group_test")
       val result = spark.sql(
         """
           |SELECT
           |  substr,
           |  SUM(CASE WHEN instr(str, substr) > 0 THEN 1 ELSE 0 END) AS match_count
           |FROM instr_group_test
           |GROUP BY substr
           |HAVING match_count > 0
           |ORDER BY substr
           |""".stripMargin)
         .collect()
   
       // For the input data, only "test" appears within "str" values, and it occurs 3 times.
       assert(result.length == 1)
       assert(result(0).getString(0) == "test")
       assert(result(0).getLong(1) == 3L)
   ```



##########
native-engine/datafusion-ext-functions/src/spark_instr.rs:
##########
@@ -0,0 +1,214 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use arrow::array::{Array, ArrayRef, Int32Array, StringArray};
+use datafusion::{
+    common::{
+        Result, ScalarValue,
+        cast::{as_int32_array, as_string_array},
+    },
+    physical_plan::ColumnarValue,
+};

Review Comment:
   This module imports `StringArray` but doesn't use it in the production code (tests have their own imports). With CI running `cargo clippy ... -D warnings`, this unused import will fail the build. Remove the unused `StringArray` import (or use it explicitly if intended).
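
   For illustration, after dropping `StringArray` the production import would look roughly like this (assuming `Array`, `ArrayRef`, and `Int32Array` remain in use, as in the code shown above):

   ```rust
   // Sketch of the trimmed arrow import; the datafusion imports stay unchanged.
   use arrow::array::{Array, ArrayRef, Int32Array};
   ```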



##########
native-engine/datafusion-ext-functions/src/spark_instr.rs:
##########
@@ -0,0 +1,214 @@
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements.  See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License.  You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use arrow::array::{Array, ArrayRef, Int32Array, StringArray};
+use datafusion::{
+    common::{
+        Result, ScalarValue,
+        cast::{as_int32_array, as_string_array},
+    },
+    physical_plan::ColumnarValue,
+};
+use datafusion_ext_commons::df_execution_err;
+
+/// instr(str, substr) - Returns the (1-based) index of the first occurrence of
+/// substr in str. Compatible with Spark's instr function.
+/// Returns 0 if substr is not found or if substr is empty.
+/// Returns null if str is null or substr is null.
+pub fn spark_instr(args: &[ColumnarValue]) -> Result<ColumnarValue> {
+    if args.len() != 2 {
+        df_execution_err!("instr requires exactly 2 arguments")?;
+    }
+
+    let is_scalar = args
+        .iter()
+        .all(|arg| matches!(arg, ColumnarValue::Scalar(_)));
+    let len = args
+        .iter()
+        .map(|arg| match arg {
+            ColumnarValue::Array(array) => array.len(),
+            ColumnarValue::Scalar(_) => 1,
+        })
+        .max()
+        .unwrap_or(0);
+
+    let arrays = args
+        .iter()
+        .map(|arg| {
+            Ok(match arg {
+                ColumnarValue::Array(array) => array.clone(),
+                ColumnarValue::Scalar(scalar) => scalar.to_array_of_size(len)?,
+            })
+        })
+        .collect::<Result<Vec<_>>>()?;
+
+    let str_array = as_string_array(&arrays[0])?;
+    let substr_array = as_string_array(&arrays[1])?;
+
+    let result_array: ArrayRef = Arc::new(Int32Array::from_iter(
+        str_array
+            .iter()
+            .zip(substr_array.iter())
+            .map(|(s, substr)| match (s, substr) {
+                (Some(_), None) => None, // substr is null
+                (None, _) => None,       // str is null
+                (Some(s), Some(substr)) => {
+                    if substr.is_empty() {
+                        Some(0)
+                    } else {
+                        Some(s.find(substr).map(|pos| (pos + 1) as i32).unwrap_or(0))
+                    }
+                }
+            }),
+    ));
+
+    if is_scalar {
+        let scalar = as_int32_array(&result_array)?.value(0);
+        Ok(ColumnarValue::Scalar(if result_array.is_null(0) {
+            ScalarValue::Int32(None)
+        } else {
+            ScalarValue::Int32(Some(scalar))
+        }))

Review Comment:
   In the scalar-return branch, `value(0)` is read before checking `is_null(0)`. Even if this currently works because Arrow stores a buffer value for nulls, it's easy to misread and can become unsafe if assumptions change. Consider checking `is_null(0)` first and only reading `value(0)` in the non-null case.
   ```suggestion
           let int_array = as_int32_array(&result_array)?;
           let scalar = if int_array.is_null(0) {
               None
           } else {
               Some(int_array.value(0))
           };
           Ok(ColumnarValue::Scalar(ScalarValue::Int32(scalar)))
   ```
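
   Another option, assuming the DataFusion version in use exposes `ScalarValue::try_from_array`, is to let DataFusion build the scalar, which already yields `Int32(None)` for a null slot:

   ```rust
   // Hypothetical alternative: ScalarValue::try_from_array handles the null case itself.
   Ok(ColumnarValue::Scalar(ScalarValue::try_from_array(&result_array, 0)?))
   ```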



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
