This is an automated email from the ASF dual-hosted git repository.

agrove pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/datafusion-ballista.git


The following commit(s) were added to refs/heads/main by this push:
     new d57deb0b Fix cargo build (#1045)
d57deb0b is described below

commit d57deb0ba7af656f45485196fc0cd4874eea625b
Author: Andy Grove <[email protected]>
AuthorDate: Mon Sep 2 18:19:20 2024 -0600

    Fix cargo build (#1045)
    
    * fix build
    
    * fix build
    
    * clippy
    
    * tomlfmt
    
    * tomlfmt
---
 Cargo.toml                                                | 15 +++------------
 ballista/client/README.md                                 |  2 +-
 ballista/core/Cargo.toml                                  |  2 ++
 ballista/core/src/serde/generated/ballista.rs             |  1 +
 ballista/scheduler/src/config.rs                          |  4 ++--
 .../src/state/execution_graph/execution_stage.rs          |  2 +-
 6 files changed, 10 insertions(+), 16 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index e73f5ce7..d6d13872 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -16,21 +16,12 @@
 # under the License.
 
 [workspace]
-members = [
-    "ballista-cli",
-    "ballista/cache",
-    "ballista/client",
-    "ballista/core",
-    "ballista/executor",
-    "ballista/scheduler",
-    "benchmarks",
-    "examples"
-]
-exclude = [ "python" ]
+exclude = ["python"]
+members = ["ballista-cli", "ballista/cache", "ballista/client", "ballista/core", "ballista/executor", "ballista/scheduler", "benchmarks", "examples"]
 resolver = "2"
 
 [workspace.dependencies]
-arrow = { version = "51.0.0", features=["ipc_compression"] }
+arrow = { version = "51.0.0", features = ["ipc_compression"] }
 arrow-flight = { version = "51.0.0", features = ["flight-sql-experimental"] }
 arrow-schema = { version = "51.0.0", default-features = false }
 configure_me = { version = "0.4.0" }
diff --git a/ballista/client/README.md b/ballista/client/README.md
index 02709320..19dc1439 100644
--- a/ballista/client/README.md
+++ b/ballista/client/README.md
@@ -137,7 +137,7 @@ async fn main() -> Result<()> {
 
 The output should look similar to the following table.
 
-```{r eval=FALSE}
+```text
 
+-----------------+--------------------------+--------------------------+--------------------------+--------------------------+
 | passenger_count | MIN(?table?.fare_amount) | MAX(?table?.fare_amount) | AVG(?table?.fare_amount) | SUM(?table?.fare_amount) |
 
+-----------------+--------------------------+--------------------------+--------------------------+--------------------------+
diff --git a/ballista/core/Cargo.toml b/ballista/core/Cargo.toml
index 2dfdd630..43620ecc 100644
--- a/ballista/core/Cargo.toml
+++ b/ballista/core/Cargo.toml
@@ -35,8 +35,10 @@ rustc-args = ["--cfg", "docsrs"]
 
 [features]
 azure = ["object_store/azure"]
+docsrs = []
 # Used for testing ONLY: causes all values to hash to the same value (test for collisions)
 force_hash_collisions = ["datafusion/force_hash_collisions"]
+gcs = ["object_store/gcp"]
 # Used to enable hdfs to be registered in the ObjectStoreRegistry by default
 hdfs = ["datafusion-objectstore-hdfs/hdfs"]
 hdfs3 = ["datafusion-objectstore-hdfs/hdfs3"]
diff --git a/ballista/core/src/serde/generated/ballista.rs b/ballista/core/src/serde/generated/ballista.rs
index 13a17f7e..50df950a 100644
--- a/ballista/core/src/serde/generated/ballista.rs
+++ b/ballista/core/src/serde/generated/ballista.rs
@@ -1,3 +1,4 @@
+// This file is @generated by prost-build.
 /// /////////////////////////////////////////////////////////////////////////////////////////////////
 /// Ballista Physical Plan
 /// /////////////////////////////////////////////////////////////////////////////////////////////////
diff --git a/ballista/scheduler/src/config.rs b/ballista/scheduler/src/config.rs
index 38ae5ae0..d15e928c 100644
--- a/ballista/scheduler/src/config.rs
+++ b/ballista/scheduler/src/config.rs
@@ -199,7 +199,7 @@ pub enum TaskDistribution {
     RoundRobin,
     /// 1. Firstly, try to bind tasks without scanning source files by [`RoundRobin`] policy.
     /// 2. Then for a task for scanning source files, firstly calculate a hash value based on input files.
-    /// And then bind it with an execute according to consistent hashing policy.
+    ///    And then bind it with an execute according to consistent hashing policy.
     /// 3. If needed, work stealing can be enabled based on the tolerance of the consistent hashing.
     ConsistentHash,
 }
@@ -228,7 +228,7 @@ pub enum TaskDistributionPolicy {
     RoundRobin,
     /// 1. Firstly, try to bind tasks without scanning source files by [`RoundRobin`] policy.
     /// 2. Then for a task for scanning source files, firstly calculate a hash value based on input files.
-    /// And then bind it with an execute according to consistent hashing policy.
+    ///    And then bind it with an execute according to consistent hashing policy.
     /// 3. If needed, work stealing can be enabled based on the tolerance of the consistent hashing.
     ConsistentHash {
         num_replicas: usize,
diff --git a/ballista/scheduler/src/state/execution_graph/execution_stage.rs b/ballista/scheduler/src/state/execution_graph/execution_stage.rs
index 8aded3e0..e9b62a06 100644
--- a/ballista/scheduler/src/state/execution_graph/execution_stage.rs
+++ b/ballista/scheduler/src/state/execution_graph/execution_stage.rs
@@ -145,7 +145,7 @@ pub(crate) struct ResolvedStage {
 /// 1. save the execution plan as encoded one to avoid serialization cost for creating task definition
 /// 2. manage the task statuses
 /// 3. manage the stage-level combined metrics
-/// Running stages will only be maintained in memory and will not saved to the backend storage
+///    Running stages will only be maintained in memory and will not saved to the backend storage
 #[derive(Clone)]
 pub(crate) struct RunningStage {
     /// Stage ID


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to