This is an automated email from the ASF dual-hosted git repository.

koushiro pushed a commit to branch remove-scheme-enum
in repository https://gitbox.apache.org/repos/asf/opendal.git

commit 5ca6421d88b364c9b6ab6f235ec2c5de74acd7b2
Author: koushiro <[email protected]>
AuthorDate: Sat Nov 8 21:25:17 2025 +0800

    refactor(types): use string-based scheme over enum-based approach
---
 .../src/main.rs                                    |   3 +-
 core/src/docs/internals/accessor.rs                |  11 +-
 core/src/layers/async_backtrace.rs                 |   1 -
 core/src/layers/await_tree.rs                      |   1 -
 core/src/layers/capability_check.rs                |   1 -
 core/src/layers/chaos.rs                           |   1 -
 core/src/layers/concurrent_limit.rs                |   2 -
 core/src/layers/error_context.rs                   |   2 +-
 core/src/layers/logging.rs                         |   2 -
 core/src/layers/mime_guess.rs                      |   1 -
 core/src/layers/retry.rs                           |   3 -
 core/src/layers/throttle.rs                        |   1 -
 core/src/layers/timeout.rs                         |   1 -
 core/src/lib.rs                                    |   1 -
 core/src/raw/tests/utils.rs                        |   6 +-
 core/src/types/context/read.rs                     |   5 +-
 core/src/types/mod.rs                              |   3 -
 core/src/types/operator/builder.rs                 | 175 ++++----
 core/src/types/operator/info.rs                    |   8 +-
 core/src/types/operator/operator.rs                |   2 -
 core/src/types/operator/operator_futures.rs        |   5 -
 core/src/types/read/buffer_stream.rs               |   4 +-
 core/src/types/read/futures_async_reader.rs        |   8 +-
 core/src/types/read/futures_bytes_stream.rs        |   6 +-
 core/src/types/read/reader.rs                      |  10 +-
 core/src/types/scheme.rs                           | 461 ---------------------
 core/src/types/write/buffer_sink.rs                |   2 +-
 core/src/types/write/futures_async_writer.rs       |   2 +-
 core/src/types/write/futures_bytes_sink.rs         |   2 +-
 core/tests/behavior/async_copy.rs                  |   2 +-
 core/tests/behavior/async_delete.rs                |   2 +-
 core/tests/behavior/async_list.rs                  |   4 +-
 core/tests/behavior/async_write.rs                 |   4 +-
 33 files changed, 130 insertions(+), 612 deletions(-)
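
For callers, the visible effect of this refactor is that `Scheme` disappears from the public API: `Operator::via_iter` now takes a scheme string, and `OperatorInfo::scheme` returns a plain `&'static str`. A minimal before/after sketch, assuming the per-service constants exported from `opendal::services` on this branch (e.g. `MEMORY_SCHEME`) and the default `services-memory` feature:

    use opendal::{services, Operator, Result};

    fn build_memory_operator() -> Result<Operator> {
        // Before: Operator::via_iter(Scheme::Memory, [])?
        // After: the scheme is just a string constant.
        let op = Operator::via_iter(services::MEMORY_SCHEME, [])?;

        // scheme() is now a &'static str, so this is a plain string comparison.
        assert_eq!(op.info().scheme(), services::MEMORY_SCHEME);
        Ok(op)
    }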

diff --git a/core/edge/s3_aws_assume_role_with_web_identity/src/main.rs b/core/edge/s3_aws_assume_role_with_web_identity/src/main.rs
index 5c3296918..333f3c200 100644
--- a/core/edge/s3_aws_assume_role_with_web_identity/src/main.rs
+++ b/core/edge/s3_aws_assume_role_with_web_identity/src/main.rs
@@ -16,13 +16,12 @@
 // under the License.
 
 use opendal::Result;
-use opendal::Scheme;
 use opendal::raw::tests::init_test_service;
 
 #[tokio::main]
 async fn main() -> Result<()> {
     let op = init_test_service()?.expect("service must be init");
-    assert_eq!(op.info().scheme(), Scheme::S3);
+    assert_eq!(op.info().scheme(), opendal::services::S3_SCHEME);
 
     let result = op
         .exists(&uuid::Uuid::new_v4().to_string())
diff --git a/core/src/docs/internals/accessor.rs b/core/src/docs/internals/accessor.rs
index 08f2cbd6d..514937df8 100644
--- a/core/src/docs/internals/accessor.rs
+++ b/core/src/docs/internals/accessor.rs
@@ -119,7 +119,7 @@
 //!
 //! ## Scheme
 //!
-//! First of all, let's pick a good [`Scheme`] for our duck service. The
+//! First of all, let's pick a good scheme for our duck service. The
 //! scheme should be unique and easy to understand. Normally we should
 //! use its formal name.
 //!
@@ -128,12 +128,10 @@
 //! vendors that provide s3-like RESTful APIs, and our s3 service is
 //! implemented to support all of them, not just AWS S3.
 //!
-//! Obviously, we can use `duck` as scheme, let's add a new variant in [`Scheme`], and implement all required functions like `Scheme::from_str` and `Scheme::into_static`:
+//! Obviously, we can use `duck` as scheme, let's add a new constant string:
 //!
 //! ```ignore
-//! pub enum Scheme {
-//!     Duck,
-//! }
+//! pub const DUCK_SCHEME: &str = "duck";
 //! ```
 //!
 //! ## Builder
@@ -273,7 +271,7 @@
 //!
 //!     fn info(&self) -> Arc<AccessorInfo> {
 //!         let am = AccessorInfo::default();
-//!         am.set_scheme("duck")
+//!         am.set_scheme(DUCK_SCHEME)
 //!             .set_root(&self.root)
 //!             .set_native_capability(
 //!                 Capability {
@@ -301,6 +299,5 @@
 //! [`Operation`]: crate::raw::Operation
 //! [`Capability`]: crate::Capability
 //! [`AccessorInfo`]: crate::raw::AccessorInfo
-//! [`Scheme`]: crate::Scheme
 //! [`Builder`]: crate::Builder
 //! [`Configurator`]: crate::Configurator
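
With the enum gone, an accessor only needs a constant string for its scheme, and downstream code compares plain strings instead of matching a `Scheme` variant. A small sketch reusing the hypothetical `DUCK_SCHEME` from the walkthrough above:

    use opendal::OperatorInfo;

    // Hypothetical constant from the duck-service example above.
    pub const DUCK_SCHEME: &str = "duck";

    // Downstream code can branch on the scheme with string equality.
    fn is_duck(info: &OperatorInfo) -> bool {
        info.scheme() == DUCK_SCHEME
    }
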
diff --git a/core/src/layers/async_backtrace.rs b/core/src/layers/async_backtrace.rs
index 47a761185..e68c37817 100644
--- a/core/src/layers/async_backtrace.rs
+++ b/core/src/layers/async_backtrace.rs
@@ -32,7 +32,6 @@ use crate::*;
 /// # use opendal::services;
 /// # use opendal::Operator;
 /// # use opendal::Result;
-/// # use opendal::Scheme;
 ///
 /// # fn main() -> Result<()> {
 /// let _ = Operator::new(services::Memory::default())?
diff --git a/core/src/layers/await_tree.rs b/core/src/layers/await_tree.rs
index 8d6f66a80..416d814ca 100644
--- a/core/src/layers/await_tree.rs
+++ b/core/src/layers/await_tree.rs
@@ -36,7 +36,6 @@ use crate::*;
 /// # use opendal::services;
 /// # use opendal::Operator;
 /// # use opendal::Result;
-/// # use opendal::Scheme;
 ///
 /// # fn main() -> Result<()> {
 /// let _ = Operator::new(services::Memory::default())?
diff --git a/core/src/layers/capability_check.rs b/core/src/layers/capability_check.rs
index 1a8f534ee..e9455d323 100644
--- a/core/src/layers/capability_check.rs
+++ b/core/src/layers/capability_check.rs
@@ -45,7 +45,6 @@ use crate::raw::*;
 /// # use opendal::services;
 /// # use opendal::Operator;
 /// # use opendal::Result;
-/// # use opendal::Scheme;
 ///
 /// # fn main() -> Result<()> {
 /// use opendal::layers::CapabilityCheckLayer;
diff --git a/core/src/layers/chaos.rs b/core/src/layers/chaos.rs
index 8a9424ec9..31ef3deec 100644
--- a/core/src/layers/chaos.rs
+++ b/core/src/layers/chaos.rs
@@ -48,7 +48,6 @@ use crate::*;
 /// # use opendal::services;
 /// # use opendal::Operator;
 /// # use opendal::Result;
-/// # use opendal::Scheme;
 ///
 /// # fn main() -> Result<()> {
 /// let _ = Operator::new(services::Memory::default())?
diff --git a/core/src/layers/concurrent_limit.rs b/core/src/layers/concurrent_limit.rs
index 435149190..d51f4de45 100644
--- a/core/src/layers/concurrent_limit.rs
+++ b/core/src/layers/concurrent_limit.rs
@@ -50,7 +50,6 @@ use crate::*;
 /// # use opendal::services;
 /// # use opendal::Operator;
 /// # use opendal::Result;
-/// # use opendal::Scheme;
 ///
 /// # fn main() -> Result<()> {
 /// let _ = Operator::new(services::Memory::default())?
@@ -67,7 +66,6 @@ use crate::*;
 /// # use opendal::services;
 /// # use opendal::Operator;
 /// # use opendal::Result;
-/// # use opendal::Scheme;
 ///
 /// # fn main() -> Result<()> {
 /// let limit = ConcurrentLimitLayer::new(1024);
diff --git a/core/src/layers/error_context.rs b/core/src/layers/error_context.rs
index 315068a98..305c62bce 100644
--- a/core/src/layers/error_context.rs
+++ b/core/src/layers/error_context.rs
@@ -28,7 +28,7 @@ use crate::*;
 ///
 /// This layer will add the following error context into all errors:
 ///
-/// - `service`: The [`Scheme`] of underlying service.
+/// - `service`: The scheme of underlying service.
 /// - `operation`: The [`Operation`] of this operation
 /// - `path`: The path of this operation
 ///
diff --git a/core/src/layers/logging.rs b/core/src/layers/logging.rs
index 70a0ae703..82c48f87f 100644
--- a/core/src/layers/logging.rs
+++ b/core/src/layers/logging.rs
@@ -45,7 +45,6 @@ use crate::*;
 /// # use opendal::services;
 /// # use opendal::Operator;
 /// # use opendal::Result;
-/// # use opendal::Scheme;
 ///
 /// # fn main() -> Result<()> {
 /// let _ = Operator::new(services::Memory::default())?
@@ -83,7 +82,6 @@ use crate::*;
 /// # use opendal::Error;
 /// # use opendal::Operator;
 /// # use opendal::Result;
-/// # use opendal::Scheme;
 ///
 /// #[derive(Debug, Clone)]
 /// struct MyLoggingInterceptor;
diff --git a/core/src/layers/mime_guess.rs b/core/src/layers/mime_guess.rs
index 2aaffdd24..aea79f6ce 100644
--- a/core/src/layers/mime_guess.rs
+++ b/core/src/layers/mime_guess.rs
@@ -49,7 +49,6 @@ use crate::raw::*;
 /// # use opendal::services;
 /// # use opendal::Operator;
 /// # use opendal::Result;
-/// # use opendal::Scheme;
 ///
 /// # fn main() -> Result<()> {
 /// let _ = Operator::new(services::Memory::default())?
diff --git a/core/src/layers/retry.rs b/core/src/layers/retry.rs
index cbb3b4c6b..59a63830f 100644
--- a/core/src/layers/retry.rs
+++ b/core/src/layers/retry.rs
@@ -75,7 +75,6 @@ use crate::*;
 /// # use opendal::services;
 /// # use opendal::Operator;
 /// # use opendal::Result;
-/// # use opendal::Scheme;
 ///
 /// # fn main() -> Result<()> {
 /// let _ = Operator::new(services::Memory::default())?
@@ -99,7 +98,6 @@ use crate::*;
 /// # use opendal::Error;
 /// # use opendal::Operator;
 /// # use opendal::Result;
-/// # use opendal::Scheme;
 ///
 /// struct MyRetryInterceptor;
 ///
@@ -148,7 +146,6 @@ impl RetryLayer {
     /// use opendal::layers::RetryLayer;
     /// use opendal::services;
     /// use opendal::Operator;
-    /// use opendal::Scheme;
     ///
     /// let _ = Operator::new(services::Memory::default())
     ///     .expect("must init")
diff --git a/core/src/layers/throttle.rs b/core/src/layers/throttle.rs
index 1394aaaaf..9b8161c86 100644
--- a/core/src/layers/throttle.rs
+++ b/core/src/layers/throttle.rs
@@ -53,7 +53,6 @@ use crate::*;
 /// # use opendal::services;
 /// # use opendal::Operator;
 /// # use opendal::Result;
-/// # use opendal::Scheme;
 ///
 /// # fn main() -> Result<()> {
 /// let _ = Operator::new(services::Memory::default())
diff --git a/core/src/layers/timeout.rs b/core/src/layers/timeout.rs
index 54864aedd..8b6a78e33 100644
--- a/core/src/layers/timeout.rs
+++ b/core/src/layers/timeout.rs
@@ -84,7 +84,6 @@ use crate::*;
 /// # use opendal::services;
 /// # use opendal::Operator;
 /// # use opendal::Result;
-/// # use opendal::Scheme;
 ///
 /// # fn main() -> Result<()> {
 /// let _ = Operator::new(services::Memory::default())?
diff --git a/core/src/lib.rs b/core/src/lib.rs
index cd53ad3f7..89177dbbb 100644
--- a/core/src/lib.rs
+++ b/core/src/lib.rs
@@ -183,7 +183,6 @@ mod tests {
         assert_eq!(360, size_of::<Entry>());
         assert_eq!(336, size_of::<Metadata>());
         assert_eq!(1, size_of::<EntryMode>());
-        assert_eq!(24, size_of::<Scheme>());
     }
 
     trait AssertSendSync: Send + Sync {}
diff --git a/core/src/raw/tests/utils.rs b/core/src/raw/tests/utils.rs
index 906be27a6..6cd2aeb39 100644
--- a/core/src/raw/tests/utils.rs
+++ b/core/src/raw/tests/utils.rs
@@ -17,7 +17,6 @@
 
 use std::collections::HashMap;
 use std::env;
-use std::str::FromStr;
 use std::sync::LazyLock;
 
 use crate::*;
@@ -43,9 +42,8 @@ pub fn init_test_service() -> Result<Option<Operator>> {
     } else {
         return Ok(None);
     };
-    let scheme = Scheme::from_str(&scheme).unwrap();
 
-    let scheme_key = String::from(scheme).replace('-', "_");
+    let scheme_key = scheme.replace('-', "_");
     let prefix = format!("opendal_{scheme_key}_");
 
     let mut cfg = env::vars()
@@ -67,7 +65,7 @@ pub fn init_test_service() -> Result<Option<Operator>> {
         cfg.insert("root".to_string(), root);
     }
 
-    let op = Operator::via_iter(scheme, cfg).expect("must succeed");
+    let op = Operator::via_iter(&scheme, cfg).expect("must succeed");
 
     #[cfg(feature = "layers-chaos")]
     let op = { op.layer(layers::ChaosLayer::new(0.1)) };
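
With the `Scheme::from_str` round-trip gone, the helper above derives the env-var prefix straight from the scheme string. A standalone sketch of that derivation (the function name is ours; the `opendal_{scheme}_` convention comes from the code above):

    // Mirrors the prefix logic in init_test_service: dashes in the scheme
    // become underscores before building the `opendal_{scheme}_` prefix.
    fn config_prefix(scheme: &str) -> String {
        let scheme_key = scheme.replace('-', "_");
        format!("opendal_{scheme_key}_")
    }

    fn main() {
        assert_eq!(config_prefix("aliyun-drive"), "opendal_aliyun_drive_");
        assert_eq!(config_prefix("s3"), "opendal_s3_");
    }
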
diff --git a/core/src/types/context/read.rs b/core/src/types/context/read.rs
index 4d79d83ca..f77f32557 100644
--- a/core/src/types/context/read.rs
+++ b/core/src/types/context/read.rs
@@ -172,14 +172,13 @@ impl ReadGenerator {
 
 #[cfg(test)]
 mod tests {
-
     use bytes::Bytes;
 
     use super::*;
 
     #[tokio::test]
     async fn test_next_reader() -> Result<()> {
-        let op = Operator::via_iter(Scheme::Memory, [])?;
+        let op = Operator::via_iter(services::MEMORY_SCHEME, [])?;
         op.write(
             "test",
             Buffer::from(vec![Bytes::from("Hello"), Bytes::from("World")]),
@@ -205,7 +204,7 @@ mod tests {
 
     #[tokio::test]
     async fn test_next_reader_without_size() -> Result<()> {
-        let op = Operator::via_iter(Scheme::Memory, [])?;
+        let op = Operator::via_iter(services::MEMORY_SCHEME, [])?;
         op.write(
             "test",
             Buffer::from(vec![Bytes::from("Hello"), Bytes::from("World")]),
diff --git a/core/src/types/mod.rs b/core/src/types/mod.rs
index 79f8daf36..fb168553f 100644
--- a/core/src/types/mod.rs
+++ b/core/src/types/mod.rs
@@ -62,9 +62,6 @@ pub use error::Error;
 pub use error::ErrorKind;
 pub use error::Result;
 
-mod scheme;
-pub use scheme::Scheme;
-
 mod capability;
 pub use capability::Capability;
 
diff --git a/core/src/types/operator/builder.rs b/core/src/types/operator/builder.rs
index b417f3211..c312489fe 100644
--- a/core/src/types/operator/builder.rs
+++ b/core/src/types/operator/builder.rs
@@ -164,7 +164,8 @@ impl Operator {
     /// use std::collections::HashMap;
     ///
     /// use opendal::Operator;
-    /// use opendal::Scheme;
+    /// use opendal::services;
+    ///
     /// async fn test() -> Result<()> {
     ///     let map = [
     ///         // Set the root for fs, all operations will happen under this root.
@@ -174,135 +175,149 @@ impl Operator {
     ///     ];
     ///
     ///     // Build an `Operator` to start operating the storage.
-    ///     let op: Operator = Operator::via_iter(Scheme::Fs, map)?;
+    ///     let op: Operator = Operator::via_iter(services::FS_SCHEME, map)?;
     ///
     ///     Ok(())
     /// }
     /// ```
     #[allow(unused_variables, unreachable_code)]
     pub fn via_iter(
-        scheme: Scheme,
+        scheme: &str,
         iter: impl IntoIterator<Item = (String, String)>,
     ) -> Result<Operator> {
         let op = match scheme {
             #[cfg(feature = "services-aliyun-drive")]
-            Scheme::AliyunDrive => 
Self::from_iter::<services::AliyunDrive>(iter)?.finish(),
+            services::ALIYUN_DRIVE_SCHEME => {
+                Self::from_iter::<services::AliyunDrive>(iter)?.finish()
+            }
             #[cfg(feature = "services-alluxio")]
-            Scheme::Alluxio => 
Self::from_iter::<services::Alluxio>(iter)?.finish(),
-            #[cfg(feature = "services-cloudflare-kv")]
-            Scheme::CloudflareKv => 
Self::from_iter::<services::CloudflareKv>(iter)?.finish(),
-            #[cfg(feature = "services-compfs")]
-            Scheme::Compfs => 
Self::from_iter::<services::Compfs>(iter)?.finish(),
-            #[cfg(feature = "services-upyun")]
-            Scheme::Upyun => 
Self::from_iter::<services::Upyun>(iter)?.finish(),
-            #[cfg(feature = "services-koofr")]
-            Scheme::Koofr => 
Self::from_iter::<services::Koofr>(iter)?.finish(),
-            #[cfg(feature = "services-yandex-disk")]
-            Scheme::YandexDisk => 
Self::from_iter::<services::YandexDisk>(iter)?.finish(),
-            #[cfg(feature = "services-pcloud")]
-            Scheme::Pcloud => 
Self::from_iter::<services::Pcloud>(iter)?.finish(),
+            services::ALLUXIO_SCHEME => 
Self::from_iter::<services::Alluxio>(iter)?.finish(),
             #[cfg(feature = "services-azblob")]
-            Scheme::Azblob => 
Self::from_iter::<services::Azblob>(iter)?.finish(),
+            services::AZBLOB_SCHEME => 
Self::from_iter::<services::Azblob>(iter)?.finish(),
             #[cfg(feature = "services-azdls")]
-            Scheme::Azdls => 
Self::from_iter::<services::Azdls>(iter)?.finish(),
+            services::AZDLS_SCHEME => 
Self::from_iter::<services::Azdls>(iter)?.finish(),
             #[cfg(feature = "services-azfile")]
-            Scheme::Azfile => 
Self::from_iter::<services::Azfile>(iter)?.finish(),
+            services::AZFILE_SCHEME => 
Self::from_iter::<services::Azfile>(iter)?.finish(),
             #[cfg(feature = "services-b2")]
-            Scheme::B2 => Self::from_iter::<services::B2>(iter)?.finish(),
+            services::B2_SCHEME => 
Self::from_iter::<services::B2>(iter)?.finish(),
             #[cfg(feature = "services-cacache")]
-            Scheme::Cacache => 
Self::from_iter::<services::Cacache>(iter)?.finish(),
+            services::CACACHE_SCHEME => 
Self::from_iter::<services::Cacache>(iter)?.finish(),
+            #[cfg(feature = "services-cloudflare-kv")]
+            services::CLOUDFLARE_KV_SCHEME => {
+                Self::from_iter::<services::CloudflareKv>(iter)?.finish()
+            }
+            #[cfg(feature = "services-compfs")]
+            services::COMPFS_SCHEME => 
Self::from_iter::<services::Compfs>(iter)?.finish(),
             #[cfg(feature = "services-cos")]
-            Scheme::Cos => Self::from_iter::<services::Cos>(iter)?.finish(),
+            services::COS_SCHEME => 
Self::from_iter::<services::Cos>(iter)?.finish(),
             #[cfg(feature = "services-d1")]
-            Scheme::D1 => Self::from_iter::<services::D1>(iter)?.finish(),
+            services::D1_SCHEME => 
Self::from_iter::<services::D1>(iter)?.finish(),
             #[cfg(feature = "services-dashmap")]
-            Scheme::Dashmap => 
Self::from_iter::<services::Dashmap>(iter)?.finish(),
+            services::DASHMAP_SCHEME => 
Self::from_iter::<services::Dashmap>(iter)?.finish(),
+            #[cfg(feature = "services-dbfs")]
+            services::DBFS_SCHEME => 
Self::from_iter::<services::Dbfs>(iter)?.finish(),
             #[cfg(feature = "services-dropbox")]
-            Scheme::Dropbox => 
Self::from_iter::<services::Dropbox>(iter)?.finish(),
+            services::DROPBOX_SCHEME => 
Self::from_iter::<services::Dropbox>(iter)?.finish(),
             #[cfg(feature = "services-etcd")]
-            Scheme::Etcd => Self::from_iter::<services::Etcd>(iter)?.finish(),
+            services::ETCD_SCHEME => 
Self::from_iter::<services::Etcd>(iter)?.finish(),
             #[cfg(feature = "services-foundationdb")]
-            Scheme::Foundationdb => 
Self::from_iter::<services::Foundationdb>(iter)?.finish(),
+            services::FOUNDATIONDB_SCHEME => {
+                Self::from_iter::<services::Foundationdb>(iter)?.finish()
+            }
             #[cfg(feature = "services-fs")]
-            Scheme::Fs => Self::from_iter::<services::Fs>(iter)?.finish(),
+            services::FS_SCHEME => 
Self::from_iter::<services::Fs>(iter)?.finish(),
             #[cfg(feature = "services-ftp")]
-            Scheme::Ftp => Self::from_iter::<services::Ftp>(iter)?.finish(),
+            services::FTP_SCHEME => 
Self::from_iter::<services::Ftp>(iter)?.finish(),
             #[cfg(feature = "services-gcs")]
-            Scheme::Gcs => Self::from_iter::<services::Gcs>(iter)?.finish(),
+            services::GCS_SCHEME => 
Self::from_iter::<services::Gcs>(iter)?.finish(),
+            #[cfg(feature = "services-gdrive")]
+            services::GDRIVE_SCHEME => 
Self::from_iter::<services::Gdrive>(iter)?.finish(),
             #[cfg(feature = "services-ghac")]
-            Scheme::Ghac => Self::from_iter::<services::Ghac>(iter)?.finish(),
-            #[cfg(feature = "services-gridfs")]
-            Scheme::Gridfs => 
Self::from_iter::<services::Gridfs>(iter)?.finish(),
+            services::GHAC_SCHEME => 
Self::from_iter::<services::Ghac>(iter)?.finish(),
             #[cfg(feature = "services-github")]
-            Scheme::Github => 
Self::from_iter::<services::Github>(iter)?.finish(),
+            services::GITHUB_SCHEME => 
Self::from_iter::<services::Github>(iter)?.finish(),
+            #[cfg(feature = "services-gridfs")]
+            services::GRIDFS_SCHEME => 
Self::from_iter::<services::Gridfs>(iter)?.finish(),
             #[cfg(feature = "services-hdfs")]
-            Scheme::Hdfs => Self::from_iter::<services::Hdfs>(iter)?.finish(),
+            services::HDFS_SCHEME => 
Self::from_iter::<services::Hdfs>(iter)?.finish(),
+            #[cfg(feature = "services-hdfs-native")]
+            services::HDFS_NATIVE_SCHEME => 
Self::from_iter::<services::HdfsNative>(iter)?.finish(),
             #[cfg(feature = "services-http")]
-            Scheme::Http => Self::from_iter::<services::Http>(iter)?.finish(),
+            services::HTTP_SCHEME => 
Self::from_iter::<services::Http>(iter)?.finish(),
             #[cfg(feature = "services-huggingface")]
-            Scheme::Huggingface => 
Self::from_iter::<services::Huggingface>(iter)?.finish(),
+            services::HUGGINGFACE_SCHEME => {
+                Self::from_iter::<services::Huggingface>(iter)?.finish()
+            }
             #[cfg(feature = "services-ipfs")]
-            Scheme::Ipfs => Self::from_iter::<services::Ipfs>(iter)?.finish(),
+            services::IPFS_SCHEME => 
Self::from_iter::<services::Ipfs>(iter)?.finish(),
             #[cfg(feature = "services-ipmfs")]
-            Scheme::Ipmfs => 
Self::from_iter::<services::Ipmfs>(iter)?.finish(),
+            services::IPMFS_SCHEME => 
Self::from_iter::<services::Ipmfs>(iter)?.finish(),
+            #[cfg(feature = "services-koofr")]
+            services::KOOFR_SCHEME => 
Self::from_iter::<services::Koofr>(iter)?.finish(),
+            #[cfg(feature = "services-lakefs")]
+            services::LAKEFS_SCHEME => 
Self::from_iter::<services::Lakefs>(iter)?.finish(),
             #[cfg(feature = "services-memcached")]
-            Scheme::Memcached => 
Self::from_iter::<services::Memcached>(iter)?.finish(),
+            services::MEMCACHED_SCHEME => 
Self::from_iter::<services::Memcached>(iter)?.finish(),
             #[cfg(feature = "services-memory")]
-            Scheme::Memory => 
Self::from_iter::<services::Memory>(iter)?.finish(),
+            services::MEMORY_SCHEME => 
Self::from_iter::<services::Memory>(iter)?.finish(),
             #[cfg(feature = "services-mini-moka")]
-            Scheme::MiniMoka => 
Self::from_iter::<services::MiniMoka>(iter)?.finish(),
+            services::MINI_MOKA_SCHEME => 
Self::from_iter::<services::MiniMoka>(iter)?.finish(),
             #[cfg(feature = "services-moka")]
-            Scheme::Moka => Self::from_iter::<services::Moka>(iter)?.finish(),
+            services::MOKA_SCHEME => 
Self::from_iter::<services::Moka>(iter)?.finish(),
+            #[cfg(feature = "services-mongodb")]
+            services::MONGODB_SCHEME => 
Self::from_iter::<services::Mongodb>(iter)?.finish(),
             #[cfg(feature = "services-monoiofs")]
-            Scheme::Monoiofs => 
Self::from_iter::<services::Monoiofs>(iter)?.finish(),
+            services::MONOIOFS_SCHEME => 
Self::from_iter::<services::Monoiofs>(iter)?.finish(),
             #[cfg(feature = "services-mysql")]
-            Scheme::Mysql => 
Self::from_iter::<services::Mysql>(iter)?.finish(),
+            services::MYSQL_SCHEME => 
Self::from_iter::<services::Mysql>(iter)?.finish(),
             #[cfg(feature = "services-obs")]
-            Scheme::Obs => Self::from_iter::<services::Obs>(iter)?.finish(),
+            services::OBS_SCHEME => 
Self::from_iter::<services::Obs>(iter)?.finish(),
             #[cfg(feature = "services-onedrive")]
-            Scheme::Onedrive => 
Self::from_iter::<services::Onedrive>(iter)?.finish(),
-            #[cfg(feature = "services-postgresql")]
-            Scheme::Postgresql => 
Self::from_iter::<services::Postgresql>(iter)?.finish(),
-            #[cfg(feature = "services-gdrive")]
-            Scheme::Gdrive => 
Self::from_iter::<services::Gdrive>(iter)?.finish(),
+            services::ONEDRIVE_SCHEME => 
Self::from_iter::<services::Onedrive>(iter)?.finish(),
             #[cfg(feature = "services-oss")]
-            Scheme::Oss => Self::from_iter::<services::Oss>(iter)?.finish(),
+            services::OSS_SCHEME => 
Self::from_iter::<services::Oss>(iter)?.finish(),
+            #[cfg(feature = "services-pcloud")]
+            services::PCLOUD_SCHEME => 
Self::from_iter::<services::Pcloud>(iter)?.finish(),
             #[cfg(feature = "services-persy")]
-            Scheme::Persy => 
Self::from_iter::<services::Persy>(iter)?.finish(),
+            services::PERSY_SCHEME => 
Self::from_iter::<services::Persy>(iter)?.finish(),
+            #[cfg(feature = "services-postgresql")]
+            services::POSTGRESQL_SCHEME => 
Self::from_iter::<services::Postgresql>(iter)?.finish(),
+            #[cfg(feature = "services-redb")]
+            services::REDB_SCHEME => 
Self::from_iter::<services::Redb>(iter)?.finish(),
             #[cfg(feature = "services-redis")]
-            Scheme::Redis => 
Self::from_iter::<services::Redis>(iter)?.finish(),
+            services::REDIS_SCHEME => 
Self::from_iter::<services::Redis>(iter)?.finish(),
             #[cfg(feature = "services-rocksdb")]
-            Scheme::Rocksdb => 
Self::from_iter::<services::Rocksdb>(iter)?.finish(),
+            services::ROCKSDB_SCHEME => 
Self::from_iter::<services::Rocksdb>(iter)?.finish(),
             #[cfg(feature = "services-s3")]
-            Scheme::S3 => Self::from_iter::<services::S3>(iter)?.finish(),
+            services::S3_SCHEME => 
Self::from_iter::<services::S3>(iter)?.finish(),
             #[cfg(feature = "services-seafile")]
-            Scheme::Seafile => 
Self::from_iter::<services::Seafile>(iter)?.finish(),
+            services::SEAFILE_SCHEME => 
Self::from_iter::<services::Seafile>(iter)?.finish(),
             #[cfg(feature = "services-sftp")]
-            Scheme::Sftp => Self::from_iter::<services::Sftp>(iter)?.finish(),
+            services::SFTP_SCHEME => 
Self::from_iter::<services::Sftp>(iter)?.finish(),
             #[cfg(feature = "services-sled")]
-            Scheme::Sled => Self::from_iter::<services::Sled>(iter)?.finish(),
+            services::SLED_SCHEME => 
Self::from_iter::<services::Sled>(iter)?.finish(),
             #[cfg(feature = "services-sqlite")]
-            Scheme::Sqlite => 
Self::from_iter::<services::Sqlite>(iter)?.finish(),
+            services::SQLITE_SCHEME => 
Self::from_iter::<services::Sqlite>(iter)?.finish(),
+            #[cfg(feature = "services-surrealdb")]
+            services::SURREALDB_SCHEME => 
Self::from_iter::<services::Surrealdb>(iter)?.finish(),
             #[cfg(feature = "services-swift")]
-            Scheme::Swift => 
Self::from_iter::<services::Swift>(iter)?.finish(),
+            services::SWIFT_SCHEME => 
Self::from_iter::<services::Swift>(iter)?.finish(),
             #[cfg(feature = "services-tikv")]
-            Scheme::Tikv => Self::from_iter::<services::Tikv>(iter)?.finish(),
+            services::TIKV_SCHEME => 
Self::from_iter::<services::Tikv>(iter)?.finish(),
+            #[cfg(feature = "services-upyun")]
+            services::UPYUN_SCHEME => 
Self::from_iter::<services::Upyun>(iter)?.finish(),
             #[cfg(feature = "services-vercel-artifacts")]
-            Scheme::VercelArtifacts => 
Self::from_iter::<services::VercelArtifacts>(iter)?.finish(),
+            services::VERCEL_ARTIFACTS_SCHEME => {
+                Self::from_iter::<services::VercelArtifacts>(iter)?.finish()
+            }
             #[cfg(feature = "services-vercel-blob")]
-            Scheme::VercelBlob => 
Self::from_iter::<services::VercelBlob>(iter)?.finish(),
+            services::VERCEL_BLOB_SCHEME => 
Self::from_iter::<services::VercelBlob>(iter)?.finish(),
             #[cfg(feature = "services-webdav")]
-            Scheme::Webdav => 
Self::from_iter::<services::Webdav>(iter)?.finish(),
+            services::WEBDAV_SCHEME => 
Self::from_iter::<services::Webdav>(iter)?.finish(),
             #[cfg(feature = "services-webhdfs")]
-            Scheme::Webhdfs => 
Self::from_iter::<services::Webhdfs>(iter)?.finish(),
-            #[cfg(feature = "services-redb")]
-            Scheme::Redb => Self::from_iter::<services::Redb>(iter)?.finish(),
-            #[cfg(feature = "services-mongodb")]
-            Scheme::Mongodb => 
Self::from_iter::<services::Mongodb>(iter)?.finish(),
-            #[cfg(feature = "services-hdfs-native")]
-            Scheme::HdfsNative => 
Self::from_iter::<services::HdfsNative>(iter)?.finish(),
-            #[cfg(feature = "services-lakefs")]
-            Scheme::Lakefs => 
Self::from_iter::<services::Lakefs>(iter)?.finish(),
+            services::WEBHDFS_SCHEME => 
Self::from_iter::<services::Webhdfs>(iter)?.finish(),
+            #[cfg(feature = "services-yandex-disk")]
+            services::YANDEX_DISK_SCHEME => 
Self::from_iter::<services::YandexDisk>(iter)?.finish(),
             v => {
                 return Err(Error::new(
                     ErrorKind::Unsupported,
@@ -364,18 +379,17 @@ impl Operator {
     /// use std::collections::HashMap;
     ///
     /// use opendal::Operator;
-    /// use opendal::Scheme;
     /// async fn test() -> Result<()> {
     ///     let map = HashMap::new();
     ///
     ///     // Build an `Operator` to start operating the storage.
-    ///     let op: Operator = Operator::via_map(Scheme::Memory, map)?;
+    ///     let op: Operator = Operator::via_map(opendal::services::MEMORY_SCHEME, map)?;
     ///
     ///     Ok(())
     /// }
     /// ```
     #[deprecated = "use via_iter instead"]
-    pub fn via_map(scheme: Scheme, map: HashMap<String, String>) -> Result<Operator> {
+    pub fn via_map(scheme: &str, map: HashMap<String, String>) -> Result<Operator> {
         Self::via_iter(scheme, map)
     }
 
@@ -443,7 +457,6 @@ impl Operator {
 /// use opendal::Builder;
 /// use opendal::Operator;
 /// use opendal::Result;
-/// use opendal::Scheme;
 ///
 /// fn init_service<B: Builder>(cfg: HashMap<String, String>) -> Result<Operator> {
 ///     let op = Operator::from_map::<B>(cfg)?
@@ -454,9 +467,9 @@ impl Operator {
 ///     Ok(op)
 /// }
 ///
-/// async fn init(scheme: Scheme, cfg: HashMap<String, String>) -> Result<()> {
+/// async fn init(scheme: &str, cfg: HashMap<String, String>) -> Result<()> {
 ///     let _ = match scheme {
-///         Scheme::Memory => init_service::<services::Memory>(cfg)?,
+///         services::MEMORY_SCHEME => init_service::<services::Memory>(cfg)?,
 ///         _ => todo!(),
 ///     };
 ///
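
One detail behind the rewritten `match`: string constants can only be used as `match` patterns when they are `const` items, so the `services::*_SCHEME` values have to be consts rather than statics. A standalone illustration (the constant below is a stand-in, not the real definition):

    // Stand-in for a scheme constant; only a `const` (not a `static`)
    // may appear as a match pattern like this.
    const FS_SCHEME: &str = "fs";

    fn describe(scheme: &str) -> &'static str {
        match scheme {
            FS_SCHEME => "POSIX-like file system",
            _ => "unknown or disabled scheme",
        }
    }

    fn main() {
        assert_eq!(describe("fs"), "POSIX-like file system");
        assert_eq!(describe("duck"), "unknown or disabled scheme");
    }
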
diff --git a/core/src/types/operator/info.rs b/core/src/types/operator/info.rs
index 2e483f450..d2af58793 100644
--- a/core/src/types/operator/info.rs
+++ b/core/src/types/operator/info.rs
@@ -15,7 +15,6 @@
 // specific language governing permissions and limitations
 // under the License.
 
-use std::str::FromStr;
 use std::sync::Arc;
 
 use crate::raw::*;
@@ -30,10 +29,9 @@ impl OperatorInfo {
         OperatorInfo(acc)
     }
 
-    /// [`Scheme`] of operator.
-    pub fn scheme(&self) -> Scheme {
-        let scheme_str = self.0.scheme();
-        Scheme::from_str(scheme_str).unwrap_or(Scheme::Custom(scheme_str))
+    /// Scheme of operator.
+    pub fn scheme(&self) -> &'static str {
+        self.0.scheme()
     }
 
     /// Root of operator, will be in format like `/path/to/dir/`
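
Since `OperatorInfo::scheme` now returns the string directly, callers that need per-service branching (like the behavior tests later in this patch) just compare against the constants. A minimal sketch, assuming the `services-ghac` feature is enabled so the constant is available:

    use opendal::{services, Operator};

    // Mirrors the skip logic used in the behavior tests below.
    fn should_skip_overwrite_test(op: &Operator) -> bool {
        // ghac does not support overwrite, so tests bail out early for it.
        op.info().scheme() == services::GHAC_SCHEME
    }
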
diff --git a/core/src/types/operator/operator.rs b/core/src/types/operator/operator.rs
index f27306f3a..5f255ab52 100644
--- a/core/src/types/operator/operator.rs
+++ b/core/src/types/operator/operator.rs
@@ -599,7 +599,6 @@ impl Operator {
     /// # use opendal::Result;
     /// # use opendal::Operator;
     /// # use futures::TryStreamExt;
-    /// # use opendal::Scheme;
     /// # async fn test(op: Operator) -> Result<()> {
     /// let r = op.reader("path/to/file").await?;
     /// // Read the first 10 bytes of the file
@@ -624,7 +623,6 @@ impl Operator {
     /// ```
     /// # use opendal::Result;
     /// # use opendal::Operator;
-    /// # use opendal::Scheme;
     /// # async fn test(op: Operator) -> Result<()> {
     /// let r = op.reader_with("path/to/file").version("version_id").await?;
     /// // Read the first 10 bytes of the file
diff --git a/core/src/types/operator/operator_futures.rs b/core/src/types/operator/operator_futures.rs
index eeb594972..1a1276bb1 100644
--- a/core/src/types/operator/operator_futures.rs
+++ b/core/src/types/operator/operator_futures.rs
@@ -282,7 +282,6 @@ impl<F: Future<Output = Result<Buffer>>> FutureRead<F> {
     /// ```
     /// # use opendal::Result;
     /// # use opendal::Operator;
-    /// # use opendal::Scheme;
     /// # async fn test(op: Operator) -> Result<()> {
     /// let r = op.read_with("path/to/file").concurrent(8).await?;
     /// # Ok(())
@@ -300,7 +299,6 @@ impl<F: Future<Output = Result<Buffer>>> FutureRead<F> {
     /// ```
     /// # use opendal::Result;
     /// # use opendal::Operator;
-    /// # use opendal::Scheme;
     /// # async fn test(op: Operator) -> Result<()> {
     /// let r = op.read_with("path/to/file").chunk(4 * 1024 * 1024).await?;
     /// # Ok(())
@@ -461,7 +459,6 @@ impl<F: Future<Output = Result<Reader>>> FutureReader<F> {
     /// ```
     /// # use opendal::Result;
     /// # use opendal::Operator;
-    /// # use opendal::Scheme;
     /// # async fn test(op: Operator) -> Result<()> {
     /// let r = op.reader_with("path/to/file").concurrent(8).await?;
     /// # Ok(())
@@ -479,7 +476,6 @@ impl<F: Future<Output = Result<Reader>>> FutureReader<F> {
     /// ```
     /// # use opendal::Result;
     /// # use opendal::Operator;
-    /// # use opendal::Scheme;
     /// # async fn test(op: Operator) -> Result<()> {
     /// let r = op
     ///     .reader_with("path/to/file")
@@ -510,7 +506,6 @@ impl<F: Future<Output = Result<Reader>>> FutureReader<F> {
     /// ```
     /// # use opendal::Result;
     /// # use opendal::Operator;
-    /// # use opendal::Scheme;
     /// # async fn test(op: Operator) -> Result<()> {
     /// let r = op
     ///     .reader_with("path/to/file")
diff --git a/core/src/types/read/buffer_stream.rs b/core/src/types/read/buffer_stream.rs
index 9c5ff4fdc..811fc08df 100644
--- a/core/src/types/read/buffer_stream.rs
+++ b/core/src/types/read/buffer_stream.rs
@@ -228,7 +228,7 @@ mod tests {
 
     #[tokio::test]
     async fn test_trait() -> Result<()> {
-        let acc = Operator::via_iter(Scheme::Memory, [])?.into_inner();
+        let acc = Operator::via_iter(services::MEMORY_SCHEME, [])?.into_inner();
         let ctx = Arc::new(ReadContext::new(
             acc,
             "test".to_string(),
@@ -244,7 +244,7 @@ mod tests {
 
     #[tokio::test]
     async fn test_buffer_stream() -> Result<()> {
-        let op = Operator::via_iter(Scheme::Memory, [])?;
+        let op = Operator::via_iter(services::MEMORY_SCHEME, [])?;
         op.write(
             "test",
             Buffer::from(vec![Bytes::from("Hello"), Bytes::from("World")]),
diff --git a/core/src/types/read/futures_async_reader.rs b/core/src/types/read/futures_async_reader.rs
index 86b88228d..de5aa3b20 100644
--- a/core/src/types/read/futures_async_reader.rs
+++ b/core/src/types/read/futures_async_reader.rs
@@ -183,7 +183,7 @@ mod tests {
 
     #[tokio::test]
     async fn test_trait() -> Result<()> {
-        let acc = Operator::via_iter(Scheme::Memory, [])?.into_inner();
+        let acc = Operator::via_iter(services::MEMORY_SCHEME, [])?.into_inner();
         let ctx = Arc::new(ReadContext::new(
             acc,
             "test".to_string(),
@@ -199,7 +199,7 @@ mod tests {
 
     #[tokio::test]
     async fn test_futures_async_read() -> Result<()> {
-        let op = Operator::via_iter(Scheme::Memory, [])?;
+        let op = Operator::via_iter(services::MEMORY_SCHEME, [])?;
         op.write(
             "test",
             Buffer::from(vec![Bytes::from("Hello"), Bytes::from("World")]),
@@ -230,7 +230,7 @@ mod tests {
 
     #[tokio::test]
     async fn test_futures_async_read_with_concurrent() -> Result<()> {
-        let op = Operator::via_iter(Scheme::Memory, [])?;
+        let op = Operator::via_iter(services::MEMORY_SCHEME, [])?;
         op.write(
             "test",
             Buffer::from(vec![Bytes::from("Hello"), Bytes::from("World")]),
@@ -261,7 +261,7 @@ mod tests {
 
     #[tokio::test]
     async fn test_futures_async_buf_read() -> Result<()> {
-        let op = Operator::via_iter(Scheme::Memory, [])?;
+        let op = Operator::via_iter(services::MEMORY_SCHEME, [])?;
         op.write(
             "test",
             Buffer::from(vec![Bytes::from("Hello"), Bytes::from("World")]),
diff --git a/core/src/types/read/futures_bytes_stream.rs b/core/src/types/read/futures_bytes_stream.rs
index bc55cc7c5..1a4e7ac11 100644
--- a/core/src/types/read/futures_bytes_stream.rs
+++ b/core/src/types/read/futures_bytes_stream.rs
@@ -89,7 +89,7 @@ mod tests {
 
     #[tokio::test]
     async fn test_trait() -> Result<()> {
-        let acc = Operator::via_iter(Scheme::Memory, [])?.into_inner();
+        let acc = Operator::via_iter(services::MEMORY_SCHEME, [])?.into_inner();
         let ctx = Arc::new(ReadContext::new(
             acc,
             "test".to_string(),
@@ -105,7 +105,7 @@ mod tests {
 
     #[tokio::test]
     async fn test_futures_bytes_stream() -> Result<()> {
-        let op = Operator::via_iter(Scheme::Memory, [])?;
+        let op = Operator::via_iter(services::MEMORY_SCHEME, [])?;
         op.write(
             "test",
             Buffer::from(vec![Bytes::from("Hello"), Bytes::from("World")]),
@@ -130,7 +130,7 @@ mod tests {
 
     #[tokio::test]
     async fn test_futures_bytes_stream_with_concurrent() -> Result<()> {
-        let op = Operator::via_iter(Scheme::Memory, [])?;
+        let op = Operator::via_iter(services::MEMORY_SCHEME, [])?;
         op.write(
             "test",
             Buffer::from(vec![Bytes::from("Hello"), Bytes::from("World")]),
diff --git a/core/src/types/read/reader.rs b/core/src/types/read/reader.rs
index 6908a900b..230306aa4 100644
--- a/core/src/types/read/reader.rs
+++ b/core/src/types/read/reader.rs
@@ -440,7 +440,7 @@ mod tests {
 
     #[tokio::test]
     async fn test_trait() -> Result<()> {
-        let op = Operator::via_iter(Scheme::Memory, [])?;
+        let op = Operator::via_iter(services::MEMORY_SCHEME, [])?;
         op.write(
             "test",
             Buffer::from(vec![Bytes::from("Hello"), Bytes::from("World")]),
@@ -473,7 +473,7 @@ mod tests {
 
     #[tokio::test]
     async fn test_reader_read() -> Result<()> {
-        let op = Operator::via_iter(Scheme::Memory, [])?;
+        let op = Operator::via_iter(services::MEMORY_SCHEME, [])?;
         let path = "test_file";
 
         let content = gen_random_bytes();
@@ -490,7 +490,7 @@ mod tests {
 
     #[tokio::test]
     async fn test_reader_read_with_chunk() -> Result<()> {
-        let op = Operator::via_iter(Scheme::Memory, [])?;
+        let op = Operator::via_iter(services::MEMORY_SCHEME, [])?;
         let path = "test_file";
 
         let content = gen_random_bytes();
@@ -507,7 +507,7 @@ mod tests {
 
     #[tokio::test]
     async fn test_reader_read_with_concurrent() -> Result<()> {
-        let op = Operator::via_iter(Scheme::Memory, [])?;
+        let op = Operator::via_iter(services::MEMORY_SCHEME, [])?;
         let path = "test_file";
 
         let content = gen_random_bytes();
@@ -529,7 +529,7 @@ mod tests {
 
     #[tokio::test]
     async fn test_reader_read_into() -> Result<()> {
-        let op = Operator::via_iter(Scheme::Memory, [])?;
+        let op = Operator::via_iter(services::MEMORY_SCHEME, [])?;
         let path = "test_file";
 
         let content = gen_random_bytes();
diff --git a/core/src/types/scheme.rs b/core/src/types/scheme.rs
deleted file mode 100644
index 2b12a566a..000000000
--- a/core/src/types/scheme.rs
+++ /dev/null
@@ -1,461 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-use std::collections::HashSet;
-use std::fmt::Display;
-use std::fmt::Formatter;
-use std::str::FromStr;
-
-use crate::Error;
-
-/// Services that OpenDAL supports
-///
-/// # Notes
-///
-/// - Scheme is `non_exhaustive`, new variant COULD be added at any time.
-/// - New variant SHOULD be added in alphabet orders,
-/// - Users MUST NOT relay on its order.
-#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Default)]
-#[non_exhaustive]
-pub enum Scheme {
-    /// [aliyun-drive][crate::services::AliyunDrive]: Aliyun Drive services.
-    AliyunDrive,
-    /// [azblob][crate::services::Azblob]: Azure Storage Blob services.
-    Azblob,
-    /// [Azdls][crate::services::Azdls]: Azure Data Lake Storage Gen2.
-    Azdls,
-    /// [B2][crate::services::B2]: Backblaze B2 Services.
-    B2,
-    /// [Compfs][crate::services::Compfs]: Compio fs Services.
-    Compfs,
-    /// [Seafile][crate::services::Seafile]: Seafile Services.
-    Seafile,
-    /// [Upyun][crate::services::Upyun]: Upyun Services.
-    Upyun,
-    /// [VercelBlob][crate::services::VercelBlob]: VercelBlob Services.
-    VercelBlob,
-    /// [YandexDisk][crate::services::YandexDisk]: YandexDisk Services.
-    YandexDisk,
-    /// [Pcloud][crate::services::Pcloud]: Pcloud Services.
-    Pcloud,
-    /// [Koofr][crate::services::Koofr]: Koofr Services.
-    Koofr,
-    /// [cacache][crate::services::Cacache]: cacache backend support.
-    Cacache,
-    /// [cloudflare-kv][crate::services::CloudflareKv]: Cloudflare KV services.
-    CloudflareKv,
-    /// [cos][crate::services::Cos]: Tencent Cloud Object Storage services.
-    Cos,
-    /// [d1][crate::services::D1]: D1 services
-    D1,
-    /// [dashmap][crate::services::Dashmap]: dashmap backend support.
-    Dashmap,
-    /// [etcd][crate::services::Etcd]: Etcd Services
-    Etcd,
-    /// [foundationdb][crate::services::Foundationdb]: Foundationdb services.
-    Foundationdb,
-    /// [dbfs][crate::services::Dbfs]: DBFS backend support.
-    Dbfs,
-    /// [fs][crate::services::Fs]: POSIX-like file system.
-    Fs,
-    /// [ftp][crate::services::Ftp]: FTP backend.
-    Ftp,
-    /// [gcs][crate::services::Gcs]: Google Cloud Storage backend.
-    Gcs,
-    /// [ghac][crate::services::Ghac]: GitHub Action Cache services.
-    Ghac,
-    /// [hdfs][crate::services::Hdfs]: Hadoop Distributed File System.
-    Hdfs,
-    /// [http][crate::services::Http]: HTTP backend.
-    Http,
-    /// [huggingface][crate::services::Huggingface]: Huggingface services.
-    Huggingface,
-    /// [alluxio][crate::services::Alluxio]: Alluxio services.
-    Alluxio,
-
-    /// [ipmfs][crate::services::Ipfs]: IPFS HTTP Gateway
-    Ipfs,
-    /// [ipmfs][crate::services::Ipmfs]: IPFS mutable file system
-    Ipmfs,
-    /// [memcached][crate::services::Memcached]: Memcached service support.
-    Memcached,
-    /// [memory][crate::services::Memory]: In memory backend support.
-    #[default]
-    Memory,
-    /// [mini-moka][crate::services::MiniMoka]: Mini Moka backend support.
-    MiniMoka,
-    /// [moka][crate::services::Moka]: moka backend support.
-    Moka,
-    /// [monoiofs][crate::services::Monoiofs]: monoio fs services.
-    Monoiofs,
-    /// [obs][crate::services::Obs]: Huawei Cloud OBS services.
-    Obs,
-    /// [onedrive][crate::services::Onedrive]: Microsoft OneDrive services.
-    Onedrive,
-    /// [gdrive][crate::services::Gdrive]: GoogleDrive services.
-    Gdrive,
-    /// [dropbox][crate::services::Dropbox]: Dropbox services.
-    Dropbox,
-    /// [oss][crate::services::Oss]: Aliyun Object Storage Services
-    Oss,
-    /// [persy][crate::services::Persy]: persy backend support.
-    Persy,
-    /// [redis][crate::services::Redis]: Redis services
-    Redis,
-    /// [postgresql][crate::services::Postgresql]: Postgresql services
-    Postgresql,
-    /// [mysql][crate::services::Mysql]: Mysql services
-    Mysql,
-    /// [sqlite][crate::services::Sqlite]: Sqlite services
-    Sqlite,
-    /// [rocksdb][crate::services::Rocksdb]: RocksDB services
-    Rocksdb,
-    /// [s3][crate::services::S3]: AWS S3 alike services.
-    S3,
-    /// [sftp][crate::services::Sftp]: SFTP services
-    Sftp,
-    /// [sled][crate::services::Sled]: Sled services
-    Sled,
-    /// [swift][crate::services::Swift]: Swift backend support.
-    Swift,
-    /// [Vercel Artifacts][crate::services::VercelArtifacts]: Vercel Artifacts service, as known as Vercel Remote Caching.
-    VercelArtifacts,
-    /// [webdav][crate::services::Webdav]: WebDAV support.
-    Webdav,
-    /// [webhdfs][crate::services::Webhdfs]: WebHDFS RESTful API Services
-    Webhdfs,
-    /// [redb][crate::services::Redb]: Redb Services
-    Redb,
-    /// [tikv][crate::services::Tikv]: Tikv Services
-    Tikv,
-    /// [azfile][crate::services::Azfile]: Azfile Services
-    Azfile,
-    /// [mongodb](crate::services::Mongodb): MongoDB Services
-    Mongodb,
-    /// [gridfs](crate::services::Gridfs): MongoDB Gridfs Services
-    Gridfs,
-    /// [Github Contents][crate::services::Github]: Github contents support.
-    Github,
-    /// [Native HDFS](crate::services::HdfsNative): Hdfs Native service, using rust hdfs-native client for hdfs
-    HdfsNative,
-    /// [surrealdb](crate::services::Surrealdb): Surrealdb Services
-    Surrealdb,
-    /// [lakefs](crate::services::Lakefs): LakeFS Services
-    Lakefs,
-    /// Custom that allow users to implement services outside OpenDAL.
-    ///
-    /// # NOTE
-    ///
-    /// - Custom must not overwrite any existing services name.
-    /// - Custom must be in lower case.
-    Custom(&'static str),
-}
-
-impl Scheme {
-    /// Convert self into static str.
-    pub fn into_static(self) -> &'static str {
-        self.into()
-    }
-
-    /// Get all enabled schemes.
-    ///
-    /// OpenDAL could be compiled with different features, which will enable different schemes.
-    /// This function returns all enabled schemes so users can make decisions based on it.
-    ///
-    /// # Examples
-    ///
-    /// ```rust,no_run
-    /// use opendal::Scheme;
-    ///
-    /// let enabled_schemes = Scheme::enabled();
-    /// if !enabled_schemes.contains(&Scheme::Memory) {
-    ///     panic!("s3 support is not enabled")
-    /// }
-    /// ```
-    pub fn enabled() -> HashSet<Scheme> {
-        HashSet::from([
-            #[cfg(feature = "services-aliyun-drive")]
-            Scheme::AliyunDrive,
-            #[cfg(feature = "services-alluxio")]
-            Scheme::Alluxio,
-            #[cfg(feature = "services-azblob")]
-            Scheme::Azblob,
-            #[cfg(feature = "services-azdls")]
-            Scheme::Azdls,
-            #[cfg(feature = "services-azfile")]
-            Scheme::Azfile,
-            #[cfg(feature = "services-b2")]
-            Scheme::B2,
-            #[cfg(feature = "services-cacache")]
-            Scheme::Cacache,
-            #[cfg(feature = "services-cos")]
-            Scheme::Cos,
-            #[cfg(feature = "services-compfs")]
-            Scheme::Compfs,
-            #[cfg(feature = "services-dashmap")]
-            Scheme::Dashmap,
-            #[cfg(feature = "services-dropbox")]
-            Scheme::Dropbox,
-            #[cfg(feature = "services-etcd")]
-            Scheme::Etcd,
-            #[cfg(feature = "services-foundationdb")]
-            Scheme::Foundationdb,
-            #[cfg(feature = "services-fs")]
-            Scheme::Fs,
-            #[cfg(feature = "services-ftp")]
-            Scheme::Ftp,
-            #[cfg(feature = "services-gcs")]
-            Scheme::Gcs,
-            #[cfg(feature = "services-ghac")]
-            Scheme::Ghac,
-            #[cfg(feature = "services-hdfs")]
-            Scheme::Hdfs,
-            #[cfg(feature = "services-http")]
-            Scheme::Http,
-            #[cfg(feature = "services-huggingface")]
-            Scheme::Huggingface,
-            #[cfg(feature = "services-ipfs")]
-            Scheme::Ipfs,
-            #[cfg(feature = "services-ipmfs")]
-            Scheme::Ipmfs,
-            #[cfg(feature = "services-memcached")]
-            Scheme::Memcached,
-            #[cfg(feature = "services-memory")]
-            Scheme::Memory,
-            #[cfg(feature = "services-mini-moka")]
-            Scheme::MiniMoka,
-            #[cfg(feature = "services-moka")]
-            Scheme::Moka,
-            #[cfg(feature = "services-monoiofs")]
-            Scheme::Monoiofs,
-            #[cfg(feature = "services-mysql")]
-            Scheme::Mysql,
-            #[cfg(feature = "services-obs")]
-            Scheme::Obs,
-            #[cfg(feature = "services-onedrive")]
-            Scheme::Onedrive,
-            #[cfg(feature = "services-postgresql")]
-            Scheme::Postgresql,
-            #[cfg(feature = "services-gdrive")]
-            Scheme::Gdrive,
-            #[cfg(feature = "services-oss")]
-            Scheme::Oss,
-            #[cfg(feature = "services-persy")]
-            Scheme::Persy,
-            #[cfg(feature = "services-redis")]
-            Scheme::Redis,
-            #[cfg(feature = "services-rocksdb")]
-            Scheme::Rocksdb,
-            #[cfg(feature = "services-s3")]
-            Scheme::S3,
-            #[cfg(feature = "services-seafile")]
-            Scheme::Seafile,
-            #[cfg(feature = "services-upyun")]
-            Scheme::Upyun,
-            #[cfg(feature = "services-yandex-disk")]
-            Scheme::YandexDisk,
-            #[cfg(feature = "services-pcloud")]
-            Scheme::Pcloud,
-            #[cfg(feature = "services-sftp")]
-            Scheme::Sftp,
-            #[cfg(feature = "services-sled")]
-            Scheme::Sled,
-            #[cfg(feature = "services-sqlite")]
-            Scheme::Sqlite,
-            #[cfg(feature = "services-swift")]
-            Scheme::Swift,
-            #[cfg(feature = "services-tikv")]
-            Scheme::Tikv,
-            #[cfg(feature = "services-vercel-artifacts")]
-            Scheme::VercelArtifacts,
-            #[cfg(feature = "services-vercel-blob")]
-            Scheme::VercelBlob,
-            #[cfg(feature = "services-webdav")]
-            Scheme::Webdav,
-            #[cfg(feature = "services-webhdfs")]
-            Scheme::Webhdfs,
-            #[cfg(feature = "services-redb")]
-            Scheme::Redb,
-            #[cfg(feature = "services-mongodb")]
-            Scheme::Mongodb,
-            #[cfg(feature = "services-hdfs-native")]
-            Scheme::HdfsNative,
-            #[cfg(feature = "services-surrealdb")]
-            Scheme::Surrealdb,
-            #[cfg(feature = "services-lakefs")]
-            Scheme::Lakefs,
-            #[cfg(feature = "services-cloudflare-kv")]
-            Scheme::CloudflareKv,
-        ])
-    }
-}
-
-impl Display for Scheme {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{}", self.into_static())
-    }
-}
-
-impl FromStr for Scheme {
-    type Err = Error;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        let s = s.to_lowercase();
-        match s.as_str() {
-            "aliyun-drive" | "aliyun_drive" => Ok(Scheme::AliyunDrive),
-            "azblob" => Ok(Scheme::Azblob),
-            "alluxio" => Ok(Scheme::Alluxio),
-            // Notes:
-            //
-            // OpenDAL used to call `azdls` as `azdfs`, we keep it for backward compatibility.
-            // And abfs is widely used in hadoop ecosystem, keep it for easy to use.
-            "azdls" | "azdfs" | "abfs" => Ok(Scheme::Azdls),
-            "b2" => Ok(Scheme::B2),
-            "cacache" => Ok(Scheme::Cacache),
-            "compfs" => Ok(Scheme::Compfs),
-            "cloudflare-kv" | "cloudflare_kv" => Ok(Scheme::CloudflareKv),
-            "cos" => Ok(Scheme::Cos),
-            "d1" => Ok(Scheme::D1),
-            "dashmap" => Ok(Scheme::Dashmap),
-            "dropbox" => Ok(Scheme::Dropbox),
-            "etcd" => Ok(Scheme::Etcd),
-            "dbfs" => Ok(Scheme::Dbfs),
-            "fs" => Ok(Scheme::Fs),
-            "gcs" => Ok(Scheme::Gcs),
-            "gdrive" => Ok(Scheme::Gdrive),
-            "ghac" => Ok(Scheme::Ghac),
-            "gridfs" => Ok(Scheme::Gridfs),
-            "github" => Ok(Scheme::Github),
-            "hdfs" => Ok(Scheme::Hdfs),
-            "http" | "https" => Ok(Scheme::Http),
-            "huggingface" | "hf" => Ok(Scheme::Huggingface),
-            "ftp" | "ftps" => Ok(Scheme::Ftp),
-            "ipfs" | "ipns" => Ok(Scheme::Ipfs),
-            "ipmfs" => Ok(Scheme::Ipmfs),
-            "koofr" => Ok(Scheme::Koofr),
-            "memcached" => Ok(Scheme::Memcached),
-            "memory" => Ok(Scheme::Memory),
-            "mysql" => Ok(Scheme::Mysql),
-            "sqlite" => Ok(Scheme::Sqlite),
-            "mini-moka" | "mini_moka" => Ok(Scheme::MiniMoka),
-            "moka" => Ok(Scheme::Moka),
-            "monoiofs" => Ok(Scheme::Monoiofs),
-            "obs" => Ok(Scheme::Obs),
-            "onedrive" => Ok(Scheme::Onedrive),
-            "persy" => Ok(Scheme::Persy),
-            "postgresql" => Ok(Scheme::Postgresql),
-            "redb" => Ok(Scheme::Redb),
-            "redis" => Ok(Scheme::Redis),
-            "rocksdb" => Ok(Scheme::Rocksdb),
-            "s3" => Ok(Scheme::S3),
-            "seafile" => Ok(Scheme::Seafile),
-            "upyun" => Ok(Scheme::Upyun),
-            "yandex-disk" | "yandex_disk" => Ok(Scheme::YandexDisk),
-            "pcloud" => Ok(Scheme::Pcloud),
-            "sftp" => Ok(Scheme::Sftp),
-            "sled" => Ok(Scheme::Sled),
-            "swift" => Ok(Scheme::Swift),
-            "oss" => Ok(Scheme::Oss),
-            "vercel-artifacts" | "vercel_artifacts" => 
Ok(Scheme::VercelArtifacts),
-            "vercel-blob" | "vercel_blob" => Ok(Scheme::VercelBlob),
-            "webdav" => Ok(Scheme::Webdav),
-            "webhdfs" => Ok(Scheme::Webhdfs),
-            "tikv" => Ok(Scheme::Tikv),
-            "azfile" => Ok(Scheme::Azfile),
-            "mongodb" => Ok(Scheme::Mongodb),
-            "hdfs-native" | "hdfs_native" => Ok(Scheme::HdfsNative),
-            "surrealdb" => Ok(Scheme::Surrealdb),
-            "lakefs" => Ok(Scheme::Lakefs),
-            _ => Ok(Scheme::Custom(Box::leak(s.into_boxed_str()))),
-        }
-    }
-}
-
-impl From<Scheme> for &'static str {
-    fn from(v: Scheme) -> Self {
-        match v {
-            Scheme::AliyunDrive => "aliyun-drive",
-            Scheme::Azblob => "azblob",
-            Scheme::Azdls => "azdls",
-            Scheme::B2 => "b2",
-            Scheme::Cacache => "cacache",
-            Scheme::CloudflareKv => "cloudflare-kv",
-            Scheme::Cos => "cos",
-            Scheme::Compfs => "compfs",
-            Scheme::D1 => "d1",
-            Scheme::Dashmap => "dashmap",
-            Scheme::Etcd => "etcd",
-            Scheme::Dbfs => "dbfs",
-            Scheme::Fs => "fs",
-            Scheme::Gcs => "gcs",
-            Scheme::Ghac => "ghac",
-            Scheme::Gridfs => "gridfs",
-            Scheme::Hdfs => "hdfs",
-            Scheme::Http => "http",
-            Scheme::Huggingface => "huggingface",
-            Scheme::Foundationdb => "foundationdb",
-            Scheme::Ftp => "ftp",
-            Scheme::Ipfs => "ipfs",
-            Scheme::Ipmfs => "ipmfs",
-            Scheme::Koofr => "koofr",
-            Scheme::Memcached => "memcached",
-            Scheme::Memory => "memory",
-            Scheme::MiniMoka => "mini-moka",
-            Scheme::Moka => "moka",
-            Scheme::Monoiofs => "monoiofs",
-            Scheme::Obs => "obs",
-            Scheme::Onedrive => "onedrive",
-            Scheme::Persy => "persy",
-            Scheme::Postgresql => "postgresql",
-            Scheme::Mysql => "mysql",
-            Scheme::Gdrive => "gdrive",
-            Scheme::Github => "github",
-            Scheme::Dropbox => "dropbox",
-            Scheme::Redis => "redis",
-            Scheme::Rocksdb => "rocksdb",
-            Scheme::S3 => "s3",
-            Scheme::Seafile => "seafile",
-            Scheme::Sftp => "sftp",
-            Scheme::Sled => "sled",
-            Scheme::Swift => "swift",
-            Scheme::VercelArtifacts => "vercel-artifacts",
-            Scheme::VercelBlob => "vercel-blob",
-            Scheme::Oss => "oss",
-            Scheme::Webdav => "webdav",
-            Scheme::Webhdfs => "webhdfs",
-            Scheme::Redb => "redb",
-            Scheme::Tikv => "tikv",
-            Scheme::Azfile => "azfile",
-            Scheme::Sqlite => "sqlite",
-            Scheme::Mongodb => "mongodb",
-            Scheme::Alluxio => "alluxio",
-            Scheme::Upyun => "upyun",
-            Scheme::YandexDisk => "yandex-disk",
-            Scheme::Pcloud => "pcloud",
-            Scheme::HdfsNative => "hdfs-native",
-            Scheme::Surrealdb => "surrealdb",
-            Scheme::Lakefs => "lakefs",
-            Scheme::Custom(v) => v,
-        }
-    }
-}
-
-impl From<Scheme> for String {
-    fn from(v: Scheme) -> Self {
-        v.into_static().to_string()
-    }
-}
diff --git a/core/src/types/write/buffer_sink.rs b/core/src/types/write/buffer_sink.rs
index 7c5ba4205..4faf550c5 100644
--- a/core/src/types/write/buffer_sink.rs
+++ b/core/src/types/write/buffer_sink.rs
@@ -170,7 +170,7 @@ mod tests {
 
     #[tokio::test]
     async fn test_trait() {
-        let op = Operator::via_iter(Scheme::Memory, []).unwrap();
+        let op = Operator::via_iter(services::MEMORY_SCHEME, []).unwrap();
 
         let acc = op.into_inner();
         let ctx = Arc::new(WriteContext::new(
diff --git a/core/src/types/write/futures_async_writer.rs b/core/src/types/write/futures_async_writer.rs
index 28dd89dff..752a74d63 100644
--- a/core/src/types/write/futures_async_writer.rs
+++ b/core/src/types/write/futures_async_writer.rs
@@ -118,7 +118,7 @@ mod tests {
 
     #[tokio::test]
     async fn test_trait() {
-        let op = Operator::via_iter(Scheme::Memory, []).unwrap();
+        let op = Operator::via_iter(services::MEMORY_SCHEME, []).unwrap();
 
         let acc = op.into_inner();
         let ctx = Arc::new(WriteContext::new(
diff --git a/core/src/types/write/futures_bytes_sink.rs b/core/src/types/write/futures_bytes_sink.rs
index de5563992..1530728c4 100644
--- a/core/src/types/write/futures_bytes_sink.rs
+++ b/core/src/types/write/futures_bytes_sink.rs
@@ -85,7 +85,7 @@ mod tests {
 
     #[tokio::test]
     async fn test_trait() {
-        let op = Operator::via_iter(Scheme::Memory, []).unwrap();
+        let op = Operator::via_iter(services::MEMORY_SCHEME, []).unwrap();
 
         let acc = op.into_inner();
         let ctx = Arc::new(WriteContext::new(
diff --git a/core/tests/behavior/async_copy.rs b/core/tests/behavior/async_copy.rs
index bc73ddccf..58274d43b 100644
--- a/core/tests/behavior/async_copy.rs
+++ b/core/tests/behavior/async_copy.rs
@@ -76,7 +76,7 @@ pub async fn test_copy_file_with_ascii_name(op: Operator) -> Result<()> {
 /// Copy a file with non ascii name and test contents.
 pub async fn test_copy_file_with_non_ascii_name(op: Operator) -> Result<()> {
     // Koofr does not support non-ascii name.(https://github.com/apache/opendal/issues/4051)
-    if op.info().scheme() == Scheme::Koofr {
+    if op.info().scheme() == services::KOOFR_SCHEME {
         return Ok(());
     }
 
diff --git a/core/tests/behavior/async_delete.rs b/core/tests/behavior/async_delete.rs
index 7b517367d..9d897588e 100644
--- a/core/tests/behavior/async_delete.rs
+++ b/core/tests/behavior/async_delete.rs
@@ -133,7 +133,7 @@ pub async fn test_delete_stream(op: Operator) -> Result<()> {
     }
     // Gdrive think that this test is an abuse of their service and redirect us
     // to an infinite loop. Let's ignore this test for gdrive.
-    if op.info().scheme() == Scheme::Gdrive {
+    if op.info().scheme() == services::GDRIVE_SCHEME {
         return Ok(());
     }
 
diff --git a/core/tests/behavior/async_list.rs b/core/tests/behavior/async_list.rs
index 385a06a8d..6035a57a1 100644
--- a/core/tests/behavior/async_list.rs
+++ b/core/tests/behavior/async_list.rs
@@ -115,7 +115,7 @@ pub async fn test_list_prefix(op: Operator) -> Result<()> {
 pub async fn test_list_rich_dir(op: Operator) -> Result<()> {
     // Gdrive think that this test is an abuse of their service and redirect us
     // to an infinite loop. Let's ignore this test for gdrive.
-    if op.info().scheme() == Scheme::Gdrive {
+    if op.info().scheme() == services::GDRIVE_SCHEME {
         return Ok(());
     }
 
@@ -653,7 +653,7 @@ pub async fn test_list_files_with_deleted(op: Operator) -> Result<()> {
 pub async fn test_list_with_versions_and_limit(op: Operator) -> Result<()> {
     // Gdrive think that this test is an abuse of their service and redirect us
     // to an infinite loop. Let's ignore this test for gdrive.
-    if op.info().scheme() == Scheme::Gdrive {
+    if op.info().scheme() == services::GDRIVE_SCHEME {
         return Ok(());
     }
     if !op.info().full_capability().list_with_versions {
diff --git a/core/tests/behavior/async_write.rs b/core/tests/behavior/async_write.rs
index 8672ecd33..f672131f5 100644
--- a/core/tests/behavior/async_write.rs
+++ b/core/tests/behavior/async_write.rs
@@ -116,7 +116,7 @@ pub async fn test_write_with_dir_path(op: Operator) -> Result<()> {
 /// Write a single file with special chars should succeed.
 pub async fn test_write_with_special_chars(op: Operator) -> Result<()> {
     // Ignore test for vercel blob https://github.com/apache/opendal/pull/4103.
-    if op.info().scheme() == opendal::Scheme::VercelBlob {
+    if op.info().scheme() == services::VERCEL_BLOB_SCHEME {
         warn!("ignore test for vercel blob 
https://github.com/apache/opendal/pull/4103";);
         return Ok(());
     }
@@ -694,7 +694,7 @@ pub async fn test_writer_with_append(op: Operator) -> Result<()> {
 
 pub async fn test_writer_write_with_overwrite(op: Operator) -> Result<()> {
     // ghac does not support overwrite
-    if op.info().scheme() == Scheme::Ghac {
+    if op.info().scheme() == services::GHAC_SCHEME {
         return Ok(());
     }
 
