kszucs commented on code in PR #7185:
URL: https://github.com/apache/opendal/pull/7185#discussion_r2836252911
##########
core/services/hf/src/core.rs:
##########
@@ -546,235 +732,19 @@ mod tests {
         Ok(())
     }
+}
-    #[tokio::test]
-    async fn test_hf_list_url_space() -> Result<()> {
-        let (core, mock_client) = create_test_core(
-            RepoType::Space,
-            "org/space",
-            "main",
-            "https://huggingface.co",
-        );
-
-        core.hf_list("static", false, None).await?;
-
-        let url = mock_client.get_captured_url();
-        assert_eq!(
-            url,
-            "https://huggingface.co/api/spaces/org/space/tree/main/static?expand=True"
-        );
-
-        Ok(())
-    }
-
-    #[tokio::test]
-    async fn test_hf_resolve_url_space() -> Result<()> {
-        let (core, mock_client) = create_test_core(
-            RepoType::Space,
-            "user/space",
-            "main",
-            "https://huggingface.co",
-        );
-
-        let args = OpRead::default();
-        core.hf_resolve("README.md", BytesRange::default(), &args)
-            .await?;
-
-        let url = mock_client.get_captured_url();
-        assert_eq!(
-            url,
-            "https://huggingface.co/spaces/user/space/resolve/main/README.md"
-        );
-
-        Ok(())
-    }
-
-    #[tokio::test]
-    async fn test_hf_resolve_with_range() -> Result<()> {
-        let (core, mock_client) = create_test_core(
-            RepoType::Model,
-            "user/model",
-            "main",
-            "https://huggingface.co",
-        );
-
-        let args = OpRead::default();
-        let range = BytesRange::new(0, Some(1024));
-        core.hf_resolve("large_file.bin", range, &args).await?;
-
-        let url = mock_client.get_captured_url();
-        let headers = mock_client.get_captured_headers();
-        assert_eq!(
-            url,
-            "https://huggingface.co/user/model/resolve/main/large_file.bin"
-        );
-        assert_eq!(headers.get(http::header::RANGE).unwrap(), "bytes=0-1023");
-
-        Ok(())
-    }
-
-    #[test]
-    fn parse_list_response_test() -> Result<()> {
-        let resp = Bytes::from(
-            r#"
-            [
-                {
-                    "type": "file",
-                    "oid": "45fa7c3d85ee7dd4139adbc056da25ae136a65f2",
-                    "size": 69512435,
-                    "lfs": {
-                        "oid": "b43f4c2ea569da1d66ca74e26ca8ea4430dfc29195e97144b2d0b4f3f6cafa1c",
-                        "size": 69512435,
-                        "pointerSize": 133
-                    },
-                    "path": "maelstrom/lib/maelstrom.jar"
-                },
-                {
-                    "type": "directory",
-                    "oid": "b43f4c2ea569da1d66ca74e26ca8ea4430dfc29195e97144b2d0b4f3f6cafa1c",
-                    "size": 69512435,
-                    "path": "maelstrom/lib/plugins"
-                }
-            ]
-            "#,
-        );
-
-        let decoded_response =
-            serde_json::from_slice::<Vec<HfStatus>>(&resp).map_err(new_json_deserialize_error)?;
-
-        assert_eq!(decoded_response.len(), 2);
-
-        let file_entry = HfStatus {
-            type_: "file".to_string(),
-            oid: "45fa7c3d85ee7dd4139adbc056da25ae136a65f2".to_string(),
-            size: 69512435,
-            lfs: Some(HfLfs {
-                oid: "b43f4c2ea569da1d66ca74e26ca8ea4430dfc29195e97144b2d0b4f3f6cafa1c".to_string(),
-                size: 69512435,
-                pointer_size: 133,
-            }),
-            path: "maelstrom/lib/maelstrom.jar".to_string(),
-            last_commit: None,
-            security: None,
-        };
-
-        assert_eq!(decoded_response[0], file_entry);
-
-        let dir_entry = HfStatus {
-            type_: "directory".to_string(),
-            oid: "b43f4c2ea569da1d66ca74e26ca8ea4430dfc29195e97144b2d0b4f3f6cafa1c".to_string(),
-            size: 69512435,
-            lfs: None,
-            path: "maelstrom/lib/plugins".to_string(),
-            last_commit: None,
-            security: None,
-        };
-
-        assert_eq!(decoded_response[1], dir_entry);
-
-        Ok(())
-    }
-
-    #[test]
-    fn parse_files_info_test() -> Result<()> {
-        let resp = Bytes::from(
-            r#"
-            [
-                {
-                    "type": "file",
-                    "oid": "45fa7c3d85ee7dd4139adbc056da25ae136a65f2",
-                    "size": 69512435,
-                    "lfs": {
-                        "oid": "b43f4c2ea569da1d66ca74e26ca8ea4430dfc29195e97144b2d0b4f3f6cafa1c",
-                        "size": 69512435,
-                        "pointerSize": 133
-                    },
-                    "path": "maelstrom/lib/maelstrom.jar",
-                    "lastCommit": {
-                        "id": "bc1ef030bf3743290d5e190695ab94582e51ae2f",
-                        "title": "Upload 141 files",
-                        "date": "2023-11-17T23:50:28.000Z"
-                    },
-                    "security": {
-                        "blobId": "45fa7c3d85ee7dd4139adbc056da25ae136a65f2",
-                        "name": "maelstrom/lib/maelstrom.jar",
-                        "safe": true,
-                        "avScan": {
-                            "virusFound": false,
-                            "virusNames": null
-                        },
-                        "pickleImportScan": {
-                            "highestSafetyLevel": "innocuous",
-                            "imports": [
-                                {"module": "torch", "name": "FloatStorage", "safety": "innocuous"},
-                                {"module": "collections", "name": "OrderedDict", "safety": "innocuous"},
-                                {"module": "torch", "name": "LongStorage", "safety": "innocuous"},
-                                {"module": "torch._utils", "name": "_rebuild_tensor_v2", "safety": "innocuous"}
-                            ]
-                        }
-                    }
-                }
-            ]
-            "#,
-        );
-
-        let decoded_response =
-            serde_json::from_slice::<Vec<HfStatus>>(&resp).map_err(new_json_deserialize_error)?;
-
-        assert_eq!(decoded_response.len(), 1);
-
-        let file_info = HfStatus {
-            type_: "file".to_string(),
-            oid: "45fa7c3d85ee7dd4139adbc056da25ae136a65f2".to_string(),
-            size: 69512435,
-            lfs: Some(HfLfs {
-                oid: "b43f4c2ea569da1d66ca74e26ca8ea4430dfc29195e97144b2d0b4f3f6cafa1c".to_string(),
-                size: 69512435,
-                pointer_size: 133,
-            }),
-            path: "maelstrom/lib/maelstrom.jar".to_string(),
-            last_commit: Some(HfLastCommit {
-                id: "bc1ef030bf3743290d5e190695ab94582e51ae2f".to_string(),
-                title: "Upload 141 files".to_string(),
-                date: "2023-11-17T23:50:28.000Z".to_string(),
-            }),
-            security: Some(HfSecurity {
-                blob_id: "45fa7c3d85ee7dd4139adbc056da25ae136a65f2".to_string(),
-                safe: true,
-                av_scan: Some(HfAvScan {
-                    virus_found: false,
-                    virus_names: None,
-                }),
-                pickle_import_scan: Some(HfPickleImportScan {
-                    highest_safety_level: "innocuous".to_string(),
-                    imports: vec![
-                        HfImport {
-                            module: "torch".to_string(),
-                            name: "FloatStorage".to_string(),
-                            safety: "innocuous".to_string(),
-                        },
-                        HfImport {
-                            module: "collections".to_string(),
-                            name: "OrderedDict".to_string(),
-                            safety: "innocuous".to_string(),
-                        },
-                        HfImport {
-                            module: "torch".to_string(),
-                            name: "LongStorage".to_string(),
-                            safety: "innocuous".to_string(),
-                        },
-                        HfImport {
-                            module: "torch._utils".to_string(),
-                            name: "_rebuild_tensor_v2".to_string(),
-                            safety: "innocuous".to_string(),
-                        },
-                    ],
-                }),
-            }),
-        };
-
-        assert_eq!(decoded_response[0], file_info);
+#[cfg(feature = "xet")]
+pub(super) fn map_xet_error(err: impl std::error::Error + Send + Sync + 'static) -> Error {
Review Comment:
Updating.
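
For context, a minimal sketch of the shape this helper could take, assuming opendal core's usual `Error::new(ErrorKind, message)` plus `set_source` pattern; the error kind and message below are illustrative placeholders, not the final mapping:

```rust
use crate::{Error, ErrorKind};

#[cfg(feature = "xet")]
pub(super) fn map_xet_error(err: impl std::error::Error + Send + Sync + 'static) -> Error {
    // Wrap the external xet error in an opendal Error, keeping the
    // original error chain attached via set_source for debugging.
    Error::new(ErrorKind::Unexpected, "xet operation failed").set_source(err)
}
```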