From: Yifan Zhao <[email protected]>
It introduces configuration options for the upcoming experimental S3
support, including configuration parsing and `passwd_file` reading
logic.
Users can specify the following options:
- S3 service endpoint (required);
- S3 credentials file in the format $ak:$sk (optional);
- S3 API calling style (optional);
- S3 API signature version (optional, only V2 is currently supported).
Signed-off-by: Yifan Zhao <[email protected]>
Signed-off-by: Gao Xiang <[email protected]>
---
lib/liberofs_s3.h | 40 ++++++++
mkfs/main.c | 226 +++++++++++++++++++++++++++++++++++++++-------
2 files changed, 234 insertions(+), 32 deletions(-)
create mode 100644 lib/liberofs_s3.h
diff --git a/lib/liberofs_s3.h b/lib/liberofs_s3.h
new file mode 100644
index 0000000..4d3555e
--- /dev/null
+++ b/lib/liberofs_s3.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR Apache-2.0 */
+/*
+ * Copyright (C) 2025 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Yifan Zhao <[email protected]>
+ */
+#ifndef __EROFS_S3_H
+#define __EROFS_S3_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum s3erofs_url_style {
+	S3EROFS_URL_STYLE_PATH,		// Path style: https://s3.amazonaws.com/bucket/object
+	S3EROFS_URL_STYLE_VIRTUAL_HOST,	// Virtual host style: https://bucket.s3.amazonaws.com/object
+};
+
+enum s3erofs_signature_version {
+ S3EROFS_SIGNATURE_VERSION_2,
+ S3EROFS_SIGNATURE_VERSION_4,
+};
+
+#define S3_ACCESS_KEY_LEN 256
+#define S3_SECRET_KEY_LEN 256
+
+struct erofs_s3 {
+ const char *endpoint;
+ char access_key[S3_ACCESS_KEY_LEN + 1];
+ char secret_key[S3_SECRET_KEY_LEN + 1];
+
+ enum s3erofs_url_style url_style;
+ enum s3erofs_signature_version sig;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/mkfs/main.c b/mkfs/main.c
index ab27b77..a7d3f31 100644
--- a/mkfs/main.c
+++ b/mkfs/main.c
@@ -31,6 +31,7 @@
#include "../lib/liberofs_private.h"
#include "../lib/liberofs_uuid.h"
#include "../lib/liberofs_metabox.h"
+#include "../lib/liberofs_s3.h"
#include "../lib/compressor.h"
static struct option long_options[] = {
@@ -92,6 +93,9 @@ static struct option long_options[] = {
#endif
{"fsalignblks", required_argument, NULL, 531},
{"vmdk-desc", required_argument, NULL, 532},
+#ifdef S3EROFS_ENABLED
+ {"s3", required_argument, NULL, 533},
+#endif
{0, 0, 0, 0},
};
@@ -174,8 +178,8 @@ static void usage(int argc, char **argv)
" --chunksize=# generate chunk-based files with
#-byte chunks\n"
" --clean=X run full clean build (default) or:\n"
" --incremental=X run incremental build\n"
- " (X = data|rvsp; data=full data,
rvsp=space is allocated\n"
- " and filled with
zeroes)\n"
+ " X = data|rvsp|0 (data: full data,
rvsp: space fallocated\n"
+ " 0: inodes zeroed)\n"
" --compress-hints=X specify a file to configure per-file
compression strategy\n"
" --dsunit=# align all data block addresses to
multiples of #\n"
" --exclude-path=X avoid including file X (X = exact
literal path)\n"
@@ -197,6 +201,12 @@ static void usage(int argc, char **argv)
" --root-xattr-isize=# ensure the inline xattr size of the
root directory is # bytes at least\n"
" --aufs replace aufs special files with
overlayfs metadata\n"
" --sort=<path,none> data sorting order for tarballs as
input (default: path)\n"
+#ifdef S3EROFS_ENABLED
+ " --s3=X generate an image from S3-compatible
object store\n"
+ " [,passwd_file=Y] X=endpoint, Y=s3fs-compatible
password file\n"
+ " [,urlstyle=Z] S3 API calling style (Z = vhost|path)
(default: vhost)\n"
+ " [,sig=<2,4>] S3 API signature version (default: 2)\n"
+#endif
" --tar=X generate a full or index-only image
from a tarball(-ish) source\n"
" (X = f|i|headerball; f=full mode,
i=index mode,\n"
" headerball=file
data is omited in the source stream)\n"
@@ -247,16 +257,23 @@ static struct erofs_tarfile erofstar = {
static bool incremental_mode;
static u8 metabox_algorithmid;
+#ifdef S3EROFS_ENABLED
+static struct erofs_s3 s3cfg;
+#endif
+
enum {
EROFS_MKFS_DATA_IMPORT_DEFAULT,
EROFS_MKFS_DATA_IMPORT_FULLDATA,
EROFS_MKFS_DATA_IMPORT_RVSP,
- EROFS_MKFS_DATA_IMPORT_SPARSE,
+ EROFS_MKFS_DATA_IMPORT_ZEROFILL,
} dataimport_mode;
static enum {
EROFS_MKFS_SOURCE_LOCALDIR,
EROFS_MKFS_SOURCE_TAR,
+#ifdef S3EROFS_ENABLED
+ EROFS_MKFS_SOURCE_S3,
+#endif
EROFS_MKFS_SOURCE_REBUILD,
} source_mode;
@@ -522,6 +539,137 @@ static void mkfs_parse_tar_cfg(char *cfg)
erofstar.index_mode = true;
}
+#ifdef S3EROFS_ENABLED
+static int mkfs_parse_s3_cfg_passwd(const char *filepath, char *ak, char *sk)
+{
+ struct stat st;
+ int fd, n, ret;
+ char buf[S3_ACCESS_KEY_LEN + S3_SECRET_KEY_LEN + 3];
+ char *colon;
+
+ fd = open(filepath, O_RDONLY);
+ if (fd < 0) {
+ erofs_err("failed to open passwd_file %s", filepath);
+ return -errno;
+ }
+
+ ret = fstat(fd, &st);
+ if (ret) {
+ ret = -errno;
+ goto err;
+ }
+
+ if (!S_ISREG(st.st_mode)) {
+ erofs_err("%s is not a regular file", filepath);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if ((st.st_mode & 077) != 0)
+		erofs_warn("passwd_file %s should not be accessible by group or others",
+ filepath);
+
+ if (st.st_size > S3_ACCESS_KEY_LEN + S3_SECRET_KEY_LEN + 3) {
+		erofs_err("passwd_file %s is too large (size: %llu)", filepath, st.st_size | 0ULL);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ n = read(fd, buf, st.st_size);
+ if (n < 0) {
+ ret = -errno;
+ goto err;
+ }
+ buf[n] = '\0';
+
+ while (n > 0 && (buf[n - 1] == '\n' || buf[n - 1] == '\r'))
+ buf[--n] = '\0';
+
+ colon = strchr(buf, ':');
+ if (!colon) {
+ ret = -EINVAL;
+ goto err;
+ }
+ *colon = '\0';
+
+ strcpy(ak, buf);
+ strcpy(sk, colon + 1);