This patch enhances the build scripts and run.py to allow building the
images as described by #1200:

1. Run OSv from a single disk with two partitions: ROFS + ZFS (on /dev/vblk0.2):
  ./scripts/build image=tests,zfs,zfs-tools fs=rofs_with_zfs fs_size_mb=5000
  ./scripts/run.py --execute='--mount-fs=zfs,/dev/vblk0.2,/data /tests/misc-zfs-io.so --random --file-path /data/file'

2. Run OSv with two disks: the first with ROFS and the second with ZFS
(/dev/vblk1.1):
  ./scripts/build image=tests,zfs,zfs-tools fs=rofs fs_size_mb=5000 --create-zfs-disk
  ./scripts/run.py --execute='--mount-fs=zfs,/dev/vblk1.1,/data /tests/misc-zfs-io.so --random --file-path /data/file' --second-disk-image build/release/zfs_disk.img
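
For orientation, the resulting block-device layout in the two setups is
roughly:

  Setup 1 (single disk):          Setup 2 (two disks):
    /dev/vblk0.1 - ROFS root        /dev/vblk0.1 - ROFS root
    /dev/vblk0.2 - ZFS (/data)      /dev/vblk1.1 - ZFS (/data)

To sanity-check the pool after boot one could, for example, list the ZFS
datasets with the same /zfs.so tool the build script invokes below (a
hypothetical invocation, assuming the zfs-tools module is in the image):

  ./scripts/run.py --execute='--mount-fs=zfs,/dev/vblk0.2,/data /zfs.so list'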

Fixes #1200

Signed-off-by: Waldemar Kozaczuk <jwkozac...@gmail.com>
---
 scripts/build              | 59 ++++++++++++++++++++++++++++++++++----
 scripts/export_manifest.py |  7 ++---
 scripts/run.py             | 12 ++++++++
 tools/mkfs/mkfs.cc         |  8 +++---
 4 files changed, 73 insertions(+), 13 deletions(-)

diff --git a/scripts/build b/scripts/build
index b31b8172..64a55516 100755
--- a/scripts/build
+++ b/scripts/build
@@ -26,7 +26,8 @@ usage() {
         mode=release|debug            Specify the build mode; default is release
         export=none|selected|all      If 'selected' or 'all' export the app files to <export_dir>
         export_dir=<dir>              The directory to export the files to; default is build/export
-         fs=zfs|rofs|ramfs|virtiofs    Specify the filesystem of the image partition
+         fs=zfs|rofs|rofs_with_zfs|    Specify the filesystem of the image partition
+            ramfs|virtiofs
          fs_size=N                     Specify the size of the image in bytes
          fs_size_mb=N                  Specify the size of the image in MiB
         app_local_exec_tls_size=N     Specify the size of app local TLS in bytes; the default is 64
@@ -36,6 +37,7 @@ usage() {
          -j<N>                         Set number of parallel jobs for make
         --append-manifest             Append build/<mode>/append.manifest to usr.manifest
         --create-disk                 Instead of usr.img create kernel-less disk.img
+         --create-zfs-disk             Create extra empty disk with ZFS filesystem
 
        Examples:
         ./scripts/build -j4 fs=rofs image=native-example   # Create image with native-example app
@@ -77,7 +79,7 @@ do
        case $i in
        --help|-h)
                usage ;;
-       image=*|modules=*|fs=*|usrskel=*|check|--append-manifest|--create-disk) ;;
+       image=*|modules=*|fs=*|usrskel=*|check|--append-manifest|--create-disk|--create-zfs-disk) ;;
        clean)
                stage1_args=clean ;;
        arch=*)
@@ -159,6 +161,8 @@ do
                vars[append_manifest]="true";;
        --create-disk)
                vars[create_disk]="true";;
+       --create-zfs-disk)
+               vars[create_zfs_disk]="true";;
        esac
 done
 
@@ -195,7 +199,7 @@ usrskel_arg=
 case $fs_type in
 zfs)
        ;; # Nothing to change here. This is our default behavior
-rofs|virtiofs)
+rofs|rofs_with_zfs|virtiofs)
        # Both are read-only (in OSv) and require nothing extra on bootfs to work
        manifest=bootfs_empty.manifest.skel
        usrskel_arg="--usrskel usr_rofs.manifest.skel";;
@@ -293,6 +297,7 @@ cd $OUT
 
 if [ "$export" != "none" ]; then
        export_dir=${vars[export_dir]-$SRC/build/export}
+       rm -rf "$export_dir"
        "$SRC"/scripts/export_manifest.py -e "$export_dir" -m usr.manifest -D libgcc_s_dir="$libgcc_s_dir"
 fi
 
@@ -314,6 +319,7 @@ create_zfs_disk() {
        qemu-img convert -f raw -O qcow2 $raw_disk.raw $qcow2_disk.img
        qemu-img resize $qcow2_disk.img ${image_size}b >/dev/null 2>&1
        "$SRC"/scripts/upload_manifest.py --arch=$arch -o $qcow2_disk.img -m usr.manifest -D libgcc_s_dir="$libgcc_s_dir"
+       #"$SRC"/scripts/zfs-image-on-host.sh build $qcow2_disk.img $partition_offset osv zfs
 }
 
 create_rofs_disk() {
@@ -324,6 +330,22 @@ create_rofs_disk() {
        qemu-img convert -f raw -O qcow2 $raw_disk.raw $qcow2_disk.img
 }
 
+create_zfs_filesystem() {
+       local image_path=$1
+       local device_path=$2
+       local qemu_arch=$arch
+       if [[ "$qemu_arch" == 'aarch64' ]]; then
+               console=''
+               zfs_builder_name='zfs_builder.img'
+       else
+               qemu_arch='x86_64'
+               console='--console=serial'
+               zfs_builder_name='zfs_builder-stripped.elf'
+       fi
+       "$SRC"/scripts/run.py -k --kernel-path $zfs_builder_name --arch=$qemu_arch --vnc none -m 512 -c1 -i ${image_path} \
+               --block-device-cache unsafe -s -e "${console} --norandom --nomount --noinit --preload-zfs-library /tools/mkfs.so ${device_path}; /zfs.so set compression=off osv"
+}
+
 if [[ "$arch" == 'aarch64' ]]; then
        export STRIP=${CROSS_PREFIX:-aarch64-linux-gnu-}strip
 fi
@@ -332,13 +354,27 @@ case $fs_type in
 zfs)
        partition_size=$((fs_size - partition_offset))
        image_size=$fs_size
-        create_zfs_disk ;;
+       create_zfs_disk ;;
 rofs)
        rm -rf rofs.img
        "$SRC"/scripts/gen-rofs-img.py -o rofs.img -m usr.manifest -D libgcc_s_dir="$libgcc_s_dir"
        partition_size=`stat --printf %s rofs.img`
        image_size=$((partition_offset + partition_size))
-        create_rofs_disk ;;
+       create_rofs_disk ;;
+rofs_with_zfs)
+       # Create disk with rofs image on its 1st partition
+       rm -rf rofs.img
+       "$SRC"/scripts/gen-rofs-img.py -o rofs.img -m usr.manifest -D libgcc_s_dir="$libgcc_s_dir"
+       partition_size=`stat --printf %s rofs.img`
+       image_size=$((fs_size+partition_size))
+       create_rofs_disk
+       # Resize the disk to fit ZFS on it after rofs
+       qemu-img resize $qcow2_disk.img ${image_size}b >/dev/null 2>&1
+       # Create filesystem on ZFS partition
+       zfs_partition_offset=$((partition_offset + partition_size))
+       zfs_partition_size=$((image_size-zfs_partition_offset))
+       "$SRC"/scripts/imgedit.py setpartition "$qcow2_disk.img" 3 $zfs_partition_offset $zfs_partition_size
+       create_zfs_filesystem $qcow2_disk.img "/dev/vblk0.2";;
 ramfs|virtiofs)
        # No need to create extra fs like above: ramfs is already created (as the
        # bootfs) and virtio-fs is specified with virtiofsd at run time
@@ -352,6 +388,19 @@ if [[ -f "$OSV_BUILD_PATH/usr.img" ]]; then
        "$SRC"/scripts/imgedit.py setargs usr.img `cat cmdline`
 fi
 
+if [[ ${vars[create_zfs_disk]} == "true" ]]; then
+       partition_offset=512
+       partition_size=$((fs_size - partition_offset))
+       image_size=$fs_size
+       raw_disk=zfs_disk
+       qcow2_disk=zfs_disk
+       cp "$SRC"/scripts/disk.bin $raw_disk.raw
+       "$SRC"/scripts/imgedit.py setpartition "-f raw ${raw_disk}.raw" 2 $partition_offset $partition_size
+       qemu-img convert -f raw -O qcow2 $raw_disk.raw $qcow2_disk.img
+       qemu-img resize $qcow2_disk.img ${image_size}b >/dev/null 2>&1
+       create_zfs_filesystem $qcow2_disk.img "/dev/vblk0.1"
+fi
+
 # Support "build check"
 for i
 do
diff --git a/scripts/export_manifest.py b/scripts/export_manifest.py
index 67739c0e..48daebf6 100755
--- a/scripts/export_manifest.py
+++ b/scripts/export_manifest.py
@@ -10,10 +10,9 @@ def export_package(manifest, dest):
     abs_dest = os.path.abspath(dest)
     print("[INFO] exporting into directory %s" % abs_dest)
 
-    # Remove and create the base directory where we are going to put all 
package files.
-    if os.path.exists(abs_dest):
-        shutil.rmtree(abs_dest)
-    os.makedirs(abs_dest)
+    # Create the base directory where we are going to put all package files.
+    if not os.path.exists(abs_dest):
+        os.makedirs(abs_dest)
 
     files = list(expand(manifest))
     files = [(x, unsymlink(y % defines)) for (x, y) in files]
diff --git a/scripts/run.py b/scripts/run.py
index 6131ad27..f2b4c00d 100755
--- a/scripts/run.py
+++ b/scripts/run.py
@@ -177,6 +177,11 @@ def start_osv_qemu(options):
         "-device", "virtio-blk-pci,id=blk1,bootindex=1,drive=hd1,scsi=off%s" % options.virtio_device_suffix,
         "-drive", "file=%s,if=none,id=hd1" % (options.cloud_init_image)]
 
+    if options.second_disk_image:
+        args += [
+        "-device", "virtio-blk-pci,id=blk2,drive=hd2,scsi=off%s" % options.virtio_device_suffix,
+        "-drive", "file=%s,if=none,id=hd2" % (options.second_disk_image)]
+
     if options.virtio_fs_tag:
         dax = (",cache-size=%s" % options.virtio_fs_dax) if options.virtio_fs_dax else ""
         args += [
@@ -589,6 +594,8 @@ if __name__ == "__main__":
                         help="specify gdb port number")
     parser.add_argument("--script", action="store",
                         help="XEN define configuration script for vif")
+    parser.add_argument("--second-disk-image", action="store",
+                        help="Path to the optional second disk image that should be attached to the instance")
     parser.add_argument("--cloud-init-image", action="store",
                         help="Path to the optional cloud-init image that should be attached to the instance")
     parser.add_argument("-k", "--kernel", action="store_true",
@@ -630,6 +637,11 @@ if __name__ == "__main__":
         if not os.path.exists(cmdargs.cloud_init_image):
             raise Exception('Cloud-init image %s does not exist.' % cmdargs.cloud_init_image)
 
+    if cmdargs.second_disk_image:
+        cmdargs.second_disk_image = os.path.abspath(cmdargs.second_disk_image)
+        if not os.path.exists(cmdargs.second_disk_image):
+            raise Exception('Second disk image %s does not exist.' % cmdargs.second_disk_image)
+
     if cmdargs.virtio_fs_dir and not os.path.exists(cmdargs.virtio_fs_dir):
         raise Exception('Directory %s to be exposed through virtio-fs does not exist.' % cmdargs.virtio_fs_dir)
 
diff --git a/tools/mkfs/mkfs.cc b/tools/mkfs/mkfs.cc
index 2b6d4648..4808e6ea 100644
--- a/tools/mkfs/mkfs.cc
+++ b/tools/mkfs/mkfs.cc
@@ -51,7 +51,7 @@ static void get_blk_devices(vector<string> &zpool_args)
 }
 
 extern "C" void zfsdev_init();
-static void mkfs(void)
+static void mkfs(int ac, char** av)
 {
     // Create zfs device, then /etc/mnttab which is required by libzfs
     zfsdev_init();
@@ -62,8 +62,8 @@ static void mkfs(void)
     assert(fd != -1);
     close(fd);
 
-    vector<string> zpool_args = {"zpool", "create", "-f", "-R", "/zfs", "osv",
-        "/dev/vblk0.1"};
+    const char *dev_name = ac == 2 ? av[1] : "/dev/vblk0.1";
+    vector<string> zpool_args = {"zpool", "create", "-f", "-R", "/zfs", "osv", dev_name};
 
     get_blk_devices(zpool_args);
 
@@ -86,7 +86,7 @@ __attribute__((__visibility__("default")))
 int main(int ac, char** av)
 {
     cout << "Running mkfs...\n";
-    mkfs();
+    mkfs(ac, av);
     sync();
     return 0;
 }
-- 
2.35.1
