The following pull request was submitted through GitHub.
It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/6451

This e-mail was sent by the LXC bot; direct replies will not reach the author
unless they happen to be subscribed to this list.

=== Description (from pull-request) ===
Cleaned up our commits for adding CEPH FS/RBD disks.
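
For reviewers, a quick sketch of how the new device syntax is exercised,
based on the test suite in the third patch (container, pool, image and
filesystem names below are illustrative placeholders, not part of the change):

  # Attach an existing RBD image. diskCephRbdMap() prefixes the volume name
  # with "custom_", so the image on the cluster is expected to be named
  # custom_my-volume.
  lxc config device add c1 rbd disk source=ceph:my-pool/my-volume \
    ceph.user_name=admin ceph.cluster_name=ceph path=/ceph

  # Attach an existing CephFS filesystem, optionally with a sub-path.
  lxc config device add c1 fs disk source=cephfs:my-fs/sub/path \
    ceph.user_name=admin ceph.cluster_name=ceph path=/cephfs

cephFsConfig() reads the usual Ceph client files on the host: the mon_host
entry from /etc/ceph/<cluster>.conf and the key entry from
/etc/ceph/<cluster>.client.<user>.keyring.
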
From 401eaf46f7e2415cdcaa2e8167bd03da45c54536 Mon Sep 17 00:00:00 2001
From: anusha-paul <anusha.p...@utexas.edu>
Date: Tue, 5 Nov 2019 22:32:57 -0600
Subject: [PATCH 1/3] api: Add container_disk_ceph API extension

Signed-off-by: anusha-paul <anusha.p...@utexas.edu>
---
 doc/api-extensions.md | 3 +++
 shared/version/api.go | 1 +
 2 files changed, 4 insertions(+)

diff --git a/doc/api-extensions.md b/doc/api-extensions.md
index e33d9ecced..1456c7e876 100644
--- a/doc/api-extensions.md
+++ b/doc/api-extensions.md
@@ -866,3 +866,6 @@ Adds the `security.syscalls.intercept.mount`,
 `security.syscalls.intercept.mount.shift` configuration keys to control whether
 and how the mount system call will be interecepted by LXD and processed with
 elevated permissions.
+
+## container\_disk\_ceph
+This allows for an existing Ceph RBD image or CephFS filesystem to be directly connected to a LXD container.
\ No newline at end of file
diff --git a/shared/version/api.go b/shared/version/api.go
index f6f64cb741..749fa2deef 100644
--- a/shared/version/api.go
+++ b/shared/version/api.go
@@ -173,6 +173,7 @@ var APIExtensions = []string{
        "backup_compression_algorithm",
        "ceph_data_pool_name",
        "container_syscall_intercept_mount",
+       "container_disk_ceph",
 }
 
 // APIExtensionsCount returns the number of available API extensions.

From 23b16427b35cbb79c87fc0798141165909e7fe36 Mon Sep 17 00:00:00 2001
From: anusha-paul <anusha.p...@utexas.edu>
Date: Tue, 12 Nov 2019 23:17:48 -0600
Subject: [PATCH 2/3] lxd: Add support for CEPH FS backed disks and CEPH RBD
 backed disks

Signed-off-by: anusha-paul <anusha.p...@utexas.edu>
---
 lxd/device/config/devices.go    |   5 +
 lxd/device/device_utils_disk.go | 177 +++++++++++++++++++++++++++++++-
 lxd/device/disk.go              | 119 +++++++++++++++------
 3 files changed, 266 insertions(+), 35 deletions(-)

diff --git a/lxd/device/config/devices.go b/lxd/device/config/devices.go
index fc91d73218..0398a20015 100644
--- a/lxd/device/config/devices.go
+++ b/lxd/device/config/devices.go
@@ -3,6 +3,7 @@ package config
 import (
        "fmt"
        "sort"
+       "strings"
 )
 
 // Device represents a LXD container device
@@ -47,6 +48,10 @@ func (device Device) Validate(rules map[string]func(value string) error) error {
                        continue
                }
 
+               if (k == "ceph.cluster_name" || k == "ceph.user_name") && strings.HasPrefix(device["source"], "ceph") {
+                       continue
+               }
+
                return fmt.Errorf("Invalid device option: %s", k)
        }
 
diff --git a/lxd/device/device_utils_disk.go b/lxd/device/device_utils_disk.go
index c6545869fd..7b7b0c9f32 100644
--- a/lxd/device/device_utils_disk.go
+++ b/lxd/device/device_utils_disk.go
@@ -1,9 +1,16 @@
 package device
 
 import (
+       "bufio"
        "fmt"
+       "github.com/lxc/lxd/lxd/db"
+       driver "github.com/lxc/lxd/lxd/storage"
+       "github.com/lxc/lxd/shared/logger"
+       "os"
+       "os/exec"
        "strings"
-
+       "syscall"
+       "time"
        "golang.org/x/sys/unix"
 
        "github.com/lxc/lxd/lxd/state"
@@ -117,3 +124,171 @@ func DiskMount(srcPath string, dstPath string, readonly bool, recursive bool, pr
 
        return nil
 }
+
+func diskCephRbdMap(clusterName string, userName string, poolName string, volumeName string) (string, error) {
+       devPath, err := shared.RunCommand(
+               "rbd",
+               "--id", userName,
+               "--cluster", clusterName,
+               "--pool", poolName,
+               "map",
+               fmt.Sprintf("%s_%s", db.StoragePoolVolumeTypeNameCustom, volumeName))
+       if err != nil {
+               return "", err
+       }
+
+       idx := strings.Index(devPath, "/dev/rbd")
+       if idx < 0 {
+               return "", fmt.Errorf("Failed to detect mapped device path")
+       }
+
+       devPath = devPath[idx:]
+       return strings.TrimSpace(devPath), nil
+}
+
+func diskCephRbdUnmap(deviceName string) error {
+       // deviceName holds the mapped device path (e.g. /dev/rbd0) saved when the volume was mapped.
+       busyCount := 0
+again:
+       _, err := shared.RunCommand(
+               "rbd",
+               "unmap",
+               deviceName)
+       if err != nil {
+               runError, ok := err.(shared.RunError)
+               if ok {
+                       exitError, ok := runError.Err.(*exec.ExitError)
+                       if ok {
+                               waitStatus := exitError.Sys().(syscall.WaitStatus)
+                               if waitStatus.ExitStatus() == 22 {
+                                       // EINVAL (already unmapped)
+                                       return nil
+                               }
+
+                               if waitStatus.ExitStatus() == 16 {
+                                       // EBUSY (currently in use)
+                                       busyCount++
+                                       if busyCount == 10 {
+                                               return err
+                                       }
+
+                                       // Wait a second and try again
+                                       time.Sleep(time.Second)
+                                       goto again
+                               }
+                       }
+               }
+
+               return err
+       }
+       return nil
+}
+
+func cephFsConfig(clusterName string, userName string) ([]string, string, error) {
+       // Parse the CEPH configuration
+       cephConf, err := os.Open(fmt.Sprintf("/etc/ceph/%s.conf", clusterName))
+       if err != nil {
+               return nil, "", err
+       }
+
+       cephMon := []string{}
+
+       scan := bufio.NewScanner(cephConf)
+       for scan.Scan() {
+               line := scan.Text()
+               line = strings.TrimSpace(line)
+
+               if line == "" {
+                       continue
+               }
+
+               if strings.HasPrefix(line, "mon_host") {
+                       fields := strings.SplitN(line, "=", 2)
+                       if len(fields) < 2 {
+                               continue
+                       }
+
+                       servers := strings.Split(fields[1], ",")
+                       for _, server := range servers {
+                               cephMon = append(cephMon, strings.TrimSpace(server))
+                       }
+                       break
+               }
+       }
+
+       if len(cephMon) == 0 {
+               return nil, "", fmt.Errorf("Couldn't find a CPEH mon")
+       }
+
+       // Parse the CEPH keyring
+       cephKeyring, err := os.Open(fmt.Sprintf("/etc/ceph/%v.client.%v.keyring", clusterName, userName))
+       if err != nil {
+               return nil, "", err
+       }
+
+       var cephSecret string
+
+       scan = bufio.NewScanner(cephKeyring)
+       for scan.Scan() {
+               line := scan.Text()
+               line = strings.TrimSpace(line)
+
+               if line == "" {
+                       continue
+               }
+
+               if strings.HasPrefix(line, "key") {
+                       fields := strings.SplitN(line, "=", 2)
+                       if len(fields) < 2 {
+                               continue
+                       }
+
+                       cephSecret = strings.TrimSpace(fields[1])
+                       break
+               }
+       }
+
+       if cephSecret == "" {
+               return nil, "", fmt.Errorf("Couldn't find a keyring entry")
+       }
+
+       return cephMon, cephSecret, nil
+}
+
+func diskCephfsMount(clusterName string, userName string, fsName string, path string) error {
+       logger.Debugf("Mounting CEPHFS")
+       // Parse the namespace / path
+       fields := strings.SplitN(fsName, "/", 2)
+       fsName = fields[0]
+       fsPath := "/"
+       if len(fields) > 1 {
+               fsPath = fields[1]
+       }
+
+       // Get the credentials and host
+       monAddresses, secret, err := cephFsConfig(clusterName, userName)
+       if err != nil {
+               return err
+       }
+
+       // Do the actual mount
+       connected := false
+       for _, monAddress := range monAddresses {
+               uri := fmt.Sprintf("%s:6789:/%s", monAddress, fsPath)
+               err = driver.TryMount(uri, path, "ceph", 0, fmt.Sprintf("name=%v,secret=%v,mds_namespace=%v", userName, secret, fsName))
+               if err != nil {
+                       continue
+               }
+
+               connected = true
+               break
+       }
+
+       if !connected {
+               return err
+       }
+
+       logger.Debugf("Mounted CEPHFS")
+
+       return nil
+}
diff --git a/lxd/device/disk.go b/lxd/device/disk.go
index 853598006b..4ea8657050 100644
--- a/lxd/device/disk.go
+++ b/lxd/device/disk.go
@@ -511,6 +511,47 @@ func (d *disk) createDevice() (string, error) {
        isFile := false
        if d.config["pool"] == "" {
                isFile = !shared.IsDir(srcPath) && !IsBlockdev(srcPath)
+               if strings.HasPrefix(d.config["source"], "cephfs") {
+                       // CephFS filesystem mount.
+                       fields := strings.SplitN(d.config["source"], ":", 2)
+                       fsName := fields[1]
+                       userName := d.config["ceph.user_name"]
+                       clusterName := d.config["ceph.cluster_name"]
+                       path := d.config["path"]
+                       err := diskCephfsMount(clusterName, userName, fsName, path)
+                       if err != nil {
+                               msg := fmt.Sprintf("Could not mount Ceph FS: 
%s.", err)
+                               if !isRequired {
+                                       // Will fail the PathExists test below.
+                                       logger.Warn(msg)
+                               } else {
+                                       return "", fmt.Errorf(msg)
+                               }
+                       }
+               } else if strings.HasPrefix(d.config["source"], "ceph") {
+                       // Map the RBD volume using the pool name, volume name, ceph.user_name and
+                       // ceph.cluster_name from d.config, then record the device path it returns.
+                       fields := strings.SplitN(d.config["source"], ":", 2)
+                       fields = strings.SplitN(fields[1], "/", 2)
+                       poolName := fields[0]
+                       volumeName := fields[1]
+                       userName := d.config["ceph.user_name"]
+                       clusterName := d.config["ceph.cluster_name"]
+                       rbdPath, err := diskCephRbdMap(clusterName, userName, poolName, volumeName)
+                       if err != nil {
+                               msg := fmt.Sprintf("Could not mount map Ceph 
RBD: %s.", err)
+                               if !isRequired {
+                                       // Will fail the PathExists test below.
+                                       logger.Warn(msg)
+                               } else {
+                                       return "", fmt.Errorf(msg)
+                               }
+                       }
+                       err = d.volatileSet(map[string]string{"ceph_rbd_src_path": rbdPath})
+                       if err != nil {
+                               return "", err
+                       }
+               }
        } else {
                // Deal with mounting storage volumes created via the storage api. Extract the name
                // of the storage volume that we are supposed to attach. We assume that the only
@@ -522,41 +563,41 @@ func (d *disk) createDevice() (string, error) {
 
                if filepath.IsAbs(d.config["source"]) {
                        return "", fmt.Errorf("When the \"pool\" property is 
set \"source\" must specify the name of a volume, not a path")
-               }
+               } else {
+                       volumeTypeName := ""
+                       volumeName := filepath.Clean(d.config["source"])
+                       slash := strings.Index(volumeName, "/")
+                       if (slash > 0) && (len(volumeName) > slash) {
+                               // Extract volume name.
+                               volumeName = d.config["source"][(slash + 1):]
+                               // Extract volume type.
+                               volumeTypeName = d.config["source"][:slash]
+                       }
 
-               volumeTypeName := ""
-               volumeName := filepath.Clean(d.config["source"])
-               slash := strings.Index(volumeName, "/")
-               if (slash > 0) && (len(volumeName) > slash) {
-                       // Extract volume name.
-                       volumeName = d.config["source"][(slash + 1):]
-                       // Extract volume type.
-                       volumeTypeName = d.config["source"][:slash]
-               }
-
-               switch volumeTypeName {
-               case db.StoragePoolVolumeTypeNameContainer:
-                       return "", fmt.Errorf("Using container storage volumes 
is not supported")
-               case "":
-                       // We simply received the name of a storage volume.
-                       volumeTypeName = db.StoragePoolVolumeTypeNameCustom
-                       fallthrough
-               case db.StoragePoolVolumeTypeNameCustom:
-                       srcPath = shared.VarPath("storage-pools", d.config["pool"], volumeTypeName, volumeName)
-               case db.StoragePoolVolumeTypeNameImage:
-                       return "", fmt.Errorf("Using image storage volumes is 
not supported")
-               default:
-                       return "", fmt.Errorf("Unknown storage type prefix 
\"%s\" found", volumeTypeName)
-               }
-
-               err := StorageVolumeMount(d.state, d.config["pool"], volumeName, volumeTypeName, d.instance)
-               if err != nil {
-                       msg := fmt.Sprintf("Could not mount storage volume 
\"%s\" of type \"%s\" on storage pool \"%s\": %s.", volumeName, volumeTypeName, 
d.config["pool"], err)
-                       if !isRequired {
-                               // Will fail the PathExists test below.
-                               logger.Warn(msg)
-                       } else {
-                               return "", fmt.Errorf(msg)
+                       switch volumeTypeName {
+                       case db.StoragePoolVolumeTypeNameContainer:
+                               return "", fmt.Errorf("Using container storage 
volumes is not supported")
+                       case "":
+                               // We simply received the name of a storage volume.
+                               volumeTypeName = db.StoragePoolVolumeTypeNameCustom
+                               fallthrough
+                       case db.StoragePoolVolumeTypeNameCustom:
+                               srcPath = shared.VarPath("storage-pools", d.config["pool"], volumeTypeName, volumeName)
+                       case db.StoragePoolVolumeTypeNameImage:
+                               return "", fmt.Errorf("Using image storage 
volumes is not supported")
+                       default:
+                               return "", fmt.Errorf("Unknown storage type 
prefix \"%s\" found", volumeTypeName)
+                       }
+
+                       err := StorageVolumeMount(d.state, d.config["pool"], volumeName, volumeTypeName, d.instance)
+                       if err != nil {
+                               msg := fmt.Sprintf("Could not mount storage 
volume \"%s\" of type \"%s\" on storage pool \"%s\": %s.", volumeName, 
volumeTypeName, d.config["pool"], err)
+                               if !isRequired {
+                                       // Will fail the PathExists test below.
+                                       logger.Warn(msg)
+                               } else {
+                                       return "", fmt.Errorf(msg)
+                               }
                        }
                }
        }
@@ -642,6 +683,16 @@ func (d *disk) postStop() error {
                }
 
        }
+       if d.config["source"] == "ceph" {
+               // Unmap the RBD volume from the host.
+               // Retrieve the volatile config with v := d.volatileGet()
+               // and the mapped device path from v["ceph_rbd_src_path"].
+               v := d.volatileGet()
+               err := diskCephRbdUnmap(v["ceph_rbd_src_path"])
+               if err != nil {
+                       return err
+               }
+       }
 
        devPath := d.getDevicePath(d.name, d.config)
 

From 7ce1c2fb051839bb7660302a5e9812df5c3832ba Mon Sep 17 00:00:00 2001
From: anusha-paul <anusha.p...@utexas.edu>
Date: Wed, 13 Nov 2019 11:52:34 -0600
Subject: [PATCH 3/3] tests: Add test for CEPH backed disks

Signed-off-by: anusha-paul <anusha.p...@utexas.edu>
---
 lxd/device/disk.go                    |  2 +-
 test/suites/container_devices_disk.sh | 42 +++++++++++++++++++++++++++
 2 files changed, 43 insertions(+), 1 deletion(-)

diff --git a/lxd/device/disk.go b/lxd/device/disk.go
index 4ea8657050..04ef66837b 100644
--- a/lxd/device/disk.go
+++ b/lxd/device/disk.go
@@ -683,7 +683,7 @@ func (d *disk) postStop() error {
                }
 
        }
-       if d.config["source"] == "ceph" {
+       if strings.HasPrefix(d.config["source"], "ceph") {
                // Unmap the RBD volume from the host.
                // Retrieve the volatile config with v := d.volatileGet()
                // and the mapped device path from v["ceph_rbd_src_path"].
diff --git a/test/suites/container_devices_disk.sh b/test/suites/container_devices_disk.sh
index 91ef518b0f..695a54a794 100644
--- a/test/suites/container_devices_disk.sh
+++ b/test/suites/container_devices_disk.sh
@@ -5,6 +5,8 @@ test_container_devices_disk() {
   lxc launch testimage foo
 
   test_container_devices_disk_shift
+  test_container_devices_disk_ceph
+  test_container_devices_disk_cephfs
 
   lxc delete -f foo
 }
@@ -59,3 +61,43 @@ test_container_devices_disk_shift() {
   lxc storage volume delete "${POOL}" foo-shift
   lxc stop foo -f
 }
+
+test_container_devices_disk_ceph() {
+  local LXD_BACKEND
+
+  LXD_BACKEND=$(storage_backend "$LXD_DIR")
+  if ! [ "${LXD_BACKEND}" = "ceph" ]; then
+    return
+  fi
+  RBD_POOL_NAME="lxdtest-$(basename "${LXD_DIR}")-disk"
+  ceph osd pool create "${RBD_POOL_NAME}" 1
+  rbd create --pool "${RBD_POOL_NAME}" --size 50M custom_my-volume
+  rbd map --pool "${RBD_POOL_NAME}" --id admin custom_my-volume
+  RBD_DEVICE_PATH="/dev/rbd/${RBD_POOL_NAME}/custom_my-volume"
+  mkfs.ext4 -m0 "${RBD_DEVICE_PATH}"
+  rbd unmap "${RBD_DEVICE_PATH}"
+  lxc launch testimage ceph-disk -c security.privileged=true
+  lxc config device add ceph-disk rbd disk source="ceph:${RBD_POOL_NAME}/my-volume" ceph.user_name=admin ceph.cluster_name=ceph path=/ceph
+  lxc exec ceph-disk -- stat /ceph/lost+found
+  lxc restart ceph-disk
+  lxc exec ceph-disk -- stat /ceph/lost+found
+  lxc delete -f ceph-disk
+}
+
+test_container_devices_disk_cephfs() {
+  local LXD_BACKEND
+
+  LXD_BACKEND=$(storage_backend "$LXD_DIR")
+  if ! [ "${LXD_BACKEND}" = "ceph" ] || [ -z "${LXD_CEPH_CEPHFS:-}" ]; then
+    return
+  fi
+#  ceph osd pool create cephfs_data
+#  ceph osd pool create cephfs_metadata
+#  ceph fs new $LXD_CEPH_CEPHFS cephfs_metadata cephfs_data
+  lxc launch testimage ceph-fs -c security.privileged=true
+  lxc config device add ceph-fs fs disk source="cephfs:${LXD_CEPH_CEPHFS}/" ceph.user_name=admin ceph.cluster_name=ceph path=/cephfs
+  lxc exec ceph-fs -- stat /cephfs
+  lxc restart ceph-fs
+  lxc exec ceph-fs -- stat /cephfs
+  lxc delete -f ceph-fs
+}
\ No newline at end of file
_______________________________________________
lxc-devel mailing list
lxc-devel@lists.linuxcontainers.org
http://lists.linuxcontainers.org/listinfo/lxc-devel
