Allow creating a new CephFS instance and listing existing ones. As deletion requires coordination between the active MDS and all standby MDSs next in line, this needs a bit more work. One could mark the MDS cluster down and stop the active one; that should work, but as destroying is quite a sensitive operation that is not often needed in production, I deemed it better to only document this for now and leave API endpoints for it to the future.
For index/list I slightly transform the result of an RADOS `fs ls` monitor command, this would allow relative easy display of a CephFS and it's backing metadata and data pools in a GUI. While for now it's not enabled by default and marked as experimental, this API is designed to host multiple CephFS instances - we may not need this at all, but I did not want to limit us early. And anybody liking to experiment can use it after the respective ceph.conf settings. When encountering errors try to rollback. As we verified at the beginning that we did not reused pools, destroy the ones which we created. Signed-off-by: Thomas Lamprecht <t.lampre...@proxmox.com> Co-authored-by: Alwin Antreich <a.antre...@proxmox.com> --- PVE/API2/Ceph.pm | 7 ++ PVE/API2/Ceph/FS.pm | 184 +++++++++++++++++++++++++++++++++++++++++ PVE/API2/Ceph/Makefile | 1 + PVE/CLI/pveceph.pm | 2 + PVE/CephTools.pm | 12 +++ 5 files changed, 206 insertions(+) create mode 100644 PVE/API2/Ceph/FS.pm diff --git a/PVE/API2/Ceph.pm b/PVE/API2/Ceph.pm index a8665dc1..85a13c52 100644 --- a/PVE/API2/Ceph.pm +++ b/PVE/API2/Ceph.pm @@ -542,6 +542,7 @@ use PVE::Storage; use PVE::RESTHandler; use PVE::RPCEnvironment; use PVE::JSONSchema qw(get_standard_option); +use PVE::API2::Ceph::FS; use PVE::API2::Ceph::MDS; use PVE::RADOS; use PVE::CephTools; @@ -561,6 +562,11 @@ __PACKAGE__->register_method ({ path => 'mds', }); +__PACKAGE__->register_method ({ + subclass => "PVE::API2::Ceph::FS", + path => 'fs', +}); + __PACKAGE__->register_method ({ name => 'index', path => '', @@ -592,6 +598,7 @@ __PACKAGE__->register_method ({ { name => 'mon' }, { name => 'osd' }, { name => 'pools' }, + { name => 'fs' }, { name => 'mds' }, { name => 'stop' }, { name => 'start' }, diff --git a/PVE/API2/Ceph/FS.pm b/PVE/API2/Ceph/FS.pm new file mode 100644 index 00000000..588eb49e --- /dev/null +++ b/PVE/API2/Ceph/FS.pm @@ -0,0 +1,184 @@ +package PVE::API2::Ceph::FS; + +use strict; +use warnings; + +use PVE::CephTools; +use PVE::RPCEnvironment; 
+use PVE::JSONSchema qw(get_standard_option); +use PVE::RADOS; +use PVE::RESTHandler; + +use base qw(PVE::RESTHandler); + +__PACKAGE__->register_method ({ + name => 'index', + path => '', + method => 'GET', + description => "Directory index.", + permissions => { + check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1], + }, + protected => 1, + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + }, + }, + returns => { + type => 'array', + items => { + type => "object", + properties => { + name => { + description => "The ceph filesystem name.", + type => 'string', + }, + metadata_pool => { + description => "The name of the metadata pool.", + type => 'string', + }, + data_pool => { + description => "The name of the data pool.", + type => 'string', + }, + }, + }, + links => [ { rel => 'child', href => "{name}" } ], + }, + code => sub { + my ($param) = @_; + + my $rados = PVE::RADOS->new(); + + my $cephfs_list = $rados->mon_command({ prefix => "fs ls" }); + # we get something like: + #{ + # 'metadata_pool_id' => 2, + # 'data_pool_ids' => [ 1 ], + # 'metadata_pool' => 'cephfs_metadata', + # 'data_pools' => [ 'cephfs_data' ], + # 'name' => 'cephfs', + #} + # pass name for our index and + + my $res = []; + map { + push @$res, { + name => $_->{name}, + metadata_pool => $_->{metadata_pool}, + data_pool => $_->{data_pools}->[0], + } + } @$cephfs_list; + + return $res; + } +}); + +__PACKAGE__->register_method ({ + name => 'createfs', + path => '{name}', + method => 'POST', + description => "Create a Ceph filesystem", + proxyto => 'node', + protected => 1, + permissions => { + check => ['perm', '/', [ 'Sys.Modify' ]], + }, + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + name => { + description => "The ceph filesystem name.", + type => 'string', + default => 'cephfs', + optional => 1, + }, + pg_num => { + description => "Number of placement groups 
for the backing data pool. The metadata pool will use a quarter of this.", + type => 'integer', + default => 64, + optional => 1, + minimum => 8, + maximum => 32768, + }, + }, + }, + returns => { type => 'string' }, + code => sub { + my ($param) = @_; + + PVE::CephTools::check_ceph_inited(); + + my $pve_ckeyring_path = PVE::CephTools::get_config('pve_ckeyring_path'); + die "Ceph is not fully configured - missing '$pve_ckeyring_path'\n" + if ! -f $pve_ckeyring_path; + + my $fs_name = $param->{name} // 'cephfs'; + my $pg_num = $param->{pg_num} // 64; + + my $pool_data = "${fs_name}_data"; + my $pool_metadata = "${fs_name}_metadata"; + + my $ls_pools = PVE::CephTools::ls_pools(); + my $existing_pools = { map { $_->{poolname} => 1 } @$ls_pools }; + + die "ceph pools '$pool_data' and/or '$pool_metadata' already exist\n" + if $existing_pools->{$pool_data} || $existing_pools->{$pool_metadata}; + + my $worker = sub { + my $rados = PVE::RADOS->new(); + + my $pool_param = { + application => 'cephfs', + pg_num => $pg_num, + }; + + my @created_pools = (); + eval { + print "creating data pool '$pool_data'...\n"; + PVE::CephTools::create_pool($pool_data, $pool_param, $rados); + push @created_pools, $pool_data; + + print "creating metadata pool '$pool_metadata'...\n"; + $pool_param->{pg_num} = $pg_num >= 32 ? 
$pg_num / 4 : 8; + PVE::CephTools::create_pool($pool_metadata, $pool_param, $rados); + push @created_pools, $pool_metadata; + + print "configuring new CephFS '$fs_name'\n"; + $rados->mon_command({ + prefix => "fs new", + fs_name => $fs_name, + metadata => $pool_metadata, + data => $pool_data, + format => 'plain', + }); + }; + if (my $err = $@) { + $@ = undef; + + if (@created_pools > 0) { + warn "Encountered error after creating at least one pool\n"; + # our old connection is very likely broken now, recreate + $rados = PVE::RADOS->new(); + foreach my $pool (@created_pools) { + warn "cleaning up left over pool '$pool'\n"; + eval { PVE::CephTools::destroy_pool($pool, $rados) }; + warn "$@\n" if $@; + } + } + + die "$err\n"; + } + }; + + my $rpcenv = PVE::RPCEnvironment::get(); + my $user = $rpcenv->get_user(); + + return $rpcenv->fork_worker('cephfscreate', $fs_name, $user, $worker); + } +}); + +1; diff --git a/PVE/API2/Ceph/Makefile b/PVE/API2/Ceph/Makefile index be4d740c..59fcda71 100644 --- a/PVE/API2/Ceph/Makefile +++ b/PVE/API2/Ceph/Makefile @@ -1,6 +1,7 @@ include ../../../defines.mk PERLSOURCE= \ + FS.pm \ MDS.pm all: diff --git a/PVE/CLI/pveceph.pm b/PVE/CLI/pveceph.pm index 90878d9e..57097b13 100755 --- a/PVE/CLI/pveceph.pm +++ b/PVE/CLI/pveceph.pm @@ -19,6 +19,7 @@ use PVE::Tools qw(run_command); use PVE::JSONSchema qw(get_standard_option); use PVE::CephTools; use PVE::API2::Ceph; +use PVE::API2::Ceph::FS; use PVE::API2::Ceph::MDS; use PVE::CLIHandler; @@ -170,6 +171,7 @@ our $cmddef = { }], createpool => [ 'PVE::API2::Ceph', 'createpool', ['name'], { node => $nodename }], destroypool => [ 'PVE::API2::Ceph', 'destroypool', ['name'], { node => $nodename } ], + createfs => [ 'PVE::API2::Ceph::FS', 'createfs', [], { node => $nodename }], createosd => [ 'PVE::API2::CephOSD', 'createosd', ['dev'], { node => $nodename }, $upid_exit], destroyosd => [ 'PVE::API2::CephOSD', 'destroyosd', ['osdid'], { node => $nodename }, $upid_exit], createmon => [ 
'PVE::API2::Ceph', 'createmon', [], { node => $nodename }, $upid_exit], diff --git a/PVE/CephTools.pm b/PVE/CephTools.pm index d0e774ed..de428b79 100644 --- a/PVE/CephTools.pm +++ b/PVE/CephTools.pm @@ -239,6 +239,18 @@ sub create_pool { } +sub ls_pools { + my ($pool, $rados) = @_; + + if (!defined($rados)) { + $rados = PVE::RADOS->new(); + } + + my $res = $rados->mon_command({ prefix => "osd lspools" }); + + return $res; +} + sub destroy_pool { my ($pool, $rados) = @_; -- 2.19.1 _______________________________________________ pve-devel mailing list pve-devel@pve.proxmox.com https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel