GitHub user bilalinamdar added a comment to the discussion: CloudStack 4.22 – 
VM deployment on LINSTOR primary fails during ROOT volume population (qemu-img 
convert), while volume creation succeeds

Nope, I recreated everything. Below is a fresh install again.



<img width="1920" height="1200" alt="image" src="https://github.com/user-attachments/assets/de54c8c3-cd48-4d1a-af03-804337d6d78e" />

<img width="1920" height="1200" alt="image" src="https://github.com/user-attachments/assets/6d94185e-74dc-4d83-a070-958dce605035" />

I also did the qcow2-to-raw conversion below:
```
#linstor resource-group spawn cloudstack cs-1b41101f-f69f-11f0-a88f-de01a885d20e 5G
#linstor resource-group spawn cloudstack cs-837ecc2d-b777-4621-8aae-62abbcb41fe2 5G
#qemu-img convert -p -f qcow2 -O raw /export/secondary/template/tmpl/1/3/837ecc2d-b777-4621-8aae-62abbcb41fe2.qcow2 /dev/drbd/by-res/cs-1b41101f-f69f-11f0-a88f-de01a885d20e/0
#qemu-img convert -p -f qcow2 -O raw /export/secondary/template/tmpl/1/3/837ecc2d-b777-4621-8aae-62abbcb41fe2.qcow2 /dev/drbd/by-res/cs-837ecc2d-b777-4621-8aae-62abbcb41fe2/0
```
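
(For reference, a sanity check that could be run after the manual convert. This is a suggested verification, not part of my run above; it reuses the same template path and DRBD device.)

```
# Compare the qcow2 template against the raw data on the DRBD device;
# a clean convert prints "Images are identical."
qemu-img compare -f qcow2 -F raw \
  /export/secondary/template/tmpl/1/3/837ecc2d-b777-4621-8aae-62abbcb41fe2.qcow2 \
  /dev/drbd/by-res/cs-1b41101f-f69f-11f0-a88f-de01a885d20e/0

# The virtual size reported here has to fit inside the 5G spawned volume.
qemu-img info /export/secondary/template/tmpl/1/3/837ecc2d-b777-4621-8aae-62abbcb41fe2.qcow2
```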


Here is the entire install script:

```
# 21-Jan-2026   LINSTOR reinstall, and CloudStack too.

#Remove LINSTOR + CLOUDSTACK + MARIADB
chattr -i /var/lib/linstor
chattr -i /var/lib/mysql
systemctl disable --now linstor-controller
apt purge -y linstor-controller linstor-satellite linstor-client drbd-reactor \
  resource-agents-base drbd-dkms chrony qemu-kvm libvirt-daemon-system \
  libvirt-clients bridge-utils cloudstack-* mariadb*
rm -rf /etc/drbd-reactor.d
rm -rf /etc/linstor
rm -f /etc/systemd/system/var-lib-linstor.mount
rm -rf /etc/systemd/system/var-lib-mysql.mount
rm -rf /etc/cloudstack
rm -rf /var/lib/mysql
rm -rf /var/lib/cloudstack
rm -rf /var/log/cloudstack
rm -rf /usr/share/cloudstack-common
#rm -rf /export/secondary/*
apt autoremove -y
sleep 10
init 6


systemctl stop avahi-daemon
systemctl disable avahi-daemon
apt purge -y avahi-daemon
apt autoremove

# One per node:
hostnamectl set-hostname cskvm01.poc.local --static   # on cskvm01
hostnamectl set-hostname cskvm02.poc.local --static   # on cskvm02
hostnamectl set-hostname cskvm03.poc.local --static   # on cskvm03

echo "cskvm01.poc.local" > /etc/hostname   # on cskvm01
echo "cskvm02.poc.local" > /etc/hostname   # on cskvm02
echo "cskvm03.poc.local" > /etc/hostname   # on cskvm03

systemctl restart systemd-hostnamed
resolvectl flush-caches



hostname
hostname -f
getent hosts cskvm01.poc.local
getent hosts cskvm01 || echo "short name gone"

#nmcli general hostname
#nmcli general hostname cskvm01.poc.local


init 6

############  LINSTOR INSTALL
add-apt-repository ppa:linbit/linbit-drbd9-stack
apt install -y linstor-controller linstor-satellite linstor-client drbd-reactor \
  resource-agents-base drbd-dkms

rmmod drbd && modprobe drbd && cat /proc/drbd
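
# (Suggested check, not in my original run: confirm the DRBD 9 kernel module
# actually loaded; a stale in-tree 8.x module is a classic failure mode here.)
modinfo drbd | grep ^version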

systemctl disable --now linstor-controller #on CSKVM02 AND CSKVM03 only


lvmconfig --type current --mergedconfig \
  --config 'devices { global_filter = [ "r|^/dev/drbd|", "r|^/dev/zd|" ] }' \
  > /etc/lvm/lvm.conf.new && mv /etc/lvm/lvm.conf.new /etc/lvm/lvm.conf


pvcreate /dev/sdb
vgcreate storage /dev/sdb
lvcreate storage --thin --name storage-thin --extents 100%FREE


#On Main
linstor node create cskvm01.poc.local 10.50.11.101 --node-type Combined
linstor node create cskvm02.poc.local 10.50.11.102 --node-type Satellite
linstor node create cskvm03.poc.local 10.50.11.103 --node-type Satellite
linstor n l

linstor storage-pool create lvmthin cskvm01.poc.local thinpool storage/storage-thin
linstor storage-pool create lvmthin cskvm02.poc.local thinpool storage/storage-thin
linstor storage-pool create lvmthin cskvm03.poc.local thinpool storage/storage-thin

## 3 linstor ha

linstor resource-group create --storage-pool thinpool --place-count 3 ha-grp
linstor volume-group create ha-grp
linstor resource-group drbd-options --auto-promote=no --quorum=majority --on-no-quorum=io-error --on-no-data-accessible=io-error ha-grp
linstor resource-group spawn ha-grp linstor_db 300M
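
# (Suggested check, not in my original run: linstor_db should be UpToDate on
# all three nodes before the controller DB is moved onto it.)
drbdadm status linstor_db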

linstor rg c cloudstack --storage-pool thinpool
linstor vg c cloudstack
linstor rg opt --on-no-data-accessible suspend-io --on-no-quorum suspend-io cloudstack



# on all
cat << EOF > /etc/systemd/system/var-lib-linstor.mount
[Unit]
Description=Filesystem for the LINSTOR controller

[Mount]
# you can use the minor like /dev/drbdX or the udev symlink
What=/dev/drbd/by-res/linstor_db/0
Where=/var/lib/linstor
EOF
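
# (Not in my original run, but systemd only picks up the new unit file after
# a reload.)
systemctl daemon-reload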


# on main
systemctl disable --now linstor-controller
mv /var/lib/linstor{,.orig}
mkdir /var/lib/linstor
chattr +i /var/lib/linstor
drbdadm primary linstor_db
mkfs.ext4 /dev/drbd/by-res/linstor_db/0
systemctl start var-lib-linstor.mount
cp -r /var/lib/linstor.orig/* /var/lib/linstor
systemctl start linstor-controller

# on all
cat << EOF > /etc/drbd-reactor.d/linstor_db.toml
[[promoter]]
[promoter.resources.linstor_db]
start = ["var-lib-linstor.mount", "ocf:heartbeat:IPaddr2 service_ip 
cidr_netmask=22 ip=10.50.11.251", "linstor-controller.service"]
EOF
systemctl reload drbd-reactor.service

cat << EOF > /etc/linstor/linstor-client.conf
[global]
controllers=10.50.11.251
EOF
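
# (Suggested check, not in my original run: the client should now reach the
# controller through the drbd-reactor managed VIP.)
linstor --controllers 10.50.11.251 node list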


################### 4. CLOUDSTACK HA
echo "deb [trusted=yes] https://download.cloudstack.org/ubuntu noble 4.22" > 
/etc/apt/sources.list.d/cloudstack.list
apt update

apt install -y cloudstack-management mariadb-server mariadb-client nfs-kernel-server
#systemctl disable --now mariadb


cat << EOF > /etc/mysql/conf.d/cloudstack.cnf
[mysqld]
innodb_rollback_on_timeout=1
innodb_lock_wait_timeout=600
max_connections=700
log_bin=mysql-bin
binlog_format=ROW
EOF

sed -i 's/^\s*bind-address\s*=.*/bind-address = 0.0.0.0/' /etc/mysql/mariadb.conf.d/50-server.cnf

mysql_secure_installation

echo "GRANT ALL PRIVILEGES ON *.* TO 'root'@'10.50.%.%' IDENTIFIED BY 
'nonsense' WITH GRANT OPTION; FLUSH PRIVILEGES;" | mariadb mysql
echo "SET PASSWORD FOR 'root'@'localhost' = PASSWORD('nonsense'); FLUSH 
PRIVILEGES;" | mariadb mysql


systemctl restart mariadb

cloudstack-setup-databases cloud:[email protected] --deploy-as=root:nonsense
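
# (Suggested check, not in my original run: the deploy step should have
# created the cloud and cloud_usage databases.)
mariadb -u root -pnonsense -e "SHOW DATABASES LIKE 'cloud%';"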


chown root:cloud /etc/cloudstack/management/key

systemctl restart cloudstack-management
cloudstack-setup-management
systemctl restart cloudstack-management

## 5 agent prepare
# on all




FOR KVM, THE SCRIPT BELOW WILL DO THE FOLLOWING:

Timezone: Asia/Dubai
Chrony: enabled and running
AppArmor: disabled
KVM / libvirt: install
libvirtd.conf:
libvirtd TCP: listening on port 16509



##################################################################
sudo bash -c '
set -e

GREEN="\033[1;32m"
YELLOW="\033[1;33m"
BLUE="\033[1;34m"
RED="\033[1;31m"
BOLD="\033[1m"
RESET="\033[0m"

echo -e "${BLUE}${BOLD}==> CloudStack KVM bootstrap started${RESET}"

timedatectl set-timezone Asia/Dubai

apt update -qq
apt install -y chrony qemu-kvm libvirt-daemon-system libvirt-clients \
  bridge-utils cloudstack-agent

systemctl enable --now chrony
#systemctl stop apparmor || true
#systemctl disable apparmor || true


CONF="/etc/libvirt/libvirtd.conf"

EXPECTED=$(cat <<EOF
listen_tls = 0
listen_tcp = 1
auth_tcp = "none"
tcp_port = "16509"
mdns_adv = 0
EOF
)

LAST5=$(grep -v "^[[:space:]]*$" "$CONF" | tail -n 5)

LIBVIRT_ACTION="already configured"

if [ "$LAST5" != "$EXPECTED" ]; then
  cat >> "$CONF" <<EOF

listen_tls = 0
listen_tcp = 1
auth_tcp = "none"
tcp_port = "16509"
mdns_adv = 0
EOF
  LIBVIRT_ACTION="configured (TCP enabled)"
fi

echo "LIBVIRTD_ARGS=\"--listen\"" > /etc/default/libvirtd

systemctl mask \
  libvirtd.socket \
  libvirtd-ro.socket \
  libvirtd-admin.socket \
  libvirtd-tls.socket \
  libvirtd-tcp.socket

systemctl restart libvirtd
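
# (Suggested check, not in my original run: libvirtd should now accept plain
# TCP connections, which is what the CloudStack agent uses.)
virsh -c qemu+tcp://127.0.0.1/system list --all || true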

echo
echo -e "${BOLD}${GREEN}========= SUMMARY =========${RESET}"
echo -e "${BOLD}Timezone:${RESET} Asia/Dubai"
echo -e "${BOLD}Chrony:${RESET} enabled and running"
echo -e "${BOLD}AppArmor:${RESET} disabled"
echo -e "${BOLD}KVM / libvirt:${RESET} installed"
echo -e "${BOLD}libvirtd.conf:${RESET} ${YELLOW}${LIBVIRT_ACTION}${RESET}"




if ss -lntp | grep -q ":16509"; then
  echo -e "${BOLD}libvirtd TCP:${RESET} ${GREEN}listening on port 16509${RESET}"
else
  echo -e "${BOLD}libvirtd TCP:${RESET} ${RED}NOT listening on port 
16509${RESET}"
fi

echo -e "${BOLD}${GREEN}===========================${RESET}"
echo
'

# Disable apparmor on libvirtd
ln -s /etc/apparmor.d/usr.sbin.libvirtd /etc/apparmor.d/disable/
ln -s /etc/apparmor.d/usr.lib.libvirt.virt-aa-helper /etc/apparmor.d/disable/
apparmor_parser -R /etc/apparmor.d/usr.sbin.libvirtd
apparmor_parser -R /etc/apparmor.d/usr.lib.libvirt.virt-aa-helper





sudo sed -i \
  -E 's|^#?vnc_listen.*|vnc_listen = "0.0.0.0"|' \
  /etc/libvirt/qemu.conf


grep -E '^vnc_listen' /etc/libvirt/qemu.conf


systemctl restart libvirtd

systemctl restart multipathd
systemctl restart libvirtd


# LVM CONFIG (MOSTLY FOR DRBD LINSTOR)

sudo lvmconfig --type current --mergedconfig \
  --config 'devices { global_filter = [ "r|^/dev/drbd.*|", "r|^/dev/zd.*|", "a|.*|" ] }' \
  > /etc/lvm/lvm.conf


sudo systemctl restart lvm2-monitor || true
sudo pvscan --cache
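
# (Suggested check, not in my original run: with the filter active, pvs should
# list /dev/sdb but no /dev/drbd* or /dev/zd* devices.)
sudo pvs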

apt install linstor-gui
sudo systemctl daemon-reexec
sudo systemctl daemon-reload



# Extra: added as per your last request
linstor resource-group spawn cloudstack cs-1b41101f-f69f-11f0-a88f-de01a885d20e 5G
linstor resource-group spawn cloudstack cs-837ecc2d-b777-4621-8aae-62abbcb41fe2 5G
qemu-img convert -p -f qcow2 -O raw /export/secondary/template/tmpl/1/3/837ecc2d-b777-4621-8aae-62abbcb41fe2.qcow2 /dev/drbd/by-res/cs-1b41101f-f69f-11f0-a88f-de01a885d20e/0
qemu-img convert -p -f qcow2 -O raw /export/secondary/template/tmpl/1/3/837ecc2d-b777-4621-8aae-62abbcb41fe2.qcow2 /dev/drbd/by-res/cs-837ecc2d-b777-4621-8aae-62abbcb41fe2/0
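
# (Suggested check, not in my original run: both spawned resources should show
# UpToDate on all replicas after the convert.)
linstor resource list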


```



Also, here are some other things:

```

root@cskvm01:~# cat /etc/hosts
127.0.0.1   localhost

# ===== Management / Cluster identity (PRIMARY) =====
10.50.10.100  csmgmt01.poc.local
10.50.11.101  cskvm01.poc.local
10.50.11.102  cskvm02.poc.local
10.50.11.103  cskvm03.poc.local

# ===== Storage network (ALIASES ONLY) =====
10.70.10.100  csmgmt01-storage
10.70.10.101  cskvm01-storage
10.70.10.102  cskvm02-storage
10.70.10.103  cskvm03-storage

# ===== Backup network (ALIASES ONLY) =====
10.70.11.100  csmgmt01-backup
10.70.11.101  cskvm01-backup
10.70.11.102  cskvm02-backup
10.70.11.103  cskvm03-backup

root@cskvm01:~#

```





Also, I was using the full FQDN and made sure that was the norm across the board, but as you can notice, `linstor n l` shows it at first, and later, when adding storage, it changes to the short name.

```

root@cskvm01:~# linstor node create cskvm01.poc.local 10.50.11.101 --node-type Combined
linstor node create cskvm02.poc.local 10.50.11.102 --node-type Satellite
SUCCESS:
Description:
    New node 'cskvm01.poc.local' registered.
Details:
    Node 'cskvm01.poc.local' UUID is: 40544faf-9771-4046-ba83-831b967d6b95
SUCCESS:
Description:
    Node 'cskvm01.poc.local' authenticated
Details:
    Supported storage providers: [diskless, lvm, lvm_thin, file, file_thin, 
remote_spdk, ebs_init, ebs_target]
    Supported resource layers  : [drbd, luks, writecache, cache, bcache, 
storage]
    Unsupported storage providers:
        ZFS: 'cat /sys/module/zfs/version' returned with exit code 1
             IO exception occured when running 'zfs --version': Cannot run 
program "zfs": error=2, No such file or directory
        ZFS_THIN: 'cat /sys/module/zfs/version' returned with exit code 1
                  IO exception occured when running 'zfs --version': Cannot run 
program "zfs": error=2, No such file or directory
        SPDK: IO exception occured when running 'rpc.py spdk_get_version': 
Cannot run program "rpc.py": error=2, No such file or directory
        STORAGE_SPACES: This tool does not exist on the Linux platform.
        STORAGE_SPACES_THIN: This tool does not exist on the Linux platform.

    Unsupported resource layers:
        NVME: IO exception occured when running 'nvme version': Cannot run 
program "nvme": error=2, No such file or directory
SUCCESS:
Description:
    New node 'cskvm02.poc.local' registered.
Details:
    Node 'cskvm02.poc.local' UUID is: 43a6e2f1-d69d-4f54-b96a-9097c68434fc
SUCCESS:
Description:
    Node 'cskvm02.poc.local' authenticated
Details:
    Supported storage providers: [diskless, lvm, lvm_thin, file, file_thin, 
remote_spdk, ebs_init, ebs_target]
    Supported resource layers  : [drbd, luks, writecache, cache, bcache, 
storage]
    Unsupported storage providers:
        ZFS: 'cat /sys/module/zfs/version' returned with exit code 1
             IO exception occured when running 'zfs --version': Cannot run 
program "zfs": error=2, No such file or directory
        ZFS_THIN: 'cat /sys/module/zfs/version' returned with exit code 1
                  IO exception occured when running 'zfs --version': Cannot run 
program "zfs": error=2, No such file or directory
        SPDK: IO exception occured when running 'rpc.py spdk_get_version': 
Cannot run program "rpc.py": error=2, No such file or directory
        STORAGE_SPACES: This tool does not exist on the Linux platform.
        STORAGE_SPACES_THIN: This tool does not exist on the Linux platform.

    Unsupported resource layers:
        NVME: IO exception occured when running 'nvme version': Cannot run 
program "nvme": error=2, No such file or directory
root@cskvm01:~# linstor n l
╭────────────────────────────────────────────────────────────────────╮
┊ Node              ┊ NodeType  ┊ Addresses                 ┊ State  ┊
╞════════════════════════════════════════════════════════════════════╡
┊ cskvm01.poc.local ┊ COMBINED  ┊ 10.50.11.101:3366 (PLAIN) ┊ Online ┊
┊ cskvm02.poc.local ┊ SATELLITE ┊ 10.50.11.102:3366 (PLAIN) ┊ Online ┊
╰────────────────────────────────────────────────────────────────────╯
root@cskvm01:~# linstor node create cskvm03.poc.local 10.50.11.103 --node-type Satellite
linstor n l
SUCCESS:
Description:
    New node 'cskvm03.poc.local' registered.
Details:
    Node 'cskvm03.poc.local' UUID is: 39142dd8-8fe5-47a3-9cdf-cc4743b34798
SUCCESS:
Description:
    Node 'cskvm03.poc.local' authenticated
Details:
    Supported storage providers: [diskless, lvm, lvm_thin, file, file_thin, 
remote_spdk, ebs_init, ebs_target]
    Supported resource layers  : [drbd, luks, writecache, cache, bcache, 
storage]
    Unsupported storage providers:
        ZFS: 'cat /sys/module/zfs/version' returned with exit code 1
             IO exception occured when running 'zfs --version': Cannot run 
program "zfs": error=2, No such file or directory
        ZFS_THIN: 'cat /sys/module/zfs/version' returned with exit code 1
                  IO exception occured when running 'zfs --version': Cannot run 
program "zfs": error=2, No such file or directory
        SPDK: IO exception occured when running 'rpc.py spdk_get_version': 
Cannot run program "rpc.py": error=2, No such file or directory
        STORAGE_SPACES: This tool does not exist on the Linux platform.
        STORAGE_SPACES_THIN: This tool does not exist on the Linux platform.

    Unsupported resource layers:
        NVME: IO exception occured when running 'nvme version': Cannot run 
program "nvme": error=2, No such file or directory
╭────────────────────────────────────────────────────────────────────╮
┊ Node              ┊ NodeType  ┊ Addresses                 ┊ State  ┊
╞════════════════════════════════════════════════════════════════════╡
┊ cskvm01.poc.local ┊ COMBINED  ┊ 10.50.11.101:3366 (PLAIN) ┊ Online ┊
┊ cskvm02.poc.local ┊ SATELLITE ┊ 10.50.11.102:3366 (PLAIN) ┊ Online ┊
┊ cskvm03.poc.local ┊ SATELLITE ┊ 10.50.11.103:3366 (PLAIN) ┊ Online ┊
╰────────────────────────────────────────────────────────────────────╯
root@cskvm01:~# linstor storage-pool create lvmthin cskvm01.poc.local thinpool storage/storage-thin
linstor storage-pool create lvmthin cskvm02.poc.local thinpool storage/storage-thin
linstor storage-pool create lvmthin cskvm03.poc.local thinpool storage/storage-thin

SUCCESS:
    Successfully set property key(s): StorDriver/StorPoolName
SUCCESS:
Description:
    New storage pool 'thinpool' on node 'cskvm01.poc.local' registered.
Details:
    Storage pool 'thinpool' on node 'cskvm01.poc.local' UUID is: 
e55fef28-51cf-42a8-868e-54c8a9aafbf3
SUCCESS:
    (cskvm01.poc.local) Changes applied to storage pool 'thinpool' of node 
'cskvm01.poc.local'
SUCCESS:
    Storage pool updated on 'cskvm01.poc.local'
SUCCESS:
    Successfully set property key(s): StorDriver/StorPoolName
SUCCESS:
Description:
    New storage pool 'thinpool' on node 'cskvm02.poc.local' registered.
Details:
    Storage pool 'thinpool' on node 'cskvm02.poc.local' UUID is: 
e72dc60e-e9c8-4011-b897-d2cdee5e091d
SUCCESS:
    (cskvm02.poc.local) Changes applied to storage pool 'thinpool' of node 
'cskvm02.poc.local'
SUCCESS:
    Storage pool updated on 'cskvm02.poc.local'
SUCCESS:
    Successfully set property key(s): StorDriver/StorPoolName
SUCCESS:
Description:
    New storage pool 'thinpool' on node 'cskvm03.poc.local' registered.
Details:
    Storage pool 'thinpool' on node 'cskvm03.poc.local' UUID is: 
37ed7faa-8a2c-4a73-b224-0f286044fe28
SUCCESS:
    (cskvm03.poc.local) Changes applied to storage pool 'thinpool' of node 
'cskvm03.poc.local'
SUCCESS:
    Storage pool updated on 'cskvm03.poc.local'
root@cskvm01:~#
root@cskvm01:~# linstor resource-group create --storage-pool thinpool --place-count 3 ha-grp
linstor volume-group create ha-grp
linstor resource-group drbd-options --auto-promote=no --quorum=majority --on-no-quorum=io-error --on-no-data-accessible=io-error ha-grp
linstor resource-group spawn ha-grp linstor_db 300M
SUCCESS:
Description:
    New resource group 'ha-grp' created.
Details:
    Resource group 'ha-grp' UUID is: 9366f27d-2505-4a9f-ae20-01121781b66b
SUCCESS:
    New volume group with number '0' of resource group 'ha-grp' created.
SUCCESS:
    Successfully set property key(s): 
DrbdOptions/Resource/auto-promote,DrbdOptions/Resource/on-no-data-accessible,DrbdOptions/Resource/on-no-quorum,DrbdOptions/Resource/quorum
SUCCESS:
Description:
    Resource group 'ha-grp' modified.
Details:
    Resource group 'ha-grp' UUID is: 9366f27d-2505-4a9f-ae20-01121781b66b
SUCCESS:
    Volume definition with number '0' successfully  created in resource 
definition 'linstor_db'.
SUCCESS:
Description:
    New resource definition 'linstor_db' created.
Details:
    Resource definition 'linstor_db' UUID is: 
a516e8d8-e529-479e-b34e-6e29a531dd84
SUCCESS:
    Successfully set property key(s): StorPoolName
SUCCESS:
    Successfully set property key(s): StorPoolName
SUCCESS:
    Successfully set property key(s): StorPoolName
SUCCESS:
Description:
    Resource 'linstor_db' successfully autoplaced on 3 nodes
Details:
    Used nodes (storage pool name): 'cskvm01.poc.local (thinpool)', 
'cskvm02.poc.local (thinpool)', 'cskvm03.poc.local (thinpool)'
INFO:
    Updated linstor_db DRBD auto verify algorithm to 'sha256'
INFO:
    Resource-definition property 'DrbdOptions/Resource/quorum' updated from 
undefined to 'majority' by User
SUCCESS:
    (cskvm03.poc.local) Volume number 0 of resource 'linstor_db' [LVM-Thin] 
created
SUCCESS:
    (cskvm03.poc.local) Resource 'linstor_db' [DRBD] adjusted.
SUCCESS:
    Created resource 'linstor_db' on 'cskvm03.poc.local'
SUCCESS:
    (cskvm02.poc.local) Volume number 0 of resource 'linstor_db' [LVM-Thin] 
created
SUCCESS:
    (cskvm02.poc.local) Resource 'linstor_db' [DRBD] adjusted.
SUCCESS:
    Created resource 'linstor_db' on 'cskvm02.poc.local'
SUCCESS:
    (cskvm01.poc.local) Volume number 0 of resource 'linstor_db' [LVM-Thin] 
created
SUCCESS:
    (cskvm01.poc.local) Resource 'linstor_db' [DRBD] adjusted.
SUCCESS:
    Created resource 'linstor_db' on 'cskvm01.poc.local'
SUCCESS:
Description:
    Resource 'linstor_db' on 'cskvm03.poc.local' ready
Details:
    Resource group: ha-grp
SUCCESS:
Description:
    Resource 'linstor_db' on 'cskvm02.poc.local' ready
Details:
    Resource group: ha-grp
SUCCESS:
Description:
    Resource 'linstor_db' on 'cskvm01.poc.local' ready
Details:
    Resource group: ha-grp
SUCCESS:
    (cskvm01.poc.local) Resource 'linstor_db' [DRBD] adjusted.
SUCCESS:
    (cskvm03.poc.local) Resource 'linstor_db' [DRBD] adjusted.
SUCCESS:
    (cskvm02.poc.local) Resource 'linstor_db' [DRBD] adjusted.
root@cskvm01:~# cat << EOF > /etc/systemd/system/var-lib-linstor.mount
[Unit]
Description=Filesystem for the LINSTOR controller

[Mount]
# you can use the minor like /dev/drbdX or the udev symlink
What=/dev/drbd/by-res/linstor_db/0
Where=/var/lib/linstor
EOF

root@cskvm01:~# systemctl disable --now linstor-controller
mv /var/lib/linstor{,.orig}
mkdir /var/lib/linstor
chattr +i /var/lib/linstor
drbdadm primary linstor_db
mkfs.ext4 /dev/drbd/by-res/linstor_db/0
systemctl start var-lib-linstor.mount
cp -r /var/lib/linstor.orig/* /var/lib/linstor
systemctl start linstor-controller
Removed 
"/etc/systemd/system/multi-user.target.wants/linstor-controller.service".
mv: cannot overwrite '/var/lib/linstor.orig/linstor': Directory not empty
mkdir: cannot create directory ‘/var/lib/linstor’: File exists
mke2fs 1.47.0 (5-Feb-2023)
Discarding device blocks: done
Creating filesystem with 77798 4k blocks and 77808 inodes
Filesystem UUID: 86e8a5ae-b7a7-419f-96bb-9fda7ab9186a
Superblock backups stored on blocks:
        32768

Allocating group tables: done
Writing inode tables: done
Creating journal (4096 blocks): done
Writing superblocks and filesystem accounting information: done

root@cskvm01:~# cat << EOF > /etc/drbd-reactor.d/linstor_db.toml
[[promoter]]
[promoter.resources.linstor_db]
start = ["var-lib-linstor.mount", "ocf:heartbeat:IPaddr2 service_ip 
cidr_netmask=22 ip=10.50.11.251", "linstor-controller.service"]
EOF
systemctl reload drbd-reactor.service

cat << EOF > /etc/linstor/linstor-client.conf
[global]
controllers=10.50.11.251
EOF
root@cskvm01:~# linstor n l
╭──────────────────────────────────────────────────────────╮
┊ Node    ┊ NodeType  ┊ Addresses                 ┊ State  ┊
╞══════════════════════════════════════════════════════════╡
┊ cskvm01 ┊ SATELLITE ┊ 10.50.11.101:3366 (PLAIN) ┊ Online ┊
┊ cskvm02 ┊ SATELLITE ┊ 10.50.11.102:3366 (PLAIN) ┊ Online ┊
┊ cskvm03 ┊ SATELLITE ┊ 10.50.11.103:3366 (PLAIN) ┊ Online ┊
╰──────────────────────────────────────────────────────────╯


```
As you can see, even though I added the FQDN, it later became the short name.
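
(A suggested diagnostic, not something from my run: compare what each box calls itself with what LINSTOR has registered. A mismatch between `hostname -f` and `uname -n` on the satellites would explain FQDNs collapsing to short names. This assumes root SSH between the nodes.)

```
# Hypothetical check: hostnames taken from my setup above.
for h in cskvm01.poc.local cskvm02.poc.local cskvm03.poc.local; do
  echo "== $h =="
  ssh "root@$h" 'hostname; hostname -f; uname -n'
done
linstor node list
```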


Logs: [cskvm01.logs.tar.gz](https://github.com/user-attachments/files/24770881/cskvm01.logs.tar.gz)

I also tried removing and re-adding the primary storage; you can check that in the logs.

Also, if I remove and re-add the NFS primary it starts working, but the LINSTOR primary doesn't. That is the issue, still not resolved.


GitHub link: 
https://github.com/apache/cloudstack/discussions/12388#discussioncomment-15561437
