Attached log file

-----Original Message-----
From: Adiga, Anantha <anantha.ad...@intel.com> 
Sent: Thursday, August 3, 2023 5:50 PM
To: ceph-users@ceph.io
Subject: [ceph-users] Re: cephfs snapshot mirror peer_bootstrap import hung

Adding additional info:

Clusters A and B both have the same cluster name, "ceph", and each has a single
filesystem with the same name, "cephfs". Is that the issue?
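
For what it's worth, one quick way to confirm the two clusters are distinct despite the shared name is to compare their fsids (a minimal check, run on any node of each cluster):

# prints the cluster fsid; the two clusters should report different UUIDs
ceph fsid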


I also tried the peer_add command, and it hangs as well:

root@fl31ca104ja0201:/# ls /etc/ceph/
cr_ceph.conf  client.mirror_remote.keyring  ceph.client.admin.keyring  ceph.conf
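
One thing I was not sure about: Ceph's default keyring search path is /etc/ceph/$cluster.$name.keyring, so if peer_add is expected to resolve client.mirror_remote@cr_ceph from local files, the keyring would conventionally carry the remote cluster prefix. A hypothetical rename, in case the naming matters:

# rename to the <cluster>.<entity>.keyring convention that Ceph searches by default
mv /etc/ceph/client.mirror_remote.keyring /etc/ceph/cr_ceph.client.mirror_remote.keyring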

(remote cluster)
root@cr21meg16ba0101:/etc/ceph# ls /etc/ceph
ceph.client.admin.keyring  ceph.conf   ceph.mon.keyring
  

root@fl31ca104ja0201:/# ceph fs snapshot mirror peer_add cephfs client.mirror_remote@cr_ceph cephfs [v2:172.18.55.71:3300,v1:172.18.55.71:6789],[v2:172.18.55.72:3300,v1:172.18.55.72:6789],[v2:172.18.55.73:3300,v1:172.18.55.73:6789] AQCfwMlkM90pLBAAwXtvpp8j04IvC8tqpAG9bA==
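
For reference, the documented general form of peer_add is below; the single quotes around the mon host list are my addition, to keep the shell from mangling the brackets and commas:

# ceph fs snapshot mirror peer_add <fs_name> <remote_cluster_spec> [<remote_fs_name>] [<remote_mon_host>] [<cephx_key>]
ceph fs snapshot mirror peer_add cephfs client.mirror_remote@cr_ceph cephfs \
    '[v2:172.18.55.71:3300,v1:172.18.55.71:6789],[v2:172.18.55.72:3300,v1:172.18.55.72:6789],[v2:172.18.55.73:3300,v1:172.18.55.73:6789]' \
    AQCfwMlkM90pLBAAwXtvpp8j04IvC8tqpAG9bA==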



Hi

Could you please provide guidance on how to diagnose this issue:

In this case there are two Ceph clusters in different locations: cluster A (4
nodes) and cluster B (3 nodes). Both are already running RGW multi-site, with A
as the master.

CephFS snapshot mirroring is being configured, with cluster A as the primary
and cluster B as the peer. The peer_bootstrap import step on the primary hangs.

On the target cluster:
---------------------------
    "version": "16.2.5",
    "release": "pacific",
    "release_type": "stable"

root@cr21meg16ba0101:/# ceph fs snapshot mirror peer_bootstrap create cephfs 
client.mirror_remote flex2-site
{"token": 
"eyJmc2lkIjogImE2ZjUyNTk4LWU1Y2QtNGEwOC04NDIyLTdiNmZkYjFkNWRiZSIsICJmaWxlc3lzdGVtIjogImNlcGhmcyIsICJ1c2VyIjogImNsaWVudC5taXJyb3JfcmVtb3RlIiwgInNpdGVfbmFtZSI6ICJmbGV4Mi1zaXRlIiwgImtleSI6ICJBUUNmd01sa005MHBMQkFBd1h0dnBwOGowNEl2Qzh0cXBBRzliQT09IiwgIm1vbl9ob3N0IjogIlt2MjoxNzIuMTguNTUuNzE6MzMwMC8wLHYxOjE3Mi4xOC41NS43MTo2Nzg5LzBdIFt2MjoxNzIuMTguNTUuNzM6MzMwMC8wLHYxOjE3Mi4xOC41NS43Mzo2Nzg5LzBdIn0="}
root@cr21meg16ba0101:/var/run/ceph#
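
The token is base64-encoded JSON, so it can be decoded to sanity-check the peer details the primary will import (fsid, filesystem, user, site_name, key, mon_host). Here <token> stands for the string returned above:

# decode the bootstrap token to inspect what the import will use
echo '<token>' | base64 -d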

On the source cluster:
----------------------------
"version": "17.2.6",
    "release": "quincy",
    "release_type": "stable"

root@fl31ca104ja0201:/# ceph -s
  cluster:
    id:     d0a3b6e0-d2c3-11ed-be05-a7a3a1d7a87e
    health: HEALTH_OK

  services:
    mon:           3 daemons, quorum 
fl31ca104ja0202,fl31ca104ja0203,fl31ca104ja0201 (age 111m)
    mgr:           fl31ca104ja0201.nwpqlh(active, since 11h), standbys: 
fl31ca104ja0203, fl31ca104ja0202
    mds:           1/1 daemons up, 2 standby
    osd:           44 osds: 44 up (since 111m), 44 in (since 4w)
    cephfs-mirror: 1 daemon active (1 hosts)
    rgw:           3 daemons active (3 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   25 pools, 769 pgs
    objects: 614.40k objects, 1.9 TiB
    usage:   2.8 TiB used, 292 TiB / 295 TiB avail
    pgs:     769 active+clean

root@fl31ca104ja0302:/# ceph mgr module enable mirroring
module 'mirroring' is already enabled
root@fl31ca104ja0302:/# ceph fs snapshot mirror peer_bootstrap import cephfs eyJmc2lkIjogImE2ZjUyNTk4LWU1Y2QtNGEwOC04NDIyLTdiNmZkYjFkNWRiZSIsICJmaWxlc3lzdGVtIjogImNlcGhmcyIsICJ1c2VyIjogImNsaWVudC5taXJyb3JfcmVtb3RlIiwgInNpdGVfbmFtZSI6ICJmbGV4Mi1zaXRlIiwgImtleSI6ICJBUUNmd01sa005MHBMQkFBd1h0dnBwOGowNEl2Qzh0cXBBRzliQT09IiwgIm1vbl9ob3N0IjogIlt2MjoxNzIuMTguNTUuNzE6MzMwMC8wLHYxOjE3Mi4xOC41NS43MTo2Nzg5LzBdIFt2MjoxNzIuMTguNTUuNzM6MzMwMC8wLHYxOjE3Mi4xOC41NS43Mzo2Nzg5LzBdIn0=
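
To check whether the peer ever got registered on the primary, there is also the peer_list command (my addition; the daemon status below shows an empty peer list):

# lists configured peers for the filesystem; stays empty until the import succeeds
ceph fs snapshot mirror peer_list cephfs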

root@fl31ca104ja0201:/# ceph fs snapshot mirror daemon status
[{"daemon_id": 5300887, "filesystems": [{"filesystem_id": 1, "name": "cephfs", 
"directory_count": 0, "peers": []}]}]

root@fl31ca104ja0302:/var/run/ceph# ceph --admin-daemon /var/run/ceph/ceph-client.cephfs-mirror.fl31ca104ja0302.sypagt.7.94083135960976.asok status
{
    "metadata": {
        "ceph_sha1": "d7ff0d10654d2280e08f1ab989c7cdf3064446a5",
        "ceph_version": "ceph version 17.2.6 
(d7ff0d10654d2280e08f1ab989c7cdf3064446a5) quincy (stable)",
        "entity_id": "cephfs-mirror.fl31ca104ja0302.sypagt",
        "hostname": "fl31ca104ja0302",
        "pid": "7",
        "root": "/"
    },
    "dentry_count": 0,
    "dentry_pinned_count": 0,
    "id": 5194553,
    "inst": {
        "name": {
            "type": "client",
            "num": 5194553
        },
        "addr": {
            "type": "v1",
            "addr": "10.45.129.5:0",
            "nonce": 2497002034
        }
    },
    "addr": {
        "type": "v1",
        "addr": "10.45.129.5:0",
        "nonce": 2497002034
    },
    "inst_str": "client.5194553 10.45.129.5:0/2497002034",
    "addr_str": "10.45.129.5:0/2497002034",
    "inode_count": 1,
    "mds_epoch": 118,
    "osd_epoch": 6266,
    "osd_epoch_barrier": 0,
    "blocklisted": false,
    "fs_name": "cephfs"
}
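
The cephfs-mirror admin socket also exposes a per-filesystem mirror status command; the filesystem id 1 below is taken from the daemon status output above:

# per-filesystem mirror status (peers, directory counts) via the admin socket
ceph --admin-daemon /var/run/ceph/ceph-client.cephfs-mirror.fl31ca104ja0302.sypagt.7.94083135960976.asok \
    fs mirror status cephfs@1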

root@fl31ca104ja0302:/home/general# docker logs ceph-d0a3b6e0-d2c3-11ed-be05-a7a3a1d7a87e-cephfs-mirror-fl31ca104ja0302-sypagt --tail 10
debug 2023-08-03T05:24:27.413+0000 7f8eb6fc0280  0 ceph version 17.2.6 (d7ff0d10654d2280e08f1ab989c7cdf3064446a5) quincy (stable), process cephfs-mirror, pid 7
debug 2023-08-03T05:24:27.413+0000 7f8eb6fc0280  0 pidfile_write: ignore empty --pid-file
debug 2023-08-03T05:24:27.445+0000 7f8eb6fc0280  1 mgrc service_daemon_register cephfs-mirror.5184622 metadata {arch=x86_64,ceph_release=quincy,ceph_version=ceph version 17.2.6 (d7ff0d10654d2280e08f1ab989c7cdf3064446a5) quincy (stable),ceph_version_short=17.2.6,container_hostname=fl31ca104ja0302,container_image=quay.io/ceph/ceph@sha256:af79fedafc42237b7612fe2d18a9c64ca62a0b38ab362e614ad671efa4a0547e,cpu=Intel(R) Xeon(R) Gold 6252 CPU @ 2.10GHz,distro=centos,distro_description=CentOS Stream 8,distro_version=8,hostname=fl31ca104ja0302,id=fl31ca104ja0302.sypagt,instance_id=5184622,kernel_description=#82-Ubuntu SMP Tue Jun 6 23:10:23 UTC 2023,kernel_version=5.15.0-75-generic,mem_swap_kb=8388604,mem_total_kb=527946928,os=Linux}
debug 2023-08-03T05:27:10.419+0000 7f8ea1b2c700  0 client.5194553 ms_handle_reset on v2:10.45.128.141:3300/0
debug 2023-08-03T05:50:10.917+0000 7f8ea1b2c700  0 client.5194553 ms_handle_reset on v2:10.45.128.139:3300/0

Thank you,
Anantha
_______________________________________________
ceph-users mailing list -- ceph-users@ceph.io
To unsubscribe send an email to ceph-users-le...@ceph.io
