OS: CentOS 6.5
Version: Ceph 0.79

ceph.conf:
[global]
        auth supported = none


        ;auth_service_required = cephx
        ;auth_client_required = cephx
        ;auth_cluster_required = cephx
        filestore_xattr_use_omap = true


        max open files = 131072
        log file = /var/log/ceph/$name.log
        pid file = /var/run/ceph/$name.pid
        keyring = /etc/ceph/keyring.admin
        
        ;mon_clock_drift_allowed = 1 ;clock skew detected


[mon]
        mon data = /data/mon$id
        keyring = /etc/ceph/keyring.$name
[mds]
        mds data = /data/mds$id
        keyring = /etc/ceph/keyring.$name
[osd]
        osd data = /data/osd$id
        osd journal = /data/osd$id/journal
        osd journal size = 1024
        keyring = /etc/ceph/keyring.$name
        osd mkfs type = xfs    
        osd mount options xfs = rw,noatime
        osd mkfs options xfs = -f
        filestore fiemap = false


On every server there is one MDS, one MON, and 11 OSDs with 4 TB of space each.
The MON address is a public IP, and each OSD has both a public IP and a cluster IP.
If I install Ceph on 4 servers, the cluster starts normally.
But if I install Ceph on 10 servers, one server's memory usage grows rapidly
until it is exhausted and the server crashes. Then all I can do is restart that server.
What's the difference between a cluster with 4 servers and 10 servers?
Can anyone recommend suitable configuration settings?
Thanks!

_______________________________________________
ceph-users mailing list
[email protected]
http://lists.ceph.com/listinfo.cgi/ceph-users-ceph.com

Reply via email to