Greetings, We had a problem with the hard drive of one of our nodes, and we had to reinstall the server.
Everything returned to normal operation, but we have a problem with the CEPH monitor that was out of the quorum. # pveversion pve-manager/4.4-1/eb2d6f1e (running kernel: 4.4.35-1-pve) # ceph health detail 2017-07-11 13:49:17.115883 7f256c273700 0 -- :/3226337302 >> 10.10.10.12:6789/0 pipe(0x7f256805a550 sd=3 :0 s=1 pgs=0 cs=0 l=1 c=0x7f256805e840).fault HEALTH_WARN 1 mons down, quorum 0,1 0,1 mon.2 (rank 2) addr 10.10.10.12:6789/0 is down (out of quorum) # ceph mon remove mon.2 mon mon.2 does not exist or has already been removed # pveceph createmon monitor address '10.10.10.12:6789' already in use by 'mon.2' # pveceph status { "quorum" : [ 0, 1 ], "osdmap" : { "osdmap" : { "num_osds" : 12, "num_up_osds" : 12, "full" : false, "epoch" : 7409, "num_in_osds" : 12, "num_remapped_pgs" : 0, "nearfull" : false } }, "pgmap" : { "write_bytes_sec" : 2684, "version" : 19003699, "data_bytes" : 4676668142879, "bytes_used" : 9346006102016, "pgs_by_state" : [ { "state_name" : "active+clean", "count" : 511 }, { "state_name" : "active+clean+scrubbing+deep", "count" : 1 } ], "num_pgs" : 512, "read_bytes_sec" : 3020, "op_per_sec" : 1, "bytes_avail" : 14647047196672, "bytes_total" : 23993053298688 }, "monmap" : { "fsid" : "f5d2413a-0c0c-4bfa-b709-74bc07749789", "mons" : [ { "rank" : 0, "addr" : "10.10.10.10:6789/0", "name" : "0" }, { "name" : "1", "rank" : 1, "addr" : "10.10.10.11:6789/0" }, { "addr" : "10.10.10.12:6789/0", "rank" : 2, "name" : "2" } ], "modified" : "2016-10-06 12:48:35.417421", "created" : "2016-10-06 12:48:15.988857", "epoch" : 3 }, "quorum_names" : [ "0", "1" ], "mdsmap" : { "epoch" : 1, "up" : 0, "in" : 0, "by_rank" : [], "max" : 0 }, "election_epoch" : 510, "fsid" : "f5d2413a-0c0c-4bfa-b709-74bc07749789", "health" : { "overall_status" : "HEALTH_WARN", "detail" : [], "summary" : [ { "severity" : "HEALTH_WARN", "summary" : "1 mons down, quorum 0,1 0,1" } ], "timechecks" : { "round" : 120, "round_status" : "finished", "epoch" : 510, "mons" : [ { "skew" : 0, "name" : 
"0", "latency" : 0, "health" : "HEALTH_OK" }, { "health" : "HEALTH_OK", "name" : "1", "latency" : 0.000667, "skew" : 0.000802 } ] }, "health" : { "health_services" : [ { "mons" : [ { "kb_avail" : 9161916, "kb_total" : 28510348, "name" : "0", "kb_used" : 17877152, "store_stats" : { "bytes_sst" : 0, "bytes_misc" : 28985450, "last_updated" : "0.000000", "bytes_total" : 31036097, "bytes_log" : 2050647 }, "avail_percent" : 32, "last_updated" : "2017-07-11 13:50:12.768057", "health" : "HEALTH_OK" }, { "name" : "1", "kb_total" : 28510348, "kb_avail" : 18154988, "avail_percent" : 63, "last_updated" : "2017-07-11 13:50:39.318205", "store_stats" : { "bytes_misc" : 28857931, "bytes_sst" : 0, "last_updated" : "0.000000", "bytes_total" : 29497513, "bytes_log" : 639582 }, "kb_used" : 8884080, "health" : "HEALTH_OK" } ] } ] } } } Cordialmente, *Robson Rosa Branco <robson.bra...@gmail.com> <>><* [C]: +55 21 99525-6856 [E]: robson.bra...@gmail.com [S]: b_r_a_n_c_o [L]: http://www.linkedin.com/in/robsonbranco [T]: https://twitter.com/robsonrbranco [+]: https://google.com/+RobsonBrancoRosa _______________________________________________ pve-user mailing list pve-user@pve.proxmox.com https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-user