Hi:

Could you check your iptables/firewall rules?

You need to open port 6789 for the ceph mons and ports 6800-7100 for the ceph osds.

thanks!!
vicente



> Hello.
> I am completely new in ceph and need a help to set up my very first
> installation of ceph.
> I am fighting two days with ceph. Mkceph and ceph-deploy were used first,
> but with the same result: "ceph -s" hangs on execution.
> Now I am trying to set it up from scratch with "ceph-mon --mkfs". Digging
> into it, I found out that Paxos cannot reach consensus:
> [root@centos-01 ~]# ceph daemon mon.0 mon_status
> { "name": "0",
>   "rank": -1,
>   "state": "probing",
>   "election_epoch": 0,
>   "quorum": [],
>   "outside_quorum": [],
>   "extra_probe_peers": [
>         "192.168.176.1:6789\/0"],
>   "sync_provider": [],
>   "monmap": { "epoch": 0,
>       "fsid": "b5ee18d0-fd89-4794-b37e-2be80ac04bde",
>       "modified": "2015-03-08 22:30:26.906993",
>       "created": "2015-03-08 22:30:26.906993",
>       "mons": [
>             { "rank": 0,
>               "name": "centos-01",
>               "addr": "0.0.0.0:0\/1"}]}}
> As you can see "addr" is zeroed, but "extra_probe_peers" show needed ip. I
> am not sure I am going in the right direction (as I haven't seen a working ceph before
> :) ). but the output looks weird for me.
> What have been done:
> # ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon.
> --cap mon 'allow *'
> # ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring
> --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow
> *' --cap mds 'allow'
> # ceph-authtool /tmp/ceph.mon.keyring --import-keyring
> /etc/ceph/ceph.client.admin.keyring
> # monmaptool --create --add mon.0 192.168.176.1:6789 --fsid
> b5ee18d0-fd89-4794-b37e-2be80ac04bde /tmp/monmap.2  ##### Tried "-i 0" with
> the same result
> # ceph-mon --mkfs -i 0 --monmap /tmp/monmap.2 --keyring
> /tmp/ceph.mon.keyring
> # cp /tmp/ceph.mon.keyring /ceph/mon/mon.0/keyring    ### another weird
> place, "service ceph start mon" complained on absent keyring
> my ceph.conf:
> [global]
>     fsid                       = b5ee18d0-fd89-4794-b37e-2be80ac04bde
>     public network             = 192.168.176.0/24
>     cluster network            = 192.168.176.0/24
>     pid file                   = /var/run/ceph/$name.pid
>     max open files             = 131072
>     auth cluster required      = cephx
>     auth service required      = cephx
>     auth client required       = cephx
>     cephx require signatures   = true    ; everywhere possible
>     keyring                  = /etc/ceph/$cluster.$name.keyring
>     osd pool default size      = 2
>     osd pool default min size  = 1
>     osd pool default pg num    = 100
>     osd pool default pgp num   = 100
>     osd pool default crush rule = 0
>     log file                   = /var/log/ceph/$cluster-$name.log
>     ms bind ipv6               = false
>     filestore max sync interval = 5
>     filestore xattr use omap    = true
> [mon]
>     mon initial members        = centos-01
>     mon host                   = centos-01
>     mon addr                   = 192.168.176.1
>     mon data                   = /ceph/mon/$name
> [mon.0]
>     host                       = centos-01
>     mon addr                   = 192.168.176.1:6789
> [mds]
> [mds.1]
>     host                       = centos-01
> [osd]
>     osd data                     = /ceph/osd/$name
>     osd recovery max active      = 3
>     osd journal                  = /ceph/osd/$name/journal
> [osd.0]
>     host                         = centos-01
>     osd data                                    = /ceph/osd/ceph-0
> Thanks in advance for any help.
> Kind regards,
> Dmitry Chirikov
_______________________________________________
ceph-users mailing list
ceph-users@lists.ceph.com
http://lists.ceph.com/listinfo.cgi/ceph-users-ceph.com

Reply via email to