Error when import Ceph cluster to VSM

Error when import Ceph cluster to VSM

MinhViet Bui
Hi everyone,
I am trying to import an existing Ceph cluster, but it returns the error "osd quantity is not consistent between ceph conf and crush map.,missing mon header section in ceph configration file.,missing osd header section in ceph configration file."

This is the crush map (JSON dump):
{
    "devices": [
        {
            "id": 0,
            "name": "osd.0"
        },
        {
            "id": 1,
            "name": "osd.1"
        },
        {
            "id": 2,
            "name": "osd.2"
        }
    ],
    "types": [
        {
            "type_id": 0,
            "name": "osd"
        },
        {
            "type_id": 1,
            "name": "host"
        },
        {
            "type_id": 2,
            "name": "chassis"
        },
        {
            "type_id": 3,
            "name": "rack"
        },
        {
            "type_id": 4,
            "name": "row"
        },
        {
            "type_id": 5,
            "name": "pdu"
        },
        {
            "type_id": 6,
            "name": "pod"
        },
        {
            "type_id": 7,
            "name": "room"
        },
        {
            "type_id": 8,
            "name": "datacenter"
        },
        {
            "type_id": 9,
            "name": "region"
        },
        {
            "type_id": 10,
            "name": "root"
        }
    ],
    "buckets": [
        {
            "id": -1,
            "name": "cus",
            "type_id": 10,
            "type_name": "root",
            "weight": 87489,
            "alg": "straw",
            "hash": "rjenkins1",
            "items": [
                {
                    "id": -2,
                    "weight": 29163,
                    "pos": 0
                },
                {
                    "id": -3,
                    "weight": 29163,
                    "pos": 1
                },
                {
                    "id": -4,
                    "weight": 29163,
                    "pos": 2
                }
            ]
        },
        {
            "id": -2,
            "name": "rack1",
            "type_id": 3,
            "type_name": "rack",
            "weight": 29163,
            "alg": "straw",
            "hash": "rjenkins1",
            "items": [
                {
                    "id": -5,
                    "weight": 29163,
                    "pos": 0
                }
            ]
        },
        {
            "id": -3,
            "name": "rack2",
            "type_id": 3,
            "type_name": "rack",
            "weight": 29163,
            "alg": "straw",
            "hash": "rjenkins1",
            "items": [
                {
                    "id": -6,
                    "weight": 29163,
                    "pos": 0
                }
            ]
        },
        {
            "id": -4,
            "name": "rack3",
            "type_id": 3,
            "type_name": "rack",
            "weight": 29163,
            "alg": "straw",
            "hash": "rjenkins1",
            "items": [
                {
                    "id": -7,
                    "weight": 29163,
                    "pos": 0
                }
            ]
        },
        {
            "id": -5,
            "name": "node1",
            "type_id": 1,
            "type_name": "host",
            "weight": 29163,
            "alg": "straw",
            "hash": "rjenkins1",
            "items": [
                {
                    "id": 0,
                    "weight": 29163,
                    "pos": 0
                }
            ]
        },
        {
            "id": -6,
            "name": "node2",
            "type_id": 1,
            "type_name": "host",
            "weight": 29163,
            "alg": "straw",
            "hash": "rjenkins1",
            "items": [
                {
                    "id": 1,
                    "weight": 29163,
                    "pos": 0
                }
            ]
        },
        {
            "id": -7,
            "name": "node3",
            "type_id": 1,
            "type_name": "host",
            "weight": 29163,
            "alg": "straw",
            "hash": "rjenkins1",
            "items": [
                {
                    "id": 2,
                    "weight": 29163,
                    "pos": 0
                }
            ]
        }
    ],
    "rules": [
        {
            "rule_id": 0,
            "rule_name": "replicated_ruleset",
            "ruleset": 0,
            "type": 1,
            "min_size": 1,
            "max_size": 10,
            "steps": [
                {
                    "op": "take",
                    "item": -1,
                    "item_name": "cus"
                },
                {
                    "op": "chooseleaf_firstn",
                    "num": 0,
                    "type": "rack"
                },
                {
                    "op": "emit"
                }
            ]
        }
    ],
    "tunables": {
        "choose_local_tries": 0,
        "choose_local_fallback_tries": 0,
        "choose_total_tries": 50,
        "chooseleaf_descend_once": 1,
        "chooseleaf_vary_r": 0,
        "straw_calc_version": 1,
        "allowed_bucket_algs": 22,
        "profile": "unknown",
        "optimal_tunables": 0,
        "legacy_tunables": 0,
        "require_feature_tunables": 1,
        "require_feature_tunables2": 1,
        "require_feature_tunables3": 0,
        "has_v2_rules": 0,
        "has_v3_rules": 0,
        "has_v4_buckets": 0
    }
}

and this is ceph.conf:
[global]
fsid = 87ecad8f-068e-44db-9a92-a31ea4f2e7f7
mon_initial_members = node1, node2, node3
mon_host = xxx.xxx.xxx.11
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
filestore_xattr_use_omap = true
public_network = xxx.xxx.xxx.0/26
cluster_network = 10.10.1.0/24
osd_journal_size = 10000
osd_pool_default_size = 2
osd_pool_default_min_size = 1
osd_crush_chooseleaf_type = 2

I don't know the reason. Please help me. Thank you.
vsm-api log:
2016-06-21 16:47:48     INFO [vsm.api.v1.clusters] CEPH_LOG check_pre_existing_cluster get ret={u'info': u'', u'code': u'-1,-22,-23', u'crushmap_tree_data': [{u'parent_id': [-6], u'type_name': u'osd', u'type': 0, u'id': 1, u'name': u'osd.1'}, {u'parent_id': [-5], u'type_name': u'osd', u'type': 0, u'id': 0, u'name': u'osd.0'}, {u'parent_id': [-2], u'type_name': u'host', u'type': 1, u'id': -5, u'name': u'node1'}, {u'parent_id': [-1], u'type_name': u'rack', u'type': 3, u'id': -4, u'name': u'rack3'}, {u'parent_id': [-4], u'type_name': u'host', u'type': 1, u'id': -7, u'name': u'node3'}, {u'parent_id': [-3], u'type_name': u'host', u'type': 1, u'id': -6, u'name': u'node2'}, {u'type_name': u'root', u'type': 10, u'id': -1, u'name': u'cus'}, {u'parent_id': [-1], u'type_name': u'rack', u'type': 3, u'id': -3, u'name': u'rack2'}, {u'parent_id': [-1], u'type_name': u'rack', u'type': 3, u'id': -2, u'name': u'rack1'}, {u'parent_id': [-7], u'type_name': u'osd', u'type': 0, u'id': 2, u'name': u'osd.2'}], u'error': u'osd quantity is not consistent between ceph conf and crush map.,missing mon header section in ceph configration file.,missing osd header section in ceph configration file.'}

Re: Error when import Ceph cluster to VSM

niupengju
Hi, I use VSM 2.1 with Ceph 10.2.3 (Jewel). Did you resolve this problem? I have met the same problem.
Thanks!

Re: Error when import Ceph cluster to VSM

ywang19
Administrator
You need to fill in all the mon and osd sections in ceph.conf.
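For a cluster like the one in the original post (three monitors, three OSDs), the per-daemon sections would look something like the sketch below. This is only an illustration: the addresses for node2/node3 and the osd data paths are placeholders you must adjust to your own cluster.

[mon.node1]
host = node1
mon addr = xxx.xxx.xxx.11:6789

[mon.node2]
host = node2
mon addr = xxx.xxx.xxx.12:6789

[mon.node3]
host = node3
mon addr = xxx.xxx.xxx.13:6789

[osd.0]
host = node1
osd data = /var/lib/ceph/osd/ceph-0

[osd.1]
host = node2
osd data = /var/lib/ceph/osd/ceph-1

[osd.2]
host = node3
osd data = /var/lib/ceph/osd/ceph-2

The number of [osd.N] sections must match the number of devices in the crush map, which is what the "osd quantity is not consistent" message is complaining about. You can cross-check before re-importing, for example:

ceph osd crush dump | python -c "import json,sys; print(len(json.load(sys.stdin)['devices']))"
grep -c '^\[osd\.' /etc/ceph/ceph.conf

Both commands should print the same number (3 in the original post).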

Re: Error when import Ceph cluster to VSM

niupengju
Thank you!
The problem has been resolved!
Is there some template that VSM needs for ceph.conf? I ask because I used the "ceph-deploy" tool to create the cluster.

Re: Error when import Ceph cluster to VSM

MGS
I am experiencing the same problem while importing a cluster that was configured with ceph-deploy:
"osd quantity is not consistent between ceph conf and crush map"

Do you have any sample working ceph.conf and crushmap files?

Current setup: VSM 2.2.0-521, Ceph 10.2.5

Re: Error when import Ceph cluster to VSM

namdh
In reply to this post by ywang19
Hi Mr. Wang,

I am currently using VSM 2.2 and Ceph 12.2. I am trying to add an existing cluster to VSM for monitoring, but it is not successful. The problem seems to be related to the ceph.conf file; it has some syntax mistakes. Please help me check and correct it (ceph.conf attached).

Re: Error when import Ceph cluster to VSM

namdh
In reply to this post by MinhViet Bui
Hi Mr. Viet,

Did you fix this problem? Would you give me your phone number? I want to contact you directly to discuss this problem further.