[Pacemaker] Resource clone as part of a group - errors

David Morton davidmorton78 at gmail.com
Wed Feb 16 19:55:33 EST 2011


Afternoon all ... I'm attempting to use a cloned resource (an OCFS2 filesystem)
in two groups, both to enforce the correct start order and to ensure it is
always running before the more critical resources start. Each group has a
preferred server to run on but may fail over to the other. When that happens,
the clone resource needs to run only once (the filesystem will already be
mounted), or at least not fail if a start is attempted and it is found to
already be running.
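For reference, what I'm after is roughly the following (the meta attributes
here are illustrative only, not what is currently in my config): an anonymous
clone with one instance per node, interleaved so that anything ordered after
it only waits for the local instance:

# illustrative sketch -- not in the config below
clone CL_FS_DB_SHARED FS_DB_SHARED \
        meta interleave="true" clone-max="2" clone-node-max="1"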

When I add the clone resource CL_FS_DB_SHARED to the two groups DEPOT and
ESP_AUDIT, I get the following output (this was done in the live crm utility):
element group: Relax-NG validity error : Expecting an element meta_attributes, got nothing
element clone: Relax-NG validity error : Invalid sequence in interleave
element clone: Relax-NG validity error : Element group failed to validate content
element clone: Relax-NG validity error : Element resources has extra content: clone
element configuration: Relax-NG validity error : Invalid sequence in interleave
element cib: Relax-NG validity error : Element cib failed to validate content
crm_verify[16127]: 2011/02/17_00:46:12 ERROR: main: CIB did not pass DTD/schema validation
Errors found during check: config not valid

I've tried using colocation and order statements, but they don't behave the
way I anticipated, and it would be a lot tidier to add the clone directly to
the group definition.
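What I tried was along these lines (the constraint names are just
placeholders, not what I actually used):

# start each group only after the shared filesystem clone, and keep each
# group on a node where an instance of the clone is running
order O_DEPOT_AFTER_SHARED inf: CL_FS_DB_SHARED DEPOT
colocation C_DEPOT_WITH_SHARED inf: DEPOT CL_FS_DB_SHARED
order O_ESP_AUDIT_AFTER_SHARED inf: CL_FS_DB_SHARED ESP_AUDIT
colocation C_ESP_AUDIT_WITH_SHARED inf: ESP_AUDIT CL_FS_DB_SHARED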

The current config is below:

node company-prod-db-001
node company-prod-db-002
primitive DERBYDB lsb:derby
primitive FS_DB_DEPOT ocf:heartbeat:Filesystem \
        params device="-LDB_DEPOT" directory="/disk5" fstype="ocfs2"
options="acl" \
        op monitor interval="60s" timeout="40" \
        op start interval="0" timeout="60" \
        op stop interval="0" timeout="60" \
        meta migration-threshold="3" failure-timeout="180"
primitive FS_DB_ESP_AUDIT ocf:heartbeat:Filesystem \
        params device="-LDB_ESP_AUDIT" directory="/disk4" fstype="ocfs2"
options="acl" \
        op monitor interval="60s" timeout="40" \
        op start interval="0" timeout="60" \
        op stop interval="0" timeout="60" \
        meta migration-threshold="3" failure-timeout="180"
primitive FS_DB_SHARED ocf:heartbeat:Filesystem \
        params device="-LDB_SHARED" directory="/disk3" fstype="ocfs2"
options="acl" \
        op monitor interval="60s" timeout="40" \
        op start interval="0" timeout="60" \
        op stop interval="0" timeout="60" \
        meta migration-threshold="3" failure-timeout="180"
primitive FS_LOGS_DEPOT ocf:heartbeat:Filesystem \
        params device="-LLOGS_DEPOT" directory="/disk2" fstype="ocfs2"
options="data=writeback,noatime,acl" \
        op monitor interval="60s" timeout="40" \
        op start interval="0" timeout="60" \
        op stop interval="0" timeout="60" \
        meta migration-threshold="3" failure-timeout="180"
primitive FS_LOGS_ESP_AUDIT ocf:heartbeat:Filesystem \
        params device="-LLOGS_ESP_AUDIT" directory="/disk1" fstype="ocfs2"
options="data=writeback,noatime,acl" \
        op monitor interval="60s" timeout="40" \
        op start interval="0" timeout="60" \
        op stop interval="0" timeout="60" \
        meta migration-threshold="3" failure-timeout="180"
primitive IP_DEPOT_15 ocf:heartbeat:IPaddr2 \
        params ip="192.168.15.93" cidr_netmask="24" \
        op monitor interval="30s"
primitive IP_DEPOT_72 ocf:heartbeat:IPaddr2 \
        params ip="192.168.72.93" cidr_netmask="24" \
        op monitor interval="30s"
primitive IP_ESP_AUDIT_15 ocf:heartbeat:IPaddr2 \
        params ip="192.168.15.92" cidr_netmask="24" \
        op monitor interval="30s"
primitive IP_ESP_AUDIT_72 ocf:heartbeat:IPaddr2 \
        params ip="192.168.72.92" cidr_netmask="24" \
        op monitor interval="30s"
primitive MAIL_ALERT ocf:heartbeat:MailTo \
        params email="root at localhost" \
        op monitor interval="60" timeout="10"
primitive PGSQL_AUDIT ocf:heartbeat:pgsql \
        params pgdata="/disk1/audit/dbdata/data/" pgport="1234"
pgdba="audit" \
        op start interval="0" timeout="120" \
        op stop interval="0" timeout="120" \
        op monitor interval="60" timeout="30" \
        meta migration-threshold="3" failure-timeout="180"
primitive PGSQL_DEPOT ocf:heartbeat:pgsql \
        params pgdata="/disk2/depot/dbdata/data/" pgport="1235"
pgdba="depot" \
        op start interval="0" timeout="120" \
        op stop interval="0" timeout="120" \
        op monitor interval="60" timeout="30" \
        meta migration-threshold="3" failure-timeout="180"
primitive PGSQL_ESP ocf:heartbeat:pgsql \
        params pgdata="/disk1/esp/dbdata/data/" pgport="1236" pgdba="esp" \
        op start interval="0" timeout="120" \
        op stop interval="0" timeout="120" \
        op monitor interval="60" timeout="30" \
        meta migration-threshold="3" failure-timeout="180"
primitive STONITH-DB-001 stonith:external/ipmi \
        params hostname="company-prod-db-001" ipaddr="192.168.72.80"
userid="thatguy" passwd="bla" interface="lan" \
        op monitor interval="60s" timeout="30s"
primitive STONITH-DB-002 stonith:external/ipmi \
        params hostname="company-prod-db-002" ipaddr="192.168.72.81"
userid="thatguy" passwd="bla" interface="lan" \
        op monitor interval="60s" timeout="30s"
group DEPOT FS_LOGS_DEPOT FS_DB_DEPOT IP_DEPOT_15 IP_DEPOT_72 DERBYDB PGSQL_DEPOT
group ESP_AUDIT FS_LOGS_ESP_AUDIT FS_DB_ESP_AUDIT IP_ESP_AUDIT_15 IP_ESP_AUDIT_72 PGSQL_AUDIT PGSQL_ESP
clone CL_FS_DB_SHARED FS_DB_SHARED
clone CL_MAIL_ALERT MAIL_ALERT
location LOC_DEPOT DEPOT 25: company-prod-db-001
location LOC_ESP_AUDIT ESP_AUDIT 25: company-prod-db-002
location LOC_STONITH-001 STONITH-DB-001 -inf: company-prod-db-001
location LOC_STONITH-002 STONITH-DB-002 -inf: company-prod-db-002
property $id="cib-bootstrap-options" \
        dc-version="1.1.2-2e096a41a5f9e184a1c1537c82c6da1093698eb5" \
        cluster-infrastructure="openais" \
        expected-quorum-votes="2" \
        no-quorum-policy="ignore" \
        start-failure-is-fatal="false"