[Pacemaker] clvmd could not connect to cluster manager

Sander van Vugt sander.van.vugt at xs4all.nl
Mon Feb 8 14:48:33 EST 2010


Hi,

After working for a while, the cluster I installed to create a Xen HA
solution dropped dead on the floor a few days ago. I completely
stripped it, deciding to rebuild it from scratch (based on the xml
files I had saved, of course), but when rebuilding it I can't get the
clvmd process to work anymore: it refuses to start with the message
"clvmd could not connect to cluster manager". See the attached partial
/var/log/messages; the failure shows up at the end. I've also included
the relevant configuration files: cluster.xml is the result of
cibadmin -Q > cluster.xml, and the config files for openais and lvm
are attached as well.
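
A CIB dump saved this way can be loaded back later with, for example:

    cibadmin --replace --xml-file cluster.xml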

Basically, only a few resources are configured so far:
- null-stonith, just to make the software happy
- dlm
- clvm
(I don't want to go any further before I get clvm working; a rough
sketch of how these are defined is below.)
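
In crm shell syntax they look roughly like this (the IDs and
parameters here are illustrative only; the exact definitions are in
the attached cluster.xml):

    primitive null-stonith stonith:null \
            params hostlist="nd1 nd2 nd3"
    primitive dlm ocf:pacemaker:controld \
            op monitor interval="10s"
    primitive clvm ocf:lvm2:clvmd \
            op monitor interval="10s"
    clone dlm-clone dlm meta interleave="true"
    clone clvm-clone clvm meta interleave="true"
    colocation clvm-with-dlm inf: clvm-clone dlm-clone
    order clvm-after-dlm inf: dlm-clone clvm-clone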

I'd greatly appreciate any help I can get on this one. 

Thanks in advance,
Sander van Vugt

-------------- next part --------------
A non-text attachment was scrubbed...
Name: cluster.xml
Type: application/xml
Size: 21469 bytes
Desc: not available
URL: <http://lists.clusterlabs.org/pipermail/pacemaker/attachments/20100208/1f601ae9/attachment.wsdl>
-------------- next part --------------
# This is an example configuration file for the LVM2 system.
# It contains the default settings that would be used if there was no
# /etc/lvm/lvm.conf file.
#
# Refer to 'man lvm.conf' for further information including the file layout.
#
# To put this file in a different directory and override /etc/lvm set
# the environment variable LVM_SYSTEM_DIR before running the tools.


# This section allows you to configure which block devices should
# be used by the LVM system.
devices {

    # Where do you want your volume groups to appear ?
    dir = "/dev"

    # An array of directories that contain the device nodes you wish
    # to use with LVM2.
    scan = [ "/dev" ]

    # A filter that tells LVM2 to only use a restricted set of devices.
    # The filter consists of an array of regular expressions.  These
    # expressions can be delimited by a character of your choice, and
    # prefixed with either an 'a' (for accept) or 'r' (for reject).
    # The first expression found to match a device name determines if
    # the device will be accepted or rejected (ignored).  Devices that
    # don't match any patterns are accepted.

    # Remember to run vgscan after you change this parameter to ensure 
    # that the cache file gets regenerated (see below).

    # By default we accept every block device except udev names:
    # Disabled by SvV on nov. 12th 2009
    #filter = [ "r|/dev/.*/by-path/.*|", "r|/dev/.*/by-id/.*|", "a/.*/" ]
    filter = [ "a|/dev/disk/by-id/.*|", "r|.*|" ]

    # Exclude the cdrom drive
    # filter = [ "r|/dev/cdrom|" ]

    # When testing I like to work with just loopback devices:
    # filter = [ "a/loop/", "r/.*/" ]

    # Or maybe all loops and ide drives except hdc:
    # filter =[ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ]

    # Use anchors if you want to be really specific
    # filter = [ "a|^/dev/hda8$|", "r/.*/" ]

    # The results of the filtering are cached on disk to avoid
    # rescanning dud devices (which can take a very long time).  By
    # default this cache file is hidden in the /etc/lvm directory.
    # It is safe to delete this file: the tools regenerate it.
    cache = "/etc/lvm/.cache"

    # You can turn off writing this cache file by setting this to 0.
    write_cache_state = 1

    # Advanced settings.

    # List of pairs of additional acceptable block device types found 
    # in /proc/devices with maximum (non-zero) number of partitions.
    types = [ "device-mapper", 16 ]

    # If sysfs is mounted (2.6 kernels) restrict device scanning to 
    # the block devices it believes are valid.
    # 1 enables; 0 disables.
    sysfs_scan = 1	

    # By default, LVM2 will ignore devices used as components of
    # software RAID (md) devices by looking for md superblocks.
    # 1 enables; 0 disables.
    md_component_detection = 1
}

# This section allows you to configure the nature of the
# information that LVM2 reports.
log {

    # Controls the messages sent to stdout or stderr.
    # There are three levels of verbosity, 3 being the most verbose.
    verbose = 0

    # Should we send log messages through syslog?
    # 1 is yes; 0 is no.
    syslog = 1

    # Should we log error and debug messages to a file?
    # By default there is no log file.
    #file = "/var/log/lvm2.log"

    # Should we overwrite the log file each time the program is run?
    # By default we append.
    overwrite = 0

    # What level of log messages should we send to the log file and/or syslog?
    # There are 6 syslog-like log levels currently in use - 2 to 7 inclusive.
    # 7 is the most verbose (LOG_DEBUG).
    level = 0
    
    # Format of output messages
    # Whether or not (1 or 0) to indent messages according to their severity
    indent = 1

    # Whether or not (1 or 0) to display the command name on each line output
    command_names = 0

    # A prefix to use before the message text (but after the command name,
    # if selected).  Default is two spaces, so you can see/grep the severity
    # of each message.
    prefix = "  "

    # To make the messages look similar to the original LVM tools use:
    #   indent = 0
    #   command_names = 1
    #   prefix = " -- "

    # Set this if you want log messages during activation.
    # Don't use this in low memory situations (can deadlock).
    # activation = 0
}

# Configuration of metadata backups and archiving.  In LVM2 when we
# talk about a 'backup' we mean making a copy of the metadata for the
# *current* system.  The 'archive' contains old metadata configurations.
# Backups are stored in a human readable text format.
backup {

    # Should we maintain a backup of the current metadata configuration ?
    # Use 1 for Yes; 0 for No.
    # Think very hard before turning this off!
    backup = 1

    # Where shall we keep it ?
    # Remember to back up this directory regularly!
    backup_dir = "/etc/lvm/backup"

    # Should we maintain an archive of old metadata configurations.
    # Use 1 for Yes; 0 for No.
    # On by default.  Think very hard before turning this off.
    archive = 1

    # Where should archived files go ?
    # Remember to back up this directory regularly!
    archive_dir = "/etc/lvm/archive"
    
    # What is the minimum number of archive files you wish to keep ?
    retain_min = 10

    # What is the minimum time you wish to keep an archive file for ?
    retain_days = 30
}

# Settings for running LVM2 in shell (readline) mode.
shell {

    # Number of lines of history to store in ~/.lvm_history
    history_size = 100
}


# Miscellaneous global LVM2 settings
global {
    
    # The file creation mask for any files and directories created.
    # Interpreted as octal if the first digit is zero.
    umask = 077

    # Allow other users to read the files
    #umask = 022

    # Enabling test mode means that no changes to the on disk metadata
    # will be made.  Equivalent to having the -t option on every
    # command.  Defaults to off.
    test = 0

    # Whether or not to communicate with the kernel device-mapper.
    # Set to 0 if you want to use the tools to manipulate LVM metadata 
    # without activating any logical volumes.
    # If the device-mapper kernel driver is not present in your kernel
    # setting this to 0 should suppress the error messages.
    activation = 1

    # If we can't communicate with device-mapper, should we try running 
    # the LVM1 tools?
    # This option only applies to 2.4 kernels and is provided to help you
    # switch between device-mapper kernels and LVM1 kernels.
    # The LVM1 tools need to be installed with .lvm1 suffixes
    # e.g. vgscan.lvm1 and they will stop working after you start using
    # the new lvm2 on-disk metadata format.
    # The default value is set when the tools are built.
    # fallback_to_lvm1 = 0

    # The default metadata format that commands should use - "lvm1" or "lvm2".
    # The command line override is -M1 or -M2.
    # Defaults to "lvm1" if compiled in, else "lvm2".
    # format = "lvm1"

    # Location of proc filesystem
    proc = "/proc"

    # Type of locking to use. Defaults to file-based locking (1).
    # Turn locking off by setting to 0 (dangerous: risks metadata corruption
    # if LVM2 commands get run concurrently).
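    # Type 3 uses built-in cluster-wide locking (requires clvmd to be running).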
    locking_type = 3

    # Local non-LV directory that holds file-based locks while commands are
    # in progress.  A directory like /tmp that may get wiped on reboot is OK.
    locking_dir = "/var/lock/lvm"

    # Other entries can go here to allow you to load shared libraries
    # e.g. if support for LVM1 metadata was compiled as a shared library use
    #   format_libraries = "liblvm2format1.so" 
    # Full pathnames can be given.

    # Search this directory first for shared libraries.
    #   library_dir = "/lib"
}

activation {
    # Device used in place of missing stripes if activating incomplete volume.
    # For now, you need to set this up yourself first (e.g. with 'dmsetup')
    # For example, you could make it return I/O errors using the 'error' 
    # target or make it return zeros.
    missing_stripe_filler = "/dev/ioerror"

    # Size (in KB) of each copy operation when mirroring
    mirror_region_size = 512

    # How much stack (in KB) to reserve for use while devices suspended
    reserved_stack = 256

    # How much memory (in KB) to reserve for use while devices suspended
    reserved_memory = 8192

    # Nice value used while devices suspended
    process_priority = -18

    # If volume_list is defined, each LV is only activated if there is a
    # match against the list.
    #   "vgname" and "vgname/lvname" are matched exactly.
    #   "@tag" matches any tag set in the LV or VG.
    #   "@*" matches if any tag defined on the host is also set in the LV or VG
    #
    # volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
}


####################
# Advanced section #
####################

# Metadata settings
#
# metadata {
    # Default number of copies of metadata to hold on each PV.  0, 1 or 2.
    # It's best to leave this at 2.
    # You might want to override it from the command line with 0 or 1 
    # when running pvcreate on new PVs which are to be added to large VGs.

    # pvmetadatacopies = 2

    # Approximate default size of on-disk metadata areas in sectors.
    # You should increase this if you have large volume groups or
    # you want to retain a large on-disk history of your metadata changes.

    # pvmetadatasize = 255

    # List of directories holding live copies of text format metadata.
    # These directories must not be on logical volumes!
    # It's possible to use LVM2 with a couple of directories here,
    # preferably on different (non-LV) filesystems, and with no other 
    # on-disk metadata (pvmetadatacopies = 0). Or this can be in
    # addition to on-disk metadata areas.
    # The feature was originally added to simplify testing and is not
    # supported under low memory situations - the machine could lock up.
    #
    # Never edit any files in these directories by hand unless you
    # are absolutely sure you know what you are doing! Use
    # the supplied toolset to make changes (e.g. vgcfgrestore).

    # dirs = [ "/etc/lvm/metadata", "/mnt/disk2/lvm/metadata2" ]
#}

dmeventd {
	# mirror_library is the library used when monitoring a mirror device.
	#
	# "libdevmapper-event-lvm2mirror.so" attempts to recover from
	# failures.  It removes failed devices from a volume group and
	# reconfigures a mirror as necessary. If no mirror library is
	# provided, mirrors are not monitored through dmeventd.

	mirror_library = "libdevmapper-event-lvm2mirror.so.2.02"

	# snapshot_library is the library used when monitoring a snapshot device.
	#
	# "libdevmapper-event-lvm2snapshot.so" monitors the filling of
	# snapshots and emits a warning through syslog, when the use of
	# snapshot exceedes 80%. The warning is repeated when 85%, 90% and
	# 95% of the snapshot are filled.

	snapshot_library = "libdevmapper-event-lvm2snapshot.so.2.02"
}

-------------- next part --------------

Feb  8 20:08:27 nd3 cluster-dlm: add_configfs_node: set_configfs_node 22 192.168.1.102 local 0
Feb  8 20:08:27 nd3 cluster-dlm: dlm_process_node: Added active node 22: born-on=305356, last-seen=305356, this-event=305356, last-event=0
Feb  8 20:08:28 nd3 crmd: [7408]: info: process_lrm_event: LRM operation dlm:2_start_0 (call=8, rc=0, cib-update=19, confirmed=true) complete ok
Feb  8 20:08:28 nd3 crmd: [7408]: info: do_lrm_rsc_op: Performing key=21:5:0:6c6f89e1-f164-41c8-a2d7-eeaaa277cf97 op=dlm:2_monitor_10000 )
Feb  8 20:08:28 nd3 crmd: [7408]: info: do_lrm_rsc_op: Performing key=3:5:0:6c6f89e1-f164-41c8-a2d7-eeaaa277cf97 op=clvm:0_stop_0 )
Feb  8 20:08:28 nd3 lrmd: [7405]: info: rsc:clvm:0: stop
Feb  8 20:08:28 nd3 crmd: [7408]: info: process_lrm_event: LRM operation dlm:2_monitor_10000 (call=10, rc=0, cib-update=20, confirmed=false) complete ok
Feb  8 20:08:28 nd3 crmd: [7408]: info: process_lrm_event: LRM operation clvm:0_stop_0 (call=11, rc=0, cib-update=21, confirmed=true) complete ok
Feb  8 20:08:31 nd3 attrd: [7406]: info: main: Sending full refresh
Feb  8 20:08:31 nd3 attrd: [7406]: info: main: Starting mainloop...
Feb  8 20:08:31 nd3 attrd: [7406]: info: crm_new_peer: Node nd1 now has id: 11
Feb  8 20:08:31 nd3 attrd: [7406]: info: crm_new_peer: Node 11 is now known as nd1
Feb  8 20:08:31 nd3 attrd: [7406]: info: find_hash_entry: Creating hash entry for terminate
Feb  8 20:08:31 nd3 attrd: [7406]: info: find_hash_entry: Creating hash entry for shutdown
Feb  8 20:08:31 nd3 attrd: [7406]: info: crm_new_peer: Node nd2 now has id: 22
Feb  8 20:08:31 nd3 attrd: [7406]: info: crm_new_peer: Node 22 is now known as nd2
Feb  8 20:08:31 nd3 attrd: [7406]: info: attrd_local_callback: Sending full refresh (origin=crmd)
Feb  8 20:08:31 nd3 attrd: [7406]: info: attrd_trigger_update: Sending flush op to all hosts for: terminate
Feb  8 20:08:31 nd3 attrd: [7406]: info: attrd_trigger_update: Sending flush op to all hosts for: shutdown
Feb  8 20:10:31 nd3 sshd[7975]: Accepted keyboard-interactive/pam for root from 192.168.1.168 port 3267 ssh2
Feb  8 20:12:47 nd3 cib: [8202]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-99.raw
Feb  8 20:12:47 nd3 cib: [8202]: info: write_cib_contents: Wrote version 0.799.0 of the CIB to disk (digest: e57868717709f8fc77603a6022b20a74)
Feb  8 20:12:47 nd3 cib: [8202]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.l5SN7J (digest: /var/lib/heartbeat/crm/cib.LzAUBZ)
Feb  8 20:16:01 nd3 cib: [8752]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-100.raw
Feb  8 20:16:01 nd3 cib: [8752]: info: write_cib_contents: Wrote version 0.800.0 of the CIB to disk (digest: b6b7e827fb29787c96e5edc0bc23cd12)
Feb  8 20:16:01 nd3 cib: [8752]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.VlsjUi (digest: /var/lib/heartbeat/crm/cib.NxK9OE)
Feb  8 20:16:34 nd3 crmd: [7408]: info: do_lrm_invoke: Forcing a local LRM refresh
Feb  8 20:16:34 nd3 cib: [8792]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-1.raw
Feb  8 20:16:34 nd3 cib: [8792]: info: write_cib_contents: Wrote version 0.801.0 of the CIB to disk (digest: 0908208d0326c22ecb93f0fe6960f0f3)
Feb  8 20:16:34 nd3 cib: [8792]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.IsbdHO (digest: /var/lib/heartbeat/crm/cib.2CTClI)
Feb  8 20:16:36 nd3 crmd: [7408]: info: do_lrm_invoke: Forcing a local LRM refresh
Feb  8 20:16:38 nd3 crmd: [7408]: info: do_lrm_invoke: Removing resource clvm:0 from the LRM
Feb  8 20:16:38 nd3 crmd: [7408]: info: send_direct_ack: ACK'ing resource op clvm:0_delete_0 from mgmtd-8429: lrm_invoke-lrmd-1265656598-7
Feb  8 20:16:38 nd3 crmd: [7408]: info: do_lrm_invoke: Forcing a local LRM refresh
Feb  8 20:16:38 nd3 cib: [8793]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-2.raw
Feb  8 20:16:38 nd3 cib: [8793]: info: write_cib_contents: Wrote version 0.802.0 of the CIB to disk (digest: 6be9ecda537ca356c9a917df648923b0)
Feb  8 20:16:38 nd3 cib: [8793]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.3o3f21 (digest: /var/lib/heartbeat/crm/cib.spj0m7)
Feb  8 20:16:38 nd3 cib: [8794]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-3.raw
Feb  8 20:16:38 nd3 cib: [8794]: info: write_cib_contents: Wrote version 0.803.0 of the CIB to disk (digest: 0691e311e5e9160a9d47813902516e4d)
Feb  8 20:16:38 nd3 cib: [8794]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.kH4m24 (digest: /var/lib/heartbeat/crm/cib.8P4Zua)
Feb  8 20:16:38 nd3 crmd: [7408]: info: do_lrm_rsc_op: Performing key=25:12:0:6c6f89e1-f164-41c8-a2d7-eeaaa277cf97 op=clvm:0_start_0 )
Feb  8 20:16:38 nd3 lrmd: [7405]: info: rsc:clvm:0: start
Feb  8 20:16:38 nd3 lrmd: [7405]: info: RA output: (clvm:0:start:stderr) logd is not running
Feb  8 20:16:38 nd3 lrmd: [7405]: info: RA output: (clvm:0:start:stderr) 2010/02/08_20:16:38 INFO: Starting clvm:0
Feb  8 20:16:38 nd3 clvmd: Unable to create lockspace for CLVM: Invalid argument
Feb  8 20:16:38 nd3 lrmd: [7405]: info: RA output: (clvm:0:start:stderr) clvmd could not connect to cluster manager Consult syslog for more information
Feb  8 20:16:38 nd3 lrmd: [7405]: info: RA output: (clvm:0:start:stderr) logd is not running
Feb  8 20:16:38 nd3 lrmd: [7405]: info: RA output: (clvm:0:start:stderr) 2010/02/08_20:16:38 ERROR: Could not start /usr/sbin/clvmd
Feb  8 20:16:38 nd3 crmd: [7408]: info: process_lrm_event: LRM operation clvm:0_start_0 (call=12, rc=1, cib-update=28, confirmed=true) complete unknown error
Feb  8 20:16:38 nd3 crmd: [7408]: info: do_lrm_rsc_op: Performing key=4:13:0:6c6f89e1-f164-41c8-a2d7-eeaaa277cf97 op=clvm:0_stop_0 )
Feb  8 20:16:38 nd3 lrmd: [7405]: info: rsc:clvm:0: stop
Feb  8 20:16:38 nd3 crmd: [7408]: info: process_lrm_event: LRM operation clvm:0_stop_0 (call=13, rc=0, cib-update=29, confirmed=true) complete ok
Feb  8 20:16:40 nd3 crmd: [7408]: info: do_lrm_invoke: Forcing a local LRM refresh
Feb  8 20:16:40 nd3 cib: [8839]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-4.raw
Feb  8 20:16:40 nd3 cib: [8839]: info: write_cib_contents: Wrote version 0.804.0 of the CIB to disk (digest: a0d51dae27b316fa49740a646a2735c7)
Feb  8 20:16:40 nd3 cib: [8839]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.fTTf1e (digest: /var/lib/heartbeat/crm/cib.CtG6rq)
Feb  8 20:16:42 nd3 crmd: [7408]: info: do_lrm_invoke: Forcing a local LRM refresh
Feb  8 20:16:42 nd3 cib: [8840]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-5.raw
Feb  8 20:16:42 nd3 cib: [8840]: info: write_cib_contents: Wrote version 0.805.0 of the CIB to disk (digest: a40c58485f82482d738110b0376a9ec9)
Feb  8 20:16:42 nd3 cib: [8840]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.m9eMck (digest: /var/lib/heartbeat/crm/cib.cC0cvB)
Feb  8 20:16:44 nd3 crmd: [7408]: ERROR: do_lrm_invoke: Not creating resource for a delete event: (null)
Feb  8 20:16:44 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input <create_request_adv origin="delete_lrm_rsc" t="crmd" version="3.0.1" subt="request" reference="lrm_delete-mgmtd-1265656813-16" crm_task="lrm_delete" crm_sys_to="lrmd" crm_sys_from="8429_mgmtd" crm_host_to="nd3" src="nd1" seq="71" >
Feb  8 20:16:44 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input   <crm_xml >
Feb  8 20:16:44 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input     <rsc_op transition-key="mgmtd-8429" >
Feb  8 20:16:44 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input       <primitive id="clvm:1" />
Feb  8 20:16:44 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input       <attributes crm_feature_set="3.0.1" />
Feb  8 20:16:44 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input     </rsc_op>
Feb  8 20:16:44 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input   </crm_xml>
Feb  8 20:16:44 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input </create_request_adv>
Feb  8 20:16:44 nd3 crmd: [7408]: info: send_direct_ack: ACK'ing resource op clvm:1_delete_0 from mgmtd-8429: lrm_invoke-lrmd-1265656604-8
Feb  8 20:16:44 nd3 crmd: [7408]: info: do_lrm_invoke: Forcing a local LRM refresh
Feb  8 20:16:44 nd3 cib: [8841]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-6.raw
Feb  8 20:16:44 nd3 cib: [8841]: info: write_cib_contents: Wrote version 0.806.0 of the CIB to disk (digest: a7503fb65497970060615ad6957da52b)
Feb  8 20:16:44 nd3 cib: [8841]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.Tk3oHs (digest: /var/lib/heartbeat/crm/cib.gE5gUP)
Feb  8 20:16:46 nd3 crmd: [7408]: info: do_lrm_invoke: Forcing a local LRM refresh
Feb  8 20:16:46 nd3 cib: [8842]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-7.raw
Feb  8 20:16:46 nd3 cib: [8842]: info: write_cib_contents: Wrote version 0.807.0 of the CIB to disk (digest: 7c8dcfc71d9b239746b66a90dc85e2c2)
Feb  8 20:16:46 nd3 cib: [8842]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.qING5B (digest: /var/lib/heartbeat/crm/cib.8vax94)
Feb  8 20:16:48 nd3 crmd: [7408]: info: do_lrm_invoke: Forcing a local LRM refresh
Feb  8 20:16:48 nd3 cib: [8843]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-8.raw
Feb  8 20:16:48 nd3 cib: [8843]: info: write_cib_contents: Wrote version 0.808.0 of the CIB to disk (digest: 0d8a9effaee713a9a7d5a2050a6db681)
Feb  8 20:16:48 nd3 cib: [8843]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.9HoLiJ (digest: /var/lib/heartbeat/crm/cib.c6aZei)
Feb  8 20:16:50 nd3 crmd: [7408]: info: do_lrm_invoke: Removing resource clvm:2 from the LRM
Feb  8 20:16:50 nd3 crmd: [7408]: info: send_direct_ack: ACK'ing resource op clvm:2_delete_0 from mgmtd-8429: lrm_invoke-lrmd-1265656610-9
Feb  8 20:16:50 nd3 crmd: [7408]: info: do_lrm_invoke: Forcing a local LRM refresh
Feb  8 20:16:50 nd3 cib: [8857]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-9.raw
Feb  8 20:16:50 nd3 cib: [8857]: info: write_cib_contents: Wrote version 0.809.0 of the CIB to disk (digest: 519089bccce83355a63d7f892ff659f9)
Feb  8 20:16:50 nd3 cib: [8857]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.NmhyZO (digest: /var/lib/heartbeat/crm/cib.8OelOt)
Feb  8 20:16:50 nd3 cib: [8858]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-10.raw
Feb  8 20:16:50 nd3 cib: [8858]: info: write_cib_contents: Wrote version 0.810.0 of the CIB to disk (digest: c5bae7705302d444b933af4d56923309)
Feb  8 20:16:50 nd3 cib: [8858]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.mWA2jR (digest: /var/lib/heartbeat/crm/cib.mt63ew)
Feb  8 20:16:52 nd3 cib: [8859]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-11.raw
Feb  8 20:16:52 nd3 cib: [8859]: info: write_cib_contents: Wrote version 0.811.0 of the CIB to disk (digest: 7795b545235498d4955f7c18384f5045)
Feb  8 20:16:52 nd3 cib: [8859]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.nrrJt1 (digest: /var/lib/heartbeat/crm/cib.wI0QmM)
Feb  8 20:17:40 nd3 cib: [8925]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-12.raw
Feb  8 20:17:40 nd3 cib: [8925]: info: write_cib_contents: Wrote version 0.812.0 of the CIB to disk (digest: f779a89a580954cc28a0535fc5477394)
Feb  8 20:17:40 nd3 cib: [8925]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.vo06AG (digest: /var/lib/heartbeat/crm/cib.uL7erF)
Feb  8 20:17:56 nd3 crmd: [7408]: info: do_lrm_invoke: Forcing a local LRM refresh
Feb  8 20:17:56 nd3 cib: [8939]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-13.raw
Feb  8 20:17:56 nd3 cib: [8939]: info: write_cib_contents: Wrote version 0.813.0 of the CIB to disk (digest: f1a39c2d905c137830ba9e287be58f7c)
Feb  8 20:17:56 nd3 cib: [8939]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.1BqQDi (digest: /var/lib/heartbeat/crm/cib.Ual3w1)
Feb  8 20:17:58 nd3 crmd: [7408]: info: do_lrm_invoke: Forcing a local LRM refresh
Feb  8 20:17:58 nd3 cib: [8940]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-14.raw
Feb  8 20:17:58 nd3 cib: [8940]: info: write_cib_contents: Wrote version 0.814.0 of the CIB to disk (digest: 1fc364384790fe2371a37a27bd1e73be)
Feb  8 20:17:58 nd3 cib: [8940]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.MUwWQm (digest: /var/lib/heartbeat/crm/cib.u3uizb)
Feb  8 20:18:00 nd3 crmd: [7408]: info: do_lrm_invoke: Removing resource clvm:0 from the LRM
Feb  8 20:18:00 nd3 crmd: [7408]: info: send_direct_ack: ACK'ing resource op clvm:0_delete_0 from mgmtd-8429: lrm_invoke-lrmd-1265656680-10
Feb  8 20:18:00 nd3 crmd: [7408]: info: do_lrm_invoke: Forcing a local LRM refresh
Feb  8 20:18:00 nd3 cib: [8941]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-15.raw
Feb  8 20:18:00 nd3 cib: [8941]: info: write_cib_contents: Wrote version 0.815.0 of the CIB to disk (digest: 429d6a83d9b0e424f7eee6efa18bee77)
Feb  8 20:18:00 nd3 cib: [8941]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.xpx0Vt (digest: /var/lib/heartbeat/crm/cib.83JAqo)
Feb  8 20:18:00 nd3 cib: [8942]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-16.raw
Feb  8 20:18:00 nd3 cib: [8942]: info: write_cib_contents: Wrote version 0.816.0 of the CIB to disk (digest: 6eeb3d60c5f0c32364247ed0c5ae64ad)
Feb  8 20:18:00 nd3 cib: [8942]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.eNGSuv (digest: /var/lib/heartbeat/crm/cib.wpRO8p)
Feb  8 20:18:00 nd3 crmd: [7408]: info: do_lrm_rsc_op: Performing key=9:30:7:6c6f89e1-f164-41c8-a2d7-eeaaa277cf97 op=clvm:0_monitor_0 )
Feb  8 20:18:00 nd3 lrmd: [7405]: info: rsc:clvm:0: monitor
Feb  8 20:18:00 nd3 crmd: [7408]: info: process_lrm_event: LRM operation clvm:0_monitor_0 (call=14, rc=7, cib-update=45, confirmed=true) complete not running
Feb  8 20:18:02 nd3 crmd: [7408]: info: do_lrm_invoke: Forcing a local LRM refresh
Feb  8 20:18:02 nd3 cib: [8965]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-17.raw
Feb  8 20:18:02 nd3 cib: [8965]: info: write_cib_contents: Wrote version 0.817.0 of the CIB to disk (digest: ff96f910bcfcb53a357ec775f37192c2)
Feb  8 20:18:02 nd3 cib: [8965]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.LJOFpG (digest: /var/lib/heartbeat/crm/cib.eCMd3G)
Feb  8 20:18:04 nd3 crmd: [7408]: info: do_lrm_invoke: Forcing a local LRM refresh
Feb  8 20:18:04 nd3 cib: [8966]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-18.raw
Feb  8 20:18:04 nd3 cib: [8966]: info: write_cib_contents: Wrote version 0.818.0 of the CIB to disk (digest: 0cdbfb9849ad385f2473153046188d9e)
Feb  8 20:18:04 nd3 cib: [8966]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.oImpHP (digest: /var/lib/heartbeat/crm/cib.Ke5ObW)
Feb  8 20:18:06 nd3 crmd: [7408]: ERROR: do_lrm_invoke: Not creating resource for a delete event: (null)
Feb  8 20:18:06 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input <create_request_adv origin="delete_lrm_rsc" t="crmd" version="3.0.1" subt="request" reference="lrm_delete-mgmtd-1265656895-43" crm_task="lrm_delete" crm_sys_to="lrmd" crm_sys_from="8429_mgmtd" crm_host_to="nd3" src="nd1" seq="102" >
Feb  8 20:18:06 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input   <crm_xml >
Feb  8 20:18:06 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input     <rsc_op transition-key="mgmtd-8429" >
Feb  8 20:18:06 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input       <primitive id="clvm:1" />
Feb  8 20:18:06 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input       <attributes crm_feature_set="3.0.1" />
Feb  8 20:18:06 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input     </rsc_op>
Feb  8 20:18:06 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input   </crm_xml>
Feb  8 20:18:06 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input </create_request_adv>
Feb  8 20:18:06 nd3 crmd: [7408]: info: send_direct_ack: ACK'ing resource op clvm:1_delete_0 from mgmtd-8429: lrm_invoke-lrmd-1265656686-11
Feb  8 20:18:06 nd3 crmd: [7408]: info: do_lrm_invoke: Forcing a local LRM refresh
Feb  8 20:18:06 nd3 cib: [8967]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-19.raw
Feb  8 20:18:06 nd3 cib: [8967]: info: write_cib_contents: Wrote version 0.819.0 of the CIB to disk (digest: 1a150dd7dd8dbce1a69223a0a0181f04)
Feb  8 20:18:06 nd3 cib: [8967]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.fA102Z (digest: /var/lib/heartbeat/crm/cib.OiZZwc)
Feb  8 20:18:08 nd3 crmd: [7408]: info: do_lrm_invoke: Forcing a local LRM refresh
Feb  8 20:18:08 nd3 cib: [8968]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-20.raw
Feb  8 20:18:08 nd3 cib: [8968]: info: write_cib_contents: Wrote version 0.820.0 of the CIB to disk (digest: fcbccd258c143f71ec19700e5cc1fcd8)
Feb  8 20:18:08 nd3 cib: [8968]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.iKSGi4 (digest: /var/lib/heartbeat/crm/cib.AfwRCm)
Feb  8 20:18:10 nd3 crmd: [7408]: info: do_lrm_invoke: Forcing a local LRM refresh
Feb  8 20:18:10 nd3 cib: [8969]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-21.raw
Feb  8 20:18:10 nd3 cib: [8969]: info: write_cib_contents: Wrote version 0.821.0 of the CIB to disk (digest: 9d67d500ede22c80feb54aedb1df84b5)
Feb  8 20:18:10 nd3 cib: [8969]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.dmqeGd (digest: /var/lib/heartbeat/crm/cib.gnwjRB)
Feb  8 20:18:12 nd3 crmd: [7408]: ERROR: do_lrm_invoke: Not creating resource for a delete event: (null)
Feb  8 20:18:12 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input <create_request_adv origin="delete_lrm_rsc" t="crmd" version="3.0.1" subt="request" reference="lrm_delete-mgmtd-1265656901-52" crm_task="lrm_delete" crm_sys_to="lrmd" crm_sys_from="8429_mgmtd" crm_host_to="nd3" src="nd1" seq="110" >
Feb  8 20:18:12 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input   <crm_xml >
Feb  8 20:18:12 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input     <rsc_op transition-key="mgmtd-8429" >
Feb  8 20:18:12 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input       <primitive id="clvm:2" />
Feb  8 20:18:12 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input       <attributes crm_feature_set="3.0.1" />
Feb  8 20:18:12 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input     </rsc_op>
Feb  8 20:18:12 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input   </crm_xml>
Feb  8 20:18:12 nd3 crmd: [7408]: WARN: log_data_element: do_lrm_invoke: bad input </create_request_adv>
Feb  8 20:18:12 nd3 crmd: [7408]: info: send_direct_ack: ACK'ing resource op clvm:2_delete_0 from mgmtd-8429: lrm_invoke-lrmd-1265656692-12
Feb  8 20:18:12 nd3 crmd: [7408]: info: do_lrm_invoke: Forcing a local LRM refresh
Feb  8 20:18:12 nd3 cib: [8983]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-22.raw
Feb  8 20:18:12 nd3 cib: [8983]: info: write_cib_contents: Wrote version 0.822.0 of the CIB to disk (digest: f9801c6f87ba39bd392af8287a17a1e0)
Feb  8 20:18:12 nd3 cib: [8983]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.HnXPKk (digest: /var/lib/heartbeat/crm/cib.YQ7ORO)
Feb  8 20:18:14 nd3 cib: [8984]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-23.raw
Feb  8 20:18:14 nd3 cib: [8984]: info: write_cib_contents: Wrote version 0.823.0 of the CIB to disk (digest: d1614f02dda8064e639818aeaea537fd)
Feb  8 20:18:14 nd3 cib: [8984]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.Sv57sr (digest: /var/lib/heartbeat/crm/cib.UEPkq1)
Feb  8 20:18:21 nd3 cib: [7404]: info: cib_stats: Processed 241 operations (124.00us average, 0% utilization) in the last 10min
Feb  8 20:19:11 nd3 cib: [9063]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-24.raw
Feb  8 20:19:11 nd3 cib: [9063]: info: write_cib_contents: Wrote version 0.824.0 of the CIB to disk (digest: 4212157b66264fbcd579d067a13118b0)
Feb  8 20:19:11 nd3 cib: [9063]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.WdXIsj (digest: /var/lib/heartbeat/crm/cib.7hGbkz)
Feb  8 20:19:12 nd3 crmd: [7408]: info: do_lrm_rsc_op: Performing key=26:40:0:6c6f89e1-f164-41c8-a2d7-eeaaa277cf97 op=clvm:0_start_0 )
Feb  8 20:19:12 nd3 lrmd: [7405]: info: rsc:clvm:0: start
Feb  8 20:19:12 nd3 lrmd: [7405]: info: RA output: (clvm:0:start:stderr) logd is not running
Feb  8 20:19:12 nd3 lrmd: [7405]: info: RA output: (clvm:0:start:stderr) 2010/02/08_20:19:12 INFO: Starting clvm:0
Feb  8 20:19:12 nd3 clvmd: Unable to create lockspace for CLVM: Invalid argument
Feb  8 20:19:12 nd3 lrmd: [7405]: info: RA output: (clvm:0:start:stderr) clvmd could not connect to cluster manager Consult syslog for more information
Feb  8 20:19:12 nd3 lrmd: [7405]: info: RA output: (clvm:0:start:stderr) logd is not running
Feb  8 20:19:12 nd3 lrmd: [7405]: info: RA output: (clvm:0:start:stderr) 2010/02/08_20:19:12 ERROR: Could not start /usr/sbin/clvmd
Feb  8 20:19:12 nd3 crmd: [7408]: info: process_lrm_event: LRM operation clvm:0_start_0 (call=15, rc=1, cib-update=54, confirmed=true) complete unknown error
Feb  8 20:19:12 nd3 crmd: [7408]: info: do_lrm_rsc_op: Performing key=4:41:0:6c6f89e1-f164-41c8-a2d7-eeaaa277cf97 op=clvm:0_stop_0 )
Feb  8 20:19:12 nd3 lrmd: [7405]: info: rsc:clvm:0: stop
Feb  8 20:19:12 nd3 crmd: [7408]: info: process_lrm_event: LRM operation clvm:0_stop_0 (call=16, rc=0, cib-update=55, confirmed=true) complete ok
Feb  8 20:20:39 nd3 mgmtd: [7409]: info: CIB query: cib

-------------- next part --------------
aisexec {
	#Group to run aisexec as. Needs to be root for Pacemaker

	group:	root

	#User to run aisexec as. Needs to be root for Pacemaker

	user:	root

}
service {
	#Start mgmtd with pacemaker by default

	use_mgmtd:	yes

	#Use logd for pacemaker

	use_logd:	yes

	#Version

	ver:	0

	#The name of the service

	name:	pacemaker

}
totem {
	#The mode for the redundant ring. 'none' is used when only one interface is specified; otherwise only 'active' or 'passive' may be chosen

	rrp_mode:	none

	#How long to wait for join messages in the membership protocol, in ms

	join:	1000

	#The maximum number of messages that may be sent by one processor on receipt of the token.

	max_messages:	20

	#The virtual synchrony filter type used to identify a primary component. Change with care.

	vsftype:	none

	#HMAC/SHA1 should be used to authenticate all messages

	secauth:	off

	#The fixed 32-bit value that identifies this node to the cluster membership. Optional for IPv4, and required for IPv6. 0 is reserved for other usage

	nodeid:	33

	#How long to wait for consensus to be achieved before starting a new round of membership configuration.

	consensus:	2500

	#This timeout specifies, in milliseconds, the upper bound of the random wait (between 0 and send_join) before sending a join message.

	send_join:	45

	#Token loss timeout, in ms

	token:	5000

	#How many token retransmits should be attempted before forming a new configuration.

	token_retransmits_before_loss_const:	10

	#The only valid version is 2

	version:	2

	interface {
		#Network address to bind to for this interface

		bindnetaddr:	192.168.1.0

		#The multicast address to be used

		mcastaddr:	224.12.13.14

		#The multicast port to be used

		mcastport:	5405

		#The ringnumber assigned to this interface setting

		ringnumber:	0

	}
	#To make sure the auto-generated nodeid is positive

	clear_node_high_bit:	no

}
logging {
	#Log to the standard error output

	to_stderr:	off

	#Log to syslog

	to_syslog:	yes

	#Whether to turn on debug information in the log

	debug:	off

	#Log timestamps as well

	timestamp:	on

	#Log to a specified file

	to_file:	no

	#Also log the source file and line number

	fileline:	off

	#Facility in syslog

	syslog_facility:	daemon

}
amf {
	#Enable or disable AMF 

	mode:	disabled

}

