[ClusterLabs] Pacemaker resources are not scheduled

lkxjtu lkxjtu at 163.com
Fri Apr 13 12:16:36 EDT 2018


My cluster version:
Corosync 2.4.0
Pacemaker 1.1.16

There are many resource anomalies. Some resources are monitored but never recovered, while others are neither monitored nor recovered. Only one resource (vnm) is scheduled normally, but it cannot start because other resources in the cluster are abnormal — it is just like a deadlock. I have been plagued by this problem for a long time. All I want is a stable, highly available configuration in which every resource recovers indefinitely. Is my resource configuration correct?



$ cat /etc/corosync/corosync.conf
compatibility: whitetank

quorum {
  provider: corosync_votequorum
        two_node: 0
   }

nodelist {
  node {
    ring0_addr: 122.0.1.8
    name: 122.0.1.8
    nodeid: 000001008
  }
  node {
    ring0_addr: 122.0.1.9
    name: 122.0.1.9
    nodeid: 000001009
  }
  node {
    ring0_addr: 122.0.1.10
    name: 122.0.1.10
    nodeid: 000001010
  }
}

totem {
  version:                             2
  token:                               3000
  token_retransmits_before_loss_const: 10
  join:                                60
  consensus:                           3600
  vsftype:                             none
  max_messages:                        20
  clear_node_high_bit:                 yes
  rrp_mode:                            none
  secauth:                             off
  threads:                             2
  transport:                           udpu
  interface {
    ringnumber:  0
    bindnetaddr: 122.0.1.8
    mcastport:   5405
  }
}

logging {
  fileline:        off
  to_stderr:       no
  to_logfile:      yes
  logfile:         /root/info/logs/pacemaker_cluster/corosync.log
  to_syslog:       yes
  syslog_facility: daemon
  syslog_priority: info
  debug:           off
  function_name:   on
  timestamp:       on
  logger_subsys {
    subsys: AMF
    debug:  off
    tags:   enter|leave|trace1|trace2|trace3|trace4|trace6
  }
}

amf {
  mode: disabled
}

aisexec {
  user:  root
  group: root
}





$ crm configure show
node 1008: 122.0.1.8
node 1009: 122.0.1.9
node 1010: 122.0.1.10
primitive apigateway apigateway \
        op monitor interval=20s timeout=220 \
        op stop interval=0 timeout=120s on-fail=restart \
        op start interval=0 timeout=120s on-fail=restart \
        meta failure-timeout=60s target-role=Started
primitive apigateway_vip IPaddr2 \
        params ip=122.0.1.203 cidr_netmask=24 \
        op start interval=0 timeout=20 \
        op stop interval=0 timeout=20 \
        op monitor timeout=20s interval=10s depth=0 \
        meta migration-threshold=3 failure-timeout=60s
primitive inetmanager inetmanager \
        op monitor interval=10s timeout=160 \
        op stop interval=0 timeout=60s on-fail=restart \
        op start interval=0 timeout=60s on-fail=restart \
        meta migration-threshold=2 failure-timeout=60s resource-stickiness=100
primitive inetmanager_vip IPaddr2 \
        params ip=122.0.1.201 cidr_netmask=24 \
        op start interval=0 timeout=20 \
        op stop interval=0 timeout=20 \
        op monitor timeout=20s interval=10s depth=0 \
        meta migration-threshold=3 failure-timeout=60s
primitive logserver logserver \
        op monitor interval=20s timeout=220 \
        op stop interval=0 timeout=120s on-fail=restart \
        op start interval=0 timeout=120s on-fail=restart \
        meta failure-timeout=60s target-role=Started
primitive mariadb_vip IPaddr2 \
        params ip=122.0.1.204 cidr_netmask=24 \
        op start interval=0 timeout=20 \
        op stop interval=0 timeout=20 \
        op monitor timeout=20s interval=10s depth=0 \
        meta migration-threshold=3 failure-timeout=60s
primitive mysql mysql \
        op monitor interval=20s timeout=220 \
        op stop interval=0 timeout=120s on-fail=restart \
        op start interval=0 timeout=120s on-fail=restart \
        meta failure-timeout=60s target-role=Started
primitive p_rdbserver hardb_docker \
        op start timeout=3600 interval=0 \
        op stop timeout=1260 interval=0 \
        op promote timeout=3600 interval=0 \
        op monitor role=Master interval=1 timeout=30 \
        op monitor interval=15 timeout=7200 \
        meta migration-threshold=3 failure-timeout=3600s
primitive p_rdbvip IPaddr2 \
        params ip=100.0.1.203 cidr_netmask=24 \
        op start interval=0 timeout=20 \
        op stop interval=0 timeout=20 \
        op monitor timeout=20s interval=10s depth=0 \
        meta migration-threshold=3 failure-timeout=60s resource-stickiness=100
primitive rabbitmq rabbitmq \
        op monitor interval=20s timeout=220 \
        op stop interval=0 timeout=120s on-fail=restart \
        op start interval=0 timeout=120s on-fail=restart \
        meta failure-timeout=60s target-role=Started
primitive rabbitmq_vip IPaddr2 \
        params ip=122.0.1.200 \
        op start interval=0 timeout=20 \
        op stop interval=0 timeout=20 \
        op monitor timeout=20s interval=10s depth=0 \
        meta migration-threshold=3 failure-timeout=60s
primitive router router \
        op monitor interval=20s timeout=220 \
        op stop interval=0 timeout=120s on-fail=restart \
        op start interval=0 timeout=120s on-fail=restart \
        meta failure-timeout=60s target-role=Started
primitive router_vip IPaddr2 \
        params ip=100.0.1.201 cidr_netmask=24 \
        op start interval=0 timeout=20 \
        op stop interval=0 timeout=20 \
        op monitor timeout=20s interval=10s depth=0 \
        meta migration-threshold=3 failure-timeout=60s
primitive sdclient sdclient \
        op monitor interval=20s timeout=120s \
        op stop interval=0 timeout=120s on-fail=restart \
        op start interval=0 timeout=120s on-fail=restart \
        meta failure-timeout=60s
primitive sdclient_vip IPaddr2 \
        params ip=100.0.1.202 cidr_netmask=24 \
        op start interval=0 timeout=20 \
        op stop interval=0 timeout=20 \
        op monitor timeout=20s interval=10s depth=0 \
        meta migration-threshold=3 failure-timeout=60s
primitive storage storage \
        op monitor interval=20s timeout=220 \
        op stop interval=0 timeout=120s on-fail=restart \
        op start interval=0 timeout=120s on-fail=restart \
        meta failure-timeout=60s target-role=Started
primitive swr swr \
        op monitor interval=20s timeout=220 \
        op stop interval=0 timeout=120s on-fail=restart \
        op start interval=0 timeout=120s on-fail=restart \
        meta migration-threshold=2 failure-timeout=60s resource-stickiness=100
primitive tcfs_aerospike tcfs_aerospike \
        params probe_url=122.0.1.8 \
        op monitor interval=20s timeout=220 \
        op stop interval=0 timeout=120s on-fail=restart \
        op start interval=0 timeout=120s on-fail=restart \
        meta failure-timeout=60s target-role=Started
primitive tcfs_server tcfs_server \
        op monitor interval=20s timeout=220 \
        op stop interval=0 timeout=120s on-fail=restart \
        op start interval=0 timeout=120s on-fail=restart \
        meta failure-timeout=60s target-role=Started
primitive vip IPaddr2 \
        params ip=122.0.1.205 cidr_netmask=24 \
        op start interval=0 timeout=20 \
        op stop interval=0 timeout=20 \
        op monitor timeout=20s interval=10s depth=0 \
        meta migration-threshold=3 failure-timeout=60s
primitive vnm vnm \
        op monitor interval=20s timeout=340 \
        op stop interval=0 timeout=240s on-fail=restart \
        op start interval=0 timeout=240s on-fail=restart \
        meta migration-threshold=2 failure-timeout=60s resource-stickiness=100
primitive vnm_vip IPaddr2 \
        params ip=100.0.1.200 cidr_netmask=24 \
        op start interval=0 timeout=20 \
        op stop interval=0 timeout=20 \
        op monitor timeout=20s interval=10s depth=0 \
        meta migration-threshold=3 failure-timeout=60s
ms ms_rdbserver p_rdbserver \
        meta master-max=1 master-node-max=1 clone-max=3 clone-node-max=1 notify=true
clone apigateway_replica apigateway
clone logserver_replica logserver
clone mysql_replica mysql
clone rabbitmq_replica rabbitmq
clone router_replica router
clone sdclient_rep sdclient \
        meta target-role=Started
clone storage_replica storage
clone tcfs_aerospike_replica tcfs_aerospike
clone tcfs_server_replica tcfs_server
location apigateway_loc apigateway_vip \
        rule +inf: apigateway_workable eq 1
colocation c_rdb_with_vip inf: p_rdbvip ms_rdbserver:Master
colocation inetmanager_col +inf: inetmanager_vip inetmanager
order inetmanager_order Mandatory: inetmanager inetmanager_vip
location mysql_loc mariadb_vip \
        rule +inf: mysql_status eq ok
order o_vip_after_rdbserver inf: ms_rdbserver:promote p_rdbvip:start
location rabbitmq_loc rabbitmq_vip \
        rule +inf: rabbitmq_status eq ok
location router_loc router_vip \
        rule +inf: router_workable eq 1
location sdclient_loc sdclient_vip \
        rule +inf: sdclient_workable eq 1
colocation swr_col +inf: vip swr
order swr_order Mandatory: vip swr
colocation vnm_col +inf: vnm_vip vnm
order vnm_order Mandatory: vnm vnm_vip
property RDB_p_rdbserver_INSTANCE_UUID: \
        122.0.1.8_ebaserdb_1=375b9e0d-3e23-11e8-ab5c-0242ac110011
property RDB_p_rdbserver_REPL_INFO: \
        real_master=122.0.1.8 \
        master_node=122.0.1.8 \
        master_gtid=0 \
        master_score=2000
property cib-bootstrap-options: \
        have-watchdog=false \
        dc-version=1.1.16-12.el7-94ff4df \
        cluster-infrastructure=corosync \
        stonith-enabled=false \
        start-failure-is-fatal=false \
        load-threshold="3200%"






$ crm status
Stack: corosync
Current DC: 122.0.1.10 (version 1.1.16-12.el7-94ff4df) - partition with quorum
Last updated: Fri Apr 13 23:48:12 2018
Last change: Fri Apr 13 15:31:54 2018 by root via cibadmin on 122.0.1.8

3 nodes configured
42 resources configured

Online: [ 122.0.1.10 122.0.1.8 122.0.1.9 ]

Full list of resources:

 router_vip     (ocf::heartbeat:IPaddr2):       Started 122.0.1.10
 sdclient_vip   (ocf::heartbeat:IPaddr2):       Started 122.0.1.10
 apigateway_vip (ocf::heartbeat:IPaddr2):       Started 122.0.1.8
 mariadb_vip    (ocf::heartbeat:IPaddr2):       Started 122.0.1.9
 Clone Set: sdclient_rep [sdclient]
     sdclient   (ocf::heartbeat:sdclient):      FAILED 122.0.1.9
     Started: [ 122.0.1.10 ]
     Stopped: [ 122.0.1.8 ]
 Clone Set: apigateway_replica [apigateway]
     Started: [ 122.0.1.10 122.0.1.8 122.0.1.9 ]
 Clone Set: router_replica [router]
     Started: [ 122.0.1.10 122.0.1.8 122.0.1.9 ]
 Clone Set: mysql_replica [mysql]
     Started: [ 122.0.1.10 122.0.1.8 122.0.1.9 ]
 rabbitmq_vip   (ocf::heartbeat:IPaddr2):       Started 122.0.1.8
 Clone Set: rabbitmq_replica [rabbitmq]
     rabbitmq   (ocf::heartbeat:rabbitmq):      FAILED 122.0.1.9
     Stopped: [ 122.0.1.10 122.0.1.8 ]
 inetmanager_vip        (ocf::heartbeat:IPaddr2):       Stopped
 inetmanager    (ocf::heartbeat:inetmanager):   Stopped
 vnm_vip        (ocf::heartbeat:IPaddr2):       Stopped
 vnm    (ocf::heartbeat:vnm):   FAILED 122.0.1.10
 Clone Set: storage_replica [storage]
     storage    (ocf::heartbeat:storage):       FAILED 122.0.1.9
     Stopped: [ 122.0.1.10 122.0.1.8 ]
 vip    (ocf::heartbeat:IPaddr2):       Started 122.0.1.8
 swr    (ocf::heartbeat:swr):   FAILED 122.0.1.9
 Clone Set: tcfs_aerospike_replica [tcfs_aerospike]
     tcfs_aerospike     (ocf::heartbeat:tcfs_aerospike):        FAILED 122.0.1.9
     Stopped: [ 122.0.1.10 122.0.1.8 ]
 Clone Set: tcfs_server_replica [tcfs_server]
     tcfs_server        (ocf::heartbeat:tcfs_server):   FAILED 122.0.1.9
     Stopped: [ 122.0.1.10 122.0.1.8 ]
 Clone Set: logserver_replica [logserver]
     logserver  (ocf::heartbeat:logserver):     FAILED 122.0.1.9
     Stopped: [ 122.0.1.10 122.0.1.8 ]
 p_rdbvip       (ocf::heartbeat:IPaddr2):       Started 122.0.1.8
 Master/Slave Set: ms_rdbserver [p_rdbserver]
     Masters: [ 122.0.1.8 ]
     Stopped: [ 122.0.1.10 122.0.1.9 ]

Failed Actions:
* sdclient_monitor_0 on 122.0.1.9 'unknown error' (1): call=2142, status=complete, exitreason='none',
    last-rc-change='Fri Apr 13 23:47:19 2018', queued=0ms, exec=73ms
* rabbitmq_monitor_0 on 122.0.1.9 'unknown error' (1): call=2143, status=complete, exitreason='none',
    last-rc-change='Fri Apr 13 23:47:19 2018', queued=0ms, exec=66ms
* storage_monitor_0 on 122.0.1.9 'unknown error' (1): call=2145, status=complete, exitreason='none',
    last-rc-change='Fri Apr 13 23:47:19 2018', queued=0ms, exec=60ms
* p_rdbserver_monitor_0 on 122.0.1.9 'not installed' (5): call=135, status=Not installed, exitreason='none',
    last-rc-change='Fri Apr 13 13:39:33 2018', queued=0ms, exec=0ms
* tcfs_aerospike_monitor_0 on 122.0.1.9 'unknown error' (1): call=2147, status=complete, exitreason='none',
    last-rc-change='Fri Apr 13 23:47:19 2018', queued=1ms, exec=49ms
* swr_monitor_0 on 122.0.1.9 'unknown error' (1): call=2146, status=complete, exitreason='none',
    last-rc-change='Fri Apr 13 23:47:19 2018', queued=0ms, exec=45ms
* tcfs_server_monitor_0 on 122.0.1.9 'unknown error' (1): call=2148, status=complete, exitreason='none',
    last-rc-change='Fri Apr 13 23:47:19 2018', queued=0ms, exec=52ms
* logserver_monitor_0 on 122.0.1.9 'unknown error' (1): call=2149, status=complete, exitreason='none',
    last-rc-change='Fri Apr 13 23:47:19 2018', queued=0ms, exec=58ms
* vnm_monitor_20000 on 122.0.1.10 'unknown error' (1): call=619, status=complete, exitreason='none',
    last-rc-change='Fri Apr 13 23:47:19 2018', queued=0ms, exec=241245ms
* p_rdbserver_monitor_0 on 122.0.1.10 'not installed' (5): call=101, status=Not installed, exitreason='none',
    last-rc-change='Fri Apr 13 13:37:10 2018', queued=0ms, exec=0ms





corosync.log of node 122.0.1.8
Apr 13 23:49:56 [6302] paas-controller-122-0-1-8        cib:     info: cib_perform_op:  Diff: --- 0.124.2399 2
Apr 13 23:49:56 [6302] paas-controller-122-0-1-8        cib:     info: cib_perform_op:  Diff: +++ 0.124.2400 (null)
Apr 13 23:49:56 [6302] paas-controller-122-0-1-8        cib:     info: cib_perform_op:  +  /cib:  @num_updates=2400
Apr 13 23:49:56 [6302] paas-controller-122-0-1-8        cib:     info: cib_perform_op:  +  /cib/status/node_state[@id='1009']/lrm[@id='1009']/lrm_resources/lrm_resource[@id='inetmanager']/lrm_rsc_op[@id='inetmanager_last_failure_0']:  @transition-key=24:360:7:a7901eb1-462f-4259-a613-e0023ce8a6be, @transition-magic=0:1;24:360:7:a7901eb1-462f-4259-a613-e0023ce8a6be, @call-id=2152, @last-run=1523634518, @last-rc-change=1523634518, @exec-time=78408
Apr 13 23:49:56 [6302] paas-controller-122-0-1-8        cib:     info: cib_perform_op:  +  /cib/status/node_state[@id='1009']/lrm[@id='1009']/lrm_resources/lrm_resource[@id='inetmanager']/lrm_rsc_op[@id='inetmanager_last_0']:  @transition-key=24:360:7:a7901eb1-462f-4259-a613-e0023ce8a6be, @transition-magic=0:1;24:360:7:a7901eb1-462f-4259-a613-e0023ce8a6be, @call-id=2152, @last-run=1523634518, @last-rc-change=1523634518, @exec-time=78408
Apr 13 23:49:56 [6302] paas-controller-122-0-1-8        cib:     info: cib_process_request:     Completed cib_modify operation for section status: OK (rc=0, origin=122.0.1.9/crmd/2092, version=0.124.2400)
Apr 13 23:50:01 [6302] paas-controller-122-0-1-8        cib:     info: cib_process_ping:        Reporting our current digest to 122.0.1.10: 139d30df3b1f549818371c8cb08fab09 for 0.124.2400 (0x7fa03378e1e0 0)



corosync.log of node 122.0.1.9
Apr 13 23:49:56 [6255] paas-controller-122-0-1-9       crmd:  warning: find_xml_node:   Could not find parameters in resource-agent.
Apr 13 23:49:56 [6255] paas-controller-122-0-1-9       crmd:   notice: process_lrm_event:       Result of probe operation for inetmanager on 122.0.1.9: 1 (unknown error) | call=2152 key=inetmanager_monitor_0 confirmed=true cib-update=2092
Apr 13 23:49:56 [6250] paas-controller-122-0-1-9        cib:     info: cib_process_request:     Forwarding cib_modify operation for section status to all (origin=local/crmd/2092)
Apr 13 23:49:56 [6250] paas-controller-122-0-1-9        cib:     info: cib_perform_op:  Diff: --- 0.124.2399 2
Apr 13 23:49:56 [6250] paas-controller-122-0-1-9        cib:     info: cib_perform_op:  Diff: +++ 0.124.2400 (null)
Apr 13 23:49:56 [6250] paas-controller-122-0-1-9        cib:     info: cib_perform_op:  +  /cib:  @num_updates=2400
Apr 13 23:49:56 [6250] paas-controller-122-0-1-9        cib:     info: cib_perform_op:  +  /cib/status/node_state[@id='1009']/lrm[@id='1009']/lrm_resources/lrm_resource[@id='inetmanager']/lrm_rsc_op[@id='inetmanager_last_failure_0']:  @transition-key=24:360:7:a7901eb1-462f-4259-a613-e0023ce8a6be, @transition-magic=0:1;24:360:7:a7901eb1-462f-4259-a613-e0023ce8a6be, @call-id=2152, @last-run=1523634518, @last-rc-change=1523634518, @exec-time=78408
Apr 13 23:49:56 [6250] paas-controller-122-0-1-9        cib:     info: cib_perform_op:  +  /cib/status/node_state[@id='1009']/lrm[@id='1009']/lrm_resources/lrm_resource[@id='inetmanager']/lrm_rsc_op[@id='inetmanager_last_0']:  @transition-key=24:360:7:a7901eb1-462f-4259-a613-e0023ce8a6be, @transition-magic=0:1;24:360:7:a7901eb1-462f-4259-a613-e0023ce8a6be, @call-id=2152, @last-run=1523634518, @last-rc-change=1523634518, @exec-time=78408
Apr 13 23:49:56 [6250] paas-controller-122-0-1-9        cib:     info: cib_process_request:     Completed cib_modify operation for section status: OK (rc=0, origin=122.0.1.9/crmd/2092, version=0.124.2400)
Apr 13 23:50:01 [6250] paas-controller-122-0-1-9        cib:     info: cib_process_ping:        Reporting our current digest to 122.0.1.10: 139d30df3b1f549818371c8cb08fab09 for 0.124.2400 (0x7fb76e204f30 0)



corosync.log of node 122.0.1.10
Apr 13 23:49:56 [6132] paas-controller-122-0-1-10        cib:     info: cib_perform_op: Diff: --- 0.124.2399 2
Apr 13 23:49:56 [6132] paas-controller-122-0-1-10        cib:     info: cib_perform_op: Diff: +++ 0.124.2400 (null)
Apr 13 23:49:56 [6132] paas-controller-122-0-1-10        cib:     info: cib_perform_op: +  /cib:  @num_updates=2400
Apr 13 23:49:56 [6132] paas-controller-122-0-1-10        cib:     info: cib_perform_op: +  /cib/status/node_state[@id='1009']/lrm[@id='1009']/lrm_resources/lrm_resource[@id='inetmanager']/lrm_rsc_op[@id='inetmanager_last_failure_0']:  @transition-key=24:360:7:a7901eb1-462f-4259-a613-e0023ce8a6be, @transition-magic=0:1;24:360:7:a7901eb1-462f-4259-a613-e0023ce8a6be, @call-id=2152, @last-run=1523634518, @last-rc-change=1523634518, @exec-time=78408
Apr 13 23:49:56 [6132] paas-controller-122-0-1-10        cib:     info: cib_perform_op: +  /cib/status/node_state[@id='1009']/lrm[@id='1009']/lrm_resources/lrm_resource[@id='inetmanager']/lrm_rsc_op[@id='inetmanager_last_0']:  @transition-key=24:360:7:a7901eb1-462f-4259-a613-e0023ce8a6be, @transition-magic=0:1;24:360:7:a7901eb1-462f-4259-a613-e0023ce8a6be, @call-id=2152, @last-run=1523634518, @last-rc-change=1523634518, @exec-time=78408
Apr 13 23:49:56 [6132] paas-controller-122-0-1-10        cib:     info: cib_process_request:    Completed cib_modify operation for section status: OK (rc=0, origin=122.0.1.9/crmd/2092, version=0.124.2400)
Apr 13 23:49:56 [6137] paas-controller-122-0-1-10       crmd:  warning: status_from_rc: Action 24 (inetmanager_monitor_0) on 122.0.1.9 failed (target: 7 vs. rc: 1): Error
Apr 13 23:49:56 [6137] paas-controller-122-0-1-10       crmd:     info: abort_transition_graph: Transition aborted by operation inetmanager_monitor_0 'modify' on 122.0.1.9: Event failed | magic=0:1;24:360:7:a7901eb1-462f-4259-a613-e0023ce8a6be cib=0.124.2400 source=match_graph_event:310 complete=false
Apr 13 23:49:56 [6137] paas-controller-122-0-1-10       crmd:     info: match_graph_event:      Action inetmanager_monitor_0 (24) confirmed on 122.0.1.9 (rc=1)
Apr 13 23:49:56 [6137] paas-controller-122-0-1-10       crmd:     info: process_graph_event:    Detected action (360.24) inetmanager_monitor_0.2152=unknown error: failed
Apr 13 23:49:56 [6137] paas-controller-122-0-1-10       crmd:  warning: status_from_rc: Action 24 (inetmanager_monitor_0) on 122.0.1.9 failed (target: 7 vs. rc: 1): Error
Apr 13 23:49:56 [6137] paas-controller-122-0-1-10       crmd:     info: abort_transition_graph: Transition aborted by operation inetmanager_monitor_0 'modify' on 122.0.1.9: Event failed | magic=0:1;24:360:7:a7901eb1-462f-4259-a613-e0023ce8a6be cib=0.124.2400 source=match_graph_event:310 complete=false
Apr 13 23:49:56 [6137] paas-controller-122-0-1-10       crmd:     info: match_graph_event:      Action inetmanager_monitor_0 (24) confirmed on 122.0.1.9 (rc=1)
Apr 13 23:49:56 [6137] paas-controller-122-0-1-10       crmd:     info: process_graph_event:    Detected action (360.24) inetmanager_monitor_0.2152=unknown error: failed
Apr 13 23:50:01 [6132] paas-controller-122-0-1-10        cib:     info: cib_process_ping:       Reporting our current digest to 122.0.1.10: 139d30df3b1f549818371c8cb08fab09 for 0.124.2400 (0x7f9fd1aa1c10 0)
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <https://lists.clusterlabs.org/pipermail/users/attachments/20180414/50761a53/attachment-0001.html>


More information about the Users mailing list