Hi all,
We are using Pacemaker and the ocf:heartbeat:ManageVE RA to manage a number
of OpenVZ CTs on a three-node cluster. The CTs are hosted on an NFS
mount shared by all three nodes, which is managed by the
ocf:heartbeat:Filesystem RA.
We get errors in certain situations, for instance when starting all
resources or when switching nodes between online and standby states. A
few examples:
error after setting node virtuetest2 to 'online':
-------------------------------------------------
Mar 25 18:05:08 virtuetest2 ManageVE(ploop8)[382818]: ERROR: Restoring
container ... Unable to open /virtual/vz/dump/Dump.58: No such file or directory
It seems migrate_from_ve() of ManageVE is executed before the files on
the NFS mount are available. It looks like heartbeat:Filesystem reports
success for the NFS mount before the mount is actually usable.
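If Filesystem really returns from start before the NFS mount is usable, I am
not sure a config change can fix it, but a deeper monitor might at least catch
the bad state. A sketch I have not tried yet, using the OCF_CHECK_LEVEL
support documented for the Filesystem RA (depth 20 writes and reads a status
file on the mount):

primitive p_nfs ocf:heartbeat:Filesystem \
params device="10.10.10.201:/vol/virtuetest/virtuetest" \
directory="/virtual" fstype="nfs" \
options="rsize=64512,wsize=64512,intr,noacl,nolock,ac,sync,tcp" \
op monitor interval="30" timeout="40" OCF_CHECK_LEVEL="20" \
op start interval="0" timeout="60" \
op stop interval="0" timeout="60"

This only deepens the recurring monitor, though; it would not delay a start
operation that returns too early.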
error after setting node virtuetest3 to 'online':
-------------------------------------------------
Mar 25 18:17:58 virtuetest3 pengine: [1627]: ERROR: native_create_actions:
Resource ploop3 (ocf::ManageVE) is active on 2 nodes attempting recovery
This also happened during a live migration of the CT. Apparently
Pacemaker sees the CT running on both nodes, although it had already
been suspended on the source node. As a result, Pacemaker unnecessarily
restarts the CT on the target node.
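To narrow this down, I plan to watch both nodes while a migration is running,
roughly like this (CT 53 / ploop3 as in the log above; vzctl status and
crm_resource are the stock tools):

# run on the source and the target node during the migration
vzctl status 53     # e.g. "CTID 53 exist mounted running"
# where Pacemaker currently believes the resource is active
crm_resource --resource ploop3 --locate

If vzctl still reports the CT as running on the source while it is merely
suspended, ManageVE's monitor would explain why Pacemaker sees it active on
two nodes.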
error after setting node virtuetest1 to 'online' (starting all
resources):
--------------------------------------------------------------
Mar 28 11:49:51 virtuetest1 ManageVE(ploop6)[782233]: ERROR: Starting
container... Adding delta dev=/dev/ploop44896
img=/virtual/vz/private/56/root.hdd/root.hdd (rw) Error in reread_part
(ploop.c:988): BLKRRPART /dev/ploop44896: Input/output error Error in
ploop_mount_fs (ploop.c:1017): Can't mount file system dev=/dev/ploop44896p1
target=/virtual/vz/root/56: No such device or address Failed to mount image:
Error in ploop_mount_fs (ploop.c:1017): Can't mount file system
dev=/dev/ploop44896p1 target=/virtual/vz/root/56: No such device or address [21
The error complains about the mount target "/virtual/vz/root/56",
although that directory should exist if the NFS mount was successful. Is
heartbeat:Filesystem reporting success too early here as well?
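Next time this happens I will check by hand on the target node, right after
the Filesystem start is reported as finished (a manual sketch; paths as in the
config below):

# is the NFS share really mounted and populated?
mountpoint -q /virtual && echo "/virtual is mounted"
ls -ld /virtual/vz/private/56 /virtual/vz/root/56

If the private area or the root mount point is missing at that moment,
Filesystem has indeed returned success before the mount was usable.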
When I stop/restart or checkpoint/restore the CTs manually, I am not
able to reproduce any of the above errors.
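To rule live migration in or out as the trigger, I could disable it for a
single CT and see whether the errors disappear there (untested; crm shell
syntax):

# make ploop8 use plain stop/start instead of checkpoint/restore
crm resource meta ploop8 set allow-migrate false

If that CT then moves cleanly while the others still fail, the race would be
specific to migrate_to/migrate_from.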
Any help appreciated.
Roman
pacemaker 1.1.7
resource-agents 3.9.5
This is my config:
------------------
node virtuetest1 \
attributes standby="off"
node virtuetest2 \
attributes standby="off"
node virtuetest3 \
attributes standby="off"
primitive mailalerts ocf:pacemaker:ClusterMon \
params extra_options="--mail-to root --mail-from [email protected] --mail-prefix virtuetest" \
pidfile="/var/run/crm/crm_mon.pid" \
op start interval="0" timeout="90s" \
op stop interval="0" timeout="100s" \
op monitor interval="10s" timeout="20s" \
meta target-role="Stopped"
primitive p_lsb_vz lsb:vz \
op monitor interval="30" timeout="120s"
primitive p_nfs ocf:heartbeat:Filesystem \
params device="10.10.10.201:/vol/virtuetest/virtuetest" \
directory="/virtual" fstype="nfs" \
options="rsize=64512,wsize=64512,intr,noacl,nolock,ac,sync,tcp" \
op monitor interval="30" timeout="40" \
op start interval="0" timeout="60" \
op stop interval="0" timeout="60"
primitive p_nfs_dump ocf:heartbeat:Filesystem \
params device="10.10.10.201:/vol/virtuedump/virtuedump" \
directory="/virtual/vz/dump" fstype="nfs" \
options="rsize=64512,wsize=64512,intr,noacl,nolock,ac,async,tcp" \
op monitor interval="30" timeout="40" \
op start interval="0" timeout="60" \
op stop interval="0" timeout="60"
primitive p_sbd lsb:sbd \
op monitor interval="30" timeout="120"
primitive ploop1 ocf:heartbeat:ManageVE \
params veid="51" \
op monitor interval="30" timeout="120s" \
op start interval="0" timeout="300s" \
op stop interval="0" timeout="300s" \
op migrate_to interval="0" timeout="300s" \
op migrate_from interval="0" timeout="300s" \
meta target-role="Started" allow-migrate="true" is-managed="true"
primitive ploop2 ocf:heartbeat:ManageVE \
params veid="52" \
op monitor interval="30" timeout="120s" \
op start interval="0" timeout="300s" \
op stop interval="0" timeout="300s" \
op migrate_to interval="0" timeout="300s" \
op migrate_from interval="0" timeout="300s" \
meta target-role="Started" allow-migrate="true"
primitive ploop3 ocf:heartbeat:ManageVE \
params veid="53" \
op monitor interval="30" timeout="120s" \
op start interval="0" timeout="300s" \
op stop interval="0" timeout="300s" \
op migrate_to interval="0" timeout="300s" \
op migrate_from interval="0" timeout="300s" \
meta target-role="Started" allow-migrate="true"
primitive ploop4 ocf:heartbeat:ManageVE \
params veid="54" \
op monitor interval="30" timeout="120s" \
op start interval="0" timeout="300s" \
op stop interval="0" timeout="300s" \
op migrate_to interval="0" timeout="300s" \
op migrate_from interval="0" timeout="300s" \
meta target-role="Started" allow-migrate="true"
primitive ploop5 ocf:heartbeat:ManageVE \
params veid="55" \
op monitor interval="30" timeout="120s" \
op start interval="0" timeout="300s" \
op stop interval="0" timeout="300s" \
op migrate_to interval="0" timeout="300s" \
op migrate_from interval="0" timeout="300s" \
meta target-role="Started" allow-migrate="true"
primitive ploop6 ocf:heartbeat:ManageVE \
params veid="56" \
op monitor interval="30" timeout="120s" \
op start interval="0" timeout="300s" \
op stop interval="0" timeout="300s" \
op migrate_to interval="0" timeout="300s" \
op migrate_from interval="0" timeout="300s" \
meta target-role="Started" allow-migrate="true" is-managed="true"
primitive ploop7 ocf:heartbeat:ManageVE \
params veid="57" \
op monitor interval="30" timeout="120s" \
op start interval="0" timeout="300s" \
op stop interval="0" timeout="300s" \
op migrate_to interval="0" timeout="300s" \
op migrate_from interval="0" timeout="300s" \
meta target-role="Started" allow-migrate="true"
primitive ploop8 ocf:heartbeat:ManageVE \
params veid="58" \
op monitor interval="30" timeout="120s" \
op start interval="0" timeout="300s" \
op stop interval="0" timeout="300s" \
op migrate_to interval="0" timeout="300s" \
op migrate_from interval="0" timeout="300s" \
meta target-role="Started" allow-migrate="true"
primitive ploop9 ocf:heartbeat:ManageVE \
params veid="59" \
op monitor interval="30" timeout="120s" \
op start interval="0" timeout="300s" \
op stop interval="0" timeout="300s" \
op migrate_to interval="0" timeout="300s" \
op migrate_from interval="0" timeout="300s" \
meta target-role="Started" allow-migrate="true"
primitive stonith_sbd stonith:external/sbd \
params sbd_device="/dev/disk/by-id/scsi-360a98000572d4c73526f6e4d36685764"
clone cl_nfs p_nfs \
meta interleave="true" target-role="Started"
clone cl_nfs_dump p_nfs_dump \
meta interleave="true" target-role="Started"
clone cl_sbd p_sbd \
meta interleave="true" target-role="Started"
clone cl_vz p_lsb_vz \
meta interleave="true" target-role="Started" is-managed="true"
location cli-prefer-ploop1 ploop1 \
rule $id="cli-prefer-rule-ploop1" inf: #uname eq virtuetest1
location cli-prefer-ploop2 ploop2 \
rule $id="cli-prefer-rule-ploop2" inf: #uname eq virtuetest2
location cli-prefer-ploop3 ploop3 \
rule $id="cli-prefer-rule-ploop3" inf: #uname eq virtuetest3
location cli-prefer-ploop4 ploop4 \
rule $id="cli-prefer-rule-ploop4" inf: #uname eq virtuetest1
location cli-prefer-ploop5 ploop5 \
rule $id="cli-prefer-rule-ploop5" inf: #uname eq virtuetest2
location cli-prefer-ploop6 ploop6 \
rule $id="cli-prefer-rule-ploop6" inf: #uname eq virtuetest3
location cli-prefer-ploop7 ploop7 \
rule $id="cli-prefer-rule-ploop7" inf: #uname eq virtuetest1
location cli-prefer-ploop8 ploop8 \
rule $id="cli-prefer-rule-ploop8" inf: #uname eq virtuetest2
location cli-prefer-ploop9 ploop9 \
rule $id="cli-prefer-rule-ploop9" inf: #uname eq virtuetest3
order o_nfs_before_vz inf: cl_nfs cl_nfs_dump cl_vz
order o_sbd_before_vz inf: cl_sbd cl_vz
order o_vz_before_ploop1 inf: cl_vz ploop1
order o_vz_before_ploop2 inf: cl_vz ploop2
order o_vz_before_ploop3 inf: cl_vz ploop3
order o_vz_before_ploop4 inf: cl_vz ploop4
order o_vz_before_ploop5 inf: cl_vz ploop5
order o_vz_before_ploop6 inf: cl_vz ploop6
order o_vz_before_ploop7 inf: cl_vz ploop7
order o_vz_before_ploop8 inf: cl_vz ploop8
order o_vz_before_ploop9 inf: cl_vz ploop9
property $id="cib-bootstrap-options" \
dc-version="1.1.7-ee0730e13d124c3d58f00016c3376a1de5323cff" \
cluster-infrastructure="openais" \
expected-quorum-votes="3" \
stonith-enabled="true" \
no-quorum-policy="ignore" \
last-lrm-refresh="1364990043" \
maintenance-mode="false" \
migration-limit="1" \
batch-limit="5"
rsc_defaults $id="rsc-options" \
resource-stickiness="200"