Hello Everyone,

I was behind a project to create a test cluster using Pacemaker on suse11.
With kind help of lmb and beekhof @ #linux-cluster i was finally able to put
up a two node cluster using HP ML350g5 each with two HBA connected to a
MSA2012fcdc.

The cluster resource both apache2 and postgresql requires cleanup every time
i boot the cluster (this was a test cluster - which was switched off at the
end of the day - or when i see the level of madness in me cross the
barrier), when a simulated failover (by making the other node stand by) , or
when i pull the nic cable of one node. Ipaddress and stonith was working
fine as planned. but the big boys - apache2 and postgresql is having trouble
and i have to cleanup always.

I would like to give the log file as attachment (/var/log/messages) - but it
is 3.2GB in size and  has lot of repeated entries - whic i did not find
relevant.  Any way i 'm attaching the cib.xml and cib.sig (these files were
taken after openais on both nodes were stopped. Also attaching openais.conf)
of the herewith for your kind perusal.  Please help me. This is my 4th week
in trying to make a happy cluster.  Please help.

My hardware config is as follows.

Two HP ML350g5 with
=================
One QC intel 2.33
6GB ram
One gbe nic,
one ilo port
two HBA
Two Sata drives
one DVD
suse11 + HA kit (eval copy)
postgres (the one which came with suse)
apache (the one which came with suse)
ocfs2 (the one which came with suse)

configuration was done exactly as mentioned in the pdf by novel. (except the
case that i have used only one nic - that too connected to a 10/100 pocket
switch- no other heartbeat medium was used)  -  is one nic a problem - is
there any mandate that i should 2 nics?
==================

Thanks in advance,

take care,

ajith / deucn

-- 
After all, all you have is your dreams and yourself... Never let those go,
if you do, you will be left alone and cold for all eternity...
<cib validate-with="pacemaker-1.0" crm_feature_set="3.0.1" have-quorum="0" dc-uuid="node1" admin_epoch="0" epoch="510" num_updates="0" cib-last-written="Mon Aug  3 17:27:48 2009">
  <configuration>
    <crm_config>
      <cluster_property_set id="cib-bootstrap-options">
        <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.0.3-0080ec086ae9c20ad5c4c3562000c0ad68374f0a"/>
        <nvpair name="last-lrm-refresh" id="cib-bootstrap-options-last-lrm-refresh" value="1249297938"/>
        <nvpair id="nvpair-f8ef1457-f079-4c3b-a7b7-a424e5fb7e14" name="no-quorum-policy" value="ignore"/>
        <nvpair id="nvpair-17bd5af5-0199-47db-abd0-d2149666e5ff" name="cluster-recheck-interval" value="10min"/>
        <nvpair id="cib-bootstrap-options-expected-quorum-votes" name="expected-quorum-votes" value="2"/>
      </cluster_property_set>
    </crm_config>
    <nodes>
      <node uname="node1" type="normal" id="node1">
        <instance_attributes id="nodes-node1">
          <nvpair name="standby" id="standby-node1" value="false"/>
        </instance_attributes>
      </node>
      <node uname="node2" type="normal" id="node2">
        <instance_attributes id="nodes-node2">
          <nvpair name="standby" id="standby-node2" value="false"/>
        </instance_attributes>
      </node>
    </nodes>
    <resources>
      <primitive class="stonith" type="external/riloe" id="st1">
        <meta_attributes id="st1-meta_attributes">
          <nvpair id="nvpair-87e11c49-8023-46fa-b379-1e2c677acd14" name="target-role" value="started"/>
        </meta_attributes>
        <operations id="st1-operations">
          <op id="st1-op-monitor-15" interval="2" name="monitor" start-delay="15" timeout="5"/>
        </operations>
        <instance_attributes id="st1-instance_attributes">
          <nvpair id="nvpair-979c9ffe-68fc-484a-83b3-c021daaf3298" name="hostlist" value="node2"/>
          <nvpair id="nvpair-9f2055e3-34e3-499c-835f-c4ae8bd7ce44" name="ilo_hostname" value="ilo2"/>
          <nvpair id="nvpair-a545fabd-c056-4d8a-9aec-00fed53ac657" name="ilo_user" value="node"/>
          <nvpair id="nvpair-4d965f2f-5f01-4c1c-91df-7995dcd67679" name="ilo_password" value="test1234"/>
          <nvpair name="ilo_can_reset" id="nvpair-09035dd0-9d57-4668-be77-eddd6ecc1e4f" value="0"/>
          <nvpair id="nvpair-767d713b-4b96-4f03-9494-663d21f39768" name="ilo_powerdown_method" value="power"/>
        </instance_attributes>
      </primitive>
      <primitive class="stonith" type="external/riloe" id="st2">
        <meta_attributes id="st2-meta_attributes">
          <nvpair id="nvpair-8660714d-80a5-45db-9f9a-753441fc979a" name="target-role" value="Started"/>
        </meta_attributes>
        <operations id="st2-operations">
          <op id="st2-op-monitor-15" interval="2" name="monitor" start-delay="15" timeout="5"/>
        </operations>
        <instance_attributes id="st2-instance_attributes">
          <nvpair id="nvpair-8a6866e4-891e-4a02-9e0d-8e3e99a8ce9f" name="hostlist" value="node1"/>
          <nvpair id="nvpair-16276503-8ec7-4da0-b234-13e1dc439394" name="ilo_hostname" value="ilo1"/>
          <nvpair id="nvpair-88bb77c5-a16e-46d8-897c-014a9e4fa659" name="ilo_user" value="node"/>
          <nvpair id="nvpair-9773273f-66af-4b59-92be-2a41a9180b55" name="ilo_password" value="test1234"/>
          <nvpair name="ilo_can_reset" id="nvpair-cd76fc1d-ebf6-4b82-b44f-e0cc1ab06e16" value="0"/>
          <nvpair id="nvpair-8cbd62d0-8709-4bb9-bb71-5674a6865a59" name="ilo_powerdown_method" value="power"/>
        </instance_attributes>
      </primitive>
      <clone id="dlm-clone">
        <meta_attributes id="dlm-clone-meta_attributes">
          <nvpair id="dlm-clone-meta_attributes-globally-unique" name="globally-unique" value="false"/>
          <nvpair id="dlm-clone-meta_attributes-interleave" name="interleave" value="true"/>
        </meta_attributes>
        <primitive class="ocf" id="dlm" provider="pacemaker" type="controld">
          <operations>
            <op id="dlm-monitor-120s" interval="120s" name="monitor"/>
          </operations>
        </primitive>
      </clone>
      <clone id="o2cb-clone">
        <meta_attributes id="o2cb-clone-meta_attributes">
          <nvpair id="o2cb-clone-meta_attributes-globally-unique" name="globally-unique" value="false"/>
          <nvpair id="nvpair-0eee0ac5-15e9-4bf6-a10b-457eddc02d78" name="interleave" value="true"/>
        </meta_attributes>
        <primitive class="ocf" id="o2cb" provider="ocfs2" type="o2cb">
          <operations>
            <op id="o2cb-monitor-120s" interval="120s" name="monitor"/>
          </operations>
        </primitive>
      </clone>
      <clone id="fs-clone">
        <meta_attributes id="fs-clone-meta_attributes">
          <nvpair id="fs-clone-meta_attributes-interleave" name="interleave" value="true"/>
          <nvpair id="fs-clone-meta_attributes-ordered" name="ordered" value="true"/>
        </meta_attributes>
        <primitive class="ocf" id="fs" provider="heartbeat" type="Filesystem">
          <instance_attributes id="fs-instance_attributes">
            <nvpair id="fs-instance_attributes-device" name="device" value="/dev/mapper/link1_part3"/>
            <nvpair id="fs-instance_attributes-directory" name="directory" value="/shared"/>
            <nvpair id="fs-instance_attributes-fstype" name="fstype" value="ocfs2"/>
          </instance_attributes>
          <operations>
            <op id="fs-monitor-120s" interval="120s" name="monitor"/>
          </operations>
        </primitive>
      </clone>
      <group id="services">
        <meta_attributes id="services-meta_attributes">
          <nvpair id="nvpair-56950ac3-1a2a-470d-9b37-5d77097953d0" name="target-role" value="started"/>
        </meta_attributes>
        <primitive class="ocf" id="ip_address" provider="heartbeat" type="IPaddr">
          <meta_attributes id="ip_address-meta_attributes">                      </meta_attributes>
          <operations id="ip_address-operations">
            <op id="ip_address-op-monitor-5s" interval="5s" name="monitor" start-delay="1s" timeout="30s"/>
          </operations>
          <instance_attributes id="ip_address-instance_attributes">
            <nvpair id="nvpair-3648c0e7-1005-4177-ab94-7a50604399c7" name="ip" value="192.168.1.53"/>
          </instance_attributes>
        </primitive>
        <primitive class="ocf" id="postgres" provider="heartbeat" type="pgsql">
          <meta_attributes id="postgres-meta_attributes">
            <nvpair id="postgres-meta_attributes-target-role" name="target-role" value="started"/>
          </meta_attributes>
          <operations id="postgres-operations">
            <op id="postgres-op-monitor-30" interval="30" name="monitor" start-delay="8" timeout="30"/>
          </operations>
          <instance_attributes id="postgres-instance_attributes">
            <nvpair id="nvpair-f00d1543-9a34-416a-9792-998f2aa7ede6" name="pgdata" value="/shared/data"/>
            <nvpair id="nvpair-225328c4-fb56-4bda-aa3c-2bb9ef72cac9" name="pghost" value="*"/>
            <nvpair id="nvpair-9950f836-08c2-41f8-9b24-7ac411d79902" name="pgport" value="5432"/>
            <nvpair id="nvpair-f23b9022-a211-40c5-98f0-dfabec3118e5" name="pgdb" value="template1"/>
          </instance_attributes>
        </primitive>
        <primitive class="ocf" id="web_server" provider="heartbeat" type="apache">
          <operations id="web_server-operations">
            <op id="web_server-op-monitor-15" interval="10" name="monitor" start-delay="12" timeout="30"/>
          </operations>
          <instance_attributes id="web_server-instance_attributes">
            <nvpair id="nvpair-a74e522c-213b-4793-b9f8-367297507370" name="configfile" value="/etc/apache2/httpd.conf"/>
          </instance_attributes>
          <meta_attributes id="web_server-meta_attributes">
            <nvpair id="web_server-meta_attributes-target-role" name="target-role" value="started"/>
          </meta_attributes>
        </primitive>
      </group>
    </resources>
    <constraints>
      <rsc_location id="st1_cons" node="node2" rsc="st1" score="-INFINITY"/>
      <rsc_location id="st2_cons" node="node1" rsc="st2" score="-INFINITY"/>
      <rsc_colocation id="o2cb-with-dlm" rsc="o2cb-clone" score="INFINITY" with-rsc="dlm-clone"/>
      <rsc_order first="dlm-clone" id="start-o2cb-after-dlm" score="INFINITY" then="o2cb-clone"/>
      <rsc_colocation id="fs-with-o2cb" rsc="fs-clone" score="INFINITY" with-rsc="o2cb-clone"/>
      <rsc_order first="o2cb-clone" id="start-fs-after-o2cb" score="INFINITY" then="fs-clone"/>
    </constraints>
    <op_defaults>
      <meta_attributes id="op_defaults-options">
        <nvpair id="nvpair-a74cab2f-2fb3-46d3-afd1-9753c40bb62d" name="on-fail" value="restart"/>
      </meta_attributes>
    </op_defaults>
    <rsc_defaults>
      <meta_attributes id="rsc_defaults-options">
        <nvpair id="nvpair-61b3f7ee-ecb8-439b-b913-11e8aca589fd" name="is-managed" value="true"/>
        <nvpair id="nvpair-31631835-f93f-4556-a55e-0f0e9137de8c" name="multiple-active" value="stop_only"/>
      </meta_attributes>
    </rsc_defaults>
  </configuration>
</cib>

Attachment: cib.xml.sig
Description: Binary data

Attachment: openais.conf
Description: Binary data

_______________________________________________
Pacemaker mailing list
Pacemaker@oss.clusterlabs.org
http://oss.clusterlabs.org/mailman/listinfo/pacemaker

Reply via email to