Follow-up Comment #1, sr #106554 (project administration):

Gluster Server-Side Configuration

[EMAIL PROTECTED] ~# cat /etc/glusterfs/glusterfs-server.vol
## Define the storage
volume fs1-storage
 type storage/posix                   # POSIX FS translator
 option directory /storage            # Export this directory
end-volume

volume iothreads                      # iothreads can give performance a boost
  type performance/io-threads
  option thread-count 16
  subvolumes fs1-storage
end-volume

## Add network serving capability to above brick.
volume server
 type protocol/server
 option transport-type tcp/server     # For TCP/IP transport
 option listen-port 6996              # Default is 6996
 option client-volume-filename /var/log/glusterfs/client.vol
 subvolumes iothreads
 option auth.ip.iothreads.allow *     # Allow any client IP to access the iothreads volume
end-volume
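
With the server-side volume file in place, the brick is served by starting glusterfsd against it. A minimal sketch, assuming the volfile path shown above and a GlusterFS release of this era, where the daemon takes its volume specification via -f:

# glusterfsd -f /etc/glusterfs/glusterfs-server.vol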

Gluster Client-Side Configuration

[EMAIL PROTECTED] ~# cat /etc/glusterfs/glusterfs-client.vol

### Add client feature and attach to remote subvolume
volume client_211
 type protocol/client
 option transport-type tcp/client     # for TCP/IP transport
 option remote-host 10.10.1.211     # IP address of the remote brick
 option remote-port 6996              # default server port is 6996
 option remote-subvolume iothreads        # name of the remote volume
# option transport-timeout 4
end-volume

volume afrbricks
 type cluster/afr                     # AFR (replication) translator
 subvolumes client_211
 option replicate *:1                 # keep 1 copy of every file
 option self-heal on                  # re-sync stale copies automatically
end-volume

volume iothreads    #iothreads can give performance a boost
  type performance/io-threads
  option thread-count 8
  subvolumes afrbricks
end-volume
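
On the client side, the volume file is handed to the glusterfs client process, which mounts the remote export through FUSE. A minimal sketch, assuming the volfile path shown above; the mount point /mnt/glusterfs is only an example and must exist beforehand:

# mkdir -p /mnt/glusterfs
# glusterfs -f /etc/glusterfs/glusterfs-client.vol /mnt/glusterfs

Once mounted, files written under /mnt/glusterfs pass through the io-threads and afr translators defined above to the remote brick on 10.10.1.211.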


