[Gluster-devel] nfs over glusterfs/fuse
Brent A Nelson
brent at phys.ufl.edu
Thu Jun 14 21:45:59 UTC 2007
I get the same errors, plus a host of other symptoms (I've only tried
nfs-kernel-server); it's not really usable. Do you also see significant
growth in the memory consumed by the glusterfs process that is being
reexported?
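
A quick way to watch for that, assuming the reexported client shows up as
a process named "glusterfs" (the process selection and interval here are
just illustrative):

    # print the glusterfs client's resident and virtual sizes once a minute
    while true; do ps -o rss=,vsz= -p "$(pidof glusterfs)"; sleep 60; done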
I wonder if the new inode-based release tomorrow will fix the NFS
reexport issue...
On Thu, 14 Jun 2007, Dale Dude wrote:
> This may be more of a FUSE question, but I thought I'd post here first
> since it seems someone here is doing, or has done, this.
>
> Ubuntu Feisty Kernel 2.6.20-16. Fuse 2.6.5.
>
> I tried these userspace servers: unfs3, nfs-user-server
> I tried the nfs-kernel-server
>
> With each of the above NFS servers I get sporadic Input/Output errors when I
> try to 'ls' or 'find' in some directories on the NFS client. I tried the fsid
> option without success. I'm using Samba/CIFS now, which works but is proving
> to be way too slow.
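>
> An /etc/exports entry with the fsid option looks roughly like this (the
> path, network, and fsid value are illustrative, not my exact line):
>
>   /mnt/glusterfs 192.168.0.0/24(rw,fsid=10,no_subtree_check,sync)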
>
> Any advice would be greatly appreciated.
>
> Regards,
> Dale
>
>
> ###########################################
> # glusterfs-client.vol
> volume server1vol1
> type protocol/client
> option transport-type tcp/client # for TCP/IP transport
> option remote-host 192.168.0.2 # IP address of the remote brick
> option remote-subvolume clusterfs1
> end-volume
>
> volume server1vol2
> type protocol/client
> option transport-type tcp/client # for TCP/IP transport
> option remote-host 192.168.0.2 # IP address of the remote brick
> option remote-subvolume clusterfs2
> end-volume
>
> ###################
>
> volume server2vol1
> type protocol/client
> option transport-type tcp/client # for TCP/IP transport
> option remote-host 127.0.0.1 # IP address of the remote brick
> option remote-subvolume clusterfs1
> end-volume
>
> volume server2vol2
> type protocol/client
> option transport-type tcp/client # for TCP/IP transport
> option remote-host 127.0.0.1 # IP address of the remote brick
> option remote-subvolume clusterfs2
> end-volume
>
> volume server2vol3
> type protocol/client
> option transport-type tcp/client # for TCP/IP transport
> option remote-host 127.0.0.1 # IP address of the remote brick
> option remote-subvolume clusterfs3
> end-volume
>
> volume server2vol4
> type protocol/client
> option transport-type tcp/client # for TCP/IP transport
> option remote-host 127.0.0.1 # IP address of the remote brick
> option remote-subvolume clusterfs4
> end-volume
>
> volume server2vol5
> type protocol/client
> option transport-type tcp/client # for TCP/IP transport
> option remote-host 127.0.0.1 # IP address of the remote brick
> option remote-subvolume clusterfs5
> end-volume
>
> volume server2vol6
> type protocol/client
> option transport-type tcp/client # for TCP/IP transport
> option remote-host 127.0.0.1 # IP address of the remote brick
> option remote-subvolume clusterfs6
> end-volume
>
> ###################
>
> volume server3vol1
> type protocol/client
> option transport-type tcp/client # for TCP/IP transport
> option remote-host 192.168.0.4 # IP address of the remote brick
> option remote-subvolume clusterfs1
> end-volume
>
> volume server3vol2
> type protocol/client
> option transport-type tcp/client # for TCP/IP transport
> option remote-host 192.168.0.4 # IP address of the remote brick
> option remote-subvolume clusterfs2
> end-volume
>
> ###################
>
> volume bricks
> type cluster/unify
> option readdir-force-success on # ignore failed mounts
> subvolumes server1vol1 server1vol2 server2vol1 server2vol2 server2vol3 server2vol4 server2vol5 server2vol6 server3vol1 server3vol2
> option scheduler random
> option random.limits.min-free-disk 6GB
> end-volume
>
>
> volume iothreads
> type performance/io-threads
> option thread-count 10
> subvolumes bricks
> end-volume
>
> volume statprefetch
> type performance/stat-prefetch
> option cache-seconds 10
> subvolumes iothreads
> end-volume
>
> volume writebehind
> type performance/write-behind
> option aggregate-size 131072 # aggregate block size in bytes
> #option aggregate-size 1048576 # aggregate block size in bytes
> subvolumes statprefetch
> end-volume
>
> volume readahead
> type performance/read-ahead
> option page-size 65536 # unit in bytes
> option page-count 2 # cache per file = (page-count x page-size)
> subvolumes writebehind
> end-volume
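>
> The client volfile above gets mounted with something like the following
> (the volfile path and mount point are illustrative), and the resulting
> mount is what gets reexported over NFS:
>
>   glusterfs -f /etc/glusterfs/glusterfs-client.vol /mnt/glusterfs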
>
>
>
> ###################################
> # glusterfs-server.vol (all servers are the same except for the number of "bricks")
> volume volume1
> type storage/posix
> option directory /volume1
> end-volume
>
> volume locks1
> type features/posix-locks
> subvolumes volume1
> end-volume
>
> volume clusterfs1
> type performance/io-threads
> option thread-count 10
> subvolumes locks1
> end-volume
>
> #######
>
> volume volume2
> type storage/posix
> option directory /volume2
> end-volume
>
> volume locks2
> type features/posix-locks
> subvolumes volume2
> end-volume
>
> volume clusterfs2
> type performance/io-threads
> option thread-count 10
> subvolumes locks2
> end-volume
>
> #######
>
> volume volume3
> type storage/posix
> option directory /volume3
> end-volume
>
> volume locks3
> type features/posix-locks
> subvolumes volume3
> end-volume
>
> volume clusterfs3
> type performance/io-threads
> option thread-count 10
> subvolumes locks3
> end-volume
>
> #######
>
> volume volume4
> type storage/posix
> option directory /volume4
> end-volume
>
> volume locks4
> type features/posix-locks
> subvolumes volume4
> end-volume
>
> volume clusterfs4
> type performance/io-threads
> option thread-count 10
> subvolumes locks4
> end-volume
>
> #######
>
> volume volume5
> type storage/posix
> option directory /volume5
> end-volume
>
> volume locks5
> type features/posix-locks
> subvolumes volume5
> end-volume
>
> volume clusterfs5
> type performance/io-threads
> option thread-count 10
> subvolumes locks5
> end-volume
>
> #######
>
> volume volume6
> type storage/posix
> option directory /volume6
> end-volume
>
> volume locks6
> type features/posix-locks
> subvolumes volume6
> end-volume
>
> volume clusterfs6
> type performance/io-threads
> option thread-count 10
> subvolumes locks6
> end-volume
>
> ###
>
> volume clusterfs
> type protocol/server
> option transport-type tcp/server
> subvolumes clusterfs1 clusterfs2 clusterfs3 clusterfs4 clusterfs5 clusterfs6
> option auth.ip.clusterfs1.allow *
> option auth.ip.clusterfs2.allow *
> option auth.ip.clusterfs3.allow *
> option auth.ip.clusterfs4.allow *
> option auth.ip.clusterfs5.allow *
> option auth.ip.clusterfs6.allow *
> end-volume
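>
> Each server loads its volfile with something like the following; the
> path is illustrative:
>
>   glusterfsd -f /etc/glusterfs/glusterfs-server.vol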
>
>
> _______________________________________________
> Gluster-devel mailing list
> Gluster-devel at nongnu.org
> http://lists.nongnu.org/mailman/listinfo/gluster-devel
>