[Gluster-devel] dht / unify "No such file or directory"

Dan Parsons dparsons at nyip.net
Mon Feb 23 20:53:16 UTC 2009


I'm getting the below error message on some of my gluster clients when under
high I/O load:
2009-02-23 12:43:50 E [unify.c:362:unify_lookup_cbk] unify: child(dht0):
path(/bio/data/fast-hmmsearch-all/tmp5bgb6I_fast-hmmsearch-all_job/result.tigrfam.TIGR03461.hmmhits):
No such file or directory
2009-02-23 12:43:50 E [unify.c:362:unify_lookup_cbk] unify:
child(unify-switch-ns):
path(/bio/data/fast-hmmsearch-all/tmp5bgb6I_fast-hmmsearch-all_job/result.tigrfam.TIGR03461.hmmhits):
No such file or directory
2009-02-23 12:43:50 E [unify.c:362:unify_lookup_cbk] unify:
child(unify-switch-ns):
path(/bio/data/fast-hmmsearch-all/tmp5bgb6I_fast-hmmsearch-all_job/result.tigrfam.TIGR03461.hmmhits):
No such file or directory

The thing is, the above files DO exist. But according to the nodes this
happens on, they do not. There are no error messages in the gluster servers.
Below is my client config. Any thoughts?


volume unify-switch-ns
   type protocol/client
   option transport-type tcp
   option remote-host 10.8.101.51
   option remote-subvolume posix-unify-switch-ns
end-volume

#volume distfs01-ns-readahead
#   type performance/read-ahead
#   option page-size 1MB
#   option page-count 8
#   subvolumes distfs01-ns-brick
#end-volume

#volume unify-switch-ns
#   type performance/write-behind
#   option block-size 1MB
#   option cache-size 3MB
#   subvolumes distfs01-ns-readahead
#end-volume

volume distfs01-unify
   type protocol/client
   option transport-type tcp
   option remote-host 10.8.101.51
   option remote-subvolume posix-unify
end-volume

volume distfs02-unify
   type protocol/client
   option transport-type tcp
   option remote-host 10.8.101.52
   option remote-subvolume posix-unify
end-volume

volume distfs03-unify
   type protocol/client
   option transport-type tcp
   option remote-host 10.8.101.53
   option remote-subvolume posix-unify
end-volume

volume distfs04-unify
   type protocol/client
   option transport-type tcp
   option remote-host 10.8.101.54
   option remote-subvolume posix-unify
end-volume

volume distfs01-stripe
   type protocol/client
   option transport-type tcp
   option remote-host 10.8.101.51
   option remote-subvolume posix-stripe
end-volume

volume distfs02-stripe
   type protocol/client
   option transport-type tcp
   option remote-host 10.8.101.52
   option remote-subvolume posix-stripe
end-volume

volume distfs03-stripe
   type protocol/client
   option transport-type tcp
   option remote-host 10.8.101.53
   option remote-subvolume posix-stripe
end-volume

volume distfs04-stripe
   type protocol/client
   option transport-type tcp
   option remote-host 10.8.101.54
   option remote-subvolume posix-stripe
end-volume

volume stripe0
type cluster/stripe
option block-size *.jar,*.pin:1MB,*:2MB
subvolumes distfs01-stripe distfs02-stripe distfs03-stripe distfs04-stripe
end-volume

volume dht0
type cluster/dht
subvolumes distfs01-unify distfs02-unify distfs03-unify distfs04-unify
end-volume

volume unify
type cluster/unify
option namespace unify-switch-ns
option self-heal off
option scheduler switch
# send *.phr/psq/pnd etc to stripe0, send the rest to hash
# extensions have to be *.foo* and not simply *.foo or rsync's tmp file
# naming will prevent files from being matched
option scheduler.switch.case *.phr*:stripe0;*.psq*:stripe0;*.pnd*:stripe0;*.psd*:stripe0;*.pin*:stripe0;*.nsi*:stripe0;*.nin*:stripe0;*.nsd*:stripe0;*.nhr*:stripe0;*.nsq*:stripe0;*.tar*:stripe0;*.tar.gz*:stripe0;*.jar*:stripe0;*.img*:stripe0;*.perf*:stripe0;*.tgz*:stripe0;*.fasta*:stripe0;*.huge*:stripe0
subvolumes stripe0 dht0
end-volume

volume ioc
   type performance/io-cache
   subvolumes unify
   option cache-size 3000MB
option cache-timeout 3600
end-volume

volume filter
  type features/filter
  option fixed-uid 0
  option fixed-gid 900
  subvolumes ioc
end-volume
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://supercolony.gluster.org/pipermail/gluster-devel/attachments/20090223/76c97ac6/attachment-0003.html>


More information about the Gluster-devel mailing list