[Gluster-users] Cannot remove directory - Directory not empty
phil cryer
phil at cryer.us
Mon Sep 20 13:40:15 UTC 2010
> [root@3d13 ~]# rm -rfv /flock/proj/tele2_holland/rnd/comp/010/v003
> rm: cannot remove directory `/flock/proj/tele2_holland/rnd/comp/010/v003': Directory not empty
When I had this issue it was because I had modified files outside of
glusterfs; for example, I moved or modified files while gluster was
not running. I believe you have to run the scale-n-defrag.sh script
that you'll find in the contrib directory of the gluster source.
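
For reference, those zero-byte mode ---------T files in your brick
listings look like distribute (DHT) link files. As a rough, untested
sketch (using the brick path from your own listing, and assuming
getfattr from the attr package is available on the brick server),
something like this should show them and their linkto attribute:

# list zero-byte files whose only mode bit is the sticky bit
find /node04/storage -type f -perm 1000 -size 0 -ls

# dump the extended attributes of one hit; DHT link files normally
# carry a trusted.glusterfs.dht.linkto attribute naming another subvolume
getfattr -d -m . -e hex \
  /node04/storage/proj/tele2_holland/rnd/comp/010/v003/tele2_holland_010_comp_v003.0031.exr

Either way I'd double-check each file's linkto attribute before
removing anything by hand from a brick.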
P
On Mon, Sep 20, 2010 at 4:49 AM, Thomas Ericsson
<thomas.ericsson at fido.se> wrote:
> I cannot delete quite a few directories on our glusterfs mounts. The error is "Directory not empty". A listing shows no files in the directory; however, if I do a listing on the brick volumes, some of them show files.
>
> Any idea how this can happen and how to remove the directories? Would it be safe to remove the "invisible" files straight from the brick volume?
>
> Best regards
> Thomas
>
>
> From a glusterfs client
> [root@3d13 ~]# ls -lai /flock/proj/tele2_holland/rnd/comp/010/v003/
> total 0
> 38939716797 drwxrwxr-x 2 poal FidoUsers 162 Sep 16 09:15 .
> 60331700537 drwxrwxr-x 5 poal FidoUsers 536 Sep 4 01:24 ..
> [root@3d13 ~]# rm -rfv /flock/proj/tele2_holland/rnd/comp/010/v003
> rm: cannot remove directory `/flock/proj/tele2_holland/rnd/comp/010/v003': Directory not empty
>
> From a glusterfsd brick
> flock01 ~ # ls -lai /node04/storage/proj/tele2_holland/rnd/comp/010/v003/
> total 0
> 305414438 drwxrwxr-x 2 1038 fido_user 57 Sep 16 09:15 .
> 7541462567 drwxrwxr-x 5 1038 fido_user 61 Jul 7 09:44 ..
> 305414403 ---------T 1 root root 0 Sep 4 01:24 tele2_holland_010_comp_v003.0031.exr
>
> From another glusterfsd brick
> flock04 ~ # ls -lai /node03/storage/proj/tele2_holland/rnd/comp/010/v003/
> total 0
> 4861583534 drwxrwxr-x 2 1038 500 57 Sep 16 09:15 .
> 280040615 drwxrwxr-x 5 1038 500 61 Jul 7 09:44 ..
> 4861671820 ---------T 1 root root 0 Sep 4 01:24 tele2_holland_010_comp_v003.0007.exr
>
>
> --------------------------------------------------------------
>
> Server and clients are version 2.0.8 with FUSE 2.7.4
>
> Server config
> flock04 ~ # cat /usr/local/etc/glusterfs/glusterfs.server
> volume posix01
> type storage/posix
> option directory /node01/storage
> end-volume
>
> volume locks01
> type features/locks
> subvolumes posix01
> end-volume
>
> volume brick01
> type performance/io-threads
> option thread-count 2
> subvolumes locks01
> end-volume
>
> volume posix02
> type storage/posix
> option directory /node02/storage
> end-volume
>
> volume locks02
> type features/locks
> subvolumes posix02
> end-volume
>
> volume brick02
> type performance/io-threads
> option thread-count 2
> subvolumes locks02
> end-volume
>
> volume posix03
> type storage/posix
> option directory /node03/storage
> end-volume
>
> volume locks03
> type features/locks
> subvolumes posix03
> end-volume
>
> volume brick03
> type performance/io-threads
> option thread-count 32
> subvolumes locks03
> end-volume
>
> volume posix04
> type storage/posix
> option directory /node04/storage
> end-volume
>
> volume locks04
> type features/locks
> subvolumes posix04
> end-volume
>
> volume brick04
> type performance/io-threads
> option thread-count 32
> subvolumes locks04
> end-volume
>
> volume server
> type protocol/server
> option transport-type ib-verbs/server
> option auth.addr.brick01.allow *
> option auth.addr.brick02.allow *
> option auth.addr.brick03.allow *
> option auth.addr.brick04.allow *
> subvolumes brick01 brick02 brick03 brick04
> end-volume
>
> volume tcp_server
> type protocol/server
> option transport-type tcp/server
> option transport.socket.nodelay on
> option auth.addr.brick01.allow *
> option auth.addr.brick02.allow *
> option auth.addr.brick03.allow *
> option auth.addr.brick04.allow *
> subvolumes brick01 brick02 brick03 brick04
> end-volume
>
> Client config
> volume remote01
> type protocol/client
> option transport-type ib-verbs/client
> option remote-host flock01
> option remote-subvolume brick03
> end-volume
>
> volume remote02
> type protocol/client
> option transport-type ib-verbs/client
> option remote-host flock01
> option remote-subvolume brick04
> end-volume
>
> volume remote03
> type protocol/client
> option transport-type ib-verbs/client
> option remote-host flock03
> option remote-subvolume brick03
> end-volume
>
> volume remote04
> type protocol/client
> option transport-type ib-verbs/client
> option remote-host flock03
> option remote-subvolume brick04
> end-volume
>
> volume remote05
> type protocol/client
> option transport-type ib-verbs/client
> option remote-host flock04
> option remote-subvolume brick03
> end-volume
>
> volume remote06
> type protocol/client
> option transport-type ib-verbs/client
> option remote-host flock04
> option remote-subvolume brick04
> end-volume
>
> volume remote07
> type protocol/client
> option transport-type ib-verbs/client
> option remote-host flock08
> option remote-subvolume brick03
> end-volume
>
> volume remote08
> type protocol/client
> option transport-type ib-verbs/client
> option remote-host flock08
> option remote-subvolume brick04
> end-volume
>
> volume distribute
> type cluster/distribute
> subvolumes remote01 remote02 remote03 remote04 remote05 remote06 remote07 remote08
> end-volume
>
> volume writebehind
> type performance/write-behind
> option cache-size 16MB
> option flush-behind on
> subvolumes distribute
> end-volume
>
> volume cache01
> type performance/io-cache
> option cache-size 512MB
> option cache-timeout 30
> subvolumes writebehind
> end-volume
>
> volume readahead
> type performance/read-ahead
> option page-count 16 # 2 is default option
> option force-atime-update off # default is off
> subvolumes cache01
> end-volume
>
>
>
>
> _______________________________________________
> Gluster-users mailing list
> Gluster-users at gluster.org
> http://gluster.org/cgi-bin/mailman/listinfo/gluster-users
>
--
http://philcryer.com