[Gluster-devel] Is locking (fcntl) supported by multiple concurrent clients?

gpvcs at tiscali.fr
Wed Mar 28 13:45:57 UTC 2007


Hi,

First of all, I would like to say that I find GlusterFS
to be a great project with an ingenious design. I have tried
many different failover solutions (ocfs2, safekit, lustre, gfarm)
and this one is the best.

I'm trying to share a central file system between two
clients using glusterfs 1.3.0pre2. Replication works
perfectly, but I am having difficulties with locking.
When I mount the same filesystem (i.e. the same client.vol)
on two machines, both machines are able to take an exclusive
lock on the *same* file (e.g. lock /gfs/fs1/toto).
I don't know whether this is a misconfiguration, a problem with
my boxes (DLM? FUSE?), a bug, or whether this kind of locking
is simply not implemented yet.
I'm running Fedora Core 4 (Linux 2.6.17-1.2142_FC4).

When I trace the code, I don't see fuse_getlk or fuse_setlk
being called, unlike fuse_readdir:
client_protocol: client_lk is never called
proto-srv.c: fop_lk is never called.
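
In case a smaller test helps, here is a stripped-down check using
F_SETLK directly instead of the F_GETLK/F_SETLKW pair in the lock.c
attached below (the /gfs/fs1/toto path is just my mount point from
above, adjust as needed). If fcntl locks were shared between clients,
the second instance started should be refused with EAGAIN or EACCES:

/* minilock.c - stripped-down lock test.
 * Run the same binary on both machines against the same file on the
 * glusterfs mount. The first instance should get the write lock; if
 * fcntl locks are shared, the second should be refused. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
    const char *path = (argc > 1 ? argv[1] : "/gfs/fs1/toto");
    int fd = open(path, O_RDWR | O_CREAT, 0644);
    if (fd < 0)
    {
        perror("open");
        return 1;
    }

    struct flock fl;
    memset(&fl, 0, sizeof(fl));
    fl.l_type = F_WRLCK;     /* exclusive write lock         */
    fl.l_whence = SEEK_SET;
    fl.l_start = 0;
    fl.l_len = 0;            /* 0 means lock the whole file  */

    if (fcntl(fd, F_SETLK, &fl) == -1)
    {
        /* somebody else (hopefully the other machine) holds it */
        printf("lock refused on %s: %s\n", path, strerror(errno));
        close(fd);
        return 2;
    }

    printf("lock acquired on %s, holding it for 60 seconds\n", path);
    sleep(60);               /* keep the lock so the other side can test */
    close(fd);
    return 0;
}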

I would really appreciate some help on this issue.

Regards FT

Reference:

#file client.vol
volume client0
type protocol/client
option transport-type tcp/client
option remote-host 10.80.225.150
option remote-port 7996
option remote-subvolume brick
end-volume

volume client1
type protocol/client
option transport-type tcp/client
option remote-host 192.80.221.24
option remote-port 7997
option remote-subvolume brick
end-volume

volume client2
type protocol/client
option transport-type tcp/client
option remote-host 192.80.225.150
option remote-port 7998
option remote-subvolume brick
end-volume

volume client3
type protocol/client
option transport-type tcp/client
option remote-host 192.80.221.24
option remote-port 7999
option remote-subvolume brick
end-volume

volume afr1
type cluster/afr
subvolumes client0 client2 client1 client3
option replicate *:4
end-volume

#file server0.vol (all server{0,1,2,3}.vol are similar)
volume brick
type storage/posix
option directory /home/cfs0
option debug off
end-volume

volume server
type protocol/server
option transport-type tcp/server
option listen-port 7996
option bind-address 192.80.225.150
subvolumes brick
option auth.ip.brick.allow 127.0.0.1
option auth.ip.brick.allow 192.80.*
end-volume
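
One thing I am wondering about: do I also need a locks translator
loaded on the server side, on top of the posix brick? Something like
the sketch below; this is only a guess, the features/posix-locks
translator name comes from the source tree and I have not verified the
option names (I simply mirrored the exported volume name in the auth key):

volume brick
type storage/posix
option directory /home/cfs0
option debug off
end-volume

volume brick-locks
type features/posix-locks
subvolumes brick
end-volume

volume server
type protocol/server
option transport-type tcp/server
option listen-port 7996
option bind-address 192.80.225.150
subvolumes brick-locks
option auth.ip.brick-locks.allow 127.0.0.1
option auth.ip.brick-locks.allow 192.80.*
end-volume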

# lock.c
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int panic(int code)
{
    return code;
}

int main(int argc, char *argv[])
{
    if (argc >= 2)
    {
        char *filename = argv[1];
        char *tag = (argc >= 3 ? argv[2] : "__tag__");
        int count = 120;
        int fd;

        if (argc >= 4)
            sscanf(argv[3], "%d", &count);

        printf("\n# inf Open lock on file %s (%d) (%s)", filename, argc, tag);

        fd = open(filename, O_EXCL | O_RDWR | O_APPEND | O_SYNC | O_NDELAY);
        if (fd >= 0)
        {
            printf("\n# ok_ lock open %s (%d)", filename, fd);

            /* "verou" = lock; ask who currently holds a write lock */
            struct flock verou;
            verou.l_type = F_WRLCK;
            verou.l_whence = SEEK_SET;
            verou.l_start = 0;
            verou.l_len = 0;      /* whole file */
            verou.l_pid = 0;
            fcntl(fd, F_GETLK, &verou);

            if (F_UNLCK == verou.l_type)
            {
                static char buffer[256];
                static char wait[] = "./-\\!";
                int i, j = 0;

                printf("\n# ok Could lock it %s count (%d)\n", filename, count);

                /* nobody reported holding it: take the write lock (blocking) */
                verou.l_type = F_WRLCK;
                fcntl(fd, F_SETLKW, &verou);
                printf("\n# OK locked");

                for (i = 0; i < count; i++)
                {
                    printf("\r%c%s", wait[j++], buffer);
                    sprintf(buffer, "\n# inf tick %i for %s", i, tag);
                    write(fd, buffer, strlen(buffer));
                    j %= 5;
                    sleep(1);
                }
                exit(panic(EXIT_SUCCESS));
            }
            else
            {
                printf("\n# ko cannot lock %s: already held by pid %d\n",
                       filename, verou.l_pid);
                exit(panic(55));
            }
        }
        else
        {
            printf("\n# KO_ cannot open filename %s", filename);
            exit(panic(10));
        }
    }
    else
    {
        printf("\n# ko_ Not enough arguments");
        exit(panic(99));
    }
}
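
For reference, I compile this with "gcc -o lock lock.c" and start
"./lock /gfs/fs1/toto tagA" on the first machine, then
"./lock /gfs/fs1/toto tagB" on the second. Both instances print
"# OK locked" and keep writing their ticks, which is why I believe
the lock is not being shared between the two clients.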
