[Gluster-devel] glusterfs--mainline--2.5 stability question

Dale Dude dale at oc3networks.com
Wed Jun 20 04:58:51 UTC 2007


I found a namespace example in examples/cluster-client.vol, but when I 
try it I get this in glusterfs.log:

2007-06-19 21:51:55 C [unify.c:3935:init] bricks: namespace node used as 
a subvolume, Exiting
2007-06-19 21:51:55 C [common-utils.c:205:gf_print_trace] 
debug-backtrace: Got signal (11), printing backtrace
2007-06-19 21:51:55 C [common-utils.c:207:gf_print_trace] 
debug-backtrace: /lib/libglusterfs.so.0(gf_print_trace+0x2d) [0xb7f6e1ad]
2007-06-19 21:51:55 C [common-utils.c:207:gf_print_trace] 
debug-backtrace: [0xffffe420]
2007-06-19 21:51:55 C [common-utils.c:207:gf_print_trace] 
debug-backtrace: /lib/libglusterfs.so.0(default_notify+0x5b) [0xb7f6c41b]
2007-06-19 21:51:55 C [common-utils.c:207:gf_print_trace] 
debug-backtrace: /lib/libglusterfs.so.0(xlator_tree_init+0x53) [0xb7f6b483]
2007-06-19 21:51:55 C [common-utils.c:207:gf_print_trace] 
debug-backtrace: [glusterfs] [0x804d7f7]
2007-06-19 21:51:55 C [common-utils.c:207:gf_print_trace] 
debug-backtrace: /usr/lib/libfuse.so.2 [0xb7f5c91e]
2007-06-19 21:51:55 C [common-utils.c:207:gf_print_trace] 
debug-backtrace: /usr/lib/libfuse.so.2 [0xb7f5c4d3]
2007-06-19 21:51:55 C [common-utils.c:207:gf_print_trace] 
debug-backtrace: /usr/lib/libfuse.so.2(fuse_session_process+0x26) 
[0xb7f5dcb6]
2007-06-19 21:51:55 C [common-utils.c:207:gf_print_trace] 
debug-backtrace: [glusterfs] [0x804a7a3]
2007-06-19 21:51:55 C [common-utils.c:207:gf_print_trace] 
debug-backtrace: /lib/libglusterfs.so.0(transport_notify+0x37) [0xb7f6f6f7]
2007-06-19 21:51:55 C [common-utils.c:207:gf_print_trace] 
debug-backtrace: /lib/libglusterfs.so.0(sys_epoll_iteration+0xd9) 
[0xb7f70219]
2007-06-19 21:51:55 C [common-utils.c:207:gf_print_trace] 
debug-backtrace: /lib/libglusterfs.so.0(poll_iteration+0x1d) [0xb7f6f7cd]
2007-06-19 21:51:55 C [common-utils.c:207:gf_print_trace] 
debug-backtrace: [glusterfs] [0x804a160]
2007-06-19 21:51:55 C [common-utils.c:207:gf_print_trace] 
debug-backtrace: /lib/tls/i686/cmov/libc.so.6(__libc_start_main+0xdc) 
[0xb7e07ebc]
2007-06-19 21:51:55 C [common-utils.c:207:gf_print_trace] 
debug-backtrace: [glusterfs] [0x8049d61]


========================================
glusterfs-client.vol:
volume server1vol1
         type protocol/client
         option transport-type tcp/client     # for TCP/IP transport
         option remote-host 127.0.0.1     # IP address of the remote brick
         option remote-subvolume clusterfs1
end-volume

volume server1vol2
         type protocol/client
         option transport-type tcp/client     # for TCP/IP transport
         option remote-host 127.0.0.1     # IP address of the remote brick
         option remote-subvolume clusterfs2
end-volume

###################

volume bricks
  type cluster/unify
  option namespace server1vol1
  option readdir-force-success on  # ignore failed mounts
  subvolumes server1vol1 server1vol2
  option scheduler rr
  option rr.limits.min-free-disk 4GB          # Units in KB, MB and GB are allowed
  option rr.refresh-interval 1               # Check server brick after 1s.
end-volume
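
The error above says the namespace node can't also be a subvolume, so I'm
guessing unify wants a dedicated namespace volume that is not listed in
subvolumes. Below is an untested sketch of what I think that would look
like; the names server1ns and clusterfs-ns are just placeholders I made up:

volume server1ns
         type protocol/client
         option transport-type tcp/client     # for TCP/IP transport
         option remote-host 127.0.0.1         # same server as the bricks
         option remote-subvolume clusterfs-ns # namespace-only export
end-volume

volume bricks
  type cluster/unify
  option namespace server1ns                  # not repeated in subvolumes
  subvolumes server1vol1 server1vol2
  option scheduler rr
  option rr.limits.min-free-disk 4GB          # Units in KB, MB and GB are allowed
  option rr.refresh-interval 1                # Check server brick after 1s.
end-volume

Is that the intended way to configure the namespace?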



========================================
glusterfs-server.vol:
volume volume1
  type storage/posix
  option directory /volume1
end-volume

volume locks1
  type features/posix-locks
  option mandatory on
  subvolumes volume1
end-volume

volume clusterfs1
   type performance/io-threads
   option thread-count 10
   subvolumes locks1
end-volume

#######

volume volume2
  type storage/posix
  option directory /volume2
end-volume

volume locks2
  type features/posix-locks
  option mandatory on
  subvolumes volume2
end-volume

volume clusterfs2
   type performance/io-threads
   option thread-count 10
   subvolumes locks2
end-volume

###

volume clusterfs
  type protocol/server
  option transport-type tcp/server
  subvolumes clusterfs1 clusterfs2
  option auth.ip.clusterfs1.allow *
  option auth.ip.clusterfs2.allow *
end-volume
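
On the server side I assume that would need a matching namespace export;
again just an untested sketch, with /namespace1 and clusterfs-ns being
names I made up:

volume clusterfs-ns
  type storage/posix
  option directory /namespace1     # small directory used only for the namespace
end-volume

with clusterfs-ns defined before the protocol/server block, added to its
subvolumes line, and given a matching option auth.ip.clusterfs-ns.allow *
line.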




