[Gluster-users] How do I temporarily take a brick out of service and then put it back later?
Greg Scott
GregScott at infrasupport.com
Tue Sep 16 19:24:36 UTC 2014
And this really sucks, because it's not telling me the truth. It took **minutes** for Gluster to come back, only to tell me it has no volumes. But I know there are volumes.
[root@lme-fw2 glusterfs]# gluster volume info all
No volumes present
[root@lme-fw2 glusterfs]#
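Next thing I can think of to try is checking whether glusterd itself is healthy, since the volume definition has to live somewhere. This is only a sketch of what I plan to run - it assumes the volume is still named firewall-scripts and that glusterd keeps its state in the default /var/lib/glusterd directory:

# Is the management daemon even running?
systemctl status glusterd

# Does this node still have the volume definition on disk?
ls /var/lib/glusterd/vols/

# Can the two nodes still see each other?
gluster peer status

# If the volume shows up again, check on the brick processes.
gluster volume status firewall-scripts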
Because here is my volume right here - the last line of the df output below:
[root@lme-fw2 glusterfs]# df -h
Filesystem                                 Size  Used Avail Use% Mounted on
rootfs                                      50G  5.9G   42G  13% /
devtmpfs                                   959M     0  959M   0% /dev
tmpfs                                      969M  160K  969M   1% /dev/shm
tmpfs                                      969M   50M  920M   6% /run
/dev/mapper/vg_lmefw2-lv_root               50G  5.9G   42G  13% /
tmpfs                                      969M   50M  920M   6% /run
tmpfs                                      969M     0  969M   0% /sys/fs/cgroup
tmpfs                                      969M     0  969M   0% /media
/dev/sda2                                  497M   97M  375M  21% /boot
/dev/mapper/vg_lmefw2-lv_firewallscripts    16G   37M   16G   1% /gluster-fw2
/dev/mapper/vg_lmefw2-lv_home              391G  6.0G  366G   2% /home
192.168.253.2:/firewall-scripts             16G   37M   16G   1% /firewall-scripts
[root@lme-fw2 glusterfs]#
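For reference, /gluster-fw2 is the brick filesystem itself and the 192.168.253.2:/firewall-scripts line is the FUSE client mount of the volume. One way I can think of to confirm the brick still identifies itself as part of a Gluster volume is to look at its extended attributes - again just a sketch, assuming the attr tools are installed and the brick directory is /gluster-fw2 itself rather than a subdirectory under it:

# A directory that has been used as a brick carries a
# trusted.glusterfs.volume-id extended attribute.
getfattr -d -m . -e hex /gluster-fw2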
And it's mounted - see near the bottom of the mount output:
[root@lme-fw2 glusterfs]# mount
proc on /proc type proc (rw,nosuid,nodev,noexec,relatime)
sysfs on /sys type sysfs (rw,nosuid,nodev,noexec,relatime,seclabel)
devtmpfs on /dev type devtmpfs (rw,nosuid,relatime,seclabel,size=981088k,nr_inodes=245272,mode=755)
devpts on /dev/pts type devpts (rw,nosuid,noexec,relatime,seclabel,gid=5,mode=620,ptmxmode=000)
tmpfs on /dev/shm type tmpfs (rw,nosuid,nodev,relatime,seclabel)
/dev/mapper/vg_lmefw2-lv_root on / type ext4 (rw,relatime,seclabel,user_xattr,acl,barrier=1,data=ordered)
tmpfs on /run type tmpfs (rw,nosuid,nodev,relatime,seclabel,mode=755)
selinuxfs on /sys/fs/selinux type selinuxfs (rw,relatime)
tmpfs on /sys/fs/cgroup type tmpfs (rw,nosuid,nodev,noexec,seclabel,mode=755)
cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd)
cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset)
cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,cpuacct,cpu)
cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory)
cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices)
cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer)
cgroup on /sys/fs/cgroup/net_cls type cgroup (rw,nosuid,nodev,noexec,relatime,net_cls)
cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio)
cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event)
systemd-1 on /proc/sys/fs/binfmt_misc type autofs (rw,relatime,fd=25,pgrp=1,timeout=300,minproto=5,maxproto=5,direct)
securityfs on /sys/kernel/security type securityfs (rw,relatime)
tmpfs on /media type tmpfs (rw,nosuid,nodev,noexec,relatime,rootcontext=system_u:object_r:mnt_t:s0,seclabel,mode=755)
debugfs on /sys/kernel/debug type debugfs (rw,relatime)
hugetlbfs on /dev/hugepages type hugetlbfs (rw,relatime,seclabel)
mqueue on /dev/mqueue type mqueue (rw,relatime,seclabel)
configfs on /sys/kernel/config type configfs (rw,relatime)
sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw,relatime)
/dev/sda2 on /boot type ext4 (rw,relatime,seclabel,user_xattr,acl,barrier=1,data=ordered)
/dev/mapper/vg_lmefw2-lv_firewallscripts on /gluster-fw2 type xfs (rw,relatime,seclabel,attr2,noquota)
/dev/mapper/vg_lmefw2-lv_home on /home type ext4 (rw,relatime,seclabel,user_xattr,acl,barrier=1,data=ordered)
fusectl on /sys/fs/fuse/connections type fusectl (rw,relatime)
192.168.253.2:/firewall-scripts on /firewall-scripts type fuse.glusterfs (rw,relatime,user_id=0,group_id=0,default_permissions,allow_other,max_read=131072)
binfmt_misc on /proc/sys/fs/binfmt_misc type binfmt_misc (rw,relatime)
gvfs-fuse-daemon on /home/gregs/.gvfs type fuse.gvfs-fuse-daemon (rw,nosuid,nodev,relatime,user_id=1000,group_id=1000)
[root@lme-fw2 glusterfs]#
Isn't the whole point of Gluster to stand up to hardware failures like this? It's dying on me right when I need it most.
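To get back to the question in the subject line: here is the rough sequence I *think* is supposed to work for taking one brick of a replicated volume out of service and putting it back later - somebody please correct me if I have this wrong. It assumes a two-node replica volume named firewall-scripts like the one above, and <brick-pid> is just a placeholder for whatever PID the volume status command reports for the local brick:

# Find the PID of the glusterfsd process serving the local brick.
gluster volume status firewall-scripts

# Stop just that brick process; the surviving replica keeps serving clients.
kill <brick-pid>

# ... do the maintenance on this node ...

# Restart the missing brick process without disturbing the good one.
gluster volume start firewall-scripts force

# Resync whatever was written while the brick was down, and watch progress.
gluster volume heal firewall-scripts
gluster volume heal firewall-scripts info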
- Greg