[Bugs] [Bug 1157975] [USS]: glusterd does not start if older volume exists

bugzilla@redhat.com
Mon Dec 1 10:05:34 UTC 2014


https://bugzilla.redhat.com/show_bug.cgi?id=1157975

ssamanta@redhat.com changed:

           What    |Removed                     |Added
----------------------------------------------------------------------------
             Status|ON_QA                       |VERIFIED
                 CC|                            |ssamanta@redhat.com



--- Comment #5 from ssamanta@redhat.com ---
Verified the bug with the following gluster versions and did not find the issue: on
the older build (3.6.0.33) a volume (oldvol2) was created and snapshotted (snapold)
with USS toggled, glusterd was stopped, the packages were updated to 3.6.0.34, and
glusterd then started cleanly with the older volumes present; the snapshot also
restored successfully. Marking as VERIFIED.


[root@dhcp42-244 yum.repos.d]# rpm -qa | grep gluster
gluster-nagios-common-0.1.3-2.el6rhs.noarch
glusterfs-libs-3.6.0.33-1.el6rhs.x86_64
glusterfs-server-3.6.0.33-1.el6rhs.x86_64
glusterfs-cli-3.6.0.33-1.el6rhs.x86_64
glusterfs-api-3.6.0.33-1.el6rhs.x86_64
glusterfs-fuse-3.6.0.33-1.el6rhs.x86_64
glusterfs-debuginfo-3.6.0.33-1.el6rhs.x86_64
samba-glusterfs-3.6.509-169.1.el6rhs.x86_64
gluster-nagios-addons-0.1.10-2.el6rhs.x86_64
glusterfs-3.6.0.33-1.el6rhs.x86_64
[root@dhcp42-244 yum.repos.d]#

[root@dhcp42-244 yum.repos.d]# gluster peer status
Number of Peers: 3

Hostname: 10.70.43.6
Uuid: 2c0d5fe8-a014-4978-ace7-c663e4cc8d91
State: Peer in Cluster (Connected)

Hostname: 10.70.42.204
Uuid: 2a2a1b36-37e3-4336-b82a-b09dcc2f745e
State: Peer in Cluster (Connected)

Hostname: 10.70.42.10
Uuid: 77c49bfc-6cb4-44f3-be12-41447a3a452e
State: Peer in Cluster (Connected)
[root@dhcp42-244 yum.repos.d]#

[root@dhcp42-244 yum.repos.d]# gluster volume info

Volume Name: testvol
Type: Distributed-Replicate
Volume ID: 34a699ad-81f8-489e-8242-40c5e181901a
Status: Started
Snap Volume: no
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: 10.70.42.244:/rhs/brick1/vol
Brick2: 10.70.43.6:/rhs/brick2/vol
Brick3: 10.70.42.204:/rhs/brick3/vol
Brick4: 10.70.42.10:/rhs/brick4/vol
Options Reconfigured:
performance.readdir-ahead: on
snap-max-hard-limit: 256
snap-max-soft-limit: 90
auto-delete: disable

Volume Name: testvol1
Type: Distributed-Replicate
Volume ID: bcd90c32-e79d-4197-a5b2-b0ea1d52002d
Status: Started
Snap Volume: no
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: 10.70.42.244:/rhs/brick2/testvol
Brick2: 10.70.43.6:/rhs/brick3/testvol
Brick3: 10.70.42.204:/rhs/brick4/testvol
Brick4: 10.70.42.10:/rhs/brick1/testvol
Options Reconfigured:
features.barrier: disable
features.uss: on
performance.readdir-ahead: on
snap-max-hard-limit: 256
snap-max-soft-limit: 90
auto-delete: disable
[root@dhcp42-244 yum.repos.d]#


[root@dhcp42-244 yum.repos.d]# gluster volume create oldvol2 10.70.42.244:/rhs/brick4/oldvol 10.70.43.6:/rhs/brick2/oldvol 10.70.42.204:/rhs/brick1/oldvol 10.70.42.10:/rhs/brick3/oldvol
volume create: oldvol2: success: please start the volume to access data
[root@dhcp42-244 yum.repos.d]# gluster volume start oldvol2
volume start: oldvol2: success
[root@dhcp42-244 yum.repos.d]# gluster volume info

Volume Name: oldvol2
Type: Distribute
Volume ID: ac6ffc71-e2d0-41ef-928d-b4aa344b2825
Status: Started
Snap Volume: no
Number of Bricks: 4
Transport-type: tcp
Bricks:
Brick1: 10.70.42.244:/rhs/brick4/oldvol
Brick2: 10.70.43.6:/rhs/brick2/oldvol
Brick3: 10.70.42.204:/rhs/brick1/oldvol
Brick4: 10.70.42.10:/rhs/brick3/oldvol
Options Reconfigured:
performance.readdir-ahead: on
snap-max-hard-limit: 256
snap-max-soft-limit: 90
auto-delete: disable

Volume Name: testvol
Type: Distributed-Replicate
Volume ID: 34a699ad-81f8-489e-8242-40c5e181901a
Status: Started
Snap Volume: no
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: 10.70.42.244:/rhs/brick1/vol
Brick2: 10.70.43.6:/rhs/brick2/vol
Brick3: 10.70.42.204:/rhs/brick3/vol
Brick4: 10.70.42.10:/rhs/brick4/vol
Options Reconfigured:
performance.readdir-ahead: on
snap-max-hard-limit: 256
snap-max-soft-limit: 90
auto-delete: disable

Volume Name: testvol1
Type: Distributed-Replicate
Volume ID: bcd90c32-e79d-4197-a5b2-b0ea1d52002d
Status: Started
Snap Volume: no
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: 10.70.42.244:/rhs/brick2/testvol
Brick2: 10.70.43.6:/rhs/brick3/testvol
Brick3: 10.70.42.204:/rhs/brick4/testvol
Brick4: 10.70.42.10:/rhs/brick1/testvol
Options Reconfigured:
features.barrier: disable
features.uss: on
performance.readdir-ahead: on
snap-max-hard-limit: 256
snap-max-soft-limit: 90
auto-delete: disable
[root@dhcp42-244 yum.repos.d]#

[root@dhcp42-244 yum.repos.d]# pgrep glusterd
12396
[root@dhcp42-244 yum.repos.d]#

[root@dhcp42-244 yum.repos.d]# cat /var/lib/glusterd/vols/oldvol2/info
type=0
count=4
status=1
sub_count=0
stripe_count=1
replica_count=1
version=2
transport-type=0
volume-id=ac6ffc71-e2d0-41ef-928d-b4aa344b2825
username=21f62ab1-fe38-4ab7-b4af-b8e9de0b193e
password=6fbae7e1-6eab-4ae6-8ddc-d0224bdd3551
op-version=30000
client-op-version=30000
parent_volname=N/A
restored_from_snap=00000000-0000-0000-0000-000000000000
snap-max-hard-limit=256
performance.readdir-ahead=on
brick-0=10.70.42.244:-rhs-brick4-oldvol
brick-1=10.70.43.6:-rhs-brick2-oldvol
brick-2=10.70.42.204:-rhs-brick1-oldvol
brick-3=10.70.42.10:-rhs-brick3-oldvol
[root@dhcp42-244 yum.repos.d]#
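
(For reference, a minimal sketch, assuming the standard /var/lib/glusterd layout
shown above, of dumping the op-version keys recorded for every stored volume;
this is the metadata glusterd reads back at startup when an older volume exists:)

# List the op-version recorded in each volume's stored info file
for info in /var/lib/glusterd/vols/*/info; do
    vol=$(basename "$(dirname "$info")")
    echo "$vol: $(grep -E '^(op-version|client-op-version)=' "$info" | tr '\n' ' ')"
done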

[root@dhcp43-190 oldvol]# ls
test.0  test.10  test.3  test.5  test.7  test.9
test.1  test.2   test.4  test.6  test.8
[root@dhcp43-190 oldvol]#

[root@dhcp42-244 log]# gluster volume info

Volume Name: oldvol2
Type: Distribute
Volume ID: ac6ffc71-e2d0-41ef-928d-b4aa344b2825
Status: Started
Snap Volume: no
Number of Bricks: 4
Transport-type: tcp
Bricks:
Brick1: 10.70.42.244:/rhs/brick4/oldvol
Brick2: 10.70.43.6:/rhs/brick2/oldvol
Brick3: 10.70.42.204:/rhs/brick1/oldvol
Brick4: 10.70.42.10:/rhs/brick3/oldvol
Options Reconfigured:
features.uss: off
performance.readdir-ahead: on
snap-max-hard-limit: 256
snap-max-soft-limit: 90
auto-delete: disable

Volume Name: testvol
Type: Distributed-Replicate
Volume ID: 34a699ad-81f8-489e-8242-40c5e181901a
Status: Started
Snap Volume: no
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: 10.70.42.244:/rhs/brick1/vol
Brick2: 10.70.43.6:/rhs/brick2/vol
Brick3: 10.70.42.204:/rhs/brick3/vol
Brick4: 10.70.42.10:/rhs/brick4/vol
Options Reconfigured:
performance.readdir-ahead: on
snap-max-hard-limit: 256
snap-max-soft-limit: 90
auto-delete: disable

Volume Name: testvol1
Type: Distributed-Replicate
Volume ID: bcd90c32-e79d-4197-a5b2-b0ea1d52002d
Status: Started
Snap Volume: no
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: 10.70.42.244:/rhs/brick2/testvol
Brick2: 10.70.43.6:/rhs/brick3/testvol
Brick3: 10.70.42.204:/rhs/brick4/testvol
Brick4: 10.70.42.10:/rhs/brick1/testvol
Options Reconfigured:
features.barrier: disable
features.uss: on
performance.readdir-ahead: on
snap-max-hard-limit: 256
snap-max-soft-limit: 90
auto-delete: disable
[root@dhcp42-244 log]#

[root@dhcp42-244 log]# gluster snapshot status

Snap Name : snapold
Snap UUID : af21c51f-c2cc-4fbb-b8fc-2c20fbe783be

    Brick Path        :   10.70.42.244:/var/run/gluster/snaps/1424e70ed7b5499692ff6f85398589c0/brick1/oldvol
    Volume Group      :   RHS_vg4
    Brick Running     :   No
    Brick PID         :   N/A
    Data Percentage   :   0.20
    LV Size           :   13.47g


    Brick Path        :   10.70.43.6:/var/run/gluster/snaps/1424e70ed7b5499692ff6f85398589c0/brick2/oldvol
    Volume Group      :   RHS_vg2
    Brick Running     :   No
    Brick PID         :   N/A
    Data Percentage   :   0.28
    LV Size           :   13.47g


    Brick Path        :   10.70.42.204:/var/run/gluster/snaps/1424e70ed7b5499692ff6f85398589c0/brick3/oldvol
    Volume Group      :   RHS_vg1
    Brick Running     :   No
    Brick PID         :   N/A
    Data Percentage   :   0.20
    LV Size           :   13.47g


    Brick Path        :   10.70.42.10:/var/run/gluster/snaps/1424e70ed7b5499692ff6f85398589c0/brick4/oldvol
    Volume Group      :   RHS_vg3
    Brick Running     :   No
    Brick PID         :   N/A
    Data Percentage   :   0.20
    LV Size           :   13.47g

[root@dhcp42-244 log]#
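
(The snapshot create step itself is not captured in the log above; on this build
the snapold snapshot listed here would typically have been taken from oldvol2
along these lines, exact invocation assumed:)

gluster snapshot create snapold oldvol2
gluster snapshot list oldvol2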

[root@dhcp42-244 oldvol2]# cat info
type=0
count=4
status=1
sub_count=0
stripe_count=1
replica_count=1
version=5
transport-type=0
volume-id=ac6ffc71-e2d0-41ef-928d-b4aa344b2825
username=21f62ab1-fe38-4ab7-b4af-b8e9de0b193e
password=6fbae7e1-6eab-4ae6-8ddc-d0224bdd3551
op-version=30000
client-op-version=30000
parent_volname=N/A
restored_from_snap=00000000-0000-0000-0000-000000000000
snap-max-hard-limit=256
features.barrier=disable
features.uss=off
performance.readdir-ahead=on
brick-0=10.70.42.244:-rhs-brick4-oldvol
brick-1=10.70.43.6:-rhs-brick2-oldvol
brick-2=10.70.42.204:-rhs-brick1-oldvol
brick-3=10.70.42.10:-rhs-brick3-oldvol
[root@dhcp42-244 oldvol2]#
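
(Compared with the earlier dump (version=2, no features.* keys), glusterd has
bumped the version and persisted features.barrier and features.uss. A sketch of
capturing that change directly, assuming the same volume and paths:)

# Save the stored info file, toggle USS, then diff to see what glusterd rewrites
cp /var/lib/glusterd/vols/oldvol2/info /tmp/oldvol2.info.before
gluster volume set oldvol2 features.uss enable
gluster volume set oldvol2 features.uss disable
diff -u /tmp/oldvol2.info.before /var/lib/glusterd/vols/oldvol2/info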

[root@dhcp42-244 oldvol2]# killall glusterd
[root@dhcp42-244 oldvol2]# pgrep glusterd
[root@dhcp42-244 oldvol2]#
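
(The package update between the two builds is not shown; with glusterd stopped
it would typically have been something like the following, assuming the repo
configuration under yum.repos.d was already in place:)

yum update 'glusterfs*'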

After update:
==========================================
[root@dhcp42-244 yum.repos.d]# service glusterd start
Starting glusterd:                                         [  OK  ]
[root@dhcp42-244 yum.repos.d]#
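
(Since the original failure was glusterd refusing to start when an older volume
exists, a quick check that the restarted daemon came up clean, assuming the
default glusterd log location on this build:)

pgrep glusterd
grep ' E ' /var/log/glusterfs/etc-glusterfs-glusterd.vol.log | tail -5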

[root@dhcp42-244 yum.repos.d]# rpm -qa | grep glusterfs
glusterfs-3.6.0.34-1.el6rhs.x86_64
samba-glusterfs-3.6.509-169.1.el6rhs.x86_64
glusterfs-cli-3.6.0.34-1.el6rhs.x86_64
glusterfs-libs-3.6.0.34-1.el6rhs.x86_64
glusterfs-api-3.6.0.34-1.el6rhs.x86_64
glusterfs-server-3.6.0.34-1.el6rhs.x86_64
glusterfs-debuginfo-3.6.0.34-1.el6rhs.x86_64
glusterfs-fuse-3.6.0.34-1.el6rhs.x86_64
[root@dhcp42-244 yum.repos.d]#

[root@dhcp42-244 yum.repos.d]# gluster volume stop oldvol2
Stopping volume will make its data inaccessible. Do you want to continue? (y/n)
y
volume stop: oldvol2: success
[root@dhcp42-244 yum.repos.d]# gluster snapshot restore snapold
Snapshot restore: snapold: Snap restored successfully
[root@dhcp42-244 yum.repos.d]#

Client:
=========
[root@dhcp43-190 oldvol]# ls
test.0  test.10  test.3  test.5  test.7  test.9
test.1  test.2   test.4  test.6  test.8
[root@dhcp43-190 oldvol]# cd .snaps
[root@dhcp43-190 oldvol]
