[Bugs] [Bug 1271648] tier/cli: number of bricks remains the same in v info --xml

bugzilla@redhat.com bugzilla@redhat.com
Tue Nov 3 13:02:38 UTC 2015


https://bugzilla.redhat.com/show_bug.cgi?id=1271648

nchilaka <nchilaka@redhat.com> changed:

           What    |Removed                     |Added
----------------------------------------------------------------------------
             Status|ON_QA                       |VERIFIED



--- Comment #3 from nchilaka <nchilaka@redhat.com> ---
Working now:
[root@zod ~]# rpm -qa|grep gluster
glusterfs-libs-3.7.5-5.el7rhgs.x86_64
glusterfs-fuse-3.7.5-5.el7rhgs.x86_64
glusterfs-3.7.5-5.el7rhgs.x86_64
glusterfs-server-3.7.5-5.el7rhgs.x86_64
glusterfs-client-xlators-3.7.5-5.el7rhgs.x86_64
glusterfs-cli-3.7.5-5.el7rhgs.x86_64
glusterfs-api-3.7.5-5.el7rhgs.x86_64
glusterfs-debuginfo-3.7.5-5.el7rhgs.x86_64
[root@zod ~]# gluster v status quota_one --xml
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>0</opRet>
  <opErrno>0</opErrno>
  <opErrstr/>
  <volStatus>
    <volumes>
      <volume>
        <volName>quota_one</volName>
        <nodeCount>14</nodeCount>
        <hotBricks>
          <node>
            <hostname>yarrow</hostname>
            <path>/dummy/brick101/quota_one_hot</path>
            <peerid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</peerid>
            <status>1</status>
            <port>49185</port>
            <ports>
              <tcp>49185</tcp>
              <rdma>N/A</rdma>
            </ports>
            <pid>18811</pid>
          </node>
          <node>
            <hostname>zod</hostname>
            <path>/dummy/brick101/quota_one_hot</path>
            <peerid>ad002db4-bdc0-43e3-aae7-c209012140b0</peerid>
            <status>1</status>
            <port>49185</port>
            <ports>
              <tcp>49185</tcp>
              <rdma>N/A</rdma>
            </ports>
            <pid>20257</pid>
          </node>
          <node>
            <hostname>yarrow</hostname>
            <path>/dummy/brick100/quota_one_hot</path>
            <peerid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</peerid>
            <status>1</status>
            <port>49184</port>
            <ports>
              <tcp>49184</tcp>
              <rdma>N/A</rdma>
            </ports>
            <pid>18854</pid>
          </node>
          <node>
            <hostname>zod</hostname>
            <path>/dummy/brick100/quota_one_hot</path>
            <peerid>ad002db4-bdc0-43e3-aae7-c209012140b0</peerid>
            <status>1</status>
            <port>49184</port>
            <ports>
              <tcp>49184</tcp>
              <rdma>N/A</rdma>
            </ports>
            <pid>20275</pid>
          </node>
        </hotBricks>
        <coldBricks>
          <node>
            <hostname>zod</hostname>
            <path>/rhs/brick1/quota_one</path>
            <peerid>ad002db4-bdc0-43e3-aae7-c209012140b0</peerid>
            <status>1</status>
            <port>49182</port>
            <ports>
              <tcp>49182</tcp>
              <rdma>N/A</rdma>
            </ports>
            <pid>20293</pid>
          </node>
          <node>
            <hostname>yarrow</hostname>
            <path>/rhs/brick1/quota_one</path>
            <peerid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</peerid>
            <status>1</status>
            <port>49182</port>
            <ports>
              <tcp>49182</tcp>
              <rdma>N/A</rdma>
            </ports>
            <pid>18883</pid>
          </node>
          <node>
            <hostname>zod</hostname>
            <path>/rhs/brick2/quota_one</path>
            <peerid>ad002db4-bdc0-43e3-aae7-c209012140b0</peerid>
            <status>1</status>
            <port>49183</port>
            <ports>
              <tcp>49183</tcp>
              <rdma>N/A</rdma>
            </ports>
            <pid>20311</pid>
          </node>
          <node>
            <hostname>yarrow</hostname>
            <path>/rhs/brick2/quota_one</path>
            <peerid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</peerid>
            <status>1</status>
            <port>49183</port>
            <ports>
              <tcp>49183</tcp>
              <rdma>N/A</rdma>
            </ports>
            <pid>18901</pid>
          </node>
          <node>
            <hostname>NFS Server</hostname>
            <path>localhost</path>
            <peerid>ad002db4-bdc0-43e3-aae7-c209012140b0</peerid>
            <status>0</status>
            <port>N/A</port>
            <ports>
              <tcp>N/A</tcp>
              <rdma>N/A</rdma>
            </ports>
            <pid>-1</pid>
          </node>
          <node>
            <hostname>Self-heal Daemon</hostname>
            <path>localhost</path>
            <peerid>ad002db4-bdc0-43e3-aae7-c209012140b0</peerid>
            <status>1</status>
            <port>N/A</port>
            <ports>
              <tcp>N/A</tcp>
              <rdma>N/A</rdma>
            </ports>
            <pid>20347</pid>
          </node>
          <node>
            <hostname>Quota Daemon</hostname>
            <path>localhost</path>
            <peerid>ad002db4-bdc0-43e3-aae7-c209012140b0</peerid>
            <status>1</status>
            <port>N/A</port>
            <ports>
              <tcp>N/A</tcp>
              <rdma>N/A</rdma>
            </ports>
            <pid>20356</pid>
          </node>
          <node>
            <hostname>NFS Server</hostname>
            <path>10.70.34.43</path>
            <peerid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</peerid>
            <status>0</status>
            <port>N/A</port>
            <ports>
              <tcp>N/A</tcp>
              <rdma>N/A</rdma>
            </ports>
            <pid>-1</pid>
          </node>
          <node>
            <hostname>Self-heal Daemon</hostname>
            <path>10.70.34.43</path>
            <peerid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</peerid>
            <status>1</status>
            <port>N/A</port>
            <ports>
              <tcp>N/A</tcp>
              <rdma>N/A</rdma>
            </ports>
            <pid>19003</pid>
          </node>
          <node>
            <hostname>Quota Daemon</hostname>
            <path>10.70.34.43</path>
            <peerid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</peerid>
            <status>1</status>
            <port>N/A</port>
            <ports>
              <tcp>N/A</tcp>
              <rdma>N/A</rdma>
            </ports>
            <pid>19012</pid>
          </node>
        </coldBricks>
        <tasks>
          <task>
            <type>Tier migration</type>
            <id>eae47ea7-aea5-4220-8f1d-c6cfc145875d</id>
            <status>1</status>
            <statusStr>in progress</statusStr>
          </task>
        </tasks>
      </volume>
    </volumes>
  </volStatus>
</cliOutput>
[root@zod ~]# 
[root@zod ~]# 
[root@zod ~]# 
[root@zod ~]# gluster v info quota_one --xml
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>0</opRet>
  <opErrno>0</opErrno>
  <opErrstr/>
  <volInfo>
    <volumes>
      <volume>
        <name>quota_one</name>
        <id>1f7be42a-0213-4e7c-9721-392a3747a19a</id>
        <status>1</status>
        <statusStr>Started</statusStr>
        <brickCount>8</brickCount>
        <distCount>2</distCount>
        <stripeCount>1</stripeCount>
        <replicaCount>2</replicaCount>
        <disperseCount>0</disperseCount>
        <redundancyCount>0</redundancyCount>
        <type>5</type>
        <typeStr>Tier</typeStr>
        <transport>0</transport>
        <xlators/>
        <bricks>
          <hotBricks>
            <hotBrickType>Distributed-Replicate</hotBrickType>
            <numberOfBricks>2 x 2 = 4</numberOfBricks>
            <brick
uuid="236f7068-8b99-4aa0-a0b5-40b76146cdf4">yarrow:/dummy/brick101/quota_one_hot<name>yarrow:/dummy/brick101/quota_one_hot</name><hostUuid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</hostUuid></brick>
            <brick
uuid="236f7068-8b99-4aa0-a0b5-40b76146cdf4">zod:/dummy/brick101/quota_one_hot<name>zod:/dummy/brick101/quota_one_hot</name><hostUuid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</hostUuid></brick>
            <brick
uuid="236f7068-8b99-4aa0-a0b5-40b76146cdf4">yarrow:/dummy/brick100/quota_one_hot<name>yarrow:/dummy/brick100/quota_one_hot</name><hostUuid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</hostUuid></brick>
            <brick
uuid="236f7068-8b99-4aa0-a0b5-40b76146cdf4">zod:/dummy/brick100/quota_one_hot<name>zod:/dummy/brick100/quota_one_hot</name><hostUuid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</hostUuid></brick>
          </hotBricks>
          <coldBricks>
            <coldBrickType>Distributed-Replicate</coldBrickType>
            <numberOfBricks>2 x 2 = 4</numberOfBricks>
            <brick
uuid="236f7068-8b99-4aa0-a0b5-40b76146cdf4">zod:/rhs/brick1/quota_one<name>zod:/rhs/brick1/quota_one</name><hostUuid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</hostUuid></brick>
            <brick
uuid="236f7068-8b99-4aa0-a0b5-40b76146cdf4">yarrow:/rhs/brick1/quota_one<name>yarrow:/rhs/brick1/quota_one</name><hostUuid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</hostUuid></brick>
            <brick
uuid="236f7068-8b99-4aa0-a0b5-40b76146cdf4">zod:/rhs/brick2/quota_one<name>zod:/rhs/brick2/quota_one</name><hostUuid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</hostUuid></brick>
            <brick
uuid="236f7068-8b99-4aa0-a0b5-40b76146cdf4">yarrow:/rhs/brick2/quota_one<name>yarrow:/rhs/brick2/quota_one</name><hostUuid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</hostUuid></brick>
          </coldBricks>
        </bricks>
        <optCount>6</optCount>
        <options>
          <option>
            <name>diagnostics.brick-log-level</name>
            <value>TRACE</value>
          </option>
          <option>
            <name>features.quota-deem-statfs</name>
            <value>on</value>
          </option>
          <option>
            <name>features.ctr-enabled</name>
            <value>on</value>
          </option>
          <option>
            <name>features.inode-quota</name>
            <value>on</value>
          </option>
          <option>
            <name>features.quota</name>
            <value>on</value>
          </option>
          <option>
            <name>performance.readdir-ahead</name>
            <value>on</value>
          </option>
        </options>
      </volume>
      <count>1</count>
    </volumes>
  </volInfo>
</cliOutput>
[root@zod ~]#

-- 
You are receiving this mail because:
You are on the CC list for the bug.
Unsubscribe from this bug https://bugzilla.redhat.com/token.cgi?t=itz6DUlZvj&a=cc_unsubscribe


More information about the Bugs mailing list