[Gluster-users] Parallel process hang on gluster volume

nico at furyweb.fr nico at furyweb.fr
Fri Jun 21 07:48:47 UTC 2019


I encountered an issue on production servers using GlusterFS 5.1 servers and 4.1.5 clients when several processes write to a gluster volume at the same time.

With more than 48 processes writing to the volume at the same time, they become blocked in D state (uninterruptible sleep). I guess some volume settings have to be tuned, but I can't figure out which.
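
The writers are Perl scripts (see the ps output below); a rough way to generate a comparable load for testing, not the real script, might look like this (the /mnt/logsscripts mount point is only an example):

  # spawn 64 concurrent writers against the gluster mount (example mount point)
  for i in $(seq 1 64); do
      dd if=/dev/zero of=/mnt/logsscripts/stress.$i bs=1M count=10 oflag=sync &
  done
  wait
  # list any writers stuck in uninterruptible sleep (D state)
  ps -eo pid,stat,cmd | awk '$2 ~ /^D/'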

The client is using op-version 40100 on this volume.
Below are the volume info, volume settings, and ps output for the blocked processes.
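
For reference, the cluster-wide op-version can be confirmed from any of the servers with:

  gluster volume get all cluster.op-version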

root at glusterVM1:~# gluster volume info logsscripts 
Volume Name: logsscripts 
Type: Replicate 
Volume ID: cb49af70-d197-43c1-852d-0bcf8dc9f6fa 
Status: Started 
Snapshot Count: 0 
Number of Bricks: 1 x (2 + 1) = 3 
Transport-type: tcp 
Bricks: 
Brick1: glusterVM1:/bricks/logsscripts/brick1/data 
Brick2: glusterVM2:/bricks/logsscripts/brick1/data 
Brick3: glusterVM3:/bricks/logsscripts/brick1/data (arbiter) 
Options Reconfigured: 
server.tcp-user-timeout: 42 
cluster.data-self-heal-algorithm: full 
features.trash: off 
diagnostics.client-log-level: ERROR 
ssl.cipher-list: HIGH:!SSLv2 
server.ssl: on 
client.ssl: on 
transport.address-family: inet 
nfs.disable: on 
performance.client-io-threads: off 

root at glusterVM1:~# gluster volume get logsscripts all 
Option Value 
------ ----- 
cluster.lookup-unhashed on 
cluster.lookup-optimize on 
cluster.min-free-disk 10% 
cluster.min-free-inodes 5% 
cluster.rebalance-stats off 
cluster.subvols-per-directory (null) 
cluster.readdir-optimize off 
cluster.rsync-hash-regex (null) 
cluster.extra-hash-regex (null) 
cluster.dht-xattr-name trusted.glusterfs.dht 
cluster.randomize-hash-range-by-gfid off 
cluster.rebal-throttle normal 
cluster.lock-migration off 
cluster.force-migration off 
cluster.local-volume-name (null) 
cluster.weighted-rebalance on 
cluster.switch-pattern (null) 
cluster.entry-change-log on 
cluster.read-subvolume (null) 
cluster.read-subvolume-index -1 
cluster.read-hash-mode 1 
cluster.background-self-heal-count 8 
cluster.metadata-self-heal on 
cluster.data-self-heal on 
cluster.entry-self-heal on 
cluster.self-heal-daemon on 
cluster.heal-timeout 600 
cluster.self-heal-window-size 1 
cluster.data-change-log on 
cluster.metadata-change-log on 
cluster.data-self-heal-algorithm full 
cluster.eager-lock on 
disperse.eager-lock on 
disperse.other-eager-lock on 
disperse.eager-lock-timeout 1 
disperse.other-eager-lock-timeout 1 
cluster.quorum-type auto 
cluster.quorum-count (null) 
cluster.choose-local true 
cluster.self-heal-readdir-size 1KB 
cluster.post-op-delay-secs 1 
cluster.ensure-durability on 
cluster.consistent-metadata no 
cluster.heal-wait-queue-length 128 
cluster.favorite-child-policy none 
cluster.full-lock yes 
cluster.stripe-block-size 128KB 
cluster.stripe-coalesce true 
diagnostics.latency-measurement off 
diagnostics.dump-fd-stats off 
diagnostics.count-fop-hits off 
diagnostics.brick-log-level INFO 
diagnostics.client-log-level ERROR 
diagnostics.brick-sys-log-level CRITICAL 
diagnostics.client-sys-log-level CRITICAL 
diagnostics.brick-logger (null) 
diagnostics.client-logger (null) 
diagnostics.brick-log-format (null) 
diagnostics.client-log-format (null) 
diagnostics.brick-log-buf-size 5 
diagnostics.client-log-buf-size 5 
diagnostics.brick-log-flush-timeout 120 
diagnostics.client-log-flush-timeout 120 
diagnostics.stats-dump-interval 0 
diagnostics.fop-sample-interval 0 
diagnostics.stats-dump-format json 
diagnostics.fop-sample-buf-size 65535 
diagnostics.stats-dnscache-ttl-sec 86400 
performance.cache-max-file-size 0 
performance.cache-min-file-size 0 
performance.cache-refresh-timeout 1 
performance.cache-priority 
performance.cache-size 32MB 
performance.io-thread-count 16 
performance.high-prio-threads 16 
performance.normal-prio-threads 16 
performance.low-prio-threads 16 
performance.least-prio-threads 1 
performance.enable-least-priority on 
performance.iot-watchdog-secs (null) 
performance.iot-cleanup-disconnected-reqs off 
performance.iot-pass-through false 
performance.io-cache-pass-through false 
performance.cache-size 128MB 
performance.qr-cache-timeout 1 
performance.cache-invalidation false 
performance.ctime-invalidation false 
performance.flush-behind on 
performance.nfs.flush-behind on 
performance.write-behind-window-size 1MB 
performance.resync-failed-syncs-after-fsync off 
performance.nfs.write-behind-window-size 1MB 
performance.strict-o-direct off 
performance.nfs.strict-o-direct off 
performance.strict-write-ordering off 
performance.nfs.strict-write-ordering off 
performance.write-behind-trickling-writes on 
performance.aggregate-size 128KB 
performance.nfs.write-behind-trickling-writes on 
performance.lazy-open yes 
performance.read-after-open yes 
performance.open-behind-pass-through false 
performance.read-ahead-page-count 4 
performance.read-ahead-pass-through false 
performance.readdir-ahead-pass-through false 
performance.md-cache-pass-through false 
performance.md-cache-timeout 1 
performance.cache-swift-metadata true 
performance.cache-samba-metadata false 
performance.cache-capability-xattrs true 
performance.cache-ima-xattrs true 
performance.md-cache-statfs off 
performance.xattr-cache-list 
performance.nl-cache-pass-through false 
features.encryption off 
encryption.master-key (null) 
encryption.data-key-size 256 
encryption.block-size 4096 
network.frame-timeout 1800 
network.ping-timeout 42 
network.tcp-window-size (null) 
client.ssl on 
network.remote-dio disable 
client.event-threads 2 
client.tcp-user-timeout 0 
client.keepalive-time 20 
client.keepalive-interval 2 
client.keepalive-count 9 
network.tcp-window-size (null) 
network.inode-lru-limit 16384 
auth.allow * 
auth.reject (null) 
transport.keepalive 1 
server.allow-insecure on 
server.root-squash off 
server.anonuid 65534 
server.anongid 65534 
server.statedump-path /var/run/gluster 
server.outstanding-rpc-limit 64 
server.ssl on 
auth.ssl-allow * 
server.manage-gids off 
server.dynamic-auth on 
client.send-gids on 
server.gid-timeout 300 
server.own-thread (null) 
server.event-threads 1 
server.tcp-user-timeout 42 
server.keepalive-time 20 
server.keepalive-interval 2 
server.keepalive-count 9 
transport.listen-backlog 1024 
ssl.own-cert (null) 
ssl.private-key (null) 
ssl.ca-list (null) 
ssl.crl-path (null) 
ssl.certificate-depth (null) 
ssl.cipher-list HIGH:!SSLv2 
ssl.dh-param (null) 
ssl.ec-curve (null) 
transport.address-family inet 
performance.write-behind on 
performance.read-ahead on 
performance.readdir-ahead on 
performance.io-cache on 
performance.quick-read on 
performance.open-behind on 
performance.nl-cache off 
performance.stat-prefetch on 
performance.client-io-threads off 
performance.nfs.write-behind on 
performance.nfs.read-ahead off 
performance.nfs.io-cache off 
performance.nfs.quick-read off 
performance.nfs.stat-prefetch off 
performance.nfs.io-threads off 
performance.force-readdirp true 
performance.cache-invalidation false 
features.uss off 
features.snapshot-directory .snaps 
features.show-snapshot-directory off 
features.tag-namespaces off 
network.compression off 
network.compression.window-size -15 
network.compression.mem-level 8 
network.compression.min-size 0 
network.compression.compression-level -1 
network.compression.debug false 
features.default-soft-limit 80% 
features.soft-timeout 60 
features.hard-timeout 5 
features.alert-time 86400 
features.quota-deem-statfs off 
geo-replication.indexing off 
geo-replication.indexing off 
geo-replication.ignore-pid-check off 
geo-replication.ignore-pid-check off 
features.quota off 
features.inode-quota off 
features.bitrot disable 
debug.trace off 
debug.log-history no 
debug.log-file no 
debug.exclude-ops (null) 
debug.include-ops (null) 
debug.error-gen off 
debug.error-failure (null) 
debug.error-number (null) 
debug.random-failure off 
debug.error-fops (null) 
nfs.enable-ino32 no 
nfs.mem-factor 15 
nfs.export-dirs on 
nfs.export-volumes on 
nfs.addr-namelookup off 
nfs.dynamic-volumes off 
nfs.register-with-portmap on 
nfs.outstanding-rpc-limit 16 
nfs.port 2049 
nfs.rpc-auth-unix on 
nfs.rpc-auth-null on 
nfs.rpc-auth-allow all 
nfs.rpc-auth-reject none 
nfs.ports-insecure off 
nfs.trusted-sync off 
nfs.trusted-write off 
nfs.volume-access read-write 
nfs.export-dir 
nfs.disable on 
nfs.nlm on 
nfs.acl on 
nfs.mount-udp off 
nfs.mount-rmtab /var/lib/glusterd/nfs/rmtab 
nfs.rpc-statd /sbin/rpc.statd 
nfs.server-aux-gids off 
nfs.drc off 
nfs.drc-size 0x20000 
nfs.read-size (1 * 1048576ULL) 
nfs.write-size (1 * 1048576ULL) 
nfs.readdir-size (1 * 1048576ULL) 
nfs.rdirplus on 
nfs.event-threads 1 
nfs.exports-auth-enable (null) 
nfs.auth-refresh-interval-sec (null) 
nfs.auth-cache-ttl-sec (null) 
features.read-only off 
features.worm off 
features.worm-file-level off 
features.worm-files-deletable on 
features.default-retention-period 120 
features.retention-mode relax 
features.auto-commit-period 180 
storage.linux-aio off 
storage.batch-fsync-mode reverse-fsync 
storage.batch-fsync-delay-usec 0 
storage.owner-uid -1 
storage.owner-gid -1 
storage.node-uuid-pathinfo off 
storage.health-check-interval 30 
storage.build-pgfid off 
storage.gfid2path on 
storage.gfid2path-separator : 
storage.reserve 1 
storage.health-check-timeout 10 
storage.fips-mode-rchecksum off 
storage.force-create-mode 0000 
storage.force-directory-mode 0000 
storage.create-mask 0777 
storage.create-directory-mask 0777 
storage.max-hardlinks 100 
storage.ctime off 
storage.bd-aio off 
config.gfproxyd off 
cluster.server-quorum-type off 
cluster.server-quorum-ratio 0 
changelog.changelog off 
changelog.changelog-dir {{ brick.path }}/.glusterfs/changelogs 
changelog.encoding ascii 
changelog.rollover-time 15 
changelog.fsync-interval 5 
changelog.changelog-barrier-timeout 120 
changelog.capture-del-path off 
features.barrier disable 
features.barrier-timeout 120 
features.trash off 
features.trash-dir .trashcan 
features.trash-eliminate-path (null) 
features.trash-max-filesize 5MB 
features.trash-internal-op off 
cluster.enable-shared-storage disable 
cluster.write-freq-threshold 0 
cluster.read-freq-threshold 0 
cluster.tier-pause off 
cluster.tier-promote-frequency 120 
cluster.tier-demote-frequency 3600 
cluster.watermark-hi 90 
cluster.watermark-low 75 
cluster.tier-mode cache 
cluster.tier-max-promote-file-size 0 
cluster.tier-max-mb 4000 
cluster.tier-max-files 10000 
cluster.tier-query-limit 100 
cluster.tier-compact on 
cluster.tier-hot-compact-frequency 604800 
cluster.tier-cold-compact-frequency 604800 
features.ctr-enabled off 
features.record-counters off 
features.ctr-record-metadata-heat off 
features.ctr_link_consistency off 
features.ctr_lookupheal_link_timeout 300 
features.ctr_lookupheal_inode_timeout 300 
features.ctr-sql-db-cachesize 12500 
features.ctr-sql-db-wal-autocheckpoint 25000 
features.selinux on 
locks.trace off 
locks.mandatory-locking off 
cluster.disperse-self-heal-daemon enable 
cluster.quorum-reads no 
client.bind-insecure (null) 
features.timeout 45 
features.failover-hosts (null) 
features.shard off 
features.shard-block-size 64MB 
features.shard-lru-limit 16384 
features.shard-deletion-rate 100 
features.scrub-throttle lazy 
features.scrub-freq biweekly 
features.scrub false 
features.expiry-time 120 
features.cache-invalidation off 
features.cache-invalidation-timeout 60 
features.leases off 
features.lease-lock-recall-timeout 60 
disperse.background-heals 8 
disperse.heal-wait-qlength 128 
cluster.heal-timeout 600 
dht.force-readdirp on 
disperse.read-policy gfid-hash 
cluster.shd-max-threads 1 
cluster.shd-wait-qlength 1024 
cluster.locking-scheme full 
cluster.granular-entry-heal no 
features.locks-revocation-secs 0 
features.locks-revocation-clear-all false 
features.locks-revocation-max-blocked 0 
features.locks-monkey-unlocking false 
features.locks-notify-contention no 
features.locks-notify-contention-delay 5 
disperse.shd-max-threads 1 
disperse.shd-wait-qlength 1024 
disperse.cpu-extensions auto 
disperse.self-heal-window-size 1 
cluster.use-compound-fops off 
performance.parallel-readdir off 
performance.rda-request-size 131072 
performance.rda-low-wmark 4096 
performance.rda-high-wmark 128KB 
performance.rda-cache-limit 10MB 
performance.nl-cache-positive-entry false 
performance.nl-cache-limit 10MB 
performance.nl-cache-timeout 60 
cluster.brick-multiplex off 
cluster.max-bricks-per-process 0 
disperse.optimistic-change-log on 
disperse.stripe-cache 4 
cluster.halo-enabled False 
cluster.halo-shd-max-latency 99999 
cluster.halo-nfsd-max-latency 5 
cluster.halo-max-latency 5 
cluster.halo-max-replicas 99999 
cluster.halo-min-replicas 2 
cluster.daemon-log-level INFO 
debug.delay-gen off 
delay-gen.delay-percentage 10% 
delay-gen.delay-duration 100000 
delay-gen.enable 
disperse.parallel-writes on 
features.sdfs on 
features.cloudsync off 
features.utime off 
ctime.noatime on 
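
Among all these options, the ones I would guess are most relevant to many concurrent writers are the event/IO thread counts and the outstanding RPC limit; purely as untested candidates (values illustrative), they would be changed like this:

  gluster volume set logsscripts client.event-threads 4
  gluster volume set logsscripts server.event-threads 4
  gluster volume set logsscripts performance.io-thread-count 32
  gluster volume set logsscripts server.outstanding-rpc-limit 128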

traitVM2:~# ps fax | grep '[_]save' 
7305 ? D 2:57 [remote_save] 
7801 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7802 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7803 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7804 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7805 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7806 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7807 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7808 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7809 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7810 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7811 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7812 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7813 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7814 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7815 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7816 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7817 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7818 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7819 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7820 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7821 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7822 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7823 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7824 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7825 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7826 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7827 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7828 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7829 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7830 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7831 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7832 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7833 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7834 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7835 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7836 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7837 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7838 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7839 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7840 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7841 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7842 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7843 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7844 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7845 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7846 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7847 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7848 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7849 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7850 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7851 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7852 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7853 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7854 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7855 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7856 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7857 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7858 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7859 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7860 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7861 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7862 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7863 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7864 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7865 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7866 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7867 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7868 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7869 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7870 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7871 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7872 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7873 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7874 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7875 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7876 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7877 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7878 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7879 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7880 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7881 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7882 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7883 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7884 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000 
7885 ? D 0:00 \_ /usr/bin/perl /home/prod/current/app/scripts/remote_save.pl 30000
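
If it would help, I can also collect kernel stacks of the blocked writers and a statedump of the volume; for example (PID taken from the list above):

  # kernel stack of one blocked writer, run as root on the client
  cat /proc/7801/stack
  # dump all blocked (D-state) task stacks to the kernel log
  echo w > /proc/sysrq-trigger
  # statedump of the volume bricks, run on one of the gluster servers
  gluster volume statedump logsscripts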

