[Gluster-Maintainers] [gluster-packaging] glusterfs-3.9.1 released
Patrick Matthäi
pmatthaei at debian.org
Thu Jan 19 15:58:20 UTC 2017
On 17.01.2017 19:47, Gluster Build System wrote:
>
> SRC: http://bits.gluster.org/pub/gluster/glusterfs/src/glusterfs-3.9.1.tar.gz
>
> This release is made off jenkins-release-182
>
> -- Gluster Build System
Hi,

on a Debian system, glusterfs 3.9.{0,1} fails at make install because it
tries to install a non-existent file, glustereventsd-Debian:
Making install in doc
/bin/mkdir -p '/build/glusterfs-3.9.1/debian/tmp/usr/share/man/man8'
/usr/bin/install -c -m 644 glusterfs.8 mount.glusterfs.8 gluster.8
glusterd.8 glusterfsd.8
'/build/glusterfs-3.9.1/debian/tmp/usr/share/man/man8'
Making install in extras
Making install in init.d
/usr/bin/install: cannot stat 'glustereventsd-Debian': No such file or
directory
Makefile:572: recipe for target 'Debian' failed
make[4]: *** [Debian] Error 1
Makefile:462: recipe for target 'install-am' failed
make[3]: *** [install-am] Error 2
Makefile:655: recipe for target 'install-recursive' failed
make[2]: *** [install-recursive] Error 1
Makefile:589: recipe for target 'install-recursive' failed
make[1]: *** [install-recursive] Error 1
make[1]: Leaving directory '/build/glusterfs-3.9.1'
dh_auto_install: make -j1 install
DESTDIR=/build/glusterfs-3.9.1/debian/tmp AM_UPDATE_INFO_DIR=no returned
exit code 2
debian/rules:9: recipe for target 'binary' failed
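The failing recipe is the per-distribution target in
extras/init.d/Makefile.am (it shows up as target 'Debian' in the log
above). A rough sketch of the pattern, with assumed variable names
(INIT_DIR, INSTALL_PROGRAM and friends may be spelled differently
upstream):

    # Sketch only: the target named after the detected distribution
    # installs the matching init script. On Debian it expands to
    # glustereventsd-Debian, which is not shipped in the 3.9.{0,1}
    # tarball, hence the "cannot stat" above.
    Debian:
            $(mkdir_p) $(DESTDIR)$(INIT_DIR)
            $(INSTALL_PROGRAM) glustereventsd-Debian \
                    $(DESTDIR)$(INIT_DIR)/glustereventsd

Presumably the script (or its .in template) just needs to be added to
EXTRA_DIST so that it ends up in the release tarball.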
Please also apply the attached patch, which fixes a number of spelling
errors in the source :)
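(Given the glusterfs-3.9.1.orig/ and glusterfs-3.9.1/ prefixes in the
diff, it should apply with patch -p1 from the top of the unpacked
source tree.)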
--
/*
Mit freundlichem Gruß / With kind regards,
Patrick Matthäi
GNU/Linux Debian Developer
Blog: http://www.linux-dev.org/
E-Mail: pmatthaei at debian.org
patrick at linux-dev.org
*/
-------------- next part --------------
# Fix spelling errors in source.
diff -Naur glusterfs-3.9.1.orig/api/src/glfs-mgmt.c glusterfs-3.9.1/api/src/glfs-mgmt.c
--- glusterfs-3.9.1.orig/api/src/glfs-mgmt.c 2017-01-17 19:47:06.594255690 +0100
+++ glusterfs-3.9.1/api/src/glfs-mgmt.c 2017-01-19 16:12:42.250198969 +0100
@@ -240,7 +240,7 @@
if (-1 == req->rpc_status) {
gf_msg (frame->this->name, GF_LOG_ERROR, EINVAL,
API_MSG_INVALID_ENTRY,
- "GET_VOLUME_INFO RPC call is not successfull");
+ "GET_VOLUME_INFO RPC call is not successful");
errno = EINVAL;
ret = -1;
goto out;
diff -Naur glusterfs-3.9.1.orig/xlators/cluster/dht/src/dht-common.c glusterfs-3.9.1/xlators/cluster/dht/src/dht-common.c
--- glusterfs-3.9.1.orig/xlators/cluster/dht/src/dht-common.c 2017-01-17 19:47:06.730255961 +0100
+++ glusterfs-3.9.1/xlators/cluster/dht/src/dht-common.c 2017-01-19 16:12:42.254198945 +0100
@@ -1715,7 +1715,7 @@
* be unlinked (performed in the "else if" block below)
*
* But if a linkto file is found on hashed subvolume, it may be
- * pointing to vaild cached node. So unlinking of linkto
+ * pointing to valid cached node. So unlinking of linkto
* file on hashed subvolume is skipped and inside
* dht_lookup_everywhere_done, checks are performed. If this
* linkto file is found as stale linkto file, it is deleted
diff -Naur glusterfs-3.9.1.orig/xlators/features/bit-rot/src/bitd/bit-rot.c glusterfs-3.9.1/xlators/features/bit-rot/src/bitd/bit-rot.c
--- glusterfs-3.9.1.orig/xlators/features/bit-rot/src/bitd/bit-rot.c 2017-01-17 19:47:06.749255999 +0100
+++ glusterfs-3.9.1/xlators/features/bit-rot/src/bitd/bit-rot.c 2017-01-19 16:12:42.254198945 +0100
@@ -1211,7 +1211,7 @@
fsscrub = &priv->fsscrub;
/**
- * if this child already witnesses a successfull connection earlier
+ * if this child already witnesses a successful connection earlier
* there's no need to initialize mutexes, condvars, etc..
*/
if (_br_child_witnessed_connection (child))
diff -Naur glusterfs-3.9.1.orig/xlators/features/trash/src/trash.c glusterfs-3.9.1/xlators/features/trash/src/trash.c
--- glusterfs-3.9.1.orig/xlators/features/trash/src/trash.c 2017-01-17 19:47:06.771256046 +0100
+++ glusterfs-3.9.1/xlators/features/trash/src/trash.c 2017-01-19 16:12:42.254198945 +0100
@@ -1669,7 +1669,7 @@
local->newloc.inode = inode_new (local->loc.inode->table);
local->newfd = fd_create (local->newloc.inode, frame->root->pid);
- /* Creating vaild parent and pargfids for both files */
+ /* Creating valid parent and pargfids for both files */
if (dir_entry == NULL) {
ret = EINVAL;
diff -Naur glusterfs-3.9.1.orig/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c glusterfs-3.9.1/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
--- glusterfs-3.9.1.orig/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c 2017-01-17 19:47:06.787256077 +0100
+++ glusterfs-3.9.1/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c 2017-01-19 16:13:01.402083462 +0100
@@ -682,7 +682,7 @@
if (ret < 0) {
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_RES_DECODE_FAIL, "Failed to serialize friend"
- " update repsonse");
+ " update response");
goto out;
}
diff -Naur glusterfs-3.9.1.orig/xlators/mgmt/glusterd/src/glusterd-store.c glusterfs-3.9.1/xlators/mgmt/glusterd/src/glusterd-store.c
--- glusterfs-3.9.1.orig/xlators/mgmt/glusterd/src/glusterd-store.c 2017-01-17 19:47:06.791256085 +0100
+++ glusterfs-3.9.1/xlators/mgmt/glusterd/src/glusterd-store.c 2017-01-19 16:12:42.254198945 +0100
@@ -3155,7 +3155,7 @@
}
if (!S_ISDIR (st.st_mode)) {
- gf_msg_debug (this->name, 0, "%s is not a vaild volume"
+ gf_msg_debug (this->name, 0, "%s is not a valid volume"
, entry->d_name);
goto next;
}
diff -Naur glusterfs-3.9.1.orig/xlators/nfs/server/src/nfs3.c glusterfs-3.9.1/xlators/nfs/server/src/nfs3.c
--- glusterfs-3.9.1.orig/xlators/nfs/server/src/nfs3.c 2017-01-17 19:47:06.803256107 +0100
+++ glusterfs-3.9.1/xlators/nfs/server/src/nfs3.c 2017-01-19 16:13:01.402083462 +0100
@@ -1511,7 +1511,7 @@
if (ret < 0) {
gf_msg (GF_NFS, GF_LOG_ERROR, -ret,
NFS_MSG_HARD_RESOLVE_FAIL,
- "failed to start hard reslove");
+ "failed to start hard resolve");
stat = nfs3_errno_to_nfsstat3 (-ret);
}
diff -Naur glusterfs-3.9.1.orig/events/src/glustereventsd.py glusterfs-3.9.1/events/src/glustereventsd.py
--- glusterfs-3.9.1.orig/events/src/glustereventsd.py 2017-01-17 19:47:06.625255753 +0100
+++ glusterfs-3.9.1/events/src/glustereventsd.py 2017-01-19 16:47:41.217586890 +0100
@@ -59,7 +59,7 @@
try:
# Event Type to Function Map, Recieved event data will be in
# the form <TIMESTAMP> <TYPE> <DETAIL>, Get Event name for the
- # recieved Type/Key and construct a function name starting with
+ # received Type/Key and construct a function name starting with
# handle_ For example: handle_event_volume_create
func_name = "handle_" + all_events[int(key)].lower()
except IndexError:
diff -Naur glusterfs-3.9.1.orig/xlators/cluster/ec/src/ec-code.c glusterfs-3.9.1/xlators/cluster/ec/src/ec-code.c
--- glusterfs-3.9.1.orig/xlators/cluster/ec/src/ec-code.c 2017-01-17 19:47:06.737255977 +0100
+++ glusterfs-3.9.1/xlators/cluster/ec/src/ec-code.c 2017-01-19 16:45:51.382244040 +0100
@@ -882,7 +882,7 @@
if (file.error) {
gf_msg(xl->name, GF_LOG_WARNING, 0, EC_MSG_EXTENSION_FAILED,
- "Unable to detemine supported CPU extensions. Not using any "
+ "Unable to determine supported CPU extensions. Not using any "
"cpu extensions");
gen = NULL;
diff -Naur glusterfs-3.9.1.orig/xlators/performance/md-cache/src/md-cache.c glusterfs-3.9.1/xlators/performance/md-cache/src/md-cache.c
--- glusterfs-3.9.1.orig/xlators/performance/md-cache/src/md-cache.c 2017-01-17 19:47:06.806256116 +0100
+++ glusterfs-3.9.1/xlators/performance/md-cache/src/md-cache.c 2017-01-19 16:47:35.441621457 +0100
@@ -38,8 +38,8 @@
uint64_t negative_lookup; /* No. of negative lookups */
uint64_t nameless_lookup; /* No. of negative lookups that were sent
sent to bricks */
- uint64_t stat_invals; /* No. of invalidates recieved from upcall*/
- uint64_t xattr_invals; /* No. of invalidates recieved from upcall*/
+ uint64_t stat_invals; /* No. of invalidates received from upcall*/
+ uint64_t xattr_invals; /* No. of invalidates received from upcall*/
uint64_t need_lookup; /* No. of lookups issued, because other xlators
* requested for explicit lookup */
gf_lock_t lock;
@@ -673,7 +673,7 @@
LOCK (&mdc->lock);
{
if (mdc->xattr) {
- gf_msg_trace ("md-cache", 0, "deleteing the old xattr "
+ gf_msg_trace ("md-cache", 0, "deleting the old xattr "
"cache (%s)", uuid_utoa (inode->gfid));
dict_unref (mdc->xattr);
mdc->xattr = NULL;
@@ -2535,9 +2535,9 @@
conf->mdc_counter.nameless_lookup);
gf_proc_dump_write("negative_lookup_count", "%"PRId64,
conf->mdc_counter.negative_lookup);
- gf_proc_dump_write("stat_invalidations_recieved", "%"PRId64,
+ gf_proc_dump_write("stat_invalidations_received", "%"PRId64,
conf->mdc_counter.stat_invals);
- gf_proc_dump_write("xattr_invalidations_recieved", "%"PRId64,
+ gf_proc_dump_write("xattr_invalidations_received", "%"PRId64,
conf->mdc_counter.xattr_invals);
return 0;
diff -Naur glusterfs-3.9.1.orig/xlators/protocol/client/src/client-callback.c glusterfs-3.9.1/xlators/protocol/client/src/client-callback.c
--- glusterfs-3.9.1.orig/xlators/protocol/client/src/client-callback.c 2017-01-17 19:47:06.810256122 +0100
+++ glusterfs-3.9.1/xlators/protocol/client/src/client-callback.c 2017-01-19 16:47:27.993666015 +0100
@@ -117,7 +117,7 @@
if (ret < 0)
goto out;
- gf_msg_trace (THIS->name, 0, "Cache invalidation cbk recieved for gfid:"
+ gf_msg_trace (THIS->name, 0, "Cache invalidation cbk received for gfid:"
" %s, ret = %d", ca_req.gfid, ret);
default_notify (THIS, GF_EVENT_UPCALL, &upcall_data);