[Gluster-Maintainers] Build failed in Jenkins: centos8-s390-regression #78

jenkins at build.gluster.org
Tue Nov 15 07:59:42 UTC 2022


See <https://build.gluster.org/job/centos8-s390-regression/78/display/redirect?page=changes>

Changes:

[GitHub] fix potential deadlock in gf_print_trace (#3898)


------------------------------------------
[...truncated 5.11 MB...]
losetup: /d/dev/loop*: failed to use device: No such device
mkdir: cannot create directory ‘/d/backends’: No space left on device
mkdir: cannot create directory ‘/d/dev’: No space left on device
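
The two mkdir failures above are the real root cause of this run: /d on the builder is out of space and its backing loop device is gone, so no brick directories can be created and every volume operation below fails in cascade. A pre-flight check along the following lines would fail fast instead; this is a hypothetical sketch, not part of the Gluster test harness, and the 10 GiB threshold is an assumption:

    #!/bin/bash
    # Hypothetical pre-flight check (not part of the Gluster harness):
    # verify the /d test filesystem has free space and a usable loop
    # device before any test runs, so a full disk fails fast here
    # instead of cascading through the whole suite.
    REQUIRED_KB=$((10 * 1024 * 1024))   # assumed ~10 GiB; tune per suite

    avail_kb=$(df --output=avail -k /d | tail -n 1 | tr -d ' ')
    if [ "${avail_kb:-0}" -lt "$REQUIRED_KB" ]; then
        echo "FATAL: /d has ${avail_kb}K free, need ${REQUIRED_KB}K" >&2
        exit 1
    fi

    if ! losetup -f >/dev/null 2>&1; then
        echo "FATAL: no free loop device on this builder" >&2
        exit 1
    fi
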
ok   1 [   1011/   3658] <   8> 'glusterd'
ok   2 [     20/     10] <   9> 'pidof glusterd'
volume create: patchy: failed: Failed to create brick directory for brick 148.100.84.23:/d/backends/brick0. Reason : No such file or directory 
not ok   3 [     25/    225] <  10> 'gluster --mode=script --wignore volume create patchy replica 3 148.100.84.23:/d/backends/brick0 148.100.84.23:/d/backends/brick1 148.100.84.23:/d/backends/brick2' -> ''
volume set: failed: Volume patchy does not exist
not ok   4 [     16/    130] <  11> 'gluster --mode=script --wignore volume set patchy performance.stat-prefetch off' -> ''
volume set: failed: Volume patchy does not exist
not ok   5 [     13/    137] <  12> 'gluster --mode=script --wignore volume set patchy cluster.self-heal-daemon off' -> ''
volume set: failed: Volume patchy does not exist
not ok   6 [     22/    133] <  13> 'gluster --mode=script --wignore volume set patchy cluster.data-self-heal on' -> ''
volume set: failed: Volume patchy does not exist
not ok   7 [     18/    142] <  14> 'gluster --mode=script --wignore volume set patchy cluster.metadata-self-heal on' -> ''
volume set: failed: Volume patchy does not exist
not ok   8 [     16/    158] <  15> 'gluster --mode=script --wignore volume set patchy cluster.entry-self-heal on' -> ''
volume start: patchy: failed: Volume patchy does not exist
not ok   9 [    121/    127] <  16> 'gluster --mode=script --wignore volume start patchy' -> ''
not ok  10 [     24/   1106] <  17> 'glusterfs --volfile-id=/patchy --volfile-server=148.100.84.23 /mnt/glusterfs/0 --attribute-timeout=0 --entry-timeout=0' -> ''
ok  11 [     17/      2] <  20> 'touch file'
setfattr: /d/backends/brick0/file: No such file or directory
not ok  12 [     22/      2] <  21> 'setfattr -n user.attribute1 -v value /d/backends/brick0/file' -> ''
cat: /var/run/gluster/vols/patchy/148.100.84.23-d-backends-brick2.pid: No such file or directory
Usage: gf_attach uds_path volfile_path (to attach)
       gf_attach -d uds_path brick_path (to detach)
ok  13 [     14/  45177] <  22> 'kill_brick patchy 148.100.84.23 /d/backends/brick2'
ok  14 [    283/      2] <  23> 'chmod +x file'
volume start: patchy: failed: Volume patchy does not exist
not ok  15 [     15/    114] <  26> 'gluster --mode=script --wignore volume start patchy force' -> ''
kill: usage: kill [-s sigspec | -n signum | -sigspec] pid | jobspec ... or kill -l [sigspec]
grep: nostatedump: No such file or directory
[... previous two lines repeated 12 more times ...]
not ok  16 [     68/  48711] <  27> '1 afr_child_up_status patchy 2' -> 'Got "" instead of "1"'
not ok  17 [     17/    848] <  28> '2 get_pending_heal_count patchy' -> 'Got "" instead of "2"'
ok  18 [     24/     14] <  31> 'stat file'
not ok  19 [     13/  80288] <  32> '^0$ get_pending_heal_count patchy' -> 'Got "" instead of "^0$"'
stat: cannot statx '/d/backends/brick0/file': No such file or directory
stat: cannot statx '/d/backends/brick1/file': No such file or directory
stat: cannot statx '/d/backends/brick2/file': No such file or directory
not ok  20 [    316/      7] <  38> '0:0:-rwxr-xr-x echo' -> 'Got "" instead of "0:0:-rwxr-xr-x"'
not ok  21 [     22/     11] <  39> '0:0:-rwxr-xr-x echo' -> 'Got "" instead of "0:0:-rwxr-xr-x"'
not ok  22 [     14/      3] <  40> '0:0:-rwxr-xr-x echo' -> 'Got "" instead of "0:0:-rwxr-xr-x"'
getfattr: /d/backends/brick0/file: No such file or directory
getfattr: /d/backends/brick1/file: No such file or directory
getfattr: /d/backends/brick2/file: No such file or directory
ok  23 [     35/      3] <  46> '0 echo 0'
ok  24 [     17/      3] <  47> '0 echo 0'
not ok  25 [    360/      8] <  52> '000000000000000000000000 echo' -> 'Got "" instead of "000000000000000000000000"'
not ok  26 [     25/     11] <  53> '000000000000000000000000 echo' -> 'Got "" instead of "000000000000000000000000"'
/home/jenkins/root/workspace/centos8-s390-regression
losetup: /d/dev/loop*: failed to use device: No such device
rm: cannot remove '/mnt/glusterfs/0': Directory not empty
Aborting.

/d/dev could not be deleted, here are the left over items
drwxr-xr-x. 2 root root 4096 Nov 15 07:57 /mnt/glusterfs/0
ls: cannot access '/mnt/glusterfs/0/file25702132.data': No such file or directory
-rw-r--r--. 1 root root 1048576 Nov 15 07:57 /mnt/glusterfs/0/file56331609.data
-rw-r--r--. 1 root root 1048576 Nov 15 07:57 /mnt/glusterfs/0/file79266881.data
ls: cannot access '/mnt/glusterfs/0/file39634420.data': No such file or directory
-rw-r--r--. 1 root root 1048576 Nov 15 07:57 /mnt/glusterfs/0/file39554715.data
-rw-r--r--. 1 root root 1048576 Nov 15 07:57 /mnt/glusterfs/0/file56308866.data
ls: cannot access '/mnt/glusterfs/0/file115480568.data': No such file or directory
ls: cannot access '/mnt/glusterfs/0/file25632291.data': No such file or directory

Please correct the problem and try again.
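
Files that show up in the directory listing but cannot be stat'd, as above, are what a stale or half-torn-down FUSE mount typically looks like, which would also explain why rm saw the mount point as not empty. Assuming the paths from this log, a manual cleanup on the builder might look like this sketch (hypothetical, run as root):

    # Hypothetical manual cleanup for the wedged workspace; paths are
    # taken from the log above.
    umount -l /mnt/glusterfs/0 2>/dev/null   # lazy-unmount any stale FUSE mount
    losetup -D                               # detach all loop devices
    rm -rf /mnt/glusterfs/0 /d/backends /d/dev
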

Dubious, test returned 1 (wstat 256, 0x100)
Failed 18/26 subtests 

Test Summary Report
-------------------
./tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t (Wstat: 256 Tests: 26 Failed: 18)
  Failed tests:  3-10, 12, 15-17, 19-22, 25-26
  Non-zero exit status: 1
Files=1, Tests=26, 184 wallclock secs ( 0.03 usr  0.00 sys + 16.01 cusr  8.25 csys = 24.29 CPU)
Result: FAIL
Logs preserved in tarball bug-1134691-afr-lookup-metadata-heal-iteration-1.tar.gz
./tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t: bad status 1
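
The preserved tarball named above should carry the glusterd and brick logs for this iteration; something along these lines pulls it apart and checks for the disk-space signature (where you fetch the tarball from is whatever this job archives, so treat the paths as hypothetical):

    # Hypothetical inspection of the preserved log tarball.
    mkdir -p /tmp/iter1 && cd /tmp/iter1
    tar -xzf bug-1134691-afr-lookup-metadata-heal-iteration-1.tar.gz
    grep -ri "no space left" . | head
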

       *********************************
       *       REGRESSION FAILED       *
       * Retrying failed tests in case *
       * we got some spurious failures *
       *********************************
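
To rule out the test itself, it can be run in isolation from a glusterfs checkout; the output above is plain prove(1) TAP, and run-tests.sh in the source tree accepts individual test paths, so either of the following should work (flags vary by branch, so treat this as a sketch):

    # Hedged reproduction sketch from a built glusterfs source tree;
    # exact run-tests.sh options may differ across branches.
    sudo ./run-tests.sh tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t
    # or, as the harness does, directly via prove:
    sudo prove -vf ./tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t
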

FATAL: command execution failed
java.io.IOException
	at hudson.remoting.Channel.close(Channel.java:1491)
	at hudson.remoting.Channel.close(Channel.java:1447)
	at hudson.slaves.SlaveComputer.closeChannel(SlaveComputer.java:923)
	at hudson.slaves.SlaveComputer.kill(SlaveComputer.java:889)
	at hudson.model.AbstractCIBase.killComputer(AbstractCIBase.java:97)
	at jenkins.model.Jenkins.lambda$_cleanUpDisconnectComputers$11(Jenkins.java:3708)
	at hudson.model.Queue._withLock(Queue.java:1396)
	at hudson.model.Queue.withLock(Queue.java:1270)
	at jenkins.model.Jenkins._cleanUpDisconnectComputers(Jenkins.java:3704)
	at jenkins.model.Jenkins.cleanUp(Jenkins.java:3585)
	at hudson.WebAppMain.contextDestroyed(WebAppMain.java:374)
	at org.eclipse.jetty.server.handler.ContextHandler.callContextDestroyed(ContextHandler.java:1053)
	at org.eclipse.jetty.servlet.ServletContextHandler.callContextDestroyed(ServletContextHandler.java:636)
	at org.eclipse.jetty.server.handler.ContextHandler.contextDestroyed(ContextHandler.java:1010)
	at org.eclipse.jetty.servlet.ServletHandler.doStop(ServletHandler.java:306)
	at org.eclipse.jetty.util.component.AbstractLifeCycle.stop(AbstractLifeCycle.java:132)
	at org.eclipse.jetty.util.component.ContainerLifeCycle.stop(ContainerLifeCycle.java:182)
	at org.eclipse.jetty.util.component.ContainerLifeCycle.doStop(ContainerLifeCycle.java:205)
	at org.eclipse.jetty.server.handler.AbstractHandler.doStop(AbstractHandler.java:97)
	at org.eclipse.jetty.security.SecurityHandler.doStop(SecurityHandler.java:411)
	at org.eclipse.jetty.security.ConstraintSecurityHandler.doStop(ConstraintSecurityHandler.java:413)
	at org.eclipse.jetty.util.component.AbstractLifeCycle.stop(AbstractLifeCycle.java:132)
	at org.eclipse.jetty.util.component.ContainerLifeCycle.stop(ContainerLifeCycle.java:182)
	at org.eclipse.jetty.util.component.ContainerLifeCycle.doStop(ContainerLifeCycle.java:205)
	at org.eclipse.jetty.server.handler.AbstractHandler.doStop(AbstractHandler.java:97)
	at org.eclipse.jetty.server.session.SessionHandler.doStop(SessionHandler.java:498)
	at org.eclipse.jetty.util.component.AbstractLifeCycle.stop(AbstractLifeCycle.java:132)
	at org.eclipse.jetty.util.component.ContainerLifeCycle.stop(ContainerLifeCycle.java:182)
	at org.eclipse.jetty.util.component.ContainerLifeCycle.doStop(ContainerLifeCycle.java:205)
	at org.eclipse.jetty.server.handler.AbstractHandler.doStop(AbstractHandler.java:97)
	at org.eclipse.jetty.server.handler.ContextHandler.stopContext(ContextHandler.java:1033)
	at org.eclipse.jetty.servlet.ServletContextHandler.stopContext(ServletContextHandler.java:399)
	at org.eclipse.jetty.webapp.WebAppContext.stopContext(WebAppContext.java:1311)
	at org.eclipse.jetty.server.handler.ContextHandler.doStop(ContextHandler.java:1081)
	at org.eclipse.jetty.servlet.ServletContextHandler.doStop(ServletContextHandler.java:312)
	at org.eclipse.jetty.util.component.AbstractLifeCycle.stop(AbstractLifeCycle.java:132)
	at org.eclipse.jetty.util.component.ContainerLifeCycle.stop(ContainerLifeCycle.java:182)
	at org.eclipse.jetty.util.component.ContainerLifeCycle.doStop(ContainerLifeCycle.java:205)
	at org.eclipse.jetty.server.handler.AbstractHandler.doStop(AbstractHandler.java:97)
	at org.eclipse.jetty.server.Server.doStop(Server.java:516)
	at org.eclipse.jetty.util.component.AbstractLifeCycle.stop(AbstractLifeCycle.java:132)
	at winstone.Launcher.shutdown(Launcher.java:353)
	at winstone.ShutdownHook.run(ShutdownHook.java:26)
Caused: hudson.remoting.ChannelClosedException: Channel "hudson.remoting.Channel@32d026e5:builder-el8-s390x-3.ibm-l1.gluster.org": Remote call on builder-el8-s390x-3.ibm-l1.gluster.org failed. The channel is closing down or has closed down
	at hudson.remoting.Channel.call(Channel.java:993)
	at hudson.remoting.RemoteInvocationHandler.invoke(RemoteInvocationHandler.java:285)
	at com.sun.proxy.$Proxy137.isAlive(Unknown Source)
	at hudson.Launcher$RemoteLauncher$ProcImpl.isAlive(Launcher.java:1215)
	at hudson.Launcher$RemoteLauncher$ProcImpl.join(Launcher.java:1207)
	at hudson.tasks.CommandInterpreter.join(CommandInterpreter.java:195)
	at hudson.tasks.CommandInterpreter.perform(CommandInterpreter.java:145)
	at hudson.tasks.CommandInterpreter.perform(CommandInterpreter.java:92)
	at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
	at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:816)
	at hudson.model.Build$BuildExecution.build(Build.java:199)
	at hudson.model.Build$BuildExecution.doRun(Build.java:164)
	at hudson.model.AbstractBuild$AbstractBuildExecution.run(AbstractBuild.java:524)
	at hudson.model.Run.execute(Run.java:1899)
	at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:44)
	at hudson.model.ResourceController.execute(ResourceController.java:107)
	at hudson.model.Executor.run(Executor.java:449)
FATAL: Unable to delete script file /tmp/jenkins1294565855754631852.sh
java.io.IOException
	[... 43 Jetty shutdown frames identical to the trace above ...]
Caused: hudson.remoting.ChannelClosedException: Channel "hudson.remoting.Channel@32d026e5:builder-el8-s390x-3.ibm-l1.gluster.org": Remote call on builder-el8-s390x-3.ibm-l1.gluster.org failed. The channel is closing down or has closed down
	at hudson.remoting.Channel.call(Channel.java:993)
	at hudson.FilePath.act(FilePath.java:1186)
	at hudson.FilePath.act(FilePath.java:1175)
	at hudson.FilePath.delete(FilePath.java:1722)
	at hudson.tasks.CommandInterpreter.perform(CommandInterpreter.java:163)
	at hudson.tasks.CommandInterpreter.perform(CommandInterpreter.java:92)
	at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
	at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:816)
	at hudson.model.Build$BuildExecution.build(Build.java:199)
	at hudson.model.Build$BuildExecution.doRun(Build.java:164)
	at hudson.model.AbstractBuild$AbstractBuildExecution.run(AbstractBuild.java:524)
	at hudson.model.Run.execute(Run.java:1899)
	at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:44)
	at hudson.model.ResourceController.execute(ResourceController.java:107)
	at hudson.model.Executor.run(Executor.java:449)
Build step 'Execute shell' marked build as failure
ERROR: Unable to tear down: null
java.lang.NullPointerException
	at org.jenkinsci.plugins.credentialsbinding.impl.UnbindableDir.tempDir(UnbindableDir.java:67)
	at org.jenkinsci.plugins.credentialsbinding.impl.UnbindableDir.secretsDir(UnbindableDir.java:62)
	at org.jenkinsci.plugins.credentialsbinding.impl.UnbindableDir.access$000(UnbindableDir.java:23)
	at org.jenkinsci.plugins.credentialsbinding.impl.UnbindableDir$UnbinderImpl.unbind(UnbindableDir.java:84)
	at org.jenkinsci.plugins.credentialsbinding.impl.SecretBuildWrapper$1.tearDown(SecretBuildWrapper.java:111)
	at hudson.model.AbstractBuild$AbstractBuildExecution.tearDownBuildEnvironments(AbstractBuild.java:564)
	at hudson.model.AbstractBuild$AbstractBuildExecution.run(AbstractBuild.java:528)
	at hudson.model.Run.execute(Run.java:1899)
	at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:44)
	at hudson.model.ResourceController.execute(ResourceController.java:107)
	at hudson.model.Executor.run(Executor.java:449)
ERROR: builder-el8-s390x-3.ibm-l1.gluster.org is offline; cannot locate java-1.6.0-openjdk-1.6.0.0.x86_64

