[Buildbot-commits] buildbot/buildbot/test test_run.py,1.18,1.19
Brian Warner
warner at users.sourceforge.net
Fri Dec 3 22:54:55 UTC 2004
Update of /cvsroot/buildbot/buildbot/buildbot/test
In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv20004/buildbot/test
Modified Files:
test_run.py
Log Message:
Make commands (and builds) interruptible. Improve lost-slave behavior.
Merging in several days of changes from a local Arch branch; see the ChangeLog
for details about individual files.
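
For orientation before the diff: the interrupt path that the new Disconnect
tests exercise looks roughly like the sketch below. It is distilled from
testInterrupt further down; config_2 and the "dummy" builder are fixtures
defined inside test_run.py, and the slave-connection step is elided, so this
is an outline of the test flow rather than a standalone program.

    # minimal sketch of interrupting a forced build (test fixtures assumed)
    from twisted.internet import reactor
    from buildbot import master, interfaces

    m = master.BuildMaster("basedir")
    m.loadConfig(config_2)        # test config with one "dummy" builder
    m.readConfig = True
    m.startService()
    # ... attach a build slave, as RunMixin.connectSlave() does ...

    c = interfaces.IControl(m)
    bc = c.getBuilder("dummy").forceBuild(None, "forced build")
    bs = bc.getStatus()
    reactor.callLater(0.5, bc.stopBuild, "bang go splat")  # interrupt it
    d = bs.waitUntilFinished()    # fires once the build is marked FAILURE
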
Index: test_run.py
===================================================================
RCS file: /cvsroot/buildbot/buildbot/buildbot/test/test_run.py,v
retrieving revision 1.18
retrieving revision 1.19
diff -u -d -r1.18 -r1.19
--- test_run.py 30 Sep 2004 07:13:32 -0000 1.18
+++ test_run.py 3 Dec 2004 22:54:53 -0000 1.19
@@ -4,7 +4,7 @@
 dr = unittest.deferredResult
 from twisted.internet import reactor, defer
 from twisted.python import log
-import sys, os, shutil
+import sys, os, shutil, time
 #log.startLogging(sys.stderr)
 
 from buildbot import master, interfaces
@@ -44,6 +44,12 @@
 BuildmasterConfig = c
 """
 
+class MyBot(bot.Bot):
+    def remote_getSlaveInfo(self):
+        return self.parent.info
+class MyBuildSlave(bot.BuildSlave):
+    botClass = MyBot
+
 class STarget:
     __implements__ = interfaces.IStatusReceiver,
     debug = False
@@ -119,33 +125,97 @@
         # now kill the timer
         b1.waiting.stopTimer()
 
-class Status(unittest.TestCase):
+class RunMixin:
     master = None
     slave = None
+    slave2 = None
 
     def setUp(self):
         shutil.rmtree("basedir", ignore_errors=1)
+        shutil.rmtree("slavebase", ignore_errors=1)
+        shutil.rmtree("slavebase2", ignore_errors=1)
         os.mkdir("basedir")
         self.master = master.BuildMaster("basedir")
 
     def connectSlave(self):
         port = self.master.slavePort._port.getHost().port
         os.mkdir("slavebase")
-        slave = bot.BuildSlave("localhost", port, "bot1", "sekrit",
-                               "slavebase", keepalive=0, usePTY=1)
+        slave = MyBuildSlave("localhost", port, "bot1", "sekrit",
+                             "slavebase", keepalive=0, usePTY=1)
+        slave.info = {"admin": "one"}
         self.slave = slave
         slave.startService()
         d = self.master.botmaster.waitUntilBuilderAttached("dummy")
         dr(d)
 
+    def connectSlave2(self):
+        port = self.master.slavePort._port.getHost().port
+        os.mkdir("slavebase2")
+        slave = MyBuildSlave("localhost", port, "bot1", "sekrit",
+                             "slavebase2", keepalive=0, usePTY=1)
+        slave.info = {"admin": "two"}
+        self.slave2 = slave
+        slave.startService()
+
     def tearDown(self):
+        log.msg("doing tearDown")
+        self.shutdownSlave()
+        if self.master:
+            dr(defer.maybeDeferred(self.master.stopService))
+            self.master = None
+
+    # various forms of slave death
+
+    def shutdownSlave(self, waitForMasterToo=True):
+        # the slave has disconnected normally: they SIGINT'ed it, or it shut
+        # down willingly. This will kill child processes and give them a
+        # chance to finish up.
         if self.slave:
-            d = self.master.botmaster.waitUntilBuilderDetached("dummy")
+            d = self.slave.waitUntilDisconnected()
             dr(defer.maybeDeferred(self.slave.stopService))
             dr(d)
-        if self.master:
-            dr(defer.maybeDeferred(self.master.stopService))
-
+            self.slave = None
+        if self.slave2:
+            d = self.slave2.waitUntilDisconnected()
+            dr(defer.maybeDeferred(self.slave2.stopService))
+            dr(d)
+            self.slave2 = None
+        if waitForMasterToo:
+            d = self.master.botmaster.waitUntilBuilderDetached("dummy")
+            dr(d)
+
+    def killSlave(self):
+        # the slave has died, its host sent a FIN. The .notifyOnDisconnect
+        # callbacks will terminate the current step, so the build should be
+        # flunked (no further steps should be started).
+        self.slave.bf.continueTrying = 0
+        bot = self.slave.getServiceNamed("bot")
+        broker = bot.builders["dummy"].remote.broker
+        broker.transport.loseConnection()
+        self.slave = None
+
+    def disappearSlave(self):
+        # the slave's host has vanished off the net, leaving the connection
+        # dangling. This will be detected quickly by app-level keepalives or
+        # a ping, or slowly by TCP timeouts.
+
+        # implement this by replacing the slave Broker's .dataReceived method
+        # with one that just throws away all data.
+        def discard(data):
+            pass
+        bot = self.slave.getServiceNamed("bot")
+        broker = bot.builders["dummy"].remote.broker
+        broker.dataReceived = discard # seal its ears
+        broker.transport.write = discard # and take away its voice
+
+    def ghostSlave(self):
+        # the slave thinks it has lost the connection, and initiated a
+        # reconnect. The master doesn't yet realize it has lost the previous
+        # connection, and sees two connections at once.
+        raise NotImplementedError
+
+class Status(RunMixin, unittest.TestCase):
+
     def testSlave(self):
         m = self.master
         s = m.getStatus()
@@ -269,3 +339,166 @@
         res = dr(d)
         self.failUnless(3.0 < t4.eta_build < 5.0) # should be 4 seconds
+
+class Disconnect(RunMixin, unittest.TestCase):
+
+    def disconnectSetup(self):
+        # verify that disconnecting the slave during a build properly
+        # terminates the build
+        m = self.master
+        s = m.getStatus()
+        c = interfaces.IControl(m)
+
+        m.loadConfig(config_2)
+        m.readConfig = True
+        m.startService()
+
+        self.failUnlessEqual(s.getBuilderNames(), ["dummy"])
+        s1 = s.getBuilder("dummy")
+        self.failUnlessEqual(s1.getName(), "dummy")
+        self.failUnlessEqual(s1.getState(), ("offline", None, None))
+        self.failUnlessEqual(s1.getCurrentBuild(), None)
+        self.failUnlessEqual(s1.getLastFinishedBuild(), None)
+        self.failUnlessEqual(s1.getBuild(-1), None)
+
+        self.connectSlave()
+        self.failUnlessEqual(s1.getState(), ("idle", None, None))
+        return m,s,c,s1
+
+    def verifyDisconnect(self, bs):
+        self.failUnless(bs.isFinished())
+
+        step1 = bs.getSteps()[0]
+        self.failUnlessEqual(step1.getText(), ["delay", "interrupted"])
+        self.failUnlessEqual(step1.getResults()[0], builder.FAILURE)
+
+        self.failUnlessEqual(bs.getResults(), builder.FAILURE)
+
+
+    def testIdle1(self):
+        m,s,c,s1 = self.disconnectSetup()
+        # disconnect the slave before the build starts
+        self.shutdownSlave() # dies before it gets started
+
+        # trying to force a build now will cause an error. Regular builds
+        # just wait for the slave to re-appear, but forced builds that
+        # cannot be run right away trigger NoSlaveErrors
+        fb = c.getBuilder("dummy").forceBuild
+        self.failUnlessRaises(interfaces.NoSlaveError,
+                              fb, None, "forced build")
+
+    def testIdle2(self):
+        m,s,c,s1 = self.disconnectSetup()
+        # now suppose the slave goes missing
+        self.disappearSlave()
+
+        # forcing a build will work: the build will begin, since we think we
+        # have a slave. The build will fail, however, because of a timeout
+        # error.
+        bc = c.getBuilder("dummy").forceBuild(None, "forced build")
+        bs = bc.getStatus()
+        print "build started"
+        d = bs.waitUntilFinished()
+        dr(d, 5)
+        print bs.getText()
+    testIdle2.skip = "short timeout not yet implemented"
+
+    def testBuild1(self):
+        m,s,c,s1 = self.disconnectSetup()
+        # this next sequence is timing-dependent. The dummy build takes at
+        # least 3 seconds to complete, and this batch of commands must
+        # complete within that time.
+        #
+        bc = c.getBuilder("dummy").forceBuild(None, "forced build")
+        bs = bc.getStatus()
+        # kill the slave while it's running the first step
+        self.shutdownSlave() # dies before it gets started
+
+        # now examine the just-stopped build and make sure it is really
+        # stopped. This is checking for bugs in which the slave-detach gets
+        # missed or causes an exception which prevents the build from being
+        # marked as "finished due to an error".
+        d = bs.waitUntilFinished()
+        dr(d, 5)
+
+        self.failUnlessEqual(s1.getState()[0], "offline")
+        self.verifyDisconnect(bs)
+
+    def testBuild2(self):
+        m,s,c,s1 = self.disconnectSetup()
+        # this next sequence is timing-dependent
+        bc = c.getBuilder("dummy").forceBuild(None, "forced build")
+        bs = bc.getStatus()
+        # shutdown the slave while it's running the first step
+        reactor.callLater(0.5, self.shutdownSlave)
+
+        dr(bs.waitUntilFinished(), 5)
+
+        self.failUnlessEqual(s1.getState()[0], "offline")
+        self.verifyDisconnect(bs)
+
+    def testBuild3(self):
+        m,s,c,s1 = self.disconnectSetup()
+        # this next sequence is timing-dependent
+        bc = c.getBuilder("dummy").forceBuild(None, "forced build")
+        bs = bc.getStatus()
+        # kill the slave while it's running the first step
+        reactor.callLater(0.5, self.killSlave)
+
+        dr(bs.waitUntilFinished(), 5)
+
+        self.failUnlessEqual(s1.getState()[0], "offline")
+        self.verifyDisconnect(bs)
+
+    def testInterrupt(self):
+        m,s,c,s1 = self.disconnectSetup()
+        # this next sequence is timing-dependent
+        bc = c.getBuilder("dummy").forceBuild(None, "forced build")
+        bs = bc.getStatus()
+        # halt the build while it's running the first step
+        reactor.callLater(0.5, bc.stopBuild, "bang go splat")
+
+        dr(bs.waitUntilFinished(), 5)
+
+        self.verifyDisconnect(bs)
+
+    def testDisappear(self):
+        m,s,c,s1 = self.disconnectSetup()
+        bc = c.getBuilder("dummy")
+
+        # ping should succeed
+        d = bc.ping(1)
+        res = dr(d)
+        self.failUnlessEqual(res, True)
+
+        # now, before any build is run, make the slave disappear
+        self.slave.bf.continueTrying = 0
+        self.disappearSlave()
+
+        # at this point, a ping to the slave should timeout
+        d = bc.ping(1)
+        res = dr(d)
+        self.failUnlessEqual(res, False)
+
+    def testDuplicate(self):
+        m,s,c,s1 = self.disconnectSetup()
+        bc = c.getBuilder("dummy")
+        bs = s.getBuilder("dummy")
+        ss = bs.getSlave()
+
+        self.failUnless(ss.isConnected())
+        self.failUnlessEqual(ss.getAdmin(), "one")
+
+        # now, before any build is run, make the first slave disappear
+        self.slave.bf.continueTrying = 0
+        self.disappearSlave()
+
+        d = self.master.botmaster.waitUntilBuilderDetached("dummy")
+        # now let the new slave take over
+        self.connectSlave2()
+        dr(d, 2)
+        d = self.master.botmaster.waitUntilBuilderAttached("dummy")
+        dr(d, 2)
+
+        self.failUnless(ss.isConnected())
+        self.failUnlessEqual(ss.getAdmin(), "two")
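
A side note on the disappearSlave() helper above: the "host fell off the net"
simulation just mutes a live Twisted connection so that bytes are silently
dropped in both directions, leaving the peer to notice via application-level
keepalives, an explicit ping, or an eventual TCP timeout. The same idiom works
on any Protocol, not only a PB Broker; below is a generic sketch (Echo and
make_deaf_and_mute are made-up names for illustration, not Buildbot code).

    from twisted.internet import protocol

    class Echo(protocol.Protocol):
        # trivial protocol standing in for the PB Broker used by the slave
        def dataReceived(self, data):
            self.transport.write(data)

    def make_deaf_and_mute(proto):
        # emulate a vanished peer: discard everything read or written on a
        # still-open connection, leaving it dangling
        def discard(data):
            pass
        proto.dataReceived = discard      # seal its ears
        proto.transport.write = discard   # and take away its voice
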