[Buildbot-commits] buildbot/buildbot/process step.py,1.66,1.67 factory.py,1.9,1.10 base.py,1.55,1.56 builder.py,1.26,1.27 interlock.py,1.7,NONE

Brian Warner warner at users.sourceforge.net
Tue Jul 19 23:12:00 UTC 2005


Update of /cvsroot/buildbot/buildbot/buildbot/process
In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv17398/buildbot/process

Modified Files:
	step.py factory.py base.py builder.py 
Removed Files:
	interlock.py 
Log Message:
Revision: arch at buildbot.sf.net--2004/buildbot--dev--0--patch-239
Creator:  Brian Warner <warner at monolith.lothar.com>

merge in build-on-branch code: Merged from warner at monolith.lothar.com--2005 (patch 0-18, 40-41)

Patches applied:

 * warner at monolith.lothar.com--2005/buildbot--dev--0--patch-40
   Merged from arch at buildbot.sf.net--2004 (patch 232-238)

 * warner at monolith.lothar.com--2005/buildbot--dev--0--patch-41
   Merged from local-usebranches (warner at monolith.lothar.com--2005/buildbot--usebranches--0) (patch 0-18)

 * warner at monolith.lothar.com--2005/buildbot--usebranches--0--base-0
   tag of warner at monolith.lothar.com--2005/buildbot--dev--0--patch-38

 * warner at monolith.lothar.com--2005/buildbot--usebranches--0--patch-1
   rearrange build scheduling

 * warner at monolith.lothar.com--2005/buildbot--usebranches--0--patch-2
   replace ugly 4-tuple with a distinct SourceStamp class

 * warner at monolith.lothar.com--2005/buildbot--usebranches--0--patch-3
   document upcoming features, clean up CVS branch= argument

 * warner at monolith.lothar.com--2005/buildbot--usebranches--0--patch-4
   Merged from arch at buildbot.sf.net--2004 (patch 227-231), warner at monolith.lothar.com--2005 (patch 39)

 * warner at monolith.lothar.com--2005/buildbot--usebranches--0--patch-5
   implement per-Step Locks, add tests (which all fail)

 * warner at monolith.lothar.com--2005/buildbot--usebranches--0--patch-6
   implement scheduler.Dependent, add (failing) tests

 * warner at monolith.lothar.com--2005/buildbot--usebranches--0--patch-7
   make test_dependencies work

 * warner at monolith.lothar.com--2005/buildbot--usebranches--0--patch-8
   finish making Locks work, tests now pass

 * warner at monolith.lothar.com--2005/buildbot--usebranches--0--patch-9
   fix test failures when run against twisted >2.0.1

 * warner at monolith.lothar.com--2005/buildbot--usebranches--0--patch-10
   rename test_interlock.py to test_locks.py

 * warner at monolith.lothar.com--2005/buildbot--usebranches--0--patch-11
   add more Locks tests, add branch examples to manual

 * warner at monolith.lothar.com--2005/buildbot--usebranches--0--patch-12
   rewrite test_vc.py, create repositories in setUp rather than offline

 * warner at monolith.lothar.com--2005/buildbot--usebranches--0--patch-13
   make new tests work with twisted-1.3.0

 * warner at monolith.lothar.com--2005/buildbot--usebranches--0--patch-14
   implement/test build-on-branch for most VC systems

 * warner at monolith.lothar.com--2005/buildbot--usebranches--0--patch-15
   minor changes: test-case-name tags, init cleanup

 * warner at monolith.lothar.com--2005/buildbot--usebranches--0--patch-16
   Merged from arch at buildbot.sf.net--2004 (patch 232-233)

 * warner at monolith.lothar.com--2005/buildbot--usebranches--0--patch-17
   Merged from arch at buildbot.sf.net--2004 (patch 234-236)

 * warner at monolith.lothar.com--2005/buildbot--usebranches--0--patch-18
   Merged from arch at buildbot.sf.net--2004 (patch 237-238), warner at monolith.lothar.com--2005 (patch 40)
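
The central change in this series is the SourceStamp class (patch-2 above),
which replaces the old 4-tuple, plus the BuildRequest objects layered on top
of it (see base.py below). As a rough usage sketch -- not part of this
commit, and the SourceStamp keyword names are assumptions inferred from the
docstrings in the diffs:

    from buildbot import sourcestamp
    from buildbot.process.base import BuildRequest

    # Build a particular revision of a branch. An empty SourceStamp()
    # means "current HEAD", as BuilderControl.forceBuild does below.
    stamp = sourcestamp.SourceStamp(branch="release-1.0", revision="1234")
    req = BuildRequest("check the release branch", stamp)

    d = req.waitUntilFinished()  # Deferred that fires with an IBuildStatus
    # a BuilderControl then queues it:
    #   builder_control.requestBuild(req)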


Index: base.py
===================================================================
RCS file: /cvsroot/buildbot/buildbot/buildbot/process/base.py,v
retrieving revision 1.55
retrieving revision 1.56
diff -u -d -r1.55 -r1.56
--- base.py	22 May 2005 02:16:14 -0000	1.55
+++ base.py	19 Jul 2005 23:11:58 -0000	1.56
@@ -15,58 +15,167 @@
 from buildbot.status.builder import Results
 from buildbot.status.progress import BuildProgress
 
+class BuildRequest:
+    """I represent a request to a specific Builder to run a single build.
+
+    I have a SourceStamp which specifies what sources I will build. This may
+    specify a specific revision of the source tree (so source.branch,
+    source.revision, and source.patch are used). The .patch attribute is
+    either None or a tuple of (patchlevel, diff), consisting of a number to
+    use in 'patch -pN', and a unified-format diff.
+
+    Alternatively, the SourceStamp may specify a set of Changes to be built,
+    contained in source.changes. In this case, I may be mergeable with other
+    BuildRequests on the same branch.
+
+    I may be part of a BuildSet, in which case I will report status results
+    to it.
+
+    @type source: a L{buildbot.sourcestamp.SourceStamp} instance.
+    @ivar source: the sources that this BuildRequest will build
+
+    @type reason: string
+    @ivar reason: the reason this Build is being requested. Schedulers
+                  provide this, but for forced builds the user requesting the
+                  build will provide a string.
+
+    @ivar status: the IBuildStatus object which tracks our status
+
+    @ivar submittedAt: a timestamp (seconds since epoch) when this request
+                       was submitted to the Builder. This is used by the CVS
+                       step to compute a checkout timestamp.
+    """
+
+    source = None
+    builder = None
+    startCount = 0 # how many times we have tried to start this build
+
+    if implements:
+        implements(interfaces.IBuildRequestControl)
+    else:
+        __implements__ = interfaces.IBuildRequestControl,
+
+    def __init__(self, reason, source):
+        assert interfaces.ISourceStamp(source, None)
+        self.reason = reason
+        self.source = source
+        self.start_watchers = []
+        self.finish_watchers = []
+
+    def canBeMergedWith(self, other):
+        return self.source.canBeMergedWith(other.source)
+
+    def mergeWith(self, others):
+        return self.source.mergeWith([o.source for o in others])
+
+    def mergeReasons(self, others):
+        """Return a reason for the merged build request."""
+        reasons = []
+        for req in [self] + others:
+            if req.reason and req.reason not in reasons:
+                reasons.append(req.reason)
+        return ", ".join(reasons)
+
+    def waitUntilStarted(self):
+        """Get a Deferred that will fire (with a
+        L{buildbot.interfaces.IBuildControl} instance) when the build starts.
+        TODO: there could be multiple Builds to satisfy a BuildRequest, but
+        this API only allows you to wait for the first one."""
+        # TODO: if you call this after the build has started, the Deferred
+        # will never fire.
+        d = defer.Deferred()
+        self.start_watchers.append(d)
+        return d
+
+    def waitUntilFinished(self):
+        """Get a Deferred that will fire (with a
+        L{buildbot.interfaces.IBuildStatus} instance) when the build
+        finishes."""
+        d = defer.Deferred()
+        self.finish_watchers.append(d)
+        return d
+
+    # these are called by the Builder
+
+    def requestSubmitted(self, builder):
+        # the request has been placed on the queue
+        self.builder = builder
+
+    def buildStarted(self, build, buildstatus):
+        """This is called by the Builder when a Build has been started in the
+        hopes of satisfying this BuildRequest. It may be called multiple
+        times, since interrupted builds and lost buildslaves may force
+        multiple Builds to be run until the fate of the BuildRequest is known
+        for certain."""
+        for w in self.start_watchers:
+            w.callback(build)
+        self.start_watchers = []
+
+    def finished(self, buildstatus):
+        """This is called by the Builder when the BuildRequest has been
+        retired. This happens when its Build has either succeeded (yay!) or
+        failed (boo!). TODO: If it is halted due to an exception (oops!), or
+        some other retryable error, C{finished} will not be called yet."""
+
+        for w in self.finish_watchers:
+            w.callback(buildstatus)
+        self.finish_watchers = []
+
+    # IBuildRequestControl
+    def cancel(self):
+        """Cancel this request. This can only be successful if the Build has
+        not yet been started.
+
+        @return: a boolean indicating if the cancel was successful."""
+        if self.builder:
+            return self.builder.cancelBuildRequest(self)
+        return False
+
+
 class Build:
     """I represent a single build by a single bot. Specialized Builders can
     use subclasses of Build to hold status information unique to those build
     processes.
 
-    I am responsible for two things:
-      1. deciding B{when} a build should occur.  This involves knowing
-         which file changes to ignore (documentation or comments files,
-         for example), and deciding how long to wait for the tree to
-         become stable before starting.  The base class pays attention
-         to all files, and waits 10 seconds for a stable tree.
-      
-      2. controlling B{how} the build proceeds.  The actual build is
-         broken up into a series of steps, saved in the .buildSteps[]
-         array as a list of L{buildbot.process.step.BuildStep}
-         objects. Each step is a single remote command, possibly a shell
-         command.
-
-    Before the build is started, I accumulate Changes and track the
-    tree-stable timers and interlocks necessary to decide when I ought to
-    start building.
+    I control B{how} the build proceeds. The actual build is broken up into a
+    series of steps, saved in the .buildSteps[] array as a list of
+    L{buildbot.process.step.BuildStep} objects. Each step is a single remote
+    command, possibly a shell command.
 
-    During the build, I don't do anything interesting.
+    During the build, I put status information into my C{BuildStatus}
+    gatherer.
 
-    After the build, I hold historical data about the build, like how long
-    it took, tree size, lines of code, etc. It is expected to be used to
-    generate graphs and quantify long-term trends. It does not hold any
-    status events or build logs.
+    After the build, I go away.
 
     I can be used by a factory by setting buildClass on
     L{buildbot.process.factory.BuildFactory}
+
+    @ivar request: the L{BuildRequest} that triggered me
+    @ivar build_status: the L{buildbot.status.builder.BuildStatus} that
+                        collects our status
     """
+    if implements:
+        implements(interfaces.IBuildControl)
+    else:
+        __implements__ = interfaces.IBuildControl,
 
-    treeStableTimer = 10 #*60
     workdir = "build"
     build_status = None
     reason = "changes"
-    sourceStamp = (None, None)
     finished = False
     results = None
 
-    def __init__(self):
-        self.unimportantChanges = []
-        self.changes = []
-        self.failedChanges = []
-        self.maxChangeNumber = None
-        # .timer and .nextBuildTime are only set while we are in the
-        # Builder's 'waiting' slot
-        self.timer = None
-        self.nextBuildTime = None
-        self.interlocks = []
-        self.abandoned = False
+    def __init__(self, requests):
+        self.requests = requests
+        for req in self.requests:
+            req.startCount += 1
+        self.locks = []
+        # build a source stamp
+        self.source = requests[0].mergeWith(requests[1:])
+        self.reason = requests[0].mergeReasons(requests[1:])
+
+        #self.interlocks = []
+        #self.abandoned = False
 
         self.progress = None
         self.currentStep = None
@@ -80,131 +189,27 @@
         """
         self.builder = builder
 
-    def setSourceStamp(self, baserev, patch, reason="try"):
-        # sourceStamp is (baserevision, (patchlevel, diff))
-        self.sourceStamp = (baserev, patch)
-        self.reason = reason
-
-    def isFileImportant(self, filename):
-        """
-        I check if the given file is important enough to trigger a rebuild.
-
-        Override me to ignore unimporant files: documentation, .cvsignore
-        files, etc. 
-
-        The timer is not restarted, so a checkout may occur in the middle of
-        a set of changes marked 'unimportant'. Also, the checkout may or may
-        not pick up the 'unimportant' changes. The implicit assumption is
-        that any file marked 'unimportant' is incapable of affecting the
-        results of the build.
-
-        @param filename: name of a file to check, relative to the VC base
-        @type  filename: string
-      
-        @rtype: boolean
-        @returns: whether the change to this file should trigger a rebuild
-        """
-        return True
-
-    def isBranchImportant(self, branch):
-        """I return True if the given branch is important enough to trigger a
-        rebuild, False if it should be ignored. Override me to ignore
-        unimportant branches. The timer is not restarted, so a checkout may
-        occur in the middle of a set of changes marked 'unimportant'. Also,
-        the checkout may or may not pick up the 'unimportant' changes."""
-        return True
-     
-    def bumpMaxChangeNumber(self, change):
-        if not self.maxChangeNumber:
-            self.maxChangeNumber = change.number
-        if change.number > self.maxChangeNumber:
-            self.maxChangeNumber = change.number
-
-    def addChange(self, change):
-        """
-        Add the change, deciding if the change is important or not. 
-        Called by L{buildbot.process.builder.filesChanged}
-
-        @type  change: L{buildbot.changes.changes.Change}
-        """
-        # for a change to be important, it needs to be with an important
-        # branch and it need to contain an important file
-
-        important = 0
-
-        if self.isBranchImportant(change.branch):
-            for filename in change.files:
-                if self.isFileImportant(filename):
-                    important = 1
-                    break
-
-        if important:
-            self.addImportantChange(change)
-        else:
-            self.addUnimportantChange(change)
+    def setLocks(self, locks):
+        self.locks = locks
 
-    def addImportantChange(self, change):
-        log.msg("builder %s: change is important, adding" % self.builder.name)
-        self.bumpMaxChangeNumber(change)
-        self.changes.append(change)
-        self.nextBuildTime = change.when + self.treeStableTimer
-        self.setTimer(self.nextBuildTime)
-        self.builder.updateBigStatus()
-            
-    def addUnimportantChange(self, change):
-        self.unimportantChanges.append(change)
+    def getSourceStamp(self):
+        return self.source
 
     def allChanges(self):
-        return self.changes + self.failedChanges + self.unimportantChanges
+        return self.source.changes
 
     def allFiles(self):
         # return a list of all source files that were changed
         files = []
         havedirs = 0
-        for c in self.changes + self.unimportantChanges:
+        for c in self.allChanges():
             for f in c.files:
                 files.append(f)
             if c.isdir:
                 havedirs = 1
         return files
 
-    def failMerge(self, b):
-        for c in b.unimportantChanges + b.changes + b.failedChanges:
-            self.bumpMaxChangeNumber(c)
-            self.failedChanges.append(c)
-    def merge(self, b):
-        self.unimportantChanges.extend(b.unimportantChanges)
-        self.failedChanges.extend(b.failedChanges)
-        self.changes.extend(b.changes)
-        for c in b.unimportantChanges + b.changes + b.failedChanges:
-            self.bumpMaxChangeNumber(c)
-
-    def getSourceStamp(self):
-        return self.sourceStamp
-
-    def setTimer(self, when):
-        log.msg("setting timer to %s" %
-                time.strftime("%H:%M:%S", time.localtime(when)))
-        if when < now():
-            when = now() + 1
-        if self.timer:
-            self.timer.cancel()
-        self.timer = reactor.callLater(when - now(), self.fireTimer)
-    def stopTimer(self):
-        if self.timer:
-            self.timer.cancel()
-            self.timer = None
-
-    def fireTimer(self):
-        """
-        Fire the build timer on the builder.
-        """
-        self.timer = None
-        self.nextBuildTime = None
-        # tell the Builder to deal with us
-        self.builder.buildTimerFired(self)
-
-    def checkInterlocks(self, interlocks):
+    def OFFcheckInterlocks(self, interlocks):
         assert interlocks
         # Build.interlocks is a list of the ones we are waiting for. As each
         # deferred fires, we remove one from the list. When the last one
@@ -221,7 +226,7 @@
             d.addCallback(self.interlockDone, interlock)
         # wait for all of them to pass
 
-    def interlockDone(self, passed, interlock):
+    def OFFinterlockDone(self, passed, interlock):
         # one interlock has finished
         self.interlocks.remove(interlock)
         if self.abandoned:
@@ -241,23 +246,19 @@
         d = self.__dict__.copy()
         if d.has_key('remote'):
             del d['remote']
-        d['timer'] = None
         return d
-    def __setstate__(self, state):
-        self.__dict__ = state
-        if self.nextBuildTime:
-            self.setTimer(self.nextBuildTime)
 
     def blamelist(self):
-        who = {}
-        for c in self.unimportantChanges + self.changes + self.failedChanges:
-            who[c.who] = 1
-        blamelist = who.keys()
+        blamelist = []
+        for c in self.allChanges():
+            if c.who not in blamelist:
+                blamelist.append(c.who)
         blamelist.sort()
         return blamelist
+
     def changesText(self):
         changetext = ""
-        for c in self.failedChanges + self.unimportantChanges + self.changes:
+        for c in self.allChanges():
             changetext += "-" * 60 + "\n\n" + c.asText() + "\n"
         # consider sorting these by number
         return changetext
@@ -277,14 +278,19 @@
 
     useProgress = True
 
-    def startBuild(self, build_status, expectations, remote):
+    def getSlaveCommandVersion(self, command, oldversion=None):
+        return self.slavebuilder.getSlaveCommandVersion(command, oldversion)
+
+    def startBuild(self, build_status, expectations, slavebuilder):
         """This method sets up the build, then starts it by invoking the
         first Step. It returns a Deferred which will fire when the build
-        finishes."""
+        finishes. This Deferred is guaranteed to never errback."""
 
         log.msg("%s.startBuild" % self)
         self.build_status = build_status
-        self.remote = remote
+        self.slavebuilder = slavebuilder
+        self.locks = [l.getLock(self.slavebuilder) for l in self.locks]
+        self.remote = slavebuilder.remote
         self.remote.notifyOnDisconnect(self.lostRemote)
         d = self.deferred = defer.Deferred()
 
@@ -308,9 +314,27 @@
             return d
 
         self.build_status.buildStarted(self)
-        self.startNextStep()
+        self.acquireLocks().addCallback(self._startBuild_2)
         return d
 
+    def acquireLocks(self, res=None):
+        log.msg("acquireLocks(step %s, locks %s)" % (self, self.locks))
+        if not self.locks:
+            return defer.succeed(None)
+        for lock in self.locks:
+            if not lock.isAvailable():
+                log.msg("Build %s waiting for lock %s" % (self, lock))
+                d = lock.waitUntilAvailable(self)
+                d.addCallback(self.acquireLocks)
+                return d
+        # all locks are available, claim them all
+        for lock in self.locks:
+            lock.claim(self)
+        return defer.succeed(None)
+
+    def _startBuild_2(self, res):
+        self.startNextStep()
+
     def setupBuild(self, expectations):
         # create the actual BuildSteps. If there are any name collisions, we
         # add a count to the loser until it is unique.
@@ -361,11 +385,8 @@
                 self.progress.setExpectationsFrom(expectations)
 
         # we are now ready to set up our BuildStatus.
-        self.build_status.setSourceStamp(self.maxChangeNumber)
+        self.build_status.setSourceStamp(self.source)
         self.build_status.setReason(self.reason)
-        self.build_status.setChanges(self.changes +
-                                     self.failedChanges +
-                                     self.unimportantChanges)
         self.build_status.setBlamelist(self.blamelist())
         self.build_status.setProgress(self.progress)
 
@@ -438,7 +459,7 @@
             terminate = True
         return terminate
 
-    def lostRemote(self, remote):
+    def lostRemote(self, remote=None):
         # the slave went away. There are several possible reasons for this,
         # and they aren't necessarily fatal. For now, kill the build, but
         # TODO: see if we can resume the build when it reconnects.
@@ -449,7 +470,7 @@
             log.msg(" stopping currentStep", self.currentStep)
             self.currentStep.interrupt(Failure(error.ConnectionLost()))
 
-    def stopBuild(self, reason):
+    def stopBuild(self, reason="<no reason given>"):
         # the idea here is to let the user cancel a build because, e.g.,
         # they realized they committed a bug and they don't want to waste
         # the time building something that they know will fail. Another
@@ -519,21 +540,19 @@
             # XXX: also test a 'timing consistent' flag?
             log.msg(" setting expectations for next time")
             self.builder.setExpectations(self.progress)
+        reactor.callLater(0, self.releaseLocks)
         self.deferred.callback(self)
         self.deferred = None
 
-    def testsFinished(self, results):
-        """Accept a TestResults object."""
-        self.builder.testsFinished(results)
+    def releaseLocks(self):
+        log.msg("releaseLocks(%s): %s" % (self, self.locks))
+        for lock in self.locks:
+            lock.release(self)
 
-class BuildControl(components.Adapter):
-    if implements:
-        implements(interfaces.IBuildControl)
-    else:
-        __implements__ = interfaces.IBuildControl,
+    # IBuildControl
 
     def getStatus(self):
-        return self.original.build_status
+        return self.build_status
+
+    # stopBuild is defined earlier
 
-    def stopBuild(self, reason="<no reason given>"):
-        self.original.stopBuild(reason)

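Build.acquireLocks above implements a retry loop over Deferreds: if any lock
is unavailable, wait on it, and when woken re-scan the whole list from the
top, since another build may have claimed a different lock in the meantime.
A minimal standalone sketch of the same pattern, with a toy lock class
standing in for buildbot's real one:

    from twisted.internet import defer

    class ToyLock:
        """Single-owner lock that hands Deferreds to waiters.
        Illustration only; not buildbot's lock implementation."""
        def __init__(self):
            self.owner = None
            self.waiters = []
        def isAvailable(self):
            return self.owner is None
        def claim(self, owner):
            assert self.owner is None
            self.owner = owner
        def release(self, owner):
            assert self.owner == owner
            self.owner = None
            if self.waiters:
                # wake one waiter; it re-checks availability itself
                self.waiters.pop(0).callback(self)
        def waitUntilAvailable(self, owner):
            d = defer.Deferred()
            self.waiters.append(d)
            return d

    def acquire_all(locks, owner):
        # re-scan from the top each time we are woken
        for lock in locks:
            if not lock.isAvailable():
                d = lock.waitUntilAvailable(owner)
                d.addCallback(lambda res: acquire_all(locks, owner))
                return d
        # all locks are available, and no reactor turn can intervene
        # between the checks and the claims, so claim them all now
        for lock in locks:
            lock.claim(owner)
        return defer.succeed(None)
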
Index: builder.py
===================================================================
RCS file: /cvsroot/buildbot/buildbot/buildbot/process/builder.py,v
retrieving revision 1.26
retrieving revision 1.27
diff -u -d -r1.26 -r1.27
--- builder.py	17 May 2005 10:14:10 -0000	1.26
+++ builder.py	19 Jul 2005 23:11:58 -0000	1.27
@@ -1,18 +1,157 @@
 #! /usr/bin/python
 
+import warnings
+
 from twisted.python import log, components, failure
 from twisted.spread import pb
 from twisted.internet import reactor, defer
 
-from buildbot import interfaces
+from buildbot import interfaces, sourcestamp
 from buildbot.twcompat import implements
 from buildbot.status.progress import Expectations
 from buildbot.status import builder
 from buildbot.util import now
 from buildbot.process import base
 
-class Builder(pb.Referenceable):
+class SlaveBuilder(pb.Referenceable):
+    """I am the master-side representative for one of the
+    L{buildbot.slave.bot.SlaveBuilder} objects that lives in a remote
+    buildbot. When a remote builder connects, I query it for command versions
+    and then make it available to any Builds that are ready to run. """
+
+    remote = None
+    build = None
+
+    def __init__(self, builder):
+        self.builder = builder
+        self.ping_watchers = []
+
+    def getSlaveCommandVersion(self, command, oldversion=None):
+        if self.remoteCommands is None:
+            # the slave is 0.5.0 or earlier
+            return oldversion
+        return self.remoteCommands.get(command)
+
+    def attached(self, slave, remote, commands):
+        self.slave = slave
+        self.remote = remote
+        self.remoteCommands = commands # maps command name to version
+        log.msg("Buildslave %s attached to %s" % (slave.slavename,
+                                                  self.builder.name))
+        d = self.remote.callRemote("setMaster", self)
+        d.addErrback(self._attachFailure, "Builder.setMaster")
+        d.addCallback(self._attached2)
+        return d
+
+    def _attached2(self, res):
+        d = self.remote.callRemote("print", "attached")
+        d.addErrback(self._attachFailure, "Builder.print 'attached'")
+        d.addCallback(self._attached3)
+        return d
+
+    def _attached3(self, res):
+        # now we say they're really attached
+        return self
+
+    def _attachFailure(self, why, where):
+        assert type(where) is str
+        log.msg(where)
+        log.err(why)
+        return why
+
+    def detached(self):
+        self.slave = None
+        self.remote = None
+        self.remoteCommands = None
+
+    def startBuild(self, build):
+        self.build = build
+
+    def finishBuild(self):
+        self.build = None
+
+
+    def ping(self, timeout, status=None):
+        """Ping the slave to make sure it is still there. Returns a Deferred
+        that fires with True if it is.
+
+        @param status: if you point this at a BuilderStatus, a 'pinging'
+                       event will be pushed.
+        """
+
+        newping = not self.ping_watchers
+        d = defer.Deferred()
+        self.ping_watchers.append(d)
+        if newping:
+            if status:
+                event = status.addEvent(["pinging"], "yellow")
+                d2 = defer.Deferred()
+                d2.addCallback(self._pong_status, event)
+                self.ping_watchers.insert(0, d2)
+                # I think it will make the tests run smoother if the status
+                # is updated before the ping completes
+            Ping().ping(self.remote, timeout).addCallback(self._pong)
+
+        return d
+
+    def _pong(self, res):
+        watchers, self.ping_watchers = self.ping_watchers, []
+        for d in watchers:
+            d.callback(res)
+
+    def _pong_status(self, res, event):
+        if res:
+            event.text = ["ping", "success"]
+            event.color = "green"
+        else:
+            event.text = ["ping", "failed"]
+            event.color = "red"
+        event.finish()
+
+class Ping:
+    running = False
+    timer = None
+
+    def ping(self, remote, timeout):
+        assert not self.running
+        self.running = True
+        log.msg("sending ping")
+        self.d = defer.Deferred()
+        remote.callRemote("print", "ping").addBoth(self._pong)
+
+        # We use either our own timeout or the (long) TCP timeout to detect
+        # silently-missing slaves. This might happen because of a NAT
+        # timeout or a routing loop. If the slave just shuts down (and we
+        # somehow missed the FIN), we should get a "connection refused"
+        # message.
+        self.timer = reactor.callLater(timeout, self._ping_timeout, remote)
+        return self.d
+
+    def _ping_timeout(self, remote):
+        log.msg("ping timeout")
+        # force the BotPerspective to disconnect, since this indicates that
+        # the bot is unreachable.
+        del self.timer
+        remote.broker.transport.loseConnection()
+        # the forcibly-lost connection will now cause the ping to fail
+
+    def _pong(self, res):
+        if not self.running:
+            return
+        self.running = False
 
+        log.msg("ping finished")
+        if self.timer:
+            self.timer.cancel()
+            del self.timer
+
+        if isinstance(res, failure.Failure):
+            self.d.callback(False)
+        else:
+            self.d.callback(True)
+
+
+class Builder(pb.Referenceable):
     """I manage all Builds of a given type.
 
     Each Builder is created by an entry in the config file (the c['builders']
@@ -24,76 +163,72 @@
     Build object defines when and how the build is performed, so a new
     Factory or Builder should be defined to control this behavior.
 
-    The Builder holds on to a number of these Build
-    objects, in various slots like C{.waiting}, C{.interlocked},
-    C{.buildable}, and C{.currentBuild}. Incoming
-    L{Change<buildbot.change.changes.Change>} objects are passed to the
-    C{.waiting} build, and when it decides it is ready to go, I move it to
-    the C{.buildable} slot. When a slave becomes available, I move it to the
-    C{.currentBuild} slot and start it running.
-
-    The Builder is also the master-side representative for one of the
-    L{buildbot.slave.bot.SlaveBuilder} objects that lives in a remote
-    buildbot. When a remote builder connects, I query it for command versions
-    and then make it available to any Builds that are ready to run.
+    The Builder holds on to a number of L{base.BuildRequest} objects in a
+    list named C{.buildable}. Incoming BuildRequest objects will be added to
+    this list, or (if possible) merged into an existing request. When a slave
+    becomes available, I will use my C{BuildFactory} to turn the request into
+    a new C{Build} object. The C{BuildRequest} is forgotten; the C{Build}
+    goes into C{.building} while it runs. Once the build finishes, I will
+    discard it.
 
-    I also manage Interlocks, periodic build timers, forced builds, progress
-    expectation (ETA) management, and some status delivery chores.
+    I maintain a list of available SlaveBuilders, one for each connected
+    slave that the C{slavename} parameter says we can use. Some of these will
+    be idle, some of them will be busy running builds for me. If there are
+    multiple slaves, I can run multiple builds at once.
 
-    @type waiting: L{buildbot.process.base.Build}
-    @ivar waiting: a slot for a Build waiting for its 'tree stable' timer to
-                   expire
+    I also manage forced builds, progress expectation (ETA) management, and
+    some status delivery chores.
 
-    @type interlocked: list of L{buildbot.process.base.Build}
-    @ivar interlocked: a slot for the Builds that are stable, but which must
-                       wait for other Builds to complete successfully before
-                       they can be run.
+    I am persisted in C{BASEDIR/BUILDERNAME/builder}, so I can remember how
+    long a build usually takes to run (in my C{expectations} attribute). This
+    pickle also includes the L{buildbot.status.builder.BuilderStatus} object,
+    which remembers the set of historic builds.
 
-    @type buildable: L{buildbot.process.base.Build}
-    @ivar buildable: a slot for a Build that is stable and ready to build,
-                     but which is waiting for a buildslave to be available.
+    @type buildable: list of L{buildbot.process.base.BuildRequest}
+    @ivar buildable: BuildRequests that are ready to build, but which are
+                     waiting for a buildslave to be available.
 
-    @type currentBuild: L{buildbot.process.base.Build}
-    @ivar currentBuild: a slot for the Build that actively running
+    @type building: list of L{buildbot.process.base.Build}
+    @ivar building: Builds that are actively running
 
     """
 
-    remote = None
-    lastChange = None
-    buildNumber = 0
-    periodicBuildTimer = None
-    buildable = None
-    currentBuild = None
-    status = "idle"
-    debug = False
-    wantToStartBuild = None
     expectations = None # this is created the first time we get a good build
+    START_BUILD_TIMEOUT = 10
 
     def __init__(self, setup, builder_status):
         """
         @type  setup: dict
         @param setup: builder setup data, as stored in
                       BuildmasterConfig['builders'].  Contains name,
-                      slavename, builddir, factory.
+                      slavename, builddir, factory, locks.
         @type  builder_status: L{buildbot.status.builder.BuilderStatus}
         """
         self.name = setup['name']
         self.slavename = setup['slavename']
         self.builddir = setup['builddir']
         self.buildFactory = setup['factory']
-
-        self.periodicBuildTime = setup.get('periodicBuildTime', None)
+        self.locks = setup.get("locks", [])
+        if setup.has_key('periodicBuildTime'):
+            raise ValueError("periodicBuildTime can no longer be defined as"
+                             " part of the Builder: use scheduler.Periodic"
+                             " instead")
 
         # build/wannabuild slots: Build objects move along this sequence
-        self.waiting = self.newBuild()
-        self.interlocked = []
+        self.buildable = []
+        self.building = []
 
-        self.interlocks = [] # I watch these interlocks to know when to build
-        self.feeders = [] # I feed these interlocks
+        # buildslaves at our disposal. This maps SlaveBuilder instances to
+        # state, where state is one of "attaching", "idle", "pinging",
+        # "busy". "pinging" is used when a Build is about to start, to make
+        # sure that they're still alive.
+        self.slaves = {} 
 
         self.builder_status = builder_status
         self.builder_status.setSlavename(self.slavename)
-        self.watchers = {'attach': [], 'detach': []}
+
+        # for testing, to help synchronize tests
+        self.watchers = {'attach': [], 'detach': [], 'idle': []}
 
     def setBotmaster(self, botmaster):
         self.botmaster = botmaster
@@ -108,252 +243,201 @@
                          % (self.builddir, setup['builddir']))
         if setup['factory'] != self.buildFactory: # compare objects
             diffs.append('factory changed')
-        if setup.get('periodicBuildTime', None) != self.periodicBuildTime:
-            diffs.append('periodicBuildTime changed from %s to %s' \
-                         % (self.periodicBuildTime,
-                            setup.get('periodicBuildTime', None)))
+        oldlocks = [lock.name for lock in self.locks]
+        newlocks = [lock.name for lock in setup.get('locks', [])]
+        if oldlocks != newlocks:
+            diffs.append('locks changed from %s to %s' % (oldlocks, newlocks))
         return diffs
 
-    def newBuild(self):
-        """
-        Create a new build from our build factory and set ourself as the
-        builder.
-
-        @rtype: L{buildbot.process.base.Build}
-        """
-        b = self.buildFactory.newBuild()
-        b.setBuilder(self)
-        return b
-        
-    def watchInterlock(self, interlock):
-        """This builder will wait for the given interlock to open up before
-        it starts."""
-        self.interlocks.append(interlock)
-    def stopWatchingInterlock(self, interlock):
-        self.interlocks.remove(interlock)
+    def __repr__(self):
+        return "<Builder '%s'>" % self.name
 
-    def feedInterlock(self, interlock):
-        """The following interlocks will be fed by this build."""
-        self.feeders.append(interlock)
-    def stopFeedingInterlock(self, interlock):
-        self.feeders.remove(interlock)
 
+    def submitBuildRequest(self, req):
+        req.submittedAt = now()
+        self.buildable.append(req)
+        req.requestSubmitted(self)
+        self.maybeStartBuild()
 
-    def __repr__(self):
-        return "<Builder '%s'>" % self.name
+    def cancelBuildRequest(self, req):
+        if req in self.buildable:
+            self.buildable.remove(req)
+            return True
+        return False
 
     def __getstate__(self):
         d = self.__dict__.copy()
-        d['remote'] = None
-        d['currentBuild'] = None # XXX: failover to a new Build
-        d['periodicBuildTimer'] = None
+        # TODO: note that d['buildable'] can contain Deferreds
+        del d['building'] # TODO: move these back to .buildable?
+        del d['slaves']
         return d
-        
-    def attached(self, remote, commands):
+
+    def __setstate__(self, d):
+        self.__dict__ = d
+        self.building = []
+        self.slaves = {}
+
+    def fireTestEvent(self, name, with=None):
+        if with is None:
+            with = self
+        watchers = self.watchers[name]
+        self.watchers[name] = []
+        for w in watchers:
+            w.callback(with)
+
+    def attached(self, slave, remote, commands):
         """This is invoked by the BotPerspective when the self.slavename bot
         registers their builder.
 
-        @rtype : L{twisted.internet.defer.Deferred}
+        @type  slave: L{buildbot.master.BotPerspective}
+        @param slave: the BotPerspective that represents the buildslave as a
+                      whole
+        @type  remote: L{twisted.spread.pb.RemoteReference}
+        @param remote: a reference to the L{buildbot.slave.bot.SlaveBuilder}
+        @type  commands: dict: string -> string, or None
+        @param commands: provides the slave's version of each RemoteCommand
+
+        @rtype:  L{twisted.internet.defer.Deferred}
         @return: a Deferred that fires (with 'self') when the slave-side
                  builder is fully attached and ready to accept commands.
         """
-        if self.remote == remote:
-            # already attached to them
-            log.msg("Builder %s already attached" % self.name)
-            return defer.succeed(self)
-        if self.remote:
-            log.msg("WEIRD", self.remote, remote)
-        self.remote = remote
-        self.remoteCommands = commands # maps command name to version
-        log.msg("Builder %s attached" % self.name)
-        d = self.remote.callRemote("setMaster", self)
-        d.addErrback(self._attachFailure, "Builder.setMaster")
-        d.addCallback(self._attached2)
-        return d
-
-    def _attachFailure(self, why, where):
-        assert type(where) is str
-        log.msg(where)
-        log.err(why)
-
-    def _attached2(self, res):
-        d = self.remote.callRemote("print", "attached")
-        d.addErrback(self._attachFailure, "Builder.print 'attached'")
-        d.addCallback(self._attached3)
+        for s in self.slaves.keys():
+            if s.slave == slave:
+                # already attached to them. TODO: how does this ever get
+                # reached?
+                log.msg("%s.attached: WEIRD slave %s already attached"
+                        % (self, slave))
+                return defer.succeed(self)
+        sb = SlaveBuilder(self)
+        self.slaves[sb] = "attaching"
+        d = sb.attached(slave, remote, commands)
+        d.addCallback(self._attached)
+        d.addErrback(self._not_attached, slave)
         return d
 
-    def _attached3(self, res):
-        # now we say they're really attached
-        self.builder_status.addPointEvent(['connect'])
-        if self.currentBuild:
-            # XXX: handle interrupted build: flunk the current buildStep,
-            # see if it can be restarted. buildStep.setBuilder(self) must be
-            # done to allow it to run finishStep() when it is complete.
-            log.msg("interrupted build!")
-            pass
-        self.startPeriodicBuildTimer()
+    def _attached(self, sb):
+        # TODO: make this .addSlaveEvent(slave.slavename, ['connect']) ?
+        self.builder_status.addPointEvent(['connect', sb.slave.slavename])
+        self.slaves[sb] = "idle"
         self.maybeStartBuild()
-        for w in self.watchers['attach']:
-            w.callback(self)
-        self.watchers['attach'] = []
+
+        self.fireTestEvent('attach')
         return self
 
-    def getSlaveCommandVersion(self, command, oldversion=None):
-        if self.remoteCommands is None:
-            # the slave is 0.5.0 or earlier
-            return oldversion
-        return self.remoteCommands.get(command)
+    def _not_attached(self, why, slave):
+        # already log.err'ed by SlaveBuilder._attachFailure
+        # TODO: make this .addSlaveEvent?
+        # TODO: remove from self.slaves
+        self.builder_status.addPointEvent(['failed', 'connect',
+                                           slave.slave.slavename])
+        # TODO: add an HTMLLogFile of the exception
+        self.fireTestEvent('attach', why)
 
-    def detached(self):
+    def detached(self, slave):
         """This is called when the connection to the bot is lost."""
-        log.msg("%s.detached" % self)
-        self.remote = None
-        reactor.callLater(0, self._detached)
-        # the current step will be stopped (via a notifyOnDisconnect
-        # callback), and the build will probably stop.
+        log.msg("%s.detached" % self, slave.slavename)
+        for sb in self.slaves.keys():
+            if sb.slave == slave:
+                break
+        if self.slaves[sb] == "busy":
+            # the Build's .lostRemote method (invoked by a notifyOnDisconnect
+            # handler) will cause the Build to be stopped, probably right
+            # after the notifyOnDisconnect that invoked us finishes running.
 
-    def _detached(self):
-        if self.currentBuild:
-            log.msg("%s._detached: killing build" % self)
-            # wasn't enough
-            try:
-                self.currentBuild.stopBuild("slave lost")
-            except:
-                log.msg("currentBuild.stopBuild failed")
-                log.err()
-            self.currentBuild = None
             # TODO: should failover to a new Build
-        self.builder_status.addPointEvent(['disconnect'])
-        self.builder_status.currentlyOffline()
-        self.stopPeriodicBuildTimer()
-        log.msg("Builder %s detached" % self.name)
-        for w in self.watchers['detach']:
-            w.callback(self)
-        self.watchers['detach'] = []
-
-    def updateBigStatus(self):
-        if self.currentBuild:
-            return # leave it alone
-        if self.buildable and self.remote:
-            log.msg("(self.buildable and self.remote) shouldn't happen")
-            # maybeStartBuild should have moved this to self.currentBuild
-            # before we get to see it
-        elif self.buildable and not self.remote:
-            # TODO: make this a big-status
-            log.msg("want to start build, but we don't have a remote")
-        if self.interlocked:
-            # TODO: list all blocked interlocks
-            self.builder_status.currentlyInterlocked(self.interlocks)
-        elif self.waiting and self.waiting.nextBuildTime:
-            self.builder_status.currentlyWaiting(self.waiting.nextBuildTime)
-            # nextBuildTime == None means an interlock failed and the
-            # changes were merged into the next build, but we don't know
-            # when that will be. Call this state of affairs "idle"
-        elif self.remote:
-            self.builder_status.currentlyIdle()
-        else:
-            self.builder_status.currentlyOffline()
-
-    def filesChanged(self, change):
-        """
-        Tell the waiting L{buildbot.process.base.Build} that files have
-        changed.
-
-        @type  change: L{buildbot.changes.changes.Change}
-        """
-        # this is invoked by the BotMaster to distribute change notification
-        # we assume they are added in strictly increasing order
-        if not self.waiting:
-            self.waiting = self.newBuild()
-        self.waiting.addChange(change)
-        # eventually, our buildTimerFired() method will be called
-        
-    def buildTimerFired(self, wb):
-        """
-        Called by the Build when the build timer fires.
+            #self.retryBuild(sb.build)
+            pass
 
-        @type  wb: L{buildbot.process.base.Build}
-        @param wb: the waiting build that fires the timer
-        """
+        del self.slaves[sb]
 
-        if not self.interlocks:
-            # move from .waiting to .buildable
-            if self.buildable:
-                self.buildable.merge(wb)
-            else:
-                self.buildable = wb
-            self.waiting = None
-            self.maybeStartBuild()
-            return
-        # interlocked. Move from .waiting to .interlocked[]
-        self.interlocked.append(wb)
-        self.waiting = None
-        # tell them to ask build interlock when they can proceed
-        wb.checkInterlocks(self.interlocks)
-        self.updateBigStatus()
-        # if the interlocks are not blocked, interlockDone may be fired
-        # inside checkInterlocks
-        
-    def interlockPassed(self, b):
-        log.msg("%s: interlockPassed" % self)
-        self.interlocked.remove(b)
-        if self.buildable:
-            self.buildable.merge(b)
-        else:
-            self.buildable = b
-        self.maybeStartBuild()
-    def interlockFailed(self, b):
-        log.msg("%s: interlockFailed" % self)
-        # who do we merge to?
-        assert(self.interlocked[0] == b)
-        self.interlocked.remove(b)
-        if self.interlocked:
-            target = self.interlocked[0]
-        elif self.waiting:
-            target = self.waiting
-        else:
-            self.waiting = self.newBuild()
-            target = self.waiting
-        target.failMerge(b)
+        # TODO: make this .addSlaveEvent?
+        self.builder_status.addPointEvent(['disconnect', slave.slavename])
+        sb.detached() # inform the SlaveBuilder that their slave went away
         self.updateBigStatus()
-        
-    def startPeriodicBuildTimer(self):
-        self.stopPeriodicBuildTimer()
-        if self.periodicBuildTime:
-            t = reactor.callLater(self.periodicBuildTime,
-                                  self.doPeriodicBuild)
-            self.periodicBuildTimer = t
-
-    def stopPeriodicBuildTimer(self):
-        if self.periodicBuildTimer:
-            self.periodicBuildTimer.cancel()
-            self.periodicBuildTimer = None
+        self.fireTestEvent('detach')
 
-    def doPeriodicBuild(self):
-        self.periodicBuildTimer = None
-        self.forceBuild(None, "periodic build")
-        self.startPeriodicBuildTimer()
+    def updateBigStatus(self):
+        if not self.slaves:
+            self.builder_status.setBigState("offline")
+        elif self.building:
+            self.builder_status.setBigState("building")
+        else:
+            self.builder_status.setBigState("idle")
+            self.fireTestEvent('idle')
 
     def maybeStartBuild(self):
-        if self.currentBuild:
-            return # must wait
+        log.msg("maybeStartBuild: %s %s" % (self.buildable, self.slaves))
         if not self.buildable:
             self.updateBigStatus()
             return # nothing to do
-        if not self.remote:
-            #log.msg("want to start build, but we don't have a remote")
+        idle_slaves = [sb for sb in self.slaves.keys()
+                       if self.slaves[sb] == "idle"]
+        if not idle_slaves:
+            log.msg("%s: want to start build, but we don't have a remote"
+                    % self)
             self.updateBigStatus()
             return
-        # move to .building, start it
-        self.currentBuild = self.buildable
-        self.buildable = None
-        return self.startBuild(self.currentBuild)
+        sb = idle_slaves[0]
 
-    def startBuild(self, build):
-        log.msg("starting build %s" % build)
-        d = self.remote.callRemote("startBuild") # informational courtesy
-        d.addErrback(self._startBuildFailed, build)
+        # there is something to build, and there is a slave on which to build
+        # it. Grab the oldest request, see if we can merge it with anything
+        # else.
+        req = self.buildable.pop(0)
+        mergers = []
+        for br in self.buildable[:]:
+            if req.canBeMergedWith(br):
+                self.buildable.remove(br)
+                mergers.append(br)
+        requests = [req] + mergers
+
+        # Create a new build from our build factory and set ourself as the
+        # builder.
+        build = self.buildFactory.newBuild(requests)
+        build.setBuilder(self)
+        build.setLocks(self.locks)
+
+        # start it
+        self.startBuild(build, sb)
+
+    def startBuild(self, build, sb):
+        """Start a build on the given slave.
+        @param build: the L{base.Build} to start
+        @param sb: the L{SlaveBuilder} which will host this build
+
+        @return: a Deferred which fires with an L{interfaces.IBuildControl} that can
+        be used to stop the Build, or to access a
+        L{buildbot.status.builder.BuildStatus} which will watch the Build as
+        it runs. """
+
+        self.building.append(build)
+
+        # claim the slave
+        self.slaves[sb] = "pinging"
+        sb.startBuild(build)
+
+        self.updateBigStatus()
+
+        log.msg("starting build %s.. pinging the slave" % build)
+        # ping the slave to make sure they're still there. If they've fallen
+        # off the map (due to a NAT timeout or something), this will fail in
+        # a couple of minutes, depending upon the TCP timeout. TODO: consider
+        # making this time out faster, or at least characterize the likely
+        # duration.
+        d = sb.ping(self.START_BUILD_TIMEOUT)
+        d.addCallback(self._startBuild_1, build, sb)
+        return d
+
+    def _startBuild_1(self, res, build, sb):
+        if not res:
+            return self._startBuildFailed("slave ping failed", build, sb)
+        # The buildslave is ready to go.
+        self.slaves[sb] = "building"
+        d = sb.remote.callRemote("startBuild")
+        d.addCallbacks(self._startBuild_2, self._startBuildFailed,
+                       callbackArgs=(build,sb), errbackArgs=(build,sb))
+        return d
 
+    def _startBuild_2(self, res, build, sb):
         # create the BuildStatus object that goes with the Build
         bs = self.builder_status.newBuild()
 
@@ -361,32 +445,51 @@
         # BuildStatus that it has started, which will announce it to the
         # world (through our BuilderStatus object, which is its parent).
         # Finally it will start the actual build process.
-        d = build.startBuild(bs, self.expectations, self.remote)
-        d.addCallback(self.buildFinished)
-        d.addErrback(self._buildNotFinished)
-        control = base.BuildControl(build)
-        return control
-
-    def _buildNotFinished(self, why):
-        log.msg("_buildNotFinished")
-        log.err()
+        d = build.startBuild(bs, self.expectations, sb)
+        d.addCallback(self.buildFinished, sb)
+        d.addErrback(log.err) # this shouldn't happen. if it does, the slave
+                              # will be wedged
+        for req in build.requests:
+            req.buildStarted(build, bs)
+        return build # this is the IBuildControl
 
-    def _startBuildFailed(self, why, build):
+    def _startBuildFailed(self, why, build, sb):
+        # put the build back on the buildable list
         log.msg("I tried to tell the slave that the build %s started, but "
                 "remote_startBuild failed: %s" % (build, why))
+        # release the slave
+        sb.finishBuild()
+        if sb in self.slaves:
+            self.slaves[sb] = "idle"
 
-    def testsFinished(self, results):
-        # XXX: add build number, datestamp, Change information
-        #self.testTracker.testsFinished(results)
-        pass
-        
-    def buildFinished(self, build):
-        self.currentBuild = None
-        for f in self.feeders:
-            f.buildFinished(self.name, build.maxChangeNumber,
-                            (build.results == builder.SUCCESS))
+        log.msg("re-queueing the BuildRequest")
+        self.building.remove(build)
+        for req in build.requests:
+            self.buildable.insert(0, req) # they get first priority
+
+        # other notifyOnDisconnect calls will mark the slave as disconnected.
+        # Re-try after they have fired, maybe there's another slave
+        # available. TODO: I don't like these un-synchronizable callLaters..
+        # a better solution is to mark the SlaveBuilder as disconnected
+        # ourselves, but we'll need to make sure that they can tolerate
+        # multiple disconnects first.
+        reactor.callLater(0, self.maybeStartBuild)
+
+    def buildFinished(self, build, sb):
+        """This is called when the Build has finished (either success or
+        failure). Any exceptions during the build are reported with
+        results=FAILURE, not with an errback."""
+
+        # release the slave
+        sb.finishBuild()
+        if sb in self.slaves:
+            self.slaves[sb] = "idle"
+        # otherwise the slave probably got removed in detach()
+
+        self.building.remove(build)
+        for req in build.requests:
+            req.finished(build.build_status)
         self.maybeStartBuild()
-        return build.results # give to whoever started the build
 
     def setExpectations(self, progress):
         """Mark the build as successful and update expectations for the next
@@ -404,74 +507,10 @@
         log.msg("new expectations: %s seconds" % \
                 self.expectations.expectedBuildTime())
 
-    def forceBuild(self, who, reason):
-        # only add a build if there isn't anything already building
-        if self.currentBuild:
-            log.msg(self,
-                    "forceBuild(%s,%s) ignored because a build is running" % \
-                    (who, reason))
-            raise interfaces.BuilderInUseError
-        if not self.remote:
-            log.msg(self,
-                    "forceBuild(%s,%s) ignored because we have no slave" % \
-                    (who, reason))
-            raise interfaces.NoSlaveError
-        if self.buildable:
-            self.buildable.reason = reason
-        else:
-            self.buildable = self.newBuild()
-            self.buildable.reason = reason
-        return self.maybeStartBuild()
-
     def shutdownSlave(self):
         if self.remote:
             self.remote.callRemote("shutdown")
-            
-
-class Ping:
-    def ping(self, status, remote, timeout):
-        if not remote:
-            status.addPointEvent(["ping", "no slave"], "red")
-            return defer.succeed(False) # interfaces.NoSlaveError
-        self.event = status.addEvent(["pinging"], "yellow")
-        self.active = True
-        self.d = defer.Deferred()
-        d = remote.callRemote("print", "ping")
-        d.addBoth(self._pong)
-
-        # We use either our own timeout or the (long) TCP timeout to detect
-        # silently-missing slaves. This might happen because of a NAT
-        # timeout or a routing loop. If the slave just shuts down (and we
-        # somehow missed the FIN), we should get a "connection refused"
-        # message.
-        self.timer = reactor.callLater(timeout, self.timeout)
-        return self.d
 
-    def timeout(self):
-        self.timer = None
-        self._pong(failure.Failure(interfaces.NoSlaveError("timeout")))
-
-    def _pong(self, res):
-        if not self.active:
-            return
-        self.active = False
-        if self.timer:
-            self.timer.cancel()
-        e = self.event
-        if isinstance(res, failure.Failure):
-            e.text = ["ping", "failed"]
-            e.color = "red"
-            ponged = False
-            # TODO: force the BotPerspective to disconnect, since this
-            # indicates that the bot is unreachable. That will also append a
-            # "disconnect" event to the builder_status, terminating this
-            # "ping failed" event.
-        else:
-            e.text = ["ping", "success"]
-            e.color = "green"
-            ponged = True
-        e.finish()
-        self.d.callback(ponged)
 
 class BuilderControl(components.Adapter):
     if implements:
@@ -480,18 +519,47 @@
         __implements__ = interfaces.IBuilderControl,
 
     def forceBuild(self, who, reason):
-        bc = self.original.forceBuild(who, reason)
-        return bc
+        """This is a shortcut for building the current HEAD. You get back a
+        BuildRequest, just as if you'd asked politely. To get control of the
+        resulting build, you'll need to wait for req.waitUntilStarted().
+
+        This shortcut peeks into the Builder and raises an exception if there
+        is no slave available, to make backwards-compatibility a little
+        easier.
+        """
+
+        warnings.warn("Please use BuilderControl.requestBuild instead",
+                      category=DeprecationWarning, stacklevel=1)
+        idle_slaves = [sb for sb in self.original.slaves
+                       if self.original.slaves[sb] == "idle"]
+        if not idle_slaves:
+            if self.original.building:
+                raise interfaces.BuilderInUseError("All slaves are in use")
+            raise interfaces.NoSlaveError("There are no slaves connected")
+        req = base.BuildRequest(reason, sourcestamp.SourceStamp())
+        self.requestBuild(req)
+        return req.waitUntilStarted()
+
+    def requestBuild(self, req):
+        self.original.submitBuildRequest(req)
+
+    def getPendingBuilds(self):
+        # return IBuildRequestControl objects
+        raise NotImplementedError
 
     def getBuild(self, number):
-        b =  self.original.currentBuild
-        if b and b.build_status.number == number:
-            return base.BuildControl(b)
+        for b in self.original.building:
+            if b.build_status.number == number:
+                return b
         return None
 
     def ping(self, timeout=30):
-        d = Ping().ping(self.original.builder_status,
-                        self.original.remote, timeout)
+        if not self.original.slaves:
+            self.original.builder_status.addPointEvent(["ping", "no slave"],
+                                                       "red")
+            return defer.succeed(False) # interfaces.NoSlaveError
+        d = self.original.slaves.keys()[0].ping(timeout,
+                                                self.original.builder_status)
         return d
 
 components.registerAdapter(BuilderControl, Builder, interfaces.IBuilderControl)
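
For readers following the API change, a minimal sketch of the new
request-based path that replaces Builder.forceBuild (the 'builder' handle
and the buildbot.sourcestamp module path are assumptions; BuildRequest and
SourceStamp are used exactly as in the forceBuild shim above):

    from buildbot import interfaces, sourcestamp  # sourcestamp path assumed
    from buildbot.process import base

    control = interfaces.IBuilderControl(builder)  # adapter registered above
    # an empty SourceStamp means: default branch, latest revision, no patch
    req = base.BuildRequest("smoke test", sourcestamp.SourceStamp())
    control.requestBuild(req)      # just queues it; no idle-slave check
    d = req.waitUntilStarted()     # Deferred, same as forceBuild returns

Note that ping() now asks the first connected slave directly, rather than
going through the Ping helper class deleted above.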

Index: factory.py
===================================================================
RCS file: /cvsroot/buildbot/buildbot/buildbot/process/factory.py,v
retrieving revision 1.9
retrieving revision 1.10
diff -u -d -r1.9 -r1.10
--- factory.py	24 Apr 2005 21:30:24 -0000	1.9
+++ factory.py	19 Jul 2005 23:11:58 -0000	1.10
@@ -15,21 +15,21 @@
     @type  buildClass: L{buildbot.process.base.Build}
     """
     buildClass = Build
-    treeStableTimer = None
     steps = []
     useProgress = 1
-    compare_attrs = ['buildClass', 'treeStableTimer', 'steps', 'useProgress']
+    compare_attrs = ['buildClass', 'steps', 'useProgress']
 
     def __init__(self, steps=None):
         if steps is None: steps = []
         self.steps = steps
 
-    def newBuild(self):
-        b = self.buildClass()
+    def newBuild(self, request):
+        """Create a new Build instance.
+        @param request: a L{base.BuildRequest} describing what is to be built
+        """
+        b = self.buildClass(request)
         b.useProgress = self.useProgress
         b.setSteps(self.steps)
-        if self.treeStableTimer:
-            b.treeStableTimer = self.treeStableTimer
         return b
 
 

--- interlock.py DELETED ---

Index: step.py
===================================================================
RCS file: /cvsroot/buildbot/buildbot/buildbot/process/step.py,v
retrieving revision 1.66
retrieving revision 1.67
diff -u -d -r1.66 -r1.67
--- step.py	17 May 2005 04:40:55 -0000	1.66
+++ step.py	19 Jul 2005 23:11:57 -0000	1.67
@@ -403,7 +403,7 @@
     flunkOnFailure = False
     warnOnWarnings = False
     warnOnFailure = False
-    parms = ['build', 'name',
+    parms = ['build', 'name', 'locks',
              'haltOnFailure',
              'flunkOnWarnings',
              'flunkOnFailure',
@@ -411,6 +411,7 @@
              'warnOnFailure',]
 
     name = "generic"
+    locks = []
     progressMetrics = [] # 'time' is implicit
     useProgress = True # set to False if step is really unpredictable
     build = None
@@ -468,17 +469,45 @@
 
         self.remote = remote
         self.deferred = defer.Deferred()
+        # convert all locks into their real form (SlaveLocks get narrowed
+        # down to the slave that this build is being run on)
+        self.locks = [l.getLock(self.build.slavebuilder) for l in self.locks]
+        for l in self.locks:
+            if l in self.build.locks:
+                log.msg("Hey, lock %s is claimed by both a Step (%s) and the"
+                        " parent Build (%s)" % (l, self, self.build))
+                raise RuntimeError("lock claimed by both Step and Build")
+        d = self.acquireLocks()
+        d.addCallback(self._startStep_2)
+        return self.deferred
+
+    def acquireLocks(self, res=None):
+        log.msg("acquireLocks(step %s, locks %s)" % (self, self.locks))
+        if not self.locks:
+            return defer.succeed(None)
+        for lock in self.locks:
+            if not lock.isAvailable():
+                log.msg("step %s waiting for lock %s" % (self, lock))
+                d = lock.waitUntilAvailable(self)
+                d.addCallback(self.acquireLocks)
+                return d
+        # all locks are available, claim them all
+        for lock in self.locks:
+            lock.claim(self)
+        return defer.succeed(None)
+
+    def _startStep_2(self, res):
         if self.progress:
             self.progress.start()
         self.step_status.stepStarted()
         try:
             skip = self.start()
             if skip == SKIPPED:
+                reactor.callLater(0, self.releaseLocks)
                 reactor.callLater(0, self.deferred.callback, SKIPPED)
         except:
             log.msg("BuildStep.startStep exception in .start")
             self.failed(Failure())
-        return self.deferred
 
     def start(self):
         """Begin the step. Override this method and add code to do local
@@ -537,10 +566,16 @@
         ['step', 'interrupted'] or ['remote', 'lost']"""
         pass
 
+    def releaseLocks(self):
+        log.msg("releaseLocks(%s): %s" % (self, self.locks))
+        for lock in self.locks:
+            lock.release(self)
+
     def finished(self, results):
         if self.progress:
             self.progress.finish()
         self.step_status.stepFinished(results)
+        self.releaseLocks()
         self.deferred.callback(results)
 
     def failed(self, why):
@@ -565,13 +600,19 @@
             # the progress stuff may still be whacked (the StepStatus may
             # think that it is still running), but the build overall will now
             # finish
+        try:
+            self.releaseLocks()
+        except:
+            log.msg("exception while releasing locks")
+            log.err()
+
         log.msg("BuildStep.failed now firing callback")
         self.deferred.callback(EXCEPTION)
 
     # utility methods that BuildSteps may find useful
 
     def slaveVersion(self, command, oldversion=None):
-        return self.build.builder.getSlaveCommandVersion(command, oldversion)
+        return self.build.getSlaveCommandVersion(command, oldversion)
 
     def addLog(self, name):
         loog = self.step_status.addLog(name)
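
To show how the new 'locks' parameter might appear in a configuration
(the locks module path and the SlaveLock name are assumptions, based on
the SlaveLock behavior described in the startStep comment above):

    from buildbot import locks                 # module path assumed
    from buildbot.process.factory import BuildFactory, s
    from buildbot.process.step import ShellCommand

    # one "compile" slot per slave: per the comment in startStep, a
    # SlaveLock is narrowed to the slave this build lands on
    compile_lock = locks.SlaveLock("compile")

    f = BuildFactory([
        s(ShellCommand, workdir="build", command=["make", "all"],
          locks=[compile_lock]),   # 'locks' now travels via parms
    ])

Claiming the same lock at both the Build and the Step level is treated as
a configuration error: startStep() raises RuntimeError when it sees the
overlap.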
@@ -975,16 +1016,20 @@
                                 % self.name)
             return SKIPPED
 
-        # can we construct a source stamp?
-        #revision = None # default: use the latest sources (-rHEAD)
-        revision, patch = self.build.getSourceStamp()
-        # 'patch' is None or a tuple of (patchlevel, diff)
+        # what source stamp would this build like to use?
+        s = self.build.getSourceStamp()
+        # if branch is None, then use the Step's "default" branch
+        branch = s.branch or self.branch
+        # if revision is None, use the latest sources (-rHEAD)
+        revision = s.revision
         if not revision and not self.alwaysUseLatest:
-            changes = self.build.allChanges()
-            revision = self.computeSourceRevision(changes)
-        self.args['revision'] = revision
-        self.args['patch'] = patch
-        self.startVC()
+            revision = self.computeSourceRevision(s.changes)
+        # if patch is None, then do not patch the tree after checkout
+
+        # 'patch' is None or a tuple of (patchlevel, diff)
+        patch = s.patch
+
+        self.startVC(branch, revision, patch)
 
 
 class CVS(Source):
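
The object consulted here is the log message's "distinct SourceStamp
class" that replaced the old 4-tuple. A sketch of how a Source step reads
it (attribute names are taken from the code above; the constructor is not
shown in this diff):

    def describe(stamp):
        # how Source.startStep interprets a build's SourceStamp
        branch = stamp.branch       # None -> fall back to the Step default
        revision = stamp.revision   # None -> latest sources (-rHEAD)
        patch = stamp.patch         # None, or a (patchlevel, diff) tuple
        changes = stamp.changes     # fed to computeSourceRevision() when
                                    # no explicit revision is given
        return branch, revision, patch, changes
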
@@ -1009,7 +1054,7 @@
     # called with each complete line.
 
     def __init__(self, cvsroot, cvsmodule, 
-                 global_options=[], branch="HEAD", checkoutDelay=None,
+                 global_options=[], branch=None, checkoutDelay=None,
                  login=None,
                  clobber=0, export=0, copydir=None,
                  **kwargs):
@@ -1038,9 +1083,10 @@
                       it was previously performed or not.
 
         @type  branch: string
-        @param branch: a string to be used in a '-r' argument to specify
-                       which named branch of the source tree should be
-                       used for this checkout.  Defaults to 'HEAD'.
+        @param branch: the default branch name; it will be used in a '-r'
+                       argument to specify which branch of the source tree
+                       should be used for this checkout. Defaults to None,
+                       which means to use 'HEAD'.
 
         @type  checkoutDelay: int or None
         @param checkoutDelay: if not None, the number of seconds to put
@@ -1065,6 +1111,7 @@
                                ,v files)."""
                                
         self.checkoutDelay = checkoutDelay
+        self.branch = branch
 
         if not kwargs.has_key('mode') and (clobber or export or copydir):
             # deal with old configs
@@ -1084,7 +1131,6 @@
         self.args.update({'cvsroot': cvsroot,
                           'cvsmodule': cvsmodule,
                           'global_options': global_options,
-                          'branch': branch,
                           'login': login,
                           })
 
@@ -1095,10 +1141,17 @@
         if self.checkoutDelay is not None:
             when = lastChange + self.checkoutDelay
         else:
-            when = lastChange + self.build.treeStableTimer / 2
+            lastSubmit = max([r.submittedAt for r in self.build.requests])
+            when = (lastChange + lastSubmit) / 2
         return formatdate(when)
 
-    def startVC(self):
+    def startVC(self, branch, revision, patch):
+        if branch is None:
+            branch = "HEAD"
+        self.args['branch'] = branch
+        self.args['revision'] = revision
+        self.args['patch'] = patch
+
         if self.args['branch'] == "HEAD" and self.args['revision']:
             # special case. 'cvs update -r HEAD -D today' gives no files
             # TODO: figure out why, see if it applies to -r BRANCH
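
With no checkoutDelay, the checkout timestamp is now anchored halfway
between the newest Change and the newest request submission, instead of
lastChange plus half of the removed treeStableTimer. A worked example
with hypothetical epoch times:

    lastChange = 1121813000    # newest Change in this build
    lastSubmit = 1121813600    # newest BuildRequest.submittedAt
    when = (lastChange + lastSubmit) / 2   # 1121813300, the midpoint
    # formatdate(when) becomes the -D date handed to 'cvs update'
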
@@ -1126,14 +1179,28 @@
 
     name = 'svn'
 
-    def __init__(self, svnurl, directory=None, **kwargs):
+    def __init__(self, svnurl=None, base_url=None, default_branch=None,
+                 directory=None, **kwargs):
         """
         @type  svnurl: string
         @param svnurl: the URL which points to the Subversion server,
                        combining the access method (HTTP, ssh, local file),
-                       the repository host/port, the repository path,
-                       the sub-tree within the repository, and the branch
-                       to check out.
+                       the repository host/port, the repository path, the
+                       sub-tree within the repository, and the branch to
+                       check out. Using C{svnurl} does not enable builds of
+                       alternate branches: use C{base_url} to enable this.
+                       Use exactly one of C{svnurl} and C{base_url}.
+
+        @param base_url: if branches are enabled, this is the base URL to
+                         which a branch name will be appended. It should
+                         probably end in a slash. Use exactly one of
+                         C{svnurl} and C{base_url}.
+                         
+        @param default_branch: if branches are enabled, this is the branch
+                               to use if the Build does not specify one
+                               explicitly. It will simply be appended
+                               to C{base_url} and the result handed to
+                               the SVN command.
         """
 
         if not kwargs.has_key('workdir') and directory is not None:
@@ -1141,9 +1208,16 @@
             warnings.warn("Please use workdir=, not directory=",
                           DeprecationWarning)
             kwargs['workdir'] = directory
+
+        if not svnurl and not base_url:
+            raise ValueError("you must use exactly one of svnurl and base_url")
+
+        self.svnurl = svnurl
+        self.base_url = base_url
+        self.branch = default_branch
+
         Source.__init__(self, **kwargs)
 
-        self.args['svnurl'] = svnurl
 
     def computeSourceRevision(self, changes):
         if not changes:
@@ -1151,7 +1225,8 @@
         lastChange = max([c.revision for c in changes])
         return lastChange
 
-    def startVC(self):
+    def startVC(self, branch, revision, patch):
+
         # accommodate old slaves
         errorMessage = None
         slavever = self.slaveVersion("svn", "old")
@@ -1167,13 +1242,20 @@
                 log.msg("WARNING: this slave only does mode=update")
             assert self.args['mode'] != "export" # more serious
             self.args['directory'] = self.args['workdir']
-            if self.args['revision'] is not None:
+            if revision is not None:
                 # 0.5.0 can only do HEAD
                 errorMessage = "WARNING: this slave can only update to HEAD"
-                errorMessage += ", not revision=%s\n" % self.args['revision']
+                errorMessage += ", not revision=%s\n" % revision
                 log.msg("WARNING: this slave only does -rHEAD")
-            self.args['revision'] = "HEAD" # interprets this key differently
-            assert not self.args['patch'] # 0.5.0 slave can't do patch
+            revision = "HEAD" # the 0.5.0 slave interprets this key differently
+            assert not patch # 0.5.0 slave can't do patch
+
+        if self.svnurl:
+            self.args['svnurl'] = self.svnurl
+        else:
+            self.args['svnurl'] = self.base_url + branch
+        self.args['revision'] = revision
+        self.args['patch'] = patch
 
         self.cmd = LoggedRemoteCommand("svn", self.args)
         ShellCommand.start(self, errorMessage)
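
A sketch of the two SVN configurations the new arguments allow (URLs are
hypothetical; the remaining step arguments are illustrative):

    from buildbot.process.step import SVN

    # single-branch, as before; alternate branches stay disabled
    SVN(svnurl="http://svn.example.org/repos/trunk", mode="update")

    # branch-enabled: the build's branch name (or default_branch, when the
    # build does not name one) is appended to base_url
    SVN(base_url="http://svn.example.org/repos/",
        default_branch="trunk", mode="update")

Darcs below gets the same treatment, with repourl/base_url/default_branch
playing identical roles.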
@@ -1191,19 +1273,47 @@
 
     name = "darcs"
 
-    def __init__(self, repourl, **kwargs):
+    def __init__(self, repourl=None, base_url=None, default_branch=None,
+                 **kwargs):
         """
         @type  repourl: string
-        @param repourl: the URL which points at the Darcs repository
+        @param repourl: the URL which points at the Darcs repository. This
+                        is used as the default branch. Using C{repourl} does
+                        not enable builds of alternate branches: use
+                        C{base_url} to enable this. Use either C{repourl} or
+                        C{base_url}, not both.
+
+        @param base_url: if branches are enabled, this is the base URL to
+                         which a branch name will be appended. It should
+                         probably end in a slash. Use exactly one of
+                         C{repourl} and C{base_url}.
+                         
+        @param default_branch: if branches are enabled, this is the branch
+                               to use if the Build does not specify one
+                               explicitly. It will simply be appended to
+                               C{base_url} and the result handed to the
+                               'darcs pull' command.
         """
         assert kwargs['mode'] != "export", \
                "Darcs does not have an 'export' mode"
+        if (not repourl and not base_url) or (repourl and base_url):
+            raise ValueError("you must provide exactly one of repourl and"
+                             " base_url")
+        self.repourl = repourl
+        self.base_url = base_url
+        self.branch = default_branch
         Source.__init__(self, **kwargs)
-        self.args['repourl'] = repourl
 
-    def startVC(self):
+    def startVC(self, branch, revision, patch):
         slavever = self.slaveVersion("darcs")
         assert slavever, "slave is too old, does not know about darcs"
+
+        if self.repourl:
+            self.args['repourl'] = self.repourl
+        else:
+            self.args['repourl'] = self.base_url + branch
+        self.args['revision'] = revision
+        self.args['patch'] = patch
         self.cmd = LoggedRemoteCommand("darcs", self.args)
         ShellCommand.start(self)
 
@@ -1218,10 +1328,14 @@
         @type  repourl: string
         @param repourl: the URL which points at the git repository
         """
+        self.branch = None # TODO
         Source.__init__(self, **kwargs)
         self.args['repourl'] = repourl
 
-    def startVC(self):
+    def startVC(self, branch, revision, patch):
+        self.args['branch'] = branch
+        self.args['revision'] = revision
+        self.args['patch'] = patch
         slavever = self.slaveVersion("git")
         assert slavever, "slave is too old, does not know about git"
         self.cmd = LoggedRemoteCommand("git", self.args)
@@ -1246,38 +1360,21 @@
                     pathname of a local directory instead.
 
         @type  version: string
-        @param version: the category--branch--version to check out
+        @param version: the category--branch--version to check out. This is
+                        the default branch. If a build specifies a different
+                        branch, it will be used instead of this.
 
         @type  archive: string
         @param archive: The archive name. If provided, it must match the one
                         that comes from the repository. If not, the
                         repository's default will be used.
         """
+        self.branch = version
         Source.__init__(self, **kwargs)
         self.args.update({'url': url,
-                          'version': version,
                           'archive': archive,
                           })
 
-    def checkSlaveVersion(self):
-        slavever = self.slaveVersion("arch")
-        assert slavever, "slave is too old, does not know about arch"
-        # slave 1.28 and later understand 'revision'
-        oldslave = False
-        try:
-            if slavever.startswith("1.") and int(slavever[2:]) < 28:
-                oldslave = True
-        except ValueError:
-            pass
-        if oldslave:
-            if not self.alwaysUseLatest:
-                log.msg("warning, slave is too old to use a revision")
-
-    def startVC(self):
-        self.checkSlaveVersion()
-        self.cmd = LoggedRemoteCommand("arch", self.args)
-        ShellCommand.start(self)
-
     def computeSourceRevision(self, changes):
         # in Arch, fully-qualified revision numbers look like:
         #  arch@buildbot.sourceforge.net--2004/buildbot--dev--0--patch-104
@@ -1302,6 +1399,29 @@
             return "base-0"
         return "patch-%d" % lastChange
 
+    def checkSlaveVersion(self):
+        slavever = self.slaveVersion("arch")
+        assert slavever, "slave is too old, does not know about arch"
+        # slave 1.28 and later understand 'revision'
+        oldslave = False
+        try:
+            if slavever.startswith("1.") and int(slavever[2:]) < 28:
+                oldslave = True
+        except ValueError:
+            pass
+        if oldslave:
+            if not self.alwaysUseLatest:
+                log.msg("warning, slave is too old to use a revision")
+
+    def startVC(self, branch, revision, patch):
+        self.args['version'] = branch
+        self.args['revision'] = revision
+        self.args['patch'] = patch
+        self.checkSlaveVersion()
+        self.cmd = LoggedRemoteCommand("arch", self.args)
+        ShellCommand.start(self)
+
+
 class Bazaar(Arch):
     """Bazaar is an alternative client for Arch repositories. baz is mostly
     compatible with tla, but archive registration is slightly different."""
@@ -1323,9 +1443,9 @@
                         buildslave will attempt to get sources from the wrong
                         archive.
         """
+        self.branch = version
         Source.__init__(self, **kwargs)
         self.args.update({'url': url,
-                          'version': version,
                           'archive': archive,
                           })
 
@@ -1341,7 +1461,10 @@
             pass
         assert not oldslave, "slave is too old, does not know about baz"
 
-    def startVC(self):
+    def startVC(self, branch, revision, patch):
+        self.args['version'] = branch
+        self.args['revision'] = revision
+        self.args['patch'] = patch
         self.checkSlaveVersion()
         self.cmd = LoggedRemoteCommand("bazaar", self.args)
         ShellCommand.start(self)
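
For Arch and Bazaar the branch travels in the 'version' argument: the
constructor's version is only a default, and a SourceStamp branch, when
present, replaces it in startVC(). A sketch (archive name and URL are
hypothetical):

    from buildbot.process.step import Bazaar

    Bazaar(url="http://arch.example.org/archive",
           version="proj--dev--0",            # default branch
           archive="user@example.org--2005")  # must match the repository
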
@@ -1363,7 +1486,7 @@
                           'view': view,
                           })
 
-    def startVC(self):
+    def startVC(self, branch, revision, patch):
         self.cmd = LoggedRemoteCommand("p4", self.args)
         ShellCommand.start(self)
 
@@ -1389,6 +1512,7 @@
 
     def __init__(self, p4port, **kwargs):
         assert kwargs['mode'] == "copy", "P4Sync can only be used in mode=copy"
+        self.branch = None
         Source.__init__(self, **kwargs)
         self.args['p4port'] = p4port
 
@@ -1398,7 +1522,7 @@
         lastChange = max([c.revision for c in changes])
         return lastChange
 
-    def startVC(self):
+    def startVC(self, branch, revision, patch):
         slavever = self.slaveVersion("p4sync")
         assert slavever, "slave is too old, does not know about p4"
         self.cmd = LoggedRemoteCommand("p4sync", self.args)
@@ -1440,8 +1564,8 @@
         self.finished(SUCCESS)
 
 class FailingDummy(Dummy):
-    """I am a dummy no-op step that 'runs' master-side and raises an
-    Exception after by default 5 seconds."""
+    """I am a dummy no-op step that 'runs' master-side and finishes (with a
+    FAILURE status) after 5 seconds."""
 
     name = "failing dummy"
 
@@ -1451,13 +1575,8 @@
         self.timer = reactor.callLater(self.timeout, self.done)
 
     def done(self):
-        class Boom(Exception):
-            pass
-        try:
-            raise Boom("boom")
-        except Boom:
-            f = Failure()
-        self.failed(f)
+        self.step_status.setColor("red")
+        self.finished(FAILURE)
 
 # subclasses from Shell Command to get the output reporting
 class RemoteDummy(ShellCommand):