Web   ·   Wiki   ·   Activities   ·   Blog   ·   Lists   ·   Chat   ·   Meeting   ·   Bugs   ·   Git   ·   Translate   ·   Archive   ·   People   ·   Donate
summaryrefslogtreecommitdiffstats
path: root/buildbot/buildbot
diff options
context:
space:
mode:
Diffstat (limited to 'buildbot/buildbot')
-rw-r--r--buildbot/buildbot/__init__.py1
-rw-r--r--buildbot/buildbot/buildbot.pngbin783 -> 0 bytes
-rw-r--r--buildbot/buildbot/buildset.py81
-rw-r--r--buildbot/buildbot/buildslave.py688
-rw-r--r--buildbot/buildbot/changes/__init__.py0
-rw-r--r--buildbot/buildbot/changes/base.py10
-rw-r--r--buildbot/buildbot/changes/bonsaipoller.py320
-rw-r--r--buildbot/buildbot/changes/changes.py288
-rw-r--r--buildbot/buildbot/changes/dnotify.py100
-rw-r--r--buildbot/buildbot/changes/freshcvs.py144
-rw-r--r--buildbot/buildbot/changes/hgbuildbot.py114
-rw-r--r--buildbot/buildbot/changes/mail.py458
-rw-r--r--buildbot/buildbot/changes/maildir.py116
-rw-r--r--buildbot/buildbot/changes/monotone.py305
-rw-r--r--buildbot/buildbot/changes/p4poller.py207
-rw-r--r--buildbot/buildbot/changes/pb.py108
-rw-r--r--buildbot/buildbot/changes/svnpoller.py463
-rw-r--r--buildbot/buildbot/clients/__init__.py0
-rw-r--r--buildbot/buildbot/clients/base.py125
-rw-r--r--buildbot/buildbot/clients/debug.glade684
-rw-r--r--buildbot/buildbot/clients/debug.py181
-rw-r--r--buildbot/buildbot/clients/gtkPanes.py532
-rw-r--r--buildbot/buildbot/clients/sendchange.py48
-rw-r--r--buildbot/buildbot/dnotify.py102
-rw-r--r--buildbot/buildbot/ec2buildslave.py283
-rw-r--r--buildbot/buildbot/interfaces.py1123
-rw-r--r--buildbot/buildbot/locks.py247
-rw-r--r--buildbot/buildbot/manhole.py265
-rw-r--r--buildbot/buildbot/master.py965
-rw-r--r--buildbot/buildbot/pbutil.py147
-rw-r--r--buildbot/buildbot/process/__init__.py0
-rw-r--r--buildbot/buildbot/process/base.py627
-rw-r--r--buildbot/buildbot/process/builder.py874
-rw-r--r--buildbot/buildbot/process/buildstep.py1097
-rw-r--r--buildbot/buildbot/process/factory.py182
-rw-r--r--buildbot/buildbot/process/process_twisted.py118
-rw-r--r--buildbot/buildbot/process/properties.py157
-rw-r--r--buildbot/buildbot/process/step_twisted2.py159
-rw-r--r--buildbot/buildbot/scheduler.py837
-rw-r--r--buildbot/buildbot/scripts/__init__.py0
-rw-r--r--buildbot/buildbot/scripts/checkconfig.py53
-rw-r--r--buildbot/buildbot/scripts/logwatcher.py97
-rw-r--r--buildbot/buildbot/scripts/reconfig.py69
-rw-r--r--buildbot/buildbot/scripts/runner.py1023
-rw-r--r--buildbot/buildbot/scripts/sample.cfg175
-rw-r--r--buildbot/buildbot/scripts/startup.py128
-rw-r--r--buildbot/buildbot/scripts/tryclient.py707
-rw-r--r--buildbot/buildbot/slave/__init__.py0
-rw-r--r--buildbot/buildbot/slave/bot.py510
-rw-r--r--buildbot/buildbot/slave/commands.py2788
-rw-r--r--buildbot/buildbot/slave/interfaces.py56
-rw-r--r--buildbot/buildbot/slave/registry.py17
-rw-r--r--buildbot/buildbot/sourcestamp.py95
-rw-r--r--buildbot/buildbot/status/__init__.py0
-rw-r--r--buildbot/buildbot/status/base.py69
-rw-r--r--buildbot/buildbot/status/builder.py2182
-rw-r--r--buildbot/buildbot/status/client.py564
-rw-r--r--buildbot/buildbot/status/html.py6
-rw-r--r--buildbot/buildbot/status/mail.py524
-rw-r--r--buildbot/buildbot/status/progress.py308
-rw-r--r--buildbot/buildbot/status/tests.py73
-rw-r--r--buildbot/buildbot/status/tinderbox.py223
-rw-r--r--buildbot/buildbot/status/web/__init__.py0
-rw-r--r--buildbot/buildbot/status/web/about.py33
-rw-r--r--buildbot/buildbot/status/web/base.py421
-rw-r--r--buildbot/buildbot/status/web/baseweb.py614
-rw-r--r--buildbot/buildbot/status/web/build.py302
-rw-r--r--buildbot/buildbot/status/web/builder.py312
-rw-r--r--buildbot/buildbot/status/web/changes.py41
-rw-r--r--buildbot/buildbot/status/web/classic.css78
-rw-r--r--buildbot/buildbot/status/web/feeds.py359
-rw-r--r--buildbot/buildbot/status/web/grid.py252
-rw-r--r--buildbot/buildbot/status/web/index.html32
-rw-r--r--buildbot/buildbot/status/web/logs.py171
-rw-r--r--buildbot/buildbot/status/web/robots.txt9
-rw-r--r--buildbot/buildbot/status/web/slaves.py181
-rw-r--r--buildbot/buildbot/status/web/step.py97
-rw-r--r--buildbot/buildbot/status/web/tests.py64
-rw-r--r--buildbot/buildbot/status/web/waterfall.py962
-rw-r--r--buildbot/buildbot/status/web/xmlrpc.py203
-rw-r--r--buildbot/buildbot/status/words.py875
-rw-r--r--buildbot/buildbot/steps/__init__.py0
-rw-r--r--buildbot/buildbot/steps/dummy.py100
-rw-r--r--buildbot/buildbot/steps/master.py76
-rw-r--r--buildbot/buildbot/steps/maxq.py44
-rw-r--r--buildbot/buildbot/steps/package/__init__.py11
-rw-r--r--buildbot/buildbot/steps/package/rpm/__init__.py15
-rw-r--r--buildbot/buildbot/steps/package/rpm/rpmbuild.py144
-rw-r--r--buildbot/buildbot/steps/package/rpm/rpmlint.py51
-rw-r--r--buildbot/buildbot/steps/package/rpm/rpmspec.py67
-rw-r--r--buildbot/buildbot/steps/python.py187
-rw-r--r--buildbot/buildbot/steps/python_twisted.py804
-rw-r--r--buildbot/buildbot/steps/shell.py487
-rw-r--r--buildbot/buildbot/steps/source.py1107
-rw-r--r--buildbot/buildbot/steps/transfer.py465
-rw-r--r--buildbot/buildbot/steps/trigger.py122
-rw-r--r--buildbot/buildbot/test/__init__.py0
-rw-r--r--buildbot/buildbot/test/emit.py11
-rw-r--r--buildbot/buildbot/test/emitlogs.py42
-rw-r--r--buildbot/buildbot/test/mail/freshcvs.168
-rw-r--r--buildbot/buildbot/test/mail/freshcvs.2101
-rw-r--r--buildbot/buildbot/test/mail/freshcvs.397
-rw-r--r--buildbot/buildbot/test/mail/freshcvs.445
-rw-r--r--buildbot/buildbot/test/mail/freshcvs.554
-rw-r--r--buildbot/buildbot/test/mail/freshcvs.670
-rw-r--r--buildbot/buildbot/test/mail/freshcvs.768
-rw-r--r--buildbot/buildbot/test/mail/freshcvs.861
-rw-r--r--buildbot/buildbot/test/mail/freshcvs.918
-rw-r--r--buildbot/buildbot/test/mail/svn-commit.167
-rw-r--r--buildbot/buildbot/test/mail/svn-commit.21218
-rw-r--r--buildbot/buildbot/test/mail/syncmail.1152
-rw-r--r--buildbot/buildbot/test/mail/syncmail.256
-rw-r--r--buildbot/buildbot/test/mail/syncmail.339
-rw-r--r--buildbot/buildbot/test/mail/syncmail.4290
-rw-r--r--buildbot/buildbot/test/mail/syncmail.570
-rw-r--r--buildbot/buildbot/test/runutils.py516
-rw-r--r--buildbot/buildbot/test/sleep.py8
-rw-r--r--buildbot/buildbot/test/subdir/emit.py11
-rw-r--r--buildbot/buildbot/test/test__versions.py16
-rw-r--r--buildbot/buildbot/test/test_bonsaipoller.py244
-rw-r--r--buildbot/buildbot/test/test_buildreq.py182
-rw-r--r--buildbot/buildbot/test/test_buildstep.py144
-rw-r--r--buildbot/buildbot/test/test_changes.py243
-rw-r--r--buildbot/buildbot/test/test_config.py1277
-rw-r--r--buildbot/buildbot/test/test_control.py104
-rw-r--r--buildbot/buildbot/test/test_dependencies.py166
-rw-r--r--buildbot/buildbot/test/test_ec2buildslave.py552
-rw-r--r--buildbot/buildbot/test/test_limitlogs.py94
-rw-r--r--buildbot/buildbot/test/test_locks.py495
-rw-r--r--buildbot/buildbot/test/test_maildir.py92
-rw-r--r--buildbot/buildbot/test/test_mailparse.py293
-rw-r--r--buildbot/buildbot/test/test_mergerequests.py196
-rw-r--r--buildbot/buildbot/test/test_p4poller.py213
-rw-r--r--buildbot/buildbot/test/test_package_rpm.py132
-rw-r--r--buildbot/buildbot/test/test_properties.py274
-rw-r--r--buildbot/buildbot/test/test_reconfig.py91
-rw-r--r--buildbot/buildbot/test/test_run.py1199
-rw-r--r--buildbot/buildbot/test/test_runner.py392
-rw-r--r--buildbot/buildbot/test/test_scheduler.py348
-rw-r--r--buildbot/buildbot/test/test_shell.py138
-rw-r--r--buildbot/buildbot/test/test_slavecommand.py294
-rw-r--r--buildbot/buildbot/test/test_slaves.py991
-rw-r--r--buildbot/buildbot/test/test_status.py1631
-rw-r--r--buildbot/buildbot/test/test_steps.py788
-rw-r--r--buildbot/buildbot/test/test_svnpoller.py476
-rw-r--r--buildbot/buildbot/test/test_transfer.py721
-rw-r--r--buildbot/buildbot/test/test_twisted.py219
-rw-r--r--buildbot/buildbot/test/test_util.py26
-rw-r--r--buildbot/buildbot/test/test_vc.py3023
-rw-r--r--buildbot/buildbot/test/test_web.py594
-rw-r--r--buildbot/buildbot/test/test_webparts.py141
-rw-r--r--buildbot/buildbot/util.py102
152 files changed, 0 insertions, 49462 deletions
diff --git a/buildbot/buildbot/__init__.py b/buildbot/buildbot/__init__.py
deleted file mode 100644
index b691f8b..0000000
--- a/buildbot/buildbot/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-version = "0.7.10p1"
diff --git a/buildbot/buildbot/buildbot.png b/buildbot/buildbot/buildbot.png
deleted file mode 100644
index 387ba15..0000000
--- a/buildbot/buildbot/buildbot.png
+++ /dev/null
Binary files differ
diff --git a/buildbot/buildbot/buildset.py b/buildbot/buildbot/buildset.py
deleted file mode 100644
index fe59f74..0000000
--- a/buildbot/buildbot/buildset.py
+++ /dev/null
@@ -1,81 +0,0 @@
-from buildbot.process import base
-from buildbot.status import builder
-from buildbot.process.properties import Properties
-
-
-class BuildSet:
- """I represent a set of potential Builds, all of the same source tree,
- across a specified list of Builders. I can represent a build of a
- specific version of the source tree (named by source.branch and
- source.revision), or a build of a certain set of Changes
- (source.changes=list)."""
-
- def __init__(self, builderNames, source, reason=None, bsid=None,
- properties=None):
- """
- @param source: a L{buildbot.sourcestamp.SourceStamp}
- """
- self.builderNames = builderNames
- self.source = source
- self.reason = reason
-
- self.properties = Properties()
- if properties: self.properties.updateFromProperties(properties)
-
- self.stillHopeful = True
- self.status = bss = builder.BuildSetStatus(source, reason,
- builderNames, bsid)
-
- def waitUntilSuccess(self):
- return self.status.waitUntilSuccess()
- def waitUntilFinished(self):
- return self.status.waitUntilFinished()
-
- def start(self, builders):
- """This is called by the BuildMaster to actually create and submit
- the BuildRequests."""
- self.requests = []
- reqs = []
-
- # create the requests
- for b in builders:
- req = base.BuildRequest(self.reason, self.source, b.name,
- properties=self.properties)
- reqs.append((b, req))
- self.requests.append(req)
- d = req.waitUntilFinished()
- d.addCallback(self.requestFinished, req)
-
- # tell our status about them
- req_statuses = [req.status for req in self.requests]
- self.status.setBuildRequestStatuses(req_statuses)
-
- # now submit them
- for b,req in reqs:
- b.submitBuildRequest(req)
-
- def requestFinished(self, buildstatus, req):
- # TODO: this is where individual build status results are aggregated
- # into a BuildSet-wide status. Consider making a rule that says one
- # WARNINGS results in the overall status being WARNINGS too. The
- # current rule is that any FAILURE means FAILURE, otherwise you get
- # SUCCESS.
- self.requests.remove(req)
- results = buildstatus.getResults()
- if results == builder.FAILURE:
- self.status.setResults(results)
- if self.stillHopeful:
- # oh, cruel reality cuts deep. no joy for you. This is the
- # first failure. This flunks the overall BuildSet, so we can
- # notify success watchers that they aren't going to be happy.
- self.stillHopeful = False
- self.status.giveUpHope()
- self.status.notifySuccessWatchers()
- if not self.requests:
- # that was the last build, so we can notify finished watchers. If
- # we haven't failed by now, we can claim success.
- if self.stillHopeful:
- self.status.setResults(builder.SUCCESS)
- self.status.notifySuccessWatchers()
- self.status.notifyFinishedWatchers()
-
diff --git a/buildbot/buildbot/buildslave.py b/buildbot/buildbot/buildslave.py
deleted file mode 100644
index bd41813..0000000
--- a/buildbot/buildbot/buildslave.py
+++ /dev/null
@@ -1,688 +0,0 @@
-# Portions copyright Canonical Ltd. 2009
-
-import time
-from email.Message import Message
-from email.Utils import formatdate
-from zope.interface import implements
-from twisted.python import log
-from twisted.internet import defer, reactor
-from twisted.application import service
-import twisted.spread.pb
-
-from buildbot.pbutil import NewCredPerspective
-from buildbot.status.builder import SlaveStatus
-from buildbot.status.mail import MailNotifier
-from buildbot.interfaces import IBuildSlave, ILatentBuildSlave
-from buildbot.process.properties import Properties
-
-
-class AbstractBuildSlave(NewCredPerspective, service.MultiService):
- """This is the master-side representative for a remote buildbot slave.
- There is exactly one for each slave described in the config file (the
- c['slaves'] list). When buildbots connect in (.attach), they get a
- reference to this instance. The BotMaster object is stashed as the
- .botmaster attribute. The BotMaster is also our '.parent' Service.
-
- I represent a build slave -- a remote machine capable of
- running builds. I am instantiated by the configuration file, and can be
- subclassed to add extra functionality."""
-
- implements(IBuildSlave)
-
- def __init__(self, name, password, max_builds=None,
- notify_on_missing=[], missing_timeout=3600,
- properties={}):
- """
- @param name: botname this machine will supply when it connects
- @param password: password this machine will supply when
- it connects
- @param max_builds: maximum number of simultaneous builds that will
- be run concurrently on this buildslave (the
- default is None for no limit)
- @param properties: properties that will be applied to builds run on
- this slave
- @type properties: dictionary
- """
- service.MultiService.__init__(self)
- self.slavename = name
- self.password = password
- self.botmaster = None # no buildmaster yet
- self.slave_status = SlaveStatus(name)
- self.slave = None # a RemoteReference to the Bot, when connected
- self.slave_commands = None
- self.slavebuilders = {}
- self.max_builds = max_builds
-
- self.properties = Properties()
- self.properties.update(properties, "BuildSlave")
- self.properties.setProperty("slavename", name, "BuildSlave")
-
- self.lastMessageReceived = 0
- if isinstance(notify_on_missing, str):
- notify_on_missing = [notify_on_missing]
- self.notify_on_missing = notify_on_missing
- for i in notify_on_missing:
- assert isinstance(i, str)
- self.missing_timeout = missing_timeout
- self.missing_timer = None
-
- def update(self, new):
- """
- Given a new BuildSlave, configure this one identically. Because
- BuildSlave objects are remotely referenced, we can't replace them
- without disconnecting the slave, yet there's no reason to do that.
- """
- # the reconfiguration logic should guarantee this:
- assert self.slavename == new.slavename
- assert self.password == new.password
- assert self.__class__ == new.__class__
- self.max_builds = new.max_builds
-
- def __repr__(self):
- if self.botmaster:
- builders = self.botmaster.getBuildersForSlave(self.slavename)
- return "<%s '%s', current builders: %s>" % \
- (self.__class__.__name__, self.slavename,
- ','.join(map(lambda b: b.name, builders)))
- else:
- return "<%s '%s', (no builders yet)>" % \
- (self.__class__.__name__, self.slavename)
-
- def setBotmaster(self, botmaster):
- assert not self.botmaster, "BuildSlave already has a botmaster"
- self.botmaster = botmaster
- self.startMissingTimer()
-
- def stopMissingTimer(self):
- if self.missing_timer:
- self.missing_timer.cancel()
- self.missing_timer = None
-
- def startMissingTimer(self):
- if self.notify_on_missing and self.missing_timeout and self.parent:
- self.stopMissingTimer() # in case it's already running
- self.missing_timer = reactor.callLater(self.missing_timeout,
- self._missing_timer_fired)
-
- def _missing_timer_fired(self):
- self.missing_timer = None
- # notify people, but only if we're still in the config
- if not self.parent:
- return
-
- buildmaster = self.botmaster.parent
- status = buildmaster.getStatus()
- text = "The Buildbot working for '%s'\n" % status.getProjectName()
- text += ("has noticed that the buildslave named %s went away\n" %
- self.slavename)
- text += "\n"
- text += ("It last disconnected at %s (buildmaster-local time)\n" %
- time.ctime(time.time() - self.missing_timeout)) # approx
- text += "\n"
- text += "The admin on record (as reported by BUILDSLAVE:info/admin)\n"
- text += "was '%s'.\n" % self.slave_status.getAdmin()
- text += "\n"
- text += "Sincerely,\n"
- text += " The Buildbot\n"
- text += " %s\n" % status.getProjectURL()
- subject = "Buildbot: buildslave %s was lost" % self.slavename
- return self._mail_missing_message(subject, text)
-
-
- def updateSlave(self):
- """Called to add or remove builders after the slave has connected.
-
- @return: a Deferred that indicates when an attached slave has
- accepted the new builders and/or released the old ones."""
- if self.slave:
- return self.sendBuilderList()
- else:
- return defer.succeed(None)
-
- def updateSlaveStatus(self, buildStarted=None, buildFinished=None):
- if buildStarted:
- self.slave_status.buildStarted(buildStarted)
- if buildFinished:
- self.slave_status.buildFinished(buildFinished)
-
- def attached(self, bot):
- """This is called when the slave connects.
-
- @return: a Deferred that fires with a suitable pb.IPerspective to
- give to the slave (i.e. 'self')"""
-
- if self.slave:
- # uh-oh, we've got a duplicate slave. The most likely
- # explanation is that the slave is behind a slow link, thinks we
- # went away, and has attempted to reconnect, so we've got two
- # "connections" from the same slave, but the previous one is
- # stale. Give the new one precedence.
- log.msg("duplicate slave %s replacing old one" % self.slavename)
-
- # just in case we've got two identically-configured slaves,
- # report the IP addresses of both so someone can resolve the
- # squabble
- tport = self.slave.broker.transport
- log.msg("old slave was connected from", tport.getPeer())
- log.msg("new slave is from", bot.broker.transport.getPeer())
- d = self.disconnect()
- else:
- d = defer.succeed(None)
- # now we go through a sequence of calls, gathering information, then
- # tell the Botmaster that it can finally give this slave to all the
- # Builders that care about it.
-
- # we accumulate slave information in this 'state' dictionary, then
- # set it atomically if we make it far enough through the process
- state = {}
-
- # Reset graceful shutdown status
- self.slave_status.setGraceful(False)
- # We want to know when the graceful shutdown flag changes
- self.slave_status.addGracefulWatcher(self._gracefulChanged)
-
- def _log_attachment_on_slave(res):
- d1 = bot.callRemote("print", "attached")
- d1.addErrback(lambda why: None)
- return d1
- d.addCallback(_log_attachment_on_slave)
-
- def _get_info(res):
- d1 = bot.callRemote("getSlaveInfo")
- def _got_info(info):
- log.msg("Got slaveinfo from '%s'" % self.slavename)
- # TODO: info{} might have other keys
- state["admin"] = info.get("admin")
- state["host"] = info.get("host")
- def _info_unavailable(why):
- # maybe an old slave, doesn't implement remote_getSlaveInfo
- log.msg("BuildSlave.info_unavailable")
- log.err(why)
- d1.addCallbacks(_got_info, _info_unavailable)
- return d1
- d.addCallback(_get_info)
-
- def _get_commands(res):
- d1 = bot.callRemote("getCommands")
- def _got_commands(commands):
- state["slave_commands"] = commands
- def _commands_unavailable(why):
- # probably an old slave
- log.msg("BuildSlave._commands_unavailable")
- if why.check(AttributeError):
- return
- log.err(why)
- d1.addCallbacks(_got_commands, _commands_unavailable)
- return d1
- d.addCallback(_get_commands)
-
- def _accept_slave(res):
- self.slave_status.setAdmin(state.get("admin"))
- self.slave_status.setHost(state.get("host"))
- self.slave_status.setConnected(True)
- self.slave_commands = state.get("slave_commands")
- self.slave = bot
- log.msg("bot attached")
- self.messageReceivedFromSlave()
- self.stopMissingTimer()
-
- return self.updateSlave()
- d.addCallback(_accept_slave)
-
- # Finally, the slave gets a reference to this BuildSlave. They
- # receive this later, after we've started using them.
- d.addCallback(lambda res: self)
- return d
-
- def messageReceivedFromSlave(self):
- now = time.time()
- self.lastMessageReceived = now
- self.slave_status.setLastMessageReceived(now)
-
- def detached(self, mind):
- self.slave = None
- self.slave_status.removeGracefulWatcher(self._gracefulChanged)
- self.slave_status.setConnected(False)
- log.msg("BuildSlave.detached(%s)" % self.slavename)
-
- def disconnect(self):
- """Forcibly disconnect the slave.
-
- This severs the TCP connection and returns a Deferred that will fire
- (with None) when the connection is probably gone.
-
- If the slave is still alive, they will probably try to reconnect
- again in a moment.
-
- This is called in two circumstances. The first is when a slave is
- removed from the config file. In this case, when they try to
- reconnect, they will be rejected as an unknown slave. The second is
- when we wind up with two connections for the same slave, in which
- case we disconnect the older connection.
- """
-
- if not self.slave:
- return defer.succeed(None)
- log.msg("disconnecting old slave %s now" % self.slavename)
- # When this Deferred fires, we'll be ready to accept the new slave
- return self._disconnect(self.slave)
-
- def _disconnect(self, slave):
- # all kinds of teardown will happen as a result of
- # loseConnection(), but it happens after a reactor iteration or
- # two. Hook the actual disconnect so we can know when it is safe
- # to connect the new slave. We have to wait one additional
- # iteration (with callLater(0)) to make sure the *other*
- # notifyOnDisconnect handlers have had a chance to run.
- d = defer.Deferred()
-
- # notifyOnDisconnect runs the callback with one argument, the
- # RemoteReference being disconnected.
- def _disconnected(rref):
- reactor.callLater(0, d.callback, None)
- slave.notifyOnDisconnect(_disconnected)
- tport = slave.broker.transport
- # this is the polite way to request that a socket be closed
- tport.loseConnection()
- try:
- # but really we don't want to wait for the transmit queue to
- # drain. The remote end is unlikely to ACK the data, so we'd
- # probably have to wait for a (20-minute) TCP timeout.
- #tport._closeSocket()
- # however, doing _closeSocket (whether before or after
- # loseConnection) somehow prevents the notifyOnDisconnect
- # handlers from being run. Bummer.
- tport.offset = 0
- tport.dataBuffer = ""
- except:
- # however, these hacks are pretty internal, so don't blow up if
- # they fail or are unavailable
- log.msg("failed to accelerate the shutdown process")
- pass
- log.msg("waiting for slave to finish disconnecting")
-
- return d
-
- def sendBuilderList(self):
- our_builders = self.botmaster.getBuildersForSlave(self.slavename)
- blist = [(b.name, b.builddir) for b in our_builders]
- d = self.slave.callRemote("setBuilderList", blist)
- return d
-
- def perspective_keepalive(self):
- pass
-
- def addSlaveBuilder(self, sb):
- if sb.builder_name not in self.slavebuilders:
- log.msg("%s adding %s" % (self, sb))
- elif sb is not self.slavebuilders[sb.builder_name]:
- log.msg("%s replacing %s" % (self, sb))
- else:
- return
- self.slavebuilders[sb.builder_name] = sb
-
- def removeSlaveBuilder(self, sb):
- try:
- del self.slavebuilders[sb.builder_name]
- except KeyError:
- pass
- else:
- log.msg("%s removed %s" % (self, sb))
-
- def canStartBuild(self):
- """
- I am called when a build is requested to see if this buildslave
- can start a build. This function can be used to limit overall
- concurrency on the buildslave.
- """
- # If we're waiting to shutdown gracefully, then we shouldn't
- # accept any new jobs.
- if self.slave_status.getGraceful():
- return False
-
- if self.max_builds:
- active_builders = [sb for sb in self.slavebuilders.values()
- if sb.isBusy()]
- if len(active_builders) >= self.max_builds:
- return False
- return True
-
- def _mail_missing_message(self, subject, text):
- # first, see if we have a MailNotifier we can use. This gives us a
- # fromaddr and a relayhost.
- buildmaster = self.botmaster.parent
- for st in buildmaster.statusTargets:
- if isinstance(st, MailNotifier):
- break
- else:
- # if not, they get a default MailNotifier, which always uses SMTP
- # to localhost and uses a dummy fromaddr of "buildbot".
- log.msg("buildslave-missing msg using default MailNotifier")
- st = MailNotifier("buildbot")
- # now construct the mail
-
- m = Message()
- m.set_payload(text)
- m['Date'] = formatdate(localtime=True)
- m['Subject'] = subject
- m['From'] = st.fromaddr
- recipients = self.notify_on_missing
- m['To'] = ", ".join(recipients)
- d = st.sendMessage(m, recipients)
- # return the Deferred for testing purposes
- return d
-
- def _gracefulChanged(self, graceful):
- """This is called when our graceful shutdown setting changes"""
- if graceful:
- active_builders = [sb for sb in self.slavebuilders.values()
- if sb.isBusy()]
- if len(active_builders) == 0:
- # Shut down!
- self.shutdown()
-
- def shutdown(self):
- """Shutdown the slave"""
- # Look for a builder with a remote reference to the client side
- # slave. If we can find one, then call "shutdown" on the remote
- # builder, which will cause the slave buildbot process to exit.
- d = None
- for b in self.slavebuilders.values():
- if b.remote:
- d = b.remote.callRemote("shutdown")
- break
-
- if d:
- log.msg("Shutting down slave: %s" % self.slavename)
- # The remote shutdown call will not complete successfully since the
- # buildbot process exits almost immediately after getting the
- # shutdown request.
- # Here we look at the reason why the remote call failed, and if
- # it's because the connection was lost, that means the slave
- # shutdown as expected.
- def _errback(why):
- if why.check(twisted.spread.pb.PBConnectionLost):
- log.msg("Lost connection to %s" % self.slavename)
- else:
- log.err("Unexpected error when trying to shutdown %s" % self.slavename)
- d.addErrback(_errback)
- return d
- log.err("Couldn't find remote builder to shut down slave")
- return defer.succeed(None)
-
-class BuildSlave(AbstractBuildSlave):
-
- def sendBuilderList(self):
- d = AbstractBuildSlave.sendBuilderList(self)
- def _sent(slist):
- dl = []
- for name, remote in slist.items():
- # use get() since we might have changed our mind since then
- b = self.botmaster.builders.get(name)
- if b:
- d1 = b.attached(self, remote, self.slave_commands)
- dl.append(d1)
- return defer.DeferredList(dl)
- def _set_failed(why):
- log.msg("BuildSlave.sendBuilderList (%s) failed" % self)
- log.err(why)
- # TODO: hang up on them?, without setBuilderList we can't use
- # them
- d.addCallbacks(_sent, _set_failed)
- return d
-
- def detached(self, mind):
- AbstractBuildSlave.detached(self, mind)
- self.botmaster.slaveLost(self)
- self.startMissingTimer()
-
- def buildFinished(self, sb):
- """This is called when a build on this slave is finished."""
- # If we're gracefully shutting down, and we have no more active
- # builders, then it's safe to disconnect
- if self.slave_status.getGraceful():
- active_builders = [sb for sb in self.slavebuilders.values()
- if sb.isBusy()]
- if len(active_builders) == 0:
- # Shut down!
- return self.shutdown()
- return defer.succeed(None)
-
-class AbstractLatentBuildSlave(AbstractBuildSlave):
- """A build slave that will start up a slave instance when needed.
-
- To use, subclass and implement start_instance and stop_instance.
-
- See ec2buildslave.py for a concrete example. Also see the stub example in
- test/test_slaves.py.
- """
-
- implements(ILatentBuildSlave)
-
- substantiated = False
- substantiation_deferred = None
- build_wait_timer = None
- _start_result = _shutdown_callback_handle = None
-
- def __init__(self, name, password, max_builds=None,
- notify_on_missing=[], missing_timeout=60*20,
- build_wait_timeout=60*10,
- properties={}):
- AbstractBuildSlave.__init__(
- self, name, password, max_builds, notify_on_missing,
- missing_timeout, properties)
- self.building = set()
- self.build_wait_timeout = build_wait_timeout
-
- def start_instance(self):
- # responsible for starting instance that will try to connect with
- # this master. Should return deferred. Problems should use an
- # errback.
- raise NotImplementedError
-
- def stop_instance(self, fast=False):
- # responsible for shutting down instance.
- raise NotImplementedError
-
- def substantiate(self, sb):
- if self.substantiated:
- self._clearBuildWaitTimer()
- self._setBuildWaitTimer()
- return defer.succeed(self)
- if self.substantiation_deferred is None:
- if self.parent and not self.missing_timer:
- # start timer. if timer times out, fail deferred
- self.missing_timer = reactor.callLater(
- self.missing_timeout,
- self._substantiation_failed, defer.TimeoutError())
- self.substantiation_deferred = defer.Deferred()
- if self.slave is None:
- self._substantiate() # start up instance
- # else: we're waiting for an old one to detach. the _substantiate
- # will be done in ``detached`` below.
- return self.substantiation_deferred
-
- def _substantiate(self):
- # register event trigger
- d = self.start_instance()
- self._shutdown_callback_handle = reactor.addSystemEventTrigger(
- 'before', 'shutdown', self._soft_disconnect, fast=True)
- def stash_reply(result):
- self._start_result = result
- def clean_up(failure):
- if self.missing_timer is not None:
- self.missing_timer.cancel()
- self._substantiation_failed(failure)
- if self._shutdown_callback_handle is not None:
- handle = self._shutdown_callback_handle
- del self._shutdown_callback_handle
- reactor.removeSystemEventTrigger(handle)
- return failure
- d.addCallbacks(stash_reply, clean_up)
- return d
-
- def attached(self, bot):
- if self.substantiation_deferred is None:
- log.msg('Slave %s received connection while not trying to '
- 'substantiate. Disconnecting.' % (self.slavename,))
- self._disconnect(bot)
- return defer.fail()
- return AbstractBuildSlave.attached(self, bot)
-
- def detached(self, mind):
- AbstractBuildSlave.detached(self, mind)
- if self.substantiation_deferred is not None:
- self._substantiate()
-
- def _substantiation_failed(self, failure):
- d = self.substantiation_deferred
- self.substantiation_deferred = None
- self.missing_timer = None
- d.errback(failure)
- self.insubstantiate()
- # notify people, but only if we're still in the config
- if not self.parent or not self.notify_on_missing:
- return
-
- status = buildmaster.getStatus()
- text = "The Buildbot working for '%s'\n" % status.getProjectName()
- text += ("has noticed that the latent buildslave named %s \n" %
- self.slavename)
- text += "never substantiated after a request\n"
- text += "\n"
- text += ("The request was made at %s (buildmaster-local time)\n" %
- time.ctime(time.time() - self.missing_timeout)) # approx
- text += "\n"
- text += "Sincerely,\n"
- text += " The Buildbot\n"
- text += " %s\n" % status.getProjectURL()
- subject = "Buildbot: buildslave %s never substantiated" % self.slavename
- return self._mail_missing_message(subject, text)
-
- def buildStarted(self, sb):
- assert self.substantiated
- self._clearBuildWaitTimer()
- self.building.add(sb.builder_name)
-
- def buildFinished(self, sb):
- self.building.remove(sb.builder_name)
- if not self.building:
- self._setBuildWaitTimer()
-
- def _clearBuildWaitTimer(self):
- if self.build_wait_timer is not None:
- if self.build_wait_timer.active():
- self.build_wait_timer.cancel()
- self.build_wait_timer = None
-
- def _setBuildWaitTimer(self):
- self._clearBuildWaitTimer()
- self.build_wait_timer = reactor.callLater(
- self.build_wait_timeout, self._soft_disconnect)
-
- def insubstantiate(self, fast=False):
- self._clearBuildWaitTimer()
- d = self.stop_instance(fast)
- if self._shutdown_callback_handle is not None:
- handle = self._shutdown_callback_handle
- del self._shutdown_callback_handle
- reactor.removeSystemEventTrigger(handle)
- self.substantiated = False
- self.building.clear() # just to be sure
- return d
-
- def _soft_disconnect(self, fast=False):
- d = AbstractBuildSlave.disconnect(self)
- if self.slave is not None:
- # this could be called when the slave needs to shut down, such as
- # in BotMaster.removeSlave, *or* when a new slave requests a
- # connection when we already have a slave. It's not clear what to
- # do in the second case: this shouldn't happen, and if it
- # does...if it's a latent slave, shutting down will probably kill
- # something we want...but we can't know what the status is. So,
- # here, we just do what should be appropriate for the first case,
- # and put our heads in the sand for the second, at least for now.
- # The best solution to the odd situation is removing it as a
- # possibilty: make the master in charge of connecting to the
- # slave, rather than vice versa. TODO.
- d = defer.DeferredList([d, self.insubstantiate(fast)])
- else:
- if self.substantiation_deferred is not None:
- # unlike the previous block, we don't expect this situation when
- # ``attached`` calls ``disconnect``, only when we get a simple
- # request to "go away".
- self.substantiation_deferred.errback()
- self.substantiation_deferred = None
- if self.missing_timer:
- self.missing_timer.cancel()
- self.missing_timer = None
- self.stop_instance()
- return d
-
- def disconnect(self):
- d = self._soft_disconnect()
- # this removes the slave from all builders. It won't come back
- # without a restart (or maybe a sighup)
- self.botmaster.slaveLost(self)
-
- def stopService(self):
- res = defer.maybeDeferred(AbstractBuildSlave.stopService, self)
- if self.slave is not None:
- d = self._soft_disconnect()
- res = defer.DeferredList([res, d])
- return res
-
- def updateSlave(self):
- """Called to add or remove builders after the slave has connected.
-
- Also called after botmaster's builders are initially set.
-
- @return: a Deferred that indicates when an attached slave has
- accepted the new builders and/or released the old ones."""
- for b in self.botmaster.getBuildersForSlave(self.slavename):
- if b.name not in self.slavebuilders:
- b.addLatentSlave(self)
- return AbstractBuildSlave.updateSlave(self)
-
- def sendBuilderList(self):
- d = AbstractBuildSlave.sendBuilderList(self)
- def _sent(slist):
- dl = []
- for name, remote in slist.items():
- # use get() since we might have changed our mind since then.
- # we're checking on the builder in addition to the
- # slavebuilders out of a bit of paranoia.
- b = self.botmaster.builders.get(name)
- sb = self.slavebuilders.get(name)
- if b and sb:
- d1 = sb.attached(self, remote, self.slave_commands)
- dl.append(d1)
- return defer.DeferredList(dl)
- def _set_failed(why):
- log.msg("BuildSlave.sendBuilderList (%s) failed" % self)
- log.err(why)
- # TODO: hang up on them?, without setBuilderList we can't use
- # them
- if self.substantiation_deferred:
- self.substantiation_deferred.errback()
- self.substantiation_deferred = None
- if self.missing_timer:
- self.missing_timer.cancel()
- self.missing_timer = None
- # TODO: maybe log? send an email?
- return why
- d.addCallbacks(_sent, _set_failed)
- def _substantiated(res):
- self.substantiated = True
- if self.substantiation_deferred:
- d = self.substantiation_deferred
- del self.substantiation_deferred
- res = self._start_result
- del self._start_result
- d.callback(res)
- # note that the missing_timer is already handled within
- # ``attached``
- if not self.building:
- self._setBuildWaitTimer()
- d.addCallback(_substantiated)
- return d
diff --git a/buildbot/buildbot/changes/__init__.py b/buildbot/buildbot/changes/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/buildbot/buildbot/changes/__init__.py
+++ /dev/null
diff --git a/buildbot/buildbot/changes/base.py b/buildbot/buildbot/changes/base.py
deleted file mode 100644
index 72c45bf..0000000
--- a/buildbot/buildbot/changes/base.py
+++ /dev/null
@@ -1,10 +0,0 @@
-
-from zope.interface import implements
-from twisted.application import service
-
-from buildbot.interfaces import IChangeSource
-from buildbot import util
-
-class ChangeSource(service.Service, util.ComparableMixin):
- implements(IChangeSource)
-
diff --git a/buildbot/buildbot/changes/bonsaipoller.py b/buildbot/buildbot/changes/bonsaipoller.py
deleted file mode 100644
index 2e319bb..0000000
--- a/buildbot/buildbot/changes/bonsaipoller.py
+++ /dev/null
@@ -1,320 +0,0 @@
-import time
-from xml.dom import minidom
-
-from twisted.python import log, failure
-from twisted.internet import reactor
-from twisted.internet.task import LoopingCall
-from twisted.web.client import getPage
-
-from buildbot.changes import base, changes
-
-class InvalidResultError(Exception):
- def __init__(self, value="InvalidResultError"):
- self.value = value
- def __str__(self):
- return repr(self.value)
-
-class EmptyResult(Exception):
- pass
-
-class NoMoreCiNodes(Exception):
- pass
-
-class NoMoreFileNodes(Exception):
- pass
-
-class BonsaiResult:
- """I hold a list of CiNodes"""
- def __init__(self, nodes=[]):
- self.nodes = nodes
-
- def __cmp__(self, other):
- if len(self.nodes) != len(other.nodes):
- return False
- for i in range(len(self.nodes)):
- if self.nodes[i].log != other.nodes[i].log \
- or self.nodes[i].who != other.nodes[i].who \
- or self.nodes[i].date != other.nodes[i].date \
- or len(self.nodes[i].files) != len(other.nodes[i].files):
- return -1
-
- for j in range(len(self.nodes[i].files)):
- if self.nodes[i].files[j].revision \
- != other.nodes[i].files[j].revision \
- or self.nodes[i].files[j].filename \
- != other.nodes[i].files[j].filename:
- return -1
-
- return 0
-
-class CiNode:
- """I hold information baout one <ci> node, including a list of files"""
- def __init__(self, log="", who="", date=0, files=[]):
- self.log = log
- self.who = who
- self.date = date
- self.files = files
-
-class FileNode:
- """I hold information about one <f> node"""
- def __init__(self, revision="", filename=""):
- self.revision = revision
- self.filename = filename
-
-class BonsaiParser:
- """I parse the XML result from a bonsai cvsquery."""
-
- def __init__(self, data):
- try:
- # this is a fix for non-ascii characters
- # because bonsai does not give us an encoding to work with
- # it impossible to be 100% sure what to decode it as but latin1 covers
- # the broadest base
- data = data.decode("latin1")
- data = data.encode("ascii", "replace")
- self.dom = minidom.parseString(data)
- log.msg(data)
- except:
- raise InvalidResultError("Malformed XML in result")
-
- self.ciNodes = self.dom.getElementsByTagName("ci")
- self.currentCiNode = None # filled in by _nextCiNode()
- self.fileNodes = None # filled in by _nextCiNode()
- self.currentFileNode = None # filled in by _nextFileNode()
- self.bonsaiResult = self._parseData()
-
- def getData(self):
- return self.bonsaiResult
-
- def _parseData(self):
- """Returns data from a Bonsai cvsquery in a BonsaiResult object"""
- nodes = []
- try:
- while self._nextCiNode():
- files = []
- try:
- while self._nextFileNode():
- files.append(FileNode(self._getRevision(),
- self._getFilename()))
- except NoMoreFileNodes:
- pass
- except InvalidResultError:
- raise
- cinode = CiNode(self._getLog(), self._getWho(),
- self._getDate(), files)
- # hack around bonsai xml output bug for empty check-in comments
- if not cinode.log and nodes and \
- not nodes[-1].log and \
- cinode.who == nodes[-1].who and \
- cinode.date == nodes[-1].date:
- nodes[-1].files += cinode.files
- else:
- nodes.append(cinode)
-
- except NoMoreCiNodes:
- pass
- except InvalidResultError, EmptyResult:
- raise
-
- return BonsaiResult(nodes)
-
-
- def _nextCiNode(self):
- """Iterates to the next <ci> node and fills self.fileNodes with
- child <f> nodes"""
- try:
- self.currentCiNode = self.ciNodes.pop(0)
- if len(self.currentCiNode.getElementsByTagName("files")) > 1:
- raise InvalidResultError("Multiple <files> for one <ci>")
-
- self.fileNodes = self.currentCiNode.getElementsByTagName("f")
- except IndexError:
- # if there was zero <ci> nodes in the result
- if not self.currentCiNode:
- raise EmptyResult
- else:
- raise NoMoreCiNodes
-
- return True
-
- def _nextFileNode(self):
- """Iterates to the next <f> node"""
- try:
- self.currentFileNode = self.fileNodes.pop(0)
- except IndexError:
- raise NoMoreFileNodes
-
- return True
-
- def _getLog(self):
- """Returns the log of the current <ci> node"""
- logs = self.currentCiNode.getElementsByTagName("log")
- if len(logs) < 1:
- raise InvalidResultError("No log present")
- elif len(logs) > 1:
- raise InvalidResultError("Multiple logs present")
-
- # catch empty check-in comments
- if logs[0].firstChild:
- return logs[0].firstChild.data
- return ''
-
- def _getWho(self):
- """Returns the e-mail address of the commiter"""
- # convert unicode string to regular string
- return str(self.currentCiNode.getAttribute("who"))
-
- def _getDate(self):
- """Returns the date (unix time) of the commit"""
- # convert unicode number to regular one
- try:
- commitDate = int(self.currentCiNode.getAttribute("date"))
- except ValueError:
- raise InvalidResultError
-
- return commitDate
-
- def _getFilename(self):
- """Returns the filename of the current <f> node"""
- try:
- filename = self.currentFileNode.firstChild.data
- except AttributeError:
- raise InvalidResultError("Missing filename")
-
- return filename
-
- def _getRevision(self):
- return self.currentFileNode.getAttribute("rev")
-
-
-class BonsaiPoller(base.ChangeSource):
- """This source will poll a bonsai server for changes and submit
- them to the change master."""
-
- compare_attrs = ["bonsaiURL", "pollInterval", "tree",
- "module", "branch", "cvsroot"]
-
- parent = None # filled in when we're added
- loop = None
- volatile = ['loop']
- working = False
-
- def __init__(self, bonsaiURL, module, branch, tree="default",
- cvsroot="/cvsroot", pollInterval=30):
- """
- @type bonsaiURL: string
- @param bonsaiURL: The base URL of the Bonsai server
- (ie. http://bonsai.mozilla.org)
- @type module: string
- @param module: The module to look for changes in. Commonly
- this is 'all'
- @type branch: string
- @param branch: The branch to look for changes in. This must
- match the
- 'branch' option for the Scheduler.
- @type tree: string
- @param tree: The tree to look for changes in. Commonly this
- is 'all'
- @type cvsroot: string
- @param cvsroot: The cvsroot of the repository. Usually this is
- '/cvsroot'
- @type pollInterval: int
- @param pollInterval: The time (in seconds) between queries for
- changes
- """
-
- self.bonsaiURL = bonsaiURL
- self.module = module
- self.branch = branch
- self.tree = tree
- self.cvsroot = cvsroot
- self.pollInterval = pollInterval
- self.lastChange = time.time()
- self.lastPoll = time.time()
-
- def startService(self):
- self.loop = LoopingCall(self.poll)
- base.ChangeSource.startService(self)
-
- reactor.callLater(0, self.loop.start, self.pollInterval)
-
- def stopService(self):
- self.loop.stop()
- return base.ChangeSource.stopService(self)
-
- def describe(self):
- str = ""
- str += "Getting changes from the Bonsai service running at %s " \
- % self.bonsaiURL
- str += "<br>Using tree: %s, branch: %s, and module: %s" % (self.tree, \
- self.branch, self.module)
- return str
-
- def poll(self):
- if self.working:
- log.msg("Not polling Bonsai because last poll is still working")
- else:
- self.working = True
- d = self._get_changes()
- d.addCallback(self._process_changes)
- d.addCallbacks(self._finished_ok, self._finished_failure)
- return
-
- def _finished_ok(self, res):
- assert self.working
- self.working = False
-
- # check for failure -- this is probably never hit but the twisted docs
- # are not clear enough to be sure. it is being kept "just in case"
- if isinstance(res, failure.Failure):
- log.msg("Bonsai poll failed: %s" % res)
- return res
-
- def _finished_failure(self, res):
- log.msg("Bonsai poll failed: %s" % res)
- assert self.working
- self.working = False
- return None # eat the failure
-
- def _make_url(self):
- args = ["treeid=%s" % self.tree, "module=%s" % self.module,
- "branch=%s" % self.branch, "branchtype=match",
- "sortby=Date", "date=explicit",
- "mindate=%d" % self.lastChange,
- "maxdate=%d" % int(time.time()),
- "cvsroot=%s" % self.cvsroot, "xml=1"]
- # build the bonsai URL
- url = self.bonsaiURL
- url += "/cvsquery.cgi?"
- url += "&".join(args)
-
- return url
-
- def _get_changes(self):
- url = self._make_url()
- log.msg("Polling Bonsai tree at %s" % url)
-
- self.lastPoll = time.time()
- # get the page, in XML format
- return getPage(url, timeout=self.pollInterval)
-
- def _process_changes(self, query):
- try:
- bp = BonsaiParser(query)
- result = bp.getData()
- except InvalidResultError, e:
- log.msg("Could not process Bonsai query: " + e.value)
- return
- except EmptyResult:
- return
-
- for cinode in result.nodes:
- files = [file.filename + ' (revision '+file.revision+')'
- for file in cinode.files]
- c = changes.Change(who = cinode.who,
- files = files,
- comments = cinode.log,
- when = cinode.date,
- branch = self.branch)
- self.parent.addChange(c)
- self.lastChange = self.lastPoll
diff --git a/buildbot/buildbot/changes/changes.py b/buildbot/buildbot/changes/changes.py
deleted file mode 100644
index 7d399e0..0000000
--- a/buildbot/buildbot/changes/changes.py
+++ /dev/null
@@ -1,288 +0,0 @@
-
-import sys, os, time
-from cPickle import dump
-
-from zope.interface import implements
-from twisted.python import log
-from twisted.internet import defer
-from twisted.application import service
-from twisted.web import html
-
-from buildbot import interfaces, util
-
-html_tmpl = """
-<p>Changed by: <b>%(who)s</b><br />
-Changed at: <b>%(at)s</b><br />
-%(branch)s
-%(revision)s
-<br />
-
-Changed files:
-%(files)s
-
-Comments:
-%(comments)s
-</p>
-"""
-
-class Change:
- """I represent a single change to the source tree. This may involve
- several files, but they are all changed by the same person, and there is
- a change comment for the group as a whole.
-
- If the version control system supports sequential repository- (or
- branch-) wide change numbers (like SVN, P4, and Arch), then revision=
- should be set to that number. The highest such number will be used at
- checkout time to get the correct set of files.
-
- If it does not (like CVS), when= should be set to the timestamp (seconds
- since epoch, as returned by time.time()) when the change was made. when=
- will be filled in for you (to the current time) if you omit it, which is
- suitable for ChangeSources which have no way of getting more accurate
- timestamps.
-
- Changes should be submitted to ChangeMaster.addChange() in
- chronologically increasing order. Out-of-order changes will probably
- cause the html.Waterfall display to be corrupted."""
-
- implements(interfaces.IStatusEvent)
-
- number = None
-
- links = []
- branch = None
- revision = None # used to create a source-stamp
-
- def __init__(self, who, files, comments, isdir=0, links=[],
- revision=None, when=None, branch=None, category=None):
- self.who = who
- self.comments = comments
- self.isdir = isdir
- self.links = links
- self.revision = revision
- if when is None:
- when = util.now()
- self.when = when
- self.branch = branch
- self.category = category
-
- # keep a sorted list of the files, for easier display
- self.files = files[:]
- self.files.sort()
-
- def asText(self):
- data = ""
- data += self.getFileContents()
- data += "At: %s\n" % self.getTime()
- data += "Changed By: %s\n" % self.who
- data += "Comments: %s\n\n" % self.comments
- return data
-
- def asHTML(self):
- links = []
- for file in self.files:
- link = filter(lambda s: s.find(file) != -1, self.links)
- if len(link) == 1:
- # could get confused
- links.append('<a href="%s"><b>%s</b></a>' % (link[0], file))
- else:
- links.append('<b>%s</b>' % file)
- revision = ""
- if self.revision:
- revision = "Revision: <b>%s</b><br />\n" % self.revision
- branch = ""
- if self.branch:
- branch = "Branch: <b>%s</b><br />\n" % self.branch
-
- kwargs = { 'who' : html.escape(self.who),
- 'at' : self.getTime(),
- 'files' : html.UL(links) + '\n',
- 'revision': revision,
- 'branch' : branch,
- 'comments': html.PRE(self.comments) }
- return html_tmpl % kwargs
-
- def get_HTML_box(self, url):
- """Return the contents of a TD cell for the waterfall display.
-
- @param url: the URL that points to an HTML page that will render
- using our asHTML method. The Change is free to use this or ignore it
- as it pleases.
-
- @return: the HTML that will be put inside the table cell. Typically
- this is just a single href named after the author of the change and
- pointing at the passed-in 'url'.
- """
- who = self.getShortAuthor()
- if self.comments is None:
- title = ""
- else:
- title = html.escape(self.comments)
- return '<a href="%s" title="%s">%s</a>' % (url,
- title,
- html.escape(who))
-
- def getShortAuthor(self):
- return self.who
-
- def getTime(self):
- if not self.when:
- return "?"
- return time.strftime("%a %d %b %Y %H:%M:%S",
- time.localtime(self.when))
-
- def getTimes(self):
- return (self.when, None)
-
- def getText(self):
- return [html.escape(self.who)]
- def getLogs(self):
- return {}
-
- def getFileContents(self):
- data = ""
- if len(self.files) == 1:
- if self.isdir:
- data += "Directory: %s\n" % self.files[0]
- else:
- data += "File: %s\n" % self.files[0]
- else:
- data += "Files:\n"
- for f in self.files:
- data += " %s\n" % f
- return data
-
-class ChangeMaster(service.MultiService):
-
- """This is the master-side service which receives file change
- notifications from CVS. It keeps a log of these changes, enough to
- provide for the HTML waterfall display, and to tell
- temporarily-disconnected bots what they missed while they were
- offline.
-
- Change notifications come from two different kinds of sources. The first
- is a PB service (servicename='changemaster', perspectivename='change'),
- which provides a remote method called 'addChange', which should be
- called with a dict that has keys 'filename' and 'comments'.
-
- The second is a list of objects derived from the ChangeSource class.
- These are added with .addSource(), which also sets the .changemaster
- attribute in the source to point at the ChangeMaster. When the
- application begins, these will be started with .start() . At shutdown
- time, they will be terminated with .stop() . They must be persistable.
- They are expected to call self.changemaster.addChange() with Change
- objects.
-
- There are several different variants of the second type of source:
-
- - L{buildbot.changes.mail.MaildirSource} watches a maildir for CVS
- commit mail. It uses DNotify if available, or polls every 10
- seconds if not. It parses incoming mail to determine what files
- were changed.
-
- - L{buildbot.changes.freshcvs.FreshCVSSource} makes a PB
- connection to the CVSToys 'freshcvs' daemon and relays any
- changes it announces.
-
- """
-
- implements(interfaces.IEventSource)
-
- debug = False
- # todo: use Maildir class to watch for changes arriving by mail
-
- def __init__(self):
- service.MultiService.__init__(self)
- self.changes = []
- # self.basedir must be filled in by the parent
- self.nextNumber = 1
-
- def addSource(self, source):
- assert interfaces.IChangeSource.providedBy(source)
- assert service.IService.providedBy(source)
- if self.debug:
- print "ChangeMaster.addSource", source
- source.setServiceParent(self)
-
- def removeSource(self, source):
- assert source in self
- if self.debug:
- print "ChangeMaster.removeSource", source, source.parent
- d = defer.maybeDeferred(source.disownServiceParent)
- return d
-
- def addChange(self, change):
- """Deliver a file change event. The event should be a Change object.
- This method will timestamp the object as it is received."""
- log.msg("adding change, who %s, %d files, rev=%s, branch=%s, "
- "comments %s, category %s" % (change.who, len(change.files),
- change.revision, change.branch,
- change.comments, change.category))
- change.number = self.nextNumber
- self.nextNumber += 1
- self.changes.append(change)
- self.parent.addChange(change)
- # TODO: call pruneChanges after a while
-
- def pruneChanges(self):
- self.changes = self.changes[-100:] # or something
-
- def eventGenerator(self, branches=[]):
- for i in range(len(self.changes)-1, -1, -1):
- c = self.changes[i]
- if not branches or c.branch in branches:
- yield c
-
- def getChangeNumbered(self, num):
- if not self.changes:
- return None
- first = self.changes[0].number
- if first + len(self.changes)-1 != self.changes[-1].number:
- log.msg(self,
- "lost a change somewhere: [0] is %d, [%d] is %d" % \
- (self.changes[0].number,
- len(self.changes) - 1,
- self.changes[-1].number))
- for c in self.changes:
- log.msg("c[%d]: " % c.number, c)
- return None
- offset = num - first
- log.msg(self, "offset", offset)
- return self.changes[offset]
-
- def __getstate__(self):
- d = service.MultiService.__getstate__(self)
- del d['parent']
- del d['services'] # lose all children
- del d['namedServices']
- return d
-
- def __setstate__(self, d):
- self.__dict__ = d
- # self.basedir must be set by the parent
- self.services = [] # they'll be repopulated by readConfig
- self.namedServices = {}
-
-
- def saveYourself(self):
- filename = os.path.join(self.basedir, "changes.pck")
- tmpfilename = filename + ".tmp"
- try:
- dump(self, open(tmpfilename, "wb"))
- if sys.platform == 'win32':
- # windows cannot rename a file on top of an existing one
- if os.path.exists(filename):
- os.unlink(filename)
- os.rename(tmpfilename, filename)
- except Exception, e:
- log.msg("unable to save changes")
- log.err()
-
- def stopService(self):
- self.saveYourself()
- return service.MultiService.stopService(self)
-
-class TestChangeMaster(ChangeMaster):
- """A ChangeMaster for use in tests that does not save itself"""
- def stopService(self):
- return service.MultiService.stopService(self)
diff --git a/buildbot/buildbot/changes/dnotify.py b/buildbot/buildbot/changes/dnotify.py
deleted file mode 100644
index 0674248..0000000
--- a/buildbot/buildbot/changes/dnotify.py
+++ /dev/null
@@ -1,100 +0,0 @@
-
-import fcntl, signal, os
-
-class DNotify_Handler:
- def __init__(self):
- self.watchers = {}
- self.installed = 0
- def install(self):
- if self.installed:
- return
- signal.signal(signal.SIGIO, self.fire)
- self.installed = 1
- def uninstall(self):
- if not self.installed:
- return
- signal.signal(signal.SIGIO, signal.SIG_DFL)
- self.installed = 0
- def add(self, watcher):
- self.watchers[watcher.fd] = watcher
- self.install()
- def remove(self, watcher):
- if self.watchers.has_key(watcher.fd):
- del(self.watchers[watcher.fd])
- if not self.watchers:
- self.uninstall()
- def fire(self, signum, frame):
- # this is the signal handler
- # without siginfo_t, we must fire them all
- for watcher in self.watchers.values():
- watcher.callback()
-
-class DNotify:
- DN_ACCESS = fcntl.DN_ACCESS # a file in the directory was read
- DN_MODIFY = fcntl.DN_MODIFY # a file was modified (write,truncate)
- DN_CREATE = fcntl.DN_CREATE # a file was created
- DN_DELETE = fcntl.DN_DELETE # a file was unlinked
- DN_RENAME = fcntl.DN_RENAME # a file was renamed
- DN_ATTRIB = fcntl.DN_ATTRIB # a file had attributes changed (chmod,chown)
-
- handler = [None]
-
- def __init__(self, dirname, callback=None,
- flags=[DN_MODIFY,DN_CREATE,DN_DELETE,DN_RENAME]):
-
- """This object watches a directory for changes. The .callback
- attribute should be set to a function to be run every time something
- happens to it. Be aware that it will be called more times than you
- expect."""
-
- if callback:
- self.callback = callback
- else:
- self.callback = self.fire
- self.dirname = dirname
- self.flags = reduce(lambda x, y: x | y, flags) | fcntl.DN_MULTISHOT
- self.fd = os.open(dirname, os.O_RDONLY)
- # ideally we would move the notification to something like SIGRTMIN,
- # (to free up SIGIO) and use sigaction to have the signal handler
- # receive a structure with the fd number. But python doesn't offer
- # either.
- if not self.handler[0]:
- self.handler[0] = DNotify_Handler()
- self.handler[0].add(self)
- fcntl.fcntl(self.fd, fcntl.F_NOTIFY, self.flags)
- def remove(self):
- self.handler[0].remove(self)
- os.close(self.fd)
- def fire(self):
- print self.dirname, "changed!"
-
-def test_dnotify1():
- d = DNotify(".")
- while 1:
- signal.pause()
-
-def test_dnotify2():
- # create ./foo/, create/delete files in ./ and ./foo/ while this is
- # running. Notice how both notifiers are fired when anything changes;
- # this is an unfortunate side-effect of the lack of extended sigaction
- # support in Python.
- count = [0]
- d1 = DNotify(".")
- def fire1(count=count, d1=d1):
- print "./ changed!", count[0]
- count[0] += 1
- if count[0] > 5:
- d1.remove()
- del(d1)
- # change the callback, since we can't define it until after we have the
- # dnotify object. Hmm, unless we give the dnotify to the callback.
- d1.callback = fire1
- def fire2(): print "foo/ changed!"
- d2 = DNotify("foo", fire2)
- while 1:
- signal.pause()
-
-
-if __name__ == '__main__':
- test_dnotify2()
-
diff --git a/buildbot/buildbot/changes/freshcvs.py b/buildbot/buildbot/changes/freshcvs.py
deleted file mode 100644
index 53a2ac4..0000000
--- a/buildbot/buildbot/changes/freshcvs.py
+++ /dev/null
@@ -1,144 +0,0 @@
-
-import os.path
-
-from zope.interface import implements
-from twisted.cred import credentials
-from twisted.spread import pb
-from twisted.application.internet import TCPClient
-from twisted.python import log
-
-import cvstoys.common # to make sure VersionedPatch gets registered
-
-from buildbot.interfaces import IChangeSource
-from buildbot.pbutil import ReconnectingPBClientFactory
-from buildbot.changes.changes import Change
-from buildbot import util
-
-class FreshCVSListener(pb.Referenceable):
- def remote_notify(self, root, files, message, user):
- try:
- self.source.notify(root, files, message, user)
- except Exception, e:
- print "notify failed"
- log.err()
-
- def remote_goodbye(self, message):
- pass
-
-class FreshCVSConnectionFactory(ReconnectingPBClientFactory):
-
- def gotPerspective(self, perspective):
- log.msg("connected to FreshCVS daemon")
- ReconnectingPBClientFactory.gotPerspective(self, perspective)
- self.source.connected = True
- # TODO: freshcvs-1.0.10 doesn't handle setFilter correctly, it will
- # be fixed in the upcoming 1.0.11 . I haven't been able to test it
- # to make sure the failure mode is survivable, so I'll just leave
- # this out for now.
- return
- if self.source.prefix is not None:
- pathfilter = "^%s" % self.source.prefix
- d = perspective.callRemote("setFilter",
- None, pathfilter, None)
- # ignore failures, setFilter didn't work in 1.0.10 and this is
- # just an optimization anyway
- d.addErrback(lambda f: None)
-
- def clientConnectionLost(self, connector, reason):
- ReconnectingPBClientFactory.clientConnectionLost(self, connector,
- reason)
- self.source.connected = False
-
-class FreshCVSSourceNewcred(TCPClient, util.ComparableMixin):
- """This source will connect to a FreshCVS server associated with one or
- more CVS repositories. Each time a change is committed to a repository,
- the server will send us a message describing the change. This message is
- used to build a Change object, which is then submitted to the
- ChangeMaster.
-
- This class handles freshcvs daemons which use newcred. CVSToys-1.0.9
- does not, later versions might.
- """
-
- implements(IChangeSource)
- compare_attrs = ["host", "port", "username", "password", "prefix"]
-
- changemaster = None # filled in when we're added
- connected = False
-
- def __init__(self, host, port, user, passwd, prefix=None):
- self.host = host
- self.port = port
- self.username = user
- self.password = passwd
- if prefix is not None and not prefix.endswith("/"):
- log.msg("WARNING: prefix '%s' should probably end with a slash" \
- % prefix)
- self.prefix = prefix
- self.listener = l = FreshCVSListener()
- l.source = self
- self.factory = f = FreshCVSConnectionFactory()
- f.source = self
- self.creds = credentials.UsernamePassword(user, passwd)
- f.startLogin(self.creds, client=l)
- TCPClient.__init__(self, host, port, f)
-
- def __repr__(self):
- return "<FreshCVSSource where=%s, prefix=%s>" % \
- ((self.host, self.port), self.prefix)
-
- def describe(self):
- online = ""
- if not self.connected:
- online = " [OFFLINE]"
- return "freshcvs %s:%s%s" % (self.host, self.port, online)
-
- def notify(self, root, files, message, user):
- pathnames = []
- isdir = 0
- for f in files:
- if not isinstance(f, (cvstoys.common.VersionedPatch,
- cvstoys.common.Directory)):
- continue
- pathname, filename = f.pathname, f.filename
- #r1, r2 = getattr(f, 'r1', None), getattr(f, 'r2', None)
- if isinstance(f, cvstoys.common.Directory):
- isdir = 1
- path = os.path.join(pathname, filename)
- log.msg("FreshCVS notify '%s'" % path)
- if self.prefix:
- if path.startswith(self.prefix):
- path = path[len(self.prefix):]
- else:
- continue
- pathnames.append(path)
- if pathnames:
- # now() is close enough: FreshCVS *is* realtime, after all
- when=util.now()
- c = Change(user, pathnames, message, isdir, when=when)
- self.parent.addChange(c)
-
-class FreshCVSSourceOldcred(FreshCVSSourceNewcred):
- """This is for older freshcvs daemons (from CVSToys-1.0.9 and earlier).
- """
-
- def __init__(self, host, port, user, passwd,
- serviceName="cvstoys.notify", prefix=None):
- self.host = host
- self.port = port
- self.prefix = prefix
- self.listener = l = FreshCVSListener()
- l.source = self
- self.factory = f = FreshCVSConnectionFactory()
- f.source = self
- f.startGettingPerspective(user, passwd, serviceName, client=l)
- TCPClient.__init__(self, host, port, f)
-
- def __repr__(self):
- return "<FreshCVSSourceOldcred where=%s, prefix=%s>" % \
- ((self.host, self.port), self.prefix)
-
-# this is suitable for CVSToys-1.0.10 and later. If you run CVSToys-1.0.9 or
-# earlier, use FreshCVSSourceOldcred instead.
-FreshCVSSource = FreshCVSSourceNewcred
-
diff --git a/buildbot/buildbot/changes/hgbuildbot.py b/buildbot/buildbot/changes/hgbuildbot.py
deleted file mode 100644
index 1f4ed34..0000000
--- a/buildbot/buildbot/changes/hgbuildbot.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# hgbuildbot.py - mercurial hooks for buildbot
-#
-# Copyright 2007 Frederic Leroy <fredo@starox.org>
-#
-# This software may be used and distributed according to the terms
-# of the GNU General Public License, incorporated herein by reference.
-
-# hook extension to send change notifications to buildbot when a changeset is
-# brought into the repository from elsewhere.
-#
-# default mode is to use mercurial branch
-#
-# to use, configure hgbuildbot in .hg/hgrc like this:
-#
-# [hooks]
-# changegroup = python:buildbot.changes.hgbuildbot.hook
-#
-# [hgbuildbot]
-# # config items go in here
-#
-# config items:
-#
-# REQUIRED:
-# master = host:port # host to send buildbot changes
-#
-# OPTIONAL:
-# branchtype = inrepo|dirname # dirname: branch = name of directory
-# # containing the repository
-# #
-# # inrepo: branch = mercurial branch
-#
-# branch = branchname # if set, branch is always branchname
-
-import os
-
-from mercurial.i18n import gettext as _
-from mercurial.node import bin, hex, nullid
-from mercurial.context import workingctx
-
-# mercurial's on-demand-importing hacks interfere with the:
-#from zope.interface import Interface
-# that Twisted needs to do, so disable it.
-try:
- from mercurial import demandimport
- demandimport.disable()
-except ImportError:
- pass
-
-from buildbot.clients import sendchange
-from twisted.internet import defer, reactor
-
-
-def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
- # read config parameters
- master = ui.config('hgbuildbot', 'master')
- if master:
- branchtype = ui.config('hgbuildbot', 'branchtype')
- branch = ui.config('hgbuildbot', 'branch')
- else:
- ui.write("* You must add a [hgbuildbot] section to .hg/hgrc in "
- "order to use buildbot hook\n")
- return
-
- if branch is None:
- if branchtype is not None:
- if branchtype == 'dirname':
- branch = os.path.basename(os.getcwd())
- if branchtype == 'inrepo':
- branch = workingctx(repo).branch()
-
- if hooktype == 'changegroup':
- s = sendchange.Sender(master, None)
- d = defer.Deferred()
- reactor.callLater(0, d.callback, None)
- # process changesets
- def _send(res, c):
- ui.status("rev %s sent\n" % c['revision'])
- return s.send(c['branch'], c['revision'], c['comments'],
- c['files'], c['username'])
-
- try: # first try Mercurial 1.1+ api
- start = repo[node].rev()
- end = len(repo)
- except TypeError: # else fall back to old api
- start = repo.changelog.rev(bin(node))
- end = repo.changelog.count()
-
- for rev in xrange(start, end):
- # send changeset
- node = repo.changelog.node(rev)
- manifest, user, (time, timezone), files, desc, extra = repo.changelog.read(node)
- parents = filter(lambda p: not p == nullid, repo.changelog.parents(node))
- if branchtype == 'inrepo':
- branch = extra['branch']
- # merges don't always contain files, but at least one file is required by buildbot
- if len(parents) > 1 and not files:
- files = ["merge"]
- change = {
- 'master': master,
- 'username': user,
- 'revision': hex(node),
- 'comments': desc,
- 'files': files,
- 'branch': branch
- }
- d.addCallback(_send, change)
-
- d.addCallbacks(s.printSuccess, s.printFailure)
- d.addBoth(s.stop)
- s.run()
- else:
- ui.status(_('hgbuildbot: hook %s not supported\n') % hooktype)
- return
-
diff --git a/buildbot/buildbot/changes/mail.py b/buildbot/buildbot/changes/mail.py
deleted file mode 100644
index 7d86d47..0000000
--- a/buildbot/buildbot/changes/mail.py
+++ /dev/null
@@ -1,458 +0,0 @@
-# -*- test-case-name: buildbot.test.test_mailparse -*-
-
-"""
-Parse various kinds of 'CVS notify' email.
-"""
-import os, re
-from email import message_from_file
-from email.Utils import parseaddr
-from email.Iterators import body_line_iterator
-
-from zope.interface import implements
-from twisted.python import log
-from buildbot import util
-from buildbot.interfaces import IChangeSource
-from buildbot.changes import changes
-from buildbot.changes.maildir import MaildirService
-
-class MaildirSource(MaildirService, util.ComparableMixin):
- """This source will watch a maildir that is subscribed to a FreshCVS
- change-announcement mailing list.
- """
- implements(IChangeSource)
-
- compare_attrs = ["basedir", "pollinterval"]
- name = None
-
- def __init__(self, maildir, prefix=None):
- MaildirService.__init__(self, maildir)
- self.prefix = prefix
- if prefix and not prefix.endswith("/"):
- log.msg("%s: you probably want your prefix=('%s') to end with "
- "a slash")
-
- def describe(self):
- return "%s mailing list in maildir %s" % (self.name, self.basedir)
-
- def messageReceived(self, filename):
- path = os.path.join(self.basedir, "new", filename)
- change = self.parse_file(open(path, "r"), self.prefix)
- if change:
- self.parent.addChange(change)
- os.rename(os.path.join(self.basedir, "new", filename),
- os.path.join(self.basedir, "cur", filename))
-
- def parse_file(self, fd, prefix=None):
- m = message_from_file(fd)
- return self.parse(m, prefix)
-
-class FCMaildirSource(MaildirSource):
- name = "FreshCVS"
-
- def parse(self, m, prefix=None):
- """Parse mail sent by FreshCVS"""
-
- # FreshCVS sets From: to "user CVS <user>", but the <> part may be
- # modified by the MTA (to include a local domain)
- name, addr = parseaddr(m["from"])
- if not name:
- return None # no From means this message isn't from FreshCVS
- cvs = name.find(" CVS")
- if cvs == -1:
- return None # this message isn't from FreshCVS
- who = name[:cvs]
-
- # we take the time of receipt as the time of checkin. Not correct,
- # but it avoids the out-of-order-changes issue. See the comment in
- # parseSyncmail about using the 'Date:' header
- when = util.now()
-
- files = []
- comments = ""
- isdir = 0
- lines = list(body_line_iterator(m))
- while lines:
- line = lines.pop(0)
- if line == "Modified files:\n":
- break
- while lines:
- line = lines.pop(0)
- if line == "\n":
- break
- line = line.rstrip("\n")
- linebits = line.split(None, 1)
- file = linebits[0]
- if prefix:
- # insist that the file start with the prefix: FreshCVS sends
- # changes we don't care about too
- if file.startswith(prefix):
- file = file[len(prefix):]
- else:
- continue
- if len(linebits) == 1:
- isdir = 1
- elif linebits[1] == "0 0":
- isdir = 1
- files.append(file)
- while lines:
- line = lines.pop(0)
- if line == "Log message:\n":
- break
- # message is terminated by "ViewCVS links:" or "Index:..." (patch)
- while lines:
- line = lines.pop(0)
- if line == "ViewCVS links:\n":
- break
- if line.find("Index: ") == 0:
- break
- comments += line
- comments = comments.rstrip() + "\n"
-
- if not files:
- return None
-
- change = changes.Change(who, files, comments, isdir, when=when)
-
- return change
-
-class SyncmailMaildirSource(MaildirSource):
- name = "Syncmail"
-
- def parse(self, m, prefix=None):
- """Parse messages sent by the 'syncmail' program, as suggested by the
- sourceforge.net CVS Admin documentation. Syncmail is maintained at
- syncmail.sf.net .
- """
- # pretty much the same as freshcvs mail, not surprising since CVS is
- # the one creating most of the text
-
- # The mail is sent from the person doing the checkin. Assume that the
- # local username is enough to identify them (this assumes a one-server
- # cvs-over-rsh environment rather than the server-dirs-shared-over-NFS
- # model)
- name, addr = parseaddr(m["from"])
- if not addr:
- return None # no From means this message isn't from FreshCVS
- at = addr.find("@")
- if at == -1:
- who = addr # might still be useful
- else:
- who = addr[:at]
-
- # we take the time of receipt as the time of checkin. Not correct (it
- # depends upon the email latency), but it avoids the
- # out-of-order-changes issue. Also syncmail doesn't give us anything
- # better to work with, unless you count pulling the v1-vs-v2
- # timestamp out of the diffs, which would be ugly. TODO: Pulling the
- # 'Date:' header from the mail is a possibility, and
- # email.Utils.parsedate_tz may be useful. It should be configurable,
- # however, because there are a lot of broken clocks out there.
- when = util.now()
-
- subject = m["subject"]
- # syncmail puts the repository-relative directory in the subject:
- # mprefix + "%(dir)s %(file)s,%(oldversion)s,%(newversion)s", where
- # 'mprefix' is something that could be added by a mailing list
- # manager.
- # this is the only reasonable way to determine the directory name
- space = subject.find(" ")
- if space != -1:
- directory = subject[:space]
- else:
- directory = subject
-
- files = []
- comments = ""
- isdir = 0
- branch = None
-
- lines = list(body_line_iterator(m))
- while lines:
- line = lines.pop(0)
-
- if (line == "Modified Files:\n" or
- line == "Added Files:\n" or
- line == "Removed Files:\n"):
- break
-
- while lines:
- line = lines.pop(0)
- if line == "\n":
- break
- if line == "Log Message:\n":
- lines.insert(0, line)
- break
- line = line.lstrip()
- line = line.rstrip()
- # note: syncmail will send one email per directory involved in a
- # commit, with multiple files if they were in the same directory.
- # Unlike freshCVS, it makes no attempt to collect all related
- # commits into a single message.
-
- # note: syncmail will report a Tag underneath the ... Files: line
- # e.g.: Tag: BRANCH-DEVEL
-
- if line.startswith('Tag:'):
- branch = line.split(' ')[-1].rstrip()
- continue
-
- thesefiles = line.split(" ")
- for f in thesefiles:
- f = directory + "/" + f
- if prefix:
- # insist that the file start with the prefix: we may get
- # changes we don't care about too
- if f.startswith(prefix):
- f = f[len(prefix):]
- else:
- continue
- break
- # TODO: figure out how new directories are described, set
- # .isdir
- files.append(f)
-
- if not files:
- return None
-
- while lines:
- line = lines.pop(0)
- if line == "Log Message:\n":
- break
- # message is terminated by "Index:..." (patch) or "--- NEW FILE.."
- # or "--- filename DELETED ---". Sigh.
- while lines:
- line = lines.pop(0)
- if line.find("Index: ") == 0:
- break
- if re.search(r"^--- NEW FILE", line):
- break
- if re.search(r" DELETED ---$", line):
- break
- comments += line
- comments = comments.rstrip() + "\n"
-
- change = changes.Change(who, files, comments, isdir, when=when,
- branch=branch)
-
- return change
-
-# Bonsai mail parser by Stephen Davis.
-#
-# This handles changes for CVS repositories that are watched by Bonsai
-# (http://www.mozilla.org/bonsai.html)
-
-# A Bonsai-formatted email message looks like:
-#
-# C|1071099907|stephend|/cvs|Sources/Scripts/buildbot|bonsai.py|1.2|||18|7
-# A|1071099907|stephend|/cvs|Sources/Scripts/buildbot|master.cfg|1.1|||18|7
-# R|1071099907|stephend|/cvs|Sources/Scripts/buildbot|BuildMaster.py|||
-# LOGCOMMENT
-# Updated bonsai parser and switched master config to buildbot-0.4.1 style.
-#
-# :ENDLOGCOMMENT
-#
-# In the first example line, stephend is the user, /cvs the repository,
-# buildbot the directory, bonsai.py the file, 1.2 the revision, no sticky
-# and branch, 18 lines added and 7 removed. All of these fields might not be
-# present (during "removes" for example).
-#
-# There may be multiple "control" lines or even none (imports, directory
-# additions) but there is one email per directory. We only care about actual
-# changes since it is presumed directory additions don't actually affect the
-# build. At least one file should need to change (the makefile, say) to
-# actually make a new directory part of the build process. That's my story
-# and I'm sticking to it.
-
-class BonsaiMaildirSource(MaildirSource):
- name = "Bonsai"
-
- def parse(self, m, prefix=None):
- """Parse mail sent by the Bonsai cvs loginfo script."""
-
- # we don't care who the email came from b/c the cvs user is in the
- # msg text
-
- who = "unknown"
- timestamp = None
- files = []
- lines = list(body_line_iterator(m))
-
- # read the control lines (what/who/where/file/etc.)
- while lines:
- line = lines.pop(0)
- if line == "LOGCOMMENT\n":
- break;
- line = line.rstrip("\n")
-
- # we'd like to do the following but it won't work if the number of
- # items doesn't match so...
- # what, timestamp, user, repo, module, file = line.split( '|' )
- items = line.split('|')
- if len(items) < 6:
- # not a valid line, assume this isn't a bonsai message
- return None
-
- try:
- # just grab the bottom-most timestamp, they're probably all the
- # same. TODO: I'm assuming this is relative to the epoch, but
- # this needs testing.
- timestamp = int(items[1])
- except ValueError:
- pass
-
- user = items[2]
- if user:
- who = user
-
- module = items[4]
- file = items[5]
- if module and file:
- path = "%s/%s" % (module, file)
- files.append(path)
- sticky = items[7]
- branch = items[8]
-
- # if no files changed, return nothing
- if not files:
- return None
-
- # read the comments
- comments = ""
- while lines:
- line = lines.pop(0)
- if line == ":ENDLOGCOMMENT\n":
- break
- comments += line
- comments = comments.rstrip() + "\n"
-
- # return buildbot Change object
- return changes.Change(who, files, comments, when=timestamp,
- branch=branch)
-
-# svn "commit-email.pl" handler. The format is very similar to freshcvs mail;
-# here's a sample:
-
-# From: username [at] apache.org [slightly obfuscated to avoid spam here]
-# To: commits [at] spamassassin.apache.org
-# Subject: svn commit: r105955 - in spamassassin/trunk: . lib/Mail
-# ...
-#
-# Author: username
-# Date: Sat Nov 20 00:17:49 2004 [note: TZ = local tz on server!]
-# New Revision: 105955
-#
-# Modified: [also Removed: and Added:]
-# [filename]
-# ...
-# Log:
-# [log message]
-# ...
-#
-#
-# Modified: spamassassin/trunk/lib/Mail/SpamAssassin.pm
-# [unified diff]
-#
-# [end of mail]
-
-class SVNCommitEmailMaildirSource(MaildirSource):
- name = "SVN commit-email.pl"
-
- def parse(self, m, prefix=None):
- """Parse messages sent by the svn 'commit-email.pl' trigger.
- """
-
- # The mail is sent from the person doing the checkin. Assume that the
- # local username is enough to identify them (this assumes a one-server
- # cvs-over-rsh environment rather than the server-dirs-shared-over-NFS
- # model)
- name, addr = parseaddr(m["from"])
- if not addr:
- return None # no From means this message isn't from FreshCVS
- at = addr.find("@")
- if at == -1:
- who = addr # might still be useful
- else:
- who = addr[:at]
-
- # we take the time of receipt as the time of checkin. Not correct (it
- # depends upon the email latency), but it avoids the
- # out-of-order-changes issue. Also syncmail doesn't give us anything
- # better to work with, unless you count pulling the v1-vs-v2
- # timestamp out of the diffs, which would be ugly. TODO: Pulling the
- # 'Date:' header from the mail is a possibility, and
- # email.Utils.parsedate_tz may be useful. It should be configurable,
- # however, because there are a lot of broken clocks out there.
- when = util.now()
-
- files = []
- comments = ""
- isdir = 0
- lines = list(body_line_iterator(m))
- rev = None
- while lines:
- line = lines.pop(0)
-
- # "Author: jmason"
- match = re.search(r"^Author: (\S+)", line)
- if match:
- who = match.group(1)
-
- # "New Revision: 105955"
- match = re.search(r"^New Revision: (\d+)", line)
- if match:
- rev = match.group(1)
-
- # possible TODO: use "Date: ..." data here instead of time of
- # commit message receipt, above. however, this timestamp is
- # specified *without* a timezone, in the server's local TZ, so to
- # be accurate buildbot would need a config setting to specify the
- # source server's expected TZ setting! messy.
-
- # this stanza ends with the "Log:"
- if (line == "Log:\n"):
- break
-
- # commit message is terminated by the file-listing section
- while lines:
- line = lines.pop(0)
- if (line == "Modified:\n" or
- line == "Added:\n" or
- line == "Removed:\n"):
- break
- comments += line
- comments = comments.rstrip() + "\n"
-
- while lines:
- line = lines.pop(0)
- if line == "\n":
- break
- if line.find("Modified:\n") == 0:
- continue # ignore this line
- if line.find("Added:\n") == 0:
- continue # ignore this line
- if line.find("Removed:\n") == 0:
- continue # ignore this line
- line = line.strip()
-
- thesefiles = line.split(" ")
- for f in thesefiles:
- if prefix:
- # insist that the file start with the prefix: we may get
- # changes we don't care about too
- if f.startswith(prefix):
- f = f[len(prefix):]
- else:
- log.msg("ignored file from svn commit: prefix '%s' "
- "does not match filename '%s'" % (prefix, f))
- continue
-
- # TODO: figure out how new directories are described, set
- # .isdir
- files.append(f)
-
- if not files:
- log.msg("no matching files found, ignoring commit")
- return None
-
- return changes.Change(who, files, comments, when=when, revision=rev)
-
diff --git a/buildbot/buildbot/changes/maildir.py b/buildbot/buildbot/changes/maildir.py
deleted file mode 100644
index 2e4a706..0000000
--- a/buildbot/buildbot/changes/maildir.py
+++ /dev/null
@@ -1,116 +0,0 @@
-
-# This is a class which watches a maildir for new messages. It uses the
-# linux dirwatcher API (if available) to look for new files. The
-# .messageReceived method is invoked with the filename of the new message,
-# relative to the top of the maildir (so it will look like "new/blahblah").
-
-import os
-from twisted.python import log
-from twisted.application import service, internet
-from twisted.internet import reactor
-dnotify = None
-try:
- import dnotify
-except:
- # I'm not actually sure this log message gets recorded
- log.msg("unable to import dnotify, so Maildir will use polling instead")
-
-class NoSuchMaildir(Exception):
- pass
-
-class MaildirService(service.MultiService):
- """I watch a maildir for new messages. I should be placed as the service
- child of some MultiService instance. When running, I use the linux
- dirwatcher API (if available) or poll for new files in the 'new'
- subdirectory of my maildir path. When I discover a new message, I invoke
- my .messageReceived() method with the short filename of the new message,
- so the full name of the new file can be obtained with
- os.path.join(maildir, 'new', filename). messageReceived() should be
- overridden by a subclass to do something useful. I will not move or
- delete the file on my own: the subclass's messageReceived() should
- probably do that.
- """
- pollinterval = 10 # only used if we don't have DNotify
-
- def __init__(self, basedir=None):
- """Create the Maildir watcher. BASEDIR is the maildir directory (the
- one which contains new/ and tmp/)
- """
- service.MultiService.__init__(self)
- self.basedir = basedir
- self.files = []
- self.dnotify = None
-
- def setBasedir(self, basedir):
- # some users of MaildirService (scheduler.Try_Jobdir, in particular)
- # don't know their basedir until setServiceParent, since it is
- # relative to the buildmaster's basedir. So let them set it late. We
- # don't actually need it until our own startService.
- self.basedir = basedir
-
- def startService(self):
- service.MultiService.startService(self)
- self.newdir = os.path.join(self.basedir, "new")
- if not os.path.isdir(self.basedir) or not os.path.isdir(self.newdir):
- raise NoSuchMaildir("invalid maildir '%s'" % self.basedir)
- try:
- if dnotify:
- # we must hold an fd open on the directory, so we can get
- # notified when it changes.
- self.dnotify = dnotify.DNotify(self.newdir,
- self.dnotify_callback,
- [dnotify.DNotify.DN_CREATE])
- except (IOError, OverflowError):
- # IOError is probably linux<2.4.19, which doesn't support
- # dnotify. OverflowError will occur on some 64-bit machines
- # because of a python bug
- log.msg("DNotify failed, falling back to polling")
- if not self.dnotify:
- t = internet.TimerService(self.pollinterval, self.poll)
- t.setServiceParent(self)
- self.poll()
-
- def dnotify_callback(self):
- log.msg("dnotify noticed something, now polling")
-
- # give it a moment. I found that qmail had problems when the message
- # was removed from the maildir instantly. It shouldn't, that's what
- # maildirs are made for. I wasn't able to eyeball any reason for the
- # problem, and safecat didn't behave the same way, but qmail reports
- # "Temporary_error_on_maildir_delivery" (qmail-local.c:165,
- # maildir_child() process exited with rc not in 0,2,3,4). Not sure
- # why, and I'd have to hack qmail to investigate further, so it's
- # easier to just wait a second before yanking the message out of new/
-
- reactor.callLater(0.1, self.poll)
-
-
- def stopService(self):
- if self.dnotify:
- self.dnotify.remove()
- self.dnotify = None
- return service.MultiService.stopService(self)
-
- def poll(self):
- assert self.basedir
- # see what's new
- for f in self.files:
- if not os.path.isfile(os.path.join(self.newdir, f)):
- self.files.remove(f)
- newfiles = []
- for f in os.listdir(self.newdir):
- if not f in self.files:
- newfiles.append(f)
- self.files.extend(newfiles)
- # TODO: sort by ctime, then filename, since safecat uses a rather
- # fine-grained timestamp in the filename
- for n in newfiles:
- # TODO: consider catching exceptions in messageReceived
- self.messageReceived(n)
-
- def messageReceived(self, filename):
- """Called when a new file is noticed. Will call
- self.parent.messageReceived() with a path relative to maildir/new.
- Should probably be overridden in subclasses."""
- self.parent.messageReceived(filename)
-
diff --git a/buildbot/buildbot/changes/monotone.py b/buildbot/buildbot/changes/monotone.py
deleted file mode 100644
index 302c1c5..0000000
--- a/buildbot/buildbot/changes/monotone.py
+++ /dev/null
@@ -1,305 +0,0 @@
-
-import tempfile
-import os
-from cStringIO import StringIO
-
-from twisted.python import log
-from twisted.application import service
-from twisted.internet import defer, protocol, error, reactor
-from twisted.internet.task import LoopingCall
-
-from buildbot import util
-from buildbot.interfaces import IChangeSource
-from buildbot.changes.changes import Change
-
-class _MTProtocol(protocol.ProcessProtocol):
-
- def __init__(self, deferred, cmdline):
- self.cmdline = cmdline
- self.deferred = deferred
- self.s = StringIO()
-
- def errReceived(self, text):
- log.msg("stderr: %s" % text)
-
- def outReceived(self, text):
- log.msg("stdout: %s" % text)
- self.s.write(text)
-
- def processEnded(self, reason):
- log.msg("Command %r exited with value %s" % (self.cmdline, reason))
- if isinstance(reason.value, error.ProcessDone):
- self.deferred.callback(self.s.getvalue())
- else:
- self.deferred.errback(reason)
-
-class Monotone:
- """All methods of this class return a Deferred."""
-
- def __init__(self, bin, db):
- self.bin = bin
- self.db = db
-
- def _run_monotone(self, args):
- d = defer.Deferred()
- cmdline = (self.bin, "--db=" + self.db) + tuple(args)
- p = _MTProtocol(d, cmdline)
- log.msg("Running command: %r" % (cmdline,))
- log.msg("wd: %s" % os.getcwd())
- reactor.spawnProcess(p, self.bin, cmdline)
- return d
-
- def _process_revision_list(self, output):
- if output:
- return output.strip().split("\n")
- else:
- return []
-
- def get_interface_version(self):
- d = self._run_monotone(["automate", "interface_version"])
- d.addCallback(self._process_interface_version)
- return d
-
- def _process_interface_version(self, output):
- return tuple(map(int, output.strip().split(".")))
-
- def db_init(self):
- return self._run_monotone(["db", "init"])
-
- def db_migrate(self):
- return self._run_monotone(["db", "migrate"])
-
- def pull(self, server, pattern):
- return self._run_monotone(["pull", server, pattern])
-
- def get_revision(self, rid):
- return self._run_monotone(["cat", "revision", rid])
-
- def get_heads(self, branch, rcfile=""):
- cmd = ["automate", "heads", branch]
- if rcfile:
- cmd += ["--rcfile=" + rcfile]
- d = self._run_monotone(cmd)
- d.addCallback(self._process_revision_list)
- return d
-
- def erase_ancestors(self, revs):
- d = self._run_monotone(["automate", "erase_ancestors"] + revs)
- d.addCallback(self._process_revision_list)
- return d
-
- def ancestry_difference(self, new_rev, old_revs):
- d = self._run_monotone(["automate", "ancestry_difference", new_rev]
- + old_revs)
- d.addCallback(self._process_revision_list)
- return d
-
- def descendents(self, rev):
- d = self._run_monotone(["automate", "descendents", rev])
- d.addCallback(self._process_revision_list)
- return d
-
- def log(self, rev, depth=None):
- if depth is not None:
- depth_arg = ["--last=%i" % (depth,)]
- else:
- depth_arg = []
- return self._run_monotone(["log", "-r", rev] + depth_arg)
-
-
-class MonotoneSource(service.Service, util.ComparableMixin):
- """This source will poll a monotone server for changes and submit them to
- the change master.
-
- @param server_addr: monotone server specification (host:portno)
-
- @param branch: monotone branch to watch
-
- @param trusted_keys: list of keys whose code you trust
-
- @param db_path: path to monotone database to pull into
-
- @param pollinterval: interval in seconds between polls, defaults to 10 minutes
- @param monotone_exec: path to monotone executable, defaults to "monotone"
- """
-
- __implements__ = IChangeSource, service.Service.__implements__
- compare_attrs = ["server_addr", "trusted_keys", "db_path",
- "pollinterval", "branch", "monotone_exec"]
-
- parent = None # filled in when we're added
- done_revisions = []
- last_revision = None
- loop = None
- d = None
- tmpfile = None
- monotone = None
- volatile = ["loop", "d", "tmpfile", "monotone"]
-
- def __init__(self, server_addr, branch, trusted_keys, db_path,
- pollinterval=60 * 10, monotone_exec="monotone"):
- self.server_addr = server_addr
- self.branch = branch
- self.trusted_keys = trusted_keys
- self.db_path = db_path
- self.pollinterval = pollinterval
- self.monotone_exec = monotone_exec
- self.monotone = Monotone(self.monotone_exec, self.db_path)
-
- def startService(self):
- self.loop = LoopingCall(self.start_poll)
- self.loop.start(self.pollinterval)
- service.Service.startService(self)
-
- def stopService(self):
- self.loop.stop()
- return service.Service.stopService(self)
-
- def describe(self):
- return "monotone_source %s %s" % (self.server_addr,
- self.branch)
-
- def start_poll(self):
- if self.d is not None:
- log.msg("last poll still in progress, skipping next poll")
- return
- log.msg("starting poll")
- self.d = self._maybe_init_db()
- self.d.addCallback(self._do_netsync)
- self.d.addCallback(self._get_changes)
- self.d.addErrback(self._handle_error)
-
- def _handle_error(self, failure):
- log.err(failure)
- self.d = None
-
- def _maybe_init_db(self):
- if not os.path.exists(self.db_path):
- log.msg("init'ing db")
- return self.monotone.db_init()
- else:
- log.msg("db already exists, migrating")
- return self.monotone.db_migrate()
-
- def _do_netsync(self, output):
- return self.monotone.pull(self.server_addr, self.branch)
-
- def _get_changes(self, output):
- d = self._get_new_head()
- d.addCallback(self._process_new_head)
- return d
-
- def _get_new_head(self):
- # This function returns a deferred that resolves to a good pick of new
- # head (or None if there is no good new head.)
-
- # First need to get all new heads...
- rcfile = """function get_revision_cert_trust(signers, id, name, val)
- local trusted_signers = { %s }
- local ts_table = {}
- for k, v in pairs(trusted_signers) do ts_table[v] = 1 end
- for k, v in pairs(signers) do
- if ts_table[v] then
- return true
- end
- end
- return false
- end
- """
- trusted_list = ", ".join(['"' + key + '"' for key in self.trusted_keys])
- # mktemp is unsafe, but mkstemp is not 2.2 compatible.
- tmpfile_name = tempfile.mktemp()
- f = open(tmpfile_name, "w")
- f.write(rcfile % trusted_list)
- f.close()
- d = self.monotone.get_heads(self.branch, tmpfile_name)
- d.addCallback(self._find_new_head, tmpfile_name)
- return d
-
- def _find_new_head(self, new_heads, tmpfile_name):
- os.unlink(tmpfile_name)
- # Now get the old head's descendents...
- if self.last_revision is not None:
- d = self.monotone.descendents(self.last_revision)
- else:
- d = defer.succeed(new_heads)
- d.addCallback(self._pick_new_head, new_heads)
- return d
-
- def _pick_new_head(self, old_head_descendents, new_heads):
- for r in new_heads:
- if r in old_head_descendents:
- return r
- return None
-
- def _process_new_head(self, new_head):
- if new_head is None:
- log.msg("No new head")
- self.d = None
- return None
- # Okay, we have a new head; we need to get all the revisions since
- # then and create change objects for them.
- # Step 1: simplify set of processed revisions.
- d = self._simplify_revisions()
- # Step 2: get the list of new revisions
- d.addCallback(self._get_new_revisions, new_head)
- # Step 3: add a change for each
- d.addCallback(self._add_changes_for_revisions)
- # Step 4: all done
- d.addCallback(self._finish_changes, new_head)
- return d
-
- def _simplify_revisions(self):
- d = self.monotone.erase_ancestors(self.done_revisions)
- d.addCallback(self._reset_done_revisions)
- return d
-
- def _reset_done_revisions(self, new_done_revisions):
- self.done_revisions = new_done_revisions
- return None
-
- def _get_new_revisions(self, blah, new_head):
- if self.done_revisions:
- return self.monotone.ancestry_difference(new_head,
- self.done_revisions)
- else:
- # Don't force feed the builder with every change since the
- # beginning of time when it's first started up.
- return defer.succeed([new_head])
-
- def _add_changes_for_revisions(self, revs):
- d = defer.succeed(None)
- for rid in revs:
- d.addCallback(self._add_change_for_revision, rid)
- return d
-
- def _add_change_for_revision(self, blah, rid):
- d = self.monotone.log(rid, 1)
- d.addCallback(self._add_change_from_log, rid)
- return d
-
- def _add_change_from_log(self, log, rid):
- d = self.monotone.get_revision(rid)
- d.addCallback(self._add_change_from_log_and_revision, log, rid)
- return d
-
- def _add_change_from_log_and_revision(self, revision, log, rid):
- # Stupid way to pull out everything inside quotes (which currently
- # uniquely identifies filenames inside a changeset).
- pieces = revision.split('"')
- files = []
- for i in range(len(pieces)):
- if (i % 2) == 1:
- files.append(pieces[i])
- # Also pull out author key and date
- author = "unknown author"
- pieces = log.split('\n')
- for p in pieces:
- if p.startswith("Author:"):
- author = p.split()[1]
- self.parent.addChange(Change(author, files, log, revision=rid))
-
- def _finish_changes(self, blah, new_head):
- self.done_revisions.append(new_head)
- self.last_revision = new_head
- self.d = None
diff --git a/buildbot/buildbot/changes/p4poller.py b/buildbot/buildbot/changes/p4poller.py
deleted file mode 100644
index a313343..0000000
--- a/buildbot/buildbot/changes/p4poller.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# -*- test-case-name: buildbot.test.test_p4poller -*-
-
-# Many thanks to Dave Peticolas for contributing this module
-
-import re
-import time
-
-from twisted.python import log, failure
-from twisted.internet import defer, reactor
-from twisted.internet.utils import getProcessOutput
-from twisted.internet.task import LoopingCall
-
-from buildbot import util
-from buildbot.changes import base, changes
-
-def get_simple_split(branchfile):
- """Splits the branchfile argument and assuming branch is
- the first path component in branchfile, will return
- branch and file else None."""
-
- index = branchfile.find('/')
- if index == -1: return None, None
- branch, file = branchfile.split('/', 1)
- return branch, file
-
-class P4Source(base.ChangeSource, util.ComparableMixin):
- """This source will poll a perforce repository for changes and submit
- them to the change master."""
-
- compare_attrs = ["p4port", "p4user", "p4passwd", "p4base",
- "p4bin", "pollinterval"]
-
- changes_line_re = re.compile(
- r"Change (?P<num>\d+) on \S+ by \S+@\S+ '.+'$")
- describe_header_re = re.compile(
- r"Change \d+ by (?P<who>\S+)@\S+ on (?P<when>.+)$")
- file_re = re.compile(r"^\.\.\. (?P<path>[^#]+)#\d+ \w+$")
- datefmt = '%Y/%m/%d %H:%M:%S'
-
- parent = None # filled in when we're added
- last_change = None
- loop = None
- working = False
-
- def __init__(self, p4port=None, p4user=None, p4passwd=None,
- p4base='//', p4bin='p4',
- split_file=lambda branchfile: (None, branchfile),
- pollinterval=60 * 10, histmax=None):
- """
- @type p4port: string
- @param p4port: p4 port definition (host:portno)
- @type p4user: string
- @param p4user: p4 user
- @type p4passwd: string
- @param p4passwd: p4 passwd
- @type p4base: string
- @param p4base: p4 file specification to limit a poll to
- without the trailing '...' (i.e., //)
- @type p4bin: string
- @param p4bin: path to p4 binary, defaults to just 'p4'
- @type split_file: func
- $param split_file: splits a filename into branch and filename.
- @type pollinterval: int
- @param pollinterval: interval in seconds between polls
- @type histmax: int
- @param histmax: (obsolete) maximum number of changes to look back through.
- ignored; accepted for backwards compatibility.
- """
-
- self.p4port = p4port
- self.p4user = p4user
- self.p4passwd = p4passwd
- self.p4base = p4base
- self.p4bin = p4bin
- self.split_file = split_file
- self.pollinterval = pollinterval
- self.loop = LoopingCall(self.checkp4)
-
- def startService(self):
- base.ChangeSource.startService(self)
-
- # Don't start the loop just yet because the reactor isn't running.
- # Give it a chance to go and install our SIGCHLD handler before
- # spawning processes.
- reactor.callLater(0, self.loop.start, self.pollinterval)
-
- def stopService(self):
- self.loop.stop()
- return base.ChangeSource.stopService(self)
-
- def describe(self):
- return "p4source %s %s" % (self.p4port, self.p4base)
-
- def checkp4(self):
- # Our return value is only used for unit testing.
- if self.working:
- log.msg("Skipping checkp4 because last one has not finished")
- return defer.succeed(None)
- else:
- self.working = True
- d = self._get_changes()
- d.addCallback(self._process_changes)
- d.addBoth(self._finished)
- return d
-
- def _finished(self, res):
- assert self.working
- self.working = False
-
- # Again, the return value is only for unit testing.
- # If there's a failure, log it so it isn't lost.
- if isinstance(res, failure.Failure):
- log.msg('P4 poll failed: %s' % res)
- return None
- return res
-
- def _get_changes(self):
- args = []
- if self.p4port:
- args.extend(['-p', self.p4port])
- if self.p4user:
- args.extend(['-u', self.p4user])
- if self.p4passwd:
- args.extend(['-P', self.p4passwd])
- args.extend(['changes'])
- if self.last_change is not None:
- args.extend(['%s...@%d,now' % (self.p4base, self.last_change+1)])
- else:
- args.extend(['-m', '1', '%s...' % (self.p4base,)])
- env = {}
- return getProcessOutput(self.p4bin, args, env)
-
- def _process_changes(self, result):
- last_change = self.last_change
- changelists = []
- for line in result.split('\n'):
- line = line.strip()
- if not line: continue
- m = self.changes_line_re.match(line)
- assert m, "Unexpected 'p4 changes' output: %r" % result
- num = int(m.group('num'))
- if last_change is None:
- log.msg('P4Poller: starting at change %d' % num)
- self.last_change = num
- return []
- changelists.append(num)
- changelists.reverse() # oldest first
-
- # Retrieve each sequentially.
- d = defer.succeed(None)
- for c in changelists:
- d.addCallback(self._get_describe, c)
- d.addCallback(self._process_describe, c)
- return d
-
- def _get_describe(self, dummy, num):
- args = []
- if self.p4port:
- args.extend(['-p', self.p4port])
- if self.p4user:
- args.extend(['-u', self.p4user])
- if self.p4passwd:
- args.extend(['-P', self.p4passwd])
- args.extend(['describe', '-s', str(num)])
- env = {}
- d = getProcessOutput(self.p4bin, args, env)
- return d
-
- def _process_describe(self, result, num):
- lines = result.split('\n')
- # SF#1555985: Wade Brainerd reports a stray ^M at the end of the date
- # field. The rstrip() is intended to remove that.
- lines[0] = lines[0].rstrip()
- m = self.describe_header_re.match(lines[0])
- assert m, "Unexpected 'p4 describe -s' result: %r" % result
- who = m.group('who')
- when = time.mktime(time.strptime(m.group('when'), self.datefmt))
- comments = ''
- while not lines[0].startswith('Affected files'):
- comments += lines.pop(0) + '\n'
- lines.pop(0) # affected files
-
- branch_files = {} # dict for branch mapped to file(s)
- while lines:
- line = lines.pop(0).strip()
- if not line: continue
- m = self.file_re.match(line)
- assert m, "Invalid file line: %r" % line
- path = m.group('path')
- if path.startswith(self.p4base):
- branch, file = self.split_file(path[len(self.p4base):])
- if (branch == None and file == None): continue
- if branch_files.has_key(branch):
- branch_files[branch].append(file)
- else:
- branch_files[branch] = [file]
-
- for branch in branch_files:
- c = changes.Change(who=who,
- files=branch_files[branch],
- comments=comments,
- revision=num,
- when=when,
- branch=branch)
- self.parent.addChange(c)
-
- self.last_change = num
diff --git a/buildbot/buildbot/changes/pb.py b/buildbot/buildbot/changes/pb.py
deleted file mode 100644
index 91a1a22..0000000
--- a/buildbot/buildbot/changes/pb.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# -*- test-case-name: buildbot.test.test_changes -*-
-
-from twisted.python import log
-
-from buildbot.pbutil import NewCredPerspective
-from buildbot.changes import base, changes
-
-class ChangePerspective(NewCredPerspective):
-
- def __init__(self, changemaster, prefix):
- self.changemaster = changemaster
- self.prefix = prefix
-
- def attached(self, mind):
- return self
- def detached(self, mind):
- pass
-
- def perspective_addChange(self, changedict):
- log.msg("perspective_addChange called")
- pathnames = []
- prefixpaths = None
- for path in changedict['files']:
- if self.prefix:
- if not path.startswith(self.prefix):
- # this file does not start with the prefix, so ignore it
- continue
- path = path[len(self.prefix):]
- pathnames.append(path)
-
- if pathnames:
- change = changes.Change(changedict['who'],
- pathnames,
- changedict['comments'],
- branch=changedict.get('branch'),
- revision=changedict.get('revision'),
- category=changedict.get('category'),
- )
- self.changemaster.addChange(change)
-
-class PBChangeSource(base.ChangeSource):
- compare_attrs = ["user", "passwd", "port", "prefix"]
-
- def __init__(self, user="change", passwd="changepw", port=None,
- prefix=None, sep=None):
- """I listen on a TCP port for Changes from 'buildbot sendchange'.
-
- I am a ChangeSource which will accept Changes from a remote source. I
- share a TCP listening port with the buildslaves.
-
- The 'buildbot sendchange' command, the contrib/svn_buildbot.py tool,
- and the contrib/bzr_buildbot.py tool know how to send changes to me.
-
- @type prefix: string (or None)
- @param prefix: if set, I will ignore any filenames that do not start
- with this string. Moreover I will remove this string
- from all filenames before creating the Change object
- and delivering it to the Schedulers. This is useful
- for changes coming from version control systems that
- represent branches as parent directories within the
- repository (like SVN and Perforce). Use a prefix of
- 'trunk/' or 'project/branches/foobranch/' to only
- follow one branch and to get correct tree-relative
- filenames.
-
- @param sep: DEPRECATED (with an axe). sep= was removed in
- buildbot-0.7.4 . Instead of using it, you should use
- prefix= with a trailing directory separator. This
- docstring (and the better-than-nothing error message
- which occurs when you use it) will be removed in 0.7.5 .
- """
-
- # sep= was removed in 0.7.4 . This more-helpful-than-nothing error
- # message will be removed in 0.7.5 .
- assert sep is None, "prefix= is now a complete string, do not use sep="
- # TODO: current limitations
- assert user == "change"
- assert passwd == "changepw"
- assert port == None
- self.user = user
- self.passwd = passwd
- self.port = port
- self.prefix = prefix
-
- def describe(self):
- # TODO: when the dispatcher is fixed, report the specific port
- #d = "PB listener on port %d" % self.port
- d = "PBChangeSource listener on all-purpose slaveport"
- if self.prefix is not None:
- d += " (prefix '%s')" % self.prefix
- return d
-
- def startService(self):
- base.ChangeSource.startService(self)
- # our parent is the ChangeMaster object
- # find the master's Dispatch object and register our username
- # TODO: the passwd should be registered here too
- master = self.parent.parent
- master.dispatcher.register(self.user, self)
-
- def stopService(self):
- base.ChangeSource.stopService(self)
- # unregister our username
- master = self.parent.parent
- master.dispatcher.unregister(self.user)
-
- def getPerspective(self):
- return ChangePerspective(self.parent, self.prefix)
diff --git a/buildbot/buildbot/changes/svnpoller.py b/buildbot/buildbot/changes/svnpoller.py
deleted file mode 100644
index 223c8b5..0000000
--- a/buildbot/buildbot/changes/svnpoller.py
+++ /dev/null
@@ -1,463 +0,0 @@
-# -*- test-case-name: buildbot.test.test_svnpoller -*-
-
-# Based on the work of Dave Peticolas for the P4poll
-# Changed to svn (using xml.dom.minidom) by Niklaus Giger
-# Hacked beyond recognition by Brian Warner
-
-from twisted.python import log
-from twisted.internet import defer, reactor, utils
-from twisted.internet.task import LoopingCall
-
-from buildbot import util
-from buildbot.changes import base
-from buildbot.changes.changes import Change
-
-import xml.dom.minidom
-
-def _assert(condition, msg):
- if condition:
- return True
- raise AssertionError(msg)
-
-def dbgMsg(myString):
- log.msg(myString)
- return 1
-
-# these split_file_* functions are available for use as values to the
-# split_file= argument.
-def split_file_alwaystrunk(path):
- return (None, path)
-
-def split_file_branches(path):
- # turn trunk/subdir/file.c into (None, "subdir/file.c")
- # and branches/1.5.x/subdir/file.c into ("branches/1.5.x", "subdir/file.c")
- pieces = path.split('/')
- if pieces[0] == 'trunk':
- return (None, '/'.join(pieces[1:]))
- elif pieces[0] == 'branches':
- return ('/'.join(pieces[0:2]), '/'.join(pieces[2:]))
- else:
- return None
-
-
-class SVNPoller(base.ChangeSource, util.ComparableMixin):
- """This source will poll a Subversion repository for changes and submit
- them to the change master."""
-
- compare_attrs = ["svnurl", "split_file_function",
- "svnuser", "svnpasswd",
- "pollinterval", "histmax",
- "svnbin"]
-
- parent = None # filled in when we're added
- last_change = None
- loop = None
- working = False
-
- def __init__(self, svnurl, split_file=None,
- svnuser=None, svnpasswd=None,
- pollinterval=10*60, histmax=100,
- svnbin='svn'):
- """
- @type svnurl: string
- @param svnurl: the SVN URL that describes the repository and
- subdirectory to watch. If this ChangeSource should
- only pay attention to a single branch, this should
- point at the repository for that branch, like
- svn://svn.twistedmatrix.com/svn/Twisted/trunk . If it
- should follow multiple branches, point it at the
- repository directory that contains all the branches
- like svn://svn.twistedmatrix.com/svn/Twisted and also
- provide a branch-determining function.
-
- Each file in the repository has a SVN URL in the form
- (SVNURL)/(BRANCH)/(FILEPATH), where (BRANCH) could be
- empty or not, depending upon your branch-determining
- function. Only files that start with (SVNURL)/(BRANCH)
- will be monitored. The Change objects that are sent to
- the Schedulers will see (FILEPATH) for each modified
- file.
-
- @type split_file: callable or None
- @param split_file: a function that is called with a string of the
- form (BRANCH)/(FILEPATH) and should return a tuple
- (BRANCH, FILEPATH). This function should match
- your repository's branch-naming policy. Each
- changed file has a fully-qualified URL that can be
- split into a prefix (which equals the value of the
- 'svnurl' argument) and a suffix; it is this suffix
- which is passed to the split_file function.
-
- If the function returns None, the file is ignored.
- Use this to indicate that the file is not a part
- of this project.
-
- For example, if your repository puts the trunk in
- trunk/... and branches are in places like
- branches/1.5/..., your split_file function could
- look like the following (this function is
- available as svnpoller.split_file_branches)::
-
- pieces = path.split('/')
- if pieces[0] == 'trunk':
- return (None, '/'.join(pieces[1:]))
- elif pieces[0] == 'branches':
- return ('/'.join(pieces[0:2]),
- '/'.join(pieces[2:]))
- else:
- return None
-
- If instead your repository layout puts the trunk
- for ProjectA in trunk/ProjectA/... and the 1.5
- branch in branches/1.5/ProjectA/..., your
- split_file function could look like::
-
- pieces = path.split('/')
- if pieces[0] == 'trunk':
- branch = None
- pieces.pop(0) # remove 'trunk'
- elif pieces[0] == 'branches':
- pieces.pop(0) # remove 'branches'
- # grab branch name
- branch = 'branches/' + pieces.pop(0)
- else:
- return None # something weird
- projectname = pieces.pop(0)
- if projectname != 'ProjectA':
- return None # wrong project
- return (branch, '/'.join(pieces))
-
- The default of split_file= is None, which
- indicates that no splitting should be done. This
- is equivalent to the following function::
-
- return (None, path)
-
- If you wish, you can override the split_file
- method with the same sort of function instead of
- passing in a split_file= argument.
-
-
- @type svnuser: string
- @param svnuser: If set, the --username option will be added to
- the 'svn log' command. You may need this to get
- access to a private repository.
- @type svnpasswd: string
- @param svnpasswd: If set, the --password option will be added.
-
- @type pollinterval: int
- @param pollinterval: interval in seconds between polls. The default
- is 600 seconds (10 minutes). Smaller values
- decrease the latency between the time a change
- is recorded and the time the buildbot notices
- it, but it also increases the system load.
-
- @type histmax: int
- @param histmax: maximum number of changes to look back through.
- The default is 100. Smaller values decrease
- system load, but if more than histmax changes
- are recorded between polls, the extra ones will
- be silently lost.
-
- @type svnbin: string
- @param svnbin: path to svn binary, defaults to just 'svn'. Use
- this if your subversion command lives in an
- unusual location.
- """
-
- if svnurl.endswith("/"):
- svnurl = svnurl[:-1] # strip the trailing slash
- self.svnurl = svnurl
- self.split_file_function = split_file or split_file_alwaystrunk
- self.svnuser = svnuser
- self.svnpasswd = svnpasswd
-
- self.svnbin = svnbin
- self.pollinterval = pollinterval
- self.histmax = histmax
- self._prefix = None
- self.overrun_counter = 0
- self.loop = LoopingCall(self.checksvn)
-
- def split_file(self, path):
- # use getattr() to avoid turning this function into a bound method,
- # which would require it to have an extra 'self' argument
- f = getattr(self, "split_file_function")
- return f(path)
-
- def startService(self):
- log.msg("SVNPoller(%s) starting" % self.svnurl)
- base.ChangeSource.startService(self)
- # Don't start the loop just yet because the reactor isn't running.
- # Give it a chance to go and install our SIGCHLD handler before
- # spawning processes.
- reactor.callLater(0, self.loop.start, self.pollinterval)
-
- def stopService(self):
- log.msg("SVNPoller(%s) shutting down" % self.svnurl)
- self.loop.stop()
- return base.ChangeSource.stopService(self)
-
- def describe(self):
- return "SVNPoller watching %s" % self.svnurl
-
- def checksvn(self):
- # Our return value is only used for unit testing.
-
- # we need to figure out the repository root, so we can figure out
- # repository-relative pathnames later. Each SVNURL is in the form
- # (ROOT)/(PROJECT)/(BRANCH)/(FILEPATH), where (ROOT) is something
- # like svn://svn.twistedmatrix.com/svn/Twisted (i.e. there is a
- # physical repository at /svn/Twisted on that host), (PROJECT) is
- # something like Projects/Twisted (i.e. within the repository's
- # internal namespace, everything under Projects/Twisted/ has
- # something to do with Twisted, but these directory names do not
- # actually appear on the repository host), (BRANCH) is something like
- # "trunk" or "branches/2.0.x", and (FILEPATH) is a tree-relative
- # filename like "twisted/internet/defer.py".
-
- # our self.svnurl attribute contains (ROOT)/(PROJECT) combined
- # together in a way that we can't separate without svn's help. If the
- # user is not using the split_file= argument, then self.svnurl might
- # be (ROOT)/(PROJECT)/(BRANCH) . In any case, the filenames we will
- # get back from 'svn log' will be of the form
- # (PROJECT)/(BRANCH)/(FILEPATH), but we want to be able to remove
- # that (PROJECT) prefix from them. To do this without requiring the
- # user to tell us how svnurl is split into ROOT and PROJECT, we do an
- # 'svn info --xml' command at startup. This command will include a
- # <root> element that tells us ROOT. We then strip this prefix from
- # self.svnurl to determine PROJECT, and then later we strip the
- # PROJECT prefix from the filenames reported by 'svn log --xml' to
- # get a (BRANCH)/(FILEPATH) that can be passed to split_file() to
- # turn into separate BRANCH and FILEPATH values.
-
- # whew.
-
- if self.working:
- log.msg("SVNPoller(%s) overrun: timer fired but the previous "
- "poll had not yet finished." % self.svnurl)
- self.overrun_counter += 1
- return defer.succeed(None)
- self.working = True
-
- log.msg("SVNPoller polling")
- if not self._prefix:
- # this sets self._prefix when it finishes. It fires with
- # self._prefix as well, because that makes the unit tests easier
- # to write.
- d = self.get_root()
- d.addCallback(self.determine_prefix)
- else:
- d = defer.succeed(self._prefix)
-
- d.addCallback(self.get_logs)
- d.addCallback(self.parse_logs)
- d.addCallback(self.get_new_logentries)
- d.addCallback(self.create_changes)
- d.addCallback(self.submit_changes)
- d.addCallbacks(self.finished_ok, self.finished_failure)
- return d
-
- def getProcessOutput(self, args):
- # this exists so we can override it during the unit tests
- d = utils.getProcessOutput(self.svnbin, args, {})
- return d
-
- def get_root(self):
- args = ["info", "--xml", "--non-interactive", self.svnurl]
- if self.svnuser:
- args.extend(["--username=%s" % self.svnuser])
- if self.svnpasswd:
- args.extend(["--password=%s" % self.svnpasswd])
- d = self.getProcessOutput(args)
- return d
-
- def determine_prefix(self, output):
- try:
- doc = xml.dom.minidom.parseString(output)
- except xml.parsers.expat.ExpatError:
- dbgMsg("_process_changes: ExpatError in %s" % output)
- log.msg("SVNPoller._determine_prefix_2: ExpatError in '%s'"
- % output)
- raise
- rootnodes = doc.getElementsByTagName("root")
- if not rootnodes:
- # this happens if the URL we gave was already the root. In this
- # case, our prefix is empty.
- self._prefix = ""
- return self._prefix
- rootnode = rootnodes[0]
- root = "".join([c.data for c in rootnode.childNodes])
- # root will be a unicode string
- _assert(self.svnurl.startswith(root),
- "svnurl='%s' doesn't start with <root>='%s'" %
- (self.svnurl, root))
- self._prefix = self.svnurl[len(root):]
- if self._prefix.startswith("/"):
- self._prefix = self._prefix[1:]
- log.msg("SVNPoller: svnurl=%s, root=%s, so prefix=%s" %
- (self.svnurl, root, self._prefix))
- return self._prefix
-
- def get_logs(self, ignored_prefix=None):
- args = []
- args.extend(["log", "--xml", "--verbose", "--non-interactive"])
- if self.svnuser:
- args.extend(["--username=%s" % self.svnuser])
- if self.svnpasswd:
- args.extend(["--password=%s" % self.svnpasswd])
- args.extend(["--limit=%d" % (self.histmax), self.svnurl])
- d = self.getProcessOutput(args)
- return d
-
- def parse_logs(self, output):
- # parse the XML output, return a list of <logentry> nodes
- try:
- doc = xml.dom.minidom.parseString(output)
- except xml.parsers.expat.ExpatError:
- dbgMsg("_process_changes: ExpatError in %s" % output)
- log.msg("SVNPoller._parse_changes: ExpatError in '%s'" % output)
- raise
- logentries = doc.getElementsByTagName("logentry")
- return logentries
-
-
- def _filter_new_logentries(self, logentries, last_change):
- # given a list of logentries, return a tuple of (new_last_change,
- # new_logentries), where new_logentries contains only the ones after
- # last_change
- if not logentries:
- # no entries, so last_change must stay at None
- return (None, [])
-
- mostRecent = int(logentries[0].getAttribute("revision"))
-
- if last_change is None:
- # if this is the first time we've been run, ignore any changes
- # that occurred before now. This prevents a build at every
- # startup.
- log.msg('svnPoller: starting at change %s' % mostRecent)
- return (mostRecent, [])
-
- if last_change == mostRecent:
- # an unmodified repository will hit this case
- log.msg('svnPoller: _process_changes last %s mostRecent %s' % (
- last_change, mostRecent))
- return (mostRecent, [])
-
- new_logentries = []
- for el in logentries:
- if last_change == int(el.getAttribute("revision")):
- break
- new_logentries.append(el)
- new_logentries.reverse() # return oldest first
- return (mostRecent, new_logentries)
-
- def get_new_logentries(self, logentries):
- last_change = self.last_change
- (new_last_change,
- new_logentries) = self._filter_new_logentries(logentries,
- self.last_change)
- self.last_change = new_last_change
- log.msg('svnPoller: _process_changes %s .. %s' %
- (last_change, new_last_change))
- return new_logentries
-
-
- def _get_text(self, element, tag_name):
- try:
- child_nodes = element.getElementsByTagName(tag_name)[0].childNodes
- text = "".join([t.data for t in child_nodes])
- except:
- text = "<unknown>"
- return text
-
- def _transform_path(self, path):
- _assert(path.startswith(self._prefix),
- "filepath '%s' should start with prefix '%s'" %
- (path, self._prefix))
- relative_path = path[len(self._prefix):]
- if relative_path.startswith("/"):
- relative_path = relative_path[1:]
- where = self.split_file(relative_path)
- # 'where' is either None or (branch, final_path)
- return where
-
- def create_changes(self, new_logentries):
- changes = []
-
- for el in new_logentries:
- branch_files = [] # get oldest change first
- revision = str(el.getAttribute("revision"))
- dbgMsg("Adding change revision %s" % (revision,))
- # TODO: the rest of buildbot may not be ready for unicode 'who'
- # values
- author = self._get_text(el, "author")
- comments = self._get_text(el, "msg")
- # there is a "date" field, but it provides localtime in the
- # repository's timezone, whereas we care about buildmaster's
- # localtime (since this will get used to position the boxes on
- # the Waterfall display, etc). So ignore the date field and use
- # our local clock instead.
- #when = self._get_text(el, "date")
- #when = time.mktime(time.strptime("%.19s" % when,
- # "%Y-%m-%dT%H:%M:%S"))
- branches = {}
- pathlist = el.getElementsByTagName("paths")[0]
- for p in pathlist.getElementsByTagName("path"):
- action = p.getAttribute("action")
- path = "".join([t.data for t in p.childNodes])
- # the rest of buildbot is certaily not yet ready to handle
- # unicode filenames, because they get put in RemoteCommands
- # which get sent via PB to the buildslave, and PB doesn't
- # handle unicode.
- path = path.encode("ascii")
- if path.startswith("/"):
- path = path[1:]
- where = self._transform_path(path)
-
- # if 'where' is None, the file was outside any project that
- # we care about and we should ignore it
- if where:
- branch, filename = where
- if not branch in branches:
- branches[branch] = { 'files': []}
- branches[branch]['files'].append(filename)
-
- if not branches[branch].has_key('action'):
- branches[branch]['action'] = action
-
- for branch in branches.keys():
- action = branches[branch]['action']
- files = branches[branch]['files']
- number_of_files_changed = len(files)
-
- if action == u'D' and number_of_files_changed == 1 and files[0] == '':
- log.msg("Ignoring deletion of branch '%s'" % branch)
- else:
- c = Change(who=author,
- files=files,
- comments=comments,
- revision=revision,
- branch=branch)
- changes.append(c)
-
- return changes
-
- def submit_changes(self, changes):
- for c in changes:
- self.parent.addChange(c)
-
- def finished_ok(self, res):
- log.msg("SVNPoller finished polling")
- dbgMsg('_finished : %s' % res)
- assert self.working
- self.working = False
- return res
-
- def finished_failure(self, f):
- log.msg("SVNPoller failed")
- dbgMsg('_finished : %s' % f)
- assert self.working
- self.working = False
- return None # eat the failure
diff --git a/buildbot/buildbot/clients/__init__.py b/buildbot/buildbot/clients/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/buildbot/buildbot/clients/__init__.py
+++ /dev/null
diff --git a/buildbot/buildbot/clients/base.py b/buildbot/buildbot/clients/base.py
deleted file mode 100644
index 6d9e46c..0000000
--- a/buildbot/buildbot/clients/base.py
+++ /dev/null
@@ -1,125 +0,0 @@
-
-import sys, re
-
-from twisted.spread import pb
-from twisted.cred import credentials, error
-from twisted.internet import reactor
-
-class StatusClient(pb.Referenceable):
- """To use this, call my .connected method with a RemoteReference to the
- buildmaster's StatusClientPerspective object.
- """
-
- def __init__(self, events):
- self.builders = {}
- self.events = events
-
- def connected(self, remote):
- print "connected"
- self.remote = remote
- remote.callRemote("subscribe", self.events, 5, self)
-
- def remote_builderAdded(self, buildername, builder):
- print "builderAdded", buildername
-
- def remote_builderRemoved(self, buildername):
- print "builderRemoved", buildername
-
- def remote_builderChangedState(self, buildername, state, eta):
- print "builderChangedState", buildername, state, eta
-
- def remote_buildStarted(self, buildername, build):
- print "buildStarted", buildername
-
- def remote_buildFinished(self, buildername, build, results):
- print "buildFinished", results
-
- def remote_buildETAUpdate(self, buildername, build, eta):
- print "ETA", buildername, eta
-
- def remote_stepStarted(self, buildername, build, stepname, step):
- print "stepStarted", buildername, stepname
-
- def remote_stepFinished(self, buildername, build, stepname, step, results):
- print "stepFinished", buildername, stepname, results
-
- def remote_stepETAUpdate(self, buildername, build, stepname, step,
- eta, expectations):
- print "stepETA", buildername, stepname, eta
-
- def remote_logStarted(self, buildername, build, stepname, step,
- logname, log):
- print "logStarted", buildername, stepname
-
- def remote_logFinished(self, buildername, build, stepname, step,
- logname, log):
- print "logFinished", buildername, stepname
-
- def remote_logChunk(self, buildername, build, stepname, step, logname, log,
- channel, text):
- ChunkTypes = ["STDOUT", "STDERR", "HEADER"]
- print "logChunk[%s]: %s" % (ChunkTypes[channel], text)
-
-class TextClient:
- def __init__(self, master, events="steps"):
- """
- @type events: string, one of builders, builds, steps, logs, full
- @param events: specify what level of detail should be reported.
- - 'builders': only announce new/removed Builders
- - 'builds': also announce builderChangedState, buildStarted, and
- buildFinished
- - 'steps': also announce buildETAUpdate, stepStarted, stepFinished
- - 'logs': also announce stepETAUpdate, logStarted, logFinished
- - 'full': also announce log contents
- """
- self.master = master
- self.listener = StatusClient(events)
-
- def run(self):
- """Start the TextClient."""
- self.startConnecting()
- reactor.run()
-
- def startConnecting(self):
- try:
- host, port = re.search(r'(.+):(\d+)', self.master).groups()
- port = int(port)
- except:
- print "unparseable master location '%s'" % self.master
- print " expecting something more like localhost:8007"
- raise
- cf = pb.PBClientFactory()
- creds = credentials.UsernamePassword("statusClient", "clientpw")
- d = cf.login(creds)
- reactor.connectTCP(host, port, cf)
- d.addCallbacks(self.connected, self.not_connected)
- return d
- def connected(self, ref):
- ref.notifyOnDisconnect(self.disconnected)
- self.listener.connected(ref)
- def not_connected(self, why):
- if why.check(error.UnauthorizedLogin):
- print """
-Unable to login.. are you sure we are connecting to a
-buildbot.status.client.PBListener port and not to the slaveport?
-"""
- reactor.stop()
- return why
- def disconnected(self, ref):
- print "lost connection"
- # we can get here in one of two ways: the buildmaster has
- # disconnected us (probably because it shut itself down), or because
- # we've been SIGINT'ed. In the latter case, our reactor is already
- # shut down, but we have no easy way of detecting that. So protect
- # our attempt to shut down the reactor.
- try:
- reactor.stop()
- except RuntimeError:
- pass
-
-if __name__ == '__main__':
- master = "localhost:8007"
- if len(sys.argv) > 1:
- master = sys.argv[1]
- c = TextClient()
- c.run()
diff --git a/buildbot/buildbot/clients/debug.glade b/buildbot/buildbot/clients/debug.glade
deleted file mode 100644
index 40468bb..0000000
--- a/buildbot/buildbot/clients/debug.glade
+++ /dev/null
@@ -1,684 +0,0 @@
-<?xml version="1.0" standalone="no"?> <!--*- mode: xml -*-->
-<!DOCTYPE glade-interface SYSTEM "http://glade.gnome.org/glade-2.0.dtd">
-
-<glade-interface>
-<requires lib="gnome"/>
-
-<widget class="GtkWindow" id="window1">
- <property name="visible">True</property>
- <property name="title" translatable="yes">Buildbot Debug Tool</property>
- <property name="type">GTK_WINDOW_TOPLEVEL</property>
- <property name="window_position">GTK_WIN_POS_NONE</property>
- <property name="modal">False</property>
- <property name="resizable">True</property>
- <property name="destroy_with_parent">False</property>
- <property name="decorated">True</property>
- <property name="skip_taskbar_hint">False</property>
- <property name="skip_pager_hint">False</property>
- <property name="type_hint">GDK_WINDOW_TYPE_HINT_NORMAL</property>
- <property name="gravity">GDK_GRAVITY_NORTH_WEST</property>
- <property name="focus_on_map">True</property>
- <property name="urgency_hint">False</property>
-
- <child>
- <widget class="GtkVBox" id="vbox1">
- <property name="visible">True</property>
- <property name="homogeneous">False</property>
- <property name="spacing">0</property>
-
- <child>
- <widget class="GtkHBox" id="connection">
- <property name="visible">True</property>
- <property name="homogeneous">False</property>
- <property name="spacing">0</property>
-
- <child>
- <widget class="GtkButton" id="connectbutton">
- <property name="visible">True</property>
- <property name="can_focus">True</property>
- <property name="label" translatable="yes">Connect</property>
- <property name="use_underline">True</property>
- <property name="relief">GTK_RELIEF_NORMAL</property>
- <property name="focus_on_click">True</property>
- <signal name="clicked" handler="do_connect"/>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">False</property>
- <property name="fill">False</property>
- </packing>
- </child>
-
- <child>
- <widget class="GtkLabel" id="connectlabel">
- <property name="visible">True</property>
- <property name="label" translatable="yes">Disconnected</property>
- <property name="use_underline">False</property>
- <property name="use_markup">False</property>
- <property name="justify">GTK_JUSTIFY_CENTER</property>
- <property name="wrap">False</property>
- <property name="selectable">False</property>
- <property name="xalign">0.5</property>
- <property name="yalign">0.5</property>
- <property name="xpad">0</property>
- <property name="ypad">0</property>
- <property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
- <property name="width_chars">-1</property>
- <property name="single_line_mode">False</property>
- <property name="angle">0</property>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">True</property>
- <property name="fill">True</property>
- </packing>
- </child>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">False</property>
- <property name="fill">False</property>
- </packing>
- </child>
-
- <child>
- <widget class="GtkHBox" id="commands">
- <property name="visible">True</property>
- <property name="homogeneous">False</property>
- <property name="spacing">0</property>
-
- <child>
- <widget class="GtkButton" id="reload">
- <property name="visible">True</property>
- <property name="can_focus">True</property>
- <property name="label" translatable="yes">Reload .cfg</property>
- <property name="use_underline">True</property>
- <property name="relief">GTK_RELIEF_NORMAL</property>
- <property name="focus_on_click">True</property>
- <signal name="clicked" handler="do_reload" last_modification_time="Wed, 24 Sep 2003 20:47:55 GMT"/>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">False</property>
- <property name="fill">False</property>
- </packing>
- </child>
-
- <child>
- <widget class="GtkButton" id="rebuild">
- <property name="visible">True</property>
- <property name="sensitive">False</property>
- <property name="can_focus">True</property>
- <property name="label" translatable="yes">Rebuild .py</property>
- <property name="use_underline">True</property>
- <property name="relief">GTK_RELIEF_NORMAL</property>
- <property name="focus_on_click">True</property>
- <signal name="clicked" handler="do_rebuild" last_modification_time="Wed, 24 Sep 2003 20:49:18 GMT"/>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">False</property>
- <property name="fill">False</property>
- </packing>
- </child>
-
- <child>
- <widget class="GtkButton" id="button7">
- <property name="visible">True</property>
- <property name="can_focus">True</property>
- <property name="label" translatable="yes">poke IRC</property>
- <property name="use_underline">True</property>
- <property name="relief">GTK_RELIEF_NORMAL</property>
- <property name="focus_on_click">True</property>
- <signal name="clicked" handler="do_poke_irc" last_modification_time="Wed, 14 Jan 2004 22:23:59 GMT"/>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">False</property>
- <property name="fill">False</property>
- </packing>
- </child>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">True</property>
- <property name="fill">True</property>
- </packing>
- </child>
-
- <child>
- <widget class="GtkHBox" id="hbox3">
- <property name="visible">True</property>
- <property name="homogeneous">False</property>
- <property name="spacing">0</property>
-
- <child>
- <widget class="GtkCheckButton" id="usebranch">
- <property name="visible">True</property>
- <property name="can_focus">True</property>
- <property name="label" translatable="yes">Branch:</property>
- <property name="use_underline">True</property>
- <property name="relief">GTK_RELIEF_NORMAL</property>
- <property name="focus_on_click">True</property>
- <property name="active">False</property>
- <property name="inconsistent">False</property>
- <property name="draw_indicator">True</property>
- <signal name="toggled" handler="on_usebranch_toggled" last_modification_time="Tue, 25 Oct 2005 01:42:45 GMT"/>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">False</property>
- <property name="fill">False</property>
- </packing>
- </child>
-
- <child>
- <widget class="GtkEntry" id="branch">
- <property name="visible">True</property>
- <property name="can_focus">True</property>
- <property name="editable">True</property>
- <property name="visibility">True</property>
- <property name="max_length">0</property>
- <property name="text" translatable="yes"></property>
- <property name="has_frame">True</property>
- <property name="invisible_char">*</property>
- <property name="activates_default">False</property>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">True</property>
- <property name="fill">True</property>
- </packing>
- </child>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">True</property>
- <property name="fill">True</property>
- </packing>
- </child>
-
- <child>
- <widget class="GtkHBox" id="hbox1">
- <property name="visible">True</property>
- <property name="homogeneous">False</property>
- <property name="spacing">0</property>
-
- <child>
- <widget class="GtkCheckButton" id="userevision">
- <property name="visible">True</property>
- <property name="can_focus">True</property>
- <property name="label" translatable="yes">Revision:</property>
- <property name="use_underline">True</property>
- <property name="relief">GTK_RELIEF_NORMAL</property>
- <property name="focus_on_click">True</property>
- <property name="active">False</property>
- <property name="inconsistent">False</property>
- <property name="draw_indicator">True</property>
- <signal name="toggled" handler="on_userevision_toggled" last_modification_time="Wed, 08 Sep 2004 17:58:33 GMT"/>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">False</property>
- <property name="fill">False</property>
- </packing>
- </child>
-
- <child>
- <widget class="GtkEntry" id="revision">
- <property name="visible">True</property>
- <property name="can_focus">True</property>
- <property name="editable">True</property>
- <property name="visibility">True</property>
- <property name="max_length">0</property>
- <property name="text" translatable="yes"></property>
- <property name="has_frame">True</property>
- <property name="invisible_char">*</property>
- <property name="activates_default">False</property>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">True</property>
- <property name="fill">True</property>
- </packing>
- </child>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">True</property>
- <property name="fill">True</property>
- </packing>
- </child>
-
- <child>
- <widget class="GtkFrame" id="Commit">
- <property name="border_width">4</property>
- <property name="visible">True</property>
- <property name="label_xalign">0</property>
- <property name="label_yalign">0.5</property>
- <property name="shadow_type">GTK_SHADOW_ETCHED_IN</property>
-
- <child>
- <widget class="GtkAlignment" id="alignment1">
- <property name="visible">True</property>
- <property name="xalign">0.5</property>
- <property name="yalign">0.5</property>
- <property name="xscale">1</property>
- <property name="yscale">1</property>
- <property name="top_padding">0</property>
- <property name="bottom_padding">0</property>
- <property name="left_padding">0</property>
- <property name="right_padding">0</property>
-
- <child>
- <widget class="GtkVBox" id="vbox3">
- <property name="visible">True</property>
- <property name="homogeneous">False</property>
- <property name="spacing">0</property>
-
- <child>
- <widget class="GtkHBox" id="commit">
- <property name="visible">True</property>
- <property name="homogeneous">False</property>
- <property name="spacing">0</property>
-
- <child>
- <widget class="GtkButton" id="button2">
- <property name="visible">True</property>
- <property name="can_focus">True</property>
- <property name="label" translatable="yes">commit</property>
- <property name="use_underline">True</property>
- <property name="relief">GTK_RELIEF_NORMAL</property>
- <property name="focus_on_click">True</property>
- <signal name="clicked" handler="do_commit"/>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">False</property>
- <property name="fill">False</property>
- </packing>
- </child>
-
- <child>
- <widget class="GtkEntry" id="filename">
- <property name="visible">True</property>
- <property name="can_focus">True</property>
- <property name="editable">True</property>
- <property name="visibility">True</property>
- <property name="max_length">0</property>
- <property name="text" translatable="yes">twisted/internet/app.py</property>
- <property name="has_frame">True</property>
- <property name="invisible_char">*</property>
- <property name="activates_default">False</property>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">True</property>
- <property name="fill">True</property>
- </packing>
- </child>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">True</property>
- <property name="fill">True</property>
- </packing>
- </child>
-
- <child>
- <widget class="GtkHBox" id="hbox2">
- <property name="visible">True</property>
- <property name="homogeneous">False</property>
- <property name="spacing">0</property>
-
- <child>
- <widget class="GtkLabel" id="label5">
- <property name="visible">True</property>
- <property name="label" translatable="yes">Who: </property>
- <property name="use_underline">False</property>
- <property name="use_markup">False</property>
- <property name="justify">GTK_JUSTIFY_LEFT</property>
- <property name="wrap">False</property>
- <property name="selectable">False</property>
- <property name="xalign">0.5</property>
- <property name="yalign">0.5</property>
- <property name="xpad">0</property>
- <property name="ypad">0</property>
- <property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
- <property name="width_chars">-1</property>
- <property name="single_line_mode">False</property>
- <property name="angle">0</property>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">False</property>
- <property name="fill">False</property>
- </packing>
- </child>
-
- <child>
- <widget class="GtkEntry" id="who">
- <property name="visible">True</property>
- <property name="can_focus">True</property>
- <property name="editable">True</property>
- <property name="visibility">True</property>
- <property name="max_length">0</property>
- <property name="text" translatable="yes">bob</property>
- <property name="has_frame">True</property>
- <property name="invisible_char">*</property>
- <property name="activates_default">False</property>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">True</property>
- <property name="fill">True</property>
- </packing>
- </child>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">True</property>
- <property name="fill">True</property>
- </packing>
- </child>
- </widget>
- </child>
- </widget>
- </child>
-
- <child>
- <widget class="GtkLabel" id="label4">
- <property name="visible">True</property>
- <property name="label" translatable="yes">Commit</property>
- <property name="use_underline">False</property>
- <property name="use_markup">False</property>
- <property name="justify">GTK_JUSTIFY_LEFT</property>
- <property name="wrap">False</property>
- <property name="selectable">False</property>
- <property name="xalign">0.5</property>
- <property name="yalign">0.5</property>
- <property name="xpad">2</property>
- <property name="ypad">0</property>
- <property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
- <property name="width_chars">-1</property>
- <property name="single_line_mode">False</property>
- <property name="angle">0</property>
- </widget>
- <packing>
- <property name="type">label_item</property>
- </packing>
- </child>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">True</property>
- <property name="fill">True</property>
- </packing>
- </child>
-
- <child>
- <widget class="GtkFrame" id="builderframe">
- <property name="border_width">4</property>
- <property name="visible">True</property>
- <property name="label_xalign">0</property>
- <property name="label_yalign">0.5</property>
- <property name="shadow_type">GTK_SHADOW_ETCHED_IN</property>
-
- <child>
- <widget class="GtkVBox" id="vbox2">
- <property name="visible">True</property>
- <property name="homogeneous">False</property>
- <property name="spacing">0</property>
-
- <child>
- <widget class="GtkHBox" id="builder">
- <property name="visible">True</property>
- <property name="homogeneous">False</property>
- <property name="spacing">3</property>
-
- <child>
- <widget class="GtkLabel" id="label1">
- <property name="visible">True</property>
- <property name="label" translatable="yes">Builder:</property>
- <property name="use_underline">False</property>
- <property name="use_markup">False</property>
- <property name="justify">GTK_JUSTIFY_CENTER</property>
- <property name="wrap">False</property>
- <property name="selectable">False</property>
- <property name="xalign">0.5</property>
- <property name="yalign">0.5</property>
- <property name="xpad">0</property>
- <property name="ypad">0</property>
- <property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
- <property name="width_chars">-1</property>
- <property name="single_line_mode">False</property>
- <property name="angle">0</property>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">False</property>
- <property name="fill">False</property>
- </packing>
- </child>
-
- <child>
- <widget class="GtkEntry" id="buildname">
- <property name="visible">True</property>
- <property name="can_focus">True</property>
- <property name="editable">True</property>
- <property name="visibility">True</property>
- <property name="max_length">0</property>
- <property name="text" translatable="yes">one</property>
- <property name="has_frame">True</property>
- <property name="invisible_char">*</property>
- <property name="activates_default">False</property>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">True</property>
- <property name="fill">True</property>
- </packing>
- </child>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">True</property>
- <property name="fill">True</property>
- </packing>
- </child>
-
- <child>
- <widget class="GtkHBox" id="buildercontrol">
- <property name="visible">True</property>
- <property name="homogeneous">False</property>
- <property name="spacing">0</property>
-
- <child>
- <widget class="GtkButton" id="button1">
- <property name="visible">True</property>
- <property name="can_focus">True</property>
- <property name="label" translatable="yes">Request
-Build</property>
- <property name="use_underline">True</property>
- <property name="relief">GTK_RELIEF_NORMAL</property>
- <property name="focus_on_click">True</property>
- <signal name="clicked" handler="do_build"/>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">False</property>
- <property name="fill">False</property>
- </packing>
- </child>
-
- <child>
- <widget class="GtkButton" id="button8">
- <property name="visible">True</property>
- <property name="can_focus">True</property>
- <property name="label" translatable="yes">Ping
-Builder</property>
- <property name="use_underline">True</property>
- <property name="relief">GTK_RELIEF_NORMAL</property>
- <property name="focus_on_click">True</property>
- <signal name="clicked" handler="do_ping" last_modification_time="Fri, 24 Nov 2006 05:18:51 GMT"/>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">False</property>
- <property name="fill">False</property>
- </packing>
- </child>
-
- <child>
- <placeholder/>
- </child>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">True</property>
- <property name="fill">True</property>
- </packing>
- </child>
-
- <child>
- <widget class="GtkHBox" id="status">
- <property name="visible">True</property>
- <property name="homogeneous">False</property>
- <property name="spacing">0</property>
-
- <child>
- <widget class="GtkLabel" id="label2">
- <property name="visible">True</property>
- <property name="label" translatable="yes">Currently:</property>
- <property name="use_underline">False</property>
- <property name="use_markup">False</property>
- <property name="justify">GTK_JUSTIFY_CENTER</property>
- <property name="wrap">False</property>
- <property name="selectable">False</property>
- <property name="xalign">0.5</property>
- <property name="yalign">0.5</property>
- <property name="xpad">7</property>
- <property name="ypad">0</property>
- <property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
- <property name="width_chars">-1</property>
- <property name="single_line_mode">False</property>
- <property name="angle">0</property>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">False</property>
- <property name="fill">False</property>
- </packing>
- </child>
-
- <child>
- <widget class="GtkButton" id="button3">
- <property name="visible">True</property>
- <property name="can_focus">True</property>
- <property name="label" translatable="yes">offline</property>
- <property name="use_underline">True</property>
- <property name="relief">GTK_RELIEF_NORMAL</property>
- <property name="focus_on_click">True</property>
- <signal name="clicked" handler="do_current_offline"/>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">False</property>
- <property name="fill">False</property>
- </packing>
- </child>
-
- <child>
- <widget class="GtkButton" id="button4">
- <property name="visible">True</property>
- <property name="can_focus">True</property>
- <property name="label" translatable="yes">idle</property>
- <property name="use_underline">True</property>
- <property name="relief">GTK_RELIEF_NORMAL</property>
- <property name="focus_on_click">True</property>
- <signal name="clicked" handler="do_current_idle"/>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">False</property>
- <property name="fill">False</property>
- </packing>
- </child>
-
- <child>
- <widget class="GtkButton" id="button5">
- <property name="visible">True</property>
- <property name="can_focus">True</property>
- <property name="label" translatable="yes">waiting</property>
- <property name="use_underline">True</property>
- <property name="relief">GTK_RELIEF_NORMAL</property>
- <property name="focus_on_click">True</property>
- <signal name="clicked" handler="do_current_waiting"/>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">False</property>
- <property name="fill">False</property>
- </packing>
- </child>
-
- <child>
- <widget class="GtkButton" id="button6">
- <property name="visible">True</property>
- <property name="can_focus">True</property>
- <property name="label" translatable="yes">building</property>
- <property name="use_underline">True</property>
- <property name="relief">GTK_RELIEF_NORMAL</property>
- <property name="focus_on_click">True</property>
- <signal name="clicked" handler="do_current_building"/>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">False</property>
- <property name="fill">False</property>
- </packing>
- </child>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">True</property>
- <property name="fill">True</property>
- </packing>
- </child>
- </widget>
- </child>
-
- <child>
- <widget class="GtkLabel" id="label3">
- <property name="visible">True</property>
- <property name="label" translatable="yes">Builder</property>
- <property name="use_underline">False</property>
- <property name="use_markup">False</property>
- <property name="justify">GTK_JUSTIFY_LEFT</property>
- <property name="wrap">False</property>
- <property name="selectable">False</property>
- <property name="xalign">0.5</property>
- <property name="yalign">0.5</property>
- <property name="xpad">2</property>
- <property name="ypad">0</property>
- <property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
- <property name="width_chars">-1</property>
- <property name="single_line_mode">False</property>
- <property name="angle">0</property>
- </widget>
- <packing>
- <property name="type">label_item</property>
- </packing>
- </child>
- </widget>
- <packing>
- <property name="padding">0</property>
- <property name="expand">True</property>
- <property name="fill">True</property>
- </packing>
- </child>
- </widget>
- </child>
-</widget>
-
-</glade-interface>
diff --git a/buildbot/buildbot/clients/debug.py b/buildbot/buildbot/clients/debug.py
deleted file mode 100644
index 5413765..0000000
--- a/buildbot/buildbot/clients/debug.py
+++ /dev/null
@@ -1,181 +0,0 @@
-
-from twisted.internet import gtk2reactor
-gtk2reactor.install()
-from twisted.internet import reactor
-from twisted.python import util
-from twisted.spread import pb
-from twisted.cred import credentials
-import gtk.glade
-import sys, re
-
-class DebugWidget:
- def __init__(self, master="localhost:8007", passwd="debugpw"):
- self.connected = 0
- try:
- host, port = re.search(r'(.+):(\d+)', master).groups()
- except:
- print "unparseable master location '%s'" % master
- print " expecting something more like localhost:8007"
- raise
- self.host = host
- self.port = int(port)
- self.passwd = passwd
- self.remote = None
- xml = self.xml = gtk.glade.XML(util.sibpath(__file__, "debug.glade"))
- g = xml.get_widget
- self.buildname = g('buildname')
- self.filename = g('filename')
- self.connectbutton = g('connectbutton')
- self.connectlabel = g('connectlabel')
- g('window1').connect('destroy', lambda win: gtk.main_quit())
- # put the master info in the window's titlebar
- g('window1').set_title("Buildbot Debug Tool: %s" % master)
- c = xml.signal_connect
- c('do_connect', self.do_connect)
- c('do_reload', self.do_reload)
- c('do_rebuild', self.do_rebuild)
- c('do_poke_irc', self.do_poke_irc)
- c('do_build', self.do_build)
- c('do_ping', self.do_ping)
- c('do_commit', self.do_commit)
- c('on_usebranch_toggled', self.usebranch_toggled)
- self.usebranch_toggled(g('usebranch'))
- c('on_userevision_toggled', self.userevision_toggled)
- self.userevision_toggled(g('userevision'))
- c('do_current_offline', self.do_current, "offline")
- c('do_current_idle', self.do_current, "idle")
- c('do_current_waiting', self.do_current, "waiting")
- c('do_current_building', self.do_current, "building")
-
- def do_connect(self, widget):
- if self.connected:
- self.connectlabel.set_text("Disconnecting...")
- if self.remote:
- self.remote.broker.transport.loseConnection()
- else:
- self.connectlabel.set_text("Connecting...")
- f = pb.PBClientFactory()
- creds = credentials.UsernamePassword("debug", self.passwd)
- d = f.login(creds)
- reactor.connectTCP(self.host, int(self.port), f)
- d.addCallbacks(self.connect_complete, self.connect_failed)
- def connect_complete(self, ref):
- self.connectbutton.set_label("Disconnect")
- self.connectlabel.set_text("Connected")
- self.connected = 1
- self.remote = ref
- self.remote.callRemote("print", "hello cleveland")
- self.remote.notifyOnDisconnect(self.disconnected)
- def connect_failed(self, why):
- self.connectlabel.set_text("Failed")
- print why
- def disconnected(self, ref):
- self.connectbutton.set_label("Connect")
- self.connectlabel.set_text("Disconnected")
- self.connected = 0
- self.remote = None
-
- def do_reload(self, widget):
- if not self.remote:
- return
- d = self.remote.callRemote("reload")
- d.addErrback(self.err)
- def do_rebuild(self, widget):
- print "Not yet implemented"
- return
- def do_poke_irc(self, widget):
- if not self.remote:
- return
- d = self.remote.callRemote("pokeIRC")
- d.addErrback(self.err)
-
- def do_build(self, widget):
- if not self.remote:
- return
- name = self.buildname.get_text()
- branch = None
- if self.xml.get_widget("usebranch").get_active():
- branch = self.xml.get_widget('branch').get_text()
- if branch == '':
- branch = None
- revision = None
- if self.xml.get_widget("userevision").get_active():
- revision = self.xml.get_widget('revision').get_text()
- if revision == '':
- revision = None
- reason = "debugclient 'Request Build' button pushed"
- properties = {}
- d = self.remote.callRemote("requestBuild",
- name, reason, branch, revision, properties)
- d.addErrback(self.err)
-
- def do_ping(self, widget):
- if not self.remote:
- return
- name = self.buildname.get_text()
- d = self.remote.callRemote("pingBuilder", name)
- d.addErrback(self.err)
-
- def usebranch_toggled(self, widget):
- rev = self.xml.get_widget('branch')
- if widget.get_active():
- rev.set_sensitive(True)
- else:
- rev.set_sensitive(False)
-
- def userevision_toggled(self, widget):
- rev = self.xml.get_widget('revision')
- if widget.get_active():
- rev.set_sensitive(True)
- else:
- rev.set_sensitive(False)
-
- def do_commit(self, widget):
- if not self.remote:
- return
- filename = self.filename.get_text()
- who = self.xml.get_widget("who").get_text()
-
- branch = None
- if self.xml.get_widget("usebranch").get_active():
- branch = self.xml.get_widget('branch').get_text()
- if branch == '':
- branch = None
-
- revision = None
- if self.xml.get_widget("userevision").get_active():
- revision = self.xml.get_widget('revision').get_text()
- try:
- revision = int(revision)
- except ValueError:
- pass
- if revision == '':
- revision = None
-
- kwargs = { 'revision': revision, 'who': who }
- if branch:
- kwargs['branch'] = branch
- d = self.remote.callRemote("fakeChange", filename, **kwargs)
- d.addErrback(self.err)
-
- def do_current(self, widget, state):
- if not self.remote:
- return
- name = self.buildname.get_text()
- d = self.remote.callRemote("setCurrentState", name, state)
- d.addErrback(self.err)
- def err(self, failure):
- print "received error:", failure
-
- def run(self):
- reactor.run()
-
-if __name__ == '__main__':
- master = "localhost:8007"
- if len(sys.argv) > 1:
- master = sys.argv[1]
- passwd = "debugpw"
- if len(sys.argv) > 2:
- passwd = sys.argv[2]
- d = DebugWidget(master, passwd)
- d.run()
diff --git a/buildbot/buildbot/clients/gtkPanes.py b/buildbot/buildbot/clients/gtkPanes.py
deleted file mode 100644
index 8acba1b..0000000
--- a/buildbot/buildbot/clients/gtkPanes.py
+++ /dev/null
@@ -1,532 +0,0 @@
-
-from twisted.internet import gtk2reactor
-gtk2reactor.install()
-
-import sys, time
-
-import pygtk
-pygtk.require("2.0")
-import gobject, gtk
-assert(gtk.Window) # in gtk1 it's gtk.GtkWindow
-
-from twisted.spread import pb
-
-#from buildbot.clients.base import Builder, Client
-from buildbot.clients.base import TextClient
-from buildbot.util import now
-
-from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE, EXCEPTION
-
-'''
-class Pane:
- def __init__(self):
- pass
-
-class OneRow(Pane):
- """This is a one-row status bar. It has one square per Builder, and that
- square is either red, yellow, or green. """
-
- def __init__(self):
- Pane.__init__(self)
- self.widget = gtk.VBox(gtk.FALSE, 2)
- self.nameBox = gtk.HBox(gtk.TRUE)
- self.statusBox = gtk.HBox(gtk.TRUE)
- self.widget.add(self.nameBox)
- self.widget.add(self.statusBox)
- self.widget.show_all()
- self.builders = []
-
- def getWidget(self):
- return self.widget
- def addBuilder(self, builder):
- print "OneRow.addBuilder"
- # todo: ordering. Should follow the order in which they were added
- # to the original BotMaster
- self.builders.append(builder)
- # add the name to the left column, and a label (with background) to
- # the right
- name = gtk.Label(builder.name)
- status = gtk.Label('??')
- status.set_size_request(64,64)
- box = gtk.EventBox()
- box.add(status)
- name.show()
- box.show_all()
- self.nameBox.add(name)
- self.statusBox.add(box)
- builder.haveSomeWidgets([name, status, box])
-
-class R2Builder(Builder):
- def start(self):
- self.nameSquare.set_text(self.name)
- self.statusSquare.set_text("???")
- self.subscribe()
- def haveSomeWidgets(self, widgets):
- self.nameSquare, self.statusSquare, self.statusBox = widgets
-
- def remote_newLastBuildStatus(self, event):
- color = None
- if event:
- text = "\n".join(event.text)
- color = event.color
- else:
- text = "none"
- self.statusSquare.set_text(text)
- if color:
- print "color", color
- self.statusBox.modify_bg(gtk.STATE_NORMAL,
- gtk.gdk.color_parse(color))
-
- def remote_currentlyOffline(self):
- self.statusSquare.set_text("offline")
- def remote_currentlyIdle(self):
- self.statusSquare.set_text("idle")
- def remote_currentlyWaiting(self, seconds):
- self.statusSquare.set_text("waiting")
- def remote_currentlyInterlocked(self):
- self.statusSquare.set_text("interlocked")
- def remote_currentlyBuilding(self, eta):
- self.statusSquare.set_text("building")
-
-
-class CompactRow(Pane):
- def __init__(self):
- Pane.__init__(self)
- self.widget = gtk.VBox(gtk.FALSE, 3)
- self.nameBox = gtk.HBox(gtk.TRUE, 2)
- self.lastBuildBox = gtk.HBox(gtk.TRUE, 2)
- self.statusBox = gtk.HBox(gtk.TRUE, 2)
- self.widget.add(self.nameBox)
- self.widget.add(self.lastBuildBox)
- self.widget.add(self.statusBox)
- self.widget.show_all()
- self.builders = []
-
- def getWidget(self):
- return self.widget
-
- def addBuilder(self, builder):
- self.builders.append(builder)
-
- name = gtk.Label(builder.name)
- name.show()
- self.nameBox.add(name)
-
- last = gtk.Label('??')
- last.set_size_request(64,64)
- lastbox = gtk.EventBox()
- lastbox.add(last)
- lastbox.show_all()
- self.lastBuildBox.add(lastbox)
-
- status = gtk.Label('??')
- status.set_size_request(64,64)
- statusbox = gtk.EventBox()
- statusbox.add(status)
- statusbox.show_all()
- self.statusBox.add(statusbox)
-
- builder.haveSomeWidgets([name, last, lastbox, status, statusbox])
-
- def removeBuilder(self, name, builder):
- self.nameBox.remove(builder.nameSquare)
- self.lastBuildBox.remove(builder.lastBuildBox)
- self.statusBox.remove(builder.statusBox)
- self.builders.remove(builder)
-
-class CompactBuilder(Builder):
- def setup(self):
- self.timer = None
- self.text = []
- self.eta = None
- def start(self):
- self.nameSquare.set_text(self.name)
- self.statusSquare.set_text("???")
- self.subscribe()
- def haveSomeWidgets(self, widgets):
- (self.nameSquare,
- self.lastBuildSquare, self.lastBuildBox,
- self.statusSquare, self.statusBox) = widgets
-
- def remote_currentlyOffline(self):
- self.eta = None
- self.stopTimer()
- self.statusSquare.set_text("offline")
- self.statusBox.modify_bg(gtk.STATE_NORMAL,
- gtk.gdk.color_parse("red"))
- def remote_currentlyIdle(self):
- self.eta = None
- self.stopTimer()
- self.statusSquare.set_text("idle")
- def remote_currentlyWaiting(self, seconds):
- self.nextBuild = now() + seconds
- self.startTimer(self.updateWaiting)
- def remote_currentlyInterlocked(self):
- self.stopTimer()
- self.statusSquare.set_text("interlocked")
- def startTimer(self, func):
- # the func must clear self.timer and return gtk.FALSE when the event
- # has arrived
- self.stopTimer()
- self.timer = gtk.timeout_add(1000, func)
- func()
- def stopTimer(self):
- if self.timer:
- gtk.timeout_remove(self.timer)
- self.timer = None
- def updateWaiting(self):
- when = self.nextBuild
- if now() < when:
- next = time.strftime("%H:%M:%S", time.localtime(when))
- secs = "[%d seconds]" % (when - now())
- self.statusSquare.set_text("waiting\n%s\n%s" % (next, secs))
- return gtk.TRUE # restart timer
- else:
- # done
- self.statusSquare.set_text("waiting\n[RSN]")
- self.timer = None
- return gtk.FALSE
-
- def remote_currentlyBuilding(self, eta):
- self.stopTimer()
- self.statusSquare.set_text("building")
- if eta:
- d = eta.callRemote("subscribe", self, 5)
-
- def remote_newLastBuildStatus(self, event):
- color = None
- if event:
- text = "\n".join(event.text)
- color = event.color
- else:
- text = "none"
- if not color: color = "gray"
- self.lastBuildSquare.set_text(text)
- self.lastBuildBox.modify_bg(gtk.STATE_NORMAL,
- gtk.gdk.color_parse(color))
-
- def remote_newEvent(self, event):
- assert(event.__class__ == GtkUpdatingEvent)
- self.current = event
- event.builder = self
- self.text = event.text
- if not self.text: self.text = ["idle"]
- self.eta = None
- self.stopTimer()
- self.updateText()
- color = event.color
- if not color: color = "gray"
- self.statusBox.modify_bg(gtk.STATE_NORMAL,
- gtk.gdk.color_parse(color))
-
- def updateCurrent(self):
- text = self.current.text
- if text:
- self.text = text
- self.updateText()
- color = self.current.color
- if color:
- self.statusBox.modify_bg(gtk.STATE_NORMAL,
- gtk.gdk.color_parse(color))
- def updateText(self):
- etatext = []
- if self.eta:
- etatext = [time.strftime("%H:%M:%S", time.localtime(self.eta))]
- if now() > self.eta:
- etatext += ["RSN"]
- else:
- seconds = self.eta - now()
- etatext += ["[%d secs]" % seconds]
- text = "\n".join(self.text + etatext)
- self.statusSquare.set_text(text)
- def updateTextTimer(self):
- self.updateText()
- return gtk.TRUE # restart timer
-
- def remote_progress(self, seconds):
- if seconds == None:
- self.eta = None
- else:
- self.eta = now() + seconds
- self.startTimer(self.updateTextTimer)
- self.updateText()
- def remote_finished(self, eta):
- self.eta = None
- self.stopTimer()
- self.updateText()
- eta.callRemote("unsubscribe", self)
-'''
-
-class Box:
- def __init__(self, text="?"):
- self.text = text
- self.box = gtk.EventBox()
- self.label = gtk.Label(text)
- self.box.add(self.label)
- self.box.set_size_request(64,64)
- self.timer = None
-
- def getBox(self):
- return self.box
-
- def setText(self, text):
- self.text = text
- self.label.set_text(text)
-
- def setColor(self, color):
- if not color:
- return
- self.box.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse(color))
-
- def setETA(self, eta):
- if eta:
- self.when = now() + eta
- self.startTimer()
- else:
- self.stopTimer()
-
- def startTimer(self):
- self.stopTimer()
- self.timer = gobject.timeout_add(1000, self.update)
- self.update()
-
- def stopTimer(self):
- if self.timer:
- gobject.source_remove(self.timer)
- self.timer = None
- self.label.set_text(self.text)
-
- def update(self):
- if now() < self.when:
- next = time.strftime("%H:%M:%S", time.localtime(self.when))
- secs = "[%d secs]" % (self.when - now())
- self.label.set_text("%s\n%s\n%s" % (self.text, next, secs))
- return True # restart timer
- else:
- # done
- self.label.set_text("%s\n[soon]\n[overdue]" % (self.text,))
- self.timer = None
- return False
-
-
-
-class ThreeRowBuilder:
- def __init__(self, name, ref):
- self.name = name
-
- self.last = Box()
- self.current = Box()
- self.step = Box("idle")
- self.step.setColor("white")
-
- self.ref = ref
-
- def getBoxes(self):
- return self.last.getBox(), self.current.getBox(), self.step.getBox()
-
- def getLastBuild(self):
- d = self.ref.callRemote("getLastFinishedBuild")
- d.addCallback(self.gotLastBuild)
- def gotLastBuild(self, build):
- if build:
- build.callRemote("getText").addCallback(self.gotLastText)
- build.callRemote("getResults").addCallback(self.gotLastResult)
-
- def gotLastText(self, text):
- print "Got text", text
- self.last.setText("\n".join(text))
-
- def gotLastResult(self, result):
- colormap = {SUCCESS: 'green',
- FAILURE: 'red',
- WARNINGS: 'orange',
- EXCEPTION: 'purple',
- }
- self.last.setColor(colormap[result])
-
- def getState(self):
- self.ref.callRemote("getState").addCallback(self.gotState)
- def gotState(self, res):
- state, ETA, builds = res
- # state is one of: offline, idle, waiting, interlocked, building
- # TODO: ETA is going away, you have to look inside the builds to get
- # that value
- currentmap = {"offline": "red",
- "idle": "white",
- "waiting": "yellow",
- "interlocked": "yellow",
- "building": "yellow",}
- text = state
- self.current.setColor(currentmap[state])
- if ETA is not None:
- text += "\nETA=%s secs" % ETA
- self.current.setText(state)
-
- def buildStarted(self, build):
- print "[%s] buildStarted" % (self.name,)
- self.current.setColor("yellow")
-
- def buildFinished(self, build, results):
- print "[%s] buildFinished: %s" % (self.name, results)
- self.gotLastBuild(build)
- self.current.setColor("white")
- self.current.stopTimer()
-
- def buildETAUpdate(self, eta):
- print "[%s] buildETAUpdate: %s" % (self.name, eta)
- self.current.setETA(eta)
-
-
- def stepStarted(self, stepname, step):
- print "[%s] stepStarted: %s" % (self.name, stepname)
- self.step.setText(stepname)
- self.step.setColor("yellow")
- def stepFinished(self, stepname, step, results):
- print "[%s] stepFinished: %s %s" % (self.name, stepname, results)
- self.step.setText("idle")
- self.step.setColor("white")
- self.step.stopTimer()
- def stepETAUpdate(self, stepname, eta):
- print "[%s] stepETAUpdate: %s %s" % (self.name, stepname, eta)
- self.step.setETA(eta)
-
-
-class ThreeRowClient(pb.Referenceable):
- def __init__(self, window):
- self.window = window
- self.buildernames = []
- self.builders = {}
-
- def connected(self, ref):
- print "connected"
- self.ref = ref
- self.pane = gtk.VBox(False, 2)
- self.table = gtk.Table(1+3, 1)
- self.pane.add(self.table)
- self.window.vb.add(self.pane)
- self.pane.show_all()
- ref.callRemote("subscribe", "logs", 5, self)
-
- def removeTable(self):
- for child in self.table.get_children():
- self.table.remove(child)
- self.pane.remove(self.table)
-
- def makeTable(self):
- columns = len(self.builders)
- self.table = gtk.Table(2, columns)
- self.pane.add(self.table)
- for i in range(len(self.buildernames)):
- name = self.buildernames[i]
- b = self.builders[name]
- last,current,step = b.getBoxes()
- self.table.attach(gtk.Label(name), i, i+1, 0, 1)
- self.table.attach(last, i, i+1, 1, 2,
- xpadding=1, ypadding=1)
- self.table.attach(current, i, i+1, 2, 3,
- xpadding=1, ypadding=1)
- self.table.attach(step, i, i+1, 3, 4,
- xpadding=1, ypadding=1)
- self.table.show_all()
-
- def rebuildTable(self):
- self.removeTable()
- self.makeTable()
-
- def remote_builderAdded(self, buildername, builder):
- print "builderAdded", buildername
- assert buildername not in self.buildernames
- self.buildernames.append(buildername)
-
- b = ThreeRowBuilder(buildername, builder)
- self.builders[buildername] = b
- self.rebuildTable()
- b.getLastBuild()
- b.getState()
-
- def remote_builderRemoved(self, buildername):
- del self.builders[buildername]
- self.buildernames.remove(buildername)
- self.rebuildTable()
-
- def remote_builderChangedState(self, name, state, eta):
- self.builders[name].gotState((state, eta, None))
- def remote_buildStarted(self, name, build):
- self.builders[name].buildStarted(build)
- def remote_buildFinished(self, name, build, results):
- self.builders[name].buildFinished(build, results)
-
- def remote_buildETAUpdate(self, name, build, eta):
- self.builders[name].buildETAUpdate(eta)
- def remote_stepStarted(self, name, build, stepname, step):
- self.builders[name].stepStarted(stepname, step)
- def remote_stepFinished(self, name, build, stepname, step, results):
- self.builders[name].stepFinished(stepname, step, results)
-
- def remote_stepETAUpdate(self, name, build, stepname, step,
- eta, expectations):
- # expectations is a list of (metricname, current_value,
- # expected_value) tuples, so that we could show individual progress
- # meters for each metric
- self.builders[name].stepETAUpdate(stepname, eta)
-
- def remote_logStarted(self, buildername, build, stepname, step,
- logname, log):
- pass
-
- def remote_logFinished(self, buildername, build, stepname, step,
- logname, log):
- pass
-
-
-class GtkClient(TextClient):
- ClientClass = ThreeRowClient
-
- def __init__(self, master):
- self.master = master
-
- w = gtk.Window()
- self.w = w
- #w.set_size_request(64,64)
- w.connect('destroy', lambda win: gtk.main_quit())
- self.vb = gtk.VBox(False, 2)
- self.status = gtk.Label("unconnected")
- self.vb.add(self.status)
- self.listener = self.ClientClass(self)
- w.add(self.vb)
- w.show_all()
-
- def connected(self, ref):
- self.status.set_text("connected")
- TextClient.connected(self, ref)
-
-"""
- def addBuilder(self, name, builder):
- Client.addBuilder(self, name, builder)
- self.pane.addBuilder(builder)
- def removeBuilder(self, name):
- self.pane.removeBuilder(name, self.builders[name])
- Client.removeBuilder(self, name)
-
- def startConnecting(self, master):
- self.master = master
- Client.startConnecting(self, master)
- self.status.set_text("connecting to %s.." % master)
- def connected(self, remote):
- Client.connected(self, remote)
- self.status.set_text(self.master)
- remote.notifyOnDisconnect(self.disconnected)
- def disconnected(self, remote):
- self.status.set_text("disconnected, will retry")
-"""
-
-def main():
- master = "localhost:8007"
- if len(sys.argv) > 1:
- master = sys.argv[1]
- c = GtkClient(master)
- c.run()
-
-if __name__ == '__main__':
- main()
-
diff --git a/buildbot/buildbot/clients/sendchange.py b/buildbot/buildbot/clients/sendchange.py
deleted file mode 100644
index 0ea4ba6..0000000
--- a/buildbot/buildbot/clients/sendchange.py
+++ /dev/null
@@ -1,48 +0,0 @@
-
-from twisted.spread import pb
-from twisted.cred import credentials
-from twisted.internet import reactor
-
-class Sender:
- def __init__(self, master, user=None):
- self.user = user
- self.host, self.port = master.split(":")
- self.port = int(self.port)
- self.num_changes = 0
-
- def send(self, branch, revision, comments, files, user=None, category=None):
- if user is None:
- user = self.user
- change = {'who': user, 'files': files, 'comments': comments,
- 'branch': branch, 'revision': revision, 'category': category}
- self.num_changes += 1
-
- f = pb.PBClientFactory()
- d = f.login(credentials.UsernamePassword("change", "changepw"))
- reactor.connectTCP(self.host, self.port, f)
- d.addCallback(self.addChange, change)
- return d
-
- def addChange(self, remote, change):
- d = remote.callRemote('addChange', change)
- d.addCallback(lambda res: remote.broker.transport.loseConnection())
- return d
-
- def printSuccess(self, res):
- if self.num_changes > 1:
- print "%d changes sent successfully" % self.num_changes
- elif self.num_changes == 1:
- print "change sent successfully"
- else:
- print "no changes to send"
-
- def printFailure(self, why):
- print "change(s) NOT sent, something went wrong:"
- print why
-
- def stop(self, res):
- reactor.stop()
- return res
-
- def run(self):
- reactor.run()
diff --git a/buildbot/buildbot/dnotify.py b/buildbot/buildbot/dnotify.py
deleted file mode 100644
index d23d600..0000000
--- a/buildbot/buildbot/dnotify.py
+++ /dev/null
@@ -1,102 +0,0 @@
-
-# spiv wants this
-
-import fcntl, signal
-
-class DNotify_Handler:
- def __init__(self):
- self.watchers = {}
- self.installed = 0
- def install(self):
- if self.installed:
- return
- signal.signal(signal.SIGIO, self.fire)
- self.installed = 1
- def uninstall(self):
- if not self.installed:
- return
- signal.signal(signal.SIGIO, signal.SIG_DFL)
- self.installed = 0
- def add(self, watcher):
- self.watchers[watcher.fd.fileno()] = watcher
- self.install()
- def remove(self, watcher):
- if self.watchers.has_key(watcher.fd.fileno()):
- del(self.watchers[watcher.fd.fileno()])
- if not self.watchers:
- self.uninstall()
- def fire(self, signum, frame):
- # this is the signal handler
- # without siginfo_t, we must fire them all
- for watcher in self.watchers.values():
- watcher.callback()
-
-class DNotify:
- DN_ACCESS = fcntl.DN_ACCESS # a file in the directory was read
- DN_MODIFY = fcntl.DN_MODIFY # a file was modified (write,truncate)
- DN_CREATE = fcntl.DN_CREATE # a file was created
- DN_DELETE = fcntl.DN_DELETE # a file was unlinked
- DN_RENAME = fcntl.DN_RENAME # a file was renamed
- DN_ATTRIB = fcntl.DN_ATTRIB # a file had attributes changed (chmod,chown)
-
- handler = [None]
-
- def __init__(self, dirname, callback=None,
- flags=[DN_MODIFY,DN_CREATE,DN_DELETE,DN_RENAME]):
-
- """This object watches a directory for changes. The .callback
- attribute should be set to a function to be run every time something
- happens to it. Be aware that it will be called more times than you
- expect."""
-
- if callback:
- self.callback = callback
- else:
- self.callback = self.fire
- self.dirname = dirname
- self.flags = reduce(lambda x, y: x | y, flags) | fcntl.DN_MULTISHOT
- self.fd = open(dirname, "r")
- # ideally we would move the notification to something like SIGRTMIN,
- # (to free up SIGIO) and use sigaction to have the signal handler
- # receive a structure with the fd number. But python doesn't offer
- # either.
- if not self.handler[0]:
- self.handler[0] = DNotify_Handler()
- self.handler[0].add(self)
- fcntl.fcntl(self.fd, fcntl.F_NOTIFY, self.flags)
- def remove(self):
- self.handler[0].remove(self)
- self.fd.close()
- def fire(self):
- print self.dirname, "changed!"
-
-def test_dnotify1():
- d = DNotify(".")
- while 1:
- signal.pause()
-
-def test_dnotify2():
- # create ./foo/, create/delete files in ./ and ./foo/ while this is
- # running. Notice how both notifiers are fired when anything changes;
- # this is an unfortunate side-effect of the lack of extended sigaction
- # support in Python.
- count = [0]
- d1 = DNotify(".")
- def fire1(count=count, d1=d1):
- print "./ changed!", count[0]
- count[0] += 1
- if count[0] > 5:
- d1.remove()
- del(d1)
- # change the callback, since we can't define it until after we have the
- # dnotify object. Hmm, unless we give the dnotify to the callback.
- d1.callback = fire1
- def fire2(): print "foo/ changed!"
- d2 = DNotify("foo", fire2)
- while 1:
- signal.pause()
-
-
-if __name__ == '__main__':
- test_dnotify2()
-
diff --git a/buildbot/buildbot/ec2buildslave.py b/buildbot/buildbot/ec2buildslave.py
deleted file mode 100644
index 6a1f42d..0000000
--- a/buildbot/buildbot/ec2buildslave.py
+++ /dev/null
@@ -1,283 +0,0 @@
-"""A LatentSlave that uses EC2 to instantiate the slaves on demand.
-
-Tested with Python boto 1.5c
-"""
-
-# Portions copyright Canonical Ltd. 2009
-
-import cStringIO
-import os
-import re
-import time
-import urllib
-
-import boto
-import boto.exception
-from twisted.internet import defer, threads
-from twisted.python import log
-
-from buildbot.buildslave import AbstractLatentBuildSlave
-from buildbot import interfaces
-
-PENDING = 'pending'
-RUNNING = 'running'
-SHUTTINGDOWN = 'shutting-down'
-TERMINATED = 'terminated'
-
-class EC2LatentBuildSlave(AbstractLatentBuildSlave):
-
- instance = image = None
- _poll_resolution = 5 # hook point for tests
-
- def __init__(self, name, password, instance_type, ami=None,
- valid_ami_owners=None, valid_ami_location_regex=None,
- elastic_ip=None, identifier=None, secret_identifier=None,
- aws_id_file_path=None,
- keypair_name='latent_buildbot_slave',
- security_name='latent_buildbot_slave',
- max_builds=None, notify_on_missing=[], missing_timeout=60*20,
- build_wait_timeout=60*10, properties={}):
- AbstractLatentBuildSlave.__init__(
- self, name, password, max_builds, notify_on_missing,
- missing_timeout, build_wait_timeout, properties)
- if not ((ami is not None) ^
- (valid_ami_owners is not None or
- valid_ami_location_regex is not None)):
- raise ValueError(
- 'You must provide either a specific ami, or one or both of '
- 'valid_ami_location_regex and valid_ami_owners')
- self.ami = ami
- if valid_ami_owners is not None:
- if isinstance(valid_ami_owners, (int, long)):
- valid_ami_owners = (valid_ami_owners,)
- else:
- for element in valid_ami_owners:
- if not isinstance(element, (int, long)):
- raise ValueError(
- 'valid_ami_owners should be int or iterable '
- 'of ints', element)
- if valid_ami_location_regex is not None:
- if not isinstance(valid_ami_location_regex, basestring):
- raise ValueError(
- 'valid_ami_location_regex should be a string')
- else:
- # verify that regex will compile
- re.compile(valid_ami_location_regex)
- self.valid_ami_owners = valid_ami_owners
- self.valid_ami_location_regex = valid_ami_location_regex
- self.instance_type = instance_type
- self.keypair_name = keypair_name
- self.security_name = security_name
- if identifier is None:
- assert secret_identifier is None, (
- 'supply both or neither of identifier, secret_identifier')
- if aws_id_file_path is None:
- home = os.environ['HOME']
- aws_id_file_path = os.path.join(home, '.ec2', 'aws_id')
- if not os.path.exists(aws_id_file_path):
- raise ValueError(
- "Please supply your AWS access key identifier and secret "
- "access key identifier either when instantiating this %s "
- "or in the %s file (on two lines).\n" %
- (self.__class__.__name__, aws_id_file_path))
- aws_file = open(aws_id_file_path, 'r')
- try:
- identifier = aws_file.readline().strip()
- secret_identifier = aws_file.readline().strip()
- finally:
- aws_file.close()
- else:
- assert (aws_id_file_path is None,
- 'if you supply the identifier and secret_identifier, '
- 'do not specify the aws_id_file_path')
- assert (secret_identifier is not None,
- 'supply both or neither of identifier, secret_identifier')
- # Make the EC2 connection.
- self.conn = boto.connect_ec2(identifier, secret_identifier)
-
- # Make a keypair
- #
- # We currently discard the keypair data because we don't need it.
- # If we do need it in the future, we will always recreate the keypairs
- # because there is no way to
- # programmatically retrieve the private key component, unless we
- # generate it and store it on the filesystem, which is an unnecessary
- # usage requirement.
- try:
- key_pair = self.conn.get_all_key_pairs(keypair_name)[0]
- # key_pair.delete() # would be used to recreate
- except boto.exception.EC2ResponseError, e:
- if e.code != 'InvalidKeyPair.NotFound':
- if e.code == 'AuthFailure':
- print ('POSSIBLE CAUSES OF ERROR:\n'
- ' Did you sign up for EC2?\n'
- ' Did you put a credit card number in your AWS '
- 'account?\n'
- 'Please doublecheck before reporting a problem.\n')
- raise
- # make one; we would always do this, and stash the result, if we
- # needed the key (for instance, to SSH to the box). We'd then
- # use paramiko to use the key to connect.
- self.conn.create_key_pair(keypair_name)
-
- # create security group
- try:
- group = self.conn.get_all_security_groups(security_name)[0]
- except boto.exception.EC2ResponseError, e:
- if e.code == 'InvalidGroup.NotFound':
- self.security_group = self.conn.create_security_group(
- security_name,
- 'Authorization to access the buildbot instance.')
- # Authorize the master as necessary
- # TODO this is where we'd open the hole to do the reverse pb
- # connect to the buildbot
- # ip = urllib.urlopen(
- # 'http://checkip.amazonaws.com').read().strip()
- # self.security_group.authorize('tcp', 22, 22, '%s/32' % ip)
- # self.security_group.authorize('tcp', 80, 80, '%s/32' % ip)
- else:
- raise
-
- # get the image
- if self.ami is not None:
- self.image = self.conn.get_image(self.ami)
- else:
- # verify we have access to at least one acceptable image
- discard = self.get_image()
-
- # get the specified elastic IP, if any
- if elastic_ip is not None:
- elastic_ip = self.conn.get_all_addresses([elastic_ip])[0]
- self.elastic_ip = elastic_ip
-
- def get_image(self):
- if self.image is not None:
- return self.image
- if self.valid_ami_location_regex:
- level = 0
- options = []
- get_match = re.compile(self.valid_ami_location_regex).match
- for image in self.conn.get_all_images(
- owners=self.valid_ami_owners):
- # gather sorting data
- match = get_match(image.location)
- if match:
- alpha_sort = int_sort = None
- if level < 2:
- try:
- alpha_sort = match.group(1)
- except IndexError:
- level = 2
- else:
- if level == 0:
- try:
- int_sort = int(alpha_sort)
- except ValueError:
- level = 1
- options.append([int_sort, alpha_sort,
- image.location, image.id, image])
- if level:
- log.msg('sorting images at level %d' % level)
- options = [candidate[level:] for candidate in options]
- else:
- options = [(image.location, image.id, image) for image
- in self.conn.get_all_images(
- owners=self.valid_ami_owners)]
- options.sort()
- log.msg('sorted images (last is chosen): %s' %
- (', '.join(
- '%s (%s)' % (candidate[-1].id, candidate[-1].location)
- for candidate in options)))
- if not options:
- raise ValueError('no available images match constraints')
- return options[-1][-1]
-
- @property
- def dns(self):
- if self.instance is None:
- return None
- return self.instance.public_dns_name
-
- def start_instance(self):
- if self.instance is not None:
- raise ValueError('instance active')
- return threads.deferToThread(self._start_instance)
-
- def _start_instance(self):
- image = self.get_image()
- reservation = image.run(
- key_name=self.keypair_name, security_groups=[self.security_name],
- instance_type=self.instance_type)
- self.instance = reservation.instances[0]
- log.msg('%s %s starting instance %s' %
- (self.__class__.__name__, self.slavename, self.instance.id))
- duration = 0
- interval = self._poll_resolution
- while self.instance.state == PENDING:
- time.sleep(interval)
- duration += interval
- if duration % 60 == 0:
- log.msg('%s %s has waited %d minutes for instance %s' %
- (self.__class__.__name__, self.slavename, duration//60,
- self.instance.id))
- self.instance.update()
- if self.instance.state == RUNNING:
- self.output = self.instance.get_console_output()
- minutes = duration//60
- seconds = duration%60
- log.msg('%s %s instance %s started on %s '
- 'in about %d minutes %d seconds (%s)' %
- (self.__class__.__name__, self.slavename,
- self.instance.id, self.dns, minutes, seconds,
- self.output.output))
- if self.elastic_ip is not None:
- self.instance.use_ip(self.elastic_ip)
- return [self.instance.id,
- image.id,
- '%02d:%02d:%02d' % (minutes//60, minutes%60, seconds)]
- else:
- log.msg('%s %s failed to start instance %s (%s)' %
- (self.__class__.__name__, self.slavename,
- self.instance.id, self.instance.state))
- raise interfaces.LatentBuildSlaveFailedToSubstantiate(
- self.instance.id, self.instance.state)
-
- def stop_instance(self, fast=False):
- if self.instance is None:
- # be gentle. Something may just be trying to alert us that an
- # instance never attached, and it's because, somehow, we never
- # started.
- return defer.succeed(None)
- instance = self.instance
- self.output = self.instance = None
- return threads.deferToThread(
- self._stop_instance, instance, fast)
-
- def _stop_instance(self, instance, fast):
- if self.elastic_ip is not None:
- self.conn.disassociate_address(self.elastic_ip.public_ip)
- instance.update()
- if instance.state not in (SHUTTINGDOWN, TERMINATED):
- instance.stop()
- log.msg('%s %s terminating instance %s' %
- (self.__class__.__name__, self.slavename, instance.id))
- duration = 0
- interval = self._poll_resolution
- if fast:
- goal = (SHUTTINGDOWN, TERMINATED)
- instance.update()
- else:
- goal = (TERMINATED,)
- while instance.state not in goal:
- time.sleep(interval)
- duration += interval
- if duration % 60 == 0:
- log.msg(
- '%s %s has waited %d minutes for instance %s to end' %
- (self.__class__.__name__, self.slavename, duration//60,
- instance.id))
- instance.update()
- log.msg('%s %s instance %s %s '
- 'after about %d minutes %d seconds' %
- (self.__class__.__name__, self.slavename,
- instance.id, goal, duration//60, duration%60))
diff --git a/buildbot/buildbot/interfaces.py b/buildbot/buildbot/interfaces.py
deleted file mode 100644
index e510d05..0000000
--- a/buildbot/buildbot/interfaces.py
+++ /dev/null
@@ -1,1123 +0,0 @@
-
-"""Interface documentation.
-
-Define the interfaces that are implemented by various buildbot classes.
-"""
-
-from zope.interface import Interface, Attribute
-
-# exceptions that can be raised while trying to start a build
-class NoSlaveError(Exception):
- pass
-class BuilderInUseError(Exception):
- pass
-class BuildSlaveTooOldError(Exception):
- pass
-class LatentBuildSlaveFailedToSubstantiate(Exception):
- pass
-
-# other exceptions
-class BuildbotNotRunningError(Exception):
- pass
-
-class IChangeSource(Interface):
- """Object which feeds Change objects to the changemaster. When files or
- directories are changed and the version control system provides some
- kind of notification, this object should turn it into a Change object
- and pass it through::
-
- self.changemaster.addChange(change)
- """
-
- def start():
- """Called when the buildmaster starts. Can be used to establish
- connections to VC daemons or begin polling."""
-
- def stop():
- """Called when the buildmaster shuts down. Connections should be
- terminated, polling timers should be canceled."""
-
- def describe():
- """Should return a string which briefly describes this source. This
- string will be displayed in an HTML status page."""
-
-class IScheduler(Interface):
- """I watch for Changes in the source tree and decide when to trigger
- Builds. I create BuildSet objects and submit them to the BuildMaster. I
- am a service, and the BuildMaster is always my parent.
-
- @ivar properties: properties to be applied to all builds started by this
- scheduler
- @type properties: L<buildbot.process.properties.Properties>
- """
-
- def addChange(change):
- """A Change has just been dispatched by one of the ChangeSources.
- Each Scheduler will receive this Change. I may decide to start a
- build as a result, or I might choose to ignore it."""
-
- def listBuilderNames():
- """Return a list of strings indicating the Builders that this
- Scheduler might feed."""
-
- def getPendingBuildTimes():
- """Return a list of timestamps for any builds that are waiting in the
- tree-stable-timer queue. This is only relevant for Change-based
- schedulers, all others can just return an empty list."""
- # TODO: it might be nice to make this into getPendingBuildSets, which
- # would let someone subscribe to the buildset being finished.
- # However, the Scheduler doesn't actually create the buildset until
- # it gets submitted, so doing this would require some major rework.
-
-class IUpstreamScheduler(Interface):
- """This marks an IScheduler as being eligible for use as the 'upstream='
- argument to a buildbot.scheduler.Dependent instance."""
-
- def subscribeToSuccessfulBuilds(target):
- """Request that the target callbable be invoked after every
- successful buildset. The target will be called with a single
- argument: the SourceStamp used by the successful builds."""
-
- def listBuilderNames():
- """Return a list of strings indicating the Builders that this
- Scheduler might feed."""
-
-class IDownstreamScheduler(Interface):
- """This marks an IScheduler to be listening to other schedulers.
- On reconfigs, these might get notified to check if their upstream
- scheduler are stil the same."""
-
- def checkUpstreamScheduler():
- """Check if the upstream scheduler is still alive, and if not,
- get a new upstream object from the master."""
-
-
-class ISourceStamp(Interface):
- """
- @cvar branch: branch from which source was drawn
- @type branch: string or None
-
- @cvar revision: revision of the source, or None to use CHANGES
- @type revision: varies depending on VC
-
- @cvar patch: patch applied to the source, or None if no patch
- @type patch: None or tuple (level diff)
-
- @cvar changes: the source step should check out hte latest revision
- in the given changes
- @type changes: tuple of L{buildbot.changes.changes.Change} instances,
- all of which are on the same branch
- """
-
- def canBeMergedWith(self, other):
- """
- Can this SourceStamp be merged with OTHER?
- """
-
- def mergeWith(self, others):
- """Generate a SourceStamp for the merger of me and all the other
- BuildRequests. This is called by a Build when it starts, to figure
- out what its sourceStamp should be."""
-
- def getAbsoluteSourceStamp(self, got_revision):
- """Get a new SourceStamp object reflecting the actual revision found
- by a Source step."""
-
- def getText(self):
- """Returns a list of strings to describe the stamp. These are
- intended to be displayed in a narrow column. If more space is
- available, the caller should join them together with spaces before
- presenting them to the user."""
-
-class IEmailSender(Interface):
- """I know how to send email, and can be used by other parts of the
- Buildbot to contact developers."""
- pass
-
-class IEmailLookup(Interface):
- def getAddress(user):
- """Turn a User-name string into a valid email address. Either return
- a string (with an @ in it), None (to indicate that the user cannot
- be reached by email), or a Deferred which will fire with the same."""
-
-class IStatus(Interface):
- """I am an object, obtainable from the buildmaster, which can provide
- status information."""
-
- def getProjectName():
- """Return the name of the project that this Buildbot is working
- for."""
- def getProjectURL():
- """Return the URL of this Buildbot's project."""
- def getBuildbotURL():
- """Return the URL of the top-most Buildbot status page, or None if
- this Buildbot does not provide a web status page."""
- def getURLForThing(thing):
- """Return the URL of a page which provides information on 'thing',
- which should be an object that implements one of the status
- interfaces defined in L{buildbot.interfaces}. Returns None if no
- suitable page is available (or if no Waterfall is running)."""
-
- def getChangeSources():
- """Return a list of IChangeSource objects."""
-
- def getChange(number):
- """Return an IChange object."""
-
- def getSchedulers():
- """Return a list of ISchedulerStatus objects for all
- currently-registered Schedulers."""
-
- def getBuilderNames(categories=None):
- """Return a list of the names of all current Builders."""
- def getBuilder(name):
- """Return the IBuilderStatus object for a given named Builder. Raises
- KeyError if there is no Builder by that name."""
-
- def getSlaveNames():
- """Return a list of buildslave names, suitable for passing to
- getSlave()."""
- def getSlave(name):
- """Return the ISlaveStatus object for a given named buildslave."""
-
- def getBuildSets():
- """Return a list of active (non-finished) IBuildSetStatus objects."""
-
- def generateFinishedBuilds(builders=[], branches=[],
- num_builds=None, finished_before=None,
- max_search=200):
- """Return a generator that will produce IBuildStatus objects each
- time you invoke its .next() method, starting with the most recent
- finished build and working backwards.
-
- @param builders: this is a list of Builder names, and the generator
- will only produce builds that ran on the given
- Builders. If the list is empty, produce builds from
- all Builders.
-
- @param branches: this is a list of branch names, and the generator
- will only produce builds that used the given
- branches. If the list is empty, produce builds from
- all branches.
-
- @param num_builds: the generator will stop after providing this many
- builds. The default of None means to produce as
- many builds as possible.
-
- @type finished_before: int: a timestamp, seconds since the epoch
- @param finished_before: if provided, do not produce any builds that
- finished after the given timestamp.
-
- @type max_search: int
- @param max_search: this method may have to examine a lot of builds
- to find some that match the search parameters,
- especially if there aren't any matching builds.
- This argument imposes a hard limit on the number
- of builds that will be examined within any given
- Builder.
- """
-
- def subscribe(receiver):
- """Register an IStatusReceiver to receive new status events. The
- receiver will immediately be sent a set of 'builderAdded' messages
- for all current builders. It will receive further 'builderAdded' and
- 'builderRemoved' messages as the config file is reloaded and builders
- come and go. It will also receive 'buildsetSubmitted' messages for
- all outstanding BuildSets (and each new BuildSet that gets
- submitted). No additional messages will be sent unless the receiver
- asks for them by calling .subscribe on the IBuilderStatus objects
- which accompany the addedBuilder message."""
-
- def unsubscribe(receiver):
- """Unregister an IStatusReceiver. No further status messgaes will be
- delivered."""
-
-class IBuildSetStatus(Interface):
- """I represent a set of Builds, each run on a separate Builder but all
- using the same source tree."""
-
- def getSourceStamp():
- """Return a SourceStamp object which can be used to re-create
- the source tree that this build used.
-
- This method will return None if the source information is no longer
- available."""
- pass
- def getReason():
- pass
- def getID():
- """Return the BuildSet's ID string, if any. The 'try' feature uses a
- random string as a BuildSetID to relate submitted jobs with the
- resulting BuildSet."""
- def getResponsibleUsers():
- pass # not implemented
- def getInterestedUsers():
- pass # not implemented
- def getBuilderNames():
- """Return a list of the names of all Builders on which this set will
- do builds."""
- def getBuildRequests():
- """Return a list of IBuildRequestStatus objects that represent my
- component Builds. This list might correspond to the Builders named by
- getBuilderNames(), but if builder categories are used, or 'Builder
- Aliases' are implemented, then they may not."""
- def isFinished():
- pass
- def waitUntilSuccess():
- """Return a Deferred that fires (with this IBuildSetStatus object)
- when the outcome of the BuildSet is known, i.e., upon the first
- failure, or after all builds complete successfully."""
- def waitUntilFinished():
- """Return a Deferred that fires (with this IBuildSetStatus object)
- when all builds have finished."""
- def getResults():
- pass
-
-class IBuildRequestStatus(Interface):
- """I represent a request to build a particular set of source code on a
- particular Builder. These requests may be merged by the time they are
- finally turned into a Build."""
-
- def getSourceStamp():
- """Return a SourceStamp object which can be used to re-create
- the source tree that this build used. This method will
- return an absolute SourceStamp if possible, and its results
- may change as the build progresses. Specifically, a "HEAD"
- build may later be more accurately specified by an absolute
- SourceStamp with the specific revision information.
-
- This method will return None if the source information is no longer
- available."""
- pass
- def getBuilderName():
- pass
- def getBuilds():
- """Return a list of IBuildStatus objects for each Build that has been
- started in an attempt to satify this BuildRequest."""
-
- def subscribe(observer):
- """Register a callable that will be invoked (with a single
- IBuildStatus object) for each Build that is created to satisfy this
- request. There may be multiple Builds created in an attempt to handle
- the request: they may be interrupted by the user or abandoned due to
- a lost slave. The last Build (the one which actually gets to run to
- completion) is said to 'satisfy' the BuildRequest. The observer will
- be called once for each of these Builds, both old and new."""
- def unsubscribe(observer):
- """Unregister the callable that was registered with subscribe()."""
- def getSubmitTime():
- """Return the time when this request was submitted"""
- def setSubmitTime(t):
- """Sets the time when this request was submitted"""
-
-
-class ISlaveStatus(Interface):
- def getName():
- """Return the name of the build slave."""
-
- def getAdmin():
- """Return a string with the slave admin's contact data."""
-
- def getHost():
- """Return a string with the slave host info."""
-
- def isConnected():
- """Return True if the slave is currently online, False if not."""
-
- def lastMessageReceived():
- """Return a timestamp (seconds since epoch) indicating when the most
- recent message was received from the buildslave."""
-
-class ISchedulerStatus(Interface):
- def getName():
- """Return the name of this Scheduler (a string)."""
-
- def getPendingBuildsets():
- """Return an IBuildSet for all BuildSets that are pending. These
- BuildSets are waiting for their tree-stable-timers to expire."""
- # TODO: this is not implemented anywhere
-
-
-class IBuilderStatus(Interface):
- def getName():
- """Return the name of this Builder (a string)."""
-
- def getState():
- # TODO: this isn't nearly as meaningful as it used to be
- """Return a tuple (state, builds) for this Builder. 'state' is the
- so-called 'big-status', indicating overall status (as opposed to
- which step is currently running). It is a string, one of 'offline',
- 'idle', or 'building'. 'builds' is a list of IBuildStatus objects
- (possibly empty) representing the currently active builds."""
-
- def getSlaves():
- """Return a list of ISlaveStatus objects for the buildslaves that are
- used by this builder."""
-
- def getPendingBuilds():
- """Return an IBuildRequestStatus object for all upcoming builds
- (those which are ready to go but which are waiting for a buildslave
- to be available."""
-
- def getCurrentBuilds():
- """Return a list containing an IBuildStatus object for each build
- currently in progress."""
- # again, we could probably provide an object for 'waiting' and
- # 'interlocked' too, but things like the Change list might still be
- # subject to change
-
- def getLastFinishedBuild():
- """Return the IBuildStatus object representing the last finished
- build, which may be None if the builder has not yet finished any
- builds."""
-
- def getBuild(number):
- """Return an IBuildStatus object for a historical build. Each build
- is numbered (starting at 0 when the Builder is first added),
- getBuild(n) will retrieve the Nth such build. getBuild(-n) will
- retrieve a recent build, with -1 being the most recent build
- started. If the Builder is idle, this will be the same as
- getLastFinishedBuild(). If the Builder is active, it will be an
- unfinished build. This method will return None if the build is no
- longer available. Older builds are likely to have less information
- stored: Logs are the first to go, then Steps."""
-
- def getEvent(number):
- """Return an IStatusEvent object for a recent Event. Builders
- connecting and disconnecting are events, as are ping attempts.
- getEvent(-1) will return the most recent event. Events are numbered,
- but it probably doesn't make sense to ever do getEvent(+n)."""
-
- def generateFinishedBuilds(branches=[],
- num_builds=None,
- max_buildnum=None, finished_before=None,
- max_search=200,
- ):
- """Return a generator that will produce IBuildStatus objects each
- time you invoke its .next() method, starting with the most recent
- finished build, then the previous build, and so on back to the oldest
- build available.
-
- @param branches: this is a list of branch names, and the generator
- will only produce builds that involve the given
- branches. If the list is empty, the generator will
- produce all builds regardless of what branch they
- used.
-
- @param num_builds: if provided, the generator will stop after
- providing this many builds. The default of None
- means to produce as many builds as possible.
-
- @param max_buildnum: if provided, the generator will start by
- providing the build with this number, or the
- highest-numbered preceding build (i.e. the
- generator will not produce any build numbered
- *higher* than max_buildnum). The default of None
- means to start with the most recent finished
- build. -1 means the same as None. -2 means to
- start with the next-most-recent completed build,
- etc.
-
- @type finished_before: int: a timestamp, seconds since the epoch
- @param finished_before: if provided, do not produce any builds that
- finished after the given timestamp.
-
- @type max_search: int
- @param max_search: this method may have to examine a lot of builds
- to find some that match the search parameters,
- especially if there aren't any matching builds.
- This argument imposes a hard limit on the number
- of builds that will be examined.
- """
-
- def subscribe(receiver):
- """Register an IStatusReceiver to receive new status events. The
- receiver will be given builderChangedState, buildStarted, and
- buildFinished messages."""
-
- def unsubscribe(receiver):
- """Unregister an IStatusReceiver. No further status messgaes will be
- delivered."""
-
-class IEventSource(Interface):
- def eventGenerator(branches=[]):
- """This function creates a generator which will yield all of this
- object's status events, starting with the most recent and progressing
- backwards in time. These events provide the IStatusEvent interface.
- At the moment they are all instances of buildbot.status.builder.Event
- or buildbot.status.builder.BuildStepStatus .
-
- @param branches: a list of branch names. The generator should only
- return events that are associated with these branches. If the list is
- empty, events for all branches should be returned (i.e. an empty list
- means 'accept all' rather than 'accept none').
- """
-
-class IBuildStatus(Interface):
- """I represent the status of a single Build/BuildRequest. It could be
- in-progress or finished."""
-
- def getBuilder():
- """
- Return the BuilderStatus that owns this build.
-
- @rtype: implementor of L{IBuilderStatus}
- """
-
- def isFinished():
- """Return a boolean. True means the build has finished, False means
- it is still running."""
-
- def waitUntilFinished():
- """Return a Deferred that will fire when the build finishes. If the
- build has already finished, this deferred will fire right away. The
- callback is given this IBuildStatus instance as an argument."""
-
- def getProperty(propname):
- """Return the value of the build property with the given name. Raises
- KeyError if there is no such property on this build."""
-
- def getReason():
- """Return a string that indicates why the build was run. 'changes',
- 'forced', and 'periodic' are the most likely values. 'try' will be
- added in the future."""
-
- def getSourceStamp():
- """Return a SourceStamp object which can be used to re-create
- the source tree that this build used.
-
- This method will return None if the source information is no longer
- available."""
- # TODO: it should be possible to expire the patch but still remember
- # that the build was r123+something.
-
- def getChanges():
- """Return a list of Change objects which represent which source
- changes went into the build."""
-
- def getResponsibleUsers():
- """Return a list of Users who are to blame for the changes that went
- into this build. If anything breaks (at least anything that wasn't
- already broken), blame them. Specifically, this is the set of users
- who were responsible for the Changes that went into this build. Each
- User is a string, corresponding to their name as known by the VC
- repository."""
-
- def getInterestedUsers():
- """Return a list of Users who will want to know about the results of
- this build. This is a superset of getResponsibleUsers(): it adds
- people who are interested in this build but who did not actually
- make the Changes that went into it (build sheriffs, code-domain
- owners)."""
-
- def getNumber():
- """Within each builder, each Build has a number. Return it."""
-
- def getPreviousBuild():
- """Convenience method. Returns None if the previous build is
- unavailable."""
-
- def getSteps():
- """Return a list of IBuildStepStatus objects. For invariant builds
- (those which always use the same set of Steps), this should always
- return the complete list, however some of the steps may not have
- started yet (step.getTimes()[0] will be None). For variant builds,
- this may not be complete (asking again later may give you more of
- them)."""
-
- def getTimes():
- """Returns a tuple of (start, end). 'start' and 'end' are the times
- (seconds since the epoch) when the Build started and finished. If
- the build is still running, 'end' will be None."""
-
- # while the build is running, the following methods make sense.
- # Afterwards they return None
-
- def getETA():
- """Returns the number of seconds from now in which the build is
- expected to finish, or None if we can't make a guess. This guess will
- be refined over time."""
-
- def getCurrentStep():
- """Return an IBuildStepStatus object representing the currently
- active step."""
-
- # Once you know the build has finished, the following methods are legal.
- # Before ths build has finished, they all return None.
-
- def getSlavename():
- """Return the name of the buildslave which handled this build."""
-
- def getText():
- """Returns a list of strings to describe the build. These are
- intended to be displayed in a narrow column. If more space is
- available, the caller should join them together with spaces before
- presenting them to the user."""
-
- def getResults():
- """Return a constant describing the results of the build: one of the
- constants in buildbot.status.builder: SUCCESS, WARNINGS, or
- FAILURE."""
-
- def getLogs():
- """Return a list of logs that describe the build as a whole. Some
- steps will contribute their logs, while others are are less important
- and will only be accessible through the IBuildStepStatus objects.
- Each log is an object which implements the IStatusLog interface."""
-
- def getTestResults():
- """Return a dictionary that maps test-name tuples to ITestResult
- objects. This may return an empty or partially-filled dictionary
- until the build has completed."""
-
- # subscription interface
-
- def subscribe(receiver, updateInterval=None):
- """Register an IStatusReceiver to receive new status events. The
- receiver will be given stepStarted and stepFinished messages. If
- 'updateInterval' is non-None, buildETAUpdate messages will be sent
- every 'updateInterval' seconds."""
-
- def unsubscribe(receiver):
- """Unregister an IStatusReceiver. No further status messgaes will be
- delivered."""
-
-class ITestResult(Interface):
- """I describe the results of a single unit test."""
-
- def getName():
- """Returns a tuple of strings which make up the test name. Tests may
- be arranged in a hierarchy, so looking for common prefixes may be
- useful."""
-
- def getResults():
- """Returns a constant describing the results of the test: SUCCESS,
- WARNINGS, FAILURE."""
-
- def getText():
- """Returns a list of short strings which describe the results of the
- test in slightly more detail. Suggested components include
- 'failure', 'error', 'passed', 'timeout'."""
-
- def getLogs():
- # in flux, it may be possible to provide more structured information
- # like python Failure instances
- """Returns a dictionary of test logs. The keys are strings like
- 'stdout', 'log', 'exceptions'. The values are strings."""
-
-
-class IBuildStepStatus(Interface):
- """I hold status for a single BuildStep."""
-
- def getName():
- """Returns a short string with the name of this step. This string
- may have spaces in it."""
-
- def getBuild():
- """Returns the IBuildStatus object which contains this step."""
-
- def getTimes():
- """Returns a tuple of (start, end). 'start' and 'end' are the times
- (seconds since the epoch) when the Step started and finished. If the
- step has not yet started, 'start' will be None. If the step is still
- running, 'end' will be None."""
-
- def getExpectations():
- """Returns a list of tuples (name, current, target). Each tuple
- describes a single axis along which the step's progress can be
- measured. 'name' is a string which describes the axis itself, like
- 'filesCompiled' or 'tests run' or 'bytes of output'. 'current' is a
- number with the progress made so far, while 'target' is the value
- that we expect (based upon past experience) to get to when the build
- is finished.
-
- 'current' will change over time until the step is finished. It is
- 'None' until the step starts. When the build is finished, 'current'
- may or may not equal 'target' (which is merely the expectation based
- upon previous builds)."""
-
- def getURLs():
- """Returns a dictionary of URLs. Each key is a link name (a short
- string, like 'results' or 'coverage'), and each value is a URL. These
- links will be displayed along with the LogFiles.
- """
-
- def getLogs():
- """Returns a list of IStatusLog objects. If the step has not yet
- finished, this list may be incomplete (asking again later may give
- you more of them)."""
-
-
- def isFinished():
- """Return a boolean. True means the step has finished, False means it
- is still running."""
-
- def waitUntilFinished():
- """Return a Deferred that will fire when the step finishes. If the
- step has already finished, this deferred will fire right away. The
- callback is given this IBuildStepStatus instance as an argument."""
-
- # while the step is running, the following methods make sense.
- # Afterwards they return None
-
- def getETA():
- """Returns the number of seconds from now in which the step is
- expected to finish, or None if we can't make a guess. This guess will
- be refined over time."""
-
- # Once you know the step has finished, the following methods are legal.
- # Before ths step has finished, they all return None.
-
- def getText():
- """Returns a list of strings which describe the step. These are
- intended to be displayed in a narrow column. If more space is
- available, the caller should join them together with spaces before
- presenting them to the user."""
-
- def getResults():
- """Return a tuple describing the results of the step: (result,
- strings). 'result' is one of the constants in
- buildbot.status.builder: SUCCESS, WARNINGS, FAILURE, or SKIPPED.
- 'strings' is an optional list of strings that the step wants to
- append to the overall build's results. These strings are usually
- more terse than the ones returned by getText(): in particular,
- successful Steps do not usually contribute any text to the overall
- build."""
-
- # subscription interface
-
- def subscribe(receiver, updateInterval=10):
- """Register an IStatusReceiver to receive new status events. The
- receiver will be given logStarted and logFinished messages. It will
- also be given a ETAUpdate message every 'updateInterval' seconds."""
-
- def unsubscribe(receiver):
- """Unregister an IStatusReceiver. No further status messgaes will be
- delivered."""
-
-class IStatusEvent(Interface):
- """I represent a Builder Event, something non-Build related that can
- happen to a Builder."""
-
- def getTimes():
- """Returns a tuple of (start, end) like IBuildStepStatus, but end==0
- indicates that this is a 'point event', which has no duration.
- SlaveConnect/Disconnect are point events. Ping is not: it starts
- when requested and ends when the response (positive or negative) is
- returned"""
-
- def getText():
- """Returns a list of strings which describe the event. These are
- intended to be displayed in a narrow column. If more space is
- available, the caller should join them together with spaces before
- presenting them to the user."""
-
-
-LOG_CHANNEL_STDOUT = 0
-LOG_CHANNEL_STDERR = 1
-LOG_CHANNEL_HEADER = 2
-
-class IStatusLog(Interface):
- """I represent a single Log, which is a growing list of text items that
- contains some kind of output for a single BuildStep. I might be finished,
- in which case this list has stopped growing.
-
- Each Log has a name, usually something boring like 'log' or 'output'.
- These names are not guaranteed to be unique, however they are usually
- chosen to be useful within the scope of a single step (i.e. the Compile
- step might produce both 'log' and 'warnings'). The name may also have
- spaces. If you want something more globally meaningful, at least within a
- given Build, try::
-
- '%s.%s' % (log.getStep.getName(), log.getName())
-
- The Log can be presented as plain text, or it can be accessed as a list
- of items, each of which has a channel indicator (header, stdout, stderr)
- and a text chunk. An HTML display might represent the interleaved
- channels with different styles, while a straight download-the-text
- interface would just want to retrieve a big string.
-
- The 'header' channel is used by ShellCommands to prepend a note about
- which command is about to be run ('running command FOO in directory
- DIR'), and append another note giving the exit code of the process.
-
- Logs can be streaming: if the Log has not yet finished, you can
- subscribe to receive new chunks as they are added.
-
- A ShellCommand will have a Log associated with it that gathers stdout
- and stderr. Logs may also be created by parsing command output or
- through other synthetic means (grepping for all the warnings in a
- compile log, or listing all the test cases that are going to be run).
- Such synthetic Logs are usually finished as soon as they are created."""
-
-
- def getName():
- """Returns a short string with the name of this log, probably 'log'.
- """
-
- def getStep():
- """Returns the IBuildStepStatus which owns this log."""
- # TODO: can there be non-Step logs?
-
- def isFinished():
- """Return a boolean. True means the log has finished and is closed,
- False means it is still open and new chunks may be added to it."""
-
- def waitUntilFinished():
- """Return a Deferred that will fire when the log is closed. If the
- log has already finished, this deferred will fire right away. The
- callback is given this IStatusLog instance as an argument."""
-
- def subscribe(receiver, catchup):
- """Register an IStatusReceiver to receive chunks (with logChunk) as
- data is added to the Log. If you use this, you will also want to use
- waitUntilFinished to find out when the listener can be retired.
- Subscribing to a closed Log is a no-op.
-
- If 'catchup' is True, the receiver will immediately be sent a series
- of logChunk messages to bring it up to date with the partially-filled
- log. This allows a status client to join a Log already in progress
- without missing any data. If the Log has already finished, it is too
- late to catch up: just do getText() instead.
-
- If the Log is very large, the receiver will be called many times with
- a lot of data. There is no way to throttle this data. If the receiver
- is planning on sending the data on to somewhere else, over a narrow
- connection, you can get a throttleable subscription by using
- C{subscribeConsumer} instead."""
-
- def unsubscribe(receiver):
- """Remove a receiver previously registered with subscribe(). Attempts
- to remove a receiver which was not previously registered is a no-op.
- """
-
- def subscribeConsumer(consumer):
- """Register an L{IStatusLogConsumer} to receive all chunks of the
- logfile, including all the old entries and any that will arrive in
- the future. The consumer will first have their C{registerProducer}
- method invoked with a reference to an object that can be told
- C{pauseProducing}, C{resumeProducing}, and C{stopProducing}. Then the
- consumer's C{writeChunk} method will be called repeatedly with each
- (channel, text) tuple in the log, starting with the very first. The
- consumer will be notified with C{finish} when the log has been
- exhausted (which can only happen when the log is finished). Note that
- a small amount of data could be written via C{writeChunk} even after
- C{pauseProducing} has been called.
-
- To unsubscribe the consumer, use C{producer.stopProducing}."""
-
- # once the log has finished, the following methods make sense. They can
- # be called earlier, but they will only return the contents of the log up
- # to the point at which they were called. You will lose items that are
- # added later. Use C{subscribe} or C{subscribeConsumer} to avoid missing
- # anything.
-
- def hasContents():
- """Returns True if the LogFile still has contents available. Returns
- False for logs that have been pruned. Clients should test this before
- offering to show the contents of any log."""
-
- def getText():
- """Return one big string with the contents of the Log. This merges
- all non-header chunks together."""
-
- def readlines(channel=LOG_CHANNEL_STDOUT):
- """Read lines from one channel of the logfile. This returns an
- iterator that will provide single lines of text (including the
- trailing newline).
- """
-
- def getTextWithHeaders():
- """Return one big string with the contents of the Log. This merges
- all chunks (including headers) together."""
-
- def getChunks():
- """Generate a list of (channel, text) tuples. 'channel' is a number,
- 0 for stdout, 1 for stderr, 2 for header. (note that stderr is merged
- into stdout if PTYs are in use)."""
-
-class IStatusLogConsumer(Interface):
- """I am an object which can be passed to IStatusLog.subscribeConsumer().
- I represent a target for writing the contents of an IStatusLog. This
- differs from a regular IStatusReceiver in that it can pause the producer.
- This makes it more suitable for use in streaming data over network
- sockets, such as an HTTP request. Note that the consumer can only pause
- the producer until it has caught up with all the old data. After that
- point, C{pauseProducing} is ignored and all new output from the log is
- sent directoy to the consumer."""
-
- def registerProducer(producer, streaming):
- """A producer is being hooked up to this consumer. The consumer only
- has to handle a single producer. It should send .pauseProducing and
- .resumeProducing messages to the producer when it wants to stop or
- resume the flow of data. 'streaming' will be set to True because the
- producer is always a PushProducer.
- """
-
- def unregisterProducer():
- """The previously-registered producer has been removed. No further
- pauseProducing or resumeProducing calls should be made. The consumer
- should delete its reference to the Producer so it can be released."""
-
- def writeChunk(chunk):
- """A chunk (i.e. a tuple of (channel, text)) is being written to the
- consumer."""
-
- def finish():
- """The log has finished sending chunks to the consumer."""
-
-class IStatusReceiver(Interface):
- """I am an object which can receive build status updates. I may be
- subscribed to an IStatus, an IBuilderStatus, or an IBuildStatus."""
-
- def buildsetSubmitted(buildset):
- """A new BuildSet has been submitted to the buildmaster.
-
- @type buildset: implementor of L{IBuildSetStatus}
- """
-
- def requestSubmitted(request):
- """A new BuildRequest has been submitted to the buildmaster.
-
- @type request: implementor of L{IBuildRequestStatus}
- """
-
- def builderAdded(builderName, builder):
- """
- A new Builder has just been added. This method may return an
- IStatusReceiver (probably 'self') which will be subscribed to receive
- builderChangedState and buildStarted/Finished events.
-
- @type builderName: string
- @type builder: L{buildbot.status.builder.BuilderStatus}
- @rtype: implementor of L{IStatusReceiver}
- """
-
- def builderChangedState(builderName, state):
- """Builder 'builderName' has changed state. The possible values for
- 'state' are 'offline', 'idle', and 'building'."""
-
- def buildStarted(builderName, build):
- """Builder 'builderName' has just started a build. The build is an
- object which implements IBuildStatus, and can be queried for more
- information.
-
- This method may return an IStatusReceiver (it could even return
- 'self'). If it does so, stepStarted and stepFinished methods will be
- invoked on the object for the steps of this one build. This is a
- convenient way to subscribe to all build steps without missing any.
- This receiver will automatically be unsubscribed when the build
- finishes.
-
- It can also return a tuple of (IStatusReceiver, interval), in which
- case buildETAUpdate messages are sent ever 'interval' seconds, in
- addition to the stepStarted and stepFinished messages."""
-
- def buildETAUpdate(build, ETA):
- """This is a periodic update on the progress this Build has made
- towards completion."""
-
- def stepStarted(build, step):
- """A step has just started. 'step' is the IBuildStepStatus which
- represents the step: it can be queried for more information.
-
- This method may return an IStatusReceiver (it could even return
- 'self'). If it does so, logStarted and logFinished methods will be
- invoked on the object for logs created by this one step. This
- receiver will be automatically unsubscribed when the step finishes.
-
- Alternatively, the method may return a tuple of an IStatusReceiver
- and an integer named 'updateInterval'. In addition to
- logStarted/logFinished messages, it will also receive stepETAUpdate
- messages about every updateInterval seconds."""
-
- def stepTextChanged(build, step, text):
- """The text for a step has been updated.
-
- This is called when calling setText() on the step status, and
- hands in the text list."""
-
- def stepText2Changed(build, step, text2):
- """The text2 for a step has been updated.
-
- This is called when calling setText2() on the step status, and
- hands in text2 list."""
-
- def stepETAUpdate(build, step, ETA, expectations):
- """This is a periodic update on the progress this Step has made
- towards completion. It gets an ETA (in seconds from the present) of
- when the step ought to be complete, and a list of expectation tuples
- (as returned by IBuildStepStatus.getExpectations) with more detailed
- information."""
-
- def logStarted(build, step, log):
- """A new Log has been started, probably because a step has just
- started running a shell command. 'log' is the IStatusLog object
- which can be queried for more information.
-
- This method may return an IStatusReceiver (such as 'self'), in which
- case the target's logChunk method will be invoked as text is added to
- the logfile. This receiver will automatically be unsubsribed when the
- log finishes."""
-
- def logChunk(build, step, log, channel, text):
- """Some text has been added to this log. 'channel' is one of
- LOG_CHANNEL_STDOUT, LOG_CHANNEL_STDERR, or LOG_CHANNEL_HEADER, as
- defined in IStatusLog.getChunks."""
-
- def logFinished(build, step, log):
- """A Log has been closed."""
-
- def stepFinished(build, step, results):
- """A step has just finished. 'results' is the result tuple described
- in IBuildStepStatus.getResults."""
-
- def buildFinished(builderName, build, results):
- """
- A build has just finished. 'results' is the result tuple described
- in L{IBuildStatus.getResults}.
-
- @type builderName: string
- @type build: L{buildbot.status.builder.BuildStatus}
- @type results: tuple
- """
-
- def builderRemoved(builderName):
- """The Builder has been removed."""
-
-class IControl(Interface):
- def addChange(change):
- """Add a change to all builders. Each Builder will decide for
- themselves whether the change is interesting or not, and may initiate
- a build as a result."""
-
- def submitBuildSet(buildset):
- """Submit a BuildSet object, which will eventually be run on all of
- the builders listed therein."""
-
- def getBuilder(name):
- """Retrieve the IBuilderControl object for the given Builder."""
-
-class IBuilderControl(Interface):
- def requestBuild(request):
- """Queue a L{buildbot.process.base.BuildRequest} object for later
- building."""
-
- def requestBuildSoon(request):
- """Submit a BuildRequest like requestBuild, but raise a
- L{buildbot.interfaces.NoSlaveError} if no slaves are currently
- available, so it cannot be used to queue a BuildRequest in the hopes
- that a slave will eventually connect. This method is appropriate for
- use by things like the web-page 'Force Build' button."""
-
- def resubmitBuild(buildStatus, reason="<rebuild, no reason given>"):
- """Rebuild something we've already built before. This submits a
- BuildRequest to our Builder using the same SourceStamp as the earlier
- build. This has no effect (but may eventually raise an exception) if
- this Build has not yet finished."""
-
- def getPendingBuilds():
- """Return a list of L{IBuildRequestControl} objects for this Builder.
- Each one corresponds to a pending build that has not yet started (due
- to a scarcity of build slaves). These upcoming builds can be canceled
- through the control object."""
-
- def getBuild(number):
- """Attempt to return an IBuildControl object for the given build.
- Returns None if no such object is available. This will only work for
- the build that is currently in progress: once the build finishes,
- there is nothing to control anymore."""
-
- def ping(timeout=30):
- """Attempt to contact the slave and see if it is still alive. This
- returns a Deferred which fires with either True (the slave is still
- alive) or False (the slave did not respond). As a side effect, adds
- an event to this builder's column in the waterfall display
- containing the results of the ping."""
- # TODO: this ought to live in ISlaveControl, maybe with disconnect()
- # or something. However the event that is emitted is most useful in
- # the Builder column, so it kinda fits here too.
-
-class IBuildRequestControl(Interface):
- def subscribe(observer):
- """Register a callable that will be invoked (with a single
- IBuildControl object) for each Build that is created to satisfy this
- request. There may be multiple Builds created in an attempt to handle
- the request: they may be interrupted by the user or abandoned due to
- a lost slave. The last Build (the one which actually gets to run to
- completion) is said to 'satisfy' the BuildRequest. The observer will
- be called once for each of these Builds, both old and new."""
- def unsubscribe(observer):
- """Unregister the callable that was registered with subscribe()."""
- def cancel():
- """Remove the build from the pending queue. Has no effect if the
- build has already been started."""
-
-class IBuildControl(Interface):
- def getStatus():
- """Return an IBuildStatus object for the Build that I control."""
- def stopBuild(reason="<no reason given>"):
- """Halt the build. This has no effect if the build has already
- finished."""
-
-class ILogFile(Interface):
- """This is the internal interface to a LogFile, used by the BuildStep to
- write data into the log.
- """
- def addStdout(data):
- pass
- def addStderr(data):
- pass
- def addHeader(data):
- pass
- def finish():
- """The process that is feeding the log file has finished, and no
- further data will be added. This closes the logfile."""
-
-class ILogObserver(Interface):
- """Objects which provide this interface can be used in a BuildStep to
- watch the output of a LogFile and parse it incrementally.
- """
-
- # internal methods
- def setStep(step):
- pass
- def setLog(log):
- pass
-
- # methods called by the LogFile
- def logChunk(build, step, log, channel, text):
- pass
-
-class IBuildSlave(Interface):
- # this is a marker interface for the BuildSlave class
- pass
-
-class ILatentBuildSlave(IBuildSlave):
- """A build slave that is not always running, but can run when requested.
- """
- substantiated = Attribute('Substantiated',
- 'Whether the latent build slave is currently '
- 'substantiated with a real instance.')
-
- def substantiate():
- """Request that the slave substantiate with a real instance.
-
- Returns a deferred that will callback when a real instance has
- attached."""
-
- # there is an insubstantiate too, but that is not used externally ATM.
-
- def buildStarted(sb):
- """Inform the latent build slave that a build has started.
-
- ``sb`` is a LatentSlaveBuilder as defined in buildslave.py. The sb
- is the one for whom the build started.
- """
-
- def buildFinished(sb):
- """Inform the latent build slave that a build has finished.
-
- ``sb`` is a LatentSlaveBuilder as defined in buildslave.py. The sb
- is the one for whom the build finished.
- """
diff --git a/buildbot/buildbot/locks.py b/buildbot/buildbot/locks.py
deleted file mode 100644
index 6599d1d..0000000
--- a/buildbot/buildbot/locks.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# -*- test-case-name: buildbot.test.test_locks -*-
-
-from twisted.python import log
-from twisted.internet import reactor, defer
-from buildbot import util
-
-if False: # for debugging
- debuglog = log.msg
-else:
- debuglog = lambda m: None
-
-class BaseLock:
- """
- Class handling claiming and releasing of L{self}, and keeping track of
- current and waiting owners.
-
- @note: Ideally, we'd like to maintain FIFO order. The place to do that
- would be the L{isAvailable()} function. However, this function is
- called by builds/steps both for the first time, and after waking
- them up by L{self} from the L{self.waiting} queue. There is
- currently no way of distinguishing between them.
- """
- description = "<BaseLock>"
-
- def __init__(self, name, maxCount=1):
- self.name = name # Name of the lock
- self.waiting = [] # Current queue, tuples (LockAccess, deferred)
- self.owners = [] # Current owners, tuples (owner, LockAccess)
- self.maxCount=maxCount # maximal number of counting owners
-
- def __repr__(self):
- return self.description
-
- def _getOwnersCount(self):
- """ Return the number of current exclusive and counting owners.
-
- @return: Tuple (number exclusive owners, number counting owners)
- """
- num_excl, num_counting = 0, 0
- for owner in self.owners:
- if owner[1].mode == 'exclusive':
- num_excl = num_excl + 1
- else: # mode == 'counting'
- num_counting = num_counting + 1
-
- assert (num_excl == 1 and num_counting == 0) \
- or (num_excl == 0 and num_counting <= self.maxCount)
- return num_excl, num_counting
-
-
- def isAvailable(self, access):
- """ Return a boolean whether the lock is available for claiming """
- debuglog("%s isAvailable(%s): self.owners=%r"
- % (self, access, self.owners))
- num_excl, num_counting = self._getOwnersCount()
- if access.mode == 'counting':
- # Wants counting access
- return num_excl == 0 and num_counting < self.maxCount
- else:
- # Wants exclusive access
- return num_excl == 0 and num_counting == 0
-
- def claim(self, owner, access):
- """ Claim the lock (lock must be available) """
- debuglog("%s claim(%s, %s)" % (self, owner, access.mode))
- assert owner is not None
- assert self.isAvailable(access), "ask for isAvailable() first"
-
- assert isinstance(access, LockAccess)
- assert access.mode in ['counting', 'exclusive']
- self.owners.append((owner, access))
- debuglog(" %s is claimed '%s'" % (self, access.mode))
-
- def release(self, owner, access):
- """ Release the lock """
- assert isinstance(access, LockAccess)
-
- debuglog("%s release(%s, %s)" % (self, owner, access.mode))
- entry = (owner, access)
- assert entry in self.owners
- self.owners.remove(entry)
- # who can we wake up?
- # After an exclusive access, we may need to wake up several waiting.
- # Break out of the loop when the first waiting client should not be awakened.
- num_excl, num_counting = self._getOwnersCount()
- while len(self.waiting) > 0:
- access, d = self.waiting[0]
- if access.mode == 'counting':
- if num_excl > 0 or num_counting == self.maxCount:
- break
- else:
- num_counting = num_counting + 1
- else:
- # access.mode == 'exclusive'
- if num_excl > 0 or num_counting > 0:
- break
- else:
- num_excl = num_excl + 1
-
- del self.waiting[0]
- reactor.callLater(0, d.callback, self)
-
- def waitUntilMaybeAvailable(self, owner, access):
- """Fire when the lock *might* be available. The caller will need to
- check with isAvailable() when the deferred fires. This loose form is
- used to avoid deadlocks. If we were interested in a stronger form,
- this would be named 'waitUntilAvailable', and the deferred would fire
- after the lock had been claimed.
- """
- debuglog("%s waitUntilAvailable(%s)" % (self, owner))
- assert isinstance(access, LockAccess)
- if self.isAvailable(access):
- return defer.succeed(self)
- d = defer.Deferred()
- self.waiting.append((access, d))
- return d
-
-
-class RealMasterLock(BaseLock):
- def __init__(self, lockid):
- BaseLock.__init__(self, lockid.name, lockid.maxCount)
- self.description = "<MasterLock(%s, %s)>" % (self.name, self.maxCount)
-
- def getLock(self, slave):
- return self
-
-class RealSlaveLock:
- def __init__(self, lockid):
- self.name = lockid.name
- self.maxCount = lockid.maxCount
- self.maxCountForSlave = lockid.maxCountForSlave
- self.description = "<SlaveLock(%s, %s, %s)>" % (self.name,
- self.maxCount,
- self.maxCountForSlave)
- self.locks = {}
-
- def __repr__(self):
- return self.description
-
- def getLock(self, slavebuilder):
- slavename = slavebuilder.slave.slavename
- if not self.locks.has_key(slavename):
- maxCount = self.maxCountForSlave.get(slavename,
- self.maxCount)
- lock = self.locks[slavename] = BaseLock(self.name, maxCount)
- desc = "<SlaveLock(%s, %s)[%s] %d>" % (self.name, maxCount,
- slavename, id(lock))
- lock.description = desc
- self.locks[slavename] = lock
- return self.locks[slavename]
-
-
-class LockAccess:
- """ I am an object representing a way to access a lock.
-
- @param lockid: LockId instance that should be accessed.
- @type lockid: A MasterLock or SlaveLock instance.
-
- @param mode: Mode of accessing the lock.
- @type mode: A string, either 'counting' or 'exclusive'.
- """
- def __init__(self, lockid, mode):
- self.lockid = lockid
- self.mode = mode
-
- assert isinstance(lockid, (MasterLock, SlaveLock))
- assert mode in ['counting', 'exclusive']
-
-
-class BaseLockId(util.ComparableMixin):
- """ Abstract base class for LockId classes.
-
- Sets up the 'access()' function for the LockId's available to the user
- (MasterLock and SlaveLock classes).
- Derived classes should add
- - Comparison with the L{util.ComparableMixin} via the L{compare_attrs}
- class variable.
- - Link to the actual lock class should be added with the L{lockClass}
- class variable.
- """
- def access(self, mode):
- """ Express how the lock should be accessed """
- assert mode in ['counting', 'exclusive']
- return LockAccess(self, mode)
-
- def defaultAccess(self):
- """ For buildbot 0.7.7 compability: When user doesn't specify an access
- mode, this one is chosen.
- """
- return self.access('counting')
-
-
-
-# master.cfg should only reference the following MasterLock and SlaveLock
-# classes. They are identifiers that will be turned into real Locks later,
-# via the BotMaster.getLockByID method.
-
-class MasterLock(BaseLockId):
- """I am a semaphore that limits the number of simultaneous actions.
-
- Builds and BuildSteps can declare that they wish to claim me as they run.
- Only a limited number of such builds or steps will be able to run
- simultaneously. By default this number is one, but my maxCount parameter
- can be raised to allow two or three or more operations to happen at the
- same time.
-
- Use this to protect a resource that is shared among all builders and all
- slaves, for example to limit the load on a common SVN repository.
- """
-
- compare_attrs = ['name', 'maxCount']
- lockClass = RealMasterLock
- def __init__(self, name, maxCount=1):
- self.name = name
- self.maxCount = maxCount
-
-class SlaveLock(BaseLockId):
- """I am a semaphore that limits simultaneous actions on each buildslave.
-
- Builds and BuildSteps can declare that they wish to claim me as they run.
- Only a limited number of such builds or steps will be able to run
- simultaneously on any given buildslave. By default this number is one,
- but my maxCount parameter can be raised to allow two or three or more
- operations to happen on a single buildslave at the same time.
-
- Use this to protect a resource that is shared among all the builds taking
- place on each slave, for example to limit CPU or memory load on an
- underpowered machine.
-
- Each buildslave will get an independent copy of this semaphore. By
- default each copy will use the same owner count (set with maxCount), but
- you can provide maxCountForSlave with a dictionary that maps slavename to
- owner count, to allow some slaves more parallelism than others.
-
- """
-
- compare_attrs = ['name', 'maxCount', '_maxCountForSlaveList']
- lockClass = RealSlaveLock
- def __init__(self, name, maxCount=1, maxCountForSlave={}):
- self.name = name
- self.maxCount = maxCount
- self.maxCountForSlave = maxCountForSlave
- # for comparison purposes, turn this dictionary into a stably-sorted
- # list of tuples
- self._maxCountForSlaveList = self.maxCountForSlave.items()
- self._maxCountForSlaveList.sort()
- self._maxCountForSlaveList = tuple(self._maxCountForSlaveList)
diff --git a/buildbot/buildbot/manhole.py b/buildbot/buildbot/manhole.py
deleted file mode 100644
index e5479b3..0000000
--- a/buildbot/buildbot/manhole.py
+++ /dev/null
@@ -1,265 +0,0 @@
-
-import os.path
-import binascii, base64
-from twisted.python import log
-from twisted.application import service, strports
-from twisted.cred import checkers, portal
-from twisted.conch import manhole, telnet, manhole_ssh, checkers as conchc
-from twisted.conch.insults import insults
-from twisted.internet import protocol
-
-from buildbot.util import ComparableMixin
-from zope.interface import implements # requires Twisted-2.0 or later
-
-# makeTelnetProtocol and _TelnetRealm are for the TelnetManhole
-
-class makeTelnetProtocol:
- # this curries the 'portal' argument into a later call to
- # TelnetTransport()
- def __init__(self, portal):
- self.portal = portal
-
- def __call__(self):
- auth = telnet.AuthenticatingTelnetProtocol
- return telnet.TelnetTransport(auth, self.portal)
-
-class _TelnetRealm:
- implements(portal.IRealm)
-
- def __init__(self, namespace_maker):
- self.namespace_maker = namespace_maker
-
- def requestAvatar(self, avatarId, *interfaces):
- if telnet.ITelnetProtocol in interfaces:
- namespace = self.namespace_maker()
- p = telnet.TelnetBootstrapProtocol(insults.ServerProtocol,
- manhole.ColoredManhole,
- namespace)
- return (telnet.ITelnetProtocol, p, lambda: None)
- raise NotImplementedError()
-
-
-class chainedProtocolFactory:
- # this curries the 'namespace' argument into a later call to
- # chainedProtocolFactory()
- def __init__(self, namespace):
- self.namespace = namespace
-
- def __call__(self):
- return insults.ServerProtocol(manhole.ColoredManhole, self.namespace)
-
-class AuthorizedKeysChecker(conchc.SSHPublicKeyDatabase):
- """Accept connections using SSH keys from a given file.
-
- SSHPublicKeyDatabase takes the username that the prospective client has
- requested and attempts to get a ~/.ssh/authorized_keys file for that
- username. This requires root access, so it isn't as useful as you'd
- like.
-
- Instead, this subclass looks for keys in a single file, given as an
- argument. This file is typically kept in the buildmaster's basedir. The
- file should have 'ssh-dss ....' lines in it, just like authorized_keys.
- """
-
- def __init__(self, authorized_keys_file):
- self.authorized_keys_file = os.path.expanduser(authorized_keys_file)
-
- def checkKey(self, credentials):
- f = open(self.authorized_keys_file)
- for l in f.readlines():
- l2 = l.split()
- if len(l2) < 2:
- continue
- try:
- if base64.decodestring(l2[1]) == credentials.blob:
- return 1
- except binascii.Error:
- continue
- return 0
-
-
-class _BaseManhole(service.MultiService):
- """This provides remote access to a python interpreter (a read/exec/print
- loop) embedded in the buildmaster via an internal SSH server. This allows
- detailed inspection of the buildmaster state. It is of most use to
- buildbot developers. Connect to this by running an ssh client.
- """
-
- def __init__(self, port, checker, using_ssh=True):
- """
- @type port: string or int
- @param port: what port should the Manhole listen on? This is a
- strports specification string, like 'tcp:12345' or
- 'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
- simple tcp port.
-
- @type checker: an object providing the
- L{twisted.cred.checkers.ICredentialsChecker} interface
- @param checker: if provided, this checker is used to authenticate the
- client instead of using the username/password scheme. You must either
- provide a username/password or a Checker. Some useful values are::
- import twisted.cred.checkers as credc
- import twisted.conch.checkers as conchc
- c = credc.AllowAnonymousAccess # completely open
- c = credc.FilePasswordDB(passwd_filename) # file of name:passwd
- c = conchc.UNIXPasswordDatabase # getpwnam() (probably /etc/passwd)
-
- @type using_ssh: bool
- @param using_ssh: If True, accept SSH connections. If False, accept
- regular unencrypted telnet connections.
- """
-
- # unfortunately, these don't work unless we're running as root
- #c = credc.PluggableAuthenticationModulesChecker: PAM
- #c = conchc.SSHPublicKeyDatabase() # ~/.ssh/authorized_keys
- # and I can't get UNIXPasswordDatabase to work
-
- service.MultiService.__init__(self)
- if type(port) is int:
- port = "tcp:%d" % port
- self.port = port # for comparison later
- self.checker = checker # to maybe compare later
-
- def makeNamespace():
- # close over 'self' so we can get access to .parent later
- master = self.parent
- namespace = {
- 'master': master,
- 'status': master.getStatus(),
- }
- return namespace
-
- def makeProtocol():
- namespace = makeNamespace()
- p = insults.ServerProtocol(manhole.ColoredManhole, namespace)
- return p
-
- self.using_ssh = using_ssh
- if using_ssh:
- r = manhole_ssh.TerminalRealm()
- r.chainedProtocolFactory = makeProtocol
- p = portal.Portal(r, [self.checker])
- f = manhole_ssh.ConchFactory(p)
- else:
- r = _TelnetRealm(makeNamespace)
- p = portal.Portal(r, [self.checker])
- f = protocol.ServerFactory()
- f.protocol = makeTelnetProtocol(p)
- s = strports.service(self.port, f)
- s.setServiceParent(self)
-
-
- def startService(self):
- service.MultiService.startService(self)
- if self.using_ssh:
- via = "via SSH"
- else:
- via = "via telnet"
- log.msg("Manhole listening %s on port %s" % (via, self.port))
-
-
-class TelnetManhole(_BaseManhole, ComparableMixin):
- """This Manhole accepts unencrypted (telnet) connections, and requires a
- username and password authorize access. You are encouraged to use the
- encrypted ssh-based manhole classes instead."""
-
- compare_attrs = ["port", "username", "password"]
-
- def __init__(self, port, username, password):
- """
- @type port: string or int
- @param port: what port should the Manhole listen on? This is a
- strports specification string, like 'tcp:12345' or
- 'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
- simple tcp port.
-
- @param username:
- @param password: username= and password= form a pair of strings to
- use when authenticating the remote user.
- """
-
- self.username = username
- self.password = password
-
- c = checkers.InMemoryUsernamePasswordDatabaseDontUse()
- c.addUser(username, password)
-
- _BaseManhole.__init__(self, port, c, using_ssh=False)
-
-class PasswordManhole(_BaseManhole, ComparableMixin):
- """This Manhole accepts encrypted (ssh) connections, and requires a
- username and password to authorize access.
- """
-
- compare_attrs = ["port", "username", "password"]
-
- def __init__(self, port, username, password):
- """
- @type port: string or int
- @param port: what port should the Manhole listen on? This is a
- strports specification string, like 'tcp:12345' or
- 'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
- simple tcp port.
-
- @param username:
- @param password: username= and password= form a pair of strings to
- use when authenticating the remote user.
- """
-
- self.username = username
- self.password = password
-
- c = checkers.InMemoryUsernamePasswordDatabaseDontUse()
- c.addUser(username, password)
-
- _BaseManhole.__init__(self, port, c)
-
-class AuthorizedKeysManhole(_BaseManhole, ComparableMixin):
- """This Manhole accepts ssh connections, and requires that the
- prospective client have an ssh private key that matches one of the public
- keys in our authorized_keys file. It is created with the name of a file
- that contains the public keys that we will accept."""
-
- compare_attrs = ["port", "keyfile"]
-
- def __init__(self, port, keyfile):
- """
- @type port: string or int
- @param port: what port should the Manhole listen on? This is a
- strports specification string, like 'tcp:12345' or
- 'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
- simple tcp port.
-
- @param keyfile: the name of a file (relative to the buildmaster's
- basedir) that contains SSH public keys of authorized
- users, one per line. This is the exact same format
- as used by sshd in ~/.ssh/authorized_keys .
- """
-
- # TODO: expanduser this, and make it relative to the buildmaster's
- # basedir
- self.keyfile = keyfile
- c = AuthorizedKeysChecker(keyfile)
- _BaseManhole.__init__(self, port, c)
-
-class ArbitraryCheckerManhole(_BaseManhole, ComparableMixin):
- """This Manhole accepts ssh connections, but uses an arbitrary
- user-supplied 'checker' object to perform authentication."""
-
- compare_attrs = ["port", "checker"]
-
- def __init__(self, port, checker):
- """
- @type port: string or int
- @param port: what port should the Manhole listen on? This is a
- strports specification string, like 'tcp:12345' or
- 'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
- simple tcp port.
-
- @param checker: an instance of a twisted.cred 'checker' which will
- perform authentication
- """
-
- _BaseManhole.__init__(self, port, checker)
-
-
diff --git a/buildbot/buildbot/master.py b/buildbot/buildbot/master.py
deleted file mode 100644
index 2a07c0b..0000000
--- a/buildbot/buildbot/master.py
+++ /dev/null
@@ -1,965 +0,0 @@
-# -*- test-case-name: buildbot.test.test_run -*-
-
-import os
-signal = None
-try:
- import signal
-except ImportError:
- pass
-from cPickle import load
-import warnings
-
-from zope.interface import implements
-from twisted.python import log, components
-from twisted.internet import defer, reactor
-from twisted.spread import pb
-from twisted.cred import portal, checkers
-from twisted.application import service, strports
-from twisted.persisted import styles
-
-import buildbot
-# sibling imports
-from buildbot.util import now
-from buildbot.pbutil import NewCredPerspective
-from buildbot.process.builder import Builder, IDLE
-from buildbot.process.base import BuildRequest
-from buildbot.status.builder import Status
-from buildbot.changes.changes import Change, ChangeMaster, TestChangeMaster
-from buildbot.sourcestamp import SourceStamp
-from buildbot.buildslave import BuildSlave
-from buildbot import interfaces, locks
-from buildbot.process.properties import Properties
-
-########################################
-
-class BotMaster(service.MultiService):
-
- """This is the master-side service which manages remote buildbot slaves.
- It provides them with BuildSlaves, and distributes file change
- notification messages to them.
- """
-
- debug = 0
-
- def __init__(self):
- service.MultiService.__init__(self)
- self.builders = {}
- self.builderNames = []
- # builders maps Builder names to instances of bb.p.builder.Builder,
- # which is the master-side object that defines and controls a build.
- # They are added by calling botmaster.addBuilder() from the startup
- # code.
-
- # self.slaves contains a ready BuildSlave instance for each
- # potential buildslave, i.e. all the ones listed in the config file.
- # If the slave is connected, self.slaves[slavename].slave will
- # contain a RemoteReference to their Bot instance. If it is not
- # connected, that attribute will hold None.
- self.slaves = {} # maps slavename to BuildSlave
- self.statusClientService = None
- self.watchers = {}
-
- # self.locks holds the real Lock instances
- self.locks = {}
-
- # self.mergeRequests is the callable override for merging build
- # requests
- self.mergeRequests = None
-
- # these four are convenience functions for testing
-
- def waitUntilBuilderAttached(self, name):
- b = self.builders[name]
- #if b.slaves:
- # return defer.succeed(None)
- d = defer.Deferred()
- b.watchers['attach'].append(d)
- return d
-
- def waitUntilBuilderDetached(self, name):
- b = self.builders.get(name)
- if not b or not b.slaves:
- return defer.succeed(None)
- d = defer.Deferred()
- b.watchers['detach'].append(d)
- return d
-
- def waitUntilBuilderFullyDetached(self, name):
- b = self.builders.get(name)
- # TODO: this looks too deeply inside the Builder object
- if not b or not b.slaves:
- return defer.succeed(None)
- d = defer.Deferred()
- b.watchers['detach_all'].append(d)
- return d
-
- def waitUntilBuilderIdle(self, name):
- b = self.builders[name]
- # TODO: this looks way too deeply inside the Builder object
- for sb in b.slaves:
- if sb.state != IDLE:
- d = defer.Deferred()
- b.watchers['idle'].append(d)
- return d
- return defer.succeed(None)
-
- def loadConfig_Slaves(self, new_slaves):
- old_slaves = [c for c in list(self)
- if interfaces.IBuildSlave.providedBy(c)]
-
- # identify added/removed slaves. For each slave we construct a tuple
- # of (name, password, class), and we consider the slave to be already
- # present if the tuples match. (we include the class to make sure
- # that BuildSlave(name,pw) is different than
- # SubclassOfBuildSlave(name,pw) ). If the password or class has
- # changed, we will remove the old version of the slave and replace it
- # with a new one. If anything else has changed, we just update the
- # old BuildSlave instance in place. If the name has changed, of
- # course, it looks exactly the same as deleting one slave and adding
- # an unrelated one.
- old_t = {}
- for s in old_slaves:
- old_t[(s.slavename, s.password, s.__class__)] = s
- new_t = {}
- for s in new_slaves:
- new_t[(s.slavename, s.password, s.__class__)] = s
- removed = [old_t[t]
- for t in old_t
- if t not in new_t]
- added = [new_t[t]
- for t in new_t
- if t not in old_t]
- remaining_t = [t
- for t in new_t
- if t in old_t]
- # removeSlave will hang up on the old bot
- dl = []
- for s in removed:
- dl.append(self.removeSlave(s))
- d = defer.DeferredList(dl, fireOnOneErrback=True)
- def _add(res):
- for s in added:
- self.addSlave(s)
- for t in remaining_t:
- old_t[t].update(new_t[t])
- d.addCallback(_add)
- return d
-
- def addSlave(self, s):
- s.setServiceParent(self)
- s.setBotmaster(self)
- self.slaves[s.slavename] = s
-
- def removeSlave(self, s):
- # TODO: technically, disownServiceParent could return a Deferred
- s.disownServiceParent()
- d = self.slaves[s.slavename].disconnect()
- del self.slaves[s.slavename]
- return d
-
- def slaveLost(self, bot):
- for name, b in self.builders.items():
- if bot.slavename in b.slavenames:
- b.detached(bot)
-
- def getBuildersForSlave(self, slavename):
- return [b
- for b in self.builders.values()
- if slavename in b.slavenames]
-
- def getBuildernames(self):
- return self.builderNames
-
- def getBuilders(self):
- allBuilders = [self.builders[name] for name in self.builderNames]
- return allBuilders
-
- def setBuilders(self, builders):
- self.builders = {}
- self.builderNames = []
- for b in builders:
- for slavename in b.slavenames:
- # this is actually validated earlier
- assert slavename in self.slaves
- self.builders[b.name] = b
- self.builderNames.append(b.name)
- b.setBotmaster(self)
- d = self._updateAllSlaves()
- return d
-
- def _updateAllSlaves(self):
- """Notify all buildslaves about changes in their Builders."""
- dl = [s.updateSlave() for s in self.slaves.values()]
- return defer.DeferredList(dl)
-
- def maybeStartAllBuilds(self):
- builders = self.builders.values()
- def _sortfunc(b1, b2):
- t1 = b1.getOldestRequestTime()
- t2 = b2.getOldestRequestTime()
- # If t1 or t2 is None, then there are no build requests,
- # so sort it at the end
- if t1 is None:
- return 1
- if t2 is None:
- return -1
- return cmp(t1, t2)
- builders.sort(cmp=_sortfunc)
- for b in builders:
- b.maybeStartBuild()
-
- def shouldMergeRequests(self, builder, req1, req2):
- """Determine whether two BuildRequests should be merged for
- the given builder.
-
- """
- if self.mergeRequests is not None:
- return self.mergeRequests(builder, req1, req2)
- return req1.canBeMergedWith(req2)
-
- def getPerspective(self, slavename):
- return self.slaves[slavename]
-
- def shutdownSlaves(self):
- # TODO: make this into a bot method rather than a builder method
- for b in self.slaves.values():
- b.shutdownSlave()
-
- def stopService(self):
- for b in self.builders.values():
- b.builder_status.addPointEvent(["master", "shutdown"])
- b.builder_status.saveYourself()
- return service.Service.stopService(self)
-
- def getLockByID(self, lockid):
- """Convert a Lock identifier into an actual Lock instance.
- @param lockid: a locks.MasterLock or locks.SlaveLock instance
- @return: a locks.RealMasterLock or locks.RealSlaveLock instance
- """
- assert isinstance(lockid, (locks.MasterLock, locks.SlaveLock))
- if not lockid in self.locks:
- self.locks[lockid] = lockid.lockClass(lockid)
- # if the master.cfg file has changed maxCount= on the lock, the next
- # time a build is started, they'll get a new RealLock instance. Note
- # that this requires that MasterLock and SlaveLock (marker) instances
- # be hashable and that they should compare properly.
- return self.locks[lockid]
-
-########################################
-
-
-
-class DebugPerspective(NewCredPerspective):
- def attached(self, mind):
- return self
- def detached(self, mind):
- pass
-
- def perspective_requestBuild(self, buildername, reason, branch, revision, properties={}):
- c = interfaces.IControl(self.master)
- bc = c.getBuilder(buildername)
- ss = SourceStamp(branch, revision)
- bpr = Properties()
- bpr.update(properties, "remote requestBuild")
- br = BuildRequest(reason, ss, builderName=buildername, properties=bpr)
- bc.requestBuild(br)
-
- def perspective_pingBuilder(self, buildername):
- c = interfaces.IControl(self.master)
- bc = c.getBuilder(buildername)
- bc.ping()
-
- def perspective_fakeChange(self, file, revision=None, who="fakeUser",
- branch=None):
- change = Change(who, [file], "some fake comments\n",
- branch=branch, revision=revision)
- c = interfaces.IControl(self.master)
- c.addChange(change)
-
- def perspective_setCurrentState(self, buildername, state):
- builder = self.botmaster.builders.get(buildername)
- if not builder: return
- if state == "offline":
- builder.statusbag.currentlyOffline()
- if state == "idle":
- builder.statusbag.currentlyIdle()
- if state == "waiting":
- builder.statusbag.currentlyWaiting(now()+10)
- if state == "building":
- builder.statusbag.currentlyBuilding(None)
- def perspective_reload(self):
- print "doing reload of the config file"
- self.master.loadTheConfigFile()
- def perspective_pokeIRC(self):
- print "saying something on IRC"
- from buildbot.status import words
- for s in self.master:
- if isinstance(s, words.IRC):
- bot = s.f
- for channel in bot.channels:
- print " channel", channel
- bot.p.msg(channel, "Ow, quit it")
-
- def perspective_print(self, msg):
- print "debug", msg
-
-class Dispatcher(styles.Versioned):
- implements(portal.IRealm)
- persistenceVersion = 2
-
- def __init__(self):
- self.names = {}
-
- def upgradeToVersion1(self):
- self.master = self.botmaster.parent
- def upgradeToVersion2(self):
- self.names = {}
-
- def register(self, name, afactory):
- self.names[name] = afactory
- def unregister(self, name):
- del self.names[name]
-
- def requestAvatar(self, avatarID, mind, interface):
- assert interface == pb.IPerspective
- afactory = self.names.get(avatarID)
- if afactory:
- p = afactory.getPerspective()
- elif avatarID == "debug":
- p = DebugPerspective()
- p.master = self.master
- p.botmaster = self.botmaster
- elif avatarID == "statusClient":
- p = self.statusClientService.getPerspective()
- else:
- # it must be one of the buildslaves: no other names will make it
- # past the checker
- p = self.botmaster.getPerspective(avatarID)
-
- if not p:
- raise ValueError("no perspective for '%s'" % avatarID)
-
- d = defer.maybeDeferred(p.attached, mind)
- d.addCallback(self._avatarAttached, mind)
- return d
-
- def _avatarAttached(self, p, mind):
- return (pb.IPerspective, p, lambda p=p,mind=mind: p.detached(mind))
-
-########################################
-
-# service hierarchy:
-# BuildMaster
-# BotMaster
-# ChangeMaster
-# all IChangeSource objects
-# StatusClientService
-# TCPClient(self.ircFactory)
-# TCPServer(self.slaveFactory) -> dispatcher.requestAvatar
-# TCPServer(self.site)
-# UNIXServer(ResourcePublisher(self.site))
-
-
-class BuildMaster(service.MultiService, styles.Versioned):
- debug = 0
- persistenceVersion = 3
- manhole = None
- debugPassword = None
- projectName = "(unspecified)"
- projectURL = None
- buildbotURL = None
- change_svc = None
- properties = Properties()
-
- def __init__(self, basedir, configFileName="master.cfg"):
- service.MultiService.__init__(self)
- self.setName("buildmaster")
- self.basedir = basedir
- self.configFileName = configFileName
-
- # the dispatcher is the realm in which all inbound connections are
- # looked up: slave builders, change notifications, status clients, and
- # the debug port
- dispatcher = Dispatcher()
- dispatcher.master = self
- self.dispatcher = dispatcher
- self.checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
- # the checker starts with no user/passwd pairs: they are added later
- p = portal.Portal(dispatcher)
- p.registerChecker(self.checker)
- self.slaveFactory = pb.PBServerFactory(p)
- self.slaveFactory.unsafeTracebacks = True # let them see exceptions
-
- self.slavePortnum = None
- self.slavePort = None
-
- self.botmaster = BotMaster()
- self.botmaster.setName("botmaster")
- self.botmaster.setServiceParent(self)
- dispatcher.botmaster = self.botmaster
-
- self.status = Status(self.botmaster, self.basedir)
-
- self.statusTargets = []
-
- # this ChangeMaster is a dummy, only used by tests. In the real
- # buildmaster, where the BuildMaster instance is activated
- # (startService is called) by twistd, this attribute is overwritten.
- self.useChanges(TestChangeMaster())
-
- self.readConfig = False
-
- def upgradeToVersion1(self):
- self.dispatcher = self.slaveFactory.root.portal.realm
-
- def upgradeToVersion2(self): # post-0.4.3
- self.webServer = self.webTCPPort
- del self.webTCPPort
- self.webDistribServer = self.webUNIXPort
- del self.webUNIXPort
- self.configFileName = "master.cfg"
-
- def upgradeToVersion3(self):
- # post 0.6.3, solely to deal with the 0.6.3 breakage. Starting with
- # 0.6.5 I intend to do away with .tap files altogether
- self.services = []
- self.namedServices = {}
- del self.change_svc
-
- def startService(self):
- service.MultiService.startService(self)
- self.loadChanges() # must be done before loading the config file
- if not self.readConfig:
- # TODO: consider catching exceptions during this call to
- # loadTheConfigFile and bailing (reactor.stop) if it fails,
- # since without a config file we can't do anything except reload
- # the config file, and it would be nice for the user to discover
- # this quickly.
- self.loadTheConfigFile()
- if signal and hasattr(signal, "SIGHUP"):
- signal.signal(signal.SIGHUP, self._handleSIGHUP)
- for b in self.botmaster.builders.values():
- b.builder_status.addPointEvent(["master", "started"])
- b.builder_status.saveYourself()
-
- def useChanges(self, changes):
- if self.change_svc:
- # TODO: can return a Deferred
- self.change_svc.disownServiceParent()
- self.change_svc = changes
- self.change_svc.basedir = self.basedir
- self.change_svc.setName("changemaster")
- self.dispatcher.changemaster = self.change_svc
- self.change_svc.setServiceParent(self)
-
- def loadChanges(self):
- filename = os.path.join(self.basedir, "changes.pck")
- try:
- changes = load(open(filename, "rb"))
- styles.doUpgrade()
- except IOError:
- log.msg("changes.pck missing, using new one")
- changes = ChangeMaster()
- except EOFError:
- log.msg("corrupted changes.pck, using new one")
- changes = ChangeMaster()
- self.useChanges(changes)
-
- def _handleSIGHUP(self, *args):
- reactor.callLater(0, self.loadTheConfigFile)
-
- def getStatus(self):
- """
- @rtype: L{buildbot.status.builder.Status}
- """
- return self.status
-
- def loadTheConfigFile(self, configFile=None):
- if not configFile:
- configFile = os.path.join(self.basedir, self.configFileName)
-
- log.msg("Creating BuildMaster -- buildbot.version: %s" % buildbot.version)
- log.msg("loading configuration from %s" % configFile)
- configFile = os.path.expanduser(configFile)
-
- try:
- f = open(configFile, "r")
- except IOError, e:
- log.msg("unable to open config file '%s'" % configFile)
- log.msg("leaving old configuration in place")
- log.err(e)
- return
-
- try:
- self.loadConfig(f)
- except:
- log.msg("error during loadConfig")
- log.err()
- log.msg("The new config file is unusable, so I'll ignore it.")
- log.msg("I will keep using the previous config file instead.")
- f.close()
-
- def loadConfig(self, f):
- """Internal function to load a specific configuration file. Any
- errors in the file will be signalled by raising an exception.
-
- @return: a Deferred that will fire (with None) when the configuration
- changes have been completed. This may involve a round-trip to each
- buildslave that was involved."""
-
- localDict = {'basedir': os.path.expanduser(self.basedir)}
- try:
- exec f in localDict
- except:
- log.msg("error while parsing config file")
- raise
-
- try:
- config = localDict['BuildmasterConfig']
- except KeyError:
- log.err("missing config dictionary")
- log.err("config file must define BuildmasterConfig")
- raise
-
- known_keys = ("bots", "slaves",
- "sources", "change_source",
- "schedulers", "builders", "mergeRequests",
- "slavePortnum", "debugPassword", "logCompressionLimit",
- "manhole", "status", "projectName", "projectURL",
- "buildbotURL", "properties"
- )
- for k in config.keys():
- if k not in known_keys:
- log.msg("unknown key '%s' defined in config dictionary" % k)
-
- try:
- # required
- schedulers = config['schedulers']
- builders = config['builders']
- for k in builders:
- if k['name'].startswith("_"):
- errmsg = ("builder names must not start with an "
- "underscore: " + k['name'])
- log.err(errmsg)
- raise ValueError(errmsg)
-
- slavePortnum = config['slavePortnum']
- #slaves = config['slaves']
- #change_source = config['change_source']
-
- # optional
- debugPassword = config.get('debugPassword')
- manhole = config.get('manhole')
- status = config.get('status', [])
- projectName = config.get('projectName')
- projectURL = config.get('projectURL')
- buildbotURL = config.get('buildbotURL')
- properties = config.get('properties', {})
- logCompressionLimit = config.get('logCompressionLimit')
- if logCompressionLimit is not None and not \
- isinstance(logCompressionLimit, int):
- raise ValueError("logCompressionLimit needs to be bool or int")
- mergeRequests = config.get('mergeRequests')
- if mergeRequests is not None and not callable(mergeRequests):
- raise ValueError("mergeRequests must be a callable")
-
- except KeyError, e:
- log.msg("config dictionary is missing a required parameter")
- log.msg("leaving old configuration in place")
- raise
-
- #if "bots" in config:
- # raise KeyError("c['bots'] is no longer accepted")
-
- slaves = config.get('slaves', [])
- if "bots" in config:
- m = ("c['bots'] is deprecated as of 0.7.6 and will be "
- "removed by 0.8.0 . Please use c['slaves'] instead.")
- log.msg(m)
- warnings.warn(m, DeprecationWarning)
- for name, passwd in config['bots']:
- slaves.append(BuildSlave(name, passwd))
-
- if "bots" not in config and "slaves" not in config:
- log.msg("config dictionary must have either 'bots' or 'slaves'")
- log.msg("leaving old configuration in place")
- raise KeyError("must have either 'bots' or 'slaves'")
-
- #if "sources" in config:
- # raise KeyError("c['sources'] is no longer accepted")
-
- change_source = config.get('change_source', [])
- if isinstance(change_source, (list, tuple)):
- change_sources = change_source
- else:
- change_sources = [change_source]
- if "sources" in config:
- m = ("c['sources'] is deprecated as of 0.7.6 and will be "
- "removed by 0.8.0 . Please use c['change_source'] instead.")
- log.msg(m)
- warnings.warn(m, DeprecationWarning)
- for s in config['sources']:
- change_sources.append(s)
-
- # do some validation first
- for s in slaves:
- assert interfaces.IBuildSlave.providedBy(s)
- if s.slavename in ("debug", "change", "status"):
- raise KeyError(
- "reserved name '%s' used for a bot" % s.slavename)
- if config.has_key('interlocks'):
- raise KeyError("c['interlocks'] is no longer accepted")
-
- assert isinstance(change_sources, (list, tuple))
- for s in change_sources:
- assert interfaces.IChangeSource(s, None)
- # this assertion catches c['schedulers'] = Scheduler(), since
- # Schedulers are service.MultiServices and thus iterable.
- errmsg = "c['schedulers'] must be a list of Scheduler instances"
- assert isinstance(schedulers, (list, tuple)), errmsg
- for s in schedulers:
- assert interfaces.IScheduler(s, None), errmsg
- assert isinstance(status, (list, tuple))
- for s in status:
- assert interfaces.IStatusReceiver(s, None)
-
- slavenames = [s.slavename for s in slaves]
- buildernames = []
- dirnames = []
- for b in builders:
- if type(b) is tuple:
- raise ValueError("builder %s must be defined with a dict, "
- "not a tuple" % b[0])
- if b.has_key('slavename') and b['slavename'] not in slavenames:
- raise ValueError("builder %s uses undefined slave %s" \
- % (b['name'], b['slavename']))
- for n in b.get('slavenames', []):
- if n not in slavenames:
- raise ValueError("builder %s uses undefined slave %s" \
- % (b['name'], n))
- if b['name'] in buildernames:
- raise ValueError("duplicate builder name %s"
- % b['name'])
- buildernames.append(b['name'])
- if b['builddir'] in dirnames:
- raise ValueError("builder %s reuses builddir %s"
- % (b['name'], b['builddir']))
- dirnames.append(b['builddir'])
-
- unscheduled_buildernames = buildernames[:]
- schedulernames = []
- for s in schedulers:
- for b in s.listBuilderNames():
- assert b in buildernames, \
- "%s uses unknown builder %s" % (s, b)
- if b in unscheduled_buildernames:
- unscheduled_buildernames.remove(b)
-
- if s.name in schedulernames:
- # TODO: schedulers share a namespace with other Service
- # children of the BuildMaster node, like status plugins, the
- # Manhole, the ChangeMaster, and the BotMaster (although most
- # of these don't have names)
- msg = ("Schedulers must have unique names, but "
- "'%s' was a duplicate" % (s.name,))
- raise ValueError(msg)
- schedulernames.append(s.name)
-
- if unscheduled_buildernames:
- log.msg("Warning: some Builders have no Schedulers to drive them:"
- " %s" % (unscheduled_buildernames,))
-
- # assert that all locks used by the Builds and their Steps are
- # uniquely named.
- lock_dict = {}
- for b in builders:
- for l in b.get('locks', []):
- if isinstance(l, locks.LockAccess): # User specified access to the lock
- l = l.lockid
- if lock_dict.has_key(l.name):
- if lock_dict[l.name] is not l:
- raise ValueError("Two different locks (%s and %s) "
- "share the name %s"
- % (l, lock_dict[l.name], l.name))
- else:
- lock_dict[l.name] = l
- # TODO: this will break with any BuildFactory that doesn't use a
- # .steps list, but I think the verification step is more
- # important.
- for s in b['factory'].steps:
- for l in s[1].get('locks', []):
- if isinstance(l, locks.LockAccess): # User specified access to the lock
- l = l.lockid
- if lock_dict.has_key(l.name):
- if lock_dict[l.name] is not l:
- raise ValueError("Two different locks (%s and %s)"
- " share the name %s"
- % (l, lock_dict[l.name], l.name))
- else:
- lock_dict[l.name] = l
-
- if not isinstance(properties, dict):
- raise ValueError("c['properties'] must be a dictionary")
-
- # slavePortnum supposed to be a strports specification
- if type(slavePortnum) is int:
- slavePortnum = "tcp:%d" % slavePortnum
-
- # now we're committed to implementing the new configuration, so do
- # it atomically
- # TODO: actually, this is spread across a couple of Deferreds, so it
- # really isn't atomic.
-
- d = defer.succeed(None)
-
- self.projectName = projectName
- self.projectURL = projectURL
- self.buildbotURL = buildbotURL
-
- self.properties = Properties()
- self.properties.update(properties, self.configFileName)
- if logCompressionLimit is not None:
- self.status.logCompressionLimit = logCompressionLimit
- if mergeRequests is not None:
- self.botmaster.mergeRequests = mergeRequests
-
- # self.slaves: Disconnect any that were attached and removed from the
- # list. Update self.checker with the new list of passwords, including
- # debug/change/status.
- d.addCallback(lambda res: self.loadConfig_Slaves(slaves))
-
- # self.debugPassword
- if debugPassword:
- self.checker.addUser("debug", debugPassword)
- self.debugPassword = debugPassword
-
- # self.manhole
- if manhole != self.manhole:
- # changing
- if self.manhole:
- # disownServiceParent may return a Deferred
- d.addCallback(lambda res: self.manhole.disownServiceParent())
- def _remove(res):
- self.manhole = None
- return res
- d.addCallback(_remove)
- if manhole:
- def _add(res):
- self.manhole = manhole
- manhole.setServiceParent(self)
- d.addCallback(_add)
-
- # add/remove self.botmaster.builders to match builders. The
- # botmaster will handle startup/shutdown issues.
- d.addCallback(lambda res: self.loadConfig_Builders(builders))
-
- d.addCallback(lambda res: self.loadConfig_status(status))
-
- # Schedulers are added after Builders in case they start right away
- d.addCallback(lambda res: self.loadConfig_Schedulers(schedulers))
- # and Sources go after Schedulers for the same reason
- d.addCallback(lambda res: self.loadConfig_Sources(change_sources))
-
- # self.slavePort
- if self.slavePortnum != slavePortnum:
- if self.slavePort:
- def closeSlavePort(res):
- d1 = self.slavePort.disownServiceParent()
- self.slavePort = None
- return d1
- d.addCallback(closeSlavePort)
- if slavePortnum is not None:
- def openSlavePort(res):
- self.slavePort = strports.service(slavePortnum,
- self.slaveFactory)
- self.slavePort.setServiceParent(self)
- d.addCallback(openSlavePort)
- log.msg("BuildMaster listening on port %s" % slavePortnum)
- self.slavePortnum = slavePortnum
-
- log.msg("configuration update started")
- def _done(res):
- self.readConfig = True
- log.msg("configuration update complete")
- d.addCallback(_done)
- d.addCallback(lambda res: self.botmaster.maybeStartAllBuilds())
- return d
-
- def loadConfig_Slaves(self, new_slaves):
- # set up the Checker with the names and passwords of all valid bots
- self.checker.users = {} # violates abstraction, oh well
- for s in new_slaves:
- self.checker.addUser(s.slavename, s.password)
- self.checker.addUser("change", "changepw")
- # let the BotMaster take care of the rest
- return self.botmaster.loadConfig_Slaves(new_slaves)
-
- def loadConfig_Sources(self, sources):
- if not sources:
- log.msg("warning: no ChangeSources specified in c['change_source']")
- # shut down any that were removed, start any that were added
- deleted_sources = [s for s in self.change_svc if s not in sources]
- added_sources = [s for s in sources if s not in self.change_svc]
- dl = [self.change_svc.removeSource(s) for s in deleted_sources]
- def addNewOnes(res):
- [self.change_svc.addSource(s) for s in added_sources]
- d = defer.DeferredList(dl, fireOnOneErrback=1, consumeErrors=0)
- d.addCallback(addNewOnes)
- return d
-
- def allSchedulers(self):
- return [child for child in self
- if interfaces.IScheduler.providedBy(child)]
-
-
- def loadConfig_Schedulers(self, newschedulers):
- oldschedulers = self.allSchedulers()
- removed = [s for s in oldschedulers if s not in newschedulers]
- added = [s for s in newschedulers if s not in oldschedulers]
- dl = [defer.maybeDeferred(s.disownServiceParent) for s in removed]
- def addNewOnes(res):
- log.msg("adding %d new schedulers, removed %d" %
- (len(added), len(dl)))
- for s in added:
- s.setServiceParent(self)
- d = defer.DeferredList(dl, fireOnOneErrback=1)
- d.addCallback(addNewOnes)
- if removed or added:
- # notify Downstream schedulers to potentially pick up
- # new schedulers now that we have removed and added some
- def updateDownstreams(res):
- log.msg("notifying downstream schedulers of changes")
- for s in newschedulers:
- if interfaces.IDownstreamScheduler.providedBy(s):
- s.checkUpstreamScheduler()
- d.addCallback(updateDownstreams)
- return d
-
- def loadConfig_Builders(self, newBuilderData):
- somethingChanged = False
- newList = {}
- newBuilderNames = []
- allBuilders = self.botmaster.builders.copy()
- for data in newBuilderData:
- name = data['name']
- newList[name] = data
- newBuilderNames.append(name)
-
- # identify all that were removed
- for oldname in self.botmaster.getBuildernames():
- if oldname not in newList:
- log.msg("removing old builder %s" % oldname)
- del allBuilders[oldname]
- somethingChanged = True
- # announce the change
- self.status.builderRemoved(oldname)
-
- # everything in newList is either unchanged, changed, or new
- for name, data in newList.items():
- old = self.botmaster.builders.get(name)
- basedir = data['builddir'] # used on both master and slave
- #name, slave, builddir, factory = data
- if not old: # new
- # category added after 0.6.2
- category = data.get('category', None)
- log.msg("adding new builder %s for category %s" %
- (name, category))
- statusbag = self.status.builderAdded(name, basedir, category)
- builder = Builder(data, statusbag)
- allBuilders[name] = builder
- somethingChanged = True
- elif old.compareToSetup(data):
- # changed: try to minimize the disruption and only modify the
- # pieces that really changed
- diffs = old.compareToSetup(data)
- log.msg("updating builder %s: %s" % (name, "\n".join(diffs)))
-
- statusbag = old.builder_status
- statusbag.saveYourself() # seems like a good idea
- # TODO: if the basedir was changed, we probably need to make
- # a new statusbag
- new_builder = Builder(data, statusbag)
- new_builder.consumeTheSoulOfYourPredecessor(old)
- # that migrates any retained slavebuilders too
-
- # point out that the builder was updated. On the Waterfall,
- # this will appear just after any currently-running builds.
- statusbag.addPointEvent(["config", "updated"])
-
- allBuilders[name] = new_builder
- somethingChanged = True
- else:
- # unchanged: leave it alone
- log.msg("builder %s is unchanged" % name)
- pass
-
- if somethingChanged:
- sortedAllBuilders = [allBuilders[name] for name in newBuilderNames]
- d = self.botmaster.setBuilders(sortedAllBuilders)
- return d
- return None
-
- def loadConfig_status(self, status):
- dl = []
-
- # remove old ones
- for s in self.statusTargets[:]:
- if not s in status:
- log.msg("removing IStatusReceiver", s)
- d = defer.maybeDeferred(s.disownServiceParent)
- dl.append(d)
- self.statusTargets.remove(s)
- # after those are finished going away, add new ones
- def addNewOnes(res):
- for s in status:
- if not s in self.statusTargets:
- log.msg("adding IStatusReceiver", s)
- s.setServiceParent(self)
- self.statusTargets.append(s)
- d = defer.DeferredList(dl, fireOnOneErrback=1)
- d.addCallback(addNewOnes)
- return d
-
-
- def addChange(self, change):
- for s in self.allSchedulers():
- s.addChange(change)
-
- def submitBuildSet(self, bs):
- # determine the set of Builders to use
- builders = []
- for name in bs.builderNames:
- b = self.botmaster.builders.get(name)
- if b:
- if b not in builders:
- builders.append(b)
- continue
- # TODO: add aliases like 'all'
- raise KeyError("no such builder named '%s'" % name)
-
- # now tell the BuildSet to create BuildRequests for all those
- # Builders and submit them
- bs.start(builders)
- self.status.buildsetSubmitted(bs.status)
-
-
-class Control:
- implements(interfaces.IControl)
-
- def __init__(self, master):
- self.master = master
-
- def addChange(self, change):
- self.master.change_svc.addChange(change)
-
- def submitBuildSet(self, bs):
- self.master.submitBuildSet(bs)
-
- def getBuilder(self, name):
- b = self.master.botmaster.builders[name]
- return interfaces.IBuilderControl(b)
-
-components.registerAdapter(Control, BuildMaster, interfaces.IControl)
-
-# so anybody who can get a handle on the BuildMaster can cause a build with:
-# IControl(master).getBuilder("full-2.3").requestBuild(buildrequest)
diff --git a/buildbot/buildbot/pbutil.py b/buildbot/buildbot/pbutil.py
deleted file mode 100644
index bc85a01..0000000
--- a/buildbot/buildbot/pbutil.py
+++ /dev/null
@@ -1,147 +0,0 @@
-
-"""Base classes handy for use with PB clients.
-"""
-
-from twisted.spread import pb
-
-from twisted.spread.pb import PBClientFactory
-from twisted.internet import protocol
-from twisted.python import log
-
-class NewCredPerspective(pb.Avatar):
- def attached(self, mind):
- return self
- def detached(self, mind):
- pass
-
-class ReconnectingPBClientFactory(PBClientFactory,
- protocol.ReconnectingClientFactory):
- """Reconnecting client factory for PB brokers.
-
- Like PBClientFactory, but if the connection fails or is lost, the factory
- will attempt to reconnect.
-
- Instead of using f.getRootObject (which gives a Deferred that can only
- be fired once), override the gotRootObject method.
-
- Instead of using the newcred f.login (which is also one-shot), call
- f.startLogin() with the credentials and client, and override the
- gotPerspective method.
-
- Instead of using the oldcred f.getPerspective (also one-shot), call
- f.startGettingPerspective() with the same arguments, and override
- gotPerspective.
-
- gotRootObject and gotPerspective will be called each time the object is
- received (once per successful connection attempt). You will probably want
- to use obj.notifyOnDisconnect to find out when the connection is lost.
-
- If an authorization error occurs, failedToGetPerspective() will be
- invoked.
-
- To use me, subclass, then hand an instance to a connector (like
- TCPClient).
- """
-
- def __init__(self):
- PBClientFactory.__init__(self)
- self._doingLogin = False
- self._doingGetPerspective = False
-
- def clientConnectionFailed(self, connector, reason):
- PBClientFactory.clientConnectionFailed(self, connector, reason)
- # Twisted-1.3 erroneously abandons the connection on non-UserErrors.
- # To avoid this bug, don't upcall, and implement the correct version
- # of the method here.
- if self.continueTrying:
- self.connector = connector
- self.retry()
-
- def clientConnectionLost(self, connector, reason):
- PBClientFactory.clientConnectionLost(self, connector, reason,
- reconnecting=True)
- RCF = protocol.ReconnectingClientFactory
- RCF.clientConnectionLost(self, connector, reason)
-
- def clientConnectionMade(self, broker):
- self.resetDelay()
- PBClientFactory.clientConnectionMade(self, broker)
- if self._doingLogin:
- self.doLogin(self._root)
- if self._doingGetPerspective:
- self.doGetPerspective(self._root)
- self.gotRootObject(self._root)
-
- def __getstate__(self):
- # this should get folded into ReconnectingClientFactory
- d = self.__dict__.copy()
- d['connector'] = None
- d['_callID'] = None
- return d
-
- # oldcred methods
-
- def getPerspective(self, *args):
- raise RuntimeError, "getPerspective is one-shot: use startGettingPerspective instead"
-
- def startGettingPerspective(self, username, password, serviceName,
- perspectiveName=None, client=None):
- self._doingGetPerspective = True
- if perspectiveName == None:
- perspectiveName = username
- self._oldcredArgs = (username, password, serviceName,
- perspectiveName, client)
-
- def doGetPerspective(self, root):
- # oldcred getPerspective()
- (username, password,
- serviceName, perspectiveName, client) = self._oldcredArgs
- d = self._cbAuthIdentity(root, username, password)
- d.addCallback(self._cbGetPerspective,
- serviceName, perspectiveName, client)
- d.addCallbacks(self.gotPerspective, self.failedToGetPerspective)
-
-
- # newcred methods
-
- def login(self, *args):
- raise RuntimeError, "login is one-shot: use startLogin instead"
-
- def startLogin(self, credentials, client=None):
- self._credentials = credentials
- self._client = client
- self._doingLogin = True
-
- def doLogin(self, root):
- # newcred login()
- d = self._cbSendUsername(root, self._credentials.username,
- self._credentials.password, self._client)
- d.addCallbacks(self.gotPerspective, self.failedToGetPerspective)
-
-
- # methods to override
-
- def gotPerspective(self, perspective):
- """The remote avatar or perspective (obtained each time this factory
- connects) is now available."""
- pass
-
- def gotRootObject(self, root):
- """The remote root object (obtained each time this factory connects)
- is now available. This method will be called each time the connection
- is established and the object reference is retrieved."""
- pass
-
- def failedToGetPerspective(self, why):
- """The login process failed, most likely because of an authorization
- failure (bad password), but it is also possible that we lost the new
- connection before we managed to send our credentials.
- """
- log.msg("ReconnectingPBClientFactory.failedToGetPerspective")
- if why.check(pb.PBConnectionLost):
- log.msg("we lost the brand-new connection")
- # retrying might help here, let clientConnectionLost decide
- return
- # probably authorization
- self.stopTrying() # logging in harder won't help
- log.err(why)
diff --git a/buildbot/buildbot/process/__init__.py b/buildbot/buildbot/process/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/buildbot/buildbot/process/__init__.py
+++ /dev/null
diff --git a/buildbot/buildbot/process/base.py b/buildbot/buildbot/process/base.py
deleted file mode 100644
index 8eaa940..0000000
--- a/buildbot/buildbot/process/base.py
+++ /dev/null
@@ -1,627 +0,0 @@
-# -*- test-case-name: buildbot.test.test_step -*-
-
-import types
-
-from zope.interface import implements
-from twisted.python import log
-from twisted.python.failure import Failure
-from twisted.internet import reactor, defer, error
-
-from buildbot import interfaces, locks
-from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE, EXCEPTION
-from buildbot.status.builder import Results, BuildRequestStatus
-from buildbot.status.progress import BuildProgress
-from buildbot.process.properties import Properties
-
-class BuildRequest:
- """I represent a request to a specific Builder to run a single build.
-
- I have a SourceStamp which specifies what sources I will build. This may
- specify a specific revision of the source tree (so source.branch,
- source.revision, and source.patch are used). The .patch attribute is
- either None or a tuple of (patchlevel, diff), consisting of a number to
- use in 'patch -pN', and a unified-format context diff.
-
- Alternatively, the SourceStamp may specify a set of Changes to be built,
- contained in source.changes. In this case, I may be mergeable with other
- BuildRequests on the same branch.
-
- I may be part of a BuildSet, in which case I will report status results
- to it.
-
- I am paired with a BuildRequestStatus object, to which I feed status
- information.
-
- @type source: a L{buildbot.sourcestamp.SourceStamp} instance.
- @ivar source: the source code that this BuildRequest use
-
- @type reason: string
- @ivar reason: the reason this Build is being requested. Schedulers
- provide this, but for forced builds the user requesting the
- build will provide a string.
-
- @type properties: Properties object
- @ivar properties: properties that should be applied to this build
- 'owner' property is used by Build objects to collect
- the list returned by getInterestedUsers
-
- @ivar status: the IBuildStatus object which tracks our status
-
- @ivar submittedAt: a timestamp (seconds since epoch) when this request
- was submitted to the Builder. This is used by the CVS
- step to compute a checkout timestamp, as well as the
- master to prioritize build requests from oldest to
- newest.
- """
-
- source = None
- builder = None
- startCount = 0 # how many times we have tried to start this build
- submittedAt = None
-
- implements(interfaces.IBuildRequestControl)
-
- def __init__(self, reason, source, builderName, properties=None):
- assert interfaces.ISourceStamp(source, None)
- self.reason = reason
- self.source = source
-
- self.properties = Properties()
- if properties:
- self.properties.updateFromProperties(properties)
-
- self.start_watchers = []
- self.finish_watchers = []
- self.status = BuildRequestStatus(source, builderName)
-
- def canBeMergedWith(self, other):
- return self.source.canBeMergedWith(other.source)
-
- def mergeWith(self, others):
- return self.source.mergeWith([o.source for o in others])
-
- def mergeReasons(self, others):
- """Return a reason for the merged build request."""
- reasons = []
- for req in [self] + others:
- if req.reason and req.reason not in reasons:
- reasons.append(req.reason)
- return ", ".join(reasons)
-
- def waitUntilFinished(self):
- """Get a Deferred that will fire (with a
- L{buildbot.interfaces.IBuildStatus} instance when the build
- finishes."""
- d = defer.Deferred()
- self.finish_watchers.append(d)
- return d
-
- # these are called by the Builder
-
- def requestSubmitted(self, builder):
- # the request has been placed on the queue
- self.builder = builder
-
- def buildStarted(self, build, buildstatus):
- """This is called by the Builder when a Build has been started in the
- hopes of satifying this BuildRequest. It may be called multiple
- times, since interrupted builds and lost buildslaves may force
- multiple Builds to be run until the fate of the BuildRequest is known
- for certain."""
- for o in self.start_watchers[:]:
- # these observers get the IBuildControl
- o(build)
- # while these get the IBuildStatus
- self.status.buildStarted(buildstatus)
-
- def finished(self, buildstatus):
- """This is called by the Builder when the BuildRequest has been
- retired. This happens when its Build has either succeeded (yay!) or
- failed (boo!). TODO: If it is halted due to an exception (oops!), or
- some other retryable error, C{finished} will not be called yet."""
-
- for w in self.finish_watchers:
- w.callback(buildstatus)
- self.finish_watchers = []
-
- # IBuildRequestControl
-
- def subscribe(self, observer):
- self.start_watchers.append(observer)
- def unsubscribe(self, observer):
- self.start_watchers.remove(observer)
-
- def cancel(self):
- """Cancel this request. This can only be successful if the Build has
- not yet been started.
-
- @return: a boolean indicating if the cancel was successful."""
- if self.builder:
- return self.builder.cancelBuildRequest(self)
- return False
-
- def setSubmitTime(self, t):
- self.submittedAt = t
- self.status.setSubmitTime(t)
-
- def getSubmitTime(self):
- return self.submittedAt
-
-
-class Build:
- """I represent a single build by a single slave. Specialized Builders can
- use subclasses of Build to hold status information unique to those build
- processes.
-
- I control B{how} the build proceeds. The actual build is broken up into a
- series of steps, saved in the .buildSteps[] array as a list of
- L{buildbot.process.step.BuildStep} objects. Each step is a single remote
- command, possibly a shell command.
-
- During the build, I put status information into my C{BuildStatus}
- gatherer.
-
- After the build, I go away.
-
- I can be used by a factory by setting buildClass on
- L{buildbot.process.factory.BuildFactory}
-
- @ivar requests: the list of L{BuildRequest}s that triggered me
- @ivar build_status: the L{buildbot.status.builder.BuildStatus} that
- collects our status
- """
-
- implements(interfaces.IBuildControl)
-
- workdir = "build"
- build_status = None
- reason = "changes"
- finished = False
- results = None
-
- def __init__(self, requests):
- self.requests = requests
- for req in self.requests:
- req.startCount += 1
- self.locks = []
- # build a source stamp
- self.source = requests[0].mergeWith(requests[1:])
- self.reason = requests[0].mergeReasons(requests[1:])
-
- self.progress = None
- self.currentStep = None
- self.slaveEnvironment = {}
-
- self.terminate = False
-
- def setBuilder(self, builder):
- """
- Set the given builder as our builder.
-
- @type builder: L{buildbot.process.builder.Builder}
- """
- self.builder = builder
-
- def setLocks(self, locks):
- self.locks = locks
-
- def setSlaveEnvironment(self, env):
- self.slaveEnvironment = env
-
- def getSourceStamp(self):
- return self.source
-
- def setProperty(self, propname, value, source):
- """Set a property on this build. This may only be called after the
- build has started, so that it has a BuildStatus object where the
- properties can live."""
- self.build_status.setProperty(propname, value, source)
-
- def getProperties(self):
- return self.build_status.getProperties()
-
- def getProperty(self, propname):
- return self.build_status.getProperty(propname)
-
- def allChanges(self):
- return self.source.changes
-
- def allFiles(self):
- # return a list of all source files that were changed
- files = []
- havedirs = 0
- for c in self.allChanges():
- for f in c.files:
- files.append(f)
- if c.isdir:
- havedirs = 1
- return files
-
- def __repr__(self):
- return "<Build %s>" % (self.builder.name,)
-
- def __getstate__(self):
- d = self.__dict__.copy()
- if d.has_key('remote'):
- del d['remote']
- return d
-
- def blamelist(self):
- blamelist = []
- for c in self.allChanges():
- if c.who not in blamelist:
- blamelist.append(c.who)
- blamelist.sort()
- return blamelist
-
- def changesText(self):
- changetext = ""
- for c in self.allChanges():
- changetext += "-" * 60 + "\n\n" + c.asText() + "\n"
- # consider sorting these by number
- return changetext
-
- def setStepFactories(self, step_factories):
- """Set a list of 'step factories', which are tuples of (class,
- kwargs), where 'class' is generally a subclass of step.BuildStep .
- These are used to create the Steps themselves when the Build starts
- (as opposed to when it is first created). By creating the steps
- later, their __init__ method will have access to things like
- build.allFiles() ."""
- self.stepFactories = list(step_factories)
-
-
-
- useProgress = True
-
- def getSlaveCommandVersion(self, command, oldversion=None):
- return self.slavebuilder.getSlaveCommandVersion(command, oldversion)
- def getSlaveName(self):
- return self.slavebuilder.slave.slavename
-
- def setupProperties(self):
- props = self.getProperties()
-
- # start with global properties from the configuration
- buildmaster = self.builder.botmaster.parent
- props.updateFromProperties(buildmaster.properties)
-
- # get any properties from requests (this is the path through
- # which schedulers will send us properties)
- for rq in self.requests:
- props.updateFromProperties(rq.properties)
-
- # now set some properties of our own, corresponding to the
- # build itself
- props.setProperty("buildername", self.builder.name, "Build")
- props.setProperty("buildnumber", self.build_status.number, "Build")
- props.setProperty("branch", self.source.branch, "Build")
- props.setProperty("revision", self.source.revision, "Build")
-
- def setupSlaveBuilder(self, slavebuilder):
- self.slavebuilder = slavebuilder
-
- # navigate our way back to the L{buildbot.buildslave.BuildSlave}
- # object that came from the config, and get its properties
- buildslave_properties = slavebuilder.slave.properties
- self.getProperties().updateFromProperties(buildslave_properties)
-
- self.slavename = slavebuilder.slave.slavename
- self.build_status.setSlavename(self.slavename)
-
- def startBuild(self, build_status, expectations, slavebuilder):
- """This method sets up the build, then starts it by invoking the
- first Step. It returns a Deferred which will fire when the build
- finishes. This Deferred is guaranteed to never errback."""
-
- # we are taking responsibility for watching the connection to the
- # remote. This responsibility was held by the Builder until our
- # startBuild was called, and will not return to them until we fire
- # the Deferred returned by this method.
-
- log.msg("%s.startBuild" % self)
- self.build_status = build_status
- # now that we have a build_status, we can set properties
- self.setupProperties()
- self.setupSlaveBuilder(slavebuilder)
- slavebuilder.slave.updateSlaveStatus(buildStarted=build_status)
-
- # convert all locks into their real forms
- lock_list = []
- for access in self.locks:
- if not isinstance(access, locks.LockAccess):
- # Buildbot 0.7.7 compability: user did not specify access
- access = access.defaultAccess()
- lock = self.builder.botmaster.getLockByID(access.lockid)
- lock_list.append((lock, access))
- self.locks = lock_list
- # then narrow SlaveLocks down to the right slave
- self.locks = [(l.getLock(self.slavebuilder), la)
- for l, la in self.locks]
- self.remote = slavebuilder.remote
- self.remote.notifyOnDisconnect(self.lostRemote)
- d = self.deferred = defer.Deferred()
- def _release_slave(res, slave, bs):
- self.slavebuilder.buildFinished()
- slave.updateSlaveStatus(buildFinished=bs)
- return res
- d.addCallback(_release_slave, self.slavebuilder.slave, build_status)
-
- try:
- self.setupBuild(expectations) # create .steps
- except:
- # the build hasn't started yet, so log the exception as a point
- # event instead of flunking the build. TODO: associate this
- # failure with the build instead. this involves doing
- # self.build_status.buildStarted() from within the exception
- # handler
- log.msg("Build.setupBuild failed")
- log.err(Failure())
- self.builder.builder_status.addPointEvent(["setupBuild",
- "exception"])
- self.finished = True
- self.results = FAILURE
- self.deferred = None
- d.callback(self)
- return d
-
- self.acquireLocks().addCallback(self._startBuild_2)
- return d
-
- def acquireLocks(self, res=None):
- log.msg("acquireLocks(step %s, locks %s)" % (self, self.locks))
- if not self.locks:
- return defer.succeed(None)
- for lock, access in self.locks:
- if not lock.isAvailable(access):
- log.msg("Build %s waiting for lock %s" % (self, lock))
- d = lock.waitUntilMaybeAvailable(self, access)
- d.addCallback(self.acquireLocks)
- return d
- # all locks are available, claim them all
- for lock, access in self.locks:
- lock.claim(self, access)
- return defer.succeed(None)
-
- def _startBuild_2(self, res):
- self.build_status.buildStarted(self)
- self.startNextStep()
-
- def setupBuild(self, expectations):
- # create the actual BuildSteps. If there are any name collisions, we
- # add a count to the loser until it is unique.
- self.steps = []
- self.stepStatuses = {}
- stepnames = []
- sps = []
-
- for factory, args in self.stepFactories:
- args = args.copy()
- try:
- step = factory(**args)
- except:
- log.msg("error while creating step, factory=%s, args=%s"
- % (factory, args))
- raise
- step.setBuild(self)
- step.setBuildSlave(self.slavebuilder.slave)
- step.setDefaultWorkdir(self.workdir)
- name = step.name
- count = 1
- while name in stepnames and count < 1000:
- count += 1
- name = step.name + "_%d" % count
- if count == 1000:
- raise RuntimeError("reached 1000 steps with base name" + \
- "%s, bailing" % step.name)
- elif name in stepnames:
- raise RuntimeError("duplicate step '%s'" % step.name)
- step.name = name
- stepnames.append(name)
- self.steps.append(step)
-
- # tell the BuildStatus about the step. This will create a
- # BuildStepStatus and bind it to the Step.
- step_status = self.build_status.addStepWithName(name)
- step.setStepStatus(step_status)
-
- sp = None
- if self.useProgress:
- # XXX: maybe bail if step.progressMetrics is empty? or skip
- # progress for that one step (i.e. "it is fast"), or have a
- # separate "variable" flag that makes us bail on progress
- # tracking
- sp = step.setupProgress()
- if sp:
- sps.append(sp)
-
- # Create a buildbot.status.progress.BuildProgress object. This is
- # called once at startup to figure out how to build the long-term
- # Expectations object, and again at the start of each build to get a
- # fresh BuildProgress object to track progress for that individual
- # build. TODO: revisit at-startup call
-
- if self.useProgress:
- self.progress = BuildProgress(sps)
- if self.progress and expectations:
- self.progress.setExpectationsFrom(expectations)
-
- # we are now ready to set up our BuildStatus.
- self.build_status.setSourceStamp(self.source)
- self.build_status.setRequests([req.status for req in self.requests])
- self.build_status.setReason(self.reason)
- self.build_status.setBlamelist(self.blamelist())
- self.build_status.setProgress(self.progress)
-
- # gather owners from build requests
- owners = [r.properties['owner'] for r in self.requests
- if r.properties.has_key('owner')]
- if owners: self.setProperty('owners', owners, self.reason)
-
- self.results = [] # list of FAILURE, SUCCESS, WARNINGS, SKIPPED
- self.result = SUCCESS # overall result, may downgrade after each step
- self.text = [] # list of text string lists (text2)
-
- def getNextStep(self):
- """This method is called to obtain the next BuildStep for this build.
- When it returns None (or raises a StopIteration exception), the build
- is complete."""
- if not self.steps:
- return None
- if self.terminate:
- while True:
- s = self.steps.pop(0)
- if s.alwaysRun:
- return s
- if not self.steps:
- return None
- else:
- return self.steps.pop(0)
-
- def startNextStep(self):
- try:
- s = self.getNextStep()
- except StopIteration:
- s = None
- if not s:
- return self.allStepsDone()
- self.currentStep = s
- d = defer.maybeDeferred(s.startStep, self.remote)
- d.addCallback(self._stepDone, s)
- d.addErrback(self.buildException)
-
- def _stepDone(self, results, step):
- self.currentStep = None
- if self.finished:
- return # build was interrupted, don't keep building
- terminate = self.stepDone(results, step) # interpret/merge results
- if terminate:
- self.terminate = True
- return self.startNextStep()
-
- def stepDone(self, result, step):
- """This method is called when the BuildStep completes. It is passed a
- status object from the BuildStep and is responsible for merging the
- Step's results into those of the overall Build."""
-
- terminate = False
- text = None
- if type(result) == types.TupleType:
- result, text = result
- assert type(result) == type(SUCCESS)
- log.msg(" step '%s' complete: %s" % (step.name, Results[result]))
- self.results.append(result)
- if text:
- self.text.extend(text)
- if not self.remote:
- terminate = True
- if result == FAILURE:
- if step.warnOnFailure:
- if self.result != FAILURE:
- self.result = WARNINGS
- if step.flunkOnFailure:
- self.result = FAILURE
- if step.haltOnFailure:
- terminate = True
- elif result == WARNINGS:
- if step.warnOnWarnings:
- if self.result != FAILURE:
- self.result = WARNINGS
- if step.flunkOnWarnings:
- self.result = FAILURE
- elif result == EXCEPTION:
- self.result = EXCEPTION
- terminate = True
- return terminate
-
- def lostRemote(self, remote=None):
- # the slave went away. There are several possible reasons for this,
- # and they aren't necessarily fatal. For now, kill the build, but
- # TODO: see if we can resume the build when it reconnects.
- log.msg("%s.lostRemote" % self)
- self.remote = None
- if self.currentStep:
- # this should cause the step to finish.
- log.msg(" stopping currentStep", self.currentStep)
- self.currentStep.interrupt(Failure(error.ConnectionLost()))
-
- def stopBuild(self, reason="<no reason given>"):
- # the idea here is to let the user cancel a build because, e.g.,
- # they realized they committed a bug and they don't want to waste
- # the time building something that they know will fail. Another
- # reason might be to abandon a stuck build. We want to mark the
- # build as failed quickly rather than waiting for the slave's
- # timeout to kill it on its own.
-
- log.msg(" %s: stopping build: %s" % (self, reason))
- if self.finished:
- return
- # TODO: include 'reason' in this point event
- self.builder.builder_status.addPointEvent(['interrupt'])
- self.currentStep.interrupt(reason)
- if 0:
- # TODO: maybe let its deferred do buildFinished
- if self.currentStep and self.currentStep.progress:
- # XXX: really .fail or something
- self.currentStep.progress.finish()
- text = ["stopped", reason]
- self.buildFinished(text, FAILURE)
-
- def allStepsDone(self):
- if self.result == FAILURE:
- text = ["failed"]
- elif self.result == WARNINGS:
- text = ["warnings"]
- elif self.result == EXCEPTION:
- text = ["exception"]
- else:
- text = ["build", "successful"]
- text.extend(self.text)
- return self.buildFinished(text, self.result)
-
- def buildException(self, why):
- log.msg("%s.buildException" % self)
- log.err(why)
- self.buildFinished(["build", "exception"], FAILURE)
-
- def buildFinished(self, text, results):
- """This method must be called when the last Step has completed. It
- marks the Build as complete and returns the Builder to the 'idle'
- state.
-
- It takes two arguments which describe the overall build status:
- text, results. 'results' is one of SUCCESS, WARNINGS, or FAILURE.
-
- If 'results' is SUCCESS or WARNINGS, we will permit any dependant
- builds to start. If it is 'FAILURE', those builds will be
- abandoned."""
-
- self.finished = True
- if self.remote:
- self.remote.dontNotifyOnDisconnect(self.lostRemote)
- self.results = results
-
- log.msg(" %s: build finished" % self)
- self.build_status.setText(text)
- self.build_status.setResults(results)
- self.build_status.buildFinished()
- if self.progress and results == SUCCESS:
- # XXX: also test a 'timing consistent' flag?
- log.msg(" setting expectations for next time")
- self.builder.setExpectations(self.progress)
- reactor.callLater(0, self.releaseLocks)
- self.deferred.callback(self)
- self.deferred = None
-
- def releaseLocks(self):
- log.msg("releaseLocks(%s): %s" % (self, self.locks))
- for lock, access in self.locks:
- lock.release(self, access)
-
- # IBuildControl
-
- def getStatus(self):
- return self.build_status
-
- # stopBuild is defined earlier
-
diff --git a/buildbot/buildbot/process/builder.py b/buildbot/buildbot/process/builder.py
deleted file mode 100644
index cb26ccb..0000000
--- a/buildbot/buildbot/process/builder.py
+++ /dev/null
@@ -1,874 +0,0 @@
-
-import random, weakref
-from zope.interface import implements
-from twisted.python import log, components
-from twisted.spread import pb
-from twisted.internet import reactor, defer
-
-from buildbot import interfaces
-from buildbot.status.progress import Expectations
-from buildbot.util import now
-from buildbot.process import base
-
-(ATTACHING, # slave attached, still checking hostinfo/etc
- IDLE, # idle, available for use
- PINGING, # build about to start, making sure it is still alive
- BUILDING, # build is running
- LATENT, # latent slave is not substantiated; similar to idle
- ) = range(5)
-
-
-class AbstractSlaveBuilder(pb.Referenceable):
- """I am the master-side representative for one of the
- L{buildbot.slave.bot.SlaveBuilder} objects that lives in a remote
- buildbot. When a remote builder connects, I query it for command versions
- and then make it available to any Builds that are ready to run. """
-
- def __init__(self):
- self.ping_watchers = []
- self.state = None # set in subclass
- self.remote = None
- self.slave = None
- self.builder_name = None
-
- def __repr__(self):
- r = ["<", self.__class__.__name__]
- if self.builder_name:
- r.extend([" builder=", self.builder_name])
- if self.slave:
- r.extend([" slave=", self.slave.slavename])
- r.append(">")
- return ''.join(r)
-
- def setBuilder(self, b):
- self.builder = b
- self.builder_name = b.name
-
- def getSlaveCommandVersion(self, command, oldversion=None):
- if self.remoteCommands is None:
- # the slave is 0.5.0 or earlier
- return oldversion
- return self.remoteCommands.get(command)
-
- def isAvailable(self):
- # if this SlaveBuilder is busy, then it's definitely not available
- if self.isBusy():
- return False
-
- # otherwise, check in with the BuildSlave
- if self.slave:
- return self.slave.canStartBuild()
-
- # no slave? not very available.
- return False
-
- def isBusy(self):
- return self.state not in (IDLE, LATENT)
-
- def buildStarted(self):
- self.state = BUILDING
-
- def buildFinished(self):
- self.state = IDLE
- reactor.callLater(0, self.builder.botmaster.maybeStartAllBuilds)
-
- def attached(self, slave, remote, commands):
- """
- @type slave: L{buildbot.buildslave.BuildSlave}
- @param slave: the BuildSlave that represents the buildslave as a
- whole
- @type remote: L{twisted.spread.pb.RemoteReference}
- @param remote: a reference to the L{buildbot.slave.bot.SlaveBuilder}
- @type commands: dict: string -> string, or None
- @param commands: provides the slave's version of each RemoteCommand
- """
- self.state = ATTACHING
- self.remote = remote
- self.remoteCommands = commands # maps command name to version
- if self.slave is None:
- self.slave = slave
- self.slave.addSlaveBuilder(self)
- else:
- assert self.slave == slave
- log.msg("Buildslave %s attached to %s" % (slave.slavename,
- self.builder_name))
- d = self.remote.callRemote("setMaster", self)
- d.addErrback(self._attachFailure, "Builder.setMaster")
- d.addCallback(self._attached2)
- return d
-
- def _attached2(self, res):
- d = self.remote.callRemote("print", "attached")
- d.addErrback(self._attachFailure, "Builder.print 'attached'")
- d.addCallback(self._attached3)
- return d
-
- def _attached3(self, res):
- # now we say they're really attached
- self.state = IDLE
- return self
-
- def _attachFailure(self, why, where):
- assert isinstance(where, str)
- log.msg(where)
- log.err(why)
- return why
-
- def ping(self, timeout, status=None):
- """Ping the slave to make sure it is still there. Returns a Deferred
- that fires with True if it is.
-
- @param status: if you point this at a BuilderStatus, a 'pinging'
- event will be pushed.
- """
- oldstate = self.state
- self.state = PINGING
- newping = not self.ping_watchers
- d = defer.Deferred()
- self.ping_watchers.append(d)
- if newping:
- if status:
- event = status.addEvent(["pinging"])
- d2 = defer.Deferred()
- d2.addCallback(self._pong_status, event)
- self.ping_watchers.insert(0, d2)
- # I think it will make the tests run smoother if the status
- # is updated before the ping completes
- Ping().ping(self.remote, timeout).addCallback(self._pong)
-
- def reset_state(res):
- if self.state == PINGING:
- self.state = oldstate
- return res
- d.addCallback(reset_state)
- return d
-
- def _pong(self, res):
- watchers, self.ping_watchers = self.ping_watchers, []
- for d in watchers:
- d.callback(res)
-
- def _pong_status(self, res, event):
- if res:
- event.text = ["ping", "success"]
- else:
- event.text = ["ping", "failed"]
- event.finish()
-
- def detached(self):
- log.msg("Buildslave %s detached from %s" % (self.slave.slavename,
- self.builder_name))
- if self.slave:
- self.slave.removeSlaveBuilder(self)
- self.slave = None
- self.remote = None
- self.remoteCommands = None
-
-
-class Ping:
- running = False
- timer = None
-
- def ping(self, remote, timeout):
- assert not self.running
- self.running = True
- log.msg("sending ping")
- self.d = defer.Deferred()
- # TODO: add a distinct 'ping' command on the slave.. using 'print'
- # for this purpose is kind of silly.
- remote.callRemote("print", "ping").addCallbacks(self._pong,
- self._ping_failed,
- errbackArgs=(remote,))
-
- # We use either our own timeout or the (long) TCP timeout to detect
- # silently-missing slaves. This might happen because of a NAT
- # timeout or a routing loop. If the slave just shuts down (and we
- # somehow missed the FIN), we should get a "connection refused"
- # message.
- self.timer = reactor.callLater(timeout, self._ping_timeout, remote)
- return self.d
-
- def _ping_timeout(self, remote):
- log.msg("ping timeout")
- # force the BuildSlave to disconnect, since this indicates that
- # the bot is unreachable.
- del self.timer
- remote.broker.transport.loseConnection()
- # the forcibly-lost connection will now cause the ping to fail
-
- def _stopTimer(self):
- if not self.running:
- return
- self.running = False
-
- if self.timer:
- self.timer.cancel()
- del self.timer
-
- def _pong(self, res):
- log.msg("ping finished: success")
- self._stopTimer()
- self.d.callback(True)
-
- def _ping_failed(self, res, remote):
- log.msg("ping finished: failure")
- self._stopTimer()
- # the slave has some sort of internal error, disconnect them. If we
- # don't, we'll requeue a build and ping them again right away,
- # creating a nasty loop.
- remote.broker.transport.loseConnection()
- # TODO: except, if they actually did manage to get this far, they'll
- # probably reconnect right away, and we'll do this game again. Maybe
- # it would be better to leave them in the PINGING state.
- self.d.callback(False)
-
-
-class SlaveBuilder(AbstractSlaveBuilder):
-
- def __init__(self):
- AbstractSlaveBuilder.__init__(self)
- self.state = ATTACHING
-
- def detached(self):
- AbstractSlaveBuilder.detached(self)
- if self.slave:
- self.slave.removeSlaveBuilder(self)
- self.slave = None
- self.state = ATTACHING
-
- def buildFinished(self):
- # Call the slave's buildFinished if we can; the slave may be waiting
- # to do a graceful shutdown and needs to know when it's idle.
- # After, we check to see if we can start other builds.
- self.state = IDLE
- if self.slave:
- d = self.slave.buildFinished(self)
- d.addCallback(lambda x: reactor.callLater(0, self.builder.botmaster.maybeStartAllBuilds))
- else:
- reactor.callLater(0, self.builder.botmaster.maybeStartAllBuilds)
-
-
-class LatentSlaveBuilder(AbstractSlaveBuilder):
- def __init__(self, slave, builder):
- AbstractSlaveBuilder.__init__(self)
- self.slave = slave
- self.state = LATENT
- self.setBuilder(builder)
- self.slave.addSlaveBuilder(self)
- log.msg("Latent buildslave %s attached to %s" % (slave.slavename,
- self.builder_name))
-
- def substantiate(self, build):
- d = self.slave.substantiate(self)
- if not self.slave.substantiated:
- event = self.builder.builder_status.addEvent(
- ["substantiating"])
- def substantiated(res):
- msg = ["substantiate", "success"]
- if isinstance(res, basestring):
- msg.append(res)
- elif isinstance(res, (tuple, list)):
- msg.extend(res)
- event.text = msg
- event.finish()
- return res
- def substantiation_failed(res):
- event.text = ["substantiate", "failed"]
- # TODO add log of traceback to event
- event.finish()
- return res
- d.addCallbacks(substantiated, substantiation_failed)
- return d
-
- def detached(self):
- AbstractSlaveBuilder.detached(self)
- self.state = LATENT
-
- def buildStarted(self):
- AbstractSlaveBuilder.buildStarted(self)
- self.slave.buildStarted(self)
-
- def buildFinished(self):
- AbstractSlaveBuilder.buildFinished(self)
- self.slave.buildFinished(self)
-
- def _attachFailure(self, why, where):
- self.state = LATENT
- return AbstractSlaveBuilder._attachFailure(self, why, where)
-
- def ping(self, timeout, status=None):
- if not self.slave.substantiated:
- if status:
- status.addEvent(["ping", "latent"]).finish()
- return defer.succeed(True)
- return AbstractSlaveBuilder.ping(self, timeout, status)
-
-
-class Builder(pb.Referenceable):
- """I manage all Builds of a given type.
-
- Each Builder is created by an entry in the config file (the c['builders']
- list), with a number of parameters.
-
- One of these parameters is the L{buildbot.process.factory.BuildFactory}
- object that is associated with this Builder. The factory is responsible
- for creating new L{Build<buildbot.process.base.Build>} objects. Each
- Build object defines when and how the build is performed, so a new
- Factory or Builder should be defined to control this behavior.
-
- The Builder holds on to a number of L{base.BuildRequest} objects in a
- list named C{.buildable}. Incoming BuildRequest objects will be added to
- this list, or (if possible) merged into an existing request. When a slave
- becomes available, I will use my C{BuildFactory} to turn the request into
- a new C{Build} object. The C{BuildRequest} is forgotten, the C{Build}
- goes into C{.building} while it runs. Once the build finishes, I will
- discard it.
-
- I maintain a list of available SlaveBuilders, one for each connected
- slave that the C{slavenames} parameter says we can use. Some of these
- will be idle, some of them will be busy running builds for me. If there
- are multiple slaves, I can run multiple builds at once.
-
- I also manage forced builds, progress expectation (ETA) management, and
- some status delivery chores.
-
- I am persisted in C{BASEDIR/BUILDERNAME/builder}, so I can remember how
- long a build usually takes to run (in my C{expectations} attribute). This
- pickle also includes the L{buildbot.status.builder.BuilderStatus} object,
- which remembers the set of historic builds.
-
- @type buildable: list of L{buildbot.process.base.BuildRequest}
- @ivar buildable: BuildRequests that are ready to build, but which are
- waiting for a buildslave to be available.
-
- @type building: list of L{buildbot.process.base.Build}
- @ivar building: Builds that are actively running
-
- @type slaves: list of L{buildbot.buildslave.BuildSlave} objects
- @ivar slaves: the slaves currently available for building
- """
-
- expectations = None # this is created the first time we get a good build
- START_BUILD_TIMEOUT = 10
- CHOOSE_SLAVES_RANDOMLY = True # disabled for determinism during tests
-
- def __init__(self, setup, builder_status):
- """
- @type setup: dict
- @param setup: builder setup data, as stored in
- BuildmasterConfig['builders']. Contains name,
- slavename(s), builddir, factory, locks.
- @type builder_status: L{buildbot.status.builder.BuilderStatus}
- """
- self.name = setup['name']
- self.slavenames = []
- if setup.has_key('slavename'):
- self.slavenames.append(setup['slavename'])
- if setup.has_key('slavenames'):
- self.slavenames.extend(setup['slavenames'])
- self.builddir = setup['builddir']
- self.buildFactory = setup['factory']
- self.locks = setup.get("locks", [])
- self.env = setup.get('env', {})
- assert isinstance(self.env, dict)
- if setup.has_key('periodicBuildTime'):
- raise ValueError("periodicBuildTime can no longer be defined as"
- " part of the Builder: use scheduler.Periodic"
- " instead")
-
- # build/wannabuild slots: Build objects move along this sequence
- self.buildable = []
- self.building = []
- # old_building holds active builds that were stolen from a predecessor
- self.old_building = weakref.WeakKeyDictionary()
-
- # buildslaves which have connected but which are not yet available.
- # These are always in the ATTACHING state.
- self.attaching_slaves = []
-
- # buildslaves at our disposal. Each SlaveBuilder instance has a
- # .state that is IDLE, PINGING, or BUILDING. "PINGING" is used when a
- # Build is about to start, to make sure that they're still alive.
- self.slaves = []
-
- self.builder_status = builder_status
- self.builder_status.setSlavenames(self.slavenames)
-
- # for testing, to help synchronize tests
- self.watchers = {'attach': [], 'detach': [], 'detach_all': [],
- 'idle': []}
-
- def setBotmaster(self, botmaster):
- self.botmaster = botmaster
-
- def compareToSetup(self, setup):
- diffs = []
- setup_slavenames = []
- if setup.has_key('slavename'):
- setup_slavenames.append(setup['slavename'])
- setup_slavenames.extend(setup.get('slavenames', []))
- if setup_slavenames != self.slavenames:
- diffs.append('slavenames changed from %s to %s' \
- % (self.slavenames, setup_slavenames))
- if setup['builddir'] != self.builddir:
- diffs.append('builddir changed from %s to %s' \
- % (self.builddir, setup['builddir']))
- if setup['factory'] != self.buildFactory: # compare objects
- diffs.append('factory changed')
- oldlocks = [(lock.__class__, lock.name)
- for lock in self.locks]
- newlocks = [(lock.__class__, lock.name)
- for lock in setup.get('locks',[])]
- if oldlocks != newlocks:
- diffs.append('locks changed from %s to %s' % (oldlocks, newlocks))
- return diffs
-
- def __repr__(self):
- return "<Builder '%s' at %d>" % (self.name, id(self))
-
- def getOldestRequestTime(self):
- """Returns the timestamp of the oldest build request for this builder.
-
- If there are no build requests, None is returned."""
- if self.buildable:
- return self.buildable[0].getSubmitTime()
- else:
- return None
-
- def submitBuildRequest(self, req):
- req.setSubmitTime(now())
- self.buildable.append(req)
- req.requestSubmitted(self)
- self.builder_status.addBuildRequest(req.status)
- self.maybeStartBuild()
-
- def cancelBuildRequest(self, req):
- if req in self.buildable:
- self.buildable.remove(req)
- self.builder_status.removeBuildRequest(req.status)
- return True
- return False
-
- def __getstate__(self):
- d = self.__dict__.copy()
- # TODO: note that d['buildable'] can contain Deferreds
- del d['building'] # TODO: move these back to .buildable?
- del d['slaves']
- return d
-
- def __setstate__(self, d):
- self.__dict__ = d
- self.building = []
- self.slaves = []
-
- def consumeTheSoulOfYourPredecessor(self, old):
- """Suck the brain out of an old Builder.
-
- This takes all the runtime state from an existing Builder and moves
- it into ourselves. This is used when a Builder is changed in the
- master.cfg file: the new Builder has a different factory, but we want
- all the builds that were queued for the old one to get processed by
- the new one. Any builds which are already running will keep running.
- The new Builder will get as many of the old SlaveBuilder objects as
- it wants."""
-
- log.msg("consumeTheSoulOfYourPredecessor: %s feeding upon %s" %
- (self, old))
- # we claim all the pending builds, removing them from the old
- # Builder's queue. This insures that the old Builder will not start
- # any new work.
- log.msg(" stealing %s buildrequests" % len(old.buildable))
- self.buildable.extend(old.buildable)
- old.buildable = []
-
- # old.building (i.e. builds which are still running) is not migrated
- # directly: it keeps track of builds which were in progress in the
- # old Builder. When those builds finish, the old Builder will be
- # notified, not us. However, since the old SlaveBuilder will point to
- # us, it is our maybeStartBuild() that will be triggered.
- if old.building:
- self.builder_status.setBigState("building")
- # however, we do grab a weakref to the active builds, so that our
- # BuilderControl can see them and stop them. We use a weakref because
- # we aren't the one to get notified, so there isn't a convenient
- # place to remove it from self.building .
- for b in old.building:
- self.old_building[b] = None
- for b in old.old_building:
- self.old_building[b] = None
-
- # Our set of slavenames may be different. Steal any of the old
- # buildslaves that we want to keep using.
- for sb in old.slaves[:]:
- if sb.slave.slavename in self.slavenames:
- log.msg(" stealing buildslave %s" % sb)
- self.slaves.append(sb)
- old.slaves.remove(sb)
- sb.setBuilder(self)
-
- # old.attaching_slaves:
- # these SlaveBuilders are waiting on a sequence of calls:
- # remote.setMaster and remote.print . When these two complete,
- # old._attached will be fired, which will add a 'connect' event to
- # the builder_status and try to start a build. However, we've pulled
- # everything out of the old builder's queue, so it will have no work
- # to do. The outstanding remote.setMaster/print call will be holding
- # the last reference to the old builder, so it will disappear just
- # after that response comes back.
- #
- # The BotMaster will ask the slave to re-set their list of Builders
- # shortly after this function returns, which will cause our
- # attached() method to be fired with a bunch of references to remote
- # SlaveBuilders, some of which we already have (by stealing them
- # from the old Builder), some of which will be new. The new ones
- # will be re-attached.
-
- # Therefore, we don't need to do anything about old.attaching_slaves
-
- return # all done
-
- def getBuild(self, number):
- for b in self.building:
- if b.build_status.number == number:
- return b
- for b in self.old_building.keys():
- if b.build_status.number == number:
- return b
- return None
-
- def fireTestEvent(self, name, fire_with=None):
- if fire_with is None:
- fire_with = self
- watchers = self.watchers[name]
- self.watchers[name] = []
- for w in watchers:
- reactor.callLater(0, w.callback, fire_with)
-
- def addLatentSlave(self, slave):
- assert interfaces.ILatentBuildSlave.providedBy(slave)
- for s in self.slaves:
- if s == slave:
- break
- else:
- sb = LatentSlaveBuilder(slave, self)
- self.builder_status.addPointEvent(
- ['added', 'latent', slave.slavename])
- self.slaves.append(sb)
- reactor.callLater(0, self.maybeStartBuild)
-
- def attached(self, slave, remote, commands):
- """This is invoked by the BuildSlave when the self.slavename bot
- registers their builder.
-
- @type slave: L{buildbot.buildslave.BuildSlave}
- @param slave: the BuildSlave that represents the buildslave as a whole
- @type remote: L{twisted.spread.pb.RemoteReference}
- @param remote: a reference to the L{buildbot.slave.bot.SlaveBuilder}
- @type commands: dict: string -> string, or None
- @param commands: provides the slave's version of each RemoteCommand
-
- @rtype: L{twisted.internet.defer.Deferred}
- @return: a Deferred that fires (with 'self') when the slave-side
- builder is fully attached and ready to accept commands.
- """
- for s in self.attaching_slaves + self.slaves:
- if s.slave == slave:
- # already attached to them. This is fairly common, since
- # attached() gets called each time we receive the builder
- # list from the slave, and we ask for it each time we add or
- # remove a builder. So if the slave is hosting builders
- # A,B,C, and the config file changes A, we'll remove A and
- # re-add it, triggering two builder-list requests, getting
- # two redundant calls to attached() for B, and another two
- # for C.
- #
- # Therefore, when we see that we're already attached, we can
- # just ignore it. TODO: build a diagram of the state
- # transitions here, I'm concerned about sb.attached() failing
- # and leaving sb.state stuck at 'ATTACHING', and about
- # the detached() message arriving while there's some
- # transition pending such that the response to the transition
- # re-vivifies sb
- return defer.succeed(self)
-
- sb = SlaveBuilder()
- sb.setBuilder(self)
- self.attaching_slaves.append(sb)
- d = sb.attached(slave, remote, commands)
- d.addCallback(self._attached)
- d.addErrback(self._not_attached, slave)
- return d
-
- def _attached(self, sb):
- # TODO: make this .addSlaveEvent(slave.slavename, ['connect']) ?
- self.builder_status.addPointEvent(['connect', sb.slave.slavename])
- self.attaching_slaves.remove(sb)
- self.slaves.append(sb)
- reactor.callLater(0, self.maybeStartBuild)
-
- self.fireTestEvent('attach')
- return self
-
- def _not_attached(self, why, slave):
- # already log.err'ed by SlaveBuilder._attachFailure
- # TODO: make this .addSlaveEvent?
- # TODO: remove from self.slaves (except that detached() should get
- # run first, right?)
- self.builder_status.addPointEvent(['failed', 'connect',
- slave.slave.slavename])
- # TODO: add an HTMLLogFile of the exception
- self.fireTestEvent('attach', why)
-
- def detached(self, slave):
- """This is called when the connection to the bot is lost."""
- log.msg("%s.detached" % self, slave.slavename)
- for sb in self.attaching_slaves + self.slaves:
- if sb.slave == slave:
- break
- else:
- log.msg("WEIRD: Builder.detached(%s) (%s)"
- " not in attaching_slaves(%s)"
- " or slaves(%s)" % (slave, slave.slavename,
- self.attaching_slaves,
- self.slaves))
- return
- if sb.state == BUILDING:
- # the Build's .lostRemote method (invoked by a notifyOnDisconnect
- # handler) will cause the Build to be stopped, probably right
- # after the notifyOnDisconnect that invoked us finishes running.
-
- # TODO: should failover to a new Build
- #self.retryBuild(sb.build)
- pass
-
- if sb in self.attaching_slaves:
- self.attaching_slaves.remove(sb)
- if sb in self.slaves:
- self.slaves.remove(sb)
-
- # TODO: make this .addSlaveEvent?
- self.builder_status.addPointEvent(['disconnect', slave.slavename])
- sb.detached() # inform the SlaveBuilder that their slave went away
- self.updateBigStatus()
- self.fireTestEvent('detach')
- if not self.slaves:
- self.fireTestEvent('detach_all')
-
- def updateBigStatus(self):
- if not self.slaves:
- self.builder_status.setBigState("offline")
- elif self.building:
- self.builder_status.setBigState("building")
- else:
- self.builder_status.setBigState("idle")
- self.fireTestEvent('idle')
-
- def maybeStartBuild(self):
- log.msg("maybeStartBuild %s: %s %s" %
- (self, self.buildable, self.slaves))
- if not self.buildable:
- self.updateBigStatus()
- return # nothing to do
-
- # pick an idle slave
- available_slaves = [sb for sb in self.slaves if sb.isAvailable()]
- if not available_slaves:
- log.msg("%s: want to start build, but we don't have a remote"
- % self)
- self.updateBigStatus()
- return
- if self.CHOOSE_SLAVES_RANDOMLY:
- # TODO prefer idle over latent? maybe other sorting preferences?
- sb = random.choice(available_slaves)
- else:
- sb = available_slaves[0]
-
- # there is something to build, and there is a slave on which to build
- # it. Grab the oldest request, see if we can merge it with anything
- # else.
- req = self.buildable.pop(0)
- self.builder_status.removeBuildRequest(req.status)
- mergers = []
- botmaster = self.botmaster
- for br in self.buildable[:]:
- if botmaster.shouldMergeRequests(self, req, br):
- self.buildable.remove(br)
- self.builder_status.removeBuildRequest(br.status)
- mergers.append(br)
- requests = [req] + mergers
-
- # Create a new build from our build factory and set ourself as the
- # builder.
- build = self.buildFactory.newBuild(requests)
- build.setBuilder(self)
- build.setLocks(self.locks)
- if len(self.env) > 0:
- build.setSlaveEnvironment(self.env)
-
- # start it
- self.startBuild(build, sb)
-
- def startBuild(self, build, sb):
- """Start a build on the given slave.
- @param build: the L{base.Build} to start
- @param sb: the L{SlaveBuilder} which will host this build
-
- @return: a Deferred which fires with a
- L{buildbot.interfaces.IBuildControl} that can be used to stop the
- Build, or to access a L{buildbot.interfaces.IBuildStatus} which will
- watch the Build as it runs. """
-
- self.building.append(build)
- self.updateBigStatus()
- if isinstance(sb, LatentSlaveBuilder):
- log.msg("starting build %s.. substantiating the slave %s" %
- (build, sb))
- d = sb.substantiate(build)
- def substantiated(res):
- return sb.ping(self.START_BUILD_TIMEOUT)
- def substantiation_failed(res):
- self.builder_status.addPointEvent(
- ['removing', 'latent', sb.slave.slavename])
- sb.slave.disconnect()
- # TODO: should failover to a new Build
- #self.retryBuild(sb.build)
- d.addCallbacks(substantiated, substantiation_failed)
- else:
- log.msg("starting build %s.. pinging the slave %s" % (build, sb))
- d = sb.ping(self.START_BUILD_TIMEOUT)
- # ping the slave to make sure they're still there. If they're fallen
- # off the map (due to a NAT timeout or something), this will fail in
- # a couple of minutes, depending upon the TCP timeout. TODO: consider
- # making this time out faster, or at least characterize the likely
- # duration.
- d.addCallback(self._startBuild_1, build, sb)
- return d
-
- def _startBuild_1(self, res, build, sb):
- if not res:
- return self._startBuildFailed("slave ping failed", build, sb)
- # The buildslave is ready to go. sb.buildStarted() sets its state to
- # BUILDING (so we won't try to use it for any other builds). This
- # gets set back to IDLE by the Build itself when it finishes.
- sb.buildStarted()
- d = sb.remote.callRemote("startBuild")
- d.addCallbacks(self._startBuild_2, self._startBuildFailed,
- callbackArgs=(build,sb), errbackArgs=(build,sb))
- return d
-
- def _startBuild_2(self, res, build, sb):
- # create the BuildStatus object that goes with the Build
- bs = self.builder_status.newBuild()
-
- # start the build. This will first set up the steps, then tell the
- # BuildStatus that it has started, which will announce it to the
- # world (through our BuilderStatus object, which is its parent).
- # Finally it will start the actual build process.
- d = build.startBuild(bs, self.expectations, sb)
- d.addCallback(self.buildFinished, sb)
- d.addErrback(log.err) # this shouldn't happen. if it does, the slave
- # will be wedged
- for req in build.requests:
- req.buildStarted(build, bs)
- return build # this is the IBuildControl
-
- def _startBuildFailed(self, why, build, sb):
- # put the build back on the buildable list
- log.msg("I tried to tell the slave that the build %s started, but "
- "remote_startBuild failed: %s" % (build, why))
- # release the slave. This will queue a call to maybeStartBuild, which
- # will fire after other notifyOnDisconnect handlers have marked the
- # slave as disconnected (so we don't try to use it again).
- sb.buildFinished()
-
- log.msg("re-queueing the BuildRequest")
- self.building.remove(build)
- for req in build.requests:
- self.buildable.insert(0, req) # the interrupted build gets first
- # priority
- self.builder_status.addBuildRequest(req.status)
-
-
- def buildFinished(self, build, sb):
- """This is called when the Build has finished (either success or
- failure). Any exceptions during the build are reported with
- results=FAILURE, not with an errback."""
-
- # by the time we get here, the Build has already released the slave
- # (which queues a call to maybeStartBuild)
-
- self.building.remove(build)
- for req in build.requests:
- req.finished(build.build_status)
-
- def setExpectations(self, progress):
- """Mark the build as successful and update expectations for the next
- build. Only call this when the build did not fail in any way that
- would invalidate the time expectations generated by it. (if the
- compile failed and thus terminated early, we can't use the last
- build to predict how long the next one will take).
- """
- if self.expectations:
- self.expectations.update(progress)
- else:
- # the first time we get a good build, create our Expectations
- # based upon its results
- self.expectations = Expectations(progress)
- log.msg("new expectations: %s seconds" % \
- self.expectations.expectedBuildTime())
-
- def shutdownSlave(self):
- if self.remote:
- self.remote.callRemote("shutdown")
-
-
-class BuilderControl(components.Adapter):
- implements(interfaces.IBuilderControl)
-
- def requestBuild(self, req):
- """Submit a BuildRequest to this Builder."""
- self.original.submitBuildRequest(req)
-
- def requestBuildSoon(self, req):
- """Submit a BuildRequest like requestBuild, but raise a
- L{buildbot.interfaces.NoSlaveError} if no slaves are currently
- available, so it cannot be used to queue a BuildRequest in the hopes
- that a slave will eventually connect. This method is appropriate for
- use by things like the web-page 'Force Build' button."""
- if not self.original.slaves:
- raise interfaces.NoSlaveError
- self.requestBuild(req)
-
- def resubmitBuild(self, bs, reason="<rebuild, no reason given>"):
- if not bs.isFinished():
- return
-
- ss = bs.getSourceStamp(absolute=True)
- req = base.BuildRequest(reason, ss, self.original.name)
- self.requestBuild(req)
-
- def getPendingBuilds(self):
- # return IBuildRequestControl objects
- raise NotImplementedError
-
- def getBuild(self, number):
- return self.original.getBuild(number)
-
- def ping(self, timeout=30):
- if not self.original.slaves:
- self.original.builder_status.addPointEvent(["ping", "no slave"])
- return defer.succeed(False) # interfaces.NoSlaveError
- dl = []
- for s in self.original.slaves:
- dl.append(s.ping(timeout, self.original.builder_status))
- d = defer.DeferredList(dl)
- d.addCallback(self._gatherPingResults)
- return d
-
- def _gatherPingResults(self, res):
- for ignored,success in res:
- if not success:
- return False
- return True
-
-components.registerAdapter(BuilderControl, Builder, interfaces.IBuilderControl)
diff --git a/buildbot/buildbot/process/buildstep.py b/buildbot/buildbot/process/buildstep.py
deleted file mode 100644
index 2cfc157..0000000
--- a/buildbot/buildbot/process/buildstep.py
+++ /dev/null
@@ -1,1097 +0,0 @@
-# -*- test-case-name: buildbot.test.test_steps -*-
-
-from zope.interface import implements
-from twisted.internet import reactor, defer, error
-from twisted.protocols import basic
-from twisted.spread import pb
-from twisted.python import log
-from twisted.python.failure import Failure
-from twisted.web.util import formatFailure
-
-from buildbot import interfaces, locks
-from buildbot.status import progress
-from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE, SKIPPED, \
- EXCEPTION
-
-"""
-BuildStep and RemoteCommand classes for master-side representation of the
-build process
-"""
-
-class RemoteCommand(pb.Referenceable):
- """
- I represent a single command to be run on the slave. I handle the details
- of reliably gathering status updates from the slave (acknowledging each),
- and (eventually, in a future release) recovering from interrupted builds.
- This is the master-side object that is known to the slave-side
- L{buildbot.slave.bot.SlaveBuilder}, to which status updates are sent.
-
- My command should be started by calling .run(), which returns a
- Deferred that will fire when the command has finished, or will
- errback if an exception is raised.
-
- Typically __init__ or run() will set up self.remote_command to be a
- string which corresponds to one of the SlaveCommands registered in
- the buildslave, and self.args to a dictionary of arguments that will
- be passed to the SlaveCommand instance.
-
- start, remoteUpdate, and remoteComplete are available to be overridden
-
- @type commandCounter: list of one int
- @cvar commandCounter: provides a unique value for each
- RemoteCommand executed across all slaves
- @type active: boolean
- @ivar active: whether the command is currently running
- """
- commandCounter = [0] # we use a list as a poor man's singleton
- active = False
-
- def __init__(self, remote_command, args):
- """
- @type remote_command: string
- @param remote_command: remote command to start. This will be
- passed to
- L{buildbot.slave.bot.SlaveBuilder.remote_startCommand}
- and needs to have been registered
- slave-side by
- L{buildbot.slave.registry.registerSlaveCommand}
- @type args: dict
- @param args: arguments to send to the remote command
- """
-
- self.remote_command = remote_command
- self.args = args
-
- def __getstate__(self):
- dict = self.__dict__.copy()
- # Remove the remote ref: if necessary (only for resumed builds), it
- # will be reattached at resume time
- if dict.has_key("remote"):
- del dict["remote"]
- return dict
-
- def run(self, step, remote):
- self.active = True
- self.step = step
- self.remote = remote
- c = self.commandCounter[0]
- self.commandCounter[0] += 1
- #self.commandID = "%d %d" % (c, random.randint(0, 1000000))
- self.commandID = "%d" % c
- log.msg("%s: RemoteCommand.run [%s]" % (self, self.commandID))
- self.deferred = defer.Deferred()
-
- d = defer.maybeDeferred(self.start)
-
- # _finished is called with an error for unknown commands, errors
- # that occur while the command is starting (including OSErrors in
- # exec()), StaleBroker (when the connection was lost before we
- # started), and pb.PBConnectionLost (when the slave isn't responding
- # over this connection, perhaps it had a power failure, or NAT
- # weirdness). If this happens, self.deferred is fired right away.
- d.addErrback(self._finished)
-
- # Connections which are lost while the command is running are caught
- # when our parent Step calls our .lostRemote() method.
- return self.deferred
-
- def start(self):
- """
- Tell the slave to start executing the remote command.
-
- @rtype: L{twisted.internet.defer.Deferred}
- @returns: a deferred that will fire when the remote command is
- done (with None as the result)
- """
- # This method only initiates the remote command.
- # We will receive remote_update messages as the command runs.
- # We will get a single remote_complete when it finishes.
- # We should fire self.deferred when the command is done.
- d = self.remote.callRemote("startCommand", self, self.commandID,
- self.remote_command, self.args)
- return d
-
- def interrupt(self, why):
- # TODO: consider separating this into interrupt() and stop(), where
- # stop() unconditionally calls _finished, but interrupt() merely
- # asks politely for the command to stop soon.
-
- log.msg("RemoteCommand.interrupt", self, why)
- if not self.active:
- log.msg(" but this RemoteCommand is already inactive")
- return
- if not self.remote:
- log.msg(" but our .remote went away")
- return
- if isinstance(why, Failure) and why.check(error.ConnectionLost):
- log.msg("RemoteCommand.disconnect: lost slave")
- self.remote = None
- self._finished(why)
- return
-
- # tell the remote command to halt. Returns a Deferred that will fire
- # when the interrupt command has been delivered.
-
- d = defer.maybeDeferred(self.remote.callRemote, "interruptCommand",
- self.commandID, str(why))
- # the slave may not have remote_interruptCommand
- d.addErrback(self._interruptFailed)
- return d
-
- def _interruptFailed(self, why):
- log.msg("RemoteCommand._interruptFailed", self)
- # TODO: forcibly stop the Command now, since we can't stop it
- # cleanly
- return None
-
- def remote_update(self, updates):
- """
- I am called by the slave's L{buildbot.slave.bot.SlaveBuilder} so
- I can receive updates from the running remote command.
-
- @type updates: list of [object, int]
- @param updates: list of updates from the remote command
- """
- self.buildslave.messageReceivedFromSlave()
- max_updatenum = 0
- for (update, num) in updates:
- #log.msg("update[%d]:" % num)
- try:
- if self.active: # ignore late updates
- self.remoteUpdate(update)
- except:
- # log failure, terminate build, let slave retire the update
- self._finished(Failure())
- # TODO: what if multiple updates arrive? should
- # skip the rest but ack them all
- if num > max_updatenum:
- max_updatenum = num
- return max_updatenum
-
- def remoteUpdate(self, update):
- raise NotImplementedError("You must implement this in a subclass")
-
- def remote_complete(self, failure=None):
- """
- Called by the slave's L{buildbot.slave.bot.SlaveBuilder} to
- notify me the remote command has finished.
-
- @type failure: L{twisted.python.failure.Failure} or None
-
- @rtype: None
- """
- self.buildslave.messageReceivedFromSlave()
- # call the real remoteComplete a moment later, but first return an
- # acknowledgement so the slave can retire the completion message.
- if self.active:
- reactor.callLater(0, self._finished, failure)
- return None
-
- def _finished(self, failure=None):
- self.active = False
- # call .remoteComplete. If it raises an exception, or returns the
- # Failure that we gave it, our self.deferred will be errbacked. If
- # it does not (either it ate the Failure or there the step finished
- # normally and it didn't raise a new exception), self.deferred will
- # be callbacked.
- d = defer.maybeDeferred(self.remoteComplete, failure)
- # arrange for the callback to get this RemoteCommand instance
- # instead of just None
- d.addCallback(lambda r: self)
- # this fires the original deferred we returned from .run(),
- # with self as the result, or a failure
- d.addBoth(self.deferred.callback)
-
- def remoteComplete(self, maybeFailure):
- """Subclasses can override this.
-
- This is called when the RemoteCommand has finished. 'maybeFailure'
- will be None if the command completed normally, or a Failure
- instance in one of the following situations:
-
- - the slave was lost before the command was started
- - the slave didn't respond to the startCommand message
- - the slave raised an exception while starting the command
- (bad command name, bad args, OSError from missing executable)
- - the slave raised an exception while finishing the command
- (they send back a remote_complete message with a Failure payload)
-
- and also (for now):
- - slave disconnected while the command was running
-
- This method should do cleanup, like closing log files. It should
- normally return the 'failure' argument, so that any exceptions will
- be propagated to the Step. If it wants to consume them, return None
- instead."""
-
- return maybeFailure
-
-class LoggedRemoteCommand(RemoteCommand):
- """
-
- I am a L{RemoteCommand} which gathers output from the remote command into
- one or more local log files. My C{self.logs} dictionary contains
- references to these L{buildbot.status.builder.LogFile} instances. Any
- stdout/stderr/header updates from the slave will be put into
- C{self.logs['stdio']}, if it exists. If the remote command uses other log
- files, they will go into other entries in C{self.logs}.
-
- If you want to use stdout or stderr, you should create a LogFile named
- 'stdio' and pass it to my useLog() message. Otherwise stdout/stderr will
- be ignored, which is probably not what you want.
-
- Unless you tell me otherwise, when my command completes I will close all
- the LogFiles that I know about.
-
- @ivar logs: maps logname to a LogFile instance
- @ivar _closeWhenFinished: maps logname to a boolean. If true, this
- LogFile will be closed when the RemoteCommand
- finishes. LogFiles which are shared between
- multiple RemoteCommands should use False here.
-
- """
-
- rc = None
- debug = False
-
- def __init__(self, *args, **kwargs):
- self.logs = {}
- self._closeWhenFinished = {}
- RemoteCommand.__init__(self, *args, **kwargs)
-
- def __repr__(self):
- return "<RemoteCommand '%s' at %d>" % (self.remote_command, id(self))
-
- def useLog(self, loog, closeWhenFinished=False, logfileName=None):
- """Start routing messages from a remote logfile to a local LogFile
-
- I take a local ILogFile instance in 'loog', and arrange to route
- remote log messages for the logfile named 'logfileName' into it. By
- default this logfileName comes from the ILogFile itself (using the
- name by which the ILogFile will be displayed), but the 'logfileName'
- argument can be used to override this. For example, if
- logfileName='stdio', this logfile will collect text from the stdout
- and stderr of the command.
-
- @param loog: an instance which implements ILogFile
- @param closeWhenFinished: a boolean, set to False if the logfile
- will be shared between multiple
- RemoteCommands. If True, the logfile will
- be closed when this ShellCommand is done
- with it.
- @param logfileName: a string, which indicates which remote log file
- should be routed into this ILogFile. This should
- match one of the keys of the logfiles= argument
- to ShellCommand.
-
- """
-
- assert interfaces.ILogFile.providedBy(loog)
- if not logfileName:
- logfileName = loog.getName()
- assert logfileName not in self.logs
- self.logs[logfileName] = loog
- self._closeWhenFinished[logfileName] = closeWhenFinished
-
- def start(self):
- log.msg("LoggedRemoteCommand.start")
- if 'stdio' not in self.logs:
- log.msg("LoggedRemoteCommand (%s) is running a command, but "
- "it isn't being logged to anything. This seems unusual."
- % self)
- self.updates = {}
- return RemoteCommand.start(self)
-
- def addStdout(self, data):
- if 'stdio' in self.logs:
- self.logs['stdio'].addStdout(data)
- def addStderr(self, data):
- if 'stdio' in self.logs:
- self.logs['stdio'].addStderr(data)
- def addHeader(self, data):
- if 'stdio' in self.logs:
- self.logs['stdio'].addHeader(data)
-
- def addToLog(self, logname, data):
- if logname in self.logs:
- self.logs[logname].addStdout(data)
- else:
- log.msg("%s.addToLog: no such log %s" % (self, logname))
-
- def remoteUpdate(self, update):
- if self.debug:
- for k,v in update.items():
- log.msg("Update[%s]: %s" % (k,v))
- if update.has_key('stdout'):
- # 'stdout': data
- self.addStdout(update['stdout'])
- if update.has_key('stderr'):
- # 'stderr': data
- self.addStderr(update['stderr'])
- if update.has_key('header'):
- # 'header': data
- self.addHeader(update['header'])
- if update.has_key('log'):
- # 'log': (logname, data)
- logname, data = update['log']
- self.addToLog(logname, data)
- if update.has_key('rc'):
- rc = self.rc = update['rc']
- log.msg("%s rc=%s" % (self, rc))
- self.addHeader("program finished with exit code %d\n" % rc)
-
- for k in update:
- if k not in ('stdout', 'stderr', 'header', 'rc'):
- if k not in self.updates:
- self.updates[k] = []
- self.updates[k].append(update[k])
-
- def remoteComplete(self, maybeFailure):
- for name,loog in self.logs.items():
- if self._closeWhenFinished[name]:
- if maybeFailure:
- loog.addHeader("\nremoteFailed: %s" % maybeFailure)
- else:
- log.msg("closing log %s" % loog)
- loog.finish()
- return maybeFailure
-
-
-class LogObserver:
- implements(interfaces.ILogObserver)
-
- def setStep(self, step):
- self.step = step
-
- def setLog(self, loog):
- assert interfaces.IStatusLog.providedBy(loog)
- loog.subscribe(self, True)
-
- def logChunk(self, build, step, log, channel, text):
- if channel == interfaces.LOG_CHANNEL_STDOUT:
- self.outReceived(text)
- elif channel == interfaces.LOG_CHANNEL_STDERR:
- self.errReceived(text)
-
- # TODO: add a logEnded method? er, stepFinished?
-
- def outReceived(self, data):
- """This will be called with chunks of stdout data. Override this in
- your observer."""
- pass
-
- def errReceived(self, data):
- """This will be called with chunks of stderr data. Override this in
- your observer."""
- pass
-
-
-class LogLineObserver(LogObserver):
- def __init__(self):
- self.stdoutParser = basic.LineOnlyReceiver()
- self.stdoutParser.delimiter = "\n"
- self.stdoutParser.lineReceived = self.outLineReceived
- self.stdoutParser.transport = self # for the .disconnecting attribute
- self.disconnecting = False
-
- self.stderrParser = basic.LineOnlyReceiver()
- self.stderrParser.delimiter = "\n"
- self.stderrParser.lineReceived = self.errLineReceived
- self.stderrParser.transport = self
-
- def setMaxLineLength(self, max_length):
- """
- Set the maximum line length: lines longer than max_length are
- dropped. Default is 16384 bytes. Use sys.maxint for effective
- infinity.
- """
- self.stdoutParser.MAX_LENGTH = max_length
- self.stderrParser.MAX_LENGTH = max_length
-
- def outReceived(self, data):
- self.stdoutParser.dataReceived(data)
-
- def errReceived(self, data):
- self.stderrParser.dataReceived(data)
-
- def outLineReceived(self, line):
- """This will be called with complete stdout lines (not including the
- delimiter). Override this in your observer."""
- pass
-
- def errLineReceived(self, line):
- """This will be called with complete lines of stderr (not including
- the delimiter). Override this in your observer."""
- pass
-
-
-class RemoteShellCommand(LoggedRemoteCommand):
- """This class helps you run a shell command on the build slave. It will
- accumulate all the command's output into a Log named 'stdio'. When the
- command is finished, it will fire a Deferred. You can then check the
- results of the command and parse the output however you like."""
-
- def __init__(self, workdir, command, env=None,
- want_stdout=1, want_stderr=1,
- timeout=20*60, logfiles={}, usePTY="slave-config"):
- """
- @type workdir: string
- @param workdir: directory where the command ought to run,
- relative to the Builder's home directory. Defaults to
- '.': the same as the Builder's homedir. This should
- probably be '.' for the initial 'cvs checkout'
- command (which creates a workdir), and the Build-wide
- workdir for all subsequent commands (including
- compiles and 'cvs update').
-
- @type command: list of strings (or string)
- @param command: the shell command to run, like 'make all' or
- 'cvs update'. This should be a list or tuple
- which can be used directly as the argv array.
- For backwards compatibility, if this is a
- string, the text will be given to '/bin/sh -c
- %s'.
-
- @type env: dict of string->string
- @param env: environment variables to add or change for the
- slave. Each command gets a separate
- environment; all inherit the slave's initial
- one. TODO: make it possible to delete some or
- all of the slave's environment.
-
- @type want_stdout: bool
- @param want_stdout: defaults to True. Set to False if stdout should
- be thrown away. Do this to avoid storing or
- sending large amounts of useless data.
-
- @type want_stderr: bool
- @param want_stderr: False if stderr should be thrown away
-
- @type timeout: int
- @param timeout: tell the remote that if the command fails to
- produce any output for this number of seconds,
- the command is hung and should be killed. Use
- None to disable the timeout.
- """
-
- self.command = command # stash .command, set it later
- if env is not None:
- # avoid mutating the original master.cfg dictionary. Each
- # ShellCommand gets its own copy, any start() methods won't be
- # able to modify the original.
- env = env.copy()
- args = {'workdir': workdir,
- 'env': env,
- 'want_stdout': want_stdout,
- 'want_stderr': want_stderr,
- 'logfiles': logfiles,
- 'timeout': timeout,
- 'usePTY': usePTY,
- }
- LoggedRemoteCommand.__init__(self, "shell", args)
-
- def start(self):
- self.args['command'] = self.command
- if self.remote_command == "shell":
- # non-ShellCommand slavecommands are responsible for doing this
- # fixup themselves
- if self.step.slaveVersion("shell", "old") == "old":
- self.args['dir'] = self.args['workdir']
- what = "command '%s' in dir '%s'" % (self.args['command'],
- self.args['workdir'])
- log.msg(what)
- return LoggedRemoteCommand.start(self)
-
- def __repr__(self):
- return "<RemoteShellCommand '%s'>" % repr(self.command)
-
-class BuildStep:
- """
- I represent a single step of the build process. This step may involve
- zero or more commands to be run in the build slave, as well as arbitrary
- processing on the master side. Regardless of how many slave commands are
- run, the BuildStep will result in a single status value.
-
- The step is started by calling startStep(), which returns a Deferred that
- fires when the step finishes. See C{startStep} for a description of the
- results provided by that Deferred.
-
- __init__ and start are good methods to override. Don't forget to upcall
- BuildStep.__init__ or bad things will happen.
-
- To launch a RemoteCommand, pass it to .runCommand and wait on the
- Deferred it returns.
-
- Each BuildStep generates status as it runs. This status data is fed to
- the L{buildbot.status.builder.BuildStepStatus} listener that sits in
- C{self.step_status}. It can also feed progress data (like how much text
- is output by a shell command) to the
- L{buildbot.status.progress.StepProgress} object that lives in
- C{self.progress}, by calling C{self.setProgress(metric, value)} as it
- runs.
-
- @type build: L{buildbot.process.base.Build}
- @ivar build: the parent Build which is executing this step
-
- @type progress: L{buildbot.status.progress.StepProgress}
- @ivar progress: tracks ETA for the step
-
- @type step_status: L{buildbot.status.builder.BuildStepStatus}
- @ivar step_status: collects output status
- """
-
- # these parameters are used by the parent Build object to decide how to
- # interpret our results. haltOnFailure will affect the build process
- # immediately, the others will be taken into consideration when
- # determining the overall build status.
- #
- # steps that are makred as alwaysRun will be run regardless of the outcome
- # of previous steps (especially steps with haltOnFailure=True)
- haltOnFailure = False
- flunkOnWarnings = False
- flunkOnFailure = False
- warnOnWarnings = False
- warnOnFailure = False
- alwaysRun = False
-
- # 'parms' holds a list of all the parameters we care about, to allow
- # users to instantiate a subclass of BuildStep with a mixture of
- # arguments, some of which are for us, some of which are for the subclass
- # (or a delegate of the subclass, like how ShellCommand delivers many
- # arguments to the RemoteShellCommand that it creates). Such delegating
- # subclasses will use this list to figure out which arguments are meant
- # for us and which should be given to someone else.
- parms = ['name', 'locks',
- 'haltOnFailure',
- 'flunkOnWarnings',
- 'flunkOnFailure',
- 'warnOnWarnings',
- 'warnOnFailure',
- 'alwaysRun',
- 'progressMetrics',
- ]
-
- name = "generic"
- locks = []
- progressMetrics = () # 'time' is implicit
- useProgress = True # set to False if step is really unpredictable
- build = None
- step_status = None
- progress = None
-
- def __init__(self, **kwargs):
- self.factory = (self.__class__, dict(kwargs))
- for p in self.__class__.parms:
- if kwargs.has_key(p):
- setattr(self, p, kwargs[p])
- del kwargs[p]
- if kwargs:
- why = "%s.__init__ got unexpected keyword argument(s) %s" \
- % (self, kwargs.keys())
- raise TypeError(why)
- self._pendingLogObservers = []
-
- def setBuild(self, build):
- # subclasses which wish to base their behavior upon qualities of the
- # Build (e.g. use the list of changed files to run unit tests only on
- # code which has been modified) should do so here. The Build is not
- # available during __init__, but setBuild() will be called just
- # afterwards.
- self.build = build
-
- def setBuildSlave(self, buildslave):
- self.buildslave = buildslave
-
- def setDefaultWorkdir(self, workdir):
- # The Build calls this just after __init__(). ShellCommand
- # and variants use a slave-side workdir, but some other steps
- # do not. Subclasses which use a workdir should use the value
- # set by this method unless they were constructed with
- # something more specific.
- pass
-
- def addFactoryArguments(self, **kwargs):
- self.factory[1].update(kwargs)
-
- def getStepFactory(self):
- return self.factory
-
- def setStepStatus(self, step_status):
- self.step_status = step_status
-
- def setupProgress(self):
- if self.useProgress:
- sp = progress.StepProgress(self.name, self.progressMetrics)
- self.progress = sp
- self.step_status.setProgress(sp)
- return sp
- return None
-
- def setProgress(self, metric, value):
- """BuildSteps can call self.setProgress() to announce progress along
- some metric."""
- if self.progress:
- self.progress.setProgress(metric, value)
-
- def getProperty(self, propname):
- return self.build.getProperty(propname)
-
- def setProperty(self, propname, value, source="Step"):
- self.build.setProperty(propname, value, source)
-
- def startStep(self, remote):
- """Begin the step. This returns a Deferred that will fire when the
- step finishes.
-
- This deferred fires with a tuple of (result, [extra text]), although
- older steps used to return just the 'result' value, so the receiving
- L{base.Build} needs to be prepared to handle that too. C{result} is
- one of the SUCCESS/WARNINGS/FAILURE/SKIPPED constants from
- L{buildbot.status.builder}, and the extra text is a list of short
- strings which should be appended to the Build's text results. This
- text allows a test-case step which fails to append B{17 tests} to the
- Build's status, in addition to marking the build as failing.
-
- The deferred will errback if the step encounters an exception,
- including an exception on the slave side (or if the slave goes away
- altogether). Failures in shell commands (rc!=0) will B{not} cause an
- errback, in general the BuildStep will evaluate the results and
- decide whether to treat it as a WARNING or FAILURE.
-
- @type remote: L{twisted.spread.pb.RemoteReference}
- @param remote: a reference to the slave's
- L{buildbot.slave.bot.SlaveBuilder} instance where any
- RemoteCommands may be run
- """
-
- self.remote = remote
- self.deferred = defer.Deferred()
- # convert all locks into their real form
- lock_list = []
- for access in self.locks:
- if not isinstance(access, locks.LockAccess):
- # Buildbot 0.7.7 compability: user did not specify access
- access = access.defaultAccess()
- lock = self.build.builder.botmaster.getLockByID(access.lockid)
- lock_list.append((lock, access))
- self.locks = lock_list
- # then narrow SlaveLocks down to the slave that this build is being
- # run on
- self.locks = [(l.getLock(self.build.slavebuilder), la) for l, la in self.locks]
- for l, la in self.locks:
- if l in self.build.locks:
- log.msg("Hey, lock %s is claimed by both a Step (%s) and the"
- " parent Build (%s)" % (l, self, self.build))
- raise RuntimeError("lock claimed by both Step and Build")
- d = self.acquireLocks()
- d.addCallback(self._startStep_2)
- return self.deferred
-
- def acquireLocks(self, res=None):
- log.msg("acquireLocks(step %s, locks %s)" % (self, self.locks))
- if not self.locks:
- return defer.succeed(None)
- for lock, access in self.locks:
- if not lock.isAvailable(access):
- log.msg("step %s waiting for lock %s" % (self, lock))
- d = lock.waitUntilMaybeAvailable(self, access)
- d.addCallback(self.acquireLocks)
- return d
- # all locks are available, claim them all
- for lock, access in self.locks:
- lock.claim(self, access)
- return defer.succeed(None)
-
- def _startStep_2(self, res):
- if self.progress:
- self.progress.start()
- self.step_status.stepStarted()
- try:
- skip = self.start()
- if skip == SKIPPED:
- # this return value from self.start is a shortcut
- # to finishing the step immediately
- reactor.callLater(0, self.finished, SKIPPED)
- except:
- log.msg("BuildStep.startStep exception in .start")
- self.failed(Failure())
-
- def start(self):
- """Begin the step. Override this method and add code to do local
- processing, fire off remote commands, etc.
-
- To spawn a command in the buildslave, create a RemoteCommand instance
- and run it with self.runCommand::
-
- c = RemoteCommandFoo(args)
- d = self.runCommand(c)
- d.addCallback(self.fooDone).addErrback(self.failed)
-
- As the step runs, it should send status information to the
- BuildStepStatus::
-
- self.step_status.setText(['compile', 'failed'])
- self.step_status.setText2(['4', 'warnings'])
-
- To have some code parse stdio (or other log stream) in realtime, add
- a LogObserver subclass. This observer can use self.step.setProgress()
- to provide better progress notification to the step.::
-
- self.addLogObserver('stdio', MyLogObserver())
-
- To add a LogFile, use self.addLog. Make sure it gets closed when it
- finishes. When giving a Logfile to a RemoteShellCommand, just ask it
- to close the log when the command completes::
-
- log = self.addLog('output')
- cmd = RemoteShellCommand(args)
- cmd.useLog(log, closeWhenFinished=True)
-
- You can also create complete Logfiles with generated text in a single
- step::
-
- self.addCompleteLog('warnings', text)
-
- When the step is done, it should call self.finished(result). 'result'
- will be provided to the L{buildbot.process.base.Build}, and should be
- one of the constants defined above: SUCCESS, WARNINGS, FAILURE, or
- SKIPPED.
-
- If the step encounters an exception, it should call self.failed(why).
- 'why' should be a Failure object. This automatically fails the whole
- build with an exception. It is a good idea to add self.failed as an
- errback to any Deferreds you might obtain.
-
- If the step decides it does not need to be run, start() can return
- the constant SKIPPED. This fires the callback immediately: it is not
- necessary to call .finished yourself. This can also indicate to the
- status-reporting mechanism that this step should not be displayed."""
-
- raise NotImplementedError("your subclass must implement this method")
-
- def interrupt(self, reason):
- """Halt the command, either because the user has decided to cancel
- the build ('reason' is a string), or because the slave has
- disconnected ('reason' is a ConnectionLost Failure). Any further
- local processing should be skipped, and the Step completed with an
- error status. The results text should say something useful like
- ['step', 'interrupted'] or ['remote', 'lost']"""
- pass
-
- def releaseLocks(self):
- log.msg("releaseLocks(%s): %s" % (self, self.locks))
- for lock, access in self.locks:
- lock.release(self, access)
-
- def finished(self, results):
- if self.progress:
- self.progress.finish()
- self.step_status.stepFinished(results)
- self.releaseLocks()
- self.deferred.callback(results)
-
- def failed(self, why):
- # if isinstance(why, pb.CopiedFailure): # a remote exception might
- # only have short traceback, so formatFailure is not as useful as
- # you'd like (no .frames, so no traceback is displayed)
- log.msg("BuildStep.failed, traceback follows")
- log.err(why)
- try:
- if self.progress:
- self.progress.finish()
- self.addHTMLLog("err.html", formatFailure(why))
- self.addCompleteLog("err.text", why.getTraceback())
- # could use why.getDetailedTraceback() for more information
- self.step_status.setText([self.name, "exception"])
- self.step_status.setText2([self.name])
- self.step_status.stepFinished(EXCEPTION)
- except:
- log.msg("exception during failure processing")
- log.err()
- # the progress stuff may still be whacked (the StepStatus may
- # think that it is still running), but the build overall will now
- # finish
- try:
- self.releaseLocks()
- except:
- log.msg("exception while releasing locks")
- log.err()
-
- log.msg("BuildStep.failed now firing callback")
- self.deferred.callback(EXCEPTION)
-
- # utility methods that BuildSteps may find useful
-
- def slaveVersion(self, command, oldversion=None):
- """Return the version number of the given slave command. For the
- commands defined in buildbot.slave.commands, this is the value of
- 'cvs_ver' at the top of that file. Non-existent commands will return
- a value of None. Buildslaves running buildbot-0.5.0 or earlier did
- not respond to the version query: commands on those slaves will
- return a value of OLDVERSION, so you can distinguish between old
- buildslaves and missing commands.
-
- If you know that <=0.5.0 buildslaves have the command you want (CVS
- and SVN existed back then, but none of the other VC systems), then it
- makes sense to call this with oldversion='old'. If the command you
- want is newer than that, just leave oldversion= unspecified, and the
- command will return None for a buildslave that does not implement the
- command.
- """
- return self.build.getSlaveCommandVersion(command, oldversion)
-
- def slaveVersionIsOlderThan(self, command, minversion):
- sv = self.build.getSlaveCommandVersion(command, None)
- if sv is None:
- return True
- # the version we get back is a string form of the CVS version number
- # of the slave's buildbot/slave/commands.py, something like 1.39 .
- # This might change in the future (I might move away from CVS), but
- # if so I'll keep updating that string with suitably-comparable
- # values.
- if sv.split(".") < minversion.split("."):
- return True
- return False
-
- def getSlaveName(self):
- return self.build.getSlaveName()
-
- def addLog(self, name):
- loog = self.step_status.addLog(name)
- self._connectPendingLogObservers()
- return loog
-
- def getLog(self, name):
- for l in self.step_status.getLogs():
- if l.getName() == name:
- return l
- raise KeyError("no log named '%s'" % (name,))
-
- def addCompleteLog(self, name, text):
- log.msg("addCompleteLog(%s)" % name)
- loog = self.step_status.addLog(name)
- size = loog.chunkSize
- for start in range(0, len(text), size):
- loog.addStdout(text[start:start+size])
- loog.finish()
- self._connectPendingLogObservers()
-
- def addHTMLLog(self, name, html):
- log.msg("addHTMLLog(%s)" % name)
- self.step_status.addHTMLLog(name, html)
- self._connectPendingLogObservers()
-
- def addLogObserver(self, logname, observer):
- assert interfaces.ILogObserver.providedBy(observer)
- observer.setStep(self)
- self._pendingLogObservers.append((logname, observer))
- self._connectPendingLogObservers()
-
- def _connectPendingLogObservers(self):
- if not self._pendingLogObservers:
- return
- if not self.step_status:
- return
- current_logs = {}
- for loog in self.step_status.getLogs():
- current_logs[loog.getName()] = loog
- for logname, observer in self._pendingLogObservers[:]:
- if logname in current_logs:
- observer.setLog(current_logs[logname])
- self._pendingLogObservers.remove((logname, observer))
-
- def addURL(self, name, url):
- """Add a BuildStep URL to this step.
-
- An HREF to this URL will be added to any HTML representations of this
- step. This allows a step to provide links to external web pages,
- perhaps to provide detailed HTML code coverage results or other forms
- of build status.
- """
- self.step_status.addURL(name, url)
-
- def runCommand(self, c):
- c.buildslave = self.buildslave
- d = c.run(self, self.remote)
- return d
-
-
-class OutputProgressObserver(LogObserver):
- length = 0
-
- def __init__(self, name):
- self.name = name
-
- def logChunk(self, build, step, log, channel, text):
- self.length += len(text)
- self.step.setProgress(self.name, self.length)
-
-class LoggingBuildStep(BuildStep):
- """This is an abstract base class, suitable for inheritance by all
- BuildSteps that invoke RemoteCommands which emit stdout/stderr messages.
- """
-
- progressMetrics = ('output',)
- logfiles = {}
-
- parms = BuildStep.parms + ['logfiles']
-
- def __init__(self, logfiles={}, *args, **kwargs):
- BuildStep.__init__(self, *args, **kwargs)
- self.addFactoryArguments(logfiles=logfiles)
- # merge a class-level 'logfiles' attribute with one passed in as an
- # argument
- self.logfiles = self.logfiles.copy()
- self.logfiles.update(logfiles)
- self.addLogObserver('stdio', OutputProgressObserver("output"))
-
- def describe(self, done=False):
- raise NotImplementedError("implement this in a subclass")
-
- def startCommand(self, cmd, errorMessages=[]):
- """
- @param cmd: a suitable RemoteCommand which will be launched, with
- all output being put into our self.stdio_log LogFile
- """
- log.msg("ShellCommand.startCommand(cmd=%s)" % (cmd,))
- log.msg(" cmd.args = %r" % (cmd.args))
- self.cmd = cmd # so we can interrupt it
- self.step_status.setText(self.describe(False))
-
- # stdio is the first log
- self.stdio_log = stdio_log = self.addLog("stdio")
- cmd.useLog(stdio_log, True)
- for em in errorMessages:
- stdio_log.addHeader(em)
- # TODO: consider setting up self.stdio_log earlier, and have the
- # code that passes in errorMessages instead call
- # self.stdio_log.addHeader() directly.
-
- # there might be other logs
- self.setupLogfiles(cmd, self.logfiles)
-
- d = self.runCommand(cmd) # might raise ConnectionLost
- d.addCallback(lambda res: self.commandComplete(cmd))
- d.addCallback(lambda res: self.createSummary(cmd.logs['stdio']))
- d.addCallback(lambda res: self.evaluateCommand(cmd)) # returns results
- def _gotResults(results):
- self.setStatus(cmd, results)
- return results
- d.addCallback(_gotResults) # returns results
- d.addCallbacks(self.finished, self.checkDisconnect)
- d.addErrback(self.failed)
-
- def setupLogfiles(self, cmd, logfiles):
- """Set up any additional logfiles= logs.
- """
- for logname,remotefilename in logfiles.items():
- # tell the BuildStepStatus to add a LogFile
- newlog = self.addLog(logname)
- # and tell the LoggedRemoteCommand to feed it
- cmd.useLog(newlog, True)
-
- def interrupt(self, reason):
- # TODO: consider adding an INTERRUPTED or STOPPED status to use
- # instead of FAILURE, might make the text a bit more clear.
- # 'reason' can be a Failure, or text
- self.addCompleteLog('interrupt', str(reason))
- d = self.cmd.interrupt(reason)
- return d
-
- def checkDisconnect(self, f):
- f.trap(error.ConnectionLost)
- self.step_status.setText(self.describe(True) +
- ["failed", "slave", "lost"])
- self.step_status.setText2(["failed", "slave", "lost"])
- return self.finished(FAILURE)
-
- # to refine the status output, override one or more of the following
- # methods. Change as little as possible: start with the first ones on
- # this list and only proceed further if you have to
- #
- # createSummary: add additional Logfiles with summarized results
- # evaluateCommand: decides whether the step was successful or not
- #
- # getText: create the final per-step text strings
- # describeText2: create the strings added to the overall build status
- #
- # getText2: only adds describeText2() when the step affects build status
- #
- # setStatus: handles all status updating
-
- # commandComplete is available for general-purpose post-completion work.
- # It is a good place to do one-time parsing of logfiles, counting
- # warnings and errors. It should probably stash such counts in places
- # like self.warnings so they can be picked up later by your getText
- # method.
-
- # TODO: most of this stuff should really be on BuildStep rather than
- # ShellCommand. That involves putting the status-setup stuff in
- # .finished, which would make it hard to turn off.
-
- def commandComplete(self, cmd):
- """This is a general-purpose hook method for subclasses. It will be
- called after the remote command has finished, but before any of the
- other hook functions are called."""
- pass
-
- def createSummary(self, log):
- """To create summary logs, do something like this:
- warnings = grep('^Warning:', log.getText())
- self.addCompleteLog('warnings', warnings)
- """
- pass
-
- def evaluateCommand(self, cmd):
- """Decide whether the command was SUCCESS, WARNINGS, or FAILURE.
- Override this to, say, declare WARNINGS if there is any stderr
- activity, or to say that rc!=0 is not actually an error."""
-
- if cmd.rc != 0:
- return FAILURE
- # if cmd.log.getStderr(): return WARNINGS
- return SUCCESS
-
- def getText(self, cmd, results):
- if results == SUCCESS:
- return self.describe(True)
- elif results == WARNINGS:
- return self.describe(True) + ["warnings"]
- else:
- return self.describe(True) + ["failed"]
-
- def getText2(self, cmd, results):
- """We have decided to add a short note about ourselves to the overall
- build description, probably because something went wrong. Return a
- short list of short strings. If your subclass counts test failures or
- warnings of some sort, this is a good place to announce the count."""
- # return ["%d warnings" % warningcount]
- # return ["%d tests" % len(failedTests)]
- return [self.name]
-
- def maybeGetText2(self, cmd, results):
- if results == SUCCESS:
- # successful steps do not add anything to the build's text
- pass
- elif results == WARNINGS:
- if (self.flunkOnWarnings or self.warnOnWarnings):
- # we're affecting the overall build, so tell them why
- return self.getText2(cmd, results)
- else:
- if (self.haltOnFailure or self.flunkOnFailure
- or self.warnOnFailure):
- # we're affecting the overall build, so tell them why
- return self.getText2(cmd, results)
- return []
-
- def setStatus(self, cmd, results):
- # this is good enough for most steps, but it can be overridden to
- # get more control over the displayed text
- self.step_status.setText(self.getText(cmd, results))
- self.step_status.setText2(self.maybeGetText2(cmd, results))
-
-# (WithProeprties used to be available in this module)
-from buildbot.process.properties import WithProperties
-_hush_pyflakes = [WithProperties]
-del _hush_pyflakes
-
diff --git a/buildbot/buildbot/process/factory.py b/buildbot/buildbot/process/factory.py
deleted file mode 100644
index 37551d9..0000000
--- a/buildbot/buildbot/process/factory.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# -*- test-case-name: buildbot.test.test_step -*-
-
-from buildbot import util
-from buildbot.process.base import Build
-from buildbot.process.buildstep import BuildStep
-from buildbot.steps.source import CVS, SVN
-from buildbot.steps.shell import Configure, Compile, Test, PerlModuleTest
-
-# deprecated, use BuildFactory.addStep
-def s(steptype, **kwargs):
- # convenience function for master.cfg files, to create step
- # specification tuples
- return (steptype, kwargs)
-
-class BuildFactory(util.ComparableMixin):
- """
- @cvar buildClass: class to use when creating builds
- @type buildClass: L{buildbot.process.base.Build}
- """
- buildClass = Build
- useProgress = 1
- compare_attrs = ['buildClass', 'steps', 'useProgress']
-
- def __init__(self, steps=None):
- if steps is None:
- steps = []
- self.steps = [self._makeStepFactory(s) for s in steps]
-
- def _makeStepFactory(self, step_or_factory):
- if isinstance(step_or_factory, BuildStep):
- return step_or_factory.getStepFactory()
- return step_or_factory
-
- def newBuild(self, request):
- """Create a new Build instance.
- @param request: a L{base.BuildRequest} describing what is to be built
- """
- b = self.buildClass(request)
- b.useProgress = self.useProgress
- b.setStepFactories(self.steps)
- return b
-
- def addStep(self, step_or_factory, **kwargs):
- if isinstance(step_or_factory, BuildStep):
- s = step_or_factory.getStepFactory()
- else:
- s = (step_or_factory, dict(kwargs))
- self.steps.append(s)
-
- def addSteps(self, steps):
- self.steps.extend([ s.getStepFactory() for s in steps ])
-
-# BuildFactory subclasses for common build tools
-
-class GNUAutoconf(BuildFactory):
- def __init__(self, source, configure="./configure",
- configureEnv={},
- configureFlags=[],
- compile=["make", "all"],
- test=["make", "check"]):
- BuildFactory.__init__(self, [source])
- if configure is not None:
- # we either need to wind up with a string (which will be
- # space-split), or with a list of strings (which will not). The
- # list of strings is the preferred form.
- if type(configure) is str:
- if configureFlags:
- assert not " " in configure # please use list instead
- command = [configure] + configureFlags
- else:
- command = configure
- else:
- assert isinstance(configure, (list, tuple))
- command = configure + configureFlags
- self.addStep(Configure, command=command, env=configureEnv)
- if compile is not None:
- self.addStep(Compile, command=compile)
- if test is not None:
- self.addStep(Test, command=test)
-
-class CPAN(BuildFactory):
- def __init__(self, source, perl="perl"):
- BuildFactory.__init__(self, [source])
- self.addStep(Configure, command=[perl, "Makefile.PL"])
- self.addStep(Compile, command=["make"])
- self.addStep(PerlModuleTest, command=["make", "test"])
-
-class Distutils(BuildFactory):
- def __init__(self, source, python="python", test=None):
- BuildFactory.__init__(self, [source])
- self.addStep(Compile, command=[python, "./setup.py", "build"])
- if test is not None:
- self.addStep(Test, command=test)
-
-class Trial(BuildFactory):
- """Build a python module that uses distutils and trial. Set 'tests' to
- the module in which the tests can be found, or set useTestCaseNames=True
- to always have trial figure out which tests to run (based upon which
- files have been changed).
-
- See docs/factories.xhtml for usage samples. Not all of the Trial
- BuildStep options are available here, only the most commonly used ones.
- To get complete access, you will need to create a custom
- BuildFactory."""
-
- trial = "trial"
- randomly = False
- recurse = False
-
- def __init__(self, source,
- buildpython=["python"], trialpython=[], trial=None,
- testpath=".", randomly=None, recurse=None,
- tests=None, useTestCaseNames=False, env=None):
- BuildFactory.__init__(self, [source])
- assert tests or useTestCaseNames, "must use one or the other"
- if trial is not None:
- self.trial = trial
- if randomly is not None:
- self.randomly = randomly
- if recurse is not None:
- self.recurse = recurse
-
- from buildbot.steps.python_twisted import Trial
- buildcommand = buildpython + ["./setup.py", "build"]
- self.addStep(Compile, command=buildcommand, env=env)
- self.addStep(Trial,
- python=trialpython, trial=self.trial,
- testpath=testpath,
- tests=tests, testChanges=useTestCaseNames,
- randomly=self.randomly,
- recurse=self.recurse,
- env=env,
- )
-
-
-# compatibility classes, will go away. Note that these only offer
-# compatibility at the constructor level: if you have subclassed these
-# factories, your subclasses are unlikely to still work correctly.
-
-ConfigurableBuildFactory = BuildFactory
-
-class BasicBuildFactory(GNUAutoconf):
- # really a "GNU Autoconf-created tarball -in-CVS tree" builder
-
- def __init__(self, cvsroot, cvsmodule,
- configure=None, configureEnv={},
- compile="make all",
- test="make check", cvsCopy=False):
- mode = "clobber"
- if cvsCopy:
- mode = "copy"
- source = s(CVS, cvsroot=cvsroot, cvsmodule=cvsmodule, mode=mode)
- GNUAutoconf.__init__(self, source,
- configure=configure, configureEnv=configureEnv,
- compile=compile,
- test=test)
-
-class QuickBuildFactory(BasicBuildFactory):
- useProgress = False
-
- def __init__(self, cvsroot, cvsmodule,
- configure=None, configureEnv={},
- compile="make all",
- test="make check", cvsCopy=False):
- mode = "update"
- source = s(CVS, cvsroot=cvsroot, cvsmodule=cvsmodule, mode=mode)
- GNUAutoconf.__init__(self, source,
- configure=configure, configureEnv=configureEnv,
- compile=compile,
- test=test)
-
-class BasicSVN(GNUAutoconf):
-
- def __init__(self, svnurl,
- configure=None, configureEnv={},
- compile="make all",
- test="make check"):
- source = s(SVN, svnurl=svnurl, mode="update")
- GNUAutoconf.__init__(self, source,
- configure=configure, configureEnv=configureEnv,
- compile=compile,
- test=test)
diff --git a/buildbot/buildbot/process/process_twisted.py b/buildbot/buildbot/process/process_twisted.py
deleted file mode 100644
index 36d6fc5..0000000
--- a/buildbot/buildbot/process/process_twisted.py
+++ /dev/null
@@ -1,118 +0,0 @@
-
-# Build classes specific to the Twisted codebase
-
-from buildbot.process.base import Build
-from buildbot.process.factory import BuildFactory
-from buildbot.steps import shell
-from buildbot.steps.python_twisted import HLint, ProcessDocs, BuildDebs, \
- Trial, RemovePYCs
-
-class TwistedBuild(Build):
- workdir = "Twisted" # twisted's bin/trial expects to live in here
- def isFileImportant(self, filename):
- if filename.startswith("doc/fun/"):
- return 0
- if filename.startswith("sandbox/"):
- return 0
- return 1
-
-class TwistedTrial(Trial):
- tests = "twisted"
- # the Trial in Twisted >=2.1.0 has --recurse on by default, and -to
- # turned into --reporter=bwverbose .
- recurse = False
- trialMode = ["--reporter=bwverbose"]
- testpath = None
- trial = "./bin/trial"
-
-class TwistedBaseFactory(BuildFactory):
- buildClass = TwistedBuild
- # bin/trial expects its parent directory to be named "Twisted": it uses
- # this to add the local tree to PYTHONPATH during tests
- workdir = "Twisted"
-
- def __init__(self, source):
- BuildFactory.__init__(self, [source])
-
-class QuickTwistedBuildFactory(TwistedBaseFactory):
- treeStableTimer = 30
- useProgress = 0
-
- def __init__(self, source, python="python"):
- TwistedBaseFactory.__init__(self, source)
- if type(python) is str:
- python = [python]
- self.addStep(HLint, python=python[0])
- self.addStep(RemovePYCs)
- for p in python:
- cmd = [p, "setup.py", "build_ext", "-i"]
- self.addStep(shell.Compile, command=cmd, flunkOnFailure=True)
- self.addStep(TwistedTrial, python=p, testChanges=True)
-
-class FullTwistedBuildFactory(TwistedBaseFactory):
- treeStableTimer = 5*60
-
- def __init__(self, source, python="python",
- processDocs=False, runTestsRandomly=False,
- compileOpts=[], compileOpts2=[]):
- TwistedBaseFactory.__init__(self, source)
- if processDocs:
- self.addStep(ProcessDocs)
-
- if type(python) == str:
- python = [python]
- assert isinstance(compileOpts, list)
- assert isinstance(compileOpts2, list)
- cmd = (python + compileOpts + ["setup.py", "build_ext"]
- + compileOpts2 + ["-i"])
-
- self.addStep(shell.Compile, command=cmd, flunkOnFailure=True)
- self.addStep(RemovePYCs)
- self.addStep(TwistedTrial, python=python, randomly=runTestsRandomly)
-
-class TwistedDebsBuildFactory(TwistedBaseFactory):
- treeStableTimer = 10*60
-
- def __init__(self, source, python="python"):
- TwistedBaseFactory.__init__(self, source)
- self.addStep(ProcessDocs, haltOnFailure=True)
- self.addStep(BuildDebs, warnOnWarnings=True)
-
-class TwistedReactorsBuildFactory(TwistedBaseFactory):
- treeStableTimer = 5*60
-
- def __init__(self, source,
- python="python", compileOpts=[], compileOpts2=[],
- reactors=None):
- TwistedBaseFactory.__init__(self, source)
-
- if type(python) == str:
- python = [python]
- assert isinstance(compileOpts, list)
- assert isinstance(compileOpts2, list)
- cmd = (python + compileOpts + ["setup.py", "build_ext"]
- + compileOpts2 + ["-i"])
-
- self.addStep(shell.Compile, command=cmd, warnOnFailure=True)
-
- if reactors == None:
- reactors = [
- 'gtk2',
- 'gtk',
- #'kqueue',
- 'poll',
- 'c',
- 'qt',
- #'win32',
- ]
- for reactor in reactors:
- flunkOnFailure = 1
- warnOnFailure = 0
- #if reactor in ['c', 'qt', 'win32']:
- # # these are buggy, so tolerate failures for now
- # flunkOnFailure = 0
- # warnOnFailure = 1
- self.addStep(RemovePYCs) # TODO: why?
- self.addStep(TwistedTrial, name=reactor, python=python,
- reactor=reactor, flunkOnFailure=flunkOnFailure,
- warnOnFailure=warnOnFailure)
diff --git a/buildbot/buildbot/process/properties.py b/buildbot/buildbot/process/properties.py
deleted file mode 100644
index 2d07db9..0000000
--- a/buildbot/buildbot/process/properties.py
+++ /dev/null
@@ -1,157 +0,0 @@
-import re
-import weakref
-from buildbot import util
-
-class Properties(util.ComparableMixin):
- """
- I represent a set of properties that can be interpolated into various
- strings in buildsteps.
-
- @ivar properties: dictionary mapping property values to tuples
- (value, source), where source is a string identifing the source
- of the property.
-
- Objects of this class can be read like a dictionary -- in this case,
- only the property value is returned.
-
- As a special case, a property value of None is returned as an empty
- string when used as a mapping.
- """
-
- compare_attrs = ('properties',)
-
- def __init__(self, **kwargs):
- """
- @param kwargs: initial property values (for testing)
- """
- self.properties = {}
- self.pmap = PropertyMap(self)
- if kwargs: self.update(kwargs, "TEST")
-
- def __getstate__(self):
- d = self.__dict__.copy()
- del d['pmap']
- return d
-
- def __setstate__(self, d):
- self.__dict__ = d
- self.pmap = PropertyMap(self)
-
- def __getitem__(self, name):
- """Just get the value for this property."""
- rv = self.properties[name][0]
- return rv
-
- def has_key(self, name):
- return self.properties.has_key(name)
-
- def getProperty(self, name, default=None):
- """Get the value for the given property."""
- return self.properties.get(name, (default,))[0]
-
- def getPropertySource(self, name):
- return self.properties[name][1]
-
- def asList(self):
- """Return the properties as a sorted list of (name, value, source)"""
- l = [ (k, v[0], v[1]) for k,v in self.properties.items() ]
- l.sort()
- return l
-
- def __repr__(self):
- return repr(dict([ (k,v[0]) for k,v in self.properties.iteritems() ]))
-
- def setProperty(self, name, value, source):
- self.properties[name] = (value, source)
-
- def update(self, dict, source):
- """Update this object from a dictionary, with an explicit source specified."""
- for k, v in dict.items():
- self.properties[k] = (v, source)
-
- def updateFromProperties(self, other):
- """Update this object based on another object; the other object's """
- self.properties.update(other.properties)
-
- def render(self, value):
- """
- Return a variant of value that has any WithProperties objects
- substituted. This recurses into Python's compound data types.
- """
- # we use isinstance to detect Python's standard data types, and call
- # this function recursively for the values in those types
- if isinstance(value, (str, unicode)):
- return value
- elif isinstance(value, WithProperties):
- return value.render(self.pmap)
- elif isinstance(value, list):
- return [ self.render(e) for e in value ]
- elif isinstance(value, tuple):
- return tuple([ self.render(e) for e in value ])
- elif isinstance(value, dict):
- return dict([ (self.render(k), self.render(v)) for k,v in value.iteritems() ])
- else:
- return value
-
-class PropertyMap:
- """
- Privately-used mapping object to implement WithProperties' substitutions,
- including the rendering of None as ''.
- """
- colon_minus_re = re.compile(r"(.*):-(.*)")
- colon_plus_re = re.compile(r"(.*):\+(.*)")
- def __init__(self, properties):
- # use weakref here to avoid a reference loop
- self.properties = weakref.ref(properties)
-
- def __getitem__(self, key):
- properties = self.properties()
- assert properties is not None
-
- # %(prop:-repl)s
- # if prop exists, use it; otherwise, use repl
- mo = self.colon_minus_re.match(key)
- if mo:
- prop, repl = mo.group(1,2)
- if properties.has_key(prop):
- rv = properties[prop]
- else:
- rv = repl
- else:
- # %(prop:+repl)s
- # if prop exists, use repl; otherwise, an empty string
- mo = self.colon_plus_re.match(key)
- if mo:
- prop, repl = mo.group(1,2)
- if properties.has_key(prop):
- rv = repl
- else:
- rv = ''
- else:
- rv = properties[key]
-
- # translate 'None' to an empty string
- if rv is None: rv = ''
- return rv
-
-class WithProperties(util.ComparableMixin):
- """
- This is a marker class, used fairly widely to indicate that we
- want to interpolate build properties.
- """
-
- compare_attrs = ('fmtstring', 'args')
-
- def __init__(self, fmtstring, *args):
- self.fmtstring = fmtstring
- self.args = args
-
- def render(self, pmap):
- if self.args:
- strings = []
- for name in self.args:
- strings.append(pmap[name])
- s = self.fmtstring % tuple(strings)
- else:
- s = self.fmtstring % pmap
- return s
diff --git a/buildbot/buildbot/process/step_twisted2.py b/buildbot/buildbot/process/step_twisted2.py
deleted file mode 100644
index bc58315..0000000
--- a/buildbot/buildbot/process/step_twisted2.py
+++ /dev/null
@@ -1,159 +0,0 @@
-
-from buildbot.status import tests
-from buildbot.process.step import SUCCESS, FAILURE, BuildStep
-from buildbot.process.step_twisted import RunUnitTests
-
-from zope.interface import implements
-from twisted.python import log, failure
-from twisted.spread import jelly
-from twisted.pb.tokens import BananaError
-from twisted.web.html import PRE
-from twisted.web.error import NoResource
-
-class Null: pass
-ResultTypes = Null()
-ResultTypeNames = ["SKIP",
- "EXPECTED_FAILURE", "FAILURE", "ERROR",
- "UNEXPECTED_SUCCESS", "SUCCESS"]
-try:
- from twisted.trial import reporter # introduced in Twisted-1.0.5
- # extract the individual result types
- for name in ResultTypeNames:
- setattr(ResultTypes, name, getattr(reporter, name))
-except ImportError:
- from twisted.trial import unittest # Twisted-1.0.4 has them here
- for name in ResultTypeNames:
- setattr(ResultTypes, name, getattr(unittest, name))
-
-log._keepErrors = 0
-from twisted.trial import remote # for trial/jelly parsing
-
-import StringIO
-
-class OneJellyTest(tests.OneTest):
- def html(self, request):
- tpl = "<HTML><BODY>\n\n%s\n\n</body></html>\n"
- pptpl = "<HTML><BODY>\n\n<pre>%s</pre>\n\n</body></html>\n"
- t = request.postpath[0] # one of 'short', 'long' #, or 'html'
- if isinstance(self.results, failure.Failure):
- # it would be nice to remove unittest functions from the
- # traceback like unittest.format_exception() does.
- if t == 'short':
- s = StringIO.StringIO()
- self.results.printTraceback(s)
- return pptpl % PRE(s.getvalue())
- elif t == 'long':
- s = StringIO.StringIO()
- self.results.printDetailedTraceback(s)
- return pptpl % PRE(s.getvalue())
- #elif t == 'html':
- # return tpl % formatFailure(self.results)
- # ACK! source lines aren't stored in the Failure, rather,
- # formatFailure pulls them (by filename) from the local
- # disk. Feh. Even printTraceback() won't work. Double feh.
- return NoResource("No such mode '%s'" % t)
- if self.results == None:
- return tpl % "No results to show: test probably passed."
- # maybe results are plain text?
- return pptpl % PRE(self.results)
-
-class TwistedJellyTestResults(tests.TestResults):
- oneTestClass = OneJellyTest
- def describeOneTest(self, testname):
- return "%s: %s\n" % (testname, self.tests[testname][0])
-
-class RunUnitTestsJelly(RunUnitTests):
- """I run the unit tests with the --jelly option, which generates
- machine-parseable results as the tests are run.
- """
- trialMode = "--jelly"
- implements(remote.IRemoteReporter)
-
- ourtypes = { ResultTypes.SKIP: tests.SKIP,
- ResultTypes.EXPECTED_FAILURE: tests.EXPECTED_FAILURE,
- ResultTypes.FAILURE: tests.FAILURE,
- ResultTypes.ERROR: tests.ERROR,
- ResultTypes.UNEXPECTED_SUCCESS: tests.UNEXPECTED_SUCCESS,
- ResultTypes.SUCCESS: tests.SUCCESS,
- }
-
- def __getstate__(self):
- #d = RunUnitTests.__getstate__(self)
- d = self.__dict__.copy()
- # Banana subclasses are Ephemeral
- if d.has_key("decoder"):
- del d['decoder']
- return d
- def start(self):
- self.decoder = remote.DecodeReport(self)
- # don't accept anything unpleasant from the (untrusted) build slave
- # The jellied stream may have Failures, but everything inside should
- # be a string
- security = jelly.SecurityOptions()
- security.allowBasicTypes()
- security.allowInstancesOf(failure.Failure)
- self.decoder.taster = security
- self.results = TwistedJellyTestResults()
- RunUnitTests.start(self)
-
- def logProgress(self, progress):
- # XXX: track number of tests
- BuildStep.logProgress(self, progress)
-
- def addStdout(self, data):
- if not self.decoder:
- return
- try:
- self.decoder.dataReceived(data)
- except BananaError:
- self.decoder = None
- log.msg("trial --jelly output unparseable, traceback follows")
- log.deferr()
-
- def remote_start(self, expectedTests, times=None):
- print "remote_start", expectedTests
- def remote_reportImportError(self, name, aFailure, times=None):
- pass
- def remote_reportStart(self, testClass, method, times=None):
- print "reportStart", testClass, method
-
- def remote_reportResults(self, testClass, method, resultType, results,
- times=None):
- print "reportResults", testClass, method, resultType
- which = testClass + "." + method
- self.results.addTest(which,
- self.ourtypes.get(resultType, tests.UNKNOWN),
- results)
-
- def finished(self, rc):
- # give self.results to our Build object
- self.build.testsFinished(self.results)
- total = self.results.countTests()
- count = self.results.countFailures()
- result = SUCCESS
- if total == None:
- result = (FAILURE, ['tests%s' % self.rtext(' (%s)')])
- if count:
- result = (FAILURE, ["%d tes%s%s" % (count,
- (count == 1 and 't' or 'ts'),
- self.rtext(' (%s)'))])
- return self.stepComplete(result)
- def finishStatus(self, result):
- total = self.results.countTests()
- count = self.results.countFailures()
- text = []
- if count == 0:
- text.extend(["%d %s" % \
- (total,
- total == 1 and "test" or "tests"),
- "passed"])
- else:
- text.append("tests")
- text.append("%d %s" % \
- (count,
- count == 1 and "failure" or "failures"))
- self.updateCurrentActivity(text=text)
- self.addFileToCurrentActivity("tests", self.results)
- #self.finishStatusSummary()
- self.finishCurrentActivity()
-
diff --git a/buildbot/buildbot/scheduler.py b/buildbot/buildbot/scheduler.py
deleted file mode 100644
index 4341617..0000000
--- a/buildbot/buildbot/scheduler.py
+++ /dev/null
@@ -1,837 +0,0 @@
-# -*- test-case-name: buildbot.test.test_dependencies -*-
-
-import time, os.path
-
-from zope.interface import implements
-from twisted.internet import reactor
-from twisted.application import service, internet, strports
-from twisted.python import log, runtime
-from twisted.protocols import basic
-from twisted.cred import portal, checkers
-from twisted.spread import pb
-
-from buildbot import interfaces, buildset, util, pbutil
-from buildbot.status import builder
-from buildbot.sourcestamp import SourceStamp
-from buildbot.changes.maildir import MaildirService
-from buildbot.process.properties import Properties
-
-
-class BaseScheduler(service.MultiService, util.ComparableMixin):
- """
- A Scheduler creates BuildSets and submits them to the BuildMaster.
-
- @ivar name: name of the scheduler
-
- @ivar properties: additional properties specified in this
- scheduler's configuration
- @type properties: Properties object
- """
- implements(interfaces.IScheduler)
-
- def __init__(self, name, properties={}):
- """
- @param name: name for this scheduler
-
- @param properties: properties to be propagated from this scheduler
- @type properties: dict
- """
- service.MultiService.__init__(self)
- self.name = name
- self.properties = Properties()
- self.properties.update(properties, "Scheduler")
- self.properties.setProperty("scheduler", name, "Scheduler")
-
- def __repr__(self):
- # TODO: why can't id() return a positive number? %d is ugly.
- return "<Scheduler '%s' at %d>" % (self.name, id(self))
-
- def submitBuildSet(self, bs):
- self.parent.submitBuildSet(bs)
-
- def addChange(self, change):
- pass
-
-class BaseUpstreamScheduler(BaseScheduler):
- implements(interfaces.IUpstreamScheduler)
-
- def __init__(self, name, properties={}):
- BaseScheduler.__init__(self, name, properties)
- self.successWatchers = []
-
- def subscribeToSuccessfulBuilds(self, watcher):
- self.successWatchers.append(watcher)
- def unsubscribeToSuccessfulBuilds(self, watcher):
- self.successWatchers.remove(watcher)
-
- def submitBuildSet(self, bs):
- d = bs.waitUntilFinished()
- d.addCallback(self.buildSetFinished)
- BaseScheduler.submitBuildSet(self, bs)
-
- def buildSetFinished(self, bss):
- if not self.running:
- return
- if bss.getResults() == builder.SUCCESS:
- ss = bss.getSourceStamp()
- for w in self.successWatchers:
- w(ss)
-
-
-class Scheduler(BaseUpstreamScheduler):
- """The default Scheduler class will run a build after some period of time
- called the C{treeStableTimer}, on a given set of Builders. It only pays
- attention to a single branch. You can provide a C{fileIsImportant}
- function which will evaluate each Change to decide whether or not it
- should trigger a new build.
- """
-
- fileIsImportant = None
- compare_attrs = ('name', 'treeStableTimer', 'builderNames', 'branch',
- 'fileIsImportant', 'properties', 'categories')
-
- def __init__(self, name, branch, treeStableTimer, builderNames,
- fileIsImportant=None, properties={}, categories=None):
- """
- @param name: the name of this Scheduler
- @param branch: The branch name that the Scheduler should pay
- attention to. Any Change that is not on this branch
- will be ignored. It can be set to None to only pay
- attention to the default branch.
- @param treeStableTimer: the duration, in seconds, for which the tree
- must remain unchanged before a build will be
- triggered. This is intended to avoid builds
- of partially-committed fixes.
- @param builderNames: a list of Builder names. When this Scheduler
- decides to start a set of builds, they will be
- run on the Builders named by this list.
-
- @param fileIsImportant: A callable which takes one argument (a Change
- instance) and returns True if the change is
- worth building, and False if it is not.
- Unimportant Changes are accumulated until the
- build is triggered by an important change.
- The default value of None means that all
- Changes are important.
-
- @param properties: properties to apply to all builds started from this
- scheduler
- @param categories: A list of categories of changes to accept
- """
-
- BaseUpstreamScheduler.__init__(self, name, properties)
- self.treeStableTimer = treeStableTimer
- errmsg = ("The builderNames= argument to Scheduler must be a list "
- "of Builder description names (i.e. the 'name' key of the "
- "Builder specification dictionary)")
- assert isinstance(builderNames, (list, tuple)), errmsg
- for b in builderNames:
- assert isinstance(b, str), errmsg
- self.builderNames = builderNames
- self.branch = branch
- if fileIsImportant:
- assert callable(fileIsImportant)
- self.fileIsImportant = fileIsImportant
-
- self.importantChanges = []
- self.unimportantChanges = []
- self.nextBuildTime = None
- self.timer = None
- self.categories = categories
-
- def listBuilderNames(self):
- return self.builderNames
-
- def getPendingBuildTimes(self):
- if self.nextBuildTime is not None:
- return [self.nextBuildTime]
- return []
-
- def addChange(self, change):
- if change.branch != self.branch:
- log.msg("%s ignoring off-branch %s" % (self, change))
- return
- if self.categories is not None and change.category not in self.categories:
- log.msg("%s ignoring non-matching categories %s" % (self, change))
- return
- if not self.fileIsImportant:
- self.addImportantChange(change)
- elif self.fileIsImportant(change):
- self.addImportantChange(change)
- else:
- self.addUnimportantChange(change)
-
- def addImportantChange(self, change):
- log.msg("%s: change is important, adding %s" % (self, change))
- self.importantChanges.append(change)
- self.nextBuildTime = max(self.nextBuildTime,
- change.when + self.treeStableTimer)
- self.setTimer(self.nextBuildTime)
-
- def addUnimportantChange(self, change):
- log.msg("%s: change is not important, adding %s" % (self, change))
- self.unimportantChanges.append(change)
-
- def setTimer(self, when):
- log.msg("%s: setting timer to %s" %
- (self, time.strftime("%H:%M:%S", time.localtime(when))))
- now = util.now()
- if when < now:
- when = now
- if self.timer:
- self.timer.cancel()
- self.timer = reactor.callLater(when - now, self.fireTimer)
-
- def stopTimer(self):
- if self.timer:
- self.timer.cancel()
- self.timer = None
-
- def fireTimer(self):
- # clear out our state
- self.timer = None
- self.nextBuildTime = None
- changes = self.importantChanges + self.unimportantChanges
- self.importantChanges = []
- self.unimportantChanges = []
-
- # create a BuildSet, submit it to the BuildMaster
- bs = buildset.BuildSet(self.builderNames,
- SourceStamp(changes=changes),
- properties=self.properties)
- self.submitBuildSet(bs)
-
- def stopService(self):
- self.stopTimer()
- return service.MultiService.stopService(self)
-
-
-class AnyBranchScheduler(BaseUpstreamScheduler):
- """This Scheduler will handle changes on a variety of branches. It will
- accumulate Changes for each branch separately. It works by creating a
- separate Scheduler for each new branch it sees."""
-
- schedulerFactory = Scheduler
- fileIsImportant = None
-
- compare_attrs = ('name', 'branches', 'treeStableTimer', 'builderNames',
- 'fileIsImportant', 'properties')
-
- def __init__(self, name, branches, treeStableTimer, builderNames,
- fileIsImportant=None, properties={}):
- """
- @param name: the name of this Scheduler
- @param branches: The branch names that the Scheduler should pay
- attention to. Any Change that is not on one of these
- branches will be ignored. It can be set to None to
- accept changes from any branch. Don't use [] (an
- empty list), because that means we don't pay
- attention to *any* branches, so we'll never build
- anything.
- @param treeStableTimer: the duration, in seconds, for which the tree
- must remain unchanged before a build will be
- triggered. This is intended to avoid builds
- of partially-committed fixes.
- @param builderNames: a list of Builder names. When this Scheduler
- decides to start a set of builds, they will be
- run on the Builders named by this list.
-
- @param fileIsImportant: A callable which takes one argument (a Change
- instance) and returns True if the change is
- worth building, and False if it is not.
- Unimportant Changes are accumulated until the
- build is triggered by an important change.
- The default value of None means that all
- Changes are important.
-
- @param properties: properties to apply to all builds started from this
- scheduler
- """
-
- BaseUpstreamScheduler.__init__(self, name, properties)
- self.treeStableTimer = treeStableTimer
- for b in builderNames:
- assert isinstance(b, str)
- self.builderNames = builderNames
- self.branches = branches
- if self.branches == []:
- log.msg("AnyBranchScheduler %s: branches=[], so we will ignore "
- "all branches, and never trigger any builds. Please set "
- "branches=None to mean 'all branches'" % self)
- # consider raising an exception here, to make this warning more
- # prominent, but I can vaguely imagine situations where you might
- # want to comment out branches temporarily and wouldn't
- # appreciate it being treated as an error.
- if fileIsImportant:
- assert callable(fileIsImportant)
- self.fileIsImportant = fileIsImportant
- self.schedulers = {} # one per branch
-
- def __repr__(self):
- return "<AnyBranchScheduler '%s'>" % self.name
-
- def listBuilderNames(self):
- return self.builderNames
-
- def getPendingBuildTimes(self):
- bts = []
- for s in self.schedulers.values():
- if s.nextBuildTime is not None:
- bts.append(s.nextBuildTime)
- return bts
-
- def buildSetFinished(self, bss):
- # we don't care if a build has finished; one of the per-branch builders
- # will take care of it, instead.
- pass
-
- def addChange(self, change):
- branch = change.branch
- if self.branches is not None and branch not in self.branches:
- log.msg("%s ignoring off-branch %s" % (self, change))
- return
- s = self.schedulers.get(branch)
- if not s:
- if branch:
- name = self.name + "." + branch
- else:
- name = self.name + ".<default>"
- s = self.schedulerFactory(name, branch,
- self.treeStableTimer,
- self.builderNames,
- self.fileIsImportant)
- s.successWatchers = self.successWatchers
- s.setServiceParent(self)
- s.properties = self.properties
- # TODO: does this result in schedulers that stack up forever?
- # When I make the persistify-pass, think about this some more.
- self.schedulers[branch] = s
- s.addChange(change)
-
-
-class Dependent(BaseUpstreamScheduler):
- """This scheduler runs some set of 'downstream' builds when the
- 'upstream' scheduler has completed successfully."""
- implements(interfaces.IDownstreamScheduler)
-
- compare_attrs = ('name', 'upstream', 'builderNames', 'properties')
-
- def __init__(self, name, upstream, builderNames, properties={}):
- assert interfaces.IUpstreamScheduler.providedBy(upstream)
- BaseUpstreamScheduler.__init__(self, name, properties)
- self.upstream = upstream
- self.builderNames = builderNames
-
- def listBuilderNames(self):
- return self.builderNames
-
- def getPendingBuildTimes(self):
- # report the upstream's value
- return self.upstream.getPendingBuildTimes()
-
- def startService(self):
- service.MultiService.startService(self)
- self.upstream.subscribeToSuccessfulBuilds(self.upstreamBuilt)
-
- def stopService(self):
- d = service.MultiService.stopService(self)
- self.upstream.unsubscribeToSuccessfulBuilds(self.upstreamBuilt)
- return d
-
- def upstreamBuilt(self, ss):
- bs = buildset.BuildSet(self.builderNames, ss,
- properties=self.properties)
- self.submitBuildSet(bs)
-
- def checkUpstreamScheduler(self):
- # find our *active* upstream scheduler (which may not be self.upstream!) by name
- up_name = self.upstream.name
- upstream = None
- for s in self.parent.allSchedulers():
- if s.name == up_name and interfaces.IUpstreamScheduler.providedBy(s):
- upstream = s
- if not upstream:
- log.msg("ERROR: Couldn't find upstream scheduler of name <%s>" %
- up_name)
-
- # if it's already correct, we're good to go
- if upstream is self.upstream:
- return
-
- # otherwise, associate with the new upstream. We also keep listening
- # to the old upstream, in case it's in the middle of a build
- upstream.subscribeToSuccessfulBuilds(self.upstreamBuilt)
- self.upstream = upstream
- log.msg("Dependent <%s> connected to new Upstream <%s>" %
- (self.name, up_name))
-
-class Periodic(BaseUpstreamScheduler):
- """Instead of watching for Changes, this Scheduler can just start a build
- at fixed intervals. The C{periodicBuildTimer} parameter sets the number
- of seconds to wait between such periodic builds. The first build will be
- run immediately."""
-
- # TODO: consider having this watch another (changed-based) scheduler and
- # merely enforce a minimum time between builds.
-
- compare_attrs = ('name', 'builderNames', 'periodicBuildTimer', 'branch', 'properties')
-
- def __init__(self, name, builderNames, periodicBuildTimer,
- branch=None, properties={}):
- BaseUpstreamScheduler.__init__(self, name, properties)
- self.builderNames = builderNames
- self.periodicBuildTimer = periodicBuildTimer
- self.branch = branch
- self.reason = ("The Periodic scheduler named '%s' triggered this build"
- % name)
- self.timer = internet.TimerService(self.periodicBuildTimer,
- self.doPeriodicBuild)
- self.timer.setServiceParent(self)
-
- def listBuilderNames(self):
- return self.builderNames
-
- def getPendingBuildTimes(self):
- # TODO: figure out when self.timer is going to fire next and report
- # that
- return []
-
- def doPeriodicBuild(self):
- bs = buildset.BuildSet(self.builderNames,
- SourceStamp(branch=self.branch),
- self.reason,
- properties=self.properties)
- self.submitBuildSet(bs)
-
-
-
-class Nightly(BaseUpstreamScheduler):
- """Imitate 'cron' scheduling. This can be used to schedule a nightly
- build, or one which runs at certain times of the day, week, or month.
-
- Pass some subset of minute, hour, dayOfMonth, month, and dayOfWeek; each
- may be a single number or a list of valid values. The builds will be
- triggered whenever the current time matches these values. Wildcards are
- represented by a '*' string. All fields default to a wildcard except
- 'minute', so with no fields this defaults to a build every hour, on the
- hour.
-
- For example, the following master.cfg clause will cause a build to be
- started every night at 3:00am::
-
- s = Nightly('nightly', ['builder1', 'builder2'], hour=3, minute=0)
- c['schedulers'].append(s)
-
- This scheduler will perform a build each Monday morning at 6:23am and
- again at 8:23am::
-
- s = Nightly('BeforeWork', ['builder1'],
- dayOfWeek=0, hour=[6,8], minute=23)
-
- The following runs a build every two hours::
-
- s = Nightly('every2hours', ['builder1'], hour=range(0, 24, 2))
-
- And this one will run only on December 24th::
-
- s = Nightly('SleighPreflightCheck', ['flying_circuits', 'radar'],
- month=12, dayOfMonth=24, hour=12, minute=0)
-
- For dayOfWeek and dayOfMonth, builds are triggered if the date matches
- either of them. All time values are compared against the tuple returned
- by time.localtime(), so month and dayOfMonth numbers start at 1, not
- zero. dayOfWeek=0 is Monday, dayOfWeek=6 is Sunday.
-
- onlyIfChanged functionality
- s = Nightly('nightly', ['builder1', 'builder2'],
- hour=3, minute=0, onlyIfChanged=True)
- When the flag is True (False by default), the build is triggered if
- the date matches and if the branch has changed
-
- fileIsImportant parameter is implemented as defined in class Scheduler
- """
-
- compare_attrs = ('name', 'builderNames',
- 'minute', 'hour', 'dayOfMonth', 'month',
- 'dayOfWeek', 'branch', 'onlyIfChanged',
- 'fileIsImportant', 'properties')
-
- def __init__(self, name, builderNames, minute=0, hour='*',
- dayOfMonth='*', month='*', dayOfWeek='*',
- branch=None, fileIsImportant=None, onlyIfChanged=False, properties={}):
- # Setting minute=0 really makes this an 'Hourly' scheduler. This
- # seemed like a better default than minute='*', which would result in
- # a build every 60 seconds.
- BaseUpstreamScheduler.__init__(self, name, properties)
- self.builderNames = builderNames
- self.minute = minute
- self.hour = hour
- self.dayOfMonth = dayOfMonth
- self.month = month
- self.dayOfWeek = dayOfWeek
- self.branch = branch
- self.onlyIfChanged = onlyIfChanged
- self.delayedRun = None
- self.nextRunTime = None
- self.reason = ("The Nightly scheduler named '%s' triggered this build"
- % name)
-
- self.importantChanges = []
- self.unimportantChanges = []
- self.fileIsImportant = None
- if fileIsImportant:
- assert callable(fileIsImportant)
- self.fileIsImportant = fileIsImportant
-
- def addTime(self, timetuple, secs):
- return time.localtime(time.mktime(timetuple)+secs)
- def findFirstValueAtLeast(self, values, value, default=None):
- for v in values:
- if v >= value: return v
- return default
-
- def setTimer(self):
- self.nextRunTime = self.calculateNextRunTime()
- self.delayedRun = reactor.callLater(self.nextRunTime - time.time(),
- self.doPeriodicBuild)
-
- def startService(self):
- BaseUpstreamScheduler.startService(self)
- self.setTimer()
-
- def stopService(self):
- BaseUpstreamScheduler.stopService(self)
- self.delayedRun.cancel()
-
- def isRunTime(self, timetuple):
- def check(ourvalue, value):
- if ourvalue == '*': return True
- if isinstance(ourvalue, int): return value == ourvalue
- return (value in ourvalue)
-
- if not check(self.minute, timetuple[4]):
- #print 'bad minute', timetuple[4], self.minute
- return False
-
- if not check(self.hour, timetuple[3]):
- #print 'bad hour', timetuple[3], self.hour
- return False
-
- if not check(self.month, timetuple[1]):
- #print 'bad month', timetuple[1], self.month
- return False
-
- if self.dayOfMonth != '*' and self.dayOfWeek != '*':
- # They specified both day(s) of month AND day(s) of week.
- # This means that we only have to match one of the two. If
- # neither one matches, this time is not the right time.
- if not (check(self.dayOfMonth, timetuple[2]) or
- check(self.dayOfWeek, timetuple[6])):
- #print 'bad day'
- return False
- else:
- if not check(self.dayOfMonth, timetuple[2]):
- #print 'bad day of month'
- return False
-
- if not check(self.dayOfWeek, timetuple[6]):
- #print 'bad day of week'
- return False
-
- return True
-
- def calculateNextRunTime(self):
- return self.calculateNextRunTimeFrom(time.time())
-
- def calculateNextRunTimeFrom(self, now):
- dateTime = time.localtime(now)
-
- # Remove seconds by advancing to at least the next minute
- dateTime = self.addTime(dateTime, 60-dateTime[5])
-
- # Now we just keep adding minutes until we find something that matches
-
- # It's not an efficient algorithm, but it'll *work* for now
- yearLimit = dateTime[0]+2
- while not self.isRunTime(dateTime):
- dateTime = self.addTime(dateTime, 60)
- #print 'Trying', time.asctime(dateTime)
- assert dateTime[0] < yearLimit, 'Something is wrong with this code'
- return time.mktime(dateTime)
-
- def listBuilderNames(self):
- return self.builderNames
-
- def getPendingBuildTimes(self):
- # TODO: figure out when self.timer is going to fire next and report
- # that
- if self.nextRunTime is None: return []
- return [self.nextRunTime]
-
- def doPeriodicBuild(self):
- # Schedule the next run
- self.setTimer()
-
- if self.onlyIfChanged:
- if len(self.importantChanges) > 0:
- changes = self.importantChanges + self.unimportantChanges
- # And trigger a build
- log.msg("Nightly Scheduler <%s>: triggering build" % self.name)
- bs = buildset.BuildSet(self.builderNames,
- SourceStamp(changes=changes),
- self.reason,
- properties=self.properties)
- self.submitBuildSet(bs)
- # Reset the change lists
- self.importantChanges = []
- self.unimportantChanges = []
- else:
- log.msg("Nightly Scheduler <%s>: skipping build - No important change" % self.name)
- else:
- # And trigger a build
- bs = buildset.BuildSet(self.builderNames,
- SourceStamp(branch=self.branch),
- self.reason,
- properties=self.properties)
- self.submitBuildSet(bs)
-
- def addChange(self, change):
- if self.onlyIfChanged:
- if change.branch != self.branch:
- log.msg("Nightly Scheduler <%s>: ignoring change %d on off-branch %s" % (self.name, change.revision, change.branch))
- return
- if not self.fileIsImportant:
- self.addImportantChange(change)
- elif self.fileIsImportant(change):
- self.addImportantChange(change)
- else:
- self.addUnimportantChange(change)
- else:
- log.msg("Nightly Scheduler <%s>: no add change" % self.name)
- pass
-
- def addImportantChange(self, change):
- log.msg("Nightly Scheduler <%s>: change %s from %s is important, adding it" % (self.name, change.revision, change.who))
- self.importantChanges.append(change)
-
- def addUnimportantChange(self, change):
- log.msg("Nightly Scheduler <%s>: change %s from %s is not important, adding it" % (self.name, change.revision, change.who))
- self.unimportantChanges.append(change)
-
-
-class TryBase(BaseScheduler):
- def __init__(self, name, builderNames, properties={}):
- BaseScheduler.__init__(self, name, properties)
- self.builderNames = builderNames
-
- def listBuilderNames(self):
- return self.builderNames
-
- def getPendingBuildTimes(self):
- # we can't predict what the developers are going to do in the future
- return []
-
- def addChange(self, change):
- # Try schedulers ignore Changes
- pass
-
- def processBuilderList(self, builderNames):
- # self.builderNames is the configured list of builders
- # available for try. If the user supplies a list of builders,
- # it must be restricted to the configured list. If not, build
- # on all of the configured builders.
- if builderNames:
- for b in builderNames:
- if not b in self.builderNames:
- log.msg("%s got with builder %s" % (self, b))
- log.msg(" but that wasn't in our list: %s"
- % (self.builderNames,))
- return []
- else:
- builderNames = self.builderNames
- return builderNames
-
-class BadJobfile(Exception):
- pass
-
-class JobFileScanner(basic.NetstringReceiver):
- def __init__(self):
- self.strings = []
- self.transport = self # so transport.loseConnection works
- self.error = False
-
- def stringReceived(self, s):
- self.strings.append(s)
-
- def loseConnection(self):
- self.error = True
-
-class Try_Jobdir(TryBase):
- compare_attrs = ( 'name', 'builderNames', 'jobdir', 'properties' )
-
- def __init__(self, name, builderNames, jobdir, properties={}):
- TryBase.__init__(self, name, builderNames, properties)
- self.jobdir = jobdir
- self.watcher = MaildirService()
- self.watcher.setServiceParent(self)
-
- def setServiceParent(self, parent):
- self.watcher.setBasedir(os.path.join(parent.basedir, self.jobdir))
- TryBase.setServiceParent(self, parent)
-
- def parseJob(self, f):
- # jobfiles are serialized build requests. Each is a list of
- # serialized netstrings, in the following order:
- # "1", the version number of this format
- # buildsetID, arbitrary string, used to find the buildSet later
- # branch name, "" for default-branch
- # base revision, "" for HEAD
- # patchlevel, usually "1"
- # patch
- # builderNames...
- p = JobFileScanner()
- p.dataReceived(f.read())
- if p.error:
- raise BadJobfile("unable to parse netstrings")
- s = p.strings
- ver = s.pop(0)
- if ver != "1":
- raise BadJobfile("unknown version '%s'" % ver)
- buildsetID, branch, baserev, patchlevel, diff = s[:5]
- builderNames = s[5:]
- if branch == "":
- branch = None
- if baserev == "":
- baserev = None
- patchlevel = int(patchlevel)
- patch = (patchlevel, diff)
- ss = SourceStamp(branch, baserev, patch)
- return builderNames, ss, buildsetID
-
- def messageReceived(self, filename):
- md = os.path.join(self.parent.basedir, self.jobdir)
- if runtime.platformType == "posix":
- # open the file before moving it, because I'm afraid that once
- # it's in cur/, someone might delete it at any moment
- path = os.path.join(md, "new", filename)
- f = open(path, "r")
- os.rename(os.path.join(md, "new", filename),
- os.path.join(md, "cur", filename))
- else:
- # do this backwards under windows, because you can't move a file
- # that somebody is holding open. This was causing a Permission
- # Denied error on bear's win32-twisted1.3 buildslave.
- os.rename(os.path.join(md, "new", filename),
- os.path.join(md, "cur", filename))
- path = os.path.join(md, "cur", filename)
- f = open(path, "r")
-
- try:
- builderNames, ss, bsid = self.parseJob(f)
- except BadJobfile:
- log.msg("%s reports a bad jobfile in %s" % (self, filename))
- log.err()
- return
- # Validate/fixup the builder names.
- builderNames = self.processBuilderList(builderNames)
- if not builderNames:
- return
- reason = "'try' job"
- bs = buildset.BuildSet(builderNames, ss, reason=reason,
- bsid=bsid, properties=self.properties)
- self.submitBuildSet(bs)
-
-class Try_Userpass(TryBase):
- compare_attrs = ( 'name', 'builderNames', 'port', 'userpass', 'properties' )
- implements(portal.IRealm)
-
- def __init__(self, name, builderNames, port, userpass, properties={}):
- TryBase.__init__(self, name, builderNames, properties)
- if type(port) is int:
- port = "tcp:%d" % port
- self.port = port
- self.userpass = userpass
- c = checkers.InMemoryUsernamePasswordDatabaseDontUse()
- for user,passwd in self.userpass:
- c.addUser(user, passwd)
-
- p = portal.Portal(self)
- p.registerChecker(c)
- f = pb.PBServerFactory(p)
- s = strports.service(port, f)
- s.setServiceParent(self)
-
- def getPort(self):
- # utility method for tests: figure out which TCP port we just opened.
- return self.services[0]._port.getHost().port
-
- def requestAvatar(self, avatarID, mind, interface):
- log.msg("%s got connection from user %s" % (self, avatarID))
- assert interface == pb.IPerspective
- p = Try_Userpass_Perspective(self, avatarID)
- return (pb.IPerspective, p, lambda: None)
-
-class Try_Userpass_Perspective(pbutil.NewCredPerspective):
- def __init__(self, parent, username):
- self.parent = parent
- self.username = username
-
- def perspective_try(self, branch, revision, patch, builderNames, properties={}):
- log.msg("user %s requesting build on builders %s" % (self.username,
- builderNames))
- # Validate/fixup the builder names.
- builderNames = self.parent.processBuilderList(builderNames)
- if not builderNames:
- return
- ss = SourceStamp(branch, revision, patch)
- reason = "'try' job from user %s" % self.username
-
- # roll the specified props in with our inherited props
- combined_props = Properties()
- combined_props.updateFromProperties(self.parent.properties)
- combined_props.update(properties, "try build")
-
- bs = buildset.BuildSet(builderNames,
- ss,
- reason=reason,
- properties=combined_props)
-
- self.parent.submitBuildSet(bs)
-
- # return a remotely-usable BuildSetStatus object
- from buildbot.status.client import makeRemote
- return makeRemote(bs.status)
-
-class Triggerable(BaseUpstreamScheduler):
- """This scheduler doesn't do anything until it is triggered by a Trigger
- step in a factory. In general, that step will not complete until all of
- the builds that I fire have finished.
- """
-
- compare_attrs = ('name', 'builderNames', 'properties')
-
- def __init__(self, name, builderNames, properties={}):
- BaseUpstreamScheduler.__init__(self, name, properties)
- self.builderNames = builderNames
-
- def listBuilderNames(self):
- return self.builderNames
-
- def getPendingBuildTimes(self):
- return []
-
- def trigger(self, ss, set_props=None):
- """Trigger this scheduler. Returns a deferred that will fire when the
- buildset is finished.
- """
-
- # properties for this buildset are composed of our own properties,
- # potentially overridden by anything from the triggering build
- props = Properties()
- props.updateFromProperties(self.properties)
- if set_props: props.updateFromProperties(set_props)
-
- bs = buildset.BuildSet(self.builderNames, ss, properties=props)
- d = bs.waitUntilFinished()
- self.submitBuildSet(bs)
- return d
diff --git a/buildbot/buildbot/scripts/__init__.py b/buildbot/buildbot/scripts/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/buildbot/buildbot/scripts/__init__.py
+++ /dev/null
diff --git a/buildbot/buildbot/scripts/checkconfig.py b/buildbot/buildbot/scripts/checkconfig.py
deleted file mode 100644
index 44dd7bc..0000000
--- a/buildbot/buildbot/scripts/checkconfig.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import sys
-import os
-from shutil import copy, rmtree
-from tempfile import mkdtemp
-from os.path import isfile
-import traceback
-
-from buildbot import master
-
-class ConfigLoader(master.BuildMaster):
- def __init__(self, configFileName="master.cfg"):
- master.BuildMaster.__init__(self, ".", configFileName)
- dir = os.getcwd()
- # Use a temporary directory since loadConfig() creates a bunch of
- # directories and compiles .py files
- tempdir = mkdtemp()
- try:
- copy(configFileName, tempdir)
- for entry in os.listdir("."):
- # Any code in a subdirectory will _not_ be copied! This is a bug
- if isfile(entry):
- copy(entry, tempdir)
- except:
- raise
-
- try:
- os.chdir(tempdir)
- # Add the temp directory to the library path so local modules work
- sys.path.append(tempdir)
- configFile = open(configFileName, "r")
- self.loadConfig(configFile)
- except:
- os.chdir(dir)
- configFile.close()
- rmtree(tempdir)
- raise
- os.chdir(dir)
- rmtree(tempdir)
-
-if __name__ == '__main__':
- try:
- if len(sys.argv) > 1:
- c = ConfigLoader(sys.argv[1])
- else:
- c = ConfigLoader()
- except IOError:
- print >> sys.stderr, "Could not open config file"
- sys.exit(2)
- except:
- print >> sys.stderr, "Error in config file:"
- t, v, tb = sys.exc_info()
- print >> sys.stderr, traceback.print_exception(t, v, tb)
- sys.exit(1)
diff --git a/buildbot/buildbot/scripts/logwatcher.py b/buildbot/buildbot/scripts/logwatcher.py
deleted file mode 100644
index e959afb..0000000
--- a/buildbot/buildbot/scripts/logwatcher.py
+++ /dev/null
@@ -1,97 +0,0 @@
-
-import os
-from twisted.python.failure import Failure
-from twisted.internet import defer, reactor, protocol, error
-from twisted.protocols.basic import LineOnlyReceiver
-
-class FakeTransport:
- disconnecting = False
-
-class BuildmasterTimeoutError(Exception):
- pass
-class BuildslaveTimeoutError(Exception):
- pass
-class ReconfigError(Exception):
- pass
-class BuildSlaveDetectedError(Exception):
- pass
-
-class TailProcess(protocol.ProcessProtocol):
- def outReceived(self, data):
- self.lw.dataReceived(data)
- def errReceived(self, data):
- print "ERR: '%s'" % (data,)
-
-
-class LogWatcher(LineOnlyReceiver):
- POLL_INTERVAL = 0.1
- TIMEOUT_DELAY = 10.0
- delimiter = os.linesep
-
- def __init__(self, logfile):
- self.logfile = logfile
- self.in_reconfig = False
- self.transport = FakeTransport()
- self.pp = TailProcess()
- self.pp.lw = self
- self.processtype = "buildmaster"
- self.timer = None
-
- def start(self):
- # return a Deferred that fires when the reconfig process has
- # finished. It errbacks with TimeoutError if the finish line has not
- # been seen within 10 seconds, and with ReconfigError if the error
- # line was seen. If the logfile could not be opened, it errbacks with
- # an IOError.
- self.p = reactor.spawnProcess(self.pp, "/usr/bin/tail",
- ("tail", "-f", "-n", "0", self.logfile),
- env=os.environ,
- )
- self.running = True
- d = defer.maybeDeferred(self._start)
- return d
-
- def _start(self):
- self.d = defer.Deferred()
- self.timer = reactor.callLater(self.TIMEOUT_DELAY, self.timeout)
- return self.d
-
- def timeout(self):
- self.timer = None
- if self.processtype == "buildmaster":
- e = BuildmasterTimeoutError()
- else:
- e = BuildslaveTimeoutError()
- self.finished(Failure(e))
-
- def finished(self, results):
- try:
- self.p.signalProcess("KILL")
- except error.ProcessExitedAlready:
- pass
- if self.timer:
- self.timer.cancel()
- self.timer = None
- self.running = False
- self.in_reconfig = False
- self.d.callback(results)
-
- def lineReceived(self, line):
- if not self.running:
- return
- if "Log opened." in line:
- self.in_reconfig = True
- if "loading configuration from" in line:
- self.in_reconfig = True
- if "Creating BuildSlave" in line:
- self.processtype = "buildslave"
-
- if self.in_reconfig:
- print line
-
- if "message from master: attached" in line:
- return self.finished("buildslave")
- if "I will keep using the previous config file" in line:
- return self.finished(Failure(ReconfigError()))
- if "configuration update complete" in line:
- return self.finished("buildmaster")
diff --git a/buildbot/buildbot/scripts/reconfig.py b/buildbot/buildbot/scripts/reconfig.py
deleted file mode 100644
index 104214b..0000000
--- a/buildbot/buildbot/scripts/reconfig.py
+++ /dev/null
@@ -1,69 +0,0 @@
-
-import os, signal, platform
-from twisted.internet import reactor
-
-from buildbot.scripts.logwatcher import LogWatcher, BuildmasterTimeoutError, \
- ReconfigError
-
-class Reconfigurator:
- def run(self, config):
- # Returns "Microsoft" for Vista and "Windows" for other versions
- if platform.system() in ("Windows", "Microsoft"):
- print "Reconfig (through SIGHUP) is not supported on Windows."
- print "The 'buildbot debugclient' tool can trigger a reconfig"
- print "remotely, but requires Gtk+ libraries to run."
- return
-
- basedir = config['basedir']
- quiet = config['quiet']
- os.chdir(basedir)
- f = open("twistd.pid", "rt")
- self.pid = int(f.read().strip())
- if quiet:
- os.kill(self.pid, signal.SIGHUP)
- return
-
- # keep reading twistd.log. Display all messages between "loading
- # configuration from ..." and "configuration update complete" or
- # "I will keep using the previous config file instead.", or until
- # 10 seconds have elapsed.
-
- self.sent_signal = False
- lw = LogWatcher("twistd.log")
- d = lw.start()
- d.addCallbacks(self.success, self.failure)
- reactor.callLater(0.2, self.sighup)
- reactor.run()
-
- def sighup(self):
- if self.sent_signal:
- return
- print "sending SIGHUP to process %d" % self.pid
- self.sent_signal = True
- os.kill(self.pid, signal.SIGHUP)
-
- def success(self, res):
- print """
-Reconfiguration appears to have completed successfully.
-"""
- reactor.stop()
-
- def failure(self, why):
- if why.check(BuildmasterTimeoutError):
- print "Never saw reconfiguration finish."
- elif why.check(ReconfigError):
- print """
-Reconfiguration failed. Please inspect the master.cfg file for errors,
-correct them, then try 'buildbot reconfig' again.
-"""
- elif why.check(IOError):
- # we were probably unable to open the file in the first place
- self.sighup()
- else:
- print "Error while following twistd.log: %s" % why
- reactor.stop()
-
-def reconfig(config):
- r = Reconfigurator()
- r.run(config)
-
diff --git a/buildbot/buildbot/scripts/runner.py b/buildbot/buildbot/scripts/runner.py
deleted file mode 100644
index 4e22dbc..0000000
--- a/buildbot/buildbot/scripts/runner.py
+++ /dev/null
@@ -1,1023 +0,0 @@
-# -*- test-case-name: buildbot.test.test_runner -*-
-
-# N.B.: don't import anything that might pull in a reactor yet. Some of our
-# subcommands want to load modules that need the gtk reactor.
-import os, sys, stat, re, time
-import traceback
-from twisted.python import usage, util, runtime
-
-from buildbot.interfaces import BuildbotNotRunningError
-
-# this is mostly just a front-end for mktap, twistd, and kill(1), but in the
-# future it will also provide an interface to some developer tools that talk
-# directly to a remote buildmaster (like 'try' and a status client)
-
-# the create/start/stop commands should all be run as the same user,
-# preferably a separate 'buildbot' account.
-
-class MakerBase(usage.Options):
- optFlags = [
- ['help', 'h', "Display this message"],
- ["quiet", "q", "Do not emit the commands being run"],
- ]
-
- #["basedir", "d", None, "Base directory for the buildmaster"],
- opt_h = usage.Options.opt_help
-
- def parseArgs(self, *args):
- if len(args) > 0:
- self['basedir'] = args[0]
- else:
- self['basedir'] = None
- if len(args) > 1:
- raise usage.UsageError("I wasn't expecting so many arguments")
-
- def postOptions(self):
- if self['basedir'] is None:
- raise usage.UsageError("<basedir> parameter is required")
- self['basedir'] = os.path.abspath(self['basedir'])
-
-makefile_sample = """# -*- makefile -*-
-
-# This is a simple makefile which lives in a buildmaster/buildslave
-# directory (next to the buildbot.tac file). It allows you to start/stop the
-# master or slave by doing 'make start' or 'make stop'.
-
-# The 'reconfig' target will tell a buildmaster to reload its config file.
-
-start:
- twistd --no_save -y buildbot.tac
-
-stop:
- kill `cat twistd.pid`
-
-reconfig:
- kill -HUP `cat twistd.pid`
-
-log:
- tail -f twistd.log
-"""
-
-class Maker:
- def __init__(self, config):
- self.config = config
- self.basedir = config['basedir']
- self.force = config.get('force', False)
- self.quiet = config['quiet']
-
- def mkdir(self):
- if os.path.exists(self.basedir):
- if not self.quiet:
- print "updating existing installation"
- return
- if not self.quiet: print "mkdir", self.basedir
- os.mkdir(self.basedir)
-
- def mkinfo(self):
- path = os.path.join(self.basedir, "info")
- if not os.path.exists(path):
- if not self.quiet: print "mkdir", path
- os.mkdir(path)
- created = False
- admin = os.path.join(path, "admin")
- if not os.path.exists(admin):
- if not self.quiet:
- print "Creating info/admin, you need to edit it appropriately"
- f = open(admin, "wt")
- f.write("Your Name Here <admin@youraddress.invalid>\n")
- f.close()
- created = True
- host = os.path.join(path, "host")
- if not os.path.exists(host):
- if not self.quiet:
- print "Creating info/host, you need to edit it appropriately"
- f = open(host, "wt")
- f.write("Please put a description of this build host here\n")
- f.close()
- created = True
- if created and not self.quiet:
- print "Please edit the files in %s appropriately." % path
-
- def chdir(self):
- if not self.quiet: print "chdir", self.basedir
- os.chdir(self.basedir)
-
- def makeTAC(self, contents, secret=False):
- tacfile = "buildbot.tac"
- if os.path.exists(tacfile):
- oldcontents = open(tacfile, "rt").read()
- if oldcontents == contents:
- if not self.quiet:
- print "buildbot.tac already exists and is correct"
- return
- if not self.quiet:
- print "not touching existing buildbot.tac"
- print "creating buildbot.tac.new instead"
- tacfile = "buildbot.tac.new"
- f = open(tacfile, "wt")
- f.write(contents)
- f.close()
- if secret:
- os.chmod(tacfile, 0600)
-
- def makefile(self):
- target = "Makefile.sample"
- if os.path.exists(target):
- oldcontents = open(target, "rt").read()
- if oldcontents == makefile_sample:
- if not self.quiet:
- print "Makefile.sample already exists and is correct"
- return
- if not self.quiet:
- print "replacing Makefile.sample"
- else:
- if not self.quiet:
- print "creating Makefile.sample"
- f = open(target, "wt")
- f.write(makefile_sample)
- f.close()
-
- def sampleconfig(self, source):
- target = "master.cfg.sample"
- config_sample = open(source, "rt").read()
- if os.path.exists(target):
- oldcontents = open(target, "rt").read()
- if oldcontents == config_sample:
- if not self.quiet:
- print "master.cfg.sample already exists and is up-to-date"
- return
- if not self.quiet:
- print "replacing master.cfg.sample"
- else:
- if not self.quiet:
- print "creating master.cfg.sample"
- f = open(target, "wt")
- f.write(config_sample)
- f.close()
- os.chmod(target, 0600)
-
- def public_html(self, index_html, buildbot_css, robots_txt):
- webdir = os.path.join(self.basedir, "public_html")
- if os.path.exists(webdir):
- if not self.quiet:
- print "public_html/ already exists: not replacing"
- return
- else:
- os.mkdir(webdir)
- if not self.quiet:
- print "populating public_html/"
- target = os.path.join(webdir, "index.html")
- f = open(target, "wt")
- f.write(open(index_html, "rt").read())
- f.close()
-
- target = os.path.join(webdir, "buildbot.css")
- f = open(target, "wt")
- f.write(open(buildbot_css, "rt").read())
- f.close()
-
- target = os.path.join(webdir, "robots.txt")
- f = open(target, "wt")
- f.write(open(robots_txt, "rt").read())
- f.close()
-
- def populate_if_missing(self, target, source, overwrite=False):
- new_contents = open(source, "rt").read()
- if os.path.exists(target):
- old_contents = open(target, "rt").read()
- if old_contents != new_contents:
- if overwrite:
- if not self.quiet:
- print "%s has old/modified contents" % target
- print " overwriting it with new contents"
- open(target, "wt").write(new_contents)
- else:
- if not self.quiet:
- print "%s has old/modified contents" % target
- print " writing new contents to %s.new" % target
- open(target + ".new", "wt").write(new_contents)
- # otherwise, it's up to date
- else:
- if not self.quiet:
- print "populating %s" % target
- open(target, "wt").write(new_contents)
-
- def upgrade_public_html(self, index_html, buildbot_css, robots_txt):
- webdir = os.path.join(self.basedir, "public_html")
- if not os.path.exists(webdir):
- if not self.quiet:
- print "populating public_html/"
- os.mkdir(webdir)
- self.populate_if_missing(os.path.join(webdir, "index.html"),
- index_html)
- self.populate_if_missing(os.path.join(webdir, "buildbot.css"),
- buildbot_css)
- self.populate_if_missing(os.path.join(webdir, "robots.txt"),
- robots_txt)
-
- def check_master_cfg(self):
- from buildbot.master import BuildMaster
- from twisted.python import log, failure
-
- master_cfg = os.path.join(self.basedir, "master.cfg")
- if not os.path.exists(master_cfg):
- if not self.quiet:
- print "No master.cfg found"
- return 1
-
- # side-effects of loading the config file:
-
- # for each Builder defined in c['builders'], if the status directory
- # didn't already exist, it will be created, and the
- # $BUILDERNAME/builder pickle might be created (with a single
- # "builder created" event).
-
- # we put basedir in front of sys.path, because that's how the
- # buildmaster itself will run, and it is quite common to have the
- # buildmaster import helper classes from other .py files in its
- # basedir.
-
- if sys.path[0] != self.basedir:
- sys.path.insert(0, self.basedir)
-
- m = BuildMaster(self.basedir)
- # we need to route log.msg to stdout, so any problems can be seen
- # there. But if everything goes well, I'd rather not clutter stdout
- # with log messages. So instead we add a logObserver which gathers
- # messages and only displays them if something goes wrong.
- messages = []
- log.addObserver(messages.append)
- try:
- # this will raise an exception if there's something wrong with
- # the config file. Note that this BuildMaster instance is never
- # started, so it won't actually do anything with the
- # configuration.
- m.loadConfig(open(master_cfg, "r"))
- except:
- f = failure.Failure()
- if not self.quiet:
- print
- for m in messages:
- print "".join(m['message'])
- print f
- print
- print "An error was detected in the master.cfg file."
- print "Please correct the problem and run 'buildbot upgrade-master' again."
- print
- return 1
- return 0
-
-class UpgradeMasterOptions(MakerBase):
- optFlags = [
- ["replace", "r", "Replace any modified files without confirmation."],
- ]
-
- def getSynopsis(self):
- return "Usage: buildbot upgrade-master [options] <basedir>"
-
- longdesc = """
- This command takes an existing buildmaster working directory and
- adds/modifies the files there to work with the current version of
- buildbot. When this command is finished, the buildmaster directory should
- look much like a brand-new one created by the 'create-master' command.
-
- Use this after you've upgraded your buildbot installation and before you
- restart the buildmaster to use the new version.
-
- If you have modified the files in your working directory, this command
- will leave them untouched, but will put the new recommended contents in a
- .new file (for example, if index.html has been modified, this command
- will create index.html.new). You can then look at the new version and
- decide how to merge its contents into your modified file.
- """
-
-def upgradeMaster(config):
- basedir = config['basedir']
- m = Maker(config)
- # TODO: check Makefile
- # TODO: check TAC file
- # check web files: index.html, classic.css, robots.txt
- webdir = os.path.join(basedir, "public_html")
- m.upgrade_public_html(util.sibpath(__file__, "../status/web/index.html"),
- util.sibpath(__file__, "../status/web/classic.css"),
- util.sibpath(__file__, "../status/web/robots.txt"),
- )
- m.populate_if_missing(os.path.join(basedir, "master.cfg.sample"),
- util.sibpath(__file__, "sample.cfg"),
- overwrite=True)
- rc = m.check_master_cfg()
- if rc:
- return rc
- if not config['quiet']:
- print "upgrade complete"
-
-
-class MasterOptions(MakerBase):
- optFlags = [
- ["force", "f",
- "Re-use an existing directory (will not overwrite master.cfg file)"],
- ]
- optParameters = [
- ["config", "c", "master.cfg", "name of the buildmaster config file"],
- ["log-size", "s", "1000000",
- "size at which to rotate twisted log files"],
- ["log-count", "l", "None",
- "limit the number of kept old twisted log files"],
- ]
- def getSynopsis(self):
- return "Usage: buildbot create-master [options] <basedir>"
-
- longdesc = """
- This command creates a buildmaster working directory and buildbot.tac
- file. The master will live in <dir> and create various files there.
-
- At runtime, the master will read a configuration file (named
- 'master.cfg' by default) in its basedir. This file should contain python
- code which eventually defines a dictionary named 'BuildmasterConfig'.
- The elements of this dictionary are used to configure the Buildmaster.
- See doc/config.xhtml for details about what can be controlled through
- this interface."""
-
- def postOptions(self):
- MakerBase.postOptions(self)
- if not re.match('^\d+$', self['log-size']):
- raise usage.UsageError("log-size parameter needs to be an int")
- if not re.match('^\d+$', self['log-count']) and \
- self['log-count'] != 'None':
- raise usage.UsageError("log-count parameter needs to be an int "+
- " or None")
-
-
-masterTAC = """
-from twisted.application import service
-from buildbot.master import BuildMaster
-
-basedir = r'%(basedir)s'
-configfile = r'%(config)s'
-rotateLength = %(log-size)s
-maxRotatedFiles = %(log-count)s
-
-application = service.Application('buildmaster')
-try:
- from twisted.python.logfile import LogFile
- from twisted.python.log import ILogObserver, FileLogObserver
- logfile = LogFile.fromFullPath("twistd.log", rotateLength=rotateLength,
- maxRotatedFiles=maxRotatedFiles)
- application.setComponent(ILogObserver, FileLogObserver(logfile).emit)
-except ImportError:
- # probably not yet twisted 8.2.0 and beyond, can't set log yet
- pass
-BuildMaster(basedir, configfile).setServiceParent(application)
-
-"""
-
-def createMaster(config):
- m = Maker(config)
- m.mkdir()
- m.chdir()
- contents = masterTAC % config
- m.makeTAC(contents)
- m.sampleconfig(util.sibpath(__file__, "sample.cfg"))
- m.public_html(util.sibpath(__file__, "../status/web/index.html"),
- util.sibpath(__file__, "../status/web/classic.css"),
- util.sibpath(__file__, "../status/web/robots.txt"),
- )
- m.makefile()
-
- if not m.quiet: print "buildmaster configured in %s" % m.basedir
-
-class SlaveOptions(MakerBase):
- optFlags = [
- ["force", "f", "Re-use an existing directory"],
- ]
- optParameters = [
-# ["name", "n", None, "Name for this build slave"],
-# ["passwd", "p", None, "Password for this build slave"],
-# ["basedir", "d", ".", "Base directory to use"],
-# ["master", "m", "localhost:8007",
-# "Location of the buildmaster (host:port)"],
-
- ["keepalive", "k", 600,
- "Interval at which keepalives should be sent (in seconds)"],
- ["usepty", None, 0,
- "(1 or 0) child processes should be run in a pty (default 0)"],
- ["umask", None, "None",
- "controls permissions of generated files. Use --umask=022 to be world-readable"],
- ["maxdelay", None, 300,
- "Maximum time between connection attempts"],
- ["log-size", "s", "1000000",
- "size at which to rotate twisted log files"],
- ["log-count", "l", "None",
- "limit the number of kept old twisted log files"],
- ]
-
- longdesc = """
- This command creates a buildslave working directory and buildbot.tac
- file. The bot will use the <name> and <passwd> arguments to authenticate
- itself when connecting to the master. All commands are run in a
- build-specific subdirectory of <basedir>. <master> is a string of the
- form 'hostname:port', and specifies where the buildmaster can be reached.
-
- <name>, <passwd>, and <master> will be provided by the buildmaster
- administrator for your bot. You must choose <basedir> yourself.
- """
-
- def getSynopsis(self):
- return "Usage: buildbot create-slave [options] <basedir> <master> <name> <passwd>"
-
- def parseArgs(self, *args):
- if len(args) < 4:
- raise usage.UsageError("command needs more arguments")
- basedir, master, name, passwd = args
- self['basedir'] = basedir
- self['master'] = master
- self['name'] = name
- self['passwd'] = passwd
-
- def postOptions(self):
- MakerBase.postOptions(self)
- self['usepty'] = int(self['usepty'])
- self['keepalive'] = int(self['keepalive'])
- self['maxdelay'] = int(self['maxdelay'])
- if self['master'].find(":") == -1:
- raise usage.UsageError("--master must be in the form host:portnum")
- if not re.match('^\d+$', self['log-size']):
- raise usage.UsageError("log-size parameter needs to be an int")
- if not re.match('^\d+$', self['log-count']) and \
- self['log-count'] != 'None':
- raise usage.UsageError("log-count parameter needs to be an int "+
- " or None")
-
-slaveTAC = """
-from twisted.application import service
-from buildbot.slave.bot import BuildSlave
-
-basedir = r'%(basedir)s'
-buildmaster_host = '%(host)s'
-port = %(port)d
-slavename = '%(name)s'
-passwd = '%(passwd)s'
-keepalive = %(keepalive)d
-usepty = %(usepty)d
-umask = %(umask)s
-maxdelay = %(maxdelay)d
-rotateLength = %(log-size)s
-maxRotatedFiles = %(log-count)s
-
-application = service.Application('buildslave')
-try:
- from twisted.python.logfile import LogFile
- from twisted.python.log import ILogObserver, FileLogObserver
- logfile = LogFile.fromFullPath("twistd.log", rotateLength=rotateLength,
- maxRotatedFiles=maxRotatedFiles)
- application.setComponent(ILogObserver, FileLogObserver(logfile).emit)
-except ImportError:
- # probably not yet twisted 8.2.0 and beyond, can't set log yet
- pass
-s = BuildSlave(buildmaster_host, port, slavename, passwd, basedir,
- keepalive, usepty, umask=umask, maxdelay=maxdelay)
-s.setServiceParent(application)
-
-"""
-
-def createSlave(config):
- m = Maker(config)
- m.mkdir()
- m.chdir()
- try:
- master = config['master']
- host, port = re.search(r'(.+):(\d+)', master).groups()
- config['host'] = host
- config['port'] = int(port)
- except:
- print "unparseable master location '%s'" % master
- print " expecting something more like localhost:8007"
- raise
- contents = slaveTAC % config
-
- m.makeTAC(contents, secret=True)
-
- m.makefile()
- m.mkinfo()
-
- if not m.quiet: print "buildslave configured in %s" % m.basedir
-
-
-
-def stop(config, signame="TERM", wait=False):
- import signal
- basedir = config['basedir']
- quiet = config['quiet']
- os.chdir(basedir)
- try:
- f = open("twistd.pid", "rt")
- except:
- raise BuildbotNotRunningError
- pid = int(f.read().strip())
- signum = getattr(signal, "SIG"+signame)
- timer = 0
- os.kill(pid, signum)
- if not wait:
- if not quiet:
- print "sent SIG%s to process" % signame
- return
- time.sleep(0.1)
- while timer < 10:
- # poll once per second until twistd.pid goes away, up to 10 seconds
- try:
- os.kill(pid, 0)
- except OSError:
- if not quiet:
- print "buildbot process %d is dead" % pid
- return
- timer += 1
- time.sleep(1)
- if not quiet:
- print "never saw process go away"
-
-def restart(config):
- quiet = config['quiet']
- from buildbot.scripts.startup import start
- try:
- stop(config, wait=True)
- except BuildbotNotRunningError:
- pass
- if not quiet:
- print "now restarting buildbot process.."
- start(config)
-
-
-def loadOptions(filename="options", here=None, home=None):
- """Find the .buildbot/FILENAME file. Crawl from the current directory up
- towards the root, and also look in ~/.buildbot . The first directory
- that's owned by the user and has the file we're looking for wins. Windows
- skips the owned-by-user test.
-
- @rtype: dict
- @return: a dictionary of names defined in the options file. If no options
- file was found, return an empty dict.
- """
-
- if here is None:
- here = os.getcwd()
- here = os.path.abspath(here)
-
- if home is None:
- if runtime.platformType == 'win32':
- home = os.path.join(os.environ['APPDATA'], "buildbot")
- else:
- home = os.path.expanduser("~/.buildbot")
-
- searchpath = []
- toomany = 20
- while True:
- searchpath.append(os.path.join(here, ".buildbot"))
- next = os.path.dirname(here)
- if next == here:
- break # we've hit the root
- here = next
- toomany -= 1 # just in case
- if toomany == 0:
- raise ValueError("Hey, I seem to have wandered up into the "
- "infinite glories of the heavens. Oops.")
- searchpath.append(home)
-
- localDict = {}
-
- for d in searchpath:
- if os.path.isdir(d):
- if runtime.platformType != 'win32':
- if os.stat(d)[stat.ST_UID] != os.getuid():
- print "skipping %s because you don't own it" % d
- continue # security, skip other people's directories
- optfile = os.path.join(d, filename)
- if os.path.exists(optfile):
- try:
- f = open(optfile, "r")
- options = f.read()
- exec options in localDict
- except:
- print "error while reading %s" % optfile
- raise
- break
-
- for k in localDict.keys():
- if k.startswith("__"):
- del localDict[k]
- return localDict
-
-class StartOptions(MakerBase):
- optFlags = [
- ['quiet', 'q', "Don't display startup log messages"],
- ]
- def getSynopsis(self):
- return "Usage: buildbot start <basedir>"
-
-class StopOptions(MakerBase):
- def getSynopsis(self):
- return "Usage: buildbot stop <basedir>"
-
-class ReconfigOptions(MakerBase):
- optFlags = [
- ['quiet', 'q', "Don't display log messages about reconfiguration"],
- ]
- def getSynopsis(self):
- return "Usage: buildbot reconfig <basedir>"
-
-
-
-class RestartOptions(MakerBase):
- optFlags = [
- ['quiet', 'q', "Don't display startup log messages"],
- ]
- def getSynopsis(self):
- return "Usage: buildbot restart <basedir>"
-
-class DebugClientOptions(usage.Options):
- optFlags = [
- ['help', 'h', "Display this message"],
- ]
- optParameters = [
- ["master", "m", None,
- "Location of the buildmaster's slaveport (host:port)"],
- ["passwd", "p", None, "Debug password to use"],
- ]
-
- def parseArgs(self, *args):
- if len(args) > 0:
- self['master'] = args[0]
- if len(args) > 1:
- self['passwd'] = args[1]
- if len(args) > 2:
- raise usage.UsageError("I wasn't expecting so many arguments")
-
-def debugclient(config):
- from buildbot.clients import debug
- opts = loadOptions()
-
- master = config.get('master')
- if not master:
- master = opts.get('master')
- if master is None:
- raise usage.UsageError("master must be specified: on the command "
- "line or in ~/.buildbot/options")
-
- passwd = config.get('passwd')
- if not passwd:
- passwd = opts.get('debugPassword')
- if passwd is None:
- raise usage.UsageError("passwd must be specified: on the command "
- "line or in ~/.buildbot/options")
-
- d = debug.DebugWidget(master, passwd)
- d.run()
-
-class StatusClientOptions(usage.Options):
- optFlags = [
- ['help', 'h', "Display this message"],
- ]
- optParameters = [
- ["master", "m", None,
- "Location of the buildmaster's status port (host:port)"],
- ]
-
- def parseArgs(self, *args):
- if len(args) > 0:
- self['master'] = args[0]
- if len(args) > 1:
- raise usage.UsageError("I wasn't expecting so many arguments")
-
-def statuslog(config):
- from buildbot.clients import base
- opts = loadOptions()
- master = config.get('master')
- if not master:
- master = opts.get('masterstatus')
- if master is None:
- raise usage.UsageError("master must be specified: on the command "
- "line or in ~/.buildbot/options")
- c = base.TextClient(master)
- c.run()
-
-def statusgui(config):
- from buildbot.clients import gtkPanes
- opts = loadOptions()
- master = config.get('master')
- if not master:
- master = opts.get('masterstatus')
- if master is None:
- raise usage.UsageError("master must be specified: on the command "
- "line or in ~/.buildbot/options")
- c = gtkPanes.GtkClient(master)
- c.run()
-
-class SendChangeOptions(usage.Options):
- optParameters = [
- ("master", "m", None,
- "Location of the buildmaster's PBListener (host:port)"),
- ("username", "u", None, "Username performing the commit"),
- ("branch", "b", None, "Branch specifier"),
- ("category", "c", None, "Category of repository"),
- ("revision", "r", None, "Revision specifier (string)"),
- ("revision_number", "n", None, "Revision specifier (integer)"),
- ("revision_file", None, None, "Filename containing revision spec"),
- ("comments", "m", None, "log message"),
- ("logfile", "F", None,
- "Read the log messages from this file (- for stdin)"),
- ]
- def getSynopsis(self):
- return "Usage: buildbot sendchange [options] filenames.."
- def parseArgs(self, *args):
- self['files'] = args
-
-
-def sendchange(config, runReactor=False):
- """Send a single change to the buildmaster's PBChangeSource. The
- connection will be drpoped as soon as the Change has been sent."""
- from buildbot.clients.sendchange import Sender
-
- opts = loadOptions()
- user = config.get('username', opts.get('username'))
- master = config.get('master', opts.get('master'))
- branch = config.get('branch', opts.get('branch'))
- category = config.get('category', opts.get('category'))
- revision = config.get('revision')
- # SVN and P4 use numeric revisions
- if config.get("revision_number"):
- revision = int(config['revision_number'])
- if config.get("revision_file"):
- revision = open(config["revision_file"],"r").read()
-
- comments = config.get('comments')
- if not comments and config.get('logfile'):
- if config['logfile'] == "-":
- f = sys.stdin
- else:
- f = open(config['logfile'], "rt")
- comments = f.read()
- if comments is None:
- comments = ""
-
- files = config.get('files', [])
-
- assert user, "you must provide a username"
- assert master, "you must provide the master location"
-
- s = Sender(master, user)
- d = s.send(branch, revision, comments, files, category=category)
- if runReactor:
- d.addCallbacks(s.printSuccess, s.printFailure)
- d.addBoth(s.stop)
- s.run()
- return d
-
-
-class ForceOptions(usage.Options):
- optParameters = [
- ["builder", None, None, "which Builder to start"],
- ["branch", None, None, "which branch to build"],
- ["revision", None, None, "which revision to build"],
- ["reason", None, None, "the reason for starting the build"],
- ]
-
- def parseArgs(self, *args):
- args = list(args)
- if len(args) > 0:
- if self['builder'] is not None:
- raise usage.UsageError("--builder provided in two ways")
- self['builder'] = args.pop(0)
- if len(args) > 0:
- if self['reason'] is not None:
- raise usage.UsageError("--reason provided in two ways")
- self['reason'] = " ".join(args)
-
-
-class TryOptions(usage.Options):
- optParameters = [
- ["connect", "c", None,
- "how to reach the buildmaster, either 'ssh' or 'pb'"],
- # for ssh, use --tryhost, --username, and --trydir
- ["tryhost", None, None,
- "the hostname (used by ssh) for the buildmaster"],
- ["trydir", None, None,
- "the directory (on the tryhost) where tryjobs are deposited"],
- ["username", "u", None, "Username performing the trial build"],
- # for PB, use --master, --username, and --passwd
- ["master", "m", None,
- "Location of the buildmaster's PBListener (host:port)"],
- ["passwd", None, None, "password for PB authentication"],
-
- ["diff", None, None,
- "Filename of a patch to use instead of scanning a local tree. Use '-' for stdin."],
- ["patchlevel", "p", 0,
- "Number of slashes to remove from patch pathnames, like the -p option to 'patch'"],
-
- ["baserev", None, None,
- "Base revision to use instead of scanning a local tree."],
-
- ["vc", None, None,
- "The VC system in use, one of: cvs,svn,tla,baz,darcs"],
- ["branch", None, None,
- "The branch in use, for VC systems that can't figure it out"
- " themselves"],
-
- ["builder", "b", None,
- "Run the trial build on this Builder. Can be used multiple times."],
- ["properties", None, None,
- "A set of properties made available in the build environment, format:prop=value,propb=valueb..."],
- ]
-
- optFlags = [
- ["wait", None, "wait until the builds have finished"],
- ["dryrun", 'n', "Gather info, but don't actually submit."],
- ]
-
- def __init__(self):
- super(TryOptions, self).__init__()
- self['builders'] = []
- self['properties'] = {}
-
- def opt_builder(self, option):
- self['builders'].append(option)
-
- def opt_properties(self, option):
- # We need to split the value of this option into a dictionary of properties
- properties = {}
- propertylist = option.split(",")
- for i in range(0,len(propertylist)):
- print propertylist[i]
- splitproperty = propertylist[i].split("=")
- properties[splitproperty[0]] = splitproperty[1]
- self['properties'] = properties
-
- def opt_patchlevel(self, option):
- self['patchlevel'] = int(option)
-
- def getSynopsis(self):
- return "Usage: buildbot try [options]"
-
-def doTry(config):
- from buildbot.scripts import tryclient
- t = tryclient.Try(config)
- t.run()
-
-class TryServerOptions(usage.Options):
- optParameters = [
- ["jobdir", None, None, "the jobdir (maildir) for submitting jobs"],
- ]
-
-def doTryServer(config):
- import md5
- jobdir = os.path.expanduser(config["jobdir"])
- job = sys.stdin.read()
- # now do a 'safecat'-style write to jobdir/tmp, then move atomically to
- # jobdir/new . Rather than come up with a unique name randomly, I'm just
- # going to MD5 the contents and prepend a timestamp.
- timestring = "%d" % time.time()
- jobhash = md5.new(job).hexdigest()
- fn = "%s-%s" % (timestring, jobhash)
- tmpfile = os.path.join(jobdir, "tmp", fn)
- newfile = os.path.join(jobdir, "new", fn)
- f = open(tmpfile, "w")
- f.write(job)
- f.close()
- os.rename(tmpfile, newfile)
-
-
-class CheckConfigOptions(usage.Options):
- optFlags = [
- ['quiet', 'q', "Don't display error messages or tracebacks"],
- ]
-
- def getSynopsis(self):
- return "Usage :buildbot checkconfig [configFile]\n" + \
- " If not specified, 'master.cfg' will be used as 'configFile'"
-
- def parseArgs(self, *args):
- if len(args) >= 1:
- self['configFile'] = args[0]
- else:
- self['configFile'] = 'master.cfg'
-
-
-def doCheckConfig(config):
- quiet = config.get('quiet')
- configFile = config.get('configFile')
- try:
- from buildbot.scripts.checkconfig import ConfigLoader
- ConfigLoader(configFile)
- except:
- if not quiet:
- # Print out the traceback in a nice format
- t, v, tb = sys.exc_info()
- traceback.print_exception(t, v, tb)
- sys.exit(1)
-
- if not quiet:
- print "Config file is good!"
-
-
-class Options(usage.Options):
- synopsis = "Usage: buildbot <command> [command options]"
-
- subCommands = [
- # the following are all admin commands
- ['create-master', None, MasterOptions,
- "Create and populate a directory for a new buildmaster"],
- ['upgrade-master', None, UpgradeMasterOptions,
- "Upgrade an existing buildmaster directory for the current version"],
- ['create-slave', None, SlaveOptions,
- "Create and populate a directory for a new buildslave"],
- ['start', None, StartOptions, "Start a buildmaster or buildslave"],
- ['stop', None, StopOptions, "Stop a buildmaster or buildslave"],
- ['restart', None, RestartOptions,
- "Restart a buildmaster or buildslave"],
-
- ['reconfig', None, ReconfigOptions,
- "SIGHUP a buildmaster to make it re-read the config file"],
- ['sighup', None, ReconfigOptions,
- "SIGHUP a buildmaster to make it re-read the config file"],
-
- ['sendchange', None, SendChangeOptions,
- "Send a change to the buildmaster"],
-
- ['debugclient', None, DebugClientOptions,
- "Launch a small debug panel GUI"],
-
- ['statuslog', None, StatusClientOptions,
- "Emit current builder status to stdout"],
- ['statusgui', None, StatusClientOptions,
- "Display a small window showing current builder status"],
-
- #['force', None, ForceOptions, "Run a build"],
- ['try', None, TryOptions, "Run a build with your local changes"],
-
- ['tryserver', None, TryServerOptions,
- "buildmaster-side 'try' support function, not for users"],
-
- ['checkconfig', None, CheckConfigOptions,
- "test the validity of a master.cfg config file"],
-
- # TODO: 'watch'
- ]
-
- def opt_version(self):
- import buildbot
- print "Buildbot version: %s" % buildbot.version
- usage.Options.opt_version(self)
-
- def opt_verbose(self):
- from twisted.python import log
- log.startLogging(sys.stderr)
-
- def postOptions(self):
- if not hasattr(self, 'subOptions'):
- raise usage.UsageError("must specify a command")
-
-
-def run():
- config = Options()
- try:
- config.parseOptions()
- except usage.error, e:
- print "%s: %s" % (sys.argv[0], e)
- print
- c = getattr(config, 'subOptions', config)
- print str(c)
- sys.exit(1)
-
- command = config.subCommand
- so = config.subOptions
-
- if command == "create-master":
- createMaster(so)
- elif command == "upgrade-master":
- upgradeMaster(so)
- elif command == "create-slave":
- createSlave(so)
- elif command == "start":
- from buildbot.scripts.startup import start
- start(so)
- elif command == "stop":
- stop(so, wait=True)
- elif command == "restart":
- restart(so)
- elif command == "reconfig" or command == "sighup":
- from buildbot.scripts.reconfig import Reconfigurator
- Reconfigurator().run(so)
- elif command == "sendchange":
- sendchange(so, True)
- elif command == "debugclient":
- debugclient(so)
- elif command == "statuslog":
- statuslog(so)
- elif command == "statusgui":
- statusgui(so)
- elif command == "try":
- doTry(so)
- elif command == "tryserver":
- doTryServer(so)
- elif command == "checkconfig":
- doCheckConfig(so)
-
-
diff --git a/buildbot/buildbot/scripts/sample.cfg b/buildbot/buildbot/scripts/sample.cfg
deleted file mode 100644
index b405673..0000000
--- a/buildbot/buildbot/scripts/sample.cfg
+++ /dev/null
@@ -1,175 +0,0 @@
-# -*- python -*-
-# ex: set syntax=python:
-
-# This is a sample buildmaster config file. It must be installed as
-# 'master.cfg' in your buildmaster's base directory (although the filename
-# can be changed with the --basedir option to 'mktap buildbot master').
-
-# It has one job: define a dictionary named BuildmasterConfig. This
-# dictionary has a variety of keys to control different aspects of the
-# buildmaster. They are documented in docs/config.xhtml .
-
-
-# This is the dictionary that the buildmaster pays attention to. We also use
-# a shorter alias to save typing.
-c = BuildmasterConfig = {}
-
-####### BUILDSLAVES
-
-# the 'slaves' list defines the set of allowable buildslaves. Each element is
-# a BuildSlave object, which is created with bot-name, bot-password. These
-# correspond to values given to the buildslave's mktap invocation.
-from buildbot.buildslave import BuildSlave
-c['slaves'] = [BuildSlave("bot1name", "bot1passwd")]
-
-# to limit to two concurrent builds on a slave, use
-# c['slaves'] = [BuildSlave("bot1name", "bot1passwd", max_builds=2)]
-
-
-# 'slavePortnum' defines the TCP port to listen on. This must match the value
-# configured into the buildslaves (with their --master option)
-
-c['slavePortnum'] = 9989
-
-####### CHANGESOURCES
-
-# the 'change_source' setting tells the buildmaster how it should find out
-# about source code changes. Any class which implements IChangeSource can be
-# put here: there are several in buildbot/changes/*.py to choose from.
-
-from buildbot.changes.pb import PBChangeSource
-c['change_source'] = PBChangeSource()
-
-# For example, if you had CVSToys installed on your repository, and your
-# CVSROOT/freshcfg file had an entry like this:
-#pb = ConfigurationSet([
-# (None, None, None, PBService(userpass=('foo', 'bar'), port=4519)),
-# ])
-
-# then you could use the following buildmaster Change Source to subscribe to
-# the FreshCVS daemon and be notified on every commit:
-#
-#from buildbot.changes.freshcvs import FreshCVSSource
-#fc_source = FreshCVSSource("cvs.example.com", 4519, "foo", "bar")
-#c['change_source'] = fc_source
-
-# or, use a PBChangeSource, and then have your repository's commit script run
-# 'buildbot sendchange', or use contrib/svn_buildbot.py, or
-# contrib/arch_buildbot.py :
-#
-#from buildbot.changes.pb import PBChangeSource
-#c['change_source'] = PBChangeSource()
-
-
-####### SCHEDULERS
-
-## configure the Schedulers
-
-from buildbot.scheduler import Scheduler
-c['schedulers'] = []
-c['schedulers'].append(Scheduler(name="all", branch=None,
- treeStableTimer=2*60,
- builderNames=["buildbot-full"]))
-
-
-####### BUILDERS
-
-# the 'builders' list defines the Builders. Each one is configured with a
-# dictionary, using the following keys:
-# name (required): the name used to describe this builder
-# slavename (required): which slave to use (must appear in c['bots'])
-# builddir (required): which subdirectory to run the builder in
-# factory (required): a BuildFactory to define how the build is run
-# periodicBuildTime (optional): if set, force a build every N seconds
-
-# buildbot/process/factory.py provides several BuildFactory classes you can
-# start with, which implement build processes for common targets (GNU
-# autoconf projects, CPAN perl modules, etc). The factory.BuildFactory is the
-# base class, and is configured with a series of BuildSteps. When the build
-# is run, the appropriate buildslave is told to execute each Step in turn.
-
-# the first BuildStep is typically responsible for obtaining a copy of the
-# sources. There are source-obtaining Steps in buildbot/steps/source.py for
-# CVS, SVN, and others.
-
-cvsroot = ":pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot"
-cvsmodule = "buildbot"
-
-from buildbot.process import factory
-from buildbot.steps.source import CVS
-from buildbot.steps.shell import Compile
-from buildbot.steps.python_twisted import Trial
-f1 = factory.BuildFactory()
-f1.addStep(CVS(cvsroot=cvsroot, cvsmodule=cvsmodule, login="", mode="copy"))
-f1.addStep(Compile(command=["python", "./setup.py", "build"]))
-f1.addStep(Trial(testpath="."))
-
-b1 = {'name': "buildbot-full",
- 'slavename': "bot1name",
- 'builddir': "full",
- 'factory': f1,
- }
-c['builders'] = [b1]
-
-
-####### STATUS TARGETS
-
-# 'status' is a list of Status Targets. The results of each build will be
-# pushed to these targets. buildbot/status/*.py has a variety to choose from,
-# including web pages, email senders, and IRC bots.
-
-c['status'] = []
-
-from buildbot.status import html
-c['status'].append(html.WebStatus(http_port=8010))
-
-# from buildbot.status import mail
-# c['status'].append(mail.MailNotifier(fromaddr="buildbot@localhost",
-# extraRecipients=["builds@example.com"],
-# sendToInterestedUsers=False))
-#
-# from buildbot.status import words
-# c['status'].append(words.IRC(host="irc.example.com", nick="bb",
-# channels=["#example"]))
-#
-# from buildbot.status import client
-# c['status'].append(client.PBListener(9988))
-
-
-####### DEBUGGING OPTIONS
-
-# if you set 'debugPassword', then you can connect to the buildmaster with
-# the diagnostic tool in contrib/debugclient.py . From this tool, you can
-# manually force builds and inject changes, which may be useful for testing
-# your buildmaster without actually committing changes to your repository (or
-# before you have a functioning 'sources' set up). The debug tool uses the
-# same port number as the slaves do: 'slavePortnum'.
-
-#c['debugPassword'] = "debugpassword"
-
-# if you set 'manhole', you can ssh into the buildmaster and get an
-# interactive python shell, which may be useful for debugging buildbot
-# internals. It is probably only useful for buildbot developers. You can also
-# use an authorized_keys file, or plain telnet.
-#from buildbot import manhole
-#c['manhole'] = manhole.PasswordManhole("tcp:9999:interface=127.0.0.1",
-# "admin", "password")
-
-
-####### PROJECT IDENTITY
-
-# the 'projectName' string will be used to describe the project that this
-# buildbot is working on. For example, it is used as the title of the
-# waterfall HTML page. The 'projectURL' string will be used to provide a link
-# from buildbot HTML pages to your project's home page.
-
-c['projectName'] = "Buildbot"
-c['projectURL'] = "http://buildbot.sourceforge.net/"
-
-# the 'buildbotURL' string should point to the location where the buildbot's
-# internal web server (usually the html.Waterfall page) is visible. This
-# typically uses the port number set in the Waterfall 'status' entry, but
-# with an externally-visible host name which the buildbot cannot figure out
-# without some help.
-
-c['buildbotURL'] = "http://localhost:8010/"
diff --git a/buildbot/buildbot/scripts/startup.py b/buildbot/buildbot/scripts/startup.py
deleted file mode 100644
index 9472af2..0000000
--- a/buildbot/buildbot/scripts/startup.py
+++ /dev/null
@@ -1,128 +0,0 @@
-
-import os, sys, time
-
-class Follower:
- def follow(self):
- from twisted.internet import reactor
- from buildbot.scripts.reconfig import LogWatcher
- self.rc = 0
- print "Following twistd.log until startup finished.."
- lw = LogWatcher("twistd.log")
- d = lw.start()
- d.addCallbacks(self._success, self._failure)
- reactor.run()
- return self.rc
-
- def _success(self, processtype):
- from twisted.internet import reactor
- print "The %s appears to have (re)started correctly." % processtype
- self.rc = 0
- reactor.stop()
-
- def _failure(self, why):
- from twisted.internet import reactor
- from buildbot.scripts.logwatcher import BuildmasterTimeoutError, \
- ReconfigError, BuildslaveTimeoutError, BuildSlaveDetectedError
- if why.check(BuildmasterTimeoutError):
- print """
-The buildmaster took more than 10 seconds to start, so we were unable to
-confirm that it started correctly. Please 'tail twistd.log' and look for a
-line that says 'configuration update complete' to verify correct startup.
-"""
- elif why.check(BuildslaveTimeoutError):
- print """
-The buildslave took more than 10 seconds to start and/or connect to the
-buildmaster, so we were unable to confirm that it started and connected
-correctly. Please 'tail twistd.log' and look for a line that says 'message
-from master: attached' to verify correct startup. If you see a bunch of
-messages like 'will retry in 6 seconds', your buildslave might not have the
-correct hostname or portnumber for the buildmaster, or the buildmaster might
-not be running. If you see messages like
- 'Failure: twisted.cred.error.UnauthorizedLogin'
-then your buildslave might be using the wrong botname or password. Please
-correct these problems and then restart the buildslave.
-"""
- elif why.check(ReconfigError):
- print """
-The buildmaster appears to have encountered an error in the master.cfg config
-file during startup. It is probably running with an empty configuration right
-now. Please inspect and fix master.cfg, then restart the buildmaster.
-"""
- elif why.check(BuildSlaveDetectedError):
- print """
-Buildslave is starting up, not following logfile.
-"""
- else:
- print """
-Unable to confirm that the buildmaster started correctly. You may need to
-stop it, fix the config file, and restart.
-"""
- print why
- self.rc = 1
- reactor.stop()
-
-
-def start(config):
- os.chdir(config['basedir'])
- if (not os.path.exists("buildbot.tac") and
- not os.path.exists("Makefile.buildbot")):
- print "This doesn't look like a buildbot base directory:"
- print "No buildbot.tac or Makefile.buildbot file."
- print "Giving up!"
- sys.exit(1)
- if config['quiet']:
- return launch(config)
-
- # we probably can't do this os.fork under windows
- from twisted.python.runtime import platformType
- if platformType == "win32":
- return launch(config)
-
- # fork a child to launch the daemon, while the parent process tails the
- # logfile
- if os.fork():
- # this is the parent
- rc = Follower().follow()
- sys.exit(rc)
- # this is the child: give the logfile-watching parent a chance to start
- # watching it before we start the daemon
- time.sleep(0.2)
- launch(config)
-
-def launch(config):
- sys.path.insert(0, os.path.abspath(os.getcwd()))
- if os.path.exists("/usr/bin/make") and os.path.exists("Makefile.buildbot"):
- # Preferring the Makefile lets slave admins do useful things like set
- # up environment variables for the buildslave.
- cmd = "make -f Makefile.buildbot start"
- if not config['quiet']:
- print cmd
- os.system(cmd)
- else:
- # see if we can launch the application without actually having to
- # spawn twistd, since spawning processes correctly is a real hassle
- # on windows.
- from twisted.python.runtime import platformType
- argv = ["twistd",
- "--no_save",
- "--logfile=twistd.log", # windows doesn't use the same default
- "--python=buildbot.tac"]
- if platformType == "win32":
- argv.append("--reactor=win32")
- sys.argv = argv
-
- # this is copied from bin/twistd. twisted-2.0.0 through 2.4.0 use
- # _twistw.run . Twisted-2.5.0 and later use twistd.run, even for
- # windows.
- from twisted import __version__
- major, minor, ignored = __version__.split(".", 2)
- major = int(major)
- minor = int(minor)
- if (platformType == "win32" and (major == 2 and minor < 5)):
- from twisted.scripts import _twistw
- run = _twistw.run
- else:
- from twisted.scripts import twistd
- run = twistd.run
- run()
-
diff --git a/buildbot/buildbot/scripts/tryclient.py b/buildbot/buildbot/scripts/tryclient.py
deleted file mode 100644
index b1b7658..0000000
--- a/buildbot/buildbot/scripts/tryclient.py
+++ /dev/null
@@ -1,707 +0,0 @@
-# -*- test-case-name: buildbot.test.test_scheduler,buildbot.test.test_vc -*-
-
-import sys, os, re, time, random
-from twisted.internet import utils, protocol, defer, reactor, task
-from twisted.spread import pb
-from twisted.cred import credentials
-from twisted.python import log
-from twisted.python.procutils import which
-
-from buildbot.sourcestamp import SourceStamp
-from buildbot.scripts import runner
-from buildbot.util import now
-from buildbot.status import builder
-
-class SourceStampExtractor:
-
- def __init__(self, treetop, branch):
- self.treetop = treetop
- self.branch = branch
- self.exe = which(self.vcexe)[0]
-
- def dovc(self, cmd):
- """This accepts the arguments of a command, without the actual
- command itself."""
- env = os.environ.copy()
- env['LC_ALL'] = "C"
- d = utils.getProcessOutputAndValue(self.exe, cmd, env=env,
- path=self.treetop)
- d.addCallback(self._didvc, cmd)
- return d
- def _didvc(self, res, cmd):
- (stdout, stderr, code) = res
- # 'bzr diff' sets rc=1 if there were any differences. tla, baz, and
- # cvs do something similar, so don't bother requring rc=0.
- return stdout
-
- def get(self):
- """Return a Deferred that fires with a SourceStamp instance."""
- d = self.getBaseRevision()
- d.addCallback(self.getPatch)
- d.addCallback(self.done)
- return d
- def readPatch(self, res, patchlevel):
- self.patch = (patchlevel, res)
- def done(self, res):
- # TODO: figure out the branch too
- ss = SourceStamp(self.branch, self.baserev, self.patch)
- return ss
-
-class CVSExtractor(SourceStampExtractor):
- patchlevel = 0
- vcexe = "cvs"
- def getBaseRevision(self):
- # this depends upon our local clock and the repository's clock being
- # reasonably synchronized with each other. We express everything in
- # UTC because the '%z' format specifier for strftime doesn't always
- # work.
- self.baserev = time.strftime("%Y-%m-%d %H:%M:%S +0000",
- time.gmtime(now()))
- return defer.succeed(None)
-
- def getPatch(self, res):
- # the -q tells CVS to not announce each directory as it works
- if self.branch is not None:
- # 'cvs diff' won't take both -r and -D at the same time (it
- # ignores the -r). As best I can tell, there is no way to make
- # cvs give you a diff relative to a timestamp on the non-trunk
- # branch. A bare 'cvs diff' will tell you about the changes
- # relative to your checked-out versions, but I know of no way to
- # find out what those checked-out versions are.
- raise RuntimeError("Sorry, CVS 'try' builds don't work with "
- "branches")
- args = ['-q', 'diff', '-u', '-D', self.baserev]
- d = self.dovc(args)
- d.addCallback(self.readPatch, self.patchlevel)
- return d
-
-class SVNExtractor(SourceStampExtractor):
- patchlevel = 0
- vcexe = "svn"
-
- def getBaseRevision(self):
- d = self.dovc(["status", "-u"])
- d.addCallback(self.parseStatus)
- return d
- def parseStatus(self, res):
- # svn shows the base revision for each file that has been modified or
- # which needs an update. You can update each file to a different
- # version, so each file is displayed with its individual base
- # revision. It also shows the repository-wide latest revision number
- # on the last line ("Status against revision: \d+").
-
- # for our purposes, we use the latest revision number as the "base"
- # revision, and get a diff against that. This means we will get
- # reverse-diffs for local files that need updating, but the resulting
- # tree will still be correct. The only weirdness is that the baserev
- # that we emit may be different than the version of the tree that we
- # first checked out.
-
- # to do this differently would probably involve scanning the revision
- # numbers to find the max (or perhaps the min) revision, and then
- # using that as a base.
-
- for line in res.split("\n"):
- m = re.search(r'^Status against revision:\s+(\d+)', line)
- if m:
- self.baserev = int(m.group(1))
- return
- raise IndexError("Could not find 'Status against revision' in "
- "SVN output: %s" % res)
- def getPatch(self, res):
- d = self.dovc(["diff", "-r%d" % self.baserev])
- d.addCallback(self.readPatch, self.patchlevel)
- return d
-
-class BazExtractor(SourceStampExtractor):
- patchlevel = 1
- vcexe = "baz"
- def getBaseRevision(self):
- d = self.dovc(["tree-id"])
- d.addCallback(self.parseStatus)
- return d
- def parseStatus(self, res):
- tid = res.strip()
- slash = tid.index("/")
- dd = tid.rindex("--")
- self.branch = tid[slash+1:dd]
- self.baserev = tid[dd+2:]
- def getPatch(self, res):
- d = self.dovc(["diff"])
- d.addCallback(self.readPatch, self.patchlevel)
- return d
-
-class TlaExtractor(SourceStampExtractor):
- patchlevel = 1
- vcexe = "tla"
- def getBaseRevision(self):
- # 'tla logs --full' gives us ARCHIVE/BRANCH--REVISION
- # 'tla logs' gives us REVISION
- d = self.dovc(["logs", "--full", "--reverse"])
- d.addCallback(self.parseStatus)
- return d
- def parseStatus(self, res):
- tid = res.split("\n")[0].strip()
- slash = tid.index("/")
- dd = tid.rindex("--")
- self.branch = tid[slash+1:dd]
- self.baserev = tid[dd+2:]
-
- def getPatch(self, res):
- d = self.dovc(["changes", "--diffs"])
- d.addCallback(self.readPatch, self.patchlevel)
- return d
-
-class BzrExtractor(SourceStampExtractor):
- patchlevel = 0
- vcexe = "bzr"
- def getBaseRevision(self):
- d = self.dovc(["version-info"])
- d.addCallback(self.get_revision_number)
- return d
- def get_revision_number(self, out):
- for line in out.split("\n"):
- colon = line.find(":")
- if colon != -1:
- key, value = line[:colon], line[colon+2:]
- if key == "revno":
- self.baserev = int(value)
- return
- raise ValueError("unable to find revno: in bzr output: '%s'" % out)
-
- def getPatch(self, res):
- d = self.dovc(["diff"])
- d.addCallback(self.readPatch, self.patchlevel)
- return d
-
-class MercurialExtractor(SourceStampExtractor):
- patchlevel = 1
- vcexe = "hg"
- def getBaseRevision(self):
- d = self.dovc(["identify"])
- d.addCallback(self.parseStatus)
- return d
- def parseStatus(self, output):
- m = re.search(r'^(\w+)', output)
- self.baserev = m.group(0)
- def getPatch(self, res):
- d = self.dovc(["diff"])
- d.addCallback(self.readPatch, self.patchlevel)
- return d
-
-class DarcsExtractor(SourceStampExtractor):
- patchlevel = 1
- vcexe = "darcs"
- def getBaseRevision(self):
- d = self.dovc(["changes", "--context"])
- d.addCallback(self.parseStatus)
- return d
- def parseStatus(self, res):
- self.baserev = res # the whole context file
- def getPatch(self, res):
- d = self.dovc(["diff", "-u"])
- d.addCallback(self.readPatch, self.patchlevel)
- return d
-
-class GitExtractor(SourceStampExtractor):
- patchlevel = 1
- vcexe = "git"
-
- def getBaseRevision(self):
- d = self.dovc(["branch", "--no-color", "-v", "--no-abbrev"])
- d.addCallback(self.parseStatus)
- return d
-
- def readConfig(self):
- d = self.dovc(["config", "-l"])
- d.addCallback(self.parseConfig)
- return d
-
- def parseConfig(self, res):
- git_config = {}
- for l in res.split("\n"):
- if l.strip():
- parts = l.strip().split("=", 2)
- git_config[parts[0]] = parts[1]
-
- # If we're tracking a remote, consider that the base.
- remote = git_config.get("branch." + self.branch + ".remote")
- ref = git_config.get("branch." + self.branch + ".merge")
- if remote and ref:
- remote_branch = ref.split("/", 3)[-1]
- d = self.dovc(["rev-parse", remote + "/" + remote_branch])
- d.addCallback(self.override_baserev)
- return d
-
- def override_baserev(self, res):
- self.baserev = res.strip()
-
- def parseStatus(self, res):
- # The current branch is marked by '*' at the start of the
- # line, followed by the branch name and the SHA1.
- #
- # Branch names may contain pretty much anything but whitespace.
- m = re.search(r'^\* (\S+)\s+([0-9a-f]{40})', res, re.MULTILINE)
- if m:
- self.baserev = m.group(2)
- # If a branch is specified, parse out the rev it points to
- # and extract the local name (assuming it has a slash).
- # This may break if someone specifies the name of a local
- # branch that has a slash in it and has no corresponding
- # remote branch (or something similarly contrived).
- if self.branch:
- d = self.dovc(["rev-parse", self.branch])
- if '/' in self.branch:
- self.branch = self.branch.split('/', 1)[1]
- d.addCallback(self.override_baserev)
- return d
- else:
- self.branch = m.group(1)
- return self.readConfig()
- raise IndexError("Could not find current GIT branch: %s" % res)
-
- def getPatch(self, res):
- d = self.dovc(["diff", self.baserev])
- d.addCallback(self.readPatch, self.patchlevel)
- return d
-
-def getSourceStamp(vctype, treetop, branch=None):
- if vctype == "cvs":
- e = CVSExtractor(treetop, branch)
- elif vctype == "svn":
- e = SVNExtractor(treetop, branch)
- elif vctype == "baz":
- e = BazExtractor(treetop, branch)
- elif vctype == "bzr":
- e = BzrExtractor(treetop, branch)
- elif vctype == "tla":
- e = TlaExtractor(treetop, branch)
- elif vctype == "hg":
- e = MercurialExtractor(treetop, branch)
- elif vctype == "darcs":
- e = DarcsExtractor(treetop, branch)
- elif vctype == "git":
- e = GitExtractor(treetop, branch)
- else:
- raise KeyError("unknown vctype '%s'" % vctype)
- return e.get()
-
-
-def ns(s):
- return "%d:%s," % (len(s), s)
-
-def createJobfile(bsid, branch, baserev, patchlevel, diff, builderNames):
- job = ""
- job += ns("1")
- job += ns(bsid)
- job += ns(branch)
- job += ns(str(baserev))
- job += ns("%d" % patchlevel)
- job += ns(diff)
- for bn in builderNames:
- job += ns(bn)
- return job
-
-def getTopdir(topfile, start=None):
- """walk upwards from the current directory until we find this topfile"""
- if not start:
- start = os.getcwd()
- here = start
- toomany = 20
- while toomany > 0:
- if os.path.exists(os.path.join(here, topfile)):
- return here
- next = os.path.dirname(here)
- if next == here:
- break # we've hit the root
- here = next
- toomany -= 1
- raise ValueError("Unable to find topfile '%s' anywhere from %s upwards"
- % (topfile, start))
-
-class RemoteTryPP(protocol.ProcessProtocol):
- def __init__(self, job):
- self.job = job
- self.d = defer.Deferred()
- def connectionMade(self):
- self.transport.write(self.job)
- self.transport.closeStdin()
- def outReceived(self, data):
- sys.stdout.write(data)
- def errReceived(self, data):
- sys.stderr.write(data)
- def processEnded(self, status_object):
- sig = status_object.value.signal
- rc = status_object.value.exitCode
- if sig != None or rc != 0:
- self.d.errback(RuntimeError("remote 'buildbot tryserver' failed"
- ": sig=%s, rc=%s" % (sig, rc)))
- return
- self.d.callback((sig, rc))
-
-class BuildSetStatusGrabber:
- retryCount = 5 # how many times to we try to grab the BuildSetStatus?
- retryDelay = 3 # seconds to wait between attempts
-
- def __init__(self, status, bsid):
- self.status = status
- self.bsid = bsid
-
- def grab(self):
- # return a Deferred that either fires with the BuildSetStatus
- # reference or errbacks because we were unable to grab it
- self.d = defer.Deferred()
- # wait a second before querying to give the master's maildir watcher
- # a chance to see the job
- reactor.callLater(1, self.go)
- return self.d
-
- def go(self, dummy=None):
- if self.retryCount == 0:
- raise RuntimeError("couldn't find matching buildset")
- self.retryCount -= 1
- d = self.status.callRemote("getBuildSets")
- d.addCallback(self._gotSets)
-
- def _gotSets(self, buildsets):
- for bs,bsid in buildsets:
- if bsid == self.bsid:
- # got it
- self.d.callback(bs)
- return
- d = defer.Deferred()
- d.addCallback(self.go)
- reactor.callLater(self.retryDelay, d.callback, None)
-
-
-class Try(pb.Referenceable):
- buildsetStatus = None
- quiet = False
-
- def __init__(self, config):
- self.config = config
- self.opts = runner.loadOptions()
- self.connect = self.getopt('connect', 'try_connect')
- assert self.connect, "you must specify a connect style: ssh or pb"
- self.builderNames = self.getopt('builders', 'try_builders')
-
- def getopt(self, config_name, options_name, default=None):
- value = self.config.get(config_name)
- if value is None or value == []:
- value = self.opts.get(options_name)
- if value is None or value == []:
- value = default
- return value
-
- def createJob(self):
- # returns a Deferred which fires when the job parameters have been
- # created
- opts = self.opts
- # generate a random (unique) string. It would make sense to add a
- # hostname and process ID here, but a) I suspect that would cause
- # windows portability problems, and b) really this is good enough
- self.bsid = "%d-%s" % (time.time(), random.randint(0, 1000000))
-
- # common options
- branch = self.getopt("branch", "try_branch")
-
- difffile = self.config.get("diff")
- if difffile:
- baserev = self.config.get("baserev")
- if difffile == "-":
- diff = sys.stdin.read()
- else:
- diff = open(difffile,"r").read()
- patch = (self.config['patchlevel'], diff)
- ss = SourceStamp(branch, baserev, patch)
- d = defer.succeed(ss)
- else:
- vc = self.getopt("vc", "try_vc")
- if vc in ("cvs", "svn"):
- # we need to find the tree-top
- topdir = self.getopt("try_topdir", "try_topdir")
- if topdir:
- treedir = os.path.expanduser(topdir)
- else:
- topfile = self.getopt("try-topfile", "try_topfile")
- treedir = getTopdir(topfile)
- else:
- treedir = os.getcwd()
- d = getSourceStamp(vc, treedir, branch)
- d.addCallback(self._createJob_1)
- return d
-
- def _createJob_1(self, ss):
- self.sourcestamp = ss
- if self.connect == "ssh":
- patchlevel, diff = ss.patch
- revspec = ss.revision
- if revspec is None:
- revspec = ""
- self.jobfile = createJobfile(self.bsid,
- ss.branch or "", revspec,
- patchlevel, diff,
- self.builderNames)
-
- def fakeDeliverJob(self):
- # Display the job to be delivered, but don't perform delivery.
- ss = self.sourcestamp
- print ("Job:\n\tBranch: %s\n\tRevision: %s\n\tBuilders: %s\n%s"
- % (ss.branch,
- ss.revision,
- self.builderNames,
- ss.patch[1]))
- d = defer.Deferred()
- d.callback(True)
- return d
-
- def deliverJob(self):
- # returns a Deferred that fires when the job has been delivered
- opts = self.opts
-
- if self.connect == "ssh":
- tryhost = self.getopt("tryhost", "try_host")
- tryuser = self.getopt("username", "try_username")
- trydir = self.getopt("trydir", "try_dir")
-
- argv = ["ssh", "-l", tryuser, tryhost,
- "buildbot", "tryserver", "--jobdir", trydir]
- # now run this command and feed the contents of 'job' into stdin
-
- pp = RemoteTryPP(self.jobfile)
- p = reactor.spawnProcess(pp, argv[0], argv, os.environ)
- d = pp.d
- return d
- if self.connect == "pb":
- user = self.getopt("username", "try_username")
- passwd = self.getopt("passwd", "try_password")
- master = self.getopt("master", "try_master")
- tryhost, tryport = master.split(":")
- tryport = int(tryport)
- f = pb.PBClientFactory()
- d = f.login(credentials.UsernamePassword(user, passwd))
- reactor.connectTCP(tryhost, tryport, f)
- d.addCallback(self._deliverJob_pb)
- return d
- raise RuntimeError("unknown connecttype '%s', should be 'ssh' or 'pb'"
- % self.connect)
-
- def _deliverJob_pb(self, remote):
- ss = self.sourcestamp
-
- d = remote.callRemote("try",
- ss.branch,
- ss.revision,
- ss.patch,
- self.builderNames,
- self.config.get('properties', {}))
- d.addCallback(self._deliverJob_pb2)
- return d
- def _deliverJob_pb2(self, status):
- self.buildsetStatus = status
- return status
-
- def getStatus(self):
- # returns a Deferred that fires when the builds have finished, and
- # may emit status messages while we wait
- wait = bool(self.getopt("wait", "try_wait", False))
- if not wait:
- # TODO: emit the URL where they can follow the builds. This
- # requires contacting the Status server over PB and doing
- # getURLForThing() on the BuildSetStatus. To get URLs for
- # individual builds would require we wait for the builds to
- # start.
- print "not waiting for builds to finish"
- return
- d = self.running = defer.Deferred()
- if self.buildsetStatus:
- self._getStatus_1()
- # contact the status port
- # we're probably using the ssh style
- master = self.getopt("master", "masterstatus")
- host, port = master.split(":")
- port = int(port)
- self.announce("contacting the status port at %s:%d" % (host, port))
- f = pb.PBClientFactory()
- creds = credentials.UsernamePassword("statusClient", "clientpw")
- d = f.login(creds)
- reactor.connectTCP(host, port, f)
- d.addCallback(self._getStatus_ssh_1)
- return self.running
-
- def _getStatus_ssh_1(self, remote):
- # find a remotereference to the corresponding BuildSetStatus object
- self.announce("waiting for job to be accepted")
- g = BuildSetStatusGrabber(remote, self.bsid)
- d = g.grab()
- d.addCallback(self._getStatus_1)
- return d
-
- def _getStatus_1(self, res=None):
- if res:
- self.buildsetStatus = res
- # gather the set of BuildRequests
- d = self.buildsetStatus.callRemote("getBuildRequests")
- d.addCallback(self._getStatus_2)
-
- def _getStatus_2(self, brs):
- self.builderNames = []
- self.buildRequests = {}
-
- # self.builds holds the current BuildStatus object for each one
- self.builds = {}
-
- # self.outstanding holds the list of builderNames which haven't
- # finished yet
- self.outstanding = []
-
- # self.results holds the list of build results. It holds a tuple of
- # (result, text)
- self.results = {}
-
- # self.currentStep holds the name of the Step that each build is
- # currently running
- self.currentStep = {}
-
- # self.ETA holds the expected finishing time (absolute time since
- # epoch)
- self.ETA = {}
-
- for n,br in brs:
- self.builderNames.append(n)
- self.buildRequests[n] = br
- self.builds[n] = None
- self.outstanding.append(n)
- self.results[n] = [None,None]
- self.currentStep[n] = None
- self.ETA[n] = None
- # get new Builds for this buildrequest. We follow each one until
- # it finishes or is interrupted.
- br.callRemote("subscribe", self)
-
- # now that those queries are in transit, we can start the
- # display-status-every-30-seconds loop
- self.printloop = task.LoopingCall(self.printStatus)
- self.printloop.start(3, now=False)
-
-
- # these methods are invoked by the status objects we've subscribed to
-
- def remote_newbuild(self, bs, builderName):
- if self.builds[builderName]:
- self.builds[builderName].callRemote("unsubscribe", self)
- self.builds[builderName] = bs
- bs.callRemote("subscribe", self, 20)
- d = bs.callRemote("waitUntilFinished")
- d.addCallback(self._build_finished, builderName)
-
- def remote_stepStarted(self, buildername, build, stepname, step):
- self.currentStep[buildername] = stepname
-
- def remote_stepFinished(self, buildername, build, stepname, step, results):
- pass
-
- def remote_buildETAUpdate(self, buildername, build, eta):
- self.ETA[buildername] = now() + eta
-
- def _build_finished(self, bs, builderName):
- # we need to collect status from the newly-finished build. We don't
- # remove the build from self.outstanding until we've collected
- # everything we want.
- self.builds[builderName] = None
- self.ETA[builderName] = None
- self.currentStep[builderName] = "finished"
- d = bs.callRemote("getResults")
- d.addCallback(self._build_finished_2, bs, builderName)
- return d
- def _build_finished_2(self, results, bs, builderName):
- self.results[builderName][0] = results
- d = bs.callRemote("getText")
- d.addCallback(self._build_finished_3, builderName)
- return d
- def _build_finished_3(self, text, builderName):
- self.results[builderName][1] = text
-
- self.outstanding.remove(builderName)
- if not self.outstanding:
- # all done
- return self.statusDone()
-
- def printStatus(self):
- names = self.buildRequests.keys()
- names.sort()
- for n in names:
- if n not in self.outstanding:
- # the build is finished, and we have results
- code,text = self.results[n]
- t = builder.Results[code]
- if text:
- t += " (%s)" % " ".join(text)
- elif self.builds[n]:
- t = self.currentStep[n] or "building"
- if self.ETA[n]:
- t += " [ETA %ds]" % (self.ETA[n] - now())
- else:
- t = "no build"
- self.announce("%s: %s" % (n, t))
- self.announce("")
-
- def statusDone(self):
- self.printloop.stop()
- print "All Builds Complete"
- # TODO: include a URL for all failing builds
- names = self.buildRequests.keys()
- names.sort()
- happy = True
- for n in names:
- code,text = self.results[n]
- t = "%s: %s" % (n, builder.Results[code])
- if text:
- t += " (%s)" % " ".join(text)
- print t
- if self.results[n] != builder.SUCCESS:
- happy = False
-
- if happy:
- self.exitcode = 0
- else:
- self.exitcode = 1
- self.running.callback(self.exitcode)
-
- def announce(self, message):
- if not self.quiet:
- print message
-
- def run(self):
- # we can't do spawnProcess until we're inside reactor.run(), so get
- # funky
- print "using '%s' connect method" % self.connect
- self.exitcode = 0
- d = defer.Deferred()
- d.addCallback(lambda res: self.createJob())
- d.addCallback(lambda res: self.announce("job created"))
- deliver = self.deliverJob
- if bool(self.config.get("dryrun")):
- deliver = self.fakeDeliverJob
- d.addCallback(lambda res: deliver())
- d.addCallback(lambda res: self.announce("job has been delivered"))
- d.addCallback(lambda res: self.getStatus())
- d.addErrback(log.err)
- d.addCallback(self.cleanup)
- d.addCallback(lambda res: reactor.stop())
-
- reactor.callLater(0, d.callback, None)
- reactor.run()
- sys.exit(self.exitcode)
-
- def logErr(self, why):
- log.err(why)
- print "error during 'try' processing"
- print why
-
- def cleanup(self, res=None):
- if self.buildsetStatus:
- self.buildsetStatus.broker.transport.loseConnection()
-
-
-
diff --git a/buildbot/buildbot/slave/__init__.py b/buildbot/buildbot/slave/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/buildbot/buildbot/slave/__init__.py
+++ /dev/null
diff --git a/buildbot/buildbot/slave/bot.py b/buildbot/buildbot/slave/bot.py
deleted file mode 100644
index 4184d3d..0000000
--- a/buildbot/buildbot/slave/bot.py
+++ /dev/null
@@ -1,510 +0,0 @@
-
-import os.path
-
-import buildbot
-
-from twisted.spread import pb
-from twisted.python import log
-from twisted.internet import reactor, defer
-from twisted.application import service, internet
-from twisted.cred import credentials
-
-from buildbot.util import now
-from buildbot.pbutil import ReconnectingPBClientFactory
-from buildbot.slave import registry
-# make sure the standard commands get registered. This import is performed
-# for its side-effects.
-from buildbot.slave import commands
-# and make pyflakes think we aren't being stupid
-commands = commands
-
-class NoCommandRunning(pb.Error):
- pass
-class WrongCommandRunning(pb.Error):
- pass
-class UnknownCommand(pb.Error):
- pass
-
-class Master:
- def __init__(self, host, port, username, password):
- self.host = host
- self.port = port
- self.username = username
- self.password = password
-
-class SlaveBuild:
-
- """This is an object that can hold state from one step to another in the
- same build. All SlaveCommands have access to it.
- """
- def __init__(self, builder):
- self.builder = builder
-
-class SlaveBuilder(pb.Referenceable, service.Service):
-
- """This is the local representation of a single Builder: it handles a
- single kind of build (like an all-warnings build). It has a name and a
- home directory. The rest of its behavior is determined by the master.
- """
-
- stopCommandOnShutdown = True
-
- # remote is a ref to the Builder object on the master side, and is set
- # when they attach. We use it to detect when the connection to the master
- # is severed.
- remote = None
-
- # .build points to a SlaveBuild object, a new one for each build
- build = None
-
- # .command points to a SlaveCommand instance, and is set while the step
- # is running. We use it to implement the stopBuild method.
- command = None
-
- # .remoteStep is a ref to the master-side BuildStep object, and is set
- # when the step is started
- remoteStep = None
-
- def __init__(self, name, not_really):
- #service.Service.__init__(self) # Service has no __init__ method
- self.setName(name)
- self.not_really = not_really
-
- def __repr__(self):
- return "<SlaveBuilder '%s' at %d>" % (self.name, id(self))
-
- def setServiceParent(self, parent):
- service.Service.setServiceParent(self, parent)
- self.bot = self.parent
- # note that self.parent will go away when the buildmaster's config
- # file changes and this Builder is removed (possibly because it has
- # been changed, so the Builder will be re-added again in a moment).
- # This may occur during a build, while a step is running.
-
- def setBuilddir(self, builddir):
- assert self.parent
- self.builddir = builddir
- self.basedir = os.path.join(self.bot.basedir, self.builddir)
- if not os.path.isdir(self.basedir):
- os.mkdir(self.basedir)
-
- def stopService(self):
- service.Service.stopService(self)
- if self.stopCommandOnShutdown:
- self.stopCommand()
-
- def activity(self):
- bot = self.parent
- if bot:
- buildslave = bot.parent
- if buildslave:
- bf = buildslave.bf
- bf.activity()
-
- def remote_setMaster(self, remote):
- self.remote = remote
- self.remote.notifyOnDisconnect(self.lostRemote)
- def remote_print(self, message):
- log.msg("SlaveBuilder.remote_print(%s): message from master: %s" %
- (self.name, message))
- if message == "ping":
- return self.remote_ping()
-
- def remote_ping(self):
- log.msg("SlaveBuilder.remote_ping(%s)" % self)
- if self.bot and self.bot.parent:
- debugOpts = self.bot.parent.debugOpts
- if debugOpts.get("stallPings"):
- log.msg(" debug_stallPings")
- timeout, timers = debugOpts["stallPings"]
- d = defer.Deferred()
- t = reactor.callLater(timeout, d.callback, None)
- timers.append(t)
- return d
- if debugOpts.get("failPingOnce"):
- log.msg(" debug_failPingOnce")
- class FailPingError(pb.Error): pass
- del debugOpts['failPingOnce']
- raise FailPingError("debug_failPingOnce means we should fail")
-
- def lostRemote(self, remote):
- log.msg("lost remote")
- self.remote = None
-
- def lostRemoteStep(self, remotestep):
- log.msg("lost remote step")
- self.remoteStep = None
- if self.stopCommandOnShutdown:
- self.stopCommand()
-
- # the following are Commands that can be invoked by the master-side
- # Builder
- def remote_startBuild(self):
- """This is invoked before the first step of any new build is run. It
- creates a new SlaveBuild object, which holds slave-side state from
- one step to the next."""
- self.build = SlaveBuild(self)
- log.msg("%s.startBuild" % self)
-
- def remote_startCommand(self, stepref, stepId, command, args):
- """
- This gets invoked by L{buildbot.process.step.RemoteCommand.start}, as
- part of various master-side BuildSteps, to start various commands
- that actually do the build. I return nothing. Eventually I will call
- .commandComplete() to notify the master-side RemoteCommand that I'm
- done.
- """
-
- self.activity()
-
- if self.command:
- log.msg("leftover command, dropping it")
- self.stopCommand()
-
- try:
- factory, version = registry.commandRegistry[command]
- except KeyError:
- raise UnknownCommand, "unrecognized SlaveCommand '%s'" % command
- self.command = factory(self, stepId, args)
-
- log.msg(" startCommand:%s [id %s]" % (command,stepId))
- self.remoteStep = stepref
- self.remoteStep.notifyOnDisconnect(self.lostRemoteStep)
- d = self.command.doStart()
- d.addCallback(lambda res: None)
- d.addBoth(self.commandComplete)
- return None
-
- def remote_interruptCommand(self, stepId, why):
- """Halt the current step."""
- log.msg("asked to interrupt current command: %s" % why)
- self.activity()
- if not self.command:
- # TODO: just log it, a race could result in their interrupting a
- # command that wasn't actually running
- log.msg(" .. but none was running")
- return
- self.command.doInterrupt()
-
-
- def stopCommand(self):
- """Make any currently-running command die, with no further status
- output. This is used when the buildslave is shutting down or the
- connection to the master has been lost. Interrupt the command,
- silence it, and then forget about it."""
- if not self.command:
- return
- log.msg("stopCommand: halting current command %s" % self.command)
- self.command.doInterrupt() # shut up! and die!
- self.command = None # forget you!
-
- # sendUpdate is invoked by the Commands we spawn
- def sendUpdate(self, data):
- """This sends the status update to the master-side
- L{buildbot.process.step.RemoteCommand} object, giving it a sequence
- number in the process. It adds the update to a queue, and asks the
- master to acknowledge the update so it can be removed from that
- queue."""
-
- if not self.running:
- # .running comes from service.Service, and says whether the
- # service is running or not. If we aren't running, don't send any
- # status messages.
- return
- # the update[1]=0 comes from the leftover 'updateNum', which the
- # master still expects to receive. Provide it to avoid significant
- # interoperability issues between new slaves and old masters.
- if self.remoteStep:
- update = [data, 0]
- updates = [update]
- d = self.remoteStep.callRemote("update", updates)
- d.addCallback(self.ackUpdate)
- d.addErrback(self._ackFailed, "SlaveBuilder.sendUpdate")
-
- def ackUpdate(self, acknum):
- self.activity() # update the "last activity" timer
-
- def ackComplete(self, dummy):
- self.activity() # update the "last activity" timer
-
- def _ackFailed(self, why, where):
- log.msg("SlaveBuilder._ackFailed:", where)
- #log.err(why) # we don't really care
-
-
- # this is fired by the Deferred attached to each Command
- def commandComplete(self, failure):
- if failure:
- log.msg("SlaveBuilder.commandFailed", self.command)
- log.err(failure)
- # failure, if present, is a failure.Failure. To send it across
- # the wire, we must turn it into a pb.CopyableFailure.
- failure = pb.CopyableFailure(failure)
- failure.unsafeTracebacks = True
- else:
- # failure is None
- log.msg("SlaveBuilder.commandComplete", self.command)
- self.command = None
- if not self.running:
- log.msg(" but we weren't running, quitting silently")
- return
- if self.remoteStep:
- self.remoteStep.dontNotifyOnDisconnect(self.lostRemoteStep)
- d = self.remoteStep.callRemote("complete", failure)
- d.addCallback(self.ackComplete)
- d.addErrback(self._ackFailed, "sendComplete")
- self.remoteStep = None
-
-
- def remote_shutdown(self):
- print "slave shutting down on command from master"
- reactor.stop()
-
-
-class Bot(pb.Referenceable, service.MultiService):
- """I represent the slave-side bot."""
- usePTY = None
- name = "bot"
-
- def __init__(self, basedir, usePTY, not_really=0):
- service.MultiService.__init__(self)
- self.basedir = basedir
- self.usePTY = usePTY
- self.not_really = not_really
- self.builders = {}
-
- def startService(self):
- assert os.path.isdir(self.basedir)
- service.MultiService.startService(self)
-
- def remote_getDirs(self):
- return filter(lambda d: os.path.isdir(d), os.listdir(self.basedir))
-
- def remote_getCommands(self):
- commands = {}
- for name, (factory, version) in registry.commandRegistry.items():
- commands[name] = version
- return commands
-
- def remote_setBuilderList(self, wanted):
- retval = {}
- wanted_dirs = ["info"]
- for (name, builddir) in wanted:
- wanted_dirs.append(builddir)
- b = self.builders.get(name, None)
- if b:
- if b.builddir != builddir:
- log.msg("changing builddir for builder %s from %s to %s" \
- % (name, b.builddir, builddir))
- b.setBuilddir(builddir)
- else:
- b = SlaveBuilder(name, self.not_really)
- b.usePTY = self.usePTY
- b.setServiceParent(self)
- b.setBuilddir(builddir)
- self.builders[name] = b
- retval[name] = b
- for name in self.builders.keys():
- if not name in map(lambda a: a[0], wanted):
- log.msg("removing old builder %s" % name)
- self.builders[name].disownServiceParent()
- del(self.builders[name])
-
- for d in os.listdir(self.basedir):
- if os.path.isdir(d):
- if d not in wanted_dirs:
- log.msg("I have a leftover directory '%s' that is not "
- "being used by the buildmaster: you can delete "
- "it now" % d)
- return retval
-
- def remote_print(self, message):
- log.msg("message from master:", message)
-
- def remote_getSlaveInfo(self):
- """This command retrieves data from the files in SLAVEDIR/info/* and
- sends the contents to the buildmaster. These are used to describe
- the slave and its configuration, and should be created and
- maintained by the slave administrator. They will be retrieved each
- time the master-slave connection is established.
- """
-
- files = {}
- basedir = os.path.join(self.basedir, "info")
- if not os.path.isdir(basedir):
- return files
- for f in os.listdir(basedir):
- filename = os.path.join(basedir, f)
- if os.path.isfile(filename):
- files[f] = open(filename, "r").read()
- return files
-
-class BotFactory(ReconnectingPBClientFactory):
- # 'keepaliveInterval' serves two purposes. The first is to keep the
- # connection alive: it guarantees that there will be at least some
- # traffic once every 'keepaliveInterval' seconds, which may help keep an
- # interposed NAT gateway from dropping the address mapping because it
- # thinks the connection has been abandoned. The second is to put an upper
- # limit on how long the buildmaster might have gone away before we notice
- # it. For this second purpose, we insist upon seeing *some* evidence of
- # the buildmaster at least once every 'keepaliveInterval' seconds.
- keepaliveInterval = None # None = do not use keepalives
-
- # 'keepaliveTimeout' seconds before the interval expires, we will send a
- # keepalive request, both to add some traffic to the connection, and to
- # prompt a response from the master in case all our builders are idle. We
- # don't insist upon receiving a timely response from this message: a slow
- # link might put the request at the wrong end of a large build message.
- keepaliveTimeout = 30 # how long we will go without a response
-
- # 'maxDelay' determines the maximum amount of time the slave will wait
- # between connection retries
- maxDelay = 300
-
- keepaliveTimer = None
- activityTimer = None
- lastActivity = 0
- unsafeTracebacks = 1
- perspective = None
-
- def __init__(self, keepaliveInterval, keepaliveTimeout, maxDelay):
- ReconnectingPBClientFactory.__init__(self)
- self.maxDelay = maxDelay
- self.keepaliveInterval = keepaliveInterval
- self.keepaliveTimeout = keepaliveTimeout
-
- def startedConnecting(self, connector):
- ReconnectingPBClientFactory.startedConnecting(self, connector)
- self.connector = connector
-
- def gotPerspective(self, perspective):
- ReconnectingPBClientFactory.gotPerspective(self, perspective)
- self.perspective = perspective
- try:
- perspective.broker.transport.setTcpKeepAlive(1)
- except:
- log.msg("unable to set SO_KEEPALIVE")
- if not self.keepaliveInterval:
- self.keepaliveInterval = 10*60
- self.activity()
- if self.keepaliveInterval:
- log.msg("sending application-level keepalives every %d seconds" \
- % self.keepaliveInterval)
- self.startTimers()
-
- def clientConnectionFailed(self, connector, reason):
- self.connector = None
- ReconnectingPBClientFactory.clientConnectionFailed(self,
- connector, reason)
-
- def clientConnectionLost(self, connector, reason):
- self.connector = None
- self.stopTimers()
- self.perspective = None
- ReconnectingPBClientFactory.clientConnectionLost(self,
- connector, reason)
-
- def startTimers(self):
- assert self.keepaliveInterval
- assert not self.keepaliveTimer
- assert not self.activityTimer
- # Insist that doKeepalive fires before checkActivity. Really, it
- # needs to happen at least one RTT beforehand.
- assert self.keepaliveInterval > self.keepaliveTimeout
-
- # arrange to send a keepalive a little while before our deadline
- when = self.keepaliveInterval - self.keepaliveTimeout
- self.keepaliveTimer = reactor.callLater(when, self.doKeepalive)
- # and check for activity too
- self.activityTimer = reactor.callLater(self.keepaliveInterval,
- self.checkActivity)
-
- def stopTimers(self):
- if self.keepaliveTimer:
- self.keepaliveTimer.cancel()
- self.keepaliveTimer = None
- if self.activityTimer:
- self.activityTimer.cancel()
- self.activityTimer = None
-
- def activity(self, res=None):
- self.lastActivity = now()
-
- def doKeepalive(self):
- # send the keepalive request. If it fails outright, the connection
- # was already dropped, so just log and ignore.
- self.keepaliveTimer = None
- log.msg("sending app-level keepalive")
- d = self.perspective.callRemote("keepalive")
- d.addCallback(self.activity)
- d.addErrback(self.keepaliveLost)
-
- def keepaliveLost(self, f):
- log.msg("BotFactory.keepaliveLost")
-
- def checkActivity(self):
- self.activityTimer = None
- if self.lastActivity + self.keepaliveInterval < now():
- log.msg("BotFactory.checkActivity: nothing from master for "
- "%d secs" % (now() - self.lastActivity))
- self.perspective.broker.transport.loseConnection()
- return
- self.startTimers()
-
- def stopFactory(self):
- ReconnectingPBClientFactory.stopFactory(self)
- self.stopTimers()
-
-
-class BuildSlave(service.MultiService):
- botClass = Bot
-
- # debugOpts is a dictionary used during unit tests.
-
- # debugOpts['stallPings'] can be set to a tuple of (timeout, []). Any
- # calls to remote_print will stall for 'timeout' seconds before
- # returning. The DelayedCalls used to implement this are stashed in the
- # list so they can be cancelled later.
-
- # debugOpts['failPingOnce'] can be set to True to make the slaveping fail
- # exactly once.
-
- def __init__(self, buildmaster_host, port, name, passwd, basedir,
- keepalive, usePTY, keepaliveTimeout=30, umask=None,
- maxdelay=300, debugOpts={}):
- log.msg("Creating BuildSlave -- buildbot.version: %s" % buildbot.version)
- service.MultiService.__init__(self)
- self.debugOpts = debugOpts.copy()
- bot = self.botClass(basedir, usePTY)
- bot.setServiceParent(self)
- self.bot = bot
- if keepalive == 0:
- keepalive = None
- self.umask = umask
- bf = self.bf = BotFactory(keepalive, keepaliveTimeout, maxdelay)
- bf.startLogin(credentials.UsernamePassword(name, passwd), client=bot)
- self.connection = c = internet.TCPClient(buildmaster_host, port, bf)
- c.setServiceParent(self)
-
- def waitUntilDisconnected(self):
- # utility method for testing. Returns a Deferred that will fire when
- # we lose the connection to the master.
- if not self.bf.perspective:
- return defer.succeed(None)
- d = defer.Deferred()
- self.bf.perspective.notifyOnDisconnect(lambda res: d.callback(None))
- return d
-
- def startService(self):
- if self.umask is not None:
- os.umask(self.umask)
- service.MultiService.startService(self)
-
- def stopService(self):
- self.bf.continueTrying = 0
- self.bf.stopTrying()
- service.MultiService.stopService(self)
- # now kill the TCP connection
- # twisted >2.0.1 does this for us, and leaves _connection=None
- if self.connection._connection:
- self.connection._connection.disconnect()
diff --git a/buildbot/buildbot/slave/commands.py b/buildbot/buildbot/slave/commands.py
deleted file mode 100644
index 45b9e99..0000000
--- a/buildbot/buildbot/slave/commands.py
+++ /dev/null
@@ -1,2788 +0,0 @@
-# -*- test-case-name: buildbot.test.test_slavecommand -*-
-
-import os, re, signal, shutil, types, time
-from stat import ST_CTIME, ST_MTIME, ST_SIZE
-
-from zope.interface import implements
-from twisted.internet.protocol import ProcessProtocol
-from twisted.internet import reactor, defer, task
-from twisted.python import log, failure, runtime
-from twisted.python.procutils import which
-
-from buildbot.slave.interfaces import ISlaveCommand
-from buildbot.slave.registry import registerSlaveCommand
-
-# this used to be a CVS $-style "Revision" auto-updated keyword, but since I
-# moved to Darcs as the primary repository, this is updated manually each
-# time this file is changed. The last cvs_ver that was here was 1.51 .
-command_version = "2.8"
-
-# version history:
-# >=1.17: commands are interruptable
-# >=1.28: Arch understands 'revision', added Bazaar
-# >=1.33: Source classes understand 'retry'
-# >=1.39: Source classes correctly handle changes in branch (except Git)
-# Darcs accepts 'revision' (now all do but Git) (well, and P4Sync)
-# Arch/Baz should accept 'build-config'
-# >=1.51: (release 0.7.3)
-# >= 2.1: SlaveShellCommand now accepts 'initial_stdin', 'keep_stdin_open',
-# and 'logfiles'. It now sends 'log' messages in addition to
-# stdout/stdin/header/rc. It acquired writeStdin/closeStdin methods,
-# but these are not remotely callable yet.
-# (not externally visible: ShellCommandPP has writeStdin/closeStdin.
-# ShellCommand accepts new arguments (logfiles=, initialStdin=,
-# keepStdinOpen=) and no longer accepts stdin=)
-# (release 0.7.4)
-# >= 2.2: added monotone, uploadFile, and downloadFile (release 0.7.5)
-# >= 2.3: added bzr (release 0.7.6)
-# >= 2.4: Git understands 'revision' and branches
-# >= 2.5: workaround added for remote 'hg clone --rev REV' when hg<0.9.2
-# >= 2.6: added uploadDirectory
-# >= 2.7: added usePTY option to SlaveShellCommand
-# >= 2.8: added username and password args to SVN class
-
-class CommandInterrupted(Exception):
- pass
-class TimeoutError(Exception):
- pass
-
-class Obfuscated:
- """An obfuscated string in a command"""
- def __init__(self, real, fake):
- self.real = real
- self.fake = fake
-
- def __str__(self):
- return self.fake
-
- def __repr__(self):
- return `self.fake`
-
- def get_real(command):
- rv = command
- if type(command) == types.ListType:
- rv = []
- for elt in command:
- if isinstance(elt, Obfuscated):
- rv.append(elt.real)
- else:
- rv.append(elt)
- return rv
- get_real = staticmethod(get_real)
-
- def get_fake(command):
- rv = command
- if type(command) == types.ListType:
- rv = []
- for elt in command:
- if isinstance(elt, Obfuscated):
- rv.append(elt.fake)
- else:
- rv.append(elt)
- return rv
- get_fake = staticmethod(get_fake)
-
-class AbandonChain(Exception):
- """A series of chained steps can raise this exception to indicate that
- one of the intermediate ShellCommands has failed, such that there is no
- point in running the remainder. 'rc' should be the non-zero exit code of
- the failing ShellCommand."""
-
- def __repr__(self):
- return "<AbandonChain rc=%s>" % self.args[0]
-
-def getCommand(name):
- possibles = which(name)
- if not possibles:
- raise RuntimeError("Couldn't find executable for '%s'" % name)
- return possibles[0]
-
-def rmdirRecursive(dir):
- """This is a replacement for shutil.rmtree that works better under
- windows. Thanks to Bear at the OSAF for the code."""
- if not os.path.exists(dir):
- return
-
- if os.path.islink(dir):
- os.remove(dir)
- return
-
- # Verify the directory is read/write/execute for the current user
- os.chmod(dir, 0700)
-
- for name in os.listdir(dir):
- full_name = os.path.join(dir, name)
- # on Windows, if we don't have write permission we can't remove
- # the file/directory either, so turn that on
- if os.name == 'nt':
- if not os.access(full_name, os.W_OK):
- # I think this is now redundant, but I don't have an NT
- # machine to test on, so I'm going to leave it in place
- # -warner
- os.chmod(full_name, 0600)
-
- if os.path.isdir(full_name):
- rmdirRecursive(full_name)
- else:
- os.chmod(full_name, 0700)
- os.remove(full_name)
- os.rmdir(dir)
-
-class ShellCommandPP(ProcessProtocol):
- debug = False
-
- def __init__(self, command):
- self.command = command
- self.pending_stdin = ""
- self.stdin_finished = False
-
- def writeStdin(self, data):
- assert not self.stdin_finished
- if self.connected:
- self.transport.write(data)
- else:
- self.pending_stdin += data
-
- def closeStdin(self):
- if self.connected:
- if self.debug: log.msg(" closing stdin")
- self.transport.closeStdin()
- self.stdin_finished = True
-
- def connectionMade(self):
- if self.debug:
- log.msg("ShellCommandPP.connectionMade")
- if not self.command.process:
- if self.debug:
- log.msg(" assigning self.command.process: %s" %
- (self.transport,))
- self.command.process = self.transport
-
- # TODO: maybe we shouldn't close stdin when using a PTY. I can't test
- # this yet, recent debian glibc has a bug which causes thread-using
- # test cases to SIGHUP trial, and the workaround is to either run
- # the whole test with /bin/sh -c " ".join(argv) (way gross) or to
- # not use a PTY. Once the bug is fixed, I'll be able to test what
- # happens when you close stdin on a pty. My concern is that it will
- # SIGHUP the child (since we are, in a sense, hanging up on them).
- # But it may well be that keeping stdout open prevents the SIGHUP
- # from being sent.
- #if not self.command.usePTY:
-
- if self.pending_stdin:
- if self.debug: log.msg(" writing to stdin")
- self.transport.write(self.pending_stdin)
- if self.stdin_finished:
- if self.debug: log.msg(" closing stdin")
- self.transport.closeStdin()
-
- def outReceived(self, data):
- if self.debug:
- log.msg("ShellCommandPP.outReceived")
- self.command.addStdout(data)
-
- def errReceived(self, data):
- if self.debug:
- log.msg("ShellCommandPP.errReceived")
- self.command.addStderr(data)
-
- def processEnded(self, status_object):
- if self.debug:
- log.msg("ShellCommandPP.processEnded", status_object)
- # status_object is a Failure wrapped around an
- # error.ProcessTerminated or and error.ProcessDone.
- # requires twisted >= 1.0.4 to overcome a bug in process.py
- sig = status_object.value.signal
- rc = status_object.value.exitCode
- self.command.finished(sig, rc)
-
-class LogFileWatcher:
- POLL_INTERVAL = 2
-
- def __init__(self, command, name, logfile):
- self.command = command
- self.name = name
- self.logfile = logfile
- log.msg("LogFileWatcher created to watch %s" % logfile)
- # we are created before the ShellCommand starts. If the logfile we're
- # supposed to be watching already exists, record its size and
- # ctime/mtime so we can tell when it starts to change.
- self.old_logfile_stats = self.statFile()
- self.started = False
-
- # every 2 seconds we check on the file again
- self.poller = task.LoopingCall(self.poll)
-
- def start(self):
- self.poller.start(self.POLL_INTERVAL).addErrback(self._cleanupPoll)
-
- def _cleanupPoll(self, err):
- log.err(err, msg="Polling error")
- self.poller = None
-
- def stop(self):
- self.poll()
- if self.poller is not None:
- self.poller.stop()
- if self.started:
- self.f.close()
-
- def statFile(self):
- if os.path.exists(self.logfile):
- s = os.stat(self.logfile)
- return (s[ST_CTIME], s[ST_MTIME], s[ST_SIZE])
- return None
-
- def poll(self):
- if not self.started:
- s = self.statFile()
- if s == self.old_logfile_stats:
- return # not started yet
- if not s:
- # the file was there, but now it's deleted. Forget about the
- # initial state, clearly the process has deleted the logfile
- # in preparation for creating a new one.
- self.old_logfile_stats = None
- return # no file to work with
- self.f = open(self.logfile, "rb")
- self.started = True
- self.f.seek(self.f.tell(), 0)
- while True:
- data = self.f.read(10000)
- if not data:
- return
- self.command.addLogfile(self.name, data)
-
-
-class ShellCommand:
- # This is a helper class, used by SlaveCommands to run programs in a
- # child shell.
-
- notreally = False
- BACKUP_TIMEOUT = 5
- KILL = "KILL"
- CHUNK_LIMIT = 128*1024
-
- # For sending elapsed time:
- startTime = None
- elapsedTime = None
- # I wish we had easy access to CLOCK_MONOTONIC in Python:
- # http://www.opengroup.org/onlinepubs/000095399/functions/clock_getres.html
- # Then changes to the system clock during a run wouldn't effect the "elapsed
- # time" results.
-
- def __init__(self, builder, command,
- workdir, environ=None,
- sendStdout=True, sendStderr=True, sendRC=True,
- timeout=None, initialStdin=None, keepStdinOpen=False,
- keepStdout=False, keepStderr=False, logEnviron=True,
- logfiles={}, usePTY="slave-config"):
- """
-
- @param keepStdout: if True, we keep a copy of all the stdout text
- that we've seen. This copy is available in
- self.stdout, which can be read after the command
- has finished.
- @param keepStderr: same, for stderr
-
- @param usePTY: "slave-config" -> use the SlaveBuilder's usePTY;
- otherwise, true to use a PTY, false to not use a PTY.
- """
-
- self.builder = builder
- self.command = Obfuscated.get_real(command)
- self.fake_command = Obfuscated.get_fake(command)
- self.sendStdout = sendStdout
- self.sendStderr = sendStderr
- self.sendRC = sendRC
- self.logfiles = logfiles
- self.workdir = workdir
- self.environ = os.environ.copy()
- if environ:
- if environ.has_key('PYTHONPATH'):
- ppath = environ['PYTHONPATH']
- # Need to do os.pathsep translation. We could either do that
- # by replacing all incoming ':'s with os.pathsep, or by
- # accepting lists. I like lists better.
- if not isinstance(ppath, str):
- # If it's not a string, treat it as a sequence to be
- # turned in to a string.
- ppath = os.pathsep.join(ppath)
-
- if self.environ.has_key('PYTHONPATH'):
- # special case, prepend the builder's items to the
- # existing ones. This will break if you send over empty
- # strings, so don't do that.
- ppath = ppath + os.pathsep + self.environ['PYTHONPATH']
-
- environ['PYTHONPATH'] = ppath
-
- self.environ.update(environ)
- self.initialStdin = initialStdin
- self.keepStdinOpen = keepStdinOpen
- self.logEnviron = logEnviron
- self.timeout = timeout
- self.timer = None
- self.keepStdout = keepStdout
- self.keepStderr = keepStderr
-
-
- if usePTY == "slave-config":
- self.usePTY = self.builder.usePTY
- else:
- self.usePTY = usePTY
-
- # usePTY=True is a convenience for cleaning up all children and
- # grandchildren of a hung command. Fall back to usePTY=False on systems
- # and in situations where ptys cause problems. PTYs are posix-only,
- # and for .closeStdin to matter, we must use a pipe, not a PTY
- if runtime.platformType != "posix" or initialStdin is not None:
- if self.usePTY and usePTY != "slave-config":
- self.sendStatus({'header': "WARNING: disabling usePTY for this command"})
- self.usePTY = False
-
- self.logFileWatchers = []
- for name,filename in self.logfiles.items():
- w = LogFileWatcher(self, name,
- os.path.join(self.workdir, filename))
- self.logFileWatchers.append(w)
-
- def __repr__(self):
- return "<slavecommand.ShellCommand '%s'>" % self.fake_command
-
- def sendStatus(self, status):
- self.builder.sendUpdate(status)
-
- def start(self):
- # return a Deferred which fires (with the exit code) when the command
- # completes
- if self.keepStdout:
- self.stdout = ""
- if self.keepStderr:
- self.stderr = ""
- self.deferred = defer.Deferred()
- try:
- self._startCommand()
- except:
- log.msg("error in ShellCommand._startCommand")
- log.err()
- # pretend it was a shell error
- self.deferred.errback(AbandonChain(-1))
- return self.deferred
-
- def _startCommand(self):
- # ensure workdir exists
- if not os.path.isdir(self.workdir):
- os.makedirs(self.workdir)
- log.msg("ShellCommand._startCommand")
- if self.notreally:
- self.sendStatus({'header': "command '%s' in dir %s" % \
- (self.fake_command, self.workdir)})
- self.sendStatus({'header': "(not really)\n"})
- self.finished(None, 0)
- return
-
- self.pp = ShellCommandPP(self)
-
- if type(self.command) in types.StringTypes:
- if runtime.platformType == 'win32':
- argv = os.environ['COMSPEC'].split() # allow %COMSPEC% to have args
- if '/c' not in argv: argv += ['/c']
- argv += [self.command]
- else:
- # for posix, use /bin/sh. for other non-posix, well, doesn't
- # hurt to try
- argv = ['/bin/sh', '-c', self.command]
- display = self.fake_command
- else:
- if runtime.platformType == 'win32':
- argv = os.environ['COMSPEC'].split() # allow %COMSPEC% to have args
- if '/c' not in argv: argv += ['/c']
- argv += list(self.command)
- else:
- argv = self.command
- display = " ".join(self.fake_command)
-
- # $PWD usually indicates the current directory; spawnProcess may not
- # update this value, though, so we set it explicitly here.
- self.environ['PWD'] = os.path.abspath(self.workdir)
-
- # self.stdin is handled in ShellCommandPP.connectionMade
-
- # first header line is the command in plain text, argv joined with
- # spaces. You should be able to cut-and-paste this into a shell to
- # obtain the same results. If there are spaces in the arguments, too
- # bad.
- log.msg(" " + display)
- self.sendStatus({'header': display+"\n"})
-
- # then comes the secondary information
- msg = " in dir %s" % (self.workdir,)
- if self.timeout:
- msg += " (timeout %d secs)" % (self.timeout,)
- log.msg(" " + msg)
- self.sendStatus({'header': msg+"\n"})
-
- msg = " watching logfiles %s" % (self.logfiles,)
- log.msg(" " + msg)
- self.sendStatus({'header': msg+"\n"})
-
- # then the obfuscated command array for resolving unambiguity
- msg = " argv: %s" % (self.fake_command,)
- log.msg(" " + msg)
- self.sendStatus({'header': msg+"\n"})
-
- # then the environment, since it sometimes causes problems
- if self.logEnviron:
- msg = " environment:\n"
- env_names = self.environ.keys()
- env_names.sort()
- for name in env_names:
- msg += " %s=%s\n" % (name, self.environ[name])
- log.msg(" environment: %s" % (self.environ,))
- self.sendStatus({'header': msg})
-
- if self.initialStdin:
- msg = " writing %d bytes to stdin" % len(self.initialStdin)
- log.msg(" " + msg)
- self.sendStatus({'header': msg+"\n"})
-
- if self.keepStdinOpen:
- msg = " leaving stdin open"
- else:
- msg = " closing stdin"
- log.msg(" " + msg)
- self.sendStatus({'header': msg+"\n"})
-
- msg = " using PTY: %s" % bool(self.usePTY)
- log.msg(" " + msg)
- self.sendStatus({'header': msg+"\n"})
-
- # this will be buffered until connectionMade is called
- if self.initialStdin:
- self.pp.writeStdin(self.initialStdin)
- if not self.keepStdinOpen:
- self.pp.closeStdin()
-
- # win32eventreactor's spawnProcess (under twisted <= 2.0.1) returns
- # None, as opposed to all the posixbase-derived reactors (which
- # return the new Process object). This is a nuisance. We can make up
- # for it by having the ProcessProtocol give us their .transport
- # attribute after they get one. I'd prefer to get it from
- # spawnProcess because I'm concerned about returning from this method
- # without having a valid self.process to work with. (if kill() were
- # called right after we return, but somehow before connectionMade
- # were called, then kill() would blow up).
- self.process = None
- self.startTime = time.time()
- p = reactor.spawnProcess(self.pp, argv[0], argv,
- self.environ,
- self.workdir,
- usePTY=self.usePTY)
- # connectionMade might have been called during spawnProcess
- if not self.process:
- self.process = p
-
- # connectionMade also closes stdin as long as we're not using a PTY.
- # This is intended to kill off inappropriately interactive commands
- # better than the (long) hung-command timeout. ProcessPTY should be
- # enhanced to allow the same childFDs argument that Process takes,
- # which would let us connect stdin to /dev/null .
-
- if self.timeout:
- self.timer = reactor.callLater(self.timeout, self.doTimeout)
-
- for w in self.logFileWatchers:
- w.start()
-
-
- def _chunkForSend(self, data):
- # limit the chunks that we send over PB to 128k, since it has a
- # hardwired string-size limit of 640k.
- LIMIT = self.CHUNK_LIMIT
- for i in range(0, len(data), LIMIT):
- yield data[i:i+LIMIT]
-
- def addStdout(self, data):
- if self.sendStdout:
- for chunk in self._chunkForSend(data):
- self.sendStatus({'stdout': chunk})
- if self.keepStdout:
- self.stdout += data
- if self.timer:
- self.timer.reset(self.timeout)
-
- def addStderr(self, data):
- if self.sendStderr:
- for chunk in self._chunkForSend(data):
- self.sendStatus({'stderr': chunk})
- if self.keepStderr:
- self.stderr += data
- if self.timer:
- self.timer.reset(self.timeout)
-
- def addLogfile(self, name, data):
- for chunk in self._chunkForSend(data):
- self.sendStatus({'log': (name, chunk)})
- if self.timer:
- self.timer.reset(self.timeout)
-
- def finished(self, sig, rc):
- self.elapsedTime = time.time() - self.startTime
- log.msg("command finished with signal %s, exit code %s, elapsedTime: %0.6f" % (sig,rc,self.elapsedTime))
- for w in self.logFileWatchers:
- # this will send the final updates
- w.stop()
- if sig is not None:
- rc = -1
- if self.sendRC:
- if sig is not None:
- self.sendStatus(
- {'header': "process killed by signal %d\n" % sig})
- self.sendStatus({'rc': rc})
- self.sendStatus({'header': "elapsedTime=%0.6f\n" % self.elapsedTime})
- if self.timer:
- self.timer.cancel()
- self.timer = None
- d = self.deferred
- self.deferred = None
- if d:
- d.callback(rc)
- else:
- log.msg("Hey, command %s finished twice" % self)
-
- def failed(self, why):
- log.msg("ShellCommand.failed: command failed: %s" % (why,))
- if self.timer:
- self.timer.cancel()
- self.timer = None
- d = self.deferred
- self.deferred = None
- if d:
- d.errback(why)
- else:
- log.msg("Hey, command %s finished twice" % self)
-
- def doTimeout(self):
- self.timer = None
- msg = "command timed out: %d seconds without output" % self.timeout
- self.kill(msg)
-
- def kill(self, msg):
- # This may be called by the timeout, or when the user has decided to
- # abort this build.
- if self.timer:
- self.timer.cancel()
- self.timer = None
- if hasattr(self.process, "pid"):
- msg += ", killing pid %d" % self.process.pid
- log.msg(msg)
- self.sendStatus({'header': "\n" + msg + "\n"})
-
- hit = 0
- if runtime.platformType == "posix":
- try:
- # really want to kill off all child processes too. Process
- # Groups are ideal for this, but that requires
- # spawnProcess(usePTY=1). Try both ways in case process was
- # not started that way.
-
- # the test suite sets self.KILL=None to tell us we should
- # only pretend to kill the child. This lets us test the
- # backup timer.
-
- sig = None
- if self.KILL is not None:
- sig = getattr(signal, "SIG"+ self.KILL, None)
-
- if self.KILL == None:
- log.msg("self.KILL==None, only pretending to kill child")
- elif sig is None:
- log.msg("signal module is missing SIG%s" % self.KILL)
- elif not hasattr(os, "kill"):
- log.msg("os module is missing the 'kill' function")
- else:
- log.msg("trying os.kill(-pid, %d)" % (sig,))
- # TODO: maybe use os.killpg instead of a negative pid?
- os.kill(-self.process.pid, sig)
- log.msg(" signal %s sent successfully" % sig)
- hit = 1
- except OSError:
- # probably no-such-process, maybe because there is no process
- # group
- pass
- if not hit:
- try:
- if self.KILL is None:
- log.msg("self.KILL==None, only pretending to kill child")
- else:
- log.msg("trying process.signalProcess('KILL')")
- self.process.signalProcess(self.KILL)
- log.msg(" signal %s sent successfully" % (self.KILL,))
- hit = 1
- except OSError:
- # could be no-such-process, because they finished very recently
- pass
- if not hit:
- log.msg("signalProcess/os.kill failed both times")
-
- if runtime.platformType == "posix":
- # we only do this under posix because the win32eventreactor
- # blocks here until the process has terminated, while closing
- # stderr. This is weird.
- self.pp.transport.loseConnection()
-
- # finished ought to be called momentarily. Just in case it doesn't,
- # set a timer which will abandon the command.
- self.timer = reactor.callLater(self.BACKUP_TIMEOUT,
- self.doBackupTimeout)
-
    def doBackupTimeout(self):
        """Backup timer fired: our kill() attempt did not make the process
        exit. Give up waiting, report a fake rc=-1, and fail the command."""
        log.msg("we tried to kill the process, and it wouldn't die.."
                " finish anyway")
        self.timer = None
        self.sendStatus({'header': "SIGKILL failed to kill process\n"})
        if self.sendRC:
            self.sendStatus({'header': "using fake rc=-1\n"})
            self.sendStatus({'rc': -1})
        self.failed(TimeoutError("SIGKILL failed to kill process"))
-
-
    def writeStdin(self, data):
        """Write *data* to the child's stdin (proxied to our ProcessProtocol)."""
        self.pp.writeStdin(data)
-
    def closeStdin(self):
        """Close the child's stdin (proxied to our ProcessProtocol)."""
        self.pp.closeStdin()
-
-
class Command:
    """This class defines one command that can be invoked by the build master.
    The command is executed on the slave side, and always sends back a
    completion message when it finishes. It may also send intermediate status
    as it runs (by calling builder.sendStatus). Some commands can be
    interrupted (either by the build master or a local timeout), in which
    case the step is expected to complete normally with a status message that
    indicates an error occurred.

    These commands are used by BuildSteps on the master side. Each kind of
    BuildStep uses a single Command. The slave must implement all the
    Commands required by the set of BuildSteps used for any given build:
    this is checked at startup time.

    All Commands are constructed with the same signature:
     c = CommandClass(builder, args)
    where 'builder' is the parent SlaveBuilder object, and 'args' is a
    dict that is interpreted per-command.

    The setup(args) method is available for setup, and is run from __init__.

    The Command is started with start(). This method must be implemented in a
    subclass, and it should return a Deferred. When your step is done, you
    should fire the Deferred (the results are not used). If the command is
    interrupted, it should fire the Deferred anyway.

    While the command runs, it may send status messages back to the
    buildmaster by calling self.sendStatus(statusdict). The statusdict is
    interpreted by the master-side BuildStep however it likes.

    A separate completion message is sent when the deferred fires, which
    indicates that the Command has finished, but does not carry any status
    data. If the Command needs to return an exit code of some sort, that
    should be sent as a regular status message before the deferred is fired.
    Once builder.commandComplete has been run, no more status messages may be
    sent.

    If interrupt() is called, the Command should attempt to shut down as
    quickly as possible. Child processes should be killed, new ones should
    not be started. The Command should send some kind of error status update,
    then complete as usual by firing the Deferred.

    .interrupted should be set by interrupt(), and can be tested to avoid
    sending multiple error status messages.

    If .running is False, the bot is shutting down (or has otherwise lost the
    connection to the master), and should not send any status messages. This
    is checked in Command.sendStatus .
    """
    # NOTE: the docstring used to sit *after* implements(), which made it a
    # dead string expression rather than Command.__doc__. It now comes first.

    implements(ISlaveCommand)

    # builder methods:
    #  sendStatus(dict) (zero or more)
    #  commandComplete() or commandInterrupted() (one, at end)

    debug = False
    interrupted = False
    running = False # set by Builder, cleared on shutdown or when the
                    # Deferred fires

    def __init__(self, builder, stepId, args):
        self.builder = builder
        self.stepId = stepId # just for logging
        self.args = args
        self.setup(args)

    def setup(self, args):
        """Override this in a subclass to extract items from the args dict."""
        pass

    def doStart(self):
        """Mark the command as running and invoke start(). The returned
        Deferred fires (after commandComplete has run) when the command
        is done."""
        self.running = True
        d = defer.maybeDeferred(self.start)
        d.addBoth(self.commandComplete)
        return d

    def start(self):
        """Start the command. This method should return a Deferred that will
        fire when the command has completed. The Deferred's argument will be
        ignored.

        This method should be overridden by subclasses."""
        # call form instead of the py2-only 'raise E, msg' statement syntax
        raise NotImplementedError("You must implement this in a subclass")

    def sendStatus(self, status):
        """Send a status update to the master."""
        if self.debug:
            log.msg("sendStatus", status)
        if not self.running:
            log.msg("would sendStatus but not .running")
            return
        self.builder.sendUpdate(status)

    def doInterrupt(self):
        """Stop sending status updates and ask the command to shut down."""
        self.running = False
        self.interrupt()

    def interrupt(self):
        """Override this in a subclass to allow commands to be interrupted.
        May be called multiple times, test and set self.interrupted=True if
        this matters."""
        pass

    def commandComplete(self, res):
        # the command's Deferred has fired: no more status messages allowed
        self.running = False
        return res

    # utility methods, mostly used by SlaveShellCommand and the like

    def _abandonOnFailure(self, rc):
        """Raise AbandonChain when *rc* indicates failure (non-zero)."""
        if not isinstance(rc, int):
            log.msg("weird, _abandonOnFailure was given rc=%s (%s)" % \
                    (rc, type(rc)))
        assert isinstance(rc, int)
        if rc != 0:
            raise AbandonChain(rc)
        return rc

    def _sendRC(self, res):
        """Callback: report a successful (rc=0) completion to the master."""
        self.sendStatus({'rc': 0})

    def _checkAbandoned(self, why):
        """Errback: report the rc carried by an AbandonChain; any other
        failure is re-raised by trap()."""
        log.msg("_checkAbandoned", why)
        why.trap(AbandonChain)
        log.msg(" abandoning chain", why.value)
        self.sendStatus({'rc': why.value.args[0]})
        return None
-
-
-
class SlaveFileUploadCommand(Command):
    """
    Upload a file from slave to build master
    Arguments:

        - ['workdir']: base directory to use
        - ['slavesrc']: name of the slave-side file to read from
        - ['writer']: RemoteReference to a transfer._FileWriter object
        - ['maxsize']: max size (in bytes) of file to write
        - ['blocksize']: max size for each data block
    """
    debug = False

    def setup(self, args):
        # extract upload parameters; stderr/rc record the eventual outcome
        self.workdir = args['workdir']
        self.filename = args['slavesrc']
        self.writer = args['writer']
        self.remaining = args['maxsize']
        self.blocksize = args['blocksize']
        self.stderr = None
        self.rc = 0

    def start(self):
        """Open the local file and start the block-by-block send loop.
        Returns a Deferred that fires (after the remote writer has been
        closed) when the upload is complete."""
        if self.debug:
            log.msg('SlaveFileUploadCommand started')

        # Open file
        self.path = os.path.join(self.builder.basedir,
                                 self.workdir,
                                 os.path.expanduser(self.filename))
        try:
            self.fp = open(self.path, 'rb')
            if self.debug:
                log.msg('Opened %r for upload' % self.path)
        except (IOError, OSError):
            # narrowed from a bare 'except:': only open() failures should be
            # reported this way (consistent with SlaveFileDownloadCommand)
            # TODO: this needs cleanup
            self.fp = None
            self.stderr = 'Cannot open file %r for upload' % self.path
            self.rc = 1
            if self.debug:
                log.msg('Cannot open file %r for upload' % self.path)

        self.sendStatus({'header': "sending %s" % self.path})

        d = defer.Deferred()
        # kick off the first block from the reactor, not synchronously
        reactor.callLater(0, self._loop, d)
        def _close(res):
            # close the file, but pass through any errors from _loop
            d1 = self.writer.callRemote("close")
            d1.addErrback(log.err)
            d1.addCallback(lambda ignored: res)
            return d1
        d.addBoth(_close)
        d.addBoth(self.finished)
        return d

    def _loop(self, fire_when_done):
        # send one block per iteration until _writeBlock reports completion
        d = defer.maybeDeferred(self._writeBlock)
        def _done(finished):
            if finished:
                fire_when_done.callback(None)
            else:
                self._loop(fire_when_done)
        def _err(why):
            fire_when_done.errback(why)
        d.addCallbacks(_done, _err)
        return None

    def _writeBlock(self):
        """Write one block of data to the remote writer. Returns True (or
        a Deferred firing True) when the upload is finished."""

        if self.interrupted or self.fp is None:
            if self.debug:
                log.msg('SlaveFileUploadCommand._writeBlock(): end')
            return True

        length = self.blocksize
        if self.remaining is not None and length > self.remaining:
            length = self.remaining

        if length <= 0:
            # hit the maxsize cap: truncate and report an error
            if self.stderr is None:
                self.stderr = 'Maximum filesize reached, truncating file %r' \
                                % self.path
                self.rc = 1
            data = ''
        else:
            data = self.fp.read(length)

        if self.debug:
            log.msg('SlaveFileUploadCommand._writeBlock(): '+
                    'allowed=%d readlen=%d' % (length, len(data)))
        if len(data) == 0:
            log.msg("EOF: callRemote(close)")
            return True

        if self.remaining is not None:
            self.remaining = self.remaining - len(data)
            assert self.remaining >= 0
        d = self.writer.callRemote('write', data)
        d.addCallback(lambda res: False)
        return d

    def interrupt(self):
        if self.debug:
            log.msg('interrupted')
        if self.interrupted:
            return
        if self.stderr is None:
            self.stderr = 'Upload of %r interrupted' % self.path
            self.rc = 1
        self.interrupted = True
        # the next _writeBlock call will notice the .interrupted flag

    def finished(self, res):
        """Report the final rc (and stderr, if any) to the master."""
        if self.debug:
            log.msg('finished: stderr=%r, rc=%r' % (self.stderr, self.rc))
        if self.stderr is None:
            self.sendStatus({'rc': self.rc})
        else:
            self.sendStatus({'stderr': self.stderr, 'rc': self.rc})
        return res

registerSlaveCommand("uploadFile", SlaveFileUploadCommand, command_version)
-
-
class SlaveDirectoryUploadCommand(Command):
    """
    Upload a directory from slave to build master
    Arguments:

        - ['workdir']: base directory to use
        - ['slavesrc']: name of the slave-side directory to read from
        - ['writer']: RemoteReference to a transfer._DirectoryWriter object
        - ['maxsize']: max size (in bytes) of file to write
        - ['blocksize']: max size for each data block
    """
    # was accidentally left as True (sibling commands use False), which
    # spammed the twistd log on every directory upload
    debug = False

    def setup(self, args):
        # extract upload parameters; stderr/rc record the eventual outcome
        self.workdir = args['workdir']
        self.dirname = args['slavesrc']
        self.writer = args['writer']
        self.remaining = args['maxsize']
        self.blocksize = args['blocksize']
        self.stderr = None
        self.rc = 0

    def start(self):
        """Walk the source directory, recreate its directory structure on
        the master (including empty directories), then send each file."""
        if self.debug:
            log.msg('SlaveDirectoryUploadCommand started')

        # create some lists with all files and directories
        foundFiles = []
        foundDirs = []

        self.baseRoot = os.path.join(self.builder.basedir,
                                     self.workdir,
                                     os.path.expanduser(self.dirname))
        if self.debug:
            log.msg("baseRoot: %r" % self.baseRoot)

        for root, dirs, files in os.walk(self.baseRoot):
            # reconstruct root's path relative to baseRoot
            tempRoot = root
            relRoot = ''
            while (tempRoot != self.baseRoot):
                tempRoot, tempRelRoot = os.path.split(tempRoot)
                relRoot = os.path.join(tempRelRoot, relRoot)
            for name in files:
                foundFiles.append(os.path.join(relRoot, name))
            for directory in dirs:
                foundDirs.append(os.path.join(relRoot, directory))

        if self.debug:
            log.msg("foundDirs: %s" % (str(foundDirs)))
            log.msg("foundFiles: %s" % (str(foundFiles)))

        # create all directories on the master, to catch also empty ones
        for dirname in foundDirs:
            self.writer.callRemote("createdir", dirname)

        for filename in foundFiles:
            self._writeFile(filename)

        return None

    def _writeFile(self, filename):
        """Write a file to the remote writer"""

        log.msg("_writeFile: %r" % (filename))
        self.writer.callRemote('open', filename)
        # read in binary mode (consistent with SlaveFileUploadCommand) and
        # close the file promptly instead of leaking the descriptor
        f = open(os.path.join(self.baseRoot, filename), "rb")
        try:
            data = f.read()
        finally:
            f.close()
        self.writer.callRemote('write', data)
        self.writer.callRemote('close')
        return None

    def interrupt(self):
        if self.debug:
            log.msg('interrupted')
        if self.interrupted:
            return
        if self.stderr is None:
            # self.path was never set on this class (it belongs to the
            # file-upload command), so the old reference raised
            # AttributeError; report the source directory name instead
            self.stderr = 'Upload of %r interrupted' % self.dirname
            self.rc = 1
        self.interrupted = True
        # the next _writeFile call will notice the .interrupted flag

    def finished(self, res):
        """Report the final rc (and stderr, if any) to the master."""
        if self.debug:
            log.msg('finished: stderr=%r, rc=%r' % (self.stderr, self.rc))
        if self.stderr is None:
            self.sendStatus({'rc': self.rc})
        else:
            self.sendStatus({'stderr': self.stderr, 'rc': self.rc})
        return res

registerSlaveCommand("uploadDirectory", SlaveDirectoryUploadCommand, command_version)
-
-
class SlaveFileDownloadCommand(Command):
    """
    Download a file from master to slave
    Arguments:

        - ['workdir']: base directory to use
        - ['slavedest']: name of the slave-side file to be created
        - ['reader']: RemoteReference to a transfer._FileReader object
        - ['maxsize']: max size (in bytes) of file to write
        - ['blocksize']: max size for each data block
        - ['mode']: access mode for the new file
    """
    debug = False

    def setup(self, args):
        # extract download parameters; stderr/rc record the eventual outcome
        self.workdir = args['workdir']
        self.filename = args['slavedest']
        self.reader = args['reader']
        self.bytes_remaining = args['maxsize']
        self.blocksize = args['blocksize']
        self.mode = args['mode']
        self.stderr = None
        self.rc = 0

    def start(self):
        # Create the destination file (making parent directories as needed)
        # and start the block-by-block read loop. Returns a Deferred that
        # fires (after the remote reader has been closed) when done.
        if self.debug:
            log.msg('SlaveFileDownloadCommand starting')

        # Open file
        self.path = os.path.join(self.builder.basedir,
                                 self.workdir,
                                 os.path.expanduser(self.filename))

        dirname = os.path.dirname(self.path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)

        try:
            self.fp = open(self.path, 'wb')
            if self.debug:
                log.msg('Opened %r for download' % self.path)
            if self.mode is not None:
                # note: there is a brief window during which the new file
                # will have the buildslave's default (umask) mode before we
                # set the new one. Don't use this mode= feature to keep files
                # private: use the buildslave's umask for that instead. (it
                # is possible to call os.umask() before and after the open()
                # call, but cleaning up from exceptions properly is more of a
                # nuisance that way).
                os.chmod(self.path, self.mode)
        except IOError:
            # TODO: this still needs cleanup
            self.fp = None
            self.stderr = 'Cannot open file %r for download' % self.path
            self.rc = 1
            if self.debug:
                log.msg('Cannot open file %r for download' % self.path)

        d = defer.Deferred()
        # kick off the first block from the reactor, not synchronously
        reactor.callLater(0, self._loop, d)
        def _close(res):
            # close the file, but pass through any errors from _loop
            d1 = self.reader.callRemote('close')
            d1.addErrback(log.err)
            d1.addCallback(lambda ignored: res)
            return d1
        d.addBoth(_close)
        d.addBoth(self.finished)
        return d

    def _loop(self, fire_when_done):
        # fetch one block per iteration until _readBlock reports completion
        d = defer.maybeDeferred(self._readBlock)
        def _done(finished):
            if finished:
                fire_when_done.callback(None)
            else:
                self._loop(fire_when_done)
        def _err(why):
            fire_when_done.errback(why)
        d.addCallbacks(_done, _err)
        return None

    def _readBlock(self):
        """Read a block of data from the remote reader."""

        # stop early if interrupted, or if the file never opened
        if self.interrupted or self.fp is None:
            if self.debug:
                log.msg('SlaveFileDownloadCommand._readBlock(): end')
            return True

        length = self.blocksize
        if self.bytes_remaining is not None and length > self.bytes_remaining:
            length = self.bytes_remaining

        if length <= 0:
            # hit the maxsize cap: stop here and report truncation
            if self.stderr is None:
                self.stderr = 'Maximum filesize reached, truncating file %r' \
                                % self.path
                self.rc = 1
            return True
        else:
            d = self.reader.callRemote('read', length)
            d.addCallback(self._writeData)
            return d

    def _writeData(self, data):
        # returns True on EOF (transfer finished), False to keep looping
        if self.debug:
            log.msg('SlaveFileDownloadCommand._readBlock(): readlen=%d' %
                    len(data))
        if len(data) == 0:
            return True

        if self.bytes_remaining is not None:
            self.bytes_remaining = self.bytes_remaining - len(data)
            assert self.bytes_remaining >= 0
        self.fp.write(data)
        return False

    def interrupt(self):
        if self.debug:
            log.msg('interrupted')
        if self.interrupted:
            return
        if self.stderr is None:
            self.stderr = 'Download of %r interrupted' % self.path
            self.rc = 1
        self.interrupted = True
        # now we wait for the next read request to return. _readBlock will
        # abandon the file when it sees self.interrupted set.

    def finished(self, res):
        # close the local file, then report rc (and stderr if any)
        if self.fp is not None:
            self.fp.close()

        if self.debug:
            log.msg('finished: stderr=%r, rc=%r' % (self.stderr, self.rc))
        if self.stderr is None:
            self.sendStatus({'rc': self.rc})
        else:
            self.sendStatus({'stderr': self.stderr, 'rc': self.rc})
        return res

registerSlaveCommand("downloadFile", SlaveFileDownloadCommand, command_version)
-
-
-
class SlaveShellCommand(Command):
    """A Command that runs a shell command on the slave. The args dict
    contains the following keys:

        - ['command'] (required): a shell command to run. If this is a string,
                                  it will be run with /bin/sh (['/bin/sh',
                                  '-c', command]). If it is a list
                                  (preferred), it will be used directly.
        - ['workdir'] (required): subdirectory in which the command will be
                                  run, relative to the builder dir
        - ['env']: a dict of environment variables to augment/replace
                   os.environ . PYTHONPATH is treated specially, and
                   should be a list of path components to be prepended to
                   any existing PYTHONPATH environment variable.
        - ['initial_stdin']: a string which will be written to the command's
                             stdin as soon as it starts
        - ['keep_stdin_open']: unless True, the command's stdin will be
                               closed as soon as initial_stdin has been
                               written. Set this to True if you plan to write
                               to stdin after the command has been started.
        - ['want_stdout']: 0 if stdout should be thrown away
        - ['want_stderr']: 0 if stderr should be thrown away
        - ['usePTY']: True or False if the command should use a PTY (defaults
                      to configuration of the slave)
        - ['not_really']: 1 to skip execution and return rc=0
        - ['timeout']: seconds of silence to tolerate before killing command
        - ['logfiles']: dict mapping LogFile name to the workdir-relative
                        filename of a local log file. This local file will be
                        watched just like 'tail -f', and all changes will be
                        written to 'log' status updates.

    ShellCommand creates the following status messages:
        - {'stdout': data} : when stdout data is available
        - {'stderr': data} : when stderr data is available
        - {'header': data} : when headers (command start/stop) are available
        - {'log': (logfile_name, data)} : when log files have new contents
        - {'rc': rc} : when the process has terminated
    """

    def start(self):
        args = self.args
        # args['workdir'] is relative to the Builder directory, and must be
        # present
        assert args['workdir'] is not None
        workdir = os.path.join(self.builder.basedir, args['workdir'])

        get = args.get
        self.command = ShellCommand(self.builder, args['command'],
                                    workdir, environ=get('env'),
                                    timeout=get('timeout', None),
                                    sendStdout=get('want_stdout', True),
                                    sendStderr=get('want_stderr', True),
                                    sendRC=True,
                                    initialStdin=get('initial_stdin'),
                                    keepStdinOpen=get('keep_stdin_open'),
                                    logfiles=get('logfiles', {}),
                                    usePTY=get('usePTY', "slave-config"),
                                    )
        return self.command.start()

    def interrupt(self):
        # flag ourselves, then ask the running ShellCommand to die
        self.interrupted = True
        self.command.kill("command interrupted")

    def writeStdin(self, data):
        # proxy straight through to the running ShellCommand
        self.command.writeStdin(data)

    def closeStdin(self):
        # proxy straight through to the running ShellCommand
        self.command.closeStdin()

registerSlaveCommand("shell", SlaveShellCommand, command_version)
-
-
class DummyCommand(Command):
    """
    A no-op command for testing: it emits one fake stdout status after a
    second, then finishes with rc=0 after args['timeout'] (default 5)
    seconds in total. See L{buildbot.steps.dummy.RemoteDummy}
    """

    def start(self):
        self.d = defer.Deferred()
        log.msg(" starting dummy command [%s]" % self.stepId)
        # send intermediate status after one second
        self.timer = reactor.callLater(1, self.doStatus)
        return self.d

    def interrupt(self):
        if self.interrupted:
            return
        # cancel the pending timer and finish immediately (with rc=1)
        self.timer.cancel()
        self.timer = None
        self.interrupted = True
        self.finished()

    def doStatus(self):
        log.msg(" sending intermediate status")
        self.sendStatus({'stdout': 'data'})
        timeout = self.args.get('timeout', 5) + 1
        # schedule completion for the remainder of the timeout
        self.timer = reactor.callLater(timeout - 1, self.finished)

    def finished(self):
        log.msg(" dummy command finished [%s]" % self.stepId)
        status = {'rc': 0}
        if self.interrupted:
            status = {'rc': 1}
        self.sendStatus(status)
        self.d.callback(0)

registerSlaveCommand("dummy", DummyCommand, command_version)
-
-
# this maps handle names to a callable. When the WaitCommand starts, this
# callable is invoked with no arguments. It should return a Deferred. When
# that Deferred fires, our WaitCommand will finish.
waitCommandRegistry = {}

class WaitCommand(Command):
    """
    A dummy command used by the buildbot unit test suite: it waits for the
    unit test to tell it to finish, via the callable registered under
    args['handle'] in waitCommandRegistry. See L{buildbot.steps.dummy.Wait}
    """

    def start(self):
        self.d = defer.Deferred()
        log.msg(" starting wait command [%s]" % self.stepId)
        handle = self.args['handle']
        # registry entries are one-shot: claim and remove in one step
        cb = waitCommandRegistry.pop(handle)
        def _called():
            log.msg(" wait-%s starting" % (handle,))
            d = cb()
            def _done(res):
                log.msg(" wait-%s finishing: %s" % (handle, res))
                return res
            d.addBoth(_done)
            d.addCallbacks(self.finished, self.failed)
        reactor.callLater(0, _called)
        return self.d

    def interrupt(self):
        log.msg(" wait command interrupted")
        if self.interrupted:
            return
        self.interrupted = True
        self.finished("interrupted")

    def finished(self, res):
        log.msg(" wait command finished [%s]" % self.stepId)
        # interrupted waits report rc=2, clean completion rc=0
        if self.interrupted:
            self.sendStatus({'rc': 2})
        else:
            self.sendStatus({'rc': 0})
        self.d.callback(0)

    def failed(self, why):
        log.msg(" wait command failed [%s]" % self.stepId)
        self.sendStatus({'rc': 1})
        self.d.callback(0)

registerSlaveCommand("dummy.wait", WaitCommand, command_version)
-
-
-class SourceBase(Command):
- """Abstract base class for Version Control System operations (checkout
- and update). This class extracts the following arguments from the
- dictionary received from the master:
-
- - ['workdir']: (required) the subdirectory where the buildable sources
- should be placed
-
- - ['mode']: one of update/copy/clobber/export, defaults to 'update'
-
- - ['revision']: If not None, this is an int or string which indicates
- which sources (along a time-like axis) should be used.
- It is the thing you provide as the CVS -r or -D
- argument.
-
- - ['patch']: If not None, this is a tuple of (striplevel, patch)
- which contains a patch that should be applied after the
- checkout has occurred. Once applied, the tree is no
- longer eligible for use with mode='update', and it only
- makes sense to use this in conjunction with a
- ['revision'] argument. striplevel is an int, and patch
- is a string in standard unified diff format. The patch
- will be applied with 'patch -p%d <PATCH', with
- STRIPLEVEL substituted as %d. The command will fail if
- the patch process fails (rejected hunks).
-
- - ['timeout']: seconds of silence tolerated before we kill off the
- command
-
- - ['retry']: If not None, this is a tuple of (delay, repeats)
- which means that any failed VC updates should be
- reattempted, up to REPEATS times, after a delay of
- DELAY seconds. This is intended to deal with slaves
- that experience transient network failures.
- """
-
- sourcedata = ""
-
- def setup(self, args):
- # if we need to parse the output, use this environment. Otherwise
- # command output will be in whatever the buildslave's native language
- # has been set to.
- self.env = os.environ.copy()
- self.env['LC_MESSAGES'] = "C"
-
- self.workdir = args['workdir']
- self.mode = args.get('mode', "update")
- self.revision = args.get('revision')
- self.patch = args.get('patch')
- self.timeout = args.get('timeout', 120)
- self.retry = args.get('retry')
- # VC-specific subclasses should override this to extract more args.
- # Make sure to upcall!
-
- def start(self):
- self.sendStatus({'header': "starting " + self.header + "\n"})
- self.command = None
-
- # self.srcdir is where the VC system should put the sources
- if self.mode == "copy":
- self.srcdir = "source" # hardwired directory name, sorry
- else:
- self.srcdir = self.workdir
- self.sourcedatafile = os.path.join(self.builder.basedir,
- self.srcdir,
- ".buildbot-sourcedata")
-
- d = defer.succeed(None)
- self.maybeClobber(d)
- if not (self.sourcedirIsUpdateable() and self.sourcedataMatches()):
- # the directory cannot be updated, so we have to clobber it.
- # Perhaps the master just changed modes from 'export' to
- # 'update'.
- d.addCallback(self.doClobber, self.srcdir)
-
- d.addCallback(self.doVC)
-
- if self.mode == "copy":
- d.addCallback(self.doCopy)
- if self.patch:
- d.addCallback(self.doPatch)
- d.addCallbacks(self._sendRC, self._checkAbandoned)
- return d
-
- def maybeClobber(self, d):
- # do we need to clobber anything?
- if self.mode in ("copy", "clobber", "export"):
- d.addCallback(self.doClobber, self.workdir)
-
- def interrupt(self):
- self.interrupted = True
- if self.command:
- self.command.kill("command interrupted")
-
- def doVC(self, res):
- if self.interrupted:
- raise AbandonChain(1)
- if self.sourcedirIsUpdateable() and self.sourcedataMatches():
- d = self.doVCUpdate()
- d.addCallback(self.maybeDoVCFallback)
- else:
- d = self.doVCFull()
- d.addBoth(self.maybeDoVCRetry)
- d.addCallback(self._abandonOnFailure)
- d.addCallback(self._handleGotRevision)
- d.addCallback(self.writeSourcedata)
- return d
-
- def sourcedataMatches(self):
- try:
- olddata = open(self.sourcedatafile, "r").read()
- if olddata != self.sourcedata:
- return False
- except IOError:
- return False
- return True
-
- def _handleGotRevision(self, res):
- d = defer.maybeDeferred(self.parseGotRevision)
- d.addCallback(lambda got_revision:
- self.sendStatus({'got_revision': got_revision}))
- return d
-
- def parseGotRevision(self):
- """Override this in a subclass. It should return a string that
- represents which revision was actually checked out, or a Deferred
- that will fire with such a string. If, in a future build, you were to
- pass this 'got_revision' string in as the 'revision' component of a
- SourceStamp, you should wind up with the same source code as this
- checkout just obtained.
-
- It is probably most useful to scan self.command.stdout for a string
- of some sort. Be sure to set keepStdout=True on the VC command that
- you run, so that you'll have something available to look at.
-
- If this information is unavailable, just return None."""
-
- return None
-
- def writeSourcedata(self, res):
- open(self.sourcedatafile, "w").write(self.sourcedata)
- return res
-
- def sourcedirIsUpdateable(self):
- raise NotImplementedError("this must be implemented in a subclass")
-
- def doVCUpdate(self):
- raise NotImplementedError("this must be implemented in a subclass")
-
- def doVCFull(self):
- raise NotImplementedError("this must be implemented in a subclass")
-
- def maybeDoVCFallback(self, rc):
- if type(rc) is int and rc == 0:
- return rc
- if self.interrupted:
- raise AbandonChain(1)
- msg = "update failed, clobbering and trying again"
- self.sendStatus({'header': msg + "\n"})
- log.msg(msg)
- d = self.doClobber(None, self.srcdir)
- d.addCallback(self.doVCFallback2)
- return d
-
- def doVCFallback2(self, res):
- msg = "now retrying VC operation"
- self.sendStatus({'header': msg + "\n"})
- log.msg(msg)
- d = self.doVCFull()
- d.addBoth(self.maybeDoVCRetry)
- d.addCallback(self._abandonOnFailure)
- return d
-
- def maybeDoVCRetry(self, res):
- """We get here somewhere after a VC chain has finished. res could
- be::
-
- - 0: the operation was successful
- - nonzero: the operation failed. retry if possible
- - AbandonChain: the operation failed, someone else noticed. retry.
- - Failure: some other exception, re-raise
- """
-
- if isinstance(res, failure.Failure):
- if self.interrupted:
- return res # don't re-try interrupted builds
- res.trap(AbandonChain)
- else:
- if type(res) is int and res == 0:
- return res
- if self.interrupted:
- raise AbandonChain(1)
- # if we get here, we should retry, if possible
- if self.retry:
- delay, repeats = self.retry
- if repeats >= 0:
- self.retry = (delay, repeats-1)
- msg = ("update failed, trying %d more times after %d seconds"
- % (repeats, delay))
- self.sendStatus({'header': msg + "\n"})
- log.msg(msg)
- d = defer.Deferred()
- self.maybeClobber(d)
- d.addCallback(lambda res: self.doVCFull())
- d.addBoth(self.maybeDoVCRetry)
- reactor.callLater(delay, d.callback, None)
- return d
- return res
-
- def doClobber(self, dummy, dirname):
- # TODO: remove the old tree in the background
-## workdir = os.path.join(self.builder.basedir, self.workdir)
-## deaddir = self.workdir + ".deleting"
-## if os.path.isdir(workdir):
-## try:
-## os.rename(workdir, deaddir)
-## # might fail if deaddir already exists: previous deletion
-## # hasn't finished yet
-## # start the deletion in the background
-## # TODO: there was a solaris/NetApp/NFS problem where a
-## # process that was still running out of the directory we're
-## # trying to delete could prevent the rm-rf from working. I
-## # think it stalled the rm, but maybe it just died with
-## # permission issues. Try to detect this.
-## os.commands("rm -rf %s &" % deaddir)
-## except:
-## # fall back to sequential delete-then-checkout
-## pass
- d = os.path.join(self.builder.basedir, dirname)
- if runtime.platformType != "posix":
- # if we're running on w32, use rmtree instead. It will block,
- # but hopefully it won't take too long.
- rmdirRecursive(d)
- return defer.succeed(0)
- command = ["rm", "-rf", d]
- c = ShellCommand(self.builder, command, self.builder.basedir,
- sendRC=0, timeout=self.timeout, usePTY=False)
-
- self.command = c
- # sendRC=0 means the rm command will send stdout/stderr to the
- # master, but not the rc=0 when it finishes. That job is left to
- # _sendRC
- d = c.start()
- d.addCallback(self._abandonOnFailure)
- return d
-
- def doCopy(self, res):
- # now copy tree to workdir
- fromdir = os.path.join(self.builder.basedir, self.srcdir)
- todir = os.path.join(self.builder.basedir, self.workdir)
- if runtime.platformType != "posix":
- self.sendStatus({'header': "Since we're on a non-POSIX platform, "
- "we're not going to try to execute cp in a subprocess, but instead "
- "use shutil.copytree(), which will block until it is complete. "
- "fromdir: %s, todir: %s\n" % (fromdir, todir)})
- shutil.copytree(fromdir, todir)
- return defer.succeed(0)
-
- if not os.path.exists(os.path.dirname(todir)):
- os.makedirs(os.path.dirname(todir))
- if os.path.exists(todir):
- # I don't think this happens, but just in case..
- log.msg("cp target '%s' already exists -- cp will not do what you think!" % todir)
-
- command = ['cp', '-R', '-P', '-p', fromdir, todir]
- c = ShellCommand(self.builder, command, self.builder.basedir,
- sendRC=False, timeout=self.timeout, usePTY=False)
- self.command = c
- d = c.start()
- d.addCallback(self._abandonOnFailure)
- return d
-
- def doPatch(self, res):
- patchlevel, diff = self.patch
- command = [getCommand("patch"), '-p%d' % patchlevel]
- dir = os.path.join(self.builder.basedir, self.workdir)
- # mark the directory so we don't try to update it later
- open(os.path.join(dir, ".buildbot-patched"), "w").write("patched\n")
- # now apply the patch
- c = ShellCommand(self.builder, command, dir,
- sendRC=False, timeout=self.timeout,
- initialStdin=diff, usePTY=False)
- self.command = c
- d = c.start()
- d.addCallback(self._abandonOnFailure)
- return d
-
-
-class CVS(SourceBase):
- """CVS-specific VC operation. In addition to the arguments handled by
- SourceBase, this command reads the following keys:
-
- ['cvsroot'] (required): the CVSROOT repository string
- ['cvsmodule'] (required): the module to be retrieved
- ['branch']: a '-r' tag or branch name to use for the checkout/update
- ['login']: a string for use as a password to 'cvs login'
- ['global_options']: a list of strings to use before the CVS verb
- """
-
- header = "cvs operation"
-
- def setup(self, args):
- SourceBase.setup(self, args)
- self.vcexe = getCommand("cvs")
- self.cvsroot = args['cvsroot']
- self.cvsmodule = args['cvsmodule']
- self.global_options = args.get('global_options', [])
- self.branch = args.get('branch')
- self.login = args.get('login')
- self.sourcedata = "%s\n%s\n%s\n" % (self.cvsroot, self.cvsmodule,
- self.branch)
-
- def sourcedirIsUpdateable(self):
- if os.path.exists(os.path.join(self.builder.basedir,
- self.srcdir, ".buildbot-patched")):
- return False
- return os.path.isdir(os.path.join(self.builder.basedir,
- self.srcdir, "CVS"))
-
- def start(self):
- if self.login is not None:
- # need to do a 'cvs login' command first
- d = self.builder.basedir
- command = ([self.vcexe, '-d', self.cvsroot] + self.global_options
- + ['login'])
- c = ShellCommand(self.builder, command, d,
- sendRC=False, timeout=self.timeout,
- initialStdin=self.login+"\n", usePTY=False)
- self.command = c
- d = c.start()
- d.addCallback(self._abandonOnFailure)
- d.addCallback(self._didLogin)
- return d
- else:
- return self._didLogin(None)
-
- def _didLogin(self, res):
- # now we really start
- return SourceBase.start(self)
-
- def doVCUpdate(self):
- d = os.path.join(self.builder.basedir, self.srcdir)
- command = [self.vcexe, '-z3'] + self.global_options + ['update', '-dP']
- if self.branch:
- command += ['-r', self.branch]
- if self.revision:
- command += ['-D', self.revision]
- c = ShellCommand(self.builder, command, d,
- sendRC=False, timeout=self.timeout, usePTY=False)
- self.command = c
- return c.start()
-
- def doVCFull(self):
- d = self.builder.basedir
- if self.mode == "export":
- verb = "export"
- else:
- verb = "checkout"
- command = ([self.vcexe, '-d', self.cvsroot, '-z3'] +
- self.global_options +
- [verb, '-d', self.srcdir])
- if self.branch:
- command += ['-r', self.branch]
- if self.revision:
- command += ['-D', self.revision]
- command += [self.cvsmodule]
- c = ShellCommand(self.builder, command, d,
- sendRC=False, timeout=self.timeout, usePTY=False)
- self.command = c
- return c.start()
-
- def parseGotRevision(self):
- # CVS does not have any kind of revision stamp to speak of. We return
- # the current timestamp as a best-effort guess, but this depends upon
- # the local system having a clock that is
- # reasonably-well-synchronized with the repository.
- return time.strftime("%Y-%m-%d %H:%M:%S +0000", time.gmtime())
-
-registerSlaveCommand("cvs", CVS, command_version)
-
-class SVN(SourceBase):
- """Subversion-specific VC operation. In addition to the arguments
- handled by SourceBase, this command reads the following keys:
-
- ['svnurl'] (required): the SVN repository string
- ['username'] Username passed to the svn command
- ['password'] Password passed to the svn command
- """
-
- header = "svn operation"
-
- def setup(self, args):
- SourceBase.setup(self, args)
- self.vcexe = getCommand("svn")
- self.svnurl = args['svnurl']
- self.sourcedata = "%s\n" % self.svnurl
-
- self.extra_args = []
- if args.has_key('username'):
- self.extra_args.extend(["--username", args['username']])
- if args.has_key('password'):
- self.extra_args.extend(["--password", Obfuscated(args['password'], "XXXX")])
-
- def sourcedirIsUpdateable(self):
- if os.path.exists(os.path.join(self.builder.basedir,
- self.srcdir, ".buildbot-patched")):
- return False
- return os.path.isdir(os.path.join(self.builder.basedir,
- self.srcdir, ".svn"))
-
- def doVCUpdate(self):
- revision = self.args['revision'] or 'HEAD'
- # update: possible for mode in ('copy', 'update')
- d = os.path.join(self.builder.basedir, self.srcdir)
- command = [self.vcexe, 'update'] + \
- self.extra_args + \
- ['--revision', str(revision),
- '--non-interactive', '--no-auth-cache']
- c = ShellCommand(self.builder, command, d,
- sendRC=False, timeout=self.timeout,
- keepStdout=True, usePTY=False)
- self.command = c
- return c.start()
-
- def doVCFull(self):
- revision = self.args['revision'] or 'HEAD'
- d = self.builder.basedir
- if self.mode == "export":
- command = [self.vcexe, 'export'] + \
- self.extra_args + \
- ['--revision', str(revision),
- '--non-interactive', '--no-auth-cache',
- self.svnurl, self.srcdir]
- else:
- # mode=='clobber', or copy/update on a broken workspace
- command = [self.vcexe, 'checkout'] + \
- self.extra_args + \
- ['--revision', str(revision),
- '--non-interactive', '--no-auth-cache',
- self.svnurl, self.srcdir]
- c = ShellCommand(self.builder, command, d,
- sendRC=False, timeout=self.timeout,
- keepStdout=True, usePTY=False)
- self.command = c
- return c.start()
-
- def getSvnVersionCommand(self):
- """
- Get the (shell) command used to determine SVN revision number
- of checked-out code
-
- return: list of strings, passable as the command argument to ShellCommand
- """
- # svn checkout operations finish with 'Checked out revision 16657.'
- # svn update operations finish the line 'At revision 16654.'
- # But we don't use those. Instead, run 'svnversion'.
- svnversion_command = getCommand("svnversion")
- # older versions of 'svnversion' (1.1.4) require the WC_PATH
- # argument, newer ones (1.3.1) do not.
- return [svnversion_command, "."]
-
- def parseGotRevision(self):
- c = ShellCommand(self.builder,
- self.getSvnVersionCommand(),
- os.path.join(self.builder.basedir, self.srcdir),
- environ=self.env,
- sendStdout=False, sendStderr=False, sendRC=False,
- keepStdout=True, usePTY=False)
- d = c.start()
- def _parse(res):
- r_raw = c.stdout.strip()
- # Extract revision from the version "number" string
- r = r_raw.rstrip('MS')
- r = r.split(':')[-1]
- got_version = None
- try:
- got_version = int(r)
- except ValueError:
- msg =("SVN.parseGotRevision unable to parse output "
- "of svnversion: '%s'" % r_raw)
- log.msg(msg)
- self.sendStatus({'header': msg + "\n"})
- return got_version
- d.addCallback(_parse)
- return d
-
-
-registerSlaveCommand("svn", SVN, command_version)
-
-class Darcs(SourceBase):
- """Darcs-specific VC operation. In addition to the arguments
- handled by SourceBase, this command reads the following keys:
-
- ['repourl'] (required): the Darcs repository string
- """
-
- header = "darcs operation"
-
- def setup(self, args):
- SourceBase.setup(self, args)
- self.vcexe = getCommand("darcs")
- self.repourl = args['repourl']
- self.sourcedata = "%s\n" % self.repourl
- self.revision = self.args.get('revision')
-
- def sourcedirIsUpdateable(self):
- if os.path.exists(os.path.join(self.builder.basedir,
- self.srcdir, ".buildbot-patched")):
- return False
- if self.revision:
- # checking out a specific revision requires a full 'darcs get'
- return False
- return os.path.isdir(os.path.join(self.builder.basedir,
- self.srcdir, "_darcs"))
-
- def doVCUpdate(self):
- assert not self.revision
- # update: possible for mode in ('copy', 'update')
- d = os.path.join(self.builder.basedir, self.srcdir)
- command = [self.vcexe, 'pull', '--all', '--verbose']
- c = ShellCommand(self.builder, command, d,
- sendRC=False, timeout=self.timeout, usePTY=False)
- self.command = c
- return c.start()
-
- def doVCFull(self):
- # checkout or export
- d = self.builder.basedir
- command = [self.vcexe, 'get', '--verbose', '--partial',
- '--repo-name', self.srcdir]
- if self.revision:
- # write the context to a file
- n = os.path.join(self.builder.basedir, ".darcs-context")
- f = open(n, "wb")
- f.write(self.revision)
- f.close()
- # tell Darcs to use that context
- command.append('--context')
- command.append(n)
- command.append(self.repourl)
-
- c = ShellCommand(self.builder, command, d,
- sendRC=False, timeout=self.timeout, usePTY=False)
- self.command = c
- d = c.start()
- if self.revision:
- d.addCallback(self.removeContextFile, n)
- return d
-
- def removeContextFile(self, res, n):
- os.unlink(n)
- return res
-
- def parseGotRevision(self):
- # we use 'darcs context' to find out what we wound up with
- command = [self.vcexe, "changes", "--context"]
- c = ShellCommand(self.builder, command,
- os.path.join(self.builder.basedir, self.srcdir),
- environ=self.env,
- sendStdout=False, sendStderr=False, sendRC=False,
- keepStdout=True, usePTY=False)
- d = c.start()
- d.addCallback(lambda res: c.stdout)
- return d
-
-registerSlaveCommand("darcs", Darcs, command_version)
-
-class Monotone(SourceBase):
- """Monotone-specific VC operation. In addition to the arguments handled
- by SourceBase, this command reads the following keys:
-
- ['server_addr'] (required): the address of the server to pull from
- ['branch'] (required): the branch the revision is on
- ['db_path'] (required): the local database path to use
- ['revision'] (required): the revision to check out
- ['monotone']: (required): path to monotone executable
- """
-
- header = "monotone operation"
-
- def setup(self, args):
- SourceBase.setup(self, args)
- self.server_addr = args["server_addr"]
- self.branch = args["branch"]
- self.db_path = args["db_path"]
- self.revision = args["revision"]
- self.monotone = args["monotone"]
- self._made_fulls = False
- self._pull_timeout = args["timeout"]
-
- def _makefulls(self):
- if not self._made_fulls:
- basedir = self.builder.basedir
- self.full_db_path = os.path.join(basedir, self.db_path)
- self.full_srcdir = os.path.join(basedir, self.srcdir)
- self._made_fulls = True
-
- def sourcedirIsUpdateable(self):
- self._makefulls()
- if os.path.exists(os.path.join(self.full_srcdir,
- ".buildbot_patched")):
- return False
- return (os.path.isfile(self.full_db_path)
- and os.path.isdir(os.path.join(self.full_srcdir, "MT")))
-
- def doVCUpdate(self):
- return self._withFreshDb(self._doUpdate)
-
- def _doUpdate(self):
- # update: possible for mode in ('copy', 'update')
- command = [self.monotone, "update",
- "-r", self.revision,
- "-b", self.branch]
- c = ShellCommand(self.builder, command, self.full_srcdir,
- sendRC=False, timeout=self.timeout, usePTY=False)
- self.command = c
- return c.start()
-
- def doVCFull(self):
- return self._withFreshDb(self._doFull)
-
- def _doFull(self):
- command = [self.monotone, "--db=" + self.full_db_path,
- "checkout",
- "-r", self.revision,
- "-b", self.branch,
- self.full_srcdir]
- c = ShellCommand(self.builder, command, self.builder.basedir,
- sendRC=False, timeout=self.timeout, usePTY=False)
- self.command = c
- return c.start()
-
- def _withFreshDb(self, callback):
- self._makefulls()
- # first ensure the db exists and is usable
- if os.path.isfile(self.full_db_path):
- # already exists, so run 'db migrate' in case monotone has been
- # upgraded under us
- command = [self.monotone, "db", "migrate",
- "--db=" + self.full_db_path]
- else:
- # We'll be doing an initial pull, so up the timeout to 3 hours to
- # make sure it will have time to complete.
- self._pull_timeout = max(self._pull_timeout, 3 * 60 * 60)
- self.sendStatus({"header": "creating database %s\n"
- % (self.full_db_path,)})
- command = [self.monotone, "db", "init",
- "--db=" + self.full_db_path]
- c = ShellCommand(self.builder, command, self.builder.basedir,
- sendRC=False, timeout=self.timeout, usePTY=False)
- self.command = c
- d = c.start()
- d.addCallback(self._abandonOnFailure)
- d.addCallback(self._didDbInit)
- d.addCallback(self._didPull, callback)
- return d
-
- def _didDbInit(self, res):
- command = [self.monotone, "--db=" + self.full_db_path,
- "pull", "--ticker=dot", self.server_addr, self.branch]
- c = ShellCommand(self.builder, command, self.builder.basedir,
- sendRC=False, timeout=self._pull_timeout, usePTY=False)
- self.sendStatus({"header": "pulling %s from %s\n"
- % (self.branch, self.server_addr)})
- self.command = c
- return c.start()
-
- def _didPull(self, res, callback):
- return callback()
-
-registerSlaveCommand("monotone", Monotone, command_version)
-
-
-class Git(SourceBase):
- """Git specific VC operation. In addition to the arguments
- handled by SourceBase, this command reads the following keys:
-
- ['repourl'] (required): the upstream GIT repository string
- ['branch'] (optional): which version (i.e. branch or tag) to
- retrieve. Default: "master".
- """
-
- header = "git operation"
-
- def setup(self, args):
- SourceBase.setup(self, args)
- self.repourl = args['repourl']
- self.branch = args.get('branch')
- if not self.branch:
- self.branch = "master"
- self.sourcedata = "%s %s\n" % (self.repourl, self.branch)
-
- def _fullSrcdir(self):
- return os.path.join(self.builder.basedir, self.srcdir)
-
- def _commitSpec(self):
- if self.revision:
- return self.revision
- return self.branch
-
- def sourcedirIsUpdateable(self):
- if os.path.exists(os.path.join(self._fullSrcdir(),
- ".buildbot-patched")):
- return False
- return os.path.isdir(os.path.join(self._fullSrcdir(), ".git"))
-
- def readSourcedata(self):
- return open(self.sourcedatafile, "r").read()
-
- # If the repourl matches the sourcedata file, then
- # we can say that the sourcedata matches. We can
- # ignore branch changes, since Git can work with
- # many branches fetched, and we deal with it properly
- # in doVCUpdate.
- def sourcedataMatches(self):
- try:
- olddata = self.readSourcedata()
- if not olddata.startswith(self.repourl+' '):
- return False
- except IOError:
- return False
- return True
-
- def _didFetch(self, res):
- if self.revision:
- head = self.revision
- else:
- head = 'FETCH_HEAD'
-
- command = ['git', 'reset', '--hard', head]
- c = ShellCommand(self.builder, command, self._fullSrcdir(),
- sendRC=False, timeout=self.timeout, usePTY=False)
- self.command = c
- return c.start()
-
- # Update first runs "git clean", removing local changes,
- # if the branch to be checked out has changed. This, combined
- # with the later "git reset" equates clobbering the repo,
- # but it's much more efficient.
- def doVCUpdate(self):
- try:
- # Check to see if our branch has changed
- diffbranch = self.sourcedata != self.readSourcedata()
- except IOError:
- diffbranch = False
- if diffbranch:
- command = ['git', 'clean', '-f', '-d']
- c = ShellCommand(self.builder, command, self._fullSrcdir(),
- sendRC=False, timeout=self.timeout, usePTY=False)
- self.command = c
- d = c.start()
- d.addCallback(self._abandonOnFailure)
- d.addCallback(self._didClean)
- return d
- return self._didClean(None)
-
- def _didClean(self, dummy):
- command = ['git', 'fetch', '-t', self.repourl, self.branch]
- self.sendStatus({"header": "fetching branch %s from %s\n"
- % (self.branch, self.repourl)})
- c = ShellCommand(self.builder, command, self._fullSrcdir(),
- sendRC=False, timeout=self.timeout, usePTY=False)
- self.command = c
- d = c.start()
- d.addCallback(self._abandonOnFailure)
- d.addCallback(self._didFetch)
- return d
-
- def _didInit(self, res):
- return self.doVCUpdate()
-
- def doVCFull(self):
- os.mkdir(self._fullSrcdir())
- c = ShellCommand(self.builder, ['git', 'init'], self._fullSrcdir(),
- sendRC=False, timeout=self.timeout, usePTY=False)
- self.command = c
- d = c.start()
- d.addCallback(self._abandonOnFailure)
- d.addCallback(self._didInit)
- return d
-
- def parseGotRevision(self):
- command = ['git', 'rev-parse', 'HEAD']
- c = ShellCommand(self.builder, command, self._fullSrcdir(),
- sendRC=False, keepStdout=True, usePTY=False)
- d = c.start()
- def _parse(res):
- hash = c.stdout.strip()
- if len(hash) != 40:
- return None
- return hash
- d.addCallback(_parse)
- return d
-
-registerSlaveCommand("git", Git, command_version)
-
-class Arch(SourceBase):
- """Arch-specific (tla-specific) VC operation. In addition to the
- arguments handled by SourceBase, this command reads the following keys:
-
- ['url'] (required): the repository string
- ['version'] (required): which version (i.e. branch) to retrieve
- ['revision'] (optional): the 'patch-NN' argument to check out
- ['archive']: the archive name to use. If None, use the archive's default
- ['build-config']: if present, give to 'tla build-config' after checkout
- """
-
- header = "arch operation"
- buildconfig = None
-
- def setup(self, args):
- SourceBase.setup(self, args)
- self.vcexe = getCommand("tla")
- self.archive = args.get('archive')
- self.url = args['url']
- self.version = args['version']
- self.revision = args.get('revision')
- self.buildconfig = args.get('build-config')
- self.sourcedata = "%s\n%s\n%s\n" % (self.url, self.version,
- self.buildconfig)
-
- def sourcedirIsUpdateable(self):
- if self.revision:
- # Arch cannot roll a directory backwards, so if they ask for a
- # specific revision, clobber the directory. Technically this
- # could be limited to the cases where the requested revision is
- # later than our current one, but it's too hard to extract the
- # current revision from the tree.
- return False
- if os.path.exists(os.path.join(self.builder.basedir,
- self.srcdir, ".buildbot-patched")):
- return False
- return os.path.isdir(os.path.join(self.builder.basedir,
- self.srcdir, "{arch}"))
-
- def doVCUpdate(self):
- # update: possible for mode in ('copy', 'update')
- d = os.path.join(self.builder.basedir, self.srcdir)
- command = [self.vcexe, 'replay']
- if self.revision:
- command.append(self.revision)
- c = ShellCommand(self.builder, command, d,
- sendRC=False, timeout=self.timeout, usePTY=False)
- self.command = c
- return c.start()
-
- def doVCFull(self):
- # to do a checkout, we must first "register" the archive by giving
- # the URL to tla, which will go to the repository at that URL and
- # figure out the archive name. tla will tell you the archive name
- # when it is done, and all further actions must refer to this name.
-
- command = [self.vcexe, 'register-archive', '--force', self.url]
- c = ShellCommand(self.builder, command, self.builder.basedir,
- sendRC=False, keepStdout=True,
- timeout=self.timeout, usePTY=False)
- self.command = c
- d = c.start()
- d.addCallback(self._abandonOnFailure)
- d.addCallback(self._didRegister, c)
- return d
-
- def _didRegister(self, res, c):
- # find out what tla thinks the archive name is. If the user told us
- # to use something specific, make sure it matches.
- r = re.search(r'Registering archive: (\S+)\s*$', c.stdout)
- if r:
- msg = "tla reports archive name is '%s'" % r.group(1)
- log.msg(msg)
- self.builder.sendUpdate({'header': msg+"\n"})
- if self.archive and r.group(1) != self.archive:
- msg = (" mismatch, we wanted an archive named '%s'"
- % self.archive)
- log.msg(msg)
- self.builder.sendUpdate({'header': msg+"\n"})
- raise AbandonChain(-1)
- self.archive = r.group(1)
- assert self.archive, "need archive name to continue"
- return self._doGet()
-
- def _doGet(self):
- ver = self.version
- if self.revision:
- ver += "--%s" % self.revision
- command = [self.vcexe, 'get', '--archive', self.archive,
- '--no-pristine',
- ver, self.srcdir]
- c = ShellCommand(self.builder, command, self.builder.basedir,
- sendRC=False, timeout=self.timeout, usePTY=False)
- self.command = c
- d = c.start()
- d.addCallback(self._abandonOnFailure)
- if self.buildconfig:
- d.addCallback(self._didGet)
- return d
-
- def _didGet(self, res):
- d = os.path.join(self.builder.basedir, self.srcdir)
- command = [self.vcexe, 'build-config', self.buildconfig]
- c = ShellCommand(self.builder, command, d,
- sendRC=False, timeout=self.timeout, usePTY=False)
- self.command = c
- d = c.start()
- d.addCallback(self._abandonOnFailure)
- return d
-
- def parseGotRevision(self):
- # using code from tryclient.TlaExtractor
- # 'tla logs --full' gives us ARCHIVE/BRANCH--REVISION
- # 'tla logs' gives us REVISION
- command = [self.vcexe, "logs", "--full", "--reverse"]
- c = ShellCommand(self.builder, command,
- os.path.join(self.builder.basedir, self.srcdir),
- environ=self.env,
- sendStdout=False, sendStderr=False, sendRC=False,
- keepStdout=True, usePTY=False)
- d = c.start()
- def _parse(res):
- tid = c.stdout.split("\n")[0].strip()
- slash = tid.index("/")
- dd = tid.rindex("--")
- #branch = tid[slash+1:dd]
- baserev = tid[dd+2:]
- return baserev
- d.addCallback(_parse)
- return d
-
-registerSlaveCommand("arch", Arch, command_version)
-
-class Bazaar(Arch):
- """Bazaar (/usr/bin/baz) is an alternative client for Arch repositories.
- It is mostly option-compatible, but archive registration is different
- enough to warrant a separate Command.
-
- ['archive'] (required): the name of the archive being used
- """
-
- def setup(self, args):
- Arch.setup(self, args)
- self.vcexe = getCommand("baz")
- # baz doesn't emit the repository name after registration (and
- # grepping through the output of 'baz archives' is too hard), so we
- # require that the buildmaster configuration to provide both the
- # archive name and the URL.
- self.archive = args['archive'] # required for Baz
- self.sourcedata = "%s\n%s\n%s\n" % (self.url, self.version,
- self.buildconfig)
-
- # in _didRegister, the regexp won't match, so we'll stick with the name
- # in self.archive
-
- def _doGet(self):
- # baz prefers ARCHIVE/VERSION. This will work even if
- # my-default-archive is not set.
- ver = self.archive + "/" + self.version
- if self.revision:
- ver += "--%s" % self.revision
- command = [self.vcexe, 'get', '--no-pristine',
- ver, self.srcdir]
- c = ShellCommand(self.builder, command, self.builder.basedir,
- sendRC=False, timeout=self.timeout, usePTY=False)
- self.command = c
- d = c.start()
- d.addCallback(self._abandonOnFailure)
- if self.buildconfig:
- d.addCallback(self._didGet)
- return d
-
- def parseGotRevision(self):
- # using code from tryclient.BazExtractor
- command = [self.vcexe, "tree-id"]
- c = ShellCommand(self.builder, command,
- os.path.join(self.builder.basedir, self.srcdir),
- environ=self.env,
- sendStdout=False, sendStderr=False, sendRC=False,
- keepStdout=True, usePTY=False)
- d = c.start()
- def _parse(res):
- tid = c.stdout.strip()
- slash = tid.index("/")
- dd = tid.rindex("--")
- #branch = tid[slash+1:dd]
- baserev = tid[dd+2:]
- return baserev
- d.addCallback(_parse)
- return d
-
-registerSlaveCommand("bazaar", Bazaar, command_version)
-
-
-class Bzr(SourceBase):
- """bzr-specific VC operation. In addition to the arguments
- handled by SourceBase, this command reads the following keys:
-
- ['repourl'] (required): the Bzr repository string
- """
-
- header = "bzr operation"
-
- def setup(self, args):
- SourceBase.setup(self, args)
- self.vcexe = getCommand("bzr")
- self.repourl = args['repourl']
- self.sourcedata = "%s\n" % self.repourl
- self.revision = self.args.get('revision')
-
- def sourcedirIsUpdateable(self):
- if os.path.exists(os.path.join(self.builder.basedir,
- self.srcdir, ".buildbot-patched")):
- return False
- if self.revision:
- # checking out a specific revision requires a full 'bzr checkout'
- return False
- return os.path.isdir(os.path.join(self.builder.basedir,
- self.srcdir, ".bzr"))
-
- def doVCUpdate(self):
- assert not self.revision
- # update: possible for mode in ('copy', 'update')
- srcdir = os.path.join(self.builder.basedir, self.srcdir)
- command = [self.vcexe, 'update']
- c = ShellCommand(self.builder, command, srcdir,
- sendRC=False, timeout=self.timeout, usePTY=False)
- self.command = c
- return c.start()
-
- def doVCFull(self):
- # checkout or export
- d = self.builder.basedir
- if self.mode == "export":
- # exporting in bzr requires a separate directory
- return self.doVCExport()
- # originally I added --lightweight here, but then 'bzr revno' is
- # wrong. The revno reported in 'bzr version-info' is correct,
- # however. Maybe this is a bzr bug?
- #
- # In addition, you cannot perform a 'bzr update' on a repo pulled
- # from an HTTP repository that used 'bzr checkout --lightweight'. You
- # get a "ERROR: Cannot lock: transport is read only" when you try.
- #
- # So I won't bother using --lightweight for now.
-
- command = [self.vcexe, 'checkout']
- if self.revision:
- command.append('--revision')
- command.append(str(self.revision))
- command.append(self.repourl)
- command.append(self.srcdir)
-
- c = ShellCommand(self.builder, command, d,
- sendRC=False, timeout=self.timeout, usePTY=False)
- self.command = c
- d = c.start()
- return d
-
- def doVCExport(self):
- tmpdir = os.path.join(self.builder.basedir, "export-temp")
- srcdir = os.path.join(self.builder.basedir, self.srcdir)
- command = [self.vcexe, 'checkout', '--lightweight']
- if self.revision:
- command.append('--revision')
- command.append(str(self.revision))
- command.append(self.repourl)
- command.append(tmpdir)
- c = ShellCommand(self.builder, command, self.builder.basedir,
- sendRC=False, timeout=self.timeout, usePTY=False)
- self.command = c
- d = c.start()
- def _export(res):
- command = [self.vcexe, 'export', srcdir]
- c = ShellCommand(self.builder, command, tmpdir,
- sendRC=False, timeout=self.timeout, usePTY=False)
- self.command = c
- return c.start()
- d.addCallback(_export)
- return d
-
- def get_revision_number(self, out):
- # it feels like 'bzr revno' sometimes gives different results than
- # the 'revno:' line from 'bzr version-info', and the one from
- # version-info is more likely to be correct.
- for line in out.split("\n"):
- colon = line.find(":")
- if colon != -1:
- key, value = line[:colon], line[colon+2:]
- if key == "revno":
- return int(value)
- raise ValueError("unable to find revno: in bzr output: '%s'" % out)
-
- def parseGotRevision(self):
- command = [self.vcexe, "version-info"]
- c = ShellCommand(self.builder, command,
- os.path.join(self.builder.basedir, self.srcdir),
- environ=self.env,
- sendStdout=False, sendStderr=False, sendRC=False,
- keepStdout=True, usePTY=False)
- d = c.start()
- def _parse(res):
- try:
- return self.get_revision_number(c.stdout)
- except ValueError:
- msg =("Bzr.parseGotRevision unable to parse output "
- "of bzr version-info: '%s'" % c.stdout.strip())
- log.msg(msg)
- self.sendStatus({'header': msg + "\n"})
- return None
- d.addCallback(_parse)
- return d
-
-registerSlaveCommand("bzr", Bzr, command_version)
-
-class Mercurial(SourceBase):
- """Mercurial specific VC operation. In addition to the arguments
- handled by SourceBase, this command reads the following keys:
-
- ['repourl'] (required): the Cogito repository string
- """
-
- header = "mercurial operation"
-
- def setup(self, args):
- SourceBase.setup(self, args)
- self.vcexe = getCommand("hg")
- self.repourl = args['repourl']
- self.sourcedata = "%s\n" % self.repourl
- self.stdout = ""
- self.stderr = ""
-
- def sourcedirIsUpdateable(self):
- if os.path.exists(os.path.join(self.builder.basedir,
- self.srcdir, ".buildbot-patched")):
- return False
- # like Darcs, to check out a specific (old) revision, we have to do a
- # full checkout. TODO: I think 'hg pull' plus 'hg update' might work
- if self.revision:
- return False
- return os.path.isdir(os.path.join(self.builder.basedir,
- self.srcdir, ".hg"))
-
- def doVCUpdate(self):
- d = os.path.join(self.builder.basedir, self.srcdir)
- command = [self.vcexe, 'pull', '--verbose', self.repourl]
- c = ShellCommand(self.builder, command, d,
- sendRC=False, timeout=self.timeout,
- keepStdout=True, usePTY=False)
- self.command = c
- d = c.start()
- d.addCallback(self._handleEmptyUpdate)
- d.addCallback(self._update)
- return d
-
- def _handleEmptyUpdate(self, res):
- if type(res) is int and res == 1:
- if self.command.stdout.find("no changes found") != -1:
- # 'hg pull', when it doesn't have anything to do, exits with
- # rc=1, and there appears to be no way to shut this off. It
- # emits a distinctive message to stdout, though. So catch
- # this and pretend that it completed successfully.
- return 0
- return res
-
- def doVCFull(self):
- d = os.path.join(self.builder.basedir, self.srcdir)
- command = [self.vcexe, 'init', d]
- c = ShellCommand(self.builder, command, self.builder.basedir,
- sendRC=False, timeout=self.timeout, usePTY=False)
- self.command = c
- cmd1 = c.start()
-
- def _vcupdate(res):
- return self.doVCUpdate()
-
- cmd1.addCallback(_vcupdate)
- return cmd1
-
- def _update(self, res):
- if res != 0:
- return res
-
- # compare current branch to update
- self.update_branch = self.args.get('branch', 'default')
-
- d = os.path.join(self.builder.basedir, self.srcdir)
- parentscmd = [self.vcexe, 'identify', '--num', '--branch']
- cmd = ShellCommand(self.builder, parentscmd, d,
- sendStdout=False, sendStderr=False,
- keepStdout=True, keepStderr=True, usePTY=False)
-
- def _parse(res):
- if res != 0:
- msg = "'hg identify' failed: %s\n%s" % (cmd.stdout, cmd.stderr)
- self.sendStatus({'header': msg + "\n"})
- log.msg(msg)
- return res
-
- log.msg('Output: %s' % cmd.stdout)
-
- match = re.search(r'^(.+) (.+)$', cmd.stdout)
- assert match
-
- rev = match.group(1)
- current_branch = match.group(2)
-
- if rev == '-1':
- msg = "Fresh hg repo, don't worry about branch"
- log.msg(msg)
-
- elif self.update_branch != current_branch:
- msg = "Working dir is on branch '%s' and build needs '%s'. Clobbering." % (current_branch, self.update_branch)
- self.sendStatus({'header': msg + "\n"})
- log.msg(msg)
-
- def _vcfull(res):
- return self.doVCFull()
-
- d = self.doClobber(None, self.srcdir)
- d.addCallback(_vcfull)
- return d
-
- else:
- msg = "Working dir on same branch as build (%s)." % (current_branch)
- log.msg(msg)
-
- return 0
-
- c = cmd.start()
- c.addCallback(_parse)
- c.addCallback(self._update2)
- return c
-
- def _update2(self, res):
- d = os.path.join(self.builder.basedir, self.srcdir)
-
- updatecmd=[self.vcexe, 'update', '--clean', '--repository', d]
- if self.args.get('revision'):
- updatecmd.extend(['--rev', self.args['revision']])
- else:
- updatecmd.extend(['--rev', self.args.get('branch', 'default')])
- self.command = ShellCommand(self.builder, updatecmd,
- self.builder.basedir, sendRC=False,
- timeout=self.timeout, usePTY=False)
- return self.command.start()
-
- def parseGotRevision(self):
- # we use 'hg identify' to find out what we wound up with
- command = [self.vcexe, "identify"]
- c = ShellCommand(self.builder, command,
- os.path.join(self.builder.basedir, self.srcdir),
- environ=self.env,
- sendStdout=False, sendStderr=False, sendRC=False,
- keepStdout=True, usePTY=False)
- d = c.start()
- def _parse(res):
- m = re.search(r'^(\w+)', c.stdout)
- return m.group(1)
- d.addCallback(_parse)
- return d
-
-registerSlaveCommand("hg", Mercurial, command_version)
-
-
-class P4Base(SourceBase):
- """Base class for P4 source-updaters
-
- ['p4port'] (required): host:port for server to access
- ['p4user'] (optional): user to use for access
- ['p4passwd'] (optional): passwd to try for the user
- ['p4client'] (optional): client spec to use
- """
- def setup(self, args):
- SourceBase.setup(self, args)
- self.p4port = args['p4port']
- self.p4client = args['p4client']
- self.p4user = args['p4user']
- self.p4passwd = args['p4passwd']
-
- def parseGotRevision(self):
- # Executes a p4 command that will give us the latest changelist number
- # of any file under the current (or default) client:
- command = ['p4']
- if self.p4port:
- command.extend(['-p', self.p4port])
- if self.p4user:
- command.extend(['-u', self.p4user])
- if self.p4passwd:
- command.extend(['-P', self.p4passwd])
- if self.p4client:
- command.extend(['-c', self.p4client])
- command.extend(['changes', '-m', '1', '#have'])
- c = ShellCommand(self.builder, command, self.builder.basedir,
- environ=self.env, timeout=self.timeout,
- sendStdout=True, sendStderr=False, sendRC=False,
- keepStdout=True, usePTY=False)
- self.command = c
- d = c.start()
-
- def _parse(res):
- # 'p4 -c client-name changes -m 1 "#have"' will produce an output like:
- # "Change 28147 on 2008/04/07 by p4user@hostname..."
- # The number after "Change" is the one we want.
- m = re.match('Change\s+(\d+)\s+', c.stdout)
- if m:
- return m.group(1)
- return None
- d.addCallback(_parse)
- return d
-
-
-class P4(P4Base):
- """A P4 source-updater.
-
- ['p4port'] (required): host:port for server to access
- ['p4user'] (optional): user to use for access
- ['p4passwd'] (optional): passwd to try for the user
- ['p4client'] (optional): client spec to use
- ['p4extra_views'] (optional): additional client views to use
- """
-
- header = "p4"
-
- def setup(self, args):
- P4Base.setup(self, args)
- self.p4base = args['p4base']
- self.p4extra_views = args['p4extra_views']
- self.p4mode = args['mode']
- self.p4branch = args['branch']
-
- self.sourcedata = str([
- # Perforce server.
- self.p4port,
-
- # Client spec.
- self.p4client,
-
- # Depot side of view spec.
- self.p4base,
- self.p4branch,
- self.p4extra_views,
-
- # Local side of view spec (srcdir is made from these).
- self.builder.basedir,
- self.mode,
- self.workdir
- ])
-
-
- def sourcedirIsUpdateable(self):
- if os.path.exists(os.path.join(self.builder.basedir,
- self.srcdir, ".buildbot-patched")):
- return False
- # We assume our client spec is still around.
- # We just say we aren't updateable if the dir doesn't exist so we
- # don't get ENOENT checking the sourcedata.
- return os.path.isdir(os.path.join(self.builder.basedir,
- self.srcdir))
-
- def doVCUpdate(self):
- return self._doP4Sync(force=False)
-
- def _doP4Sync(self, force):
- command = ['p4']
-
- if self.p4port:
- command.extend(['-p', self.p4port])
- if self.p4user:
- command.extend(['-u', self.p4user])
- if self.p4passwd:
- command.extend(['-P', self.p4passwd])
- if self.p4client:
- command.extend(['-c', self.p4client])
- command.extend(['sync'])
- if force:
- command.extend(['-f'])
- if self.revision:
- command.extend(['@' + str(self.revision)])
- env = {}
- c = ShellCommand(self.builder, command, self.builder.basedir,
- environ=env, sendRC=False, timeout=self.timeout,
- keepStdout=True, usePTY=False)
- self.command = c
- d = c.start()
- d.addCallback(self._abandonOnFailure)
- return d
-
-
- def doVCFull(self):
- env = {}
- command = ['p4']
- client_spec = ''
- client_spec += "Client: %s\n\n" % self.p4client
- client_spec += "Owner: %s\n\n" % self.p4user
- client_spec += "Description:\n\tCreated by %s\n\n" % self.p4user
- client_spec += "Root:\t%s\n\n" % self.builder.basedir
- client_spec += "Options:\tallwrite rmdir\n\n"
- client_spec += "LineEnd:\tlocal\n\n"
-
- # Setup a view
- client_spec += "View:\n\t%s" % (self.p4base)
- if self.p4branch:
- client_spec += "%s/" % (self.p4branch)
- client_spec += "... //%s/%s/...\n" % (self.p4client, self.srcdir)
- if self.p4extra_views:
- for k, v in self.p4extra_views:
- client_spec += "\t%s/... //%s/%s%s/...\n" % (k, self.p4client,
- self.srcdir, v)
- if self.p4port:
- command.extend(['-p', self.p4port])
- if self.p4user:
- command.extend(['-u', self.p4user])
- if self.p4passwd:
- command.extend(['-P', self.p4passwd])
- command.extend(['client', '-i'])
- log.msg(client_spec)
- c = ShellCommand(self.builder, command, self.builder.basedir,
- environ=env, sendRC=False, timeout=self.timeout,
- initialStdin=client_spec, usePTY=False)
- self.command = c
- d = c.start()
- d.addCallback(self._abandonOnFailure)
- d.addCallback(lambda _: self._doP4Sync(force=True))
- return d
-
-registerSlaveCommand("p4", P4, command_version)
-
-
-class P4Sync(P4Base):
- """A partial P4 source-updater. Requires manual setup of a per-slave P4
- environment. The only thing which comes from the master is P4PORT.
- 'mode' is required to be 'copy'.
-
- ['p4port'] (required): host:port for server to access
- ['p4user'] (optional): user to use for access
- ['p4passwd'] (optional): passwd to try for the user
- ['p4client'] (optional): client spec to use
- """
-
- header = "p4 sync"
-
- def setup(self, args):
- P4Base.setup(self, args)
- self.vcexe = getCommand("p4")
-
- def sourcedirIsUpdateable(self):
- return True
-
- def _doVC(self, force):
- d = os.path.join(self.builder.basedir, self.srcdir)
- command = [self.vcexe]
- if self.p4port:
- command.extend(['-p', self.p4port])
- if self.p4user:
- command.extend(['-u', self.p4user])
- if self.p4passwd:
- command.extend(['-P', self.p4passwd])
- if self.p4client:
- command.extend(['-c', self.p4client])
- command.extend(['sync'])
- if force:
- command.extend(['-f'])
- if self.revision:
- command.extend(['@' + self.revision])
- env = {}
- c = ShellCommand(self.builder, command, d, environ=env,
- sendRC=False, timeout=self.timeout, usePTY=False)
- self.command = c
- return c.start()
-
- def doVCUpdate(self):
- return self._doVC(force=False)
-
- def doVCFull(self):
- return self._doVC(force=True)
-
-registerSlaveCommand("p4sync", P4Sync, command_version)
diff --git a/buildbot/buildbot/slave/interfaces.py b/buildbot/buildbot/slave/interfaces.py
deleted file mode 100644
index fb143a7..0000000
--- a/buildbot/buildbot/slave/interfaces.py
+++ /dev/null
@@ -1,56 +0,0 @@
-
-from zope.interface import Interface
-
-class ISlaveCommand(Interface):
- """This interface is implemented by all of the buildslave's Command
- subclasses. It specifies how the buildslave can start, interrupt, and
- query the various Commands running on behalf of the buildmaster."""
-
- def __init__(builder, stepId, args):
- """Create the Command. 'builder' is a reference to the parent
- buildbot.bot.SlaveBuilder instance, which will be used to send status
- updates (by calling builder.sendStatus). 'stepId' is a random string
- which helps correlate slave logs with the master. 'args' is a dict of
- arguments that comes from the master-side BuildStep, with contents
- that are specific to the individual Command subclass.
-
- This method is not intended to be subclassed."""
-
- def setup(args):
- """This method is provided for subclasses to override, to extract
- parameters from the 'args' dictionary. The default implementation does
- nothing. It will be called from __init__"""
-
- def start():
- """Begin the command, and return a Deferred.
-
- While the command runs, it should send status updates to the
- master-side BuildStep by calling self.sendStatus(status). The
- 'status' argument is typically a dict with keys like 'stdout',
- 'stderr', and 'rc'.
-
- When the step completes, it should fire the Deferred (the results are
- not used). If an exception occurs during execution, it may also
- errback the deferred, however any reasonable errors should be trapped
- and indicated with a non-zero 'rc' status rather than raising an
- exception. Exceptions should indicate problems within the buildbot
- itself, not problems in the project being tested.
-
- """
-
- def interrupt():
- """This is called to tell the Command that the build is being stopped
- and therefore the command should be terminated as quickly as
- possible. The command may continue to send status updates, up to and
- including an 'rc' end-of-command update (which should indicate an
- error condition). The Command's deferred should still be fired when
- the command has finally completed.
-
- If the build is being stopped because the slave is shutting down or
- because the connection to the buildmaster has been lost, the status
- updates will simply be discarded. The Command does not need to be
- aware of this.
-
- Child shell processes should be killed. Simple ShellCommand classes
- can just insert a header line indicating that the process will be
- killed, then os.kill() the child."""
diff --git a/buildbot/buildbot/slave/registry.py b/buildbot/buildbot/slave/registry.py
deleted file mode 100644
index 772aad3..0000000
--- a/buildbot/buildbot/slave/registry.py
+++ /dev/null
@@ -1,17 +0,0 @@
-
-commandRegistry = {}
-
-def registerSlaveCommand(name, factory, version):
- """
- Register a slave command with the registry, making it available in slaves.
-
- @type name: string
- @param name: name under which the slave command will be registered; used
- for L{buildbot.slave.bot.SlaveBuilder.remote_startCommand}
-
- @type factory: L{buildbot.slave.commands.Command}
- @type version: string
- @param version: version string of the factory code
- """
- assert not commandRegistry.has_key(name)
- commandRegistry[name] = (factory, version)
diff --git a/buildbot/buildbot/sourcestamp.py b/buildbot/buildbot/sourcestamp.py
deleted file mode 100644
index e2162ca..0000000
--- a/buildbot/buildbot/sourcestamp.py
+++ /dev/null
@@ -1,95 +0,0 @@
-
-from zope.interface import implements
-from buildbot import util, interfaces
-
-class SourceStamp(util.ComparableMixin):
- """This is a tuple of (branch, revision, patchspec, changes).
-
- C{branch} is always valid, although it may be None to let the Source
- step use its default branch. There are three possibilities for the
- remaining elements:
- - (revision=REV, patchspec=None, changes=None): build REV. If REV is
- None, build the HEAD revision from the given branch.
- - (revision=REV, patchspec=(LEVEL, DIFF), changes=None): checkout REV,
- then apply a patch to the source, with C{patch -pPATCHLEVEL <DIFF}.
- If REV is None, checkout HEAD and patch it.
- - (revision=None, patchspec=None, changes=[CHANGES]): let the Source
- step check out the latest revision indicated by the given Changes.
- CHANGES is a tuple of L{buildbot.changes.changes.Change} instances,
- and all must be on the same branch.
- """
-
- # all four of these are publicly visible attributes
- branch = None
- revision = None
- patch = None
- changes = ()
-
- compare_attrs = ('branch', 'revision', 'patch', 'changes')
-
- implements(interfaces.ISourceStamp)
-
- def __init__(self, branch=None, revision=None, patch=None,
- changes=None):
- self.branch = branch
- self.revision = revision
- self.patch = patch
- if changes:
- self.changes = tuple(changes)
- self.branch = changes[0].branch
-
- def canBeMergedWith(self, other):
- if other.branch != self.branch:
- return False # the builds are completely unrelated
-
- if self.changes and other.changes:
- # TODO: consider not merging these. It's a tradeoff between
- # minimizing the number of builds and obtaining finer-grained
- # results.
- return True
- elif self.changes and not other.changes:
- return False # we're using changes, they aren't
- elif not self.changes and other.changes:
- return False # they're using changes, we aren't
-
- if self.patch or other.patch:
- return False # you can't merge patched builds with anything
- if self.revision == other.revision:
- # both builds are using the same specific revision, so they can
- # be merged. It might be the case that revision==None, so they're
- # both building HEAD.
- return True
-
- return False
-
- def mergeWith(self, others):
- """Generate a SourceStamp for the merger of me and all the other
- BuildRequests. This is called by a Build when it starts, to figure
- out what its sourceStamp should be."""
-
- # either we're all building the same thing (changes==None), or we're
- # all building changes (which can be merged)
- changes = []
- changes.extend(self.changes)
- for req in others:
- assert self.canBeMergedWith(req) # should have been checked already
- changes.extend(req.changes)
- newsource = SourceStamp(branch=self.branch,
- revision=self.revision,
- patch=self.patch,
- changes=changes)
- return newsource
-
- def getAbsoluteSourceStamp(self, got_revision):
- return SourceStamp(branch=self.branch, revision=got_revision, patch=self.patch)
-
- def getText(self):
- # TODO: this won't work for VC's with huge 'revision' strings
- if self.revision is None:
- return [ "latest" ]
- text = [ str(self.revision) ]
- if self.branch:
- text.append("in '%s'" % self.branch)
- if self.patch:
- text.append("[patch]")
- return text
diff --git a/buildbot/buildbot/status/__init__.py b/buildbot/buildbot/status/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/buildbot/buildbot/status/__init__.py
+++ /dev/null
diff --git a/buildbot/buildbot/status/base.py b/buildbot/buildbot/status/base.py
deleted file mode 100644
index 7588198..0000000
--- a/buildbot/buildbot/status/base.py
+++ /dev/null
@@ -1,69 +0,0 @@
-
-from zope.interface import implements
-from twisted.application import service
-
-from buildbot.interfaces import IStatusReceiver
-from buildbot import util, pbutil
-
-class StatusReceiver:
- implements(IStatusReceiver)
-
- def requestSubmitted(self, request):
- pass
-
- def buildsetSubmitted(self, buildset):
- pass
-
- def builderAdded(self, builderName, builder):
- pass
-
- def builderChangedState(self, builderName, state):
- pass
-
- def buildStarted(self, builderName, build):
- pass
-
- def buildETAUpdate(self, build, ETA):
- pass
-
- def stepStarted(self, build, step):
- pass
-
- def stepTextChanged(self, build, step, text):
- pass
-
- def stepText2Changed(self, build, step, text2):
- pass
-
- def stepETAUpdate(self, build, step, ETA, expectations):
- pass
-
- def logStarted(self, build, step, log):
- pass
-
- def logChunk(self, build, step, log, channel, text):
- pass
-
- def logFinished(self, build, step, log):
- pass
-
- def stepFinished(self, build, step, results):
- pass
-
- def buildFinished(self, builderName, build, results):
- pass
-
- def builderRemoved(self, builderName):
- pass
-
-class StatusReceiverMultiService(StatusReceiver, service.MultiService,
- util.ComparableMixin):
- implements(IStatusReceiver)
-
- def __init__(self):
- service.MultiService.__init__(self)
-
-
-class StatusReceiverPerspective(StatusReceiver, pbutil.NewCredPerspective):
- implements(IStatusReceiver)
-
diff --git a/buildbot/buildbot/status/builder.py b/buildbot/buildbot/status/builder.py
deleted file mode 100644
index 97f356f..0000000
--- a/buildbot/buildbot/status/builder.py
+++ /dev/null
@@ -1,2182 +0,0 @@
-# -*- test-case-name: buildbot.test.test_status -*-
-
-from zope.interface import implements
-from twisted.python import log
-from twisted.persisted import styles
-from twisted.internet import reactor, defer, threads
-from twisted.protocols import basic
-from buildbot.process.properties import Properties
-
-import os, shutil, sys, re, urllib, itertools
-from cPickle import load, dump
-from cStringIO import StringIO
-from bz2 import BZ2File
-
-# sibling imports
-from buildbot import interfaces, util, sourcestamp
-
-SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION = range(5)
-Results = ["success", "warnings", "failure", "skipped", "exception"]
-
-
-# build processes call the following methods:
-#
-# setDefaults
-#
-# currentlyBuilding
-# currentlyIdle
-# currentlyInterlocked
-# currentlyOffline
-# currentlyWaiting
-#
-# setCurrentActivity
-# updateCurrentActivity
-# addFileToCurrentActivity
-# finishCurrentActivity
-#
-# startBuild
-# finishBuild
-
-STDOUT = interfaces.LOG_CHANNEL_STDOUT
-STDERR = interfaces.LOG_CHANNEL_STDERR
-HEADER = interfaces.LOG_CHANNEL_HEADER
-ChunkTypes = ["stdout", "stderr", "header"]
-
-class LogFileScanner(basic.NetstringReceiver):
- def __init__(self, chunk_cb, channels=[]):
- self.chunk_cb = chunk_cb
- self.channels = channels
-
- def stringReceived(self, line):
- channel = int(line[0])
- if not self.channels or (channel in self.channels):
- self.chunk_cb((channel, line[1:]))
-
-class LogFileProducer:
- """What's the plan?
-
- the LogFile has just one FD, used for both reading and writing.
- Each time you add an entry, fd.seek to the end and then write.
-
- Each reader (i.e. Producer) keeps track of their own offset. The reader
- starts by seeking to the start of the logfile, and reading forwards.
- Between each hunk of file they yield chunks, so they must remember their
- offset before yielding and re-seek back to that offset before reading
- more data. When their read() returns EOF, they're finished with the first
- phase of the reading (everything that's already been written to disk).
-
- After EOF, the remaining data is entirely in the current entries list.
- These entries are all of the same channel, so we can do one "".join and
- obtain a single chunk to be sent to the listener. But since that involves
- a yield, and more data might arrive after we give up control, we have to
- subscribe them before yielding. We can't subscribe them any earlier,
- otherwise they'd get data out of order.
-
- We're using a generator in the first place so that the listener can
- throttle us, which means they're pulling. But the subscription means
- we're pushing. Really we're a Producer. In the first phase we can be
- either a PullProducer or a PushProducer. In the second phase we're only a
- PushProducer.
-
- So the client gives a LogFileConsumer to File.subscribeConsumer . This
- Consumer must have registerProducer(), unregisterProducer(), and
- writeChunk(), and is just like a regular twisted.interfaces.IConsumer,
- except that writeChunk() takes chunks (tuples of (channel,text)) instead
- of the normal write() which takes just text. The LogFileConsumer is
- allowed to call stopProducing, pauseProducing, and resumeProducing on the
- producer instance it is given. """
-
- paused = False
- subscribed = False
- BUFFERSIZE = 2048
-
- def __init__(self, logfile, consumer):
- self.logfile = logfile
- self.consumer = consumer
- self.chunkGenerator = self.getChunks()
- consumer.registerProducer(self, True)
-
- def getChunks(self):
- f = self.logfile.getFile()
- offset = 0
- chunks = []
- p = LogFileScanner(chunks.append)
- f.seek(offset)
- data = f.read(self.BUFFERSIZE)
- offset = f.tell()
- while data:
- p.dataReceived(data)
- while chunks:
- c = chunks.pop(0)
- yield c
- f.seek(offset)
- data = f.read(self.BUFFERSIZE)
- offset = f.tell()
- del f
-
- # now subscribe them to receive new entries
- self.subscribed = True
- self.logfile.watchers.append(self)
- d = self.logfile.waitUntilFinished()
-
- # then give them the not-yet-merged data
- if self.logfile.runEntries:
- channel = self.logfile.runEntries[0][0]
- text = "".join([c[1] for c in self.logfile.runEntries])
- yield (channel, text)
-
- # now we've caught up to the present. Anything further will come from
- # the logfile subscription. We add the callback *after* yielding the
- # data from runEntries, because the logfile might have finished
- # during the yield.
- d.addCallback(self.logfileFinished)
-
- def stopProducing(self):
- # TODO: should we still call consumer.finish? probably not.
- self.paused = True
- self.consumer = None
- self.done()
-
- def done(self):
- if self.chunkGenerator:
- self.chunkGenerator = None # stop making chunks
- if self.subscribed:
- self.logfile.watchers.remove(self)
- self.subscribed = False
-
- def pauseProducing(self):
- self.paused = True
-
- def resumeProducing(self):
- # Twisted-1.3.0 has a bug which causes hangs when resumeProducing
- # calls transport.write (there is a recursive loop, fixed in 2.0 in
- # t.i.abstract.FileDescriptor.doWrite by setting the producerPaused
- # flag *before* calling resumeProducing). To work around this, we
- # just put off the real resumeProducing for a moment. This probably
- # has a performance hit, but I'm going to assume that the log files
- # are not retrieved frequently enough for it to be an issue.
-
- reactor.callLater(0, self._resumeProducing)
-
- def _resumeProducing(self):
- self.paused = False
- if not self.chunkGenerator:
- return
- try:
- while not self.paused:
- chunk = self.chunkGenerator.next()
- self.consumer.writeChunk(chunk)
- # we exit this when the consumer says to stop, or we run out
- # of chunks
- except StopIteration:
- # if the generator finished, it will have done releaseFile
- self.chunkGenerator = None
- # now everything goes through the subscription, and they don't get to
- # pause anymore
-
- def logChunk(self, build, step, logfile, channel, chunk):
- if self.consumer:
- self.consumer.writeChunk((channel, chunk))
-
- def logfileFinished(self, logfile):
- self.done()
- if self.consumer:
- self.consumer.unregisterProducer()
- self.consumer.finish()
- self.consumer = None
-
-def _tryremove(filename, timeout, retries):
- """Try to remove a file, and if failed, try again in timeout.
- Increases the timeout by a factor of 4, and only keeps trying for
- another retries-amount of times.
-
- """
- try:
- os.unlink(filename)
- except OSError:
- if retries > 0:
- reactor.callLater(timeout, _tryremove, filename, timeout * 4,
- retries - 1)
- else:
- log.msg("giving up on removing %s after over %d seconds" %
- (filename, timeout))
-
-class LogFile:
- """A LogFile keeps all of its contents on disk, in a non-pickle format to
- which new entries can easily be appended. The file on disk has a name
- like 12-log-compile-output, under the Builder's directory. The actual
- filename is generated (before the LogFile is created) by
- L{BuildStatus.generateLogfileName}.
-
- Old LogFile pickles (which kept their contents in .entries) must be
- upgraded. The L{BuilderStatus} is responsible for doing this, when it
- loads the L{BuildStatus} into memory. The Build pickle is not modified,
- so users who go from 0.6.5 back to 0.6.4 don't have to lose their
- logs."""
-
- implements(interfaces.IStatusLog, interfaces.ILogFile)
-
- finished = False
- length = 0
- chunkSize = 10*1000
- runLength = 0
- runEntries = [] # provided so old pickled builds will getChunks() ok
- entries = None
- BUFFERSIZE = 2048
- filename = None # relative to the Builder's basedir
- openfile = None
-
- def __init__(self, parent, name, logfilename):
- """
- @type parent: L{BuildStepStatus}
- @param parent: the Step that this log is a part of
- @type name: string
- @param name: the name of this log, typically 'output'
- @type logfilename: string
- @param logfilename: the Builder-relative pathname for the saved entries
- """
- self.step = parent
- self.name = name
- self.filename = logfilename
- fn = self.getFilename()
- if os.path.exists(fn):
- # the buildmaster was probably stopped abruptly, before the
- # BuilderStatus could be saved, so BuilderStatus.nextBuildNumber
- # is out of date, and we're overlapping with earlier builds now.
- # Warn about it, but then overwrite the old pickle file
- log.msg("Warning: Overwriting old serialized Build at %s" % fn)
- self.openfile = open(fn, "w+")
- self.runEntries = []
- self.watchers = []
- self.finishedWatchers = []
-
- def getFilename(self):
- return os.path.join(self.step.build.builder.basedir, self.filename)
-
- def hasContents(self):
- return os.path.exists(self.getFilename() + '.bz2') or \
- os.path.exists(self.getFilename())
-
- def getName(self):
- return self.name
-
- def getStep(self):
- return self.step
-
- def isFinished(self):
- return self.finished
- def waitUntilFinished(self):
- if self.finished:
- d = defer.succeed(self)
- else:
- d = defer.Deferred()
- self.finishedWatchers.append(d)
- return d
-
- def getFile(self):
- if self.openfile:
- # this is the filehandle we're using to write to the log, so
- # don't close it!
- return self.openfile
- # otherwise they get their own read-only handle
- # try a compressed log first
- try:
- return BZ2File(self.getFilename() + ".bz2", "r")
- except IOError:
- pass
- return open(self.getFilename(), "r")
-
- def getText(self):
- # this produces one ginormous string
- return "".join(self.getChunks([STDOUT, STDERR], onlyText=True))
-
- def getTextWithHeaders(self):
- return "".join(self.getChunks(onlyText=True))
-
- def getChunks(self, channels=[], onlyText=False):
- # generate chunks for everything that was logged at the time we were
- # first called, so remember how long the file was when we started.
- # Don't read beyond that point. The current contents of
- # self.runEntries will follow.
-
- # this returns an iterator, which means arbitrary things could happen
- # while we're yielding. This will faithfully deliver the log as it
- # existed when it was started, and not return anything after that
- # point. To use this in subscribe(catchup=True) without missing any
- # data, you must insure that nothing will be added to the log during
- # yield() calls.
-
- f = self.getFile()
- offset = 0
- f.seek(0, 2)
- remaining = f.tell()
-
- leftover = None
- if self.runEntries and (not channels or
- (self.runEntries[0][0] in channels)):
- leftover = (self.runEntries[0][0],
- "".join([c[1] for c in self.runEntries]))
-
- # freeze the state of the LogFile by passing a lot of parameters into
- # a generator
- return self._generateChunks(f, offset, remaining, leftover,
- channels, onlyText)
-
- def _generateChunks(self, f, offset, remaining, leftover,
- channels, onlyText):
- chunks = []
- p = LogFileScanner(chunks.append, channels)
- f.seek(offset)
- data = f.read(min(remaining, self.BUFFERSIZE))
- remaining -= len(data)
- offset = f.tell()
- while data:
- p.dataReceived(data)
- while chunks:
- channel, text = chunks.pop(0)
- if onlyText:
- yield text
- else:
- yield (channel, text)
- f.seek(offset)
- data = f.read(min(remaining, self.BUFFERSIZE))
- remaining -= len(data)
- offset = f.tell()
- del f
-
- if leftover:
- if onlyText:
- yield leftover[1]
- else:
- yield leftover
-
- def readlines(self, channel=STDOUT):
- """Return an iterator that produces newline-terminated lines,
- excluding header chunks."""
- # TODO: make this memory-efficient, by turning it into a generator
- # that retrieves chunks as necessary, like a pull-driven version of
- # twisted.protocols.basic.LineReceiver
- alltext = "".join(self.getChunks([channel], onlyText=True))
- io = StringIO(alltext)
- return io.readlines()
-
- def subscribe(self, receiver, catchup):
- if self.finished:
- return
- self.watchers.append(receiver)
- if catchup:
- for channel, text in self.getChunks():
- # TODO: add logChunks(), to send over everything at once?
- receiver.logChunk(self.step.build, self.step, self,
- channel, text)
-
- def unsubscribe(self, receiver):
- if receiver in self.watchers:
- self.watchers.remove(receiver)
-
- def subscribeConsumer(self, consumer):
- p = LogFileProducer(self, consumer)
- p.resumeProducing()
-
- # interface used by the build steps to add things to the log
-
- def merge(self):
- # merge all .runEntries (which are all of the same type) into a
- # single chunk for .entries
- if not self.runEntries:
- return
- channel = self.runEntries[0][0]
- text = "".join([c[1] for c in self.runEntries])
- assert channel < 10
- f = self.openfile
- f.seek(0, 2)
- offset = 0
- while offset < len(text):
- size = min(len(text)-offset, self.chunkSize)
- f.write("%d:%d" % (1 + size, channel))
- f.write(text[offset:offset+size])
- f.write(",")
- offset += size
- self.runEntries = []
- self.runLength = 0
-
- def addEntry(self, channel, text):
- assert not self.finished
- # we only add to .runEntries here. merge() is responsible for adding
- # merged chunks to .entries
- if self.runEntries and channel != self.runEntries[0][0]:
- self.merge()
- self.runEntries.append((channel, text))
- self.runLength += len(text)
- if self.runLength >= self.chunkSize:
- self.merge()
-
- for w in self.watchers:
- w.logChunk(self.step.build, self.step, self, channel, text)
- self.length += len(text)
-
- def addStdout(self, text):
- self.addEntry(STDOUT, text)
- def addStderr(self, text):
- self.addEntry(STDERR, text)
- def addHeader(self, text):
- self.addEntry(HEADER, text)
-
- def finish(self):
- self.merge()
- if self.openfile:
- # we don't do an explicit close, because there might be readers
- # sharing the filehandle. As soon as they stop reading, the
- # filehandle will be released and automatically closed. We will
- # do a sync, however, to make sure the log gets saved in case of
- # a crash.
- self.openfile.flush()
- os.fsync(self.openfile.fileno())
- del self.openfile
- self.finished = True
- watchers = self.finishedWatchers
- self.finishedWatchers = []
- for w in watchers:
- w.callback(self)
- self.watchers = []
-
-
- def compressLog(self):
- compressed = self.getFilename() + ".bz2.tmp"
- d = threads.deferToThread(self._compressLog, compressed)
- d.addCallback(self._renameCompressedLog, compressed)
- d.addErrback(self._cleanupFailedCompress, compressed)
- return d
-
- def _compressLog(self, compressed):
- infile = self.getFile()
- cf = BZ2File(compressed, 'w')
- bufsize = 1024*1024
- while True:
- buf = infile.read(bufsize)
- cf.write(buf)
- if len(buf) < bufsize:
- break
- cf.close()
- def _renameCompressedLog(self, rv, compressed):
- filename = self.getFilename() + '.bz2'
- if sys.platform == 'win32':
- # windows cannot rename a file on top of an existing one, so
- # fall back to delete-first. There are ways this can fail and
- # lose the builder's history, so we avoid using it in the
- # general (non-windows) case
- if os.path.exists(filename):
- os.unlink(filename)
- os.rename(compressed, filename)
- _tryremove(self.getFilename(), 1, 5)
- def _cleanupFailedCompress(self, failure, compressed):
- log.msg("failed to compress %s" % self.getFilename())
- if os.path.exists(compressed):
- _tryremove(compressed, 1, 5)
- failure.trap() # reraise the failure
-
- # persistence stuff
- def __getstate__(self):
- d = self.__dict__.copy()
- del d['step'] # filled in upon unpickling
- del d['watchers']
- del d['finishedWatchers']
- d['entries'] = [] # let 0.6.4 tolerate the saved log. TODO: really?
- if d.has_key('finished'):
- del d['finished']
- if d.has_key('openfile'):
- del d['openfile']
- return d
-
- def __setstate__(self, d):
- self.__dict__ = d
- self.watchers = [] # probably not necessary
- self.finishedWatchers = [] # same
- # self.step must be filled in by our parent
- self.finished = True
-
- def upgrade(self, logfilename):
- """Save our .entries to a new-style offline log file (if necessary),
- and modify our in-memory representation to use it. The original
- pickled LogFile (inside the pickled Build) won't be modified."""
- self.filename = logfilename
- if not os.path.exists(self.getFilename()):
- self.openfile = open(self.getFilename(), "w")
- self.finished = False
- for channel,text in self.entries:
- self.addEntry(channel, text)
- self.finish() # releases self.openfile, which will be closed
- del self.entries
-
-class HTMLLogFile:
- implements(interfaces.IStatusLog)
-
- filename = None
-
- def __init__(self, parent, name, logfilename, html):
- self.step = parent
- self.name = name
- self.filename = logfilename
- self.html = html
-
- def getName(self):
- return self.name # set in BuildStepStatus.addLog
- def getStep(self):
- return self.step
-
- def isFinished(self):
- return True
- def waitUntilFinished(self):
- return defer.succeed(self)
-
- def hasContents(self):
- return True
- def getText(self):
- return self.html # looks kinda like text
- def getTextWithHeaders(self):
- return self.html
- def getChunks(self):
- return [(STDERR, self.html)]
-
- def subscribe(self, receiver, catchup):
- pass
- def unsubscribe(self, receiver):
- pass
-
- def finish(self):
- pass
-
- def __getstate__(self):
- d = self.__dict__.copy()
- del d['step']
- return d
-
- def upgrade(self, logfilename):
- pass
-
-
-class Event:
- implements(interfaces.IStatusEvent)
-
- started = None
- finished = None
- text = []
-
- # IStatusEvent methods
- def getTimes(self):
- return (self.started, self.finished)
- def getText(self):
- return self.text
- def getLogs(self):
- return []
-
- def finish(self):
- self.finished = util.now()
-
-class TestResult:
- implements(interfaces.ITestResult)
-
- def __init__(self, name, results, text, logs):
- assert isinstance(name, tuple)
- self.name = name
- self.results = results
- self.text = text
- self.logs = logs
-
- def getName(self):
- return self.name
-
- def getResults(self):
- return self.results
-
- def getText(self):
- return self.text
-
- def getLogs(self):
- return self.logs
-
-
-class BuildSetStatus:
- implements(interfaces.IBuildSetStatus)
-
- def __init__(self, source, reason, builderNames, bsid=None):
- self.source = source
- self.reason = reason
- self.builderNames = builderNames
- self.id = bsid
- self.successWatchers = []
- self.finishedWatchers = []
- self.stillHopeful = True
- self.finished = False
-
- def setBuildRequestStatuses(self, buildRequestStatuses):
- self.buildRequests = buildRequestStatuses
- def setResults(self, results):
- # the build set succeeds only if all its component builds succeed
- self.results = results
- def giveUpHope(self):
- self.stillHopeful = False
-
-
- def notifySuccessWatchers(self):
- for d in self.successWatchers:
- d.callback(self)
- self.successWatchers = []
-
- def notifyFinishedWatchers(self):
- self.finished = True
- for d in self.finishedWatchers:
- d.callback(self)
- self.finishedWatchers = []
-
- # methods for our clients
-
- def getSourceStamp(self):
- return self.source
- def getReason(self):
- return self.reason
- def getResults(self):
- return self.results
- def getID(self):
- return self.id
-
- def getBuilderNames(self):
- return self.builderNames
- def getBuildRequests(self):
- return self.buildRequests
- def isFinished(self):
- return self.finished
-
- def waitUntilSuccess(self):
- if self.finished or not self.stillHopeful:
- # the deferreds have already fired
- return defer.succeed(self)
- d = defer.Deferred()
- self.successWatchers.append(d)
- return d
-
- def waitUntilFinished(self):
- if self.finished:
- return defer.succeed(self)
- d = defer.Deferred()
- self.finishedWatchers.append(d)
- return d
-
-class BuildRequestStatus:
- implements(interfaces.IBuildRequestStatus)
-
- def __init__(self, source, builderName):
- self.source = source
- self.builderName = builderName
- self.builds = [] # list of BuildStatus objects
- self.observers = []
- self.submittedAt = None
-
- def buildStarted(self, build):
- self.builds.append(build)
- for o in self.observers[:]:
- o(build)
-
- # methods called by our clients
- def getSourceStamp(self):
- return self.source
- def getBuilderName(self):
- return self.builderName
- def getBuilds(self):
- return self.builds
-
- def subscribe(self, observer):
- self.observers.append(observer)
- for b in self.builds:
- observer(b)
- def unsubscribe(self, observer):
- self.observers.remove(observer)
-
- def getSubmitTime(self):
- return self.submittedAt
- def setSubmitTime(self, t):
- self.submittedAt = t
-
-
-class BuildStepStatus(styles.Versioned):
- """
- I represent a collection of output status for a
- L{buildbot.process.step.BuildStep}.
-
- Statistics contain any information gleaned from a step that is
- not in the form of a logfile. As an example, steps that run
- tests might gather statistics about the number of passed, failed,
- or skipped tests.
-
- @type progress: L{buildbot.status.progress.StepProgress}
- @cvar progress: tracks ETA for the step
- @type text: list of strings
- @cvar text: list of short texts that describe the command and its status
- @type text2: list of strings
- @cvar text2: list of short texts added to the overall build description
- @type logs: dict of string -> L{buildbot.status.builder.LogFile}
- @ivar logs: logs of steps
- @type statistics: dict
- @ivar statistics: results from running this step
- """
- # note that these are created when the Build is set up, before each
- # corresponding BuildStep has started.
- implements(interfaces.IBuildStepStatus, interfaces.IStatusEvent)
- persistenceVersion = 2
-
- started = None
- finished = None
- progress = None
- text = []
- results = (None, [])
- text2 = []
- watchers = []
- updates = {}
- finishedWatchers = []
- statistics = {}
-
- def __init__(self, parent):
- assert interfaces.IBuildStatus(parent)
- self.build = parent
- self.logs = []
- self.urls = {}
- self.watchers = []
- self.updates = {}
- self.finishedWatchers = []
- self.statistics = {}
-
- def getName(self):
- """Returns a short string with the name of this step. This string
- may have spaces in it."""
- return self.name
-
- def getBuild(self):
- return self.build
-
- def getTimes(self):
- return (self.started, self.finished)
-
- def getExpectations(self):
- """Returns a list of tuples (name, current, target)."""
- if not self.progress:
- return []
- ret = []
- metrics = self.progress.progress.keys()
- metrics.sort()
- for m in metrics:
- t = (m, self.progress.progress[m], self.progress.expectations[m])
- ret.append(t)
- return ret
-
- def getLogs(self):
- return self.logs
-
- def getURLs(self):
- return self.urls.copy()
-
- def isFinished(self):
- return (self.finished is not None)
-
- def waitUntilFinished(self):
- if self.finished:
- d = defer.succeed(self)
- else:
- d = defer.Deferred()
- self.finishedWatchers.append(d)
- return d
-
- # while the step is running, the following methods make sense.
- # Afterwards they return None
-
- def getETA(self):
- if self.started is None:
- return None # not started yet
- if self.finished is not None:
- return None # already finished
- if not self.progress:
- return None # no way to predict
- return self.progress.remaining()
-
- # Once you know the step has finished, the following methods are legal.
- # Before this step has finished, they all return None.
-
- def getText(self):
- """Returns a list of strings which describe the step. These are
- intended to be displayed in a narrow column. If more space is
- available, the caller should join them together with spaces before
- presenting them to the user."""
- return self.text
-
- def getResults(self):
- """Return a tuple describing the results of the step.
- 'result' is one of the constants in L{buildbot.status.builder}:
- SUCCESS, WARNINGS, FAILURE, or SKIPPED.
- 'strings' is an optional list of strings that the step wants to
- append to the overall build's results. These strings are usually
- more terse than the ones returned by getText(): in particular,
- successful Steps do not usually contribute any text to the
- overall build.
-
- @rtype: tuple of int, list of strings
- @returns: (result, strings)
- """
- return (self.results, self.text2)
-
- def hasStatistic(self, name):
- """Return true if this step has a value for the given statistic.
- """
- return self.statistics.has_key(name)
-
- def getStatistic(self, name, default=None):
- """Return the given statistic, if present
- """
- return self.statistics.get(name, default)
-
- # subscription interface
-
- def subscribe(self, receiver, updateInterval=10):
- # will get logStarted, logFinished, stepETAUpdate
- assert receiver not in self.watchers
- self.watchers.append(receiver)
- self.sendETAUpdate(receiver, updateInterval)
-
- def sendETAUpdate(self, receiver, updateInterval):
- self.updates[receiver] = None
- # they might unsubscribe during stepETAUpdate
- receiver.stepETAUpdate(self.build, self,
- self.getETA(), self.getExpectations())
- if receiver in self.watchers:
- self.updates[receiver] = reactor.callLater(updateInterval,
- self.sendETAUpdate,
- receiver,
- updateInterval)
-
- def unsubscribe(self, receiver):
- if receiver in self.watchers:
- self.watchers.remove(receiver)
- if receiver in self.updates:
- if self.updates[receiver] is not None:
- self.updates[receiver].cancel()
- del self.updates[receiver]
-
-
- # methods to be invoked by the BuildStep
-
- def setName(self, stepname):
- self.name = stepname
-
- def setColor(self, color):
- log.msg("BuildStepStatus.setColor is no longer supported -- ignoring color %s" % (color,))
-
- def setProgress(self, stepprogress):
- self.progress = stepprogress
-
- def stepStarted(self):
- self.started = util.now()
- if self.build:
- self.build.stepStarted(self)
-
- def addLog(self, name):
- assert self.started # addLog before stepStarted won't notify watchers
- logfilename = self.build.generateLogfileName(self.name, name)
- log = LogFile(self, name, logfilename)
- self.logs.append(log)
- for w in self.watchers:
- receiver = w.logStarted(self.build, self, log)
- if receiver:
- log.subscribe(receiver, True)
- d = log.waitUntilFinished()
- d.addCallback(lambda log: log.unsubscribe(receiver))
- d = log.waitUntilFinished()
- d.addCallback(self.logFinished)
- return log
-
- def addHTMLLog(self, name, html):
- assert self.started # addLog before stepStarted won't notify watchers
- logfilename = self.build.generateLogfileName(self.name, name)
- log = HTMLLogFile(self, name, logfilename, html)
- self.logs.append(log)
- for w in self.watchers:
- receiver = w.logStarted(self.build, self, log)
- # TODO: think about this: there isn't much point in letting
- # them subscribe
- #if receiver:
- # log.subscribe(receiver, True)
- w.logFinished(self.build, self, log)
-
- def logFinished(self, log):
- for w in self.watchers:
- w.logFinished(self.build, self, log)
-
- def addURL(self, name, url):
- self.urls[name] = url
-
- def setText(self, text):
- self.text = text
- for w in self.watchers:
- w.stepTextChanged(self.build, self, text)
- def setText2(self, text):
- self.text2 = text
- for w in self.watchers:
- w.stepText2Changed(self.build, self, text)
-
- def setStatistic(self, name, value):
- """Set the given statistic. Usually called by subclasses.
- """
- self.statistics[name] = value
-
- def stepFinished(self, results):
- self.finished = util.now()
- self.results = results
- cld = [] # deferreds for log compression
- logCompressionLimit = self.build.builder.logCompressionLimit
- for loog in self.logs:
- if not loog.isFinished():
- loog.finish()
- # if log compression is on, and it's a real LogFile,
- # HTMLLogFiles aren't files
- if logCompressionLimit is not False and \
- isinstance(loog, LogFile):
- if os.path.getsize(loog.getFilename()) > logCompressionLimit:
- cld.append(loog.compressLog())
-
- for r in self.updates.keys():
- if self.updates[r] is not None:
- self.updates[r].cancel()
- del self.updates[r]
-
- watchers = self.finishedWatchers
- self.finishedWatchers = []
- for w in watchers:
- w.callback(self)
- if cld:
- return defer.DeferredList(cld)
-
- # persistence
-
- def __getstate__(self):
- d = styles.Versioned.__getstate__(self)
- del d['build'] # filled in when loading
- if d.has_key('progress'):
- del d['progress']
- del d['watchers']
- del d['finishedWatchers']
- del d['updates']
- return d
-
- def __setstate__(self, d):
- styles.Versioned.__setstate__(self, d)
- # self.build must be filled in by our parent
- for loog in self.logs:
- loog.step = self
-
- def upgradeToVersion1(self):
- if not hasattr(self, "urls"):
- self.urls = {}
-
- def upgradeToVersion2(self):
- if not hasattr(self, "statistics"):
- self.statistics = {}
-
-
-class BuildStatus(styles.Versioned):
- implements(interfaces.IBuildStatus, interfaces.IStatusEvent)
- persistenceVersion = 3
-
- source = None
- reason = None
- changes = []
- blamelist = []
- requests = []
- progress = None
- started = None
- finished = None
- currentStep = None
- text = []
- results = None
- slavename = "???"
-
- # these lists/dicts are defined here so that unserialized instances have
- # (empty) values. They are set in __init__ to new objects to make sure
- # each instance gets its own copy.
- watchers = []
- updates = {}
- finishedWatchers = []
- testResults = {}
-
- def __init__(self, parent, number):
- """
- @type parent: L{BuilderStatus}
- @type number: int
- """
- assert interfaces.IBuilderStatus(parent)
- self.builder = parent
- self.number = number
- self.watchers = []
- self.updates = {}
- self.finishedWatchers = []
- self.steps = []
- self.testResults = {}
- self.properties = Properties()
- self.requests = []
-
- # IBuildStatus
-
- def getBuilder(self):
- """
- @rtype: L{BuilderStatus}
- """
- return self.builder
-
- def getProperty(self, propname):
- return self.properties[propname]
-
- def getProperties(self):
- return self.properties
-
- def getNumber(self):
- return self.number
-
- def getPreviousBuild(self):
- if self.number == 0:
- return None
- return self.builder.getBuild(self.number-1)
-
- def getSourceStamp(self, absolute=False):
- if not absolute or not self.properties.has_key('got_revision'):
- return self.source
- return self.source.getAbsoluteSourceStamp(self.properties['got_revision'])
-
- def getReason(self):
- return self.reason
-
- def getChanges(self):
- return self.changes
-
- def getRequests(self):
- return self.requests
-
- def getResponsibleUsers(self):
- return self.blamelist
-
- def getInterestedUsers(self):
- # TODO: the Builder should add others: sheriffs, domain-owners
- return self.blamelist + self.properties.getProperty('owners', [])
-
- def getSteps(self):
- """Return a list of IBuildStepStatus objects. For invariant builds
- (those which always use the same set of Steps), this should be the
- complete list, however some of the steps may not have started yet
- (step.getTimes()[0] will be None). For variant builds, this may not
- be complete (asking again later may give you more of them)."""
- return self.steps
-
- def getTimes(self):
- return (self.started, self.finished)
-
- _sentinel = [] # used as a sentinel to indicate unspecified initial_value
- def getSummaryStatistic(self, name, summary_fn, initial_value=_sentinel):
- """Summarize the named statistic over all steps in which it
- exists, using combination_fn and initial_value to combine multiple
- results into a single result. This translates to a call to Python's
- X{reduce}::
- return reduce(summary_fn, step_stats_list, initial_value)
- """
- step_stats_list = [
- st.getStatistic(name)
- for st in self.steps
- if st.hasStatistic(name) ]
- if initial_value is self._sentinel:
- return reduce(summary_fn, step_stats_list)
- else:
- return reduce(summary_fn, step_stats_list, initial_value)
-
- def isFinished(self):
- return (self.finished is not None)
-
- def waitUntilFinished(self):
- if self.finished:
- d = defer.succeed(self)
- else:
- d = defer.Deferred()
- self.finishedWatchers.append(d)
- return d
-
- # while the build is running, the following methods make sense.
- # Afterwards they return None
-
- def getETA(self):
- if self.finished is not None:
- return None
- if not self.progress:
- return None
- eta = self.progress.eta()
- if eta is None:
- return None
- return eta - util.now()
-
- def getCurrentStep(self):
- return self.currentStep
-
- # Once you know the build has finished, the following methods are legal.
- # Before ths build has finished, they all return None.
-
- def getText(self):
- text = []
- text.extend(self.text)
- for s in self.steps:
- text.extend(s.text2)
- return text
-
- def getResults(self):
- return self.results
-
- def getSlavename(self):
- return self.slavename
-
- def getTestResults(self):
- return self.testResults
-
- def getLogs(self):
- # TODO: steps should contribute significant logs instead of this
- # hack, which returns every log from every step. The logs should get
- # names like "compile" and "test" instead of "compile.output"
- logs = []
- for s in self.steps:
- for log in s.getLogs():
- logs.append(log)
- return logs
-
- # subscription interface
-
- def subscribe(self, receiver, updateInterval=None):
- # will receive stepStarted and stepFinished messages
- # and maybe buildETAUpdate
- self.watchers.append(receiver)
- if updateInterval is not None:
- self.sendETAUpdate(receiver, updateInterval)
-
- def sendETAUpdate(self, receiver, updateInterval):
- self.updates[receiver] = None
- ETA = self.getETA()
- if ETA is not None:
- receiver.buildETAUpdate(self, self.getETA())
- # they might have unsubscribed during buildETAUpdate
- if receiver in self.watchers:
- self.updates[receiver] = reactor.callLater(updateInterval,
- self.sendETAUpdate,
- receiver,
- updateInterval)
-
- def unsubscribe(self, receiver):
- if receiver in self.watchers:
- self.watchers.remove(receiver)
- if receiver in self.updates:
- if self.updates[receiver] is not None:
- self.updates[receiver].cancel()
- del self.updates[receiver]
-
- # methods for the base.Build to invoke
-
- def addStepWithName(self, name):
- """The Build is setting up, and has added a new BuildStep to its
- list. Create a BuildStepStatus object to which it can send status
- updates."""
-
- s = BuildStepStatus(self)
- s.setName(name)
- self.steps.append(s)
- return s
-
- def setProperty(self, propname, value, source):
- self.properties.setProperty(propname, value, source)
-
- def addTestResult(self, result):
- self.testResults[result.getName()] = result
-
- def setSourceStamp(self, sourceStamp):
- self.source = sourceStamp
- self.changes = self.source.changes
-
- def setRequests(self, requests):
- self.requests = requests
-
- def setReason(self, reason):
- self.reason = reason
- def setBlamelist(self, blamelist):
- self.blamelist = blamelist
- def setProgress(self, progress):
- self.progress = progress
-
- def buildStarted(self, build):
- """The Build has been set up and is about to be started. It can now
- be safely queried, so it is time to announce the new build."""
-
- self.started = util.now()
- # now that we're ready to report status, let the BuilderStatus tell
- # the world about us
- self.builder.buildStarted(self)
-
- def setSlavename(self, slavename):
- self.slavename = slavename
-
- def setText(self, text):
- assert isinstance(text, (list, tuple))
- self.text = text
- def setResults(self, results):
- self.results = results
-
- def buildFinished(self):
- self.currentStep = None
- self.finished = util.now()
-
- for r in self.updates.keys():
- if self.updates[r] is not None:
- self.updates[r].cancel()
- del self.updates[r]
-
- watchers = self.finishedWatchers
- self.finishedWatchers = []
- for w in watchers:
- w.callback(self)
-
- # methods called by our BuildStepStatus children
-
- def stepStarted(self, step):
- self.currentStep = step
- name = self.getBuilder().getName()
- for w in self.watchers:
- receiver = w.stepStarted(self, step)
- if receiver:
- if type(receiver) == type(()):
- step.subscribe(receiver[0], receiver[1])
- else:
- step.subscribe(receiver)
- d = step.waitUntilFinished()
- d.addCallback(lambda step: step.unsubscribe(receiver))
-
- step.waitUntilFinished().addCallback(self._stepFinished)
-
- def _stepFinished(self, step):
- results = step.getResults()
- for w in self.watchers:
- w.stepFinished(self, step, results)
-
- # methods called by our BuilderStatus parent
-
- def pruneLogs(self):
- # this build is somewhat old: remove the build logs to save space
- # TODO: delete logs visible through IBuildStatus.getLogs
- for s in self.steps:
- s.pruneLogs()
-
- def pruneSteps(self):
- # this build is very old: remove the build steps too
- self.steps = []
-
- # persistence stuff
-
- def generateLogfileName(self, stepname, logname):
- """Return a filename (relative to the Builder's base directory) where
- the logfile's contents can be stored uniquely.
-
- The base filename is made by combining our build number, the Step's
- name, and the log's name, then removing unsuitable characters. The
- filename is then made unique by appending _0, _1, etc, until it does
- not collide with any other logfile.
-
- These files are kept in the Builder's basedir (rather than a
- per-Build subdirectory) because that makes cleanup easier: cron and
- find will help get rid of the old logs, but the empty directories are
- more of a hassle to remove."""
-
- starting_filename = "%d-log-%s-%s" % (self.number, stepname, logname)
- starting_filename = re.sub(r'[^\w\.\-]', '_', starting_filename)
- # now make it unique
- unique_counter = 0
- filename = starting_filename
- while filename in [l.filename
- for step in self.steps
- for l in step.getLogs()
- if l.filename]:
- filename = "%s_%d" % (starting_filename, unique_counter)
- unique_counter += 1
- return filename
-
- def __getstate__(self):
- d = styles.Versioned.__getstate__(self)
- # for now, a serialized Build is always "finished". We will never
- # save unfinished builds.
- if not self.finished:
- d['finished'] = True
- # TODO: push an "interrupted" step so it is clear that the build
- # was interrupted. The builder will have a 'shutdown' event, but
- # someone looking at just this build will be confused as to why
- # the last log is truncated.
- del d['builder'] # filled in by our parent when loading
- del d['watchers']
- del d['updates']
- del d['requests']
- del d['finishedWatchers']
- return d
-
- def __setstate__(self, d):
- styles.Versioned.__setstate__(self, d)
- # self.builder must be filled in by our parent when loading
- for step in self.steps:
- step.build = self
- self.watchers = []
- self.updates = {}
- self.finishedWatchers = []
-
- def upgradeToVersion1(self):
- if hasattr(self, "sourceStamp"):
- # the old .sourceStamp attribute wasn't actually very useful
- maxChangeNumber, patch = self.sourceStamp
- changes = getattr(self, 'changes', [])
- source = sourcestamp.SourceStamp(branch=None,
- revision=None,
- patch=patch,
- changes=changes)
- self.source = source
- self.changes = source.changes
- del self.sourceStamp
-
- def upgradeToVersion2(self):
- self.properties = {}
-
- def upgradeToVersion3(self):
- # in version 3, self.properties became a Properties object
- propdict = self.properties
- self.properties = Properties()
- self.properties.update(propdict, "Upgrade from previous version")
-
- def upgradeLogfiles(self):
- # upgrade any LogFiles that need it. This must occur after we've been
- # attached to our Builder, and after we know about all LogFiles of
- # all Steps (to get the filenames right).
- assert self.builder
- for s in self.steps:
- for l in s.getLogs():
- if l.filename:
- pass # new-style, log contents are on disk
- else:
- logfilename = self.generateLogfileName(s.name, l.name)
- # let the logfile update its .filename pointer,
- # transferring its contents onto disk if necessary
- l.upgrade(logfilename)
-
- def saveYourself(self):
- filename = os.path.join(self.builder.basedir, "%d" % self.number)
- if os.path.isdir(filename):
- # leftover from 0.5.0, which stored builds in directories
- shutil.rmtree(filename, ignore_errors=True)
- tmpfilename = filename + ".tmp"
- try:
- dump(self, open(tmpfilename, "wb"), -1)
- if sys.platform == 'win32':
- # windows cannot rename a file on top of an existing one, so
- # fall back to delete-first. There are ways this can fail and
- # lose the builder's history, so we avoid using it in the
- # general (non-windows) case
- if os.path.exists(filename):
- os.unlink(filename)
- os.rename(tmpfilename, filename)
- except:
- log.msg("unable to save build %s-#%d" % (self.builder.name,
- self.number))
- log.err()
-
-
-
-class BuilderStatus(styles.Versioned):
- """I handle status information for a single process.base.Builder object.
- That object sends status changes to me (frequently as Events), and I
- provide them on demand to the various status recipients, like the HTML
- waterfall display and the live status clients. It also sends build
- summaries to me, which I log and provide to status clients who aren't
- interested in seeing details of the individual build steps.
-
- I am responsible for maintaining the list of historic Events and Builds,
- pruning old ones, and loading them from / saving them to disk.
-
- I live in the buildbot.process.base.Builder object, in the
- .builder_status attribute.
-
- @type category: string
- @ivar category: user-defined category this builder belongs to; can be
- used to filter on in status clients
- """
-
- implements(interfaces.IBuilderStatus, interfaces.IEventSource)
- persistenceVersion = 1
-
- # these limit the amount of memory we consume, as well as the size of the
- # main Builder pickle. The Build and LogFile pickles on disk must be
- # handled separately.
- buildCacheSize = 30
- buildHorizon = 100 # forget builds beyond this
- stepHorizon = 50 # forget steps in builds beyond this
-
- category = None
- currentBigState = "offline" # or idle/waiting/interlocked/building
- basedir = None # filled in by our parent
-
- def __init__(self, buildername, category=None):
- self.name = buildername
- self.category = category
-
- self.slavenames = []
- self.events = []
- # these three hold Events, and are used to retrieve the current
- # state of the boxes.
- self.lastBuildStatus = None
- #self.currentBig = None
- #self.currentSmall = None
- self.currentBuilds = []
- self.pendingBuilds = []
- self.nextBuild = None
- self.watchers = []
- self.buildCache = [] # TODO: age builds out of the cache
- self.logCompressionLimit = False # default to no compression for tests
-
- # persistence
-
- def __getstate__(self):
- # when saving, don't record transient stuff like what builds are
- # currently running, because they won't be there when we start back
- # up. Nor do we save self.watchers, nor anything that gets set by our
- # parent like .basedir and .status
- d = styles.Versioned.__getstate__(self)
- d['watchers'] = []
- del d['buildCache']
- for b in self.currentBuilds:
- b.saveYourself()
- # TODO: push a 'hey, build was interrupted' event
- del d['currentBuilds']
- del d['pendingBuilds']
- del d['currentBigState']
- del d['basedir']
- del d['status']
- del d['nextBuildNumber']
- return d
-
- def __setstate__(self, d):
- # when loading, re-initialize the transient stuff. Remember that
- # upgradeToVersion1 and such will be called after this finishes.
- styles.Versioned.__setstate__(self, d)
- self.buildCache = []
- self.currentBuilds = []
- self.pendingBuilds = []
- self.watchers = []
- self.slavenames = []
- # self.basedir must be filled in by our parent
- # self.status must be filled in by our parent
-
- def upgradeToVersion1(self):
- if hasattr(self, 'slavename'):
- self.slavenames = [self.slavename]
- del self.slavename
- if hasattr(self, 'nextBuildNumber'):
- del self.nextBuildNumber # determineNextBuildNumber chooses this
-
- def determineNextBuildNumber(self):
- """Scan our directory of saved BuildStatus instances to determine
- what our self.nextBuildNumber should be. Set it one larger than the
- highest-numbered build we discover. This is called by the top-level
- Status object shortly after we are created or loaded from disk.
- """
- existing_builds = [int(f)
- for f in os.listdir(self.basedir)
- if re.match("^\d+$", f)]
- if existing_builds:
- self.nextBuildNumber = max(existing_builds) + 1
- else:
- self.nextBuildNumber = 0
-
- def setLogCompressionLimit(self, lowerLimit):
- self.logCompressionLimit = lowerLimit
-
- def saveYourself(self):
- for b in self.buildCache:
- if not b.isFinished:
- # interrupted build, need to save it anyway.
- # BuildStatus.saveYourself will mark it as interrupted.
- b.saveYourself()
- filename = os.path.join(self.basedir, "builder")
- tmpfilename = filename + ".tmp"
- try:
- dump(self, open(tmpfilename, "wb"), -1)
- if sys.platform == 'win32':
- # windows cannot rename a file on top of an existing one
- if os.path.exists(filename):
- os.unlink(filename)
- os.rename(tmpfilename, filename)
- except:
- log.msg("unable to save builder %s" % self.name)
- log.err()
-
-
- # build cache management
-
- def addBuildToCache(self, build):
- if build in self.buildCache:
- return
- self.buildCache.append(build)
- while len(self.buildCache) > self.buildCacheSize:
- self.buildCache.pop(0)
-
- def getBuildByNumber(self, number):
- for b in self.currentBuilds:
- if b.number == number:
- return b
- for build in self.buildCache:
- if build.number == number:
- return build
- filename = os.path.join(self.basedir, "%d" % number)
- try:
- build = load(open(filename, "rb"))
- styles.doUpgrade()
- build.builder = self
- # handle LogFiles from after 0.5.0 and before 0.6.5
- build.upgradeLogfiles()
- self.addBuildToCache(build)
- return build
- except IOError:
- raise IndexError("no such build %d" % number)
- except EOFError:
- raise IndexError("corrupted build pickle %d" % number)
-
- def prune(self):
- return # TODO: change this to walk through the filesystem
- # first, blow away all builds beyond our build horizon
- self.builds = self.builds[-self.buildHorizon:]
- # then prune steps in builds past the step horizon
- for b in self.builds[0:-self.stepHorizon]:
- b.pruneSteps()
-
- # IBuilderStatus methods
- def getName(self):
- return self.name
-
- def getState(self):
- return (self.currentBigState, self.currentBuilds)
-
- def getSlaves(self):
- return [self.status.getSlave(name) for name in self.slavenames]
-
- def getPendingBuilds(self):
- return self.pendingBuilds
-
- def getCurrentBuilds(self):
- return self.currentBuilds
-
- def getLastFinishedBuild(self):
- b = self.getBuild(-1)
- if not (b and b.isFinished()):
- b = self.getBuild(-2)
- return b
-
- def getBuild(self, number):
- if number < 0:
- number = self.nextBuildNumber + number
- if number < 0 or number >= self.nextBuildNumber:
- return None
-
- try:
- return self.getBuildByNumber(number)
- except IndexError:
- return None
-
- def getEvent(self, number):
- try:
- return self.events[number]
- except IndexError:
- return None
-
- def generateFinishedBuilds(self, branches=[],
- num_builds=None,
- max_buildnum=None,
- finished_before=None,
- max_search=200):
- got = 0
- for Nb in itertools.count(1):
- if Nb > self.nextBuildNumber:
- break
- if Nb > max_search:
- break
- build = self.getBuild(-Nb)
- if build is None:
- continue
- if max_buildnum is not None:
- if build.getNumber() > max_buildnum:
- continue
- if not build.isFinished():
- continue
- if finished_before is not None:
- start, end = build.getTimes()
- if end >= finished_before:
- continue
- if branches:
- if build.getSourceStamp().branch not in branches:
- continue
- got += 1
- yield build
- if num_builds is not None:
- if got >= num_builds:
- return
-
- def eventGenerator(self, branches=[]):
- """This function creates a generator which will provide all of this
- Builder's status events, starting with the most recent and
- progressing backwards in time. """
-
- # remember the oldest-to-earliest flow here. "next" means earlier.
-
- # TODO: interleave build steps and self.events by timestamp.
- # TODO: um, I think we're already doing that.
-
- # TODO: there's probably something clever we could do here to
- # interleave two event streams (one from self.getBuild and the other
- # from self.getEvent), which would be simpler than this control flow
-
- eventIndex = -1
- e = self.getEvent(eventIndex)
- for Nb in range(1, self.nextBuildNumber+1):
- b = self.getBuild(-Nb)
- if not b:
- break
- if branches and not b.getSourceStamp().branch in branches:
- continue
- steps = b.getSteps()
- for Ns in range(1, len(steps)+1):
- if steps[-Ns].started:
- step_start = steps[-Ns].getTimes()[0]
- while e is not None and e.getTimes()[0] > step_start:
- yield e
- eventIndex -= 1
- e = self.getEvent(eventIndex)
- yield steps[-Ns]
- yield b
- while e is not None:
- yield e
- eventIndex -= 1
- e = self.getEvent(eventIndex)
-
- def subscribe(self, receiver):
- # will get builderChangedState, buildStarted, and buildFinished
- self.watchers.append(receiver)
- self.publishState(receiver)
-
- def unsubscribe(self, receiver):
- self.watchers.remove(receiver)
-
- ## Builder interface (methods called by the Builder which feeds us)
-
- def setSlavenames(self, names):
- self.slavenames = names
-
- def addEvent(self, text=[]):
- # this adds a duration event. When it is done, the user should call
- # e.finish(). They can also mangle it by modifying .text
- e = Event()
- e.started = util.now()
- e.text = text
- self.events.append(e)
- return e # they are free to mangle it further
-
- def addPointEvent(self, text=[]):
- # this adds a point event, one which occurs as a single atomic
- # instant of time.
- e = Event()
- e.started = util.now()
- e.finished = 0
- e.text = text
- self.events.append(e)
- return e # for consistency, but they really shouldn't touch it
-
- def setBigState(self, state):
- needToUpdate = state != self.currentBigState
- self.currentBigState = state
- if needToUpdate:
- self.publishState()
-
- def publishState(self, target=None):
- state = self.currentBigState
-
- if target is not None:
- # unicast
- target.builderChangedState(self.name, state)
- return
- for w in self.watchers:
- try:
- w.builderChangedState(self.name, state)
- except:
- log.msg("Exception caught publishing state to %r" % w)
- log.err()
-
- def newBuild(self):
- """The Builder has decided to start a build, but the Build object is
- not yet ready to report status (it has not finished creating the
- Steps). Create a BuildStatus object that it can use."""
- number = self.nextBuildNumber
- self.nextBuildNumber += 1
- # TODO: self.saveYourself(), to make sure we don't forget about the
- # build number we've just allocated. This is not quite as important
- # as it was before we switch to determineNextBuildNumber, but I think
- # it may still be useful to have the new build save itself.
- s = BuildStatus(self, number)
- s.waitUntilFinished().addCallback(self._buildFinished)
- return s
-
- def addBuildRequest(self, brstatus):
- self.pendingBuilds.append(brstatus)
- for w in self.watchers:
- w.requestSubmitted(brstatus)
-
- def removeBuildRequest(self, brstatus):
- self.pendingBuilds.remove(brstatus)
-
- # buildStarted is called by our child BuildStatus instances
- def buildStarted(self, s):
- """Now the BuildStatus object is ready to go (it knows all of its
- Steps, its ETA, etc), so it is safe to notify our watchers."""
-
- assert s.builder is self # paranoia
- assert s.number == self.nextBuildNumber - 1
- assert s not in self.currentBuilds
- self.currentBuilds.append(s)
- self.addBuildToCache(s)
-
- # now that the BuildStatus is prepared to answer queries, we can
- # announce the new build to all our watchers
-
- for w in self.watchers: # TODO: maybe do this later? callLater(0)?
- try:
- receiver = w.buildStarted(self.getName(), s)
- if receiver:
- if type(receiver) == type(()):
- s.subscribe(receiver[0], receiver[1])
- else:
- s.subscribe(receiver)
- d = s.waitUntilFinished()
- d.addCallback(lambda s: s.unsubscribe(receiver))
- except:
- log.msg("Exception caught notifying %r of buildStarted event" % w)
- log.err()
-
- def _buildFinished(self, s):
- assert s in self.currentBuilds
- s.saveYourself()
- self.currentBuilds.remove(s)
-
- name = self.getName()
- results = s.getResults()
- for w in self.watchers:
- try:
- w.buildFinished(name, s, results)
- except:
- log.msg("Exception caught notifying %r of buildFinished event" % w)
- log.err()
-
- self.prune() # conserve disk
-
-
- # waterfall display (history)
-
- # I want some kind of build event that holds everything about the build:
- # why, what changes went into it, the results of the build, itemized
- # test results, etc. But, I do kind of need something to be inserted in
- # the event log first, because intermixing step events and the larger
- # build event is fraught with peril. Maybe an Event-like-thing that
- # doesn't have a file in it but does have links. Hmm, that's exactly
- # what it does now. The only difference would be that this event isn't
- # pushed to the clients.
-
- # publish to clients
- def sendLastBuildStatus(self, client):
- #client.newLastBuildStatus(self.lastBuildStatus)
- pass
- def sendCurrentActivityBigToEveryone(self):
- for s in self.subscribers:
- self.sendCurrentActivityBig(s)
- def sendCurrentActivityBig(self, client):
- state = self.currentBigState
- if state == "offline":
- client.currentlyOffline()
- elif state == "idle":
- client.currentlyIdle()
- elif state == "building":
- client.currentlyBuilding()
- else:
- log.msg("Hey, self.currentBigState is weird:", state)
-
-
- ## HTML display interface
-
- def getEventNumbered(self, num):
- # deal with dropped events, pruned events
- first = self.events[0].number
- if first + len(self.events)-1 != self.events[-1].number:
- log.msg(self,
- "lost an event somewhere: [0] is %d, [%d] is %d" % \
- (self.events[0].number,
- len(self.events) - 1,
- self.events[-1].number))
- for e in self.events:
- log.msg("e[%d]: " % e.number, e)
- return None
- offset = num - first
- log.msg(self, "offset", offset)
- try:
- return self.events[offset]
- except IndexError:
- return None
-
- ## Persistence of Status
- def loadYourOldEvents(self):
- if hasattr(self, "allEvents"):
- # first time, nothing to get from file. Note that this is only if
- # the Application gets .run() . If it gets .save()'ed, then the
- # .allEvents attribute goes away in the initial __getstate__ and
- # we try to load a non-existent file.
- return
- self.allEvents = self.loadFile("events", [])
- if self.allEvents:
- self.nextEventNumber = self.allEvents[-1].number + 1
- else:
- self.nextEventNumber = 0
- def saveYourOldEvents(self):
- self.saveFile("events", self.allEvents)
-
- ## clients
-
- def addClient(self, client):
- if client not in self.subscribers:
- self.subscribers.append(client)
- self.sendLastBuildStatus(client)
- self.sendCurrentActivityBig(client)
- client.newEvent(self.currentSmall)
- def removeClient(self, client):
- if client in self.subscribers:
- self.subscribers.remove(client)
-
-class SlaveStatus:
- implements(interfaces.ISlaveStatus)
-
- admin = None
- host = None
- connected = False
- graceful_shutdown = False
-
- def __init__(self, name):
- self.name = name
- self._lastMessageReceived = 0
- self.runningBuilds = []
- self.graceful_callbacks = []
-
- def getName(self):
- return self.name
- def getAdmin(self):
- return self.admin
- def getHost(self):
- return self.host
- def isConnected(self):
- return self.connected
- def lastMessageReceived(self):
- return self._lastMessageReceived
- def getRunningBuilds(self):
- return self.runningBuilds
-
- def setAdmin(self, admin):
- self.admin = admin
- def setHost(self, host):
- self.host = host
- def setConnected(self, isConnected):
- self.connected = isConnected
- def setLastMessageReceived(self, when):
- self._lastMessageReceived = when
-
- def buildStarted(self, build):
- self.runningBuilds.append(build)
- def buildFinished(self, build):
- self.runningBuilds.remove(build)
-
- def getGraceful(self):
- """Return the graceful shutdown flag"""
- return self.graceful_shutdown
- def setGraceful(self, graceful):
- """Set the graceful shutdown flag, and notify all the watchers"""
- self.graceful_shutdown = graceful
- for cb in self.graceful_callbacks:
- reactor.callLater(0, cb, graceful)
- def addGracefulWatcher(self, watcher):
- """Add watcher to the list of watchers to be notified when the
- graceful shutdown flag is changed."""
- if not watcher in self.graceful_callbacks:
- self.graceful_callbacks.append(watcher)
- def removeGracefulWatcher(self, watcher):
- """Remove watcher from the list of watchers to be notified when the
- graceful shutdown flag is changed."""
- if watcher in self.graceful_callbacks:
- self.graceful_callbacks.remove(watcher)
-
-class Status:
- """
- I represent the status of the buildmaster.
- """
- implements(interfaces.IStatus)
-
- def __init__(self, botmaster, basedir):
- """
- @type botmaster: L{buildbot.master.BotMaster}
- @param botmaster: the Status object uses C{.botmaster} to get at
- both the L{buildbot.master.BuildMaster} (for
- various buildbot-wide parameters) and the
- actual Builders (to get at their L{BuilderStatus}
- objects). It is not allowed to change or influence
- anything through this reference.
- @type basedir: string
- @param basedir: this provides a base directory in which saved status
- information (changes.pck, saved Build status
- pickles) can be stored
- """
- self.botmaster = botmaster
- self.basedir = basedir
- self.watchers = []
- self.activeBuildSets = []
- assert os.path.isdir(basedir)
- # compress logs bigger than 4k, a good default on linux
- self.logCompressionLimit = 4*1024
-
-
- # methods called by our clients
-
- def getProjectName(self):
- return self.botmaster.parent.projectName
- def getProjectURL(self):
- return self.botmaster.parent.projectURL
- def getBuildbotURL(self):
- return self.botmaster.parent.buildbotURL
-
- def getURLForThing(self, thing):
- prefix = self.getBuildbotURL()
- if not prefix:
- return None
- if interfaces.IStatus.providedBy(thing):
- return prefix
- if interfaces.ISchedulerStatus.providedBy(thing):
- pass
- if interfaces.IBuilderStatus.providedBy(thing):
- builder = thing
- return prefix + "builders/%s" % (
- urllib.quote(builder.getName(), safe=''),
- )
- if interfaces.IBuildStatus.providedBy(thing):
- build = thing
- builder = build.getBuilder()
- return prefix + "builders/%s/builds/%d" % (
- urllib.quote(builder.getName(), safe=''),
- build.getNumber())
- if interfaces.IBuildStepStatus.providedBy(thing):
- step = thing
- build = step.getBuild()
- builder = build.getBuilder()
- return prefix + "builders/%s/builds/%d/steps/%s" % (
- urllib.quote(builder.getName(), safe=''),
- build.getNumber(),
- urllib.quote(step.getName(), safe=''))
- # IBuildSetStatus
- # IBuildRequestStatus
- # ISlaveStatus
-
- # IStatusEvent
- if interfaces.IStatusEvent.providedBy(thing):
- from buildbot.changes import changes
- # TODO: this is goofy, create IChange or something
- if isinstance(thing, changes.Change):
- change = thing
- return "%schanges/%d" % (prefix, change.number)
-
- if interfaces.IStatusLog.providedBy(thing):
- log = thing
- step = log.getStep()
- build = step.getBuild()
- builder = build.getBuilder()
-
- logs = step.getLogs()
- for i in range(len(logs)):
- if log is logs[i]:
- lognum = i
- break
- else:
- return None
- return prefix + "builders/%s/builds/%d/steps/%s/logs/%d" % (
- urllib.quote(builder.getName(), safe=''),
- build.getNumber(),
- urllib.quote(step.getName(), safe=''),
- lognum)
-
- def getChangeSources(self):
- return list(self.botmaster.parent.change_svc)
-
- def getChange(self, number):
- return self.botmaster.parent.change_svc.getChangeNumbered(number)
-
- def getSchedulers(self):
- return self.botmaster.parent.allSchedulers()
-
- def getBuilderNames(self, categories=None):
- if categories == None:
- return self.botmaster.builderNames[:] # don't let them break it
-
- l = []
- # respect addition order
- for name in self.botmaster.builderNames:
- builder = self.botmaster.builders[name]
- if builder.builder_status.category in categories:
- l.append(name)
- return l
-
- def getBuilder(self, name):
- """
- @rtype: L{BuilderStatus}
- """
- return self.botmaster.builders[name].builder_status
-
- def getSlaveNames(self):
- return self.botmaster.slaves.keys()
-
- def getSlave(self, slavename):
- return self.botmaster.slaves[slavename].slave_status
-
- def getBuildSets(self):
- return self.activeBuildSets[:]
-
- def generateFinishedBuilds(self, builders=[], branches=[],
- num_builds=None, finished_before=None,
- max_search=200):
-
- def want_builder(bn):
- if builders:
- return bn in builders
- return True
- builder_names = [bn
- for bn in self.getBuilderNames()
- if want_builder(bn)]
-
- # 'sources' is a list of generators, one for each Builder we're
- # using. When the generator is exhausted, it is replaced in this list
- # with None.
- sources = []
- for bn in builder_names:
- b = self.getBuilder(bn)
- g = b.generateFinishedBuilds(branches,
- finished_before=finished_before,
- max_search=max_search)
- sources.append(g)
-
- # next_build the next build from each source
- next_build = [None] * len(sources)
-
- def refill():
- for i,g in enumerate(sources):
- if next_build[i]:
- # already filled
- continue
- if not g:
- # already exhausted
- continue
- try:
- next_build[i] = g.next()
- except StopIteration:
- next_build[i] = None
- sources[i] = None
-
- got = 0
- while True:
- refill()
- # find the latest build among all the candidates
- candidates = [(i, b, b.getTimes()[1])
- for i,b in enumerate(next_build)
- if b is not None]
- candidates.sort(lambda x,y: cmp(x[2], y[2]))
- if not candidates:
- return
-
- # and remove it from the list
- i, build, finshed_time = candidates[-1]
- next_build[i] = None
- got += 1
- yield build
- if num_builds is not None:
- if got >= num_builds:
- return
-
- def subscribe(self, target):
- self.watchers.append(target)
- for name in self.botmaster.builderNames:
- self.announceNewBuilder(target, name, self.getBuilder(name))
- def unsubscribe(self, target):
- self.watchers.remove(target)
-
-
- # methods called by upstream objects
-
- def announceNewBuilder(self, target, name, builder_status):
- t = target.builderAdded(name, builder_status)
- if t:
- builder_status.subscribe(t)
-
- def builderAdded(self, name, basedir, category=None):
- """
- @rtype: L{BuilderStatus}
- """
- filename = os.path.join(self.basedir, basedir, "builder")
- log.msg("trying to load status pickle from %s" % filename)
- builder_status = None
- try:
- builder_status = load(open(filename, "rb"))
- styles.doUpgrade()
- except IOError:
- log.msg("no saved status pickle, creating a new one")
- except:
- log.msg("error while loading status pickle, creating a new one")
- log.msg("error follows:")
- log.err()
- if not builder_status:
- builder_status = BuilderStatus(name, category)
- builder_status.addPointEvent(["builder", "created"])
- log.msg("added builder %s in category %s" % (name, category))
- # an unpickled object might not have category set from before,
- # so set it here to make sure
- builder_status.category = category
- builder_status.basedir = os.path.join(self.basedir, basedir)
- builder_status.name = name # it might have been updated
- builder_status.status = self
-
- if not os.path.isdir(builder_status.basedir):
- os.makedirs(builder_status.basedir)
- builder_status.determineNextBuildNumber()
-
- builder_status.setBigState("offline")
- builder_status.setLogCompressionLimit(self.logCompressionLimit)
-
- for t in self.watchers:
- self.announceNewBuilder(t, name, builder_status)
-
- return builder_status
-
- def builderRemoved(self, name):
- for t in self.watchers:
- t.builderRemoved(name)
-
- def prune(self):
- for b in self.botmaster.builders.values():
- b.builder_status.prune()
-
- def buildsetSubmitted(self, bss):
- self.activeBuildSets.append(bss)
- bss.waitUntilFinished().addCallback(self.activeBuildSets.remove)
- for t in self.watchers:
- t.buildsetSubmitted(bss)
diff --git a/buildbot/buildbot/status/client.py b/buildbot/buildbot/status/client.py
deleted file mode 100644
index 0d4611d..0000000
--- a/buildbot/buildbot/status/client.py
+++ /dev/null
@@ -1,564 +0,0 @@
-# -*- test-case-name: buildbot.test.test_status -*-
-
-from twisted.spread import pb
-from twisted.python import components, log as twlog
-from twisted.internet import reactor
-from twisted.application import strports
-from twisted.cred import portal, checkers
-
-from buildbot import interfaces
-from zope.interface import Interface, implements
-from buildbot.status import builder, base
-from buildbot.changes import changes
-
-class IRemote(Interface):
- pass
-
-def makeRemote(obj):
- # we want IRemote(None) to be None, but you can't really do that with
- # adapters, so we fake it
- if obj is None:
- return None
- return IRemote(obj)
-
-
-class RemoteBuildSet(pb.Referenceable):
- def __init__(self, buildset):
- self.b = buildset
-
- def remote_getSourceStamp(self):
- return self.b.getSourceStamp()
-
- def remote_getReason(self):
- return self.b.getReason()
-
- def remote_getID(self):
- return self.b.getID()
-
- def remote_getBuilderNames(self):
- return self.b.getBuilderNames()
-
- def remote_getBuildRequests(self):
- """Returns a list of (builderName, BuildRequest) tuples."""
- return [(br.getBuilderName(), IRemote(br))
- for br in self.b.getBuildRequests()]
-
- def remote_isFinished(self):
- return self.b.isFinished()
-
- def remote_waitUntilSuccess(self):
- d = self.b.waitUntilSuccess()
- d.addCallback(lambda res: self)
- return d
-
- def remote_waitUntilFinished(self):
- d = self.b.waitUntilFinished()
- d.addCallback(lambda res: self)
- return d
-
- def remote_getResults(self):
- return self.b.getResults()
-
-components.registerAdapter(RemoteBuildSet,
- interfaces.IBuildSetStatus, IRemote)
-
-
-class RemoteBuilder(pb.Referenceable):
- def __init__(self, builder):
- self.b = builder
-
- def remote_getName(self):
- return self.b.getName()
-
- def remote_getState(self):
- state, builds = self.b.getState()
- return (state,
- None, # TODO: remove leftover ETA
- [makeRemote(b) for b in builds])
-
- def remote_getSlaves(self):
- return [IRemote(s) for s in self.b.getSlaves()]
-
- def remote_getLastFinishedBuild(self):
- return makeRemote(self.b.getLastFinishedBuild())
-
- def remote_getCurrentBuilds(self):
- return [IRemote(b) for b in self.b.getCurrentBuilds()]
-
- def remote_getBuild(self, number):
- return makeRemote(self.b.getBuild(number))
-
- def remote_getEvent(self, number):
- return IRemote(self.b.getEvent(number))
-
-components.registerAdapter(RemoteBuilder,
- interfaces.IBuilderStatus, IRemote)
-
-
-class RemoteBuildRequest(pb.Referenceable):
- def __init__(self, buildreq):
- self.b = buildreq
- self.observers = []
-
- def remote_getSourceStamp(self):
- return self.b.getSourceStamp()
-
- def remote_getBuilderName(self):
- return self.b.getBuilderName()
-
- def remote_subscribe(self, observer):
- """The observer's remote_newbuild method will be called (with two
- arguments: the RemoteBuild object, and our builderName) for each new
- Build that is created to handle this BuildRequest."""
- self.observers.append(observer)
- def send(bs):
- d = observer.callRemote("newbuild",
- IRemote(bs), self.b.getBuilderName())
- d.addErrback(lambda err: None)
- reactor.callLater(0, self.b.subscribe, send)
-
- def remote_unsubscribe(self, observer):
- # PB (well, at least oldpb) doesn't re-use RemoteReference instances,
- # so sending the same object across the wire twice will result in two
- # separate objects that compare as equal ('a is not b' and 'a == b').
- # That means we can't use a simple 'self.observers.remove(observer)'
- # here.
- for o in self.observers:
- if o == observer:
- self.observers.remove(o)
-
-components.registerAdapter(RemoteBuildRequest,
- interfaces.IBuildRequestStatus, IRemote)
-
-class RemoteBuild(pb.Referenceable):
- def __init__(self, build):
- self.b = build
- self.observers = []
-
- def remote_getBuilderName(self):
- return self.b.getBuilder().getName()
-
- def remote_getNumber(self):
- return self.b.getNumber()
-
- def remote_getReason(self):
- return self.b.getReason()
-
- def remote_getChanges(self):
- return [IRemote(c) for c in self.b.getChanges()]
-
- def remote_getResponsibleUsers(self):
- return self.b.getResponsibleUsers()
-
- def remote_getSteps(self):
- return [IRemote(s) for s in self.b.getSteps()]
-
- def remote_getTimes(self):
- return self.b.getTimes()
-
- def remote_isFinished(self):
- return self.b.isFinished()
-
- def remote_waitUntilFinished(self):
- # the Deferred returned by callRemote() will fire when this build is
- # finished
- d = self.b.waitUntilFinished()
- d.addCallback(lambda res: self)
- return d
-
- def remote_getETA(self):
- return self.b.getETA()
-
- def remote_getCurrentStep(self):
- return makeRemote(self.b.getCurrentStep())
-
- def remote_getText(self):
- return self.b.getText()
-
- def remote_getResults(self):
- return self.b.getResults()
-
- def remote_getLogs(self):
- logs = {}
- for name,log in self.b.getLogs().items():
- logs[name] = IRemote(log)
- return logs
-
- def remote_subscribe(self, observer, updateInterval=None):
- """The observer will have remote_stepStarted(buildername, build,
- stepname, step), remote_stepFinished(buildername, build, stepname,
- step, results), and maybe remote_buildETAUpdate(buildername, build,
- eta)) messages sent to it."""
- self.observers.append(observer)
- s = BuildSubscriber(observer)
- self.b.subscribe(s, updateInterval)
-
- def remote_unsubscribe(self, observer):
- # TODO: is the observer automatically unsubscribed when the build
- # finishes? Or are they responsible for unsubscribing themselves
- # anyway? How do we avoid a race condition here?
- for o in self.observers:
- if o == observer:
- self.observers.remove(o)
-
-
-components.registerAdapter(RemoteBuild,
- interfaces.IBuildStatus, IRemote)
-
-class BuildSubscriber:
- def __init__(self, observer):
- self.observer = observer
-
- def buildETAUpdate(self, build, eta):
- self.observer.callRemote("buildETAUpdate",
- build.getBuilder().getName(),
- IRemote(build),
- eta)
-
- def stepStarted(self, build, step):
- self.observer.callRemote("stepStarted",
- build.getBuilder().getName(),
- IRemote(build),
- step.getName(), IRemote(step))
- return None
-
- def stepFinished(self, build, step, results):
- self.observer.callRemote("stepFinished",
- build.getBuilder().getName(),
- IRemote(build),
- step.getName(), IRemote(step),
- results)
-
-
-class RemoteBuildStep(pb.Referenceable):
- def __init__(self, step):
- self.s = step
-
- def remote_getName(self):
- return self.s.getName()
-
- def remote_getBuild(self):
- return IRemote(self.s.getBuild())
-
- def remote_getTimes(self):
- return self.s.getTimes()
-
- def remote_getExpectations(self):
- return self.s.getExpectations()
-
- def remote_getLogs(self):
- logs = {}
- for log in self.s.getLogs():
- logs[log.getName()] = IRemote(log)
- return logs
-
- def remote_isFinished(self):
- return self.s.isFinished()
-
- def remote_waitUntilFinished(self):
- return self.s.waitUntilFinished() # returns a Deferred
-
- def remote_getETA(self):
- return self.s.getETA()
-
- def remote_getText(self):
- return self.s.getText()
-
- def remote_getResults(self):
- return self.s.getResults()
-
-components.registerAdapter(RemoteBuildStep,
- interfaces.IBuildStepStatus, IRemote)
-
-class RemoteSlave:
- def __init__(self, slave):
- self.s = slave
-
- def remote_getName(self):
- return self.s.getName()
- def remote_getAdmin(self):
- return self.s.getAdmin()
- def remote_getHost(self):
- return self.s.getHost()
- def remote_isConnected(self):
- return self.s.isConnected()
-
-components.registerAdapter(RemoteSlave,
- interfaces.ISlaveStatus, IRemote)
-
-class RemoteEvent:
- def __init__(self, event):
- self.e = event
-
- def remote_getTimes(self):
- return self.s.getTimes()
- def remote_getText(self):
- return self.s.getText()
-
-components.registerAdapter(RemoteEvent,
- interfaces.IStatusEvent, IRemote)
-
-class RemoteLog(pb.Referenceable):
- def __init__(self, log):
- self.l = log
-
- def remote_getName(self):
- return self.l.getName()
-
- def remote_isFinished(self):
- return self.l.isFinished()
- def remote_waitUntilFinished(self):
- d = self.l.waitUntilFinished()
- d.addCallback(lambda res: self)
- return d
-
- def remote_getText(self):
- return self.l.getText()
- def remote_getTextWithHeaders(self):
- return self.l.getTextWithHeaders()
- def remote_getChunks(self):
- return self.l.getChunks()
- # TODO: subscription interface
-
-components.registerAdapter(RemoteLog, builder.LogFile, IRemote)
-# TODO: something similar for builder.HTMLLogfile ?
-
-class RemoteChange:
- def __init__(self, change):
- self.c = change
-
- def getWho(self):
- return self.c.who
- def getFiles(self):
- return self.c.files
- def getComments(self):
- return self.c.comments
-
-components.registerAdapter(RemoteChange, changes.Change, IRemote)
-
-
-class StatusClientPerspective(base.StatusReceiverPerspective):
-
- subscribed = None
- client = None
-
- def __init__(self, status):
- self.status = status # the IStatus
- self.subscribed_to_builders = [] # Builders to which we're subscribed
- self.subscribed_to = [] # everything else we're subscribed to
-
- def __getstate__(self):
- d = self.__dict__.copy()
- d['client'] = None
- return d
-
- def attached(self, mind):
- #twlog.msg("StatusClientPerspective.attached")
- return self
-
- def detached(self, mind):
- twlog.msg("PB client detached")
- self.client = None
- for name in self.subscribed_to_builders:
- twlog.msg(" unsubscribing from Builder(%s)" % name)
- self.status.getBuilder(name).unsubscribe(self)
- for s in self.subscribed_to:
- twlog.msg(" unsubscribe from %s" % s)
- s.unsubscribe(self)
- self.subscribed = None
-
- def perspective_subscribe(self, mode, interval, target):
- """The remote client wishes to subscribe to some set of events.
- 'target' will be sent remote messages when these events happen.
- 'mode' indicates which events are desired: it is a string with one
- of the following values:
-
- 'builders': builderAdded, builderRemoved
- 'builds': those plus builderChangedState, buildStarted, buildFinished
- 'steps': all those plus buildETAUpdate, stepStarted, stepFinished
- 'logs': all those plus stepETAUpdate, logStarted, logFinished
- 'full': all those plus logChunk (with the log contents)
-
-
- Messages are defined by buildbot.interfaces.IStatusReceiver .
- 'interval' is used to specify how frequently ETAUpdate messages
- should be sent.
-
- Raising or lowering the subscription level will take effect starting
- with the next build or step."""
-
- assert mode in ("builders", "builds", "steps", "logs", "full")
- assert target
- twlog.msg("PB subscribe(%s)" % mode)
-
- self.client = target
- self.subscribed = mode
- self.interval = interval
- self.subscribed_to.append(self.status)
- # wait a moment before subscribing, so the new-builder messages
- # won't appear before this remote method finishes
- reactor.callLater(0, self.status.subscribe, self)
- return None
-
- def perspective_unsubscribe(self):
- twlog.msg("PB unsubscribe")
- self.status.unsubscribe(self)
- self.subscribed_to.remove(self.status)
- self.client = None
-
- def perspective_getBuildSets(self):
- """This returns tuples of (buildset, bsid), because that is much more
- convenient for tryclient."""
- return [(IRemote(s), s.getID()) for s in self.status.getBuildSets()]
-
- def perspective_getBuilderNames(self):
- return self.status.getBuilderNames()
-
- def perspective_getBuilder(self, name):
- b = self.status.getBuilder(name)
- return IRemote(b)
-
- def perspective_getSlave(self, name):
- s = self.status.getSlave(name)
- return IRemote(s)
-
- def perspective_ping(self):
- """Ping method to allow pb clients to validate their connections."""
- return "pong"
-
- # IStatusReceiver methods, invoked if we've subscribed
-
- # mode >= builder
- def builderAdded(self, name, builder):
- self.client.callRemote("builderAdded", name, IRemote(builder))
- if self.subscribed in ("builds", "steps", "logs", "full"):
- self.subscribed_to_builders.append(name)
- return self
- return None
-
- def builderChangedState(self, name, state):
- self.client.callRemote("builderChangedState", name, state, None)
- # TODO: remove leftover ETA argument
-
- def builderRemoved(self, name):
- if name in self.subscribed_to_builders:
- self.subscribed_to_builders.remove(name)
- self.client.callRemote("builderRemoved", name)
-
- def buildsetSubmitted(self, buildset):
- # TODO: deliver to client, somehow
- pass
-
- # mode >= builds
- def buildStarted(self, name, build):
- self.client.callRemote("buildStarted", name, IRemote(build))
- if self.subscribed in ("steps", "logs", "full"):
- self.subscribed_to.append(build)
- return (self, self.interval)
- return None
-
- def buildFinished(self, name, build, results):
- if build in self.subscribed_to:
- # we might have joined during the build
- self.subscribed_to.remove(build)
- self.client.callRemote("buildFinished",
- name, IRemote(build), results)
-
- # mode >= steps
- def buildETAUpdate(self, build, eta):
- self.client.callRemote("buildETAUpdate",
- build.getBuilder().getName(), IRemote(build),
- eta)
-
- def stepStarted(self, build, step):
- # we add some information here so the client doesn't have to do an
- # extra round-trip
- self.client.callRemote("stepStarted",
- build.getBuilder().getName(), IRemote(build),
- step.getName(), IRemote(step))
- if self.subscribed in ("logs", "full"):
- self.subscribed_to.append(step)
- return (self, self.interval)
- return None
-
- def stepFinished(self, build, step, results):
- self.client.callRemote("stepFinished",
- build.getBuilder().getName(), IRemote(build),
- step.getName(), IRemote(step),
- results)
- if step in self.subscribed_to:
- # eventually (through some new subscription method) we could
- # join in the middle of the step
- self.subscribed_to.remove(step)
-
- # mode >= logs
- def stepETAUpdate(self, build, step, ETA, expectations):
- self.client.callRemote("stepETAUpdate",
- build.getBuilder().getName(), IRemote(build),
- step.getName(), IRemote(step),
- ETA, expectations)
-
- def logStarted(self, build, step, log):
- # TODO: make the HTMLLog adapter
- rlog = IRemote(log, None)
- if not rlog:
- print "hey, couldn't adapt %s to IRemote" % log
- self.client.callRemote("logStarted",
- build.getBuilder().getName(), IRemote(build),
- step.getName(), IRemote(step),
- log.getName(), IRemote(log, None))
- if self.subscribed in ("full",):
- self.subscribed_to.append(log)
- return self
- return None
-
- def logFinished(self, build, step, log):
- self.client.callRemote("logFinished",
- build.getBuilder().getName(), IRemote(build),
- step.getName(), IRemote(step),
- log.getName(), IRemote(log, None))
- if log in self.subscribed_to:
- self.subscribed_to.remove(log)
-
- # mode >= full
- def logChunk(self, build, step, log, channel, text):
- self.client.callRemote("logChunk",
- build.getBuilder().getName(), IRemote(build),
- step.getName(), IRemote(step),
- log.getName(), IRemote(log),
- channel, text)
-
-
-class PBListener(base.StatusReceiverMultiService):
- """I am a listener for PB-based status clients."""
-
- compare_attrs = ["port", "cred"]
- implements(portal.IRealm)
-
- def __init__(self, port, user="statusClient", passwd="clientpw"):
- base.StatusReceiverMultiService.__init__(self)
- if type(port) is int:
- port = "tcp:%d" % port
- self.port = port
- self.cred = (user, passwd)
- p = portal.Portal(self)
- c = checkers.InMemoryUsernamePasswordDatabaseDontUse()
- c.addUser(user, passwd)
- p.registerChecker(c)
- f = pb.PBServerFactory(p)
- s = strports.service(port, f)
- s.setServiceParent(self)
-
- def setServiceParent(self, parent):
- base.StatusReceiverMultiService.setServiceParent(self, parent)
- self.setup()
-
- def setup(self):
- self.status = self.parent.getStatus()
-
- def requestAvatar(self, avatarID, mind, interface):
- assert interface == pb.IPerspective
- p = StatusClientPerspective(self.status)
- p.attached(mind) # perhaps .callLater(0) ?
- return (pb.IPerspective, p,
- lambda p=p,mind=mind: p.detached(mind))
diff --git a/buildbot/buildbot/status/html.py b/buildbot/buildbot/status/html.py
deleted file mode 100644
index cc36a4a..0000000
--- a/buildbot/buildbot/status/html.py
+++ /dev/null
@@ -1,6 +0,0 @@
-
-# compatibility wrapper. This is currently the preferred place for master.cfg
-# to import from.
-
-from buildbot.status.web.baseweb import Waterfall, WebStatus
-_hush_pyflakes = [Waterfall, WebStatus]
diff --git a/buildbot/buildbot/status/mail.py b/buildbot/buildbot/status/mail.py
deleted file mode 100644
index e32cfa9..0000000
--- a/buildbot/buildbot/status/mail.py
+++ /dev/null
@@ -1,524 +0,0 @@
-# -*- test-case-name: buildbot.test.test_status -*-
-
-# the email.MIMEMultipart module is only available in python-2.2.2 and later
-import re
-
-from email.Message import Message
-from email.Utils import formatdate
-from email.MIMEText import MIMEText
-try:
- from email.MIMEMultipart import MIMEMultipart
- canDoAttachments = True
-except ImportError:
- canDoAttachments = False
-import urllib
-
-from zope.interface import implements
-from twisted.internet import defer
-from twisted.mail.smtp import sendmail
-from twisted.python import log as twlog
-
-from buildbot import interfaces, util
-from buildbot.status import base
-from buildbot.status.builder import FAILURE, SUCCESS, WARNINGS, Results
-
-VALID_EMAIL = re.compile("[a-zA-Z0-9\.\_\%\-\+]+@[a-zA-Z0-9\.\_\%\-]+.[a-zA-Z]{2,6}")
-
-def message(attrs):
- """Generate a buildbot mail message and return a tuple of message text
- and type.
-
- This function can be replaced using the customMesg variable in MailNotifier.
- A message function will *always* get a dictionary of attributes with
- the following values:
-
- builderName - (str) Name of the builder that generated this event.
-
- projectName - (str) Name of the project.
-
- mode - (str) Mode set in MailNotifier. (failing, passing, problem).
-
- result - (str) Builder result as a string. 'success', 'warnings',
- 'failure', 'skipped', or 'exception'
-
- buildURL - (str) URL to build page.
-
- buildbotURL - (str) URL to buildbot main page.
-
- buildText - (str) Build text from build.getText().
-
- slavename - (str) Slavename.
-
- reason - (str) Build reason from build.getReason().
-
- responsibleUsers - (List of str) List of responsible users.
-
- branch - (str) Name of branch used. If no SourceStamp exists branch
- is an empty string.
-
- revision - (str) Name of revision used. If no SourceStamp exists revision
- is an empty string.
-
- patch - (str) Name of patch used. If no SourceStamp exists patch
- is an empty string.
-
- changes - (list of objs) List of change objects from SourceStamp. A change
- object has the following useful information:
-
- who - who made this change
- revision - what VC revision is this change
- branch - on what branch did this change occur
- when - when did this change occur
- files - what files were affected in this change
- comments - comments reguarding the change.
-
- The functions asText and asHTML return a list of strings with
- the above information formatted.
-
- logs - (List of Tuples) List of tuples that contain the log name, log url
- and log contents as a list of strings.
- """
- text = ""
- if attrs['mode'] == "all":
- text += "The Buildbot has finished a build"
- elif attrs['mode'] == "failing":
- text += "The Buildbot has detected a failed build"
- elif attrs['mode'] == "passing":
- text += "The Buildbot has detected a passing build"
- else:
- text += "The Buildbot has detected a new failure"
- text += " of %s on %s.\n" % (attrs['builderName'], attrs['projectName'])
- if attrs['buildURL']:
- text += "Full details are available at:\n %s\n" % attrs['buildURL']
- text += "\n"
-
- if attrs['buildbotURL']:
- text += "Buildbot URL: %s\n\n" % urllib.quote(attrs['buildbotURL'], '/:')
-
- text += "Buildslave for this Build: %s\n\n" % attrs['slavename']
- text += "Build Reason: %s\n" % attrs['reason']
-
- #
- # No source stamp
- #
- if attrs['branch']:
- source = "unavailable"
- else:
- source = ""
- if attrs['branch']:
- source += "[branch %s] " % attrs['branch']
- if attrs['revision']:
- source += attrs['revision']
- else:
- source += "HEAD"
- if attrs['patch']:
- source += " (plus patch)"
- text += "Build Source Stamp: %s\n" % source
-
- text += "Blamelist: %s\n" % ",".join(attrs['responsibleUsers'])
-
- text += "\n"
-
- t = attrs['buildText']
- if t:
- t = ": " + " ".join(t)
- else:
- t = ""
-
- if attrs['result'] == 'success':
- text += "Build succeeded!\n"
- elif attrs['result'] == 'warnings':
- text += "Build Had Warnings%s\n" % t
- else:
- text += "BUILD FAILED%s\n" % t
-
- text += "\n"
- text += "sincerely,\n"
- text += " -The Buildbot\n"
- text += "\n"
- return (text, 'plain')
-
-class Domain(util.ComparableMixin):
- implements(interfaces.IEmailLookup)
- compare_attrs = ["domain"]
-
- def __init__(self, domain):
- assert "@" not in domain
- self.domain = domain
-
- def getAddress(self, name):
- """If name is already an email address, pass it through."""
- if '@' in name:
- return name
- return name + "@" + self.domain
-
-
-class MailNotifier(base.StatusReceiverMultiService):
- """This is a status notifier which sends email to a list of recipients
- upon the completion of each build. It can be configured to only send out
- mail for certain builds, and only send messages when the build fails, or
- when it transitions from success to failure. It can also be configured to
- include various build logs in each message.
-
- By default, the message will be sent to the Interested Users list, which
- includes all developers who made changes in the build. You can add
- additional recipients with the extraRecipients argument.
-
- To get a simple one-message-per-build (say, for a mailing list), use
- sendToInterestedUsers=False, extraRecipients=['listaddr@example.org']
-
- Each MailNotifier sends mail to a single set of recipients. To send
- different kinds of mail to different recipients, use multiple
- MailNotifiers.
- """
-
- implements(interfaces.IEmailSender)
-
- compare_attrs = ["extraRecipients", "lookup", "fromaddr", "mode",
- "categories", "builders", "addLogs", "relayhost",
- "subject", "sendToInterestedUsers", "customMesg"]
-
- def __init__(self, fromaddr, mode="all", categories=None, builders=None,
- addLogs=False, relayhost="localhost",
- subject="buildbot %(result)s in %(projectName)s on %(builder)s",
- lookup=None, extraRecipients=[],
- sendToInterestedUsers=True, customMesg=message):
- """
- @type fromaddr: string
- @param fromaddr: the email address to be used in the 'From' header.
- @type sendToInterestedUsers: boolean
- @param sendToInterestedUsers: if True (the default), send mail to all
- of the Interested Users. If False, only
- send mail to the extraRecipients list.
-
- @type extraRecipients: tuple of string
- @param extraRecipients: a list of email addresses to which messages
- should be sent (in addition to the
- InterestedUsers list, which includes any
- developers who made Changes that went into this
- build). It is a good idea to create a small
- mailing list and deliver to that, then let
- subscribers come and go as they please.
-
- @type subject: string
- @param subject: a string to be used as the subject line of the message.
- %(builder)s will be replaced with the name of the
- builder which provoked the message.
-
- @type mode: string (defaults to all)
- @param mode: one of:
- - 'all': send mail about all builds, passing and failing
- - 'failing': only send mail about builds which fail
- - 'passing': only send mail about builds which succeed
- - 'problem': only send mail about a build which failed
- when the previous build passed
-
- @type builders: list of strings
- @param builders: a list of builder names for which mail should be
- sent. Defaults to None (send mail for all builds).
- Use either builders or categories, but not both.
-
- @type categories: list of strings
- @param categories: a list of category names to serve status
- information for. Defaults to None (all
- categories). Use either builders or categories,
- but not both.
-
- @type addLogs: boolean.
- @param addLogs: if True, include all build logs as attachments to the
- messages. These can be quite large. This can also be
- set to a list of log names, to send a subset of the
- logs. Defaults to False.
-
- @type relayhost: string
- @param relayhost: the host to which the outbound SMTP connection
- should be made. Defaults to 'localhost'
-
- @type lookup: implementor of {IEmailLookup}
- @param lookup: object which provides IEmailLookup, which is
- responsible for mapping User names (which come from
- the VC system) into valid email addresses. If not
- provided, the notifier will only be able to send mail
- to the addresses in the extraRecipients list. Most of
- the time you can use a simple Domain instance. As a
- shortcut, you can pass as string: this will be
- treated as if you had provided Domain(str). For
- example, lookup='twistedmatrix.com' will allow mail
- to be sent to all developers whose SVN usernames
- match their twistedmatrix.com account names.
-
- @type customMesg: func
- @param customMesg: A function that returns a tuple containing the text of
- a custom message and its type. This function takes
- the dict attrs which has the following values:
-
- builderName - (str) Name of the builder that generated this event.
-
- projectName - (str) Name of the project.
-
- mode - (str) Mode set in MailNotifier. (failing, passing, problem).
-
- result - (str) Builder result as a string. 'success', 'warnings',
- 'failure', 'skipped', or 'exception'
-
- buildURL - (str) URL to build page.
-
- buildbotURL - (str) URL to buildbot main page.
-
- buildText - (str) Build text from build.getText().
-
- slavename - (str) Slavename.
-
- reason - (str) Build reason from build.getReason().
-
- responsibleUsers - (List of str) List of responsible users.
-
- branch - (str) Name of branch used. If no SourceStamp exists branch
- is an empty string.
-
- revision - (str) Name of revision used. If no SourceStamp exists revision
- is an empty string.
-
- patch - (str) Name of patch used. If no SourceStamp exists patch
- is an empty string.
-
- changes - (list of objs) List of change objects from SourceStamp. A change
- object has the following useful information:
-
- who - who made this change
- revision - what VC revision is this change
- branch - on what branch did this change occur
- when - when did this change occur
- files - what files were affected in this change
- comments - comments reguarding the change.
-
- The functions asText and asHTML return a list of strings with
- the above information formatted.
-
- logs - (List of Tuples) List of tuples that contain the log name, log url,
- and log contents as a list of strings.
-
- """
-
- base.StatusReceiverMultiService.__init__(self)
- assert isinstance(extraRecipients, (list, tuple))
- for r in extraRecipients:
- assert isinstance(r, str)
- assert VALID_EMAIL.search(r) # require full email addresses, not User names
- self.extraRecipients = extraRecipients
- self.sendToInterestedUsers = sendToInterestedUsers
- self.fromaddr = fromaddr
- assert mode in ('all', 'failing', 'problem')
- self.mode = mode
- self.categories = categories
- self.builders = builders
- self.addLogs = addLogs
- self.relayhost = relayhost
- self.subject = subject
- if lookup is not None:
- if type(lookup) is str:
- lookup = Domain(lookup)
- assert interfaces.IEmailLookup.providedBy(lookup)
- self.lookup = lookup
- self.customMesg = customMesg
- self.watched = []
- self.status = None
-
- # you should either limit on builders or categories, not both
- if self.builders != None and self.categories != None:
- twlog.err("Please specify only builders to ignore or categories to include")
- raise # FIXME: the asserts above do not raise some Exception either
-
- def setServiceParent(self, parent):
- """
- @type parent: L{buildbot.master.BuildMaster}
- """
- base.StatusReceiverMultiService.setServiceParent(self, parent)
- self.setup()
-
- def setup(self):
- self.status = self.parent.getStatus()
- self.status.subscribe(self)
-
- def disownServiceParent(self):
- self.status.unsubscribe(self)
- for w in self.watched:
- w.unsubscribe(self)
- return base.StatusReceiverMultiService.disownServiceParent(self)
-
- def builderAdded(self, name, builder):
- # only subscribe to builders we are interested in
- if self.categories != None and builder.category not in self.categories:
- return None
-
- self.watched.append(builder)
- return self # subscribe to this builder
-
- def builderRemoved(self, name):
- pass
-
- def builderChangedState(self, name, state):
- pass
- def buildStarted(self, name, build):
- pass
- def buildFinished(self, name, build, results):
- # here is where we actually do something.
- builder = build.getBuilder()
- if self.builders is not None and name not in self.builders:
- return # ignore this build
- if self.categories is not None and \
- builder.category not in self.categories:
- return # ignore this build
-
- if self.mode == "failing" and results != FAILURE:
- return
- if self.mode == "passing" and results != SUCCESS:
- return
- if self.mode == "problem":
- if results != FAILURE:
- return
- prev = build.getPreviousBuild()
- if prev and prev.getResults() == FAILURE:
- return
- # for testing purposes, buildMessage returns a Deferred that fires
- # when the mail has been sent. To help unit tests, we return that
- # Deferred here even though the normal IStatusReceiver.buildFinished
- # signature doesn't do anything with it. If that changes (if
- # .buildFinished's return value becomes significant), we need to
- # rearrange this.
- return self.buildMessage(name, build, results)
-
- def buildMessage(self, name, build, results):
- #
- # logs is a list of tuples that contain the log
- # name, log url, and the log contents as a list of strings.
- #
- logs = list()
- for log in build.getLogs():
- stepName = log.getStep().getName()
- logName = log.getName()
- logs.append(('%s.%s' % (stepName, logName),
- '%s/steps/%s/logs/%s' % (self.status.getURLForThing(build), stepName, logName),
- log.getText().splitlines()))
-
- attrs = {'builderName': name,
- 'projectName': self.status.getProjectName(),
- 'mode': self.mode,
- 'result': Results[results],
- 'buildURL': self.status.getURLForThing(build),
- 'buildbotURL': self.status.getBuildbotURL(),
- 'buildText': build.getText(),
- 'slavename': build.getSlavename(),
- 'reason': build.getReason(),
- 'responsibleUsers': build.getResponsibleUsers(),
- 'branch': "",
- 'revision': "",
- 'patch': "",
- 'changes': [],
- 'logs': logs}
-
- ss = build.getSourceStamp()
- if ss:
- attrs['branch'] = ss.branch
- attrs['revision'] = ss.revision
- attrs['patch'] = ss.patch
- attrs['changes'] = ss.changes[:]
-
- text, type = self.customMesg(attrs)
- assert type in ('plain', 'html'), "'%s' message type must be 'plain' or 'html'." % type
-
- haveAttachments = False
- if attrs['patch'] or self.addLogs:
- haveAttachments = True
- if not canDoAttachments:
- twlog.msg("warning: I want to send mail with attachments, "
- "but this python is too old to have "
- "email.MIMEMultipart . Please upgrade to python-2.3 "
- "or newer to enable addLogs=True")
-
- if haveAttachments and canDoAttachments:
- m = MIMEMultipart()
- m.attach(MIMEText(text, type))
- else:
- m = Message()
- m.set_payload(text)
- m.set_type("text/%s" % type)
-
- m['Date'] = formatdate(localtime=True)
- m['Subject'] = self.subject % { 'result': attrs['result'],
- 'projectName': attrs['projectName'],
- 'builder': attrs['builderName'],
- }
- m['From'] = self.fromaddr
- # m['To'] is added later
-
- if attrs['patch']:
- a = MIMEText(attrs['patch'][1])
- a.add_header('Content-Disposition', "attachment",
- filename="source patch")
- m.attach(a)
- if self.addLogs:
- for log in build.getLogs():
- name = "%s.%s" % (log.getStep().getName(),
- log.getName())
- if self._shouldAttachLog(log.getName()) or self._shouldAttachLog(name):
- a = MIMEText(log.getText())
- a.add_header('Content-Disposition', "attachment",
- filename=name)
- m.attach(a)
-
- # now, who is this message going to?
- dl = []
- recipients = []
- if self.sendToInterestedUsers and self.lookup:
- for u in build.getInterestedUsers():
- d = defer.maybeDeferred(self.lookup.getAddress, u)
- d.addCallback(recipients.append)
- dl.append(d)
- d = defer.DeferredList(dl)
- d.addCallback(self._gotRecipients, recipients, m)
- return d
-
- def _shouldAttachLog(self, logname):
- if type(self.addLogs) is bool:
- return self.addLogs
- return logname in self.addLogs
-
- def _gotRecipients(self, res, rlist, m):
- recipients = set()
-
- for r in rlist:
- if r is None: # getAddress didn't like this address
- continue
-
- # Git can give emails like 'User' <user@foo.com>@foo.com so check
- # for two @ and chop the last
- if r.count('@') > 1:
- r = r[:r.rindex('@')]
-
- if VALID_EMAIL.search(r):
- recipients.add(r)
- else:
- twlog.msg("INVALID EMAIL: %r" + r)
-
- # if we're sending to interested users move the extra's to the CC
- # list so they can tell if they are also interested in the change
- # unless there are no interested users
- if self.sendToInterestedUsers and len(recipients):
- m['CC'] = ", ".join(sorted(self.extraRecipients[:]))
- else:
- [recipients.add(r) for r in self.extraRecipients[:]]
-
- m['To'] = ", ".join(sorted(recipients))
-
- # The extras weren't part of the TO list so add them now
- if self.sendToInterestedUsers:
- for r in self.extraRecipients:
- recipients.add(r)
-
- return self.sendMessage(m, list(recipients))
-
- def sendMessage(self, m, recipients):
- s = m.as_string()
- twlog.msg("sending mail (%d bytes) to" % len(s), recipients)
- return sendmail(self.relayhost, self.fromaddr, recipients, s)
diff --git a/buildbot/buildbot/status/progress.py b/buildbot/buildbot/status/progress.py
deleted file mode 100644
index dc4d3d5..0000000
--- a/buildbot/buildbot/status/progress.py
+++ /dev/null
@@ -1,308 +0,0 @@
-# -*- test-case-name: buildbot.test.test_status -*-
-
-from twisted.internet import reactor
-from twisted.spread import pb
-from twisted.python import log
-from buildbot import util
-
-class StepProgress:
- """I keep track of how much progress a single BuildStep has made.
-
- Progress is measured along various axes. Time consumed is one that is
- available for all steps. Amount of command output is another, and may be
- better quantified by scanning the output for markers to derive number of
- files compiled, directories walked, tests run, etc.
-
- I am created when the build begins, and given to a BuildProgress object
- so it can track the overall progress of the whole build.
-
- """
-
- startTime = None
- stopTime = None
- expectedTime = None
- buildProgress = None
- debug = False
-
- def __init__(self, name, metricNames):
- self.name = name
- self.progress = {}
- self.expectations = {}
- for m in metricNames:
- self.progress[m] = None
- self.expectations[m] = None
-
- def setBuildProgress(self, bp):
- self.buildProgress = bp
-
- def setExpectations(self, metrics):
- """The step can call this to explicitly set a target value for one
- of its metrics. E.g., ShellCommands knows how many commands it will
- execute, so it could set the 'commands' expectation."""
- for metric, value in metrics.items():
- self.expectations[metric] = value
- self.buildProgress.newExpectations()
-
- def setExpectedTime(self, seconds):
- self.expectedTime = seconds
- self.buildProgress.newExpectations()
-
- def start(self):
- if self.debug: print "StepProgress.start[%s]" % self.name
- self.startTime = util.now()
-
- def setProgress(self, metric, value):
- """The step calls this as progress is made along various axes."""
- if self.debug:
- print "setProgress[%s][%s] = %s" % (self.name, metric, value)
- self.progress[metric] = value
- if self.debug:
- r = self.remaining()
- print " step remaining:", r
- self.buildProgress.newProgress()
-
- def finish(self):
- """This stops the 'time' metric and marks the step as finished
- overall. It should be called after the last .setProgress has been
- done for each axis."""
- if self.debug: print "StepProgress.finish[%s]" % self.name
- self.stopTime = util.now()
- self.buildProgress.stepFinished(self.name)
-
- def totalTime(self):
- if self.startTime != None and self.stopTime != None:
- return self.stopTime - self.startTime
-
- def remaining(self):
- if self.startTime == None:
- return self.expectedTime
- if self.stopTime != None:
- return 0 # already finished
- # TODO: replace this with cleverness that graphs each metric vs.
- # time, then finds the inverse function. Will probably need to save
- # a timestamp with each setProgress update, when finished, go back
- # and find the 2% transition points, then save those 50 values in a
- # list. On the next build, do linear interpolation between the two
- # closest samples to come up with a percentage represented by that
- # metric.
-
- # TODO: If no other metrics are available, just go with elapsed
- # time. Given the non-time-uniformity of text output from most
- # steps, this would probably be better than the text-percentage
- # scheme currently implemented.
-
- percentages = []
- for metric, value in self.progress.items():
- expectation = self.expectations[metric]
- if value != None and expectation != None:
- p = 1.0 * value / expectation
- percentages.append(p)
- if percentages:
- avg = reduce(lambda x,y: x+y, percentages) / len(percentages)
- if avg > 1.0:
- # overdue
- avg = 1.0
- if avg < 0.0:
- avg = 0.0
- if percentages and self.expectedTime != None:
- return self.expectedTime - (avg * self.expectedTime)
- if self.expectedTime is not None:
- # fall back to pure time
- return self.expectedTime - (util.now() - self.startTime)
- return None # no idea
-
-
-class WatcherState:
- def __init__(self, interval):
- self.interval = interval
- self.timer = None
- self.needUpdate = 0
-
-class BuildProgress(pb.Referenceable):
- """I keep track of overall build progress. I hold a list of StepProgress
- objects.
- """
-
- def __init__(self, stepProgresses):
- self.steps = {}
- for s in stepProgresses:
- self.steps[s.name] = s
- s.setBuildProgress(self)
- self.finishedSteps = []
- self.watchers = {}
- self.debug = 0
-
- def setExpectationsFrom(self, exp):
- """Set our expectations from the builder's Expectations object."""
- for name, metrics in exp.steps.items():
- s = self.steps[name]
- s.setExpectedTime(exp.times[name])
- s.setExpectations(exp.steps[name])
-
- def newExpectations(self):
- """Call this when one of the steps has changed its expectations.
- This should trigger us to update our ETA value and notify any
- subscribers."""
- pass # subscribers are not implemented: they just poll
-
- def stepFinished(self, stepname):
- assert(stepname not in self.finishedSteps)
- self.finishedSteps.append(stepname)
- if len(self.finishedSteps) == len(self.steps.keys()):
- self.sendLastUpdates()
-
- def newProgress(self):
- r = self.remaining()
- if self.debug:
- print " remaining:", r
- if r != None:
- self.sendAllUpdates()
-
- def remaining(self):
- # sum eta of all steps
- sum = 0
- for name, step in self.steps.items():
- rem = step.remaining()
- if rem == None:
- return None # not sure
- sum += rem
- return sum
- def eta(self):
- left = self.remaining()
- if left == None:
- return None # not sure
- done = util.now() + left
- return done
-
-
- def remote_subscribe(self, remote, interval=5):
- # [interval, timer, needUpdate]
- # don't send an update more than once per interval
- self.watchers[remote] = WatcherState(interval)
- remote.notifyOnDisconnect(self.removeWatcher)
- self.updateWatcher(remote)
- self.startTimer(remote)
- log.msg("BuildProgress.remote_subscribe(%s)" % remote)
- def remote_unsubscribe(self, remote):
- # TODO: this doesn't work. I think 'remote' will always be different
- # than the object that appeared in _subscribe.
- log.msg("BuildProgress.remote_unsubscribe(%s)" % remote)
- self.removeWatcher(remote)
- #remote.dontNotifyOnDisconnect(self.removeWatcher)
- def removeWatcher(self, remote):
- #log.msg("removeWatcher(%s)" % remote)
- try:
- timer = self.watchers[remote].timer
- if timer:
- timer.cancel()
- del self.watchers[remote]
- except KeyError:
- log.msg("Weird, removeWatcher on non-existent subscriber:",
- remote)
- def sendAllUpdates(self):
- for r in self.watchers.keys():
- self.updateWatcher(r)
- def updateWatcher(self, remote):
- # an update wants to go to this watcher. Send it if we can, otherwise
- # queue it for later
- w = self.watchers[remote]
- if not w.timer:
- # no timer, so send update now and start the timer
- self.sendUpdate(remote)
- self.startTimer(remote)
- else:
- # timer is running, just mark as needing an update
- w.needUpdate = 1
- def startTimer(self, remote):
- w = self.watchers[remote]
- timer = reactor.callLater(w.interval, self.watcherTimeout, remote)
- w.timer = timer
- def sendUpdate(self, remote, last=0):
- self.watchers[remote].needUpdate = 0
- #text = self.asText() # TODO: not text, duh
- try:
- remote.callRemote("progress", self.remaining())
- if last:
- remote.callRemote("finished", self)
- except:
- log.deferr()
- self.removeWatcher(remote)
-
- def watcherTimeout(self, remote):
- w = self.watchers.get(remote, None)
- if not w:
- return # went away
- w.timer = None
- if w.needUpdate:
- self.sendUpdate(remote)
- self.startTimer(remote)
- def sendLastUpdates(self):
- for remote in self.watchers.keys():
- self.sendUpdate(remote, 1)
- self.removeWatcher(remote)
-
-
-class Expectations:
- debug = False
- # decay=1.0 ignores all but the last build
- # 0.9 is short time constant. 0.1 is very long time constant
- # TODO: let decay be specified per-metric
- decay = 0.5
-
- def __init__(self, buildprogress):
- """Create us from a successful build. We will expect each step to
- take as long as it did in that build."""
-
- # .steps maps stepname to dict2
- # dict2 maps metricname to final end-of-step value
- self.steps = {}
-
- # .times maps stepname to per-step elapsed time
- self.times = {}
-
- for name, step in buildprogress.steps.items():
- self.steps[name] = {}
- for metric, value in step.progress.items():
- self.steps[name][metric] = value
- self.times[name] = None
- if step.startTime is not None and step.stopTime is not None:
- self.times[name] = step.stopTime - step.startTime
-
- def wavg(self, old, current):
- if old is None:
- return current
- if current is None:
- return old
- else:
- return (current * self.decay) + (old * (1 - self.decay))
-
- def update(self, buildprogress):
- for name, stepprogress in buildprogress.steps.items():
- old = self.times[name]
- current = stepprogress.totalTime()
- if current == None:
- log.msg("Expectations.update: current[%s] was None!" % name)
- continue
- new = self.wavg(old, current)
- self.times[name] = new
- if self.debug:
- print "new expected time[%s] = %s, old %s, cur %s" % \
- (name, new, old, current)
-
- for metric, current in stepprogress.progress.items():
- old = self.steps[name][metric]
- new = self.wavg(old, current)
- if self.debug:
- print "new expectation[%s][%s] = %s, old %s, cur %s" % \
- (name, metric, new, old, current)
- self.steps[name][metric] = new
-
- def expectedBuildTime(self):
- if None in self.times.values():
- return None
- #return sum(self.times.values())
- # python-2.2 doesn't have 'sum'. TODO: drop python-2.2 support
- s = 0
- for v in self.times.values():
- s += v
- return s
diff --git a/buildbot/buildbot/status/tests.py b/buildbot/buildbot/status/tests.py
deleted file mode 100644
index 4c4c894..0000000
--- a/buildbot/buildbot/status/tests.py
+++ /dev/null
@@ -1,73 +0,0 @@
-
-from twisted.web import resource
-from twisted.web.error import NoResource
-
-# these are our test result types. Steps are responsible for mapping results
-# into these values.
-SKIP, EXPECTED_FAILURE, FAILURE, ERROR, UNEXPECTED_SUCCESS, SUCCESS = \
- "skip", "expected failure", "failure", "error", "unexpected success", \
- "success"
-UNKNOWN = "unknown" # catch-all
-
-
-class OneTest(resource.Resource):
- isLeaf = 1
- def __init__(self, parent, testName, results):
- self.parent = parent
- self.testName = testName
- self.resultType, self.results = results
-
- def render(self, request):
- request.setHeader("content-type", "text/html")
- if request.method == "HEAD":
- request.setHeader("content-length", len(self.html(request)))
- return ''
- return self.html(request)
-
- def html(self, request):
- # turn ourselves into HTML
- raise NotImplementedError
-
-class TestResults(resource.Resource):
- oneTestClass = OneTest
- def __init__(self):
- resource.Resource.__init__(self)
- self.tests = {}
- def addTest(self, testName, resultType, results=None):
- self.tests[testName] = (resultType, results)
- # TODO: .setName and .delete should be used on our Swappable
- def countTests(self):
- return len(self.tests)
- def countFailures(self):
- failures = 0
- for t in self.tests.values():
- if t[0] in (FAILURE, ERROR):
- failures += 1
- return failures
- def summary(self):
- """Return a short list of text strings as a summary, suitable for
- inclusion in an Event"""
- return ["some", "tests"]
- def describeOneTest(self, testname):
- return "%s: %s\n" % (testname, self.tests[testname][0])
- def html(self):
- data = "<html>\n<head><title>Test Results</title></head>\n"
- data += "<body>\n"
- data += "<pre>\n"
- tests = self.tests.keys()
- tests.sort()
- for testname in tests:
- data += self.describeOneTest(testname)
- data += "</pre>\n"
- data += "</body></html>\n"
- return data
- def render(self, request):
- request.setHeader("content-type", "text/html")
- if request.method == "HEAD":
- request.setHeader("content-length", len(self.html()))
- return ''
- return self.html()
- def getChild(self, path, request):
- if self.tests.has_key(path):
- return self.oneTestClass(self, path, self.tests[path])
- return NoResource("No such test '%s'" % path)
diff --git a/buildbot/buildbot/status/tinderbox.py b/buildbot/buildbot/status/tinderbox.py
deleted file mode 100644
index 51d404b..0000000
--- a/buildbot/buildbot/status/tinderbox.py
+++ /dev/null
@@ -1,223 +0,0 @@
-
-from email.Message import Message
-from email.Utils import formatdate
-
-from zope.interface import implements
-from twisted.internet import defer
-
-from buildbot import interfaces
-from buildbot.status import mail
-from buildbot.status.builder import SUCCESS, WARNINGS
-from buildbot.steps.shell import WithProperties
-
-import zlib, bz2, base64
-
-# TODO: docs, maybe a test of some sort just to make sure it actually imports
-# and can format email without raising an exception.
-
-class TinderboxMailNotifier(mail.MailNotifier):
- """This is a Tinderbox status notifier. It can send e-mail to a number of
- different tinderboxes or people. E-mails are sent at the beginning and
- upon completion of each build. It can be configured to send out e-mails
- for only certain builds.
-
- The most basic usage is as follows::
- TinderboxMailNotifier(fromaddr="buildbot@localhost",
- tree="MyTinderboxTree",
- extraRecipients=["tinderboxdaemon@host.org"])
-
- The builder name (as specified in master.cfg) is used as the "build"
- tinderbox option.
-
- """
- implements(interfaces.IEmailSender)
-
- compare_attrs = ["extraRecipients", "fromaddr", "categories", "builders",
- "addLogs", "relayhost", "subject", "binaryURL", "tree",
- "logCompression", "errorparser", "columnName",
- "useChangeTime"]
-
- def __init__(self, fromaddr, tree, extraRecipients,
- categories=None, builders=None, relayhost="localhost",
- subject="buildbot %(result)s in %(builder)s", binaryURL="",
- logCompression="", errorparser="unix", columnName=None,
- useChangeTime=False):
- """
- @type fromaddr: string
- @param fromaddr: the email address to be used in the 'From' header.
-
- @type tree: string
- @param tree: The Tinderbox tree to post to.
-
- @type extraRecipients: tuple of string
- @param extraRecipients: E-mail addresses of recipients. This should at
- least include the tinderbox daemon.
-
- @type categories: list of strings
- @param categories: a list of category names to serve status
- information for. Defaults to None (all
- categories). Use either builders or categories,
- but not both.
-
- @type builders: list of strings
- @param builders: a list of builder names for which mail should be
- sent. Defaults to None (send mail for all builds).
- Use either builders or categories, but not both.
-
- @type relayhost: string
- @param relayhost: the host to which the outbound SMTP connection
- should be made. Defaults to 'localhost'
-
- @type subject: string
- @param subject: a string to be used as the subject line of the message.
- %(builder)s will be replaced with the name of the
- %builder which provoked the message.
- This parameter is not significant for the tinderbox
- daemon.
-
- @type binaryURL: string
- @param binaryURL: If specified, this should be the location where final
- binary for a build is located.
- (ie. http://www.myproject.org/nightly/08-08-2006.tgz)
- It will be posted to the Tinderbox.
-
- @type logCompression: string
- @param logCompression: The type of compression to use on the log.
- Valid options are"bzip2" and "gzip". gzip is
- only known to work on Python 2.4 and above.
-
- @type errorparser: string
- @param errorparser: The error parser that the Tinderbox server
- should use when scanning the log file.
- Default is "unix".
-
- @type columnName: string
- @param columnName: When columnName is None, use the buildername as
- the Tinderbox column name. When columnName is a
- string this exact string will be used for all
- builders that this TinderboxMailNotifier cares
- about (not recommended). When columnName is a
- WithProperties instance it will be interpolated
- as such. See WithProperties for more detail.
- @type useChangeTime: bool
- @param useChangeTime: When True, the time of the first Change for a
- build is used as the builddate. When False,
- the current time is used as the builddate.
- """
-
- mail.MailNotifier.__init__(self, fromaddr, categories=categories,
- builders=builders, relayhost=relayhost,
- subject=subject,
- extraRecipients=extraRecipients,
- sendToInterestedUsers=False)
- self.tree = tree
- self.binaryURL = binaryURL
- self.logCompression = logCompression
- self.errorparser = errorparser
- self.useChangeTime = useChangeTime
- assert columnName is None or type(columnName) is str \
- or isinstance(columnName, WithProperties), \
- "columnName must be None, a string, or a WithProperties instance"
- self.columnName = columnName
-
- def buildStarted(self, name, build):
- builder = build.getBuilder()
- if self.builders is not None and name not in self.builders:
- return # ignore this Build
- if self.categories is not None and \
- builder.category not in self.categories:
- return # ignore this build
- self.buildMessage(name, build, "building")
-
- def buildMessage(self, name, build, results):
- text = ""
- res = ""
- # shortform
- t = "tinderbox:"
-
- text += "%s tree: %s\n" % (t, self.tree)
- # the start time
- # getTimes() returns a fractioned time that tinderbox doesn't understand
- builddate = int(build.getTimes()[0])
- # attempt to pull a Change time from this Build's Changes.
- # if that doesn't work, fall back on the current time
- if self.useChangeTime:
- try:
- builddate = build.getChanges()[-1].when
- except:
- pass
- text += "%s builddate: %s\n" % (t, builddate)
- text += "%s status: " % t
-
- if results == "building":
- res = "building"
- text += res
- elif results == SUCCESS:
- res = "success"
- text += res
- elif results == WARNINGS:
- res = "testfailed"
- text += res
- else:
- res += "busted"
- text += res
-
- text += "\n";
-
- if self.columnName is None:
- # use the builder name
- text += "%s build: %s\n" % (t, name)
- elif type(self.columnName) is str:
- # use the exact string given
- text += "%s build: %s\n" % (t, self.columnName)
- elif isinstance(self.columnName, WithProperties):
- # interpolate the WithProperties instance, use that
- text += "%s build: %s\n" % (t, build.getProperties().render(self.columnName))
- else:
- raise Exception("columnName is an unhandled value")
- text += "%s errorparser: %s\n" % (t, self.errorparser)
-
- # if the build just started...
- if results == "building":
- text += "%s END\n" % t
- # if the build finished...
- else:
- text += "%s binaryurl: %s\n" % (t, self.binaryURL)
- text += "%s logcompression: %s\n" % (t, self.logCompression)
-
- # logs will always be appended
- logEncoding = ""
- tinderboxLogs = ""
- for log in build.getLogs():
- l = ""
- if self.logCompression == "bzip2":
- compressedLog = bz2.compress(log.getText())
- l = base64.encodestring(compressedLog)
- logEncoding = "base64";
- elif self.logCompression == "gzip":
- compressedLog = zlib.compress(log.getText())
- l = base64.encodestring(compressedLog)
- logEncoding = "base64";
- else:
- l = log.getText()
- tinderboxLogs += l
-
- text += "%s logencoding: %s\n" % (t, logEncoding)
- text += "%s END\n\n" % t
- text += tinderboxLogs
- text += "\n"
-
- m = Message()
- m.set_payload(text)
-
- m['Date'] = formatdate(localtime=True)
- m['Subject'] = self.subject % { 'result': res,
- 'builder': name,
- }
- m['From'] = self.fromaddr
- # m['To'] is added later
-
- d = defer.DeferredList([])
- d.addCallback(self._gotRecipients, self.extraRecipients, m)
- return d
-
diff --git a/buildbot/buildbot/status/web/__init__.py b/buildbot/buildbot/status/web/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/buildbot/buildbot/status/web/__init__.py
+++ /dev/null
diff --git a/buildbot/buildbot/status/web/about.py b/buildbot/buildbot/status/web/about.py
deleted file mode 100644
index 09748e6..0000000
--- a/buildbot/buildbot/status/web/about.py
+++ /dev/null
@@ -1,33 +0,0 @@
-
-from twisted.web import html
-from buildbot.status.web.base import HtmlResource
-import buildbot
-import twisted
-import sys
-
-class AboutBuildbot(HtmlResource):
- title = "About this Buildbot"
-
- def body(self, request):
- data = ''
- data += '<h1>Welcome to the Buildbot</h1>\n'
- data += '<h2>Version Information</h2>\n'
- data += '<ul>\n'
- data += ' <li>Buildbot: %s</li>\n' % html.escape(buildbot.version)
- data += ' <li>Twisted: %s</li>\n' % html.escape(twisted.__version__)
- data += ' <li>Python: %s</li>\n' % html.escape(sys.version)
- data += ' <li>Buildmaster platform: %s</li>\n' % html.escape(sys.platform)
- data += '</ul>\n'
-
- data += '''
-<h2>Source code</h2>
-
-<p>Buildbot is a free software project, released under the terms of the
-<a href="http://www.gnu.org/licenses/gpl.html">GNU GPL</a>.</p>
-
-<p>Please visit the <a href="http://buildbot.net/">Buildbot Home Page</a> for
-more information, including documentation, bug reports, and source
-downloads.</p>
-'''
- return data
-
diff --git a/buildbot/buildbot/status/web/base.py b/buildbot/buildbot/status/web/base.py
deleted file mode 100644
index e515a25..0000000
--- a/buildbot/buildbot/status/web/base.py
+++ /dev/null
@@ -1,421 +0,0 @@
-
-import urlparse, urllib, time
-from zope.interface import Interface
-from twisted.web import html, resource
-from buildbot.status import builder
-from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION
-from buildbot import version, util
-
-class ITopBox(Interface):
- """I represent a box in the top row of the waterfall display: the one
- which shows the status of the last build for each builder."""
- def getBox(self, request):
- """Return a Box instance, which can produce a <td> cell.
- """
-
-class ICurrentBox(Interface):
- """I represent the 'current activity' box, just above the builder name."""
- def getBox(self, status):
- """Return a Box instance, which can produce a <td> cell.
- """
-
-class IBox(Interface):
- """I represent a box in the waterfall display."""
- def getBox(self, request):
- """Return a Box instance, which wraps an Event and can produce a <td>
- cell.
- """
-
-class IHTMLLog(Interface):
- pass
-
-css_classes = {SUCCESS: "success",
- WARNINGS: "warnings",
- FAILURE: "failure",
- SKIPPED: "skipped",
- EXCEPTION: "exception",
- }
-
-ROW_TEMPLATE = '''
-<div class="row">
- <span class="label">%(label)s</span>
- <span class="field">%(field)s</span>
-</div>
-'''
-
-def make_row(label, field):
- """Create a name/value row for the HTML.
-
- `label` is plain text; it will be HTML-encoded.
-
- `field` is a bit of HTML structure; it will not be encoded in
- any way.
- """
- label = html.escape(label)
- return ROW_TEMPLATE % {"label": label, "field": field}
-
-def make_stop_form(stopURL, on_all=False, label="Build"):
- if on_all:
- data = """<form action="%s" class='command stopbuild'>
- <p>To stop all builds, fill out the following fields and
- push the 'Stop' button</p>\n""" % stopURL
- else:
- data = """<form action="%s" class='command stopbuild'>
- <p>To stop this build, fill out the following fields and
- push the 'Stop' button</p>\n""" % stopURL
- data += make_row("Your name:",
- "<input type='text' name='username' />")
- data += make_row("Reason for stopping build:",
- "<input type='text' name='comments' />")
- data += '<input type="submit" value="Stop %s" /></form>\n' % label
- return data
-
-def make_force_build_form(forceURL, on_all=False):
- if on_all:
- data = """<form action="%s" class="command forcebuild">
- <p>To force a build on all Builders, fill out the following fields
- and push the 'Force Build' button</p>""" % forceURL
- else:
- data = """<form action="%s" class="command forcebuild">
- <p>To force a build, fill out the following fields and
- push the 'Force Build' button</p>""" % forceURL
- return (data
- + make_row("Your name:",
- "<input type='text' name='username' />")
- + make_row("Reason for build:",
- "<input type='text' name='comments' />")
- + make_row("Branch to build:",
- "<input type='text' name='branch' />")
- + make_row("Revision to build:",
- "<input type='text' name='revision' />")
- + '<input type="submit" value="Force Build" /></form>\n')
-
-def td(text="", parms={}, **props):
- data = ""
- data += " "
- #if not props.has_key("border"):
- # props["border"] = 1
- props.update(parms)
- comment = props.get("comment", None)
- if comment:
- data += "<!-- %s -->" % comment
- data += "<td"
- class_ = props.get('class_', None)
- if class_:
- props["class"] = class_
- for prop in ("align", "colspan", "rowspan", "border",
- "valign", "halign", "class"):
- p = props.get(prop, None)
- if p != None:
- data += " %s=\"%s\"" % (prop, p)
- data += ">"
- if not text:
- text = "&nbsp;"
- if isinstance(text, list):
- data += "<br />".join(text)
- else:
- data += text
- data += "</td>\n"
- return data
-
-def build_get_class(b):
- """
- Return the class to use for a finished build or buildstep,
- based on the result.
- """
- # FIXME: this getResults duplicity might need to be fixed
- result = b.getResults()
- #print "THOMAS: result for b %r: %r" % (b, result)
- if isinstance(b, builder.BuildStatus):
- result = b.getResults()
- elif isinstance(b, builder.BuildStepStatus):
- result = b.getResults()[0]
- # after forcing a build, b.getResults() returns ((None, []), []), ugh
- if isinstance(result, tuple):
- result = result[0]
- else:
- raise TypeError, "%r is not a BuildStatus or BuildStepStatus" % b
-
- if result == None:
- # FIXME: this happens when a buildstep is running ?
- return "running"
- return builder.Results[result]
-
-def path_to_root(request):
- # /waterfall : ['waterfall'] -> ''
- # /somewhere/lower : ['somewhere', 'lower'] -> '../'
- # /somewhere/indexy/ : ['somewhere', 'indexy', ''] -> '../../'
- # / : [] -> ''
- if request.prepath:
- segs = len(request.prepath) - 1
- else:
- segs = 0
- root = "../" * segs
- return root
-
-def path_to_builder(request, builderstatus):
- return (path_to_root(request) +
- "builders/" +
- urllib.quote(builderstatus.getName(), safe=''))
-
-def path_to_build(request, buildstatus):
- return (path_to_builder(request, buildstatus.getBuilder()) +
- "/builds/%d" % buildstatus.getNumber())
-
-def path_to_step(request, stepstatus):
- return (path_to_build(request, stepstatus.getBuild()) +
- "/steps/%s" % urllib.quote(stepstatus.getName(), safe=''))
-
-def path_to_slave(request, slave):
- return (path_to_root(request) +
- "buildslaves/" +
- urllib.quote(slave.getName(), safe=''))
-
-class Box:
- # a Box wraps an Event. The Box has HTML <td> parameters that Events
- # lack, and it has a base URL to which each File's name is relative.
- # Events don't know about HTML.
- spacer = False
- def __init__(self, text=[], class_=None, urlbase=None,
- **parms):
- self.text = text
- self.class_ = class_
- self.urlbase = urlbase
- self.show_idle = 0
- if parms.has_key('show_idle'):
- del parms['show_idle']
- self.show_idle = 1
-
- self.parms = parms
- # parms is a dict of HTML parameters for the <td> element that will
- # represent this Event in the waterfall display.
-
- def td(self, **props):
- props.update(self.parms)
- text = self.text
- if not text and self.show_idle:
- text = ["[idle]"]
- return td(text, props, class_=self.class_)
-
-
-class HtmlResource(resource.Resource):
- # this is a cheap sort of template thingy
- contentType = "text/html; charset=UTF-8"
- title = "Buildbot"
- addSlash = False # adapted from Nevow
-
- def getChild(self, path, request):
- if self.addSlash and path == "" and len(request.postpath) == 0:
- return self
- return resource.Resource.getChild(self, path, request)
-
- def render(self, request):
- # tell the WebStatus about the HTTPChannel that got opened, so they
- # can close it if we get reconfigured and the WebStatus goes away.
- # They keep a weakref to this, since chances are good that it will be
- # closed by the browser or by us before we get reconfigured. See
- # ticket #102 for details.
- if hasattr(request, "channel"):
- # web.distrib.Request has no .channel
- request.site.buildbot_service.registerChannel(request.channel)
-
- # Our pages no longer require that their URL end in a slash. Instead,
- # they all use request.childLink() or some equivalent which takes the
- # last path component into account. This clause is left here for
- # historical and educational purposes.
- if False and self.addSlash and request.prepath[-1] != '':
- # this is intended to behave like request.URLPath().child('')
- # but we need a relative URL, since we might be living behind a
- # reverse proxy
- #
- # note that the Location: header (as used in redirects) are
- # required to have absolute URIs, and my attempt to handle
- # reverse-proxies gracefully violates rfc2616. This frequently
- # works, but single-component paths sometimes break. The best
- # strategy is to avoid these redirects whenever possible by using
- # HREFs with trailing slashes, and only use the redirects for
- # manually entered URLs.
- url = request.prePathURL()
- scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
- new_url = request.prepath[-1] + "/"
- if query:
- new_url += "?" + query
- request.redirect(new_url)
- return ''
-
- data = self.content(request)
- if isinstance(data, unicode):
- data = data.encode("utf-8")
- request.setHeader("content-type", self.contentType)
- if request.method == "HEAD":
- request.setHeader("content-length", len(data))
- return ''
- return data
-
- def getStatus(self, request):
- return request.site.buildbot_service.getStatus()
- def getControl(self, request):
- return request.site.buildbot_service.getControl()
-
- def getChangemaster(self, request):
- return request.site.buildbot_service.getChangeSvc()
-
- def path_to_root(self, request):
- return path_to_root(request)
-
- def footer(self, s, req):
- # TODO: this stuff should be generated by a template of some sort
- projectURL = s.getProjectURL()
- projectName = s.getProjectName()
- data = '<hr /><div class="footer">\n'
-
- welcomeurl = self.path_to_root(req) + "index.html"
- data += '[<a href="%s">welcome</a>]\n' % welcomeurl
- data += "<br />\n"
-
- data += '<a href="http://buildbot.sourceforge.net/">Buildbot</a>'
- data += "-%s " % version
- if projectName:
- data += "working for the "
- if projectURL:
- data += "<a href=\"%s\">%s</a> project." % (projectURL,
- projectName)
- else:
- data += "%s project." % projectName
- data += "<br />\n"
- data += ("Page built: " +
- time.strftime("%a %d %b %Y %H:%M:%S",
- time.localtime(util.now()))
- + "\n")
- data += '</div>\n'
-
- return data
-
- def getTitle(self, request):
- return self.title
-
- def fillTemplate(self, template, request):
- s = request.site.buildbot_service
- values = s.template_values.copy()
- values['root'] = self.path_to_root(request)
- # e.g. to reference the top-level 'buildbot.css' page, use
- # "%(root)sbuildbot.css"
- values['title'] = self.getTitle(request)
- return template % values
-
- def content(self, request):
- s = request.site.buildbot_service
- data = ""
- data += self.fillTemplate(s.header, request)
- data += "<head>\n"
- for he in s.head_elements:
- data += " " + self.fillTemplate(he, request) + "\n"
- data += self.head(request)
- data += "</head>\n\n"
-
- data += '<body %s>\n' % " ".join(['%s="%s"' % (k,v)
- for (k,v) in s.body_attrs.items()])
- data += self.body(request)
- data += "</body>\n"
- data += self.fillTemplate(s.footer, request)
- return data
-
- def head(self, request):
- return ""
-
- def body(self, request):
- return "Dummy\n"
-
-class StaticHTML(HtmlResource):
- def __init__(self, body, title):
- HtmlResource.__init__(self)
- self.bodyHTML = body
- self.title = title
- def body(self, request):
- return self.bodyHTML
-
-MINUTE = 60
-HOUR = 60*MINUTE
-DAY = 24*HOUR
-WEEK = 7*DAY
-MONTH = 30*DAY
-
-def plural(word, words, num):
- if int(num) == 1:
- return "%d %s" % (num, word)
- else:
- return "%d %s" % (num, words)
-
-def abbreviate_age(age):
- if age <= 90:
- return "%s ago" % plural("second", "seconds", age)
- if age < 90*MINUTE:
- return "about %s ago" % plural("minute", "minutes", age / MINUTE)
- if age < DAY:
- return "about %s ago" % plural("hour", "hours", age / HOUR)
- if age < 2*WEEK:
- return "about %s ago" % plural("day", "days", age / DAY)
- if age < 2*MONTH:
- return "about %s ago" % plural("week", "weeks", age / WEEK)
- return "a long time ago"
-
-
-class OneLineMixin:
- LINE_TIME_FORMAT = "%b %d %H:%M"
-
- def get_line_values(self, req, build):
- '''
- Collect the data needed for each line display
- '''
- builder_name = build.getBuilder().getName()
- results = build.getResults()
- text = build.getText()
- try:
- rev = build.getProperty("got_revision")
- if rev is None:
- rev = "??"
- except KeyError:
- rev = "??"
- rev = str(rev)
- if len(rev) > 40:
- rev = "version is too-long"
- root = self.path_to_root(req)
- css_class = css_classes.get(results, "")
- values = {'class': css_class,
- 'builder_name': builder_name,
- 'buildnum': build.getNumber(),
- 'results': css_class,
- 'text': " ".join(build.getText()),
- 'buildurl': path_to_build(req, build),
- 'builderurl': path_to_builder(req, build.getBuilder()),
- 'rev': rev,
- 'time': time.strftime(self.LINE_TIME_FORMAT,
- time.localtime(build.getTimes()[0])),
- }
- return values
-
- def make_line(self, req, build, include_builder=True):
- '''
- Format and render a single line into HTML
- '''
- values = self.get_line_values(req, build)
- fmt_pieces = ['<font size="-1">(%(time)s)</font>',
- 'rev=[%(rev)s]',
- '<span class="%(class)s">%(results)s</span>',
- ]
- if include_builder:
- fmt_pieces.append('<a href="%(builderurl)s">%(builder_name)s</a>')
- fmt_pieces.append('<a href="%(buildurl)s">#%(buildnum)d</a>:')
- fmt_pieces.append('%(text)s')
- data = " ".join(fmt_pieces) % values
- return data
-
-def map_branches(branches):
- # when the query args say "trunk", present that to things like
- # IBuilderStatus.generateFinishedBuilds as None, since that's the
- # convention in use. But also include 'trunk', because some VC systems
- # refer to it that way. In the long run we should clean this up better,
- # maybe with Branch objects or something.
- if "trunk" in branches:
- return branches + [None]
- return branches
diff --git a/buildbot/buildbot/status/web/baseweb.py b/buildbot/buildbot/status/web/baseweb.py
deleted file mode 100644
index a963a9a..0000000
--- a/buildbot/buildbot/status/web/baseweb.py
+++ /dev/null
@@ -1,614 +0,0 @@
-
-import os, sys, urllib, weakref
-from itertools import count
-
-from zope.interface import implements
-from twisted.python import log
-from twisted.application import strports, service
-from twisted.web import server, distrib, static, html
-from twisted.spread import pb
-
-from buildbot.interfaces import IControl, IStatusReceiver
-
-from buildbot.status.web.base import HtmlResource, Box, \
- build_get_class, ICurrentBox, OneLineMixin, map_branches, \
- make_stop_form, make_force_build_form
-from buildbot.status.web.feeds import Rss20StatusResource, \
- Atom10StatusResource
-from buildbot.status.web.waterfall import WaterfallStatusResource
-from buildbot.status.web.grid import GridStatusResource
-from buildbot.status.web.changes import ChangesResource
-from buildbot.status.web.builder import BuildersResource
-from buildbot.status.web.slaves import BuildSlavesResource
-from buildbot.status.web.xmlrpc import XMLRPCServer
-from buildbot.status.web.about import AboutBuildbot
-
-# this class contains the status services (WebStatus and the older Waterfall)
-# which can be put in c['status']. It also contains some of the resources
-# that are attached to the WebStatus at various well-known URLs, which the
-# admin might wish to attach (using WebStatus.putChild) at other URLs.
-
-
-class LastBuild(HtmlResource):
- def body(self, request):
- return "missing\n"
-
-def getLastNBuilds(status, numbuilds, builders=[], branches=[]):
- """Return a list with the last few Builds, sorted by start time.
- builder_names=None means all builders
- """
-
- # TODO: this unsorts the list of builder names, ick
- builder_names = set(status.getBuilderNames())
- if builders:
- builder_names = builder_names.intersection(set(builders))
-
- # to make sure that we get everything, we must get 'numbuilds' builds
- # from *each* source, then sort by ending time, then trim to the last
- # 20. We could be more efficient, but it would require the same
- # gnarly code that the Waterfall uses to generate one event at a
- # time. TODO: factor that code out into some useful class.
- events = []
- for builder_name in builder_names:
- builder = status.getBuilder(builder_name)
- for build_number in count(1):
- if build_number > numbuilds:
- break # enough from this builder, move on to another
- build = builder.getBuild(-build_number)
- if not build:
- break # no more builds here, move on to the next builder
- #if not build.isFinished():
- # continue
- (build_start, build_end) = build.getTimes()
- event = (build_start, builder_name, build)
- events.append(event)
- def _sorter(a, b):
- return cmp( a[:2], b[:2] )
- events.sort(_sorter)
- # now only return the actual build, and only return some of them
- return [e[2] for e in events[-numbuilds:]]
-
-
-# /one_line_per_build
-# accepts builder=, branch=, numbuilds=
-class OneLinePerBuild(HtmlResource, OneLineMixin):
- """This shows one line per build, combining all builders together. Useful
- query arguments:
-
- numbuilds=: how many lines to display
- builder=: show only builds for this builder. Multiple builder= arguments
- can be used to see builds from any builder in the set.
- """
-
- title = "Recent Builds"
-
- def __init__(self, numbuilds=20):
- HtmlResource.__init__(self)
- self.numbuilds = numbuilds
-
- def getChild(self, path, req):
- status = self.getStatus(req)
- builder = status.getBuilder(path)
- return OneLinePerBuildOneBuilder(builder)
-
- def body(self, req):
- status = self.getStatus(req)
- control = self.getControl(req)
- numbuilds = int(req.args.get("numbuilds", [self.numbuilds])[0])
- builders = req.args.get("builder", [])
- branches = [b for b in req.args.get("branch", []) if b]
-
- g = status.generateFinishedBuilds(builders, map_branches(branches),
- numbuilds)
-
- data = ""
-
- # really this is "up to %d builds"
- data += "<h1>Last %d finished builds: %s</h1>\n" % \
- (numbuilds, ", ".join(branches))
- if builders:
- data += ("<p>of builders: %s</p>\n" % (", ".join(builders)))
- data += "<ul>\n"
- got = 0
- building = False
- online = 0
- for build in g:
- got += 1
- data += " <li>" + self.make_line(req, build) + "</li>\n"
- builder_status = build.getBuilder().getState()[0]
- if builder_status == "building":
- building = True
- online += 1
- elif builder_status != "offline":
- online += 1
- if not got:
- data += " <li>No matching builds found</li>\n"
- data += "</ul>\n"
-
- if control is not None:
- if building:
- stopURL = "builders/_all/stop"
- data += make_stop_form(stopURL, True, "Builds")
- if online:
- forceURL = "builders/_all/force"
- data += make_force_build_form(forceURL, True)
-
- return data
-
-
-
-# /one_line_per_build/$BUILDERNAME
-# accepts branch=, numbuilds=
-
-class OneLinePerBuildOneBuilder(HtmlResource, OneLineMixin):
- def __init__(self, builder, numbuilds=20):
- HtmlResource.__init__(self)
- self.builder = builder
- self.builder_name = builder.getName()
- self.numbuilds = numbuilds
- self.title = "Recent Builds of %s" % self.builder_name
-
- def body(self, req):
- status = self.getStatus(req)
- numbuilds = int(req.args.get("numbuilds", [self.numbuilds])[0])
- branches = [b for b in req.args.get("branch", []) if b]
-
- # walk backwards through all builds of a single builder
- g = self.builder.generateFinishedBuilds(map_branches(branches),
- numbuilds)
-
- data = ""
- data += ("<h1>Last %d builds of builder %s: %s</h1>\n" %
- (numbuilds, self.builder_name, ", ".join(branches)))
- data += "<ul>\n"
- got = 0
- for build in g:
- got += 1
- data += " <li>" + self.make_line(req, build) + "</li>\n"
- if not got:
- data += " <li>No matching builds found</li>\n"
- data += "</ul>\n"
-
- return data
-
-# /one_box_per_builder
-# accepts builder=, branch=
-class OneBoxPerBuilder(HtmlResource):
- """This shows a narrow table with one row per builder. The leftmost column
- contains the builder name. The next column contains the results of the
- most recent build. The right-hand column shows the builder's current
- activity.
-
- builder=: show only builds for this builder. Multiple builder= arguments
- can be used to see builds from any builder in the set.
- """
-
- title = "Latest Build"
-
- def body(self, req):
- status = self.getStatus(req)
- control = self.getControl(req)
-
- builders = req.args.get("builder", status.getBuilderNames())
- branches = [b for b in req.args.get("branch", []) if b]
-
- data = ""
-
- data += "<h2>Latest builds: %s</h2>\n" % ", ".join(branches)
- data += "<table>\n"
-
- building = False
- online = 0
- base_builders_url = self.path_to_root(req) + "builders/"
- for bn in builders:
- base_builder_url = base_builders_url + urllib.quote(bn, safe='')
- builder = status.getBuilder(bn)
- data += "<tr>\n"
- data += '<td class="box"><a href="%s">%s</a></td>\n' \
- % (base_builder_url, html.escape(bn))
- builds = list(builder.generateFinishedBuilds(map_branches(branches),
- num_builds=1))
- if builds:
- b = builds[0]
- url = (base_builder_url + "/builds/%d" % b.getNumber())
- try:
- label = b.getProperty("got_revision")
- except KeyError:
- label = None
- if not label or len(str(label)) > 20:
- label = "#%d" % b.getNumber()
- text = ['<a href="%s">%s</a>' % (url, label)]
- text.extend(b.getText())
- box = Box(text,
- class_="LastBuild box %s" % build_get_class(b))
- data += box.td(align="center")
- else:
- data += '<td class="LastBuild box" >no build</td>\n'
- current_box = ICurrentBox(builder).getBox(status)
- data += current_box.td(align="center")
-
- builder_status = builder.getState()[0]
- if builder_status == "building":
- building = True
- online += 1
- elif builder_status != "offline":
- online += 1
-
- data += "</table>\n"
-
- if control is not None:
- if building:
- stopURL = "builders/_all/stop"
- data += make_stop_form(stopURL, True, "Builds")
- if online:
- forceURL = "builders/_all/force"
- data += make_force_build_form(forceURL, True)
-
- return data
-
-
-
-HEADER = '''
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-
-<html
- xmlns="http://www.w3.org/1999/xhtml"
- lang="en"
- xml:lang="en">
-'''
-
-HEAD_ELEMENTS = [
- '<title>%(title)s</title>',
- '<link href="%(root)sbuildbot.css" rel="stylesheet" type="text/css" />',
- ]
-BODY_ATTRS = {
- 'vlink': "#800080",
- }
-
-FOOTER = '''
-</html>
-'''
-
-
class WebStatus(service.MultiService):
    implements(IStatusReceiver)
    # TODO: IStatusReceiver is really about things which subscribe to hear
    # about buildbot events. We need a different interface (perhaps a parent
    # of IStatusReceiver) for status targets that don't subscribe, like the
    # WebStatus class. buildbot.master.BuildMaster.loadConfig:737 asserts
    # that everything in c['status'] provides IStatusReceiver, but really it
    # should check that they provide IStatusTarget instead.

    """
    The webserver provided by this class has the following resources:

     /waterfall : the big time-oriented 'waterfall' display, with links
                  to individual changes, builders, builds, steps, and logs.
                  A number of query-arguments can be added to influence
                  the display.
     /rss : a rss feed summarizing all failed builds. The same
            query-arguments used by 'waterfall' can be added to
            influence the feed output.
     /atom : an atom feed summarizing all failed builds. The same
             query-arguments used by 'waterfall' can be added to
             influence the feed output.
     /grid : another summary display that shows a grid of builds, with
             sourcestamps on the x axis, and builders on the y. Query
             arguments similar to those for the waterfall can be added.
     /builders/BUILDERNAME: a page summarizing the builder. This includes
                            references to the Schedulers that feed it,
                            any builds currently in the queue, which
                            buildslaves are designated or attached, and a
                            summary of the build process it uses.
     /builders/BUILDERNAME/builds/NUM: a page describing a single Build
     /builders/BUILDERNAME/builds/NUM/steps/STEPNAME: describes a single step
     /builders/BUILDERNAME/builds/NUM/steps/STEPNAME/logs/LOGNAME: a StatusLog
     /builders/BUILDERNAME/builds/NUM/tests : summarize test results
     /builders/BUILDERNAME/builds/NUM/tests/TEST.NAME: results of one test
     /builders/_all/{force,stop}: force a build/stop building on all builders.
     /changes : summarize all ChangeSources
     /changes/CHANGENUM: a page describing a single Change
     /schedulers/SCHEDULERNAME: a page describing a Scheduler, including
                                a description of its behavior, a list of the
                                Builders it triggers, and list of the Changes
                                that are queued awaiting the tree-stable
                                timer, and controls to accelerate the timer.
     /buildslaves : list all BuildSlaves
     /buildslaves/SLAVENAME : describe a single BuildSlave
     /one_line_per_build : summarize the last few builds, one line each
     /one_line_per_build/BUILDERNAME : same, but only for a single builder
     /one_box_per_builder : show the latest build and current activity
     /about : describe this buildmaster (Buildbot and support library versions)
     /xmlrpc : (not yet implemented) an XMLRPC server with build status


    All URLs for pages which are not defined here are used to look
    for files in PUBLIC_HTML, which defaults to BASEDIR/public_html.
    This means that /robots.txt or /buildbot.css or /favicon.ico can
    be placed in that directory.

    If an index file (index.html, index.htm, or index, in that order) is
    present in PUBLIC_HTML, it will be used for the root resource. If not,
    the default behavior is to put a redirection to the /waterfall page.

    All of the resources provided by this service use relative URLs to reach
    each other. The only absolute links are the c['projectURL'] links at the
    top and bottom of the page, and the buildbot home-page link at the
    bottom.

    This webserver defines class attributes on elements so they can be styled
    with CSS stylesheets. All pages pull in PUBLIC_HTML/buildbot.css, and you
    can cause additional stylesheets to be loaded by adding a suitable <link>
    to the WebStatus instance's .head_elements attribute.

    Buildbot uses some generic classes to identify the type of object, and
    some more specific classes for the various kinds of those types. It does
    this by specifying both in the class attributes where applicable,
    separated by a space. It is important that in your CSS you declare the
    more generic class styles above the more specific ones. For example,
    first define a style for .Event, and below that for .SUCCESS

    The following CSS class names are used:
        - Activity, Event, BuildStep, LastBuild: general classes
        - waiting, interlocked, building, offline, idle: Activity states
        - start, running, success, failure, warnings, skipped, exception:
          LastBuild and BuildStep states
        - Change: box with change
        - Builder: box for builder name (at top)
        - Project
        - Time

    """

    # we are not a ComparableMixin, and therefore the webserver will be
    # rebuilt every time we reconfig. This is because WebStatus.putChild()
    # makes it too difficult to tell whether two instances are the same or
    # not (we'd have to do a recursive traversal of all children to discover
    # all the changes).

    def __init__(self, http_port=None, distrib_port=None, allowForce=False,
                 public_html="public_html", site=None):
        """Run a web server that provides Buildbot status.

        @type http_port: int or L{twisted.application.strports} string
        @param http_port: a strports specification describing which port the
                          buildbot should use for its web server, with the
                          Waterfall display as the root page. For backwards
                          compatibility this can also be an int. Use
                          'tcp:8000' to listen on that port, or
                          'tcp:12345:interface=127.0.0.1' if you only want
                          local processes to connect to it (perhaps because
                          you are using an HTTP reverse proxy to make the
                          buildbot available to the outside world, and do not
                          want to make the raw port visible).

        @type distrib_port: int or L{twisted.application.strports} string
        @param distrib_port: Use this if you want to publish the Waterfall
                             page using web.distrib instead. The most common
                             case is to provide a string that is an absolute
                             pathname to the unix socket on which the
                             publisher should listen
                             (C{os.path.expanduser(~/.twistd-web-pb)} will
                             match the default settings of a standard
                             twisted.web 'personal web server'). Another
                             possibility is to pass an integer, which means
                             the publisher should listen on a TCP socket,
                             allowing the web server to be on a different
                             machine entirely. Both forms are provided for
                             backwards compatibility; the preferred form is a
                             strports specification like
                             'unix:/home/buildbot/.twistd-web-pb'. Providing
                             a non-absolute pathname will probably confuse
                             the strports parser.

        @param allowForce: boolean, if True then the webserver will allow
                           visitors to trigger and cancel builds

        @param public_html: the path to the public_html directory for this display,
                            either absolute or relative to the basedir. The default
                            is 'public_html', which selects BASEDIR/public_html.

        @type site: None or L{twisted.web.server.Site}
        @param site: Use this if you want to define your own object instead of
                     using the default.
        """

        service.MultiService.__init__(self)
        # accept a bare int for backwards compatibility; isinstance (rather
        # than a 'type() is int' identity test) also accepts int subclasses
        if isinstance(http_port, int):
            http_port = "tcp:%d" % http_port
        self.http_port = http_port
        if distrib_port is not None:
            if isinstance(distrib_port, int):
                distrib_port = "tcp:%d" % distrib_port
            if distrib_port[0] in "/~.": # pathnames
                distrib_port = "unix:%s" % distrib_port
        self.distrib_port = distrib_port
        self.allowForce = allowForce
        self.public_html = public_html

        # If we were given a site object, go ahead and use it.
        if site:
            self.site = site
        else:
            # this will be replaced once we've been attached to a parent (and
            # thus have a basedir and can reference BASEDIR)
            root = static.Data("placeholder", "text/plain")
            self.site = server.Site(root)
        # name -> resource mapping, spliced into the real root in setupSite()
        self.childrenToBeAdded = {}

        self.setupUsualPages()

        # the following items are accessed by HtmlResource when it renders
        # each page.
        self.site.buildbot_service = self
        self.header = HEADER
        self.head_elements = HEAD_ELEMENTS[:]
        self.body_attrs = BODY_ATTRS.copy()
        self.footer = FOOTER
        self.template_values = {}

        # keep track of cached connections so we can break them when we shut
        # down. See ticket #102 for more details.
        self.channels = weakref.WeakKeyDictionary()

        if self.http_port is not None:
            s = strports.service(self.http_port, self.site)
            s.setServiceParent(self)
        if self.distrib_port is not None:
            f = pb.PBServerFactory(distrib.ResourcePublisher(self.site))
            s = strports.service(self.distrib_port, f)
            s.setServiceParent(self)

    def setupUsualPages(self):
        """Register the standard set of status resources (see class docs)."""
        #self.putChild("", IndexOrWaterfallRedirection())
        self.putChild("waterfall", WaterfallStatusResource())
        self.putChild("grid", GridStatusResource())
        self.putChild("builders", BuildersResource()) # has builds/steps/logs
        self.putChild("changes", ChangesResource())
        self.putChild("buildslaves", BuildSlavesResource())
        #self.putChild("schedulers", SchedulersResource())
        self.putChild("one_line_per_build", OneLinePerBuild())
        self.putChild("one_box_per_builder", OneBoxPerBuilder())
        self.putChild("xmlrpc", XMLRPCServer())
        self.putChild("about", AboutBuildbot())

    def __repr__(self):
        if self.http_port is None:
            return "<WebStatus on path %s at %s>" % (self.distrib_port,
                                                     hex(id(self)))
        if self.distrib_port is None:
            return "<WebStatus on port %s at %s>" % (self.http_port,
                                                     hex(id(self)))
        return ("<WebStatus on port %s and path %s at %s>" %
                (self.http_port, self.distrib_port, hex(id(self))))

    def setServiceParent(self, parent):
        service.MultiService.setServiceParent(self, parent)

        # this class keeps a *separate* link to the buildmaster, rather than
        # just using self.parent, so that when we are "disowned" (and thus
        # parent=None), any remaining HTTP clients of this WebStatus will still
        # be able to get reasonable results.
        self.master = parent

        self.setupSite()

    def setupSite(self):
        # this is responsible for creating the root resource. It isn't done
        # at __init__ time because we need to reference the parent's basedir.
        htmldir = os.path.abspath(os.path.join(self.master.basedir, self.public_html))
        if os.path.isdir(htmldir):
            log.msg("WebStatus using (%s)" % htmldir)
        else:
            log.msg("WebStatus: warning: %s is missing. Do you need to run"
                    " 'buildbot upgrade-master' on this buildmaster?" % htmldir)
            # all static pages will get a 404 until upgrade-master is used to
            # populate this directory. Create the directory, though, since
            # otherwise we get internal server errors instead of 404s.
            os.mkdir(htmldir)
        root = static.File(htmldir)

        # splice in the resources registered via putChild() before we had a
        # real root
        for name, child_resource in self.childrenToBeAdded.iteritems():
            root.putChild(name, child_resource)

        status = self.getStatus()
        root.putChild("rss", Rss20StatusResource(status))
        root.putChild("atom", Atom10StatusResource(status))

        self.site.resource = root

    def putChild(self, name, child_resource):
        """This behaves a lot like root.putChild() . """
        self.childrenToBeAdded[name] = child_resource

    def registerChannel(self, channel):
        self.channels[channel] = 1 # weakrefs

    def stopService(self):
        # Iterate over a snapshot: closing a connection may drop its weakref
        # from self.channels, and mutating a WeakKeyDictionary while
        # iterating it directly is unsafe.
        for channel in list(self.channels):
            try:
                channel.transport.loseConnection()
            except Exception:
                # catch Exception (not a bare except) so that
                # SystemExit/KeyboardInterrupt still propagate during shutdown
                log.msg("WebStatus.stopService: error while disconnecting"
                        " leftover clients")
                log.err()
        return service.MultiService.stopService(self)

    def getStatus(self):
        return self.master.getStatus()

    def getControl(self):
        # returns None unless allowForce was enabled at construction time
        if self.allowForce:
            return IControl(self.master)
        return None

    def getChangeSvc(self):
        return self.master.change_svc

    def getPortnum(self):
        # this is for the benefit of unit tests
        s = list(self)[0]
        return s._port.getHost().port
-
-# resources can get access to the IStatus by calling
-# request.site.buildbot_service.getStatus()
-
-# this is the compatibility class for the old waterfall. It is exactly like a
-# regular WebStatus except that the root resource (e.g. http://buildbot.net/)
-# always redirects to a WaterfallStatusResource, and the old arguments are
-# mapped into the new resource-tree approach. In the normal WebStatus, the
-# root resource either redirects the browser to /waterfall or serves
-# PUBLIC_HTML/index.html, and favicon/robots.txt are provided by
-# having the admin write actual files into PUBLIC_HTML/ .
-
-# note: we don't use a util.Redirect here because HTTP requires that the
-# Location: header provide an absolute URI, and it's non-trivial to figure
-# out our absolute URI from here.
-
class Waterfall(WebStatus):
    """Deprecated compatibility wrapper around WebStatus.

    Serves the old all-in-one Waterfall page at the site root and exposes
    the historical css/favicon/robots_txt constructor arguments.
    Instantiating it emits a DeprecationWarning; use WebStatus instead.
    """

    if hasattr(sys, "frozen"):
        # all 'data' files are in the directory of our executable
        here = os.path.dirname(sys.executable)
        buildbot_icon = os.path.abspath(os.path.join(here, "buildbot.png"))
        buildbot_css = os.path.abspath(os.path.join(here, "classic.css"))
    else:
        # running from source
        # the icon is sibpath(__file__, "../buildbot.png") . This is for
        # portability.
        up = os.path.dirname
        buildbot_icon = os.path.abspath(os.path.join(up(up(up(__file__))),
                                                     "buildbot.png"))
        buildbot_css = os.path.abspath(os.path.join(up(__file__),
                                                    "classic.css"))

    compare_attrs = ["http_port", "distrib_port", "allowForce",
                     "categories", "css", "favicon", "robots_txt"]

    def __init__(self, http_port=None, distrib_port=None, allowForce=True,
                 categories=None, css=buildbot_css, favicon=buildbot_icon,
                 robots_txt=None):
        import warnings
        m = ("buildbot.status.html.Waterfall is deprecated as of 0.7.6 "
             "and will be removed from a future release. "
             "Please use html.WebStatus instead.")
        warnings.warn(m, DeprecationWarning)

        WebStatus.__init__(self, http_port, distrib_port, allowForce)
        self.css = css
        if css:
            if os.path.exists(os.path.join("public_html", "buildbot.css")):
                # they've upgraded, so defer to that copy instead
                pass
            else:
                data = self._read_data_file(css)
                self.putChild("buildbot.css", static.Data(data, "text/plain"))
        self.favicon = favicon
        self.robots_txt = robots_txt
        if favicon:
            data = self._read_data_file(favicon)
            self.putChild("favicon.ico", static.Data(data, "image/x-icon"))
        if robots_txt:
            data = self._read_data_file(robots_txt)
            self.putChild("robots.txt", static.Data(data, "text/plain"))
        self.putChild("", WaterfallStatusResource(categories))

    @staticmethod
    def _read_data_file(path):
        """Read *path* in binary mode and return its contents.

        The previous code called open(path, "rb").read() inline, which
        leaked the file descriptor; this helper closes it explicitly.
        """
        f = open(path, "rb")
        try:
            return f.read()
        finally:
            f.close()
diff --git a/buildbot/buildbot/status/web/build.py b/buildbot/buildbot/status/web/build.py
deleted file mode 100644
index 5d01358..0000000
--- a/buildbot/buildbot/status/web/build.py
+++ /dev/null
@@ -1,302 +0,0 @@
-
-from twisted.web import html
-from twisted.web.util import Redirect, DeferredResource
-from twisted.internet import defer, reactor
-
-import urllib, time
-from twisted.python import log
-from buildbot.status.web.base import HtmlResource, make_row, make_stop_form, \
- css_classes, path_to_builder, path_to_slave
-
-from buildbot.status.web.tests import TestsResource
-from buildbot.status.web.step import StepsResource
-from buildbot import version, util
-
# /builders/$builder/builds/$buildnum
class StatusResourceBuild(HtmlResource):
    """Status page for a single build: sourcestamp, steps/logs, properties,
    blamelist, timing, and (when controls are available) stop/rebuild forms."""
    # render with a trailing slash so the page's relative links resolve
    addSlash = True

    def __init__(self, build_status, build_control, builder_control):
        # build_control is only non-None for an in-progress build (enables
        # the 'stop' form); builder_control enables the 'rebuild' form.
        HtmlResource.__init__(self)
        self.build_status = build_status
        self.build_control = build_control
        self.builder_control = builder_control

    def getTitle(self, request):
        return ("Buildbot: %s Build #%d" %
                (html.escape(self.build_status.getBuilder().getName()),
                 self.build_status.getNumber()))

    def body(self, req):
        """Assemble the HTML body of the build page, section by section."""
        b = self.build_status
        status = self.getStatus(req)
        projectURL = status.getProjectURL()
        projectName = status.getProjectName()
        data = ('<div class="title"><a href="%s">%s</a></div>\n'
                % (self.path_to_root(req), projectName))
        builder_name = b.getBuilder().getName()
        data += ("<h1><a href=\"%s\">Builder %s</a>: Build #%d</h1>\n"
                 % (path_to_builder(req, b.getBuilder()),
                    builder_name, b.getNumber()))

        if not b.isFinished():
            data += "<h2>Build In Progress</h2>"
            when = b.getETA()
            if when is not None:
                # ETA is seconds-from-now; show both the delta and wall time
                when_time = time.strftime("%H:%M:%S",
                                          time.localtime(time.time() + when))
                data += "<div>ETA %ds (%s)</div>\n" % (when, when_time)

            if self.build_control is not None:
                stopURL = urllib.quote(req.childLink("stop"))
                data += make_stop_form(stopURL)

        if b.isFinished():
            results = b.getResults()
            data += "<h2>Results:</h2>\n"
            text = " ".join(b.getText())
            data += '<span class="%s">%s</span>\n' % (css_classes[results],
                                                      text)
            if b.getTestResults():
                url = req.childLink("tests")
                data += "<h3><a href=\"%s\">test results</a></h3>\n" % url

        ss = b.getSourceStamp()
        data += "<h2>SourceStamp:</h2>\n"
        data += " <ul>\n"
        if ss.branch:
            data += " <li>Branch: %s</li>\n" % html.escape(ss.branch)
        if ss.revision:
            data += " <li>Revision: %s</li>\n" % html.escape(str(ss.revision))
        if ss.patch:
            data += " <li>Patch: YES</li>\n" # TODO: provide link to .diff
        if ss.changes:
            data += " <li>Changes: see below</li>\n"
        if (ss.branch is None and ss.revision is None and ss.patch is None
            and not ss.changes):
            data += " <li>build of most recent revision</li>\n"
        got_revision = None
        try:
            got_revision = b.getProperty("got_revision")
        except KeyError:
            # property was never set (e.g. the source step did not run)
            pass
        if got_revision:
            got_revision = str(got_revision)
            if len(got_revision) > 40:
                # e.g. a multi-repository revision blob; keep the page tidy
                got_revision = "[revision string too long]"
            data += " <li>Got Revision: %s</li>\n" % got_revision
        data += " </ul>\n"

        # TODO: turn this into a table, or some other sort of definition-list
        # that doesn't take up quite so much vertical space
        try:
            slaveurl = path_to_slave(req, status.getSlave(b.getSlavename()))
            data += "<h2>Buildslave:</h2>\n <a href=\"%s\">%s</a>\n" % (html.escape(slaveurl), html.escape(b.getSlavename()))
        except KeyError:
            # the slave is no longer configured; show its name without a link
            data += "<h2>Buildslave:</h2>\n %s\n" % html.escape(b.getSlavename())
        data += "<h2>Reason:</h2>\n%s\n" % html.escape(b.getReason())

        data += "<h2>Steps and Logfiles:</h2>\n"
        # TODO:
#        urls = self.original.getURLs()
#        ex_url_class = "BuildStep external"
#        for name, target in urls.items():
#            text.append('[<a href="%s" class="%s">%s</a>]' %
#                        (target, ex_url_class, html.escape(name)))
        if b.getLogs():
            data += "<ol>\n"
            for s in b.getSteps():
                name = s.getName()
                data += (" <li><a href=\"%s\">%s</a> [%s]\n"
                         % (req.childLink("steps/%s" % urllib.quote(name)),
                            name,
                            " ".join(s.getText())))
                if s.getLogs():
                    data += " <ol>\n"
                    for logfile in s.getLogs():
                        logname = logfile.getName()
                        logurl = req.childLink("steps/%s/logs/%s" %
                                               (urllib.quote(name),
                                                urllib.quote(logname)))
                        data += (" <li><a href=\"%s\">%s</a></li>\n" %
                                 (logurl, logfile.getName()))
                    data += " </ol>\n"
                data += " </li>\n"
            data += "</ol>\n"

        data += "<h2>Build Properties:</h2>\n"
        data += "<table><tr><th valign=\"left\">Name</th><th valign=\"left\">Value</th><th valign=\"left\">Source</th></tr>\n"
        for name, value, source in b.getProperties().asList():
            value = str(value)
            if len(value) > 500:
                # truncate huge property values so the table stays readable
                value = value[:500] + " .. [property value too long]"
            data += "<tr>"
            data += "<td>%s</td>" % html.escape(name)
            data += "<td>%s</td>" % html.escape(value)
            data += "<td>%s</td>" % html.escape(source)
            data += "</tr>\n"
        data += "</table>"

        data += "<h2>Blamelist:</h2>\n"
        if list(b.getResponsibleUsers()):
            data += " <ol>\n"
            for who in b.getResponsibleUsers():
                data += " <li>%s</li>\n" % html.escape(who)
            data += " </ol>\n"
        else:
            data += "<div>no responsible users</div>\n"


        (start, end) = b.getTimes()
        data += "<h2>Timing</h2>\n"
        data += "<table>\n"
        data += "<tr><td>Start</td><td>%s</td></tr>\n" % time.ctime(start)
        if end:
            data += "<tr><td>End</td><td>%s</td></tr>\n" % time.ctime(end)
            data += "<tr><td>Elapsed</td><td>%s</td></tr>\n" % util.formatInterval(end - start)
        data += "</table>\n"

        if ss.changes:
            data += "<h2>All Changes</h2>\n"
            data += "<ol>\n"
            for c in ss.changes:
                data += "<li>" + c.asHTML() + "</li>\n"
            data += "</ol>\n"
        #data += html.PRE(b.changesText()) # TODO

        if b.isFinished() and self.builder_control is not None:
            data += "<h3>Resubmit Build:</h3>\n"
            # can we rebuild it exactly?
            exactly = (ss.revision is not None) or b.getChanges()
            if exactly:
                data += ("<p>This tree was built from a specific set of \n"
                         "source files, and can be rebuilt exactly</p>\n")
            else:
                data += ("<p>This tree was built from the most recent "
                         "revision")
                if ss.branch:
                    data += " (along some branch)"
                data += (" and thus it might not be possible to rebuild it \n"
                         "exactly. Any changes that have been committed \n"
                         "after this build was started <b>will</b> be \n"
                         "included in a rebuild.</p>\n")
            rebuildURL = urllib.quote(req.childLink("rebuild"))
            data += ('<form action="%s" class="command rebuild">\n'
                     % rebuildURL)
            data += make_row("Your name:",
                             "<input type='text' name='username' />")
            data += make_row("Reason for re-running build:",
                             "<input type='text' name='comments' />")
            data += '<input type="submit" value="Rebuild" />\n'
            data += '</form>\n'

        # TODO: this stuff should be generated by a template of some sort
        data += '<hr /><div class="footer">\n'

        welcomeurl = self.path_to_root(req) + "index.html"
        data += '[<a href="%s">welcome</a>]\n' % welcomeurl
        data += "<br />\n"

        data += '<a href="http://buildbot.sourceforge.net/">Buildbot</a>'
        data += "-%s " % version
        if projectName:
            data += "working for the "
            if projectURL:
                data += "<a href=\"%s\">%s</a> project." % (projectURL,
                                                            projectName)
            else:
                data += "%s project." % projectName
        data += "<br />\n"
        data += ("Page built: " +
                 time.strftime("%a %d %b %Y %H:%M:%S",
                               time.localtime(util.now()))
                 + "\n")
        data += '</div>\n'

        return data

    def stop(self, req):
        """Handle the 'stop build' button: cancel the running build, then
        redirect back to the builder page after a short delay."""
        b = self.build_status
        c = self.build_control
        log.msg("web stopBuild of build %s:%s" % \
                (b.getBuilder().getName(), b.getNumber()))
        name = req.args.get("username", ["<unknown>"])[0]
        comments = req.args.get("comments", ["<no reason specified>"])[0]
        reason = ("The web-page 'stop build' button was pressed by "
                  "'%s': %s\n" % (name, comments))
        c.stopBuild(reason)
        # we're at http://localhost:8080/svn-hello/builds/5/stop?[args] and
        # we want to go to: http://localhost:8080/svn-hello
        r = Redirect("../..")
        d = defer.Deferred()
        # delay the redirect by a second so the stop has a chance to register
        reactor.callLater(1, d.callback, r)
        return DeferredResource(d)

    def rebuild(self, req):
        """Handle the 'rebuild' form: resubmit this (finished) build."""
        b = self.build_status
        bc = self.builder_control
        builder_name = b.getBuilder().getName()
        log.msg("web rebuild of build %s:%s" % (builder_name, b.getNumber()))
        name = req.args.get("username", ["<unknown>"])[0]
        comments = req.args.get("comments", ["<no reason specified>"])[0]
        reason = ("The web-page 'rebuild' button was pressed by "
                  "'%s': %s\n" % (name, comments))
        if not bc or not b.isFinished():
            log.msg("could not rebuild: bc=%s, isFinished=%s"
                    % (bc, b.isFinished()))
            # TODO: indicate an error
        else:
            bc.resubmitBuild(b, reason)
        # we're at
        # http://localhost:8080/builders/NAME/builds/5/rebuild?[args]
        # Where should we send them?
        #
        # Ideally it would be to the per-build page that they just started,
        # but we don't know the build number for it yet (besides, it might
        # have to wait for a current build to finish). The next-most
        # preferred place is somewhere that the user can see tangible
        # evidence of their build starting (or to see the reason that it
        # didn't start). This should be the Builder page.
        r = Redirect("../..") # the Builder's page
        d = defer.Deferred()
        reactor.callLater(1, d.callback, r)
        return DeferredResource(d)

    def getChild(self, path, req):
        # action URLs ('stop', 'rebuild') are dispatched as child resources
        if path == "stop":
            return self.stop(req)
        if path == "rebuild":
            return self.rebuild(req)
        if path == "steps":
            return StepsResource(self.build_status)
        if path == "tests":
            return TestsResource(self.build_status)

        return HtmlResource.getChild(self, path, req)
-
# /builders/$builder/builds
class BuildsResource(HtmlResource):
    """Parent resource for the numbered per-build pages of one builder."""
    addSlash = True

    def __init__(self, builder_status, builder_control):
        HtmlResource.__init__(self)
        self.builder_status = builder_status
        self.builder_control = builder_control

    def getChild(self, path, req):
        """Resolve /builders/NAME/builds/NUM to a StatusResourceBuild."""
        try:
            number = int(path)
        except ValueError:
            # not a build number at all; fall through to the default handler
            return HtmlResource.getChild(self, path, req)

        build_status = self.builder_status.getBuild(number)
        if not build_status:
            # no such build; fall through to the default handler
            return HtmlResource.getChild(self, path, req)

        build_control = None
        if self.builder_control:
            build_control = self.builder_control.getBuild(number)
        return StatusResourceBuild(build_status, build_control,
                                   self.builder_control)
-
diff --git a/buildbot/buildbot/status/web/builder.py b/buildbot/buildbot/status/web/builder.py
deleted file mode 100644
index 35f65e9..0000000
--- a/buildbot/buildbot/status/web/builder.py
+++ /dev/null
@@ -1,312 +0,0 @@
-
-from twisted.web.error import NoResource
-from twisted.web import html, static
-from twisted.web.util import Redirect
-
-import re, urllib, time
-from twisted.python import log
-from buildbot import interfaces
-from buildbot.status.web.base import HtmlResource, make_row, \
- make_force_build_form, OneLineMixin, path_to_build, path_to_slave, path_to_builder
-from buildbot.process.base import BuildRequest
-from buildbot.sourcestamp import SourceStamp
-
-from buildbot.status.web.build import BuildsResource, StatusResourceBuild
-
# /builders/$builder
class StatusResourceBuilder(HtmlResource, OneLineMixin):
    """Status page for one builder: current and recent builds, attached
    buildslaves, and (when a control is available) force/ping forms."""
    # render with a trailing slash so relative child links resolve
    addSlash = True

    def __init__(self, builder_status, builder_control):
        # builder_control is None when force/stop actions are not allowed
        HtmlResource.__init__(self)
        self.builder_status = builder_status
        self.builder_control = builder_control

    def getTitle(self, request):
        return "Buildbot: %s" % html.escape(self.builder_status.getName())

    def build_line(self, build, req):
        # one-line HTML summary for a build that is currently in progress
        buildnum = build.getNumber()
        buildurl = path_to_build(req, build)
        data = '<a href="%s">#%d</a> ' % (buildurl, buildnum)

        when = build.getETA()
        if when is not None:
            when_time = time.strftime("%H:%M:%S",
                                      time.localtime(time.time() + when))
            data += "ETA %ds (%s) " % (when, when_time)
        step = build.getCurrentStep()
        if step:
            data += "[%s]" % step.getName()
        else:
            data += "[waiting for Lock]"
            # TODO: is this necessarily the case?

        if self.builder_control is not None:
            stopURL = path_to_build(req, build) + '/stop'
            data += '''
<form action="%s" class="command stopbuild" style="display:inline">
 <input type="submit" value="Stop Build" />
</form>''' % stopURL
        return data

    def body(self, req):
        """Assemble the HTML body of the builder page."""
        b = self.builder_status
        control = self.builder_control
        status = self.getStatus(req)

        slaves = b.getSlaves()
        connected_slaves = [s for s in slaves if s.isConnected()]

        projectName = status.getProjectName()

        data = '<a href="%s">%s</a>\n' % (self.path_to_root(req), projectName)

        data += "<h1>Builder: %s</h1>\n" % html.escape(b.getName())

        # the first section shows builds which are currently running, if any.

        current = b.getCurrentBuilds()
        if current:
            data += "<h2>Currently Building:</h2>\n"
            data += "<ul>\n"
            for build in current:
                data += " <li>" + self.build_line(build, req) + "</li>\n"
            data += "</ul>\n"
        else:
            data += "<h2>no current builds</h2>\n"

        # Then a section with the last 5 builds, with the most recent build
        # distinguished from the rest.

        data += "<h2>Recent Builds:</h2>\n"
        data += "<ul>\n"
        for i, build in enumerate(b.generateFinishedBuilds(num_builds=5)):
            data += " <li>" + self.make_line(req, build, False) + "</li>\n"
            if i == 0:
                data += "<br />\n" # separator
                # TODO: or empty list?
        data += "</ul>\n"


        data += "<h2>Buildslaves:</h2>\n"
        data += "<ol>\n"
        for slave in slaves:
            slaveurl = path_to_slave(req, slave)
            data += "<li><b><a href=\"%s\">%s</a></b>: " % (html.escape(slaveurl), html.escape(slave.getName()))
            if slave.isConnected():
                data += "CONNECTED\n"
                if slave.getAdmin():
                    data += make_row("Admin:", html.escape(slave.getAdmin()))
                if slave.getHost():
                    data += "<span class='label'>Host info:</span>\n"
                    data += html.PRE(slave.getHost())
            else:
                data += ("NOT CONNECTED\n")
            data += "</li>\n"
        data += "</ol>\n"

        # force is only offered when a control exists and at least one slave
        # is attached; otherwise explain why the form is absent
        if control is not None and connected_slaves:
            forceURL = path_to_builder(req, b) + '/force'
            data += make_force_build_form(forceURL)
        elif control is not None:
            data += """
 <p>All buildslaves appear to be offline, so it's not possible
 to force this build to execute at this time.</p>
 """

        if control is not None:
            pingURL = path_to_builder(req, b) + '/ping'
            data += """
 <form action="%s" class='command pingbuilder'>
 <p>To ping the buildslave(s), push the 'Ping' button</p>

 <input type="submit" value="Ping Builder" />
 </form>
 """ % pingURL

        data += self.footer(status, req)

        return data

    def force(self, req):
        """

        Custom properties can be passed from the web form. To do
        this, subclass this class, overriding the force() method. You
        can then determine the properties (usually from form values,
        by inspecting req.args), then pass them to this superclass
        force method.

        """
        name = req.args.get("username", ["<unknown>"])[0]
        reason = req.args.get("comments", ["<no reason specified>"])[0]
        branch = req.args.get("branch", [""])[0]
        revision = req.args.get("revision", [""])[0]

        r = "The web-page 'force build' button was pressed by '%s': %s\n" \
            % (name, reason)
        log.msg("web forcebuild of builder '%s', branch='%s', revision='%s'"
                % (self.builder_status.getName(), branch, revision))

        if not self.builder_control:
            # TODO: tell the web user that their request was denied
            log.msg("but builder control is disabled")
            return Redirect("..")

        # keep weird stuff out of the branch and revision strings. TODO:
        # centralize this somewhere.
        if not re.match(r'^[\w\.\-\/]*$', branch):
            log.msg("bad branch '%s'" % branch)
            return Redirect("..")
        if not re.match(r'^[\w\.\-\/]*$', revision):
            log.msg("bad revision '%s'" % revision)
            return Redirect("..")
        # empty form fields mean "most recent" rather than literal ""
        if not branch:
            branch = None
        if not revision:
            revision = None

        # TODO: if we can authenticate that a particular User pushed the
        # button, use their name instead of None, so they'll be informed of
        # the results.
        s = SourceStamp(branch=branch, revision=revision)
        req = BuildRequest(r, s, builderName=self.builder_status.getName())
        try:
            self.builder_control.requestBuildSoon(req)
        except interfaces.NoSlaveError:
            # TODO: tell the web user that their request could not be
            # honored
            pass
        # send the user back to the builder page
        return Redirect(".")

    def ping(self, req):
        """Handle the 'Ping Builder' button: ping the attached slave(s)."""
        log.msg("web ping of builder '%s'" % self.builder_status.getName())
        self.builder_control.ping() # TODO: there ought to be an ISlaveControl
        # send the user back to the builder page
        return Redirect(".")

    def getChild(self, path, req):
        if path == "force":
            return self.force(req)
        if path == "ping":
            return self.ping(req)
        if path == "events":
            num = req.postpath.pop(0)
            req.prepath.append(num)
            num = int(num)
            # TODO: is this dead code? .statusbag doesn't exist,right?
            log.msg("getChild['path']: %s" % req.uri)
            return NoResource("events are unavailable until code gets fixed")
            # NOTE(review): everything below this return is unreachable; it
            # looks like the pre-NoResource event-file serving code was kept
            # for reference when the branch was disabled
            filename = req.postpath.pop(0)
            req.prepath.append(filename)
            e = self.builder_status.getEventNumbered(num)
            if not e:
                return NoResource("No such event '%d'" % num)
            file = e.files.get(filename, None)
            if file == None:
                return NoResource("No such file '%s'" % filename)
            if type(file) == type(""):
                if file[:6] in ("<HTML>", "<html>"):
                    return static.Data(file, "text/html")
                return static.Data(file, "text/plain")
            return file
        if path == "builds":
            return BuildsResource(self.builder_status, self.builder_control)

        return HtmlResource.getChild(self, path, req)
-
-
# /builders/_all
class StatusResourceAllBuilders(HtmlResource, OneLineMixin):
    """Applies the 'force' and 'stop' actions to every configured builder
    at once (/builders/_all/force and /builders/_all/stop)."""

    def __init__(self, status, control):
        HtmlResource.__init__(self)
        self.status = status
        self.control = control

    def getChild(self, path, req):
        if path == "force":
            return self.force(req)
        if path == "stop":
            return self.stop(req)

        return HtmlResource.getChild(self, path, req)

    def force(self, req):
        # delegate to each builder's own resource so per-builder force
        # handling (validation, control lookup) stays in one place
        for bname in self.status.getBuilderNames():
            builder_status = self.status.getBuilder(bname)
            builder_control = None
            c = self.getControl(req)
            if c:
                builder_control = c.getBuilder(bname)
            build = StatusResourceBuilder(builder_status, builder_control)
            build.force(req)
        # back to the welcome page
        return Redirect("../..")

    def stop(self, req):
        # stop every build currently in progress on every builder
        for bname in self.status.getBuilderNames():
            builder_status = self.status.getBuilder(bname)
            builder_control = None
            c = self.getControl(req)
            if c:
                builder_control = c.getBuilder(bname)
            (state, current_builds) = builder_status.getState()
            if state != "building":
                # nothing running on this builder
                continue
            for b in current_builds:
                build_status = builder_status.getBuild(b.number)
                if not build_status:
                    continue
                if builder_control:
                    build_control = builder_control.getBuild(b.number)
                else:
                    build_control = None
                build = StatusResourceBuild(build_status, build_control,
                                            builder_control)
                build.stop(req)
        # go back to the welcome page
        return Redirect("../..")
-
-
# /builders
class BuildersResource(HtmlResource):
    """Top-level /builders page: lists every configured builder and routes
    child URLs to per-builder (or _all) resources."""
    title = "Builders"
    addSlash = True

    def body(self, req):
        """Render a simple ordered list of links to the builder pages."""
        status = self.getStatus(req)

        # TODO: this is really basic. It should be expanded to include a
        # brief one-line summary of the builder (perhaps with whatever the
        # builder is currently doing)
        pieces = ["", "<h1>Builders</h1>\n", "<ol>\n"]
        for name in status.getBuilderNames():
            link = req.childLink(urllib.quote(name, safe=''))
            pieces.append(' <li><a href="%s">%s</a></li>\n' % (link, name))
        pieces.append("</ol>\n")
        pieces.append(self.footer(status, req))

        return "".join(pieces)

    def getChild(self, path, req):
        """Route to a named builder's page, or to the _all pseudo-builder."""
        status = self.getStatus(req)
        if path in status.getBuilderNames():
            builder_status = status.getBuilder(path)
            builder_control = None
            control = self.getControl(req)
            if control:
                builder_control = control.getBuilder(path)
            return StatusResourceBuilder(builder_status, builder_control)
        if path == "_all":
            return StatusResourceAllBuilders(self.getStatus(req),
                                             self.getControl(req))

        return HtmlResource.getChild(self, path, req)
-
diff --git a/buildbot/buildbot/status/web/changes.py b/buildbot/buildbot/status/web/changes.py
deleted file mode 100644
index ff562c6..0000000
--- a/buildbot/buildbot/status/web/changes.py
+++ /dev/null
@@ -1,41 +0,0 @@
-
-from zope.interface import implements
-from twisted.python import components
-from twisted.web.error import NoResource
-
-from buildbot.changes.changes import Change
-from buildbot.status.web.base import HtmlResource, StaticHTML, IBox, Box
-
-# /changes/NN
-class ChangesResource(HtmlResource):
-
- def body(self, req):
- data = ""
- data += "Change sources:\n"
- sources = self.getStatus(req).getChangeSources()
- if sources:
- data += "<ol>\n"
- for s in sources:
- data += "<li>%s</li>\n" % s.describe()
- data += "</ol>\n"
- else:
- data += "none (push only)\n"
- return data
-
- def getChild(self, path, req):
- num = int(path)
- c = self.getStatus(req).getChange(num)
- if not c:
- return NoResource("No change number '%d'" % num)
- return StaticHTML(c.asHTML(), "Change #%d" % num)
-
-
-class ChangeBox(components.Adapter):
- implements(IBox)
-
- def getBox(self, req):
- url = req.childLink("../changes/%d" % self.original.number)
- text = self.original.get_HTML_box(url)
- return Box([text], class_="Change")
-components.registerAdapter(ChangeBox, Change, IBox)
-
diff --git a/buildbot/buildbot/status/web/classic.css b/buildbot/buildbot/status/web/classic.css
deleted file mode 100644
index 5a5b0ea..0000000
--- a/buildbot/buildbot/status/web/classic.css
+++ /dev/null
@@ -1,78 +0,0 @@
-a:visited {
- color: #800080;
-}
-
-td.Event, td.BuildStep, td.Activity, td.Change, td.Time, td.Builder {
- border-top: 1px solid;
- border-right: 1px solid;
-}
-
-td.box {
- border: 1px solid;
-}
-
-/* Activity states */
-.offline {
- background-color: gray;
-}
-.idle {
- background-color: white;
-}
-.waiting {
- background-color: yellow;
-}
-.building {
- background-color: yellow;
-}
-
-/* LastBuild, BuildStep states */
-.success {
- background-color: #72ff75;
-}
-.failure {
- background-color: red;
-}
-.warnings {
- background-color: #ff8000;
-}
-.exception {
- background-color: #c000c0;
-}
-.start,.running {
- background-color: yellow;
-}
-
-/* grid styles */
-
-table.Grid {
- border-collapse: collapse;
-}
-
-table.Grid tr td {
- padding: 0.2em;
- margin: 0px;
- text-align: center;
-}
-
-table.Grid tr td.title {
- font-size: 90%;
- border-right: 1px gray solid;
- border-bottom: 1px gray solid;
-}
-
-table.Grid tr td.sourcestamp {
- font-size: 90%;
-}
-
-table.Grid tr td.builder {
- text-align: right;
- font-size: 90%;
-}
-
-table.Grid tr td.build {
- border: 1px gray solid;
-}
-
-div.footer {
- font-size: 80%;
-}
diff --git a/buildbot/buildbot/status/web/feeds.py b/buildbot/buildbot/status/web/feeds.py
deleted file mode 100644
index c86ca3b..0000000
--- a/buildbot/buildbot/status/web/feeds.py
+++ /dev/null
@@ -1,359 +0,0 @@
-# This module enables ATOM and RSS feeds from webstatus.
-#
-# It is based on "feeder.py" which was part of the Buildbot
-# configuration for the Subversion project. The original file was
-# created by Lieven Gobaerts and later adjusted by API
-# (apinheiro@igalia.coma) and also here
-# http://code.google.com/p/pybots/source/browse/trunk/master/Feeder.py
-#
-# All subsequent changes to feeder.py where made by Chandan-Dutta
-# Chowdhury <chandan-dutta.chowdhury @ hp.com> and Gareth Armstrong
-# <gareth.armstrong @ hp.com>.
-#
-# Those modifications are as follows:
-# 1) the feeds are usable from baseweb.WebStatus
-# 2) feeds are fully validated ATOM 1.0 and RSS 2.0 feeds, verified
-# with code from http://feedvalidator.org
-# 3) nicer xml output
-# 4) feeds can be filtered as per the /waterfall display with the
-# builder and category filters
-# 5) cleaned up white space and imports
-#
-# Finally, the code was directly integrated into these two files,
-# buildbot/status/web/feeds.py (you're reading it, ;-)) and
-# buildbot/status/web/baseweb.py.
-
-import os
-import re
-import sys
-import time
-from twisted.web import resource
-from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE, EXCEPTION
-
-class XmlResource(resource.Resource):
- contentType = "text/xml; charset=UTF-8"
- def render(self, request):
- data = self.content(request)
- request.setHeader("content-type", self.contentType)
- if request.method == "HEAD":
- request.setHeader("content-length", len(data))
- return ''
- return data
- docType = ''
- def header (self, request):
- data = ('<?xml version="1.0"?>\n')
- return data
- def footer(self, request):
- data = ''
- return data
- def content(self, request):
- data = self.docType
- data += self.header(request)
- data += self.body(request)
- data += self.footer(request)
- return data
- def body(self, request):
- return ''
-
-class FeedResource(XmlResource):
- title = None
- link = 'http://dummylink'
- language = 'en-us'
- description = 'Dummy rss'
- status = None
-
- def __init__(self, status, categories=None, title=None):
- self.status = status
- self.categories = categories
- self.title = title
- self.link = self.status.getBuildbotURL()
- self.description = 'List of FAILED builds'
- self.pubdate = time.gmtime(int(time.time()))
-
- def getBuilds(self, request):
- builds = []
- # THIS is lifted straight from the WaterfallStatusResource Class in
- # status/web/waterfall.py
- #
- # we start with all Builders available to this Waterfall: this is
- # limited by the config-file -time categories= argument, and defaults
- # to all defined Builders.
- allBuilderNames = self.status.getBuilderNames(categories=self.categories)
- builders = [self.status.getBuilder(name) for name in allBuilderNames]
-
- # but if the URL has one or more builder= arguments (or the old show=
- # argument, which is still accepted for backwards compatibility), we
- # use that set of builders instead. We still don't show anything
- # outside the config-file time set limited by categories=.
- showBuilders = request.args.get("show", [])
- showBuilders.extend(request.args.get("builder", []))
- if showBuilders:
- builders = [b for b in builders if b.name in showBuilders]
-
- # now, if the URL has one or category= arguments, use them as a
- # filter: only show those builders which belong to one of the given
- # categories.
- showCategories = request.args.get("category", [])
- if showCategories:
- builders = [b for b in builders if b.category in showCategories]
-
- maxFeeds = 25
-
- # Copy all failed builds in a new list.
- # This could clearly be implemented much better if we had
- # access to a global list of builds.
- for b in builders:
- lastbuild = b.getLastFinishedBuild()
- if lastbuild is None:
- continue
-
- lastnr = lastbuild.getNumber()
-
- totalbuilds = 0
- i = lastnr
- while i >= 0:
- build = b.getBuild(i)
- i -= 1
- if not build:
- continue
-
- results = build.getResults()
-
- # only add entries for failed builds!
- if results == FAILURE:
- totalbuilds += 1
- builds.append(build)
-
- # stop for this builder when our total nr. of feeds is reached
- if totalbuilds >= maxFeeds:
- break
-
- # Sort build list by date, youngest first.
- if sys.version_info[:3] >= (2,4,0):
- builds.sort(key=lambda build: build.getTimes(), reverse=True)
- else:
- # If you need compatibility with python < 2.4, use this for
- # sorting instead:
- # We apply Decorate-Sort-Undecorate
- deco = [(build.getTimes(), build) for build in builds]
- deco.sort()
- deco.reverse()
- builds = [build for (b1, build) in deco]
-
- if builds:
- builds = builds[:min(len(builds), maxFeeds)]
- return builds
-
- def body (self, request):
- data = ''
- builds = self.getBuilds(request)
-
- for build in builds:
- start, finished = build.getTimes()
- finishedTime = time.gmtime(int(finished))
- projectName = self.status.getProjectName()
- link = re.sub(r'index.html', "", self.status.getURLForThing(build))
-
- # title: trunk r22191 (plus patch) failed on 'i686-debian-sarge1 shared gcc-3.3.5'
- ss = build.getSourceStamp()
- source = ""
- if ss.branch:
- source += "Branch %s " % ss.branch
- if ss.revision:
- source += "Revision %s " % str(ss.revision)
- if ss.patch:
- source += " (plus patch)"
- if ss.changes:
- pass
- if (ss.branch is None and ss.revision is None and ss.patch is None
- and not ss.changes):
- source += "Latest revision "
- got_revision = None
- try:
- got_revision = build.getProperty("got_revision")
- except KeyError:
- pass
- if got_revision:
- got_revision = str(got_revision)
- if len(got_revision) > 40:
- got_revision = "[revision string too long]"
- source += "(Got Revision: %s)" % got_revision
- title = ('%s failed on "%s"' %
- (source, build.getBuilder().getName()))
-
- # get name of the failed step and the last 30 lines of its log.
- if build.getLogs():
- log = build.getLogs()[-1]
- laststep = log.getStep().getName()
- try:
- lastlog = log.getText()
- except IOError:
- # Probably the log file has been removed
- lastlog='<b>log file not available</b>'
-
- lines = re.split('\n', lastlog)
- lastlog = ''
- for logline in lines[max(0, len(lines)-30):]:
- lastlog = lastlog + logline + '<br/>'
- lastlog = lastlog.replace('\n', '<br/>')
-
- description = ''
- description += ('Date: %s<br/><br/>' %
- time.strftime("%a, %d %b %Y %H:%M:%S GMT",
- finishedTime))
- description += ('Full details available here: <a href="%s">%s</a><br/>' %
- (self.link, projectName))
- builder_summary_link = ('%s/builders/%s' %
- (re.sub(r'/index.html', '', self.link),
- build.getBuilder().getName()))
- description += ('Build summary: <a href="%s">%s</a><br/><br/>' %
- (builder_summary_link,
- build.getBuilder().getName()))
- description += ('Build details: <a href="%s">%s</a><br/><br/>' %
- (link, self.link + link[1:]))
- description += ('Author list: <b>%s</b><br/><br/>' %
- ",".join(build.getResponsibleUsers()))
- description += ('Failed step: <b>%s</b><br/><br/>' % laststep)
- description += 'Last lines of the build log:<br/>'
-
- data += self.item(title, description=description, lastlog=lastlog,
- link=link, pubDate=finishedTime)
-
- return data
-
- def item(self, title='', link='', description='', pubDate=''):
- """Generates xml for one item in the feed."""
-
-class Rss20StatusResource(FeedResource):
- def __init__(self, status, categories=None, title=None):
- FeedResource.__init__(self, status, categories, title)
- contentType = 'application/rss+xml'
-
- def header(self, request):
- data = FeedResource.header(self, request)
- data += ('<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">\n')
- data += (' <channel>\n')
- if self.title is None:
- title = 'Build status of ' + status.getProjectName()
- else:
- title = self.title
- data += (' <title>%s</title>\n' % title)
- if self.link is not None:
- data += (' <link>%s</link>\n' % self.link)
- link = re.sub(r'/index.html', '', self.link)
- data += (' <atom:link href="%s/rss" rel="self" type="application/rss+xml"/>\n' % link)
- if self.language is not None:
- data += (' <language>%s</language>\n' % self.language)
- if self.description is not None:
- data += (' <description>%s</description>\n' % self.description)
- if self.pubdate is not None:
- rfc822_pubdate = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
- self.pubdate)
- data += (' <pubDate>%s</pubDate>\n' % rfc822_pubdate)
- return data
-
- def item(self, title='', link='', description='', lastlog='', pubDate=''):
- data = (' <item>\n')
- data += (' <title>%s</title>\n' % title)
- if link is not None:
- data += (' <link>%s</link>\n' % link)
- if (description is not None and lastlog is not None):
- lastlog = re.sub(r'<br/>', "\n", lastlog)
- lastlog = re.sub(r'&', "&amp;", lastlog)
- lastlog = re.sub(r"'", "&apos;", lastlog)
- lastlog = re.sub(r'"', "&quot;", lastlog)
- lastlog = re.sub(r'<', '&lt;', lastlog)
- lastlog = re.sub(r'>', '&gt;', lastlog)
- lastlog = lastlog.replace('\n', '<br/>')
- content = '<![CDATA['
- content += description
- content += lastlog
- content += ']]>'
- data += (' <description>%s</description>\n' % content)
- if pubDate is not None:
- rfc822pubDate = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
- pubDate)
- data += (' <pubDate>%s</pubDate>\n' % rfc822pubDate)
- # Every RSS item must have a globally unique ID
- guid = ('tag:%s@%s,%s:%s' % (os.environ['USER'],
- os.environ['HOSTNAME'],
- time.strftime("%Y-%m-%d", pubDate),
- time.strftime("%Y%m%d%H%M%S",
- pubDate)))
- data += (' <guid isPermaLink="false">%s</guid>\n' % guid)
- data += (' </item>\n')
- return data
-
- def footer(self, request):
- data = (' </channel>\n'
- '</rss>')
- return data
-
-class Atom10StatusResource(FeedResource):
- def __init__(self, status, categories=None, title=None):
- FeedResource.__init__(self, status, categories, title)
- contentType = 'application/atom+xml'
-
- def header(self, request):
- data = FeedResource.header(self, request)
- data += '<feed xmlns="http://www.w3.org/2005/Atom">\n'
- data += (' <id>%s</id>\n' % self.status.getBuildbotURL())
- if self.title is None:
- title = 'Build status of ' + status.getProjectName()
- else:
- title = self.title
- data += (' <title>%s</title>\n' % title)
- if self.link is not None:
- link = re.sub(r'/index.html', '', self.link)
- data += (' <link rel="self" href="%s/atom"/>\n' % link)
- data += (' <link rel="alternate" href="%s/"/>\n' % link)
- if self.description is not None:
- data += (' <subtitle>%s</subtitle>\n' % self.description)
- if self.pubdate is not None:
- rfc3339_pubdate = time.strftime("%Y-%m-%dT%H:%M:%SZ",
- self.pubdate)
- data += (' <updated>%s</updated>\n' % rfc3339_pubdate)
- data += (' <author>\n')
- data += (' <name>Build Bot</name>\n')
- data += (' </author>\n')
- return data
-
- def item(self, title='', link='', description='', lastlog='', pubDate=''):
- data = (' <entry>\n')
- data += (' <title>%s</title>\n' % title)
- if link is not None:
- data += (' <link href="%s"/>\n' % link)
- if (description is not None and lastlog is not None):
- lastlog = re.sub(r'<br/>', "\n", lastlog)
- lastlog = re.sub(r'&', "&amp;", lastlog)
- lastlog = re.sub(r"'", "&apos;", lastlog)
- lastlog = re.sub(r'"', "&quot;", lastlog)
- lastlog = re.sub(r'<', '&lt;', lastlog)
- lastlog = re.sub(r'>', '&gt;', lastlog)
- data += (' <content type="xhtml">\n')
- data += (' <div xmlns="http://www.w3.org/1999/xhtml">\n')
- data += (' %s\n' % description)
- data += (' <pre xml:space="preserve">%s</pre>\n' % lastlog)
- data += (' </div>\n')
- data += (' </content>\n')
- if pubDate is not None:
- rfc3339pubDate = time.strftime("%Y-%m-%dT%H:%M:%SZ",
- pubDate)
- data += (' <updated>%s</updated>\n' % rfc3339pubDate)
- # Every Atom entry must have a globally unique ID
- # http://diveintomark.org/archives/2004/05/28/howto-atom-id
- guid = ('tag:%s@%s,%s:%s' % (os.environ['USER'],
- os.environ['HOSTNAME'],
- time.strftime("%Y-%m-%d", pubDate),
- time.strftime("%Y%m%d%H%M%S",
- pubDate)))
- data += (' <id>%s</id>\n' % guid)
- data += (' <author>\n')
- data += (' <name>Build Bot</name>\n')
- data += (' </author>\n')
- data += (' </entry>\n')
- return data
-
- def footer(self, request):
- data = ('</feed>')
- return data
diff --git a/buildbot/buildbot/status/web/grid.py b/buildbot/buildbot/status/web/grid.py
deleted file mode 100644
index 79527d8..0000000
--- a/buildbot/buildbot/status/web/grid.py
+++ /dev/null
@@ -1,252 +0,0 @@
-from __future__ import generators
-
-import sys, time, os.path
-import urllib
-
-from buildbot import util
-from buildbot import version
-from buildbot.status.web.base import HtmlResource
-#from buildbot.status.web.base import Box, HtmlResource, IBox, ICurrentBox, \
-# ITopBox, td, build_get_class, path_to_build, path_to_step, map_branches
-from buildbot.status.web.base import build_get_class
-
-# set grid_css to the full pathname of the css file
-if hasattr(sys, "frozen"):
- # all 'data' files are in the directory of our executable
- here = os.path.dirname(sys.executable)
- grid_css = os.path.abspath(os.path.join(here, "grid.css"))
-else:
- # running from source; look for a sibling to __file__
- up = os.path.dirname
- grid_css = os.path.abspath(os.path.join(up(__file__), "grid.css"))
-
-class ANYBRANCH: pass # a flag value, used below
-
-class GridStatusResource(HtmlResource):
- # TODO: docs
- status = None
- control = None
- changemaster = None
-
- def __init__(self, allowForce=True, css=None):
- HtmlResource.__init__(self)
-
- self.allowForce = allowForce
- self.css = css or grid_css
-
- def getTitle(self, request):
- status = self.getStatus(request)
- p = status.getProjectName()
- if p:
- return "BuildBot: %s" % p
- else:
- return "BuildBot"
-
- def getChangemaster(self, request):
- # TODO: this wants to go away, access it through IStatus
- return request.site.buildbot_service.getChangeSvc()
-
- # handle reloads through an http header
- # TODO: send this as a real header, rather than a tag
- def get_reload_time(self, request):
- if "reload" in request.args:
- try:
- reload_time = int(request.args["reload"][0])
- return max(reload_time, 15)
- except ValueError:
- pass
- return None
-
- def head(self, request):
- head = ''
- reload_time = self.get_reload_time(request)
- if reload_time is not None:
- head += '<meta http-equiv="refresh" content="%d">\n' % reload_time
- return head
-
-# def setBuildmaster(self, buildmaster):
-# self.status = buildmaster.getStatus()
-# if self.allowForce:
-# self.control = interfaces.IControl(buildmaster)
-# else:
-# self.control = None
-# self.changemaster = buildmaster.change_svc
-#
-# # try to set the page title
-# p = self.status.getProjectName()
-# if p:
-# self.title = "BuildBot: %s" % p
-#
- def build_td(self, request, build):
- if not build:
- return '<td class="build">&nbsp;</td>\n'
-
- if build.isFinished():
- # get the text and annotate the first line with a link
- text = build.getText()
- if not text: text = [ "(no information)" ]
- if text == [ "build", "successful" ]: text = [ "OK" ]
- else:
- text = [ 'building' ]
-
- name = build.getBuilder().getName()
- number = build.getNumber()
- url = "builders/%s/builds/%d" % (name, number)
- text[0] = '<a href="%s">%s</a>' % (url, text[0])
- text = '<br />\n'.join(text)
- class_ = build_get_class(build)
-
- return '<td class="build %s">%s</td>\n' % (class_, text)
-
- def builder_td(self, request, builder):
- state, builds = builder.getState()
-
- # look for upcoming builds. We say the state is "waiting" if the
- # builder is otherwise idle and there is a scheduler which tells us a
- # build will be performed some time in the near future. TODO: this
- # functionality used to be in BuilderStatus.. maybe this code should
- # be merged back into it.
- upcoming = []
- builderName = builder.getName()
- for s in self.getStatus(request).getSchedulers():
- if builderName in s.listBuilderNames():
- upcoming.extend(s.getPendingBuildTimes())
- if state == "idle" and upcoming:
- state = "waiting"
-
- # TODO: for now, this pending/upcoming stuff is in the "current
- # activity" box, but really it should go into a "next activity" row
- # instead. The only times it should show up in "current activity" is
- # when the builder is otherwise idle.
-
- # are any builds pending? (waiting for a slave to be free)
- url = 'builders/%s/' % urllib.quote(builder.getName(), safe='')
- text = '<a href="%s">%s</a>' % (url, builder.getName())
- pbs = builder.getPendingBuilds()
- if state != 'idle' or pbs:
- if pbs:
- text += "<br />(%s with %d pending)" % (state, len(pbs))
- else:
- text += "<br />(%s)" % state
-
- return '<td valign="center" class="builder %s">%s</td>\n' % \
- (state, text)
-
- def stamp_td(self, stamp):
- text = stamp.getText()
- return '<td valign="bottom" class="sourcestamp">%s</td>\n' % \
- "<br />".join(text)
-
- def body(self, request):
- "This method builds the main waterfall display."
-
- # get url parameters
- numBuilds = int(request.args.get("width", [5])[0])
- categories = request.args.get("category", [])
- branch = request.args.get("branch", [ANYBRANCH])[0]
- if branch == 'trunk': branch = None
-
- # and the data we want to render
- status = self.getStatus(request)
- stamps = self.getRecentSourcestamps(status, numBuilds, categories, branch)
-
- projectURL = status.getProjectURL()
- projectName = status.getProjectName()
-
- data = '<table class="Grid" border="0" cellspacing="0">\n'
- data += '<tr>\n'
- data += '<td class="title"><a href="%s">%s</a>' % (projectURL, projectName)
- if categories:
- if len(categories) > 1:
- data += '\n<br /><b>Categories:</b><br/>%s' % ('<br/>'.join(categories))
- else:
- data += '\n<br /><b>Category:</b> %s' % categories[0]
- if branch != ANYBRANCH:
- data += '\n<br /><b>Branch:</b> %s' % (branch or 'trunk')
- data += '</td>\n'
- for stamp in stamps:
- data += self.stamp_td(stamp)
- data += '</tr>\n'
-
- sortedBuilderNames = status.getBuilderNames()[:]
- sortedBuilderNames.sort()
- for bn in sortedBuilderNames:
- builds = [None] * len(stamps)
-
- builder = status.getBuilder(bn)
- if categories and builder.category not in categories:
- continue
-
- build = builder.getBuild(-1)
- while build and None in builds:
- ss = build.getSourceStamp(absolute=True)
- for i in range(len(stamps)):
- if ss == stamps[i] and builds[i] is None:
- builds[i] = build
- build = build.getPreviousBuild()
-
- data += '<tr>\n'
- data += self.builder_td(request, builder)
- for build in builds:
- data += self.build_td(request, build)
- data += '</tr>\n'
-
- data += '</table>\n'
-
- # TODO: this stuff should be generated by a template of some sort
- data += '<hr /><div class="footer">\n'
-
- welcomeurl = self.path_to_root(request) + "index.html"
- data += '[<a href="%s">welcome</a>]\n' % welcomeurl
- data += "<br />\n"
-
- data += '<a href="http://buildbot.sourceforge.net/">Buildbot</a>'
- data += "-%s " % version
- if projectName:
- data += "working for the "
- if projectURL:
- data += "<a href=\"%s\">%s</a> project." % (projectURL,
- projectName)
- else:
- data += "%s project." % projectName
- data += "<br />\n"
- data += ("Page built: " +
- time.strftime("%a %d %b %Y %H:%M:%S",
- time.localtime(util.now()))
- + "\n")
- data += '</div>\n'
- return data
-
- def getRecentSourcestamps(self, status, numBuilds, categories, branch):
- """
- get a list of the most recent NUMBUILDS SourceStamp tuples, sorted
- by the earliest start we've seen for them
- """
- # TODO: use baseweb's getLastNBuilds?
- sourcestamps = { } # { ss-tuple : earliest time }
- for bn in status.getBuilderNames():
- builder = status.getBuilder(bn)
- if categories and builder.category not in categories:
- continue
- build = builder.getBuild(-1)
- while build:
- ss = build.getSourceStamp(absolute=True)
- start = build.getTimes()[0]
- build = build.getPreviousBuild()
-
- # skip un-started builds
- if not start: continue
-
- # skip non-matching branches
- if branch != ANYBRANCH and ss.branch != branch: continue
-
- sourcestamps[ss] = min(sourcestamps.get(ss, sys.maxint), start)
-
- # now sort those and take the NUMBUILDS most recent
- sourcestamps = sourcestamps.items()
- sourcestamps.sort(lambda x, y: cmp(x[1], y[1]))
- sourcestamps = map(lambda tup : tup[0], sourcestamps)
- sourcestamps = sourcestamps[-numBuilds:]
-
- return sourcestamps
-
diff --git a/buildbot/buildbot/status/web/index.html b/buildbot/buildbot/status/web/index.html
deleted file mode 100644
index 23e6650..0000000
--- a/buildbot/buildbot/status/web/index.html
+++ /dev/null
@@ -1,32 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html>
-<head>
-<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-15">
-<title>Welcome to the Buildbot</title>
-</head>
-
-<body>
-<h1>Welcome to the Buildbot!</h1>
-
-<ul>
- <li>the <a href="waterfall">Waterfall Display</a> will give you a
- time-oriented summary of recent buildbot activity.</li>
-
- <li>the <a href="grid">Grid Display</a> will give you a
- developer-oriented summary of recent buildbot activity.</li>
-
- <li>The <a href="one_box_per_builder">Latest Build</a> for each builder is
- here.</li>
-
- <li><a href="one_line_per_build">Recent Builds</a> are summarized here, one
- per line.</li>
-
- <li><a href="buildslaves">Buildslave</a> information</li>
- <li><a href="changes">ChangeSource</a> information.</li>
-
- <br />
- <li><a href="about">About this Buildbot</a></li>
-</ul>
-
-
-</body> </html>
diff --git a/buildbot/buildbot/status/web/logs.py b/buildbot/buildbot/status/web/logs.py
deleted file mode 100644
index dfcf7f0..0000000
--- a/buildbot/buildbot/status/web/logs.py
+++ /dev/null
@@ -1,171 +0,0 @@
-
-from zope.interface import implements
-from twisted.python import components
-from twisted.spread import pb
-from twisted.web import html, server
-from twisted.web.resource import Resource
-from twisted.web.error import NoResource
-
-from buildbot import interfaces
-from buildbot.status import builder
-from buildbot.status.web.base import IHTMLLog, HtmlResource
-
-
-textlog_stylesheet = """
-<style type="text/css">
- div.data {
- font-family: "Courier New", courier, monotype;
- }
- span.stdout {
- font-family: "Courier New", courier, monotype;
- }
- span.stderr {
- font-family: "Courier New", courier, monotype;
- color: red;
- }
- span.header {
- font-family: "Courier New", courier, monotype;
- color: blue;
- }
-</style>
-"""
-
-class ChunkConsumer:
- implements(interfaces.IStatusLogConsumer)
-
- def __init__(self, original, textlog):
- self.original = original
- self.textlog = textlog
- def registerProducer(self, producer, streaming):
- self.producer = producer
- self.original.registerProducer(producer, streaming)
- def unregisterProducer(self):
- self.original.unregisterProducer()
- def writeChunk(self, chunk):
- formatted = self.textlog.content([chunk])
- try:
- self.original.write(formatted)
- except pb.DeadReferenceError:
- self.producing.stopProducing()
- def finish(self):
- self.textlog.finished()
-
-
-# /builders/$builder/builds/$buildnum/steps/$stepname/logs/$logname
-class TextLog(Resource):
- # a new instance of this Resource is created for each client who views
- # it, so we can afford to track the request in the Resource.
- implements(IHTMLLog)
-
- asText = False
- subscribed = False
-
- def __init__(self, original):
- Resource.__init__(self)
- self.original = original
-
- def getChild(self, path, req):
- if path == "text":
- self.asText = True
- return self
- return HtmlResource.getChild(self, path, req)
-
- def htmlHeader(self, request):
- title = "Log File contents"
- data = "<html>\n<head><title>" + title + "</title>\n"
- data += textlog_stylesheet
- data += "</head>\n"
- data += "<body vlink=\"#800080\">\n"
- texturl = request.childLink("text")
- data += '<a href="%s">(view as text)</a><br />\n' % texturl
- data += "<pre>\n"
- return data
-
- def content(self, entries):
- spanfmt = '<span class="%s">%s</span>'
- data = ""
- for type, entry in entries:
- if type >= len(builder.ChunkTypes) or type < 0:
- # non-std channel, don't display
- continue
- if self.asText:
- if type != builder.HEADER:
- data += entry
- else:
- data += spanfmt % (builder.ChunkTypes[type],
- html.escape(entry))
- return data
-
- def htmlFooter(self):
- data = "</pre>\n"
- data += "</body></html>\n"
- return data
-
- def render_HEAD(self, request):
- if self.asText:
- request.setHeader("content-type", "text/plain")
- else:
- request.setHeader("content-type", "text/html")
-
- # vague approximation, ignores markup
- request.setHeader("content-length", self.original.length)
- return ''
-
- def render_GET(self, req):
- self.req = req
-
- if self.asText:
- req.setHeader("content-type", "text/plain")
- else:
- req.setHeader("content-type", "text/html")
-
- if not self.asText:
- req.write(self.htmlHeader(req))
-
- self.original.subscribeConsumer(ChunkConsumer(req, self))
- return server.NOT_DONE_YET
-
- def finished(self):
- if not self.req:
- return
- try:
- if not self.asText:
- self.req.write(self.htmlFooter())
- self.req.finish()
- except pb.DeadReferenceError:
- pass
- # break the cycle, the Request's .notifications list includes the
- # Deferred (from req.notifyFinish) that's pointing at us.
- self.req = None
-
-components.registerAdapter(TextLog, interfaces.IStatusLog, IHTMLLog)
-
-
-class HTMLLog(Resource):
- implements(IHTMLLog)
-
- def __init__(self, original):
- Resource.__init__(self)
- self.original = original
-
- def render(self, request):
- request.setHeader("content-type", "text/html")
- return self.original.html
-
-components.registerAdapter(HTMLLog, builder.HTMLLogFile, IHTMLLog)
-
-
-class LogsResource(HtmlResource):
- addSlash = True
-
- def __init__(self, step_status):
- HtmlResource.__init__(self)
- self.step_status = step_status
-
- def getChild(self, path, req):
- for log in self.step_status.getLogs():
- if path == log.getName():
- if log.hasContents():
- return IHTMLLog(interfaces.IStatusLog(log))
- return NoResource("Empty Log '%s'" % path)
- return HtmlResource.getChild(self, path, req)
diff --git a/buildbot/buildbot/status/web/robots.txt b/buildbot/buildbot/status/web/robots.txt
deleted file mode 100644
index 47a9d27..0000000
--- a/buildbot/buildbot/status/web/robots.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-User-agent: *
-Disallow: /waterfall
-Disallow: /builders
-Disallow: /changes
-Disallow: /buildslaves
-Disallow: /schedulers
-Disallow: /one_line_per_build
-Disallow: /one_box_per_builder
-Disallow: /xmlrpc
diff --git a/buildbot/buildbot/status/web/slaves.py b/buildbot/buildbot/status/web/slaves.py
deleted file mode 100644
index 5782873..0000000
--- a/buildbot/buildbot/status/web/slaves.py
+++ /dev/null
@@ -1,181 +0,0 @@
-
-import time, urllib
-from twisted.python import log
-from twisted.web import html
-from twisted.web.util import Redirect
-
-from buildbot.status.web.base import HtmlResource, abbreviate_age, OneLineMixin, path_to_slave
-from buildbot import version, util
-
-# /buildslaves/$slavename
-class OneBuildSlaveResource(HtmlResource, OneLineMixin):
- addSlash = False
- def __init__(self, slavename):
- HtmlResource.__init__(self)
- self.slavename = slavename
-
- def getTitle(self, req):
- return "Buildbot: %s" % html.escape(self.slavename)
-
- def getChild(self, path, req):
- if path == "shutdown":
- s = self.getStatus(req)
- slave = s.getSlave(self.slavename)
- slave.setGraceful(True)
- return Redirect(path_to_slave(req, slave))
-
- def body(self, req):
- s = self.getStatus(req)
- slave = s.getSlave(self.slavename)
- my_builders = []
- for bname in s.getBuilderNames():
- b = s.getBuilder(bname)
- for bs in b.getSlaves():
- slavename = bs.getName()
- if bs.getName() == self.slavename:
- my_builders.append(b)
-
- # Current builds
- current_builds = []
- for b in my_builders:
- for cb in b.getCurrentBuilds():
- if cb.getSlavename() == self.slavename:
- current_builds.append(cb)
-
- data = []
-
- projectName = s.getProjectName()
-
- data.append("<a href=\"%s\">%s</a>\n" % (self.path_to_root(req), projectName))
-
- data.append("<h1>Build Slave: %s</h1>\n" % self.slavename)
-
- shutdown_url = req.childLink("shutdown")
-
- if not slave.isConnected():
- data.append("<h2>NOT CONNECTED</h2>\n")
- elif not slave.getGraceful():
- data.append('''<form method="POST" action="%s">
-<input type="submit" value="Gracefully Shutdown">
-</form>''' % shutdown_url)
- else:
- data.append("Gracefully shutting down...\n")
-
- if current_builds:
- data.append("<h2>Currently building:</h2>\n")
- data.append("<ul>\n")
- for build in current_builds:
- data.append("<li>%s</li>\n" % self.make_line(req, build, True))
- data.append("</ul>\n")
-
- else:
- data.append("<h2>no current builds</h2>\n")
-
- # Recent builds
- data.append("<h2>Recent builds:</h2>\n")
- data.append("<ul>\n")
- n = 0
- try:
- max_builds = int(req.args.get('builds')[0])
- except:
- max_builds = 10
- for build in s.generateFinishedBuilds(builders=[b.getName() for b in my_builders]):
- if build.getSlavename() == self.slavename:
- n += 1
- data.append("<li>%s</li>\n" % self.make_line(req, build, True))
- if n > max_builds:
- break
- data.append("</ul>\n")
-
- projectURL = s.getProjectURL()
- projectName = s.getProjectName()
- data.append('<hr /><div class="footer">\n')
-
- welcomeurl = self.path_to_root(req) + "index.html"
- data.append("[<a href=\"%s\">welcome</a>]\n" % welcomeurl)
- data.append("<br />\n")
-
- data.append('<a href="http://buildbot.sourceforge.net/">Buildbot</a>')
- data.append("-%s " % version)
- if projectName:
- data.append("working for the ")
- if projectURL:
- data.append("<a href=\"%s\">%s</a> project." % (projectURL,
- projectName))
- else:
- data.append("%s project." % projectName)
- data.append("<br />\n")
- data.append("Page built: " +
- time.strftime("%a %d %b %Y %H:%M:%S",
- time.localtime(util.now()))
- + "\n")
- data.append("</div>\n")
-
- return "".join(data)
-
-# /buildslaves
-class BuildSlavesResource(HtmlResource):
- title = "BuildSlaves"
- addSlash = True
-
- def body(self, req):
- s = self.getStatus(req)
- data = ""
- data += "<h1>Build Slaves</h1>\n"
-
- used_by_builder = {}
- for bname in s.getBuilderNames():
- b = s.getBuilder(bname)
- for bs in b.getSlaves():
- slavename = bs.getName()
- if slavename not in used_by_builder:
- used_by_builder[slavename] = []
- used_by_builder[slavename].append(bname)
-
- data += "<ol>\n"
- for name in util.naturalSort(s.getSlaveNames()):
- slave = s.getSlave(name)
- slave_status = s.botmaster.slaves[name].slave_status
- isBusy = len(slave_status.getRunningBuilds())
- data += " <li><a href=\"%s\">%s</a>:\n" % (req.childLink(urllib.quote(name,'')), name)
- data += " <ul>\n"
- builder_links = ['<a href="%s">%s</a>'
- % (req.childLink("../builders/%s" % bname),bname)
- for bname in used_by_builder.get(name, [])]
- if builder_links:
- data += (" <li>Used by Builders: %s</li>\n" %
- ", ".join(builder_links))
- else:
- data += " <li>Not used by any Builders</li>\n"
- if slave.isConnected():
- data += " <li>Slave is currently connected</li>\n"
- admin = slave.getAdmin()
- if admin:
- # munge it to avoid feeding the spambot harvesters
- admin = admin.replace("@", " -at- ")
- data += " <li>Admin: %s</li>\n" % admin
- last = slave.lastMessageReceived()
- if last:
- lt = time.strftime("%Y-%b-%d %H:%M:%S",
- time.localtime(last))
- age = abbreviate_age(time.time() - last)
- data += " <li>Last heard from: %s " % age
- data += '<font size="-1">(%s)</font>' % lt
- data += "</li>\n"
- if isBusy:
- data += "<li>Slave is currently building.</li>"
- else:
- data += "<li>Slave is idle.</li>"
- else:
- data += " <li><b>Slave is NOT currently connected</b></li>\n"
-
- data += " </ul>\n"
- data += " </li>\n"
- data += "\n"
-
- data += "</ol>\n"
-
- return data
-
- def getChild(self, path, req):
- return OneBuildSlaveResource(path)
diff --git a/buildbot/buildbot/status/web/step.py b/buildbot/buildbot/status/web/step.py
deleted file mode 100644
index b65626f..0000000
--- a/buildbot/buildbot/status/web/step.py
+++ /dev/null
@@ -1,97 +0,0 @@
-
-from twisted.web import html
-
-import urllib
-from buildbot.status.web.base import HtmlResource, path_to_builder, \
- path_to_build
-from buildbot.status.web.logs import LogsResource
-from buildbot import util
-from time import ctime
-
-# /builders/$builder/builds/$buildnum/steps/$stepname
-class StatusResourceBuildStep(HtmlResource):
- title = "Build Step"
- addSlash = True
-
- def __init__(self, build_status, step_status):
- HtmlResource.__init__(self)
- self.status = build_status
- self.step_status = step_status
-
- def body(self, req):
- s = self.step_status
- b = s.getBuild()
- builder_name = b.getBuilder().getName()
- build_num = b.getNumber()
- data = ""
- data += ('<h1>BuildStep <a href="%s">%s</a>:' %
- (path_to_builder(req, b.getBuilder()), builder_name))
- data += '<a href="%s">#%d</a>' % (path_to_build(req, b), build_num)
- data += ":%s</h1>\n" % s.getName()
-
- if s.isFinished():
- data += ("<h2>Finished</h2>\n"
- "<p>%s</p>\n" % html.escape("%s" % s.getText()))
- else:
- data += ("<h2>Not Finished</h2>\n"
- "<p>ETA %s seconds</p>\n" % s.getETA())
-
- exp = s.getExpectations()
- if exp:
- data += ("<h2>Expectations</h2>\n"
- "<ul>\n")
- for e in exp:
- data += "<li>%s: current=%s, target=%s</li>\n" % \
- (html.escape(e[0]), e[1], e[2])
- data += "</ul>\n"
-
- (start, end) = s.getTimes()
- data += "<h2>Timing</h2>\n"
- data += "<table>\n"
- data += "<tr><td>Start</td><td>%s</td></tr>\n" % ctime(start)
- if end:
- data += "<tr><td>End</td><td>%s</td></tr>\n" % ctime(end)
- data += "<tr><td>Elapsed</td><td>%s</td></tr>\n" % util.formatInterval(end - start)
- data += "</table>\n"
-
- logs = s.getLogs()
- if logs:
- data += ("<h2>Logs</h2>\n"
- "<ul>\n")
- for logfile in logs:
- if logfile.hasContents():
- # FIXME: If the step name has a / in it, this is broken
- # either way. If we quote it but say '/'s are safe,
- # it chops up the step name. If we quote it and '/'s
- # are not safe, it escapes the / that separates the
- # step name from the log number.
- logname = logfile.getName()
- logurl = req.childLink("logs/%s" % urllib.quote(logname))
- data += ('<li><a href="%s">%s</a></li>\n' %
- (logurl, html.escape(logname)))
- else:
- data += '<li>%s</li>\n' % html.escape(logname)
- data += "</ul>\n"
-
- return data
-
- def getChild(self, path, req):
- if path == "logs":
- return LogsResource(self.step_status)
- return HtmlResource.getChild(self, path, req)
-
-
-
-# /builders/$builder/builds/$buildnum/steps
-class StepsResource(HtmlResource):
- addSlash = True
-
- def __init__(self, build_status):
- HtmlResource.__init__(self)
- self.build_status = build_status
-
- def getChild(self, path, req):
- for s in self.build_status.getSteps():
- if s.getName() == path:
- return StatusResourceBuildStep(self.build_status, s)
- return HtmlResource.getChild(self, path, req)
diff --git a/buildbot/buildbot/status/web/tests.py b/buildbot/buildbot/status/web/tests.py
deleted file mode 100644
index b96bba2..0000000
--- a/buildbot/buildbot/status/web/tests.py
+++ /dev/null
@@ -1,64 +0,0 @@
-
-from twisted.web.error import NoResource
-from twisted.web import html
-
-from buildbot.status.web.base import HtmlResource
-
-# /builders/$builder/builds/$buildnum/tests/$testname
-class TestResult(HtmlResource):
- title = "Test Logs"
-
- def __init__(self, name, test_result):
- HtmlResource.__init__(self)
- self.name = name
- self.test_result = test_result
-
- def body(self, request):
- dotname = ".".join(self.name)
- logs = self.test_result.getLogs()
- lognames = logs.keys()
- lognames.sort()
- data = "<h1>%s</h1>\n" % html.escape(dotname)
- for name in lognames:
- data += "<h2>%s</h2>\n" % html.escape(name)
- data += "<pre>" + logs[name] + "</pre>\n\n"
-
- return data
-
-
-# /builders/$builder/builds/$buildnum/tests
-class TestsResource(HtmlResource):
- title = "Test Results"
-
- def __init__(self, build_status):
- HtmlResource.__init__(self)
- self.build_status = build_status
- self.test_results = build_status.getTestResults()
-
- def body(self, request):
- r = self.test_results
- data = "<h1>Test Results</h1>\n"
- data += "<ul>\n"
- testnames = r.keys()
- testnames.sort()
- for name in testnames:
- res = r[name]
- dotname = ".".join(name)
- data += " <li>%s: " % dotname
- # TODO: this could break on weird test names. At the moment,
- # test names only come from Trial tests, where the name
- # components must be legal python names, but that won't always
- # be a restriction.
- url = request.childLink(dotname)
- data += "<a href=\"%s\">%s</a>" % (url, " ".join(res.getText()))
- data += "</li>\n"
- data += "</ul>\n"
- return data
-
- def getChild(self, path, request):
- try:
- name = tuple(path.split("."))
- result = self.test_results[name]
- return TestResult(name, result)
- except KeyError:
- return NoResource("No such test name '%s'" % path)
diff --git a/buildbot/buildbot/status/web/waterfall.py b/buildbot/buildbot/status/web/waterfall.py
deleted file mode 100644
index 1d3ab60..0000000
--- a/buildbot/buildbot/status/web/waterfall.py
+++ /dev/null
@@ -1,962 +0,0 @@
-# -*- test-case-name: buildbot.test.test_web -*-
-
-from zope.interface import implements
-from twisted.python import log, components
-from twisted.web import html
-import urllib
-
-import time
-import operator
-
-from buildbot import interfaces, util
-from buildbot import version
-from buildbot.status import builder
-
-from buildbot.status.web.base import Box, HtmlResource, IBox, ICurrentBox, \
- ITopBox, td, build_get_class, path_to_build, path_to_step, map_branches
-
-
-
-class CurrentBox(components.Adapter):
- # this provides the "current activity" box, just above the builder name
- implements(ICurrentBox)
-
- def formatETA(self, prefix, eta):
- if eta is None:
- return []
- if eta < 60:
- return ["< 1 min"]
- eta_parts = ["~"]
- eta_secs = eta
- if eta_secs > 3600:
- eta_parts.append("%d hrs" % (eta_secs / 3600))
- eta_secs %= 3600
- if eta_secs > 60:
- eta_parts.append("%d mins" % (eta_secs / 60))
- eta_secs %= 60
- abstime = time.strftime("%H:%M", time.localtime(util.now()+eta))
- return [prefix, " ".join(eta_parts), "at %s" % abstime]
-
- def getBox(self, status):
- # getState() returns offline, idle, or building
- state, builds = self.original.getState()
-
- # look for upcoming builds. We say the state is "waiting" if the
- # builder is otherwise idle and there is a scheduler which tells us a
- # build will be performed some time in the near future. TODO: this
- # functionality used to be in BuilderStatus.. maybe this code should
- # be merged back into it.
- upcoming = []
- builderName = self.original.getName()
- for s in status.getSchedulers():
- if builderName in s.listBuilderNames():
- upcoming.extend(s.getPendingBuildTimes())
- if state == "idle" and upcoming:
- state = "waiting"
-
- if state == "building":
- text = ["building"]
- if builds:
- for b in builds:
- eta = b.getETA()
- text.extend(self.formatETA("ETA in", eta))
- elif state == "offline":
- text = ["offline"]
- elif state == "idle":
- text = ["idle"]
- elif state == "waiting":
- text = ["waiting"]
- else:
- # just in case I add a state and forget to update this
- text = [state]
-
- # TODO: for now, this pending/upcoming stuff is in the "current
- # activity" box, but really it should go into a "next activity" row
- # instead. The only times it should show up in "current activity" is
- # when the builder is otherwise idle.
-
- # are any builds pending? (waiting for a slave to be free)
- pbs = self.original.getPendingBuilds()
- if pbs:
- text.append("%d pending" % len(pbs))
- for t in upcoming:
- eta = t - util.now()
- text.extend(self.formatETA("next in", eta))
- return Box(text, class_="Activity " + state)
-
-components.registerAdapter(CurrentBox, builder.BuilderStatus, ICurrentBox)
-
-
-class BuildTopBox(components.Adapter):
- # this provides a per-builder box at the very top of the display,
- # showing the results of the most recent build
- implements(IBox)
-
- def getBox(self, req):
- assert interfaces.IBuilderStatus(self.original)
- branches = [b for b in req.args.get("branch", []) if b]
- builder = self.original
- builds = list(builder.generateFinishedBuilds(map_branches(branches),
- num_builds=1))
- if not builds:
- return Box(["none"], class_="LastBuild")
- b = builds[0]
- name = b.getBuilder().getName()
- number = b.getNumber()
- url = path_to_build(req, b)
- text = b.getText()
- tests_failed = b.getSummaryStatistic('tests-failed', operator.add, 0)
- if tests_failed: text.extend(["Failed tests: %d" % tests_failed])
- # TODO: maybe add logs?
- # TODO: add link to the per-build page at 'url'
- class_ = build_get_class(b)
- return Box(text, class_="LastBuild %s" % class_)
-components.registerAdapter(BuildTopBox, builder.BuilderStatus, ITopBox)
-
-class BuildBox(components.Adapter):
- # this provides the yellow "starting line" box for each build
- implements(IBox)
-
- def getBox(self, req):
- b = self.original
- number = b.getNumber()
- url = path_to_build(req, b)
- reason = b.getReason()
- text = ('<a title="Reason: %s" href="%s">Build %d</a>'
- % (html.escape(reason), url, number))
- class_ = "start"
- if b.isFinished() and not b.getSteps():
- # the steps have been pruned, so there won't be any indication
- # of whether it succeeded or failed.
- class_ = build_get_class(b)
- return Box([text], class_="BuildStep " + class_)
-components.registerAdapter(BuildBox, builder.BuildStatus, IBox)
-
-class StepBox(components.Adapter):
- implements(IBox)
-
- def getBox(self, req):
- urlbase = path_to_step(req, self.original)
- text = self.original.getText()
- if text is None:
- log.msg("getText() gave None", urlbase)
- text = []
- text = text[:]
- logs = self.original.getLogs()
- for num in range(len(logs)):
- name = logs[num].getName()
- if logs[num].hasContents():
- url = urlbase + "/logs/%s" % urllib.quote(name)
- text.append("<a href=\"%s\">%s</a>" % (url, html.escape(name)))
- else:
- text.append(html.escape(name))
- urls = self.original.getURLs()
- ex_url_class = "BuildStep external"
- for name, target in urls.items():
- text.append('[<a href="%s" class="%s">%s</a>]' %
- (target, ex_url_class, html.escape(name)))
- class_ = "BuildStep " + build_get_class(self.original)
- return Box(text, class_=class_)
-components.registerAdapter(StepBox, builder.BuildStepStatus, IBox)
-
-
-class EventBox(components.Adapter):
- implements(IBox)
-
- def getBox(self, req):
- text = self.original.getText()
- class_ = "Event"
- return Box(text, class_=class_)
-components.registerAdapter(EventBox, builder.Event, IBox)
-
-
-class Spacer:
- implements(interfaces.IStatusEvent)
-
- def __init__(self, start, finish):
- self.started = start
- self.finished = finish
-
- def getTimes(self):
- return (self.started, self.finished)
- def getText(self):
- return []
-
-class SpacerBox(components.Adapter):
- implements(IBox)
-
- def getBox(self, req):
- #b = Box(["spacer"], "white")
- b = Box([])
- b.spacer = True
- return b
-components.registerAdapter(SpacerBox, Spacer, IBox)
-
-def insertGaps(g, lastEventTime, idleGap=2):
- debug = False
-
- e = g.next()
- starts, finishes = e.getTimes()
- if debug: log.msg("E0", starts, finishes)
- if finishes == 0:
- finishes = starts
- if debug: log.msg("E1 finishes=%s, gap=%s, lET=%s" % \
- (finishes, idleGap, lastEventTime))
- if finishes is not None and finishes + idleGap < lastEventTime:
- if debug: log.msg(" spacer0")
- yield Spacer(finishes, lastEventTime)
-
- followingEventStarts = starts
- if debug: log.msg(" fES0", starts)
- yield e
-
- while 1:
- e = g.next()
- starts, finishes = e.getTimes()
- if debug: log.msg("E2", starts, finishes)
- if finishes == 0:
- finishes = starts
- if finishes is not None and finishes + idleGap < followingEventStarts:
- # there is a gap between the end of this event and the beginning
- # of the next one. Insert an idle event so the waterfall display
- # shows a gap here.
- if debug:
- log.msg(" finishes=%s, gap=%s, fES=%s" % \
- (finishes, idleGap, followingEventStarts))
- yield Spacer(finishes, followingEventStarts)
- yield e
- followingEventStarts = starts
- if debug: log.msg(" fES1", starts)
-
-HELP = '''
-<form action="../waterfall" method="GET">
-
-<h1>The Waterfall Display</h1>
-
-<p>The Waterfall display can be controlled by adding query arguments to the
-URL. For example, if your Waterfall is accessed via the URL
-<tt>http://buildbot.example.org:8080</tt>, then you could add a
-<tt>branch=</tt> argument (described below) by going to
-<tt>http://buildbot.example.org:8080?branch=beta4</tt> instead. Remember that
-query arguments are separated from each other with ampersands, but they are
-separated from the main URL with a question mark, so to add a
-<tt>branch=</tt> and two <tt>builder=</tt> arguments, you would use
-<tt>http://buildbot.example.org:8080?branch=beta4&amp;builder=unix&amp;builder=macos</tt>.</p>
-
-<h2>Limiting the Displayed Interval</h2>
-
-<p>The <tt>last_time=</tt> argument is a unix timestamp (seconds since the
-start of 1970) that will be used as an upper bound on the interval of events
-displayed: nothing will be shown that is more recent than the given time.
-When no argument is provided, all events up to and including the most recent
-steps are included.</p>
-
-<p>The <tt>first_time=</tt> argument provides the lower bound. No events will
-be displayed that occurred <b>before</b> this timestamp. Instead of providing
-<tt>first_time=</tt>, you can provide <tt>show_time=</tt>: in this case,
-<tt>first_time</tt> will be set equal to <tt>last_time</tt> minus
-<tt>show_time</tt>. <tt>show_time</tt> overrides <tt>first_time</tt>.</p>
-
-<p>The display normally shows the latest 200 events that occurred in the
-given interval, where each timestamp on the left hand edge counts as a single
-event. You can add a <tt>num_events=</tt> argument to override this this.</p>
-
-<h2>Hiding non-Build events</h2>
-
-<p>By passing <tt>show_events=false</tt>, you can remove the "buildslave
-attached", "buildslave detached", and "builder reconfigured" events that
-appear in-between the actual builds.</p>
-
-%(show_events_input)s
-
-<h2>Showing only Certain Branches</h2>
-
-<p>If you provide one or more <tt>branch=</tt> arguments, the display will be
-limited to builds that used one of the given branches. If no <tt>branch=</tt>
-arguments are given, builds from all branches will be displayed.</p>
-
-Erase the text from these "Show Branch:" boxes to remove that branch filter.
-
-%(show_branches_input)s
-
-<h2>Limiting the Builders that are Displayed</h2>
-
-<p>By adding one or more <tt>builder=</tt> arguments, the display will be
-limited to showing builds that ran on the given builders. This serves to
-limit the display to the specific named columns. If no <tt>builder=</tt>
-arguments are provided, all Builders will be displayed.</p>
-
-<p>To view a Waterfall page with only a subset of Builders displayed, select
-the Builders you are interested in here.</p>
-
-%(show_builders_input)s
-
-
-<h2>Auto-reloading the Page</h2>
-
-<p>Adding a <tt>reload=</tt> argument will cause the page to automatically
-reload itself after that many seconds.</p>
-
-%(show_reload_input)s
-
-<h2>Reload Waterfall Page</h2>
-
-<input type="submit" value="View Waterfall" />
-</form>
-'''
-
-class WaterfallHelp(HtmlResource):
- title = "Waterfall Help"
-
- def __init__(self, categories=None):
- HtmlResource.__init__(self)
- self.categories = categories
-
- def body(self, request):
- data = ''
- status = self.getStatus(request)
-
- showEvents_checked = 'checked="checked"'
- if request.args.get("show_events", ["true"])[0].lower() == "true":
- showEvents_checked = ''
- show_events_input = ('<p>'
- '<input type="checkbox" name="show_events" '
- 'value="false" %s>'
- 'Hide non-Build events'
- '</p>\n'
- ) % showEvents_checked
-
- branches = [b
- for b in request.args.get("branch", [])
- if b]
- branches.append('')
- show_branches_input = '<table>\n'
- for b in branches:
- show_branches_input += ('<tr>'
- '<td>Show Branch: '
- '<input type="text" name="branch" '
- 'value="%s">'
- '</td></tr>\n'
- ) % (b,)
- show_branches_input += '</table>\n'
-
- # this has a set of toggle-buttons to let the user choose the
- # builders
- showBuilders = request.args.get("show", [])
- showBuilders.extend(request.args.get("builder", []))
- allBuilders = status.getBuilderNames(categories=self.categories)
-
- show_builders_input = '<table>\n'
- for bn in allBuilders:
- checked = ""
- if bn in showBuilders:
- checked = 'checked="checked"'
- show_builders_input += ('<tr>'
- '<td><input type="checkbox"'
- ' name="builder" '
- 'value="%s" %s></td> '
- '<td>%s</td></tr>\n'
- ) % (bn, checked, bn)
- show_builders_input += '</table>\n'
-
- # a couple of radio-button selectors for refresh time will appear
- # just after that text
- show_reload_input = '<table>\n'
- times = [("none", "None"),
- ("60", "60 seconds"),
- ("300", "5 minutes"),
- ("600", "10 minutes"),
- ]
- current_reload_time = request.args.get("reload", ["none"])
- if current_reload_time:
- current_reload_time = current_reload_time[0]
- if current_reload_time not in [t[0] for t in times]:
- times.insert(0, (current_reload_time, current_reload_time) )
- for value, name in times:
- checked = ""
- if value == current_reload_time:
- checked = 'checked="checked"'
- show_reload_input += ('<tr>'
- '<td><input type="radio" name="reload" '
- 'value="%s" %s></td> '
- '<td>%s</td></tr>\n'
- ) % (value, checked, name)
- show_reload_input += '</table>\n'
-
- fields = {"show_events_input": show_events_input,
- "show_branches_input": show_branches_input,
- "show_builders_input": show_builders_input,
- "show_reload_input": show_reload_input,
- }
- data += HELP % fields
- return data
-
-class WaterfallStatusResource(HtmlResource):
- """This builds the main status page, with the waterfall display, and
- all child pages."""
-
- def __init__(self, categories=None):
- HtmlResource.__init__(self)
- self.categories = categories
- self.putChild("help", WaterfallHelp(categories))
-
- def getTitle(self, request):
- status = self.getStatus(request)
- p = status.getProjectName()
- if p:
- return "BuildBot: %s" % p
- else:
- return "BuildBot"
-
- def getChangemaster(self, request):
- # TODO: this wants to go away, access it through IStatus
- return request.site.buildbot_service.getChangeSvc()
-
- def get_reload_time(self, request):
- if "reload" in request.args:
- try:
- reload_time = int(request.args["reload"][0])
- return max(reload_time, 15)
- except ValueError:
- pass
- return None
-
- def head(self, request):
- head = ''
- reload_time = self.get_reload_time(request)
- if reload_time is not None:
- head += '<meta http-equiv="refresh" content="%d">\n' % reload_time
- return head
-
- def body(self, request):
- "This method builds the main waterfall display."
-
- status = self.getStatus(request)
- data = ''
-
- projectName = status.getProjectName()
- projectURL = status.getProjectURL()
-
- phase = request.args.get("phase",["2"])
- phase = int(phase[0])
-
- # we start with all Builders available to this Waterfall: this is
- # limited by the config-file -time categories= argument, and defaults
- # to all defined Builders.
- allBuilderNames = status.getBuilderNames(categories=self.categories)
- builders = [status.getBuilder(name) for name in allBuilderNames]
-
- # but if the URL has one or more builder= arguments (or the old show=
- # argument, which is still accepted for backwards compatibility), we
- # use that set of builders instead. We still don't show anything
- # outside the config-file time set limited by categories=.
- showBuilders = request.args.get("show", [])
- showBuilders.extend(request.args.get("builder", []))
- if showBuilders:
- builders = [b for b in builders if b.name in showBuilders]
-
- # now, if the URL has one or category= arguments, use them as a
- # filter: only show those builders which belong to one of the given
- # categories.
- showCategories = request.args.get("category", [])
- if showCategories:
- builders = [b for b in builders if b.category in showCategories]
-
- builderNames = [b.name for b in builders]
-
- if phase == -1:
- return self.body0(request, builders)
- (changeNames, builderNames, timestamps, eventGrid, sourceEvents) = \
- self.buildGrid(request, builders)
- if phase == 0:
- return self.phase0(request, (changeNames + builderNames),
- timestamps, eventGrid)
- # start the table: top-header material
- data += '<table border="0" cellspacing="0">\n'
-
- if projectName and projectURL:
- # TODO: this is going to look really ugly
- topleft = '<a href="%s">%s</a><br />last build' % \
- (projectURL, projectName)
- else:
- topleft = "last build"
- data += ' <tr class="LastBuild">\n'
- data += td(topleft, align="right", colspan=2, class_="Project")
- for b in builders:
- box = ITopBox(b).getBox(request)
- data += box.td(align="center")
- data += " </tr>\n"
-
- data += ' <tr class="Activity">\n'
- data += td('current activity', align='right', colspan=2)
- for b in builders:
- box = ICurrentBox(b).getBox(status)
- data += box.td(align="center")
- data += " </tr>\n"
-
- data += " <tr>\n"
- TZ = time.tzname[time.localtime()[-1]]
- data += td("time (%s)" % TZ, align="center", class_="Time")
- data += td('<a href="%s">changes</a>' % request.childLink("../changes"),
- align="center", class_="Change")
- for name in builderNames:
- safename = urllib.quote(name, safe='')
- data += td('<a href="%s">%s</a>' %
- (request.childLink("../builders/%s" % safename), name),
- align="center", class_="Builder")
- data += " </tr>\n"
-
- if phase == 1:
- f = self.phase1
- else:
- f = self.phase2
- data += f(request, changeNames + builderNames, timestamps, eventGrid,
- sourceEvents)
-
- data += "</table>\n"
-
- data += '<hr /><div class="footer">\n'
-
- def with_args(req, remove_args=[], new_args=[], new_path=None):
- # sigh, nevow makes this sort of manipulation easier
- newargs = req.args.copy()
- for argname in remove_args:
- newargs[argname] = []
- if "branch" in newargs:
- newargs["branch"] = [b for b in newargs["branch"] if b]
- for k,v in new_args:
- if k in newargs:
- newargs[k].append(v)
- else:
- newargs[k] = [v]
- newquery = "&".join(["%s=%s" % (k, v)
- for k in newargs
- for v in newargs[k]
- ])
- if new_path:
- new_url = new_path
- elif req.prepath:
- new_url = req.prepath[-1]
- else:
- new_url = ''
- if newquery:
- new_url += "?" + newquery
- return new_url
-
- if timestamps:
- bottom = timestamps[-1]
- nextpage = with_args(request, ["last_time"],
- [("last_time", str(int(bottom)))])
- data += '[<a href="%s">next page</a>]\n' % nextpage
-
- helpurl = self.path_to_root(request) + "waterfall/help"
- helppage = with_args(request, new_path=helpurl)
- data += '[<a href="%s">help</a>]\n' % helppage
-
- welcomeurl = self.path_to_root(request) + "index.html"
- data += '[<a href="%s">welcome</a>]\n' % welcomeurl
-
- if self.get_reload_time(request) is not None:
- no_reload_page = with_args(request, remove_args=["reload"])
- data += '[<a href="%s">Stop Reloading</a>]\n' % no_reload_page
-
- data += "<br />\n"
-
-
- bburl = "http://buildbot.net/?bb-ver=%s" % urllib.quote(version)
- data += '<a href="%s">Buildbot-%s</a> ' % (bburl, version)
- if projectName:
- data += "working for the "
- if projectURL:
- data += '<a href="%s">%s</a> project.' % (projectURL,
- projectName)
- else:
- data += "%s project." % projectName
- data += "<br />\n"
- # TODO: push this to the right edge, if possible
- data += ("Page built: " +
- time.strftime("%a %d %b %Y %H:%M:%S",
- time.localtime(util.now()))
- + "\n")
- data += '</div>\n'
- return data
-
- def body0(self, request, builders):
- # build the waterfall display
- data = ""
- data += "<h2>Basic display</h2>\n"
- data += '<p>See <a href="%s">here</a>' % request.childLink("../waterfall")
- data += " for the waterfall display</p>\n"
-
- data += '<table border="0" cellspacing="0">\n'
- names = map(lambda builder: builder.name, builders)
-
- # the top row is two blank spaces, then the top-level status boxes
- data += " <tr>\n"
- data += td("", colspan=2)
- for b in builders:
- text = ""
- state, builds = b.getState()
- if state != "offline":
- text += "%s<br />\n" % state #b.getCurrentBig().text[0]
- else:
- text += "OFFLINE<br />\n"
- data += td(text, align="center")
-
- # the next row has the column headers: time, changes, builder names
- data += " <tr>\n"
- data += td("Time", align="center")
- data += td("Changes", align="center")
- for name in names:
- data += td('<a href="%s">%s</a>' %
- (request.childLink("../" + urllib.quote(name)), name),
- align="center")
- data += " </tr>\n"
-
- # all further rows involve timestamps, commit events, and build events
- data += " <tr>\n"
- data += td("04:00", align="bottom")
- data += td("fred", align="center")
- for name in names:
- data += td("stuff", align="center")
- data += " </tr>\n"
-
- data += "</table>\n"
- return data
-
- def buildGrid(self, request, builders):
- debug = False
- # TODO: see if we can use a cached copy
-
- showEvents = False
- if request.args.get("show_events", ["true"])[0].lower() == "true":
- showEvents = True
- filterBranches = [b for b in request.args.get("branch", []) if b]
- filterBranches = map_branches(filterBranches)
- maxTime = int(request.args.get("last_time", [util.now()])[0])
- if "show_time" in request.args:
- minTime = maxTime - int(request.args["show_time"][0])
- elif "first_time" in request.args:
- minTime = int(request.args["first_time"][0])
- else:
- minTime = None
- spanLength = 10 # ten-second chunks
- maxPageLen = int(request.args.get("num_events", [200])[0])
-
- # first step is to walk backwards in time, asking each column
- # (commit, all builders) if they have any events there. Build up the
- # array of events, and stop when we have a reasonable number.
-
- commit_source = self.getChangemaster(request)
-
- lastEventTime = util.now()
- sources = [commit_source] + builders
- changeNames = ["changes"]
- builderNames = map(lambda builder: builder.getName(), builders)
- sourceNames = changeNames + builderNames
- sourceEvents = []
- sourceGenerators = []
-
- def get_event_from(g):
- try:
- while True:
- e = g.next()
- # e might be builder.BuildStepStatus,
- # builder.BuildStatus, builder.Event,
- # waterfall.Spacer(builder.Event), or changes.Change .
- # The showEvents=False flag means we should hide
- # builder.Event .
- if not showEvents and isinstance(e, builder.Event):
- continue
- break
- event = interfaces.IStatusEvent(e)
- if debug:
- log.msg("gen %s gave1 %s" % (g, event.getText()))
- except StopIteration:
- event = None
- return event
-
- for s in sources:
- gen = insertGaps(s.eventGenerator(filterBranches), lastEventTime)
- sourceGenerators.append(gen)
- # get the first event
- sourceEvents.append(get_event_from(gen))
- eventGrid = []
- timestamps = []
-
- lastEventTime = 0
- for e in sourceEvents:
- if e and e.getTimes()[0] > lastEventTime:
- lastEventTime = e.getTimes()[0]
- if lastEventTime == 0:
- lastEventTime = util.now()
-
- spanStart = lastEventTime - spanLength
- debugGather = 0
-
- while 1:
- if debugGather: log.msg("checking (%s,]" % spanStart)
- # the tableau of potential events is in sourceEvents[]. The
- # window crawls backwards, and we examine one source at a time.
- # If the source's top-most event is in the window, is it pushed
- # onto the events[] array and the tableau is refilled. This
- # continues until the tableau event is not in the window (or is
- # missing).
-
- spanEvents = [] # for all sources, in this span. row of eventGrid
- firstTimestamp = None # timestamp of first event in the span
- lastTimestamp = None # last pre-span event, for next span
-
- for c in range(len(sourceGenerators)):
- events = [] # for this source, in this span. cell of eventGrid
- event = sourceEvents[c]
- while event and spanStart < event.getTimes()[0]:
- # to look at windows that don't end with the present,
- # condition the .append on event.time <= spanFinish
- if not IBox(event, None):
- log.msg("BAD EVENT", event, event.getText())
- assert 0
- if debug:
- log.msg("pushing", event.getText(), event)
- events.append(event)
- starts, finishes = event.getTimes()
- firstTimestamp = util.earlier(firstTimestamp, starts)
- event = get_event_from(sourceGenerators[c])
- if debug:
- log.msg("finished span")
-
- if event:
- # this is the last pre-span event for this source
- lastTimestamp = util.later(lastTimestamp,
- event.getTimes()[0])
- if debugGather:
- log.msg(" got %s from %s" % (events, sourceNames[c]))
- sourceEvents[c] = event # refill the tableau
- spanEvents.append(events)
-
- # only show events older than maxTime. This makes it possible to
- # visit a page that shows what it would be like to scroll off the
- # bottom of this one.
- if firstTimestamp is not None and firstTimestamp <= maxTime:
- eventGrid.append(spanEvents)
- timestamps.append(firstTimestamp)
-
- if lastTimestamp:
- spanStart = lastTimestamp - spanLength
- else:
- # no more events
- break
- if minTime is not None and lastTimestamp < minTime:
- break
-
- if len(timestamps) > maxPageLen:
- break
-
-
- # now loop
-
- # loop is finished. now we have eventGrid[] and timestamps[]
- if debugGather: log.msg("finished loop")
- assert(len(timestamps) == len(eventGrid))
- return (changeNames, builderNames, timestamps, eventGrid, sourceEvents)
-
- def phase0(self, request, sourceNames, timestamps, eventGrid):
- # phase0 rendering
- if not timestamps:
- return "no events"
- data = ""
- for r in range(0, len(timestamps)):
- data += "<p>\n"
- data += "[%s]<br />" % timestamps[r]
- row = eventGrid[r]
- assert(len(row) == len(sourceNames))
- for c in range(0, len(row)):
- if row[c]:
- data += "<b>%s</b><br />\n" % sourceNames[c]
- for e in row[c]:
- log.msg("Event", r, c, sourceNames[c], e.getText())
- lognames = [loog.getName() for loog in e.getLogs()]
- data += "%s: %s: %s<br />" % (e.getText(),
- e.getTimes()[0],
- lognames)
- else:
- data += "<b>%s</b> [none]<br />\n" % sourceNames[c]
- return data
-
- def phase1(self, request, sourceNames, timestamps, eventGrid,
- sourceEvents):
- # phase1 rendering: table, but boxes do not overlap
- data = ""
- if not timestamps:
- return data
- lastDate = None
- for r in range(0, len(timestamps)):
- chunkstrip = eventGrid[r]
- # chunkstrip is a horizontal strip of event blocks. Each block
- # is a vertical list of events, all for the same source.
- assert(len(chunkstrip) == len(sourceNames))
- maxRows = reduce(lambda x,y: max(x,y),
- map(lambda x: len(x), chunkstrip))
- for i in range(maxRows):
- data += " <tr>\n";
- if i == 0:
- stuff = []
- # add the date at the beginning, and each time it changes
- today = time.strftime("<b>%d %b %Y</b>",
- time.localtime(timestamps[r]))
- todayday = time.strftime("<b>%a</b>",
- time.localtime(timestamps[r]))
- if today != lastDate:
- stuff.append(todayday)
- stuff.append(today)
- lastDate = today
- stuff.append(
- time.strftime("%H:%M:%S",
- time.localtime(timestamps[r])))
- data += td(stuff, valign="bottom", align="center",
- rowspan=maxRows, class_="Time")
- for c in range(0, len(chunkstrip)):
- block = chunkstrip[c]
- assert(block != None) # should be [] instead
- # bottom-justify
- offset = maxRows - len(block)
- if i < offset:
- data += td("")
- else:
- e = block[i-offset]
- box = IBox(e).getBox(request)
- box.parms["show_idle"] = 1
- data += box.td(valign="top", align="center")
- data += " </tr>\n"
-
- return data
-
- def phase2(self, request, sourceNames, timestamps, eventGrid,
- sourceEvents):
- data = ""
- if not timestamps:
- return data
- # first pass: figure out the height of the chunks, populate grid
- grid = []
- for i in range(1+len(sourceNames)):
- grid.append([])
- # grid is a list of columns, one for the timestamps, and one per
- # event source. Each column is exactly the same height. Each element
- # of the list is a single <td> box.
- lastDate = time.strftime("<b>%d %b %Y</b>",
- time.localtime(util.now()))
- for r in range(0, len(timestamps)):
- chunkstrip = eventGrid[r]
- # chunkstrip is a horizontal strip of event blocks. Each block
- # is a vertical list of events, all for the same source.
- assert(len(chunkstrip) == len(sourceNames))
- maxRows = reduce(lambda x,y: max(x,y),
- map(lambda x: len(x), chunkstrip))
- for i in range(maxRows):
- if i != maxRows-1:
- grid[0].append(None)
- else:
- # timestamp goes at the bottom of the chunk
- stuff = []
- # add the date at the beginning (if it is not the same as
- # today's date), and each time it changes
- todayday = time.strftime("<b>%a</b>",
- time.localtime(timestamps[r]))
- today = time.strftime("<b>%d %b %Y</b>",
- time.localtime(timestamps[r]))
- if today != lastDate:
- stuff.append(todayday)
- stuff.append(today)
- lastDate = today
- stuff.append(
- time.strftime("%H:%M:%S",
- time.localtime(timestamps[r])))
- grid[0].append(Box(text=stuff, class_="Time",
- valign="bottom", align="center"))
-
- # at this point the timestamp column has been populated with
- # maxRows boxes, most None but the last one has the time string
- for c in range(0, len(chunkstrip)):
- block = chunkstrip[c]
- assert(block != None) # should be [] instead
- for i in range(maxRows - len(block)):
- # fill top of chunk with blank space
- grid[c+1].append(None)
- for i in range(len(block)):
- # so the events are bottom-justified
- b = IBox(block[i]).getBox(request)
- b.parms['valign'] = "top"
- b.parms['align'] = "center"
- grid[c+1].append(b)
- # now all the other columns have maxRows new boxes too
- # populate the last row, if empty
- gridlen = len(grid[0])
- for i in range(len(grid)):
- strip = grid[i]
- assert(len(strip) == gridlen)
- if strip[-1] == None:
- if sourceEvents[i-1]:
- filler = IBox(sourceEvents[i-1]).getBox(request)
- else:
- # this can happen if you delete part of the build history
- filler = Box(text=["?"], align="center")
- strip[-1] = filler
- strip[-1].parms['rowspan'] = 1
- # second pass: bubble the events upwards to un-occupied locations
- # Every square of the grid that has a None in it needs to have
- # something else take its place.
- noBubble = request.args.get("nobubble",['0'])
- noBubble = int(noBubble[0])
- if not noBubble:
- for col in range(len(grid)):
- strip = grid[col]
- if col == 1: # changes are handled differently
- for i in range(2, len(strip)+1):
- # only merge empty boxes. Don't bubble commit boxes.
- if strip[-i] == None:
- next = strip[-i+1]
- assert(next)
- if next:
- #if not next.event:
- if next.spacer:
- # bubble the empty box up
- strip[-i] = next
- strip[-i].parms['rowspan'] += 1
- strip[-i+1] = None
- else:
- # we are above a commit box. Leave it
- # be, and turn the current box into an
- # empty one
- strip[-i] = Box([], rowspan=1,
- comment="commit bubble")
- strip[-i].spacer = True
- else:
- # we are above another empty box, which
- # somehow wasn't already converted.
- # Shouldn't happen
- pass
- else:
- for i in range(2, len(strip)+1):
- # strip[-i] will go from next-to-last back to first
- if strip[-i] == None:
- # bubble previous item up
- assert(strip[-i+1] != None)
- strip[-i] = strip[-i+1]
- strip[-i].parms['rowspan'] += 1
- strip[-i+1] = None
- else:
- strip[-i].parms['rowspan'] = 1
- # third pass: render the HTML table
- for i in range(gridlen):
- data += " <tr>\n";
- for strip in grid:
- b = strip[i]
- if b:
- data += b.td()
- else:
- if noBubble:
- data += td([])
- # Nones are left empty, rowspan should make it all fit
- data += " </tr>\n"
- return data
-
diff --git a/buildbot/buildbot/status/web/xmlrpc.py b/buildbot/buildbot/status/web/xmlrpc.py
deleted file mode 100644
index 234e7ff..0000000
--- a/buildbot/buildbot/status/web/xmlrpc.py
+++ /dev/null
@@ -1,203 +0,0 @@
-
-from twisted.python import log
-from twisted.web import xmlrpc
-from buildbot.status.builder import Results
-from itertools import count
-
-class XMLRPCServer(xmlrpc.XMLRPC):
- def __init__(self):
- xmlrpc.XMLRPC.__init__(self)
-
- def render(self, req):
- # extract the IStatus and IControl objects for later use, since they
- # come from the request object. They'll be the same each time, but
- # they aren't available until the first request arrives.
- self.status = req.site.buildbot_service.getStatus()
- self.control = req.site.buildbot_service.getControl()
- return xmlrpc.XMLRPC.render(self, req)
-
- def xmlrpc_getAllBuilders(self):
- """Return a list of all builder names
- """
- log.msg("getAllBuilders")
- return self.status.getBuilderNames()
-
- def xmlrpc_getLastBuildResults(self, builder_name):
- """Return the result of the last build for the given builder
- """
- builder = self.status.getBuilder(builder_name)
- lastbuild = builder.getBuild(-1)
- return Results[lastbuild.getResults()]
-
- def xmlrpc_getLastBuilds(self, builder_name, num_builds):
- """Return the last N completed builds for the given builder.
- 'builder_name' is the name of the builder to query
- 'num_builds' is the number of builds to return
-
- Each build is returned in the same form as xmlrpc_getAllBuildsInInterval
- """
- log.msg("getLastBuilds: %s - %d" % (builder_name, num_builds))
- builder = self.status.getBuilder(builder_name)
- all_builds = []
- for build_number in range(1, num_builds+1):
- build = builder.getBuild(-build_number)
- if not build:
- break
- if not build.isFinished():
- continue
- (build_start, build_end) = build.getTimes()
-
- ss = build.getSourceStamp()
- branch = ss.branch
- if branch is None:
- branch = ""
- try:
- revision = build.getProperty("got_revision")
- except KeyError:
- revision = ""
- revision = str(revision)
-
- answer = (builder_name,
- build.getNumber(),
- build_end,
- branch,
- revision,
- Results[build.getResults()],
- build.getText(),
- )
- all_builds.append((build_end, answer))
-
- # now we've gotten all the builds we're interested in. Sort them by
- # end time.
- all_builds.sort(lambda a,b: cmp(a[0], b[0]))
- # and remove the timestamps
- all_builds = [t[1] for t in all_builds]
-
- log.msg("ready to go: %s" % (all_builds,))
-
- return all_builds
-
-
- def xmlrpc_getAllBuildsInInterval(self, start, stop):
- """Return a list of builds that have completed after the 'start'
- timestamp and before the 'stop' timestamp. This looks at all
- Builders.
-
- The timestamps are integers, interpreted as standard unix timestamps
- (seconds since epoch).
-
- Each Build is returned as a tuple in the form::
- (buildername, buildnumber, build_end, branchname, revision,
- results, text)
-
- The buildnumber is an integer. 'build_end' is an integer (seconds
- since epoch) specifying when the build finished.
-
- The branchname is a string, which may be an empty string to indicate
- None (i.e. the default branch). The revision is a string whose
- meaning is specific to the VC system in use, and comes from the
- 'got_revision' build property. The results are expressed as a string,
- one of ('success', 'warnings', 'failure', 'exception'). The text is a
- list of short strings that ought to be joined by spaces and include
- slightly more data about the results of the build.
- """
- #log.msg("start: %s %s %s" % (start, type(start), start.__class__))
- log.msg("getAllBuildsInInterval: %d - %d" % (start, stop))
- all_builds = []
-
- for builder_name in self.status.getBuilderNames():
- builder = self.status.getBuilder(builder_name)
- for build_number in count(1):
- build = builder.getBuild(-build_number)
- if not build:
- break
- if not build.isFinished():
- continue
- (build_start, build_end) = build.getTimes()
- # in reality, builds are mostly ordered by start time. For
- # the purposes of this method, we pretend that they are
- # strictly ordered by end time, so that we can stop searching
- # when we start seeing builds that are outside the window.
- if build_end > stop:
- continue # keep looking
- if build_end < start:
- break # stop looking
-
- ss = build.getSourceStamp()
- branch = ss.branch
- if branch is None:
- branch = ""
- try:
- revision = build.getProperty("got_revision")
- except KeyError:
- revision = ""
- revision = str(revision)
-
- answer = (builder_name,
- build.getNumber(),
- build_end,
- branch,
- revision,
- Results[build.getResults()],
- build.getText(),
- )
- all_builds.append((build_end, answer))
- # we've gotten all the builds that we care about from this
- # particular builder, so now we can continue on the next builder
-
- # now we've gotten all the builds we're interested in. Sort them by
- # end time.
- all_builds.sort(lambda a,b: cmp(a[0], b[0]))
- # and remove the timestamps
- all_builds = [t[1] for t in all_builds]
-
- log.msg("ready to go: %s" % (all_builds,))
-
- return all_builds
-
- def xmlrpc_getBuild(self, builder_name, build_number):
- """Return information about a specific build.
-
- """
- builder = self.status.getBuilder(builder_name)
- build = builder.getBuild(build_number)
- info = {}
- info['builder_name'] = builder.getName()
- info['url'] = self.status.getURLForThing(build) or ''
- info['reason'] = build.getReason()
- info['slavename'] = build.getSlavename()
- info['results'] = build.getResults()
- info['text'] = build.getText()
- # Added to help out requests for build -N
- info['number'] = build.number
- ss = build.getSourceStamp()
- branch = ss.branch
- if branch is None:
- branch = ""
- info['branch'] = str(branch)
- try:
- revision = str(build.getProperty("got_revision"))
- except KeyError:
- revision = ""
- info['revision'] = str(revision)
- info['start'], info['end'] = build.getTimes()
-
- info_steps = []
- for s in build.getSteps():
- stepinfo = {}
- stepinfo['name'] = s.getName()
- stepinfo['start'], stepinfo['end'] = s.getTimes()
- stepinfo['results'] = s.getResults()
- info_steps.append(stepinfo)
- info['steps'] = info_steps
-
- info_logs = []
- for l in build.getLogs():
- loginfo = {}
- loginfo['name'] = l.getStep().getName() + "/" + l.getName()
- #loginfo['text'] = l.getText()
- loginfo['text'] = "HUGE"
- info_logs.append(loginfo)
- info['logs'] = info_logs
- return info
-
diff --git a/buildbot/buildbot/status/words.py b/buildbot/buildbot/status/words.py
deleted file mode 100644
index 0e98651..0000000
--- a/buildbot/buildbot/status/words.py
+++ /dev/null
@@ -1,875 +0,0 @@
-
-# code to deliver build status through twisted.words (instant messaging
-# protocols: irc, etc)
-
-import re, shlex
-
-from zope.interface import Interface, implements
-from twisted.internet import protocol, reactor
-from twisted.words.protocols import irc
-from twisted.python import log, failure
-from twisted.application import internet
-
-from buildbot import interfaces, util
-from buildbot import version
-from buildbot.sourcestamp import SourceStamp
-from buildbot.process.base import BuildRequest
-from buildbot.status import base
-from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE, EXCEPTION
-from buildbot.scripts.runner import ForceOptions
-
-from string import join, capitalize, lower
-
-class UsageError(ValueError):
- def __init__(self, string = "Invalid usage", *more):
- ValueError.__init__(self, string, *more)
-
-class IrcBuildRequest:
- hasStarted = False
- timer = None
-
- def __init__(self, parent):
- self.parent = parent
- self.timer = reactor.callLater(5, self.soon)
-
- def soon(self):
- del self.timer
- if not self.hasStarted:
- self.parent.send("The build has been queued, I'll give a shout"
- " when it starts")
-
- def started(self, c):
- self.hasStarted = True
- if self.timer:
- self.timer.cancel()
- del self.timer
- s = c.getStatus()
- eta = s.getETA()
- response = "build #%d forced" % s.getNumber()
- if eta is not None:
- response = "build forced [ETA %s]" % self.parent.convertTime(eta)
- self.parent.send(response)
- self.parent.send("I'll give a shout when the build finishes")
- d = s.waitUntilFinished()
- d.addCallback(self.parent.watchedBuildFinished)
-
-
-class Contact:
- """I hold the state for a single user's interaction with the buildbot.
-
- This base class provides all the basic behavior (the queries and
- responses). Subclasses for each channel type (IRC, different IM
- protocols) are expected to provide the lower-level send/receive methods.
-
- There will be one instance of me for each user who interacts personally
- with the buildbot. There will be an additional instance for each
- 'broadcast contact' (chat rooms, IRC channels as a whole).
- """
-
- def __init__(self, channel):
- self.channel = channel
- self.notify_events = {}
- self.subscribed = 0
- self.add_notification_events(channel.notify_events)
-
- silly = {
- "What happen ?": "Somebody set up us the bomb.",
- "It's You !!": ["How are you gentlemen !!",
- "All your base are belong to us.",
- "You are on the way to destruction."],
- "What you say !!": ["You have no chance to survive make your time.",
- "HA HA HA HA ...."],
- }
-
- def getCommandMethod(self, command):
- meth = getattr(self, 'command_' + command.upper(), None)
- return meth
-
- def getBuilder(self, which):
- try:
- b = self.channel.status.getBuilder(which)
- except KeyError:
- raise UsageError, "no such builder '%s'" % which
- return b
-
- def getControl(self, which):
- if not self.channel.control:
- raise UsageError("builder control is not enabled")
- try:
- bc = self.channel.control.getBuilder(which)
- except KeyError:
- raise UsageError("no such builder '%s'" % which)
- return bc
-
- def getAllBuilders(self):
- """
- @rtype: list of L{buildbot.process.builder.Builder}
- """
- names = self.channel.status.getBuilderNames(categories=self.channel.categories)
- names.sort()
- builders = [self.channel.status.getBuilder(n) for n in names]
- return builders
-
- def convertTime(self, seconds):
- if seconds < 60:
- return "%d seconds" % seconds
- minutes = int(seconds / 60)
- seconds = seconds - 60*minutes
- if minutes < 60:
- return "%dm%02ds" % (minutes, seconds)
- hours = int(minutes / 60)
- minutes = minutes - 60*hours
- return "%dh%02dm%02ds" % (hours, minutes, seconds)
-
- def doSilly(self, message):
- response = self.silly[message]
- if type(response) != type([]):
- response = [response]
- when = 0.5
- for r in response:
- reactor.callLater(when, self.send, r)
- when += 2.5
-
- def command_HELLO(self, args, who):
- self.send("yes?")
-
- def command_VERSION(self, args, who):
- self.send("buildbot-%s at your service" % version)
-
- def command_LIST(self, args, who):
- args = args.split()
- if len(args) == 0:
- raise UsageError, "try 'list builders'"
- if args[0] == 'builders':
- builders = self.getAllBuilders()
- str = "Configured builders: "
- for b in builders:
- str += b.name
- state = b.getState()[0]
- if state == 'offline':
- str += "[offline]"
- str += " "
- str.rstrip()
- self.send(str)
- return
- command_LIST.usage = "list builders - List configured builders"
-
- def command_STATUS(self, args, who):
- args = args.split()
- if len(args) == 0:
- which = "all"
- elif len(args) == 1:
- which = args[0]
- else:
- raise UsageError, "try 'status <builder>'"
- if which == "all":
- builders = self.getAllBuilders()
- for b in builders:
- self.emit_status(b.name)
- return
- self.emit_status(which)
- command_STATUS.usage = "status [<which>] - List status of a builder (or all builders)"
-
- def validate_notification_event(self, event):
- if not re.compile("^(started|finished|success|failure|exception|warnings|(success|warnings|exception|failure)To(Failure|Success|Warnings|Exception))$").match(event):
- raise UsageError("try 'notify on|off <EVENT>'")
-
- def list_notified_events(self):
- self.send( "The following events are being notified: %r" % self.notify_events.keys() )
-
- def notify_for(self, *events):
- for event in events:
- if self.notify_events.has_key(event):
- return 1
- return 0
-
- def subscribe_to_build_events(self):
- self.channel.status.subscribe(self)
- self.subscribed = 1
-
- def unsubscribe_from_build_events(self):
- self.channel.status.unsubscribe(self)
- self.subscribed = 0
-
- def add_notification_events(self, events):
- for event in events:
- self.validate_notification_event(event)
- self.notify_events[event] = 1
-
- if not self.subscribed:
- self.subscribe_to_build_events()
-
- def remove_notification_events(self, events):
- for event in events:
- self.validate_notification_event(event)
- del self.notify_events[event]
-
- if len(self.notify_events) == 0 and self.subscribed:
- self.unsubscribe_from_build_events()
-
- def remove_all_notification_events(self):
- self.notify_events = {}
-
- if self.subscribed:
- self.unsubscribe_from_build_events()
-
- def command_NOTIFY(self, args, who):
- args = args.split()
-
- if not args:
- raise UsageError("try 'notify on|off|list <EVENT>'")
- action = args.pop(0)
- events = args
-
- if action == "on":
- if not events: events = ('started','finished')
- self.add_notification_events(events)
-
- self.list_notified_events()
-
- elif action == "off":
- if events:
- self.remove_notification_events(events)
- else:
- self.remove_all_notification_events()
-
- self.list_notified_events()
-
- elif action == "list":
- self.list_notified_events()
- return
-
- else:
- raise UsageError("try 'notify on|off <EVENT>'")
-
- command_NOTIFY.usage = "notify on|off|list [<EVENT>] ... - Notify me about build events. event should be one or more of: 'started', 'finished', 'failure', 'success', 'exception' or 'xToY' (where x and Y are one of success, warnings, failure, exception, but Y is capitalized)"
-
- def command_WATCH(self, args, who):
- args = args.split()
- if len(args) != 1:
- raise UsageError("try 'watch <builder>'")
- which = args[0]
- b = self.getBuilder(which)
- builds = b.getCurrentBuilds()
- if not builds:
- self.send("there are no builds currently running")
- return
- for build in builds:
- assert not build.isFinished()
- d = build.waitUntilFinished()
- d.addCallback(self.watchedBuildFinished)
- r = "watching build %s #%d until it finishes" \
- % (which, build.getNumber())
- eta = build.getETA()
- if eta is not None:
- r += " [%s]" % self.convertTime(eta)
- r += ".."
- self.send(r)
- command_WATCH.usage = "watch <which> - announce the completion of an active build"
-
- def buildsetSubmitted(self, buildset):
- log.msg('[Contact] Buildset %s added' % (buildset))
-
- def builderAdded(self, builderName, builder):
- log.msg('[Contact] Builder %s added' % (builder))
- builder.subscribe(self)
-
- def builderChangedState(self, builderName, state):
- log.msg('[Contact] Builder %s changed state to %s' % (builderName, state))
-
- def requestSubmitted(self, brstatus):
- log.msg('[Contact] BuildRequest for %s submiitted to Builder %s' %
- (brstatus.getSourceStamp(), brstatus.builderName))
-
- def builderRemoved(self, builderName):
- log.msg('[Contact] Builder %s removed' % (builderName))
-
- def buildStarted(self, builderName, build):
- builder = build.getBuilder()
- log.msg('[Contact] Builder %r in category %s started' % (builder, builder.category))
-
- # only notify about builders we are interested in
-
- if (self.channel.categories != None and
- builder.category not in self.channel.categories):
- log.msg('Not notifying for a build in the wrong category')
- return
-
- if not self.notify_for('started'):
- log.msg('Not notifying for a build when started-notification disabled')
- return
-
- r = "build #%d of %s started" % \
- (build.getNumber(),
- builder.getName())
-
- r += " including [" + ", ".join(map(lambda c: repr(c.revision), build.getChanges())) + "]"
-
- self.send(r)
-
- def buildFinished(self, builderName, build, results):
- builder = build.getBuilder()
-
- results_descriptions = {
- SUCCESS: "Success",
- WARNINGS: "Warnings",
- FAILURE: "Failure",
- EXCEPTION: "Exception",
- }
-
- # only notify about builders we are interested in
- log.msg('[Contact] builder %r in category %s finished' % (builder, builder.category))
-
- if self.notify_for('started'):
- return
-
- if (self.channel.categories != None and
- builder.category not in self.channel.categories):
- return
-
- results = build.getResults()
-
- r = "build #%d of %s is complete: %s" % \
- (build.getNumber(),
- builder.getName(),
- results_descriptions.get(results, "??"))
- r += " [%s]" % " ".join(build.getText())
- buildurl = self.channel.status.getURLForThing(build)
- if buildurl:
- r += " Build details are at %s" % buildurl
-
- if self.notify_for('finished') or self.notify_for(lower(results_descriptions.get(results))):
- self.send(r)
- return
-
- prevBuild = build.getPreviousBuild()
- if prevBuild:
- prevResult = prevBuild.getResults()
-
- required_notification_control_string = join((lower(results_descriptions.get(prevResult)), \
- 'To', \
- capitalize(results_descriptions.get(results))), \
- '')
-
- if (self.notify_for(required_notification_control_string)):
- self.send(r)
-
- def watchedBuildFinished(self, b):
- results = {SUCCESS: "Success",
- WARNINGS: "Warnings",
- FAILURE: "Failure",
- EXCEPTION: "Exception",
- }
-
- # only notify about builders we are interested in
- builder = b.getBuilder()
- log.msg('builder %r in category %s finished' % (builder,
- builder.category))
- if (self.channel.categories != None and
- builder.category not in self.channel.categories):
- return
-
- r = "Hey! build %s #%d is complete: %s" % \
- (b.getBuilder().getName(),
- b.getNumber(),
- results.get(b.getResults(), "??"))
- r += " [%s]" % " ".join(b.getText())
- self.send(r)
- buildurl = self.channel.status.getURLForThing(b)
- if buildurl:
- self.send("Build details are at %s" % buildurl)
-
- def command_FORCE(self, args, who):
- args = shlex.split(args) # TODO: this requires python2.3 or newer
- if not args:
- raise UsageError("try 'force build WHICH <REASON>'")
- what = args.pop(0)
- if what != "build":
- raise UsageError("try 'force build WHICH <REASON>'")
- opts = ForceOptions()
- opts.parseOptions(args)
-
- which = opts['builder']
- branch = opts['branch']
- revision = opts['revision']
- reason = opts['reason']
-
- if which is None:
- raise UsageError("you must provide a Builder, "
- "try 'force build WHICH <REASON>'")
-
- # keep weird stuff out of the branch and revision strings. TODO:
- # centralize this somewhere.
- if branch and not re.match(r'^[\w\.\-\/]*$', branch):
- log.msg("bad branch '%s'" % branch)
- self.send("sorry, bad branch '%s'" % branch)
- return
- if revision and not re.match(r'^[\w\.\-\/]*$', revision):
- log.msg("bad revision '%s'" % revision)
- self.send("sorry, bad revision '%s'" % revision)
- return
-
- bc = self.getControl(which)
-
- r = "forced: by %s: %s" % (self.describeUser(who), reason)
- # TODO: maybe give certain users the ability to request builds of
- # certain branches
- s = SourceStamp(branch=branch, revision=revision)
- req = BuildRequest(r, s, which)
- try:
- bc.requestBuildSoon(req)
- except interfaces.NoSlaveError:
- self.send("sorry, I can't force a build: all slaves are offline")
- return
- ireq = IrcBuildRequest(self)
- req.subscribe(ireq.started)
-
-
- command_FORCE.usage = "force build <which> <reason> - Force a build"
-
- def command_STOP(self, args, who):
- args = args.split(None, 2)
- if len(args) < 3 or args[0] != 'build':
- raise UsageError, "try 'stop build WHICH <REASON>'"
- which = args[1]
- reason = args[2]
-
- buildercontrol = self.getControl(which)
-
- r = "stopped: by %s: %s" % (self.describeUser(who), reason)
-
- # find an in-progress build
- builderstatus = self.getBuilder(which)
- builds = builderstatus.getCurrentBuilds()
- if not builds:
- self.send("sorry, no build is currently running")
- return
- for build in builds:
- num = build.getNumber()
-
- # obtain the BuildControl object
- buildcontrol = buildercontrol.getBuild(num)
-
- # make it stop
- buildcontrol.stopBuild(r)
-
- self.send("build %d interrupted" % num)
-
- command_STOP.usage = "stop build <which> <reason> - Stop a running build"
-
- def emit_status(self, which):
- b = self.getBuilder(which)
- str = "%s: " % which
- state, builds = b.getState()
- str += state
- if state == "idle":
- last = b.getLastFinishedBuild()
- if last:
- start,finished = last.getTimes()
- str += ", last build %s ago: %s" % \
- (self.convertTime(int(util.now() - finished)), " ".join(last.getText()))
- if state == "building":
- t = []
- for build in builds:
- step = build.getCurrentStep()
- if step:
- s = "(%s)" % " ".join(step.getText())
- else:
- s = "(no current step)"
- ETA = build.getETA()
- if ETA is not None:
- s += " [ETA %s]" % self.convertTime(ETA)
- t.append(s)
- str += ", ".join(t)
- self.send(str)
-
- def emit_last(self, which):
- last = self.getBuilder(which).getLastFinishedBuild()
- if not last:
- str = "(no builds run since last restart)"
- else:
- start,finish = last.getTimes()
- str = "%s ago: " % (self.convertTime(int(util.now() - finish)))
- str += " ".join(last.getText())
- self.send("last build [%s]: %s" % (which, str))
-
- def command_LAST(self, args, who):
- args = args.split()
- if len(args) == 0:
- which = "all"
- elif len(args) == 1:
- which = args[0]
- else:
- raise UsageError, "try 'last <builder>'"
- if which == "all":
- builders = self.getAllBuilders()
- for b in builders:
- self.emit_last(b.name)
- return
- self.emit_last(which)
- command_LAST.usage = "last <which> - list last build status for builder <which>"
-
- def build_commands(self):
- commands = []
- for k in dir(self):
- if k.startswith('command_'):
- commands.append(k[8:].lower())
- commands.sort()
- return commands
-
- def command_HELP(self, args, who):
- args = args.split()
- if len(args) == 0:
- self.send("Get help on what? (try 'help <foo>', or 'commands' for a command list)")
- return
- command = args[0]
- meth = self.getCommandMethod(command)
- if not meth:
- raise UsageError, "no such command '%s'" % command
- usage = getattr(meth, 'usage', None)
- if usage:
- self.send("Usage: %s" % usage)
- else:
- self.send("No usage info for '%s'" % command)
- command_HELP.usage = "help <command> - Give help for <command>"
-
- def command_SOURCE(self, args, who):
- banner = "My source can be found at http://buildbot.net/"
- self.send(banner)
-
- def command_COMMANDS(self, args, who):
- commands = self.build_commands()
- str = "buildbot commands: " + ", ".join(commands)
- self.send(str)
- command_COMMANDS.usage = "commands - List available commands"
-
- def command_DESTROY(self, args, who):
- self.act("readies phasers")
-
- def command_DANCE(self, args, who):
- reactor.callLater(1.0, self.send, "0-<")
- reactor.callLater(3.0, self.send, "0-/")
- reactor.callLater(3.5, self.send, "0-\\")
-
- def command_EXCITED(self, args, who):
- # like 'buildbot: destroy the sun!'
- self.send("What you say!")
-
- def handleAction(self, data, user):
- # this is sent when somebody performs an action that mentions the
- # buildbot (like '/me kicks buildbot'). 'user' is the name/nick/id of
- # the person who performed the action, so if their action provokes a
- # response, they can be named.
- if not data.endswith("s buildbot"):
- return
- words = data.split()
- verb = words[-2]
- timeout = 4
- if verb == "kicks":
- response = "%s back" % verb
- timeout = 1
- else:
- response = "%s %s too" % (verb, user)
- reactor.callLater(timeout, self.act, response)
-
-class IRCContact(Contact):
- # this is the IRC-specific subclass of Contact
-
- def __init__(self, channel, dest):
- Contact.__init__(self, channel)
- # when people send us public messages ("buildbot: command"),
- # self.dest is the name of the channel ("#twisted"). When they send
- # us private messages (/msg buildbot command), self.dest is their
- # username.
- self.dest = dest
-
- def describeUser(self, user):
- if self.dest[0] == "#":
- return "IRC user <%s> on channel %s" % (user, self.dest)
- return "IRC user <%s> (privmsg)" % user
-
- # userJoined(self, user, channel)
-
- def send(self, message):
- self.channel.msg(self.dest, message.encode("ascii", "replace"))
- def act(self, action):
- self.channel.me(self.dest, action.encode("ascii", "replace"))
-
- def command_JOIN(self, args, who):
- args = args.split()
- to_join = args[0]
- self.channel.join(to_join)
- self.send("Joined %s" % to_join)
- command_JOIN.usage = "join channel - Join another channel"
-
- def command_LEAVE(self, args, who):
- args = args.split()
- to_leave = args[0]
- self.send("Buildbot has been told to leave %s" % to_leave)
- self.channel.part(to_leave)
- command_LEAVE.usage = "leave channel - Leave a channel"
-
-
- def handleMessage(self, message, who):
- # a message has arrived from 'who'. For broadcast contacts (i.e. when
- # people do an irc 'buildbot: command'), this will be a string
- # describing the sender of the message in some useful-to-log way, and
- # a single Contact may see messages from a variety of users. For
- # unicast contacts (i.e. when people do an irc '/msg buildbot
- # command'), a single Contact will only ever see messages from a
- # single user.
- message = message.lstrip()
- if self.silly.has_key(message):
- return self.doSilly(message)
-
- parts = message.split(' ', 1)
- if len(parts) == 1:
- parts = parts + ['']
- cmd, args = parts
- log.msg("irc command", cmd)
-
- meth = self.getCommandMethod(cmd)
- if not meth and message[-1] == '!':
- meth = self.command_EXCITED
-
- error = None
- try:
- if meth:
- meth(args.strip(), who)
- except UsageError, e:
- self.send(str(e))
- except:
- f = failure.Failure()
- log.err(f)
- error = "Something bad happened (see logs): %s" % f.type
-
- if error:
- try:
- self.send(error)
- except:
- log.err()
-
- #self.say(channel, "count %d" % self.counter)
- self.channel.counter += 1
-
-class IChannel(Interface):
- """I represent the buildbot's presence in a particular IM scheme.
-
- This provides the connection to the IRC server, or represents the
- buildbot's account with an IM service. Each Channel will have zero or
- more Contacts associated with it.
- """
-
-class IrcStatusBot(irc.IRCClient):
- """I represent the buildbot to an IRC server.
- """
- implements(IChannel)
-
- def __init__(self, nickname, password, channels, status, categories, notify_events):
- """
- @type nickname: string
- @param nickname: the nickname by which this bot should be known
- @type password: string
- @param password: the password to use for identifying with Nickserv
- @type channels: list of strings
- @param channels: the bot will maintain a presence in these channels
- @type status: L{buildbot.status.builder.Status}
- @param status: the build master's Status object, through which the
- bot retrieves all status information
- """
- self.nickname = nickname
- self.channels = channels
- self.password = password
- self.status = status
- self.categories = categories
- self.notify_events = notify_events
- self.counter = 0
- self.hasQuit = 0
- self.contacts = {}
-
- def addContact(self, name, contact):
- self.contacts[name] = contact
-
- def getContact(self, name):
- if name in self.contacts:
- return self.contacts[name]
- new_contact = IRCContact(self, name)
- self.contacts[name] = new_contact
- return new_contact
-
- def deleteContact(self, contact):
- name = contact.getName()
- if name in self.contacts:
- assert self.contacts[name] == contact
- del self.contacts[name]
-
- def log(self, msg):
- log.msg("%s: %s" % (self, msg))
-
-
- # the following irc.IRCClient methods are called when we have input
-
- def privmsg(self, user, channel, message):
- user = user.split('!', 1)[0] # rest is ~user@hostname
- # channel is '#twisted' or 'buildbot' (for private messages)
- channel = channel.lower()
- #print "privmsg:", user, channel, message
- if channel == self.nickname:
- # private message
- contact = self.getContact(user)
- contact.handleMessage(message, user)
- return
- # else it's a broadcast message, maybe for us, maybe not. 'channel'
- # is '#twisted' or the like.
- contact = self.getContact(channel)
- if message.startswith("%s:" % self.nickname) or message.startswith("%s," % self.nickname):
- message = message[len("%s:" % self.nickname):]
- contact.handleMessage(message, user)
- # to track users comings and goings, add code here
-
- def action(self, user, channel, data):
- #log.msg("action: %s,%s,%s" % (user, channel, data))
- user = user.split('!', 1)[0] # rest is ~user@hostname
- # somebody did an action (/me actions) in the broadcast channel
- contact = self.getContact(channel)
- if "buildbot" in data:
- contact.handleAction(data, user)
-
-
-
- def signedOn(self):
- if self.password:
- self.msg("Nickserv", "IDENTIFY " + self.password)
- for c in self.channels:
- self.join(c)
-
- def joined(self, channel):
- self.log("I have joined %s" % (channel,))
- def left(self, channel):
- self.log("I have left %s" % (channel,))
- def kickedFrom(self, channel, kicker, message):
- self.log("I have been kicked from %s by %s: %s" % (channel,
- kicker,
- message))
-
- # we can using the following irc.IRCClient methods to send output. Most
- # of these are used by the IRCContact class.
- #
- # self.say(channel, message) # broadcast
- # self.msg(user, message) # unicast
- # self.me(channel, action) # send action
- # self.away(message='')
- # self.quit(message='')
-
-class ThrottledClientFactory(protocol.ClientFactory):
- lostDelay = 2
- failedDelay = 60
- def clientConnectionLost(self, connector, reason):
- reactor.callLater(self.lostDelay, connector.connect)
- def clientConnectionFailed(self, connector, reason):
- reactor.callLater(self.failedDelay, connector.connect)
-
-class IrcStatusFactory(ThrottledClientFactory):
- protocol = IrcStatusBot
-
- status = None
- control = None
- shuttingDown = False
- p = None
-
- def __init__(self, nickname, password, channels, categories, notify_events):
- #ThrottledClientFactory.__init__(self) # doesn't exist
- self.status = None
- self.nickname = nickname
- self.password = password
- self.channels = channels
- self.categories = categories
- self.notify_events = notify_events
-
- def __getstate__(self):
- d = self.__dict__.copy()
- del d['p']
- return d
-
- def shutdown(self):
- self.shuttingDown = True
- if self.p:
- self.p.quit("buildmaster reconfigured: bot disconnecting")
-
- def buildProtocol(self, address):
- p = self.protocol(self.nickname, self.password,
- self.channels, self.status,
- self.categories, self.notify_events)
- p.factory = self
- p.status = self.status
- p.control = self.control
- self.p = p
- return p
-
- # TODO: I think a shutdown that occurs while the connection is being
- # established will make this explode
-
- def clientConnectionLost(self, connector, reason):
- if self.shuttingDown:
- log.msg("not scheduling reconnection attempt")
- return
- ThrottledClientFactory.clientConnectionLost(self, connector, reason)
-
- def clientConnectionFailed(self, connector, reason):
- if self.shuttingDown:
- log.msg("not scheduling reconnection attempt")
- return
- ThrottledClientFactory.clientConnectionFailed(self, connector, reason)
-
-
-class IRC(base.StatusReceiverMultiService):
- """I am an IRC bot which can be queried for status information. I
- connect to a single IRC server and am known by a single nickname on that
- server, however I can join multiple channels."""
-
- compare_attrs = ["host", "port", "nick", "password",
- "channels", "allowForce",
- "categories"]
-
- def __init__(self, host, nick, channels, port=6667, allowForce=True,
- categories=None, password=None, notify_events={}):
- base.StatusReceiverMultiService.__init__(self)
-
- assert allowForce in (True, False) # TODO: implement others
-
- # need to stash these so we can detect changes later
- self.host = host
- self.port = port
- self.nick = nick
- self.channels = channels
- self.password = password
- self.allowForce = allowForce
- self.categories = categories
- self.notify_events = notify_events
-
- # need to stash the factory so we can give it the status object
- self.f = IrcStatusFactory(self.nick, self.password,
- self.channels, self.categories, self.notify_events)
-
- c = internet.TCPClient(host, port, self.f)
- c.setServiceParent(self)
-
- def setServiceParent(self, parent):
- base.StatusReceiverMultiService.setServiceParent(self, parent)
- self.f.status = parent.getStatus()
- if self.allowForce:
- self.f.control = interfaces.IControl(parent)
-
- def stopService(self):
- # make sure the factory will stop reconnecting
- self.f.shutdown()
- return base.StatusReceiverMultiService.stopService(self)
-
-
-## buildbot: list builders
-# buildbot: watch quick
-# print notification when current build in 'quick' finishes
-## buildbot: status
-## buildbot: status full-2.3
-## building, not, % complete, ETA
-## buildbot: force build full-2.3 "reason"
diff --git a/buildbot/buildbot/steps/__init__.py b/buildbot/buildbot/steps/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/buildbot/buildbot/steps/__init__.py
+++ /dev/null
diff --git a/buildbot/buildbot/steps/dummy.py b/buildbot/buildbot/steps/dummy.py
deleted file mode 100644
index 9ddfdce..0000000
--- a/buildbot/buildbot/steps/dummy.py
+++ /dev/null
@@ -1,100 +0,0 @@
-
-from twisted.internet import reactor
-from buildbot.process.buildstep import BuildStep, LoggingBuildStep
-from buildbot.process.buildstep import LoggedRemoteCommand
-from buildbot.status.builder import SUCCESS, FAILURE
-
-# these classes are used internally by buildbot unit tests
-
-class Dummy(BuildStep):
- """I am a dummy no-op step, which runs entirely on the master, and simply
- waits 5 seconds before finishing with SUCCESS
- """
-
- haltOnFailure = True
- flunkOnFailure = True
- name = "dummy"
-
- def __init__(self, timeout=5, **kwargs):
- """
- @type timeout: int
- @param timeout: the number of seconds to delay before completing
- """
- BuildStep.__init__(self, **kwargs)
- self.addFactoryArguments(timeout=timeout)
- self.timeout = timeout
- self.timer = None
-
- def start(self):
- self.step_status.setText(["delay", "%s secs" % self.timeout])
- self.timer = reactor.callLater(self.timeout, self.done)
-
- def interrupt(self, reason):
- if self.timer:
- self.timer.cancel()
- self.timer = None
- self.step_status.setText(["delay", "interrupted"])
- self.finished(FAILURE)
-
- def done(self):
- self.finished(SUCCESS)
-
-class FailingDummy(Dummy):
- """I am a dummy no-op step that 'runs' master-side and finishes (with a
- FAILURE status) after 5 seconds."""
-
- name = "failing dummy"
-
- def start(self):
- self.step_status.setText(["boom", "%s secs" % self.timeout])
- self.timer = reactor.callLater(self.timeout, self.done)
-
- def done(self):
- self.finished(FAILURE)
-
-class RemoteDummy(LoggingBuildStep):
- """I am a dummy no-op step that runs on the remote side and
- simply waits 5 seconds before completing with success.
- See L{buildbot.slave.commands.DummyCommand}
- """
-
- haltOnFailure = True
- flunkOnFailure = True
- name = "remote dummy"
-
- def __init__(self, timeout=5, **kwargs):
- """
- @type timeout: int
- @param timeout: the number of seconds to delay
- """
- LoggingBuildStep.__init__(self, **kwargs)
- self.addFactoryArguments(timeout=timeout)
- self.timeout = timeout
- self.description = ["remote", "delay", "%s secs" % timeout]
-
- def describe(self, done=False):
- return self.description
-
- def start(self):
- args = {'timeout': self.timeout}
- cmd = LoggedRemoteCommand("dummy", args)
- self.startCommand(cmd)
-
-class Wait(LoggingBuildStep):
- """I start a command on the slave that waits for the unit test to
- tell it when to finish.
- """
-
- name = "wait"
- def __init__(self, handle, **kwargs):
- LoggingBuildStep.__init__(self, **kwargs)
- self.addFactoryArguments(handle=handle)
- self.handle = handle
-
- def describe(self, done=False):
- return ["wait: %s" % self.handle]
-
- def start(self):
- args = {'handle': (self.handle, self.build.reason)}
- cmd = LoggedRemoteCommand("dummy.wait", args)
- self.startCommand(cmd)
diff --git a/buildbot/buildbot/steps/master.py b/buildbot/buildbot/steps/master.py
deleted file mode 100644
index da8a664..0000000
--- a/buildbot/buildbot/steps/master.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import os, types
-from twisted.python import log, failure, runtime
-from twisted.internet import reactor, defer, task
-from buildbot.process.buildstep import RemoteCommand, BuildStep
-from buildbot.process.buildstep import SUCCESS, FAILURE
-from twisted.internet.protocol import ProcessProtocol
-
-class MasterShellCommand(BuildStep):
- """
- Run a shell command locally - on the buildmaster. The shell command
- COMMAND is specified just as for a RemoteShellCommand. Note that extra
- logfiles are not sopported.
- """
- name='MasterShellCommand'
- description='Running'
- descriptionDone='Ran'
-
- def __init__(self, command, **kwargs):
- BuildStep.__init__(self, **kwargs)
- self.addFactoryArguments(command=command)
- self.command=command
-
- class LocalPP(ProcessProtocol):
- def __init__(self, step):
- self.step = step
-
- def outReceived(self, data):
- self.step.stdio_log.addStdout(data)
-
- def errReceived(self, data):
- self.step.stdio_log.addStderr(data)
-
- def processEnded(self, status_object):
- self.step.stdio_log.addHeader("exit status %d\n" % status_object.value.exitCode)
- self.step.processEnded(status_object)
-
- def start(self):
- # set up argv
- if type(self.command) in types.StringTypes:
- if runtime.platformType == 'win32':
- argv = os.environ['COMSPEC'].split() # allow %COMSPEC% to have args
- if '/c' not in argv: argv += ['/c']
- argv += [self.command]
- else:
- # for posix, use /bin/sh. for other non-posix, well, doesn't
- # hurt to try
- argv = ['/bin/sh', '-c', self.command]
- else:
- if runtime.platformType == 'win32':
- argv = os.environ['COMSPEC'].split() # allow %COMSPEC% to have args
- if '/c' not in argv: argv += ['/c']
- argv += list(self.command)
- else:
- argv = self.command
-
- self.stdio_log = stdio_log = self.addLog("stdio")
-
- if type(self.command) in types.StringTypes:
- stdio_log.addHeader(self.command.strip() + "\n\n")
- else:
- stdio_log.addHeader(" ".join(self.command) + "\n\n")
- stdio_log.addHeader("** RUNNING ON BUILDMASTER **\n")
- stdio_log.addHeader(" in dir %s\n" % os.getcwd())
- stdio_log.addHeader(" argv: %s\n" % (argv,))
-
- # TODO add a timeout?
- proc = reactor.spawnProcess(self.LocalPP(self), argv[0], argv)
- # (the LocalPP object will call processEnded for us)
-
- def processEnded(self, status_object):
- if status_object.value.exitCode != 0:
- self.step_status.setText(["failed (%d)" % status_object.value.exitCode])
- self.finished(FAILURE)
- else:
- self.step_status.setText(["succeeded"])
- self.finished(SUCCESS)
diff --git a/buildbot/buildbot/steps/maxq.py b/buildbot/buildbot/steps/maxq.py
deleted file mode 100644
index 23538a5..0000000
--- a/buildbot/buildbot/steps/maxq.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from buildbot.steps.shell import ShellCommand
-from buildbot.status.builder import Event, SUCCESS, FAILURE
-
-class MaxQ(ShellCommand):
- flunkOnFailure = True
- name = "maxq"
-
- def __init__(self, testdir=None, **kwargs):
- if not testdir:
- raise TypeError("please pass testdir")
- kwargs['command'] = 'run_maxq.py %s' % (testdir,)
- ShellCommand.__init__(self, **kwargs)
- self.addFactoryArguments(testdir=testdir)
-
- def startStatus(self):
- evt = Event("yellow", ['running', 'maxq', 'tests'],
- files={'log': self.log})
- self.setCurrentActivity(evt)
-
-
- def finished(self, rc):
- self.failures = 0
- if rc:
- self.failures = 1
- output = self.log.getAll()
- self.failures += output.count('\nTEST FAILURE:')
-
- result = (SUCCESS, ['maxq'])
-
- if self.failures:
- result = (FAILURE, [str(self.failures), 'maxq', 'failures'])
-
- return self.stepComplete(result)
-
- def finishStatus(self, result):
- if self.failures:
- text = ["maxq", "failed"]
- else:
- text = ['maxq', 'tests']
- self.updateCurrentActivity(text=text)
- self.finishStatusSummary()
- self.finishCurrentActivity()
-
-
diff --git a/buildbot/buildbot/steps/package/__init__.py b/buildbot/buildbot/steps/package/__init__.py
deleted file mode 100644
index d81f066..0000000
--- a/buildbot/buildbot/steps/package/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# Steve 'Ashcrow' Milner <smilner+buildbot@redhat.com>
-#
-# This software may be freely redistributed under the terms of the GNU
-# general public license.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-"""
-Steps specific to package formats.
-"""
diff --git a/buildbot/buildbot/steps/package/rpm/__init__.py b/buildbot/buildbot/steps/package/rpm/__init__.py
deleted file mode 100644
index 0d7be6d..0000000
--- a/buildbot/buildbot/steps/package/rpm/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Steve 'Ashcrow' Milner <smilner+buildbot@redhat.com>
-#
-# This software may be freely redistributed under the terms of the GNU
-# general public license.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-"""
-Steps specific to the rpm format.
-"""
-
-from rpmbuild import RpmBuild
-from rpmspec import RpmSpec
-from rpmlint import RpmLint
diff --git a/buildbot/buildbot/steps/package/rpm/rpmbuild.py b/buildbot/buildbot/steps/package/rpm/rpmbuild.py
deleted file mode 100644
index 38bce85..0000000
--- a/buildbot/buildbot/steps/package/rpm/rpmbuild.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# Dan Radez <dradez+buildbot@redhat.com>
-# Steve 'Ashcrow' Milner <smilner+buildbot@redhat.com>
-#
-# This software may be freely redistributed under the terms of the GNU
-# general public license.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-"""
-RPM Building steps.
-"""
-
-from buildbot.steps.shell import ShellCommand
-from buildbot.process.buildstep import RemoteShellCommand
-
-
-class RpmBuild(ShellCommand):
- """
- Build and RPM based on pased spec filename
- """
-
- import os.path
-
- name = "rpmbuilder"
- haltOnFailure = 1
- flunkOnFailure = 1
- description = ["RPMBUILD"]
- descriptionDone = ["RPMBUILD"]
-
- def __init__(self,
- specfile=None,
- topdir='`pwd`',
- builddir='`pwd`',
- rpmdir='`pwd`',
- sourcedir='`pwd`',
- specdir='`pwd`',
- srcrpmdir='`pwd`',
- dist='.el5',
- autoRelease=False,
- vcsRevision=False,
- **kwargs):
- """
- Creates the RpmBuild object.
-
- @type specfile: str
- @param specfile: the name of the spec file for the rpmbuild
- @type topdir: str
- @param topdir: the top directory for rpm building.
- @type builddir: str
- @param builddir: the directory to use for building
- @type rpmdir: str
- @param rpmdir: the directory to dump the rpms into
- @type sourcedir: str
- @param sourcedir: the directory that houses source code
- @type srcrpmdir: str
- @param srcrpmdir: the directory to dump source rpms into
- @type dist: str
- @param dist: the distribution to build for
- @type autoRelease: boolean
- @param autoRelease: if the auto release mechanics should be used
- @type vcsRevision: boolean
- @param vcsRevision: if the vcs revision mechanics should be used
- @type kwargs: dict
- @param kwargs: All further keyword arguments.
- """
- ShellCommand.__init__(self, **kwargs)
- self.addFactoryArguments(topdir=topdir,
- builddir=builddir,
- rpmdir=rpmdir,
- sourcedir=sourcedir,
- specdir=specdir,
- srcrpmdir=srcrpmdir,
- specfile=specfile,
- dist=dist,
- autoRelease=autoRelease,
- vcsRevision=vcsRevision)
- self.rpmbuild = (
- 'rpmbuild --define "_topdir %s" --define "_builddir %s"'
- ' --define "_rpmdir %s" --define "_sourcedir %s"'
- ' --define "_specdir %s" --define "_srcrpmdir %s"'
- ' --define "dist %s"' % (topdir, builddir, rpmdir, sourcedir,
- specdir, srcrpmdir, dist))
- self.specfile = specfile
- self.autoRelease = autoRelease
- self.vcsRevision = vcsRevision
-
- def start(self):
- """
- Buildbot Calls Me when it's time to start
- """
- if self.autoRelease:
- relfile = '%s.release' % (
- self.os.path.basename(self.specfile).split('.')[0])
- try:
- rfile = open(relfile, 'r')
- rel = int(rfile.readline().strip())
- rfile.close()
- except:
- rel = 0
- self.rpmbuild = self.rpmbuild + ' --define "_release %s"' % rel
- rfile = open(relfile, 'w')
- rfile.write(str(rel+1))
- rfile.close()
-
- if self.vcsRevision:
- self.rpmbuild = self.rpmbuild + ' --define "_revision %s"' % \
- self.getProperty('got_revision')
-
- self.rpmbuild = self.rpmbuild + ' -ba %s' % self.specfile
-
- self.command = ['bash', '-c', self.rpmbuild]
-
- # create the actual RemoteShellCommand instance now
- kwargs = self.remote_kwargs
- kwargs['command'] = self.command
- cmd = RemoteShellCommand(**kwargs)
- self.setupEnvironment(cmd)
- self.checkForOldSlaveAndLogfiles()
- self.startCommand(cmd)
-
- def createSummary(self, log):
- """
- Create nice summary logs.
-
- @param log: The log to create summary off of.
- """
- rpm_prefixes = ['Provides:', 'Requires(rpmlib):', 'Requires:',
- 'Checking for unpackaged', 'Wrote:',
- 'Executing(%', '+ ']
- rpm_err_pfx = [' ', 'RPM build errors:', 'error: ']
-
- rpmcmdlog = []
- rpmerrors = []
-
- for line in log.readlines():
- for pfx in rpm_prefixes:
- if pfx in line:
- rpmcmdlog.append(line)
- for err in rpm_err_pfx:
- if err in line:
- rpmerrors.append(line)
- self.addCompleteLog('RPM Command Log', "".join(rpmcmdlog))
- self.addCompleteLog('RPM Errors', "".join(rpmerrors))
diff --git a/buildbot/buildbot/steps/package/rpm/rpmlint.py b/buildbot/buildbot/steps/package/rpm/rpmlint.py
deleted file mode 100644
index 444a44a..0000000
--- a/buildbot/buildbot/steps/package/rpm/rpmlint.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Steve 'Ashcrow' Milner <smilner+buildbot@redhat.com>
-#
-# This software may be freely redistributed under the terms of the GNU
-# general public license.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-"""
-Steps and objects related to rpmlint.
-"""
-
-from buildbot.steps.shell import Test
-
-
-class RpmLint(Test):
- """
- Rpmlint build step.
- """
-
- description = ["Checking for RPM/SPEC issues"]
- descriptionDone = ["Finished checking RPM/SPEC issues"]
-
- def __init__(self, fileloc="*rpm", **kwargs):
- """
- Create the Rpmlint object.
-
- @type fileloc: str
- @param fileloc: Location glob of the specs or rpms.
- @type kwargs: dict
- @param fileloc: all other keyword arguments.
- """
- Test.__init__(self, **kwargs)
- self.command = ["/usr/bin/rpmlint", "-i"]
- self.command.append(fileloc)
-
- def createSummary(self, log):
- """
- Create nice summary logs.
-
- @param log: log to create summary off of.
- """
- warnings = []
- errors = []
- for line in log.readlines():
- if ' W: ' in line:
- warnings.append(line)
- elif ' E: ' in line:
- errors.append(line)
- self.addCompleteLog('Rpmlint Warnings', "".join(warnings))
- self.addCompleteLog('Rpmlint Errors', "".join(errors))
diff --git a/buildbot/buildbot/steps/package/rpm/rpmspec.py b/buildbot/buildbot/steps/package/rpm/rpmspec.py
deleted file mode 100644
index 6aa5254..0000000
--- a/buildbot/buildbot/steps/package/rpm/rpmspec.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Dan Radez <dradez+buildbot@redhat.com>
-# Steve 'Ashcrow' Milner <smilner+buildbot@redhat.com>
-#
-# This software may be freely redistributed under the terms of the GNU
-# general public license.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-"""
-library to populate parameters from and rpmspec file into a memory structure
-"""
-
-
-from buildbot.steps.shell import ShellCommand
-
-
-class RpmSpec(ShellCommand):
- """
- read parameters out of an rpm spec file
- """
-
- import re
- import types
-
- #initialize spec info vars and get them from the spec file
- n_regex = re.compile('^Name:[ ]*([^\s]*)')
- v_regex = re.compile('^Version:[ ]*([0-9\.]*)')
-
- def __init__(self, specfile=None, **kwargs):
- """
- Creates the RpmSpec object.
-
- @type specfile: str
- @param specfile: the name of the specfile to get the package
- name and version from
- @type kwargs: dict
- @param kwargs: All further keyword arguments.
- """
- self.specfile = specfile
- self._pkg_name = None
- self._pkg_version = None
- self._loaded = False
-
- def load(self):
- """
- call this function after the file exists to populate properties
- """
- # If we are given a string, open it up else assume it's something we
- # can call read on.
- if type(self.specfile) == self.types.StringType:
- f = open(self.specfile, 'r')
- else:
- f = self.specfile
-
- for line in f:
- if self.v_regex.match(line):
- self._pkg_version = self.v_regex.match(line).group(1)
- if self.n_regex.match(line):
- self._pkg_name = self.n_regex.match(line).group(1)
- f.close()
- self._loaded = True
-
- # Read-only properties
- loaded = property(lambda self: self._loaded)
- pkg_name = property(lambda self: self._pkg_name)
- pkg_version = property(lambda self: self._pkg_version)
diff --git a/buildbot/buildbot/steps/python.py b/buildbot/buildbot/steps/python.py
deleted file mode 100644
index 7f87aa7..0000000
--- a/buildbot/buildbot/steps/python.py
+++ /dev/null
@@ -1,187 +0,0 @@
-
-from buildbot.status.builder import SUCCESS, FAILURE, WARNINGS
-from buildbot.steps.shell import ShellCommand
-import re
-
-try:
- import cStringIO
- StringIO = cStringIO.StringIO
-except ImportError:
- from StringIO import StringIO
-
-
-class BuildEPYDoc(ShellCommand):
- name = "epydoc"
- command = ["make", "epydocs"]
- description = ["building", "epydocs"]
- descriptionDone = ["epydoc"]
-
- def createSummary(self, log):
- import_errors = 0
- warnings = 0
- errors = 0
-
- for line in StringIO(log.getText()):
- if line.startswith("Error importing "):
- import_errors += 1
- if line.find("Warning: ") != -1:
- warnings += 1
- if line.find("Error: ") != -1:
- errors += 1
-
- self.descriptionDone = self.descriptionDone[:]
- if import_errors:
- self.descriptionDone.append("ierr=%d" % import_errors)
- if warnings:
- self.descriptionDone.append("warn=%d" % warnings)
- if errors:
- self.descriptionDone.append("err=%d" % errors)
-
- self.import_errors = import_errors
- self.warnings = warnings
- self.errors = errors
-
- def evaluateCommand(self, cmd):
- if cmd.rc != 0:
- return FAILURE
- if self.warnings or self.errors:
- return WARNINGS
- return SUCCESS
-
-
-class PyFlakes(ShellCommand):
- name = "pyflakes"
- command = ["make", "pyflakes"]
- description = ["running", "pyflakes"]
- descriptionDone = ["pyflakes"]
- flunkOnFailure = False
- flunkingIssues = ["undefined"] # any pyflakes lines like this cause FAILURE
-
- MESSAGES = ("unused", "undefined", "redefs", "import*", "misc")
-
- def createSummary(self, log):
- counts = {}
- summaries = {}
- for m in self.MESSAGES:
- counts[m] = 0
- summaries[m] = []
-
- first = True
- for line in StringIO(log.getText()).readlines():
- # the first few lines might contain echoed commands from a 'make
- # pyflakes' step, so don't count these as warnings. Stop ignoring
- # the initial lines as soon as we see one with a colon.
- if first:
- if line.find(":") != -1:
- # there's the colon, this is the first real line
- first = False
- # fall through and parse the line
- else:
- # skip this line, keep skipping non-colon lines
- continue
- if line.find("imported but unused") != -1:
- m = "unused"
- elif line.find("*' used; unable to detect undefined names") != -1:
- m = "import*"
- elif line.find("undefined name") != -1:
- m = "undefined"
- elif line.find("redefinition of unused") != -1:
- m = "redefs"
- else:
- m = "misc"
- summaries[m].append(line)
- counts[m] += 1
-
- self.descriptionDone = self.descriptionDone[:]
- for m in self.MESSAGES:
- if counts[m]:
- self.descriptionDone.append("%s=%d" % (m, counts[m]))
- self.addCompleteLog(m, "".join(summaries[m]))
- self.setProperty("pyflakes-%s" % m, counts[m], "pyflakes")
- self.setProperty("pyflakes-total", sum(counts.values()), "pyflakes")
-
-
- def evaluateCommand(self, cmd):
- if cmd.rc != 0:
- return FAILURE
- for m in self.flunkingIssues:
- if self.getProperty("pyflakes-%s" % m):
- return FAILURE
- if self.getProperty("pyflakes-total"):
- return WARNINGS
- return SUCCESS
-
-class PyLint(ShellCommand):
- '''A command that knows about pylint output.
- It's a good idea to add --output-format=parseable to your
- command, since it includes the filename in the message.
- '''
- name = "pylint"
- description = ["running", "pylint"]
- descriptionDone = ["pylint"]
-
- # Using the default text output, the message format is :
- # MESSAGE_TYPE: LINE_NUM:[OBJECT:] MESSAGE
- # with --output-format=parseable it is: (the outer brackets are literal)
- # FILE_NAME:LINE_NUM: [MESSAGE_TYPE[, OBJECT]] MESSAGE
- # message type consists of the type char and 4 digits
- # The message types:
-
- MESSAGES = {
- 'C': "convention", # for programming standard violation
- 'R': "refactor", # for bad code smell
- 'W': "warning", # for python specific problems
- 'E': "error", # for much probably bugs in the code
- 'F': "fatal", # error prevented pylint from further processing.
- 'I': "info",
- }
-
- flunkingIssues = ["F", "E"] # msg categories that cause FAILURE
-
- _re_groupname = 'errtype'
- _msgtypes_re_str = '(?P<%s>[%s])' % (_re_groupname, ''.join(MESSAGES.keys()))
- _default_line_re = re.compile(r'%s\d{4}: *\d+:.+' % _msgtypes_re_str)
- _parseable_line_re = re.compile(r'[^:]+:\d+: \[%s\d{4}[,\]] .+' % _msgtypes_re_str)
-
- def createSummary(self, log):
- counts = {}
- summaries = {}
- for m in self.MESSAGES:
- counts[m] = 0
- summaries[m] = []
-
- line_re = None # decide after first match
- for line in StringIO(log.getText()).readlines():
- if not line_re:
- # need to test both and then decide on one
- if self._parseable_line_re.match(line):
- line_re = self._parseable_line_re
- elif self._default_line_re.match(line):
- line_re = self._default_line_re
- else: # no match yet
- continue
- mo = line_re.match(line)
- if mo:
- msgtype = mo.group(self._re_groupname)
- assert msgtype in self.MESSAGES
- summaries[msgtype].append(line)
- counts[msgtype] += 1
-
- self.descriptionDone = self.descriptionDone[:]
- for msg, fullmsg in self.MESSAGES.items():
- if counts[msg]:
- self.descriptionDone.append("%s=%d" % (fullmsg, counts[msg]))
- self.addCompleteLog(fullmsg, "".join(summaries[msg]))
- self.setProperty("pylint-%s" % fullmsg, counts[msg])
- self.setProperty("pylint-total", sum(counts.values()))
-
- def evaluateCommand(self, cmd):
- if cmd.rc != 0:
- return FAILURE
- for msg in self.flunkingIssues:
- if self.getProperty("pylint-%s" % self.MESSAGES[msg]):
- return FAILURE
- if self.getProperty("pylint-total"):
- return WARNINGS
- return SUCCESS
-
diff --git a/buildbot/buildbot/steps/python_twisted.py b/buildbot/buildbot/steps/python_twisted.py
deleted file mode 100644
index d0ed5b0..0000000
--- a/buildbot/buildbot/steps/python_twisted.py
+++ /dev/null
@@ -1,804 +0,0 @@
-# -*- test-case-name: buildbot.test.test_twisted -*-
-
-from twisted.python import log
-
-from buildbot.status import builder
-from buildbot.status.builder import SUCCESS, FAILURE, WARNINGS, SKIPPED
-from buildbot.process.buildstep import LogLineObserver, OutputProgressObserver
-from buildbot.process.buildstep import RemoteShellCommand
-from buildbot.steps.shell import ShellCommand
-
-try:
- import cStringIO
- StringIO = cStringIO
-except ImportError:
- import StringIO
-import re
-
-# BuildSteps that are specific to the Twisted source tree
-
-class HLint(ShellCommand):
- """I run a 'lint' checker over a set of .xhtml files. Any deviations
- from recommended style is flagged and put in the output log.
-
- This step looks at .changes in the parent Build to extract a list of
- Lore XHTML files to check."""
-
- name = "hlint"
- description = ["running", "hlint"]
- descriptionDone = ["hlint"]
- warnOnWarnings = True
- warnOnFailure = True
- # TODO: track time, but not output
- warnings = 0
-
- def __init__(self, python=None, **kwargs):
- ShellCommand.__init__(self, **kwargs)
- self.addFactoryArguments(python=python)
- self.python = python
-
- def start(self):
- # create the command
- htmlFiles = {}
- for f in self.build.allFiles():
- if f.endswith(".xhtml") and not f.startswith("sandbox/"):
- htmlFiles[f] = 1
- # remove duplicates
- hlintTargets = htmlFiles.keys()
- hlintTargets.sort()
- if not hlintTargets:
- return SKIPPED
- self.hlintFiles = hlintTargets
- c = []
- if self.python:
- c.append(self.python)
- c += ["bin/lore", "-p", "--output", "lint"] + self.hlintFiles
- self.setCommand(c)
-
- # add an extra log file to show the .html files we're checking
- self.addCompleteLog("files", "\n".join(self.hlintFiles)+"\n")
-
- ShellCommand.start(self)
-
- def commandComplete(self, cmd):
- # TODO: remove the 'files' file (a list of .xhtml files that were
- # submitted to hlint) because it is available in the logfile and
- # mostly exists to give the user an idea of how long the step will
- # take anyway).
- lines = cmd.logs['stdio'].getText().split("\n")
- warningLines = filter(lambda line:':' in line, lines)
- if warningLines:
- self.addCompleteLog("warnings", "".join(warningLines))
- warnings = len(warningLines)
- self.warnings = warnings
-
- def evaluateCommand(self, cmd):
- # warnings are in stdout, rc is always 0, unless the tools break
- if cmd.rc != 0:
- return FAILURE
- if self.warnings:
- return WARNINGS
- return SUCCESS
-
- def getText2(self, cmd, results):
- if cmd.rc != 0:
- return ["hlint"]
- return ["%d hlin%s" % (self.warnings,
- self.warnings == 1 and 't' or 'ts')]
-
-def countFailedTests(output):
- # start scanning 10kb from the end, because there might be a few kb of
- # import exception tracebacks between the total/time line and the errors
- # line
- chunk = output[-10000:]
- lines = chunk.split("\n")
- lines.pop() # blank line at end
- # lines[-3] is "Ran NN tests in 0.242s"
- # lines[-2] is blank
- # lines[-1] is 'OK' or 'FAILED (failures=1, errors=12)'
- # or 'FAILED (failures=1)'
- # or "PASSED (skips=N, successes=N)" (for Twisted-2.0)
- # there might be other lines dumped here. Scan all the lines.
- res = {'total': None,
- 'failures': 0,
- 'errors': 0,
- 'skips': 0,
- 'expectedFailures': 0,
- 'unexpectedSuccesses': 0,
- }
- for l in lines:
- out = re.search(r'Ran (\d+) tests', l)
- if out:
- res['total'] = int(out.group(1))
- if (l.startswith("OK") or
- l.startswith("FAILED ") or
- l.startswith("PASSED")):
- # the extra space on FAILED_ is to distinguish the overall
- # status from an individual test which failed. The lack of a
- # space on the OK is because it may be printed without any
- # additional text (if there are no skips,etc)
- out = re.search(r'failures=(\d+)', l)
- if out: res['failures'] = int(out.group(1))
- out = re.search(r'errors=(\d+)', l)
- if out: res['errors'] = int(out.group(1))
- out = re.search(r'skips=(\d+)', l)
- if out: res['skips'] = int(out.group(1))
- out = re.search(r'expectedFailures=(\d+)', l)
- if out: res['expectedFailures'] = int(out.group(1))
- out = re.search(r'unexpectedSuccesses=(\d+)', l)
- if out: res['unexpectedSuccesses'] = int(out.group(1))
- # successes= is a Twisted-2.0 addition, and is not currently used
- out = re.search(r'successes=(\d+)', l)
- if out: res['successes'] = int(out.group(1))
-
- return res
-
-
-class TrialTestCaseCounter(LogLineObserver):
- _line_re = re.compile(r'^(?:Doctest: )?([\w\.]+) \.\.\. \[([^\]]+)\]$')
- numTests = 0
- finished = False
-
- def outLineReceived(self, line):
- # different versions of Twisted emit different per-test lines with
- # the bwverbose reporter.
- # 2.0.0: testSlave (buildbot.test.test_runner.Create) ... [OK]
- # 2.1.0: buildbot.test.test_runner.Create.testSlave ... [OK]
- # 2.4.0: buildbot.test.test_runner.Create.testSlave ... [OK]
- # Let's just handle the most recent version, since it's the easiest.
- # Note that doctests create lines line this:
- # Doctest: viff.field.GF ... [OK]
-
- if self.finished:
- return
- if line.startswith("=" * 40):
- self.finished = True
- return
-
- m = self._line_re.search(line.strip())
- if m:
- testname, result = m.groups()
- self.numTests += 1
- self.step.setProgress('tests', self.numTests)
-
-
-UNSPECIFIED=() # since None is a valid choice
-
-class Trial(ShellCommand):
- """I run a unit test suite using 'trial', a unittest-like testing
- framework that comes with Twisted. Trial is used to implement Twisted's
- own unit tests, and is the unittest-framework of choice for many projects
- that use Twisted internally.
-
- Projects that use trial typically have all their test cases in a 'test'
- subdirectory of their top-level library directory. I.e. for my package
- 'petmail', the tests are in 'petmail/test/test_*.py'. More complicated
- packages (like Twisted itself) may have multiple test directories, like
- 'twisted/test/test_*.py' for the core functionality and
- 'twisted/mail/test/test_*.py' for the email-specific tests.
-
- To run trial tests, you run the 'trial' executable and tell it where the
- test cases are located. The most common way of doing this is with a
- module name. For petmail, I would run 'trial petmail.test' and it would
- locate all the test_*.py files under petmail/test/, running every test
- case it could find in them. Unlike the unittest.py that comes with
- Python, you do not run the test_foo.py as a script; you always let trial
- do the importing and running. The 'tests' parameter controls which tests
- trial will run: it can be a string or a list of strings.
-
- To find these test cases, you must set a PYTHONPATH that allows something
- like 'import petmail.test' to work. For packages that don't use a
- separate top-level 'lib' directory, PYTHONPATH=. will work, and will use
- the test cases (and the code they are testing) in-place.
- PYTHONPATH=build/lib or PYTHONPATH=build/lib.$ARCH are also useful when
- you do a'setup.py build' step first. The 'testpath' attribute of this
- class controls what PYTHONPATH= is set to.
-
- Trial has the ability (through the --testmodule flag) to run only the set
- of test cases named by special 'test-case-name' tags in source files. We
- can get the list of changed source files from our parent Build and
- provide them to trial, thus running the minimal set of test cases needed
- to cover the Changes. This is useful for quick builds, especially in
- trees with a lot of test cases. The 'testChanges' parameter controls this
- feature: if set, it will override 'tests'.
-
- The trial executable itself is typically just 'trial' (which is usually
- found on your $PATH as /usr/bin/trial), but it can be overridden with the
- 'trial' parameter. This is useful for Twisted's own unittests, which want
- to use the copy of bin/trial that comes with the sources. (when bin/trial
- discovers that it is living in a subdirectory named 'Twisted', it assumes
- it is being run from the source tree and adds that parent directory to
- PYTHONPATH. Therefore the canonical way to run Twisted's own unittest
- suite is './bin/trial twisted.test' rather than 'PYTHONPATH=.
- /usr/bin/trial twisted.test', especially handy when /usr/bin/trial has
- not yet been installed).
-
- To influence the version of python being used for the tests, or to add
- flags to the command, set the 'python' parameter. This can be a string
- (like 'python2.2') or a list (like ['python2.3', '-Wall']).
-
- Trial creates and switches into a directory named _trial_temp/ before
- running the tests, and sends the twisted log (which includes all
- exceptions) to a file named test.log . This file will be pulled up to
- the master where it can be seen as part of the status output.
-
- There are some class attributes which may be usefully overridden
- by subclasses. 'trialMode' and 'trialArgs' can influence the trial
- command line.
- """
-
- name = "trial"
- progressMetrics = ('output', 'tests', 'test.log')
- # note: the slash only works on unix buildslaves, of course, but we have
- # no way to know what the buildslave uses as a separator. TODO: figure
- # out something clever.
- logfiles = {"test.log": "_trial_temp/test.log"}
- # we use test.log to track Progress at the end of __init__()
-
- flunkOnFailure = True
- python = None
- trial = "trial"
- trialMode = ["--reporter=bwverbose"] # requires Twisted-2.1.0 or newer
- # for Twisted-2.0.0 or 1.3.0, use ["-o"] instead
- trialArgs = []
- testpath = UNSPECIFIED # required (but can be None)
- testChanges = False # TODO: needs better name
- recurse = False
- reactor = None
- randomly = False
- tests = None # required
-
- def __init__(self, reactor=UNSPECIFIED, python=None, trial=None,
- testpath=UNSPECIFIED,
- tests=None, testChanges=None,
- recurse=None, randomly=None,
- trialMode=None, trialArgs=None,
- **kwargs):
- """
- @type testpath: string
- @param testpath: use in PYTHONPATH when running the tests. If
- None, do not set PYTHONPATH. Setting this to '.' will
- cause the source files to be used in-place.
-
- @type python: string (without spaces) or list
- @param python: which python executable to use. Will form the start of
- the argv array that will launch trial. If you use this,
- you should set 'trial' to an explicit path (like
- /usr/bin/trial or ./bin/trial). Defaults to None, which
- leaves it out entirely (running 'trial args' instead of
- 'python ./bin/trial args'). Likely values are 'python',
- ['python2.2'], ['python', '-Wall'], etc.
-
- @type trial: string
- @param trial: which 'trial' executable to run.
- Defaults to 'trial', which will cause $PATH to be
- searched and probably find /usr/bin/trial . If you set
- 'python', this should be set to an explicit path (because
- 'python2.3 trial' will not work).
-
- @type trialMode: list of strings
- @param trialMode: a list of arguments to pass to trial, specifically
- to set the reporting mode. This defaults to ['-to']
- which means 'verbose colorless output' to the trial
- that comes with Twisted-2.0.x and at least -2.1.0 .
- Newer versions of Twisted may come with a trial
- that prefers ['--reporter=bwverbose'].
-
- @type trialArgs: list of strings
- @param trialArgs: a list of arguments to pass to trial, available to
- turn on any extra flags you like. Defaults to [].
-
- @type tests: list of strings
- @param tests: a list of test modules to run, like
- ['twisted.test.test_defer', 'twisted.test.test_process'].
- If this is a string, it will be converted into a one-item
- list.
-
- @type testChanges: boolean
- @param testChanges: if True, ignore the 'tests' parameter and instead
- ask the Build for all the files that make up the
- Changes going into this build. Pass these filenames
- to trial and ask it to look for test-case-name
- tags, running just the tests necessary to cover the
- changes.
-
- @type recurse: boolean
- @param recurse: If True, pass the --recurse option to trial, allowing
- test cases to be found in deeper subdirectories of the
- modules listed in 'tests'. This does not appear to be
- necessary when using testChanges.
-
- @type reactor: string
- @param reactor: which reactor to use, like 'gtk' or 'java'. If not
- provided, the Twisted's usual platform-dependent
- default is used.
-
- @type randomly: boolean
- @param randomly: if True, add the --random=0 argument, which instructs
- trial to run the unit tests in a random order each
- time. This occasionally catches problems that might be
- masked when one module always runs before another
- (like failing to make registerAdapter calls before
- lookups are done).
-
- @type kwargs: dict
- @param kwargs: parameters. The following parameters are inherited from
- L{ShellCommand} and may be useful to set: workdir,
- haltOnFailure, flunkOnWarnings, flunkOnFailure,
- warnOnWarnings, warnOnFailure, want_stdout, want_stderr,
- timeout.
- """
- ShellCommand.__init__(self, **kwargs)
- self.addFactoryArguments(reactor=reactor,
- python=python,
- trial=trial,
- testpath=testpath,
- tests=tests,
- testChanges=testChanges,
- recurse=recurse,
- randomly=randomly,
- trialMode=trialMode,
- trialArgs=trialArgs,
- )
-
- if python:
- self.python = python
- if self.python is not None:
- if type(self.python) is str:
- self.python = [self.python]
- for s in self.python:
- if " " in s:
- # this is not strictly an error, but I suspect more
- # people will accidentally try to use python="python2.3
- # -Wall" than will use embedded spaces in a python flag
- log.msg("python= component '%s' has spaces")
- log.msg("To add -Wall, use python=['python', '-Wall']")
- why = "python= value has spaces, probably an error"
- raise ValueError(why)
-
- if trial:
- self.trial = trial
- if " " in self.trial:
- raise ValueError("trial= value has spaces")
- if trialMode is not None:
- self.trialMode = trialMode
- if trialArgs is not None:
- self.trialArgs = trialArgs
-
- if testpath is not UNSPECIFIED:
- self.testpath = testpath
- if self.testpath is UNSPECIFIED:
- raise ValueError("You must specify testpath= (it can be None)")
- assert isinstance(self.testpath, str) or self.testpath is None
-
- if reactor is not UNSPECIFIED:
- self.reactor = reactor
-
- if tests is not None:
- self.tests = tests
- if type(self.tests) is str:
- self.tests = [self.tests]
- if testChanges is not None:
- self.testChanges = testChanges
- #self.recurse = True # not sure this is necessary
-
- if not self.testChanges and self.tests is None:
- raise ValueError("Must either set testChanges= or provide tests=")
-
- if recurse is not None:
- self.recurse = recurse
- if randomly is not None:
- self.randomly = randomly
-
- # build up most of the command, then stash it until start()
- command = []
- if self.python:
- command.extend(self.python)
- command.append(self.trial)
- command.extend(self.trialMode)
- if self.recurse:
- command.append("--recurse")
- if self.reactor:
- command.append("--reactor=%s" % reactor)
- if self.randomly:
- command.append("--random=0")
- command.extend(self.trialArgs)
- self.command = command
-
- if self.reactor:
- self.description = ["testing", "(%s)" % self.reactor]
- self.descriptionDone = ["tests"]
- # commandComplete adds (reactorname) to self.text
- else:
- self.description = ["testing"]
- self.descriptionDone = ["tests"]
-
- # this counter will feed Progress along the 'test cases' metric
- self.addLogObserver('stdio', TrialTestCaseCounter())
- # this one just measures bytes of output in _trial_temp/test.log
- self.addLogObserver('test.log', OutputProgressObserver('test.log'))
-
- def setupEnvironment(self, cmd):
- ShellCommand.setupEnvironment(self, cmd)
- if self.testpath != None:
- e = cmd.args['env']
- if e is None:
- cmd.args['env'] = {'PYTHONPATH': self.testpath}
- else:
- # TODO: somehow, each build causes another copy of
- # self.testpath to get prepended
- if e.get('PYTHONPATH', "") == "":
- e['PYTHONPATH'] = self.testpath
- else:
- e['PYTHONPATH'] = self.testpath + ":" + e['PYTHONPATH']
- try:
- p = cmd.args['env']['PYTHONPATH']
- if type(p) is not str:
- log.msg("hey, not a string:", p)
- assert False
- except (KeyError, TypeError):
- # KeyError if args doesn't have ['env']
- # KeyError if args['env'] doesn't have ['PYTHONPATH']
- # TypeError if args is None
- pass
-
- def start(self):
- # now that self.build.allFiles() is nailed down, finish building the
- # command
- if self.testChanges:
- for f in self.build.allFiles():
- if f.endswith(".py"):
- self.command.append("--testmodule=%s" % f)
- else:
- self.command.extend(self.tests)
- log.msg("Trial.start: command is", self.command)
-
- # if our slave is too old to understand logfiles=, fetch them
- # manually. This is a fallback for the Twisted buildbot and some old
- # buildslaves.
- self._needToPullTestDotLog = False
- if self.slaveVersionIsOlderThan("shell", "2.1"):
- log.msg("Trial: buildslave %s is too old to accept logfiles=" %
- self.getSlaveName())
- log.msg(" falling back to 'cat _trial_temp/test.log' instead")
- self.logfiles = {}
- self._needToPullTestDotLog = True
-
- ShellCommand.start(self)
-
-
- def commandComplete(self, cmd):
- if not self._needToPullTestDotLog:
- return self._gotTestDotLog(cmd)
-
- # if the buildslave was too old, pull test.log now
- catcmd = ["cat", "_trial_temp/test.log"]
- c2 = RemoteShellCommand(command=catcmd, workdir=self.workdir)
- loog = self.addLog("test.log")
- c2.useLog(loog, True, logfileName="stdio")
- self.cmd = c2 # to allow interrupts
- d = c2.run(self, self.remote)
- d.addCallback(lambda res: self._gotTestDotLog(cmd))
- return d
-
- def rtext(self, fmt='%s'):
- if self.reactor:
- rtext = fmt % self.reactor
- return rtext.replace("reactor", "")
- return ""
-
- def _gotTestDotLog(self, cmd):
- # figure out all status, then let the various hook functions return
- # different pieces of it
-
- # 'cmd' is the original trial command, so cmd.logs['stdio'] is the
- # trial output. We don't have access to test.log from here.
- output = cmd.logs['stdio'].getText()
- counts = countFailedTests(output)
-
- total = counts['total']
- failures, errors = counts['failures'], counts['errors']
- parsed = (total != None)
- text = []
- text2 = ""
-
- if cmd.rc == 0:
- if parsed:
- results = SUCCESS
- if total:
- text += ["%d %s" % \
- (total,
- total == 1 and "test" or "tests"),
- "passed"]
- else:
- text += ["no tests", "run"]
- else:
- results = FAILURE
- text += ["testlog", "unparseable"]
- text2 = "tests"
- else:
- # something failed
- results = FAILURE
- if parsed:
- text.append("tests")
- if failures:
- text.append("%d %s" % \
- (failures,
- failures == 1 and "failure" or "failures"))
- if errors:
- text.append("%d %s" % \
- (errors,
- errors == 1 and "error" or "errors"))
- count = failures + errors
- text2 = "%d tes%s" % (count, (count == 1 and 't' or 'ts'))
- else:
- text += ["tests", "failed"]
- text2 = "tests"
-
- if counts['skips']:
- text.append("%d %s" % \
- (counts['skips'],
- counts['skips'] == 1 and "skip" or "skips"))
- if counts['expectedFailures']:
- text.append("%d %s" % \
- (counts['expectedFailures'],
- counts['expectedFailures'] == 1 and "todo"
- or "todos"))
- if 0: # TODO
- results = WARNINGS
- if not text2:
- text2 = "todo"
-
- if 0:
- # ignore unexpectedSuccesses for now, but it should really mark
- # the build WARNING
- if counts['unexpectedSuccesses']:
- text.append("%d surprises" % counts['unexpectedSuccesses'])
- results = WARNINGS
- if not text2:
- text2 = "tests"
-
- if self.reactor:
- text.append(self.rtext('(%s)'))
- if text2:
- text2 = "%s %s" % (text2, self.rtext('(%s)'))
-
- self.results = results
- self.text = text
- self.text2 = [text2]
-
- def addTestResult(self, testname, results, text, tlog):
- if self.reactor is not None:
- testname = (self.reactor,) + testname
- tr = builder.TestResult(testname, results, text, logs={'log': tlog})
- #self.step_status.build.addTestResult(tr)
- self.build.build_status.addTestResult(tr)
-
- def createSummary(self, loog):
- output = loog.getText()
- problems = ""
- sio = StringIO.StringIO(output)
- warnings = {}
- while 1:
- line = sio.readline()
- if line == "":
- break
- if line.find(" exceptions.DeprecationWarning: ") != -1:
- # no source
- warning = line # TODO: consider stripping basedir prefix here
- warnings[warning] = warnings.get(warning, 0) + 1
- elif (line.find(" DeprecationWarning: ") != -1 or
- line.find(" UserWarning: ") != -1):
- # next line is the source
- warning = line + sio.readline()
- warnings[warning] = warnings.get(warning, 0) + 1
- elif line.find("Warning: ") != -1:
- warning = line
- warnings[warning] = warnings.get(warning, 0) + 1
-
- if line.find("=" * 60) == 0 or line.find("-" * 60) == 0:
- problems += line
- problems += sio.read()
- break
-
- if problems:
- self.addCompleteLog("problems", problems)
- # now parse the problems for per-test results
- pio = StringIO.StringIO(problems)
- pio.readline() # eat the first separator line
- testname = None
- done = False
- while not done:
- while 1:
- line = pio.readline()
- if line == "":
- done = True
- break
- if line.find("=" * 60) == 0:
- break
- if line.find("-" * 60) == 0:
- # the last case has --- as a separator before the
- # summary counts are printed
- done = True
- break
- if testname is None:
- # the first line after the === is like:
-# EXPECTED FAILURE: testLackOfTB (twisted.test.test_failure.FailureTestCase)
-# SKIPPED: testRETR (twisted.test.test_ftp.TestFTPServer)
-# FAILURE: testBatchFile (twisted.conch.test.test_sftp.TestOurServerBatchFile)
- r = re.search(r'^([^:]+): (\w+) \(([\w\.]+)\)', line)
- if not r:
- # TODO: cleanup, if there are no problems,
- # we hit here
- continue
- result, name, case = r.groups()
- testname = tuple(case.split(".") + [name])
- results = {'SKIPPED': SKIPPED,
- 'EXPECTED FAILURE': SUCCESS,
- 'UNEXPECTED SUCCESS': WARNINGS,
- 'FAILURE': FAILURE,
- 'ERROR': FAILURE,
- 'SUCCESS': SUCCESS, # not reported
- }.get(result, WARNINGS)
- text = result.lower().split()
- loog = line
- # the next line is all dashes
- loog += pio.readline()
- else:
- # the rest goes into the log
- loog += line
- if testname:
- self.addTestResult(testname, results, text, loog)
- testname = None
-
- if warnings:
- lines = warnings.keys()
- lines.sort()
- self.addCompleteLog("warnings", "".join(lines))
-
- def evaluateCommand(self, cmd):
- return self.results
-
- def getText(self, cmd, results):
- return self.text
- def getText2(self, cmd, results):
- return self.text2
-
-
-class ProcessDocs(ShellCommand):
- """I build all docs. This requires some LaTeX packages to be installed.
- It will result in the full documentation book (dvi, pdf, etc).
-
- """
-
- name = "process-docs"
- warnOnWarnings = 1
- command = ["admin/process-docs"]
- description = ["processing", "docs"]
- descriptionDone = ["docs"]
- # TODO: track output and time
-
- def __init__(self, **kwargs):
- """
- @type workdir: string
- @keyword workdir: the workdir to start from: must be the base of the
- Twisted tree
- """
- ShellCommand.__init__(self, **kwargs)
-
- def createSummary(self, log):
- output = log.getText()
- # hlint warnings are of the format: 'WARNING: file:line:col: stuff
- # latex warnings start with "WARNING: LaTeX Warning: stuff", but
- # sometimes wrap around to a second line.
- lines = output.split("\n")
- warningLines = []
- wantNext = False
- for line in lines:
- wantThis = wantNext
- wantNext = False
- if line.startswith("WARNING: "):
- wantThis = True
- wantNext = True
- if wantThis:
- warningLines.append(line)
-
- if warningLines:
- self.addCompleteLog("warnings", "\n".join(warningLines) + "\n")
- self.warnings = len(warningLines)
-
- def evaluateCommand(self, cmd):
- if cmd.rc != 0:
- return FAILURE
- if self.warnings:
- return WARNINGS
- return SUCCESS
-
- def getText(self, cmd, results):
- if results == SUCCESS:
- return ["docs", "successful"]
- if results == WARNINGS:
- return ["docs",
- "%d warnin%s" % (self.warnings,
- self.warnings == 1 and 'g' or 'gs')]
- if results == FAILURE:
- return ["docs", "failed"]
-
- def getText2(self, cmd, results):
- if results == WARNINGS:
- return ["%d do%s" % (self.warnings,
- self.warnings == 1 and 'c' or 'cs')]
- return ["docs"]
-
-
-
-class BuildDebs(ShellCommand):
- """I build the .deb packages."""
-
- name = "debuild"
- flunkOnFailure = 1
- command = ["debuild", "-uc", "-us"]
- description = ["building", "debs"]
- descriptionDone = ["debs"]
-
- def __init__(self, **kwargs):
- """
- @type workdir: string
- @keyword workdir: the workdir to start from (must be the base of the
- Twisted tree)
- """
- ShellCommand.__init__(self, **kwargs)
-
- def commandComplete(self, cmd):
- errors, warnings = 0, 0
- output = cmd.logs['stdio'].getText()
- summary = ""
- sio = StringIO.StringIO(output)
- for line in sio.readlines():
- if line.find("E: ") == 0:
- summary += line
- errors += 1
- if line.find("W: ") == 0:
- summary += line
- warnings += 1
- if summary:
- self.addCompleteLog("problems", summary)
- self.errors = errors
- self.warnings = warnings
-
- def evaluateCommand(self, cmd):
- if cmd.rc != 0:
- return FAILURE
- if self.errors:
- return FAILURE
- if self.warnings:
- return WARNINGS
- return SUCCESS
-
- def getText(self, cmd, results):
- text = ["debuild"]
- if cmd.rc != 0:
- text.append("failed")
- errors, warnings = self.errors, self.warnings
- if warnings or errors:
- text.append("lintian:")
- if warnings:
- text.append("%d warnin%s" % (warnings,
- warnings == 1 and 'g' or 'gs'))
- if errors:
- text.append("%d erro%s" % (errors,
- errors == 1 and 'r' or 'rs'))
- return text
-
- def getText2(self, cmd, results):
- if cmd.rc != 0:
- return ["debuild"]
- if self.errors or self.warnings:
- return ["%d lintian" % (self.errors + self.warnings)]
- return []
-
-class RemovePYCs(ShellCommand):
- name = "remove-.pyc"
- command = 'find . -name "*.pyc" | xargs rm'
- description = ["removing", ".pyc", "files"]
- descriptionDone = ["remove", ".pycs"]
diff --git a/buildbot/buildbot/steps/shell.py b/buildbot/buildbot/steps/shell.py
deleted file mode 100644
index e979f04..0000000
--- a/buildbot/buildbot/steps/shell.py
+++ /dev/null
@@ -1,487 +0,0 @@
-# -*- test-case-name: buildbot.test.test_steps,buildbot.test.test_properties -*-
-
-import re
-from twisted.python import log
-from buildbot.process.buildstep import LoggingBuildStep, RemoteShellCommand
-from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE, STDOUT, STDERR
-
-# for existing configurations that import WithProperties from here. We like
-# to move this class around just to keep our readers guessing.
-from buildbot.process.properties import WithProperties
-_hush_pyflakes = [WithProperties]
-del _hush_pyflakes
-
-class ShellCommand(LoggingBuildStep):
- """I run a single shell command on the buildslave. I return FAILURE if
- the exit code of that command is non-zero, SUCCESS otherwise. To change
- this behavior, override my .evaluateCommand method.
-
- By default, a failure of this step will mark the whole build as FAILURE.
- To override this, give me an argument of flunkOnFailure=False .
-
- I create a single Log named 'log' which contains the output of the
- command. To create additional summary Logs, override my .createSummary
- method.
-
- The shell command I run (a list of argv strings) can be provided in
- several ways:
- - a class-level .command attribute
- - a command= parameter to my constructor (overrides .command)
- - set explicitly with my .setCommand() method (overrides both)
-
- @ivar command: a list of renderable objects (typically strings or
- WithProperties instances). This will be used by start()
- to create a RemoteShellCommand instance.
-
- @ivar logfiles: a dict mapping log NAMEs to workdir-relative FILENAMEs
- of their corresponding logfiles. The contents of the file
- named FILENAME will be put into a LogFile named NAME, ina
- something approximating real-time. (note that logfiles=
- is actually handled by our parent class LoggingBuildStep)
-
- """
-
- name = "shell"
- description = None # set this to a list of short strings to override
- descriptionDone = None # alternate description when the step is complete
- command = None # set this to a command, or set in kwargs
- # logfiles={} # you can also set 'logfiles' to a dictionary, and it
- # will be merged with any logfiles= argument passed in
- # to __init__
-
- # override this on a specific ShellCommand if you want to let it fail
- # without dooming the entire build to a status of FAILURE
- flunkOnFailure = True
-
- def __init__(self, workdir=None,
- description=None, descriptionDone=None,
- command=None,
- usePTY="slave-config",
- **kwargs):
- # most of our arguments get passed through to the RemoteShellCommand
- # that we create, but first strip out the ones that we pass to
- # BuildStep (like haltOnFailure and friends), and a couple that we
- # consume ourselves.
-
- if description:
- self.description = description
- if isinstance(self.description, str):
- self.description = [self.description]
- if descriptionDone:
- self.descriptionDone = descriptionDone
- if isinstance(self.descriptionDone, str):
- self.descriptionDone = [self.descriptionDone]
- if command:
- self.setCommand(command)
-
- # pull out the ones that LoggingBuildStep wants, then upcall
- buildstep_kwargs = {}
- for k in kwargs.keys()[:]:
- if k in self.__class__.parms:
- buildstep_kwargs[k] = kwargs[k]
- del kwargs[k]
- LoggingBuildStep.__init__(self, **buildstep_kwargs)
- self.addFactoryArguments(workdir=workdir,
- description=description,
- descriptionDone=descriptionDone,
- command=command)
-
- # everything left over goes to the RemoteShellCommand
- kwargs['workdir'] = workdir # including a copy of 'workdir'
- kwargs['usePTY'] = usePTY
- self.remote_kwargs = kwargs
- # we need to stash the RemoteShellCommand's args too
- self.addFactoryArguments(**kwargs)
-
- def setDefaultWorkdir(self, workdir):
- rkw = self.remote_kwargs
- rkw['workdir'] = rkw['workdir'] or workdir
-
- def setCommand(self, command):
- self.command = command
-
- def describe(self, done=False):
- """Return a list of short strings to describe this step, for the
- status display. This uses the first few words of the shell command.
- You can replace this by setting .description in your subclass, or by
- overriding this method to describe the step better.
-
- @type done: boolean
- @param done: whether the command is complete or not, to improve the
- way the command is described. C{done=False} is used
- while the command is still running, so a single
- imperfect-tense verb is appropriate ('compiling',
- 'testing', ...) C{done=True} is used when the command
- has finished, and the default getText() method adds some
- text, so a simple noun is appropriate ('compile',
- 'tests' ...)
- """
-
- if done and self.descriptionDone is not None:
- return list(self.descriptionDone)
- if self.description is not None:
- return list(self.description)
-
- properties = self.build.getProperties()
- words = self.command
- if isinstance(words, (str, unicode)):
- words = words.split()
- # render() each word to handle WithProperties objects
- words = properties.render(words)
- if len(words) < 1:
- return ["???"]
- if len(words) == 1:
- return ["'%s'" % words[0]]
- if len(words) == 2:
- return ["'%s" % words[0], "%s'" % words[1]]
- return ["'%s" % words[0], "%s" % words[1], "...'"]
-
- def setupEnvironment(self, cmd):
- # merge in anything from Build.slaveEnvironment
- # This can be set from a Builder-level environment, or from earlier
- # BuildSteps. The latter method is deprecated and superceded by
- # BuildProperties.
- # Environment variables passed in by a BuildStep override
- # those passed in at the Builder level.
- properties = self.build.getProperties()
- slaveEnv = self.build.slaveEnvironment
- if slaveEnv:
- if cmd.args['env'] is None:
- cmd.args['env'] = {}
- fullSlaveEnv = slaveEnv.copy()
- fullSlaveEnv.update(cmd.args['env'])
- cmd.args['env'] = properties.render(fullSlaveEnv)
- # note that each RemoteShellCommand gets its own copy of the
- # dictionary, so we shouldn't be affecting anyone but ourselves.
-
- def checkForOldSlaveAndLogfiles(self):
- if not self.logfiles:
- return # doesn't matter
- if not self.slaveVersionIsOlderThan("shell", "2.1"):
- return # slave is new enough
- # this buildslave is too old and will ignore the 'logfiles'
- # argument. You'll either have to pull the logfiles manually
- # (say, by using 'cat' in a separate RemoteShellCommand) or
- # upgrade the buildslave.
- msg1 = ("Warning: buildslave %s is too old "
- "to understand logfiles=, ignoring it."
- % self.getSlaveName())
- msg2 = "You will have to pull this logfile (%s) manually."
- log.msg(msg1)
- for logname,remotefilename in self.logfiles.items():
- newlog = self.addLog(logname)
- newlog.addHeader(msg1 + "\n")
- newlog.addHeader(msg2 % remotefilename + "\n")
- newlog.finish()
- # now prevent setupLogfiles() from adding them
- self.logfiles = {}
-
- def start(self):
- # this block is specific to ShellCommands. subclasses that don't need
- # to set up an argv array, an environment, or extra logfiles= (like
- # the Source subclasses) can just skip straight to startCommand()
- properties = self.build.getProperties()
-
- warnings = []
-
- # create the actual RemoteShellCommand instance now
- kwargs = properties.render(self.remote_kwargs)
- kwargs['command'] = properties.render(self.command)
- kwargs['logfiles'] = self.logfiles
-
- # check for the usePTY flag
- if kwargs.has_key('usePTY') and kwargs['usePTY'] != 'slave-config':
- slavever = self.slaveVersion("shell", "old")
- if self.slaveVersionIsOlderThan("svn", "2.7"):
- warnings.append("NOTE: slave does not allow master to override usePTY\n")
-
- cmd = RemoteShellCommand(**kwargs)
- self.setupEnvironment(cmd)
- self.checkForOldSlaveAndLogfiles()
-
- self.startCommand(cmd, warnings)
-
-
-
-class TreeSize(ShellCommand):
- name = "treesize"
- command = ["du", "-s", "-k", "."]
- kib = None
-
- def commandComplete(self, cmd):
- out = cmd.logs['stdio'].getText()
- m = re.search(r'^(\d+)', out)
- if m:
- self.kib = int(m.group(1))
- self.setProperty("tree-size-KiB", self.kib, "treesize")
-
- def evaluateCommand(self, cmd):
- if cmd.rc != 0:
- return FAILURE
- if self.kib is None:
- return WARNINGS # not sure how 'du' could fail, but whatever
- return SUCCESS
-
- def getText(self, cmd, results):
- if self.kib is not None:
- return ["treesize", "%d KiB" % self.kib]
- return ["treesize", "unknown"]
-
-class SetProperty(ShellCommand):
- name = "setproperty"
-
- def __init__(self, **kwargs):
- self.property = None
- self.extract_fn = None
- self.strip = True
-
- if kwargs.has_key('property'):
- self.property = kwargs['property']
- del kwargs['property']
- if kwargs.has_key('extract_fn'):
- self.extract_fn = kwargs['extract_fn']
- del kwargs['extract_fn']
- if kwargs.has_key('strip'):
- self.strip = kwargs['strip']
- del kwargs['strip']
-
- ShellCommand.__init__(self, **kwargs)
-
- self.addFactoryArguments(property=self.property)
- self.addFactoryArguments(extract_fn=self.extract_fn)
- self.addFactoryArguments(strip=self.strip)
-
- assert self.property or self.extract_fn, \
- "SetProperty step needs either property= or extract_fn="
-
- self.property_changes = {}
-
- def commandComplete(self, cmd):
- if self.property:
- result = cmd.logs['stdio'].getText()
- if self.strip: result = result.strip()
- propname = self.build.getProperties().render(self.property)
- self.setProperty(propname, result, "SetProperty Step")
- self.property_changes[propname] = result
- else:
- log = cmd.logs['stdio']
- new_props = self.extract_fn(cmd.rc,
- ''.join(log.getChunks([STDOUT], onlyText=True)),
- ''.join(log.getChunks([STDERR], onlyText=True)))
- for k,v in new_props.items():
- self.setProperty(k, v, "SetProperty Step")
- self.property_changes = new_props
-
- def createSummary(self, log):
- props_set = [ "%s: %r" % (k,v) for k,v in self.property_changes.items() ]
- self.addCompleteLog('property changes', "\n".join(props_set))
-
- def getText(self, cmd, results):
- if self.property_changes:
- return [ "set props:" ] + self.property_changes.keys()
- else:
- return [ "no change" ]
-
-class Configure(ShellCommand):
-
- name = "configure"
- haltOnFailure = 1
- flunkOnFailure = 1
- description = ["configuring"]
- descriptionDone = ["configure"]
- command = ["./configure"]
-
-class WarningCountingShellCommand(ShellCommand):
- warnCount = 0
- warningPattern = '.*warning[: ].*'
-
- def __init__(self, warningPattern=None, **kwargs):
- # See if we've been given a regular expression to use to match
- # warnings. If not, use a default that assumes any line with "warning"
- # present is a warning. This may lead to false positives in some cases.
- if warningPattern:
- self.warningPattern = warningPattern
-
- # And upcall to let the base class do its work
- ShellCommand.__init__(self, **kwargs)
-
- self.addFactoryArguments(warningPattern=warningPattern)
-
- def createSummary(self, log):
- self.warnCount = 0
-
- # Now compile a regular expression from whichever warning pattern we're
- # using
- if not self.warningPattern:
- return
-
- wre = self.warningPattern
- if isinstance(wre, str):
- wre = re.compile(wre)
-
- # Check if each line in the output from this command matched our
- # warnings regular expressions. If did, bump the warnings count and
- # add the line to the collection of lines with warnings
- warnings = []
- # TODO: use log.readlines(), except we need to decide about stdout vs
- # stderr
- for line in log.getText().split("\n"):
- if wre.match(line):
- warnings.append(line)
- self.warnCount += 1
-
- # If there were any warnings, make the log if lines with warnings
- # available
- if self.warnCount:
- self.addCompleteLog("warnings", "\n".join(warnings) + "\n")
-
- warnings_stat = self.step_status.getStatistic('warnings', 0)
- self.step_status.setStatistic('warnings', warnings_stat + self.warnCount)
-
- try:
- old_count = self.getProperty("warnings-count")
- except KeyError:
- old_count = 0
- self.setProperty("warnings-count", old_count + self.warnCount, "WarningCountingShellCommand")
-
-
- def evaluateCommand(self, cmd):
- if cmd.rc != 0:
- return FAILURE
- if self.warnCount:
- return WARNINGS
- return SUCCESS
-
-
-class Compile(WarningCountingShellCommand):
-
- name = "compile"
- haltOnFailure = 1
- flunkOnFailure = 1
- description = ["compiling"]
- descriptionDone = ["compile"]
- command = ["make", "all"]
-
- OFFprogressMetrics = ('output',)
- # things to track: number of files compiled, number of directories
- # traversed (assuming 'make' is being used)
-
- def createSummary(self, log):
- # TODO: grep for the characteristic GCC error lines and
- # assemble them into a pair of buffers
- WarningCountingShellCommand.createSummary(self, log)
-
-class Test(WarningCountingShellCommand):
-
- name = "test"
- warnOnFailure = 1
- description = ["testing"]
- descriptionDone = ["test"]
- command = ["make", "test"]
-
- def setTestResults(self, total=0, failed=0, passed=0, warnings=0):
- """
- Called by subclasses to set the relevant statistics; this actually
- adds to any statistics already present
- """
- total += self.step_status.getStatistic('tests-total', 0)
- self.step_status.setStatistic('tests-total', total)
- failed += self.step_status.getStatistic('tests-failed', 0)
- self.step_status.setStatistic('tests-failed', failed)
- warnings += self.step_status.getStatistic('tests-warnings', 0)
- self.step_status.setStatistic('tests-warnings', warnings)
- passed += self.step_status.getStatistic('tests-passed', 0)
- self.step_status.setStatistic('tests-passed', passed)
-
- def describe(self, done=False):
- description = WarningCountingShellCommand.describe(self, done)
- if done:
- if self.step_status.hasStatistic('tests-total'):
- total = self.step_status.getStatistic("tests-total", 0)
- failed = self.step_status.getStatistic("tests-failed", 0)
- passed = self.step_status.getStatistic("tests-passed", 0)
- warnings = self.step_status.getStatistic("tests-warnings", 0)
- if not total:
- total = failed + passed + warnings
-
- if total:
- description.append('%d tests' % total)
- if passed:
- description.append('%d passed' % passed)
- if warnings:
- description.append('%d warnings' % warnings)
- if failed:
- description.append('%d failed' % failed)
- return description
-
-class PerlModuleTest(Test):
- command=["prove", "--lib", "lib", "-r", "t"]
- total = 0
-
- def evaluateCommand(self, cmd):
- # Get stdio, stripping pesky newlines etc.
- lines = map(
- lambda line : line.replace('\r\n','').replace('\r','').replace('\n',''),
- self.getLog('stdio').readlines()
- )
-
- total = 0
- passed = 0
- failed = 0
- rc = cmd.rc
-
- # New version of Test::Harness?
- try:
- test_summary_report_index = lines.index("Test Summary Report")
-
- del lines[0:test_summary_report_index + 2]
-
- re_test_result = re.compile("^Result: (PASS|FAIL)$|Tests: \d+ Failed: (\d+)\)|Files=\d+, Tests=(\d+)")
-
- mos = map(lambda line: re_test_result.search(line), lines)
- test_result_lines = [mo.groups() for mo in mos if mo]
-
- for line in test_result_lines:
- if line[0] == 'PASS':
- rc = SUCCESS
- elif line[0] == 'FAIL':
- rc = FAILURE
- elif line[1]:
- failed += int(line[1])
- elif line[2]:
- total = int(line[2])
-
- except ValueError: # Nope, it's the old version
- re_test_result = re.compile("^(All tests successful)|(\d+)/(\d+) subtests failed|Files=\d+, Tests=(\d+),")
-
- mos = map(lambda line: re_test_result.search(line), lines)
- test_result_lines = [mo.groups() for mo in mos if mo]
-
- if test_result_lines:
- test_result_line = test_result_lines[0]
-
- success = test_result_line[0]
-
- if success:
- failed = 0
-
- test_totals_line = test_result_lines[1]
- total_str = test_totals_line[3]
-
- rc = SUCCESS
- else:
- failed_str = test_result_line[1]
- failed = int(failed_str)
-
- total_str = test_result_line[2]
-
- rc = FAILURE
-
- total = int(total_str)
-
- if total:
- passed = total - failed
-
- self.setTestResults(total=total, failed=failed, passed=passed)
-
- return rc
diff --git a/buildbot/buildbot/steps/source.py b/buildbot/buildbot/steps/source.py
deleted file mode 100644
index 4571ad5..0000000
--- a/buildbot/buildbot/steps/source.py
+++ /dev/null
@@ -1,1107 +0,0 @@
-# -*- test-case-name: buildbot.test.test_vc -*-
-
-from warnings import warn
-from email.Utils import formatdate
-from twisted.python import log
-from buildbot.process.buildstep import LoggingBuildStep, LoggedRemoteCommand
-from buildbot.interfaces import BuildSlaveTooOldError
-from buildbot.status.builder import SKIPPED
-
-
-class Source(LoggingBuildStep):
- """This is a base class to generate a source tree in the buildslave.
- Each version control system has a specialized subclass, and is expected
- to override __init__ and implement computeSourceRevision() and
- startVC(). The class as a whole builds up the self.args dictionary, then
- starts a LoggedRemoteCommand with those arguments.
- """
-
- # if the checkout fails, there's no point in doing anything else
- haltOnFailure = True
- flunkOnFailure = True
- notReally = False
-
- branch = None # the default branch, should be set in __init__
-
- def __init__(self, workdir=None, mode='update', alwaysUseLatest=False,
- timeout=20*60, retry=None, **kwargs):
- """
- @type workdir: string
- @param workdir: local directory (relative to the Builder's root)
- where the tree should be placed
-
- @type mode: string
- @param mode: the kind of VC operation that is desired:
- - 'update': specifies that the checkout/update should be
- performed directly into the workdir. Each build is performed
- in the same directory, allowing for incremental builds. This
- minimizes disk space, bandwidth, and CPU time. However, it
- may encounter problems if the build process does not handle
- dependencies properly (if you must sometimes do a 'clean
- build' to make sure everything gets compiled), or if source
- files are deleted but generated files can influence test
- behavior (e.g. python's .pyc files), or when source
- directories are deleted but generated files prevent CVS from
- removing them.
-
- - 'copy': specifies that the source-controlled workspace
- should be maintained in a separate directory (called the
- 'copydir'), using checkout or update as necessary. For each
- build, a new workdir is created with a copy of the source
- tree (rm -rf workdir; cp -R -P -p copydir workdir). This
- doubles the disk space required, but keeps the bandwidth low
- (update instead of a full checkout). A full 'clean' build
- is performed each time. This avoids any generated-file
- build problems, but is still occasionally vulnerable to
- problems such as a CVS repository being manually rearranged
- (causing CVS errors on update) which are not an issue with
- a full checkout.
-
- - 'clobber': specifies that the working directory should be
- deleted each time, necessitating a full checkout for each
- build. This insures a clean build off a complete checkout,
- avoiding any of the problems described above, but is
- bandwidth intensive, as the whole source tree must be
- pulled down for each build.
-
- - 'export': is like 'clobber', except that e.g. the 'cvs
- export' command is used to create the working directory.
- This command removes all VC metadata files (the
- CVS/.svn/{arch} directories) from the tree, which is
- sometimes useful for creating source tarballs (to avoid
- including the metadata in the tar file). Not all VC systems
- support export.
-
- @type alwaysUseLatest: boolean
- @param alwaysUseLatest: whether to always update to the most
- recent available sources for this build.
-
- Normally the Source step asks its Build for a list of all
- Changes that are supposed to go into the build, then computes a
- 'source stamp' (revision number or timestamp) that will cause
- exactly that set of changes to be present in the checked out
- tree. This is turned into, e.g., 'cvs update -D timestamp', or
- 'svn update -r revnum'. If alwaysUseLatest=True, bypass this
- computation and always update to the latest available sources
- for each build.
-
- The source stamp helps avoid a race condition in which someone
- commits a change after the master has decided to start a build
- but before the slave finishes checking out the sources. At best
- this results in a build which contains more changes than the
- buildmaster thinks it has (possibly resulting in the wrong
- person taking the blame for any problems that result), at worst
- is can result in an incoherent set of sources (splitting a
- non-atomic commit) which may not build at all.
-
- @type retry: tuple of ints (delay, repeats) (or None)
- @param retry: if provided, VC update failures are re-attempted up
- to REPEATS times, with DELAY seconds between each
- attempt. Some users have slaves with poor connectivity
- to their VC repository, and they say that up to 80% of
- their build failures are due to transient network
- failures that could be handled by simply retrying a
- couple times.
-
- """
-
- LoggingBuildStep.__init__(self, **kwargs)
- self.addFactoryArguments(workdir=workdir,
- mode=mode,
- alwaysUseLatest=alwaysUseLatest,
- timeout=timeout,
- retry=retry,
- )
-
- assert mode in ("update", "copy", "clobber", "export")
- if retry:
- delay, repeats = retry
- assert isinstance(repeats, int)
- assert repeats > 0
- self.args = {'mode': mode,
- 'workdir': workdir,
- 'timeout': timeout,
- 'retry': retry,
- 'patch': None, # set during .start
- }
- self.alwaysUseLatest = alwaysUseLatest
-
- # Compute defaults for descriptions:
- description = ["updating"]
- descriptionDone = ["update"]
- if mode == "clobber":
- description = ["checkout"]
- # because checkingouting takes too much space
- descriptionDone = ["checkout"]
- elif mode == "export":
- description = ["exporting"]
- descriptionDone = ["export"]
- self.description = description
- self.descriptionDone = descriptionDone
-
- def setDefaultWorkdir(self, workdir):
- self.args['workdir'] = self.args['workdir'] or workdir
-
- def describe(self, done=False):
- if done:
- return self.descriptionDone
- return self.description
-
- def computeSourceRevision(self, changes):
- """Each subclass must implement this method to do something more
- precise than -rHEAD every time. For version control systems that use
- repository-wide change numbers (SVN, P4), this can simply take the
- maximum such number from all the changes involved in this build. For
- systems that do not (CVS), it needs to create a timestamp based upon
- the latest Change, the Build's treeStableTimer, and an optional
- self.checkoutDelay value."""
- return None
-
- def start(self):
- if self.notReally:
- log.msg("faking %s checkout/update" % self.name)
- self.step_status.setText(["fake", self.name, "successful"])
- self.addCompleteLog("log",
- "Faked %s checkout/update 'successful'\n" \
- % self.name)
- return SKIPPED
-
- # what source stamp would this build like to use?
- s = self.build.getSourceStamp()
- # if branch is None, then use the Step's "default" branch
- branch = s.branch or self.branch
- # if revision is None, use the latest sources (-rHEAD)
- revision = s.revision
- if not revision and not self.alwaysUseLatest:
- revision = self.computeSourceRevision(s.changes)
- # if patch is None, then do not patch the tree after checkout
-
- # 'patch' is None or a tuple of (patchlevel, diff)
- patch = s.patch
- if patch:
- self.addCompleteLog("patch", patch[1])
-
- self.startVC(branch, revision, patch)
-
- def commandComplete(self, cmd):
- if cmd.updates.has_key("got_revision"):
- got_revision = cmd.updates["got_revision"][-1]
- if got_revision is not None:
- self.setProperty("got_revision", str(got_revision), "Source")
-
-
-
-class CVS(Source):
- """I do CVS checkout/update operations.
-
- Note: if you are doing anonymous/pserver CVS operations, you will need
- to manually do a 'cvs login' on each buildslave before the slave has any
- hope of success. XXX: fix then, take a cvs password as an argument and
- figure out how to do a 'cvs login' on each build
- """
-
- name = "cvs"
-
- #progressMetrics = ('output',)
- #
- # additional things to track: update gives one stderr line per directory
- # (starting with 'cvs server: Updating ') (and is fairly stable if files
- # is empty), export gives one line per directory (starting with 'cvs
- # export: Updating ') and another line per file (starting with U). Would
- # be nice to track these, requires grepping LogFile data for lines,
- # parsing each line. Might be handy to have a hook in LogFile that gets
- # called with each complete line.
-
- def __init__(self, cvsroot, cvsmodule,
- global_options=[], branch=None, checkoutDelay=None,
- login=None,
- **kwargs):
-
- """
- @type cvsroot: string
- @param cvsroot: CVS Repository from which the source tree should
- be obtained. '/home/warner/Repository' for local
- or NFS-reachable repositories,
- ':pserver:anon@foo.com:/cvs' for anonymous CVS,
- 'user@host.com:/cvs' for non-anonymous CVS or
- CVS over ssh. Lots of possibilities, check the
- CVS documentation for more.
-
- @type cvsmodule: string
- @param cvsmodule: subdirectory of CVS repository that should be
- retrieved
-
- @type login: string or None
- @param login: if not None, a string which will be provided as a
- password to the 'cvs login' command, used when a
- :pserver: method is used to access the repository.
- This login is only needed once, but must be run
- each time (just before the CVS operation) because
- there is no way for the buildslave to tell whether
- it was previously performed or not.
-
- @type branch: string
- @param branch: the default branch name, will be used in a '-r'
- argument to specify which branch of the source tree
- should be used for this checkout. Defaults to None,
- which means to use 'HEAD'.
-
- @type checkoutDelay: int or None
- @param checkoutDelay: if not None, the number of seconds to put
- between the last known Change and the
- timestamp given to the -D argument. This
- defaults to exactly half of the parent
- Build's .treeStableTimer, but it could be
- set to something else if your CVS change
- notification has particularly weird
- latency characteristics.
-
- @type global_options: list of strings
- @param global_options: these arguments are inserted in the cvs
- command line, before the
- 'checkout'/'update' command word. See
- 'cvs --help-options' for a list of what
- may be accepted here. ['-r'] will make
- the checked out files read only. ['-r',
- '-R'] will also assume the repository is
- read-only (I assume this means it won't
- use locks to insure atomic access to the
- ,v files)."""
-
- self.checkoutDelay = checkoutDelay
- self.branch = branch
-
- Source.__init__(self, **kwargs)
- self.addFactoryArguments(cvsroot=cvsroot,
- cvsmodule=cvsmodule,
- global_options=global_options,
- branch=branch,
- checkoutDelay=checkoutDelay,
- login=login,
- )
-
- self.args.update({'cvsroot': cvsroot,
- 'cvsmodule': cvsmodule,
- 'global_options': global_options,
- 'login': login,
- })
-
- def computeSourceRevision(self, changes):
- if not changes:
- return None
- lastChange = max([c.when for c in changes])
- if self.checkoutDelay is not None:
- when = lastChange + self.checkoutDelay
- else:
- lastSubmit = max([r.submittedAt for r in self.build.requests])
- when = (lastChange + lastSubmit) / 2
- return formatdate(when)
-
- def startVC(self, branch, revision, patch):
- if self.slaveVersionIsOlderThan("cvs", "1.39"):
- # the slave doesn't know to avoid re-using the same sourcedir
- # when the branch changes. We have no way of knowing which branch
- # the last build used, so if we're using a non-default branch and
- # either 'update' or 'copy' modes, it is safer to refuse to
- # build, and tell the user they need to upgrade the buildslave.
- if (branch != self.branch
- and self.args['mode'] in ("update", "copy")):
- m = ("This buildslave (%s) does not know about multiple "
- "branches, and using mode=%s would probably build the "
- "wrong tree. "
- "Refusing to build. Please upgrade the buildslave to "
- "buildbot-0.7.0 or newer." % (self.build.slavename,
- self.args['mode']))
- log.msg(m)
- raise BuildSlaveTooOldError(m)
-
- if branch is None:
- branch = "HEAD"
- self.args['branch'] = branch
- self.args['revision'] = revision
- self.args['patch'] = patch
-
- if self.args['branch'] == "HEAD" and self.args['revision']:
- # special case. 'cvs update -r HEAD -D today' gives no files
- # TODO: figure out why, see if it applies to -r BRANCH
- self.args['branch'] = None
-
- # deal with old slaves
- warnings = []
- slavever = self.slaveVersion("cvs", "old")
-
- if slavever == "old":
- # 0.5.0
- if self.args['mode'] == "export":
- self.args['export'] = 1
- elif self.args['mode'] == "clobber":
- self.args['clobber'] = 1
- elif self.args['mode'] == "copy":
- self.args['copydir'] = "source"
- self.args['tag'] = self.args['branch']
- assert not self.args['patch'] # 0.5.0 slave can't do patch
-
- cmd = LoggedRemoteCommand("cvs", self.args)
- self.startCommand(cmd, warnings)
-
-
-class SVN(Source):
- """I perform Subversion checkout/update operations."""
-
- name = 'svn'
-
- def __init__(self, svnurl=None, baseURL=None, defaultBranch=None,
- directory=None, username=None, password=None, **kwargs):
- """
- @type svnurl: string
- @param svnurl: the URL which points to the Subversion server,
- combining the access method (HTTP, ssh, local file),
- the repository host/port, the repository path, the
- sub-tree within the repository, and the branch to
- check out. Using C{svnurl} does not enable builds of
- alternate branches: use C{baseURL} to enable this.
- Use exactly one of C{svnurl} and C{baseURL}.
-
- @param baseURL: if branches are enabled, this is the base URL to
- which a branch name will be appended. It should
- probably end in a slash. Use exactly one of
- C{svnurl} and C{baseURL}.
-
- @param defaultBranch: if branches are enabled, this is the branch
- to use if the Build does not specify one
- explicitly. It will simply be appended
- to C{baseURL} and the result handed to
- the SVN command.
-
- @param username: username to pass to svn's --username
- @param password: username to pass to svn's --password
- """
-
- if not kwargs.has_key('workdir') and directory is not None:
- # deal with old configs
- warn("Please use workdir=, not directory=", DeprecationWarning)
- kwargs['workdir'] = directory
-
- self.svnurl = svnurl
- self.baseURL = baseURL
- self.branch = defaultBranch
- self.username = username
- self.password = password
-
- Source.__init__(self, **kwargs)
- self.addFactoryArguments(svnurl=svnurl,
- baseURL=baseURL,
- defaultBranch=defaultBranch,
- directory=directory,
- username=username,
- password=password,
- )
-
- if not svnurl and not baseURL:
- raise ValueError("you must use exactly one of svnurl and baseURL")
-
-
- def computeSourceRevision(self, changes):
- if not changes or None in [c.revision for c in changes]:
- return None
- lastChange = max([int(c.revision) for c in changes])
- return lastChange
-
- def startVC(self, branch, revision, patch):
-
- # handle old slaves
- warnings = []
- slavever = self.slaveVersion("svn", "old")
- if not slavever:
- m = "slave does not have the 'svn' command"
- raise BuildSlaveTooOldError(m)
-
- if self.slaveVersionIsOlderThan("svn", "1.39"):
- # the slave doesn't know to avoid re-using the same sourcedir
- # when the branch changes. We have no way of knowing which branch
- # the last build used, so if we're using a non-default branch and
- # either 'update' or 'copy' modes, it is safer to refuse to
- # build, and tell the user they need to upgrade the buildslave.
- if (branch != self.branch
- and self.args['mode'] in ("update", "copy")):
- m = ("This buildslave (%s) does not know about multiple "
- "branches, and using mode=%s would probably build the "
- "wrong tree. "
- "Refusing to build. Please upgrade the buildslave to "
- "buildbot-0.7.0 or newer." % (self.build.slavename,
- self.args['mode']))
- raise BuildSlaveTooOldError(m)
-
- if slavever == "old":
- # 0.5.0 compatibility
- if self.args['mode'] in ("clobber", "copy"):
- # TODO: use some shell commands to make up for the
- # deficiency, by blowing away the old directory first (thus
- # forcing a full checkout)
- warnings.append("WARNING: this slave can only do SVN updates"
- ", not mode=%s\n" % self.args['mode'])
- log.msg("WARNING: this slave only does mode=update")
- if self.args['mode'] == "export":
- raise BuildSlaveTooOldError("old slave does not have "
- "mode=export")
- self.args['directory'] = self.args['workdir']
- if revision is not None:
- # 0.5.0 can only do HEAD. We have no way of knowing whether
- # the requested revision is HEAD or not, and for
- # slowly-changing trees this will probably do the right
- # thing, so let it pass with a warning
- m = ("WARNING: old slave can only update to HEAD, not "
- "revision=%s" % revision)
- log.msg(m)
- warnings.append(m + "\n")
- revision = "HEAD" # interprets this key differently
- if patch:
- raise BuildSlaveTooOldError("old slave can't do patch")
-
- if self.svnurl:
- assert not branch # we need baseURL= to use branches
- self.args['svnurl'] = self.svnurl
- else:
- self.args['svnurl'] = self.baseURL + branch
- self.args['revision'] = revision
- self.args['patch'] = patch
-
- if self.username is not None or self.password is not None:
- if self.slaveVersionIsOlderThan("svn", "2.8"):
- m = ("This buildslave (%s) does not support svn usernames "
- "and passwords. "
- "Refusing to build. Please upgrade the buildslave to "
- "buildbot-0.7.10 or newer." % (self.build.slavename,))
- raise BuildSlaveTooOldError(m)
- if self.username is not None: self.args['username'] = self.username
- if self.password is not None: self.args['password'] = self.password
-
- revstuff = []
- if branch is not None and branch != self.branch:
- revstuff.append("[branch]")
- if revision is not None:
- revstuff.append("r%s" % revision)
- if patch is not None:
- revstuff.append("[patch]")
- self.description.extend(revstuff)
- self.descriptionDone.extend(revstuff)
-
- cmd = LoggedRemoteCommand("svn", self.args)
- self.startCommand(cmd, warnings)
-
-
-class Darcs(Source):
- """Check out a source tree from a Darcs repository at 'repourl'.
-
- Darcs has no concept of file modes. This means the eXecute-bit will be
- cleared on all source files. As a result, you may need to invoke
- configuration scripts with something like:
-
- C{s(step.Configure, command=['/bin/sh', './configure'])}
- """
-
- name = "darcs"
-
- def __init__(self, repourl=None, baseURL=None, defaultBranch=None,
- **kwargs):
- """
- @type repourl: string
- @param repourl: the URL which points at the Darcs repository. This
- is used as the default branch. Using C{repourl} does
- not enable builds of alternate branches: use
- C{baseURL} to enable this. Use either C{repourl} or
- C{baseURL}, not both.
-
- @param baseURL: if branches are enabled, this is the base URL to
- which a branch name will be appended. It should
- probably end in a slash. Use exactly one of
- C{repourl} and C{baseURL}.
-
- @param defaultBranch: if branches are enabled, this is the branch
- to use if the Build does not specify one
- explicitly. It will simply be appended to
- C{baseURL} and the result handed to the
- 'darcs pull' command.
- """
- self.repourl = repourl
- self.baseURL = baseURL
- self.branch = defaultBranch
- Source.__init__(self, **kwargs)
- self.addFactoryArguments(repourl=repourl,
- baseURL=baseURL,
- defaultBranch=defaultBranch,
- )
- assert self.args['mode'] != "export", \
- "Darcs does not have an 'export' mode"
- if (not repourl and not baseURL) or (repourl and baseURL):
- raise ValueError("you must provide exactly one of repourl and"
- " baseURL")
-
- def startVC(self, branch, revision, patch):
- slavever = self.slaveVersion("darcs")
- if not slavever:
- m = "slave is too old, does not know about darcs"
- raise BuildSlaveTooOldError(m)
-
- if self.slaveVersionIsOlderThan("darcs", "1.39"):
- if revision:
- # TODO: revisit this once we implement computeSourceRevision
- m = "0.6.6 slaves can't handle args['revision']"
- raise BuildSlaveTooOldError(m)
-
- # the slave doesn't know to avoid re-using the same sourcedir
- # when the branch changes. We have no way of knowing which branch
- # the last build used, so if we're using a non-default branch and
- # either 'update' or 'copy' modes, it is safer to refuse to
- # build, and tell the user they need to upgrade the buildslave.
- if (branch != self.branch
- and self.args['mode'] in ("update", "copy")):
- m = ("This buildslave (%s) does not know about multiple "
- "branches, and using mode=%s would probably build the "
- "wrong tree. "
- "Refusing to build. Please upgrade the buildslave to "
- "buildbot-0.7.0 or newer." % (self.build.slavename,
- self.args['mode']))
- raise BuildSlaveTooOldError(m)
-
- if self.repourl:
- assert not branch # we need baseURL= to use branches
- self.args['repourl'] = self.repourl
- else:
- self.args['repourl'] = self.baseURL + branch
- self.args['revision'] = revision
- self.args['patch'] = patch
-
- revstuff = []
- if branch is not None and branch != self.branch:
- revstuff.append("[branch]")
- self.description.extend(revstuff)
- self.descriptionDone.extend(revstuff)
-
- cmd = LoggedRemoteCommand("darcs", self.args)
- self.startCommand(cmd)
-
-
-class Git(Source):
- """Check out a source tree from a git repository 'repourl'."""
-
- name = "git"
-
- def __init__(self, repourl, branch="master", **kwargs):
- """
- @type repourl: string
- @param repourl: the URL which points at the git repository
-
- @type branch: string
- @param branch: The branch or tag to check out by default. If
- a build specifies a different branch, it will
- be used instead of this.
- """
- Source.__init__(self, **kwargs)
- self.addFactoryArguments(repourl=repourl, branch=branch)
- self.args.update({'repourl': repourl,
- 'branch': branch})
-
- def computeSourceRevision(self, changes):
- if not changes:
- return None
- return changes[-1].revision
-
- def startVC(self, branch, revision, patch):
- if branch is not None:
- self.args['branch'] = branch
-
- self.args['revision'] = revision
- self.args['patch'] = patch
- slavever = self.slaveVersion("git")
- if not slavever:
- raise BuildSlaveTooOldError("slave is too old, does not know "
- "about git")
- cmd = LoggedRemoteCommand("git", self.args)
- self.startCommand(cmd)
-
-
-class Arch(Source):
- """Check out a source tree from an Arch repository named 'archive'
- available at 'url'. 'version' specifies which version number (development
- line) will be used for the checkout: this is mostly equivalent to a
- branch name. This version uses the 'tla' tool to do the checkout, to use
- 'baz' see L{Bazaar} instead.
- """
-
- name = "arch"
- # TODO: slaves >0.6.6 will accept args['build-config'], so use it
-
- def __init__(self, url, version, archive=None, **kwargs):
- """
- @type url: string
- @param url: the Arch coordinates of the repository. This is
- typically an http:// URL, but could also be the absolute
- pathname of a local directory instead.
-
- @type version: string
- @param version: the category--branch--version to check out. This is
- the default branch. If a build specifies a different
- branch, it will be used instead of this.
-
- @type archive: string
- @param archive: The archive name. If provided, it must match the one
- that comes from the repository. If not, the
- repository's default will be used.
- """
- self.branch = version
- Source.__init__(self, **kwargs)
- self.addFactoryArguments(url=url,
- version=version,
- archive=archive,
- )
- self.args.update({'url': url,
- 'archive': archive,
- })
-
- def computeSourceRevision(self, changes):
- # in Arch, fully-qualified revision numbers look like:
- # arch@buildbot.sourceforge.net--2004/buildbot--dev--0--patch-104
- # For any given builder, all of this is fixed except the patch-104.
- # The Change might have any part of the fully-qualified string, so we
- # just look for the last part. We return the "patch-NN" string.
- if not changes:
- return None
- lastChange = None
- for c in changes:
- if not c.revision:
- continue
- if c.revision.endswith("--base-0"):
- rev = 0
- else:
- i = c.revision.rindex("patch")
- rev = int(c.revision[i+len("patch-"):])
- lastChange = max(lastChange, rev)
- if lastChange is None:
- return None
- if lastChange == 0:
- return "base-0"
- return "patch-%d" % lastChange
-
- def checkSlaveVersion(self, cmd, branch):
- warnings = []
- slavever = self.slaveVersion(cmd)
- if not slavever:
- m = "slave is too old, does not know about %s" % cmd
- raise BuildSlaveTooOldError(m)
-
- # slave 1.28 and later understand 'revision'
- if self.slaveVersionIsOlderThan(cmd, "1.28"):
- if not self.alwaysUseLatest:
- # we don't know whether our requested revision is the latest
- # or not. If the tree does not change very quickly, this will
- # probably build the right thing, so emit a warning rather
- # than refuse to build at all
- m = "WARNING, buildslave is too old to use a revision"
- log.msg(m)
- warnings.append(m + "\n")
-
- if self.slaveVersionIsOlderThan(cmd, "1.39"):
- # the slave doesn't know to avoid re-using the same sourcedir
- # when the branch changes. We have no way of knowing which branch
- # the last build used, so if we're using a non-default branch and
- # either 'update' or 'copy' modes, it is safer to refuse to
- # build, and tell the user they need to upgrade the buildslave.
- if (branch != self.branch
- and self.args['mode'] in ("update", "copy")):
- m = ("This buildslave (%s) does not know about multiple "
- "branches, and using mode=%s would probably build the "
- "wrong tree. "
- "Refusing to build. Please upgrade the buildslave to "
- "buildbot-0.7.0 or newer." % (self.build.slavename,
- self.args['mode']))
- log.msg(m)
- raise BuildSlaveTooOldError(m)
-
- return warnings
-
- def startVC(self, branch, revision, patch):
- self.args['version'] = branch
- self.args['revision'] = revision
- self.args['patch'] = patch
- warnings = self.checkSlaveVersion("arch", branch)
-
- revstuff = []
- if branch is not None and branch != self.branch:
- revstuff.append("[branch]")
- if revision is not None:
- revstuff.append("patch%s" % revision)
- self.description.extend(revstuff)
- self.descriptionDone.extend(revstuff)
-
- cmd = LoggedRemoteCommand("arch", self.args)
- self.startCommand(cmd, warnings)
-
-
-class Bazaar(Arch):
- """Bazaar is an alternative client for Arch repositories. baz is mostly
- compatible with tla, but archive registration is slightly different."""
-
- # TODO: slaves >0.6.6 will accept args['build-config'], so use it
-
- def __init__(self, url, version, archive, **kwargs):
- """
- @type url: string
- @param url: the Arch coordinates of the repository. This is
- typically an http:// URL, but could also be the absolute
- pathname of a local directory instead.
-
- @type version: string
- @param version: the category--branch--version to check out
-
- @type archive: string
- @param archive: The archive name (required). This must always match
- the one that comes from the repository, otherwise the
- buildslave will attempt to get sources from the wrong
- archive.
- """
- self.branch = version
- Source.__init__(self, **kwargs)
- self.addFactoryArguments(url=url,
- version=version,
- archive=archive,
- )
- self.args.update({'url': url,
- 'archive': archive,
- })
-
- def startVC(self, branch, revision, patch):
- self.args['version'] = branch
- self.args['revision'] = revision
- self.args['patch'] = patch
- warnings = self.checkSlaveVersion("bazaar", branch)
-
- revstuff = []
- if branch is not None and branch != self.branch:
- revstuff.append("[branch]")
- if revision is not None:
- revstuff.append("patch%s" % revision)
- self.description.extend(revstuff)
- self.descriptionDone.extend(revstuff)
-
- cmd = LoggedRemoteCommand("bazaar", self.args)
- self.startCommand(cmd, warnings)
-
-class Bzr(Source):
- """Check out a source tree from a bzr (Bazaar) repository at 'repourl'.
-
- """
-
- name = "bzr"
-
- def __init__(self, repourl=None, baseURL=None, defaultBranch=None,
- **kwargs):
- """
- @type repourl: string
- @param repourl: the URL which points at the bzr repository. This
- is used as the default branch. Using C{repourl} does
- not enable builds of alternate branches: use
- C{baseURL} to enable this. Use either C{repourl} or
- C{baseURL}, not both.
-
- @param baseURL: if branches are enabled, this is the base URL to
- which a branch name will be appended. It should
- probably end in a slash. Use exactly one of
- C{repourl} and C{baseURL}.
-
- @param defaultBranch: if branches are enabled, this is the branch
- to use if the Build does not specify one
- explicitly. It will simply be appended to
- C{baseURL} and the result handed to the
- 'bzr checkout pull' command.
- """
- self.repourl = repourl
- self.baseURL = baseURL
- self.branch = defaultBranch
- Source.__init__(self, **kwargs)
- self.addFactoryArguments(repourl=repourl,
- baseURL=baseURL,
- defaultBranch=defaultBranch,
- )
- if (not repourl and not baseURL) or (repourl and baseURL):
- raise ValueError("you must provide exactly one of repourl and"
- " baseURL")
-
- def computeSourceRevision(self, changes):
- if not changes:
- return None
- lastChange = max([int(c.revision) for c in changes])
- return lastChange
-
- def startVC(self, branch, revision, patch):
- slavever = self.slaveVersion("bzr")
- if not slavever:
- m = "slave is too old, does not know about bzr"
- raise BuildSlaveTooOldError(m)
-
- if self.repourl:
- assert not branch # we need baseURL= to use branches
- self.args['repourl'] = self.repourl
- else:
- self.args['repourl'] = self.baseURL + branch
- self.args['revision'] = revision
- self.args['patch'] = patch
-
- revstuff = []
- if branch is not None and branch != self.branch:
- revstuff.append("[branch]")
- self.description.extend(revstuff)
- self.descriptionDone.extend(revstuff)
-
- cmd = LoggedRemoteCommand("bzr", self.args)
- self.startCommand(cmd)
-
-
-class Mercurial(Source):
- """Check out a source tree from a mercurial repository 'repourl'."""
-
- name = "hg"
-
- def __init__(self, repourl=None, baseURL=None, defaultBranch=None,
- branchType='dirname', **kwargs):
- """
- @type repourl: string
- @param repourl: the URL which points at the Mercurial repository.
- This uses the 'default' branch unless defaultBranch is
- specified below and the C{branchType} is set to
- 'inrepo'. It is an error to specify a branch without
- setting the C{branchType} to 'inrepo'.
-
- @param baseURL: if 'dirname' branches are enabled, this is the base URL
- to which a branch name will be appended. It should
- probably end in a slash. Use exactly one of C{repourl}
- and C{baseURL}.
-
- @param defaultBranch: if branches are enabled, this is the branch
- to use if the Build does not specify one
- explicitly.
- For 'dirname' branches, It will simply be
- appended to C{baseURL} and the result handed to
- the 'hg update' command.
- For 'inrepo' branches, this specifies the named
- revision to which the tree will update after a
- clone.
-
- @param branchType: either 'dirname' or 'inrepo' depending on whether
- the branch name should be appended to the C{baseURL}
- or the branch is a mercurial named branch and can be
- found within the C{repourl}
- """
- self.repourl = repourl
- self.baseURL = baseURL
- self.branch = defaultBranch
- self.branchType = branchType
- Source.__init__(self, **kwargs)
- self.addFactoryArguments(repourl=repourl,
- baseURL=baseURL,
- defaultBranch=defaultBranch,
- branchType=branchType,
- )
- if (not repourl and not baseURL) or (repourl and baseURL):
- raise ValueError("you must provide exactly one of repourl and"
- " baseURL")
-
- def startVC(self, branch, revision, patch):
- slavever = self.slaveVersion("hg")
- if not slavever:
- raise BuildSlaveTooOldError("slave is too old, does not know "
- "about hg")
-
- if self.repourl:
- # we need baseURL= to use dirname branches
- assert self.branchType == 'inrepo' or not branch
- self.args['repourl'] = self.repourl
- if branch:
- self.args['branch'] = branch
- else:
- self.args['repourl'] = self.baseURL + branch
- self.args['revision'] = revision
- self.args['patch'] = patch
-
- revstuff = []
- if branch is not None and branch != self.branch:
- revstuff.append("[branch]")
- self.description.extend(revstuff)
- self.descriptionDone.extend(revstuff)
-
- cmd = LoggedRemoteCommand("hg", self.args)
- self.startCommand(cmd)
-
- def computeSourceRevision(self, changes):
- if not changes:
- return None
- # without knowing the revision ancestry graph, we can't sort the
- # changes at all. So for now, assume they were given to us in sorted
- # order, and just pay attention to the last one. See ticket #103 for
- # more details.
- if len(changes) > 1:
- log.msg("Mercurial.computeSourceRevision: warning: "
- "there are %d changes here, assuming the last one is "
- "the most recent" % len(changes))
- return changes[-1].revision
-
-
-class P4(Source):
- """ P4 is a class for accessing perforce revision control"""
- name = "p4"
-
- def __init__(self, p4base, defaultBranch=None, p4port=None, p4user=None,
- p4passwd=None, p4extra_views=[],
- p4client='buildbot_%(slave)s_%(builder)s', **kwargs):
- """
- @type p4base: string
- @param p4base: A view into a perforce depot, typically
- "//depot/proj/"
-
- @type defaultBranch: string
- @param defaultBranch: Identify a branch to build by default. Perforce
- is a view based branching system. So, the branch
- is normally the name after the base. For example,
- branch=1.0 is view=//depot/proj/1.0/...
- branch=1.1 is view=//depot/proj/1.1/...
-
- @type p4port: string
- @param p4port: Specify the perforce server to connection in the format
- <host>:<port>. Example "perforce.example.com:1666"
-
- @type p4user: string
- @param p4user: The perforce user to run the command as.
-
- @type p4passwd: string
- @param p4passwd: The password for the perforce user.
-
- @type p4extra_views: list of tuples
- @param p4extra_views: Extra views to be added to
- the client that is being used.
-
- @type p4client: string
- @param p4client: The perforce client to use for this buildslave.
- """
-
- self.branch = defaultBranch
- Source.__init__(self, **kwargs)
- self.addFactoryArguments(p4base=p4base,
- defaultBranch=defaultBranch,
- p4port=p4port,
- p4user=p4user,
- p4passwd=p4passwd,
- p4extra_views=p4extra_views,
- p4client=p4client,
- )
- self.args['p4port'] = p4port
- self.args['p4user'] = p4user
- self.args['p4passwd'] = p4passwd
- self.args['p4base'] = p4base
- self.args['p4extra_views'] = p4extra_views
- self.p4client = p4client
-
- def setBuild(self, build):
- Source.setBuild(self, build)
- self.args['p4client'] = self.p4client % {
- 'slave': build.slavename,
- 'builder': build.builder.name,
- }
-
- def computeSourceRevision(self, changes):
- if not changes:
- return None
- lastChange = max([int(c.revision) for c in changes])
- return lastChange
-
- def startVC(self, branch, revision, patch):
- slavever = self.slaveVersion("p4")
- assert slavever, "slave is too old, does not know about p4"
- args = dict(self.args)
- args['branch'] = branch or self.branch
- args['revision'] = revision
- args['patch'] = patch
- cmd = LoggedRemoteCommand("p4", args)
- self.startCommand(cmd)
-
-class P4Sync(Source):
- """This is a partial solution for using a P4 source repository. You are
- required to manually set up each build slave with a useful P4
- environment, which means setting various per-slave environment variables,
- and creating a P4 client specification which maps the right files into
- the slave's working directory. Once you have done that, this step merely
- performs a 'p4 sync' to update that workspace with the newest files.
-
- Each slave needs the following environment:
-
- - PATH: the 'p4' binary must be on the slave's PATH
- - P4USER: each slave needs a distinct user account
- - P4CLIENT: each slave needs a distinct client specification
-
- You should use 'p4 client' (?) to set up a client view spec which maps
- the desired files into $SLAVEBASE/$BUILDERBASE/source .
- """
-
- name = "p4sync"
-
- def __init__(self, p4port, p4user, p4passwd, p4client, **kwargs):
- assert kwargs['mode'] == "copy", "P4Sync can only be used in mode=copy"
- self.branch = None
- Source.__init__(self, **kwargs)
- self.addFactoryArguments(p4port=p4port,
- p4user=p4user,
- p4passwd=p4passwd,
- p4client=p4client,
- )
- self.args['p4port'] = p4port
- self.args['p4user'] = p4user
- self.args['p4passwd'] = p4passwd
- self.args['p4client'] = p4client
-
- def computeSourceRevision(self, changes):
- if not changes:
- return None
- lastChange = max([int(c.revision) for c in changes])
- return lastChange
-
- def startVC(self, branch, revision, patch):
- slavever = self.slaveVersion("p4sync")
- assert slavever, "slave is too old, does not know about p4"
- cmd = LoggedRemoteCommand("p4sync", self.args)
- self.startCommand(cmd)
-
-class Monotone(Source):
- """Check out a revision from a monotone server at 'server_addr',
- branch 'branch'. 'revision' specifies which revision id to check
- out.
-
- This step will first create a local database, if necessary, and then pull
- the contents of the server into the database. Then it will do the
- checkout/update from this database."""
-
- name = "monotone"
-
- def __init__(self, server_addr, branch, db_path="monotone.db",
- monotone="monotone",
- **kwargs):
- Source.__init__(self, **kwargs)
- self.addFactoryArguments(server_addr=server_addr,
- branch=branch,
- db_path=db_path,
- monotone=monotone,
- )
- self.args.update({"server_addr": server_addr,
- "branch": branch,
- "db_path": db_path,
- "monotone": monotone})
-
- def computeSourceRevision(self, changes):
- if not changes:
- return None
- return changes[-1].revision
-
- def startVC(self):
- slavever = self.slaveVersion("monotone")
- assert slavever, "slave is too old, does not know about monotone"
- cmd = LoggedRemoteCommand("monotone", self.args)
- self.startCommand(cmd)
-
diff --git a/buildbot/buildbot/steps/transfer.py b/buildbot/buildbot/steps/transfer.py
deleted file mode 100644
index 3e23f88..0000000
--- a/buildbot/buildbot/steps/transfer.py
+++ /dev/null
@@ -1,465 +0,0 @@
-# -*- test-case-name: buildbot.test.test_transfer -*-
-
-import os.path
-from twisted.internet import reactor
-from twisted.spread import pb
-from twisted.python import log
-from buildbot.process.buildstep import RemoteCommand, BuildStep
-from buildbot.process.buildstep import SUCCESS, FAILURE
-from buildbot.interfaces import BuildSlaveTooOldError
-
-
-class _FileWriter(pb.Referenceable):
- """
- Helper class that acts as a file-object with write access
- """
-
- def __init__(self, destfile, maxsize, mode):
- # Create missing directories.
- destfile = os.path.abspath(destfile)
- dirname = os.path.dirname(destfile)
- if not os.path.exists(dirname):
- os.makedirs(dirname)
-
- self.destfile = destfile
- self.fp = open(destfile, "wb")
- if mode is not None:
- os.chmod(destfile, mode)
- self.remaining = maxsize
-
- def remote_write(self, data):
- """
- Called from remote slave to write L{data} to L{fp} within boundaries
- of L{maxsize}
-
- @type data: C{string}
- @param data: String of data to write
- """
- if self.remaining is not None:
- if len(data) > self.remaining:
- data = data[:self.remaining]
- self.fp.write(data)
- self.remaining = self.remaining - len(data)
- else:
- self.fp.write(data)
-
- def remote_close(self):
- """
- Called by remote slave to state that no more data will be transfered
- """
- self.fp.close()
- self.fp = None
-
- def __del__(self):
- # unclean shutdown, the file is probably truncated, so delete it
- # altogether rather than deliver a corrupted file
- fp = getattr(self, "fp", None)
- if fp:
- fp.close()
- os.unlink(self.destfile)
-
-
-class _DirectoryWriter(pb.Referenceable):
- """
- Helper class that acts as a directory-object with write access
- """
-
- def __init__(self, destroot, maxsize, mode):
- self.destroot = destroot
- # Create missing directories.
- self.destroot = os.path.abspath(self.destroot)
- if not os.path.exists(self.destroot):
- os.makedirs(self.destroot)
-
- self.fp = None
- self.mode = mode
- self.maxsize = maxsize
-
- def remote_createdir(self, dirname):
- # This function is needed to transfer empty directories.
- dirname = os.path.join(self.destroot, dirname)
- dirname = os.path.abspath(dirname)
- if not os.path.exists(dirname):
- os.makedirs(dirname)
-
- def remote_open(self, destfile):
- # Create missing directories.
- destfile = os.path.join(self.destroot, destfile)
- destfile = os.path.abspath(destfile)
- dirname = os.path.dirname(destfile)
- if not os.path.exists(dirname):
- os.makedirs(dirname)
-
- self.fp = open(destfile, "wb")
- if self.mode is not None:
- os.chmod(destfile, self.mode)
- self.remaining = self.maxsize
-
- def remote_write(self, data):
- """
- Called from remote slave to write L{data} to L{fp} within boundaries
- of L{maxsize}
-
- @type data: C{string}
- @param data: String of data to write
- """
- if self.remaining is not None:
- if len(data) > self.remaining:
- data = data[:self.remaining]
- self.fp.write(data)
- self.remaining = self.remaining - len(data)
- else:
- self.fp.write(data)
-
- def remote_close(self):
- """
- Called by remote slave to state that no more data will be transfered
- """
- if self.fp:
- self.fp.close()
- self.fp = None
-
- def __del__(self):
- # unclean shutdown, the file is probably truncated, so delete it
- # altogether rather than deliver a corrupted file
- fp = getattr(self, "fp", None)
- if fp:
- fp.close()
-
-
-class StatusRemoteCommand(RemoteCommand):
- def __init__(self, remote_command, args):
- RemoteCommand.__init__(self, remote_command, args)
-
- self.rc = None
- self.stderr = ''
-
- def remoteUpdate(self, update):
- #log.msg('StatusRemoteCommand: update=%r' % update)
- if 'rc' in update:
- self.rc = update['rc']
- if 'stderr' in update:
- self.stderr = self.stderr + update['stderr'] + '\n'
-
-class _TransferBuildStep(BuildStep):
- """
- Base class for FileUpload and FileDownload to factor out common
- functionality.
- """
- DEFAULT_WORKDIR = "build" # is this redundant?
-
- def setDefaultWorkdir(self, workdir):
- if self.workdir is None:
- self.workdir = workdir
-
- def _getWorkdir(self):
- properties = self.build.getProperties()
- if self.workdir is None:
- workdir = self.DEFAULT_WORKDIR
- else:
- workdir = self.workdir
- return properties.render(workdir)
-
-
-class FileUpload(_TransferBuildStep):
- """
- Build step to transfer a file from the slave to the master.
-
- arguments:
-
- - ['slavesrc'] filename of source file at slave, relative to workdir
- - ['masterdest'] filename of destination file at master
- - ['workdir'] string with slave working directory relative to builder
- base dir, default 'build'
- - ['maxsize'] maximum size of the file, default None (=unlimited)
- - ['blocksize'] maximum size of each block being transfered
- - ['mode'] file access mode for the resulting master-side file.
- The default (=None) is to leave it up to the umask of
- the buildmaster process.
-
- """
-
- name = 'upload'
-
- def __init__(self, slavesrc, masterdest,
- workdir=None, maxsize=None, blocksize=16*1024, mode=None,
- **buildstep_kwargs):
- BuildStep.__init__(self, **buildstep_kwargs)
- self.addFactoryArguments(slavesrc=slavesrc,
- masterdest=masterdest,
- workdir=workdir,
- maxsize=maxsize,
- blocksize=blocksize,
- mode=mode,
- )
-
- self.slavesrc = slavesrc
- self.masterdest = masterdest
- self.workdir = workdir
- self.maxsize = maxsize
- self.blocksize = blocksize
- assert isinstance(mode, (int, type(None)))
- self.mode = mode
-
- def start(self):
- version = self.slaveVersion("uploadFile")
- properties = self.build.getProperties()
-
- if not version:
- m = "slave is too old, does not know about uploadFile"
- raise BuildSlaveTooOldError(m)
-
- source = properties.render(self.slavesrc)
- masterdest = properties.render(self.masterdest)
- # we rely upon the fact that the buildmaster runs chdir'ed into its
- # basedir to make sure that relative paths in masterdest are expanded
- # properly. TODO: maybe pass the master's basedir all the way down
- # into the BuildStep so we can do this better.
- masterdest = os.path.expanduser(masterdest)
- log.msg("FileUpload started, from slave %r to master %r"
- % (source, masterdest))
-
- self.step_status.setText(['uploading', os.path.basename(source)])
-
- # we use maxsize to limit the amount of data on both sides
- fileWriter = _FileWriter(masterdest, self.maxsize, self.mode)
-
- # default arguments
- args = {
- 'slavesrc': source,
- 'workdir': self._getWorkdir(),
- 'writer': fileWriter,
- 'maxsize': self.maxsize,
- 'blocksize': self.blocksize,
- }
-
- self.cmd = StatusRemoteCommand('uploadFile', args)
- d = self.runCommand(self.cmd)
- d.addCallback(self.finished).addErrback(self.failed)
-
- def finished(self, result):
- if self.cmd.stderr != '':
- self.addCompleteLog('stderr', self.cmd.stderr)
-
- if self.cmd.rc is None or self.cmd.rc == 0:
- return BuildStep.finished(self, SUCCESS)
- return BuildStep.finished(self, FAILURE)
-
-
-class DirectoryUpload(BuildStep):
- """
- Build step to transfer a directory from the slave to the master.
-
- arguments:
-
- - ['slavesrc'] name of source directory at slave, relative to workdir
- - ['masterdest'] name of destination directory at master
- - ['workdir'] string with slave working directory relative to builder
- base dir, default 'build'
- - ['maxsize'] maximum size of each file, default None (=unlimited)
- - ['blocksize'] maximum size of each block being transfered
- - ['mode'] file access mode for the resulting master-side file.
- The default (=None) is to leave it up to the umask of
- the buildmaster process.
-
- """
-
- name = 'upload'
-
- def __init__(self, slavesrc, masterdest,
- workdir="build", maxsize=None, blocksize=16*1024, mode=None,
- **buildstep_kwargs):
- BuildStep.__init__(self, **buildstep_kwargs)
- self.addFactoryArguments(slavesrc=slavesrc,
- masterdest=masterdest,
- workdir=workdir,
- maxsize=maxsize,
- blocksize=blocksize,
- mode=mode,
- )
-
- self.slavesrc = slavesrc
- self.masterdest = masterdest
- self.workdir = workdir
- self.maxsize = maxsize
- self.blocksize = blocksize
- assert isinstance(mode, (int, type(None)))
- self.mode = mode
-
- def start(self):
- version = self.slaveVersion("uploadDirectory")
- properties = self.build.getProperties()
-
- if not version:
- m = "slave is too old, does not know about uploadDirectory"
- raise BuildSlaveTooOldError(m)
-
- source = properties.render(self.slavesrc)
- masterdest = properties.render(self.masterdest)
- # we rely upon the fact that the buildmaster runs chdir'ed into its
- # basedir to make sure that relative paths in masterdest are expanded
- # properly. TODO: maybe pass the master's basedir all the way down
- # into the BuildStep so we can do this better.
- masterdest = os.path.expanduser(masterdest)
- log.msg("DirectoryUpload started, from slave %r to master %r"
- % (source, masterdest))
-
- self.step_status.setText(['uploading', os.path.basename(source)])
-
- # we use maxsize to limit the amount of data on both sides
- dirWriter = _DirectoryWriter(masterdest, self.maxsize, self.mode)
-
- # default arguments
- args = {
- 'slavesrc': source,
- 'workdir': self.workdir,
- 'writer': dirWriter,
- 'maxsize': self.maxsize,
- 'blocksize': self.blocksize,
- }
-
- self.cmd = StatusRemoteCommand('uploadDirectory', args)
- d = self.runCommand(self.cmd)
- d.addCallback(self.finished).addErrback(self.failed)
-
- def finished(self, result):
- if self.cmd.stderr != '':
- self.addCompleteLog('stderr', self.cmd.stderr)
-
- if self.cmd.rc is None or self.cmd.rc == 0:
- return BuildStep.finished(self, SUCCESS)
- return BuildStep.finished(self, FAILURE)
-
-
-
-
-class _FileReader(pb.Referenceable):
- """
- Helper class that acts as a file-object with read access
- """
-
- def __init__(self, fp):
- self.fp = fp
-
- def remote_read(self, maxlength):
- """
- Called from remote slave to read at most L{maxlength} bytes of data
-
- @type maxlength: C{integer}
- @param maxlength: Maximum number of data bytes that can be returned
-
- @return: Data read from L{fp}
- @rtype: C{string} of bytes read from file
- """
- if self.fp is None:
- return ''
-
- data = self.fp.read(maxlength)
- return data
-
- def remote_close(self):
- """
- Called by remote slave to state that no more data will be transfered
- """
- if self.fp is not None:
- self.fp.close()
- self.fp = None
-
-
-class FileDownload(_TransferBuildStep):
- """
- Download the first 'maxsize' bytes of a file, from the buildmaster to the
- buildslave. Set the mode of the file
-
- Arguments::
-
- ['mastersrc'] filename of source file at master
- ['slavedest'] filename of destination file at slave
- ['workdir'] string with slave working directory relative to builder
- base dir, default 'build'
- ['maxsize'] maximum size of the file, default None (=unlimited)
- ['blocksize'] maximum size of each block being transfered
- ['mode'] use this to set the access permissions of the resulting
- buildslave-side file. This is traditionally an octal
- integer, like 0644 to be world-readable (but not
- world-writable), or 0600 to only be readable by
- the buildslave account, or 0755 to be world-executable.
- The default (=None) is to leave it up to the umask of
- the buildslave process.
-
- """
- name = 'download'
-
- def __init__(self, mastersrc, slavedest,
- workdir=None, maxsize=None, blocksize=16*1024, mode=None,
- **buildstep_kwargs):
- BuildStep.__init__(self, **buildstep_kwargs)
- self.addFactoryArguments(mastersrc=mastersrc,
- slavedest=slavedest,
- workdir=workdir,
- maxsize=maxsize,
- blocksize=blocksize,
- mode=mode,
- )
-
- self.mastersrc = mastersrc
- self.slavedest = slavedest
- self.workdir = workdir
- self.maxsize = maxsize
- self.blocksize = blocksize
- assert isinstance(mode, (int, type(None)))
- self.mode = mode
-
- def start(self):
- properties = self.build.getProperties()
-
- version = self.slaveVersion("downloadFile")
- if not version:
- m = "slave is too old, does not know about downloadFile"
- raise BuildSlaveTooOldError(m)
-
- # we are currently in the buildmaster's basedir, so any non-absolute
- # paths will be interpreted relative to that
- source = os.path.expanduser(properties.render(self.mastersrc))
- slavedest = properties.render(self.slavedest)
- log.msg("FileDownload started, from master %r to slave %r" %
- (source, slavedest))
-
- self.step_status.setText(['downloading', "to",
- os.path.basename(slavedest)])
-
- # setup structures for reading the file
- try:
- fp = open(source, 'rb')
- except IOError:
- # if file does not exist, bail out with an error
- self.addCompleteLog('stderr',
- 'File %r not available at master' % source)
- # TODO: once BuildStep.start() gets rewritten to use
- # maybeDeferred, just re-raise the exception here.
- reactor.callLater(0, BuildStep.finished, self, FAILURE)
- return
- fileReader = _FileReader(fp)
-
- # default arguments
- args = {
- 'slavedest': slavedest,
- 'maxsize': self.maxsize,
- 'reader': fileReader,
- 'blocksize': self.blocksize,
- 'workdir': self._getWorkdir(),
- 'mode': self.mode,
- }
-
- self.cmd = StatusRemoteCommand('downloadFile', args)
- d = self.runCommand(self.cmd)
- d.addCallback(self.finished).addErrback(self.failed)
-
- def finished(self, result):
- if self.cmd.stderr != '':
- self.addCompleteLog('stderr', self.cmd.stderr)
-
- if self.cmd.rc is None or self.cmd.rc == 0:
- return BuildStep.finished(self, SUCCESS)
- return BuildStep.finished(self, FAILURE)
-
diff --git a/buildbot/buildbot/steps/trigger.py b/buildbot/buildbot/steps/trigger.py
deleted file mode 100644
index 7903e70..0000000
--- a/buildbot/buildbot/steps/trigger.py
+++ /dev/null
@@ -1,122 +0,0 @@
-from buildbot.process.buildstep import LoggingBuildStep, SUCCESS, FAILURE, EXCEPTION
-from buildbot.process.properties import Properties
-from buildbot.scheduler import Triggerable
-from twisted.internet import defer
-
-class Trigger(LoggingBuildStep):
- """I trigger a scheduler.Triggerable, to use one or more Builders as if
- they were a single buildstep (like a subroutine call).
- """
- name = "trigger"
-
- flunkOnFailure = True
-
- def __init__(self, schedulerNames=[], updateSourceStamp=True,
- waitForFinish=False, set_properties={}, **kwargs):
- """
- Trigger the given schedulers when this step is executed.
-
- @param schedulerNames: A list of scheduler names that should be
- triggered. Schedulers can be specified using
- WithProperties, if desired.
-
- @param updateSourceStamp: If True (the default), I will try to give
- the schedulers an absolute SourceStamp for
- their builds, so that a HEAD build will use
- the same revision even if more changes have
- occurred since my build's update step was
- run. If False, I will use the original
- SourceStamp unmodified.
-
- @param waitForFinish: If False (the default), this step will finish
- as soon as I've started the triggered
- schedulers. If True, I will wait until all of
- the triggered schedulers have finished their
- builds.
-
- @param set_properties: A dictionary of properties to set for any
- builds resulting from this trigger. To copy
- existing properties, use WithProperties. These
- properties will override properties set in the
- Triggered scheduler's constructor.
-
- """
- assert schedulerNames, "You must specify a scheduler to trigger"
- self.schedulerNames = schedulerNames
- self.updateSourceStamp = updateSourceStamp
- self.waitForFinish = waitForFinish
- self.set_properties = set_properties
- self.running = False
- LoggingBuildStep.__init__(self, **kwargs)
- self.addFactoryArguments(schedulerNames=schedulerNames,
- updateSourceStamp=updateSourceStamp,
- waitForFinish=waitForFinish,
- set_properties=set_properties)
-
- def interrupt(self, reason):
- # TODO: this doesn't actually do anything.
- if self.running:
- self.step_status.setText(["interrupted"])
-
- def start(self):
- properties = self.build.getProperties()
-
- # make a new properties object from a dict rendered by the old
- # properties object
- props_to_set = Properties()
- props_to_set.update(properties.render(self.set_properties), "Trigger")
-
- self.running = True
- ss = self.build.getSourceStamp()
- if self.updateSourceStamp:
- got = properties.getProperty('got_revision')
- if got:
- ss = ss.getAbsoluteSourceStamp(got)
-
- # (is there an easier way to find the BuildMaster?)
- all_schedulers = self.build.builder.botmaster.parent.allSchedulers()
- all_schedulers = dict([(sch.name, sch) for sch in all_schedulers])
- unknown_schedulers = []
- triggered_schedulers = []
-
- # TODO: don't fire any schedulers if we discover an unknown one
- dl = []
- for scheduler in self.schedulerNames:
- scheduler = properties.render(scheduler)
- if all_schedulers.has_key(scheduler):
- sch = all_schedulers[scheduler]
- if isinstance(sch, Triggerable):
- dl.append(sch.trigger(ss, set_props=props_to_set))
- triggered_schedulers.append(scheduler)
- else:
- unknown_schedulers.append(scheduler)
- else:
- unknown_schedulers.append(scheduler)
-
- if unknown_schedulers:
- self.step_status.setText(['no scheduler:'] + unknown_schedulers)
- rc = FAILURE
- else:
- rc = SUCCESS
- self.step_status.setText(['triggered'] + triggered_schedulers)
-
- if self.waitForFinish:
- d = defer.DeferredList(dl, consumeErrors=1)
- else:
- d = defer.succeed([])
-
- def cb(rclist):
- rc = SUCCESS # (this rc is not the same variable as that above)
- for was_cb, buildsetstatus in rclist:
- # TODO: make this algo more configurable
- if not was_cb:
- rc = EXCEPTION
- break
- if buildsetstatus.getResults() == FAILURE:
- rc = FAILURE
- return self.finished(rc)
-
- def eb(why):
- return self.finished(FAILURE)
-
- d.addCallbacks(cb, eb)
diff --git a/buildbot/buildbot/test/__init__.py b/buildbot/buildbot/test/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/buildbot/buildbot/test/__init__.py
+++ /dev/null
diff --git a/buildbot/buildbot/test/emit.py b/buildbot/buildbot/test/emit.py
deleted file mode 100644
index 1e23e92..0000000
--- a/buildbot/buildbot/test/emit.py
+++ /dev/null
@@ -1,11 +0,0 @@
-
-import os, sys
-
-sys.stdout.write("this is stdout\n")
-sys.stderr.write("this is stderr\n")
-if os.environ.has_key("EMIT_TEST"):
- sys.stdout.write("EMIT_TEST: %s\n" % os.environ["EMIT_TEST"])
-open("log1.out","wt").write("this is log1\n")
-
-rc = int(sys.argv[1])
-sys.exit(rc)
diff --git a/buildbot/buildbot/test/emitlogs.py b/buildbot/buildbot/test/emitlogs.py
deleted file mode 100644
index 1430235..0000000
--- a/buildbot/buildbot/test/emitlogs.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import sys, time, os.path, StringIO
-
-mode = 0
-if len(sys.argv) > 1:
- mode = int(sys.argv[1])
-
-if mode == 0:
- log2 = open("log2.out", "wt")
- log3 = open("log3.out", "wt")
-elif mode == 1:
- # delete the logfiles first, and wait a moment to exercise a failure path
- if os.path.exists("log2.out"):
- os.unlink("log2.out")
- if os.path.exists("log3.out"):
- os.unlink("log3.out")
- time.sleep(2)
- log2 = open("log2.out", "wt")
- log3 = open("log3.out", "wt")
-elif mode == 2:
- # don't create the logfiles at all
- log2 = StringIO.StringIO()
- log3 = StringIO.StringIO()
-
-def write(i):
- log2.write("this is log2 %d\n" % i)
- log2.flush()
- log3.write("this is log3 %d\n" % i)
- log3.flush()
- sys.stdout.write("this is stdout %d\n" % i)
- sys.stdout.flush()
-
-write(0)
-time.sleep(1)
-write(1)
-sys.stdin.read(1)
-write(2)
-
-log2.close()
-log3.close()
-
-sys.exit(0)
-
diff --git a/buildbot/buildbot/test/mail/freshcvs.1 b/buildbot/buildbot/test/mail/freshcvs.1
deleted file mode 100644
index cc8442e..0000000
--- a/buildbot/buildbot/test/mail/freshcvs.1
+++ /dev/null
@@ -1,68 +0,0 @@
-Return-Path: <twisted-commits-admin@twistedmatrix.com>
-Delivered-To: warner-twistedcvs@luther.lothar.com
-Received: (qmail 11151 invoked by uid 1000); 11 Jan 2003 17:10:04 -0000
-Delivered-To: warner-twistedcvs@lothar.com
-Received: (qmail 1548 invoked by uid 13574); 11 Jan 2003 17:06:39 -0000
-Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
- by 130.94.181.6 (qmail-ldap-1.03) with SMTP
- for <warner-twistedcvs@lothar.com>; 11 Jan 2003 17:06:39 -0000
-Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
- by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
- id 18XP0U-0002Mq-00; Sat, 11 Jan 2003 11:01:14 -0600
-Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
- id 18XP02-0002MN-00
- for <twisted-commits@twistedmatrix.com>; Sat, 11 Jan 2003 11:00:46 -0600
-To: twisted-commits@twistedmatrix.com
-From: moshez CVS <moshez@twistedmatrix.com>
-Reply-To: twisted-python@twistedmatrix.com
-X-Mailer: CVSToys
-From: moshez CVS <moshez@twistedmatrix.com>
-Reply-To: twisted-python@twistedmatrix.com
-Message-Id: <E18XP02-0002MN-00@pyramid.twistedmatrix.com>
-Subject: [Twisted-commits] Instance massenger, apparently
-Sender: twisted-commits-admin@twistedmatrix.com
-Errors-To: twisted-commits-admin@twistedmatrix.com
-X-BeenThere: twisted-commits@twistedmatrix.com
-X-Mailman-Version: 2.0.11
-Precedence: bulk
-List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
-List-Post: <mailto:twisted-commits@twistedmatrix.com>
-List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
- <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
-List-Id: <twisted-commits.twistedmatrix.com>
-List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
- <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
-List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
-Date: Sat, 11 Jan 2003 11:00:46 -0600
-Status:
-
-Modified files:
-Twisted/debian/python-twisted.menu.in 1.3 1.4
-
-Log message:
-Instance massenger, apparently
-
-
-ViewCVS links:
-http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/debian/python-twisted.menu.in.diff?r1=text&tr1=1.3&r2=text&tr2=1.4&cvsroot=Twisted
-
-Index: Twisted/debian/python-twisted.menu.in
-diff -u Twisted/debian/python-twisted.menu.in:1.3 Twisted/debian/python-twisted.menu.in:1.4
---- Twisted/debian/python-twisted.menu.in:1.3 Sat Dec 28 10:02:12 2002
-+++ Twisted/debian/python-twisted.menu.in Sat Jan 11 09:00:44 2003
-@@ -1,7 +1,7 @@
- ?package(python@VERSION@-twisted):\
- needs=x11\
- section="Apps/Net"\
--title="Twisted Instant Messenger (@VERSION@)"\
-+title="Twisted Instance Messenger (@VERSION@)"\
- command="/usr/bin/t-im@VERSION@"
-
- ?package(python@VERSION@-twisted):\
-
-.
-
-_______________________________________________
-Twisted-commits mailing list
-Twisted-commits@twistedmatrix.com
-http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
diff --git a/buildbot/buildbot/test/mail/freshcvs.2 b/buildbot/buildbot/test/mail/freshcvs.2
deleted file mode 100644
index ada1311..0000000
--- a/buildbot/buildbot/test/mail/freshcvs.2
+++ /dev/null
@@ -1,101 +0,0 @@
-Return-Path: <twisted-commits-admin@twistedmatrix.com>
-Delivered-To: warner-twistedcvs@luther.lothar.com
-Received: (qmail 32220 invoked by uid 1000); 14 Jan 2003 21:50:04 -0000
-Delivered-To: warner-twistedcvs@lothar.com
-Received: (qmail 7923 invoked by uid 13574); 14 Jan 2003 21:49:48 -0000
-Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
- by 130.94.181.6 (qmail-ldap-1.03) with SMTP
- for <warner-twistedcvs@lothar.com>; 14 Jan 2003 21:49:48 -0000
-Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
- by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
- id 18YYr0-0005en-00; Tue, 14 Jan 2003 15:44:14 -0600
-Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
- id 18YYq7-0005eQ-00
- for <twisted-commits@twistedmatrix.com>; Tue, 14 Jan 2003 15:43:19 -0600
-To: twisted-commits@twistedmatrix.com
-From: itamarst CVS <itamarst@twistedmatrix.com>
-Reply-To: twisted-python@twistedmatrix.com
-X-Mailer: CVSToys
-From: itamarst CVS <itamarst@twistedmatrix.com>
-Reply-To: twisted-python@twistedmatrix.com
-Message-Id: <E18YYq7-0005eQ-00@pyramid.twistedmatrix.com>
-Subject: [Twisted-commits] submit formmethod now subclass of Choice
-Sender: twisted-commits-admin@twistedmatrix.com
-Errors-To: twisted-commits-admin@twistedmatrix.com
-X-BeenThere: twisted-commits@twistedmatrix.com
-X-Mailman-Version: 2.0.11
-Precedence: bulk
-List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
-List-Post: <mailto:twisted-commits@twistedmatrix.com>
-List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
- <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
-List-Id: <twisted-commits.twistedmatrix.com>
-List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
- <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
-List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
-Date: Tue, 14 Jan 2003 15:43:19 -0600
-Status:
-
-Modified files:
-Twisted/twisted/web/woven/form.py 1.20 1.21
-Twisted/twisted/python/formmethod.py 1.12 1.13
-
-Log message:
-submit formmethod now subclass of Choice
-
-
-ViewCVS links:
-http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/twisted/web/woven/form.py.diff?r1=text&tr1=1.20&r2=text&tr2=1.21&cvsroot=Twisted
-http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/twisted/python/formmethod.py.diff?r1=text&tr1=1.12&r2=text&tr2=1.13&cvsroot=Twisted
-
-Index: Twisted/twisted/web/woven/form.py
-diff -u Twisted/twisted/web/woven/form.py:1.20 Twisted/twisted/web/woven/form.py:1.21
---- Twisted/twisted/web/woven/form.py:1.20 Tue Jan 14 12:07:29 2003
-+++ Twisted/twisted/web/woven/form.py Tue Jan 14 13:43:16 2003
-@@ -140,8 +140,8 @@
-
- def input_submit(self, request, content, arg):
- div = content.div()
-- for value in arg.buttons:
-- div.input(type="submit", name=arg.name, value=value)
-+ for tag, value, desc in arg.choices:
-+ div.input(type="submit", name=arg.name, value=tag)
- div.text(" ")
- if arg.reset:
- div.input(type="reset")
-
-Index: Twisted/twisted/python/formmethod.py
-diff -u Twisted/twisted/python/formmethod.py:1.12 Twisted/twisted/python/formmethod.py:1.13
---- Twisted/twisted/python/formmethod.py:1.12 Tue Jan 14 12:07:30 2003
-+++ Twisted/twisted/python/formmethod.py Tue Jan 14 13:43:17 2003
-@@ -180,19 +180,13 @@
- return 1
-
-
--class Submit(Argument):
-+class Submit(Choice):
- """Submit button or a reasonable facsimile thereof."""
-
-- def __init__(self, name, buttons=["Submit"], reset=0, shortDesc=None, longDesc=None):
-- Argument.__init__(self, name, shortDesc=shortDesc, longDesc=longDesc)
-- self.buttons = buttons
-+ def __init__(self, name, choices=[("Submit", "submit", "Submit form")],
-+ reset=0, shortDesc=None, longDesc=None):
-+ Choice.__init__(self, name, choices=choices, shortDesc=shortDesc, longDesc=longDesc)
- self.reset = reset
--
-- def coerce(self, val):
-- if val in self.buttons:
-- return val
-- else:
-- raise InputError, "no such action"
-
-
- class PresentationHint:
-
-.
-
-_______________________________________________
-Twisted-commits mailing list
-Twisted-commits@twistedmatrix.com
-http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
diff --git a/buildbot/buildbot/test/mail/freshcvs.3 b/buildbot/buildbot/test/mail/freshcvs.3
deleted file mode 100644
index f9ff199..0000000
--- a/buildbot/buildbot/test/mail/freshcvs.3
+++ /dev/null
@@ -1,97 +0,0 @@
-Return-Path: <twisted-commits-admin@twistedmatrix.com>
-Delivered-To: warner-twistedcvs@luther.lothar.com
-Received: (qmail 32220 invoked by uid 1000); 14 Jan 2003 21:50:04 -0000
-Delivered-To: warner-twistedcvs@lothar.com
-Received: (qmail 7923 invoked by uid 13574); 14 Jan 2003 21:49:48 -0000
-Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
- by 130.94.181.6 (qmail-ldap-1.03) with SMTP
- for <warner-twistedcvs@lothar.com>; 14 Jan 2003 21:49:48 -0000
-Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
- by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
- id 18YYr0-0005en-00; Tue, 14 Jan 2003 15:44:14 -0600
-Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
- id 18YYq7-0005eQ-00
- for <twisted-commits@twistedmatrix.com>; Tue, 14 Jan 2003 15:43:19 -0600
-To: twisted-commits@twistedmatrix.com
-From: itamarst CVS <itamarst@twistedmatrix.com>
-Reply-To: twisted-python@twistedmatrix.com
-X-Mailer: CVSToys
-From: itamarst CVS <itamarst@twistedmatrix.com>
-Reply-To: twisted-python@twistedmatrix.com
-Message-Id: <E18YYq7-0005eQ-00@pyramid.twistedmatrix.com>
-Subject: [Twisted-commits] submit formmethod now subclass of Choice
-Sender: twisted-commits-admin@twistedmatrix.com
-Errors-To: twisted-commits-admin@twistedmatrix.com
-X-BeenThere: twisted-commits@twistedmatrix.com
-X-Mailman-Version: 2.0.11
-Precedence: bulk
-List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
-List-Post: <mailto:twisted-commits@twistedmatrix.com>
-List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
- <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
-List-Id: <twisted-commits.twistedmatrix.com>
-List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
- <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
-List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
-Date: Tue, 14 Jan 2003 15:43:19 -0600
-Status:
-
-Modified files:
-Twisted/twisted/web/woven/form.py 1.20 1.21
-Twisted/twisted/python/formmethod.py 1.12 1.13
-
-Log message:
-submit formmethod now subclass of Choice
-
-
-Index: Twisted/twisted/web/woven/form.py
-diff -u Twisted/twisted/web/woven/form.py:1.20 Twisted/twisted/web/woven/form.py:1.21
---- Twisted/twisted/web/woven/form.py:1.20 Tue Jan 14 12:07:29 2003
-+++ Twisted/twisted/web/woven/form.py Tue Jan 14 13:43:16 2003
-@@ -140,8 +140,8 @@
-
- def input_submit(self, request, content, arg):
- div = content.div()
-- for value in arg.buttons:
-- div.input(type="submit", name=arg.name, value=value)
-+ for tag, value, desc in arg.choices:
-+ div.input(type="submit", name=arg.name, value=tag)
- div.text(" ")
- if arg.reset:
- div.input(type="reset")
-
-Index: Twisted/twisted/python/formmethod.py
-diff -u Twisted/twisted/python/formmethod.py:1.12 Twisted/twisted/python/formmethod.py:1.13
---- Twisted/twisted/python/formmethod.py:1.12 Tue Jan 14 12:07:30 2003
-+++ Twisted/twisted/python/formmethod.py Tue Jan 14 13:43:17 2003
-@@ -180,19 +180,13 @@
- return 1
-
-
--class Submit(Argument):
-+class Submit(Choice):
- """Submit button or a reasonable facsimile thereof."""
-
-- def __init__(self, name, buttons=["Submit"], reset=0, shortDesc=None, longDesc=None):
-- Argument.__init__(self, name, shortDesc=shortDesc, longDesc=longDesc)
-- self.buttons = buttons
-+ def __init__(self, name, choices=[("Submit", "submit", "Submit form")],
-+ reset=0, shortDesc=None, longDesc=None):
-+ Choice.__init__(self, name, choices=choices, shortDesc=shortDesc, longDesc=longDesc)
- self.reset = reset
--
-- def coerce(self, val):
-- if val in self.buttons:
-- return val
-- else:
-- raise InputError, "no such action"
-
-
- class PresentationHint:
-
-.
-
-_______________________________________________
-Twisted-commits mailing list
-Twisted-commits@twistedmatrix.com
-http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
diff --git a/buildbot/buildbot/test/mail/freshcvs.4 b/buildbot/buildbot/test/mail/freshcvs.4
deleted file mode 100644
index 9e674dc..0000000
--- a/buildbot/buildbot/test/mail/freshcvs.4
+++ /dev/null
@@ -1,45 +0,0 @@
-Return-Path: <twisted-commits-admin@twistedmatrix.com>
-Delivered-To: warner-twistedcvs@luther.lothar.com
-Received: (qmail 32220 invoked by uid 1000); 14 Jan 2003 21:50:04 -0000
-Delivered-To: warner-twistedcvs@lothar.com
-Received: (qmail 7923 invoked by uid 13574); 14 Jan 2003 21:49:48 -0000
-Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
- by 130.94.181.6 (qmail-ldap-1.03) with SMTP
- for <warner-twistedcvs@lothar.com>; 14 Jan 2003 21:49:48 -0000
-Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
- by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
- id 18YYr0-0005en-00; Tue, 14 Jan 2003 15:44:14 -0600
-Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
- id 18YYq7-0005eQ-00
- for <twisted-commits@twistedmatrix.com>; Tue, 14 Jan 2003 15:43:19 -0600
-To: twisted-commits@twistedmatrix.com
-From: itamarst CVS <itamarst@twistedmatrix.com>
-Reply-To: twisted-python@twistedmatrix.com
-X-Mailer: CVSToys
-From: itamarst CVS <itamarst@twistedmatrix.com>
-Reply-To: twisted-python@twistedmatrix.com
-Message-Id: <E18YYq7-0005eQ-00@pyramid.twistedmatrix.com>
-Subject: [Twisted-commits] submit formmethod now subclass of Choice
-Sender: twisted-commits-admin@twistedmatrix.com
-Errors-To: twisted-commits-admin@twistedmatrix.com
-X-BeenThere: twisted-commits@twistedmatrix.com
-X-Mailman-Version: 2.0.11
-Precedence: bulk
-List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
-List-Post: <mailto:twisted-commits@twistedmatrix.com>
-List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
- <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
-List-Id: <twisted-commits.twistedmatrix.com>
-List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
- <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
-List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
-Date: Tue, 14 Jan 2003 15:43:19 -0600
-Status:
-
-Modified files:
-Twisted/twisted/web/woven/form.py 1.20 1.21
-Twisted/twisted/python/formmethod.py 1.12 1.13
-
-Log message:
-submit formmethod now subclass of Choice
-
diff --git a/buildbot/buildbot/test/mail/freshcvs.5 b/buildbot/buildbot/test/mail/freshcvs.5
deleted file mode 100644
index f20a958..0000000
--- a/buildbot/buildbot/test/mail/freshcvs.5
+++ /dev/null
@@ -1,54 +0,0 @@
-Return-Path: <twisted-commits-admin@twistedmatrix.com>
-Delivered-To: warner-twistedcvs@luther.lothar.com
-Received: (qmail 5865 invoked by uid 1000); 17 Jan 2003 07:00:04 -0000
-Delivered-To: warner-twistedcvs@lothar.com
-Received: (qmail 40460 invoked by uid 13574); 17 Jan 2003 06:51:55 -0000
-Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
- by 130.94.181.6 (qmail-ldap-1.03) with SMTP
- for <warner-twistedcvs@lothar.com>; 17 Jan 2003 06:51:55 -0000
-Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
- by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
- id 18ZQGk-0003WL-00; Fri, 17 Jan 2003 00:46:22 -0600
-Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
- id 18ZQFy-0003VP-00
- for <twisted-commits@twistedmatrix.com>; Fri, 17 Jan 2003 00:45:34 -0600
-To: twisted-commits@twistedmatrix.com
-From: etrepum CVS <etrepum@twistedmatrix.com>
-Reply-To: twisted-python@twistedmatrix.com
-X-Mailer: CVSToys
-From: etrepum CVS <etrepum@twistedmatrix.com>
-Reply-To: twisted-python@twistedmatrix.com
-Message-Id: <E18ZQFy-0003VP-00@pyramid.twistedmatrix.com>
-Subject: [Twisted-commits] Directory /cvs/Twisted/doc/examples/cocoaDemo added to the repository
-Sender: twisted-commits-admin@twistedmatrix.com
-Errors-To: twisted-commits-admin@twistedmatrix.com
-X-BeenThere: twisted-commits@twistedmatrix.com
-X-Mailman-Version: 2.0.11
-Precedence: bulk
-List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
-List-Post: <mailto:twisted-commits@twistedmatrix.com>
-List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
- <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
-List-Id: <twisted-commits.twistedmatrix.com>
-List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
- <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
-List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
-Date: Fri, 17 Jan 2003 00:45:34 -0600
-Status:
-
-Modified files:
-Twisted/doc/examples/cocoaDemo 0 0
-
-Log message:
-Directory /cvs/Twisted/doc/examples/cocoaDemo added to the repository
-
-
-ViewCVS links:
-http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo.diff?r1=text&tr1=NONE&r2=text&tr2=NONE&cvsroot=Twisted
-
-.
-
-_______________________________________________
-Twisted-commits mailing list
-Twisted-commits@twistedmatrix.com
-http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
diff --git a/buildbot/buildbot/test/mail/freshcvs.6 b/buildbot/buildbot/test/mail/freshcvs.6
deleted file mode 100644
index 20719f4..0000000
--- a/buildbot/buildbot/test/mail/freshcvs.6
+++ /dev/null
@@ -1,70 +0,0 @@
-Return-Path: <twisted-commits-admin@twistedmatrix.com>
-Delivered-To: warner-twistedcvs@luther.lothar.com
-Received: (qmail 7252 invoked by uid 1000); 17 Jan 2003 07:10:04 -0000
-Delivered-To: warner-twistedcvs@lothar.com
-Received: (qmail 43115 invoked by uid 13574); 17 Jan 2003 07:07:57 -0000
-Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
- by 130.94.181.6 (qmail-ldap-1.03) with SMTP
- for <warner-twistedcvs@lothar.com>; 17 Jan 2003 07:07:57 -0000
-Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
- by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
- id 18ZQW6-0003dA-00; Fri, 17 Jan 2003 01:02:14 -0600
-Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
- id 18ZQV7-0003cm-00
- for <twisted-commits@twistedmatrix.com>; Fri, 17 Jan 2003 01:01:13 -0600
-To: twisted-commits@twistedmatrix.com
-From: etrepum CVS <etrepum@twistedmatrix.com>
-Reply-To: twisted-python@twistedmatrix.com
-X-Mailer: CVSToys
-From: etrepum CVS <etrepum@twistedmatrix.com>
-Reply-To: twisted-python@twistedmatrix.com
-Message-Id: <E18ZQV7-0003cm-00@pyramid.twistedmatrix.com>
-Subject: [Twisted-commits] Cocoa (OS X) clone of the QT demo, using polling reactor
-Sender: twisted-commits-admin@twistedmatrix.com
-Errors-To: twisted-commits-admin@twistedmatrix.com
-X-BeenThere: twisted-commits@twistedmatrix.com
-X-Mailman-Version: 2.0.11
-Precedence: bulk
-List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
-List-Post: <mailto:twisted-commits@twistedmatrix.com>
-List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
- <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
-List-Id: <twisted-commits.twistedmatrix.com>
-List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
- <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
-List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
-Date: Fri, 17 Jan 2003 01:01:13 -0600
-Status:
-
-Modified files:
-Twisted/doc/examples/cocoaDemo/MyAppDelegate.py None 1.1
-Twisted/doc/examples/cocoaDemo/__main__.py None 1.1
-Twisted/doc/examples/cocoaDemo/bin-python-main.m None 1.1
-Twisted/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings None 1.1
-Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib None 1.1
-Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib None 1.1
-Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib None 1.1
-Twisted/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj None 1.1
-
-Log message:
-Cocoa (OS X) clone of the QT demo, using polling reactor
-
-Requires pyobjc ( http://pyobjc.sourceforge.net ), it's not much different than the template project. The reactor is iterated periodically by a repeating NSTimer.
-
-
-ViewCVS links:
-http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/MyAppDelegate.py.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
-http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/__main__.py.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
-http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/bin-python-main.m.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
-http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
-http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
-http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
-http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
-http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
-
-.
-
-_______________________________________________
-Twisted-commits mailing list
-Twisted-commits@twistedmatrix.com
-http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
diff --git a/buildbot/buildbot/test/mail/freshcvs.7 b/buildbot/buildbot/test/mail/freshcvs.7
deleted file mode 100644
index 515be1d..0000000
--- a/buildbot/buildbot/test/mail/freshcvs.7
+++ /dev/null
@@ -1,68 +0,0 @@
-Return-Path: <twisted-commits-admin@twistedmatrix.com>
-Delivered-To: warner-twistedcvs@luther.lothar.com
-Received: (qmail 8665 invoked by uid 1000); 17 Jan 2003 08:00:03 -0000
-Delivered-To: warner-twistedcvs@lothar.com
-Received: (qmail 50728 invoked by uid 13574); 17 Jan 2003 07:51:14 -0000
-Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
- by 130.94.181.6 (qmail-ldap-1.03) with SMTP
- for <warner-twistedcvs@lothar.com>; 17 Jan 2003 07:51:14 -0000
-Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
- by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
- id 18ZRBm-0003pN-00; Fri, 17 Jan 2003 01:45:18 -0600
-Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
- id 18ZRBQ-0003ou-00
- for <twisted-commits@twistedmatrix.com>; Fri, 17 Jan 2003 01:44:56 -0600
-To: twisted-commits@twistedmatrix.com
-From: etrepum CVS <etrepum@twistedmatrix.com>
-Reply-To: twisted-python@twistedmatrix.com
-X-Mailer: CVSToys
-From: etrepum CVS <etrepum@twistedmatrix.com>
-Reply-To: twisted-python@twistedmatrix.com
-Message-Id: <E18ZRBQ-0003ou-00@pyramid.twistedmatrix.com>
-Subject: [Twisted-commits] Directories break debian build script, waiting for reasonable fix
-Sender: twisted-commits-admin@twistedmatrix.com
-Errors-To: twisted-commits-admin@twistedmatrix.com
-X-BeenThere: twisted-commits@twistedmatrix.com
-X-Mailman-Version: 2.0.11
-Precedence: bulk
-List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
-List-Post: <mailto:twisted-commits@twistedmatrix.com>
-List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
- <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
-List-Id: <twisted-commits.twistedmatrix.com>
-List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
- <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
-List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
-Date: Fri, 17 Jan 2003 01:44:56 -0600
-Status:
-
-Modified files:
-Twisted/doc/examples/cocoaDemo/MyAppDelegate.py 1.1 None
-Twisted/doc/examples/cocoaDemo/__main__.py 1.1 None
-Twisted/doc/examples/cocoaDemo/bin-python-main.m 1.1 None
-Twisted/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings 1.1 None
-Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib 1.1 None
-Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib 1.1 None
-Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib 1.1 None
-Twisted/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj 1.1 None
-
-Log message:
-Directories break debian build script, waiting for reasonable fix
-
-
-ViewCVS links:
-http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/MyAppDelegate.py.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
-http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/__main__.py.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
-http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/bin-python-main.m.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
-http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
-http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
-http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
-http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
-http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
-
-.
-
-_______________________________________________
-Twisted-commits mailing list
-Twisted-commits@twistedmatrix.com
-http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
diff --git a/buildbot/buildbot/test/mail/freshcvs.8 b/buildbot/buildbot/test/mail/freshcvs.8
deleted file mode 100644
index 9b1e4fd..0000000
--- a/buildbot/buildbot/test/mail/freshcvs.8
+++ /dev/null
@@ -1,61 +0,0 @@
-Return-Path: <twisted-commits-admin@twistedmatrix.com>
-Delivered-To: warner-twistedcvs@luther.lothar.com
-Received: (qmail 10804 invoked by uid 1000); 19 Jan 2003 14:10:03 -0000
-Delivered-To: warner-twistedcvs@lothar.com
-Received: (qmail 6704 invoked by uid 13574); 19 Jan 2003 14:00:20 -0000
-Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
- by 130.94.181.6 (qmail-ldap-1.03) with SMTP
- for <warner-twistedcvs@lothar.com>; 19 Jan 2003 14:00:20 -0000
-Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
- by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
- id 18aFtx-0002WS-00; Sun, 19 Jan 2003 07:54:17 -0600
-Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
- id 18aFtH-0002W3-00
- for <twisted-commits@twistedmatrix.com>; Sun, 19 Jan 2003 07:53:35 -0600
-To: twisted-commits@twistedmatrix.com
-From: acapnotic CVS <acapnotic@twistedmatrix.com>
-X-Mailer: CVSToys
-Message-Id: <E18aFtH-0002W3-00@pyramid.twistedmatrix.com>
-Subject: [Twisted-commits] it doesn't work with invalid syntax
-Sender: twisted-commits-admin@twistedmatrix.com
-Errors-To: twisted-commits-admin@twistedmatrix.com
-X-BeenThere: twisted-commits@twistedmatrix.com
-X-Mailman-Version: 2.0.11
-Precedence: bulk
-List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
-List-Post: <mailto:twisted-commits@twistedmatrix.com>
-List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
- <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
-List-Id: <twisted-commits.twistedmatrix.com>
-List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
- <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
-List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
-Date: Sun, 19 Jan 2003 07:53:35 -0600
-Status:
-
-Modified files:
-CVSROOT/freshCfg 1.16 1.17
-
-Log message:
-it doesn't work with invalid syntax
-
-
-Index: CVSROOT/freshCfg
-diff -u CVSROOT/freshCfg:1.16 CVSROOT/freshCfg:1.17
---- CVSROOT/freshCfg:1.16 Sun Jan 19 05:52:34 2003
-+++ CVSROOT/freshCfg Sun Jan 19 05:53:34 2003
-@@ -27,7 +27,7 @@
- ('/cvs', '^Reality', None, MailNotification(['reality-commits'])),
- ('/cvs', '^Twistby', None, MailNotification(['acapnotic'])),
- ('/cvs', '^CVSToys', None,
-- MailNotification(['CVSToys-list']
-+ MailNotification(['CVSToys-list'],
- "http://twistedmatrix.com/users/jh.twistd/"
- "viewcvs/cgi/viewcvs.cgi/",
- replyTo="cvstoys-list@twistedmatrix.com"),)
-
-
-_______________________________________________
-Twisted-commits mailing list
-Twisted-commits@twistedmatrix.com
-http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
diff --git a/buildbot/buildbot/test/mail/freshcvs.9 b/buildbot/buildbot/test/mail/freshcvs.9
deleted file mode 100644
index fd4f785..0000000
--- a/buildbot/buildbot/test/mail/freshcvs.9
+++ /dev/null
@@ -1,18 +0,0 @@
-From twisted-python@twistedmatrix.com Fri Dec 26 07:25:13 2003
-From: twisted-python@twistedmatrix.com (exarkun CVS)
-Date: Fri, 26 Dec 2003 00:25:13 -0700
-Subject: [Twisted-commits] Directory /cvs/Twisted/sandbox/exarkun/persist-plugin added to the repository
-Message-ID: <E1AZmLR-0000Tl-00@wolfwood>
-
-Modified files:
-Twisted/sandbox/exarkun/persist-plugin
-
-Log message:
-Directory /cvs/Twisted/sandbox/exarkun/persist-plugin added to the repository
-
-
-ViewCVS links:
-http://cvs.twistedmatrix.com/cvs/sandbox/exarkun/persist-plugin?cvsroot=Twisted
-
-
-
diff --git a/buildbot/buildbot/test/mail/svn-commit.1 b/buildbot/buildbot/test/mail/svn-commit.1
deleted file mode 100644
index 591dfee..0000000
--- a/buildbot/buildbot/test/mail/svn-commit.1
+++ /dev/null
@@ -1,67 +0,0 @@
-X-Original-To: jm@jmason.org
-Delivered-To: jm@dogma.boxhost.net
-Received: from localhost [127.0.0.1]
- by localhost with IMAP (fetchmail-6.2.5)
- for jm@localhost (single-drop); Wed, 12 Apr 2006 01:52:04 +0100 (IST)
-Received: from mail.apache.org (hermes.apache.org [209.237.227.199])
- by dogma.boxhost.net (Postfix) with SMTP id 34F07310051
- for <jm@jmason.org>; Wed, 12 Apr 2006 01:44:17 +0100 (IST)
-Received: (qmail 71414 invoked by uid 500); 12 Apr 2006 00:44:16 -0000
-Mailing-List: contact commits-help@spamassassin.apache.org; run by ezmlm
-Precedence: bulk
-list-help: <mailto:commits-help@spamassassin.apache.org>
-list-unsubscribe: <mailto:commits-unsubscribe@spamassassin.apache.org>
-List-Post: <mailto:commits@spamassassin.apache.org>
-Reply-To: "SpamAssassin Dev" <dev@spamassassin.apache.org>
-List-Id: <commits.spamassassin.apache.org>
-Delivered-To: mailing list commits@spamassassin.apache.org
-Received: (qmail 71403 invoked by uid 99); 12 Apr 2006 00:44:16 -0000
-Received: from asf.osuosl.org (HELO asf.osuosl.org) (140.211.166.49)
- by apache.org (qpsmtpd/0.29) with ESMTP; Tue, 11 Apr 2006 17:44:16 -0700
-X-ASF-Spam-Status: No, hits=-9.4 required=10.0
- tests=ALL_TRUSTED,NO_REAL_NAME
-Received: from [209.237.227.194] (HELO minotaur.apache.org) (209.237.227.194)
- by apache.org (qpsmtpd/0.29) with SMTP; Tue, 11 Apr 2006 17:44:15 -0700
-Received: (qmail 51950 invoked by uid 65534); 12 Apr 2006 00:43:55 -0000
-Message-ID: <20060412004355.51949.qmail@minotaur.apache.org>
-Content-Type: text/plain; charset="utf-8"
-MIME-Version: 1.0
-Content-Transfer-Encoding: 7bit
-Subject: svn commit: r393348 - /spamassassin/trunk/sa-update.raw
-Date: Wed, 12 Apr 2006 00:43:54 -0000
-To: commits@spamassassin.apache.org
-From: felicity@apache.org
-X-Mailer: svnmailer-1.0.7
-X-Virus-Checked: Checked by ClamAV on apache.org
-Status: O
-X-UID: 62932
-X-Keywords:
-
-Author: felicity
-Date: Tue Apr 11 17:43:54 2006
-New Revision: 393348
-
-URL: http://svn.apache.org/viewcvs?rev=393348&view=rev
-Log:
-bug 4864: remove extraneous front-slash from gpghomedir path
-
-Modified:
- spamassassin/trunk/sa-update.raw
-
-Modified: spamassassin/trunk/sa-update.raw
-URL: http://svn.apache.org/viewcvs/spamassassin/trunk/sa-update.raw?rev=393348&r1=393347&r2=393348&view=diff
-==============================================================================
---- spamassassin/trunk/sa-update.raw (original)
-+++ spamassassin/trunk/sa-update.raw Tue Apr 11 17:43:54 2006
-@@ -120,7 +120,7 @@
- @{$opt{'channel'}} = ();
- my $GPG_ENABLED = 1;
-
--$opt{'gpghomedir'} = File::Spec->catfile($LOCAL_RULES_DIR, '/sa-update-keys');
-+$opt{'gpghomedir'} = File::Spec->catfile($LOCAL_RULES_DIR, 'sa-update-keys');
-
- Getopt::Long::Configure(
- qw(bundling no_getopt_compat no_auto_abbrev no_ignore_case));
-
-
-
diff --git a/buildbot/buildbot/test/mail/svn-commit.2 b/buildbot/buildbot/test/mail/svn-commit.2
deleted file mode 100644
index eeef001..0000000
--- a/buildbot/buildbot/test/mail/svn-commit.2
+++ /dev/null
@@ -1,1218 +0,0 @@
-X-Original-To: jm@jmason.org
-Delivered-To: jm@dogma.boxhost.net
-Received: from localhost [127.0.0.1]
- by localhost with IMAP (fetchmail-6.2.5)
- for jm@localhost (single-drop); Thu, 09 Mar 2006 21:44:57 +0000 (GMT)
-Received: from minotaur.apache.org (minotaur.apache.org [209.237.227.194])
- by dogma.boxhost.net (Postfix) with SMTP id 0D3463105BF
- for <jm@jmason.org>; Thu, 9 Mar 2006 19:52:50 +0000 (GMT)
-Received: (qmail 30661 invoked by uid 1833); 9 Mar 2006 19:52:44 -0000
-Delivered-To: jm@locus.apache.org
-Received: (qmail 30451 invoked from network); 9 Mar 2006 19:52:38 -0000
-Received: from hermes.apache.org (HELO mail.apache.org) (209.237.227.199)
- by minotaur.apache.org with SMTP; 9 Mar 2006 19:52:38 -0000
-Received: (qmail 97860 invoked by uid 500); 9 Mar 2006 19:52:29 -0000
-Delivered-To: apmail-jm@apache.org
-Received: (qmail 97837 invoked by uid 500); 9 Mar 2006 19:52:28 -0000
-Mailing-List: contact commits-help@spamassassin.apache.org; run by ezmlm
-Precedence: bulk
-list-help: <mailto:commits-help@spamassassin.apache.org>
-list-unsubscribe: <mailto:commits-unsubscribe@spamassassin.apache.org>
-List-Post: <mailto:commits@spamassassin.apache.org>
-Reply-To: "SpamAssassin Dev" <dev@spamassassin.apache.org>
-List-Id: <commits.spamassassin.apache.org>
-Delivered-To: mailing list commits@spamassassin.apache.org
-Received: (qmail 97826 invoked by uid 99); 9 Mar 2006 19:52:28 -0000
-Received: from asf.osuosl.org (HELO asf.osuosl.org) (140.211.166.49)
- by apache.org (qpsmtpd/0.29) with ESMTP; Thu, 09 Mar 2006 11:52:28 -0800
-X-ASF-Spam-Status: No, hits=-9.4 required=10.0
- tests=ALL_TRUSTED,NO_REAL_NAME
-Received: from [209.237.227.194] (HELO minotaur.apache.org) (209.237.227.194)
- by apache.org (qpsmtpd/0.29) with SMTP; Thu, 09 Mar 2006 11:52:26 -0800
-Received: (qmail 29644 invoked by uid 65534); 9 Mar 2006 19:52:06 -0000
-Message-ID: <20060309195206.29643.qmail@minotaur.apache.org>
-Content-Type: text/plain; charset="utf-8"
-MIME-Version: 1.0
-Content-Transfer-Encoding: 7bit
-Subject: svn commit: r384590 - in /spamassassin/branches/3.1: ./
- lib/Mail/SpamAssassin/ lib/Mail/SpamAssassin/Plugin/ spamd/
-Date: Thu, 09 Mar 2006 19:52:02 -0000
-To: commits@spamassassin.apache.org
-From: sidney@apache.org
-X-Mailer: svnmailer-1.0.7
-X-Virus-Checked: Checked by ClamAV on apache.org
-Status: O
-X-UID: 60795
-X-Keywords:
-
-Author: sidney
-Date: Thu Mar 9 11:51:59 2006
-New Revision: 384590
-
-URL: http://svn.apache.org/viewcvs?rev=384590&view=rev
-Log:
-Bug 4696: consolidated fixes for timeout bugs
-
-Added:
- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Timeout.pm
-Modified:
- spamassassin/branches/3.1/MANIFEST
- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Logger.pm
- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DCC.pm
- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DomainKeys.pm
- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Pyzor.pm
- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Razor2.pm
- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/SPF.pm
- spamassassin/branches/3.1/lib/Mail/SpamAssassin/SpamdForkScaling.pm
- spamassassin/branches/3.1/spamd/spamd.raw
-
-Modified: spamassassin/branches/3.1/MANIFEST
-URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/MANIFEST?rev=384590&r1=384589&r2=384590&view=diff
-==============================================================================
---- spamassassin/branches/3.1/MANIFEST (original)
-+++ spamassassin/branches/3.1/MANIFEST Thu Mar 9 11:51:59 2006
-@@ -89,6 +89,7 @@
- lib/Mail/SpamAssassin/SQLBasedAddrList.pm
- lib/Mail/SpamAssassin/SpamdForkScaling.pm
- lib/Mail/SpamAssassin/SubProcBackChannel.pm
-+lib/Mail/SpamAssassin/Timeout.pm
- lib/Mail/SpamAssassin/Util.pm
- lib/Mail/SpamAssassin/Util/DependencyInfo.pm
- lib/Mail/SpamAssassin/Util/Progress.pm
-
-Modified: spamassassin/branches/3.1/lib/Mail/SpamAssassin/Logger.pm
-URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/Logger.pm?rev=384590&r1=384589&r2=384590&view=diff
-==============================================================================
---- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Logger.pm (original)
-+++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Logger.pm Thu Mar 9 11:51:59 2006
-@@ -142,7 +142,7 @@
-
- if ($level eq "error") {
- # don't log alarm timeouts or broken pipes of various plugins' network checks
-- return if ($message[0] =~ /__(?:alarm|brokenpipe)__ignore__/);
-+ return if ($message[0] =~ /__ignore__/);
-
- # dos: we can safely ignore any die's that we eval'd in our own modules so
- # don't log them -- this is caller 0, the use'ing package is 1, the eval is 2
-
-Modified: spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DCC.pm
-URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DCC.pm?rev=384590&r1=384589&r2=384590&view=diff
-==============================================================================
---- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DCC.pm (original)
-+++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DCC.pm Thu Mar 9 11:51:59 2006
-@@ -44,6 +44,7 @@
-
- use Mail::SpamAssassin::Plugin;
- use Mail::SpamAssassin::Logger;
-+use Mail::SpamAssassin::Timeout;
- use IO::Socket;
- use strict;
- use warnings;
-@@ -375,15 +376,10 @@
-
- $permsgstatus->enter_helper_run_mode();
-
-- my $oldalarm = 0;
-+ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
-+ my $err = $timer->run_and_catch(sub {
-
-- eval {
-- # safe to use $SIG{ALRM} here instead of Util::trap_sigalrm_fully(),
-- # since there are no killer regexp hang dangers here
-- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
-- local $SIG{__DIE__}; # bug 4631
--
-- $oldalarm = alarm $timeout;
-+ local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
-
- my $sock = IO::Socket::UNIX->new(Type => SOCK_STREAM,
- Peer => $sockpath) || dbg("dcc: failed to open socket") && die;
-@@ -419,28 +415,20 @@
- }
-
- dbg("dcc: dccifd got response: $response");
-+
-+ });
-
-- if (defined $oldalarm) {
-- alarm $oldalarm; $oldalarm = undef;
-- }
-- };
-+ $permsgstatus->leave_helper_run_mode();
-
-- my $err = $@;
-- if (defined $oldalarm) {
-- alarm $oldalarm; $oldalarm = undef;
-+ if ($timer->timed_out()) {
-+ dbg("dcc: dccifd check timed out after $timeout secs.");
-+ return 0;
- }
-- $permsgstatus->leave_helper_run_mode();
-
- if ($err) {
- chomp $err;
-- $response = undef;
-- if ($err eq "__alarm__ignore__") {
-- dbg("dcc: dccifd check timed out after $timeout secs.");
-- return 0;
-- } else {
-- warn("dcc: dccifd -> check skipped: $! $err");
-- return 0;
-- }
-+ warn("dcc: dccifd -> check skipped: $! $err");
-+ return 0;
- }
-
- if (!defined $response || $response !~ /^X-DCC/) {
-@@ -494,17 +482,12 @@
-
- # use a temp file here -- open2() is unreliable, buffering-wise, under spamd
- my $tmpf = $permsgstatus->create_fulltext_tmpfile($fulltext);
-- my $oldalarm = 0;
--
- my $pid;
-- eval {
-- # safe to use $SIG{ALRM} here instead of Util::trap_sigalrm_fully(),
-- # since there are no killer regexp hang dangers here
-- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
-- local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
-- local $SIG{__DIE__}; # bug 4631
-
-- $oldalarm = alarm $timeout;
-+ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
-+ my $err = $timer->run_and_catch(sub {
-+
-+ local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
-
- # note: not really tainted, this came from system configuration file
- my $path = Mail::SpamAssassin::Util::untaint_file_path($self->{main}->{conf}->{dcc_path});
-@@ -542,17 +525,7 @@
-
- dbg("dcc: got response: $response");
-
-- # note: this must be called BEFORE leave_helper_run_mode()
-- # $self->cleanup_kids($pid);
-- if (defined $oldalarm) {
-- alarm $oldalarm; $oldalarm = undef;
-- }
-- };
--
-- my $err = $@;
-- if (defined $oldalarm) {
-- alarm $oldalarm; $oldalarm = undef;
-- }
-+ });
-
- if (defined(fileno(*DCC))) { # still open
- if ($pid) {
-@@ -564,11 +537,14 @@
- }
- $permsgstatus->leave_helper_run_mode();
-
-+ if ($timer->timed_out()) {
-+ dbg("dcc: check timed out after $timeout seconds");
-+ return 0;
-+ }
-+
- if ($err) {
- chomp $err;
-- if ($err eq "__alarm__ignore__") {
-- dbg("dcc: check timed out after $timeout seconds");
-- } elsif ($err eq "__brokenpipe__ignore__") {
-+ if ($err eq "__brokenpipe__ignore__") {
- dbg("dcc: check failed: broken pipe");
- } elsif ($err eq "no response") {
- dbg("dcc: check failed: no response");
-@@ -645,47 +621,37 @@
- my ($self, $options, $tmpf) = @_;
- my $timeout = $options->{report}->{conf}->{dcc_timeout};
-
-- $options->{report}->enter_helper_run_mode();
-+ # note: not really tainted, this came from system configuration file
-+ my $path = Mail::SpamAssassin::Util::untaint_file_path($options->{report}->{conf}->{dcc_path});
-
-- my $oldalarm = 0;
-+ my $opts = $options->{report}->{conf}->{dcc_options} || '';
-
-- eval {
-- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
-- local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
-- local $SIG{__DIE__}; # bug 4631
-+ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
-
-- $oldalarm = alarm $timeout;
--
-- # note: not really tainted, this came from system configuration file
-- my $path = Mail::SpamAssassin::Util::untaint_file_path($options->{report}->{conf}->{dcc_path});
-+ $options->{report}->enter_helper_run_mode();
-+ my $err = $timer->run_and_catch(sub {
-
-- my $opts = $options->{report}->{conf}->{dcc_options} || '';
-+ local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
-
- my $pid = Mail::SpamAssassin::Util::helper_app_pipe_open(*DCC,
-- $tmpf, 1, $path, "-t", "many", split(' ', $opts));
-+ $tmpf, 1, $path, "-t", "many", split(' ', $opts));
- $pid or die "$!\n";
-
- my @ignored = <DCC>;
- $options->{report}->close_pipe_fh(\*DCC);
--
- waitpid ($pid, 0);
-- if (defined $oldalarm) {
-- alarm $oldalarm; $oldalarm = undef;
-- }
-- };
-+
-+ });
-+ $options->{report}->leave_helper_run_mode();
-
-- my $err = $@;
-- if (defined $oldalarm) {
-- alarm $oldalarm; $oldalarm = undef;
-+ if ($timer->timed_out()) {
-+ dbg("reporter: DCC report timed out after $timeout seconds");
-+ return 0;
- }
-
-- $options->{report}->leave_helper_run_mode();
--
- if ($err) {
- chomp $err;
-- if ($err eq "__alarm__ignore__") {
-- dbg("reporter: DCC report timed out after $timeout seconds");
-- } elsif ($err eq "__brokenpipe__ignore__") {
-+ if ($err eq "__brokenpipe__ignore__") {
- dbg("reporter: DCC report failed: broken pipe");
- } else {
- warn("reporter: DCC report failed: $err\n");
-
-Modified: spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DomainKeys.pm
-URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DomainKeys.pm?rev=384590&r1=384589&r2=384590&view=diff
-==============================================================================
---- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DomainKeys.pm (original)
-+++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DomainKeys.pm Thu Mar 9 11:51:59 2006
-@@ -34,6 +34,8 @@
-
- use Mail::SpamAssassin::Plugin;
- use Mail::SpamAssassin::Logger;
-+use Mail::SpamAssassin::Timeout;
-+
- use strict;
- use warnings;
- use bytes;
-@@ -165,30 +167,22 @@
- }
-
- my $timeout = $scan->{conf}->{domainkeys_timeout};
-- my $oldalarm = 0;
-
-- eval {
-- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
-- local $SIG{__DIE__}; # bug 4631
-- $oldalarm = alarm($timeout);
-+ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
-+ my $err = $timer->run_and_catch(sub {
-+
- $self->_dk_lookup_trapped($scan, $message, $domain);
-- if (defined $oldalarm) {
-- alarm $oldalarm; $oldalarm = undef;
-- }
-- };
--
-- my $err = $@;
-- if (defined $oldalarm) {
-- alarm $oldalarm; $oldalarm = undef;
-+
-+ });
-+
-+ if ($timer->timed_out()) {
-+ dbg("dk: lookup timed out after $timeout seconds");
-+ return 0;
- }
-
- if ($err) {
- chomp $err;
-- if ($err eq "__alarm__ignore__") {
-- dbg("dk: lookup timed out after $timeout seconds");
-- } else {
-- warn("dk: lookup failed: $err\n");
-- }
-+ warn("dk: lookup failed: $err\n");
- return 0;
- }
-
-
-Modified: spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Pyzor.pm
-URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Pyzor.pm?rev=384590&r1=384589&r2=384590&view=diff
-==============================================================================
---- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Pyzor.pm (original)
-+++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Pyzor.pm Thu Mar 9 11:51:59 2006
-@@ -35,6 +35,7 @@
-
- use Mail::SpamAssassin::Plugin;
- use Mail::SpamAssassin::Logger;
-+use Mail::SpamAssassin::Timeout;
- use strict;
- use warnings;
- use bytes;
-@@ -229,27 +230,22 @@
-
- $pyzor_count = 0;
- $pyzor_whitelisted = 0;
--
-- $permsgstatus->enter_helper_run_mode();
-+ my $pid;
-
- # use a temp file here -- open2() is unreliable, buffering-wise, under spamd
- my $tmpf = $permsgstatus->create_fulltext_tmpfile($fulltext);
-- my $oldalarm = 0;
-
-- my $pid;
-- eval {
-- # safe to use $SIG{ALRM} here instead of Util::trap_sigalrm_fully(),
-- # since there are no killer regexp hang dangers here
-- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
-- local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
-- local $SIG{__DIE__}; # bug 4631
-+ # note: not really tainted, this came from system configuration file
-+ my $path = Mail::SpamAssassin::Util::untaint_file_path($self->{main}->{conf}->{pyzor_path});
-+
-+ my $opts = $self->{main}->{conf}->{pyzor_options} || '';
-
-- $oldalarm = alarm $timeout;
-+ $permsgstatus->enter_helper_run_mode();
-
-- # note: not really tainted, this came from system configuration file
-- my $path = Mail::SpamAssassin::Util::untaint_file_path($self->{main}->{conf}->{pyzor_path});
-+ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
-+ my $err = $timer->run_and_catch(sub {
-
-- my $opts = $self->{main}->{conf}->{pyzor_options} || '';
-+ local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
-
- dbg("pyzor: opening pipe: " . join(' ', $path, $opts, "check", "< $tmpf"));
-
-@@ -273,21 +269,7 @@
- die("internal error\n");
- }
-
-- # note: this must be called BEFORE leave_helper_run_mode()
-- # $self->cleanup_kids($pid);
--
-- # attempt to call this inside the eval, as leaving this scope is
-- # a slow operation and timing *that* out is pointless
-- if (defined $oldalarm) {
-- alarm $oldalarm; $oldalarm = undef;
-- }
-- };
--
-- # clear the alarm before doing lots of time-consuming hard work
-- my $err = $@;
-- if (defined $oldalarm) {
-- alarm $oldalarm; $oldalarm = undef;
-- }
-+ });
-
- if (defined(fileno(*PYZOR))) { # still open
- if ($pid) {
-@@ -299,11 +281,14 @@
- }
- $permsgstatus->leave_helper_run_mode();
-
-+ if ($timer->timed_out()) {
-+ dbg("pyzor: check timed out after $timeout seconds");
-+ return 0;
-+ }
-+
- if ($err) {
- chomp $err;
-- if ($err eq "__alarm__ignore__") {
-- dbg("pyzor: check timed out after $timeout seconds");
-- } elsif ($err eq "__brokenpipe__ignore__") {
-+ if ($err eq "__brokenpipe__ignore__") {
- dbg("pyzor: check failed: broken pipe");
- } elsif ($err eq "no response") {
- dbg("pyzor: check failed: no response");
-@@ -364,23 +349,19 @@
-
- sub pyzor_report {
- my ($self, $options, $tmpf) = @_;
-+
-+ # note: not really tainted, this came from system configuration file
-+ my $path = Mail::SpamAssassin::Util::untaint_file_path($options->{report}->{conf}->{pyzor_path});
-+
-+ my $opts = $options->{report}->{conf}->{pyzor_options} || '';
- my $timeout = $self->{main}->{conf}->{pyzor_timeout};
-
- $options->{report}->enter_helper_run_mode();
-
-- my $oldalarm = 0;
-+ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
-+ my $err = $timer->run_and_catch(sub {
-
-- eval {
-- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
- local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
-- local $SIG{__DIE__}; # bug 4631
--
-- $oldalarm = alarm $timeout;
--
-- # note: not really tainted, this came from system configuration file
-- my $path = Mail::SpamAssassin::Util::untaint_file_path($options->{report}->{conf}->{pyzor_path});
--
-- my $opts = $options->{report}->{conf}->{pyzor_options} || '';
-
- dbg("pyzor: opening pipe: " . join(' ', $path, $opts, "report", "< $tmpf"));
-
-@@ -391,23 +372,19 @@
- my @ignored = <PYZOR>;
- $options->{report}->close_pipe_fh(\*PYZOR);
-
-- if (defined $oldalarm) {
-- alarm $oldalarm; $oldalarm = undef;
-- }
- waitpid ($pid, 0);
-- };
-+ });
-
-- my $err = $@;
-- if (defined $oldalarm) {
-- alarm $oldalarm; $oldalarm = undef;
-- }
- $options->{report}->leave_helper_run_mode();
-
-+ if ($timer->timed_out()) {
-+ dbg("reporter: pyzor report timed out after $timeout seconds");
-+ return 0;
-+ }
-+
- if ($err) {
- chomp $err;
-- if ($err eq '__alarm__ignore__') {
-- dbg("reporter: pyzor report timed out after $timeout seconds");
-- } elsif ($err eq '__brokenpipe__ignore__') {
-+ if ($err eq '__brokenpipe__ignore__') {
- dbg("reporter: pyzor report failed: broken pipe");
- } else {
- warn("reporter: pyzor report failed: $err\n");
-
-Modified: spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Razor2.pm
-URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Razor2.pm?rev=384590&r1=384589&r2=384590&view=diff
-==============================================================================
---- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Razor2.pm (original)
-+++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Razor2.pm Thu Mar 9 11:51:59 2006
-@@ -143,14 +143,11 @@
- }
-
- Mail::SpamAssassin::PerMsgStatus::enter_helper_run_mode($self);
-- my $oldalarm = 0;
-
-- eval {
-- local ($^W) = 0; # argh, warnings in Razor
-+ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
-+ my $err = $timer->run_and_catch(sub {
-
-- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
-- local $SIG{__DIE__}; # bug 4631
-- $oldalarm = alarm $timeout;
-+ local ($^W) = 0; # argh, warnings in Razor
-
- # everything's in the module!
- my $rc = Razor2::Client::Agent->new("razor-$type");
-@@ -184,7 +181,7 @@
- # let's reset the alarm since get_server_info() calls
- # nextserver() which calls discover() which very likely will
- # reset the alarm for us ... how polite. :(
-- alarm $timeout;
-+ $timer->reset();
-
- # no facility prefix on this die
- my $sigs = $rc->compute_sigs($objects)
-@@ -219,100 +216,96 @@
- my $error = $rc->errprefix("$debug: spamassassin") || "$debug: razor2 had unknown error during disconnect";
- die $error;
- }
-+ }
-
-- # if we got here, we're done doing remote stuff, abort the alert
-- if (defined $oldalarm) {
-- alarm $oldalarm; $oldalarm = undef;
-- }
--
-- # Razor 2.14 says that if we get here, we did ok.
-- $return = 1;
-+ # Razor 2.14 says that if we get here, we did ok.
-+ $return = 1;
-
-- # figure out if we have a log file we need to close...
-- if (ref($rc->{logref}) && exists $rc->{logref}->{fd}) {
-- # the fd can be stdout or stderr, so we need to find out if it is
-- # so we don't close them by accident. Note: we can't just
-- # undef the fd here (like the IO::Handle manpage says we can)
-- # because it won't actually close, unfortunately. :(
-- my $untie = 1;
-- foreach my $log (*STDOUT{IO}, *STDERR{IO}) {
-- if ($log == $rc->{logref}->{fd}) {
-- $untie = 0;
-- last;
-- }
-- }
-- close $rc->{logref}->{fd} if ($untie);
-- }
--
-- if ($type eq 'check') {
-- # so $objects->[0] is the first (only) message, and ->{spam} is a general yes/no
-- push(@results, { result => $objects->[0]->{spam} });
-+ # figure out if we have a log file we need to close...
-+ if (ref($rc->{logref}) && exists $rc->{logref}->{fd}) {
-+ # the fd can be stdout or stderr, so we need to find out if it is
-+ # so we don't close them by accident. Note: we can't just
-+ # undef the fd here (like the IO::Handle manpage says we can)
-+ # because it won't actually close, unfortunately. :(
-+ my $untie = 1;
-+ foreach my $log (*STDOUT{IO}, *STDERR{IO}) {
-+ if ($log == $rc->{logref}->{fd}) {
-+ $untie = 0;
-+ last;
-+ }
-+ }
-+ close $rc->{logref}->{fd} if ($untie);
-+ }
-
-- # great for debugging, but leave this off!
-- #use Data::Dumper;
-- #print Dumper($objects),"\n";
--
-- # ->{p} is for each part of the message
-- # so go through each part, taking the highest cf we find
-- # of any part that isn't contested (ct). This helps avoid false
-- # positives. equals logic_method 4.
-- #
-- # razor-agents < 2.14 have a different object format, so we now support both.
-- # $objects->[0]->{resp} vs $objects->[0]->{p}->[part #]->{resp}
-- my $part = 0;
-- my $arrayref = $objects->[0]->{p} || $objects;
-- if (defined $arrayref) {
-- foreach my $cf (@{$arrayref}) {
-- if (exists $cf->{resp}) {
-- for (my $response=0; $response<@{$cf->{resp}}; $response++) {
-- my $tmp = $cf->{resp}->[$response];
-- my $tmpcf = $tmp->{cf}; # Part confidence
-- my $tmpct = $tmp->{ct}; # Part contested?
-- my $engine = $cf->{sent}->[$response]->{e};
--
-- # These should always be set, but just in case ...
-- $tmpcf = 0 unless defined $tmpcf;
-- $tmpct = 0 unless defined $tmpct;
-- $engine = 0 unless defined $engine;
--
-- push(@results,
-- { part => $part, engine => $engine, contested => $tmpct, confidence => $tmpcf });
-- }
-- }
-- else {
-- push(@results, { part => $part, noresponse => 1 });
-- }
-- $part++;
-- }
-- }
-- else {
-- # If we have some new $objects format that isn't close to
-- # the current razor-agents 2.x version, we won't FP but we
-- # should alert in debug.
-- dbg("$debug: it looks like the internal Razor object has changed format!");
-- }
-- }
-+ if ($type eq 'check') {
-+ # so $objects->[0] is the first (only) message, and ->{spam} is a general yes/no
-+ push(@results, { result => $objects->[0]->{spam} });
-+
-+ # great for debugging, but leave this off!
-+ #use Data::Dumper;
-+ #print Dumper($objects),"\n";
-+
-+ # ->{p} is for each part of the message
-+ # so go through each part, taking the highest cf we find
-+ # of any part that isn't contested (ct). This helps avoid false
-+ # positives. equals logic_method 4.
-+ #
-+ # razor-agents < 2.14 have a different object format, so we now support both.
-+ # $objects->[0]->{resp} vs $objects->[0]->{p}->[part #]->{resp}
-+ my $part = 0;
-+ my $arrayref = $objects->[0]->{p} || $objects;
-+ if (defined $arrayref) {
-+ foreach my $cf (@{$arrayref}) {
-+ if (exists $cf->{resp}) {
-+ for (my $response=0; $response<@{$cf->{resp}}; $response++) {
-+ my $tmp = $cf->{resp}->[$response];
-+ my $tmpcf = $tmp->{cf}; # Part confidence
-+ my $tmpct = $tmp->{ct}; # Part contested?
-+ my $engine = $cf->{sent}->[$response]->{e};
-+
-+ # These should always be set, but just in case ...
-+ $tmpcf = 0 unless defined $tmpcf;
-+ $tmpct = 0 unless defined $tmpct;
-+ $engine = 0 unless defined $engine;
-+
-+ push(@results,
-+ { part => $part, engine => $engine, contested => $tmpct, confidence => $tmpcf });
-+ }
-+ }
-+ else {
-+ push(@results, { part => $part, noresponse => 1 });
-+ }
-+ $part++;
-+ }
-+ }
-+ else {
-+ # If we have some new $objects format that isn't close to
-+ # the current razor-agents 2.x version, we won't FP but we
-+ # should alert in debug.
-+ dbg("$debug: it looks like the internal Razor object has changed format!");
-+ }
- }
- }
- else {
- warn "$debug: undefined Razor2::Client::Agent\n";
- }
-
-- if (defined $oldalarm) {
-- alarm $oldalarm; $oldalarm = undef;
-- }
-- };
-+ });
-+
-+ # OK, that's enough Razor stuff. now, reset all that global
-+ # state it futzes with :(
-+ # work around serious brain damage in Razor2 (constant seed)
-+ srand;
-
-- my $err = $@;
-- if (defined $oldalarm) {
-- alarm $oldalarm; $oldalarm = undef;
-+ Mail::SpamAssassin::PerMsgStatus::leave_helper_run_mode($self);
-+
-+ if ($timer->timed_out()) {
-+ dbg("$debug: razor2 $type timed out after $timeout seconds");
- }
-
- if ($err) {
- chomp $err;
-- if ($err eq "__alarm__ignore__") {
-- dbg("$debug: razor2 $type timed out after $timeout seconds");
-- } elsif ($err =~ /(?:could not connect|network is unreachable)/) {
-+ if ($err =~ /(?:could not connect|network is unreachable)/) {
- # make this a dbg(); SpamAssassin will still continue,
- # but without Razor checking. otherwise there may be
- # DSNs and errors in syslog etc., yuck
-@@ -323,11 +316,6 @@
- warn("$debug: razor2 $type failed: $! $err");
- }
- }
--
-- # work around serious brain damage in Razor2 (constant seed)
-- srand;
--
-- Mail::SpamAssassin::PerMsgStatus::leave_helper_run_mode($self);
-
- # razor also debugs to stdout. argh. fix it to stderr...
- if (would_log('dbg', $debug)) {
-
-Modified: spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/SPF.pm
-URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/SPF.pm?rev=384590&r1=384589&r2=384590&view=diff
-==============================================================================
---- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/SPF.pm (original)
-+++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/SPF.pm Thu Mar 9 11:51:59 2006
-@@ -34,6 +34,7 @@
-
- use Mail::SpamAssassin::Plugin;
- use Mail::SpamAssassin::Logger;
-+use Mail::SpamAssassin::Timeout;
- use strict;
- use warnings;
- use bytes;
-@@ -300,30 +301,17 @@
-
- my ($result, $comment);
- my $timeout = $scanner->{conf}->{spf_timeout};
-- my $oldalarm = 0;
-
-- eval {
-- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
-- local $SIG{__DIE__}; # bug 4631
-- $oldalarm = alarm($timeout);
-+ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
-+ my $err = $timer->run_and_catch(sub {
-+
- ($result, $comment) = $query->result();
-- if (defined $oldalarm) {
-- alarm $oldalarm; $oldalarm = undef;
-- }
-- };
-
-- my $err = $@;
-- if (defined $oldalarm) {
-- alarm $oldalarm; $oldalarm = undef;
-- }
-+ });
-
- if ($err) {
- chomp $err;
-- if ($err eq "__alarm__ignore__") {
-- dbg("spf: lookup timed out after $timeout seconds");
-- } else {
-- warn("spf: lookup failed: $err\n");
-- }
-+ warn("spf: lookup failed: $err\n");
- return 0;
- }
-
-
-Modified: spamassassin/branches/3.1/lib/Mail/SpamAssassin/SpamdForkScaling.pm
-URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/SpamdForkScaling.pm?rev=384590&r1=384589&r2=384590&view=diff
-==============================================================================
---- spamassassin/branches/3.1/lib/Mail/SpamAssassin/SpamdForkScaling.pm (original)
-+++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/SpamdForkScaling.pm Thu Mar 9 11:51:59 2006
-@@ -25,6 +25,7 @@
-
- use Mail::SpamAssassin::Util;
- use Mail::SpamAssassin::Logger;
-+use Mail::SpamAssassin::Timeout;
-
- use vars qw {
- @PFSTATE_VARS %EXPORT_TAGS @EXPORT_OK
-@@ -109,6 +110,9 @@
-
- delete $self->{kids}->{$pid};
-
-+ # note this for the select()-caller's benefit
-+ $self->{child_just_exited} = 1;
-+
- # remove the child from the backchannel list, too
- $self->{backchannel}->delete_socket_for_child($pid);
-
-@@ -188,24 +192,63 @@
- vec($rin, $self->{server_fileno}, 1) = 0;
- }
-
-- my ($rout, $eout, $nfound, $timeleft);
-+ my ($rout, $eout, $nfound, $timeleft, $selerr);
-+
-+ # use alarm to back up select()'s built-in alarm, to debug Theo's bug.
-+ # not that I can remember what Theo's bug was, but hey ;) A good
-+ # 60 seconds extra on the alarm() should make that quite rare...
-+
-+ my $timer = Mail::SpamAssassin::Timeout->new({ secs => ($tout*2) + 60 });
-
-- # use alarm to back up select()'s built-in alarm, to debug theo's bug
-- eval {
-- Mail::SpamAssassin::Util::trap_sigalrm_fully(sub { die "tcp timeout"; });
-- alarm ($tout*2) if ($tout);
-+ $timer->run(sub {
-+
-+ $self->{child_just_exited} = 0;
- ($nfound, $timeleft) = select($rout=$rin, undef, $eout=$rin, $tout);
-- };
-- alarm 0;
-+ $selerr = $!;
-
-- if ($@) {
-- warn "prefork: select timeout failed! recovering\n";
-- sleep 1; # avoid overload
-- return;
-- }
-+ });
-+
-+ # bug 4696: under load, the process can go for such a long time without
-+ # being context-switched in, that when it does return the alarm() fires
-+ # before the select() timeout does. Treat this as a select() timeout
-+ if ($timer->timed_out) {
-+ dbg("prefork: select timed out (via alarm)");
-+ $nfound = 0;
-+ $timeleft = 0;
-+ }
-+
-+ # errors; handle undef *or* -1 returned. do this before "errors on
-+ # the handle" below, since an error condition is signalled both via
-+ # a -1 return and a $eout bit.
-+ if (!defined $nfound || $nfound < 0)
-+ {
-+ if (exists &Errno::EINTR && $selerr == &Errno::EINTR)
-+ {
-+ # this happens if the process is signalled during the select(),
-+ # for example if someone sends SIGHUP to reload the configuration.
-+ # just return inmmediately
-+ dbg("prefork: select returned err $selerr, probably signalled");
-+ return;
-+ }
-+
-+ # if a child exits during that select() call, it generates a spurious
-+ # error, like this:
-+ #
-+ # Jan 29 12:53:17 dogma spamd[18518]: prefork: child states: BI
-+ # Jan 29 12:53:17 dogma spamd[18518]: spamd: handled cleanup of child pid 13101 due to SIGCHLD
-+ # Jan 29 12:53:17 dogma spamd[18518]: prefork: select returned -1! recovering:
-+ #
-+ # avoid by setting a boolean in the child_exited() callback and checking
-+ # it here. log $! just in case, though.
-+ if ($self->{child_just_exited} && $nfound == -1) {
-+ dbg("prefork: select returned -1 due to child exiting, ignored ($selerr)");
-+ return;
-+ }
-+
-+ warn "prefork: select returned ".
-+ (defined $nfound ? $nfound : "undef").
-+ "! recovering: $selerr\n";
-
-- if (!defined $nfound) {
-- warn "prefork: select returned undef! recovering\n";
- sleep 1; # avoid overload
- return;
- }
-@@ -213,7 +256,7 @@
- # errors on the handle?
- # return them immediately, they may be from a SIGHUP restart signal
- if (vec ($eout, $self->{server_fileno}, 1)) {
-- warn "prefork: select returned error on server filehandle: $!\n";
-+ warn "prefork: select returned error on server filehandle: $selerr $!\n";
- return;
- }
-
-@@ -282,7 +325,7 @@
-
- my ($sock, $kid);
- while (($kid, $sock) = each %{$self->{backchannel}->{kids}}) {
-- $self->syswrite_with_retry($sock, PF_PING_ORDER) and next;
-+ $self->syswrite_with_retry($sock, PF_PING_ORDER, $kid, 3) and next;
-
- warn "prefork: write of ping failed to $kid fd=".$sock->fileno.": ".$!;
-
-@@ -353,7 +396,7 @@
- return $self->order_idle_child_to_accept();
- }
-
-- if (!$self->syswrite_with_retry($sock, PF_ACCEPT_ORDER))
-+ if (!$self->syswrite_with_retry($sock, PF_ACCEPT_ORDER, $kid))
- {
- # failure to write to the child; bad news. call it dead
- warn "prefork: killing rogue child $kid, failed to write on fd ".$sock->fileno.": $!\n";
-@@ -396,7 +439,7 @@
- my ($self, $kid) = @_;
- if ($self->{waiting_for_idle_child}) {
- my $sock = $self->{backchannel}->get_socket_for_child($kid);
-- $self->syswrite_with_retry($sock, PF_ACCEPT_ORDER)
-+ $self->syswrite_with_retry($sock, PF_ACCEPT_ORDER, $kid)
- or die "prefork: $kid claimed it was ready, but write failed on fd ".
- $sock->fileno.": ".$!;
- $self->{waiting_for_idle_child} = 0;
-@@ -426,7 +469,7 @@
- sub report_backchannel_socket {
- my ($self, $str) = @_;
- my $sock = $self->{backchannel}->get_parent_socket();
-- $self->syswrite_with_retry($sock, $str)
-+ $self->syswrite_with_retry($sock, $str, 'parent')
- or write "syswrite() to parent failed: $!";
- }
-
-@@ -537,12 +580,31 @@
- }
-
- sub syswrite_with_retry {
-- my ($self, $sock, $buf) = @_;
-+ my ($self, $sock, $buf, $targetname, $numretries) = @_;
-+ $numretries ||= 10; # default 10 retries
-
- my $written = 0;
-+ my $try = 0;
-
- retry_write:
-+
-+ $try++;
-+ if ($try > 1) {
-+ warn "prefork: syswrite(".$sock->fileno.") to $targetname failed on try $try";
-+ if ($try > $numretries) {
-+ warn "prefork: giving up";
-+ return undef;
-+ }
-+ else {
-+ # give it 1 second to recover. we retry indefinitely.
-+ my $rout = '';
-+ vec($rout, $sock->fileno, 1) = 1;
-+ select(undef, $rout, undef, 1);
-+ }
-+ }
-+
- my $nbytes = $sock->syswrite($buf);
-+
- if (!defined $nbytes) {
- unless ((exists &Errno::EAGAIN && $! == &Errno::EAGAIN)
- || (exists &Errno::EWOULDBLOCK && $! == &Errno::EWOULDBLOCK))
-@@ -551,13 +613,7 @@
- return undef;
- }
-
-- warn "prefork: syswrite(".$sock->fileno.") failed, retrying...";
--
-- # give it 5 seconds to recover. we retry indefinitely.
-- my $rout = '';
-- vec($rout, $sock->fileno, 1) = 1;
-- select(undef, $rout, undef, 5);
--
-+ warn "prefork: retrying syswrite(): $!";
- goto retry_write;
- }
- else {
-@@ -568,7 +624,8 @@
- return $written; # it's complete, we can return
- }
- else {
-- warn "prefork: partial write of $nbytes, towrite=".length($buf).
-+ warn "prefork: partial write of $nbytes to ".
-+ $targetname.", towrite=".length($buf).
- " sofar=".$written." fd=".$sock->fileno.", recovering";
- goto retry_write;
- }
-
-Added: spamassassin/branches/3.1/lib/Mail/SpamAssassin/Timeout.pm
-URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/Timeout.pm?rev=384590&view=auto
-==============================================================================
---- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Timeout.pm (added)
-+++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Timeout.pm Thu Mar 9 11:51:59 2006
-@@ -0,0 +1,215 @@
-+# <@LICENSE>
-+# Copyright 2004 Apache Software Foundation
-+#
-+# Licensed under the Apache License, Version 2.0 (the "License");
-+# you may not use this file except in compliance with the License.
-+# You may obtain a copy of the License at
-+#
-+# http://www.apache.org/licenses/LICENSE-2.0
-+#
-+# Unless required by applicable law or agreed to in writing, software
-+# distributed under the License is distributed on an "AS IS" BASIS,
-+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-+# See the License for the specific language governing permissions and
-+# limitations under the License.
-+# </@LICENSE>
-+
-+=head1 NAME
-+
-+Mail::SpamAssassin::Timeout - safe, reliable timeouts in perl
-+
-+=head1 SYNOPSIS
-+
-+ # non-timeout code...
-+
-+ my $t = Mail::SpamAssassin::Timeout->new({ secs => 5 });
-+
-+ $t->run(sub {
-+ # code to run with a 5-second timeout...
-+ });
-+
-+ if ($t->timed_out()) {
-+ # do something...
-+ }
-+
-+ # more non-timeout code...
-+
-+=head1 DESCRIPTION
-+
-+This module provides a safe, reliable and clean API to provide
-+C<alarm(2)>-based timeouts for perl code.
-+
-+Note that C<$SIG{ALRM}> is used to provide the timeout, so this will not
-+interrupt out-of-control regular expression matches.
-+
-+Nested timeouts are supported.
-+
-+=head1 PUBLIC METHODS
-+
-+=over 4
-+
-+=cut
-+
-+package Mail::SpamAssassin::Timeout;
-+
-+use strict;
-+use warnings;
-+use bytes;
-+
-+use vars qw{
-+ @ISA
-+};
-+
-+@ISA = qw();
-+
-+###########################################################################
-+
-+=item my $t = Mail::SpamAssassin::Timeout->new({ ... options ... });
-+
-+Constructor. Options include:
-+
-+=over 4
-+
-+=item secs => $seconds
-+
-+timeout, in seconds. Optional; if not specified, no timeouts will be applied.
-+
-+=back
-+
-+=cut
-+
-+sub new {
-+ my ($class, $opts) = @_;
-+ $class = ref($class) || $class;
-+ my %selfval = $opts ? %{$opts} : ();
-+ my $self = \%selfval;
-+
-+ bless ($self, $class);
-+ $self;
-+}
-+
-+###########################################################################
-+
-+=item $t->run($coderef)
-+
-+Run a code reference within the currently-defined timeout.
-+
-+The timeout is as defined by the B<secs> parameter to the constructor.
-+
-+Returns whatever the subroutine returns, or C<undef> on timeout.
-+If the timer times out, C<$t-<gt>timed_out()> will return C<1>.
-+
-+Time elapsed is not cumulative; multiple runs of C<run> will restart the
-+timeout from scratch.
-+
-+=item $t->run_and_catch($coderef)
-+
-+Run a code reference, as per C<$t-<gt>run()>, but also catching any
-+C<die()> calls within the code reference.
-+
-+Returns C<undef> if no C<die()> call was executed and C<$@> was unset, or the
-+value of C<$@> if it was set. (The timeout event doesn't count as a C<die()>.)
-+
-+=cut
-+
-+sub run { $_[0]->_run($_[1], 0); }
-+
-+sub run_and_catch { $_[0]->_run($_[1], 1); }
-+
-+sub _run { # private
-+ my ($self, $sub, $and_catch) = @_;
-+
-+ delete $self->{timed_out};
-+
-+ if (!$self->{secs}) { # no timeout! just call the sub and return.
-+ return &$sub;
-+ }
-+
-+ # assertion
-+ if ($self->{secs} < 0) {
-+ die "Mail::SpamAssassin::Timeout: oops? neg value for 'secs': $self->{secs}";
-+ }
-+
-+ my $oldalarm = 0;
-+ my $ret;
-+
-+ eval {
-+ # note use of local to ensure closed scope here
-+ local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
-+ local $SIG{__DIE__}; # bug 4631
-+
-+ $oldalarm = alarm($self->{secs});
-+
-+ $ret = &$sub;
-+
-+ # Unset the alarm() before we leave eval{ } scope, as that stack-pop
-+ # operation can take a second or two under load. Note: previous versions
-+ # restored $oldalarm here; however, that is NOT what we want to do, since
-+ # it creates a new race condition, namely that an old alarm could then fire
-+ # while the stack-pop was underway, thereby appearing to be *this* timeout
-+ # timing out. In terms of how we might possibly have nested timeouts in
-+ # SpamAssassin, this is an academic issue with little impact, but it's
-+ # still worth avoiding anyway.
-+
-+ alarm 0;
-+ };
-+
-+ my $err = $@;
-+
-+ if (defined $oldalarm) {
-+ # now, we could have died from a SIGALRM == timed out. if so,
-+ # restore the previously-active one, or zero all timeouts if none
-+ # were previously active.
-+ alarm $oldalarm;
-+ }
-+
-+ if ($err) {
-+ if ($err =~ /__alarm__ignore__/) {
-+ $self->{timed_out} = 1;
-+ } else {
-+ if ($and_catch) {
-+ return $@;
-+ } else {
-+ die $@; # propagate any "real" errors
-+ }
-+ }
-+ }
-+
-+ if ($and_catch) {
-+ return; # undef
-+ } else {
-+ return $ret;
-+ }
-+}
-+
-+###########################################################################
-+
-+=item $t->timed_out()
-+
-+Returns C<1> if the most recent code executed in C<run()> timed out, or
-+C<undef> if it did not.
-+
-+=cut
-+
-+sub timed_out {
-+ my ($self) = @_;
-+ return $self->{timed_out};
-+}
-+
-+###########################################################################
-+
-+=item $t->reset()
-+
-+If called within a C<run()> code reference, causes the current alarm timer to
-+be reset to its starting value.
-+
-+=cut
-+
-+sub reset {
-+ my ($self) = @_;
-+ alarm($self->{secs});
-+}
-+
-+###########################################################################
-+
-+1;
-
-Modified: spamassassin/branches/3.1/spamd/spamd.raw
-URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/spamd/spamd.raw?rev=384590&r1=384589&r2=384590&view=diff
-==============================================================================
---- spamassassin/branches/3.1/spamd/spamd.raw (original)
-+++ spamassassin/branches/3.1/spamd/spamd.raw Thu Mar 9 11:51:59 2006
-@@ -2049,6 +2049,9 @@
- foreach (keys %children) {
- kill 'INT' => $_;
- my $pid = waitpid($_, 0);
-+ if ($scaling) {
-+ $scaling->child_exited($pid);
-+ }
- info("spamd: child $pid killed successfully");
- }
- %children = ();
-
-
-
-
- \ No newline at end of file
diff --git a/buildbot/buildbot/test/mail/syncmail.1 b/buildbot/buildbot/test/mail/syncmail.1
deleted file mode 100644
index eb35e25..0000000
--- a/buildbot/buildbot/test/mail/syncmail.1
+++ /dev/null
@@ -1,152 +0,0 @@
-Return-Path: <warner@users.sourceforge.net>
-Delivered-To: warner-sourceforge@luther.lothar.com
-Received: (qmail 23758 invoked by uid 1000); 28 Jul 2003 07:22:14 -0000
-Delivered-To: warner-sourceforge@lothar.com
-Received: (qmail 62715 invoked by uid 13574); 28 Jul 2003 07:22:03 -0000
-Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
- by 130.94.181.6 (qmail-ldap-1.03) with SMTP
- for <warner-sourceforge@lothar.com>; 28 Jul 2003 07:22:03 -0000
-Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
- by sc8-sf-list1.sourceforge.net with esmtp
- (Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
- id 19h2KY-0004Nr-00
- for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
-Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
- by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
- id 19h2KY-0001rv-00
- for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
-Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
- by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
- id 19h2KY-0003r4-00
- for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
-From: warner@users.sourceforge.net
-To: warner@users.sourceforge.net
-Subject: buildbot/buildbot/changes freshcvsmail.py,1.2,1.3
-Message-Id: <E19h2KY-0003r4-00@sc8-pr-cvs1.sourceforge.net>
-Date: Mon, 28 Jul 2003 00:22:02 -0700
-Status:
-
-Update of /cvsroot/buildbot/buildbot/buildbot/changes
-In directory sc8-pr-cvs1:/tmp/cvs-serv14795/buildbot/changes
-
-Modified Files:
- freshcvsmail.py
-Log Message:
-remove leftover code, leave a temporary compatibility import. Note! Start
-importing FCMaildirSource from changes.mail instead of changes.freshcvsmail
-
-
-Index: freshcvsmail.py
-===================================================================
-RCS file: /cvsroot/buildbot/buildbot/buildbot/changes/freshcvsmail.py,v
-retrieving revision 1.2
-retrieving revision 1.3
-diff -C2 -d -r1.2 -r1.3
-*** freshcvsmail.py 27 Jul 2003 18:54:08 -0000 1.2
---- freshcvsmail.py 28 Jul 2003 07:22:00 -0000 1.3
-***************
-*** 1,96 ****
- #! /usr/bin/python
-
-! from buildbot.interfaces import IChangeSource
-! from buildbot.changes.maildirtwisted import MaildirTwisted
-! from buildbot.changes.changes import Change
-! from rfc822 import Message
-! import os, os.path
-!
-! def parseFreshCVSMail(fd, prefix=None):
-! """Parse mail sent by FreshCVS"""
-! # this uses rfc822.Message so it can run under python2.1 . In the future
-! # it will be updated to use python2.2's "email" module.
-!
-! m = Message(fd)
-! # FreshCVS sets From: to "user CVS <user>", but the <> part may be
-! # modified by the MTA (to include a local domain)
-! name, addr = m.getaddr("from")
-! if not name:
-! return None # no From means this message isn't from FreshCVS
-! cvs = name.find(" CVS")
-! if cvs == -1:
-! return None # this message isn't from FreshCVS
-! who = name[:cvs]
-!
-! # we take the time of receipt as the time of checkin. Not correct,
-! # but it avoids the out-of-order-changes issue
-! #when = m.getdate() # and convert from 9-tuple, and handle timezone
-!
-! files = []
-! comments = ""
-! isdir = 0
-! lines = m.fp.readlines()
-! while lines:
-! line = lines.pop(0)
-! if line == "Modified files:\n":
-! break
-! while lines:
-! line = lines.pop(0)
-! if line == "\n":
-! break
-! line = line.rstrip("\n")
-! file, junk = line.split(None, 1)
-! if prefix:
-! # insist that the file start with the prefix: FreshCVS sends
-! # changes we don't care about too
-! bits = file.split(os.sep)
-! if bits[0] == prefix:
-! file = apply(os.path.join, bits[1:])
-! else:
-! break
-! if junk == "0 0":
-! isdir = 1
-! files.append(file)
-! while lines:
-! line = lines.pop(0)
-! if line == "Log message:\n":
-! break
-! # message is terminated by "ViewCVS links:" or "Index:..." (patch)
-! while lines:
-! line = lines.pop(0)
-! if line == "ViewCVS links:\n":
-! break
-! if line.find("Index: ") == 0:
-! break
-! comments += line
-! comments = comments.rstrip() + "\n"
-!
-! if not files:
-! return None
-!
-! change = Change(who, files, comments, isdir)
-!
-! return change
-!
-!
-!
-! class FCMaildirSource(MaildirTwisted):
-! """This source will watch a maildir that is subscribed to a FreshCVS
-! change-announcement mailing list.
-! """
-!
-! __implements__ = IChangeSource,
-
-! def __init__(self, maildir, prefix=None):
-! MaildirTwisted.__init__(self, maildir)
-! self.changemaster = None # filled in when added
-! self.prefix = prefix
-! def describe(self):
-! return "FreshCVS mailing list in maildir %s" % self.maildir.where
-! def messageReceived(self, filename):
-! path = os.path.join(self.basedir, "new", filename)
-! change = parseFreshCVSMail(open(path, "r"), self.prefix)
-! if change:
-! self.changemaster.addChange(change)
-! os.rename(os.path.join(self.basedir, "new", filename),
-! os.path.join(self.basedir, "cur", filename))
---- 1,5 ----
- #! /usr/bin/python
-
-! # leftover import for compatibility
-
-! from buildbot.changes.mail import FCMaildirSource
-
-
diff --git a/buildbot/buildbot/test/mail/syncmail.2 b/buildbot/buildbot/test/mail/syncmail.2
deleted file mode 100644
index 5296cbe..0000000
--- a/buildbot/buildbot/test/mail/syncmail.2
+++ /dev/null
@@ -1,56 +0,0 @@
-Return-Path: <warner@users.sourceforge.net>
-Delivered-To: warner-sourceforge@luther.lothar.com
-Received: (qmail 23221 invoked by uid 1000); 28 Jul 2003 06:53:15 -0000
-Delivered-To: warner-sourceforge@lothar.com
-Received: (qmail 58537 invoked by uid 13574); 28 Jul 2003 06:53:09 -0000
-Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
- by 130.94.181.6 (qmail-ldap-1.03) with SMTP
- for <warner-sourceforge@lothar.com>; 28 Jul 2003 06:53:09 -0000
-Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
- by sc8-sf-list1.sourceforge.net with esmtp
- (Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
- id 19h1sb-0003nw-00
- for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:09 -0700
-Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
- by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
- id 19h1sa-00018t-00
- for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:08 -0700
-Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
- by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
- id 19h1sa-0002mX-00
- for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:08 -0700
-From: warner@users.sourceforge.net
-To: warner@users.sourceforge.net
-Subject: buildbot ChangeLog,1.93,1.94
-Message-Id: <E19h1sa-0002mX-00@sc8-pr-cvs1.sourceforge.net>
-Date: Sun, 27 Jul 2003 23:53:08 -0700
-Status:
-
-Update of /cvsroot/buildbot/buildbot
-In directory sc8-pr-cvs1:/tmp/cvs-serv10689
-
-Modified Files:
- ChangeLog
-Log Message:
- * NEWS: started adding new features
-
-
-Index: ChangeLog
-===================================================================
-RCS file: /cvsroot/buildbot/buildbot/ChangeLog,v
-retrieving revision 1.93
-retrieving revision 1.94
-diff -C2 -d -r1.93 -r1.94
-*** ChangeLog 27 Jul 2003 22:53:27 -0000 1.93
---- ChangeLog 28 Jul 2003 06:53:06 -0000 1.94
-***************
-*** 1,4 ****
---- 1,6 ----
- 2003-07-27 Brian Warner <warner@lothar.com>
-
-+ * NEWS: started adding new features
-+
- * buildbot/changes/mail.py: start work on Syncmail parser, move
- mail sources into their own file
-
-
diff --git a/buildbot/buildbot/test/mail/syncmail.3 b/buildbot/buildbot/test/mail/syncmail.3
deleted file mode 100644
index eee19b1..0000000
--- a/buildbot/buildbot/test/mail/syncmail.3
+++ /dev/null
@@ -1,39 +0,0 @@
-Return-Path: <warner@users.sourceforge.net>
-Delivered-To: warner-sourceforge@luther.lothar.com
-Received: (qmail 23196 invoked by uid 1000); 28 Jul 2003 06:51:53 -0000
-Delivered-To: warner-sourceforge@lothar.com
-Received: (qmail 58269 invoked by uid 13574); 28 Jul 2003 06:51:46 -0000
-Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
- by 130.94.181.6 (qmail-ldap-1.03) with SMTP
- for <warner-sourceforge@lothar.com>; 28 Jul 2003 06:51:46 -0000
-Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
- by sc8-sf-list1.sourceforge.net with esmtp
- (Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
- id 19h1rF-00027s-00
- for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:46 -0700
-Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
- by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
- id 19h1rF-00017O-00
- for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:45 -0700
-Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
- by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
- id 19h1rF-0002jg-00
- for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:45 -0700
-From: warner@users.sourceforge.net
-To: warner@users.sourceforge.net
-Subject: CVSROOT syncmail,1.1,NONE
-Message-Id: <E19h1rF-0002jg-00@sc8-pr-cvs1.sourceforge.net>
-Date: Sun, 27 Jul 2003 23:51:45 -0700
-Status:
-
-Update of /cvsroot/buildbot/CVSROOT
-In directory sc8-pr-cvs1:/tmp/cvs-serv10515
-
-Removed Files:
- syncmail
-Log Message:
-nevermind
-
---- syncmail DELETED ---
-
-
diff --git a/buildbot/buildbot/test/mail/syncmail.4 b/buildbot/buildbot/test/mail/syncmail.4
deleted file mode 100644
index 44bda5d..0000000
--- a/buildbot/buildbot/test/mail/syncmail.4
+++ /dev/null
@@ -1,290 +0,0 @@
-Return-Path: <warner@users.sourceforge.net>
-Delivered-To: warner-sourceforge@luther.lothar.com
-Received: (qmail 24111 invoked by uid 1000); 28 Jul 2003 08:01:54 -0000
-Delivered-To: warner-sourceforge@lothar.com
-Received: (qmail 68756 invoked by uid 13574); 28 Jul 2003 08:01:46 -0000
-Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
- by 130.94.181.6 (qmail-ldap-1.03) with SMTP
- for <warner-sourceforge@lothar.com>; 28 Jul 2003 08:01:46 -0000
-Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
- by sc8-sf-list1.sourceforge.net with esmtp
- (Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
- id 19h2wz-00029d-00
- for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 01:01:45 -0700
-Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
- by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
- id 19h2wz-0002XB-00
- for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 01:01:45 -0700
-Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
- by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
- id 19h2wz-0005a9-00
- for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 01:01:45 -0700
-From: warner@users.sourceforge.net
-To: warner@users.sourceforge.net
-Subject: buildbot/test/mail syncmail.1,NONE,1.1 syncmail.2,NONE,1.1 syncmail.3,NONE,1.1
-Message-Id: <E19h2wz-0005a9-00@sc8-pr-cvs1.sourceforge.net>
-Date: Mon, 28 Jul 2003 01:01:45 -0700
-Status:
-
-Update of /cvsroot/buildbot/buildbot/test/mail
-In directory sc8-pr-cvs1:/tmp/cvs-serv21445
-
-Added Files:
- syncmail.1 syncmail.2 syncmail.3
-Log Message:
-test cases for syncmail parser
-
---- NEW FILE: syncmail.1 ---
-Return-Path: <warner@users.sourceforge.net>
-Delivered-To: warner-sourceforge@luther.lothar.com
-Received: (qmail 23758 invoked by uid 1000); 28 Jul 2003 07:22:14 -0000
-Delivered-To: warner-sourceforge@lothar.com
-Received: (qmail 62715 invoked by uid 13574); 28 Jul 2003 07:22:03 -0000
-Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
- by 130.94.181.6 (qmail-ldap-1.03) with SMTP
- for <warner-sourceforge@lothar.com>; 28 Jul 2003 07:22:03 -0000
-Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
- by sc8-sf-list1.sourceforge.net with esmtp
- (Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
- id 19h2KY-0004Nr-00
- for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
-Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
- by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
- id 19h2KY-0001rv-00
- for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
-Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
- by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
- id 19h2KY-0003r4-00
- for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
-From: warner@users.sourceforge.net
-To: warner@users.sourceforge.net
-Subject: buildbot/buildbot/changes freshcvsmail.py,1.2,1.3
-Message-Id: <E19h2KY-0003r4-00@sc8-pr-cvs1.sourceforge.net>
-Date: Mon, 28 Jul 2003 00:22:02 -0700
-Status:
-
-Update of /cvsroot/buildbot/buildbot/buildbot/changes
-In directory sc8-pr-cvs1:/tmp/cvs-serv14795/buildbot/changes
-
-Modified Files:
- freshcvsmail.py
-Log Message:
-remove leftover code, leave a temporary compatibility import. Note! Start
-importing FCMaildirSource from changes.mail instead of changes.freshcvsmail
-
-
-Index: freshcvsmail.py
-===================================================================
-RCS file: /cvsroot/buildbot/buildbot/buildbot/changes/freshcvsmail.py,v
-retrieving revision 1.2
-retrieving revision 1.3
-diff -C2 -d -r1.2 -r1.3
-*** freshcvsmail.py 27 Jul 2003 18:54:08 -0000 1.2
---- freshcvsmail.py 28 Jul 2003 07:22:00 -0000 1.3
-***************
-*** 1,96 ****
- #! /usr/bin/python
-
-! from buildbot.interfaces import IChangeSource
-! from buildbot.changes.maildirtwisted import MaildirTwisted
-! from buildbot.changes.changes import Change
-! from rfc822 import Message
-! import os, os.path
-!
-! def parseFreshCVSMail(fd, prefix=None):
-! """Parse mail sent by FreshCVS"""
-! # this uses rfc822.Message so it can run under python2.1 . In the future
-! # it will be updated to use python2.2's "email" module.
-!
-! m = Message(fd)
-! # FreshCVS sets From: to "user CVS <user>", but the <> part may be
-! # modified by the MTA (to include a local domain)
-! name, addr = m.getaddr("from")
-! if not name:
-! return None # no From means this message isn't from FreshCVS
-! cvs = name.find(" CVS")
-! if cvs == -1:
-! return None # this message isn't from FreshCVS
-! who = name[:cvs]
-!
-! # we take the time of receipt as the time of checkin. Not correct,
-! # but it avoids the out-of-order-changes issue
-! #when = m.getdate() # and convert from 9-tuple, and handle timezone
-!
-! files = []
-! comments = ""
-! isdir = 0
-! lines = m.fp.readlines()
-! while lines:
-! line = lines.pop(0)
-! if line == "Modified files:\n":
-! break
-! while lines:
-! line = lines.pop(0)
-! if line == "\n":
-! break
-! line = line.rstrip("\n")
-! file, junk = line.split(None, 1)
-! if prefix:
-! # insist that the file start with the prefix: FreshCVS sends
-! # changes we don't care about too
-! bits = file.split(os.sep)
-! if bits[0] == prefix:
-! file = apply(os.path.join, bits[1:])
-! else:
-! break
-! if junk == "0 0":
-! isdir = 1
-! files.append(file)
-! while lines:
-! line = lines.pop(0)
-! if line == "Log message:\n":
-! break
-! # message is terminated by "ViewCVS links:" or "Index:..." (patch)
-! while lines:
-! line = lines.pop(0)
-! if line == "ViewCVS links:\n":
-! break
-! if line.find("Index: ") == 0:
-! break
-! comments += line
-! comments = comments.rstrip() + "\n"
-!
-! if not files:
-! return None
-!
-! change = Change(who, files, comments, isdir)
-!
-! return change
-!
-!
-!
-! class FCMaildirSource(MaildirTwisted):
-! """This source will watch a maildir that is subscribed to a FreshCVS
-! change-announcement mailing list.
-! """
-!
-! __implements__ = IChangeSource,
-
-! def __init__(self, maildir, prefix=None):
-! MaildirTwisted.__init__(self, maildir)
-! self.changemaster = None # filled in when added
-! self.prefix = prefix
-! def describe(self):
-! return "FreshCVS mailing list in maildir %s" % self.maildir.where
-! def messageReceived(self, filename):
-! path = os.path.join(self.basedir, "new", filename)
-! change = parseFreshCVSMail(open(path, "r"), self.prefix)
-! if change:
-! self.changemaster.addChange(change)
-! os.rename(os.path.join(self.basedir, "new", filename),
-! os.path.join(self.basedir, "cur", filename))
---- 1,5 ----
- #! /usr/bin/python
-
-! # leftover import for compatibility
-
-! from buildbot.changes.mail import FCMaildirSource
-
-
-
---- NEW FILE: syncmail.2 ---
-Return-Path: <warner@users.sourceforge.net>
-Delivered-To: warner-sourceforge@luther.lothar.com
-Received: (qmail 23221 invoked by uid 1000); 28 Jul 2003 06:53:15 -0000
-Delivered-To: warner-sourceforge@lothar.com
-Received: (qmail 58537 invoked by uid 13574); 28 Jul 2003 06:53:09 -0000
-Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
- by 130.94.181.6 (qmail-ldap-1.03) with SMTP
- for <warner-sourceforge@lothar.com>; 28 Jul 2003 06:53:09 -0000
-Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
- by sc8-sf-list1.sourceforge.net with esmtp
- (Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
- id 19h1sb-0003nw-00
- for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:09 -0700
-Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
- by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
- id 19h1sa-00018t-00
- for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:08 -0700
-Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
- by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
- id 19h1sa-0002mX-00
- for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:08 -0700
-From: warner@users.sourceforge.net
-To: warner@users.sourceforge.net
-Subject: buildbot ChangeLog,1.93,1.94
-Message-Id: <E19h1sa-0002mX-00@sc8-pr-cvs1.sourceforge.net>
-Date: Sun, 27 Jul 2003 23:53:08 -0700
-Status:
-
-Update of /cvsroot/buildbot/buildbot
-In directory sc8-pr-cvs1:/tmp/cvs-serv10689
-
-Modified Files:
- ChangeLog
-Log Message:
- * NEWS: started adding new features
-
-
-Index: ChangeLog
-===================================================================
-RCS file: /cvsroot/buildbot/buildbot/ChangeLog,v
-retrieving revision 1.93
-retrieving revision 1.94
-diff -C2 -d -r1.93 -r1.94
-*** ChangeLog 27 Jul 2003 22:53:27 -0000 1.93
---- ChangeLog 28 Jul 2003 06:53:06 -0000 1.94
-***************
-*** 1,4 ****
---- 1,6 ----
- 2003-07-27 Brian Warner <warner@lothar.com>
-
-+ * NEWS: started adding new features
-+
- * buildbot/changes/mail.py: start work on Syncmail parser, move
- mail sources into their own file
-
-
-
---- NEW FILE: syncmail.3 ---
-Return-Path: <warner@users.sourceforge.net>
-Delivered-To: warner-sourceforge@luther.lothar.com
-Received: (qmail 23196 invoked by uid 1000); 28 Jul 2003 06:51:53 -0000
-Delivered-To: warner-sourceforge@lothar.com
-Received: (qmail 58269 invoked by uid 13574); 28 Jul 2003 06:51:46 -0000
-Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
- by 130.94.181.6 (qmail-ldap-1.03) with SMTP
- for <warner-sourceforge@lothar.com>; 28 Jul 2003 06:51:46 -0000
-Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
- by sc8-sf-list1.sourceforge.net with esmtp
- (Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
- id 19h1rF-00027s-00
- for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:46 -0700
-Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
- by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
- id 19h1rF-00017O-00
- for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:45 -0700
-Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
- by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
- id 19h1rF-0002jg-00
- for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:45 -0700
-From: warner@users.sourceforge.net
-To: warner@users.sourceforge.net
-Subject: CVSROOT syncmail,1.1,NONE
-Message-Id: <E19h1rF-0002jg-00@sc8-pr-cvs1.sourceforge.net>
-Date: Sun, 27 Jul 2003 23:51:45 -0700
-Status:
-
-Update of /cvsroot/buildbot/CVSROOT
-In directory sc8-pr-cvs1:/tmp/cvs-serv10515
-
-Removed Files:
- syncmail
-Log Message:
-nevermind
-
---- syncmail DELETED ---
-
-
-
-
diff --git a/buildbot/buildbot/test/mail/syncmail.5 b/buildbot/buildbot/test/mail/syncmail.5
deleted file mode 100644
index 82ba451..0000000
--- a/buildbot/buildbot/test/mail/syncmail.5
+++ /dev/null
@@ -1,70 +0,0 @@
-From thomas@otto.amantes Mon Feb 21 17:46:45 2005
-Return-Path: <thomas@otto.amantes>
-Received: from otto.amantes (otto.amantes [127.0.0.1]) by otto.amantes
- (8.13.1/8.13.1) with ESMTP id j1LGkjr3011986 for <thomas@localhost>; Mon,
- 21 Feb 2005 17:46:45 +0100
-Message-Id: <200502211646.j1LGkjr3011986@otto.amantes>
-From: Thomas Vander Stichele <thomas@otto.amantes>
-To: thomas@otto.amantes
-Subject: test1 s
-Date: Mon, 21 Feb 2005 16:46:45 +0000
-X-Mailer: Python syncmail $Revision: 1.1 $
- <http://sf.net/projects/cvs-syncmail>
-Content-Transfer-Encoding: 8bit
-Mime-Version: 1.0
-
-Update of /home/cvs/test/test1
-In directory otto.amantes:/home/thomas/dev/tests/cvs/test1
-
-Added Files:
- Tag: BRANCH-DEVEL
- MANIFEST Makefile.am autogen.sh configure.in
-Log Message:
-stuff on the branch
-
---- NEW FILE: Makefile.am ---
-SUBDIRS = src
-
-# normally I wouldn't distribute autogen.sh and friends with a tarball
-# but this one is specifically distributed for demonstration purposes
-
-EXTRA_DIST = autogen.sh
-
-# target for making the "import this into svn" tarball
-test:
- mkdir test
- for a in `cat MANIFEST`; do \
- cp -pr $$a test/$$a; done
- tar czf test.tar.gz test
- rm -rf test
-
---- NEW FILE: MANIFEST ---
-MANIFEST
-autogen.sh
-configure.in
-Makefile.am
-src
-src/Makefile.am
-src/test.c
-
---- NEW FILE: autogen.sh ---
-#!/bin/sh
-
-set -x
-
-aclocal && \
-autoheader && \
-autoconf && \
-automake -a --foreign && \
-./configure $@
-
---- NEW FILE: configure.in ---
-dnl configure.ac for version macro
-AC_INIT
-
-AM_CONFIG_HEADER(config.h)
-
-AM_INIT_AUTOMAKE(test, 0.0.0)
-AC_PROG_CC
-
-AC_OUTPUT(Makefile src/Makefile)
diff --git a/buildbot/buildbot/test/runutils.py b/buildbot/buildbot/test/runutils.py
deleted file mode 100644
index 2be85d6..0000000
--- a/buildbot/buildbot/test/runutils.py
+++ /dev/null
@@ -1,516 +0,0 @@
-
-import signal
-import shutil, os, errno
-from cStringIO import StringIO
-from twisted.internet import defer, reactor, protocol
-from twisted.python import log, util
-
-from buildbot import master, interfaces
-from buildbot.slave import bot
-from buildbot.buildslave import BuildSlave
-from buildbot.process.builder import Builder
-from buildbot.process.base import BuildRequest, Build
-from buildbot.process.buildstep import BuildStep
-from buildbot.sourcestamp import SourceStamp
-from buildbot.status import builder
-from buildbot.process.properties import Properties
-
-
-
-class _PutEverythingGetter(protocol.ProcessProtocol):
- def __init__(self, deferred, stdin):
- self.deferred = deferred
- self.outBuf = StringIO()
- self.errBuf = StringIO()
- self.outReceived = self.outBuf.write
- self.errReceived = self.errBuf.write
- self.stdin = stdin
-
- def connectionMade(self):
- if self.stdin is not None:
- self.transport.write(self.stdin)
- self.transport.closeStdin()
-
- def processEnded(self, reason):
- out = self.outBuf.getvalue()
- err = self.errBuf.getvalue()
- e = reason.value
- code = e.exitCode
- if e.signal:
- self.deferred.errback((out, err, e.signal))
- else:
- self.deferred.callback((out, err, code))
-
-def myGetProcessOutputAndValue(executable, args=(), env={}, path='.',
- _reactor_ignored=None, stdin=None):
- """Like twisted.internet.utils.getProcessOutputAndValue but takes
- stdin, too."""
- d = defer.Deferred()
- p = _PutEverythingGetter(d, stdin)
- reactor.spawnProcess(p, executable, (executable,)+tuple(args), env, path)
- return d
-
-
-class MyBot(bot.Bot):
- def remote_getSlaveInfo(self):
- return self.parent.info
-
-class MyBuildSlave(bot.BuildSlave):
- botClass = MyBot
-
-def rmtree(d):
- try:
- shutil.rmtree(d, ignore_errors=1)
- except OSError, e:
- # stupid 2.2 appears to ignore ignore_errors
- if e.errno != errno.ENOENT:
- raise
-
-class RunMixin:
- master = None
-
- def rmtree(self, d):
- rmtree(d)
-
- def setUp(self):
- self.slaves = {}
- self.rmtree("basedir")
- os.mkdir("basedir")
- self.master = master.BuildMaster("basedir")
- self.status = self.master.getStatus()
- self.control = interfaces.IControl(self.master)
-
- def connectOneSlave(self, slavename, opts={}):
- port = self.master.slavePort._port.getHost().port
- self.rmtree("slavebase-%s" % slavename)
- os.mkdir("slavebase-%s" % slavename)
- slave = MyBuildSlave("localhost", port, slavename, "sekrit",
- "slavebase-%s" % slavename,
- keepalive=0, usePTY=False, debugOpts=opts)
- slave.info = {"admin": "one"}
- self.slaves[slavename] = slave
- slave.startService()
-
- def connectSlave(self, builders=["dummy"], slavename="bot1",
- opts={}):
- # connect buildslave 'slavename' and wait for it to connect to all of
- # the given builders
- dl = []
- # initiate call for all of them, before waiting on result,
- # otherwise we might miss some
- for b in builders:
- dl.append(self.master.botmaster.waitUntilBuilderAttached(b))
- d = defer.DeferredList(dl)
- self.connectOneSlave(slavename, opts)
- return d
-
- def connectSlaves(self, slavenames, builders):
- dl = []
- # initiate call for all of them, before waiting on result,
- # otherwise we might miss some
- for b in builders:
- dl.append(self.master.botmaster.waitUntilBuilderAttached(b))
- d = defer.DeferredList(dl)
- for name in slavenames:
- self.connectOneSlave(name)
- return d
-
- def connectSlave2(self):
- # this takes over for bot1, so it has to share the slavename
- port = self.master.slavePort._port.getHost().port
- self.rmtree("slavebase-bot2")
- os.mkdir("slavebase-bot2")
- # this uses bot1, really
- slave = MyBuildSlave("localhost", port, "bot1", "sekrit",
- "slavebase-bot2", keepalive=0, usePTY=False)
- slave.info = {"admin": "two"}
- self.slaves['bot2'] = slave
- slave.startService()
-
- def connectSlaveFastTimeout(self):
- # this slave has a very fast keepalive timeout
- port = self.master.slavePort._port.getHost().port
- self.rmtree("slavebase-bot1")
- os.mkdir("slavebase-bot1")
- slave = MyBuildSlave("localhost", port, "bot1", "sekrit",
- "slavebase-bot1", keepalive=2, usePTY=False,
- keepaliveTimeout=1)
- slave.info = {"admin": "one"}
- self.slaves['bot1'] = slave
- slave.startService()
- d = self.master.botmaster.waitUntilBuilderAttached("dummy")
- return d
-
- # things to start builds
- def requestBuild(self, builder):
- # returns a Deferred that fires with an IBuildStatus object when the
- # build is finished
- req = BuildRequest("forced build", SourceStamp(), 'test_builder')
- self.control.getBuilder(builder).requestBuild(req)
- return req.waitUntilFinished()
-
- def failUnlessBuildSucceeded(self, bs):
- if bs.getResults() != builder.SUCCESS:
- log.msg("failUnlessBuildSucceeded noticed that the build failed")
- self.logBuildResults(bs)
- self.failUnlessEqual(bs.getResults(), builder.SUCCESS)
- return bs # useful for chaining
-
- def logBuildResults(self, bs):
- # emit the build status and the contents of all logs to test.log
- log.msg("logBuildResults starting")
- log.msg(" bs.getResults() == %s" % builder.Results[bs.getResults()])
- log.msg(" bs.isFinished() == %s" % bs.isFinished())
- for s in bs.getSteps():
- for l in s.getLogs():
- log.msg("--- START step %s / log %s ---" % (s.getName(),
- l.getName()))
- if not l.getName().endswith(".html"):
- log.msg(l.getTextWithHeaders())
- log.msg("--- STOP ---")
- log.msg("logBuildResults finished")
-
- def tearDown(self):
- log.msg("doing tearDown")
- d = self.shutdownAllSlaves()
- d.addCallback(self._tearDown_1)
- d.addCallback(self._tearDown_2)
- return d
- def _tearDown_1(self, res):
- if self.master:
- return defer.maybeDeferred(self.master.stopService)
- def _tearDown_2(self, res):
- self.master = None
- log.msg("tearDown done")
-
-
- # various forms of slave death
-
- def shutdownAllSlaves(self):
- # the slave has disconnected normally: they SIGINT'ed it, or it shut
- # down willingly. This will kill child processes and give them a
- # chance to finish up. We return a Deferred that will fire when
- # everything is finished shutting down.
-
- log.msg("doing shutdownAllSlaves")
- dl = []
- for slave in self.slaves.values():
- dl.append(slave.waitUntilDisconnected())
- dl.append(defer.maybeDeferred(slave.stopService))
- d = defer.DeferredList(dl)
- d.addCallback(self._shutdownAllSlavesDone)
- return d
- def _shutdownAllSlavesDone(self, res):
- for name in self.slaves.keys():
- del self.slaves[name]
- return self.master.botmaster.waitUntilBuilderFullyDetached("dummy")
-
- def shutdownSlave(self, slavename, buildername):
- # this slave has disconnected normally: they SIGINT'ed it, or it shut
- # down willingly. This will kill child processes and give them a
- # chance to finish up. We return a Deferred that will fire when
- # everything is finished shutting down, and the given Builder knows
- # that the slave has gone away.
-
- s = self.slaves[slavename]
- dl = [self.master.botmaster.waitUntilBuilderDetached(buildername),
- s.waitUntilDisconnected()]
- d = defer.DeferredList(dl)
- d.addCallback(self._shutdownSlave_done, slavename)
- s.stopService()
- return d
- def _shutdownSlave_done(self, res, slavename):
- del self.slaves[slavename]
-
- def killSlave(self):
- # the slave has died, its host sent a FIN. The .notifyOnDisconnect
- # callbacks will terminate the current step, so the build should be
- # flunked (no further steps should be started).
- self.slaves['bot1'].bf.continueTrying = 0
- bot = self.slaves['bot1'].getServiceNamed("bot")
- broker = bot.builders["dummy"].remote.broker
- broker.transport.loseConnection()
- del self.slaves['bot1']
-
- def disappearSlave(self, slavename="bot1", buildername="dummy",
- allowReconnect=False):
- # the slave's host has vanished off the net, leaving the connection
- # dangling. This will be detected quickly by app-level keepalives or
- # a ping, or slowly by TCP timeouts.
-
- # simulate this by replacing the slave Broker's .dataReceived method
- # with one that just throws away all data.
- def discard(data):
- pass
- bot = self.slaves[slavename].getServiceNamed("bot")
- broker = bot.builders[buildername].remote.broker
- broker.dataReceived = discard # seal its ears
- broker.transport.write = discard # and take away its voice
- if not allowReconnect:
- # also discourage it from reconnecting once the connection goes away
- assert self.slaves[slavename].bf.continueTrying
- self.slaves[slavename].bf.continueTrying = False
-
- def ghostSlave(self):
- # the slave thinks it has lost the connection, and initiated a
- # reconnect. The master doesn't yet realize it has lost the previous
- # connection, and sees two connections at once.
- raise NotImplementedError
-
-
-def setupBuildStepStatus(basedir):
- """Return a BuildStep with a suitable BuildStepStatus object, ready to
- use."""
- os.mkdir(basedir)
- botmaster = None
- s0 = builder.Status(botmaster, basedir)
- s1 = s0.builderAdded("buildername", "buildername")
- s2 = builder.BuildStatus(s1, 1)
- s3 = builder.BuildStepStatus(s2)
- s3.setName("foostep")
- s3.started = True
- s3.stepStarted()
- return s3
-
-def fake_slaveVersion(command, oldversion=None):
- from buildbot.slave.registry import commandRegistry
- return commandRegistry[command]
-
-class FakeBuildMaster:
- properties = Properties(masterprop="master")
-
-class FakeBotMaster:
- parent = FakeBuildMaster()
-
-def makeBuildStep(basedir, step_class=BuildStep, **kwargs):
- bss = setupBuildStepStatus(basedir)
-
- ss = SourceStamp()
- setup = {'name': "builder1", "slavename": "bot1",
- 'builddir': "builddir", 'factory': None}
- b0 = Builder(setup, bss.getBuild().getBuilder())
- b0.botmaster = FakeBotMaster()
- br = BuildRequest("reason", ss, 'test_builder')
- b = Build([br])
- b.setBuilder(b0)
- s = step_class(**kwargs)
- s.setBuild(b)
- s.setStepStatus(bss)
- b.build_status = bss.getBuild()
- b.setupProperties()
- s.slaveVersion = fake_slaveVersion
- return s
-
-
-def findDir():
- # the same directory that holds this script
- return util.sibpath(__file__, ".")
-
-class SignalMixin:
- sigchldHandler = None
-
- def setUpClass(self):
- # make sure SIGCHLD handler is installed, as it should be on
- # reactor.run(). problem is reactor may not have been run when this
- # test runs.
- if hasattr(reactor, "_handleSigchld") and hasattr(signal, "SIGCHLD"):
- self.sigchldHandler = signal.signal(signal.SIGCHLD,
- reactor._handleSigchld)
-
- def tearDownClass(self):
- if self.sigchldHandler:
- signal.signal(signal.SIGCHLD, self.sigchldHandler)
-
-# these classes are used to test SlaveCommands in isolation
-
-class FakeSlaveBuilder:
- debug = False
- def __init__(self, usePTY, basedir):
- self.updates = []
- self.basedir = basedir
- self.usePTY = usePTY
-
- def sendUpdate(self, data):
- if self.debug:
- print "FakeSlaveBuilder.sendUpdate", data
- self.updates.append(data)
-
-
-class SlaveCommandTestBase(SignalMixin):
- usePTY = False
-
- def setUpBuilder(self, basedir):
- if not os.path.exists(basedir):
- os.mkdir(basedir)
- self.builder = FakeSlaveBuilder(self.usePTY, basedir)
-
- def startCommand(self, cmdclass, args):
- stepId = 0
- self.cmd = c = cmdclass(self.builder, stepId, args)
- c.running = True
- d = c.doStart()
- return d
-
- def collectUpdates(self, res=None):
- logs = {}
- for u in self.builder.updates:
- for k in u.keys():
- if k == "log":
- logname,data = u[k]
- oldlog = logs.get(("log",logname), "")
- logs[("log",logname)] = oldlog + data
- elif k == "rc":
- pass
- else:
- logs[k] = logs.get(k, "") + u[k]
- return logs
-
- def findRC(self):
- for u in self.builder.updates:
- if "rc" in u:
- return u["rc"]
- return None
-
- def printStderr(self):
- for u in self.builder.updates:
- if "stderr" in u:
- print u["stderr"]
-
-# ----------------------------------------
-
-class LocalWrapper:
- # r = pb.Referenceable()
- # w = LocalWrapper(r)
- # now you can do things like w.callRemote()
- def __init__(self, target):
- self.target = target
-
- def callRemote(self, name, *args, **kwargs):
- # callRemote is not allowed to fire its Deferred in the same turn
- d = defer.Deferred()
- d.addCallback(self._callRemote, *args, **kwargs)
- reactor.callLater(0, d.callback, name)
- return d
-
- def _callRemote(self, name, *args, **kwargs):
- method = getattr(self.target, "remote_"+name)
- return method(*args, **kwargs)
-
- def notifyOnDisconnect(self, observer):
- pass
- def dontNotifyOnDisconnect(self, observer):
- pass
-
-
-class LocalSlaveBuilder(bot.SlaveBuilder):
- """I am object that behaves like a pb.RemoteReference, but in fact I
- invoke methods locally."""
- _arg_filter = None
-
- def setArgFilter(self, filter):
- self._arg_filter = filter
-
- def remote_startCommand(self, stepref, stepId, command, args):
- if self._arg_filter:
- args = self._arg_filter(args)
- # stepref should be a RemoteReference to the RemoteCommand
- return bot.SlaveBuilder.remote_startCommand(self,
- LocalWrapper(stepref),
- stepId, command, args)
-
-class StepTester:
- """Utility class to exercise BuildSteps and RemoteCommands, without
- really using a Build or a Bot. No networks are used.
-
- Use this as follows::
-
- class MyTest(StepTester, unittest.TestCase):
- def testOne(self):
- self.slavebase = 'testOne.slave'
- self.masterbase = 'testOne.master'
- sb = self.makeSlaveBuilder()
- step = self.makeStep(stepclass, **kwargs)
- d = self.runStep(step)
- d.addCallback(_checkResults)
- return d
- """
-
- #slavebase = "slavebase"
- slavebuilderbase = "slavebuilderbase"
- #masterbase = "masterbase"
-
- def makeSlaveBuilder(self):
- os.mkdir(self.slavebase)
- os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase))
- b = bot.Bot(self.slavebase, False)
- b.startService()
- sb = LocalSlaveBuilder("slavebuildername", False)
- sb.setArgFilter(self.filterArgs)
- sb.usePTY = False
- sb.setServiceParent(b)
- sb.setBuilddir(self.slavebuilderbase)
- self.remote = LocalWrapper(sb)
- return sb
-
- workdir = "build"
- def makeStep(self, factory, **kwargs):
- step = makeBuildStep(self.masterbase, factory, **kwargs)
- step.setBuildSlave(BuildSlave("name", "password"))
- step.setDefaultWorkdir(self.workdir)
- return step
-
- def runStep(self, step):
- d = defer.maybeDeferred(step.startStep, self.remote)
- return d
-
- def wrap(self, target):
- return LocalWrapper(target)
-
- def filterArgs(self, args):
- # this can be overridden
- return args
-
-# ----------------------------------------
-
-_flags = {}
-
-def setTestFlag(flagname, value):
- _flags[flagname] = value
-
-class SetTestFlagStep(BuildStep):
- """
- A special BuildStep to set a named flag; this can be used with the
- TestFlagMixin to monitor what has and has not run in a particular
- configuration.
- """
- def __init__(self, flagname='flag', value=1, **kwargs):
- BuildStep.__init__(self, **kwargs)
- self.addFactoryArguments(flagname=flagname, value=value)
-
- self.flagname = flagname
- self.value = value
-
- def start(self):
- properties = self.build.getProperties()
- _flags[self.flagname] = properties.render(self.value)
- self.finished(builder.SUCCESS)
-
-class TestFlagMixin:
- def clearFlags(self):
- """
- Set up for a test by clearing all flags; call this from your test
- function.
- """
- _flags.clear()
-
- def failIfFlagSet(self, flagname, msg=None):
- if not msg: msg = "flag '%s' is set" % flagname
- self.failIf(_flags.has_key(flagname), msg=msg)
-
- def failIfFlagNotSet(self, flagname, msg=None):
- if not msg: msg = "flag '%s' is not set" % flagname
- self.failUnless(_flags.has_key(flagname), msg=msg)
-
- def getFlag(self, flagname):
- self.failIfFlagNotSet(flagname, "flag '%s' not set" % flagname)
- return _flags.get(flagname)
diff --git a/buildbot/buildbot/test/sleep.py b/buildbot/buildbot/test/sleep.py
deleted file mode 100644
index 4662852..0000000
--- a/buildbot/buildbot/test/sleep.py
+++ /dev/null
@@ -1,8 +0,0 @@
-
-import sys, time
-delay = int(sys.argv[1])
-
-sys.stdout.write("sleeping for %d seconds\n" % delay)
-time.sleep(delay)
-sys.stdout.write("woke up\n")
-sys.exit(0)
diff --git a/buildbot/buildbot/test/subdir/emit.py b/buildbot/buildbot/test/subdir/emit.py
deleted file mode 100644
index 42d2ca9..0000000
--- a/buildbot/buildbot/test/subdir/emit.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#! /usr/bin/python
-
-import os, sys
-
-sys.stdout.write("this is stdout in subdir\n")
-sys.stderr.write("this is stderr\n")
-if os.environ.has_key("EMIT_TEST"):
- sys.stdout.write("EMIT_TEST: %s\n" % os.environ["EMIT_TEST"])
-open("log1.out","wt").write("this is log1\n")
-rc = int(sys.argv[1])
-sys.exit(rc)
diff --git a/buildbot/buildbot/test/test__versions.py b/buildbot/buildbot/test/test__versions.py
deleted file mode 100644
index a69fcc4..0000000
--- a/buildbot/buildbot/test/test__versions.py
+++ /dev/null
@@ -1,16 +0,0 @@
-
-# This is a fake test which just logs the version of Twisted, to make it
-# easier to track down failures in other tests.
-
-from twisted.trial import unittest
-from twisted.python import log
-from twisted import copyright
-import sys
-import buildbot
-
-class Versions(unittest.TestCase):
- def test_versions(self):
- log.msg("Python Version: %s" % sys.version)
- log.msg("Twisted Version: %s" % copyright.version)
- log.msg("Buildbot Version: %s" % buildbot.version)
-
diff --git a/buildbot/buildbot/test/test_bonsaipoller.py b/buildbot/buildbot/test/test_bonsaipoller.py
deleted file mode 100644
index f4ca233..0000000
--- a/buildbot/buildbot/test/test_bonsaipoller.py
+++ /dev/null
@@ -1,244 +0,0 @@
-# -*- test-case-name: buildbot.test.test_bonsaipoller -*-
-
-from twisted.trial import unittest
-from buildbot.changes.bonsaipoller import FileNode, CiNode, BonsaiResult, \
- BonsaiParser, BonsaiPoller, InvalidResultError, EmptyResult
-from buildbot.changes.changes import ChangeMaster
-
-from copy import deepcopy
-import re
-
-log1 = "Add Bug 338541a"
-who1 = "sar@gmail.com"
-date1 = 1161908700
-log2 = "bug 357427 add static ctor/dtor methods"
-who2 = "aarrg@ooacm.org"
-date2 = 1161910620
-log3 = "Testing log #3 lbah blah"
-who3 = "huoents@hueont.net"
-date3 = 1889822728
-rev1 = "1.8"
-file1 = "mozilla/testing/mochitest/tests/index.html"
-rev2 = "1.1"
-file2 = "mozilla/testing/mochitest/tests/test_bug338541.xhtml"
-rev3 = "1.1812"
-file3 = "mozilla/xpcom/threads/nsAutoLock.cpp"
-rev4 = "1.3"
-file4 = "mozilla/xpcom/threads/nsAutoLock.h"
-rev5 = "2.4"
-file5 = "mozilla/xpcom/threads/test.cpp"
-
-nodes = []
-files = []
-files.append(FileNode(rev1,file1))
-nodes.append(CiNode(log1, who1, date1, files))
-
-files = []
-files.append(FileNode(rev2, file2))
-files.append(FileNode(rev3, file3))
-nodes.append(CiNode(log2, who2, date2, files))
-
-nodes.append(CiNode(log3, who3, date3, []))
-
-goodParsedResult = BonsaiResult(nodes)
-
-goodUnparsedResult = """\
-<?xml version="1.0"?>
-<queryResults>
-<ci who="%s" date="%d">
- <log>%s</log>
- <files>
- <f rev="%s">%s</f>
- </files>
-</ci>
-<ci who="%s" date="%d">
- <log>%s</log>
- <files>
- <f rev="%s">%s</f>
- <f rev="%s">%s</f>
- </files>
-</ci>
-<ci who="%s" date="%d">
- <log>%s</log>
- <files>
- </files>
-</ci>
-</queryResults>
-""" % (who1, date1, log1, rev1, file1,
- who2, date2, log2, rev2, file2, rev3, file3,
- who3, date3, log3)
-
-badUnparsedResult = deepcopy(goodUnparsedResult)
-badUnparsedResult = badUnparsedResult.replace("</queryResults>", "")
-
-invalidDateResult = deepcopy(goodUnparsedResult)
-invalidDateResult = invalidDateResult.replace(str(date1), "foobar")
-
-missingFilenameResult = deepcopy(goodUnparsedResult)
-missingFilenameResult = missingFilenameResult.replace(file2, "")
-
-duplicateLogResult = deepcopy(goodUnparsedResult)
-duplicateLogResult = re.sub("<log>"+log1+"</log>",
- "<log>blah</log><log>blah</log>",
- duplicateLogResult)
-
-duplicateFilesResult = deepcopy(goodUnparsedResult)
-duplicateFilesResult = re.sub("<files>\s*</files>",
- "<files></files><files></files>",
- duplicateFilesResult)
-
-missingCiResult = deepcopy(goodUnparsedResult)
-r = re.compile("<ci.*</ci>", re.DOTALL | re.MULTILINE)
-missingCiResult = re.sub(r, "", missingCiResult)
-
-badResultMsgs = { 'badUnparsedResult':
- "BonsaiParser did not raise an exception when given a bad query",
- 'invalidDateResult':
- "BonsaiParser did not raise an exception when given an invalid date",
- 'missingRevisionResult':
- "BonsaiParser did not raise an exception when a revision was missing",
- 'missingFilenameResult':
- "BonsaiParser did not raise an exception when a filename was missing",
- 'duplicateLogResult':
- "BonsaiParser did not raise an exception when there was two <log> tags",
- 'duplicateFilesResult':
- "BonsaiParser did not raise an exception when there was two <files> tags",
- 'missingCiResult':
- "BonsaiParser did not raise an exception when there was no <ci> tags"
-}
-
-noCheckinMsgResult = """\
-<?xml version="1.0"?>
-<queryResults>
-<ci who="johndoe@domain.tld" date="12345678">
- <log></log>
- <files>
- <f rev="1.1">first/file.ext</f>
- </files>
-</ci>
-<ci who="johndoe@domain.tld" date="12345678">
- <log></log>
- <files>
- <f rev="1.2">second/file.ext</f>
- </files>
-</ci>
-<ci who="johndoe@domain.tld" date="12345678">
- <log></log>
- <files>
- <f rev="1.3">third/file.ext</f>
- </files>
-</ci>
-</queryResults>
-"""
-
-noCheckinMsgRef = [dict(filename="first/file.ext",
- revision="1.1"),
- dict(filename="second/file.ext",
- revision="1.2"),
- dict(filename="third/file.ext",
- revision="1.3")]
-
-class FakeChangeMaster(ChangeMaster):
- def __init__(self):
- ChangeMaster.__init__(self)
-
- def addChange(self, change):
- pass
-
-class FakeBonsaiPoller(BonsaiPoller):
- def __init__(self):
- BonsaiPoller.__init__(self, "fake url", "fake module", "fake branch")
- self.parent = FakeChangeMaster()
-
-class TestBonsaiPoller(unittest.TestCase):
- def testFullyFormedResult(self):
- br = BonsaiParser(goodUnparsedResult)
- result = br.getData()
- # make sure the result is a BonsaiResult
- self.failUnless(isinstance(result, BonsaiResult))
- # test for successful parsing
- self.failUnlessEqual(goodParsedResult, result,
- "BonsaiParser did not return the expected BonsaiResult")
-
- def testBadUnparsedResult(self):
- try:
- BonsaiParser(badUnparsedResult)
- self.fail(badResultMsgs["badUnparsedResult"])
- except InvalidResultError:
- pass
-
- def testInvalidDateResult(self):
- try:
- BonsaiParser(invalidDateResult)
- self.fail(badResultMsgs["invalidDateResult"])
- except InvalidResultError:
- pass
-
- def testMissingFilenameResult(self):
- try:
- BonsaiParser(missingFilenameResult)
- self.fail(badResultMsgs["missingFilenameResult"])
- except InvalidResultError:
- pass
-
- def testDuplicateLogResult(self):
- try:
- BonsaiParser(duplicateLogResult)
- self.fail(badResultMsgs["duplicateLogResult"])
- except InvalidResultError:
- pass
-
- def testDuplicateFilesResult(self):
- try:
- BonsaiParser(duplicateFilesResult)
- self.fail(badResultMsgs["duplicateFilesResult"])
- except InvalidResultError:
- pass
-
- def testMissingCiResult(self):
- try:
- BonsaiParser(missingCiResult)
- self.fail(badResultMsgs["missingCiResult"])
- except EmptyResult:
- pass
-
- def testChangeNotSubmitted(self):
- "Make sure a change is not submitted if the BonsaiParser fails"
- poller = FakeBonsaiPoller()
- lastChangeBefore = poller.lastChange
- poller._process_changes(badUnparsedResult)
- # self.lastChange will not be updated if the change was not submitted
- self.failUnlessEqual(lastChangeBefore, poller.lastChange)
-
- def testParserWorksAfterInvalidResult(self):
- """Make sure the BonsaiPoller still works after catching an
- InvalidResultError"""
-
- poller = FakeBonsaiPoller()
-
- lastChangeBefore = poller.lastChange
- # generate an exception first. pretend that we're doing a poll and
- # increment the timestamp, otherwise the failIfEqual test at the
- # bottom will depend upon there being a noticeable difference between
- # two successive calls to time.time().
- poller.lastPoll += 1.0
- poller._process_changes(badUnparsedResult)
- # now give it a valid one...
- poller.lastPoll += 1.0
- poller._process_changes(goodUnparsedResult)
- # if poller.lastChange has not been updated then the good result
- # was not parsed
- self.failIfEqual(lastChangeBefore, poller.lastChange)
-
- def testMergeEmptyLogMsg(self):
- """Ensure that BonsaiPoller works around the bonsai xml output
- issue when the check-in comment is empty"""
- bp = BonsaiParser(noCheckinMsgResult)
- result = bp.getData()
- self.failUnlessEqual(len(result.nodes), 1)
- self.failUnlessEqual(result.nodes[0].who, "johndoe@domain.tld")
- self.failUnlessEqual(result.nodes[0].date, 12345678)
- self.failUnlessEqual(result.nodes[0].log, "")
- for file, ref in zip(result.nodes[0].files, noCheckinMsgRef):
- self.failUnlessEqual(file.filename, ref['filename'])
- self.failUnlessEqual(file.revision, ref['revision'])
diff --git a/buildbot/buildbot/test/test_buildreq.py b/buildbot/buildbot/test/test_buildreq.py
deleted file mode 100644
index 6f7f3a9..0000000
--- a/buildbot/buildbot/test/test_buildreq.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# -*- test-case-name: buildbot.test.test_buildreq -*-
-
-from twisted.trial import unittest
-
-from buildbot import buildset, interfaces, sourcestamp
-from buildbot.process import base
-from buildbot.status import builder
-from buildbot.changes.changes import Change
-
-class Request(unittest.TestCase):
- def testMerge(self):
- R = base.BuildRequest
- S = sourcestamp.SourceStamp
- N = 'test_builder'
- b1 = R("why", S("branch1", None, None, None), N)
- b1r1 = R("why2", S("branch1", "rev1", None, None), N)
- b1r1a = R("why not", S("branch1", "rev1", None, None), N)
- b1r2 = R("why3", S("branch1", "rev2", None, None), N)
- b2r2 = R("why4", S("branch2", "rev2", None, None), N)
- b1r1p1 = R("why5", S("branch1", "rev1", (3, "diff"), None), N)
- c1 = Change("alice", [], "changed stuff", branch="branch1")
- c2 = Change("alice", [], "changed stuff", branch="branch1")
- c3 = Change("alice", [], "changed stuff", branch="branch1")
- c4 = Change("alice", [], "changed stuff", branch="branch1")
- c5 = Change("alice", [], "changed stuff", branch="branch1")
- c6 = Change("alice", [], "changed stuff", branch="branch1")
- b1c1 = R("changes", S("branch1", None, None, [c1,c2,c3]), N)
- b1c2 = R("changes", S("branch1", None, None, [c4,c5,c6]), N)
-
- self.failUnless(b1.canBeMergedWith(b1))
- self.failIf(b1.canBeMergedWith(b1r1))
- self.failIf(b1.canBeMergedWith(b2r2))
- self.failIf(b1.canBeMergedWith(b1r1p1))
- self.failIf(b1.canBeMergedWith(b1c1))
-
- self.failIf(b1r1.canBeMergedWith(b1))
- self.failUnless(b1r1.canBeMergedWith(b1r1))
- self.failIf(b1r1.canBeMergedWith(b2r2))
- self.failIf(b1r1.canBeMergedWith(b1r1p1))
- self.failIf(b1r1.canBeMergedWith(b1c1))
-
- self.failIf(b1r2.canBeMergedWith(b1))
- self.failIf(b1r2.canBeMergedWith(b1r1))
- self.failUnless(b1r2.canBeMergedWith(b1r2))
- self.failIf(b1r2.canBeMergedWith(b2r2))
- self.failIf(b1r2.canBeMergedWith(b1r1p1))
-
- self.failIf(b1r1p1.canBeMergedWith(b1))
- self.failIf(b1r1p1.canBeMergedWith(b1r1))
- self.failIf(b1r1p1.canBeMergedWith(b1r2))
- self.failIf(b1r1p1.canBeMergedWith(b2r2))
- self.failIf(b1r1p1.canBeMergedWith(b1c1))
-
- self.failIf(b1c1.canBeMergedWith(b1))
- self.failIf(b1c1.canBeMergedWith(b1r1))
- self.failIf(b1c1.canBeMergedWith(b1r2))
- self.failIf(b1c1.canBeMergedWith(b2r2))
- self.failIf(b1c1.canBeMergedWith(b1r1p1))
- self.failUnless(b1c1.canBeMergedWith(b1c1))
- self.failUnless(b1c1.canBeMergedWith(b1c2))
-
- sm = b1.mergeWith([])
- self.failUnlessEqual(sm.branch, "branch1")
- self.failUnlessEqual(sm.revision, None)
- self.failUnlessEqual(sm.patch, None)
- self.failUnlessEqual(sm.changes, ())
-
- ss = b1r1.mergeWith([b1r1])
- self.failUnlessEqual(ss, S("branch1", "rev1", None, None))
- why = b1r1.mergeReasons([b1r1])
- self.failUnlessEqual(why, "why2")
- why = b1r1.mergeReasons([b1r1a])
- self.failUnlessEqual(why, "why2, why not")
-
- ss = b1c1.mergeWith([b1c2])
- self.failUnlessEqual(ss, S("branch1", None, None, [c1,c2,c3,c4,c5,c6]))
- why = b1c1.mergeReasons([b1c2])
- self.failUnlessEqual(why, "changes")
-
-
-class FakeBuilder:
- name = "fake"
- def __init__(self):
- self.requests = []
- def submitBuildRequest(self, req):
- self.requests.append(req)
-
-
-class Set(unittest.TestCase):
- def testBuildSet(self):
- S = buildset.BuildSet
- a,b = FakeBuilder(), FakeBuilder()
-
- # two builds, the first one fails, the second one succeeds. The
- # waitUntilSuccess watcher fires as soon as the first one fails,
- # while the waitUntilFinished watcher doesn't fire until all builds
- # are complete.
-
- source = sourcestamp.SourceStamp()
- s = S(["a","b"], source, "forced build")
- s.start([a,b])
- self.failUnlessEqual(len(a.requests), 1)
- self.failUnlessEqual(len(b.requests), 1)
- r1 = a.requests[0]
- self.failUnlessEqual(r1.reason, s.reason)
- self.failUnlessEqual(r1.source, s.source)
-
- st = s.status
- self.failUnlessEqual(st.getSourceStamp(), source)
- self.failUnlessEqual(st.getReason(), "forced build")
- self.failUnlessEqual(st.getBuilderNames(), ["a","b"])
- self.failIf(st.isFinished())
- brs = st.getBuildRequests()
- self.failUnlessEqual(len(brs), 2)
-
- res = []
- d1 = s.waitUntilSuccess()
- d1.addCallback(lambda r: res.append(("success", r)))
- d2 = s.waitUntilFinished()
- d2.addCallback(lambda r: res.append(("finished", r)))
-
- self.failUnlessEqual(res, [])
-
- # the first build finishes here, with FAILURE
- builderstatus_a = builder.BuilderStatus("a")
- bsa = builder.BuildStatus(builderstatus_a, 1)
- bsa.setResults(builder.FAILURE)
- a.requests[0].finished(bsa)
-
- # any FAILURE flunks the BuildSet immediately, so the
- # waitUntilSuccess deferred fires right away. However, the
- # waitUntilFinished deferred must wait until all builds have
- # completed.
- self.failUnlessEqual(len(res), 1)
- self.failUnlessEqual(res[0][0], "success")
- bss = res[0][1]
- self.failUnless(interfaces.IBuildSetStatus(bss, None))
- self.failUnlessEqual(bss.getResults(), builder.FAILURE)
-
- # here we finish the second build
- builderstatus_b = builder.BuilderStatus("b")
- bsb = builder.BuildStatus(builderstatus_b, 1)
- bsb.setResults(builder.SUCCESS)
- b.requests[0].finished(bsb)
-
- # .. which ought to fire the waitUntilFinished deferred
- self.failUnlessEqual(len(res), 2)
- self.failUnlessEqual(res[1][0], "finished")
- self.failUnlessEqual(res[1][1], bss)
-
- # and finish the BuildSet overall
- self.failUnless(st.isFinished())
- self.failUnlessEqual(st.getResults(), builder.FAILURE)
-
- def testSuccess(self):
- S = buildset.BuildSet
- a,b = FakeBuilder(), FakeBuilder()
- # this time, both builds succeed
-
- source = sourcestamp.SourceStamp()
- s = S(["a","b"], source, "forced build")
- s.start([a,b])
-
- st = s.status
- self.failUnlessEqual(st.getSourceStamp(), source)
- self.failUnlessEqual(st.getReason(), "forced build")
- self.failUnlessEqual(st.getBuilderNames(), ["a","b"])
- self.failIf(st.isFinished())
-
- builderstatus_a = builder.BuilderStatus("a")
- bsa = builder.BuildStatus(builderstatus_a, 1)
- bsa.setResults(builder.SUCCESS)
- a.requests[0].finished(bsa)
-
- builderstatus_b = builder.BuilderStatus("b")
- bsb = builder.BuildStatus(builderstatus_b, 1)
- bsb.setResults(builder.SUCCESS)
- b.requests[0].finished(bsb)
-
- self.failUnless(st.isFinished())
- self.failUnlessEqual(st.getResults(), builder.SUCCESS)
-
diff --git a/buildbot/buildbot/test/test_buildstep.py b/buildbot/buildbot/test/test_buildstep.py
deleted file mode 100644
index 0e9c620..0000000
--- a/buildbot/buildbot/test/test_buildstep.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# -*- test-case-name: buildbot.test.test_buildstep -*-
-
-# test cases for buildbot.process.buildstep
-
-from twisted.trial import unittest
-
-from buildbot import interfaces
-from buildbot.process import buildstep
-
-# have to subclass LogObserver in order to test it, since the default
-# implementations of outReceived() and errReceived() do nothing
-class MyLogObserver(buildstep.LogObserver):
- def __init__(self):
- self._out = [] # list of chunks
- self._err = []
-
- def outReceived(self, data):
- self._out.append(data)
-
- def errReceived(self, data):
- self._err.append(data)
-
-class ObserverTestCase(unittest.TestCase):
- observer_cls = None # must be set by subclass
-
- def setUp(self):
- self.observer = self.observer_cls()
-
- def _logStdout(self, chunk):
- # why does LogObserver.logChunk() take 'build', 'step', and
- # 'log' arguments when it clearly doesn't use them for anything?
- self.observer.logChunk(None, None, None, interfaces.LOG_CHANNEL_STDOUT, chunk)
-
- def _logStderr(self, chunk):
- self.observer.logChunk(None, None, None, interfaces.LOG_CHANNEL_STDERR, chunk)
-
- def _assertStdout(self, expect_lines):
- self.assertEqual(self.observer._out, expect_lines)
-
- def _assertStderr(self, expect_lines):
- self.assertEqual(self.observer._err, expect_lines)
-
-class LogObserver(ObserverTestCase):
-
- observer_cls = MyLogObserver
-
- def testLogChunk(self):
- self._logStdout("foo")
- self._logStderr("argh")
- self._logStdout(" wubba\n")
- self._logStderr("!!!\n")
-
- self._assertStdout(["foo", " wubba\n"])
- self._assertStderr(["argh", "!!!\n"])
-
-# again, have to subclass LogLineObserver in order to test it, because the
-# default implementations of data-receiving methods are empty
-class MyLogLineObserver(buildstep.LogLineObserver):
- def __init__(self):
- #super(MyLogLineObserver, self).__init__()
- buildstep.LogLineObserver.__init__(self)
-
- self._out = [] # list of lines
- self._err = []
-
- def outLineReceived(self, line):
- self._out.append(line)
-
- def errLineReceived(self, line):
- self._err.append(line)
-
-class LogLineObserver(ObserverTestCase):
- observer_cls = MyLogLineObserver
-
- def testLineBuffered(self):
- # no challenge here: we feed it chunks that are already lines
- # (like a program writing to stdout in line-buffered mode)
- self._logStdout("stdout line 1\n")
- self._logStdout("stdout line 2\n")
- self._logStderr("stderr line 1\n")
- self._logStdout("stdout line 3\n")
-
- self._assertStdout(["stdout line 1",
- "stdout line 2",
- "stdout line 3"])
- self._assertStderr(["stderr line 1"])
-
- def testShortBrokenLines(self):
- self._logStdout("stdout line 1 starts ")
- self._logStderr("an intervening line of error\n")
- self._logStdout("and continues ")
- self._logStdout("but finishes here\n")
- self._logStderr("more error\n")
- self._logStdout("and another line of stdout\n")
-
- self._assertStdout(["stdout line 1 starts and continues but finishes here",
- "and another line of stdout"])
- self._assertStderr(["an intervening line of error",
- "more error"])
-
- def testLongLine(self):
- chunk = "." * 1024
- self._logStdout(chunk)
- self._logStdout(chunk)
- self._logStdout(chunk)
- self._logStdout(chunk)
- self._logStdout(chunk)
- self._logStdout("\n")
-
- self._assertStdout([chunk * 5])
- self._assertStderr([])
-
- def testBigChunk(self):
- chunk = "." * 5000
- self._logStdout(chunk)
- self._logStdout("\n")
-
- self._assertStdout([chunk])
- self._assertStderr([])
-
- def testReallyLongLine(self):
- # A single line of > 16384 bytes is dropped on the floor (bug #201).
- # In real life, I observed such a line being broken into chunks of
- # 4095 bytes, so that's how I'm breaking it here.
- self.observer.setMaxLineLength(65536)
- chunk = "." * 4095
- self._logStdout(chunk)
- self._logStdout(chunk)
- self._logStdout(chunk)
- self._logStdout(chunk) # now we're up to 16380 bytes
- self._logStdout("12345\n")
-
- self._assertStdout([chunk*4 + "12345"])
- self._assertStderr([])
-
-class RemoteShellTest(unittest.TestCase):
- def testRepr(self):
- # Test for #352
- rsc = buildstep.RemoteShellCommand('.', ('sh', 'make'))
- testval = repr(rsc)
- rsc = buildstep.RemoteShellCommand('.', ['sh', 'make'])
- testval = repr(rsc)
- rsc = buildstep.RemoteShellCommand('.', 'make')
- testval = repr(rsc)
diff --git a/buildbot/buildbot/test/test_changes.py b/buildbot/buildbot/test/test_changes.py
deleted file mode 100644
index faebe7b..0000000
--- a/buildbot/buildbot/test/test_changes.py
+++ /dev/null
@@ -1,243 +0,0 @@
-# -*- test-case-name: buildbot.test.test_changes -*-
-
-from twisted.trial import unittest
-from twisted.internet import defer, reactor
-
-from buildbot import master
-from buildbot.changes import pb
-from buildbot.scripts import runner
-
-d1 = {'files': ["Project/foo.c", "Project/bar/boo.c"],
- 'who': "marvin",
- 'comments': "Some changes in Project"}
-d2 = {'files': ["OtherProject/bar.c"],
- 'who': "zaphod",
- 'comments': "other changes"}
-d3 = {'files': ["Project/baz.c", "OtherProject/bloo.c"],
- 'who': "alice",
- 'comments': "mixed changes"}
-d4 = {'files': ["trunk/baz.c", "branches/foobranch/foo.c", "trunk/bar.c"],
- 'who': "alice",
- 'comments': "mixed changes"}
-d5 = {'files': ["Project/foo.c"],
- 'who': "trillian",
- 'comments': "Some changes in Project",
- 'category': "categoryA"}
-
-class TestChangePerspective(unittest.TestCase):
-
- def setUp(self):
- self.changes = []
-
- def addChange(self, c):
- self.changes.append(c)
-
- def testNoPrefix(self):
- p = pb.ChangePerspective(self, None)
- p.perspective_addChange(d1)
- self.failUnlessEqual(len(self.changes), 1)
- c1 = self.changes[0]
- self.failUnlessEqual(set(c1.files),
- set(["Project/foo.c", "Project/bar/boo.c"]))
- self.failUnlessEqual(c1.comments, "Some changes in Project")
- self.failUnlessEqual(c1.who, "marvin")
-
- def testPrefix(self):
- p = pb.ChangePerspective(self, "Project/")
-
- p.perspective_addChange(d1)
- self.failUnlessEqual(len(self.changes), 1)
- c1 = self.changes[-1]
- self.failUnlessEqual(set(c1.files), set(["foo.c", "bar/boo.c"]))
- self.failUnlessEqual(c1.comments, "Some changes in Project")
- self.failUnlessEqual(c1.who, "marvin")
-
- p.perspective_addChange(d2) # should be ignored
- self.failUnlessEqual(len(self.changes), 1)
-
- p.perspective_addChange(d3) # should ignore the OtherProject file
- self.failUnlessEqual(len(self.changes), 2)
-
- c3 = self.changes[-1]
- self.failUnlessEqual(set(c3.files), set(["baz.c"]))
- self.failUnlessEqual(c3.comments, "mixed changes")
- self.failUnlessEqual(c3.who, "alice")
-
- def testPrefix2(self):
- p = pb.ChangePerspective(self, "Project/bar/")
-
- p.perspective_addChange(d1)
- self.failUnlessEqual(len(self.changes), 1)
- c1 = self.changes[-1]
- self.failUnlessEqual(set(c1.files), set(["boo.c"]))
- self.failUnlessEqual(c1.comments, "Some changes in Project")
- self.failUnlessEqual(c1.who, "marvin")
-
- p.perspective_addChange(d2) # should be ignored
- self.failUnlessEqual(len(self.changes), 1)
-
- p.perspective_addChange(d3) # should ignore this too
- self.failUnlessEqual(len(self.changes), 1)
-
- def testPrefix3(self):
- p = pb.ChangePerspective(self, "trunk/")
-
- p.perspective_addChange(d4)
- self.failUnlessEqual(len(self.changes), 1)
- c1 = self.changes[-1]
- self.failUnlessEqual(set(c1.files), set(["baz.c", "bar.c"]))
- self.failUnlessEqual(c1.comments, "mixed changes")
-
- def testPrefix4(self):
- p = pb.ChangePerspective(self, "branches/foobranch/")
-
- p.perspective_addChange(d4)
- self.failUnlessEqual(len(self.changes), 1)
- c1 = self.changes[-1]
- self.failUnlessEqual(set(c1.files), set(["foo.c"]))
- self.failUnlessEqual(c1.comments, "mixed changes")
-
- def testCategory(self):
- p = pb.ChangePerspective(self, None)
- p.perspective_addChange(d5)
- self.failUnlessEqual(len(self.changes), 1)
- c1 = self.changes[0]
- self.failUnlessEqual(c1.category, "categoryA")
-
-config_empty = """
-BuildmasterConfig = c = {}
-c['slaves'] = []
-c['builders'] = []
-c['schedulers'] = []
-c['slavePortnum'] = 0
-"""
-
-config_sender = config_empty + \
-"""
-from buildbot.changes import pb
-c['change_source'] = pb.PBChangeSource(port=None)
-"""
-
-class Sender(unittest.TestCase):
- def setUp(self):
- self.master = master.BuildMaster(".")
- def tearDown(self):
- d = defer.maybeDeferred(self.master.stopService)
- # TODO: something in Twisted-2.0.0 (and probably 2.0.1) doesn't shut
- # down the Broker listening socket when it's supposed to.
- # Twisted-1.3.0, and current SVN (which will be post-2.0.1) are ok.
- # This iterate() is a quick hack to deal with the problem. I need to
- # investigate more thoroughly and find a better solution.
- d.addCallback(self.stall, 0.1)
- return d
-
- def stall(self, res, timeout):
- d = defer.Deferred()
- reactor.callLater(timeout, d.callback, res)
- return d
-
- def testSender(self):
- self.master.loadConfig(config_empty)
- self.master.startService()
- # TODO: BuildMaster.loadChanges replaces the change_svc object, so we
- # have to load it twice. Clean this up.
- d = self.master.loadConfig(config_sender)
- d.addCallback(self._testSender_1)
- return d
-
- def _testSender_1(self, res):
- self.cm = cm = self.master.change_svc
- s1 = list(self.cm)[0]
- port = self.master.slavePort._port.getHost().port
-
- self.options = {'username': "alice",
- 'master': "localhost:%d" % port,
- 'files': ["foo.c"],
- 'category': "categoryA",
- }
-
- d = runner.sendchange(self.options)
- d.addCallback(self._testSender_2)
- return d
-
- def _testSender_2(self, res):
- # now check that the change was received
- self.failUnlessEqual(len(self.cm.changes), 1)
- c = self.cm.changes.pop()
- self.failUnlessEqual(c.who, "alice")
- self.failUnlessEqual(c.files, ["foo.c"])
- self.failUnlessEqual(c.comments, "")
- self.failUnlessEqual(c.revision, None)
- self.failUnlessEqual(c.category, "categoryA")
-
- self.options['revision'] = "r123"
- self.options['comments'] = "test change"
-
- d = runner.sendchange(self.options)
- d.addCallback(self._testSender_3)
- return d
-
- def _testSender_3(self, res):
- self.failUnlessEqual(len(self.cm.changes), 1)
- c = self.cm.changes.pop()
- self.failUnlessEqual(c.who, "alice")
- self.failUnlessEqual(c.files, ["foo.c"])
- self.failUnlessEqual(c.comments, "test change")
- self.failUnlessEqual(c.revision, "r123")
- self.failUnlessEqual(c.category, "categoryA")
-
- # test options['logfile'] by creating a temporary file
- logfile = self.mktemp()
- f = open(logfile, "wt")
- f.write("longer test change")
- f.close()
- self.options['comments'] = None
- self.options['logfile'] = logfile
-
- d = runner.sendchange(self.options)
- d.addCallback(self._testSender_4)
- return d
-
- def _testSender_4(self, res):
- self.failUnlessEqual(len(self.cm.changes), 1)
- c = self.cm.changes.pop()
- self.failUnlessEqual(c.who, "alice")
- self.failUnlessEqual(c.files, ["foo.c"])
- self.failUnlessEqual(c.comments, "longer test change")
- self.failUnlessEqual(c.revision, "r123")
- self.failUnlessEqual(c.category, "categoryA")
-
- # make sure that numeric revisions work too
- self.options['logfile'] = None
- del self.options['revision']
- self.options['revision_number'] = 42
-
- d = runner.sendchange(self.options)
- d.addCallback(self._testSender_5)
- return d
-
- def _testSender_5(self, res):
- self.failUnlessEqual(len(self.cm.changes), 1)
- c = self.cm.changes.pop()
- self.failUnlessEqual(c.who, "alice")
- self.failUnlessEqual(c.files, ["foo.c"])
- self.failUnlessEqual(c.comments, "")
- self.failUnlessEqual(c.revision, 42)
- self.failUnlessEqual(c.category, "categoryA")
-
- # verify --branch too
- self.options['branch'] = "branches/test"
-
- d = runner.sendchange(self.options)
- d.addCallback(self._testSender_6)
- return d
-
- def _testSender_6(self, res):
- self.failUnlessEqual(len(self.cm.changes), 1)
- c = self.cm.changes.pop()
- self.failUnlessEqual(c.who, "alice")
- self.failUnlessEqual(c.files, ["foo.c"])
- self.failUnlessEqual(c.comments, "")
- self.failUnlessEqual(c.revision, 42)
- self.failUnlessEqual(c.branch, "branches/test")
- self.failUnlessEqual(c.category, "categoryA")
diff --git a/buildbot/buildbot/test/test_config.py b/buildbot/buildbot/test/test_config.py
deleted file mode 100644
index 900dcad..0000000
--- a/buildbot/buildbot/test/test_config.py
+++ /dev/null
@@ -1,1277 +0,0 @@
-# -*- test-case-name: buildbot.test.test_config -*-
-
-import os, warnings, exceptions
-
-from twisted.trial import unittest
-from twisted.python import failure
-from twisted.internet import defer
-
-from buildbot.master import BuildMaster
-from buildbot import scheduler
-from twisted.application import service, internet
-from twisted.spread import pb
-from twisted.web.server import Site
-from twisted.web.distrib import ResourcePublisher
-from buildbot.process.builder import Builder
-from buildbot.process.factory import BasicBuildFactory
-from buildbot.changes.pb import PBChangeSource
-from buildbot.changes.mail import SyncmailMaildirSource
-from buildbot.steps.source import CVS, Darcs
-from buildbot.steps.shell import Compile, Test, ShellCommand
-from buildbot.status import base
-from buildbot.steps import dummy, maxq, python, python_twisted, shell, \
- source, transfer
-words = None
-try:
- from buildbot.status import words
-except ImportError:
- pass
-
-emptyCfg = \
-"""
-from buildbot.buildslave import BuildSlave
-BuildmasterConfig = c = {}
-c['slaves'] = []
-c['schedulers'] = []
-c['builders'] = []
-c['slavePortnum'] = 9999
-c['projectName'] = 'dummy project'
-c['projectURL'] = 'http://dummy.example.com'
-c['buildbotURL'] = 'http://dummy.example.com/buildbot'
-"""
-
-buildersCfg = \
-"""
-from buildbot.process.factory import BasicBuildFactory
-from buildbot.buildslave import BuildSlave
-BuildmasterConfig = c = {}
-c['slaves'] = [BuildSlave('bot1', 'pw1')]
-c['schedulers'] = []
-c['slavePortnum'] = 9999
-f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
-c['builders'] = [{'name':'builder1', 'slavename':'bot1',
- 'builddir':'workdir', 'factory':f1}]
-"""
-
-buildersCfg2 = buildersCfg + \
-"""
-f1 = BasicBuildFactory('cvsroot', 'cvsmodule2')
-c['builders'] = [{'name':'builder1', 'slavename':'bot1',
- 'builddir':'workdir', 'factory':f1}]
-"""
-
-buildersCfg3 = buildersCfg2 + \
-"""
-c['builders'].append({'name': 'builder2', 'slavename': 'bot1',
- 'builddir': 'workdir2', 'factory': f1 })
-"""
-
-buildersCfg4 = buildersCfg2 + \
-"""
-c['builders'] = [{ 'name': 'builder1', 'slavename': 'bot1',
- 'builddir': 'newworkdir', 'factory': f1 },
- { 'name': 'builder2', 'slavename': 'bot1',
- 'builddir': 'workdir2', 'factory': f1 }]
-"""
-
-wpCfg1 = buildersCfg + \
-"""
-from buildbot.steps import shell
-f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
-f1.addStep(shell.ShellCommand, command=[shell.WithProperties('echo')])
-c['builders'] = [{'name':'builder1', 'slavename':'bot1',
- 'builddir':'workdir1', 'factory': f1}]
-"""
-
-wpCfg2 = buildersCfg + \
-"""
-from buildbot.steps import shell
-f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
-f1.addStep(shell.ShellCommand,
- command=[shell.WithProperties('echo %s', 'revision')])
-c['builders'] = [{'name':'builder1', 'slavename':'bot1',
- 'builddir':'workdir1', 'factory': f1}]
-"""
-
-
-
-ircCfg1 = emptyCfg + \
-"""
-from buildbot.status import words
-c['status'] = [words.IRC('irc.us.freenode.net', 'buildbot', ['twisted'])]
-"""
-
-ircCfg2 = emptyCfg + \
-"""
-from buildbot.status import words
-c['status'] = [words.IRC('irc.us.freenode.net', 'buildbot', ['twisted']),
- words.IRC('irc.example.com', 'otherbot', ['chan1', 'chan2'])]
-"""
-
-ircCfg3 = emptyCfg + \
-"""
-from buildbot.status import words
-c['status'] = [words.IRC('irc.us.freenode.net', 'buildbot', ['knotted'])]
-"""
-
-webCfg1 = emptyCfg + \
-"""
-from buildbot.status import html
-c['status'] = [html.Waterfall(http_port=9980)]
-"""
-
-webCfg2 = emptyCfg + \
-"""
-from buildbot.status import html
-c['status'] = [html.Waterfall(http_port=9981)]
-"""
-
-webCfg3 = emptyCfg + \
-"""
-from buildbot.status import html
-c['status'] = [html.Waterfall(http_port='tcp:9981:interface=127.0.0.1')]
-"""
-
-webNameCfg1 = emptyCfg + \
-"""
-from buildbot.status import html
-c['status'] = [html.Waterfall(distrib_port='~/.twistd-web-pb')]
-"""
-
-webNameCfg2 = emptyCfg + \
-"""
-from buildbot.status import html
-c['status'] = [html.Waterfall(distrib_port='./bar.socket')]
-"""
-
-debugPasswordCfg = emptyCfg + \
-"""
-c['debugPassword'] = 'sekrit'
-"""
-
-interlockCfgBad = \
-"""
-from buildbot.process.factory import BasicBuildFactory
-from buildbot.buildslave import BuildSlave
-c = {}
-c['slaves'] = [BuildSlave('bot1', 'pw1')]
-c['schedulers'] = []
-f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
-c['builders'] = [
- { 'name': 'builder1', 'slavename': 'bot1',
- 'builddir': 'workdir', 'factory': f1 },
- { 'name': 'builder2', 'slavename': 'bot1',
- 'builddir': 'workdir2', 'factory': f1 },
- ]
-# interlocks have been removed
-c['interlocks'] = [('lock1', ['builder1'], ['builder2', 'builder3']),
- ]
-c['slavePortnum'] = 9999
-BuildmasterConfig = c
-"""
-
-lockCfgBad1 = \
-"""
-from buildbot.steps.dummy import Dummy
-from buildbot.process.factory import BuildFactory, s
-from buildbot.locks import MasterLock
-from buildbot.buildslave import BuildSlave
-c = {}
-c['slaves'] = [BuildSlave('bot1', 'pw1')]
-c['schedulers'] = []
-l1 = MasterLock('lock1')
-l2 = MasterLock('lock1') # duplicate lock name
-f1 = BuildFactory([s(Dummy, locks=[])])
-c['builders'] = [
- { 'name': 'builder1', 'slavename': 'bot1',
- 'builddir': 'workdir', 'factory': f1, 'locks': [l1, l2] },
- { 'name': 'builder2', 'slavename': 'bot1',
- 'builddir': 'workdir2', 'factory': f1 },
- ]
-c['slavePortnum'] = 9999
-BuildmasterConfig = c
-"""
-
-lockCfgBad2 = \
-"""
-from buildbot.steps.dummy import Dummy
-from buildbot.process.factory import BuildFactory, s
-from buildbot.locks import MasterLock, SlaveLock
-from buildbot.buildslave import BuildSlave
-c = {}
-c['slaves'] = [BuildSlave('bot1', 'pw1')]
-c['schedulers'] = []
-l1 = MasterLock('lock1')
-l2 = SlaveLock('lock1') # duplicate lock name
-f1 = BuildFactory([s(Dummy, locks=[])])
-c['builders'] = [
- { 'name': 'builder1', 'slavename': 'bot1',
- 'builddir': 'workdir', 'factory': f1, 'locks': [l1, l2] },
- { 'name': 'builder2', 'slavename': 'bot1',
- 'builddir': 'workdir2', 'factory': f1 },
- ]
-c['slavePortnum'] = 9999
-BuildmasterConfig = c
-"""
-
-lockCfgBad3 = \
-"""
-from buildbot.steps.dummy import Dummy
-from buildbot.process.factory import BuildFactory, s
-from buildbot.locks import MasterLock
-from buildbot.buildslave import BuildSlave
-c = {}
-c['slaves'] = [BuildSlave('bot1', 'pw1')]
-c['schedulers'] = []
-l1 = MasterLock('lock1')
-l2 = MasterLock('lock1') # duplicate lock name
-f1 = BuildFactory([s(Dummy, locks=[l2])])
-f2 = BuildFactory([s(Dummy)])
-c['builders'] = [
- { 'name': 'builder1', 'slavename': 'bot1',
- 'builddir': 'workdir', 'factory': f2, 'locks': [l1] },
- { 'name': 'builder2', 'slavename': 'bot1',
- 'builddir': 'workdir2', 'factory': f1 },
- ]
-c['slavePortnum'] = 9999
-BuildmasterConfig = c
-"""
-
-lockCfg1a = \
-"""
-from buildbot.process.factory import BasicBuildFactory
-from buildbot.locks import MasterLock
-from buildbot.buildslave import BuildSlave
-c = {}
-c['slaves'] = [BuildSlave('bot1', 'pw1')]
-c['schedulers'] = []
-f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
-l1 = MasterLock('lock1')
-l2 = MasterLock('lock2')
-c['builders'] = [
- { 'name': 'builder1', 'slavename': 'bot1',
- 'builddir': 'workdir', 'factory': f1, 'locks': [l1, l2] },
- { 'name': 'builder2', 'slavename': 'bot1',
- 'builddir': 'workdir2', 'factory': f1 },
- ]
-c['slavePortnum'] = 9999
-BuildmasterConfig = c
-"""
-
-lockCfg1b = \
-"""
-from buildbot.process.factory import BasicBuildFactory
-from buildbot.locks import MasterLock
-from buildbot.buildslave import BuildSlave
-c = {}
-c['slaves'] = [BuildSlave('bot1', 'pw1')]
-c['schedulers'] = []
-f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
-l1 = MasterLock('lock1')
-l2 = MasterLock('lock2')
-c['builders'] = [
- { 'name': 'builder1', 'slavename': 'bot1',
- 'builddir': 'workdir', 'factory': f1, 'locks': [l1] },
- { 'name': 'builder2', 'slavename': 'bot1',
- 'builddir': 'workdir2', 'factory': f1 },
- ]
-c['slavePortnum'] = 9999
-BuildmasterConfig = c
-"""
-
-# test out step Locks
-lockCfg2a = \
-"""
-from buildbot.steps.dummy import Dummy
-from buildbot.process.factory import BuildFactory, s
-from buildbot.locks import MasterLock
-from buildbot.buildslave import BuildSlave
-c = {}
-c['slaves'] = [BuildSlave('bot1', 'pw1')]
-c['schedulers'] = []
-l1 = MasterLock('lock1')
-l2 = MasterLock('lock2')
-f1 = BuildFactory([s(Dummy, locks=[l1,l2])])
-f2 = BuildFactory([s(Dummy)])
-
-c['builders'] = [
- { 'name': 'builder1', 'slavename': 'bot1',
- 'builddir': 'workdir', 'factory': f1 },
- { 'name': 'builder2', 'slavename': 'bot1',
- 'builddir': 'workdir2', 'factory': f2 },
- ]
-c['slavePortnum'] = 9999
-BuildmasterConfig = c
-"""
-
-lockCfg2b = \
-"""
-from buildbot.steps.dummy import Dummy
-from buildbot.process.factory import BuildFactory, s
-from buildbot.locks import MasterLock
-from buildbot.buildslave import BuildSlave
-c = {}
-c['slaves'] = [BuildSlave('bot1', 'pw1')]
-c['schedulers'] = []
-l1 = MasterLock('lock1')
-l2 = MasterLock('lock2')
-f1 = BuildFactory([s(Dummy, locks=[l1])])
-f2 = BuildFactory([s(Dummy)])
-
-c['builders'] = [
- { 'name': 'builder1', 'slavename': 'bot1',
- 'builddir': 'workdir', 'factory': f1 },
- { 'name': 'builder2', 'slavename': 'bot1',
- 'builddir': 'workdir2', 'factory': f2 },
- ]
-c['slavePortnum'] = 9999
-BuildmasterConfig = c
-"""
-
-lockCfg2c = \
-"""
-from buildbot.steps.dummy import Dummy
-from buildbot.process.factory import BuildFactory, s
-from buildbot.locks import MasterLock
-from buildbot.buildslave import BuildSlave
-c = {}
-c['slaves'] = [BuildSlave('bot1', 'pw1')]
-c['schedulers'] = []
-l1 = MasterLock('lock1')
-l2 = MasterLock('lock2')
-f1 = BuildFactory([s(Dummy)])
-f2 = BuildFactory([s(Dummy)])
-
-c['builders'] = [
- { 'name': 'builder1', 'slavename': 'bot1',
- 'builddir': 'workdir', 'factory': f1 },
- { 'name': 'builder2', 'slavename': 'bot1',
- 'builddir': 'workdir2', 'factory': f2 },
- ]
-c['slavePortnum'] = 9999
-BuildmasterConfig = c
-"""
-
-schedulersCfg = \
-"""
-from buildbot.scheduler import Scheduler, Dependent
-from buildbot.process.factory import BasicBuildFactory
-from buildbot.buildslave import BuildSlave
-c = {}
-c['slaves'] = [BuildSlave('bot1', 'pw1')]
-f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
-b1 = {'name':'builder1', 'slavename':'bot1',
- 'builddir':'workdir', 'factory':f1}
-c['builders'] = [b1]
-c['schedulers'] = [Scheduler('full', None, 60, ['builder1'])]
-c['slavePortnum'] = 9999
-c['projectName'] = 'dummy project'
-c['projectURL'] = 'http://dummy.example.com'
-c['buildbotURL'] = 'http://dummy.example.com/buildbot'
-BuildmasterConfig = c
-"""
-
-class ConfigTest(unittest.TestCase):
- def setUp(self):
- # this class generates several deprecation warnings, which the user
- # doesn't need to see.
- warnings.simplefilter('ignore', exceptions.DeprecationWarning)
- self.buildmaster = BuildMaster(".")
-
- def failUnlessListsEquivalent(self, list1, list2):
- l1 = list1[:]
- l1.sort()
- l2 = list2[:]
- l2.sort()
- self.failUnlessEqual(l1, l2)
-
- def servers(self, s, types):
- # perform a recursive search of s.services, looking for instances of
- # twisted.application.internet.TCPServer, then extract their .args
- # values to find the TCP ports they want to listen on
- for child in s:
- if service.IServiceCollection.providedBy(child):
- for gc in self.servers(child, types):
- yield gc
- if isinstance(child, types):
- yield child
-
- def TCPports(self, s):
- return list(self.servers(s, internet.TCPServer))
- def UNIXports(self, s):
- return list(self.servers(s, internet.UNIXServer))
- def TCPclients(self, s):
- return list(self.servers(s, internet.TCPClient))
-
- def checkPorts(self, svc, expected):
- """Verify that the TCPServer and UNIXServer children of the given
- service have the expected portnum/pathname and factory classes. As a
- side-effect, return a list of servers in the same order as the
- 'expected' list. This can be used to verify properties of the
- factories contained therein."""
-
- expTCP = [e for e in expected if type(e[0]) == int]
- expUNIX = [e for e in expected if type(e[0]) == str]
- haveTCP = [(p.args[0], p.args[1].__class__)
- for p in self.TCPports(svc)]
- haveUNIX = [(p.args[0], p.args[1].__class__)
- for p in self.UNIXports(svc)]
- self.failUnlessListsEquivalent(expTCP, haveTCP)
- self.failUnlessListsEquivalent(expUNIX, haveUNIX)
- ret = []
- for e in expected:
- for have in self.TCPports(svc) + self.UNIXports(svc):
- if have.args[0] == e[0]:
- ret.append(have)
- continue
- assert(len(ret) == len(expected))
- return ret
-
- def testEmpty(self):
- self.failUnlessRaises(KeyError, self.buildmaster.loadConfig, "")
-
- def testSimple(self):
- # covers slavePortnum, base checker passwords
- master = self.buildmaster
- master.loadChanges()
-
- master.loadConfig(emptyCfg)
- # note: this doesn't actually start listening, because the app
- # hasn't been started running
- self.failUnlessEqual(master.slavePortnum, "tcp:9999")
- self.checkPorts(master, [(9999, pb.PBServerFactory)])
- self.failUnlessEqual(list(master.change_svc), [])
- self.failUnlessEqual(master.botmaster.builders, {})
- self.failUnlessEqual(master.checker.users,
- {"change": "changepw"})
- self.failUnlessEqual(master.projectName, "dummy project")
- self.failUnlessEqual(master.projectURL, "http://dummy.example.com")
- self.failUnlessEqual(master.buildbotURL,
- "http://dummy.example.com/buildbot")
-
- def testSlavePortnum(self):
- master = self.buildmaster
- master.loadChanges()
-
- master.loadConfig(emptyCfg)
- self.failUnlessEqual(master.slavePortnum, "tcp:9999")
- ports = self.checkPorts(master, [(9999, pb.PBServerFactory)])
- p = ports[0]
-
- master.loadConfig(emptyCfg)
- self.failUnlessEqual(master.slavePortnum, "tcp:9999")
- ports = self.checkPorts(master, [(9999, pb.PBServerFactory)])
- self.failUnlessIdentical(p, ports[0],
- "the slave port was changed even " + \
- "though the configuration was not")
-
- master.loadConfig(emptyCfg + "c['slavePortnum'] = 9000\n")
- self.failUnlessEqual(master.slavePortnum, "tcp:9000")
- ports = self.checkPorts(master, [(9000, pb.PBServerFactory)])
- self.failIf(p is ports[0],
- "slave port was unchanged but configuration was changed")
-
- def testSlaves(self):
- master = self.buildmaster
- master.loadChanges()
- master.loadConfig(emptyCfg)
- self.failUnlessEqual(master.botmaster.builders, {})
- self.failUnlessEqual(master.checker.users,
- {"change": "changepw"})
- # 'botsCfg' is testing backwards compatibility, for 0.7.5 config
- # files that have not yet been updated to 0.7.6 . This compatibility
- # (and this test) is scheduled for removal in 0.8.0 .
- botsCfg = (emptyCfg +
- "c['bots'] = [('bot1', 'pw1'), ('bot2', 'pw2')]\n")
- master.loadConfig(botsCfg)
- self.failUnlessEqual(master.checker.users,
- {"change": "changepw",
- "bot1": "pw1",
- "bot2": "pw2"})
- master.loadConfig(botsCfg)
- self.failUnlessEqual(master.checker.users,
- {"change": "changepw",
- "bot1": "pw1",
- "bot2": "pw2"})
- master.loadConfig(emptyCfg)
- self.failUnlessEqual(master.checker.users,
- {"change": "changepw"})
- slavesCfg = (emptyCfg +
- "from buildbot.buildslave import BuildSlave\n"
- "c['slaves'] = [BuildSlave('bot1','pw1'), "
- "BuildSlave('bot2','pw2')]\n")
- master.loadConfig(slavesCfg)
- self.failUnlessEqual(master.checker.users,
- {"change": "changepw",
- "bot1": "pw1",
- "bot2": "pw2"})
-
-
- def testChangeSource(self):
- master = self.buildmaster
- master.loadChanges()
- master.loadConfig(emptyCfg)
- self.failUnlessEqual(list(master.change_svc), [])
-
- sourcesCfg = emptyCfg + \
-"""
-from buildbot.changes.pb import PBChangeSource
-c['change_source'] = PBChangeSource()
-"""
-
- d = master.loadConfig(sourcesCfg)
- def _check1(res):
- self.failUnlessEqual(len(list(self.buildmaster.change_svc)), 1)
- s1 = list(self.buildmaster.change_svc)[0]
- self.failUnless(isinstance(s1, PBChangeSource))
- self.failUnlessEqual(s1, list(self.buildmaster.change_svc)[0])
- self.failUnless(s1.parent)
-
- # verify that unchanged sources are not interrupted
- d1 = self.buildmaster.loadConfig(sourcesCfg)
-
- def _check2(res):
- self.failUnlessEqual(len(list(self.buildmaster.change_svc)), 1)
- s2 = list(self.buildmaster.change_svc)[0]
- self.failUnlessIdentical(s1, s2)
- self.failUnless(s1.parent)
- d1.addCallback(_check2)
- return d1
- d.addCallback(_check1)
-
- # make sure we can get rid of the sources too
- d.addCallback(lambda res: self.buildmaster.loadConfig(emptyCfg))
-
- def _check3(res):
- self.failUnlessEqual(list(self.buildmaster.change_svc), [])
- d.addCallback(_check3)
-
- return d
-
- def testChangeSources(self):
- # make sure we can accept a list
- master = self.buildmaster
- master.loadChanges()
- master.loadConfig(emptyCfg)
- self.failUnlessEqual(list(master.change_svc), [])
-
- sourcesCfg = emptyCfg + \
-"""
-from buildbot.changes.pb import PBChangeSource
-from buildbot.changes.mail import SyncmailMaildirSource
-c['change_source'] = [PBChangeSource(),
- SyncmailMaildirSource('.'),
- ]
-"""
-
- d = master.loadConfig(sourcesCfg)
- def _check1(res):
- self.failUnlessEqual(len(list(self.buildmaster.change_svc)), 2)
- s1,s2 = list(self.buildmaster.change_svc)
- if isinstance(s2, PBChangeSource):
- s1,s2 = s2,s1
- self.failUnless(isinstance(s1, PBChangeSource))
- self.failUnless(s1.parent)
- self.failUnless(isinstance(s2, SyncmailMaildirSource))
- self.failUnless(s2.parent)
- d.addCallback(_check1)
- return d
-
- def testSources(self):
- # test backwards compatibility. c['sources'] is deprecated.
- master = self.buildmaster
- master.loadChanges()
- master.loadConfig(emptyCfg)
- self.failUnlessEqual(list(master.change_svc), [])
-
- sourcesCfg = emptyCfg + \
-"""
-from buildbot.changes.pb import PBChangeSource
-c['sources'] = [PBChangeSource()]
-"""
-
- d = master.loadConfig(sourcesCfg)
- def _check1(res):
- self.failUnlessEqual(len(list(self.buildmaster.change_svc)), 1)
- s1 = list(self.buildmaster.change_svc)[0]
- self.failUnless(isinstance(s1, PBChangeSource))
- self.failUnless(s1.parent)
- d.addCallback(_check1)
- return d
-
- def shouldBeFailure(self, res, *expected):
- self.failUnless(isinstance(res, failure.Failure),
- "we expected this to fail, not produce %s" % (res,))
- res.trap(*expected)
- return None # all is good
-
- def testSchedulerErrors(self):
- master = self.buildmaster
- master.loadChanges()
- master.loadConfig(emptyCfg)
- self.failUnlessEqual(master.allSchedulers(), [])
-
- def _shouldBeFailure(res, hint=None):
- self.shouldBeFailure(res, AssertionError, ValueError)
- if hint:
- self.failUnless(str(res).find(hint) != -1)
-
- def _loadConfig(res, newcfg):
- return self.buildmaster.loadConfig(newcfg)
- d = defer.succeed(None)
-
- # c['schedulers'] must be a list
- badcfg = schedulersCfg + \
-"""
-c['schedulers'] = Scheduler('full', None, 60, ['builder1'])
-"""
- d.addCallback(_loadConfig, badcfg)
- d.addBoth(_shouldBeFailure,
- "c['schedulers'] must be a list of Scheduler instances")
-
- # c['schedulers'] must be a list of IScheduler objects
- badcfg = schedulersCfg + \
-"""
-c['schedulers'] = ['oops', 'problem']
-"""
- d.addCallback(_loadConfig, badcfg)
- d.addBoth(_shouldBeFailure,
- "c['schedulers'] must be a list of Scheduler instances")
-
- # c['schedulers'] must point at real builders
- badcfg = schedulersCfg + \
-"""
-c['schedulers'] = [Scheduler('full', None, 60, ['builder-bogus'])]
-"""
- d.addCallback(_loadConfig, badcfg)
- d.addBoth(_shouldBeFailure, "uses unknown builder")
-
- # builderNames= must be a list
- badcfg = schedulersCfg + \
-"""
-c['schedulers'] = [Scheduler('full', None, 60, 'builder1')]
-"""
- d.addCallback(_loadConfig, badcfg)
- d.addBoth(_shouldBeFailure,
- "must be a list of Builder description names")
-
- # builderNames= must be a list of strings, not dicts
- badcfg = schedulersCfg + \
-"""
-c['schedulers'] = [Scheduler('full', None, 60, [b1])]
-"""
- d.addCallback(_loadConfig, badcfg)
- d.addBoth(_shouldBeFailure,
- "must be a list of Builder description names")
-
- # builderNames= must be a list of strings, not a dict
- badcfg = schedulersCfg + \
-"""
-c['schedulers'] = [Scheduler('full', None, 60, b1)]
-"""
- d.addCallback(_loadConfig, badcfg)
- d.addBoth(_shouldBeFailure,
- "must be a list of Builder description names")
-
- # each Scheduler must have a unique name
- badcfg = schedulersCfg + \
-"""
-c['schedulers'] = [Scheduler('dup', None, 60, []),
- Scheduler('dup', None, 60, [])]
-"""
- d.addCallback(_loadConfig, badcfg)
- d.addBoth(_shouldBeFailure, "Schedulers must have unique names")
-
- return d
-
- def testSchedulers(self):
- master = self.buildmaster
- master.loadChanges()
- master.loadConfig(emptyCfg)
- self.failUnlessEqual(master.allSchedulers(), [])
-
- d = self.buildmaster.loadConfig(schedulersCfg)
- d.addCallback(self._testSchedulers_1)
- return d
-
- def _testSchedulers_1(self, res):
- sch = self.buildmaster.allSchedulers()
- self.failUnlessEqual(len(sch), 1)
- s = sch[0]
- self.failUnless(isinstance(s, scheduler.Scheduler))
- self.failUnlessEqual(s.name, "full")
- self.failUnlessEqual(s.branch, None)
- self.failUnlessEqual(s.treeStableTimer, 60)
- self.failUnlessEqual(s.builderNames, ['builder1'])
-
- newcfg = schedulersCfg + \
-"""
-s1 = Scheduler('full', None, 60, ['builder1'])
-c['schedulers'] = [s1, Dependent('downstream', s1, ['builder1'])]
-"""
- d = self.buildmaster.loadConfig(newcfg)
- d.addCallback(self._testSchedulers_2, newcfg)
- return d
- def _testSchedulers_2(self, res, newcfg):
- sch = self.buildmaster.allSchedulers()
- self.failUnlessEqual(len(sch), 2)
- s = sch[0]
- self.failUnless(isinstance(s, scheduler.Scheduler))
- s = sch[1]
- self.failUnless(isinstance(s, scheduler.Dependent))
- self.failUnlessEqual(s.name, "downstream")
- self.failUnlessEqual(s.builderNames, ['builder1'])
-
- # reloading the same config file should leave the schedulers in place
- d = self.buildmaster.loadConfig(newcfg)
- d.addCallback(self._testSchedulers_3, sch)
- return d
- def _testSchedulers_3(self, res, sch1):
- sch2 = self.buildmaster.allSchedulers()
- self.failUnlessEqual(len(sch2), 2)
- sch1.sort()
- sch2.sort()
- self.failUnlessEqual(sch1, sch2)
- self.failUnlessIdentical(sch1[0], sch2[0])
- self.failUnlessIdentical(sch1[1], sch2[1])
- self.failUnlessIdentical(sch1[0].parent, self.buildmaster)
- self.failUnlessIdentical(sch1[1].parent, self.buildmaster)
-
-
-
- def testBuilders(self):
- master = self.buildmaster
- master.loadConfig(emptyCfg)
- self.failUnlessEqual(master.botmaster.builders, {})
-
- master.loadConfig(buildersCfg)
- self.failUnlessEqual(master.botmaster.builderNames, ["builder1"])
- self.failUnlessEqual(master.botmaster.builders.keys(), ["builder1"])
- b = master.botmaster.builders["builder1"]
- self.failUnless(isinstance(b, Builder))
- self.failUnlessEqual(b.name, "builder1")
- self.failUnlessEqual(b.slavenames, ["bot1"])
- self.failUnlessEqual(b.builddir, "workdir")
- f1 = b.buildFactory
- self.failUnless(isinstance(f1, BasicBuildFactory))
- steps = f1.steps
- self.failUnlessEqual(len(steps), 3)
- self.failUnlessEqual(steps[0], (CVS,
- {'cvsroot': 'cvsroot',
- 'cvsmodule': 'cvsmodule',
- 'mode': 'clobber'}))
- self.failUnlessEqual(steps[1], (Compile,
- {'command': 'make all'}))
- self.failUnlessEqual(steps[2], (Test,
- {'command': 'make check'}))
-
-
- # make sure a reload of the same data doesn't interrupt the Builder
- master.loadConfig(buildersCfg)
- self.failUnlessEqual(master.botmaster.builderNames, ["builder1"])
- self.failUnlessEqual(master.botmaster.builders.keys(), ["builder1"])
- b2 = master.botmaster.builders["builder1"]
- self.failUnlessIdentical(b, b2)
- # TODO: test that the BuilderStatus object doesn't change
- #statusbag2 = master.client_svc.statusbags["builder1"]
- #self.failUnlessIdentical(statusbag, statusbag2)
-
- # but changing something should result in a new Builder
- master.loadConfig(buildersCfg2)
- self.failUnlessEqual(master.botmaster.builderNames, ["builder1"])
- self.failUnlessEqual(master.botmaster.builders.keys(), ["builder1"])
- b3 = master.botmaster.builders["builder1"]
- self.failIf(b is b3)
- # the statusbag remains the same TODO
- #statusbag3 = master.client_svc.statusbags["builder1"]
- #self.failUnlessIdentical(statusbag, statusbag3)
-
- # adding new builder
- master.loadConfig(buildersCfg3)
- self.failUnlessEqual(master.botmaster.builderNames, ["builder1",
- "builder2"])
- self.failUnlessListsEquivalent(master.botmaster.builders.keys(),
- ["builder1", "builder2"])
- b4 = master.botmaster.builders["builder1"]
- self.failUnlessIdentical(b3, b4)
-
- # changing first builder should leave it at the same place in the list
- master.loadConfig(buildersCfg4)
- self.failUnlessEqual(master.botmaster.builderNames, ["builder1",
- "builder2"])
- self.failUnlessListsEquivalent(master.botmaster.builders.keys(),
- ["builder1", "builder2"])
- b5 = master.botmaster.builders["builder1"]
- self.failIf(b4 is b5)
-
- # and removing it should make the Builder go away
- master.loadConfig(emptyCfg)
- self.failUnlessEqual(master.botmaster.builderNames, [])
- self.failUnlessEqual(master.botmaster.builders, {})
- #self.failUnlessEqual(master.client_svc.statusbags, {}) # TODO
-
- def testWithProperties(self):
- master = self.buildmaster
- master.loadConfig(wpCfg1)
- self.failUnlessEqual(master.botmaster.builderNames, ["builder1"])
- self.failUnlessEqual(master.botmaster.builders.keys(), ["builder1"])
- b1 = master.botmaster.builders["builder1"]
-
- # reloading the same config should leave the builder unchanged
- master.loadConfig(wpCfg1)
- b2 = master.botmaster.builders["builder1"]
- self.failUnlessIdentical(b1, b2)
-
- # but changing the parameters of the WithProperties should change it
- master.loadConfig(wpCfg2)
- b3 = master.botmaster.builders["builder1"]
- self.failIf(b1 is b3)
-
- # again, reloading same config should leave the builder unchanged
- master.loadConfig(wpCfg2)
- b4 = master.botmaster.builders["builder1"]
- self.failUnlessIdentical(b3, b4)
-
- def checkIRC(self, m, expected):
- ircs = {}
- for irc in self.servers(m, words.IRC):
- ircs[irc.host] = (irc.nick, irc.channels)
- self.failUnlessEqual(ircs, expected)
-
- def testIRC(self):
- if not words:
- raise unittest.SkipTest("Twisted Words package is not installed")
- master = self.buildmaster
- master.loadChanges()
- d = master.loadConfig(emptyCfg)
- e1 = {}
- d.addCallback(lambda res: self.checkIRC(master, e1))
- d.addCallback(lambda res: master.loadConfig(ircCfg1))
- e2 = {'irc.us.freenode.net': ('buildbot', ['twisted'])}
- d.addCallback(lambda res: self.checkIRC(master, e2))
- d.addCallback(lambda res: master.loadConfig(ircCfg2))
- e3 = {'irc.us.freenode.net': ('buildbot', ['twisted']),
- 'irc.example.com': ('otherbot', ['chan1', 'chan2'])}
- d.addCallback(lambda res: self.checkIRC(master, e3))
- d.addCallback(lambda res: master.loadConfig(ircCfg3))
- e4 = {'irc.us.freenode.net': ('buildbot', ['knotted'])}
- d.addCallback(lambda res: self.checkIRC(master, e4))
- d.addCallback(lambda res: master.loadConfig(ircCfg1))
- e5 = {'irc.us.freenode.net': ('buildbot', ['twisted'])}
- d.addCallback(lambda res: self.checkIRC(master, e5))
- return d
-
- def testWebPortnum(self):
- master = self.buildmaster
- master.loadChanges()
-
- d = master.loadConfig(webCfg1)
- def _check1(res):
- ports = self.checkPorts(self.buildmaster,
- [(9999, pb.PBServerFactory), (9980, Site)])
- p = ports[1]
- self.p = p
- # nothing should be changed
- d.addCallback(_check1)
-
- d.addCallback(lambda res: self.buildmaster.loadConfig(webCfg1))
- def _check2(res):
- ports = self.checkPorts(self.buildmaster,
- [(9999, pb.PBServerFactory), (9980, Site)])
- self.failUnlessIdentical(self.p, ports[1],
- "web port was changed even though "
- "configuration was not")
- # WebStatus is no longer a ComparableMixin, so it will be
- # rebuilt on each reconfig
- #d.addCallback(_check2)
-
- d.addCallback(lambda res: self.buildmaster.loadConfig(webCfg2))
- # changes port to 9981
- def _check3(p):
- ports = self.checkPorts(self.buildmaster,
- [(9999, pb.PBServerFactory), (9981, Site)])
- self.failIf(self.p is ports[1],
- "configuration was changed but web port was unchanged")
- d.addCallback(_check3)
-
- d.addCallback(lambda res: self.buildmaster.loadConfig(webCfg3))
- # make 9981 on only localhost
- def _check4(p):
- ports = self.checkPorts(self.buildmaster,
- [(9999, pb.PBServerFactory), (9981, Site)])
- self.failUnlessEqual(ports[1].kwargs['interface'], "127.0.0.1")
- d.addCallback(_check4)
-
- d.addCallback(lambda res: self.buildmaster.loadConfig(emptyCfg))
- d.addCallback(lambda res:
- self.checkPorts(self.buildmaster,
- [(9999, pb.PBServerFactory)]))
- return d
-
- def testWebPathname(self):
- master = self.buildmaster
- master.loadChanges()
-
- d = master.loadConfig(webNameCfg1)
- def _check1(res):
- self.checkPorts(self.buildmaster,
- [(9999, pb.PBServerFactory),
- ('~/.twistd-web-pb', pb.PBServerFactory)])
- unixports = self.UNIXports(self.buildmaster)
- self.f = f = unixports[0].args[1]
- self.failUnless(isinstance(f.root, ResourcePublisher))
- d.addCallback(_check1)
-
- d.addCallback(lambda res: self.buildmaster.loadConfig(webNameCfg1))
- # nothing should be changed
- def _check2(res):
- self.checkPorts(self.buildmaster,
- [(9999, pb.PBServerFactory),
- ('~/.twistd-web-pb', pb.PBServerFactory)])
- newf = self.UNIXports(self.buildmaster)[0].args[1]
- self.failUnlessIdentical(self.f, newf,
- "web factory was changed even though "
- "configuration was not")
- # WebStatus is no longer a ComparableMixin, so it will be
- # rebuilt on each reconfig
- #d.addCallback(_check2)
-
- d.addCallback(lambda res: self.buildmaster.loadConfig(webNameCfg2))
- def _check3(res):
- self.checkPorts(self.buildmaster,
- [(9999, pb.PBServerFactory),
- ('./bar.socket', pb.PBServerFactory)])
- newf = self.UNIXports(self.buildmaster)[0].args[1],
- self.failIf(self.f is newf,
- "web factory was unchanged but "
- "configuration was changed")
- d.addCallback(_check3)
-
- d.addCallback(lambda res: self.buildmaster.loadConfig(emptyCfg))
- d.addCallback(lambda res:
- self.checkPorts(self.buildmaster,
- [(9999, pb.PBServerFactory)]))
- return d
-
- def testDebugPassword(self):
- master = self.buildmaster
-
- master.loadConfig(debugPasswordCfg)
- self.failUnlessEqual(master.checker.users,
- {"change": "changepw",
- "debug": "sekrit"})
-
- master.loadConfig(debugPasswordCfg)
- self.failUnlessEqual(master.checker.users,
- {"change": "changepw",
- "debug": "sekrit"})
-
- master.loadConfig(emptyCfg)
- self.failUnlessEqual(master.checker.users,
- {"change": "changepw"})
-
- def testLocks(self):
- master = self.buildmaster
- botmaster = master.botmaster
-
- # make sure that c['interlocks'] is rejected properly
- self.failUnlessRaises(KeyError, master.loadConfig, interlockCfgBad)
- # and that duplicate-named Locks are caught
- self.failUnlessRaises(ValueError, master.loadConfig, lockCfgBad1)
- self.failUnlessRaises(ValueError, master.loadConfig, lockCfgBad2)
- self.failUnlessRaises(ValueError, master.loadConfig, lockCfgBad3)
-
- # create a Builder that uses Locks
- master.loadConfig(lockCfg1a)
- b1 = master.botmaster.builders["builder1"]
- self.failUnlessEqual(len(b1.locks), 2)
-
- # reloading the same config should not change the Builder
- master.loadConfig(lockCfg1a)
- self.failUnlessIdentical(b1, master.botmaster.builders["builder1"])
- # but changing the set of locks used should change it
- master.loadConfig(lockCfg1b)
- self.failIfIdentical(b1, master.botmaster.builders["builder1"])
- b1 = master.botmaster.builders["builder1"]
- self.failUnlessEqual(len(b1.locks), 1)
-
- # similar test with step-scoped locks
- master.loadConfig(lockCfg2a)
- b1 = master.botmaster.builders["builder1"]
- # reloading the same config should not change the Builder
- master.loadConfig(lockCfg2a)
- self.failUnlessIdentical(b1, master.botmaster.builders["builder1"])
- # but changing the set of locks used should change it
- master.loadConfig(lockCfg2b)
- self.failIfIdentical(b1, master.botmaster.builders["builder1"])
- b1 = master.botmaster.builders["builder1"]
- # remove the locks entirely
- master.loadConfig(lockCfg2c)
- self.failIfIdentical(b1, master.botmaster.builders["builder1"])
-
-class ConfigElements(unittest.TestCase):
- # verify that ComparableMixin is working
- def testSchedulers(self):
- s1 = scheduler.Scheduler(name='quick', branch=None,
- treeStableTimer=30,
- builderNames=['quick'])
- s2 = scheduler.Scheduler(name="all", branch=None,
- treeStableTimer=5*60,
- builderNames=["a", "b"])
- s3 = scheduler.Try_Userpass("try", ["a","b"], port=9989,
- userpass=[("foo","bar")])
- s1a = scheduler.Scheduler(name='quick', branch=None,
- treeStableTimer=30,
- builderNames=['quick'])
- s2a = scheduler.Scheduler(name="all", branch=None,
- treeStableTimer=5*60,
- builderNames=["a", "b"])
- s3a = scheduler.Try_Userpass("try", ["a","b"], port=9989,
- userpass=[("foo","bar")])
- self.failUnless(s1 == s1)
- self.failUnless(s1 == s1a)
- self.failUnless(s1a in [s1, s2, s3])
- self.failUnless(s2a in [s1, s2, s3])
- self.failUnless(s3a in [s1, s2, s3])
-
-
-
-class ConfigFileTest(unittest.TestCase):
-
- def testFindConfigFile(self):
- os.mkdir("test_cf")
- open(os.path.join("test_cf", "master.cfg"), "w").write(emptyCfg)
- slaveportCfg = emptyCfg + "c['slavePortnum'] = 9000\n"
- open(os.path.join("test_cf", "alternate.cfg"), "w").write(slaveportCfg)
-
- m = BuildMaster("test_cf")
- m.loadTheConfigFile()
- self.failUnlessEqual(m.slavePortnum, "tcp:9999")
-
- m = BuildMaster("test_cf", "alternate.cfg")
- m.loadTheConfigFile()
- self.failUnlessEqual(m.slavePortnum, "tcp:9000")
-
-
-class MyTarget(base.StatusReceiverMultiService):
- def __init__(self, name):
- self.name = name
- base.StatusReceiverMultiService.__init__(self)
- def startService(self):
- # make a note in a list stashed in the BuildMaster
- self.parent.targetevents.append(("start", self.name))
- return base.StatusReceiverMultiService.startService(self)
- def stopService(self):
- self.parent.targetevents.append(("stop", self.name))
- return base.StatusReceiverMultiService.stopService(self)
-
-class MySlowTarget(MyTarget):
- def stopService(self):
- from twisted.internet import reactor
- d = base.StatusReceiverMultiService.stopService(self)
- def stall(res):
- d2 = defer.Deferred()
- reactor.callLater(0.1, d2.callback, res)
- return d2
- d.addCallback(stall)
- m = self.parent
- def finishedStalling(res):
- m.targetevents.append(("stop", self.name))
- return res
- d.addCallback(finishedStalling)
- return d
-
-# we can't actually startService a buildmaster with a config that uses a
-# fixed slavePortnum like 9999, so instead this makes it possible to pass '0'
-# for the first time, and then substitute back in the allocated port number
-# on subsequent passes.
-startableEmptyCfg = emptyCfg + \
-"""
-c['slavePortnum'] = %d
-"""
-
-targetCfg1 = startableEmptyCfg + \
-"""
-from buildbot.test.test_config import MyTarget
-c['status'] = [MyTarget('a')]
-"""
-
-targetCfg2 = startableEmptyCfg + \
-"""
-from buildbot.test.test_config import MySlowTarget
-c['status'] = [MySlowTarget('b')]
-"""
-
-class StartService(unittest.TestCase):
- def tearDown(self):
- return self.master.stopService()
-
- def testStartService(self):
- os.mkdir("test_ss")
- self.master = m = BuildMaster("test_ss")
- # inhibit the usual read-config-on-startup behavior
- m.readConfig = True
- m.startService()
- d = m.loadConfig(startableEmptyCfg % 0)
- d.addCallback(self._testStartService_0)
- return d
-
- def _testStartService_0(self, res):
- m = self.master
- m.targetevents = []
- # figure out what port got allocated
- self.portnum = m.slavePort._port.getHost().port
- d = m.loadConfig(targetCfg1 % self.portnum)
- d.addCallback(self._testStartService_1)
- return d
-
- def _testStartService_1(self, res):
- self.failUnlessEqual(len(self.master.statusTargets), 1)
- self.failUnless(isinstance(self.master.statusTargets[0], MyTarget))
- self.failUnlessEqual(self.master.targetevents,
- [('start', 'a')])
- self.master.targetevents = []
- # reloading the same config should not start or stop the target
- d = self.master.loadConfig(targetCfg1 % self.portnum)
- d.addCallback(self._testStartService_2)
- return d
-
- def _testStartService_2(self, res):
- self.failUnlessEqual(self.master.targetevents, [])
- # but loading a new config file should stop the old one, then
- # start the new one
- d = self.master.loadConfig(targetCfg2 % self.portnum)
- d.addCallback(self._testStartService_3)
- return d
-
- def _testStartService_3(self, res):
- self.failUnlessEqual(self.master.targetevents,
- [('stop', 'a'), ('start', 'b')])
- self.master.targetevents = []
- # and going back to the old one should do the same, in the same
- # order, even though the current MySlowTarget takes a moment to shut
- # down
- d = self.master.loadConfig(targetCfg1 % self.portnum)
- d.addCallback(self._testStartService_4)
- return d
-
- def _testStartService_4(self, res):
- self.failUnlessEqual(self.master.targetevents,
- [('stop', 'b'), ('start', 'a')])
-
-cfg1 = \
-"""
-from buildbot.process.factory import BuildFactory, s
-from buildbot.steps.shell import ShellCommand
-from buildbot.steps.source import Darcs
-from buildbot.buildslave import BuildSlave
-BuildmasterConfig = c = {}
-c['slaves'] = [BuildSlave('bot1', 'pw1')]
-c['schedulers'] = []
-c['slavePortnum'] = 9999
-f1 = BuildFactory([ShellCommand(command='echo yes'),
- s(ShellCommand, command='old-style'),
- ])
-f1.addStep(Darcs(repourl='http://buildbot.net/repos/trunk'))
-f1.addStep(ShellCommand, command='echo old-style')
-c['builders'] = [{'name':'builder1', 'slavename':'bot1',
- 'builddir':'workdir', 'factory':f1}]
-"""
-
-class Factories(unittest.TestCase):
-
- def failUnlessExpectedShell(self, factory, defaults=True, **kwargs):
- shell_args = {}
- if defaults:
- shell_args.update({'descriptionDone': None,
- 'description': None,
- 'workdir': None,
- 'logfiles': {},
- 'usePTY': "slave-config",
- })
- shell_args.update(kwargs)
- self.failUnlessIdentical(factory[0], ShellCommand)
- if factory[1] != shell_args:
- print
- print "factory had:"
- for k in sorted(factory[1].keys()):
- print k
- print "but we were expecting:"
- for k in sorted(shell_args.keys()):
- print k
- self.failUnlessEqual(factory[1], shell_args)
-
- def failUnlessExpectedDarcs(self, factory, **kwargs):
- darcs_args = {'workdir': None,
- 'alwaysUseLatest': False,
- 'mode': 'update',
- 'timeout': 1200,
- 'retry': None,
- 'baseURL': None,
- 'defaultBranch': None,
- 'logfiles': {},
- }
- darcs_args.update(kwargs)
- self.failUnlessIdentical(factory[0], Darcs)
- if factory[1] != darcs_args:
- print
- print "factory had:"
- for k in sorted(factory[1].keys()):
- print k
- print "but we were expecting:"
- for k in sorted(darcs_args.keys()):
- print k
- self.failUnlessEqual(factory[1], darcs_args)
-
- def testSteps(self):
- m = BuildMaster(".")
- m.loadConfig(cfg1)
- b = m.botmaster.builders["builder1"]
- steps = b.buildFactory.steps
- self.failUnlessEqual(len(steps), 4)
-
- self.failUnlessExpectedShell(steps[0], command="echo yes")
- self.failUnlessExpectedShell(steps[1], defaults=False,
- command="old-style")
- self.failUnlessExpectedDarcs(steps[2],
- repourl="http://buildbot.net/repos/trunk")
- self.failUnlessExpectedShell(steps[3], defaults=False,
- command="echo old-style")
-
- def _loop(self, orig):
- step_class, kwargs = orig.getStepFactory()
- newstep = step_class(**kwargs)
- return newstep
-
- def testAllSteps(self):
- # make sure that steps can be created from the factories that they
- # return
- for s in ( dummy.Dummy(), dummy.FailingDummy(), dummy.RemoteDummy(),
- maxq.MaxQ("testdir"),
- python.BuildEPYDoc(), python.PyFlakes(),
- python_twisted.HLint(),
- python_twisted.Trial(testpath=None, tests="tests"),
- python_twisted.ProcessDocs(), python_twisted.BuildDebs(),
- python_twisted.RemovePYCs(),
- shell.ShellCommand(), shell.TreeSize(),
- shell.Configure(), shell.Compile(), shell.Test(),
- source.CVS("cvsroot", "module"),
- source.SVN("svnurl"), source.Darcs("repourl"),
- source.Git("repourl"),
- source.Arch("url", "version"),
- source.Bazaar("url", "version", "archive"),
- source.Bzr("repourl"),
- source.Mercurial("repourl"),
- source.P4("p4base"),
- source.P4Sync(1234, "p4user", "passwd", "client",
- mode="copy"),
- source.Monotone("server", "branch"),
- transfer.FileUpload("src", "dest"),
- transfer.FileDownload("src", "dest"),
- ):
- try:
- self._loop(s)
- except:
- print "error checking %s" % s
- raise
-
diff --git a/buildbot/buildbot/test/test_control.py b/buildbot/buildbot/test/test_control.py
deleted file mode 100644
index 298d48a..0000000
--- a/buildbot/buildbot/test/test_control.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# -*- test-case-name: buildbot.test.test_control -*-
-
-import os
-
-from twisted.trial import unittest
-from twisted.internet import defer
-
-from buildbot import master, interfaces
-from buildbot.sourcestamp import SourceStamp
-from buildbot.slave import bot
-from buildbot.status.builder import SUCCESS
-from buildbot.process import base
-from buildbot.test.runutils import rmtree
-
-config = """
-from buildbot.process import factory
-from buildbot.steps import dummy
-from buildbot.buildslave import BuildSlave
-
-def s(klass, **kwargs):
- return (klass, kwargs)
-
-f1 = factory.BuildFactory([
- s(dummy.Dummy, timeout=1),
- ])
-c = {}
-c['slaves'] = [BuildSlave('bot1', 'sekrit')]
-c['schedulers'] = []
-c['builders'] = [{'name': 'force', 'slavename': 'bot1',
- 'builddir': 'force-dir', 'factory': f1}]
-c['slavePortnum'] = 0
-BuildmasterConfig = c
-"""
-
-class FakeBuilder:
- name = "fake"
- def getSlaveCommandVersion(self, command, oldversion=None):
- return "1.10"
-
-
-class Force(unittest.TestCase):
-
- def rmtree(self, d):
- rmtree(d)
-
- def setUp(self):
- self.master = None
- self.slave = None
- self.rmtree("control_basedir")
- os.mkdir("control_basedir")
- self.master = master.BuildMaster("control_basedir")
- self.slavebase = os.path.abspath("control_slavebase")
- self.rmtree(self.slavebase)
- os.mkdir("control_slavebase")
-
- def connectSlave(self):
- port = self.master.slavePort._port.getHost().port
- slave = bot.BuildSlave("localhost", port, "bot1", "sekrit",
- self.slavebase, keepalive=0, usePTY=1)
- self.slave = slave
- slave.startService()
- d = self.master.botmaster.waitUntilBuilderAttached("force")
- return d
-
- def tearDown(self):
- dl = []
- if self.slave:
- dl.append(self.master.botmaster.waitUntilBuilderDetached("force"))
- dl.append(defer.maybeDeferred(self.slave.stopService))
- if self.master:
- dl.append(defer.maybeDeferred(self.master.stopService))
- return defer.DeferredList(dl)
-
- def testRequest(self):
- m = self.master
- m.loadConfig(config)
- m.startService()
- d = self.connectSlave()
- d.addCallback(self._testRequest_1)
- return d
- def _testRequest_1(self, res):
- c = interfaces.IControl(self.master)
- req = base.BuildRequest("I was bored", SourceStamp(), 'test_builder')
- builder_control = c.getBuilder("force")
- d = defer.Deferred()
- req.subscribe(d.callback)
- builder_control.requestBuild(req)
- d.addCallback(self._testRequest_2)
- # we use the same check-the-results code as testForce
- return d
-
- def _testRequest_2(self, build_control):
- self.failUnless(interfaces.IBuildControl.providedBy(build_control))
- d = build_control.getStatus().waitUntilFinished()
- d.addCallback(self._testRequest_3)
- return d
-
- def _testRequest_3(self, bs):
- self.failUnless(interfaces.IBuildStatus.providedBy(bs))
- self.failUnless(bs.isFinished())
- self.failUnlessEqual(bs.getResults(), SUCCESS)
- #self.failUnlessEqual(bs.getResponsibleUsers(), ["bob"]) # TODO
- self.failUnlessEqual(bs.getChanges(), ())
- #self.failUnlessEqual(bs.getReason(), "forced") # TODO
diff --git a/buildbot/buildbot/test/test_dependencies.py b/buildbot/buildbot/test/test_dependencies.py
deleted file mode 100644
index 624efc4..0000000
--- a/buildbot/buildbot/test/test_dependencies.py
+++ /dev/null
@@ -1,166 +0,0 @@
-# -*- test-case-name: buildbot.test.test_dependencies -*-
-
-from twisted.trial import unittest
-
-from twisted.internet import reactor, defer
-
-from buildbot.test.runutils import RunMixin
-from buildbot.status import base
-
-config_1 = """
-from buildbot import scheduler
-from buildbot.process import factory
-from buildbot.steps import dummy
-from buildbot.buildslave import BuildSlave
-s = factory.s
-from buildbot.test.test_locks import LockStep
-
-BuildmasterConfig = c = {}
-c['slaves'] = [BuildSlave('bot1', 'sekrit'), BuildSlave('bot2', 'sekrit')]
-c['schedulers'] = []
-c['slavePortnum'] = 0
-
-# upstream1 (fastfail, slowpass)
-# -> downstream2 (b3, b4)
-# upstream3 (slowfail, slowpass)
-# -> downstream4 (b3, b4)
-# -> downstream5 (b5)
-
-s1 = scheduler.Scheduler('upstream1', None, 10, ['slowpass', 'fastfail'])
-s2 = scheduler.Dependent('downstream2', s1, ['b3', 'b4'])
-s3 = scheduler.Scheduler('upstream3', None, 10, ['fastpass', 'slowpass'])
-s4 = scheduler.Dependent('downstream4', s3, ['b3', 'b4'])
-s5 = scheduler.Dependent('downstream5', s4, ['b5'])
-c['schedulers'] = [s1, s2, s3, s4, s5]
-
-f_fastpass = factory.BuildFactory([s(dummy.Dummy, timeout=1)])
-f_slowpass = factory.BuildFactory([s(dummy.Dummy, timeout=2)])
-f_fastfail = factory.BuildFactory([s(dummy.FailingDummy, timeout=1)])
-
-def builder(name, f):
- d = {'name': name, 'slavename': 'bot1', 'builddir': name, 'factory': f}
- return d
-
-c['builders'] = [builder('slowpass', f_slowpass),
- builder('fastfail', f_fastfail),
- builder('fastpass', f_fastpass),
- builder('b3', f_fastpass),
- builder('b4', f_fastpass),
- builder('b5', f_fastpass),
- ]
-"""
-
-class Logger(base.StatusReceiverMultiService):
- def __init__(self, master):
- base.StatusReceiverMultiService.__init__(self)
- self.builds = []
- for bn in master.status.getBuilderNames():
- master.status.getBuilder(bn).subscribe(self)
-
- def buildStarted(self, builderName, build):
- self.builds.append(builderName)
-
-class Dependencies(RunMixin, unittest.TestCase):
- def setUp(self):
- RunMixin.setUp(self)
- self.master.loadConfig(config_1)
- self.master.startService()
- d = self.connectSlave(["slowpass", "fastfail", "fastpass",
- "b3", "b4", "b5"])
- return d
-
- def findScheduler(self, name):
- for s in self.master.allSchedulers():
- if s.name == name:
- return s
- raise KeyError("No Scheduler named '%s'" % name)
-
- def testParse(self):
- self.master.loadConfig(config_1)
- # that's it, just make sure this config file is loaded successfully
-
- def testRun_Fail(self):
- # add an extra status target to make pay attention to which builds
- # start and which don't.
- self.logger = Logger(self.master)
-
- # kick off upstream1, which has a failing Builder and thus will not
- # trigger downstream3
- s = self.findScheduler("upstream1")
- # this is an internal function of the Scheduler class
- s.fireTimer() # fires a build
- # t=0: two builders start: 'slowpass' and 'fastfail'
- # t=1: builder 'fastfail' finishes
- # t=2: builder 'slowpass' finishes
- d = defer.Deferred()
- d.addCallback(self._testRun_Fail_1)
- reactor.callLater(5, d.callback, None)
- return d
-
- def _testRun_Fail_1(self, res):
- # 'slowpass' and 'fastfail' should have run one build each
- b = self.status.getBuilder('slowpass').getLastFinishedBuild()
- self.failUnless(b)
- self.failUnlessEqual(b.getNumber(), 0)
- b = self.status.getBuilder('fastfail').getLastFinishedBuild()
- self.failUnless(b)
- self.failUnlessEqual(b.getNumber(), 0)
-
- # none of the other builders should have run
- self.failIf(self.status.getBuilder('b3').getLastFinishedBuild())
- self.failIf(self.status.getBuilder('b4').getLastFinishedBuild())
- self.failIf(self.status.getBuilder('b5').getLastFinishedBuild())
-
- # in fact, none of them should have even started
- self.failUnlessEqual(len(self.logger.builds), 2)
- self.failUnless("slowpass" in self.logger.builds)
- self.failUnless("fastfail" in self.logger.builds)
- self.failIf("b3" in self.logger.builds)
- self.failIf("b4" in self.logger.builds)
- self.failIf("b5" in self.logger.builds)
-
- def testRun_Pass(self):
- # kick off upstream3, which will fire downstream4 and then
- # downstream5
- s = self.findScheduler("upstream3")
- # this is an internal function of the Scheduler class
- s.fireTimer() # fires a build
- # t=0: slowpass and fastpass start
- # t=1: builder 'fastpass' finishes
- # t=2: builder 'slowpass' finishes
- # scheduler 'downstream4' fires
- # builds b3 and b4 are started
- # t=3: builds b3 and b4 finish
- # scheduler 'downstream5' fires
- # build b5 is started
- # t=4: build b5 is finished
- d = defer.Deferred()
- d.addCallback(self._testRun_Pass_1)
- reactor.callLater(5, d.callback, None)
- return d
-
- def _testRun_Pass_1(self, res):
- # 'fastpass' and 'slowpass' should have run one build each
- b = self.status.getBuilder('fastpass').getLastFinishedBuild()
- self.failUnless(b)
- self.failUnlessEqual(b.getNumber(), 0)
-
- b = self.status.getBuilder('slowpass').getLastFinishedBuild()
- self.failUnless(b)
- self.failUnlessEqual(b.getNumber(), 0)
-
- self.failIf(self.status.getBuilder('fastfail').getLastFinishedBuild())
-
- b = self.status.getBuilder('b3').getLastFinishedBuild()
- self.failUnless(b)
- self.failUnlessEqual(b.getNumber(), 0)
-
- b = self.status.getBuilder('b4').getLastFinishedBuild()
- self.failUnless(b)
- self.failUnlessEqual(b.getNumber(), 0)
-
- b = self.status.getBuilder('b4').getLastFinishedBuild()
- self.failUnless(b)
- self.failUnlessEqual(b.getNumber(), 0)
-
-
diff --git a/buildbot/buildbot/test/test_ec2buildslave.py b/buildbot/buildbot/test/test_ec2buildslave.py
deleted file mode 100644
index d0f1644..0000000
--- a/buildbot/buildbot/test/test_ec2buildslave.py
+++ /dev/null
@@ -1,552 +0,0 @@
-# Portions copyright Canonical Ltd. 2009
-
-import os
-import sys
-import StringIO
-import textwrap
-
-from twisted.trial import unittest
-from twisted.internet import defer, reactor
-
-from buildbot.process.base import BuildRequest
-from buildbot.sourcestamp import SourceStamp
-from buildbot.status.builder import SUCCESS
-from buildbot.test.runutils import RunMixin
-
-
-PENDING = 'pending'
-RUNNING = 'running'
-SHUTTINGDOWN = 'shutting-down'
-TERMINATED = 'terminated'
-
-
-class EC2ResponseError(Exception):
- def __init__(self, code):
- self.code = code
-
-
-class Stub:
- def __init__(self, **kwargs):
- self.__dict__.update(kwargs)
-
-
-class Instance:
-
- def __init__(self, data, ami, **kwargs):
- self.data = data
- self.state = PENDING
- self.id = ami
- self.public_dns_name = 'ec2-012-345-678-901.compute-1.amazonaws.com'
- self.__dict__.update(kwargs)
- self.output = Stub(name='output', output='example_output')
-
- def update(self):
- if self.state == PENDING:
- self.data.testcase.connectOneSlave(self.data.slave.slavename)
- self.state = RUNNING
- elif self.state == SHUTTINGDOWN:
- slavename = self.data.slave.slavename
- slaves = self.data.testcase.slaves
- if slavename in slaves:
- def discard(data):
- pass
- s = slaves.pop(slavename)
- bot = s.getServiceNamed("bot")
- for buildername in self.data.slave.slavebuilders:
- remote = bot.builders[buildername].remote
- if remote is None:
- continue
- broker = remote.broker
- broker.dataReceived = discard # seal its ears
- # and take away its voice
- broker.transport.write = discard
- # also discourage it from reconnecting once the connection
- # goes away
- s.bf.continueTrying = False
- # stop the service for cleanliness
- s.stopService()
- self.state = TERMINATED
-
- def get_console_output(self):
- return self.output
-
- def use_ip(self, elastic_ip):
- if isinstance(elastic_ip, Stub):
- elastic_ip = elastic_ip.public_ip
- if self.data.addresses[elastic_ip] is not None:
- raise ValueError('elastic ip already used')
- self.data.addresses[elastic_ip] = self
-
- def stop(self):
- self.state = SHUTTINGDOWN
-
-class Image:
-
- def __init__(self, data, ami, owner, location):
- self.data = data
- self.id = ami
- self.owner = owner
- self.location = location
-
- def run(self, **kwargs):
- return Stub(name='reservation',
- instances=[Instance(self.data, self.id, **kwargs)])
-
- @classmethod
- def create(klass, data, ami, owner, location):
- assert ami not in data.images
- self = klass(data, ami, owner, location)
- data.images[ami] = self
- return self
-
-
-class Connection:
-
- def __init__(self, data):
- self.data = data
-
- def get_all_key_pairs(self, keypair_name):
- try:
- return [self.data.keys[keypair_name]]
- except KeyError:
- raise EC2ResponseError('InvalidKeyPair.NotFound')
-
- def create_key_pair(self, keypair_name):
- return Key.create(keypair_name, self.data.keys)
-
- def get_all_security_groups(self, security_name):
- try:
- return [self.data.security_groups[security_name]]
- except KeyError:
- raise EC2ResponseError('InvalidGroup.NotFound')
-
- def create_security_group(self, security_name, description):
- assert security_name not in self.data.security_groups
- res = Stub(name='security_group', value=security_name,
- description=description)
- self.data.security_groups[security_name] = res
- return res
-
- def get_all_images(self, owners=None):
- # return a list of images. images have .location and .id.
- res = self.data.images.values()
- if owners:
- res = [image for image in res if image.owner in owners]
- return res
-
- def get_image(self, machine_id):
- # return image or raise an error
- return self.data.images[machine_id]
-
- def get_all_addresses(self, elastic_ips):
- res = []
- for ip in elastic_ips:
- if ip in self.data.addresses:
- res.append(Stub(public_ip=ip))
- else:
- raise EC2ResponseError('...bad address...')
- return res
-
- def disassociate_address(self, address):
- if address not in self.data.addresses:
- raise EC2ResponseError('...unknown address...')
- self.data.addresses[address] = None
-
-
-class Key:
-
- # this is what we would need to do if we actually needed a real key.
- # We don't right now.
- #def __init__(self):
- # self.raw = paramiko.RSAKey.generate(256)
- # f = StringIO.StringIO()
- # self.raw.write_private_key(f)
- # self.material = f.getvalue()
-
- @classmethod
- def create(klass, name, keys):
- self = klass()
- self.name = name
- self.keys = keys
- assert name not in keys
- keys[name] = self
- return self
-
- def delete(self):
- del self.keys[self.name]
-
-
-class Boto:
-
- slave = None # must be set in setUp
-
- def __init__(self, testcase):
- self.testcase = testcase
- self.keys = {}
- Key.create('latent_buildbot_slave', self.keys)
- Key.create('buildbot_slave', self.keys)
- assert sorted(self.keys.keys()) == ['buildbot_slave',
- 'latent_buildbot_slave']
- self.original_keys = dict(self.keys)
- self.security_groups = {
- 'latent_buildbot_slave': Stub(name='security_group',
- value='latent_buildbot_slave')}
- self.addresses = {'127.0.0.1': None}
- self.images = {}
- Image.create(self, 'ami-12345', 12345667890,
- 'test-xx/image.manifest.xml')
- Image.create(self, 'ami-AF000', 11111111111,
- 'test-f0a/image.manifest.xml')
- Image.create(self, 'ami-CE111', 22222222222,
- 'test-e1b/image.manifest.xml')
- Image.create(self, 'ami-ED222', 22222222222,
- 'test-d2c/image.manifest.xml')
- Image.create(self, 'ami-FC333', 22222222222,
- 'test-c30d/image.manifest.xml')
- Image.create(self, 'ami-DB444', 11111111111,
- 'test-b4e/image.manifest.xml')
- Image.create(self, 'ami-BA555', 11111111111,
- 'test-a5f/image.manifest.xml')
-
- def connect_ec2(self, identifier, secret_identifier):
- assert identifier == 'publickey', identifier
- assert secret_identifier == 'privatekey', secret_identifier
- return Connection(self)
-
- exception = Stub(EC2ResponseError=EC2ResponseError)
-
-
-class Mixin(RunMixin):
-
- def doBuild(self):
- br = BuildRequest("forced", SourceStamp(), 'test_builder')
- d = br.waitUntilFinished()
- self.control.getBuilder('b1').requestBuild(br)
- return d
-
- def setUp(self):
- self.boto_setUp1()
- self.master.loadConfig(self.config)
- self.boto_setUp2()
- self.boto_setUp3()
-
- def boto_setUp1(self):
- # debugging
- #import twisted.internet.base
- #twisted.internet.base.DelayedCall.debug = True
- # debugging
- RunMixin.setUp(self)
- self.boto = boto = Boto(self)
- if 'boto' not in sys.modules:
- sys.modules['boto'] = boto
- sys.modules['boto.exception'] = boto.exception
- if 'buildbot.ec2buildslave' in sys.modules:
- sys.modules['buildbot.ec2buildslave'].boto = boto
-
- def boto_setUp2(self):
- if sys.modules['boto'] is self.boto:
- del sys.modules['boto']
- del sys.modules['boto.exception']
-
- def boto_setUp3(self):
- self.master.startService()
- self.boto.slave = self.bot1 = self.master.botmaster.slaves['bot1']
- self.bot1._poll_resolution = 0.1
- self.b1 = self.master.botmaster.builders['b1']
-
- def tearDown(self):
- try:
- import boto
- import boto.exception
- except ImportError:
- pass
- else:
- sys.modules['buildbot.ec2buildslave'].boto = boto
- return RunMixin.tearDown(self)
-
-
-class BasicConfig(Mixin, unittest.TestCase):
- config = textwrap.dedent("""\
- from buildbot.process import factory
- from buildbot.steps import dummy
- from buildbot.ec2buildslave import EC2LatentBuildSlave
- s = factory.s
-
- BuildmasterConfig = c = {}
- c['slaves'] = [EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
- 'ami-12345',
- identifier='publickey',
- secret_identifier='privatekey'
- )]
- c['schedulers'] = []
- c['slavePortnum'] = 0
- c['schedulers'] = []
-
- f1 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=1)])
-
- c['builders'] = [
- {'name': 'b1', 'slavenames': ['bot1'],
- 'builddir': 'b1', 'factory': f1},
- ]
- """)
-
- def testSequence(self):
- # test with secrets in config, a single AMI, and defaults/
- self.assertEqual(self.bot1.ami, 'ami-12345')
- self.assertEqual(self.bot1.instance_type, 'm1.large')
- self.assertEqual(self.bot1.keypair_name, 'latent_buildbot_slave')
- self.assertEqual(self.bot1.security_name, 'latent_buildbot_slave')
- # this would be appropriate if we were recreating keys.
- #self.assertNotEqual(self.boto.keys['latent_buildbot_slave'],
- # self.boto.original_keys['latent_buildbot_slave'])
- self.failUnless(isinstance(self.bot1.get_image(), Image))
- self.assertEqual(self.bot1.get_image().id, 'ami-12345')
- self.assertIdentical(self.bot1.elastic_ip, None)
- self.assertIdentical(self.bot1.instance, None)
- # let's start a build...
- self.build_deferred = self.doBuild()
- # ...and wait for the ec2 slave to show up
- d = self.bot1.substantiation_deferred
- d.addCallback(self._testSequence_1)
- return d
- def _testSequence_1(self, res):
- # bot 1 is substantiated.
- self.assertNotIdentical(self.bot1.slave, None)
- self.failUnless(self.bot1.substantiated)
- self.failUnless(isinstance(self.bot1.instance, Instance))
- self.assertEqual(self.bot1.instance.id, 'ami-12345')
- self.assertEqual(self.bot1.instance.state, RUNNING)
- self.assertEqual(self.bot1.instance.key_name, 'latent_buildbot_slave')
- self.assertEqual(self.bot1.instance.security_groups,
- ['latent_buildbot_slave'])
- self.assertEqual(self.bot1.instance.instance_type, 'm1.large')
- self.assertEqual(self.bot1.output.output, 'example_output')
- # now we'll wait for the build to complete
- d = self.build_deferred
- del self.build_deferred
- d.addCallback(self._testSequence_2)
- return d
- def _testSequence_2(self, res):
- # build was a success!
- self.failUnlessEqual(res.getResults(), SUCCESS)
- self.failUnlessEqual(res.getSlavename(), "bot1")
- # Let's let it shut down. We'll set the build_wait_timer to fire
- # sooner, and wait for it to fire.
- self.bot1.build_wait_timer.reset(0)
- # we'll stash the instance around to look at it
- self.instance = self.bot1.instance
- # now we wait.
- d = defer.Deferred()
- reactor.callLater(0.5, d.callback, None)
- d.addCallback(self._testSequence_3)
- return d
- def _testSequence_3(self, res):
- # slave is insubstantiated
- self.assertIdentical(self.bot1.slave, None)
- self.failIf(self.bot1.substantiated)
- self.assertIdentical(self.bot1.instance, None)
- self.assertEqual(self.instance.state, TERMINATED)
- del self.instance
-
-class ElasticIP(Mixin, unittest.TestCase):
- config = textwrap.dedent("""\
- from buildbot.process import factory
- from buildbot.steps import dummy
- from buildbot.ec2buildslave import EC2LatentBuildSlave
- s = factory.s
-
- BuildmasterConfig = c = {}
- c['slaves'] = [EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
- 'ami-12345',
- identifier='publickey',
- secret_identifier='privatekey',
- elastic_ip='127.0.0.1'
- )]
- c['schedulers'] = []
- c['slavePortnum'] = 0
- c['schedulers'] = []
-
- f1 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=1)])
-
- c['builders'] = [
- {'name': 'b1', 'slavenames': ['bot1'],
- 'builddir': 'b1', 'factory': f1},
- ]
- """)
-
- def testSequence(self):
- self.assertEqual(self.bot1.elastic_ip.public_ip, '127.0.0.1')
- self.assertIdentical(self.boto.addresses['127.0.0.1'], None)
- # let's start a build...
- d = self.doBuild()
- d.addCallback(self._testSequence_1)
- return d
- def _testSequence_1(self, res):
- # build was a success!
- self.failUnlessEqual(res.getResults(), SUCCESS)
- self.failUnlessEqual(res.getSlavename(), "bot1")
- # we have our address
- self.assertIdentical(self.boto.addresses['127.0.0.1'],
- self.bot1.instance)
- # Let's let it shut down. We'll set the build_wait_timer to fire
- # sooner, and wait for it to fire.
- self.bot1.build_wait_timer.reset(0)
- d = defer.Deferred()
- reactor.callLater(0.5, d.callback, None)
- d.addCallback(self._testSequence_2)
- return d
- def _testSequence_2(self, res):
- # slave is insubstantiated
- self.assertIdentical(self.bot1.slave, None)
- self.failIf(self.bot1.substantiated)
- self.assertIdentical(self.bot1.instance, None)
- # the address is free again
- self.assertIdentical(self.boto.addresses['127.0.0.1'], None)
-
-
-class Initialization(Mixin, unittest.TestCase):
-
- def setUp(self):
- self.boto_setUp1()
-
- def tearDown(self):
- self.boto_setUp2()
- return Mixin.tearDown(self)
-
- def testDefaultSeparateFile(self):
- # set up .ec2/aws_id
- home = os.environ['HOME']
- fake_home = os.path.join(os.getcwd(), 'basedir') # see RunMixin.setUp
- os.environ['HOME'] = fake_home
- dir = os.path.join(fake_home, '.ec2')
- os.mkdir(dir)
- f = open(os.path.join(dir, 'aws_id'), 'w')
- f.write('publickey\nprivatekey')
- f.close()
- # The Connection checks the file, so if the secret file is not parsed
- # correctly, *this* is where it would fail. This is the real test.
- from buildbot.ec2buildslave import EC2LatentBuildSlave
- bot1 = EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
- 'ami-12345')
- # for completeness, we'll show that the connection actually exists.
- self.failUnless(isinstance(bot1.conn, Connection))
- # clean up.
- os.environ['HOME'] = home
- self.rmtree(dir)
-
- def testCustomSeparateFile(self):
- # set up .ec2/aws_id
- file_path = os.path.join(os.getcwd(), 'basedir', 'custom_aws_id')
- f = open(file_path, 'w')
- f.write('publickey\nprivatekey')
- f.close()
- # The Connection checks the file, so if the secret file is not parsed
- # correctly, *this* is where it would fail. This is the real test.
- from buildbot.ec2buildslave import EC2LatentBuildSlave
- bot1 = EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
- 'ami-12345', aws_id_file_path=file_path)
- # for completeness, we'll show that the connection actually exists.
- self.failUnless(isinstance(bot1.conn, Connection))
-
- def testNoAMIBroken(self):
- # you must specify an AMI, or at least one of valid_ami_owners or
- # valid_ami_location_regex
- from buildbot.ec2buildslave import EC2LatentBuildSlave
- self.assertRaises(ValueError, EC2LatentBuildSlave, 'bot1', 'sekrit',
- 'm1.large', identifier='publickey',
- secret_identifier='privatekey')
-
- def testAMIOwnerFilter(self):
- # if you only specify an owner, you get the image owned by any of the
- # owners that sorts last by the AMI's location.
- from buildbot.ec2buildslave import EC2LatentBuildSlave
- bot1 = EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
- valid_ami_owners=[11111111111],
- identifier='publickey',
- secret_identifier='privatekey'
- )
- self.assertEqual(bot1.get_image().location,
- 'test-f0a/image.manifest.xml')
- bot1 = EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
- valid_ami_owners=[11111111111,
- 22222222222],
- identifier='publickey',
- secret_identifier='privatekey'
- )
- self.assertEqual(bot1.get_image().location,
- 'test-f0a/image.manifest.xml')
- bot1 = EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
- valid_ami_owners=[22222222222],
- identifier='publickey',
- secret_identifier='privatekey'
- )
- self.assertEqual(bot1.get_image().location,
- 'test-e1b/image.manifest.xml')
- bot1 = EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
- valid_ami_owners=12345667890,
- identifier='publickey',
- secret_identifier='privatekey'
- )
- self.assertEqual(bot1.get_image().location,
- 'test-xx/image.manifest.xml')
-
- def testAMISimpleRegexFilter(self):
- from buildbot.ec2buildslave import EC2LatentBuildSlave
- bot1 = EC2LatentBuildSlave(
- 'bot1', 'sekrit', 'm1.large',
- valid_ami_location_regex=r'test\-[a-z]\w+/image.manifest.xml',
- identifier='publickey', secret_identifier='privatekey')
- self.assertEqual(bot1.get_image().location,
- 'test-xx/image.manifest.xml')
- bot1 = EC2LatentBuildSlave(
- 'bot1', 'sekrit', 'm1.large',
- valid_ami_location_regex=r'test\-[a-z]\d+\w/image.manifest.xml',
- identifier='publickey', secret_identifier='privatekey')
- self.assertEqual(bot1.get_image().location,
- 'test-f0a/image.manifest.xml')
- bot1 = EC2LatentBuildSlave(
- 'bot1', 'sekrit', 'm1.large', valid_ami_owners=[22222222222],
- valid_ami_location_regex=r'test\-[a-z]\d+\w/image.manifest.xml',
- identifier='publickey', secret_identifier='privatekey')
- self.assertEqual(bot1.get_image().location,
- 'test-e1b/image.manifest.xml')
-
- def testAMIRegexAlphaSortFilter(self):
- from buildbot.ec2buildslave import EC2LatentBuildSlave
- bot1 = EC2LatentBuildSlave(
- 'bot1', 'sekrit', 'm1.large',
- valid_ami_owners=[11111111111, 22222222222],
- valid_ami_location_regex=r'test\-[a-z]\d+([a-z])/image.manifest.xml',
- identifier='publickey', secret_identifier='privatekey')
- self.assertEqual(bot1.get_image().location,
- 'test-a5f/image.manifest.xml')
-
- def testAMIRegexIntSortFilter(self):
- from buildbot.ec2buildslave import EC2LatentBuildSlave
- bot1 = EC2LatentBuildSlave(
- 'bot1', 'sekrit', 'm1.large',
- valid_ami_owners=[11111111111, 22222222222],
- valid_ami_location_regex=r'test\-[a-z](\d+)[a-z]/image.manifest.xml',
- identifier='publickey', secret_identifier='privatekey')
- self.assertEqual(bot1.get_image().location,
- 'test-c30d/image.manifest.xml')
-
- def testNewSecurityGroup(self):
- from buildbot.ec2buildslave import EC2LatentBuildSlave
- bot1 = EC2LatentBuildSlave(
- 'bot1', 'sekrit', 'm1.large', 'ami-12345',
- identifier='publickey', secret_identifier='privatekey',
- security_name='custom_security_name')
- self.assertEqual(
- self.boto.security_groups['custom_security_name'].value,
- 'custom_security_name')
- self.assertEqual(bot1.security_name, 'custom_security_name')
-
- def testNewKeypairName(self):
- from buildbot.ec2buildslave import EC2LatentBuildSlave
- bot1 = EC2LatentBuildSlave(
- 'bot1', 'sekrit', 'm1.large', 'ami-12345',
- identifier='publickey', secret_identifier='privatekey',
- keypair_name='custom_keypair_name')
- self.assertIn('custom_keypair_name', self.boto.keys)
- self.assertEqual(bot1.keypair_name, 'custom_keypair_name')
diff --git a/buildbot/buildbot/test/test_limitlogs.py b/buildbot/buildbot/test/test_limitlogs.py
deleted file mode 100644
index 9fd5bea..0000000
--- a/buildbot/buildbot/test/test_limitlogs.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# -*- test-case-name: buildbot.test.test_limitlogs -*-
-
-from twisted.trial import unittest
-from twisted.internet import reactor, defer
-from twisted.internet.utils import getProcessValue, getProcessOutput
-import twisted
-from twisted.python.versions import Version
-from twisted.python.procutils import which
-from twisted.python import log, logfile
-import os
-
-'''Testcases to verify that the --log-size and --log-count options to
-create-master and create-slave actually work.
-
-These features require Twisted 8.2.0 to work.
-
-Currently only testing the master side of it.
-'''
-
-
-master_cfg = """from buildbot.process import factory
-from buildbot.steps import dummy
-from buildbot.buildslave import BuildSlave
-s = factory.s
-
-f1 = factory.QuickBuildFactory('fakerep', 'cvsmodule', configure=None)
-
-f2 = factory.BuildFactory([
- dummy.Dummy(timeout=1),
- dummy.RemoteDummy(timeout=2),
- ])
-
-BuildmasterConfig = c = {}
-c['slaves'] = [BuildSlave('bot1', 'sekrit')]
-c['schedulers'] = []
-c['builders'] = []
-c['builders'].append({'name':'quick', 'slavename':'bot1',
- 'builddir': 'quickdir', 'factory': f1})
-c['slavePortnum'] = 0
-
-from twisted.python import log
-for i in xrange(100):
- log.msg("this is a mighty long string and I'm going to write it into the log often")
-"""
-
-class MasterLogs(unittest.TestCase):
- '''Limit master log size and count.'''
-
- def setUp(self):
- if twisted.version < Version("twisted", 8, 2, 0):
- self.skip = True
- raise unittest.SkipTest("Twisted 8.2.0 or higher required")
-
- def testLog(self):
- exes = which('buildbot')
- if not exes:
- raise unittest.SkipTest("Buildbot needs to be installed")
- self.buildbotexe = exes[0]
- d = getProcessValue(self.buildbotexe,
- ['create-master', '--log-size=1000', '--log-count=2',
- 'master'])
- d.addCallback(self._master_created)
- return d
-
- def _master_created(self, res):
- open('master/master.cfg', 'w').write(master_cfg)
- d = getProcessOutput(self.buildbotexe,
- ['start', 'master'])
- d.addBoth(self._master_running)
- return d
-
- def _master_running(self, res):
- self.addCleanup(self._stop_master)
- d = defer.Deferred()
- reactor.callLater(2, d.callback, None)
- d.addCallback(self._do_tests)
- return d
-
- def _do_tests(self, rv):
- '''The actual method doing the tests on the master twistd.log'''
- lf = logfile.LogFile.fromFullPath(os.path.join('master', 'twistd.log'))
- self.failUnlessEqual(lf.listLogs(), [1,2])
- lr = lf.getLog(1)
- firstline = lr.readLines()[0]
- self.failUnless(firstline.endswith("this is a mighty long string and I'm going to write it into the log often\n"))
-
- def _stop_master(self):
- d = getProcessOutput(self.buildbotexe,
- ['stop', 'master'])
- d.addBoth(self._master_stopped)
- return d
-
- def _master_stopped(self, res):
- print "master stopped"
diff --git a/buildbot/buildbot/test/test_locks.py b/buildbot/buildbot/test/test_locks.py
deleted file mode 100644
index 0c1e0b5..0000000
--- a/buildbot/buildbot/test/test_locks.py
+++ /dev/null
@@ -1,495 +0,0 @@
-# -*- test-case-name: buildbot.test.test_locks -*-
-
-import random
-
-from twisted.trial import unittest
-from twisted.internet import defer, reactor
-
-from buildbot import master
-from buildbot.steps import dummy
-from buildbot.sourcestamp import SourceStamp
-from buildbot.process.base import BuildRequest
-from buildbot.test.runutils import RunMixin
-from buildbot import locks
-
-def claimHarder(lock, owner, la):
- """Return a Deferred that will fire when the lock is claimed. Keep trying
- until we succeed."""
- if lock.isAvailable(la):
- #print "claimHarder(%s): claiming" % owner
- lock.claim(owner, la)
- return defer.succeed(lock)
- #print "claimHarder(%s): waiting" % owner
- d = lock.waitUntilMaybeAvailable(owner, la)
- d.addCallback(claimHarder, owner, la)
- return d
-
-def hold(lock, owner, la, mode="now"):
- if mode == "now":
- lock.release(owner, la)
- elif mode == "very soon":
- reactor.callLater(0, lock.release, owner, la)
- elif mode == "soon":
- reactor.callLater(0.1, lock.release, owner, la)
-
-class Unit(unittest.TestCase):
- def testNowCounting(self):
- lid = locks.MasterLock('dummy')
- la = locks.LockAccess(lid, 'counting')
- return self._testNow(la)
-
- def testNowExclusive(self):
- lid = locks.MasterLock('dummy')
- la = locks.LockAccess(lid, 'exclusive')
- return self._testNow(la)
-
- def _testNow(self, la):
- l = locks.BaseLock("name")
- self.failUnless(l.isAvailable(la))
- l.claim("owner1", la)
- self.failIf(l.isAvailable(la))
- l.release("owner1", la)
- self.failUnless(l.isAvailable(la))
-
- def testNowMixed1(self):
- """ Test exclusive is not possible when a counting has the lock """
- lid = locks.MasterLock('dummy')
- lac = locks.LockAccess(lid, 'counting')
- lae = locks.LockAccess(lid, 'exclusive')
- l = locks.BaseLock("name", maxCount=2)
- self.failUnless(l.isAvailable(lac))
- l.claim("count-owner", lac)
- self.failIf(l.isAvailable(lae))
- l.release("count-owner", lac)
- self.failUnless(l.isAvailable(lac))
-
- def testNowMixed2(self):
- """ Test counting is not possible when an exclsuive has the lock """
- lid = locks.MasterLock('dummy')
- lac = locks.LockAccess(lid, 'counting')
- lae = locks.LockAccess(lid, 'exclusive')
- l = locks.BaseLock("name", maxCount=2)
- self.failUnless(l.isAvailable(lae))
- l.claim("count-owner", lae)
- self.failIf(l.isAvailable(lac))
- l.release("count-owner", lae)
- self.failUnless(l.isAvailable(lae))
-
- def testLaterCounting(self):
- lid = locks.MasterLock('dummy')
- la = locks.LockAccess(lid, 'counting')
- return self._testLater(la)
-
- def testLaterExclusive(self):
- lid = locks.MasterLock('dummy')
- la = locks.LockAccess(lid, 'exclusive')
- return self._testLater(la)
-
- def _testLater(self, la):
- lock = locks.BaseLock("name")
- d = claimHarder(lock, "owner1", la)
- d.addCallback(lambda lock: lock.release("owner1", la))
- return d
-
- def testCompetitionCounting(self):
- lid = locks.MasterLock('dummy')
- la = locks.LockAccess(lid, 'counting')
- return self._testCompetition(la)
-
- def testCompetitionExclusive(self):
- lid = locks.MasterLock('dummy')
- la = locks.LockAccess(lid, 'exclusive')
- return self._testCompetition(la)
-
- def _testCompetition(self, la):
- lock = locks.BaseLock("name")
- d = claimHarder(lock, "owner1", la)
- d.addCallback(self._claim1, la)
- return d
- def _claim1(self, lock, la):
- # we should have claimed it by now
- self.failIf(lock.isAvailable(la))
- # now set up two competing owners. We don't know which will get the
- # lock first.
- d2 = claimHarder(lock, "owner2", la)
- d2.addCallback(hold, "owner2", la, "now")
- d3 = claimHarder(lock, "owner3", la)
- d3.addCallback(hold, "owner3", la, "soon")
- dl = defer.DeferredList([d2,d3])
- dl.addCallback(self._cleanup, lock, la)
- # and release the lock in a moment
- reactor.callLater(0.1, lock.release, "owner1", la)
- return dl
-
- def _cleanup(self, res, lock, la):
- d = claimHarder(lock, "cleanup", la)
- d.addCallback(lambda lock: lock.release("cleanup", la))
- return d
-
- def testRandomCounting(self):
- lid = locks.MasterLock('dummy')
- la = locks.LockAccess(lid, 'counting')
- return self._testRandom(la)
-
- def testRandomExclusive(self):
- lid = locks.MasterLock('dummy')
- la = locks.LockAccess(lid, 'exclusive')
- return self._testRandom(la)
-
- def _testRandom(self, la):
- lock = locks.BaseLock("name")
- dl = []
- for i in range(100):
- owner = "owner%d" % i
- mode = random.choice(["now", "very soon", "soon"])
- d = claimHarder(lock, owner, la)
- d.addCallback(hold, owner, la, mode)
- dl.append(d)
- d = defer.DeferredList(dl)
- d.addCallback(self._cleanup, lock, la)
- return d
-
-class Multi(unittest.TestCase):
- def testNowCounting(self):
- lid = locks.MasterLock('dummy')
- la = locks.LockAccess(lid, 'counting')
- lock = locks.BaseLock("name", 2)
- self.failUnless(lock.isAvailable(la))
- lock.claim("owner1", la)
- self.failUnless(lock.isAvailable(la))
- lock.claim("owner2", la)
- self.failIf(lock.isAvailable(la))
- lock.release("owner1", la)
- self.failUnless(lock.isAvailable(la))
- lock.release("owner2", la)
- self.failUnless(lock.isAvailable(la))
-
- def testLaterCounting(self):
- lid = locks.MasterLock('dummy')
- la = locks.LockAccess(lid, 'counting')
- lock = locks.BaseLock("name", 2)
- lock.claim("owner1", la)
- lock.claim("owner2", la)
- d = claimHarder(lock, "owner3", la)
- d.addCallback(lambda lock: lock.release("owner3", la))
- lock.release("owner2", la)
- lock.release("owner1", la)
- return d
-
- def _cleanup(self, res, lock, count, la):
- dl = []
- for i in range(count):
- d = claimHarder(lock, "cleanup%d" % i, la)
- dl.append(d)
- d2 = defer.DeferredList(dl)
- # once all locks are claimed, we know that any previous owners have
- # been flushed out
- def _release(res):
- for i in range(count):
- lock.release("cleanup%d" % i, la)
- d2.addCallback(_release)
- return d2
-
- def testRandomCounting(self):
- lid = locks.MasterLock('dummy')
- la = locks.LockAccess(lid, 'counting')
- COUNT = 5
- lock = locks.BaseLock("name", COUNT)
- dl = []
- for i in range(100):
- owner = "owner%d" % i
- mode = random.choice(["now", "very soon", "soon"])
- d = claimHarder(lock, owner, la)
- def _check(lock):
- self.failIf(len(lock.owners) > COUNT)
- return lock
- d.addCallback(_check)
- d.addCallback(hold, owner, la, mode)
- dl.append(d)
- d = defer.DeferredList(dl)
- d.addCallback(self._cleanup, lock, COUNT, la)
- return d
-
-class Dummy:
- pass
-
-def slave(slavename):
- slavebuilder = Dummy()
- slavebuilder.slave = Dummy()
- slavebuilder.slave.slavename = slavename
- return slavebuilder
-
-class MakeRealLock(unittest.TestCase):
-
- def make(self, lockid):
- return lockid.lockClass(lockid)
-
- def testMaster(self):
- mid1 = locks.MasterLock("name1")
- mid2 = locks.MasterLock("name1")
- mid3 = locks.MasterLock("name3")
- mid4 = locks.MasterLock("name1", 3)
- self.failUnlessEqual(mid1, mid2)
- self.failIfEqual(mid1, mid3)
- # they should all be hashable
- d = {mid1: 1, mid2: 2, mid3: 3, mid4: 4}
-
- l1 = self.make(mid1)
- self.failUnlessEqual(l1.name, "name1")
- self.failUnlessEqual(l1.maxCount, 1)
- self.failUnlessIdentical(l1.getLock(slave("slave1")), l1)
- l4 = self.make(mid4)
- self.failUnlessEqual(l4.name, "name1")
- self.failUnlessEqual(l4.maxCount, 3)
- self.failUnlessIdentical(l4.getLock(slave("slave1")), l4)
-
- def testSlave(self):
- sid1 = locks.SlaveLock("name1")
- sid2 = locks.SlaveLock("name1")
- sid3 = locks.SlaveLock("name3")
- sid4 = locks.SlaveLock("name1", maxCount=3)
- mcfs = {"bigslave": 4, "smallslave": 1}
- sid5 = locks.SlaveLock("name1", maxCount=3, maxCountForSlave=mcfs)
- mcfs2 = {"bigslave": 4, "smallslave": 1}
- sid5a = locks.SlaveLock("name1", maxCount=3, maxCountForSlave=mcfs2)
- mcfs3 = {"bigslave": 1, "smallslave": 99}
- sid5b = locks.SlaveLock("name1", maxCount=3, maxCountForSlave=mcfs3)
- self.failUnlessEqual(sid1, sid2)
- self.failIfEqual(sid1, sid3)
- self.failIfEqual(sid1, sid4)
- self.failIfEqual(sid1, sid5)
- self.failUnlessEqual(sid5, sid5a)
- self.failIfEqual(sid5a, sid5b)
- # they should all be hashable
- d = {sid1: 1, sid2: 2, sid3: 3, sid4: 4, sid5: 5, sid5a: 6, sid5b: 7}
-
- l1 = self.make(sid1)
- self.failUnlessEqual(l1.name, "name1")
- self.failUnlessEqual(l1.maxCount, 1)
- l1s1 = l1.getLock(slave("slave1"))
- self.failIfIdentical(l1s1, l1)
-
- l4 = self.make(sid4)
- self.failUnlessEqual(l4.maxCount, 3)
- l4s1 = l4.getLock(slave("slave1"))
- self.failUnlessEqual(l4s1.maxCount, 3)
-
- l5 = self.make(sid5)
- l5s1 = l5.getLock(slave("bigslave"))
- l5s2 = l5.getLock(slave("smallslave"))
- l5s3 = l5.getLock(slave("unnamedslave"))
- self.failUnlessEqual(l5s1.maxCount, 4)
- self.failUnlessEqual(l5s2.maxCount, 1)
- self.failUnlessEqual(l5s3.maxCount, 3)
-
-class GetLock(unittest.TestCase):
- def testGet(self):
- # the master.cfg file contains "lock ids", which are instances of
- # MasterLock and SlaveLock but which are not actually Locks per se.
- # When the build starts, these markers are turned into RealMasterLock
- # and RealSlaveLock instances. This insures that any builds running
- # on slaves that were unaffected by the config change are still
- # referring to the same Lock instance as new builds by builders that
- # *were* affected by the change. There have been bugs in the past in
- # which this didn't happen, and the Locks were bypassed because half
- # the builders were using one incarnation of the lock while the other
- # half were using a separate (but equal) incarnation.
- #
- # Changing the lock id in any way should cause it to be replaced in
- # the BotMaster. This will result in a couple of funky artifacts:
- # builds in progress might pay attention to a different lock, so we
- # might bypass the locking for the duration of a couple builds.
- # There's also the problem of old Locks lingering around in
- # BotMaster.locks, but they're small and shouldn't really cause a
- # problem.
-
- b = master.BotMaster()
- l1 = locks.MasterLock("one")
- l1a = locks.MasterLock("one")
- l2 = locks.MasterLock("one", maxCount=4)
-
- rl1 = b.getLockByID(l1)
- rl2 = b.getLockByID(l1a)
- self.failUnlessIdentical(rl1, rl2)
- rl3 = b.getLockByID(l2)
- self.failIfIdentical(rl1, rl3)
-
- s1 = locks.SlaveLock("one")
- s1a = locks.SlaveLock("one")
- s2 = locks.SlaveLock("one", maxCount=4)
- s3 = locks.SlaveLock("one", maxCount=4,
- maxCountForSlave={"a":1, "b":2})
- s3a = locks.SlaveLock("one", maxCount=4,
- maxCountForSlave={"a":1, "b":2})
- s4 = locks.SlaveLock("one", maxCount=4,
- maxCountForSlave={"a":4, "b":4})
-
- rl1 = b.getLockByID(s1)
- rl2 = b.getLockByID(s1a)
- self.failUnlessIdentical(rl1, rl2)
- rl3 = b.getLockByID(s2)
- self.failIfIdentical(rl1, rl3)
- rl4 = b.getLockByID(s3)
- self.failIfIdentical(rl1, rl4)
- self.failIfIdentical(rl3, rl4)
- rl5 = b.getLockByID(s3a)
- self.failUnlessIdentical(rl4, rl5)
- rl6 = b.getLockByID(s4)
- self.failIfIdentical(rl5, rl6)
-
-
-
-class LockStep(dummy.Dummy):
- def start(self):
- number = self.build.requests[0].number
- self.build.requests[0].events.append(("start", number))
- dummy.Dummy.start(self)
- def done(self):
- number = self.build.requests[0].number
- self.build.requests[0].events.append(("done", number))
- dummy.Dummy.done(self)
-
-config_1 = """
-from buildbot import locks
-from buildbot.process import factory
-from buildbot.buildslave import BuildSlave
-s = factory.s
-from buildbot.test.test_locks import LockStep
-
-BuildmasterConfig = c = {}
-c['slaves'] = [BuildSlave('bot1', 'sekrit'), BuildSlave('bot2', 'sekrit')]
-c['schedulers'] = []
-c['slavePortnum'] = 0
-
-first_lock = locks.SlaveLock('first')
-second_lock = locks.MasterLock('second')
-f1 = factory.BuildFactory([s(LockStep, timeout=2, locks=[first_lock])])
-f2 = factory.BuildFactory([s(LockStep, timeout=3, locks=[second_lock])])
-f3 = factory.BuildFactory([s(LockStep, timeout=2, locks=[])])
-
-b1a = {'name': 'full1a', 'slavename': 'bot1', 'builddir': '1a', 'factory': f1}
-b1b = {'name': 'full1b', 'slavename': 'bot1', 'builddir': '1b', 'factory': f1}
-b1c = {'name': 'full1c', 'slavename': 'bot1', 'builddir': '1c', 'factory': f3,
- 'locks': [first_lock, second_lock]}
-b1d = {'name': 'full1d', 'slavename': 'bot1', 'builddir': '1d', 'factory': f2}
-b2a = {'name': 'full2a', 'slavename': 'bot2', 'builddir': '2a', 'factory': f1}
-b2b = {'name': 'full2b', 'slavename': 'bot2', 'builddir': '2b', 'factory': f3,
- 'locks': [second_lock]}
-c['builders'] = [b1a, b1b, b1c, b1d, b2a, b2b]
-"""
-
-config_1a = config_1 + \
-"""
-b1b = {'name': 'full1b', 'slavename': 'bot1', 'builddir': '1B', 'factory': f1}
-c['builders'] = [b1a, b1b, b1c, b1d, b2a, b2b]
-"""
-
-
-class Locks(RunMixin, unittest.TestCase):
- def setUp(self):
- N = 'test_builder'
- RunMixin.setUp(self)
- self.req1 = req1 = BuildRequest("forced build", SourceStamp(), N)
- req1.number = 1
- self.req2 = req2 = BuildRequest("forced build", SourceStamp(), N)
- req2.number = 2
- self.req3 = req3 = BuildRequest("forced build", SourceStamp(), N)
- req3.number = 3
- req1.events = req2.events = req3.events = self.events = []
- d = self.master.loadConfig(config_1)
- d.addCallback(lambda res: self.master.startService())
- d.addCallback(lambda res: self.connectSlaves(["bot1", "bot2"],
- ["full1a", "full1b",
- "full1c", "full1d",
- "full2a", "full2b"]))
- return d
-
- def testLock1(self):
- self.control.getBuilder("full1a").requestBuild(self.req1)
- self.control.getBuilder("full1b").requestBuild(self.req2)
- d = defer.DeferredList([self.req1.waitUntilFinished(),
- self.req2.waitUntilFinished()])
- d.addCallback(self._testLock1_1)
- return d
-
- def _testLock1_1(self, res):
- # full1a should complete its step before full1b starts it
- self.failUnlessEqual(self.events,
- [("start", 1), ("done", 1),
- ("start", 2), ("done", 2)])
-
- def testLock1a(self):
- # just like testLock1, but we reload the config file first, with a
- # change that causes full1b to be changed. This tickles a design bug
- # in which full1a and full1b wind up with distinct Lock instances.
- d = self.master.loadConfig(config_1a)
- d.addCallback(self._testLock1a_1)
- return d
- def _testLock1a_1(self, res):
- self.control.getBuilder("full1a").requestBuild(self.req1)
- self.control.getBuilder("full1b").requestBuild(self.req2)
- d = defer.DeferredList([self.req1.waitUntilFinished(),
- self.req2.waitUntilFinished()])
- d.addCallback(self._testLock1a_2)
- return d
-
- def _testLock1a_2(self, res):
- # full1a should complete its step before full1b starts it
- self.failUnlessEqual(self.events,
- [("start", 1), ("done", 1),
- ("start", 2), ("done", 2)])
-
- def testLock2(self):
- # two builds run on separate slaves with slave-scoped locks should
- # not interfere
- self.control.getBuilder("full1a").requestBuild(self.req1)
- self.control.getBuilder("full2a").requestBuild(self.req2)
- d = defer.DeferredList([self.req1.waitUntilFinished(),
- self.req2.waitUntilFinished()])
- d.addCallback(self._testLock2_1)
- return d
-
- def _testLock2_1(self, res):
- # full2a should start its step before full1a finishes it. They run on
- # different slaves, however, so they might start in either order.
- self.failUnless(self.events[:2] == [("start", 1), ("start", 2)] or
- self.events[:2] == [("start", 2), ("start", 1)])
-
- def testLock3(self):
- # two builds run on separate slaves with master-scoped locks should
- # not overlap
- self.control.getBuilder("full1c").requestBuild(self.req1)
- self.control.getBuilder("full2b").requestBuild(self.req2)
- d = defer.DeferredList([self.req1.waitUntilFinished(),
- self.req2.waitUntilFinished()])
- d.addCallback(self._testLock3_1)
- return d
-
- def _testLock3_1(self, res):
- # full2b should not start until after full1c finishes. The builds run
- # on different slaves, so we can't really predict which will start
- # first. The important thing is that they don't overlap.
- self.failUnless(self.events == [("start", 1), ("done", 1),
- ("start", 2), ("done", 2)]
- or self.events == [("start", 2), ("done", 2),
- ("start", 1), ("done", 1)]
- )
-
- def testLock4(self):
- self.control.getBuilder("full1a").requestBuild(self.req1)
- self.control.getBuilder("full1c").requestBuild(self.req2)
- self.control.getBuilder("full1d").requestBuild(self.req3)
- d = defer.DeferredList([self.req1.waitUntilFinished(),
- self.req2.waitUntilFinished(),
- self.req3.waitUntilFinished()])
- d.addCallback(self._testLock4_1)
- return d
-
- def _testLock4_1(self, res):
- # full1a starts, then full1d starts (because they do not interfere).
- # Once both are done, full1c can run.
- self.failUnlessEqual(self.events,
- [("start", 1), ("start", 3),
- ("done", 1), ("done", 3),
- ("start", 2), ("done", 2)])
-
diff --git a/buildbot/buildbot/test/test_maildir.py b/buildbot/buildbot/test/test_maildir.py
deleted file mode 100644
index b79cbd3..0000000
--- a/buildbot/buildbot/test/test_maildir.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# -*- test-case-name: buildbot.test.test_maildir -*-
-
-from twisted.trial import unittest
-import os, shutil
-from buildbot.changes.mail import FCMaildirSource
-from twisted.internet import defer, reactor, task
-from twisted.python import util, log
-
-class TimeOutError(Exception):
- """The message were not received in a timely fashion"""
-
-class MaildirTest(unittest.TestCase):
- SECONDS_PER_MESSAGE = 1.0
-
- def setUp(self):
- log.msg("creating empty maildir")
- self.maildir = "test-maildir"
- if os.path.isdir(self.maildir):
- shutil.rmtree(self.maildir)
- log.msg("removing stale maildir")
- os.mkdir(self.maildir)
- os.mkdir(os.path.join(self.maildir, "cur"))
- os.mkdir(os.path.join(self.maildir, "new"))
- os.mkdir(os.path.join(self.maildir, "tmp"))
- self.source = None
-
- def tearDown(self):
- log.msg("removing old maildir")
- shutil.rmtree(self.maildir)
- if self.source:
- return self.source.stopService()
-
- def addChange(self, c):
- # NOTE: this assumes every message results in a Change, which isn't
- # true for msg8-prefix
- log.msg("got change")
- self.changes.append(c)
-
- def deliverMail(self, msg):
- log.msg("delivering", msg)
- newdir = os.path.join(self.maildir, "new")
- # to do this right, use safecat
- shutil.copy(msg, newdir)
-
- def poll(self, changes, count, d):
- if len(changes) == count:
- d.callback("passed")
-
- def testMaildir(self):
- self.changes = []
- s = self.source = FCMaildirSource(self.maildir)
- s.parent = self
- s.startService()
- testfiles_dir = util.sibpath(__file__, "mail")
- testfiles = [msg for msg in os.listdir(testfiles_dir)
- if msg.startswith("freshcvs")]
- assert testfiles
- testfiles.sort()
- count = len(testfiles)
- d = defer.Deferred()
-
- i = 1
- for i in range(count):
- msg = testfiles[i]
- reactor.callLater(self.SECONDS_PER_MESSAGE*i, self.deliverMail,
- os.path.join(testfiles_dir, msg))
- self.loop = task.LoopingCall(self.poll, self.changes, count, d)
- self.loop.start(0.1)
- t = reactor.callLater(self.SECONDS_PER_MESSAGE*count + 15,
- d.errback, TimeOutError)
- # TODO: verify the messages, should use code from test_mailparse but
- # I'm not sure how to factor the verification routines out in a
- # useful fashion
-
- #for i in range(count):
- # msg, check = test_messages[i]
- # check(self, self.changes[i])
-
- def _shutdown(res):
- if t.active():
- t.cancel()
- self.loop.stop()
- return res
- d.addBoth(_shutdown)
-
- return d
-
- # TODO: it would be nice to set this timeout after counting the number of
- # messages in buildbot/test/mail/msg*, but I suspect trial wants to have
- # this number before the method starts, and maybe even before setUp()
- testMaildir.timeout = SECONDS_PER_MESSAGE*9 + 15
-
diff --git a/buildbot/buildbot/test/test_mailparse.py b/buildbot/buildbot/test/test_mailparse.py
deleted file mode 100644
index dc60269..0000000
--- a/buildbot/buildbot/test/test_mailparse.py
+++ /dev/null
@@ -1,293 +0,0 @@
-# -*- test-case-name: buildbot.test.test_mailparse -*-
-
-from twisted.trial import unittest
-from twisted.python import util
-from buildbot.changes import mail
-
-class TestFreshCVS(unittest.TestCase):
-
- def get(self, msg):
- msg = util.sibpath(__file__, msg)
- s = mail.FCMaildirSource(None)
- return s.parse_file(open(msg, "r"))
-
- def testMsg1(self):
- c = self.get("mail/freshcvs.1")
- self.assertEqual(c.who, "moshez")
- self.assertEqual(set(c.files), set(["Twisted/debian/python-twisted.menu.in"]))
- self.assertEqual(c.comments, "Instance massenger, apparently\n")
- self.assertEqual(c.isdir, 0)
-
- def testMsg2(self):
- c = self.get("mail/freshcvs.2")
- self.assertEqual(c.who, "itamarst")
- self.assertEqual(set(c.files), set(["Twisted/twisted/web/woven/form.py",
- "Twisted/twisted/python/formmethod.py"]))
- self.assertEqual(c.comments,
- "submit formmethod now subclass of Choice\n")
- self.assertEqual(c.isdir, 0)
-
- def testMsg3(self):
- # same as msg2 but missing the ViewCVS section
- c = self.get("mail/freshcvs.3")
- self.assertEqual(c.who, "itamarst")
- self.assertEqual(set(c.files), set(["Twisted/twisted/web/woven/form.py",
- "Twisted/twisted/python/formmethod.py"]))
- self.assertEqual(c.comments,
- "submit formmethod now subclass of Choice\n")
- self.assertEqual(c.isdir, 0)
-
- def testMsg4(self):
- # same as msg3 but also missing CVS patch section
- c = self.get("mail/freshcvs.4")
- self.assertEqual(c.who, "itamarst")
- self.assertEqual(set(c.files), set(["Twisted/twisted/web/woven/form.py",
- "Twisted/twisted/python/formmethod.py"]))
- self.assertEqual(c.comments,
- "submit formmethod now subclass of Choice\n")
- self.assertEqual(c.isdir, 0)
-
- def testMsg5(self):
- # creates a directory
- c = self.get("mail/freshcvs.5")
- self.assertEqual(c.who, "etrepum")
- self.assertEqual(set(c.files), set(["Twisted/doc/examples/cocoaDemo"]))
- self.assertEqual(c.comments,
- "Directory /cvs/Twisted/doc/examples/cocoaDemo added to the repository\n")
- self.assertEqual(c.isdir, 1)
-
- def testMsg6(self):
- # adds files
- c = self.get("mail/freshcvs.6")
- self.assertEqual(c.who, "etrepum")
- self.assertEqual(set(c.files), set([
- "Twisted/doc/examples/cocoaDemo/MyAppDelegate.py",
- "Twisted/doc/examples/cocoaDemo/__main__.py",
- "Twisted/doc/examples/cocoaDemo/bin-python-main.m",
- "Twisted/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings",
- "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib",
- "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib",
- "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib",
- "Twisted/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj"]))
- self.assertEqual(c.comments,
- "Cocoa (OS X) clone of the QT demo, using polling reactor\n\nRequires pyobjc ( http://pyobjc.sourceforge.net ), it's not much different than the template project. The reactor is iterated periodically by a repeating NSTimer.\n")
- self.assertEqual(c.isdir, 0)
-
- def testMsg7(self):
- # deletes files
- c = self.get("mail/freshcvs.7")
- self.assertEqual(c.who, "etrepum")
- self.assertEqual(set(c.files), set([
- "Twisted/doc/examples/cocoaDemo/MyAppDelegate.py",
- "Twisted/doc/examples/cocoaDemo/__main__.py",
- "Twisted/doc/examples/cocoaDemo/bin-python-main.m",
- "Twisted/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings",
- "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib",
- "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib",
- "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib",
- "Twisted/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj"]))
- self.assertEqual(c.comments,
- "Directories break debian build script, waiting for reasonable fix\n")
- self.assertEqual(c.isdir, 0)
-
- def testMsg8(self):
- # files outside Twisted/
- c = self.get("mail/freshcvs.8")
- self.assertEqual(c.who, "acapnotic")
- self.assertEqual(set(c.files), set([ "CVSROOT/freshCfg" ]))
- self.assertEqual(c.comments, "it doesn't work with invalid syntax\n")
- self.assertEqual(c.isdir, 0)
-
- def testMsg9(self):
- # also creates a directory
- c = self.get("mail/freshcvs.9")
- self.assertEqual(c.who, "exarkun")
- self.assertEqual(set(c.files), set(["Twisted/sandbox/exarkun/persist-plugin"]))
- self.assertEqual(c.comments,
- "Directory /cvs/Twisted/sandbox/exarkun/persist-plugin added to the repository\n")
- self.assertEqual(c.isdir, 1)
-
-
-class TestFreshCVS_Prefix(unittest.TestCase):
- def get(self, msg):
- msg = util.sibpath(__file__, msg)
- s = mail.FCMaildirSource(None)
- return s.parse_file(open(msg, "r"), prefix="Twisted/")
-
- def testMsg1p(self):
- c = self.get("mail/freshcvs.1")
- self.assertEqual(c.who, "moshez")
- self.assertEqual(set(c.files), set(["debian/python-twisted.menu.in"]))
- self.assertEqual(c.comments, "Instance massenger, apparently\n")
-
- def testMsg2p(self):
- c = self.get("mail/freshcvs.2")
- self.assertEqual(c.who, "itamarst")
- self.assertEqual(set(c.files), set(["twisted/web/woven/form.py",
- "twisted/python/formmethod.py"]))
- self.assertEqual(c.comments,
- "submit formmethod now subclass of Choice\n")
-
- def testMsg3p(self):
- # same as msg2 but missing the ViewCVS section
- c = self.get("mail/freshcvs.3")
- self.assertEqual(c.who, "itamarst")
- self.assertEqual(set(c.files), set(["twisted/web/woven/form.py",
- "twisted/python/formmethod.py"]))
- self.assertEqual(c.comments,
- "submit formmethod now subclass of Choice\n")
-
- def testMsg4p(self):
- # same as msg3 but also missing CVS patch section
- c = self.get("mail/freshcvs.4")
- self.assertEqual(c.who, "itamarst")
- self.assertEqual(set(c.files), set(["twisted/web/woven/form.py",
- "twisted/python/formmethod.py"]))
- self.assertEqual(c.comments,
- "submit formmethod now subclass of Choice\n")
-
- def testMsg5p(self):
- # creates a directory
- c = self.get("mail/freshcvs.5")
- self.assertEqual(c.who, "etrepum")
- self.assertEqual(set(c.files), set(["doc/examples/cocoaDemo"]))
- self.assertEqual(c.comments,
- "Directory /cvs/Twisted/doc/examples/cocoaDemo added to the repository\n")
- self.assertEqual(c.isdir, 1)
-
- def testMsg6p(self):
- # adds files
- c = self.get("mail/freshcvs.6")
- self.assertEqual(c.who, "etrepum")
- self.assertEqual(set(c.files), set([
- "doc/examples/cocoaDemo/MyAppDelegate.py",
- "doc/examples/cocoaDemo/__main__.py",
- "doc/examples/cocoaDemo/bin-python-main.m",
- "doc/examples/cocoaDemo/English.lproj/InfoPlist.strings",
- "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib",
- "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib",
- "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib",
- "doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj"]))
- self.assertEqual(c.comments,
- "Cocoa (OS X) clone of the QT demo, using polling reactor\n\nRequires pyobjc ( http://pyobjc.sourceforge.net ), it's not much different than the template project. The reactor is iterated periodically by a repeating NSTimer.\n")
- self.assertEqual(c.isdir, 0)
-
- def testMsg7p(self):
- # deletes files
- c = self.get("mail/freshcvs.7")
- self.assertEqual(c.who, "etrepum")
- self.assertEqual(set(c.files), set([
- "doc/examples/cocoaDemo/MyAppDelegate.py",
- "doc/examples/cocoaDemo/__main__.py",
- "doc/examples/cocoaDemo/bin-python-main.m",
- "doc/examples/cocoaDemo/English.lproj/InfoPlist.strings",
- "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib",
- "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib",
- "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib",
- "doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj"]))
- self.assertEqual(c.comments,
- "Directories break debian build script, waiting for reasonable fix\n")
- self.assertEqual(c.isdir, 0)
-
- def testMsg8p(self):
- # files outside Twisted/
- c = self.get("mail/freshcvs.8")
- self.assertEqual(c, None)
-
-
-class TestSyncmail(unittest.TestCase):
- def get(self, msg):
- msg = util.sibpath(__file__, msg)
- s = mail.SyncmailMaildirSource(None)
- return s.parse_file(open(msg, "r"), prefix="buildbot/")
-
- def getNoPrefix(self, msg):
- msg = util.sibpath(__file__, msg)
- s = mail.SyncmailMaildirSource(None)
- return s.parse_file(open(msg, "r"))
-
- def testMsgS1(self):
- c = self.get("mail/syncmail.1")
- self.failUnless(c is not None)
- self.assertEqual(c.who, "warner")
- self.assertEqual(set(c.files), set(["buildbot/changes/freshcvsmail.py"]))
- self.assertEqual(c.comments,
- "remove leftover code, leave a temporary compatibility import. Note! Start\nimporting FCMaildirSource from changes.mail instead of changes.freshcvsmail\n")
- self.assertEqual(c.isdir, 0)
-
- def testMsgS2(self):
- c = self.get("mail/syncmail.2")
- self.assertEqual(c.who, "warner")
- self.assertEqual(set(c.files), set(["ChangeLog"]))
- self.assertEqual(c.comments, "\t* NEWS: started adding new features\n")
- self.assertEqual(c.isdir, 0)
-
- def testMsgS3(self):
- c = self.get("mail/syncmail.3")
- self.failUnless(c == None)
-
- def testMsgS4(self):
- c = self.get("mail/syncmail.4")
- self.assertEqual(c.who, "warner")
- self.assertEqual(set(c.files),
- set(["test/mail/syncmail.1",
- "test/mail/syncmail.2",
- "test/mail/syncmail.3"]))
- self.assertEqual(c.comments, "test cases for syncmail parser\n")
- self.assertEqual(c.isdir, 0)
- self.assertEqual(c.branch, None)
-
- # tests a tag
- def testMsgS5(self):
- c = self.getNoPrefix("mail/syncmail.5")
- self.failUnless(c)
- self.assertEqual(c.who, "thomas")
- self.assertEqual(set(c.files),
- set(['test1/MANIFEST',
- 'test1/Makefile.am',
- 'test1/autogen.sh',
- 'test1/configure.in']))
- self.assertEqual(c.branch, "BRANCH-DEVEL")
- self.assertEqual(c.isdir, 0)
-
-
-class TestSVNCommitEmail(unittest.TestCase):
- def get(self, msg, prefix):
- msg = util.sibpath(__file__, msg)
- s = mail.SVNCommitEmailMaildirSource(None)
- return s.parse_file(open(msg, "r"), prefix)
-
- def test1(self):
- c = self.get("mail/svn-commit.1", "spamassassin/trunk/")
- self.failUnless(c)
- self.failUnlessEqual(c.who, "felicity")
- self.failUnlessEqual(set(c.files), set(["sa-update.raw"]))
- self.failUnlessEqual(c.branch, None)
- self.failUnlessEqual(c.comments,
- "bug 4864: remove extraneous front-slash "
- "from gpghomedir path\n")
-
- def test2a(self):
- c = self.get("mail/svn-commit.2", "spamassassin/trunk/")
- self.failIf(c)
-
- def test2b(self):
- c = self.get("mail/svn-commit.2", "spamassassin/branches/3.1/")
- self.failUnless(c)
- self.failUnlessEqual(c.who, "sidney")
- self.failUnlessEqual(set(c.files),
- set(["lib/Mail/SpamAssassin/Timeout.pm",
- "MANIFEST",
- "lib/Mail/SpamAssassin/Logger.pm",
- "lib/Mail/SpamAssassin/Plugin/DCC.pm",
- "lib/Mail/SpamAssassin/Plugin/DomainKeys.pm",
- "lib/Mail/SpamAssassin/Plugin/Pyzor.pm",
- "lib/Mail/SpamAssassin/Plugin/Razor2.pm",
- "lib/Mail/SpamAssassin/Plugin/SPF.pm",
- "lib/Mail/SpamAssassin/SpamdForkScaling.pm",
- "spamd/spamd.raw",
- ]))
- self.failUnlessEqual(c.comments,
- "Bug 4696: consolidated fixes for timeout bugs\n")
-
-
diff --git a/buildbot/buildbot/test/test_mergerequests.py b/buildbot/buildbot/test/test_mergerequests.py
deleted file mode 100644
index e176cf1..0000000
--- a/buildbot/buildbot/test/test_mergerequests.py
+++ /dev/null
@@ -1,196 +0,0 @@
-from twisted.internet import defer, reactor
-from twisted.trial import unittest
-
-from buildbot.sourcestamp import SourceStamp
-from buildbot.process.base import BuildRequest
-from buildbot.process.properties import Properties
-from buildbot.status import builder, base, words
-from buildbot.changes.changes import Change
-
-from buildbot.test.runutils import RunMixin
-
-"""Testcases for master.botmaster.shouldMergeRequests.
-
-"""
-
-master_cfg = """from buildbot.process import factory
-from buildbot.steps import dummy
-from buildbot.buildslave import BuildSlave
-
-f = factory.BuildFactory([
- dummy.Dummy(timeout=0),
- ])
-
-BuildmasterConfig = c = {}
-c['slaves'] = [BuildSlave('bot1', 'sekrit')]
-c['schedulers'] = []
-c['builders'] = []
-c['builders'].append({'name':'dummy', 'slavename':'bot1',
- 'builddir': 'dummy', 'factory': f})
-c['slavePortnum'] = 0
-
-%s
-c['mergeRequests'] = mergeRequests
-"""
-
-class MergeRequestsTest(RunMixin, unittest.TestCase):
- def do_test(self, mergefun, results, reqs = None):
- R = BuildRequest
- S = SourceStamp
- c1 = Change("alice", [], "changed stuff", branch="branch1")
- c2 = Change("alice", [], "changed stuff", branch="branch1")
- c3 = Change("alice", [], "changed stuff", branch="branch1")
- c4 = Change("alice", [], "changed stuff", branch="branch1")
- c5 = Change("alice", [], "changed stuff", branch="branch1")
- c6 = Change("alice", [], "changed stuff", branch="branch1")
- if reqs is None:
- reqs = (R("why", S("branch1", None, None, None), 'test_builder'),
- R("why2", S("branch1", "rev1", None, None), 'test_builder'),
- R("why not", S("branch1", "rev1", None, None), 'test_builder'),
- R("why3", S("branch1", "rev2", None, None), 'test_builder'),
- R("why4", S("branch2", "rev2", None, None), 'test_builder'),
- R("why5", S("branch1", "rev1", (3, "diff"), None), 'test_builder'),
- R("changes", S("branch1", None, None, [c1,c2,c3]), 'test_builder'),
- R("changes", S("branch1", None, None, [c4,c5,c6]), 'test_builder'),
- )
-
- m = self.master
- m.loadConfig(master_cfg % mergefun)
- m.readConfig = True
- m.startService()
- builder = self.control.getBuilder('dummy')
- for req in reqs:
- builder.requestBuild(req)
-
- d = self.connectSlave()
- d.addCallback(self.waitForBuilds, results)
-
- return d
-
- def waitForBuilds(self, r, results):
- d = self.master.botmaster.waitUntilBuilderIdle('dummy')
- d.addCallback(self.checkresults, results)
- return d
-
- def checkresults(self, builder, results):
- s = builder.builder_status
- builds = list(s.generateFinishedBuilds())
- builds.reverse()
- self.assertEqual(len(builds), len(results))
- for i in xrange(len(builds)):
- b = builds[i]
- r = results[i]
- ss = b.getSourceStamp()
- self.assertEquals(b.getReason(), r['reason'])
- self.assertEquals(ss.branch, r['branch'])
- self.assertEquals(len(ss.changes), r['changecount'])
- # print b.getReason(), ss.branch, len(ss.changes), ss.revision
-
- def testDefault(self):
- return self.do_test('mergeRequests = None',
- ({'reason': 'why',
- 'branch': 'branch1',
- 'changecount': 0},
- {'reason': 'why2, why not',
- 'branch': 'branch1',
- 'changecount': 0},
- {'reason': 'why3',
- 'branch': 'branch1',
- 'changecount': 0},
- {'reason': 'why4',
- 'branch': 'branch2',
- 'changecount': 0},
- {'reason': 'why5',
- 'branch': 'branch1',
- 'changecount': 0},
- {'reason': 'changes',
- 'branch': 'branch1',
- 'changecount': 6},
- ))
-
- def testNoMerges(self):
- mergefun = """def mergeRequests(builder, req1, req2):
- return False
-"""
- return self.do_test(mergefun,
- ({'reason': 'why',
- 'branch': 'branch1',
- 'changecount': 0},
- {'reason': 'why2',
- 'branch': 'branch1',
- 'changecount': 0},
- {'reason': 'why not',
- 'branch': 'branch1',
- 'changecount': 0},
- {'reason': 'why3',
- 'branch': 'branch1',
- 'changecount': 0},
- {'reason': 'why4',
- 'branch': 'branch2',
- 'changecount': 0},
- {'reason': 'why5',
- 'branch': 'branch1',
- 'changecount': 0},
- {'reason': 'changes',
- 'branch': 'branch1',
- 'changecount': 3},
- {'reason': 'changes',
- 'branch': 'branch1',
- 'changecount': 3},
- ))
-
- def testReasons(self):
- mergefun = """def mergeRequests(builder, req1, req2):
- return req1.reason == req2.reason
-"""
- return self.do_test(mergefun,
- ({'reason': 'why',
- 'branch': 'branch1',
- 'changecount': 0},
- {'reason': 'why2',
- 'branch': 'branch1',
- 'changecount': 0},
- {'reason': 'why not',
- 'branch': 'branch1',
- 'changecount': 0},
- {'reason': 'why3',
- 'branch': 'branch1',
- 'changecount': 0},
- {'reason': 'why4',
- 'branch': 'branch2',
- 'changecount': 0},
- {'reason': 'why5',
- 'branch': 'branch1',
- 'changecount': 0},
- {'reason': 'changes',
- 'branch': 'branch1',
- 'changecount': 6},
- ))
-
-
- def testProperties(self):
- mergefun = """def mergeRequests(builder, req1, req2):
- return req1.properties == req2.properties
-"""
- R = BuildRequest
- S = SourceStamp
- p1 = Properties(first="value")
- p2 = Properties(first="other value")
- reqs = (R("why", S("branch1", None, None, None), 'test_builder',
- properties = p1),
- R("why", S("branch1", None, None, None), 'test_builder',
- properties = p1),
- R("why", S("branch1", None, None, None), 'test_builder',
- properties = p2),
- R("why", S("branch1", None, None, None), 'test_builder',
- properties = p2),
- )
- return self.do_test(mergefun,
- ({'reason': 'why',
- 'branch': 'branch1',
- 'changecount': 0},
- {'reason': 'why',
- 'branch': 'branch1',
- 'changecount': 0},
- ),
- reqs=reqs)
diff --git a/buildbot/buildbot/test/test_p4poller.py b/buildbot/buildbot/test/test_p4poller.py
deleted file mode 100644
index 54c6325..0000000
--- a/buildbot/buildbot/test/test_p4poller.py
+++ /dev/null
@@ -1,213 +0,0 @@
-import time
-
-from twisted.internet import defer
-from twisted.trial import unittest
-
-from buildbot.changes.changes import Change
-from buildbot.changes.p4poller import P4Source, get_simple_split
-
-first_p4changes = \
-"""Change 1 on 2006/04/13 by slamb@testclient 'first rev'
-"""
-
-second_p4changes = \
-"""Change 3 on 2006/04/13 by bob@testclient 'short desc truncated'
-Change 2 on 2006/04/13 by slamb@testclient 'bar'
-"""
-
-third_p4changes = \
-"""Change 5 on 2006/04/13 by mpatel@testclient 'first rev'
-"""
-
-change_4_log = \
-"""Change 4 by mpatel@testclient on 2006/04/13 21:55:39
-
- short desc truncated because this is a long description.
-"""
-change_3_log = \
-"""Change 3 by bob@testclient on 2006/04/13 21:51:39
-
- short desc truncated because this is a long description.
-"""
-
-change_2_log = \
-"""Change 2 by slamb@testclient on 2006/04/13 21:46:23
-
- creation
-"""
-
-p4change = {
- 3: change_3_log +
-"""Affected files ...
-
-... //depot/myproject/branch_b/branch_b_file#1 add
-... //depot/myproject/branch_b/whatbranch#1 branch
-... //depot/myproject/branch_c/whatbranch#1 branch
-""",
- 2: change_2_log +
-"""Affected files ...
-
-... //depot/myproject/trunk/whatbranch#1 add
-... //depot/otherproject/trunk/something#1 add
-""",
- 5: change_4_log +
-"""Affected files ...
-
-... //depot/myproject/branch_b/branch_b_file#1 add
-... //depot/myproject/branch_b#75 edit
-... //depot/myproject/branch_c/branch_c_file#1 add
-""",
-}
-
-
-class MockP4Source(P4Source):
- """Test P4Source which doesn't actually invoke p4."""
- invocation = 0
-
- def __init__(self, p4changes, p4change, *args, **kwargs):
- P4Source.__init__(self, *args, **kwargs)
- self.p4changes = p4changes
- self.p4change = p4change
-
- def _get_changes(self):
- assert self.working
- result = self.p4changes[self.invocation]
- self.invocation += 1
- return defer.succeed(result)
-
- def _get_describe(self, dummy, num):
- assert self.working
- return defer.succeed(self.p4change[num])
-
-class TestP4Poller(unittest.TestCase):
- def setUp(self):
- self.changes = []
- self.addChange = self.changes.append
-
- def failUnlessIn(self, substr, string):
- # this is for compatibility with python2.2
- if isinstance(string, str):
- self.failUnless(string.find(substr) != -1)
- else:
- self.assertIn(substr, string)
-
- def testCheck(self):
- """successful checks"""
- self.t = MockP4Source(p4changes=[first_p4changes, second_p4changes],
- p4change=p4change,
- p4port=None, p4user=None,
- p4base='//depot/myproject/',
- split_file=lambda x: x.split('/', 1))
- self.t.parent = self
-
- # The first time, it just learns the change to start at.
- self.assert_(self.t.last_change is None)
- self.assert_(not self.t.working)
- return self.t.checkp4().addCallback(self._testCheck2)
-
- def _testCheck2(self, res):
- self.assertEquals(self.changes, [])
- self.assertEquals(self.t.last_change, 1)
-
- # Subsequent times, it returns Change objects for new changes.
- return self.t.checkp4().addCallback(self._testCheck3)
-
- def _testCheck3(self, res):
- self.assertEquals(len(self.changes), 3)
- self.assertEquals(self.t.last_change, 3)
- self.assert_(not self.t.working)
-
- # They're supposed to go oldest to newest, so this one must be first.
- self.assertEquals(self.changes[0].asText(),
- Change(who='slamb',
- files=['whatbranch'],
- comments=change_2_log,
- revision='2',
- when=self.makeTime("2006/04/13 21:46:23"),
- branch='trunk').asText())
-
- # These two can happen in either order, since they're from the same
- # Perforce change.
- self.failUnlessIn(
- Change(who='bob',
- files=['branch_b_file',
- 'whatbranch'],
- comments=change_3_log,
- revision='3',
- when=self.makeTime("2006/04/13 21:51:39"),
- branch='branch_b').asText(),
- [c.asText() for c in self.changes])
- self.failUnlessIn(
- Change(who='bob',
- files=['whatbranch'],
- comments=change_3_log,
- revision='3',
- when=self.makeTime("2006/04/13 21:51:39"),
- branch='branch_c').asText(),
- [c.asText() for c in self.changes])
-
- def makeTime(self, timestring):
- datefmt = '%Y/%m/%d %H:%M:%S'
- when = time.mktime(time.strptime(timestring, datefmt))
- return when
-
- def testFailedChanges(self):
- """'p4 changes' failure is properly ignored"""
- self.t = MockP4Source(p4changes=['Perforce client error:\n...'],
- p4change={},
- p4port=None, p4user=None)
- self.t.parent = self
- d = self.t.checkp4()
- d.addCallback(self._testFailedChanges2)
- return d
-
- def _testFailedChanges2(self, f):
- self.failUnlessEqual(f, None)
- self.assert_(not self.t.working)
-
- def testFailedDescribe(self):
- """'p4 describe' failure is properly ignored"""
- c = dict(p4change)
- c[3] = 'Perforce client error:\n...'
- self.t = MockP4Source(p4changes=[first_p4changes, second_p4changes],
- p4change=c, p4port=None, p4user=None)
- self.t.parent = self
- d = self.t.checkp4()
- d.addCallback(self._testFailedDescribe2)
- return d
-
- def _testFailedDescribe2(self, res):
- # first time finds nothing; check again.
- return self.t.checkp4().addCallback(self._testFailedDescribe3)
-
- def _testFailedDescribe3(self, f):
- self.failUnlessEqual(f, None)
- self.assert_(not self.t.working)
- self.assertEquals(self.t.last_change, 2)
-
- def testAlreadyWorking(self):
- """don't launch a new poll while old is still going"""
- self.t = P4Source()
- self.t.working = True
- self.assert_(self.t.last_change is None)
- d = self.t.checkp4()
- d.addCallback(self._testAlreadyWorking2)
-
- def _testAlreadyWorking2(self, res):
- self.assert_(self.t.last_change is None)
-
- def testSplitFile(self):
- """Make sure split file works on branch only changes"""
- self.t = MockP4Source(p4changes=[third_p4changes],
- p4change=p4change,
- p4port=None, p4user=None,
- p4base='//depot/myproject/',
- split_file=get_simple_split)
- self.t.parent = self
- self.t.last_change = 50
- d = self.t.checkp4()
- d.addCallback(self._testSplitFile)
-
- def _testSplitFile(self, res):
- self.assertEquals(len(self.changes), 2)
- self.assertEquals(self.t.last_change, 5)
diff --git a/buildbot/buildbot/test/test_package_rpm.py b/buildbot/buildbot/test/test_package_rpm.py
deleted file mode 100644
index 05d2841..0000000
--- a/buildbot/buildbot/test/test_package_rpm.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# test step.package.rpm.*
-
-from twisted.trial import unittest
-
-from buildbot.test.runutils import SlaveCommandTestBase
-from buildbot.steps.package.rpm import RpmBuild, RpmLint, RpmSpec
-
-
-class TestRpmBuild(unittest.TestCase):
- """
- Tests the package.rpm.RpmBuild class.
- """
-
- def test_creation(self):
- """
- Test that instances are created with proper data.
- """
- rb = RpmBuild()
- self.assertEquals(rb.specfile, None)
- self.assertFalse(rb.autoRelease)
- self.assertFalse(rb.vcsRevision)
-
- rb2 = RpmBuild('aspec.spec', autoRelease=True, vcsRevision=True)
- self.assertEquals(rb2.specfile, 'aspec.spec')
- self.assertTrue(rb2.autoRelease)
- self.assertTrue(rb2.vcsRevision)
-
- def test_rpmbuild(self):
- """
- Verifies the rpmbuild string is what we would expect.
- """
- rb = RpmBuild('topdir', 'buildir', 'rpmdir', 'sourcedir',
- 'specdir', 'dist')
- expected_result = ('rpmbuild --define "_topdir buildir"'
- ' --define "_builddir rpmdir" --define "_rpmdir sourcedir"'
- ' --define "_sourcedir specdir" --define "_specdir dist"'
- ' --define "_srcrpmdir `pwd`" --define "dist .el5"')
- self.assertEquals(rb.rpmbuild, expected_result)
-
-
-class TestRpmLint(unittest.TestCase):
- """
- Tests the package.rpm.RpmLint class.
- """
-
- def test_command(self):
- """
- Test that instance command variable is created with proper data.
- """
- rl = RpmLint()
- expected_result = ["/usr/bin/rpmlint", "-i", '*rpm']
- self.assertEquals(rl.command, expected_result)
-
-
-class TestRpmSpec(unittest.TestCase):
- """
- Tests the package.rpm.RpmSpec class.
- """
-
- def test_creation(self):
- """
- Test that instances are created with proper data.
- """
- rs = RpmSpec()
- self.assertEquals(rs.specfile, None)
- self.assertEquals(rs.pkg_name, None)
- self.assertEquals(rs.pkg_version, None)
- self.assertFalse(rs.loaded)
-
- def test_load(self):
- try:
- from cStringIO import StringIO
- except ImportError, ie:
- from StringIO import StringIO
-
- specfile = StringIO()
- specfile.write("""\
-Name: example
-Version: 1.0.0
-Release: 1%{?dist}
-Summary: An example spec
-
-Group: Development/Libraries
-License: GPLv2+
-URL: http://www.example.dom
-Source0: %{name}-%{version}.tar.gz
-BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
-
-BuildArch: noarch
-Requires: python >= 2.4
-BuildRequires: python-setuptools
-
-
-%description
-An example spec for an rpm.
-
-
-%prep
-%setup -q
-
-
-%build
-%{__python} setup.py build
-
-
-%install
-rm -rf $RPM_BUILD_ROOT
-%{__python} setup.py install -O1 --skip-build --root $RPM_BUILD_ROOT/
-
-
-%clean
-rm -rf $RPM_BUILD_ROOT
-
-
-%files
-%defattr(-,root,root,-)
-%doc INSTALL LICENSE AUTHORS COPYING
-# For noarch packages: sitelib
-%{python_sitelib}/*
-
-
-%changelog
-* Wed Jan 7 2009 Steve 'Ashcrow' Milner <smilner+buildbot@redhat.com> - \
-1.0.0-1
-- example""")
- specfile.flush()
- specfile.seek(0)
- rs = RpmSpec(specfile)
- rs.load()
- self.assertTrue(rs.loaded)
- self.assertEquals(rs.pkg_name, 'example')
- self.assertEquals(rs.pkg_version, '1.0.0')
diff --git a/buildbot/buildbot/test/test_properties.py b/buildbot/buildbot/test/test_properties.py
deleted file mode 100644
index a8973dd..0000000
--- a/buildbot/buildbot/test/test_properties.py
+++ /dev/null
@@ -1,274 +0,0 @@
-# -*- test-case-name: buildbot.test.test_properties -*-
-
-import os
-
-from twisted.trial import unittest
-
-from buildbot.sourcestamp import SourceStamp
-from buildbot.process import base
-from buildbot.process.properties import WithProperties, Properties
-from buildbot.status import builder
-from buildbot.slave.commands import rmdirRecursive
-from buildbot.test.runutils import RunMixin
-
-
-class FakeBuild:
- pass
-class FakeBuildMaster:
- properties = Properties(masterprop="master")
-class FakeBotMaster:
- parent = FakeBuildMaster()
-class FakeBuilder:
- statusbag = None
- name = "fakebuilder"
- botmaster = FakeBotMaster()
-class FakeSlave:
- slavename = "bot12"
- properties = Properties(slavename="bot12")
-class FakeSlaveBuilder:
- slave = FakeSlave()
- def getSlaveCommandVersion(self, command, oldversion=None):
- return "1.10"
-class FakeScheduler:
- name = "fakescheduler"
-
-class TestProperties(unittest.TestCase):
- def setUp(self):
- self.props = Properties()
-
- def testDictBehavior(self):
- self.props.setProperty("do-tests", 1, "scheduler")
- self.props.setProperty("do-install", 2, "scheduler")
-
- self.assert_(self.props.has_key('do-tests'))
- self.failUnlessEqual(self.props['do-tests'], 1)
- self.failUnlessEqual(self.props['do-install'], 2)
- self.assertRaises(KeyError, lambda : self.props['do-nothing'])
- self.failUnlessEqual(self.props.getProperty('do-install'), 2)
-
- def testUpdate(self):
- self.props.setProperty("x", 24, "old")
- newprops = { 'a' : 1, 'b' : 2 }
- self.props.update(newprops, "new")
-
- self.failUnlessEqual(self.props.getProperty('x'), 24)
- self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
- self.failUnlessEqual(self.props.getProperty('a'), 1)
- self.failUnlessEqual(self.props.getPropertySource('a'), 'new')
-
- def testUpdateFromProperties(self):
- self.props.setProperty("x", 24, "old")
- newprops = Properties()
- newprops.setProperty('a', 1, "new")
- newprops.setProperty('b', 2, "new")
- self.props.updateFromProperties(newprops)
-
- self.failUnlessEqual(self.props.getProperty('x'), 24)
- self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
- self.failUnlessEqual(self.props.getProperty('a'), 1)
- self.failUnlessEqual(self.props.getPropertySource('a'), 'new')
-
- # render() is pretty well tested by TestWithProperties
-
-class TestWithProperties(unittest.TestCase):
- def setUp(self):
- self.props = Properties()
-
- def testBasic(self):
- # test basic substitution with WithProperties
- self.props.setProperty("revision", "47", "test")
- command = WithProperties("build-%s.tar.gz", "revision")
- self.failUnlessEqual(self.props.render(command),
- "build-47.tar.gz")
-
- def testDict(self):
- # test dict-style substitution with WithProperties
- self.props.setProperty("other", "foo", "test")
- command = WithProperties("build-%(other)s.tar.gz")
- self.failUnlessEqual(self.props.render(command),
- "build-foo.tar.gz")
-
- def testDictColonMinus(self):
- # test dict-style substitution with WithProperties
- self.props.setProperty("prop1", "foo", "test")
- command = WithProperties("build-%(prop1:-empty)s-%(prop2:-empty)s.tar.gz")
- self.failUnlessEqual(self.props.render(command),
- "build-foo-empty.tar.gz")
-
- def testDictColonPlus(self):
- # test dict-style substitution with WithProperties
- self.props.setProperty("prop1", "foo", "test")
- command = WithProperties("build-%(prop1:+exists)s-%(prop2:+exists)s.tar.gz")
- self.failUnlessEqual(self.props.render(command),
- "build-exists-.tar.gz")
-
- def testEmpty(self):
- # None should render as ''
- self.props.setProperty("empty", None, "test")
- command = WithProperties("build-%(empty)s.tar.gz")
- self.failUnlessEqual(self.props.render(command),
- "build-.tar.gz")
-
- def testRecursiveList(self):
- self.props.setProperty("x", 10, "test")
- self.props.setProperty("y", 20, "test")
- command = [ WithProperties("%(x)s %(y)s"), "and",
- WithProperties("%(y)s %(x)s") ]
- self.failUnlessEqual(self.props.render(command),
- ["10 20", "and", "20 10"])
-
- def testRecursiveTuple(self):
- self.props.setProperty("x", 10, "test")
- self.props.setProperty("y", 20, "test")
- command = ( WithProperties("%(x)s %(y)s"), "and",
- WithProperties("%(y)s %(x)s") )
- self.failUnlessEqual(self.props.render(command),
- ("10 20", "and", "20 10"))
-
- def testRecursiveDict(self):
- self.props.setProperty("x", 10, "test")
- self.props.setProperty("y", 20, "test")
- command = { WithProperties("%(x)s %(y)s") :
- WithProperties("%(y)s %(x)s") }
- self.failUnlessEqual(self.props.render(command),
- {"10 20" : "20 10"})
-
-class BuildProperties(unittest.TestCase):
- """Test the properties that a build should have."""
- def setUp(self):
- self.builder = FakeBuilder()
- self.builder_status = builder.BuilderStatus("fakebuilder")
- self.builder_status.basedir = "test_properties"
- self.builder_status.nextBuildNumber = 5
- rmdirRecursive(self.builder_status.basedir)
- os.mkdir(self.builder_status.basedir)
- self.build_status = self.builder_status.newBuild()
- req = base.BuildRequest("reason",
- SourceStamp(branch="branch2", revision="1234"),
- 'test_builder',
- properties=Properties(scheduler="fakescheduler"))
- self.build = base.Build([req])
- self.build.build_status = self.build_status
- self.build.setBuilder(self.builder)
- self.build.setupProperties()
- self.build.setupSlaveBuilder(FakeSlaveBuilder())
-
- def testProperties(self):
- self.failUnlessEqual(self.build.getProperty("scheduler"), "fakescheduler")
- self.failUnlessEqual(self.build.getProperty("branch"), "branch2")
- self.failUnlessEqual(self.build.getProperty("revision"), "1234")
- self.failUnlessEqual(self.build.getProperty("slavename"), "bot12")
- self.failUnlessEqual(self.build.getProperty("buildnumber"), 5)
- self.failUnlessEqual(self.build.getProperty("buildername"), "fakebuilder")
- self.failUnlessEqual(self.build.getProperty("masterprop"), "master")
-
-run_config = """
-from buildbot.process import factory
-from buildbot.steps.shell import ShellCommand, WithProperties
-from buildbot.buildslave import BuildSlave
-s = factory.s
-
-BuildmasterConfig = c = {}
-c['slaves'] = [BuildSlave('bot1', 'sekrit', properties={'slprop':'slprop'})]
-c['schedulers'] = []
-c['slavePortnum'] = 0
-c['properties'] = { 'global' : 'global' }
-
-# Note: when run against twisted-1.3.0, this locks up about 5% of the time. I
-# suspect that a command with no output that finishes quickly triggers a race
-# condition in 1.3.0's process-reaping code. The 'touch' process becomes a
-# zombie and the step never completes. To keep this from messing up the unit
-# tests too badly, this step runs with a reduced timeout.
-
-f1 = factory.BuildFactory([s(ShellCommand,
- flunkOnFailure=True,
- command=['touch',
- WithProperties('%s-%s-%s',
- 'slavename', 'global', 'slprop'),
- ],
- workdir='.',
- timeout=10,
- )])
-
-b1 = {'name': 'full1', 'slavename': 'bot1', 'builddir': 'bd1', 'factory': f1}
-c['builders'] = [b1]
-
-"""
-
-class Run(RunMixin, unittest.TestCase):
- def testInterpolate(self):
- # run an actual build with a step that interpolates a build property
- d = self.master.loadConfig(run_config)
- d.addCallback(lambda res: self.master.startService())
- d.addCallback(lambda res: self.connectOneSlave("bot1"))
- d.addCallback(lambda res: self.requestBuild("full1"))
- d.addCallback(self.failUnlessBuildSucceeded)
- def _check_touch(res):
- f = os.path.join("slavebase-bot1", "bd1", "bot1-global-slprop")
- self.failUnless(os.path.exists(f))
- return res
- d.addCallback(_check_touch)
- return d
-
- SetProperty_base_config = """
-from buildbot.process import factory
-from buildbot.steps.shell import ShellCommand, SetProperty, WithProperties
-from buildbot.buildslave import BuildSlave
-s = factory.s
-
-BuildmasterConfig = c = {}
-c['slaves'] = [BuildSlave('bot1', 'sekrit')]
-c['schedulers'] = []
-c['slavePortnum'] = 0
-
-f1 = factory.BuildFactory([
-##STEPS##
-])
-
-b1 = {'name': 'full1', 'slavename': 'bot1', 'builddir': 'bd1', 'factory': f1}
-c['builders'] = [b1]
-"""
-
- SetPropertySimple_config = SetProperty_base_config.replace("##STEPS##", """
- SetProperty(property='foo', command="echo foo"),
- SetProperty(property=WithProperties('wp'), command="echo wp"),
- SetProperty(property='bar', command="echo bar", strip=False),
- """)
-
- def testSetPropertySimple(self):
- d = self.master.loadConfig(self.SetPropertySimple_config)
- d.addCallback(lambda res: self.master.startService())
- d.addCallback(lambda res: self.connectOneSlave("bot1"))
- d.addCallback(lambda res: self.requestBuild("full1"))
- d.addCallback(self.failUnlessBuildSucceeded)
- def _check_props(bs):
- self.failUnlessEqual(bs.getProperty("foo"), "foo")
- self.failUnlessEqual(bs.getProperty("wp"), "wp")
- # (will this fail on some platforms, due to newline differences?)
- self.failUnlessEqual(bs.getProperty("bar"), "bar\n")
- return bs
- d.addCallback(_check_props)
- return d
-
- SetPropertyExtractFn_config = SetProperty_base_config.replace("##STEPS##", """
- SetProperty(
- extract_fn=lambda rc,stdout,stderr : {
- 'foo' : stdout.strip(),
- 'bar' : stderr.strip() },
- command="echo foo; echo bar >&2"),
- """)
-
- def testSetPropertyExtractFn(self):
- d = self.master.loadConfig(self.SetPropertyExtractFn_config)
- d.addCallback(lambda res: self.master.startService())
- d.addCallback(lambda res: self.connectOneSlave("bot1"))
- d.addCallback(lambda res: self.requestBuild("full1"))
- d.addCallback(self.failUnlessBuildSucceeded)
- def _check_props(bs):
- self.failUnlessEqual(bs.getProperty("foo"), "foo")
- self.failUnlessEqual(bs.getProperty("bar"), "bar")
- return bs
- d.addCallback(_check_props)
- return d
-
-# we test got_revision in test_vc
diff --git a/buildbot/buildbot/test/test_reconfig.py b/buildbot/buildbot/test/test_reconfig.py
deleted file mode 100644
index c4c3922..0000000
--- a/buildbot/buildbot/test/test_reconfig.py
+++ /dev/null
@@ -1,91 +0,0 @@
-from twisted.trial import unittest
-from twisted.internet import reactor, defer
-from twisted.python import log
-
-from buildbot.test.runutils import RunMixin
-from buildbot.sourcestamp import SourceStamp
-
-config_base = """
-from buildbot.process import factory
-from buildbot.steps import dummy
-from buildbot.buildslave import BuildSlave
-from buildbot.scheduler import Triggerable, Dependent
-
-BuildmasterConfig = c = {}
-
-f = factory.BuildFactory()
-f.addStep(dummy.Dummy, timeout=%d)
-
-c['slaves'] = [BuildSlave('bot1', 'sekrit')]
-
-upstream = Triggerable('s_upstream', ['upstream'], {'prop': '%s'})
-dep = Dependent('s_dep', upstream, ['depend'], {'dep prop': '%s'})
-c['schedulers'] = [upstream, dep]
-c['builders'] = [{'name':'upstream', 'slavename':'bot1',
- 'builddir': 'upstream', 'factory': f},
- {'name':'depend', 'slavename':'bot1',
- 'builddir': 'depend', 'factory': f}]
-c['slavePortnum'] = 0
-"""
-
-class DependingScheduler(RunMixin, unittest.TestCase):
- '''Test an upstream and a dependent scheduler while reconfiguring.'''
-
- def testReconfig(self):
- self.reconfigured = 0
- self.master.loadConfig(config_base % (1, 'prop value', 'dep prop value'))
- self.prop_value = 'prop value'
- self.dep_prop_value = 'dep prop value'
- self.master.readConfig = True
- self.master.startService()
- d = self.connectSlave(builders=['upstream', 'depend'])
- d.addCallback(self._triggerUpstream)
- return d
- def _triggerUpstream(self, res):
- log.msg("trigger upstream")
- ss = SourceStamp()
- upstream = [s for s in self.master.allSchedulers()
- if s.name == 's_upstream'][0]
- d = upstream.trigger(ss)
- d.addCallback(self._gotBuild)
- return d
-
- def _gotBuild(self, res):
- log.msg("done")
- d = defer.Deferred()
- d.addCallback(self._doChecks)
- reactor.callLater(2, d.callback, None)
- return d
-
- def _doChecks(self, res):
- log.msg("starting tests")
- ub = self.status.getBuilder('upstream').getLastFinishedBuild()
- tb = self.status.getBuilder('depend').getLastFinishedBuild()
- self.assertEqual(ub.getProperty('prop'), self.prop_value)
- self.assertEqual(ub.getNumber(), self.reconfigured)
- self.assertEqual(tb.getProperty('dep prop'), self.dep_prop_value)
- self.assertEqual(tb.getNumber(), self.reconfigured)
-
- # now further on to the reconfig
- if self.reconfigured > 2:
- # actually, we're done,
- return
- if self.reconfigured == 0:
- # reconfig without changes now
- d = self.master.loadConfig(config_base% (1, 'prop value',
- 'dep prop value'))
- elif self.reconfigured == 1:
- # reconfig with changes to upstream now
- d = self.master.loadConfig(config_base% (1, 'other prop value',
- 'dep prop value'))
- self.prop_value = 'other prop value'
- self.dep_prop_value = 'dep prop value'
- else:
- # reconfig with changes to dep now
- d = self.master.loadConfig(config_base% (1, 'other prop value',
- 'other dep prop value'))
- self.prop_value = 'other prop value'
- self.dep_prop_value = 'other dep prop value'
- self.reconfigured += 1
- d.addCallback(self._triggerUpstream)
- return d
diff --git a/buildbot/buildbot/test/test_run.py b/buildbot/buildbot/test/test_run.py
deleted file mode 100644
index a04ea5b..0000000
--- a/buildbot/buildbot/test/test_run.py
+++ /dev/null
@@ -1,1199 +0,0 @@
-# -*- test-case-name: buildbot.test.test_run -*-
-
-from twisted.trial import unittest
-from twisted.internet import reactor, defer
-import os
-
-from buildbot import master, interfaces
-from buildbot.sourcestamp import SourceStamp
-from buildbot.changes import changes
-from buildbot.status import builder
-from buildbot.process.base import BuildRequest
-
-from buildbot.test.runutils import RunMixin, TestFlagMixin, rmtree
-
-config_base = """
-from buildbot.process import factory
-from buildbot.steps import dummy
-from buildbot.buildslave import BuildSlave
-s = factory.s
-
-f1 = factory.QuickBuildFactory('fakerep', 'cvsmodule', configure=None)
-
-f2 = factory.BuildFactory([
- dummy.Dummy(timeout=1),
- dummy.RemoteDummy(timeout=2),
- ])
-
-BuildmasterConfig = c = {}
-c['slaves'] = [BuildSlave('bot1', 'sekrit')]
-c['schedulers'] = []
-c['builders'] = []
-c['builders'].append({'name':'quick', 'slavename':'bot1',
- 'builddir': 'quickdir', 'factory': f1})
-c['slavePortnum'] = 0
-"""
-
-config_run = config_base + """
-from buildbot.scheduler import Scheduler
-c['schedulers'] = [Scheduler('quick', None, 120, ['quick'])]
-"""
-
-config_can_build = config_base + """
-from buildbot.buildslave import BuildSlave
-c['slaves'] = [ BuildSlave('bot1', 'sekrit') ]
-
-from buildbot.scheduler import Scheduler
-c['schedulers'] = [Scheduler('dummy', None, 0.1, ['dummy'])]
-
-c['builders'] = [{'name': 'dummy', 'slavename': 'bot1',
- 'builddir': 'dummy1', 'factory': f2}]
-"""
-
-config_cant_build = config_can_build + """
-class MyBuildSlave(BuildSlave):
- def canStartBuild(self): return False
-c['slaves'] = [ MyBuildSlave('bot1', 'sekrit') ]
-"""
-
-config_concurrency = config_base + """
-from buildbot.buildslave import BuildSlave
-c['slaves'] = [ BuildSlave('bot1', 'sekrit', max_builds=1) ]
-
-from buildbot.scheduler import Scheduler
-c['schedulers'] = [Scheduler('dummy', None, 0.1, ['dummy', 'dummy2'])]
-
-c['builders'].append({'name': 'dummy', 'slavename': 'bot1',
- 'builddir': 'dummy', 'factory': f2})
-c['builders'].append({'name': 'dummy2', 'slavename': 'bot1',
- 'builddir': 'dummy2', 'factory': f2})
-"""
-
-config_2 = config_base + """
-c['builders'] = [{'name': 'dummy', 'slavename': 'bot1',
- 'builddir': 'dummy1', 'factory': f2},
- {'name': 'testdummy', 'slavename': 'bot1',
- 'builddir': 'dummy2', 'factory': f2, 'category': 'test'}]
-"""
-
-config_3 = config_2 + """
-c['builders'].append({'name': 'adummy', 'slavename': 'bot1',
- 'builddir': 'adummy3', 'factory': f2})
-c['builders'].append({'name': 'bdummy', 'slavename': 'bot1',
- 'builddir': 'adummy4', 'factory': f2,
- 'category': 'test'})
-"""
-
-config_4 = config_base + """
-c['builders'] = [{'name': 'dummy', 'slavename': 'bot1',
- 'builddir': 'dummy', 'factory': f2}]
-"""
-
-config_4_newbasedir = config_4 + """
-c['builders'] = [{'name': 'dummy', 'slavename': 'bot1',
- 'builddir': 'dummy2', 'factory': f2}]
-"""
-
-config_4_newbuilder = config_4_newbasedir + """
-c['builders'].append({'name': 'dummy2', 'slavename': 'bot1',
- 'builddir': 'dummy23', 'factory': f2})
-"""
-
-class Run(unittest.TestCase):
- def rmtree(self, d):
- rmtree(d)
-
- def testMaster(self):
- self.rmtree("basedir")
- os.mkdir("basedir")
- m = master.BuildMaster("basedir")
- m.loadConfig(config_run)
- m.readConfig = True
- m.startService()
- cm = m.change_svc
- c = changes.Change("bob", ["Makefile", "foo/bar.c"], "changed stuff")
- cm.addChange(c)
- # verify that the Scheduler is now waiting
- s = m.allSchedulers()[0]
- self.failUnless(s.timer)
- # halting the service will also stop the timer
- d = defer.maybeDeferred(m.stopService)
- return d
-
-class CanStartBuild(RunMixin, unittest.TestCase):
- def rmtree(self, d):
- rmtree(d)
-
- def testCanStartBuild(self):
- return self.do_test(config_can_build, True)
-
- def testCantStartBuild(self):
- return self.do_test(config_cant_build, False)
-
- def do_test(self, config, builder_should_run):
- self.master.loadConfig(config)
- self.master.readConfig = True
- self.master.startService()
- d = self.connectSlave()
-
- # send a change
- cm = self.master.change_svc
- c = changes.Change("bob", ["Makefile", "foo/bar.c"], "changed stuff")
- cm.addChange(c)
-
- d.addCallback(self._do_test1, builder_should_run)
-
- return d
-
- def _do_test1(self, res, builder_should_run):
- # delay a little bit. Note that relying upon timers is a bit fragile,
- # in this case we're hoping that our 0.5 second timer will land us
- # somewhere in the middle of the [0.1s, 3.1s] window (after the 0.1
- # second Scheduler fires, then during the 3-second build), so that
- # when we sample BuildSlave.state, we'll see BUILDING (or IDLE if the
- # slave was told to be unavailable). On a heavily loaded system, our
- # 0.5 second timer might not actually fire until after the build has
- # completed. In the long run, it would be good to change this test to
- # pass under those circumstances too.
- d = defer.Deferred()
- reactor.callLater(.5, d.callback, builder_should_run)
- d.addCallback(self._do_test2)
- return d
-
- def _do_test2(self, builder_should_run):
- b = self.master.botmaster.builders['dummy']
- self.failUnless(len(b.slaves) == 1)
-
- bs = b.slaves[0]
- from buildbot.process.builder import IDLE, BUILDING
- if builder_should_run:
- self.failUnlessEqual(bs.state, BUILDING)
- else:
- self.failUnlessEqual(bs.state, IDLE)
-
-
-class ConcurrencyLimit(RunMixin, unittest.TestCase):
-
- def testConcurrencyLimit(self):
- d = self.master.loadConfig(config_concurrency)
- d.addCallback(lambda res: self.master.startService())
- d.addCallback(lambda res: self.connectSlave())
-
- def _send(res):
- # send a change. This will trigger both builders at the same
- # time, but since they share a slave, the max_builds=1 setting
- # will insure that only one of the two builds gets to run.
- cm = self.master.change_svc
- c = changes.Change("bob", ["Makefile", "foo/bar.c"],
- "changed stuff")
- cm.addChange(c)
- d.addCallback(_send)
-
- def _delay(res):
- d1 = defer.Deferred()
- reactor.callLater(1, d1.callback, None)
- # this test depends upon this 1s delay landing us in the middle
- # of one of the builds.
- return d1
- d.addCallback(_delay)
-
- def _check(res):
- builders = [ self.master.botmaster.builders[bn]
- for bn in ('dummy', 'dummy2') ]
- for builder in builders:
- self.failUnless(len(builder.slaves) == 1)
-
- from buildbot.process.builder import BUILDING
- building_bs = [ builder
- for builder in builders
- if builder.slaves[0].state == BUILDING ]
- # assert that only one build is running right now. If the
- # max_builds= weren't in effect, this would be 2.
- self.failUnlessEqual(len(building_bs), 1)
- d.addCallback(_check)
-
- return d
-
-
-class Ping(RunMixin, unittest.TestCase):
- def testPing(self):
- self.master.loadConfig(config_2)
- self.master.readConfig = True
- self.master.startService()
-
- d = self.connectSlave()
- d.addCallback(self._testPing_1)
- return d
-
- def _testPing_1(self, res):
- d = interfaces.IControl(self.master).getBuilder("dummy").ping(1)
- d.addCallback(self._testPing_2)
- return d
-
- def _testPing_2(self, res):
- pass
-
-class BuilderNames(unittest.TestCase):
-
- def testGetBuilderNames(self):
- os.mkdir("bnames")
- m = master.BuildMaster("bnames")
- s = m.getStatus()
-
- m.loadConfig(config_3)
- m.readConfig = True
-
- self.failUnlessEqual(s.getBuilderNames(),
- ["dummy", "testdummy", "adummy", "bdummy"])
- self.failUnlessEqual(s.getBuilderNames(categories=['test']),
- ["testdummy", "bdummy"])
-
-class Disconnect(RunMixin, unittest.TestCase):
-
- def setUp(self):
- RunMixin.setUp(self)
-
- # verify that disconnecting the slave during a build properly
- # terminates the build
- m = self.master
- s = self.status
- c = self.control
-
- m.loadConfig(config_2)
- m.readConfig = True
- m.startService()
-
- self.failUnlessEqual(s.getBuilderNames(), ["dummy", "testdummy"])
- self.s1 = s1 = s.getBuilder("dummy")
- self.failUnlessEqual(s1.getName(), "dummy")
- self.failUnlessEqual(s1.getState(), ("offline", []))
- self.failUnlessEqual(s1.getCurrentBuilds(), [])
- self.failUnlessEqual(s1.getLastFinishedBuild(), None)
- self.failUnlessEqual(s1.getBuild(-1), None)
-
- d = self.connectSlave()
- d.addCallback(self._disconnectSetup_1)
- return d
-
- def _disconnectSetup_1(self, res):
- self.failUnlessEqual(self.s1.getState(), ("idle", []))
-
-
- def verifyDisconnect(self, bs):
- self.failUnless(bs.isFinished())
-
- step1 = bs.getSteps()[0]
- self.failUnlessEqual(step1.getText(), ["delay", "interrupted"])
- self.failUnlessEqual(step1.getResults()[0], builder.FAILURE)
-
- self.failUnlessEqual(bs.getResults(), builder.FAILURE)
-
- def verifyDisconnect2(self, bs):
- self.failUnless(bs.isFinished())
-
- step1 = bs.getSteps()[1]
- self.failUnlessEqual(step1.getText(), ["remote", "delay", "2 secs",
- "failed", "slave", "lost"])
- self.failUnlessEqual(step1.getResults()[0], builder.FAILURE)
-
- self.failUnlessEqual(bs.getResults(), builder.FAILURE)
-
- def submitBuild(self):
- ss = SourceStamp()
- br = BuildRequest("forced build", ss, "dummy")
- self.control.getBuilder("dummy").requestBuild(br)
- d = defer.Deferred()
- def _started(bc):
- br.unsubscribe(_started)
- d.callback(bc)
- br.subscribe(_started)
- return d
-
- def testIdle2(self):
- # now suppose the slave goes missing
- self.disappearSlave(allowReconnect=False)
-
- # forcing a build will work: the build detect that the slave is no
- # longer available and will be re-queued. Wait 5 seconds, then check
- # to make sure the build is still in the 'waiting for a slave' queue.
- self.control.getBuilder("dummy").original.START_BUILD_TIMEOUT = 1
- req = BuildRequest("forced build", SourceStamp(), "test_builder")
- self.failUnlessEqual(req.startCount, 0)
- self.control.getBuilder("dummy").requestBuild(req)
- # this should ping the slave, which doesn't respond, and then give up
- # after a second. The BuildRequest will be re-queued, and its
- # .startCount will be incremented.
- d = defer.Deferred()
- d.addCallback(self._testIdle2_1, req)
- reactor.callLater(3, d.callback, None)
- return d
- testIdle2.timeout = 5
-
- def _testIdle2_1(self, res, req):
- self.failUnlessEqual(req.startCount, 1)
- cancelled = req.cancel()
- self.failUnless(cancelled)
-
-
- def testBuild1(self):
- # this next sequence is timing-dependent. The dummy build takes at
- # least 3 seconds to complete, and this batch of commands must
- # complete within that time.
- #
- d = self.submitBuild()
- d.addCallback(self._testBuild1_1)
- return d
-
- def _testBuild1_1(self, bc):
- bs = bc.getStatus()
- # now kill the slave before it gets to start the first step
- d = self.shutdownAllSlaves() # dies before it gets started
- d.addCallback(self._testBuild1_2, bs)
- return d # TODO: this used to have a 5-second timeout
-
- def _testBuild1_2(self, res, bs):
- # now examine the just-stopped build and make sure it is really
- # stopped. This is checking for bugs in which the slave-detach gets
- # missed or causes an exception which prevents the build from being
- # marked as "finished due to an error".
- d = bs.waitUntilFinished()
- d2 = self.master.botmaster.waitUntilBuilderDetached("dummy")
- dl = defer.DeferredList([d, d2])
- dl.addCallback(self._testBuild1_3, bs)
- return dl # TODO: this had a 5-second timeout too
-
- def _testBuild1_3(self, res, bs):
- self.failUnlessEqual(self.s1.getState()[0], "offline")
- self.verifyDisconnect(bs)
-
-
- def testBuild2(self):
- # this next sequence is timing-dependent
- d = self.submitBuild()
- d.addCallback(self._testBuild2_1)
- return d
- testBuild2.timeout = 30
-
- def _testBuild2_1(self, bc):
- bs = bc.getStatus()
- # shutdown the slave while it's running the first step
- reactor.callLater(0.5, self.shutdownAllSlaves)
-
- d = bs.waitUntilFinished()
- d.addCallback(self._testBuild2_2, bs)
- return d
-
- def _testBuild2_2(self, res, bs):
- # we hit here when the build has finished. The builder is still being
- # torn down, however, so spin for another second to allow the
- # callLater(0) in Builder.detached to fire.
- d = defer.Deferred()
- reactor.callLater(1, d.callback, None)
- d.addCallback(self._testBuild2_3, bs)
- return d
-
- def _testBuild2_3(self, res, bs):
- self.failUnlessEqual(self.s1.getState()[0], "offline")
- self.verifyDisconnect(bs)
-
-
- def testBuild3(self):
- # this next sequence is timing-dependent
- d = self.submitBuild()
- d.addCallback(self._testBuild3_1)
- return d
- testBuild3.timeout = 30
-
- def _testBuild3_1(self, bc):
- bs = bc.getStatus()
- # kill the slave while it's running the first step
- reactor.callLater(0.5, self.killSlave)
- d = bs.waitUntilFinished()
- d.addCallback(self._testBuild3_2, bs)
- return d
-
- def _testBuild3_2(self, res, bs):
- # the builder is still being torn down, so give it another second
- d = defer.Deferred()
- reactor.callLater(1, d.callback, None)
- d.addCallback(self._testBuild3_3, bs)
- return d
-
- def _testBuild3_3(self, res, bs):
- self.failUnlessEqual(self.s1.getState()[0], "offline")
- self.verifyDisconnect(bs)
-
-
- def testBuild4(self):
- # this next sequence is timing-dependent
- d = self.submitBuild()
- d.addCallback(self._testBuild4_1)
- return d
- testBuild4.timeout = 30
-
- def _testBuild4_1(self, bc):
- bs = bc.getStatus()
- # kill the slave while it's running the second (remote) step
- reactor.callLater(1.5, self.killSlave)
- d = bs.waitUntilFinished()
- d.addCallback(self._testBuild4_2, bs)
- return d
-
- def _testBuild4_2(self, res, bs):
- # at this point, the slave is in the process of being removed, so it
- # could either be 'idle' or 'offline'. I think there is a
- # reactor.callLater(0) standing between here and the offline state.
- #reactor.iterate() # TODO: remove the need for this
-
- self.failUnlessEqual(self.s1.getState()[0], "offline")
- self.verifyDisconnect2(bs)
-
-
- def testInterrupt(self):
- # this next sequence is timing-dependent
- d = self.submitBuild()
- d.addCallback(self._testInterrupt_1)
- return d
- testInterrupt.timeout = 30
-
- def _testInterrupt_1(self, bc):
- bs = bc.getStatus()
- # halt the build while it's running the first step
- reactor.callLater(0.5, bc.stopBuild, "bang go splat")
- d = bs.waitUntilFinished()
- d.addCallback(self._testInterrupt_2, bs)
- return d
-
- def _testInterrupt_2(self, res, bs):
- self.verifyDisconnect(bs)
-
-
- def testDisappear(self):
- bc = self.control.getBuilder("dummy")
-
- # ping should succeed
- d = bc.ping(1)
- d.addCallback(self._testDisappear_1, bc)
- return d
-
- def _testDisappear_1(self, res, bc):
- self.failUnlessEqual(res, True)
-
- # now, before any build is run, make the slave disappear
- self.disappearSlave(allowReconnect=False)
-
- # at this point, a ping to the slave should timeout
- d = bc.ping(1)
- d.addCallback(self. _testDisappear_2)
- return d
- def _testDisappear_2(self, res):
- self.failUnlessEqual(res, False)
-
- def testDuplicate(self):
- bc = self.control.getBuilder("dummy")
- bs = self.status.getBuilder("dummy")
- ss = bs.getSlaves()[0]
-
- self.failUnless(ss.isConnected())
- self.failUnlessEqual(ss.getAdmin(), "one")
-
- # now, before any build is run, make the first slave disappear
- self.disappearSlave(allowReconnect=False)
-
- d = self.master.botmaster.waitUntilBuilderDetached("dummy")
- # now let the new slave take over
- self.connectSlave2()
- d.addCallback(self._testDuplicate_1, ss)
- return d
- testDuplicate.timeout = 5
-
- def _testDuplicate_1(self, res, ss):
- d = self.master.botmaster.waitUntilBuilderAttached("dummy")
- d.addCallback(self._testDuplicate_2, ss)
- return d
-
- def _testDuplicate_2(self, res, ss):
- self.failUnless(ss.isConnected())
- self.failUnlessEqual(ss.getAdmin(), "two")
-
-
-class Disconnect2(RunMixin, unittest.TestCase):
-
- def setUp(self):
- RunMixin.setUp(self)
- # verify that disconnecting the slave during a build properly
- # terminates the build
- m = self.master
- s = self.status
- c = self.control
-
- m.loadConfig(config_2)
- m.readConfig = True
- m.startService()
-
- self.failUnlessEqual(s.getBuilderNames(), ["dummy", "testdummy"])
- self.s1 = s1 = s.getBuilder("dummy")
- self.failUnlessEqual(s1.getName(), "dummy")
- self.failUnlessEqual(s1.getState(), ("offline", []))
- self.failUnlessEqual(s1.getCurrentBuilds(), [])
- self.failUnlessEqual(s1.getLastFinishedBuild(), None)
- self.failUnlessEqual(s1.getBuild(-1), None)
-
- d = self.connectSlaveFastTimeout()
- d.addCallback(self._setup_disconnect2_1)
- return d
-
- def _setup_disconnect2_1(self, res):
- self.failUnlessEqual(self.s1.getState(), ("idle", []))
-
-
- def testSlaveTimeout(self):
- # now suppose the slave goes missing. We want to find out when it
- # creates a new Broker, so we reach inside and mark it with the
- # well-known sigil of impending messy death.
- bd = self.slaves['bot1'].getServiceNamed("bot").builders["dummy"]
- broker = bd.remote.broker
- broker.redshirt = 1
-
- # make sure the keepalives will keep the connection up
- d = defer.Deferred()
- reactor.callLater(5, d.callback, None)
- d.addCallback(self._testSlaveTimeout_1)
- return d
- testSlaveTimeout.timeout = 20
-
- def _testSlaveTimeout_1(self, res):
- bd = self.slaves['bot1'].getServiceNamed("bot").builders["dummy"]
- if not bd.remote or not hasattr(bd.remote.broker, "redshirt"):
- self.fail("slave disconnected when it shouldn't have")
-
- d = self.master.botmaster.waitUntilBuilderDetached("dummy")
- # whoops! how careless of me.
- self.disappearSlave(allowReconnect=True)
- # the slave will realize the connection is lost within 2 seconds, and
- # reconnect.
- d.addCallback(self._testSlaveTimeout_2)
- return d
-
- def _testSlaveTimeout_2(self, res):
- # the ReconnectingPBClientFactory will attempt a reconnect in two
- # seconds.
- d = self.master.botmaster.waitUntilBuilderAttached("dummy")
- d.addCallback(self._testSlaveTimeout_3)
- return d
-
- def _testSlaveTimeout_3(self, res):
- # make sure it is a new connection (i.e. a new Broker)
- bd = self.slaves['bot1'].getServiceNamed("bot").builders["dummy"]
- self.failUnless(bd.remote, "hey, slave isn't really connected")
- self.failIf(hasattr(bd.remote.broker, "redshirt"),
- "hey, slave's Broker is still marked for death")
-
-
-class Basedir(RunMixin, unittest.TestCase):
- def testChangeBuilddir(self):
- m = self.master
- m.loadConfig(config_4)
- m.readConfig = True
- m.startService()
-
- d = self.connectSlave()
- d.addCallback(self._testChangeBuilddir_1)
- return d
-
- def _testChangeBuilddir_1(self, res):
- self.bot = bot = self.slaves['bot1'].bot
- self.builder = builder = bot.builders.get("dummy")
- self.failUnless(builder)
- self.failUnlessEqual(builder.builddir, "dummy")
- self.failUnlessEqual(builder.basedir,
- os.path.join("slavebase-bot1", "dummy"))
-
- d = self.master.loadConfig(config_4_newbasedir)
- d.addCallback(self._testChangeBuilddir_2)
- return d
-
- def _testChangeBuilddir_2(self, res):
- bot = self.bot
- # this does NOT cause the builder to be replaced
- builder = bot.builders.get("dummy")
- self.failUnless(builder)
- self.failUnlessIdentical(self.builder, builder)
- # the basedir should be updated
- self.failUnlessEqual(builder.builddir, "dummy2")
- self.failUnlessEqual(builder.basedir,
- os.path.join("slavebase-bot1", "dummy2"))
-
- # add a new builder, which causes the basedir list to be reloaded
- d = self.master.loadConfig(config_4_newbuilder)
- return d
-
-class Triggers(RunMixin, TestFlagMixin, unittest.TestCase):
- config_trigger = config_base + """
-from buildbot.scheduler import Triggerable, Scheduler
-from buildbot.steps.trigger import Trigger
-from buildbot.steps.dummy import Dummy
-from buildbot.test.runutils import SetTestFlagStep
-c['schedulers'] = [
- Scheduler('triggerer', None, 0.1, ['triggerer']),
- Triggerable('triggeree', ['triggeree'])
-]
-triggerer = factory.BuildFactory()
-triggerer.addSteps([
- SetTestFlagStep(flagname='triggerer_started'),
- Trigger(flunkOnFailure=True, @ARGS@),
- SetTestFlagStep(flagname='triggerer_finished'),
- ])
-triggeree = factory.BuildFactory([
- s(SetTestFlagStep, flagname='triggeree_started'),
- s(@DUMMYCLASS@),
- s(SetTestFlagStep, flagname='triggeree_finished'),
- ])
-c['builders'] = [{'name': 'triggerer', 'slavename': 'bot1',
- 'builddir': 'triggerer', 'factory': triggerer},
- {'name': 'triggeree', 'slavename': 'bot1',
- 'builddir': 'triggeree', 'factory': triggeree}]
-"""
-
- def mkConfig(self, args, dummyclass="Dummy"):
- return self.config_trigger.replace("@ARGS@", args).replace("@DUMMYCLASS@", dummyclass)
-
- def setupTest(self, args, dummyclass, checkFn):
- self.clearFlags()
- m = self.master
- m.loadConfig(self.mkConfig(args, dummyclass))
- m.readConfig = True
- m.startService()
-
- c = changes.Change("bob", ["Makefile", "foo/bar.c"], "changed stuff")
- m.change_svc.addChange(c)
-
- d = self.connectSlave(builders=['triggerer', 'triggeree'])
- d.addCallback(self.startTimer, 0.5, checkFn)
- return d
-
- def startTimer(self, res, time, next_fn):
- d = defer.Deferred()
- reactor.callLater(time, d.callback, None)
- d.addCallback(next_fn)
- return d
-
- def testTriggerBuild(self):
- return self.setupTest("schedulerNames=['triggeree']",
- "Dummy",
- self._checkTriggerBuild)
-
- def _checkTriggerBuild(self, res):
- self.failIfFlagNotSet('triggerer_started')
- self.failIfFlagNotSet('triggeree_started')
- self.failIfFlagSet('triggeree_finished')
- self.failIfFlagNotSet('triggerer_finished')
-
- def testTriggerBuildWait(self):
- return self.setupTest("schedulerNames=['triggeree'], waitForFinish=1",
- "Dummy",
- self._checkTriggerBuildWait)
-
- def _checkTriggerBuildWait(self, res):
- self.failIfFlagNotSet('triggerer_started')
- self.failIfFlagNotSet('triggeree_started')
- self.failIfFlagSet('triggeree_finished')
- self.failIfFlagSet('triggerer_finished')
-
-class PropertyPropagation(RunMixin, TestFlagMixin, unittest.TestCase):
- def setupTest(self, config, builders, checkFn):
- self.clearFlags()
- m = self.master
- m.loadConfig(config)
- m.readConfig = True
- m.startService()
-
- c = changes.Change("bob", ["Makefile", "foo/bar.c"], "changed stuff")
- m.change_svc.addChange(c)
-
- d = self.connectSlave(builders=builders)
- d.addCallback(self.startTimer, 0.5, checkFn)
- return d
-
- def startTimer(self, res, time, next_fn):
- d = defer.Deferred()
- reactor.callLater(time, d.callback, None)
- d.addCallback(next_fn)
- return d
-
- config_schprop = config_base + """
-from buildbot.scheduler import Scheduler
-from buildbot.steps.dummy import Dummy
-from buildbot.test.runutils import SetTestFlagStep
-from buildbot.process.properties import WithProperties
-c['schedulers'] = [
- Scheduler('mysched', None, 0.1, ['flagcolor'], properties={'color':'red'}),
-]
-factory = factory.BuildFactory([
- s(SetTestFlagStep, flagname='testresult',
- value=WithProperties('color=%(color)s sched=%(scheduler)s')),
- ])
-c['builders'] = [{'name': 'flagcolor', 'slavename': 'bot1',
- 'builddir': 'test', 'factory': factory},
- ]
-"""
-
- def testScheduler(self):
- def _check(res):
- self.failUnlessEqual(self.getFlag('testresult'),
- 'color=red sched=mysched')
- return self.setupTest(self.config_schprop, ['flagcolor'], _check)
-
- config_slaveprop = config_base + """
-from buildbot.scheduler import Scheduler
-from buildbot.steps.dummy import Dummy
-from buildbot.test.runutils import SetTestFlagStep
-from buildbot.process.properties import WithProperties
-c['schedulers'] = [
- Scheduler('mysched', None, 0.1, ['flagcolor'])
-]
-c['slaves'] = [BuildSlave('bot1', 'sekrit', properties={'color':'orange'})]
-factory = factory.BuildFactory([
- s(SetTestFlagStep, flagname='testresult',
- value=WithProperties('color=%(color)s slavename=%(slavename)s')),
- ])
-c['builders'] = [{'name': 'flagcolor', 'slavename': 'bot1',
- 'builddir': 'test', 'factory': factory},
- ]
-"""
- def testSlave(self):
- def _check(res):
- self.failUnlessEqual(self.getFlag('testresult'),
- 'color=orange slavename=bot1')
- return self.setupTest(self.config_slaveprop, ['flagcolor'], _check)
-
- config_trigger = config_base + """
-from buildbot.scheduler import Triggerable, Scheduler
-from buildbot.steps.trigger import Trigger
-from buildbot.steps.dummy import Dummy
-from buildbot.test.runutils import SetTestFlagStep
-from buildbot.process.properties import WithProperties
-c['schedulers'] = [
- Scheduler('triggerer', None, 0.1, ['triggerer'],
- properties={'color':'mauve', 'pls_trigger':'triggeree'}),
- Triggerable('triggeree', ['triggeree'], properties={'color':'invisible'})
-]
-triggerer = factory.BuildFactory([
- s(SetTestFlagStep, flagname='testresult', value='wrongone'),
- s(Trigger, flunkOnFailure=True,
- schedulerNames=[WithProperties('%(pls_trigger)s')],
- set_properties={'color' : WithProperties('%(color)s')}),
- s(SetTestFlagStep, flagname='testresult', value='triggered'),
- ])
-triggeree = factory.BuildFactory([
- s(SetTestFlagStep, flagname='testresult',
- value=WithProperties('sched=%(scheduler)s color=%(color)s')),
- ])
-c['builders'] = [{'name': 'triggerer', 'slavename': 'bot1',
- 'builddir': 'triggerer', 'factory': triggerer},
- {'name': 'triggeree', 'slavename': 'bot1',
- 'builddir': 'triggeree', 'factory': triggeree}]
-"""
- def testTrigger(self):
- def _check(res):
- self.failUnlessEqual(self.getFlag('testresult'),
- 'sched=triggeree color=mauve')
- return self.setupTest(self.config_trigger,
- ['triggerer', 'triggeree'], _check)
-
-
-config_test_flag = config_base + """
-from buildbot.scheduler import Scheduler
-c['schedulers'] = [Scheduler('quick', None, 0.1, ['dummy'])]
-
-from buildbot.test.runutils import SetTestFlagStep
-f3 = factory.BuildFactory([
- s(SetTestFlagStep, flagname='foo', value='bar'),
- ])
-
-c['builders'] = [{'name': 'dummy', 'slavename': 'bot1',
- 'builddir': 'dummy', 'factory': f3}]
-"""
-
-class TestFlag(RunMixin, TestFlagMixin, unittest.TestCase):
- """Test for the TestFlag functionality in runutils"""
- def testTestFlag(self):
- m = self.master
- m.loadConfig(config_test_flag)
- m.readConfig = True
- m.startService()
-
- c = changes.Change("bob", ["Makefile", "foo/bar.c"], "changed stuff")
- m.change_svc.addChange(c)
-
- d = self.connectSlave()
- d.addCallback(self._testTestFlag_1)
- return d
-
- def _testTestFlag_1(self, res):
- d = defer.Deferred()
- reactor.callLater(0.5, d.callback, None)
- d.addCallback(self._testTestFlag_2)
- return d
-
- def _testTestFlag_2(self, res):
- self.failUnlessEqual(self.getFlag('foo'), 'bar')
-
-# TODO: test everything, from Change submission to Scheduler to Build to
-# Status. Use all the status types. Specifically I want to catch recurrences
-# of the bug where I forgot to make Waterfall inherit from StatusReceiver
-# such that buildSetSubmitted failed.
-
-config_test_builder = config_base + """
-from buildbot.scheduler import Scheduler
-c['schedulers'] = [Scheduler('quick', 'dummy', 0.1, ['dummy']),
- Scheduler('quick2', 'dummy2', 0.1, ['dummy2']),
- Scheduler('quick3', 'dummy3', 0.1, ['dummy3'])]
-
-from buildbot.steps.shell import ShellCommand
-f3 = factory.BuildFactory([
- s(ShellCommand, command="sleep 3", env={'blah':'blah'})
- ])
-
-c['builders'] = [{'name': 'dummy', 'slavename': 'bot1', 'env': {'foo':'bar'},
- 'builddir': 'dummy', 'factory': f3}]
-
-c['builders'].append({'name': 'dummy2', 'slavename': 'bot1',
- 'env': {'blah':'bar'}, 'builddir': 'dummy2',
- 'factory': f3})
-
-f4 = factory.BuildFactory([
- s(ShellCommand, command="sleep 3")
- ])
-
-c['builders'].append({'name': 'dummy3', 'slavename': 'bot1',
- 'env': {'blah':'bar'}, 'builddir': 'dummy3',
- 'factory': f4})
-"""
-
-class TestBuilder(RunMixin, unittest.TestCase):
- def setUp(self):
- RunMixin.setUp(self)
- self.master.loadConfig(config_test_builder)
- self.master.readConfig = True
- self.master.startService()
- self.connectSlave(builders=["dummy", "dummy2", "dummy3"])
-
- def doBuilderEnvTest(self, branch, cb):
- c = changes.Change("bob", ["Makefile", "foo/bar.c"], "changed",
- branch=branch)
- self.master.change_svc.addChange(c)
-
- d = defer.Deferred()
- reactor.callLater(0.5, d.callback, None)
- d.addCallback(cb)
-
- return d
-
- def testBuilderEnv(self):
- return self.doBuilderEnvTest("dummy", self._testBuilderEnv1)
-
- def _testBuilderEnv1(self, res):
- b = self.master.botmaster.builders['dummy']
- build = b.building[0]
- s = build.currentStep
- self.failUnless('foo' in s.cmd.args['env'])
- self.failUnlessEqual('bar', s.cmd.args['env']['foo'])
- self.failUnless('blah' in s.cmd.args['env'])
- self.failUnlessEqual('blah', s.cmd.args['env']['blah'])
-
- def testBuilderEnvOverride(self):
- return self.doBuilderEnvTest("dummy2", self._testBuilderEnvOverride1)
-
- def _testBuilderEnvOverride1(self, res):
- b = self.master.botmaster.builders['dummy2']
- build = b.building[0]
- s = build.currentStep
- self.failUnless('blah' in s.cmd.args['env'])
- self.failUnlessEqual('blah', s.cmd.args['env']['blah'])
-
- def testBuilderNoStepEnv(self):
- return self.doBuilderEnvTest("dummy3", self._testBuilderNoStepEnv1)
-
- def _testBuilderNoStepEnv1(self, res):
- b = self.master.botmaster.builders['dummy3']
- build = b.building[0]
- s = build.currentStep
- self.failUnless('blah' in s.cmd.args['env'])
- self.failUnlessEqual('bar', s.cmd.args['env']['blah'])
-
-class SchedulerWatchers(RunMixin, TestFlagMixin, unittest.TestCase):
- config_watchable = config_base + """
-from buildbot.scheduler import AnyBranchScheduler
-from buildbot.steps.dummy import Dummy
-from buildbot.test.runutils import setTestFlag, SetTestFlagStep
-s = AnyBranchScheduler(
- name='abs',
- branches=None,
- treeStableTimer=0,
- builderNames=['a', 'b'])
-c['schedulers'] = [ s ]
-
-# count the number of times a success watcher is called
-numCalls = [ 0 ]
-def watcher(ss):
- numCalls[0] += 1
- setTestFlag("numCalls", numCalls[0])
-s.subscribeToSuccessfulBuilds(watcher)
-
-f = factory.BuildFactory()
-f.addStep(Dummy(timeout=0))
-c['builders'] = [{'name': 'a', 'slavename': 'bot1',
- 'builddir': 'a', 'factory': f},
- {'name': 'b', 'slavename': 'bot1',
- 'builddir': 'b', 'factory': f}]
-"""
-
- def testWatchers(self):
- self.clearFlags()
- m = self.master
- m.loadConfig(self.config_watchable)
- m.readConfig = True
- m.startService()
-
- c = changes.Change("bob", ["Makefile", "foo/bar.c"], "changed stuff")
- m.change_svc.addChange(c)
-
- d = self.connectSlave(builders=['a', 'b'])
-
- def pause(res):
- d = defer.Deferred()
- reactor.callLater(1, d.callback, res)
- return d
- d.addCallback(pause)
-
- def checkFn(res):
- self.failUnlessEqual(self.getFlag('numCalls'), 1)
- d.addCallback(checkFn)
- return d
-
-config_priority = """
-from buildbot.process import factory
-from buildbot.steps import dummy
-from buildbot.buildslave import BuildSlave
-s = factory.s
-
-from buildbot.steps.shell import ShellCommand
-f1 = factory.BuildFactory([
- s(ShellCommand, command="sleep 3", env={'blah':'blah'})
- ])
-
-BuildmasterConfig = c = {}
-c['slaves'] = [BuildSlave('bot1', 'sekrit', max_builds=1)]
-c['schedulers'] = []
-c['builders'] = []
-c['builders'].append({'name':'quick1', 'slavename':'bot1', 'builddir': 'quickdir1', 'factory': f1})
-c['builders'].append({'name':'quick2', 'slavename':'bot1', 'builddir': 'quickdir2', 'factory': f1})
-c['slavePortnum'] = 0
-"""
-
-class BuildPrioritization(RunMixin, unittest.TestCase):
- def rmtree(self, d):
- rmtree(d)
-
- def testPriority(self):
- self.rmtree("basedir")
- os.mkdir("basedir")
- self.master.loadConfig(config_priority)
- self.master.readConfig = True
- self.master.startService()
-
- d = self.connectSlave(builders=['quick1', 'quick2'])
- d.addCallback(self._connected)
-
- return d
-
- def _connected(self, *args):
- # Our fake source stamp
- # we override canBeMergedWith so that our requests don't get merged together
- ss = SourceStamp()
- ss.canBeMergedWith = lambda x: False
-
- # Send one request to tie up the slave before sending future requests
- req0 = BuildRequest("reason", ss, "test_builder")
- self.master.botmaster.builders['quick1'].submitBuildRequest(req0)
-
- # Send 10 requests to alternating builders
- # We fudge the submittedAt field after submitting since they're all
- # getting submitted so close together according to time.time()
- # and all we care about is what order they're run in.
- reqs = []
- self.finish_order = []
- for i in range(10):
- req = BuildRequest(str(i), ss, "test_builder")
- j = i % 2 + 1
- self.master.botmaster.builders['quick%i' % j].submitBuildRequest(req)
- req.submittedAt = i
- # Keep track of what order the builds finished in
- def append(item, arg):
- self.finish_order.append(item)
- req.waitUntilFinished().addCallback(append, req)
- reqs.append(req.waitUntilFinished())
-
- dl = defer.DeferredList(reqs)
- dl.addCallback(self._all_finished)
-
- # After our first build finishes, we should wait for the rest to finish
- d = req0.waitUntilFinished()
- d.addCallback(lambda x: dl)
- return d
-
- def _all_finished(self, *args):
- # The builds should have finished in proper order
- self.failUnlessEqual([int(b.reason) for b in self.finish_order], range(10))
-
-# Test graceful shutdown when no builds are active, as well as
-# canStartBuild after graceful shutdown is initiated
-config_graceful_shutdown_idle = config_base
-class GracefulShutdownIdle(RunMixin, unittest.TestCase):
- def testShutdown(self):
- self.rmtree("basedir")
- os.mkdir("basedir")
- self.master.loadConfig(config_graceful_shutdown_idle)
- self.master.readConfig = True
- self.master.startService()
- d = self.connectSlave(builders=['quick'])
- d.addCallback(self._do_shutdown)
- return d
-
- def _do_shutdown(self, res):
- bs = self.master.botmaster.builders['quick'].slaves[0]
- # Check that the slave is accepting builds once it's connected
- self.assertEquals(bs.slave.canStartBuild(), True)
-
- # Monkeypatch the slave's shutdown routine since the real shutdown
- # interrupts the test harness
- self.did_shutdown = False
- def _shutdown():
- self.did_shutdown = True
- bs.slave.shutdown = _shutdown
-
- # Start a graceful shutdown
- bs.slave.slave_status.setGraceful(True)
- # Check that the slave isn't accepting builds any more
- self.assertEquals(bs.slave.canStartBuild(), False)
-
- # Wait a little bit and then check that we (pretended to) shut down
- d = defer.Deferred()
- d.addCallback(self._check_shutdown)
- reactor.callLater(0.5, d.callback, None)
- return d
-
- def _check_shutdown(self, res):
- self.assertEquals(self.did_shutdown, True)
-
-# Test graceful shutdown when two builds are active
-config_graceful_shutdown_busy = config_base + """
-from buildbot.buildslave import BuildSlave
-c['slaves'] = [ BuildSlave('bot1', 'sekrit', max_builds=2) ]
-
-from buildbot.scheduler import Scheduler
-c['schedulers'] = [Scheduler('dummy', None, 0.1, ['dummy', 'dummy2'])]
-
-c['builders'].append({'name': 'dummy', 'slavename': 'bot1',
- 'builddir': 'dummy', 'factory': f2})
-c['builders'].append({'name': 'dummy2', 'slavename': 'bot1',
- 'builddir': 'dummy2', 'factory': f2})
-"""
-class GracefulShutdownBusy(RunMixin, unittest.TestCase):
- def testShutdown(self):
- self.rmtree("basedir")
- os.mkdir("basedir")
- d = self.master.loadConfig(config_graceful_shutdown_busy)
- d.addCallback(lambda res: self.master.startService())
- d.addCallback(lambda res: self.connectSlave())
-
- def _send(res):
- # send a change. This will trigger both builders at the same
- # time, but since they share a slave, the max_builds=1 setting
- # will insure that only one of the two builds gets to run.
- cm = self.master.change_svc
- c = changes.Change("bob", ["Makefile", "foo/bar.c"],
- "changed stuff")
- cm.addChange(c)
- d.addCallback(_send)
-
- def _delay(res):
- d1 = defer.Deferred()
- reactor.callLater(0.5, d1.callback, None)
- # this test depends upon this 0.5s delay landing us in the middle
- # of one of the builds.
- return d1
- d.addCallback(_delay)
-
- # Start a graceful shutdown. We should be in the middle of two builds
- def _shutdown(res):
- bs = self.master.botmaster.builders['dummy'].slaves[0]
- # Monkeypatch the slave's shutdown routine since the real shutdown
- # interrupts the test harness
- self.did_shutdown = False
- def _shutdown():
- self.did_shutdown = True
- return defer.succeed(None)
- bs.slave.shutdown = _shutdown
- # Start a graceful shutdown
- bs.slave.slave_status.setGraceful(True)
-
- builders = [ self.master.botmaster.builders[bn]
- for bn in ('dummy', 'dummy2') ]
- for builder in builders:
- self.failUnless(len(builder.slaves) == 1)
- from buildbot.process.builder import BUILDING
- building_bs = [ builder
- for builder in builders
- if builder.slaves[0].state == BUILDING ]
- # assert that both builds are running right now.
- self.failUnlessEqual(len(building_bs), 2)
-
- d.addCallback(_shutdown)
-
- # Wait a little bit again, and then make sure that we are still running
- # the two builds, and haven't shutdown yet
- d.addCallback(_delay)
- def _check(res):
- self.assertEquals(self.did_shutdown, False)
- builders = [ self.master.botmaster.builders[bn]
- for bn in ('dummy', 'dummy2') ]
- for builder in builders:
- self.failUnless(len(builder.slaves) == 1)
- from buildbot.process.builder import BUILDING
- building_bs = [ builder
- for builder in builders
- if builder.slaves[0].state == BUILDING ]
- # assert that both builds are running right now.
- self.failUnlessEqual(len(building_bs), 2)
- d.addCallback(_check)
-
- # Wait for all the builds to finish
- def _wait_finish(res):
- builders = [ self.master.botmaster.builders[bn]
- for bn in ('dummy', 'dummy2') ]
- builds = []
- for builder in builders:
- builds.append(builder.builder_status.currentBuilds[0].waitUntilFinished())
- dl = defer.DeferredList(builds)
- return dl
- d.addCallback(_wait_finish)
-
- # Wait a little bit after the builds finish, and then
- # check that the slave has shutdown
- d.addCallback(_delay)
- def _check_shutdown(res):
- # assert that we shutdown the slave
- self.assertEquals(self.did_shutdown, True)
- builders = [ self.master.botmaster.builders[bn]
- for bn in ('dummy', 'dummy2') ]
- from buildbot.process.builder import BUILDING
- building_bs = [ builder
- for builder in builders
- if builder.slaves[0].state == BUILDING ]
- # assert that no builds are running right now.
- self.failUnlessEqual(len(building_bs), 0)
- d.addCallback(_check_shutdown)
-
- return d
diff --git a/buildbot/buildbot/test/test_runner.py b/buildbot/buildbot/test/test_runner.py
deleted file mode 100644
index d94ef5f..0000000
--- a/buildbot/buildbot/test/test_runner.py
+++ /dev/null
@@ -1,392 +0,0 @@
-
-# this file tests the 'buildbot' command, with its various sub-commands
-
-from twisted.trial import unittest
-from twisted.python import usage
-import os, shutil, shlex
-import sets
-
-from buildbot.scripts import runner, tryclient
-
-class Options(unittest.TestCase):
- optionsFile = "SDFsfsFSdfsfsFSD"
-
- def make(self, d, key):
- # we use a wacky filename here in case the test code discovers the
- # user's real ~/.buildbot/ directory
- os.makedirs(os.sep.join(d + [".buildbot"]))
- f = open(os.sep.join(d + [".buildbot", self.optionsFile]), "w")
- f.write("key = '%s'\n" % key)
- f.close()
-
- def check(self, d, key):
- basedir = os.sep.join(d)
- options = runner.loadOptions(self.optionsFile, here=basedir,
- home=self.home)
- if key is None:
- self.failIf(options.has_key('key'))
- else:
- self.failUnlessEqual(options['key'], key)
-
- def testFindOptions(self):
- self.make(["home", "dir1", "dir2", "dir3"], "one")
- self.make(["home", "dir1", "dir2"], "two")
- self.make(["home"], "home")
- self.home = os.path.abspath("home")
-
- self.check(["home", "dir1", "dir2", "dir3"], "one")
- self.check(["home", "dir1", "dir2"], "two")
- self.check(["home", "dir1"], "home")
-
- self.home = os.path.abspath("nothome")
- os.makedirs(os.sep.join(["nothome", "dir1"]))
- self.check(["nothome", "dir1"], None)
-
- def doForce(self, args, expected):
- o = runner.ForceOptions()
- o.parseOptions(args)
- self.failUnlessEqual(o.keys(), expected.keys())
- for k in o.keys():
- self.failUnlessEqual(o[k], expected[k],
- "[%s] got %s instead of %s" % (k, o[k],
- expected[k]))
-
- def testForceOptions(self):
- if not hasattr(shlex, "split"):
- raise unittest.SkipTest("need python>=2.3 for shlex.split")
-
- exp = {"builder": "b1", "reason": "reason",
- "branch": None, "revision": None}
- self.doForce(shlex.split("b1 reason"), exp)
- self.doForce(shlex.split("b1 'reason'"), exp)
- self.failUnlessRaises(usage.UsageError, self.doForce,
- shlex.split("--builder b1 'reason'"), exp)
- self.doForce(shlex.split("--builder b1 --reason reason"), exp)
- self.doForce(shlex.split("--builder b1 --reason 'reason'"), exp)
- self.doForce(shlex.split("--builder b1 --reason \"reason\""), exp)
-
- exp['reason'] = "longer reason"
- self.doForce(shlex.split("b1 'longer reason'"), exp)
- self.doForce(shlex.split("b1 longer reason"), exp)
- self.doForce(shlex.split("--reason 'longer reason' b1"), exp)
-
-
-class Create(unittest.TestCase):
- def failUnlessIn(self, substring, string, msg=None):
- # trial provides a version of this that requires python-2.3 to test
- # strings.
- self.failUnless(string.find(substring) != -1, msg)
- def failUnlessExists(self, filename):
- self.failUnless(os.path.exists(filename), "%s should exist" % filename)
- def failIfExists(self, filename):
- self.failIf(os.path.exists(filename), "%s should not exist" % filename)
-
- def setUp(self):
- self.cwd = os.getcwd()
-
- def tearDown(self):
- os.chdir(self.cwd)
-
- def testMaster(self):
- basedir = "test_runner.master"
- options = runner.MasterOptions()
- options.parseOptions(["-q", basedir])
- cwd = os.getcwd()
- runner.createMaster(options)
- os.chdir(cwd)
-
- tac = os.path.join(basedir, "buildbot.tac")
- self.failUnless(os.path.exists(tac))
- tacfile = open(tac,"rt").read()
- self.failUnlessIn("basedir", tacfile)
- self.failUnlessIn("configfile = r'master.cfg'", tacfile)
- self.failUnlessIn("BuildMaster(basedir, configfile)", tacfile)
-
- cfg = os.path.join(basedir, "master.cfg")
- self.failIfExists(cfg)
- samplecfg = os.path.join(basedir, "master.cfg.sample")
- self.failUnlessExists(samplecfg)
- cfgfile = open(samplecfg,"rt").read()
- self.failUnlessIn("This is a sample buildmaster config file", cfgfile)
-
- makefile = os.path.join(basedir, "Makefile.sample")
- self.failUnlessExists(makefile)
-
- # now verify that running it a second time (with the same options)
- # does the right thing: nothing changes
- runner.createMaster(options)
- os.chdir(cwd)
-
- self.failIfExists(os.path.join(basedir, "buildbot.tac.new"))
- self.failUnlessExists(os.path.join(basedir, "master.cfg.sample"))
-
- oldtac = open(os.path.join(basedir, "buildbot.tac"), "rt").read()
-
- # mutate Makefile.sample, since it should be rewritten
- f = open(os.path.join(basedir, "Makefile.sample"), "rt")
- oldmake = f.read()
- f = open(os.path.join(basedir, "Makefile.sample"), "wt")
- f.write(oldmake)
- f.write("# additional line added\n")
- f.close()
-
- # also mutate master.cfg.sample
- f = open(os.path.join(basedir, "master.cfg.sample"), "rt")
- oldsamplecfg = f.read()
- f = open(os.path.join(basedir, "master.cfg.sample"), "wt")
- f.write(oldsamplecfg)
- f.write("# additional line added\n")
- f.close()
-
- # now run it again (with different options)
- options = runner.MasterOptions()
- options.parseOptions(["-q", "--config", "other.cfg", basedir])
- runner.createMaster(options)
- os.chdir(cwd)
-
- tac = open(os.path.join(basedir, "buildbot.tac"), "rt").read()
- self.failUnlessEqual(tac, oldtac, "shouldn't change existing .tac")
- self.failUnlessExists(os.path.join(basedir, "buildbot.tac.new"))
-
- make = open(os.path.join(basedir, "Makefile.sample"), "rt").read()
- self.failUnlessEqual(make, oldmake, "*should* rewrite Makefile.sample")
-
- samplecfg = open(os.path.join(basedir, "master.cfg.sample"),
- "rt").read()
- self.failUnlessEqual(samplecfg, oldsamplecfg,
- "*should* rewrite master.cfg.sample")
-
- def testUpgradeMaster(self):
- # first, create a master, run it briefly, then upgrade it. Nothing
- # should change.
- basedir = "test_runner.master2"
- options = runner.MasterOptions()
- options.parseOptions(["-q", basedir])
- cwd = os.getcwd()
- runner.createMaster(options)
- os.chdir(cwd)
-
- f = open(os.path.join(basedir, "master.cfg"), "w")
- f.write(open(os.path.join(basedir, "master.cfg.sample"), "r").read())
- f.close()
-
- # the upgrade process (specifically the verify-master.cfg step) will
- # create any builder status directories that weren't already created.
- # Create those ahead of time.
- os.mkdir(os.path.join(basedir, "full"))
-
- files1 = self.record_files(basedir)
-
- # upgrade it
- options = runner.UpgradeMasterOptions()
- options.parseOptions(["--quiet", basedir])
- cwd = os.getcwd()
- runner.upgradeMaster(options)
- os.chdir(cwd)
-
- files2 = self.record_files(basedir)
- self.failUnlessSameFiles(files1, files2)
-
- # now make it look like the one that 0.7.5 creates: no public_html
- for fn in os.listdir(os.path.join(basedir, "public_html")):
- os.unlink(os.path.join(basedir, "public_html", fn))
- os.rmdir(os.path.join(basedir, "public_html"))
-
- # and make sure that upgrading it re-populates public_html
- options = runner.UpgradeMasterOptions()
- options.parseOptions(["-q", basedir])
- cwd = os.getcwd()
- runner.upgradeMaster(options)
- os.chdir(cwd)
-
- files3 = self.record_files(basedir)
- self.failUnlessSameFiles(files1, files3)
-
- # now induce an error in master.cfg and make sure that upgrade
- # notices it.
- f = open(os.path.join(basedir, "master.cfg"), "a")
- f.write("raise RuntimeError('catch me please')\n")
- f.close()
-
- options = runner.UpgradeMasterOptions()
- options.parseOptions(["-q", basedir])
- cwd = os.getcwd()
- rc = runner.upgradeMaster(options)
- os.chdir(cwd)
- self.failUnless(rc != 0, rc)
- # TODO: change the way runner.py works to let us pass in a stderr
- # filehandle, and use a StringIO to capture its output, and make sure
- # the right error messages appear therein.
-
-
- def failUnlessSameFiles(self, files1, files2):
- f1 = sets.Set(files1.keys())
- f2 = sets.Set(files2.keys())
- msg = ""
- if f2 - f1:
- msg += "Missing from files1: %s\n" % (list(f2-f1),)
- if f1 - f2:
- msg += "Missing from files2: %s\n" % (list(f1-f2),)
- if msg:
- self.fail(msg)
-
- def record_files(self, basedir):
- allfiles = {}
- for root, dirs, files in os.walk(basedir):
- for f in files:
- fn = os.path.join(root, f)
- allfiles[fn] = ("FILE", open(fn,"rb").read())
- for d in dirs:
- allfiles[os.path.join(root, d)] = ("DIR",)
- return allfiles
-
-
- def testSlave(self):
- basedir = "test_runner.slave"
- options = runner.SlaveOptions()
- options.parseOptions(["-q", basedir, "buildmaster:1234",
- "botname", "passwd"])
- cwd = os.getcwd()
- runner.createSlave(options)
- os.chdir(cwd)
-
- tac = os.path.join(basedir, "buildbot.tac")
- self.failUnless(os.path.exists(tac))
- tacfile = open(tac,"rt").read()
- self.failUnlessIn("basedir", tacfile)
- self.failUnlessIn("buildmaster_host = 'buildmaster'", tacfile)
- self.failUnlessIn("port = 1234", tacfile)
- self.failUnlessIn("slavename = 'botname'", tacfile)
- self.failUnlessIn("passwd = 'passwd'", tacfile)
- self.failUnlessIn("keepalive = 600", tacfile)
- self.failUnlessIn("BuildSlave(buildmaster_host, port, slavename",
- tacfile)
-
- makefile = os.path.join(basedir, "Makefile.sample")
- self.failUnlessExists(makefile)
-
- self.failUnlessExists(os.path.join(basedir, "info", "admin"))
- self.failUnlessExists(os.path.join(basedir, "info", "host"))
- # edit one to make sure the later install doesn't change it
- f = open(os.path.join(basedir, "info", "admin"), "wt")
- f.write("updated@buildbot.example.org\n")
- f.close()
-
- # now verify that running it a second time (with the same options)
- # does the right thing: nothing changes
- runner.createSlave(options)
- os.chdir(cwd)
-
- self.failIfExists(os.path.join(basedir, "buildbot.tac.new"))
- admin = open(os.path.join(basedir, "info", "admin"), "rt").read()
- self.failUnlessEqual(admin, "updated@buildbot.example.org\n")
-
-
- # mutate Makefile.sample, since it should be rewritten
- oldmake = open(os.path.join(basedir, "Makefile.sample"), "rt").read()
- f = open(os.path.join(basedir, "Makefile.sample"), "wt")
- f.write(oldmake)
- f.write("# additional line added\n")
- f.close()
- oldtac = open(os.path.join(basedir, "buildbot.tac"), "rt").read()
-
- # now run it again (with different options)
- options = runner.SlaveOptions()
- options.parseOptions(["-q", "--keepalive", "30",
- basedir, "buildmaster:9999",
- "newbotname", "passwd"])
- runner.createSlave(options)
- os.chdir(cwd)
-
- tac = open(os.path.join(basedir, "buildbot.tac"), "rt").read()
- self.failUnlessEqual(tac, oldtac, "shouldn't change existing .tac")
- self.failUnlessExists(os.path.join(basedir, "buildbot.tac.new"))
- tacfile = open(os.path.join(basedir, "buildbot.tac.new"),"rt").read()
- self.failUnlessIn("basedir", tacfile)
- self.failUnlessIn("buildmaster_host = 'buildmaster'", tacfile)
- self.failUnlessIn("port = 9999", tacfile)
- self.failUnlessIn("slavename = 'newbotname'", tacfile)
- self.failUnlessIn("passwd = 'passwd'", tacfile)
- self.failUnlessIn("keepalive = 30", tacfile)
- self.failUnlessIn("BuildSlave(buildmaster_host, port, slavename",
- tacfile)
-
- make = open(os.path.join(basedir, "Makefile.sample"), "rt").read()
- self.failUnlessEqual(make, oldmake, "*should* rewrite Makefile.sample")
-
-class Try(unittest.TestCase):
- # test some aspects of the 'buildbot try' command
- def makeOptions(self, contents):
- if os.path.exists(".buildbot"):
- shutil.rmtree(".buildbot")
- os.mkdir(".buildbot")
- open(os.path.join(".buildbot", "options"), "w").write(contents)
-
- def testGetopt1(self):
- opts = "try_connect = 'ssh'\n" + "try_builders = ['a']\n"
- self.makeOptions(opts)
- config = runner.TryOptions()
- config.parseOptions([])
- t = tryclient.Try(config)
- self.failUnlessEqual(t.connect, "ssh")
- self.failUnlessEqual(t.builderNames, ['a'])
-
- def testGetopt2(self):
- opts = ""
- self.makeOptions(opts)
- config = runner.TryOptions()
- config.parseOptions(['--connect=ssh', '--builder', 'a'])
- t = tryclient.Try(config)
- self.failUnlessEqual(t.connect, "ssh")
- self.failUnlessEqual(t.builderNames, ['a'])
-
- def testGetopt3(self):
- opts = ""
- self.makeOptions(opts)
- config = runner.TryOptions()
- config.parseOptions(['--connect=ssh',
- '--builder', 'a', '--builder=b'])
- t = tryclient.Try(config)
- self.failUnlessEqual(t.connect, "ssh")
- self.failUnlessEqual(t.builderNames, ['a', 'b'])
-
- def testGetopt4(self):
- opts = "try_connect = 'ssh'\n" + "try_builders = ['a']\n"
- self.makeOptions(opts)
- config = runner.TryOptions()
- config.parseOptions(['--builder=b'])
- t = tryclient.Try(config)
- self.failUnlessEqual(t.connect, "ssh")
- self.failUnlessEqual(t.builderNames, ['b'])
-
- def testGetTopdir(self):
- os.mkdir("gettopdir")
- os.mkdir(os.path.join("gettopdir", "foo"))
- os.mkdir(os.path.join("gettopdir", "foo", "bar"))
- open(os.path.join("gettopdir", "1"),"w").write("1")
- open(os.path.join("gettopdir", "foo", "2"),"w").write("2")
- open(os.path.join("gettopdir", "foo", "bar", "3"),"w").write("3")
-
- target = os.path.abspath("gettopdir")
- t = tryclient.getTopdir("1", "gettopdir")
- self.failUnlessEqual(os.path.abspath(t), target)
- t = tryclient.getTopdir("1", os.path.join("gettopdir", "foo"))
- self.failUnlessEqual(os.path.abspath(t), target)
- t = tryclient.getTopdir("1", os.path.join("gettopdir", "foo", "bar"))
- self.failUnlessEqual(os.path.abspath(t), target)
-
- target = os.path.abspath(os.path.join("gettopdir", "foo"))
- t = tryclient.getTopdir("2", os.path.join("gettopdir", "foo"))
- self.failUnlessEqual(os.path.abspath(t), target)
- t = tryclient.getTopdir("2", os.path.join("gettopdir", "foo", "bar"))
- self.failUnlessEqual(os.path.abspath(t), target)
-
- target = os.path.abspath(os.path.join("gettopdir", "foo", "bar"))
- t = tryclient.getTopdir("3", os.path.join("gettopdir", "foo", "bar"))
- self.failUnlessEqual(os.path.abspath(t), target)
-
- nonexistent = "nonexistent\n29fis3kq\tBAR"
- # hopefully there won't be a real file with that name between here
- # and the filesystem root.
- self.failUnlessRaises(ValueError, tryclient.getTopdir, nonexistent)
-
diff --git a/buildbot/buildbot/test/test_scheduler.py b/buildbot/buildbot/test/test_scheduler.py
deleted file mode 100644
index 667e349..0000000
--- a/buildbot/buildbot/test/test_scheduler.py
+++ /dev/null
@@ -1,348 +0,0 @@
-# -*- test-case-name: buildbot.test.test_scheduler -*-
-
-import os, time
-
-from twisted.trial import unittest
-from twisted.internet import defer, reactor
-from twisted.application import service
-from twisted.spread import pb
-
-from buildbot import scheduler, sourcestamp, buildset, status
-from buildbot.changes.changes import Change
-from buildbot.scripts import tryclient
-
-
-class FakeMaster(service.MultiService):
- d = None
- def submitBuildSet(self, bs):
- self.sets.append(bs)
- if self.d:
- reactor.callLater(0, self.d.callback, bs)
- self.d = None
- return pb.Referenceable() # makes the cleanup work correctly
-
-class Scheduling(unittest.TestCase):
- def setUp(self):
- self.master = master = FakeMaster()
- master.sets = []
- master.startService()
-
- def tearDown(self):
- d = self.master.stopService()
- return d
-
- def addScheduler(self, s):
- s.setServiceParent(self.master)
-
- def testPeriodic1(self):
- self.addScheduler(scheduler.Periodic("quickly", ["a","b"], 2))
- d = defer.Deferred()
- reactor.callLater(5, d.callback, None)
- d.addCallback(self._testPeriodic1_1)
- return d
- def _testPeriodic1_1(self, res):
- self.failUnless(len(self.master.sets) > 1)
- s1 = self.master.sets[0]
- self.failUnlessEqual(s1.builderNames, ["a","b"])
- self.failUnlessEqual(s1.reason, "The Periodic scheduler named 'quickly' triggered this build")
-
- def testNightly(self):
- # now == 15-Nov-2005, 00:05:36 AM . By using mktime, this is
- # converted into the local timezone, which happens to match what
- # Nightly is going to do anyway.
- MIN=60; HOUR=60*MIN; DAY=24*3600
- now = time.mktime((2005, 11, 15, 0, 5, 36, 1, 319, 0))
-
- s = scheduler.Nightly('nightly', ["a"], hour=3)
- t = s.calculateNextRunTimeFrom(now)
- self.failUnlessEqual(int(t-now), 2*HOUR+54*MIN+24)
-
- s = scheduler.Nightly('nightly', ["a"], minute=[3,8,54])
- t = s.calculateNextRunTimeFrom(now)
- self.failUnlessEqual(int(t-now), 2*MIN+24)
-
- s = scheduler.Nightly('nightly', ["a"],
- dayOfMonth=16, hour=1, minute=6)
- t = s.calculateNextRunTimeFrom(now)
- self.failUnlessEqual(int(t-now), DAY+HOUR+24)
-
- s = scheduler.Nightly('nightly', ["a"],
- dayOfMonth=16, hour=1, minute=3)
- t = s.calculateNextRunTimeFrom(now)
- self.failUnlessEqual(int(t-now), DAY+57*MIN+24)
-
- s = scheduler.Nightly('nightly', ["a"],
- dayOfMonth=15, hour=1, minute=3)
- t = s.calculateNextRunTimeFrom(now)
- self.failUnlessEqual(int(t-now), 57*MIN+24)
-
- s = scheduler.Nightly('nightly', ["a"],
- dayOfMonth=15, hour=0, minute=3)
- t = s.calculateNextRunTimeFrom(now)
- self.failUnlessEqual(int(t-now), 30*DAY-3*MIN+24)
-
-
- def isImportant(self, change):
- if "important" in change.files:
- return True
- return False
-
- def testBranch(self):
- s = scheduler.Scheduler("b1", "branch1", 2, ["a","b"],
- fileIsImportant=self.isImportant)
- self.addScheduler(s)
-
- c0 = Change("carol", ["important"], "other branch", branch="other")
- s.addChange(c0)
- self.failIf(s.timer)
- self.failIf(s.importantChanges)
-
- c1 = Change("alice", ["important", "not important"], "some changes",
- branch="branch1")
- s.addChange(c1)
- c2 = Change("bob", ["not important", "boring"], "some more changes",
- branch="branch1")
- s.addChange(c2)
- c3 = Change("carol", ["important", "dull"], "even more changes",
- branch="branch1")
- s.addChange(c3)
-
- self.failUnlessEqual(s.importantChanges, [c1,c3])
- self.failUnlessEqual(s.unimportantChanges, [c2])
- self.failUnless(s.timer)
-
- d = defer.Deferred()
- reactor.callLater(4, d.callback, None)
- d.addCallback(self._testBranch_1)
- return d
- def _testBranch_1(self, res):
- self.failUnlessEqual(len(self.master.sets), 1)
- s = self.master.sets[0].source
- self.failUnlessEqual(s.branch, "branch1")
- self.failUnlessEqual(s.revision, None)
- self.failUnlessEqual(len(s.changes), 3)
- self.failUnlessEqual(s.patch, None)
-
-
- def testAnyBranch(self):
- s = scheduler.AnyBranchScheduler("b1", None, 1, ["a","b"],
- fileIsImportant=self.isImportant)
- self.addScheduler(s)
-
- c1 = Change("alice", ["important", "not important"], "some changes",
- branch="branch1")
- s.addChange(c1)
- c2 = Change("bob", ["not important", "boring"], "some more changes",
- branch="branch1")
- s.addChange(c2)
- c3 = Change("carol", ["important", "dull"], "even more changes",
- branch="branch1")
- s.addChange(c3)
-
- c4 = Change("carol", ["important"], "other branch", branch="branch2")
- s.addChange(c4)
-
- c5 = Change("carol", ["important"], "default branch", branch=None)
- s.addChange(c5)
-
- d = defer.Deferred()
- reactor.callLater(2, d.callback, None)
- d.addCallback(self._testAnyBranch_1)
- return d
- def _testAnyBranch_1(self, res):
- self.failUnlessEqual(len(self.master.sets), 3)
- self.master.sets.sort(lambda a,b: cmp(a.source.branch,
- b.source.branch))
-
- s1 = self.master.sets[0].source
- self.failUnlessEqual(s1.branch, None)
- self.failUnlessEqual(s1.revision, None)
- self.failUnlessEqual(len(s1.changes), 1)
- self.failUnlessEqual(s1.patch, None)
-
- s2 = self.master.sets[1].source
- self.failUnlessEqual(s2.branch, "branch1")
- self.failUnlessEqual(s2.revision, None)
- self.failUnlessEqual(len(s2.changes), 3)
- self.failUnlessEqual(s2.patch, None)
-
- s3 = self.master.sets[2].source
- self.failUnlessEqual(s3.branch, "branch2")
- self.failUnlessEqual(s3.revision, None)
- self.failUnlessEqual(len(s3.changes), 1)
- self.failUnlessEqual(s3.patch, None)
-
- def testAnyBranch2(self):
- # like testAnyBranch but without fileIsImportant
- s = scheduler.AnyBranchScheduler("b1", None, 2, ["a","b"])
- self.addScheduler(s)
- c1 = Change("alice", ["important", "not important"], "some changes",
- branch="branch1")
- s.addChange(c1)
- c2 = Change("bob", ["not important", "boring"], "some more changes",
- branch="branch1")
- s.addChange(c2)
- c3 = Change("carol", ["important", "dull"], "even more changes",
- branch="branch1")
- s.addChange(c3)
-
- c4 = Change("carol", ["important"], "other branch", branch="branch2")
- s.addChange(c4)
-
- d = defer.Deferred()
- reactor.callLater(2, d.callback, None)
- d.addCallback(self._testAnyBranch2_1)
- return d
- def _testAnyBranch2_1(self, res):
- self.failUnlessEqual(len(self.master.sets), 2)
- self.master.sets.sort(lambda a,b: cmp(a.source.branch,
- b.source.branch))
- s1 = self.master.sets[0].source
- self.failUnlessEqual(s1.branch, "branch1")
- self.failUnlessEqual(s1.revision, None)
- self.failUnlessEqual(len(s1.changes), 3)
- self.failUnlessEqual(s1.patch, None)
-
- s2 = self.master.sets[1].source
- self.failUnlessEqual(s2.branch, "branch2")
- self.failUnlessEqual(s2.revision, None)
- self.failUnlessEqual(len(s2.changes), 1)
- self.failUnlessEqual(s2.patch, None)
-
-
- def createMaildir(self, jobdir):
- os.mkdir(jobdir)
- os.mkdir(os.path.join(jobdir, "new"))
- os.mkdir(os.path.join(jobdir, "cur"))
- os.mkdir(os.path.join(jobdir, "tmp"))
-
- jobcounter = 1
- def pushJob(self, jobdir, job):
- while 1:
- filename = "job_%d" % self.jobcounter
- self.jobcounter += 1
- if os.path.exists(os.path.join(jobdir, "new", filename)):
- continue
- if os.path.exists(os.path.join(jobdir, "tmp", filename)):
- continue
- if os.path.exists(os.path.join(jobdir, "cur", filename)):
- continue
- break
- f = open(os.path.join(jobdir, "tmp", filename), "w")
- f.write(job)
- f.close()
- os.rename(os.path.join(jobdir, "tmp", filename),
- os.path.join(jobdir, "new", filename))
-
- def testTryJobdir(self):
- self.master.basedir = "try_jobdir"
- os.mkdir(self.master.basedir)
- jobdir = "jobdir1"
- jobdir_abs = os.path.join(self.master.basedir, jobdir)
- self.createMaildir(jobdir_abs)
- s = scheduler.Try_Jobdir("try1", ["a", "b"], jobdir)
- self.addScheduler(s)
- self.failIf(self.master.sets)
- job1 = tryclient.createJobfile("buildsetID",
- "branch1", "123", 1, "diff",
- ["a", "b"])
- self.master.d = d = defer.Deferred()
- self.pushJob(jobdir_abs, job1)
- d.addCallback(self._testTryJobdir_1)
- # N.B.: if we don't have DNotify, we poll every 10 seconds, so don't
- # set a .timeout here shorter than that. TODO: make it possible to
- # set the polling interval, so we can make it shorter.
- return d
-
- def _testTryJobdir_1(self, bs):
- self.failUnlessEqual(bs.builderNames, ["a", "b"])
- self.failUnlessEqual(bs.source.branch, "branch1")
- self.failUnlessEqual(bs.source.revision, "123")
- self.failUnlessEqual(bs.source.patch, (1, "diff"))
-
-
- def testTryUserpass(self):
- up = [("alice","pw1"), ("bob","pw2")]
- s = scheduler.Try_Userpass("try2", ["a", "b"], 0, userpass=up)
- self.addScheduler(s)
- port = s.getPort()
- config = {'connect': 'pb',
- 'username': 'alice',
- 'passwd': 'pw1',
- 'master': "localhost:%d" % port,
- 'builders': ["a", "b"],
- }
- t = tryclient.Try(config)
- ss = sourcestamp.SourceStamp("branch1", "123", (1, "diff"))
- t.sourcestamp = ss
- d2 = self.master.d = defer.Deferred()
- d = t.deliverJob()
- d.addCallback(self._testTryUserpass_1, t, d2)
- return d
- testTryUserpass.timeout = 5
- def _testTryUserpass_1(self, res, t, d2):
- # at this point, the Try object should have a RemoteReference to the
- # status object. The FakeMaster returns a stub.
- self.failUnless(t.buildsetStatus)
- d2.addCallback(self._testTryUserpass_2, t)
- return d2
- def _testTryUserpass_2(self, bs, t):
- # this should be the BuildSet submitted by the TryScheduler
- self.failUnlessEqual(bs.builderNames, ["a", "b"])
- self.failUnlessEqual(bs.source.branch, "branch1")
- self.failUnlessEqual(bs.source.revision, "123")
- self.failUnlessEqual(bs.source.patch, (1, "diff"))
-
- t.cleanup()
-
- # twisted-2.0.1 (but not later versions) seems to require a reactor
- # iteration before stopListening actually works. TODO: investigate
- # this.
- d = defer.Deferred()
- reactor.callLater(0, d.callback, None)
- return d
-
- def testGetBuildSets(self):
- # validate IStatus.getBuildSets
- s = status.builder.Status(None, ".")
- bs1 = buildset.BuildSet(["a","b"], sourcestamp.SourceStamp(),
- reason="one", bsid="1")
- s.buildsetSubmitted(bs1.status)
- self.failUnlessEqual(s.getBuildSets(), [bs1.status])
- bs1.status.notifyFinishedWatchers()
- self.failUnlessEqual(s.getBuildSets(), [])
-
- def testCategory(self):
- s1 = scheduler.Scheduler("b1", "branch1", 2, ["a","b"], categories=["categoryA", "both"])
- self.addScheduler(s1)
- s2 = scheduler.Scheduler("b2", "branch1", 2, ["a","b"], categories=["categoryB", "both"])
- self.addScheduler(s2)
-
- c0 = Change("carol", ["important"], "branch1", branch="branch1", category="categoryA")
- s1.addChange(c0)
- s2.addChange(c0)
-
- c1 = Change("carol", ["important"], "branch1", branch="branch1", category="categoryB")
- s1.addChange(c1)
- s2.addChange(c1)
-
- c2 = Change("carol", ["important"], "branch1", branch="branch1")
- s1.addChange(c2)
- s2.addChange(c2)
-
- c3 = Change("carol", ["important"], "branch1", branch="branch1", category="both")
- s1.addChange(c3)
- s2.addChange(c3)
-
- self.failUnlessEqual(s1.importantChanges, [c0, c3])
- self.failUnlessEqual(s2.importantChanges, [c1, c3])
-
- s = scheduler.Scheduler("b3", "branch1", 2, ["a","b"])
- self.addScheduler(s)
-
- c0 = Change("carol", ["important"], "branch1", branch="branch1", category="categoryA")
- s.addChange(c0)
- c1 = Change("carol", ["important"], "branch1", branch="branch1", category="categoryB")
- s.addChange(c1)
-
- self.failUnlessEqual(s.importantChanges, [c0, c1])
diff --git a/buildbot/buildbot/test/test_shell.py b/buildbot/buildbot/test/test_shell.py
deleted file mode 100644
index 52a17f4..0000000
--- a/buildbot/buildbot/test/test_shell.py
+++ /dev/null
@@ -1,138 +0,0 @@
-
-
-# test step.ShellCommand and the slave-side commands.ShellCommand
-
-import sys, time, os
-from twisted.trial import unittest
-from twisted.internet import reactor, defer
-from twisted.python import util
-from buildbot.slave.commands import SlaveShellCommand
-from buildbot.test.runutils import SlaveCommandTestBase
-
-class SlaveSide(SlaveCommandTestBase, unittest.TestCase):
- def testOne(self):
- self.setUpBuilder("test_shell.testOne")
- emitcmd = util.sibpath(__file__, "emit.py")
- args = {
- 'command': [sys.executable, emitcmd, "0"],
- 'workdir': ".",
- }
- d = self.startCommand(SlaveShellCommand, args)
- d.addCallback(self.collectUpdates)
- def _check(logs):
- self.failUnlessEqual(logs['stdout'], "this is stdout\n")
- self.failUnlessEqual(logs['stderr'], "this is stderr\n")
- d.addCallback(_check)
- return d
-
- # TODO: move test_slavecommand.Shell and .ShellPTY over here
-
- def _generateText(self, filename):
- lines = []
- for i in range(3):
- lines.append("this is %s %d\n" % (filename, i))
- return "".join(lines)
-
- def testLogFiles_0(self):
- return self._testLogFiles(0)
-
- def testLogFiles_1(self):
- return self._testLogFiles(1)
-
- def testLogFiles_2(self):
- return self._testLogFiles(2)
-
- def testLogFiles_3(self):
- return self._testLogFiles(3)
-
- def _testLogFiles(self, mode):
- basedir = "test_shell.testLogFiles"
- self.setUpBuilder(basedir)
- # emitlogs.py writes two lines to stdout and two logfiles, one second
- # apart. Then it waits for us to write something to stdin, then it
- # writes one more line.
-
- if mode != 3:
- # we write something to the log file first, to exercise the logic
- # that distinguishes between the old file and the one as modified
- # by the ShellCommand. We set the timestamp back 5 seconds so
- # that timestamps can be used to distinguish old from new.
- log2file = os.path.join(basedir, "log2.out")
- f = open(log2file, "w")
- f.write("dummy text\n")
- f.close()
- earlier = time.time() - 5
- os.utime(log2file, (earlier, earlier))
-
- if mode == 3:
- # mode=3 doesn't create the old logfiles in the first place, but
- # then behaves like mode=1 (where the command pauses before
- # creating them).
- mode = 1
-
- # mode=1 will cause emitlogs.py to delete the old logfiles first, and
- # then wait two seconds before creating the new files. mode=0 does
- # not do this.
- args = {
- 'command': [sys.executable,
- util.sibpath(__file__, "emitlogs.py"),
- "%s" % mode],
- 'workdir': ".",
- 'logfiles': {"log2": "log2.out",
- "log3": "log3.out"},
- 'keep_stdin_open': True,
- }
- finishd = self.startCommand(SlaveShellCommand, args)
- # The first batch of lines is written immediately. The second is
- # written after a pause of one second. We poll once per second until
- # we see both batches.
-
- self._check_timeout = 10
- d = self._check_and_wait()
- def _wait_for_finish(res, finishd):
- return finishd
- d.addCallback(_wait_for_finish, finishd)
- d.addCallback(self.collectUpdates)
- def _check(logs):
- self.failUnlessEqual(logs['stdout'], self._generateText("stdout"))
- if mode == 2:
- self.failIf(('log','log2') in logs)
- self.failIf(('log','log3') in logs)
- else:
- self.failUnlessEqual(logs[('log','log2')],
- self._generateText("log2"))
- self.failUnlessEqual(logs[('log','log3')],
- self._generateText("log3"))
- d.addCallback(_check)
- d.addBoth(self._maybePrintError)
- return d
-
- def _check_and_wait(self, res=None):
- self._check_timeout -= 1
- if self._check_timeout <= 0:
- raise defer.TimeoutError("gave up on command")
- logs = self.collectUpdates()
- if logs.get('stdout') == "this is stdout 0\nthis is stdout 1\n":
- # the emitlogs.py process is now waiting for something to arrive
- # on stdin
- self.cmd.command.pp.transport.write("poke\n")
- return
- if not self.cmd.running:
- self.fail("command finished too early")
- spin = defer.Deferred()
- spin.addCallback(self._check_and_wait)
- reactor.callLater(1, spin.callback, None)
- return spin
-
- def _maybePrintError(self, res):
- rc = self.findRC()
- if rc != 0:
- print "Command ended with rc=%s" % rc
- print "STDERR:"
- self.printStderr()
- return res
-
- # MAYBE TODO: a command which appends to an existing logfile should
- # result in only the new text being sent up to the master. I need to
- # think about this more first.
-
diff --git a/buildbot/buildbot/test/test_slavecommand.py b/buildbot/buildbot/test/test_slavecommand.py
deleted file mode 100644
index 9809163..0000000
--- a/buildbot/buildbot/test/test_slavecommand.py
+++ /dev/null
@@ -1,294 +0,0 @@
-# -*- test-case-name: buildbot.test.test_slavecommand -*-
-
-from twisted.trial import unittest
-from twisted.internet import reactor, interfaces
-from twisted.python import runtime, failure, util
-
-import os, sys
-
-from buildbot.slave import commands
-SlaveShellCommand = commands.SlaveShellCommand
-
-from buildbot.test.runutils import SignalMixin, FakeSlaveBuilder
-
-# test slavecommand.py by running the various commands with a fake
-# SlaveBuilder object that logs the calls to sendUpdate()
-
-class Utilities(unittest.TestCase):
- def mkdir(self, basedir, path, mode=None):
- fn = os.path.join(basedir, path)
- os.makedirs(fn)
- if mode is not None:
- os.chmod(fn, mode)
-
- def touch(self, basedir, path, mode=None):
- fn = os.path.join(basedir, path)
- f = open(fn, "w")
- f.write("touch\n")
- f.close()
- if mode is not None:
- os.chmod(fn, mode)
-
- def test_rmdirRecursive(self):
- basedir = "slavecommand/Utilities/test_rmdirRecursive"
- os.makedirs(basedir)
- d = os.path.join(basedir, "doomed")
- self.mkdir(d, "a/b")
- self.touch(d, "a/b/1.txt")
- self.touch(d, "a/b/2.txt", 0444)
- self.touch(d, "a/b/3.txt", 0)
- self.mkdir(d, "a/c")
- self.touch(d, "a/c/1.txt")
- self.touch(d, "a/c/2.txt", 0444)
- self.touch(d, "a/c/3.txt", 0)
- os.chmod(os.path.join(d, "a/c"), 0444)
- self.mkdir(d, "a/d")
- self.touch(d, "a/d/1.txt")
- self.touch(d, "a/d/2.txt", 0444)
- self.touch(d, "a/d/3.txt", 0)
- os.chmod(os.path.join(d, "a/d"), 0)
-
- commands.rmdirRecursive(d)
- self.failIf(os.path.exists(d))
-
-
-class ShellBase(SignalMixin):
-
- def setUp(self):
- self.basedir = "test_slavecommand"
- if not os.path.isdir(self.basedir):
- os.mkdir(self.basedir)
- self.subdir = os.path.join(self.basedir, "subdir")
- if not os.path.isdir(self.subdir):
- os.mkdir(self.subdir)
- self.builder = FakeSlaveBuilder(self.usePTY, self.basedir)
- self.emitcmd = util.sibpath(__file__, "emit.py")
- self.subemitcmd = os.path.join(util.sibpath(__file__, "subdir"),
- "emit.py")
- self.sleepcmd = util.sibpath(__file__, "sleep.py")
-
- def failUnlessIn(self, substring, string):
- self.failUnless(string.find(substring) != -1,
- "'%s' not in '%s'" % (substring, string))
-
- def getfile(self, which):
- got = ""
- for r in self.builder.updates:
- if r.has_key(which):
- got += r[which]
- return got
-
- def checkOutput(self, expected):
- """
- @type expected: list of (streamname, contents) tuples
- @param expected: the expected output
- """
- expected_linesep = os.linesep
- if self.usePTY:
- # PTYs change the line ending. I'm not sure why.
- expected_linesep = "\r\n"
- expected = [(stream, contents.replace("\n", expected_linesep, 1000))
- for (stream, contents) in expected]
- if self.usePTY:
- # PTYs merge stdout+stderr into a single stream
- expected = [('stdout', contents)
- for (stream, contents) in expected]
- # now merge everything into one string per stream
- streams = {}
- for (stream, contents) in expected:
- streams[stream] = streams.get(stream, "") + contents
- for (stream, contents) in streams.items():
- got = self.getfile(stream)
- self.assertEquals(got, contents)
-
- def getrc(self):
- # updates[-2] is the rc, unless the step was interrupted
- # updates[-1] is the elapsed-time header
- u = self.builder.updates[-1]
- if "rc" not in u:
- self.failUnless(len(self.builder.updates) >= 2)
- u = self.builder.updates[-2]
- self.failUnless("rc" in u)
- return u['rc']
- def checkrc(self, expected):
- got = self.getrc()
- self.assertEquals(got, expected)
-
- def testShell1(self):
- targetfile = os.path.join(self.basedir, "log1.out")
- if os.path.exists(targetfile):
- os.unlink(targetfile)
- cmd = "%s %s 0" % (sys.executable, self.emitcmd)
- args = {'command': cmd, 'workdir': '.', 'timeout': 60}
- c = SlaveShellCommand(self.builder, None, args)
- d = c.start()
- expected = [('stdout', "this is stdout\n"),
- ('stderr', "this is stderr\n")]
- d.addCallback(self._checkPass, expected, 0)
- def _check_targetfile(res):
- self.failUnless(os.path.exists(targetfile))
- d.addCallback(_check_targetfile)
- return d
-
- def _checkPass(self, res, expected, rc):
- self.checkOutput(expected)
- self.checkrc(rc)
-
- def testShell2(self):
- cmd = [sys.executable, self.emitcmd, "0"]
- args = {'command': cmd, 'workdir': '.', 'timeout': 60}
- c = SlaveShellCommand(self.builder, None, args)
- d = c.start()
- expected = [('stdout', "this is stdout\n"),
- ('stderr', "this is stderr\n")]
- d.addCallback(self._checkPass, expected, 0)
- return d
-
- def testShellRC(self):
- cmd = [sys.executable, self.emitcmd, "1"]
- args = {'command': cmd, 'workdir': '.', 'timeout': 60}
- c = SlaveShellCommand(self.builder, None, args)
- d = c.start()
- expected = [('stdout', "this is stdout\n"),
- ('stderr', "this is stderr\n")]
- d.addCallback(self._checkPass, expected, 1)
- return d
-
- def testShellEnv(self):
- cmd = "%s %s 0" % (sys.executable, self.emitcmd)
- args = {'command': cmd, 'workdir': '.',
- 'env': {'EMIT_TEST': "envtest"}, 'timeout': 60}
- c = SlaveShellCommand(self.builder, None, args)
- d = c.start()
- expected = [('stdout', "this is stdout\n"),
- ('stderr', "this is stderr\n"),
- ('stdout', "EMIT_TEST: envtest\n"),
- ]
- d.addCallback(self._checkPass, expected, 0)
- return d
-
- def testShellSubdir(self):
- targetfile = os.path.join(self.basedir, "subdir", "log1.out")
- if os.path.exists(targetfile):
- os.unlink(targetfile)
- cmd = "%s %s 0" % (sys.executable, self.subemitcmd)
- args = {'command': cmd, 'workdir': "subdir", 'timeout': 60}
- c = SlaveShellCommand(self.builder, None, args)
- d = c.start()
- expected = [('stdout', "this is stdout in subdir\n"),
- ('stderr', "this is stderr\n")]
- d.addCallback(self._checkPass, expected, 0)
- def _check_targetfile(res):
- self.failUnless(os.path.exists(targetfile))
- d.addCallback(_check_targetfile)
- return d
-
- def testShellMissingCommand(self):
- args = {'command': "/bin/EndWorldHungerAndMakePigsFly",
- 'workdir': '.', 'timeout': 10,
- 'env': {"LC_ALL": "C"},
- }
- c = SlaveShellCommand(self.builder, None, args)
- d = c.start()
- d.addCallback(self._testShellMissingCommand_1)
- return d
- def _testShellMissingCommand_1(self, res):
- self.failIfEqual(self.getrc(), 0)
- # we used to check the error message to make sure it said something
- # about a missing command, but there are a variety of shells out
- # there, and they emit messages in a variety of languages, so we
- # stopped trying.
-
- def testTimeout(self):
- args = {'command': [sys.executable, self.sleepcmd, "10"],
- 'workdir': '.', 'timeout': 2}
- c = SlaveShellCommand(self.builder, None, args)
- d = c.start()
- d.addCallback(self._testTimeout_1)
- return d
- def _testTimeout_1(self, res):
- self.failIfEqual(self.getrc(), 0)
- got = self.getfile('header')
- self.failUnlessIn("command timed out: 2 seconds without output", got)
- if runtime.platformType == "posix":
- # the "killing pid" message is not present in windows
- self.failUnlessIn("killing pid", got)
- # but the process *ought* to be killed somehow
- self.failUnlessIn("process killed by signal", got)
- #print got
- if runtime.platformType != 'posix':
- testTimeout.todo = "timeout doesn't appear to work under windows"
-
- def testInterrupt1(self):
- args = {'command': [sys.executable, self.sleepcmd, "10"],
- 'workdir': '.', 'timeout': 20}
- c = SlaveShellCommand(self.builder, None, args)
- d = c.start()
- reactor.callLater(1, c.interrupt)
- d.addCallback(self._testInterrupt1_1)
- return d
- def _testInterrupt1_1(self, res):
- self.failIfEqual(self.getrc(), 0)
- got = self.getfile('header')
- self.failUnlessIn("command interrupted", got)
- if runtime.platformType == "posix":
- self.failUnlessIn("process killed by signal", got)
- if runtime.platformType != 'posix':
- testInterrupt1.todo = "interrupt doesn't appear to work under windows"
-
-
- # todo: twisted-specific command tests
-
-class Shell(ShellBase, unittest.TestCase):
- usePTY = False
-
- def testInterrupt2(self):
- # test the backup timeout. This doesn't work under a PTY, because the
- # transport.loseConnection we do in the timeout handler actually
- # *does* kill the process.
- args = {'command': [sys.executable, self.sleepcmd, "5"],
- 'workdir': '.', 'timeout': 20}
- c = SlaveShellCommand(self.builder, None, args)
- d = c.start()
- c.command.BACKUP_TIMEOUT = 1
- # make it unable to kill the child, by changing the signal it uses
- # from SIGKILL to the do-nothing signal 0.
- c.command.KILL = None
- reactor.callLater(1, c.interrupt)
- d.addBoth(self._testInterrupt2_1)
- return d
- def _testInterrupt2_1(self, res):
- # the slave should raise a TimeoutError exception. In a normal build
- # process (i.e. one that uses step.RemoteShellCommand), this
- # exception will be handed to the Step, which will acquire an ERROR
- # status. In our test environment, it isn't such a big deal.
- self.failUnless(isinstance(res, failure.Failure),
- "res is not a Failure: %s" % (res,))
- self.failUnless(res.check(commands.TimeoutError))
- self.checkrc(-1)
- return
- # the command is still actually running. Start another command, to
- # make sure that a) the old command's output doesn't interfere with
- # the new one, and b) the old command's actual termination doesn't
- # break anything
- args = {'command': [sys.executable, self.sleepcmd, "5"],
- 'workdir': '.', 'timeout': 20}
- c = SlaveShellCommand(self.builder, None, args)
- d = c.start()
- d.addCallback(self._testInterrupt2_2)
- return d
- def _testInterrupt2_2(self, res):
- self.checkrc(0)
- # N.B.: under windows, the trial process hangs out for another few
- # seconds. I assume that the win32eventreactor is waiting for one of
- # the lingering child processes to really finish.
-
-haveProcess = interfaces.IReactorProcess(reactor, None)
-if runtime.platformType == 'posix':
- # test with PTYs also
- class ShellPTY(ShellBase, unittest.TestCase):
- usePTY = True
- if not haveProcess:
- ShellPTY.skip = "this reactor doesn't support IReactorProcess"
-if not haveProcess:
- Shell.skip = "this reactor doesn't support IReactorProcess"
diff --git a/buildbot/buildbot/test/test_slaves.py b/buildbot/buildbot/test/test_slaves.py
deleted file mode 100644
index 4005fc6..0000000
--- a/buildbot/buildbot/test/test_slaves.py
+++ /dev/null
@@ -1,991 +0,0 @@
-# -*- test-case-name: buildbot.test.test_slaves -*-
-
-# Portions copyright Canonical Ltd. 2009
-
-from twisted.trial import unittest
-from twisted.internet import defer, reactor
-from twisted.python import log, runtime, failure
-
-from buildbot.buildslave import AbstractLatentBuildSlave
-from buildbot.test.runutils import RunMixin
-from buildbot.sourcestamp import SourceStamp
-from buildbot.process.base import BuildRequest
-from buildbot.status.builder import SUCCESS
-from buildbot.status import mail
-from buildbot.slave import bot
-
-config_1 = """
-from buildbot.process import factory
-from buildbot.steps import dummy
-from buildbot.buildslave import BuildSlave
-s = factory.s
-
-BuildmasterConfig = c = {}
-c['slaves'] = [BuildSlave('bot1', 'sekrit'), BuildSlave('bot2', 'sekrit'),
- BuildSlave('bot3', 'sekrit')]
-c['schedulers'] = []
-c['slavePortnum'] = 0
-c['schedulers'] = []
-
-f1 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=1)])
-f2 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=2)])
-f3 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=3)])
-f4 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=5)])
-
-c['builders'] = [
- {'name': 'b1', 'slavenames': ['bot1','bot2','bot3'],
- 'builddir': 'b1', 'factory': f1},
- ]
-"""
-
-config_2 = config_1 + """
-
-c['builders'] = [
- {'name': 'b1', 'slavenames': ['bot1','bot2','bot3'],
- 'builddir': 'b1', 'factory': f2},
- ]
-
-"""
-
-config_busyness = config_1 + """
-c['builders'] = [
- {'name': 'b1', 'slavenames': ['bot1'],
- 'builddir': 'b1', 'factory': f3},
- {'name': 'b2', 'slavenames': ['bot1'],
- 'builddir': 'b2', 'factory': f4},
- ]
-"""
-
-class Slave(RunMixin, unittest.TestCase):
-
- def setUp(self):
- RunMixin.setUp(self)
- self.master.loadConfig(config_1)
- self.master.startService()
- d = self.connectSlave(["b1"])
- d.addCallback(lambda res: self.connectSlave(["b1"], "bot2"))
- return d
-
- def doBuild(self, buildername):
- br = BuildRequest("forced", SourceStamp(), 'test_builder')
- d = br.waitUntilFinished()
- self.control.getBuilder(buildername).requestBuild(br)
- return d
-
- def testSequence(self):
- # make sure both slaves appear in the list.
- attached_slaves = [c for c in self.master.botmaster.slaves.values()
- if c.slave]
- self.failUnlessEqual(len(attached_slaves), 2)
- b = self.master.botmaster.builders["b1"]
- self.failUnlessEqual(len(b.slaves), 2)
-
- # since the current scheduling algorithm is simple and does not
- # rotate or attempt any sort of load-balancing, two builds in
- # sequence should both use the first slave. This may change later if
- # we move to a more sophisticated scheme.
- b.CHOOSE_SLAVES_RANDOMLY = False
-
- d = self.doBuild("b1")
- d.addCallback(self._testSequence_1)
- return d
- def _testSequence_1(self, res):
- self.failUnlessEqual(res.getResults(), SUCCESS)
- self.failUnlessEqual(res.getSlavename(), "bot1")
-
- d = self.doBuild("b1")
- d.addCallback(self._testSequence_2)
- return d
- def _testSequence_2(self, res):
- self.failUnlessEqual(res.getSlavename(), "bot1")
-
-
- def testSimultaneous(self):
- # make sure we can actually run two builds at the same time
- d1 = self.doBuild("b1")
- d2 = self.doBuild("b1")
- d1.addCallback(self._testSimultaneous_1, d2)
- return d1
- def _testSimultaneous_1(self, res, d2):
- self.failUnlessEqual(res.getResults(), SUCCESS)
- b1_slavename = res.getSlavename()
- d2.addCallback(self._testSimultaneous_2, b1_slavename)
- return d2
- def _testSimultaneous_2(self, res, b1_slavename):
- self.failUnlessEqual(res.getResults(), SUCCESS)
- b2_slavename = res.getSlavename()
- # make sure the two builds were run by different slaves
- slavenames = [b1_slavename, b2_slavename]
- slavenames.sort()
- self.failUnlessEqual(slavenames, ["bot1", "bot2"])
-
- def testFallback1(self):
- # detach the first slave, verify that a build is run using the second
- # slave instead
- d = self.shutdownSlave("bot1", "b1")
- d.addCallback(self._testFallback1_1)
- return d
- def _testFallback1_1(self, res):
- attached_slaves = [c for c in self.master.botmaster.slaves.values()
- if c.slave]
- self.failUnlessEqual(len(attached_slaves), 1)
- self.failUnlessEqual(len(self.master.botmaster.builders["b1"].slaves),
- 1)
- d = self.doBuild("b1")
- d.addCallback(self._testFallback1_2)
- return d
- def _testFallback1_2(self, res):
- self.failUnlessEqual(res.getResults(), SUCCESS)
- self.failUnlessEqual(res.getSlavename(), "bot2")
-
- def testFallback2(self):
- # Disable the first slave, so that a slaveping will timeout. Then
- # start a build, and verify that the non-failing (second) one is
- # claimed for the build, and that the failing one is removed from the
- # list.
-
- b1 = self.master.botmaster.builders["b1"]
- # reduce the ping time so we'll failover faster
- b1.START_BUILD_TIMEOUT = 1
- assert b1.CHOOSE_SLAVES_RANDOMLY
- b1.CHOOSE_SLAVES_RANDOMLY = False
- self.disappearSlave("bot1", "b1", allowReconnect=False)
- d = self.doBuild("b1")
- d.addCallback(self._testFallback2_1)
- return d
- def _testFallback2_1(self, res):
- self.failUnlessEqual(res.getResults(), SUCCESS)
- self.failUnlessEqual(res.getSlavename(), "bot2")
- b1slaves = self.master.botmaster.builders["b1"].slaves
- self.failUnlessEqual(len(b1slaves), 1, "whoops: %s" % (b1slaves,))
- self.failUnlessEqual(b1slaves[0].slave.slavename, "bot2")
-
-
- def notFinished(self, brs):
- # utility method
- builds = brs.getBuilds()
- self.failIf(len(builds) > 1)
- if builds:
- self.failIf(builds[0].isFinished())
-
- def testDontClaimPingingSlave(self):
- # have two slaves connect for the same builder. Do something to the
- # first one so that slavepings are delayed (but do not fail
- # outright).
- timers = []
- self.slaves['bot1'].debugOpts["stallPings"] = (10, timers)
- br = BuildRequest("forced", SourceStamp(), 'test_builder')
- d1 = br.waitUntilFinished()
- self.master.botmaster.builders["b1"].CHOOSE_SLAVES_RANDOMLY = False
- self.control.getBuilder("b1").requestBuild(br)
- s1 = br.status # this is a BuildRequestStatus
- # give it a chance to start pinging
- d2 = defer.Deferred()
- d2.addCallback(self._testDontClaimPingingSlave_1, d1, s1, timers)
- reactor.callLater(1, d2.callback, None)
- return d2
- def _testDontClaimPingingSlave_1(self, res, d1, s1, timers):
- # now the first build is running (waiting on the ping), so start the
- # second build. This should claim the second slave, not the first,
- # because the first is busy doing the ping.
- self.notFinished(s1)
- d3 = self.doBuild("b1")
- d3.addCallback(self._testDontClaimPingingSlave_2, d1, s1, timers)
- return d3
- def _testDontClaimPingingSlave_2(self, res, d1, s1, timers):
- self.failUnlessEqual(res.getSlavename(), "bot2")
- self.notFinished(s1)
- # now let the ping complete
- self.failUnlessEqual(len(timers), 1)
- timers[0].reset(0)
- d1.addCallback(self._testDontClaimPingingSlave_3)
- return d1
- def _testDontClaimPingingSlave_3(self, res):
- self.failUnlessEqual(res.getSlavename(), "bot1")
-
-class FakeLatentBuildSlave(AbstractLatentBuildSlave):
-
- testcase = None
- stop_wait = None
- start_message = None
- stopped = testing_substantiation_timeout = False
-
- def start_instance(self):
- # responsible for starting instance that will try to connect with
- # this master
- # simulate having to do some work.
- d = defer.Deferred()
- if not self.testing_substantiation_timeout:
- reactor.callLater(0, self._start_instance, d)
- return d
-
- def _start_instance(self, d):
- self.testcase.connectOneSlave(self.slavename)
- d.callback(self.start_message)
-
- def stop_instance(self, fast=False):
- # responsible for shutting down instance
- # we're going to emulate dropping off the net.
-
- # simulate this by replacing the slave Broker's .dataReceived method
- # with one that just throws away all data.
- self.fast_stop_request = fast
- if self.slavename not in self.testcase.slaves:
- assert self.testing_substantiation_timeout
- self.stopped = True
- return defer.succeed(None)
- d = defer.Deferred()
- if self.stop_wait is None:
- self._stop_instance(d)
- else:
- reactor.callLater(self.stop_wait, self._stop_instance, d)
- return d
-
- def _stop_instance(self, d):
- try:
- s = self.testcase.slaves.pop(self.slavename)
- except KeyError:
- pass
- else:
- def discard(data):
- pass
- bot = s.getServiceNamed("bot")
- for buildername in self.slavebuilders:
- remote = bot.builders[buildername].remote
- if remote is None:
- continue
- broker = remote.broker
- broker.dataReceived = discard # seal its ears
- broker.transport.write = discard # and take away its voice
- # also discourage it from reconnecting once the connection goes away
- s.bf.continueTrying = False
- # stop the service for cleanliness
- s.stopService()
- d.callback(None)
-
-latent_config = """
-from buildbot.process import factory
-from buildbot.steps import dummy
-from buildbot.buildslave import BuildSlave
-from buildbot.test.test_slaves import FakeLatentBuildSlave
-s = factory.s
-
-BuildmasterConfig = c = {}
-c['slaves'] = [FakeLatentBuildSlave('bot1', 'sekrit',
- ),
- FakeLatentBuildSlave('bot2', 'sekrit',
- ),
- BuildSlave('bot3', 'sekrit')]
-c['schedulers'] = []
-c['slavePortnum'] = 0
-c['schedulers'] = []
-
-f1 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=1)])
-f2 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=2)])
-f3 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=3)])
-f4 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=5)])
-
-c['builders'] = [
- {'name': 'b1', 'slavenames': ['bot1','bot2','bot3'],
- 'builddir': 'b1', 'factory': f1},
- ]
-"""
-
-
-class LatentSlave(RunMixin, unittest.TestCase):
-
- def setUp(self):
- # debugging
- #import twisted.internet.base
- #twisted.internet.base.DelayedCall.debug = True
- # debugging
- RunMixin.setUp(self)
- self.master.loadConfig(latent_config)
- self.master.startService()
- self.bot1 = self.master.botmaster.slaves['bot1']
- self.bot2 = self.master.botmaster.slaves['bot2']
- self.bot3 = self.master.botmaster.slaves['bot3']
- self.bot1.testcase = self
- self.bot2.testcase = self
- self.b1 = self.master.botmaster.builders['b1']
-
- def doBuild(self, buildername):
- br = BuildRequest("forced", SourceStamp(), 'test_builder')
- d = br.waitUntilFinished()
- self.control.getBuilder(buildername).requestBuild(br)
- return d
-
- def testSequence(self):
-        # make sure both slaves appear in the builder. This happens
-        # automatically, without any attaching.
- self.assertEqual(len(self.b1.slaves), 2)
- self.assertEqual(sorted(sb.slave.slavename for sb in self.b1.slaves),
- ['bot1', 'bot2'])
- # These have not substantiated
- self.assertEqual([sb.slave.substantiated for sb in self.b1.slaves],
- [False, False])
- self.assertEqual([sb.slave.slave for sb in self.b1.slaves],
- [None, None])
- # we can mix and match latent slaves and normal slaves. ATM, they
- # are treated identically in terms of selecting slaves.
- d = self.connectSlave(builders=['b1'], slavename='bot3')
- d.addCallback(self._testSequence_1)
- return d
- def _testSequence_1(self, res):
- # now we have all three slaves. Two are latent slaves, and one is a
- # standard slave.
- self.assertEqual(sorted(sb.slave.slavename for sb in self.b1.slaves),
- ['bot1', 'bot2', 'bot3'])
- # Now it's time to try a build on one of the latent slaves,
- # substantiating it.
- # since the current scheduling algorithm is simple and does not
- # rotate or attempt any sort of load-balancing, two builds in
- # sequence should both use the first slave. This may change later if
- # we move to a more sophisticated scheme.
- self.b1.CHOOSE_SLAVES_RANDOMLY = False
-
- self.build_deferred = self.doBuild("b1")
- # now there's an event waiting for the slave to substantiate.
- e = self.b1.builder_status.getEvent(-1)
- self.assertEqual(e.text, ['substantiating'])
- # the substantiation_deferred is an internal stash of a deferred
- # that we'll grab so we can find the point at which the slave is
- # substantiated but the build has not yet started.
- d = self.bot1.substantiation_deferred
- self.assertNotIdentical(d, None)
- d.addCallback(self._testSequence_2)
- return d
- def _testSequence_2(self, res):
- # bot 1 is substantiated.
- self.assertNotIdentical(self.bot1.slave, None)
- self.failUnless(self.bot1.substantiated)
-        # the event has announced its success
- e = self.b1.builder_status.getEvent(-1)
- self.assertEqual(e.text, ['substantiate', 'success'])
- self.assertNotIdentical(e.finished, None)
- # now we'll wait for the build to complete
- d = self.build_deferred
- del self.build_deferred
- d.addCallback(self._testSequence_3)
- return d
- def _testSequence_3(self, res):
- # build was a success!
- self.failUnlessEqual(res.getResults(), SUCCESS)
- self.failUnlessEqual(res.getSlavename(), "bot1")
- # bot1 is substantiated now. bot2 has not.
- self.failUnless(self.bot1.substantiated)
- self.failIf(self.bot2.substantiated)
- # bot1 is waiting a bit to see if there will be another build before
- # it shuts down the instance ("insubstantiates")
- self.build_wait_timer = self.bot1.build_wait_timer
- self.assertNotIdentical(self.build_wait_timer, None)
- self.failUnless(self.build_wait_timer.active())
- self.assertApproximates(
- self.bot1.build_wait_timeout,
- self.build_wait_timer.time - runtime.seconds(),
- 2)
- # now we'll do another build
- d = self.doBuild("b1")
- # the slave is already substantiated, so no event is created
- e = self.b1.builder_status.getEvent(-1)
- self.assertNotEqual(e.text, ['substantiating'])
- # wait for the next build
- d.addCallback(self._testSequence_4)
- return d
- def _testSequence_4(self, res):
- # build was a success!
- self.failUnlessEqual(res.getResults(), SUCCESS)
- self.failUnlessEqual(res.getSlavename(), "bot1")
- # bot1 is still waiting, but with a new timer
- self.assertNotIdentical(self.bot1.build_wait_timer, None)
- self.assertNotIdentical(self.build_wait_timer,
- self.bot1.build_wait_timer)
- self.assertApproximates(
- self.bot1.build_wait_timeout,
- self.bot1.build_wait_timer.time - runtime.seconds(),
- 2)
- del self.build_wait_timer
- # We'll set the timer to fire sooner, and wait for it to fire.
- self.bot1.build_wait_timer.reset(0)
- d = defer.Deferred()
- reactor.callLater(1, d.callback, None)
- d.addCallback(self._testSequence_5)
- return d
- def _testSequence_5(self, res):
- # slave is insubstantiated
- self.assertIdentical(self.bot1.slave, None)
- self.failIf(self.bot1.substantiated)
- # Now we'll start up another build, to show that the shutdown left
- # things in such a state that we can restart.
- d = self.doBuild("b1")
- # the bot can return an informative message on success that the event
- # will render. Let's use a mechanism of our test latent bot to
- # demonstrate that.
- self.bot1.start_message = ['[instance id]', '[start-up time]']
- # here's our event again:
- self.e = self.b1.builder_status.getEvent(-1)
- self.assertEqual(self.e.text, ['substantiating'])
- d.addCallback(self._testSequence_6)
- return d
- def _testSequence_6(self, res):
- # build was a success!
- self.failUnlessEqual(res.getResults(), SUCCESS)
- self.failUnlessEqual(res.getSlavename(), "bot1")
-        # the event has announced its success. (Just imagine that
- # [instance id] and [start-up time] were actually valuable
- # information.)
- e = self.e
- del self.e
- self.assertEqual(
- e.text,
- ['substantiate', 'success', '[instance id]', '[start-up time]'])
- # Now we need to clean up the timer. We could just cancel it, but
-        # we'll go through the full dance once more to show we can.
- # We'll set the timer to fire sooner, and wait for it to fire.
- # Also, we'll set the build_slave to take a little bit longer to shut
- # down, to see that it doesn't affect anything.
- self.bot1.stop_wait = 2
- self.bot1.build_wait_timer.reset(0)
- d = defer.Deferred()
- reactor.callLater(1, d.callback, None)
- d.addCallback(self._testSequence_7)
- return d
- def _testSequence_7(self, res):
- # slave is insubstantiated
- self.assertIdentical(self.bot1.slave, None)
- self.assertNot(self.bot1.substantiated)
- # the remote is still not cleaned out. We'll wait for it.
- d = defer.Deferred()
- reactor.callLater(1, d.callback, None)
- return d
-
- def testNeverSubstantiated(self):
- # When a substantiation is requested, the slave may never appear.
- # This is a serious problem, and recovering from it is not really
- # handled well right now (in part because a way to handle it is not
- # clear). However, at the least, the status event will show a
-        # failure, and the slave will be told to insubstantiate, and to be
-        # removed from the botmaster as an available slave.
- # This tells our test bot to never start, and to not complain about
- # being told to stop without ever starting
- self.bot1.testing_substantiation_timeout = True
- # normally (by default) we have 20 minutes to try and connect to the
- # remote
- self.assertEqual(self.bot1.missing_timeout, 20*60)
- # for testing purposes, we'll put that down to a tenth of a second!
- self.bot1.missing_timeout = 0.1
- # since the current scheduling algorithm is simple and does not
- # rotate or attempt any sort of load-balancing, two builds in
- # sequence should both use the first slave. This may change later if
- # we move to a more sophisticated scheme.
- self.b1.CHOOSE_SLAVES_RANDOMLY = False
- # start a build
- self.build_deferred = self.doBuild('b1')
- # the event tells us we are instantiating, as usual
- e = self.b1.builder_status.getEvent(-1)
- self.assertEqual(e.text, ['substantiating'])
- # we'll see in a moment that the test flag we have to show that the
- # bot was told to insubstantiate has been fired. Here, we just verify
- # that it is ready to be fired.
- self.failIf(self.bot1.stopped)
- # That substantiation is going to fail. Let's wait for it.
- d = self.bot1.substantiation_deferred
- self.assertNotIdentical(d, None)
- d.addCallbacks(self._testNeverSubstantiated_BadSuccess,
- self._testNeverSubstantiated_1)
- return d
- def _testNeverSubstantiated_BadSuccess(self, res):
- self.fail('we should not have succeeded here.')
- def _testNeverSubstantiated_1(self, res):
- # ok, we failed.
- self.assertIdentical(self.bot1.slave, None)
- self.failIf(self.bot1.substantiated)
- self.failUnless(isinstance(res, failure.Failure))
- self.assertIdentical(self.bot1.substantiation_deferred, None)
- # our event informs us of this
- e1 = self.b1.builder_status.getEvent(-3)
- self.assertEqual(e1.text, ['substantiate', 'failed'])
- self.assertNotIdentical(e1.finished, None)
- # the slave is no longer available to build. The events show it...
- e2 = self.b1.builder_status.getEvent(-2)
- self.assertEqual(e2.text, ['removing', 'latent', 'bot1'])
- e3 = self.b1.builder_status.getEvent(-1)
- self.assertEqual(e3.text, ['disconnect', 'bot1'])
- # ...and the builder shows it.
- self.assertEqual(['bot2'],
- [sb.slave.slavename for sb in self.b1.slaves])
- # ideally, we would retry the build, but that infrastructure (which
- # would be used for other situations in the builder as well) does not
- # yet exist. Therefore the build never completes one way or the
- # other, just as if a normal slave detached.
-
- def testServiceStop(self):
- # if the slave has an instance when it is stopped, the slave should
- # be told to shut down.
- self.b1.CHOOSE_SLAVES_RANDOMLY = False
- d = self.doBuild("b1")
- d.addCallback(self._testServiceStop_1)
- return d
- def _testServiceStop_1(self, res):
- # build was a success!
- self.failUnlessEqual(res.getResults(), SUCCESS)
- self.failUnlessEqual(res.getSlavename(), "bot1")
- # bot 1 is substantiated.
- self.assertNotIdentical(self.bot1.slave, None)
- self.failUnless(self.bot1.substantiated)
- # now let's stop the bot.
- d = self.bot1.stopService()
- d.addCallback(self._testServiceStop_2)
- return d
- def _testServiceStop_2(self, res):
- # bot 1 is NOT substantiated.
- self.assertIdentical(self.bot1.slave, None)
- self.failIf(self.bot1.substantiated)
-
- def testPing(self):
-        # While a latent slave pings normally when it is substantiated (as
-        # happens behind the scenes when a build is requested), when
-        # it is insubstantial, the ping is a no-op success.
- self.assertIdentical(self.bot1.slave, None)
- self.failIf(self.bot1.substantiated)
- d = self.connectSlave(builders=['b1'], slavename='bot3')
- d.addCallback(self._testPing_1)
- return d
- def _testPing_1(self, res):
- self.assertEqual(sorted(sb.slave.slavename for sb in self.b1.slaves),
- ['bot1', 'bot2', 'bot3'])
- d = self.control.getBuilder('b1').ping()
- d.addCallback(self._testPing_2)
- return d
- def _testPing_2(self, res):
- # all three pings were successful
- self.assert_(res)
-        # but neither bot1 nor bot2 substantiated.
- self.assertIdentical(self.bot1.slave, None)
- self.failIf(self.bot1.substantiated)
- self.assertIdentical(self.bot2.slave, None)
- self.failIf(self.bot2.substantiated)
-
-
-class SlaveBusyness(RunMixin, unittest.TestCase):
-
- def setUp(self):
- RunMixin.setUp(self)
- self.master.loadConfig(config_busyness)
- self.master.startService()
- d = self.connectSlave(["b1", "b2"])
- return d
-
- def doBuild(self, buildername):
- br = BuildRequest("forced", SourceStamp(), 'test_builder')
- d = br.waitUntilFinished()
- self.control.getBuilder(buildername).requestBuild(br)
- return d
-
- def getRunningBuilds(self):
- return len(self.status.getSlave("bot1").getRunningBuilds())
-
- def testSlaveNotBusy(self):
- self.failUnlessEqual(self.getRunningBuilds(), 0)
- # now kick a build, wait for it to finish, then check again
- d = self.doBuild("b1")
- d.addCallback(self._testSlaveNotBusy_1)
- return d
-
- def _testSlaveNotBusy_1(self, res):
- self.failUnlessEqual(self.getRunningBuilds(), 0)
-
- def testSlaveBusyOneBuild(self):
- d1 = self.doBuild("b1")
- d2 = defer.Deferred()
- reactor.callLater(.5, d2.callback, None)
- d2.addCallback(self._testSlaveBusyOneBuild_1)
- d1.addCallback(self._testSlaveBusyOneBuild_finished_1)
- return defer.DeferredList([d1,d2])
-
- def _testSlaveBusyOneBuild_1(self, res):
- self.failUnlessEqual(self.getRunningBuilds(), 1)
-
- def _testSlaveBusyOneBuild_finished_1(self, res):
- self.failUnlessEqual(self.getRunningBuilds(), 0)
-
- def testSlaveBusyTwoBuilds(self):
- d1 = self.doBuild("b1")
- d2 = self.doBuild("b2")
- d3 = defer.Deferred()
- reactor.callLater(.5, d3.callback, None)
- d3.addCallback(self._testSlaveBusyTwoBuilds_1)
- d1.addCallback(self._testSlaveBusyTwoBuilds_finished_1, d2)
- return defer.DeferredList([d1,d3])
-
- def _testSlaveBusyTwoBuilds_1(self, res):
- self.failUnlessEqual(self.getRunningBuilds(), 2)
-
- def _testSlaveBusyTwoBuilds_finished_1(self, res, d2):
- self.failUnlessEqual(self.getRunningBuilds(), 1)
- d2.addCallback(self._testSlaveBusyTwoBuilds_finished_2)
- return d2
-
- def _testSlaveBusyTwoBuilds_finished_2(self, res):
- self.failUnlessEqual(self.getRunningBuilds(), 0)
-
- def testSlaveDisconnect(self):
- d1 = self.doBuild("b1")
- d2 = defer.Deferred()
- reactor.callLater(.5, d2.callback, None)
- d2.addCallback(self._testSlaveDisconnect_1)
- d1.addCallback(self._testSlaveDisconnect_finished_1)
- return defer.DeferredList([d1, d2])
-
- def _testSlaveDisconnect_1(self, res):
- self.failUnlessEqual(self.getRunningBuilds(), 1)
- return self.shutdownAllSlaves()
-
- def _testSlaveDisconnect_finished_1(self, res):
- self.failUnlessEqual(self.getRunningBuilds(), 0)
-
-config_3 = """
-from buildbot.process import factory
-from buildbot.steps import dummy
-from buildbot.buildslave import BuildSlave
-s = factory.s
-
-BuildmasterConfig = c = {}
-c['slaves'] = [BuildSlave('bot1', 'sekrit')]
-c['schedulers'] = []
-c['slavePortnum'] = 0
-c['schedulers'] = []
-
-f1 = factory.BuildFactory([s(dummy.Wait, handle='one')])
-f2 = factory.BuildFactory([s(dummy.Wait, handle='two')])
-f3 = factory.BuildFactory([s(dummy.Wait, handle='three')])
-
-c['builders'] = [
- {'name': 'b1', 'slavenames': ['bot1'],
- 'builddir': 'b1', 'factory': f1},
- ]
-"""
-
-config_4 = config_3 + """
-c['builders'] = [
- {'name': 'b1', 'slavenames': ['bot1'],
- 'builddir': 'b1', 'factory': f2},
- ]
-"""
-
-config_5 = config_3 + """
-c['builders'] = [
- {'name': 'b1', 'slavenames': ['bot1'],
- 'builddir': 'b1', 'factory': f3},
- ]
-"""
-
-from buildbot.slave.commands import waitCommandRegistry
-
-class Reconfig(RunMixin, unittest.TestCase):
-
- def setUp(self):
- RunMixin.setUp(self)
- self.master.loadConfig(config_3)
- self.master.startService()
- d = self.connectSlave(["b1"])
- return d
-
- def _one_started(self):
- log.msg("testReconfig._one_started")
- self.build1_started = True
- self.d1.callback(None)
- return self.d2
-
- def _two_started(self):
- log.msg("testReconfig._two_started")
- self.build2_started = True
- self.d3.callback(None)
- return self.d4
-
- def _three_started(self):
- log.msg("testReconfig._three_started")
- self.build3_started = True
- self.d5.callback(None)
- return self.d6
-
- def testReconfig(self):
- # reconfiguring a Builder should not interrupt any running Builds. No
- # queued BuildRequests should be lost. The next Build started should
- # use the new process.
- slave1 = self.slaves['bot1']
- bot1 = slave1.getServiceNamed('bot')
- sb1 = bot1.builders['b1']
- self.failUnless(isinstance(sb1, bot.SlaveBuilder))
- self.failUnless(sb1.running)
- b1 = self.master.botmaster.builders['b1']
- self.orig_b1 = b1
-
- self.d1 = d1 = defer.Deferred()
- self.d2 = d2 = defer.Deferred()
- self.d3, self.d4 = defer.Deferred(), defer.Deferred()
- self.d5, self.d6 = defer.Deferred(), defer.Deferred()
- self.build1_started = False
- self.build2_started = False
- self.build3_started = False
- waitCommandRegistry[("one","build1")] = self._one_started
- waitCommandRegistry[("two","build2")] = self._two_started
- waitCommandRegistry[("three","build3")] = self._three_started
-
- # use different branches to make sure these cannot be merged
- br1 = BuildRequest("build1", SourceStamp(branch="1"), 'test_builder')
- b1.submitBuildRequest(br1)
- br2 = BuildRequest("build2", SourceStamp(branch="2"), 'test_builder')
- b1.submitBuildRequest(br2)
- br3 = BuildRequest("build3", SourceStamp(branch="3"), 'test_builder')
- b1.submitBuildRequest(br3)
- self.requests = (br1, br2, br3)
- # all three are now in the queue
-
- # wait until the first one has started
- d1.addCallback(self._testReconfig_2)
- return d1
-
- def _testReconfig_2(self, res):
- log.msg("_testReconfig_2")
- # confirm that it is building
- brs = self.requests[0].status.getBuilds()
- self.failUnlessEqual(len(brs), 1)
- self.build1 = brs[0]
- self.failUnlessEqual(self.build1.getCurrentStep().getName(), "wait")
- # br1 is building, br2 and br3 are in the queue (in that order). Now
- # we reconfigure the Builder.
- self.failUnless(self.build1_started)
- d = self.master.loadConfig(config_4)
- d.addCallback(self._testReconfig_3)
- return d
-
- def _testReconfig_3(self, res):
- log.msg("_testReconfig_3")
- # now check to see that br1 is still building, and that br2 and br3
- # are in the queue of the new builder
- b1 = self.master.botmaster.builders['b1']
- self.failIfIdentical(b1, self.orig_b1)
- self.failIf(self.build1.isFinished())
- self.failUnlessEqual(self.build1.getCurrentStep().getName(), "wait")
- self.failUnlessEqual(len(b1.buildable), 2)
- self.failUnless(self.requests[1] in b1.buildable)
- self.failUnless(self.requests[2] in b1.buildable)
-
- # allow br1 to finish, and make sure its status is delivered normally
- d = self.requests[0].waitUntilFinished()
- d.addCallback(self._testReconfig_4)
- self.d2.callback(None)
- return d
-
- def _testReconfig_4(self, bs):
- log.msg("_testReconfig_4")
- self.failUnlessEqual(bs.getReason(), "build1")
- self.failUnless(bs.isFinished())
- self.failUnlessEqual(bs.getResults(), SUCCESS)
-
- # at this point, the first build has finished, and there is a pending
- # call to start the second build. Once that pending call fires, there
- # is a network roundtrip before the 'wait' RemoteCommand is delivered
- # to the slave. We need to wait for both events to happen before we
- # can check to make sure it is using the correct process. Just wait a
- # full second.
- d = defer.Deferred()
- d.addCallback(self._testReconfig_5)
- reactor.callLater(1, d.callback, None)
- return d
-
- def _testReconfig_5(self, res):
- log.msg("_testReconfig_5")
- # at this point the next build ought to be running
- b1 = self.master.botmaster.builders['b1']
- self.failUnlessEqual(len(b1.buildable), 1)
- self.failUnless(self.requests[2] in b1.buildable)
- self.failUnlessEqual(len(b1.building), 1)
- # and it ought to be using the new process
- self.failUnless(self.build2_started)
-
- # now, while the second build is running, change the config multiple
- # times.
-
- d = self.master.loadConfig(config_3)
- d.addCallback(lambda res: self.master.loadConfig(config_4))
- d.addCallback(lambda res: self.master.loadConfig(config_5))
- def _done(res):
- # then once that's done, allow the second build to finish and
- # wait for it to complete
- da = self.requests[1].waitUntilFinished()
- self.d4.callback(None)
- return da
- d.addCallback(_done)
- def _done2(res):
- # and once *that*'s done, wait another second to let the third
- # build start
- db = defer.Deferred()
- reactor.callLater(1, db.callback, None)
- return db
- d.addCallback(_done2)
- d.addCallback(self._testReconfig_6)
- return d
-
- def _testReconfig_6(self, res):
- log.msg("_testReconfig_6")
- # now check to see that the third build is running
- self.failUnless(self.build3_started)
-
- # we're done
-
-
-
-class Slave2(RunMixin, unittest.TestCase):
-
- revision = 0
-
- def setUp(self):
- RunMixin.setUp(self)
- self.master.loadConfig(config_1)
- self.master.startService()
-
- def doBuild(self, buildername, reason="forced"):
- # we need to prevent these builds from being merged, so we create
- # each of them with a different revision specifier. The revision is
- # ignored because our build process does not have a source checkout
- # step.
- self.revision += 1
- br = BuildRequest(reason, SourceStamp(revision=self.revision),
- 'test_builder')
- d = br.waitUntilFinished()
- self.control.getBuilder(buildername).requestBuild(br)
- return d
-
- def testFirstComeFirstServed(self):
- # submit three builds, then connect a slave which fails the
- # slaveping. The first build will claim the slave, do the slaveping,
- # give up, and re-queue the build. Verify that the build gets
- # re-queued in front of all other builds. This may be tricky, because
- # the other builds may attempt to claim the just-failed slave.
-
- d1 = self.doBuild("b1", "first")
- d2 = self.doBuild("b1", "second")
- #buildable = self.master.botmaster.builders["b1"].buildable
- #print [b.reason for b in buildable]
-
- # specifically, I want the poor build to get precedence over any
- # others that were waiting. To test this, we need more builds than
- # slaves.
-
- # now connect a broken slave. The first build started as soon as it
- # connects, so by the time we get to our _1 method, the ill-fated
- # build has already started.
- d = self.connectSlave(["b1"], opts={"failPingOnce": True})
- d.addCallback(self._testFirstComeFirstServed_1, d1, d2)
- return d
- def _testFirstComeFirstServed_1(self, res, d1, d2):
-        # the master has sent the slaveping. When this is received, it will
- # fail, causing the master to hang up on the slave. When it
- # reconnects, it should find the first build at the front of the
- # queue. If we simply wait for both builds to complete, then look at
- # the status logs, we should see that the builds ran in the correct
- # order.
-
- d = defer.DeferredList([d1,d2])
- d.addCallback(self._testFirstComeFirstServed_2)
- return d
- def _testFirstComeFirstServed_2(self, res):
- b = self.status.getBuilder("b1")
- builds = b.getBuild(0), b.getBuild(1)
- reasons = [build.getReason() for build in builds]
- self.failUnlessEqual(reasons, ["first", "second"])
-
-config_multi_builders = config_1 + """
-c['builders'] = [
- {'name': 'dummy', 'slavenames': ['bot1','bot2','bot3'],
- 'builddir': 'b1', 'factory': f2},
- {'name': 'dummy2', 'slavenames': ['bot1','bot2','bot3'],
- 'builddir': 'b2', 'factory': f2},
- {'name': 'dummy3', 'slavenames': ['bot1','bot2','bot3'],
- 'builddir': 'b3', 'factory': f2},
- ]
-
-"""
-
-config_mail_missing = config_1 + """
-c['slaves'] = [BuildSlave('bot1', 'sekrit', notify_on_missing='admin',
- missing_timeout=1)]
-c['builders'] = [
- {'name': 'dummy', 'slavenames': ['bot1'],
- 'builddir': 'b1', 'factory': f1},
- ]
-c['projectName'] = 'myproject'
-c['projectURL'] = 'myURL'
-"""
-
-class FakeMailer(mail.MailNotifier):
- def sendMessage(self, m, recipients):
- self.messages.append((m,recipients))
- return defer.succeed(None)
-
-class BuildSlave(RunMixin, unittest.TestCase):
- def test_track_builders(self):
- self.master.loadConfig(config_multi_builders)
- self.master.readConfig = True
- self.master.startService()
- d = self.connectSlave()
-
- def _check(res):
- b = self.master.botmaster.builders['dummy']
- self.failUnless(len(b.slaves) == 1) # just bot1
-
- bs = b.slaves[0].slave
- self.failUnless(len(bs.slavebuilders) == 3)
- self.failUnless(b in [sb.builder for sb in
- bs.slavebuilders.values()])
-
- d.addCallback(_check)
- return d
-
- def test_mail_on_missing(self):
- self.master.loadConfig(config_mail_missing)
- self.master.readConfig = True
- self.master.startService()
- fm = FakeMailer("buildbot@example.org")
- fm.messages = []
- fm.setServiceParent(self.master)
- self.master.statusTargets.append(fm)
-
- d = self.connectSlave()
- d.addCallback(self.stall, 1)
- d.addCallback(lambda res: self.shutdownSlave("bot1", "dummy"))
- def _not_yet(res):
- self.failIf(fm.messages)
- d.addCallback(_not_yet)
- # we reconnect right away, so the timer shouldn't fire
- d.addCallback(lambda res: self.connectSlave())
- d.addCallback(self.stall, 3)
- d.addCallback(_not_yet)
- d.addCallback(lambda res: self.shutdownSlave("bot1", "dummy"))
- d.addCallback(_not_yet)
- # now we let it sit disconnected for long enough for the timer to
- # fire
- d.addCallback(self.stall, 3)
- def _check(res):
- self.failUnlessEqual(len(fm.messages), 1)
- msg,recips = fm.messages[0]
- self.failUnlessEqual(recips, ["admin"])
- body = msg.as_string()
- self.failUnlessIn("To: admin", body)
- self.failUnlessIn("Subject: Buildbot: buildslave bot1 was lost",
- body)
- self.failUnlessIn("From: buildbot@example.org", body)
- self.failUnlessIn("working for 'myproject'", body)
- self.failUnlessIn("has noticed that the buildslave named bot1 went away",
- body)
- self.failUnlessIn("was 'one'", body)
- self.failUnlessIn("myURL", body)
- d.addCallback(_check)
- return d
-
- def stall(self, result, delay=1):
- d = defer.Deferred()
- reactor.callLater(delay, d.callback, result)
- return d
diff --git a/buildbot/buildbot/test/test_status.py b/buildbot/buildbot/test/test_status.py
deleted file mode 100644
index b3c162a..0000000
--- a/buildbot/buildbot/test/test_status.py
+++ /dev/null
@@ -1,1631 +0,0 @@
-# -*- test-case-name: buildbot.test.test_status -*-
-
-import email, os
-import operator
-
-from zope.interface import implements
-from twisted.internet import defer, reactor
-from twisted.trial import unittest
-
-from buildbot import interfaces
-from buildbot.sourcestamp import SourceStamp
-from buildbot.process.base import BuildRequest, Build
-from buildbot.status import builder, base, words, progress
-from buildbot.changes.changes import Change
-from buildbot.process.builder import Builder
-from time import sleep
-
-mail = None
-try:
- from buildbot.status import mail
-except ImportError:
- pass
-from buildbot.status import progress, client # NEEDS COVERAGE
-from buildbot.test.runutils import RunMixin, setupBuildStepStatus
-
-class MyStep:
- build = None
- def getName(self):
- return "step"
-
-class MyLogFileProducer(builder.LogFileProducer):
- # The reactor.callLater(0) in LogFileProducer.resumeProducing is a bit of
- # a nuisance from a testing point of view. This subclass adds a Deferred
- # to that call so we can find out when it is complete.
- def resumeProducing(self):
- d = defer.Deferred()
- reactor.callLater(0, self._resumeProducing, d)
- return d
- def _resumeProducing(self, d):
- builder.LogFileProducer._resumeProducing(self)
- reactor.callLater(0, d.callback, None)
-
-class MyLog(builder.LogFile):
- def __init__(self, basedir, name, text=None, step=None):
- self.fakeBuilderBasedir = basedir
- if not step:
- step = MyStep()
- builder.LogFile.__init__(self, step, name, name)
- if text:
- self.addStdout(text)
- self.finish()
- def getFilename(self):
- return os.path.join(self.fakeBuilderBasedir, self.name)
-
- def subscribeConsumer(self, consumer):
- p = MyLogFileProducer(self, consumer)
- d = p.resumeProducing()
- return d
-
-class MyHTMLLog(builder.HTMLLogFile):
- def __init__(self, basedir, name, html):
- step = MyStep()
- builder.HTMLLogFile.__init__(self, step, name, name, html)
-
-class MyLogSubscriber:
- def __init__(self):
- self.chunks = []
- def logChunk(self, build, step, log, channel, text):
- self.chunks.append((channel, text))
-
-class MyLogConsumer:
- def __init__(self, limit=None):
- self.chunks = []
- self.finished = False
- self.limit = limit
- def registerProducer(self, producer, streaming):
- self.producer = producer
- self.streaming = streaming
- def unregisterProducer(self):
- self.producer = None
- def writeChunk(self, chunk):
- self.chunks.append(chunk)
- if self.limit:
- self.limit -= 1
- if self.limit == 0:
- self.producer.pauseProducing()
- def finish(self):
- self.finished = True
-
-if mail:
- class MyMailer(mail.MailNotifier):
- def sendMessage(self, m, recipients):
- self.parent.messages.append((m, recipients))
-
-class MyStatus:
- def getBuildbotURL(self):
- return self.url
- def getURLForThing(self, thing):
- return None
- def getProjectName(self):
- return "myproj"
-
-class MyBuilder(builder.BuilderStatus):
- nextBuildNumber = 0
-
-class MyBuild(builder.BuildStatus):
- testlogs = []
- def __init__(self, parent, number, results):
- builder.BuildStatus.__init__(self, parent, number)
- self.results = results
- self.source = SourceStamp(revision="1.14")
- self.reason = "build triggered by changes"
- self.finished = True
- def getLogs(self):
- return self.testlogs
-
-class MyLookup:
- implements(interfaces.IEmailLookup)
-
- def getAddress(self, user):
- d = defer.Deferred()
- # With me now is Mr Thomas Walters of West Hartlepool who is totally
- # invisible.
- if user == "Thomas_Walters":
- d.callback(None)
- else:
- d.callback(user + "@" + "dev.com")
- return d
-
-def customTextMailMessage(attrs):
- logLines = 3
- text = list()
- text.append("STATUS: %s" % attrs['result'].title())
- text.append("")
- text.extend([c.asText() for c in attrs['changes']])
- text.append("")
- name, url, lines = attrs['logs'][-1]
- text.append("Last %d lines of '%s':" % (logLines, name))
- text.extend(["\t%s\n" % line for line in lines[len(lines)-logLines:]])
- text.append("")
- text.append("-buildbot")
- return ("\n".join(text), 'plain')
-
-def customHTMLMailMessage(attrs):
- logLines = 3
- text = list()
- text.append("<h3>STATUS <a href='%s'>%s</a>:</h3>" % (attrs['buildURL'],
- attrs['result'].title()))
- text.append("<h4>Recent Changes:</h4>")
- text.extend([c.asHTML() for c in attrs['changes']])
- name, url, lines = attrs['logs'][-1]
- text.append("<h4>Last %d lines of '%s':</h4>" % (logLines, name))
- text.append("<p>")
- text.append("<br>".join([line for line in lines[len(lines)-logLines:]]))
- text.append("</p>")
- text.append("<br>")
- text.append("<b>-<a href='%s'>buildbot</a></b>" % attrs['buildbotURL'])
- return ("\n".join(text), 'html')
-
-class Mail(unittest.TestCase):
-
- def setUp(self):
- self.builder = MyBuilder("builder1")
-
- def stall(self, res, timeout):
- d = defer.Deferred()
- reactor.callLater(timeout, d.callback, res)
- return d
-
- def makeBuild(self, number, results):
- return MyBuild(self.builder, number, results)
-
- def failUnlessIn(self, substring, string):
- self.failUnless(string.find(substring) != -1,
- "didn't see '%s' in '%s'" % (substring, string))
-
- def getProjectName(self):
- return "PROJECT"
-
- def getBuildbotURL(self):
- return "BUILDBOT_URL"
-
- def getURLForThing(self, thing):
- return None
-
- def testBuild1(self):
- mailer = MyMailer(fromaddr="buildbot@example.com",
- extraRecipients=["recip@example.com",
- "recip2@example.com"],
- lookup=mail.Domain("dev.com"))
- mailer.parent = self
- mailer.status = self
- self.messages = []
-
- b1 = self.makeBuild(3, builder.SUCCESS)
- b1.blamelist = ["bob"]
-
- mailer.buildFinished("builder1", b1, b1.results)
- self.failUnless(len(self.messages) == 1)
- m,r = self.messages.pop()
- t = m.as_string()
- self.failUnlessIn("To: bob@dev.com\n", t)
- self.failUnlessIn("CC: recip2@example.com, recip@example.com\n", t)
- self.failUnlessIn("From: buildbot@example.com\n", t)
- self.failUnlessIn("Subject: buildbot success in PROJECT on builder1\n", t)
- self.failUnlessIn("Date: ", t)
- self.failUnlessIn("Build succeeded!\n", t)
- self.failUnlessIn("Buildbot URL: BUILDBOT_URL\n", t)
-
- def testBuild2(self):
- mailer = MyMailer(fromaddr="buildbot@example.com",
- extraRecipients=["recip@example.com",
- "recip2@example.com"],
- lookup="dev.com",
- sendToInterestedUsers=False)
- mailer.parent = self
- mailer.status = self
- self.messages = []
-
- b1 = self.makeBuild(3, builder.SUCCESS)
- b1.blamelist = ["bob"]
-
- mailer.buildFinished("builder1", b1, b1.results)
- self.failUnless(len(self.messages) == 1)
- m,r = self.messages.pop()
- t = m.as_string()
- self.failUnlessIn("To: recip2@example.com, "
- "recip@example.com\n", t)
- self.failUnlessIn("From: buildbot@example.com\n", t)
- self.failUnlessIn("Subject: buildbot success in PROJECT on builder1\n", t)
- self.failUnlessIn("Build succeeded!\n", t)
- self.failUnlessIn("Buildbot URL: BUILDBOT_URL\n", t)
-
- def testBuildStatusCategory(self):
- # a status client only interested in a category should only receive
- # from that category
- mailer = MyMailer(fromaddr="buildbot@example.com",
- extraRecipients=["recip@example.com",
- "recip2@example.com"],
- lookup="dev.com",
- sendToInterestedUsers=False,
- categories=["debug"])
-
- mailer.parent = self
- mailer.status = self
- self.messages = []
-
- b1 = self.makeBuild(3, builder.SUCCESS)
- b1.blamelist = ["bob"]
-
- mailer.buildFinished("builder1", b1, b1.results)
- self.failIf(self.messages)
-
- def testBuilderCategory(self):
- # a builder in a certain category should notify status clients that
- # did not list categories, or categories including this one
- mailer1 = MyMailer(fromaddr="buildbot@example.com",
- extraRecipients=["recip@example.com",
- "recip2@example.com"],
- lookup="dev.com",
- sendToInterestedUsers=False)
- mailer2 = MyMailer(fromaddr="buildbot@example.com",
- extraRecipients=["recip@example.com",
- "recip2@example.com"],
- lookup="dev.com",
- sendToInterestedUsers=False,
- categories=["active"])
- mailer3 = MyMailer(fromaddr="buildbot@example.com",
- extraRecipients=["recip@example.com",
- "recip2@example.com"],
- lookup="dev.com",
- sendToInterestedUsers=False,
- categories=["active", "debug"])
-
- builderd = MyBuilder("builder2", "debug")
-
- mailer1.parent = self
- mailer1.status = self
- mailer2.parent = self
- mailer2.status = self
- mailer3.parent = self
- mailer3.status = self
- self.messages = []
-
- t = mailer1.builderAdded("builder2", builderd)
- self.assertEqual(len(mailer1.watched), 1)
- self.assertEqual(t, mailer1)
- t = mailer2.builderAdded("builder2", builderd)
- self.assertEqual(len(mailer2.watched), 0)
- self.assertEqual(t, None)
- t = mailer3.builderAdded("builder2", builderd)
- self.assertEqual(len(mailer3.watched), 1)
- self.assertEqual(t, mailer3)
-
- b2 = MyBuild(builderd, 3, builder.SUCCESS)
- b2.blamelist = ["bob"]
-
- mailer1.buildFinished("builder2", b2, b2.results)
- self.failUnlessEqual(len(self.messages), 1)
- self.messages = []
- mailer2.buildFinished("builder2", b2, b2.results)
- self.failUnlessEqual(len(self.messages), 0)
- self.messages = []
- mailer3.buildFinished("builder2", b2, b2.results)
- self.failUnlessEqual(len(self.messages), 1)
-
- def testCustomTextMessage(self):
- basedir = "test_custom_text_mesg"
- os.mkdir(basedir)
- mailer = MyMailer(fromaddr="buildbot@example.com", mode="problem",
- extraRecipients=["recip@example.com",
- "recip2@example.com"],
- lookup=MyLookup(),
- customMesg=customTextMailMessage)
- mailer.parent = self
- mailer.status = self
- self.messages = []
-
- b1 = self.makeBuild(4, builder.FAILURE)
- b1.setText(["snarkleack", "polarization", "failed"])
- b1.blamelist = ["dev3", "dev3", "dev3", "dev4",
- "Thomas_Walters"]
- b1.source.changes = (Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 123),
- Change(who = 'author2', files = ['file2'], comments = 'comment2', revision = 456))
- b1.testlogs = [MyLog(basedir, 'compile', "Compile log here\n"),
- MyLog(basedir, 'test', "Test log here\nTest 1 failed\nTest 2 failed\nTest 3 failed\nTest 4 failed\n")]
-
- mailer.buildFinished("builder1", b1, b1.results)
- m,r = self.messages.pop()
- t = m.as_string()
- #
- # Uncomment to review custom message
- #
- #self.fail(t)
- self.failUnlessIn("comment1", t)
- self.failUnlessIn("comment2", t)
- self.failUnlessIn("Test 4 failed", t)
-
-
- def testCustomHTMLMessage(self):
- basedir = "test_custom_HTML_mesg"
- os.mkdir(basedir)
- mailer = MyMailer(fromaddr="buildbot@example.com", mode="problem",
- extraRecipients=["recip@example.com",
- "recip2@example.com"],
- lookup=MyLookup(),
- customMesg=customHTMLMailMessage)
- mailer.parent = self
- mailer.status = self
- self.messages = []
-
- b1 = self.makeBuild(4, builder.FAILURE)
- b1.setText(["snarkleack", "polarization", "failed"])
- b1.blamelist = ["dev3", "dev3", "dev3", "dev4",
- "Thomas_Walters"]
- b1.source.changes = (Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 123),
- Change(who = 'author2', files = ['file2'], comments = 'comment2', revision = 456))
- b1.testlogs = [MyLog(basedir, 'compile', "Compile log here\n"),
- MyLog(basedir, 'test', "Test log here\nTest 1 failed\nTest 2 failed\nTest 3 failed\nTest 4 failed\n")]
-
- mailer.buildFinished("builder1", b1, b1.results)
- m,r = self.messages.pop()
- t = m.as_string()
- #
- # Uncomment to review custom message
- #
- #self.fail(t)
- self.failUnlessIn("<h4>Last 3 lines of 'step.test':</h4>", t)
- self.failUnlessIn("<p>Changed by: <b>author2</b><br />", t)
- self.failUnlessIn("Test 3 failed", t)
-
- def testShouldAttachLog(self):
- mailer = mail.MailNotifier(fromaddr="buildbot@example.com", addLogs=True)
- self.assertTrue(mailer._shouldAttachLog('anything'))
- mailer = mail.MailNotifier(fromaddr="buildbot@example.com", addLogs=False)
- self.assertFalse(mailer._shouldAttachLog('anything'))
- mailer = mail.MailNotifier(fromaddr="buildbot@example.com", addLogs=['something'])
- self.assertFalse(mailer._shouldAttachLog('anything'))
- self.assertTrue(mailer._shouldAttachLog('something'))
-
- def testFailure(self):
- mailer = MyMailer(fromaddr="buildbot@example.com", mode="problem",
- extraRecipients=["recip@example.com",
- "recip2@example.com"],
- lookup=MyLookup())
- mailer.parent = self
- mailer.status = self
- self.messages = []
-
- b1 = self.makeBuild(3, builder.SUCCESS)
- b1.blamelist = ["dev1", "dev2"]
- b2 = self.makeBuild(4, builder.FAILURE)
- b2.setText(["snarkleack", "polarization", "failed"])
- b2.blamelist = ["dev3", "dev3", "dev3", "dev4",
- "Thomas_Walters"]
- mailer.buildFinished("builder1", b1, b1.results)
- self.failIf(self.messages)
- mailer.buildFinished("builder1", b2, b2.results)
- self.failUnless(len(self.messages) == 1)
- m,r = self.messages.pop()
- t = m.as_string()
- self.failUnlessIn("To: dev3@dev.com, dev4@dev.com\n", t)
- self.failUnlessIn("CC: recip2@example.com, recip@example.com\n", t)
- self.failUnlessIn("From: buildbot@example.com\n", t)
- self.failUnlessIn("Subject: buildbot failure in PROJECT on builder1\n", t)
- self.failUnlessIn("The Buildbot has detected a new failure", t)
- self.failUnlessIn("BUILD FAILED: snarkleack polarization failed\n", t)
- self.failUnlessEqual(set(r), set(["dev3@dev.com", "dev4@dev.com",
- "recip2@example.com", "recip@example.com"]))
-
- def testLogs(self):
- basedir = "test_status_logs"
- os.mkdir(basedir)
- mailer = MyMailer(fromaddr="buildbot@example.com", addLogs=True,
- extraRecipients=["recip@example.com",
- "recip2@example.com"])
- mailer.parent = self
- mailer.status = self
- self.messages = []
-
- b1 = self.makeBuild(3, builder.WARNINGS)
- b1.testlogs = [MyLog(basedir, 'compile', "Compile log here\n"),
- MyLog(basedir,
- 'test', "Test log here\nTest 4 failed\n"),
- ]
- b1.text = ["unusual", "gnarzzler", "output"]
- mailer.buildFinished("builder1", b1, b1.results)
- self.failUnless(len(self.messages) == 1)
- m,r = self.messages.pop()
- t = m.as_string()
- self.failUnlessIn("Subject: buildbot warnings in PROJECT on builder1\n", t)
- m2 = email.message_from_string(t)
- p = m2.get_payload()
- self.failUnlessEqual(len(p), 3)
-
- self.failUnlessIn("Build Had Warnings: unusual gnarzzler output\n",
- p[0].get_payload())
-
- self.failUnlessEqual(p[1].get_filename(), "step.compile")
- self.failUnlessEqual(p[1].get_payload(), "Compile log here\n")
-
- self.failUnlessEqual(p[2].get_filename(), "step.test")
- self.failUnlessIn("Test log here\n", p[2].get_payload())
-
- def testMail(self):
- basedir = "test_status_mail"
- os.mkdir(basedir)
- dest = os.environ.get("BUILDBOT_TEST_MAIL")
- if not dest:
- raise unittest.SkipTest("define BUILDBOT_TEST_MAIL=dest to run this")
- mailer = mail.MailNotifier(fromaddr="buildbot@example.com",
- addLogs=True,
- extraRecipients=[dest])
- s = MyStatus()
- s.url = "project URL"
- mailer.status = s
-
- b1 = self.makeBuild(3, builder.SUCCESS)
- b1.testlogs = [MyLog(basedir, 'compile', "Compile log here\n"),
- MyLog(basedir,
- 'test', "Test log here\nTest 4 failed\n"),
- ]
-
- d = mailer.buildFinished("builder1", b1, b1.results)
- # When this fires, the mail has been sent, but the SMTP connection is
- # still up (because smtp.sendmail relies upon the server to hang up).
- # Spin for a moment to avoid the "unclean reactor" warning that Trial
- # gives us if we finish before the socket is disconnected. Really,
- # sendmail() ought to hang up the connection once it is finished:
- # otherwise a malicious SMTP server could make us consume lots of
- # memory.
- d.addCallback(self.stall, 0.1)
- return d
-
-if not mail:
- Mail.skip = "the Twisted Mail package is not installed"
-
-class Progress(unittest.TestCase):
- def testWavg(self):
- bp = progress.BuildProgress([])
- e = progress.Expectations(bp)
- # wavg(old, current)
- self.failUnlessEqual(e.wavg(None, None), None)
- self.failUnlessEqual(e.wavg(None, 3), 3)
- self.failUnlessEqual(e.wavg(3, None), 3)
- self.failUnlessEqual(e.wavg(3, 4), 3.5)
- e.decay = 0.1
- self.failUnlessEqual(e.wavg(3, 4), 3.1)
-
-
-class Results(unittest.TestCase):
-
- def testAddResults(self):
- b = builder.BuildStatus(builder.BuilderStatus("test"), 12)
- testname = ("buildbot", "test", "test_status", "Results",
- "testAddResults")
- r1 = builder.TestResult(name=testname,
- results=builder.SUCCESS,
- text=["passed"],
- logs={'output': ""},
- )
- b.addTestResult(r1)
-
- res = b.getTestResults()
- self.failUnlessEqual(res.keys(), [testname])
- t = res[testname]
- self.failUnless(interfaces.ITestResult.providedBy(t))
- self.failUnlessEqual(t.getName(), testname)
- self.failUnlessEqual(t.getResults(), builder.SUCCESS)
- self.failUnlessEqual(t.getText(), ["passed"])
- self.failUnlessEqual(t.getLogs(), {'output': ""})
-
-class Log(unittest.TestCase):
- def setUpClass(self):
- self.basedir = "status_log_add"
- os.mkdir(self.basedir)
-
- def testAdd(self):
- l = MyLog(self.basedir, "compile", step=13)
- self.failUnlessEqual(l.getName(), "compile")
- self.failUnlessEqual(l.getStep(), 13)
- l.addHeader("HEADER\n")
- l.addStdout("Some text\n")
- l.addStderr("Some error\n")
- l.addStdout("Some more text\n")
- self.failIf(l.isFinished())
- l.finish()
- self.failUnless(l.isFinished())
- self.failUnlessEqual(l.getText(),
- "Some text\nSome error\nSome more text\n")
- self.failUnlessEqual(l.getTextWithHeaders(),
- "HEADER\n" +
- "Some text\nSome error\nSome more text\n")
- self.failUnlessEqual(len(list(l.getChunks())), 4)
-
- self.failUnless(l.hasContents())
- try:
- os.unlink(l.getFilename())
- except OSError:
- os.unlink(l.getFilename() + ".bz2")
- self.failIf(l.hasContents())
-
- def TODO_testDuplicate(self):
- # create multiple logs for the same step with the same logname, make
- # sure their on-disk filenames are suitably uniquified. This
- # functionality actually lives in BuildStepStatus and BuildStatus, so
- # this test must involve more than just the MyLog class.
-
- # naieve approach, doesn't work
- l1 = MyLog(self.basedir, "duplicate")
- l1.addStdout("Some text\n")
- l1.finish()
- l2 = MyLog(self.basedir, "duplicate")
- l2.addStdout("Some more text\n")
- l2.finish()
- self.failIfEqual(l1.getFilename(), l2.getFilename())
-
- def testMerge1(self):
- l = MyLog(self.basedir, "merge1")
- l.addHeader("HEADER\n")
- l.addStdout("Some text\n")
- l.addStdout("Some more text\n")
- l.addStdout("more\n")
- l.finish()
- self.failUnlessEqual(l.getText(),
- "Some text\nSome more text\nmore\n")
- self.failUnlessEqual(l.getTextWithHeaders(),
- "HEADER\n" +
- "Some text\nSome more text\nmore\n")
- self.failUnlessEqual(len(list(l.getChunks())), 2)
-
- def testMerge2(self):
- l = MyLog(self.basedir, "merge2")
- l.addHeader("HEADER\n")
- for i in xrange(1000):
- l.addStdout("aaaa")
- for i in xrange(30):
- l.addStderr("bbbb")
- for i in xrange(10):
- l.addStdout("cc")
- target = 1000*"aaaa" + 30 * "bbbb" + 10 * "cc"
- self.failUnlessEqual(len(l.getText()), len(target))
- self.failUnlessEqual(l.getText(), target)
- l.finish()
- self.failUnlessEqual(len(l.getText()), len(target))
- self.failUnlessEqual(l.getText(), target)
- self.failUnlessEqual(len(list(l.getChunks())), 4)
-
- def testMerge3(self):
- l = MyLog(self.basedir, "merge3")
- l.chunkSize = 100
- l.addHeader("HEADER\n")
- for i in xrange(8):
- l.addStdout(10*"a")
- for i in xrange(8):
- l.addStdout(10*"a")
- self.failUnlessEqual(list(l.getChunks()),
- [(builder.HEADER, "HEADER\n"),
- (builder.STDOUT, 100*"a"),
- (builder.STDOUT, 60*"a")])
- l.finish()
- self.failUnlessEqual(l.getText(), 160*"a")
-
- def testReadlines(self):
- l = MyLog(self.basedir, "chunks1")
- l.addHeader("HEADER\n") # should be ignored
- l.addStdout("Some text\n")
- l.addStdout("Some More Text\nAnd Some More\n")
- l.addStderr("Some Stderr\n")
- l.addStdout("Last line\n")
- l.finish()
- alllines = list(l.readlines())
- self.failUnlessEqual(len(alllines), 4)
- self.failUnlessEqual(alllines[0], "Some text\n")
- self.failUnlessEqual(alllines[2], "And Some More\n")
- self.failUnlessEqual(alllines[3], "Last line\n")
- stderr = list(l.readlines(interfaces.LOG_CHANNEL_STDERR))
- self.failUnlessEqual(len(stderr), 1)
- self.failUnlessEqual(stderr[0], "Some Stderr\n")
- lines = l.readlines()
- if False: # TODO: l.readlines() is not yet an iterator
- # verify that it really is an iterator
- line0 = lines.next()
- self.failUnlessEqual(line0, "Some text\n")
- line1 = lines.next()
- line2 = lines.next()
- self.failUnlessEqual(line2, "And Some More\n")
-
-
- def testChunks(self):
- l = MyLog(self.basedir, "chunks2")
- c1 = l.getChunks()
- l.addHeader("HEADER\n")
- l.addStdout("Some text\n")
- self.failUnlessEqual("".join(l.getChunks(onlyText=True)),
- "HEADER\nSome text\n")
- c2 = l.getChunks()
-
- l.addStdout("Some more text\n")
- self.failUnlessEqual("".join(l.getChunks(onlyText=True)),
- "HEADER\nSome text\nSome more text\n")
- c3 = l.getChunks()
-
- l.addStdout("more\n")
- l.finish()
-
- self.failUnlessEqual(list(c1), [])
- self.failUnlessEqual(list(c2), [(builder.HEADER, "HEADER\n"),
- (builder.STDOUT, "Some text\n")])
- self.failUnlessEqual(list(c3), [(builder.HEADER, "HEADER\n"),
- (builder.STDOUT,
- "Some text\nSome more text\n")])
-
- self.failUnlessEqual(l.getText(),
- "Some text\nSome more text\nmore\n")
- self.failUnlessEqual(l.getTextWithHeaders(),
- "HEADER\n" +
- "Some text\nSome more text\nmore\n")
- self.failUnlessEqual(len(list(l.getChunks())), 2)
-
- def testUpgrade(self):
- l = MyLog(self.basedir, "upgrade")
- l.addHeader("HEADER\n")
- l.addStdout("Some text\n")
- l.addStdout("Some more text\n")
- l.addStdout("more\n")
- l.finish()
- self.failUnless(l.hasContents())
- # now doctor it to look like a 0.6.4-era non-upgraded logfile
- l.entries = list(l.getChunks())
- del l.filename
- try:
- os.unlink(l.getFilename() + ".bz2")
- except OSError:
- os.unlink(l.getFilename())
- # now make sure we can upgrade it
- l.upgrade("upgrade")
- self.failUnlessEqual(l.getText(),
- "Some text\nSome more text\nmore\n")
- self.failUnlessEqual(len(list(l.getChunks())), 2)
- self.failIf(l.entries)
-
- # now, do it again, but make it look like an upgraded 0.6.4 logfile
- # (i.e. l.filename is missing, but the contents are there on disk)
- l.entries = list(l.getChunks())
- del l.filename
- l.upgrade("upgrade")
- self.failUnlessEqual(l.getText(),
- "Some text\nSome more text\nmore\n")
- self.failUnlessEqual(len(list(l.getChunks())), 2)
- self.failIf(l.entries)
- self.failUnless(l.hasContents())
-
- def testHTMLUpgrade(self):
- l = MyHTMLLog(self.basedir, "upgrade", "log contents")
- l.upgrade("filename")
-
- def testSubscribe(self):
- l1 = MyLog(self.basedir, "subscribe1")
- l1.finish()
- self.failUnless(l1.isFinished())
-
- s = MyLogSubscriber()
- l1.subscribe(s, True)
- l1.unsubscribe(s)
- self.failIf(s.chunks)
-
- s = MyLogSubscriber()
- l1.subscribe(s, False)
- l1.unsubscribe(s)
- self.failIf(s.chunks)
-
- finished = []
- l2 = MyLog(self.basedir, "subscribe2")
- l2.waitUntilFinished().addCallback(finished.append)
- l2.addHeader("HEADER\n")
- s1 = MyLogSubscriber()
- l2.subscribe(s1, True)
- s2 = MyLogSubscriber()
- l2.subscribe(s2, False)
- self.failUnlessEqual(s1.chunks, [(builder.HEADER, "HEADER\n")])
- self.failUnlessEqual(s2.chunks, [])
-
- l2.addStdout("Some text\n")
- self.failUnlessEqual(s1.chunks, [(builder.HEADER, "HEADER\n"),
- (builder.STDOUT, "Some text\n")])
- self.failUnlessEqual(s2.chunks, [(builder.STDOUT, "Some text\n")])
- l2.unsubscribe(s1)
-
- l2.addStdout("Some more text\n")
- self.failUnlessEqual(s1.chunks, [(builder.HEADER, "HEADER\n"),
- (builder.STDOUT, "Some text\n")])
- self.failUnlessEqual(s2.chunks, [(builder.STDOUT, "Some text\n"),
- (builder.STDOUT, "Some more text\n"),
- ])
- self.failIf(finished)
- l2.finish()
- self.failUnlessEqual(finished, [l2])
-
- def testConsumer(self):
- l1 = MyLog(self.basedir, "consumer1")
- l1.finish()
- self.failUnless(l1.isFinished())
-
- s = MyLogConsumer()
- d = l1.subscribeConsumer(s)
- d.addCallback(self._testConsumer_1, s)
- return d
- testConsumer.timeout = 5
- def _testConsumer_1(self, res, s):
- self.failIf(s.chunks)
- self.failUnless(s.finished)
- self.failIf(s.producer) # producer should be registered and removed
-
- l2 = MyLog(self.basedir, "consumer2")
- l2.addHeader("HEADER\n")
- l2.finish()
- self.failUnless(l2.isFinished())
-
- s = MyLogConsumer()
- d = l2.subscribeConsumer(s)
- d.addCallback(self._testConsumer_2, s)
- return d
- def _testConsumer_2(self, res, s):
- self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n")])
- self.failUnless(s.finished)
- self.failIf(s.producer) # producer should be registered and removed
-
-
- l2 = MyLog(self.basedir, "consumer3")
- l2.chunkSize = 1000
- l2.addHeader("HEADER\n")
- l2.addStdout(800*"a")
- l2.addStdout(800*"a") # should now have two chunks on disk, 1000+600
- l2.addStdout(800*"b") # HEADER,1000+600*a on disk, 800*a in memory
- l2.addStdout(800*"b") # HEADER,1000+600*a,1000+600*b on disk
- l2.addStdout(200*"c") # HEADER,1000+600*a,1000+600*b on disk,
- # 200*c in memory
-
- s = MyLogConsumer(limit=1)
- d = l2.subscribeConsumer(s)
- d.addCallback(self._testConsumer_3, l2, s)
- return d
- def _testConsumer_3(self, res, l2, s):
- self.failUnless(s.streaming)
- self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n")])
- s.limit = 1
- d = s.producer.resumeProducing()
- d.addCallback(self._testConsumer_4, l2, s)
- return d
- def _testConsumer_4(self, res, l2, s):
- self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n"),
- (builder.STDOUT, 1000*"a"),
- ])
- s.limit = None
- d = s.producer.resumeProducing()
- d.addCallback(self._testConsumer_5, l2, s)
- return d
- def _testConsumer_5(self, res, l2, s):
- self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n"),
- (builder.STDOUT, 1000*"a"),
- (builder.STDOUT, 600*"a"),
- (builder.STDOUT, 1000*"b"),
- (builder.STDOUT, 600*"b"),
- (builder.STDOUT, 200*"c")])
- l2.addStdout(1000*"c") # HEADER,1600*a,1600*b,1200*c on disk
- self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n"),
- (builder.STDOUT, 1000*"a"),
- (builder.STDOUT, 600*"a"),
- (builder.STDOUT, 1000*"b"),
- (builder.STDOUT, 600*"b"),
- (builder.STDOUT, 200*"c"),
- (builder.STDOUT, 1000*"c")])
- l2.finish()
- self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n"),
- (builder.STDOUT, 1000*"a"),
- (builder.STDOUT, 600*"a"),
- (builder.STDOUT, 1000*"b"),
- (builder.STDOUT, 600*"b"),
- (builder.STDOUT, 200*"c"),
- (builder.STDOUT, 1000*"c")])
- self.failIf(s.producer)
- self.failUnless(s.finished)
-
- def testLargeSummary(self):
- bigtext = "a" * 200000 # exceed the NetstringReceiver 100KB limit
- l = MyLog(self.basedir, "large", bigtext)
- s = MyLogConsumer()
- d = l.subscribeConsumer(s)
- def _check(res):
- for ctype,chunk in s.chunks:
- self.failUnless(len(chunk) < 100000)
- merged = "".join([c[1] for c in s.chunks])
- self.failUnless(merged == bigtext)
- d.addCallback(_check)
- # when this fails, it fails with a timeout, and there is an exception
- # sent to log.err(). This AttributeError exception is in
- # NetstringReceiver.dataReceived where it does
- # self.transport.loseConnection() because of the NetstringParseError,
- # however self.transport is None
- return d
- testLargeSummary.timeout = 5
-
-
-class CompressLog(unittest.TestCase):
- def testCompressLogs(self):
- bss = setupBuildStepStatus("test-compress")
- bss.build.builder.setLogCompressionLimit(1024)
- l = bss.addLog('not-compress')
- l.addStdout('a' * 512)
- l.finish()
- lc = bss.addLog('to-compress')
- lc.addStdout('b' * 1024)
- lc.finish()
- d = bss.stepFinished(builder.SUCCESS)
- self.failUnless(d is not None)
- d.addCallback(self._verifyCompression, bss)
- return d
-
- def _verifyCompression(self, result, bss):
- self.failUnless(len(bss.getLogs()), 2)
- (ncl, cl) = bss.getLogs() # not compressed, compressed log
- self.failUnless(os.path.isfile(ncl.getFilename()))
- self.failIf(os.path.isfile(ncl.getFilename() + ".bz2"))
- self.failIf(os.path.isfile(cl.getFilename()))
- self.failUnless(os.path.isfile(cl.getFilename() + ".bz2"))
- content = ncl.getText()
- self.failUnless(len(content), 512)
- content = cl.getText()
- self.failUnless(len(content), 1024)
- pass
-
-config_base = """
-from buildbot.process import factory
-from buildbot.steps import dummy
-from buildbot.buildslave import BuildSlave
-s = factory.s
-
-f1 = factory.QuickBuildFactory('fakerep', 'cvsmodule', configure=None)
-
-f2 = factory.BuildFactory([
- s(dummy.Dummy, timeout=1),
- s(dummy.RemoteDummy, timeout=2),
- ])
-
-BuildmasterConfig = c = {}
-c['slaves'] = [BuildSlave('bot1', 'sekrit')]
-c['schedulers'] = []
-c['builders'] = []
-c['builders'].append({'name':'quick', 'slavename':'bot1',
- 'builddir': 'quickdir', 'factory': f1})
-c['slavePortnum'] = 0
-"""
-
-config_2 = config_base + """
-c['builders'] = [{'name': 'dummy', 'slavename': 'bot1',
- 'builddir': 'dummy1', 'factory': f2},
- {'name': 'testdummy', 'slavename': 'bot1',
- 'builddir': 'dummy2', 'factory': f2, 'category': 'test'}]
-"""
-
-class STarget(base.StatusReceiver):
- debug = False
-
- def __init__(self, mode):
- self.mode = mode
- self.events = []
- def announce(self):
- if self.debug:
- print self.events[-1]
-
- def builderAdded(self, name, builder):
- self.events.append(("builderAdded", name, builder))
- self.announce()
- if "builder" in self.mode:
- return self
- def builderChangedState(self, name, state):
- self.events.append(("builderChangedState", name, state))
- self.announce()
- def buildStarted(self, name, build):
- self.events.append(("buildStarted", name, build))
- self.announce()
- if "eta" in self.mode:
- self.eta_build = build.getETA()
- if "build" in self.mode:
- return self
- def buildETAUpdate(self, build, ETA):
- self.events.append(("buildETAUpdate", build, ETA))
- self.announce()
- def stepStarted(self, build, step):
- self.events.append(("stepStarted", build, step))
- self.announce()
- if 0 and "eta" in self.mode:
- print "TIMES", step.getTimes()
- print "ETA", step.getETA()
- print "EXP", step.getExpectations()
- if "step" in self.mode:
- return self
- def stepTextChanged(self, build, step, text):
- self.events.append(("stepTextChanged", step, text))
- def stepText2Changed(self, build, step, text2):
- self.events.append(("stepText2Changed", step, text2))
- def stepETAUpdate(self, build, step, ETA, expectations):
- self.events.append(("stepETAUpdate", build, step, ETA, expectations))
- self.announce()
- def logStarted(self, build, step, log):
- self.events.append(("logStarted", build, step, log))
- self.announce()
- def logFinished(self, build, step, log):
- self.events.append(("logFinished", build, step, log))
- self.announce()
- def stepFinished(self, build, step, results):
- self.events.append(("stepFinished", build, step, results))
- if 0 and "eta" in self.mode:
- print "post-EXP", step.getExpectations()
- self.announce()
- def buildFinished(self, name, build, results):
- self.events.append(("buildFinished", name, build, results))
- self.announce()
- def builderRemoved(self, name):
- self.events.append(("builderRemoved", name))
- self.announce()
-
-class Subscription(RunMixin, unittest.TestCase):
- # verify that StatusTargets can subscribe/unsubscribe properly
-
- def testSlave(self):
- m = self.master
- s = m.getStatus()
- self.t1 = t1 = STarget(["builder"])
- #t1.debug = True; print
- s.subscribe(t1)
- self.failUnlessEqual(len(t1.events), 0)
-
- self.t3 = t3 = STarget(["builder", "build", "step"])
- s.subscribe(t3)
-
- m.loadConfig(config_2)
- m.readConfig = True
- m.startService()
-
- self.failUnlessEqual(len(t1.events), 4)
- self.failUnlessEqual(t1.events[0][0:2], ("builderAdded", "dummy"))
- self.failUnlessEqual(t1.events[1],
- ("builderChangedState", "dummy", "offline"))
- self.failUnlessEqual(t1.events[2][0:2], ("builderAdded", "testdummy"))
- self.failUnlessEqual(t1.events[3],
- ("builderChangedState", "testdummy", "offline"))
- t1.events = []
-
- self.failUnlessEqual(s.getBuilderNames(), ["dummy", "testdummy"])
- self.failUnlessEqual(s.getBuilderNames(categories=['test']),
- ["testdummy"])
- self.s1 = s1 = s.getBuilder("dummy")
- self.failUnlessEqual(s1.getName(), "dummy")
- self.failUnlessEqual(s1.getState(), ("offline", []))
- self.failUnlessEqual(s1.getCurrentBuilds(), [])
- self.failUnlessEqual(s1.getLastFinishedBuild(), None)
- self.failUnlessEqual(s1.getBuild(-1), None)
- #self.failUnlessEqual(s1.getEvent(-1), foo("created"))
-
- # status targets should, upon being subscribed, immediately get a
- # list of all current builders matching their category
- self.t2 = t2 = STarget([])
- s.subscribe(t2)
- self.failUnlessEqual(len(t2.events), 2)
- self.failUnlessEqual(t2.events[0][0:2], ("builderAdded", "dummy"))
- self.failUnlessEqual(t2.events[1][0:2], ("builderAdded", "testdummy"))
-
- d = self.connectSlave(builders=["dummy", "testdummy"])
- d.addCallback(self._testSlave_1, t1)
- return d
-
- def _testSlave_1(self, res, t1):
- self.failUnlessEqual(len(t1.events), 2)
- self.failUnlessEqual(t1.events[0],
- ("builderChangedState", "dummy", "idle"))
- self.failUnlessEqual(t1.events[1],
- ("builderChangedState", "testdummy", "idle"))
- t1.events = []
-
- c = interfaces.IControl(self.master)
- req = BuildRequest("forced build for testing", SourceStamp(), 'test_builder')
- c.getBuilder("dummy").requestBuild(req)
- d = req.waitUntilFinished()
- d2 = self.master.botmaster.waitUntilBuilderIdle("dummy")
- dl = defer.DeferredList([d, d2])
- dl.addCallback(self._testSlave_2)
- return dl
-
- def _testSlave_2(self, res):
- # t1 subscribes to builds, but not anything lower-level
- ev = self.t1.events
- self.failUnlessEqual(len(ev), 4)
- self.failUnlessEqual(ev[0][0:3],
- ("builderChangedState", "dummy", "building"))
- self.failUnlessEqual(ev[1][0], "buildStarted")
- self.failUnlessEqual(ev[2][0:2]+ev[2][3:4],
- ("buildFinished", "dummy", builder.SUCCESS))
- self.failUnlessEqual(ev[3][0:3],
- ("builderChangedState", "dummy", "idle"))
-
- self.failUnlessEqual([ev[0] for ev in self.t3.events],
- ["builderAdded",
- "builderChangedState", # offline
- "builderAdded",
- "builderChangedState", # idle
- "builderChangedState", # offline
- "builderChangedState", # idle
- "builderChangedState", # building
- "buildStarted",
- "stepStarted", "stepETAUpdate",
- "stepTextChanged", "stepFinished",
- "stepStarted", "stepETAUpdate",
- "stepTextChanged", "logStarted", "logFinished",
- "stepTextChanged", "stepText2Changed",
- "stepFinished",
- "buildFinished",
- "builderChangedState", # idle
- ])
-
- b = self.s1.getLastFinishedBuild()
- self.failUnless(b)
- self.failUnlessEqual(b.getBuilder().getName(), "dummy")
- self.failUnlessEqual(b.getNumber(), 0)
- self.failUnlessEqual(b.getSourceStamp().branch, None)
- self.failUnlessEqual(b.getSourceStamp().patch, None)
- self.failUnlessEqual(b.getSourceStamp().revision, None)
- self.failUnlessEqual(b.getReason(), "forced build for testing")
- self.failUnlessEqual(b.getChanges(), ())
- self.failUnlessEqual(b.getResponsibleUsers(), [])
- self.failUnless(b.isFinished())
- self.failUnlessEqual(b.getText(), ['build', 'successful'])
- self.failUnlessEqual(b.getResults(), builder.SUCCESS)
-
- steps = b.getSteps()
- self.failUnlessEqual(len(steps), 2)
-
- eta = 0
- st1 = steps[0]
- self.failUnlessEqual(st1.getName(), "dummy")
- self.failUnless(st1.isFinished())
- self.failUnlessEqual(st1.getText(), ["delay", "1 secs"])
- start,finish = st1.getTimes()
- self.failUnless(0.5 < (finish-start) < 10)
- self.failUnlessEqual(st1.getExpectations(), [])
- self.failUnlessEqual(st1.getLogs(), [])
- eta += finish-start
-
- st2 = steps[1]
- self.failUnlessEqual(st2.getName(), "remote dummy")
- self.failUnless(st2.isFinished())
- self.failUnlessEqual(st2.getText(),
- ["remote", "delay", "2 secs"])
- start,finish = st2.getTimes()
- self.failUnless(1.5 < (finish-start) < 10)
- eta += finish-start
- self.failUnlessEqual(st2.getExpectations(), [('output', 38, None)])
- logs = st2.getLogs()
- self.failUnlessEqual(len(logs), 1)
- self.failUnlessEqual(logs[0].getName(), "stdio")
- self.failUnlessEqual(logs[0].getText(), "data")
-
- self.eta = eta
- # now we run it a second time, and we should have an ETA
-
- self.t4 = t4 = STarget(["builder", "build", "eta"])
- self.master.getStatus().subscribe(t4)
- c = interfaces.IControl(self.master)
- req = BuildRequest("forced build for testing", SourceStamp(), 'test_builder')
- c.getBuilder("dummy").requestBuild(req)
- d = req.waitUntilFinished()
- d2 = self.master.botmaster.waitUntilBuilderIdle("dummy")
- dl = defer.DeferredList([d, d2])
- dl.addCallback(self._testSlave_3)
- return dl
-
- def _testSlave_3(self, res):
- t4 = self.t4
- eta = self.eta
- self.failUnless(eta-1 < t4.eta_build < eta+1, # should be 3 seconds
- "t4.eta_build was %g, not in (%g,%g)"
- % (t4.eta_build, eta-1, eta+1))
-
-
-class Client(unittest.TestCase):
- def testAdaptation(self):
- b = builder.BuilderStatus("bname")
- b2 = client.makeRemote(b)
- self.failUnless(isinstance(b2, client.RemoteBuilder))
- b3 = client.makeRemote(None)
- self.failUnless(b3 is None)
-
-
-class ContactTester(unittest.TestCase):
- def test_notify_invalid_syntax(self):
- irc = MyContact()
- self.assertRaises(words.UsageError, lambda args, who: irc.command_NOTIFY(args, who), "", "mynick")
-
- def test_notify_list(self):
- irc = MyContact()
- irc.command_NOTIFY("list", "mynick")
- self.failUnlessEqual(irc.message, "The following events are being notified: []", "empty notify list")
-
- irc.message = ""
- irc.command_NOTIFY("on started", "mynick")
- self.failUnlessEqual(irc.message, "The following events are being notified: ['started']", "on started")
-
- irc.message = ""
- irc.command_NOTIFY("on finished", "mynick")
- self.failUnlessEqual(irc.message, "The following events are being notified: ['started', 'finished']", "on finished")
-
- irc.message = ""
- irc.command_NOTIFY("off", "mynick")
- self.failUnlessEqual(irc.message, "The following events are being notified: []", "off all")
-
- irc.message = ""
- irc.command_NOTIFY("on", "mynick")
- self.failUnlessEqual(irc.message, "The following events are being notified: ['started', 'finished']", "on default set")
-
- irc.message = ""
- irc.command_NOTIFY("off started", "mynick")
- self.failUnlessEqual(irc.message, "The following events are being notified: ['finished']", "off started")
-
- irc.message = ""
- irc.command_NOTIFY("on success failure exception", "mynick")
- self.failUnlessEqual(irc.message, "The following events are being notified: ['failure', 'finished', 'exception', 'success']", "on multiple events")
-
- def test_notification_default(self):
- irc = MyContact()
-
- my_builder = MyBuilder("builder78")
- my_build = MyIrcBuild(my_builder, 23, builder.SUCCESS)
-
- irc.buildStarted(my_builder.getName(), my_build)
- self.failUnlessEqual(irc.message, "", "No notification with default settings")
-
- irc.buildFinished(my_builder.getName(), my_build, None)
- self.failUnlessEqual(irc.message, "", "No notification with default settings")
-
- def test_notification_started(self):
- irc = MyContact()
-
- my_builder = MyBuilder("builder78")
- my_build = MyIrcBuild(my_builder, 23, builder.SUCCESS)
- my_build.changes = (
- Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 123),
- Change(who = 'author2', files = ['file2'], comments = 'comment2', revision = 456),
- )
-
- irc.command_NOTIFY("on started", "mynick")
-
- irc.message = ""
- irc.buildStarted(my_builder.getName(), my_build)
- self.failUnlessEqual(irc.message, "build #23 of builder78 started including [123, 456]", "Start notification generated with notify_events=['started']")
-
- irc.message = ""
- irc.buildFinished(my_builder.getName(), my_build, None)
- self.failUnlessEqual(irc.message, "", "No finished notification with notify_events=['started']")
-
- def test_notification_finished(self):
- irc = MyContact()
-
- my_builder = MyBuilder("builder834")
- my_build = MyIrcBuild(my_builder, 862, builder.SUCCESS)
- my_build.changes = (
- Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 943),
- )
-
- irc.command_NOTIFY("on finished", "mynick")
-
- irc.message = ""
- irc.buildStarted(my_builder.getName(), my_build)
- self.failUnlessEqual(irc.message, "", "No started notification with notify_events=['finished']")
-
- irc.message = ""
- irc.buildFinished(my_builder.getName(), my_build, None)
- self.failUnlessEqual(irc.message, "build #862 of builder834 is complete: Success [step1 step2] Build details are at http://myserver/mypath?build=765", "Finish notification generated with notify_events=['finished']")
-
- def test_notification_success(self):
- irc = MyContact()
-
- my_builder = MyBuilder("builder834")
- my_build = MyIrcBuild(my_builder, 862, builder.SUCCESS)
- my_build.changes = (
- Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 943),
- )
-
- irc.command_NOTIFY("on success", "mynick")
-
- irc.message = ""
- irc.buildStarted(my_builder.getName(), my_build)
- self.failUnlessEqual(irc.message, "", "No started notification with notify_events=['success']")
-
- irc.message = ""
- irc.buildFinished(my_builder.getName(), my_build, None)
- self.failUnlessEqual(irc.message, "build #862 of builder834 is complete: Success [step1 step2] Build details are at http://myserver/mypath?build=765", "Finish notification generated on success with notify_events=['success']")
-
- irc.message = ""
- my_build.results = builder.FAILURE
- irc.buildFinished(my_builder.getName(), my_build, None)
- self.failUnlessEqual(irc.message, "", "No finish notification generated on failure with notify_events=['success']")
-
- irc.message = ""
- my_build.results = builder.EXCEPTION
- irc.buildFinished(my_builder.getName(), my_build, None)
- self.failUnlessEqual(irc.message, "", "No finish notification generated on exception with notify_events=['success']")
-
- def test_notification_failed(self):
- irc = MyContact()
-
- my_builder = MyBuilder("builder834")
- my_build = MyIrcBuild(my_builder, 862, builder.FAILURE)
- my_build.changes = (
- Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 943),
- )
-
- irc.command_NOTIFY("on failure", "mynick")
-
- irc.message = ""
- irc.buildStarted(my_builder.getName(), my_build)
- self.failUnlessEqual(irc.message, "", "No started notification with notify_events=['failed']")
-
- irc.message = ""
- irc.buildFinished(my_builder.getName(), my_build, None)
- self.failUnlessEqual(irc.message, "build #862 of builder834 is complete: Failure [step1 step2] Build details are at http://myserver/mypath?build=765", "Finish notification generated on failure with notify_events=['failed']")
-
- irc.message = ""
- my_build.results = builder.SUCCESS
- irc.buildFinished(my_builder.getName(), my_build, None)
- self.failUnlessEqual(irc.message, "", "No finish notification generated on success with notify_events=['failed']")
-
- irc.message = ""
- my_build.results = builder.EXCEPTION
- irc.buildFinished(my_builder.getName(), my_build, None)
- self.failUnlessEqual(irc.message, "", "No finish notification generated on exception with notify_events=['failed']")
-
- def test_notification_exception(self):
- irc = MyContact()
-
- my_builder = MyBuilder("builder834")
- my_build = MyIrcBuild(my_builder, 862, builder.EXCEPTION)
- my_build.changes = (
- Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 943),
- )
-
- irc.command_NOTIFY("on exception", "mynick")
-
- irc.message = ""
- irc.buildStarted(my_builder.getName(), my_build)
- self.failUnlessEqual(irc.message, "", "No started notification with notify_events=['exception']")
-
- irc.message = ""
- irc.buildFinished(my_builder.getName(), my_build, None)
- self.failUnlessEqual(irc.message, "build #862 of builder834 is complete: Exception [step1 step2] Build details are at http://myserver/mypath?build=765", "Finish notification generated on failure with notify_events=['exception']")
-
- irc.message = ""
- my_build.results = builder.SUCCESS
- irc.buildFinished(my_builder.getName(), my_build, None)
- self.failUnlessEqual(irc.message, "", "No finish notification generated on success with notify_events=['exception']")
-
- irc.message = ""
- my_build.results = builder.FAILURE
- irc.buildFinished(my_builder.getName(), my_build, None)
- self.failUnlessEqual(irc.message, "", "No finish notification generated on exception with notify_events=['exception']")
-
- def do_x_to_y_notification_test(self, notify, previous_result, new_result, expected_msg):
- irc = MyContact()
- irc.command_NOTIFY("on %s" % notify, "mynick")
-
- my_builder = MyBuilder("builder834")
- my_build = MyIrcBuild(my_builder, 862, builder.FAILURE)
- my_build.changes = (
- Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 943),
- )
-
- previous_build = MyIrcBuild(my_builder, 861, previous_result)
- my_build.setPreviousBuild(previous_build)
-
- irc.message = ""
- my_build.results = new_result
- irc.buildFinished(my_builder.getName(), my_build, None)
- self.failUnlessEqual(irc.message, expected_msg, "Finish notification generated on failure with notify_events=['successToFailure']")
-
- def test_notification_successToFailure(self):
- self.do_x_to_y_notification_test(notify="successToFailure", previous_result=builder.SUCCESS, new_result=builder.FAILURE,
- expected_msg="build #862 of builder834 is complete: Failure [step1 step2] Build details are at http://myserver/mypath?build=765" )
-
- self.do_x_to_y_notification_test(notify="successToFailure", previous_result=builder.SUCCESS, new_result=builder.SUCCESS,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="successToFailure", previous_result=builder.SUCCESS, new_result=builder.WARNINGS,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="successToFailure", previous_result=builder.SUCCESS, new_result=builder.EXCEPTION,
- expected_msg = "" )
-
- def test_notification_successToWarnings(self):
- self.do_x_to_y_notification_test(notify="successToWarnings", previous_result=builder.SUCCESS, new_result=builder.WARNINGS,
- expected_msg="build #862 of builder834 is complete: Warnings [step1 step2] Build details are at http://myserver/mypath?build=765" )
-
- self.do_x_to_y_notification_test(notify="successToWarnings", previous_result=builder.SUCCESS, new_result=builder.SUCCESS,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="successToWarnings", previous_result=builder.SUCCESS, new_result=builder.FAILURE,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="successToWarnings", previous_result=builder.SUCCESS, new_result=builder.EXCEPTION,
- expected_msg = "" )
-
- def test_notification_successToException(self):
- self.do_x_to_y_notification_test(notify="successToException", previous_result=builder.SUCCESS, new_result=builder.EXCEPTION,
- expected_msg="build #862 of builder834 is complete: Exception [step1 step2] Build details are at http://myserver/mypath?build=765" )
-
- self.do_x_to_y_notification_test(notify="successToException", previous_result=builder.SUCCESS, new_result=builder.SUCCESS,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="successToException", previous_result=builder.SUCCESS, new_result=builder.FAILURE,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="successToException", previous_result=builder.SUCCESS, new_result=builder.WARNINGS,
- expected_msg = "" )
-
-
-
-
-
- def test_notification_failureToSuccess(self):
- self.do_x_to_y_notification_test(notify="failureToSuccess", previous_result=builder.FAILURE,new_result=builder.SUCCESS,
- expected_msg="build #862 of builder834 is complete: Success [step1 step2] Build details are at http://myserver/mypath?build=765" )
-
- self.do_x_to_y_notification_test(notify="failureToSuccess", previous_result=builder.FAILURE,new_result=builder.FAILURE,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="failureToSuccess", previous_result=builder.FAILURE,new_result=builder.WARNINGS,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="failureToSuccess", previous_result=builder.FAILURE,new_result=builder.EXCEPTION,
- expected_msg = "" )
-
- def test_notification_failureToWarnings(self):
- self.do_x_to_y_notification_test(notify="failureToWarnings", previous_result=builder.FAILURE, new_result=builder.WARNINGS,
- expected_msg="build #862 of builder834 is complete: Warnings [step1 step2] Build details are at http://myserver/mypath?build=765" )
-
- self.do_x_to_y_notification_test(notify="failureToWarnings", previous_result=builder.FAILURE, new_result=builder.SUCCESS,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="failureToWarnings", previous_result=builder.FAILURE, new_result=builder.FAILURE,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="failureToWarnings", previous_result=builder.FAILURE, new_result=builder.EXCEPTION,
- expected_msg = "" )
-
- def test_notification_failureToException(self):
- self.do_x_to_y_notification_test(notify="failureToException", previous_result=builder.FAILURE, new_result=builder.EXCEPTION,
- expected_msg="build #862 of builder834 is complete: Exception [step1 step2] Build details are at http://myserver/mypath?build=765" )
-
- self.do_x_to_y_notification_test(notify="failureToException", previous_result=builder.FAILURE, new_result=builder.SUCCESS,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="failureToException", previous_result=builder.FAILURE, new_result=builder.FAILURE,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="failureToException", previous_result=builder.FAILURE, new_result=builder.WARNINGS,
- expected_msg = "" )
-
-
-
-
-
- def test_notification_warningsToFailure(self):
- self.do_x_to_y_notification_test(notify="warningsToFailure", previous_result=builder.WARNINGS, new_result=builder.FAILURE,
- expected_msg="build #862 of builder834 is complete: Failure [step1 step2] Build details are at http://myserver/mypath?build=765" )
-
- self.do_x_to_y_notification_test(notify="warningsToFailure", previous_result=builder.WARNINGS, new_result=builder.SUCCESS,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="warningsToFailure", previous_result=builder.WARNINGS, new_result=builder.WARNINGS,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="warningsToFailure", previous_result=builder.WARNINGS, new_result=builder.EXCEPTION,
- expected_msg = "" )
-
- def test_notification_warningsToSuccess(self):
- self.do_x_to_y_notification_test(notify="warningsToSuccess", previous_result=builder.WARNINGS, new_result=builder.SUCCESS,
- expected_msg="build #862 of builder834 is complete: Success [step1 step2] Build details are at http://myserver/mypath?build=765" )
-
- self.do_x_to_y_notification_test(notify="warningsToSuccess", previous_result=builder.WARNINGS, new_result=builder.WARNINGS,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="warningsToSuccess", previous_result=builder.WARNINGS, new_result=builder.FAILURE,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="warningsToSuccess", previous_result=builder.WARNINGS, new_result=builder.EXCEPTION,
- expected_msg = "" )
-
- def test_notification_warningsToException(self):
- self.do_x_to_y_notification_test(notify="warningsToException", previous_result=builder.WARNINGS, new_result=builder.EXCEPTION,
- expected_msg="build #862 of builder834 is complete: Exception [step1 step2] Build details are at http://myserver/mypath?build=765" )
-
- self.do_x_to_y_notification_test(notify="warningsToException", previous_result=builder.WARNINGS, new_result=builder.SUCCESS,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="warningsToException", previous_result=builder.WARNINGS, new_result=builder.FAILURE,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="warningsToException", previous_result=builder.WARNINGS, new_result=builder.WARNINGS,
- expected_msg = "" )
-
-
-
-
- def test_notification_exceptionToFailure(self):
- self.do_x_to_y_notification_test(notify="exceptionToFailure", previous_result=builder.EXCEPTION, new_result=builder.FAILURE,
- expected_msg="build #862 of builder834 is complete: Failure [step1 step2] Build details are at http://myserver/mypath?build=765" )
-
- self.do_x_to_y_notification_test(notify="exceptionToFailure", previous_result=builder.EXCEPTION, new_result=builder.SUCCESS,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="exceptionToFailure", previous_result=builder.EXCEPTION, new_result=builder.WARNINGS,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="exceptionToFailure", previous_result=builder.EXCEPTION, new_result=builder.EXCEPTION,
- expected_msg = "" )
-
- def test_notification_exceptionToWarnings(self):
- self.do_x_to_y_notification_test(notify="exceptionToWarnings", previous_result=builder.EXCEPTION, new_result=builder.WARNINGS,
- expected_msg="build #862 of builder834 is complete: Warnings [step1 step2] Build details are at http://myserver/mypath?build=765" )
-
- self.do_x_to_y_notification_test(notify="exceptionToWarnings", previous_result=builder.EXCEPTION, new_result=builder.SUCCESS,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="exceptionToWarnings", previous_result=builder.EXCEPTION, new_result=builder.FAILURE,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="exceptionToWarnings", previous_result=builder.EXCEPTION, new_result=builder.EXCEPTION,
- expected_msg = "" )
-
- def test_notification_exceptionToSuccess(self):
- self.do_x_to_y_notification_test(notify="exceptionToSuccess", previous_result=builder.EXCEPTION, new_result=builder.SUCCESS,
- expected_msg="build #862 of builder834 is complete: Success [step1 step2] Build details are at http://myserver/mypath?build=765" )
-
- self.do_x_to_y_notification_test(notify="exceptionToSuccess", previous_result=builder.EXCEPTION, new_result=builder.EXCEPTION,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="exceptionToSuccess", previous_result=builder.EXCEPTION, new_result=builder.FAILURE,
- expected_msg = "" )
-
- self.do_x_to_y_notification_test(notify="exceptionToSuccess", previous_result=builder.EXCEPTION, new_result=builder.WARNINGS,
- expected_msg = "" )
-
- def test_notification_set_in_config(self):
- irc = MyContact(channel = MyChannel(notify_events = {'success': 1}))
-
- my_builder = MyBuilder("builder834")
- my_build = MyIrcBuild(my_builder, 862, builder.SUCCESS)
- my_build.changes = (
- Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 943),
- )
-
- irc.message = ""
- irc.buildFinished(my_builder.getName(), my_build, None)
- self.failUnlessEqual(irc.message, "build #862 of builder834 is complete: Success [step1 step2] Build details are at http://myserver/mypath?build=765", "Finish notification generated on success with notify_events=['success']")
-
-class MyIrcBuild(builder.BuildStatus):
- results = None
-
- def __init__(self, parent, number, results):
- builder.BuildStatus.__init__(self, parent, number)
- self.results = results
- self.previousBuild = None
-
- def getResults(self):
- return self.results
-
- def getText(self):
- return ('step1', 'step2')
-
- def setPreviousBuild(self, pb):
- self.previousBuild = pb
-
- def getPreviousBuild(self):
- return self.previousBuild
-
-class URLProducer:
- def getURLForThing(self, build):
- return 'http://myserver/mypath?build=765'
-
-class MyChannel:
- categories = None
- status = URLProducer()
- notify_events = {}
-
- def __init__(self, notify_events = {}):
- self.notify_events = notify_events
-
-class MyContact(words.Contact):
- message = ""
-
- def __init__(self, channel = MyChannel()):
- words.Contact.__init__(self, channel)
- self.message = ""
-
- def subscribe_to_build_events(self):
- pass
-
- def unsubscribe_from_build_events(self):
- pass
-
- def send(self, msg):
- self.message += msg
-
-class StepStatistics(unittest.TestCase):
- def testStepStatistics(self):
- status = builder.BuildStatus(builder.BuilderStatus("test"), 123)
- status.addStepWithName('step1')
- status.addStepWithName('step2')
- status.addStepWithName('step3')
- status.addStepWithName('step4')
-
- steps = status.getSteps()
- (step1, step2, step3, step4) = steps
-
- step1.setStatistic('test-prop', 1)
- step3.setStatistic('test-prop', 2)
- step4.setStatistic('test-prop', 4)
-
- step1.setStatistic('other-prop', 27)
- # Just to have some other properties around
-
- self.failUnlessEqual(step1.getStatistic('test-prop'), 1,
- 'Retrieve an existing property')
- self.failUnlessEqual(step1.getStatistic('test-prop', 99), 1,
- "Don't default an existing property")
- self.failUnlessEqual(step2.getStatistic('test-prop', 99), 99,
- 'Default a non-existant property')
-
- self.failUnlessEqual(
- status.getSummaryStatistic('test-prop', operator.add), 7,
- 'Sum property across the build')
-
- self.failUnlessEqual(
- status.getSummaryStatistic('test-prop', operator.add, 13), 20,
- 'Sum property across the build with initial value')
-
-class BuildExpectation(unittest.TestCase):
- class MyBuilderStatus:
- implements(interfaces.IBuilderStatus)
-
- def setSlavenames(self, slaveName):
- pass
-
- class MyBuilder(Builder):
- def __init__(self, name):
- Builder.__init__(self, {
- 'name': name,
- 'builddir': '/tmp/somewhere',
- 'factory': 'aFactory'
- }, BuildExpectation.MyBuilderStatus())
-
- class MyBuild(Build):
- def __init__(self, b):
- self.builder = b
- self.remote = None
-
- step1_progress = progress.StepProgress('step1', ['elapsed'])
- self.progress = progress.BuildProgress([step1_progress])
- step1_progress.setBuildProgress(self.progress)
-
- step1_progress.start()
- sleep(1);
- step1_progress.finish()
-
- self.deferred = defer.Deferred()
- self.locks = []
- self.build_status = builder.BuildStatus(b.builder_status, 1)
-
-
- def testBuildExpectation_BuildSuccess(self):
- b = BuildExpectation.MyBuilder("builder1")
- build = BuildExpectation.MyBuild(b)
-
- build.buildFinished(['sometext'], builder.SUCCESS)
- self.failIfEqual(b.expectations.expectedBuildTime(), 0, 'Non-Zero expectation for a failed build')
-
- def testBuildExpectation_BuildFailure(self):
- b = BuildExpectation.MyBuilder("builder1")
- build = BuildExpectation.MyBuild(b)
-
- build.buildFinished(['sometext'], builder.FAILURE)
- self.failUnlessEqual(b.expectations, None, 'Zero expectation for a failed build')
diff --git a/buildbot/buildbot/test/test_steps.py b/buildbot/buildbot/test/test_steps.py
deleted file mode 100644
index 880658c..0000000
--- a/buildbot/buildbot/test/test_steps.py
+++ /dev/null
@@ -1,788 +0,0 @@
-# -*- test-case-name: buildbot.test.test_steps -*-
-
-# create the BuildStep with a fake .remote instance that logs the
-# .callRemote invocations and compares them against the expected calls. Then
-# the test harness should send statusUpdate() messages in with assorted
-# data, eventually calling remote_complete(). Then we can verify that the
-# Step's rc was correct, and that the status it was supposed to return
-# matches.
-
-# sometimes, .callRemote should raise an exception because of a stale
-# reference. Sometimes it should errBack with an UnknownCommand failure.
-# Or other failure.
-
-# todo: test batched updates, by invoking remote_update(updates) instead of
-# statusUpdate(update). Also involves interrupted builds.
-
-import os
-
-from twisted.trial import unittest
-from twisted.internet import reactor, defer
-
-from buildbot.sourcestamp import SourceStamp
-from buildbot.process import buildstep, base, factory
-from buildbot.buildslave import BuildSlave
-from buildbot.steps import shell, source, python, master
-from buildbot.status import builder
-from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE
-from buildbot.test.runutils import RunMixin, rmtree
-from buildbot.test.runutils import makeBuildStep, StepTester
-from buildbot.slave import commands, registry
-
-
-class MyShellCommand(shell.ShellCommand):
- started = False
- def runCommand(self, c):
- self.started = True
- self.rc = c
- return shell.ShellCommand.runCommand(self, c)
-
-class FakeBuild:
- pass
-class FakeBuilder:
- statusbag = None
- name = "fakebuilder"
-class FakeSlaveBuilder:
- def getSlaveCommandVersion(self, command, oldversion=None):
- return "1.10"
-
-class FakeRemote:
- def __init__(self):
- self.events = []
- self.remoteCalls = 0
- #self.callRemoteNotifier = None
- def callRemote(self, methname, *args):
- event = ["callRemote", methname, args]
- self.events.append(event)
-## if self.callRemoteNotifier:
-## reactor.callLater(0, self.callRemoteNotifier, event)
- self.remoteCalls += 1
- self.deferred = defer.Deferred()
- return self.deferred
- def notifyOnDisconnect(self, callback):
- pass
- def dontNotifyOnDisconnect(self, callback):
- pass
-
-
-class BuildStep(unittest.TestCase):
-
- def setUp(self):
- rmtree("test_steps")
- self.builder = FakeBuilder()
- self.builder_status = builder.BuilderStatus("fakebuilder")
- self.builder_status.basedir = "test_steps"
- self.builder_status.nextBuildNumber = 0
- os.mkdir(self.builder_status.basedir)
- self.build_status = self.builder_status.newBuild()
- req = base.BuildRequest("reason", SourceStamp(), 'test_builder')
- self.build = base.Build([req])
- self.build.build_status = self.build_status # fake it
- self.build.builder = self.builder
- self.build.slavebuilder = FakeSlaveBuilder()
- self.remote = FakeRemote()
- self.finished = 0
-
- def callback(self, results):
- self.failed = 0
- self.failure = None
- self.results = results
- self.finished = 1
- def errback(self, failure):
- self.failed = 1
- self.failure = failure
- self.results = None
- self.finished = 1
-
- def testShellCommand1(self):
- cmd = "argle bargle"
- dir = "murkle"
- self.expectedEvents = []
- buildstep.RemoteCommand.commandCounter[0] = 3
- c = MyShellCommand(workdir=dir, command=cmd, timeout=10)
- c.setBuild(self.build)
- c.setBuildSlave(BuildSlave("name", "password"))
- self.assertEqual(self.remote.events, self.expectedEvents)
- c.step_status = self.build_status.addStepWithName("myshellcommand")
- d = c.startStep(self.remote)
- self.failUnless(c.started)
- d.addCallbacks(self.callback, self.errback)
- d2 = self.poll()
- d2.addCallback(self._testShellCommand1_2, c)
- return d2
- testShellCommand1.timeout = 10
-
- def poll(self, ignored=None):
- # TODO: This is gross, but at least it's no longer using
- # reactor.iterate() . Still, get rid of this some day soon.
- if self.remote.remoteCalls == 0:
- d = defer.Deferred()
- d.addCallback(self.poll)
- reactor.callLater(0.1, d.callback, None)
- return d
- return defer.succeed(None)
-
- def _testShellCommand1_2(self, res, c):
- rc = c.rc
- self.expectedEvents.append(["callRemote", "startCommand",
- (rc, "3",
- "shell",
- {'command': "argle bargle",
- 'workdir': "murkle",
- 'want_stdout': 1,
- 'want_stderr': 1,
- 'logfiles': {},
- 'timeout': 10,
- 'usePTY': 'slave-config',
- 'env': None}) ] )
- self.assertEqual(self.remote.events, self.expectedEvents)
-
- # we could do self.remote.deferred.errback(UnknownCommand) here. We
- # could also do .callback(), but generally the master end silently
- # ignores the slave's ack
-
- logs = c.step_status.getLogs()
- for log in logs:
- if log.getName() == "log":
- break
-
- rc.remoteUpdate({'header':
- "command 'argle bargle' in dir 'murkle'\n\n"})
- rc.remoteUpdate({'stdout': "foo\n"})
- self.assertEqual(log.getText(), "foo\n")
- self.assertEqual(log.getTextWithHeaders(),
- "command 'argle bargle' in dir 'murkle'\n\n"
- "foo\n")
- rc.remoteUpdate({'stderr': "bar\n"})
- self.assertEqual(log.getText(), "foo\nbar\n")
- self.assertEqual(log.getTextWithHeaders(),
- "command 'argle bargle' in dir 'murkle'\n\n"
- "foo\nbar\n")
- rc.remoteUpdate({'rc': 0})
- self.assertEqual(rc.rc, 0)
-
- rc.remote_complete()
- # that should fire the Deferred
- d = self.poll2()
- d.addCallback(self._testShellCommand1_3)
- return d
-
- def poll2(self, ignored=None):
- if not self.finished:
- d = defer.Deferred()
- d.addCallback(self.poll2)
- reactor.callLater(0.1, d.callback, None)
- return d
- return defer.succeed(None)
-
- def _testShellCommand1_3(self, res):
- self.assertEqual(self.failed, 0)
- self.assertEqual(self.results, 0)
-
-
-class MyObserver(buildstep.LogObserver):
- out = ""
- def outReceived(self, data):
- self.out = self.out + data
-
-class Steps(unittest.TestCase):
- def testMultipleStepInstances(self):
- steps = [
- (source.CVS, {'cvsroot': "root", 'cvsmodule': "module"}),
- (shell.Configure, {'command': "./configure"}),
- (shell.Compile, {'command': "make"}),
- (shell.Compile, {'command': "make more"}),
- (shell.Compile, {'command': "make evenmore"}),
- (shell.Test, {'command': "make test"}),
- (shell.Test, {'command': "make testharder"}),
- ]
- f = factory.ConfigurableBuildFactory(steps)
- req = base.BuildRequest("reason", SourceStamp(), 'test_builder')
- b = f.newBuild([req])
- #for s in b.steps: print s.name
-
- def failUnlessClones(self, s1, attrnames):
- f1 = s1.getStepFactory()
- f,args = f1
- s2 = f(**args)
- for name in attrnames:
- self.failUnlessEqual(getattr(s1, name), getattr(s2, name))
-
- def clone(self, s1):
- f1 = s1.getStepFactory()
- f,args = f1
- s2 = f(**args)
- return s2
-
- def testClone(self):
- s1 = shell.ShellCommand(command=["make", "test"],
- timeout=1234,
- workdir="here",
- description="yo",
- descriptionDone="yoyo",
- env={'key': 'value'},
- want_stdout=False,
- want_stderr=False,
- logfiles={"name": "filename"},
- )
- shellparms = (buildstep.BuildStep.parms +
- ("remote_kwargs description descriptionDone "
- "command logfiles").split() )
- self.failUnlessClones(s1, shellparms)
-
-
- # test the various methods available to buildsteps
-
- def test_getProperty(self):
- s = makeBuildStep("test_steps.Steps.test_getProperty")
- bs = s.step_status.getBuild()
-
- s.setProperty("prop1", "value1", "test")
- s.setProperty("prop2", "value2", "test")
- self.failUnlessEqual(s.getProperty("prop1"), "value1")
- self.failUnlessEqual(bs.getProperty("prop1"), "value1")
- self.failUnlessEqual(s.getProperty("prop2"), "value2")
- self.failUnlessEqual(bs.getProperty("prop2"), "value2")
- s.setProperty("prop1", "value1a", "test")
- self.failUnlessEqual(s.getProperty("prop1"), "value1a")
- self.failUnlessEqual(bs.getProperty("prop1"), "value1a")
-
-
- def test_addURL(self):
- s = makeBuildStep("test_steps.Steps.test_addURL")
- s.addURL("coverage", "http://coverage.example.org/target")
- s.addURL("icon", "http://coverage.example.org/icon.png")
- bs = s.step_status
- links = bs.getURLs()
- expected = {"coverage": "http://coverage.example.org/target",
- "icon": "http://coverage.example.org/icon.png",
- }
- self.failUnlessEqual(links, expected)
-
- def test_addLog(self):
- s = makeBuildStep("test_steps.Steps.test_addLog")
- l = s.addLog("newlog")
- l.addStdout("some stdout here")
- l.finish()
- bs = s.step_status
- logs = bs.getLogs()
- self.failUnlessEqual(len(logs), 1)
- l1 = logs[0]
- self.failUnlessEqual(l1.getText(), "some stdout here")
- l1a = s.getLog("newlog")
- self.failUnlessEqual(l1a.getText(), "some stdout here")
-
- def test_addHTMLLog(self):
- s = makeBuildStep("test_steps.Steps.test_addHTMLLog")
- l = s.addHTMLLog("newlog", "some html here")
- bs = s.step_status
- logs = bs.getLogs()
- self.failUnlessEqual(len(logs), 1)
- l1 = logs[0]
- self.failUnless(isinstance(l1, builder.HTMLLogFile))
- self.failUnlessEqual(l1.getText(), "some html here")
-
- def test_addCompleteLog(self):
- s = makeBuildStep("test_steps.Steps.test_addCompleteLog")
- l = s.addCompleteLog("newlog", "some stdout here")
- bs = s.step_status
- logs = bs.getLogs()
- self.failUnlessEqual(len(logs), 1)
- l1 = logs[0]
- self.failUnlessEqual(l1.getText(), "some stdout here")
- l1a = s.getLog("newlog")
- self.failUnlessEqual(l1a.getText(), "some stdout here")
-
- def test_addLogObserver(self):
- s = makeBuildStep("test_steps.Steps.test_addLogObserver")
- bss = s.step_status
- o1,o2,o3 = MyObserver(), MyObserver(), MyObserver()
-
- # add the log before the observer
- l1 = s.addLog("one")
- l1.addStdout("onestuff")
- s.addLogObserver("one", o1)
- self.failUnlessEqual(o1.out, "onestuff")
- l1.addStdout(" morestuff")
- self.failUnlessEqual(o1.out, "onestuff morestuff")
-
- # add the observer before the log
- s.addLogObserver("two", o2)
- l2 = s.addLog("two")
- l2.addStdout("twostuff")
- self.failUnlessEqual(o2.out, "twostuff")
-
- # test more stuff about ShellCommands
-
- def test_description(self):
- s = makeBuildStep("test_steps.Steps.test_description.1",
- step_class=shell.ShellCommand,
- workdir="dummy",
- description=["list", "of", "strings"],
- descriptionDone=["another", "list"])
- self.failUnlessEqual(s.description, ["list", "of", "strings"])
- self.failUnlessEqual(s.descriptionDone, ["another", "list"])
-
- s = makeBuildStep("test_steps.Steps.test_description.2",
- step_class=shell.ShellCommand,
- workdir="dummy",
- description="single string",
- descriptionDone="another string")
- self.failUnlessEqual(s.description, ["single string"])
- self.failUnlessEqual(s.descriptionDone, ["another string"])
-
-class VersionCheckingStep(buildstep.BuildStep):
- def start(self):
- # give our test a chance to run. It is non-trivial for a buildstep to
- # claw its way back out to the test case which is currently running.
- master = self.build.builder.botmaster.parent
- checker = master._checker
- checker(self)
- # then complete
- self.finished(buildstep.SUCCESS)
-
-version_config = """
-from buildbot.process import factory
-from buildbot.test.test_steps import VersionCheckingStep
-from buildbot.buildslave import BuildSlave
-BuildmasterConfig = c = {}
-f1 = factory.BuildFactory([
- factory.s(VersionCheckingStep),
- ])
-c['slaves'] = [BuildSlave('bot1', 'sekrit')]
-c['schedulers'] = []
-c['builders'] = [{'name':'quick', 'slavename':'bot1',
- 'builddir': 'quickdir', 'factory': f1}]
-c['slavePortnum'] = 0
-"""
-
-class SlaveVersion(RunMixin, unittest.TestCase):
- def setUp(self):
- RunMixin.setUp(self)
- self.master.loadConfig(version_config)
- self.master.startService()
- d = self.connectSlave(["quick"])
- return d
-
- def doBuild(self, buildername):
- br = base.BuildRequest("forced", SourceStamp(), 'test_builder')
- d = br.waitUntilFinished()
- self.control.getBuilder(buildername).requestBuild(br)
- return d
-
-
- def checkCompare(self, s):
- cver = commands.command_version
- v = s.slaveVersion("svn", None)
- # this insures that we are getting the version correctly
- self.failUnlessEqual(s.slaveVersion("svn", None), cver)
- # and that non-existent commands do not provide a version
- self.failUnlessEqual(s.slaveVersion("NOSUCHCOMMAND"), None)
- # TODO: verify that a <=0.5.0 buildslave (which does not implement
- # remote_getCommands) handles oldversion= properly. This requires a
- # mutant slave which does not offer that method.
- #self.failUnlessEqual(s.slaveVersion("NOSUCHCOMMAND", "old"), "old")
-
- # now check the comparison functions
- self.failIf(s.slaveVersionIsOlderThan("svn", cver))
- self.failIf(s.slaveVersionIsOlderThan("svn", "1.1"))
- self.failUnless(s.slaveVersionIsOlderThan("svn", cver + ".1"))
-
- self.failUnlessEqual(s.getSlaveName(), "bot1")
-
- def testCompare(self):
- self.master._checker = self.checkCompare
- d = self.doBuild("quick")
- return d
-
-
-class _SimpleBuildStep(buildstep.BuildStep):
- def start(self):
- args = {"arg1": "value"}
- cmd = buildstep.RemoteCommand("simple", args)
- d = self.runCommand(cmd)
- d.addCallback(lambda res: self.finished(SUCCESS))
-
-class _SimpleCommand(commands.Command):
- def start(self):
- self.builder.flag = True
- self.builder.flag_args = self.args
- return defer.succeed(None)
-
-class CheckStepTester(StepTester, unittest.TestCase):
- def testSimple(self):
- self.slavebase = "testSimple.slave"
- self.masterbase = "testSimple.master"
- sb = self.makeSlaveBuilder()
- sb.flag = False
- registry.registerSlaveCommand("simple", _SimpleCommand, "1")
- step = self.makeStep(_SimpleBuildStep)
- d = self.runStep(step)
- def _checkSimple(results):
- self.failUnless(sb.flag)
- self.failUnlessEqual(sb.flag_args, {"arg1": "value"})
- d.addCallback(_checkSimple)
- return d
-
-class Python(StepTester, unittest.TestCase):
- def testPyFlakes1(self):
- self.masterbase = "Python.testPyFlakes1"
- step = self.makeStep(python.PyFlakes)
- output = \
-"""pyflakes buildbot
-buildbot/changes/freshcvsmail.py:5: 'FCMaildirSource' imported but unused
-buildbot/clients/debug.py:9: redefinition of unused 'gtk' from line 9
-buildbot/clients/debug.py:9: 'gnome' imported but unused
-buildbot/scripts/runner.py:323: redefinition of unused 'run' from line 321
-buildbot/scripts/runner.py:325: redefinition of unused 'run' from line 323
-buildbot/scripts/imaginary.py:12: undefined name 'size'
-buildbot/scripts/imaginary.py:18: 'from buildbot import *' used; unable to detect undefined names
-"""
- log = step.addLog("stdio")
- log.addStdout(output)
- log.finish()
- step.createSummary(log)
- desc = step.descriptionDone
- self.failUnless("unused=2" in desc)
- self.failUnless("undefined=1" in desc)
- self.failUnless("redefs=3" in desc)
- self.failUnless("import*=1" in desc)
- self.failIf("misc=" in desc)
-
- self.failUnlessEqual(step.getProperty("pyflakes-unused"), 2)
- self.failUnlessEqual(step.getProperty("pyflakes-undefined"), 1)
- self.failUnlessEqual(step.getProperty("pyflakes-redefs"), 3)
- self.failUnlessEqual(step.getProperty("pyflakes-import*"), 1)
- self.failUnlessEqual(step.getProperty("pyflakes-misc"), 0)
- self.failUnlessEqual(step.getProperty("pyflakes-total"), 7)
-
- logs = {}
- for log in step.step_status.getLogs():
- logs[log.getName()] = log
-
- for name in ["unused", "undefined", "redefs", "import*"]:
- self.failUnless(name in logs)
- self.failIf("misc" in logs)
- lines = logs["unused"].readlines()
- self.failUnlessEqual(len(lines), 2)
- self.failUnlessEqual(lines[0], "buildbot/changes/freshcvsmail.py:5: 'FCMaildirSource' imported but unused\n")
-
- cmd = buildstep.RemoteCommand(None, {})
- cmd.rc = 0
- results = step.evaluateCommand(cmd)
- self.failUnlessEqual(results, FAILURE) # because of the 'undefined'
-
- def testPyFlakes2(self):
- self.masterbase = "Python.testPyFlakes2"
- step = self.makeStep(python.PyFlakes)
- output = \
-"""pyflakes buildbot
-some more text here that should be ignored
-buildbot/changes/freshcvsmail.py:5: 'FCMaildirSource' imported but unused
-buildbot/clients/debug.py:9: redefinition of unused 'gtk' from line 9
-buildbot/clients/debug.py:9: 'gnome' imported but unused
-buildbot/scripts/runner.py:323: redefinition of unused 'run' from line 321
-buildbot/scripts/runner.py:325: redefinition of unused 'run' from line 323
-buildbot/scripts/imaginary.py:12: undefined name 'size'
-could not compile 'blah/blah.py':3:
-pretend there was an invalid line here
-buildbot/scripts/imaginary.py:18: 'from buildbot import *' used; unable to detect undefined names
-"""
- log = step.addLog("stdio")
- log.addStdout(output)
- log.finish()
- step.createSummary(log)
- desc = step.descriptionDone
- self.failUnless("unused=2" in desc)
- self.failUnless("undefined=1" in desc)
- self.failUnless("redefs=3" in desc)
- self.failUnless("import*=1" in desc)
- self.failUnless("misc=2" in desc)
-
-
- def testPyFlakes3(self):
- self.masterbase = "Python.testPyFlakes3"
- step = self.makeStep(python.PyFlakes)
- output = \
-"""buildbot/changes/freshcvsmail.py:5: 'FCMaildirSource' imported but unused
-buildbot/clients/debug.py:9: redefinition of unused 'gtk' from line 9
-buildbot/clients/debug.py:9: 'gnome' imported but unused
-buildbot/scripts/runner.py:323: redefinition of unused 'run' from line 321
-buildbot/scripts/runner.py:325: redefinition of unused 'run' from line 323
-buildbot/scripts/imaginary.py:12: undefined name 'size'
-buildbot/scripts/imaginary.py:18: 'from buildbot import *' used; unable to detect undefined names
-"""
- log = step.addLog("stdio")
- log.addStdout(output)
- log.finish()
- step.createSummary(log)
- desc = step.descriptionDone
- self.failUnless("unused=2" in desc)
- self.failUnless("undefined=1" in desc)
- self.failUnless("redefs=3" in desc)
- self.failUnless("import*=1" in desc)
- self.failIf("misc" in desc)
-
-
-class OrdinaryCompile(shell.Compile):
- warningPattern = "ordinary line"
-
-class Warnings(StepTester, unittest.TestCase):
- def testCompile1(self):
- self.masterbase = "Warnings.testCompile1"
- step = self.makeStep(shell.Compile)
- output = \
-"""Compile started
-normal line
-warning: oh noes!
-ordinary line
-error (but we aren't looking for errors now, are we)
-line 23: warning: we are now on line 23
-ending line
-"""
- log = step.addLog("stdio")
- log.addStdout(output)
- log.finish()
- step.createSummary(log)
- self.failUnlessEqual(step.getProperty("warnings-count"), 2)
- logs = {}
- for log in step.step_status.getLogs():
- logs[log.getName()] = log
- self.failUnless("warnings" in logs)
- lines = logs["warnings"].readlines()
- self.failUnlessEqual(len(lines), 2)
- self.failUnlessEqual(lines[0], "warning: oh noes!\n")
- self.failUnlessEqual(lines[1],
- "line 23: warning: we are now on line 23\n")
-
- cmd = buildstep.RemoteCommand(None, {})
- cmd.rc = 0
- results = step.evaluateCommand(cmd)
- self.failUnlessEqual(results, WARNINGS)
-
- def testCompile2(self):
- self.masterbase = "Warnings.testCompile2"
- step = self.makeStep(shell.Compile, warningPattern="ordinary line")
- output = \
-"""Compile started
-normal line
-warning: oh noes!
-ordinary line
-error (but we aren't looking for errors now, are we)
-line 23: warning: we are now on line 23
-ending line
-"""
- log = step.addLog("stdio")
- log.addStdout(output)
- log.finish()
- step.createSummary(log)
- self.failUnlessEqual(step.getProperty("warnings-count"), 1)
- logs = {}
- for log in step.step_status.getLogs():
- logs[log.getName()] = log
- self.failUnless("warnings" in logs)
- lines = logs["warnings"].readlines()
- self.failUnlessEqual(len(lines), 1)
- self.failUnlessEqual(lines[0], "ordinary line\n")
-
- cmd = buildstep.RemoteCommand(None, {})
- cmd.rc = 0
- results = step.evaluateCommand(cmd)
- self.failUnlessEqual(results, WARNINGS)
-
- def testCompile3(self):
- self.masterbase = "Warnings.testCompile3"
- step = self.makeStep(OrdinaryCompile)
- output = \
-"""Compile started
-normal line
-warning: oh noes!
-ordinary line
-error (but we aren't looking for errors now, are we)
-line 23: warning: we are now on line 23
-ending line
-"""
- step.setProperty("warnings-count", 10, "test")
- log = step.addLog("stdio")
- log.addStdout(output)
- log.finish()
- step.createSummary(log)
- self.failUnlessEqual(step.getProperty("warnings-count"), 11)
- logs = {}
- for log in step.step_status.getLogs():
- logs[log.getName()] = log
- self.failUnless("warnings" in logs)
- lines = logs["warnings"].readlines()
- self.failUnlessEqual(len(lines), 1)
- self.failUnlessEqual(lines[0], "ordinary line\n")
-
- cmd = buildstep.RemoteCommand(None, {})
- cmd.rc = 0
- results = step.evaluateCommand(cmd)
- self.failUnlessEqual(results, WARNINGS)
-
-
-class TreeSize(StepTester, unittest.TestCase):
- def testTreeSize(self):
- self.slavebase = "TreeSize.testTreeSize.slave"
- self.masterbase = "TreeSize.testTreeSize.master"
-
- sb = self.makeSlaveBuilder()
- step = self.makeStep(shell.TreeSize)
- d = self.runStep(step)
- def _check(results):
- self.failUnlessEqual(results, SUCCESS)
- kib = step.getProperty("tree-size-KiB")
- self.failUnless(isinstance(kib, int))
- self.failUnless(kib < 100) # should be empty, I get '4'
- s = step.step_status
- self.failUnlessEqual(" ".join(s.getText()),
- "treesize %d KiB" % kib)
- d.addCallback(_check)
- return d
-
-class FakeCommand:
- def __init__(self, rc):
- self.rc = rc
-
-class PerlModuleTest(StepTester, unittest.TestCase):
- def testAllTestsPassed(self):
- self.masterbase = "PMT.testAllTestsPassed"
- step = self.makeStep(shell.PerlModuleTest)
- output = \
-"""ok 1
-ok 2
-All tests successful
-Files=1, Tests=123, other stuff
-"""
- log = step.addLog("stdio")
- log.addStdout(output)
- log.finish()
- rc = step.evaluateCommand(FakeCommand(rc=241))
- self.failUnlessEqual(rc, SUCCESS)
- ss = step.step_status
- self.failUnlessEqual(ss.getStatistic('tests-failed'), 0)
- self.failUnlessEqual(ss.getStatistic('tests-total'), 123)
- self.failUnlessEqual(ss.getStatistic('tests-passed'), 123)
-
- def testFailures_OldTestHarness(self):
- self.masterbase = "PMT.testFailures_OldTestHarness"
- step = self.makeStep(shell.PerlModuleTest)
- output = \
-"""
-ok 1
-ok 2
-3/7 subtests failed
-"""
- log = step.addLog("stdio")
- log.addStdout(output)
- log.finish()
- rc = step.evaluateCommand(FakeCommand(rc = 123))
- self.failUnlessEqual(rc, FAILURE)
- ss = step.step_status
- self.failUnlessEqual(ss.getStatistic('tests-failed'), 3)
- self.failUnlessEqual(ss.getStatistic('tests-total'), 7)
- self.failUnlessEqual(ss.getStatistic('tests-passed'), 4)
-
- def testFailures_UnparseableStdio(self):
- self.masterbase = "PMT.testFailures_UnparseableStdio"
- step = self.makeStep(shell.PerlModuleTest)
- output = \
-"""
-just some random stuff, you know
-"""
- log = step.addLog("stdio")
- log.addStdout(output)
- log.finish()
- rc = step.evaluateCommand(FakeCommand(rc = 243))
- self.failUnlessEqual(rc, 243)
- ss = step.step_status
- self.failUnlessEqual(ss.getStatistic('tests-failed'), None)
- self.failUnlessEqual(ss.getStatistic('tests-total'), None)
- self.failUnlessEqual(ss.getStatistic('tests-passed'), None)
-
- def testFailures_NewTestHarness(self):
- self.masterbase = "PMT.testFailures_NewTestHarness"
- step = self.makeStep(shell.PerlModuleTest)
- output = \
-"""
-# Looks like you failed 15 tests of 18.
-tests/services.......................... Failed 265/30904 subtests
- (less 16 skipped subtests: 30623 okay)
-tests/simple_query_backend..............ok
-tests/simple_query_middleware...........ok
-tests/soap_globalcollect................ok
-tests/three_d_me........................ok
-tests/three_d_me_callback...............ok
-tests/transaction_create................ok
-tests/unique_txid.......................ok
-
-Test Summary Report
--------------------
-tests/000policies (Wstat: 5632 Tests: 9078 Failed: 22)
- Failed tests: 2409, 2896-2897, 2900-2901, 2940-2941, 2944-2945
- 2961-2962, 2965-2966, 2969-2970, 2997-2998
- 3262, 3281-3282, 3288-3289
- Non-zero exit status: 22
-tests/services (Wstat: 0 Tests: 30904 Failed: 265)
- Failed tests: 14, 16-21, 64-69, 71-96, 98, 30157, 30159
- 30310, 30316, 30439-30543, 30564, 30566-30577
- 30602, 30604-30607, 30609-30612, 30655
- 30657-30668, 30675, 30697-30716, 30718-30720
- 30722-30736, 30773-30774, 30776-30777, 30786
- 30791, 30795, 30797, 30801, 30822-30827
- 30830-30831, 30848-30855, 30858-30859, 30888-30899
- 30901, 30903-30904
-Files=68, Tests=264809, 1944 wallclock secs (17.59 usr 0.63 sys + 470.04 cusr 131.40 csys = 619.66 CPU)
-Result: FAIL
-"""
- log = step.addLog("stdio")
- log.addStdout(output)
- log.finish()
- rc = step.evaluateCommand(FakeCommand(rc=87))
- self.failUnlessEqual(rc, FAILURE)
- ss = step.step_status
- self.failUnlessEqual(ss.getStatistic('tests-failed'), 287)
- self.failUnlessEqual(ss.getStatistic('tests-total'), 264809)
- self.failUnlessEqual(ss.getStatistic('tests-passed'), 264522)
-
-class MasterShellCommand(StepTester, unittest.TestCase):
- def testMasterShellCommand(self):
- self.slavebase = "testMasterShellCommand.slave"
- self.masterbase = "testMasterShellCommand.master"
- sb = self.makeSlaveBuilder()
- step = self.makeStep(master.MasterShellCommand, command=['echo', 'hi'])
-
- # we can't invoke runStep until the reactor is started .. hence this
- # little dance
- d = defer.Deferred()
- def _dotest(_):
- return self.runStep(step)
- d.addCallback(_dotest)
-
- def _check(results):
- self.failUnlessEqual(results, SUCCESS)
- logtxt = step.getLog("stdio").getText()
- self.failUnlessEqual(logtxt.strip(), "hi")
- d.addCallback(_check)
- reactor.callLater(0, d.callback, None)
- return d
-
- def testMasterShellCommand_badexit(self):
- self.slavebase = "testMasterShellCommand_badexit.slave"
- self.masterbase = "testMasterShellCommand_badexit.master"
- sb = self.makeSlaveBuilder()
- step = self.makeStep(master.MasterShellCommand, command="exit 1")
-
- # we can't invoke runStep until the reactor is started .. hence this
- # little dance
- d = defer.Deferred()
- def _dotest(_):
- return self.runStep(step)
- d.addCallback(_dotest)
-
- def _check(results):
- self.failUnlessEqual(results, FAILURE)
- d.addCallback(_check)
- reactor.callLater(0, d.callback, None)
- return d
diff --git a/buildbot/buildbot/test/test_svnpoller.py b/buildbot/buildbot/test/test_svnpoller.py
deleted file mode 100644
index 452a514..0000000
--- a/buildbot/buildbot/test/test_svnpoller.py
+++ /dev/null
@@ -1,476 +0,0 @@
-# -*- test-case-name: buildbot.test.test_svnpoller -*-
-
-import time
-from twisted.internet import defer
-from twisted.trial import unittest
-from buildbot.changes.svnpoller import SVNPoller
-
-# this is the output of "svn info --xml
-# svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk"
-prefix_output = """\
-<?xml version="1.0"?>
-<info>
-<entry
- kind="dir"
- path="trunk"
- revision="18354">
-<url>svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk</url>
-<repository>
-<root>svn+ssh://svn.twistedmatrix.com/svn/Twisted</root>
-<uuid>bbbe8e31-12d6-0310-92fd-ac37d47ddeeb</uuid>
-</repository>
-<commit
- revision="18352">
-<author>jml</author>
-<date>2006-10-01T02:37:34.063255Z</date>
-</commit>
-</entry>
-</info>
-"""
-
-# and this is "svn info --xml svn://svn.twistedmatrix.com/svn/Twisted". I
-# think this is kind of a degenerate case.. it might even be a form of error.
-prefix_output_2 = """\
-<?xml version="1.0"?>
-<info>
-</info>
-"""
-
-# this is the svn info output for a local repository, svn info --xml
-# file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository
-prefix_output_3 = """\
-<?xml version="1.0"?>
-<info>
-<entry
- kind="dir"
- path="SVN-Repository"
- revision="3">
-<url>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository</url>
-<repository>
-<root>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository</root>
-<uuid>c0f47ff4-ba1e-0410-96b5-d44cc5c79e7f</uuid>
-</repository>
-<commit
- revision="3">
-<author>warner</author>
-<date>2006-10-01T07:37:04.182499Z</date>
-</commit>
-</entry>
-</info>
-"""
-
-# % svn info --xml file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample/trunk
-
-prefix_output_4 = """\
-<?xml version="1.0"?>
-<info>
-<entry
- kind="dir"
- path="trunk"
- revision="3">
-<url>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample/trunk</url>
-<repository>
-<root>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository</root>
-<uuid>c0f47ff4-ba1e-0410-96b5-d44cc5c79e7f</uuid>
-</repository>
-<commit
- revision="1">
-<author>warner</author>
-<date>2006-10-01T07:37:02.286440Z</date>
-</commit>
-</entry>
-</info>
-"""
-
-
-
-class ComputePrefix(unittest.TestCase):
- def test1(self):
- base = "svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk"
- s = SVNPoller(base + "/")
- self.failUnlessEqual(s.svnurl, base) # certify slash-stripping
- prefix = s.determine_prefix(prefix_output)
- self.failUnlessEqual(prefix, "trunk")
- self.failUnlessEqual(s._prefix, prefix)
-
- def test2(self):
- base = "svn+ssh://svn.twistedmatrix.com/svn/Twisted"
- s = SVNPoller(base)
- self.failUnlessEqual(s.svnurl, base)
- prefix = s.determine_prefix(prefix_output_2)
- self.failUnlessEqual(prefix, "")
-
- def test3(self):
- base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository"
- s = SVNPoller(base)
- self.failUnlessEqual(s.svnurl, base)
- prefix = s.determine_prefix(prefix_output_3)
- self.failUnlessEqual(prefix, "")
-
- def test4(self):
- base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample/trunk"
- s = SVNPoller(base)
- self.failUnlessEqual(s.svnurl, base)
- prefix = s.determine_prefix(prefix_output_4)
- self.failUnlessEqual(prefix, "sample/trunk")
-
-# output from svn log on .../SVN-Repository/sample
-# (so it includes trunk and branches)
-sample_base = "file:///usr/home/warner/stuff/Projects/BuildBot/trees/misc/_trial_temp/test_vc/repositories/SVN-Repository/sample"
-sample_logentries = [None] * 6
-
-sample_logentries[5] = """\
-<logentry
- revision="6">
-<author>warner</author>
-<date>2006-10-01T19:35:16.165664Z</date>
-<paths>
-<path
- action="D">/sample/branch/version.c</path>
-</paths>
-<msg>revised_to_2</msg>
-</logentry>
-"""
-
-sample_logentries[4] = """\
-<logentry
- revision="5">
-<author>warner</author>
-<date>2006-10-01T19:35:16.165664Z</date>
-<paths>
-<path
- action="D">/sample/branch</path>
-</paths>
-<msg>revised_to_2</msg>
-</logentry>
-"""
-
-sample_logentries[3] = """\
-<logentry
- revision="4">
-<author>warner</author>
-<date>2006-10-01T19:35:16.165664Z</date>
-<paths>
-<path
- action="M">/sample/trunk/version.c</path>
-</paths>
-<msg>revised_to_2</msg>
-</logentry>
-"""
-
-sample_logentries[2] = """\
-<logentry
- revision="3">
-<author>warner</author>
-<date>2006-10-01T19:35:10.215692Z</date>
-<paths>
-<path
- action="M">/sample/branch/main.c</path>
-</paths>
-<msg>commit_on_branch</msg>
-</logentry>
-"""
-
-sample_logentries[1] = """\
-<logentry
- revision="2">
-<author>warner</author>
-<date>2006-10-01T19:35:09.154973Z</date>
-<paths>
-<path
- copyfrom-path="/sample/trunk"
- copyfrom-rev="1"
- action="A">/sample/branch</path>
-</paths>
-<msg>make_branch</msg>
-</logentry>
-"""
-
-sample_logentries[0] = """\
-<logentry
- revision="1">
-<author>warner</author>
-<date>2006-10-01T19:35:08.642045Z</date>
-<paths>
-<path
- action="A">/sample</path>
-<path
- action="A">/sample/trunk</path>
-<path
- action="A">/sample/trunk/subdir/subdir.c</path>
-<path
- action="A">/sample/trunk/main.c</path>
-<path
- action="A">/sample/trunk/version.c</path>
-<path
- action="A">/sample/trunk/subdir</path>
-</paths>
-<msg>sample_project_files</msg>
-</logentry>
-"""
-
-sample_info_output = """\
-<?xml version="1.0"?>
-<info>
-<entry
- kind="dir"
- path="sample"
- revision="4">
-<url>file:///usr/home/warner/stuff/Projects/BuildBot/trees/misc/_trial_temp/test_vc/repositories/SVN-Repository/sample</url>
-<repository>
-<root>file:///usr/home/warner/stuff/Projects/BuildBot/trees/misc/_trial_temp/test_vc/repositories/SVN-Repository</root>
-<uuid>4f94adfc-c41e-0410-92d5-fbf86b7c7689</uuid>
-</repository>
-<commit
- revision="4">
-<author>warner</author>
-<date>2006-10-01T19:35:16.165664Z</date>
-</commit>
-</entry>
-</info>
-"""
-
-
-changes_output_template = """\
-<?xml version="1.0"?>
-<log>
-%s</log>
-"""
-
-def make_changes_output(maxrevision):
- # return what 'svn log' would have just after the given revision was
- # committed
- logs = sample_logentries[0:maxrevision]
- assert len(logs) == maxrevision
- logs.reverse()
- output = changes_output_template % ("".join(logs))
- return output
-
-def split_file(path):
- pieces = path.split("/")
- if pieces[0] == "branch":
- return "branch", "/".join(pieces[1:])
- if pieces[0] == "trunk":
- return None, "/".join(pieces[1:])
- raise RuntimeError("there shouldn't be any files like %s" % path)
-
-class MySVNPoller(SVNPoller):
- def __init__(self, *args, **kwargs):
- SVNPoller.__init__(self, *args, **kwargs)
- self.pending_commands = []
- self.finished_changes = []
-
- def getProcessOutput(self, args):
- d = defer.Deferred()
- self.pending_commands.append((args, d))
- return d
-
- def submit_changes(self, changes):
- self.finished_changes.extend(changes)
-
-class ComputeChanges(unittest.TestCase):
- def test1(self):
- base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
- s = SVNPoller(base)
- s._prefix = "sample"
- output = make_changes_output(4)
- doc = s.parse_logs(output)
-
- newlast, logentries = s._filter_new_logentries(doc, 4)
- self.failUnlessEqual(newlast, 4)
- self.failUnlessEqual(len(logentries), 0)
-
- newlast, logentries = s._filter_new_logentries(doc, 3)
- self.failUnlessEqual(newlast, 4)
- self.failUnlessEqual(len(logentries), 1)
-
- newlast, logentries = s._filter_new_logentries(doc, 1)
- self.failUnlessEqual(newlast, 4)
- self.failUnlessEqual(len(logentries), 3)
-
- newlast, logentries = s._filter_new_logentries(doc, None)
- self.failUnlessEqual(newlast, 4)
- self.failUnlessEqual(len(logentries), 0)
-
- def testChanges(self):
- base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
- s = SVNPoller(base, split_file=split_file)
- s._prefix = "sample"
- doc = s.parse_logs(make_changes_output(3))
- newlast, logentries = s._filter_new_logentries(doc, 1)
- # so we see revisions 2 and 3 as being new
- self.failUnlessEqual(newlast, 3)
- changes = s.create_changes(logentries)
- self.failUnlessEqual(len(changes), 2)
- self.failUnlessEqual(changes[0].branch, "branch")
- self.failUnlessEqual(changes[0].revision, '2')
- self.failUnlessEqual(changes[1].branch, "branch")
- self.failUnlessEqual(changes[1].files, ["main.c"])
- self.failUnlessEqual(changes[1].revision, '3')
-
- # and now pull in r4
- doc = s.parse_logs(make_changes_output(4))
- newlast, logentries = s._filter_new_logentries(doc, newlast)
- self.failUnlessEqual(newlast, 4)
- # so we see revision 4 as being new
- changes = s.create_changes(logentries)
- self.failUnlessEqual(len(changes), 1)
- self.failUnlessEqual(changes[0].branch, None)
- self.failUnlessEqual(changes[0].revision, '4')
- self.failUnlessEqual(changes[0].files, ["version.c"])
-
- # and now pull in r5 (should *not* create a change as it's a
- # branch deletion
- doc = s.parse_logs(make_changes_output(5))
- newlast, logentries = s._filter_new_logentries(doc, newlast)
- self.failUnlessEqual(newlast, 5)
- # so we see revision 5 as being new
- changes = s.create_changes(logentries)
- self.failUnlessEqual(len(changes), 0)
-
- # and now pull in r6 (should create a change as it's not
- # deleting an entire branch
- doc = s.parse_logs(make_changes_output(6))
- newlast, logentries = s._filter_new_logentries(doc, newlast)
- self.failUnlessEqual(newlast, 6)
- # so we see revision 6 as being new
- changes = s.create_changes(logentries)
- self.failUnlessEqual(len(changes), 1)
- self.failUnlessEqual(changes[0].branch, 'branch')
- self.failUnlessEqual(changes[0].revision, '6')
- self.failUnlessEqual(changes[0].files, ["version.c"])
-
- def testFirstTime(self):
- base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
- s = SVNPoller(base, split_file=split_file)
- s._prefix = "sample"
- doc = s.parse_logs(make_changes_output(4))
- logentries = s.get_new_logentries(doc)
- # SVNPoller ignores all changes that happened before it was started
- self.failUnlessEqual(len(logentries), 0)
- self.failUnlessEqual(s.last_change, 4)
-
-class Misc(unittest.TestCase):
- def testAlreadyWorking(self):
- base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
- s = MySVNPoller(base)
- d = s.checksvn()
- # the SVNPoller is now waiting for its getProcessOutput to finish
- self.failUnlessEqual(s.overrun_counter, 0)
- d2 = s.checksvn()
- self.failUnlessEqual(s.overrun_counter, 1)
- self.failUnlessEqual(len(s.pending_commands), 1)
-
- def testGetRoot(self):
- base = "svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk"
- s = MySVNPoller(base)
- d = s.checksvn()
- # the SVNPoller is now waiting for its getProcessOutput to finish
- self.failUnlessEqual(len(s.pending_commands), 1)
- self.failUnlessEqual(s.pending_commands[0][0],
- ["info", "--xml", "--non-interactive", base])
-
-def makeTime(timestring):
- datefmt = '%Y/%m/%d %H:%M:%S'
- when = time.mktime(time.strptime(timestring, datefmt))
- return when
-
-
-class Everything(unittest.TestCase):
- def test1(self):
- s = MySVNPoller(sample_base, split_file=split_file)
- d = s.checksvn()
- # the SVNPoller is now waiting for its getProcessOutput to finish
- self.failUnlessEqual(len(s.pending_commands), 1)
- self.failUnlessEqual(s.pending_commands[0][0],
- ["info", "--xml", "--non-interactive",
- sample_base])
- d = s.pending_commands[0][1]
- s.pending_commands.pop(0)
- d.callback(sample_info_output)
- # now it should be waiting for the 'svn log' command
- self.failUnlessEqual(len(s.pending_commands), 1)
- self.failUnlessEqual(s.pending_commands[0][0],
- ["log", "--xml", "--verbose", "--non-interactive",
- "--limit=100", sample_base])
- d = s.pending_commands[0][1]
- s.pending_commands.pop(0)
- d.callback(make_changes_output(1))
- # the command ignores the first batch of changes
- self.failUnlessEqual(len(s.finished_changes), 0)
- self.failUnlessEqual(s.last_change, 1)
-
- # now fire it again, nothing changing
- d = s.checksvn()
- self.failUnlessEqual(s.pending_commands[0][0],
- ["log", "--xml", "--verbose", "--non-interactive",
- "--limit=100", sample_base])
- d = s.pending_commands[0][1]
- s.pending_commands.pop(0)
- d.callback(make_changes_output(1))
- # nothing has changed
- self.failUnlessEqual(len(s.finished_changes), 0)
- self.failUnlessEqual(s.last_change, 1)
-
- # and again, with r2 this time
- d = s.checksvn()
- self.failUnlessEqual(s.pending_commands[0][0],
- ["log", "--xml", "--verbose", "--non-interactive",
- "--limit=100", sample_base])
- d = s.pending_commands[0][1]
- s.pending_commands.pop(0)
- d.callback(make_changes_output(2))
- # r2 should appear
- self.failUnlessEqual(len(s.finished_changes), 1)
- self.failUnlessEqual(s.last_change, 2)
-
- c = s.finished_changes[0]
- self.failUnlessEqual(c.branch, "branch")
- self.failUnlessEqual(c.revision, '2')
- self.failUnlessEqual(c.files, [''])
- # TODO: this is what creating the branch looks like: a Change with a
- # zero-length file. We should decide if we want filenames like this
- # in the Change (and make sure nobody else gets confused by it) or if
- # we want to strip them out.
- self.failUnlessEqual(c.comments, "make_branch")
-
- # and again at r2, so nothing should change
- d = s.checksvn()
- self.failUnlessEqual(s.pending_commands[0][0],
- ["log", "--xml", "--verbose", "--non-interactive",
- "--limit=100", sample_base])
- d = s.pending_commands[0][1]
- s.pending_commands.pop(0)
- d.callback(make_changes_output(2))
- # nothing has changed
- self.failUnlessEqual(len(s.finished_changes), 1)
- self.failUnlessEqual(s.last_change, 2)
-
- # and again with both r3 and r4 appearing together
- d = s.checksvn()
- self.failUnlessEqual(s.pending_commands[0][0],
- ["log", "--xml", "--verbose", "--non-interactive",
- "--limit=100", sample_base])
- d = s.pending_commands[0][1]
- s.pending_commands.pop(0)
- d.callback(make_changes_output(4))
- self.failUnlessEqual(len(s.finished_changes), 3)
- self.failUnlessEqual(s.last_change, 4)
-
- c3 = s.finished_changes[1]
- self.failUnlessEqual(c3.branch, "branch")
- self.failUnlessEqual(c3.revision, '3')
- self.failUnlessEqual(c3.files, ["main.c"])
- self.failUnlessEqual(c3.comments, "commit_on_branch")
-
- c4 = s.finished_changes[2]
- self.failUnlessEqual(c4.branch, None)
- self.failUnlessEqual(c4.revision, '4')
- self.failUnlessEqual(c4.files, ["version.c"])
- self.failUnlessEqual(c4.comments, "revised_to_2")
- self.failUnless(abs(c4.when - time.time()) < 60)
-
-
-# TODO:
-# get coverage of split_file returning None
-# point at a live SVN server for a little while
diff --git a/buildbot/buildbot/test/test_transfer.py b/buildbot/buildbot/test/test_transfer.py
deleted file mode 100644
index c85c630..0000000
--- a/buildbot/buildbot/test/test_transfer.py
+++ /dev/null
@@ -1,721 +0,0 @@
-# -*- test-case-name: buildbot.test.test_transfer -*-
-
-import os
-from stat import ST_MODE
-from twisted.trial import unittest
-from buildbot.process.buildstep import WithProperties
-from buildbot.steps.transfer import FileUpload, FileDownload, DirectoryUpload
-from buildbot.test.runutils import StepTester
-from buildbot.status.builder import SUCCESS, FAILURE
-
-# these steps pass a pb.Referenceable inside their arguments, so we have to
-# catch and wrap them. If the LocalAsRemote wrapper were a proper membrane,
-# we wouldn't have to do this.
-
-class UploadFile(StepTester, unittest.TestCase):
-
- def filterArgs(self, args):
- if "writer" in args:
- args["writer"] = self.wrap(args["writer"])
- return args
-
- def testSuccess(self):
- self.slavebase = "UploadFile.testSuccess.slave"
- self.masterbase = "UploadFile.testSuccess.master"
- sb = self.makeSlaveBuilder()
- os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
- "build"))
- # the buildmaster normally runs chdir'ed into masterbase, so uploaded
- # files will appear there. Under trial, we're chdir'ed into
- # _trial_temp instead, so use a different masterdest= to keep the
- # uploaded file in a test-local directory
- masterdest = os.path.join(self.masterbase, "dest.text")
- step = self.makeStep(FileUpload,
- slavesrc="source.txt",
- masterdest=masterdest)
- slavesrc = os.path.join(self.slavebase,
- self.slavebuilderbase,
- "build",
- "source.txt")
- contents = "this is the source file\n" * 1000
- open(slavesrc, "w").write(contents)
- f = open(masterdest, "w")
- f.write("overwrite me\n")
- f.close()
-
- d = self.runStep(step)
- def _checkUpload(results):
- step_status = step.step_status
- #l = step_status.getLogs()
- #if l:
- # logtext = l[0].getText()
- # print logtext
- self.failUnlessEqual(results, SUCCESS)
- self.failUnless(os.path.exists(masterdest))
- masterdest_contents = open(masterdest, "r").read()
- self.failUnlessEqual(masterdest_contents, contents)
- d.addCallback(_checkUpload)
- return d
-
- def testMaxsize(self):
- self.slavebase = "UploadFile.testMaxsize.slave"
- self.masterbase = "UploadFile.testMaxsize.master"
- sb = self.makeSlaveBuilder()
- os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
- "build"))
- masterdest = os.path.join(self.masterbase, "dest2.text")
- step = self.makeStep(FileUpload,
- slavesrc="source.txt",
- masterdest=masterdest,
- maxsize=12345)
- slavesrc = os.path.join(self.slavebase,
- self.slavebuilderbase,
- "build",
- "source.txt")
- contents = "this is the source file\n" * 1000
- open(slavesrc, "w").write(contents)
- f = open(masterdest, "w")
- f.write("overwrite me\n")
- f.close()
-
- d = self.runStep(step)
- def _checkUpload(results):
- step_status = step.step_status
- #l = step_status.getLogs()
- #if l:
- # logtext = l[0].getText()
- # print logtext
- self.failUnlessEqual(results, FAILURE)
- self.failUnless(os.path.exists(masterdest))
- masterdest_contents = open(masterdest, "r").read()
- self.failUnlessEqual(len(masterdest_contents), 12345)
- self.failUnlessEqual(masterdest_contents, contents[:12345])
- d.addCallback(_checkUpload)
- return d
-
- def testMode(self):
- self.slavebase = "UploadFile.testMode.slave"
- self.masterbase = "UploadFile.testMode.master"
- sb = self.makeSlaveBuilder()
- os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
- "build"))
- masterdest = os.path.join(self.masterbase, "dest3.text")
- step = self.makeStep(FileUpload,
- slavesrc="source.txt",
- masterdest=masterdest,
- mode=0755)
- slavesrc = os.path.join(self.slavebase,
- self.slavebuilderbase,
- "build",
- "source.txt")
- contents = "this is the source file\n"
- open(slavesrc, "w").write(contents)
- f = open(masterdest, "w")
- f.write("overwrite me\n")
- f.close()
-
- d = self.runStep(step)
- def _checkUpload(results):
- step_status = step.step_status
- #l = step_status.getLogs()
- #if l:
- # logtext = l[0].getText()
- # print logtext
- self.failUnlessEqual(results, SUCCESS)
- self.failUnless(os.path.exists(masterdest))
- masterdest_contents = open(masterdest, "r").read()
- self.failUnlessEqual(masterdest_contents, contents)
- # and with 0777 to ignore sticky bits
- dest_mode = os.stat(masterdest)[ST_MODE] & 0777
- self.failUnlessEqual(dest_mode, 0755,
- "target mode was %o, we wanted %o" %
- (dest_mode, 0755))
- d.addCallback(_checkUpload)
- return d
-
- def testMissingFile(self):
- self.slavebase = "UploadFile.testMissingFile.slave"
- self.masterbase = "UploadFile.testMissingFile.master"
- sb = self.makeSlaveBuilder()
- step = self.makeStep(FileUpload,
- slavesrc="MISSING.txt",
- masterdest="dest.txt")
- masterdest = os.path.join(self.masterbase, "dest4.txt")
-
- d = self.runStep(step)
- def _checkUpload(results):
- step_status = step.step_status
- self.failUnlessEqual(results, FAILURE)
- self.failIf(os.path.exists(masterdest))
- l = step_status.getLogs()
- logtext = l[0].getText().strip()
- self.failUnless(logtext.startswith("Cannot open file"))
- self.failUnless(logtext.endswith("for upload"))
- d.addCallback(_checkUpload)
- return d
-
- def testLotsOfBlocks(self):
- self.slavebase = "UploadFile.testLotsOfBlocks.slave"
- self.masterbase = "UploadFile.testLotsOfBlocks.master"
- sb = self.makeSlaveBuilder()
- os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
- "build"))
- # the buildmaster normally runs chdir'ed into masterbase, so uploaded
- # files will appear there. Under trial, we're chdir'ed into
- # _trial_temp instead, so use a different masterdest= to keep the
- # uploaded file in a test-local directory
- masterdest = os.path.join(self.masterbase, "dest.text")
- step = self.makeStep(FileUpload,
- slavesrc="source.txt",
- masterdest=masterdest,
- blocksize=15)
- slavesrc = os.path.join(self.slavebase,
- self.slavebuilderbase,
- "build",
- "source.txt")
- contents = "".join(["this is the source file #%d\n" % i
- for i in range(1000)])
- open(slavesrc, "w").write(contents)
- f = open(masterdest, "w")
- f.write("overwrite me\n")
- f.close()
-
- d = self.runStep(step)
- def _checkUpload(results):
- step_status = step.step_status
- #l = step_status.getLogs()
- #if l:
- # logtext = l[0].getText()
- # print logtext
- self.failUnlessEqual(results, SUCCESS)
- self.failUnless(os.path.exists(masterdest))
- masterdest_contents = open(masterdest, "r").read()
- self.failUnlessEqual(masterdest_contents, contents)
- d.addCallback(_checkUpload)
- return d
-
- def testWorkdir(self):
- self.slavebase = "Upload.testWorkdir.slave"
- self.masterbase = "Upload.testWorkdir.master"
- sb = self.makeSlaveBuilder()
-
- self.workdir = "mybuild" # override default in StepTest
- full_workdir = os.path.join(
- self.slavebase, self.slavebuilderbase, self.workdir)
- os.mkdir(full_workdir)
-
- masterdest = os.path.join(self.masterbase, "dest.txt")
-
- step = self.makeStep(FileUpload,
- slavesrc="source.txt",
- masterdest=masterdest)
-
- # Testing that the FileUpload's workdir is set when makeStep()
- # calls setDefaultWorkdir() is actually enough; carrying on and
- # making sure the upload actually succeeds is pure gravy.
- self.failUnlessEqual(self.workdir, step.workdir)
-
- slavesrc = os.path.join(full_workdir, "source.txt")
- open(slavesrc, "w").write("upload me\n")
-
- def _checkUpload(results):
- self.failUnlessEqual(results, SUCCESS)
- self.failUnless(os.path.isfile(masterdest))
-
- d = self.runStep(step)
- d.addCallback(_checkUpload)
- return d
-
- def testWithProperties(self):
- # test that workdir can be a WithProperties object
- self.slavebase = "Upload.testWithProperties.slave"
- self.masterbase = "Upload.testWithProperties.master"
- sb = self.makeSlaveBuilder()
-
- step = self.makeStep(FileUpload,
- slavesrc="src.txt",
- masterdest="dest.txt")
- step.workdir = WithProperties("build.%s", "buildnumber")
-
- self.failUnlessEqual(step._getWorkdir(), "build.1")
-
-class DownloadFile(StepTester, unittest.TestCase):
-
- def filterArgs(self, args):
- if "reader" in args:
- args["reader"] = self.wrap(args["reader"])
- return args
-
- def testSuccess(self):
- self.slavebase = "DownloadFile.testSuccess.slave"
- self.masterbase = "DownloadFile.testSuccess.master"
- sb = self.makeSlaveBuilder()
- os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
- "build"))
- mastersrc = os.path.join(self.masterbase, "source.text")
- slavedest = os.path.join(self.slavebase,
- self.slavebuilderbase,
- "build",
- "dest.txt")
- step = self.makeStep(FileDownload,
- mastersrc=mastersrc,
- slavedest="dest.txt")
- contents = "this is the source file\n" * 1000 # 24kb, so two blocks
- open(mastersrc, "w").write(contents)
- f = open(slavedest, "w")
- f.write("overwrite me\n")
- f.close()
-
- d = self.runStep(step)
- def _checkDownload(results):
- step_status = step.step_status
- self.failUnlessEqual(results, SUCCESS)
- self.failUnless(os.path.exists(slavedest))
- slavedest_contents = open(slavedest, "r").read()
- self.failUnlessEqual(slavedest_contents, contents)
- d.addCallback(_checkDownload)
- return d
-
- def testMaxsize(self):
- self.slavebase = "DownloadFile.testMaxsize.slave"
- self.masterbase = "DownloadFile.testMaxsize.master"
- sb = self.makeSlaveBuilder()
- os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
- "build"))
- mastersrc = os.path.join(self.masterbase, "source.text")
- slavedest = os.path.join(self.slavebase,
- self.slavebuilderbase,
- "build",
- "dest.txt")
- step = self.makeStep(FileDownload,
- mastersrc=mastersrc,
- slavedest="dest.txt",
- maxsize=12345)
- contents = "this is the source file\n" * 1000 # 24kb, so two blocks
- open(mastersrc, "w").write(contents)
- f = open(slavedest, "w")
- f.write("overwrite me\n")
- f.close()
-
- d = self.runStep(step)
- def _checkDownload(results):
- step_status = step.step_status
- # the file should be truncated, and the step a FAILURE
- self.failUnlessEqual(results, FAILURE)
- self.failUnless(os.path.exists(slavedest))
- slavedest_contents = open(slavedest, "r").read()
- self.failUnlessEqual(len(slavedest_contents), 12345)
- self.failUnlessEqual(slavedest_contents, contents[:12345])
- d.addCallback(_checkDownload)
- return d
-
- def testMode(self):
- self.slavebase = "DownloadFile.testMode.slave"
- self.masterbase = "DownloadFile.testMode.master"
- sb = self.makeSlaveBuilder()
- os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
- "build"))
- mastersrc = os.path.join(self.masterbase, "source.text")
- slavedest = os.path.join(self.slavebase,
- self.slavebuilderbase,
- "build",
- "dest.txt")
- step = self.makeStep(FileDownload,
- mastersrc=mastersrc,
- slavedest="dest.txt",
- mode=0755)
- contents = "this is the source file\n"
- open(mastersrc, "w").write(contents)
- f = open(slavedest, "w")
- f.write("overwrite me\n")
- f.close()
-
- d = self.runStep(step)
- def _checkDownload(results):
- step_status = step.step_status
- self.failUnlessEqual(results, SUCCESS)
- self.failUnless(os.path.exists(slavedest))
- slavedest_contents = open(slavedest, "r").read()
- self.failUnlessEqual(slavedest_contents, contents)
- # and with 0777 to ignore sticky bits
- dest_mode = os.stat(slavedest)[ST_MODE] & 0777
- self.failUnlessEqual(dest_mode, 0755,
- "target mode was %o, we wanted %o" %
- (dest_mode, 0755))
- d.addCallback(_checkDownload)
- return d
-
- def testMissingFile(self):
- self.slavebase = "DownloadFile.testMissingFile.slave"
- self.masterbase = "DownloadFile.testMissingFile.master"
- sb = self.makeSlaveBuilder()
- os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
- "build"))
- mastersrc = os.path.join(self.masterbase, "MISSING.text")
- slavedest = os.path.join(self.slavebase,
- self.slavebuilderbase,
- "build",
- "dest.txt")
- step = self.makeStep(FileDownload,
- mastersrc=mastersrc,
- slavedest="dest.txt")
-
- d = self.runStep(step)
- def _checkDownload(results):
- step_status = step.step_status
- self.failUnlessEqual(results, FAILURE)
- self.failIf(os.path.exists(slavedest))
- l = step_status.getLogs()
- logtext = l[0].getText().strip()
- self.failUnless(logtext.endswith(" not available at master"))
- d.addCallbacks(_checkDownload)
-
- return d
-
- def testLotsOfBlocks(self):
- self.slavebase = "DownloadFile.testLotsOfBlocks.slave"
- self.masterbase = "DownloadFile.testLotsOfBlocks.master"
- sb = self.makeSlaveBuilder()
- os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
- "build"))
- mastersrc = os.path.join(self.masterbase, "source.text")
- slavedest = os.path.join(self.slavebase,
- self.slavebuilderbase,
- "build",
- "dest.txt")
- step = self.makeStep(FileDownload,
- mastersrc=mastersrc,
- slavedest="dest.txt",
- blocksize=15)
- contents = "".join(["this is the source file #%d\n" % i
- for i in range(1000)])
- open(mastersrc, "w").write(contents)
- f = open(slavedest, "w")
- f.write("overwrite me\n")
- f.close()
-
- d = self.runStep(step)
- def _checkDownload(results):
- step_status = step.step_status
- self.failUnlessEqual(results, SUCCESS)
- self.failUnless(os.path.exists(slavedest))
- slavedest_contents = open(slavedest, "r").read()
- self.failUnlessEqual(slavedest_contents, contents)
- d.addCallback(_checkDownload)
- return d
-
- def testWorkdir(self):
- self.slavebase = "Download.testWorkdir.slave"
- self.masterbase = "Download.testWorkdir.master"
- sb = self.makeSlaveBuilder()
-
- # As in Upload.testWorkdir(), it's enough to test that makeStep()'s
- # call of setDefaultWorkdir() actually sets step.workdir.
- self.workdir = "mybuild"
- step = self.makeStep(FileDownload,
- mastersrc="foo",
- slavedest="foo")
- self.failUnlessEqual(step.workdir, self.workdir)
-
- def testWithProperties(self):
- # test that workdir can be a WithProperties object
- self.slavebase = "Download.testWithProperties.slave"
- self.masterbase = "Download.testWithProperties.master"
- sb = self.makeSlaveBuilder()
-
- step = self.makeStep(FileDownload,
- mastersrc="src.txt",
- slavedest="dest.txt")
- step.workdir = WithProperties("build.%s", "buildnumber")
-
- self.failUnlessEqual(step._getWorkdir(), "build.1")
-
-
-
-class UploadDirectory(StepTester, unittest.TestCase):
-
- def filterArgs(self, args):
- if "writer" in args:
- args["writer"] = self.wrap(args["writer"])
- return args
-
- def testSuccess(self):
- self.slavebase = "UploadDirectory.testSuccess.slave"
- self.masterbase = "UploadDirectory.testSuccess.master"
- sb = self.makeSlaveBuilder()
- os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
- "build"))
- # the buildmaster normally runs chdir'ed into masterbase, so uploaded
- # files will appear there. Under trial, we're chdir'ed into
- # _trial_temp instead, so use a different masterdest= to keep the
- # uploaded file in a test-local directory
- masterdest = os.path.join(self.masterbase, "dest_dir")
- step = self.makeStep(DirectoryUpload,
- slavesrc="source_dir",
- masterdest=masterdest)
- slavesrc = os.path.join(self.slavebase,
- self.slavebuilderbase,
- "build",
- "source_dir")
- dircount = 5
- content = []
- content.append("this is one source file\n" * 1000)
- content.append("this is a second source file\n" * 978)
- content.append("this is a third source file\n" * 473)
- os.mkdir(slavesrc)
- for i in range(dircount):
- os.mkdir(os.path.join(slavesrc, "d%i" % (i)))
- for j in range(dircount):
- curdir = os.path.join("d%i" % (i), "e%i" % (j))
- os.mkdir(os.path.join(slavesrc, curdir))
- for h in range(3):
- open(os.path.join(slavesrc, curdir, "file%i" % (h)), "w").write(content[h])
- for j in range(dircount):
- #empty dirs, must be uploaded too
- curdir = os.path.join("d%i" % (i), "f%i" % (j))
- os.mkdir(os.path.join(slavesrc, curdir))
-
- d = self.runStep(step)
- def _checkUpload(results):
- step_status = step.step_status
- #l = step_status.getLogs()
- #if l:
- # logtext = l[0].getText()
- # print logtext
- self.failUnlessEqual(results, SUCCESS)
- self.failUnless(os.path.exists(masterdest))
- for i in range(dircount):
- for j in range(dircount):
- curdir = os.path.join("d%i" % (i), "e%i" % (j))
- self.failUnless(os.path.exists(os.path.join(masterdest, curdir)))
- for h in range(3):
- masterdest_contents = open(os.path.join(masterdest, curdir, "file%i" % (h)), "r").read()
- self.failUnlessEqual(masterdest_contents, content[h])
- for j in range(dircount):
- curdir = os.path.join("d%i" % (i), "f%i" % (j))
- self.failUnless(os.path.exists(os.path.join(masterdest, curdir)))
- d.addCallback(_checkUpload)
- return d
-
- def testOneEmptyDir(self):
- self.slavebase = "UploadDirectory.testOneEmptyDir.slave"
- self.masterbase = "UploadDirectory.testOneEmptyDir.master"
- sb = self.makeSlaveBuilder()
- os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
- "build"))
- # the buildmaster normally runs chdir'ed into masterbase, so uploaded
- # files will appear there. Under trial, we're chdir'ed into
- # _trial_temp instead, so use a different masterdest= to keep the
- # uploaded file in a test-local directory
- masterdest = os.path.join(self.masterbase, "dest_dir")
- step = self.makeStep(DirectoryUpload,
- slavesrc="source_dir",
- masterdest=masterdest)
- slavesrc = os.path.join(self.slavebase,
- self.slavebuilderbase,
- "build",
- "source_dir")
- os.mkdir(slavesrc)
-
- d = self.runStep(step)
- def _checkUpload(results):
- step_status = step.step_status
- #l = step_status.getLogs()
- #if l:
- # logtext = l[0].getText()
- # print logtext
- self.failUnlessEqual(results, SUCCESS)
- self.failUnless(os.path.exists(masterdest))
- d.addCallback(_checkUpload)
- return d
-
- def testManyEmptyDirs(self):
- self.slavebase = "UploadDirectory.testManyEmptyDirs.slave"
- self.masterbase = "UploadDirectory.testManyEmptyDirs.master"
- sb = self.makeSlaveBuilder()
- os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
- "build"))
- # the buildmaster normally runs chdir'ed into masterbase, so uploaded
- # files will appear there. Under trial, we're chdir'ed into
- # _trial_temp instead, so use a different masterdest= to keep the
- # uploaded file in a test-local directory
- masterdest = os.path.join(self.masterbase, "dest_dir")
- step = self.makeStep(DirectoryUpload,
- slavesrc="source_dir",
- masterdest=masterdest)
- slavesrc = os.path.join(self.slavebase,
- self.slavebuilderbase,
- "build",
- "source_dir")
- dircount = 25
- os.mkdir(slavesrc)
- for i in range(dircount):
- os.mkdir(os.path.join(slavesrc, "d%i" % (i)))
- for j in range(dircount):
- curdir = os.path.join("d%i" % (i), "e%i" % (j))
- os.mkdir(os.path.join(slavesrc, curdir))
- curdir = os.path.join("d%i" % (i), "f%i" % (j))
- os.mkdir(os.path.join(slavesrc, curdir))
-
- d = self.runStep(step)
- def _checkUpload(results):
- step_status = step.step_status
- #l = step_status.getLogs()
- #if l:
- # logtext = l[0].getText()
- # print logtext
- self.failUnlessEqual(results, SUCCESS)
- self.failUnless(os.path.exists(masterdest))
- for i in range(dircount):
- for j in range(dircount):
- curdir = os.path.join("d%i" % (i), "e%i" % (j))
- self.failUnless(os.path.exists(os.path.join(masterdest, curdir)))
- curdir = os.path.join("d%i" % (i), "f%i" % (j))
- self.failUnless(os.path.exists(os.path.join(masterdest, curdir)))
- d.addCallback(_checkUpload)
- return d
-
- def testOneDirOneFile(self):
- self.slavebase = "UploadDirectory.testOneDirOneFile.slave"
- self.masterbase = "UploadDirectory.testOneDirOneFile.master"
- sb = self.makeSlaveBuilder()
- os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
- "build"))
- # the buildmaster normally runs chdir'ed into masterbase, so uploaded
- # files will appear there. Under trial, we're chdir'ed into
- # _trial_temp instead, so use a different masterdest= to keep the
- # uploaded file in a test-local directory
- masterdest = os.path.join(self.masterbase, "dest_dir")
- step = self.makeStep(DirectoryUpload,
- slavesrc="source_dir",
- masterdest=masterdest)
- slavesrc = os.path.join(self.slavebase,
- self.slavebuilderbase,
- "build",
- "source_dir")
- os.mkdir(slavesrc)
- content = "this is one source file\n" * 1000
- open(os.path.join(slavesrc, "srcfile"), "w").write(content)
-
- d = self.runStep(step)
- def _checkUpload(results):
- step_status = step.step_status
- #l = step_status.getLogs()
- #if l:
- # logtext = l[0].getText()
- # print logtext
- self.failUnlessEqual(results, SUCCESS)
- self.failUnless(os.path.exists(masterdest))
- masterdest_contents = open(os.path.join(masterdest, "srcfile"), "r").read()
- self.failUnlessEqual(masterdest_contents, content)
- d.addCallback(_checkUpload)
- return d
-
- def testOneDirManyFiles(self):
- self.slavebase = "UploadDirectory.testOneDirManyFile.slave"
- self.masterbase = "UploadDirectory.testOneDirManyFile.master"
- sb = self.makeSlaveBuilder()
- os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
- "build"))
- # the buildmaster normally runs chdir'ed into masterbase, so uploaded
- # files will appear there. Under trial, we're chdir'ed into
- # _trial_temp instead, so use a different masterdest= to keep the
- # uploaded file in a test-local directory
- masterdest = os.path.join(self.masterbase, "dest_dir")
- step = self.makeStep(DirectoryUpload,
- slavesrc="source_dir",
- masterdest=masterdest)
- slavesrc = os.path.join(self.slavebase,
- self.slavebuilderbase,
- "build",
- "source_dir")
- filecount = 20
- os.mkdir(slavesrc)
- content = []
- content.append("this is one source file\n" * 1000)
- content.append("this is a second source file\n" * 978)
- content.append("this is a third source file\n" * 473)
- for i in range(3):
- for j in range(filecount):
- open(os.path.join(slavesrc, "srcfile%i_%i" % (i, j)), "w").write(content[i])
-
- d = self.runStep(step)
- def _checkUpload(results):
- step_status = step.step_status
- #l = step_status.getLogs()
- #if l:
- # logtext = l[0].getText()
- # print logtext
- self.failUnlessEqual(results, SUCCESS)
- self.failUnless(os.path.exists(masterdest))
- for i in range(3):
- for j in range(filecount):
- masterdest_contents = open(os.path.join(masterdest, "srcfile%i_%i" % (i, j)), "r").read()
- self.failUnlessEqual(masterdest_contents, content[i])
- d.addCallback(_checkUpload)
- return d
-
- def testManyDirsManyFiles(self):
- self.slavebase = "UploadDirectory.testManyDirsManyFile.slave"
- self.masterbase = "UploadDirectory.testManyDirsManyFile.master"
- sb = self.makeSlaveBuilder()
- os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
- "build"))
- # the buildmaster normally runs chdir'ed into masterbase, so uploaded
- # files will appear there. Under trial, we're chdir'ed into
- # _trial_temp instead, so use a different masterdest= to keep the
- # uploaded file in a test-local directory
- masterdest = os.path.join(self.masterbase, "dest_dir")
- step = self.makeStep(DirectoryUpload,
- slavesrc="source_dir",
- masterdest=masterdest)
- slavesrc = os.path.join(self.slavebase,
- self.slavebuilderbase,
- "build",
- "source_dir")
- dircount = 10
- os.mkdir(slavesrc)
- for i in range(dircount):
- os.mkdir(os.path.join(slavesrc, "d%i" % (i)))
- for j in range(dircount):
- curdir = os.path.join("d%i" % (i), "e%i" % (j))
- os.mkdir(os.path.join(slavesrc, curdir))
- curdir = os.path.join("d%i" % (i), "f%i" % (j))
- os.mkdir(os.path.join(slavesrc, curdir))
-
- filecount = 5
- content = []
- content.append("this is one source file\n" * 1000)
- content.append("this is a second source file\n" * 978)
- content.append("this is a third source file\n" * 473)
- for i in range(dircount):
- for j in range(dircount):
- for k in range(3):
- for l in range(filecount):
- open(os.path.join(slavesrc, "d%i" % (i), "e%i" % (j), "srcfile%i_%i" % (k, l)), "w").write(content[k])
-
- d = self.runStep(step)
- def _checkUpload(results):
- step_status = step.step_status
- #l = step_status.getLogs()
- #if l:
- # logtext = l[0].getText()
- # print logtext
- self.failUnlessEqual(results, SUCCESS)
- self.failUnless(os.path.exists(masterdest))
- for i in range(dircount):
- for j in range(dircount):
- for k in range(3):
- for l in range(filecount):
- masterdest_contents = open(os.path.join(masterdest, "d%i" % (i), "e%i" % (j), "srcfile%i_%i" % (k, l)), "r").read()
- self.failUnlessEqual(masterdest_contents, content[k])
- d.addCallback(_checkUpload)
- return d
-
-
-# TODO:
-# test relative paths, ~/paths
-# need to implement expanduser() for slave-side
-# test error message when master-side file is in a missing directory
-# remove workdir= default?
-
diff --git a/buildbot/buildbot/test/test_twisted.py b/buildbot/buildbot/test/test_twisted.py
deleted file mode 100644
index 7b4f9bf..0000000
--- a/buildbot/buildbot/test/test_twisted.py
+++ /dev/null
@@ -1,219 +0,0 @@
-# -*- test-case-name: buildbot.test.test_twisted -*-
-
-from twisted.trial import unittest
-
-from buildbot import interfaces
-from buildbot.steps.python_twisted import countFailedTests
-from buildbot.steps.python_twisted import Trial, TrialTestCaseCounter
-from buildbot.status import builder
-
-noisy = 0
-if noisy:
- from twisted.python.log import startLogging
- import sys
- startLogging(sys.stdout)
-
-out1 = """
--------------------------------------------------------------------------------
-Ran 13 tests in 1.047s
-
-OK
-"""
-
-out2 = """
--------------------------------------------------------------------------------
-Ran 12 tests in 1.040s
-
-FAILED (failures=1)
-"""
-
-out3 = """
- NotImplementedError
--------------------------------------------------------------------------------
-Ran 13 tests in 1.042s
-
-FAILED (failures=1, errors=1)
-"""
-
-out4 = """
-unparseable
-"""
-
-out5 = """
- File "/usr/home/warner/stuff/python/twisted/Twisted-CVS/twisted/test/test_defer.py", line 79, in testTwoCallbacks
- self.fail("just because")
- File "/usr/home/warner/stuff/python/twisted/Twisted-CVS/twisted/trial/unittest.py", line 21, in fail
- raise AssertionError, message
- AssertionError: just because
-unparseable
-"""
-
-out6 = """
-===============================================================================
-SKIPPED: testProtocolLocalhost (twisted.flow.test.test_flow.FlowTest)
--------------------------------------------------------------------------------
-XXX freezes, fixme
-===============================================================================
-SKIPPED: testIPv6 (twisted.names.test.test_names.HostsTestCase)
--------------------------------------------------------------------------------
-IPv6 support is not in our hosts resolver yet
-===============================================================================
-EXPECTED FAILURE: testSlots (twisted.test.test_rebuild.NewStyleTestCase)
--------------------------------------------------------------------------------
-Traceback (most recent call last):
- File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/unittest.py", line 240, in _runPhase
- stage(*args, **kwargs)
- File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/unittest.py", line 262, in _main
- self.runner(self.method)
- File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/runner.py", line 95, in runTest
- method()
- File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/test/test_rebuild.py", line 130, in testSlots
- rebuild.updateInstance(self.m.SlottedClass())
- File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/python/rebuild.py", line 114, in updateInstance
- self.__class__ = latestClass(self.__class__)
-TypeError: __class__ assignment: 'SlottedClass' object layout differs from 'SlottedClass'
-===============================================================================
-FAILURE: testBatchFile (twisted.conch.test.test_sftp.TestOurServerBatchFile)
--------------------------------------------------------------------------------
-Traceback (most recent call last):
- File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/unittest.py", line 240, in _runPhase
- stage(*args, **kwargs)
- File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/unittest.py", line 262, in _main
- self.runner(self.method)
- File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/runner.py", line 95, in runTest
- method()
- File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/conch/test/test_sftp.py", line 450, in testBatchFile
- self.failUnlessEqual(res[1:-2], ['testDirectory', 'testRemoveFile', 'testRenameFile', 'testfile1'])
- File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/unittest.py", line 115, in failUnlessEqual
- raise FailTest, (msg or '%r != %r' % (first, second))
-FailTest: [] != ['testDirectory', 'testRemoveFile', 'testRenameFile', 'testfile1']
--------------------------------------------------------------------------------
-Ran 1454 tests in 911.579s
-
-FAILED (failures=2, skips=49, expectedFailures=9)
-Exception exceptions.AttributeError: "'NoneType' object has no attribute 'StringIO'" in <bound method RemoteReference.__del__ of <twisted.spread.pb.RemoteReference instance at 0x27036c0>> ignored
-"""
-
-class MyTrial(Trial):
- def addTestResult(self, testname, results, text, logs):
- self.results.append((testname, results, text, logs))
- def addCompleteLog(self, name, log):
- pass
-
-class MyLogFile:
- def __init__(self, text):
- self.text = text
- def getText(self):
- return self.text
-
-
-class Count(unittest.TestCase):
-
- def count(self, total, failures=0, errors=0,
- expectedFailures=0, unexpectedSuccesses=0, skips=0):
- d = {
- 'total': total,
- 'failures': failures,
- 'errors': errors,
- 'expectedFailures': expectedFailures,
- 'unexpectedSuccesses': unexpectedSuccesses,
- 'skips': skips,
- }
- return d
-
- def testCountFailedTests(self):
- count = countFailedTests(out1)
- self.assertEquals(count, self.count(total=13))
- count = countFailedTests(out2)
- self.assertEquals(count, self.count(total=12, failures=1))
- count = countFailedTests(out3)
- self.assertEquals(count, self.count(total=13, failures=1, errors=1))
- count = countFailedTests(out4)
- self.assertEquals(count, self.count(total=None))
- count = countFailedTests(out5)
- self.assertEquals(count, self.count(total=None))
-
-class Counter(unittest.TestCase):
-
- def setProgress(self, metric, value):
- self.progress = (metric, value)
-
- def testCounter(self):
- self.progress = (None,None)
- c = TrialTestCaseCounter()
- c.setStep(self)
- STDOUT = interfaces.LOG_CHANNEL_STDOUT
- def add(text):
- c.logChunk(None, None, None, STDOUT, text)
- add("\n\n")
- self.failUnlessEqual(self.progress, (None,None))
- add("bogus line\n")
- self.failUnlessEqual(self.progress, (None,None))
- add("buildbot.test.test_config.ConfigTest.testBots ... [OK]\n")
- self.failUnlessEqual(self.progress, ("tests", 1))
- add("buildbot.test.test_config.ConfigTest.tes")
- self.failUnlessEqual(self.progress, ("tests", 1))
- add("tBuilders ... [OK]\n")
- self.failUnlessEqual(self.progress, ("tests", 2))
- # confirm alternative delimiters work too.. ptys seem to emit
- # something different
- add("buildbot.test.test_config.ConfigTest.testIRC ... [OK]\r\n")
- self.failUnlessEqual(self.progress, ("tests", 3))
- add("===============================================================================\n")
- self.failUnlessEqual(self.progress, ("tests", 3))
- add("buildbot.test.test_config.IOnlyLookLikeA.testLine ... [OK]\n")
- self.failUnlessEqual(self.progress, ("tests", 3))
-
-
-
-class Parse(unittest.TestCase):
- def failUnlessIn(self, substr, string):
- self.failUnless(string.find(substr) != -1)
-
- def testParse(self):
- t = MyTrial(build=None, workdir=".", testpath=None, testChanges=True)
- t.results = []
- log = MyLogFile(out6)
- t.createSummary(log)
-
- self.failUnlessEqual(len(t.results), 4)
- r1, r2, r3, r4 = t.results
- testname, results, text, logs = r1
- self.failUnlessEqual(testname,
- ("twisted", "flow", "test", "test_flow",
- "FlowTest", "testProtocolLocalhost"))
- self.failUnlessEqual(results, builder.SKIPPED)
- self.failUnlessEqual(text, ['skipped'])
- self.failUnlessIn("XXX freezes, fixme", logs)
- self.failUnless(logs.startswith("SKIPPED:"))
- self.failUnless(logs.endswith("fixme\n"))
-
- testname, results, text, logs = r2
- self.failUnlessEqual(testname,
- ("twisted", "names", "test", "test_names",
- "HostsTestCase", "testIPv6"))
- self.failUnlessEqual(results, builder.SKIPPED)
- self.failUnlessEqual(text, ['skipped'])
- self.failUnless(logs.startswith("SKIPPED: testIPv6"))
- self.failUnless(logs.endswith("IPv6 support is not in our hosts resolver yet\n"))
-
- testname, results, text, logs = r3
- self.failUnlessEqual(testname,
- ("twisted", "test", "test_rebuild",
- "NewStyleTestCase", "testSlots"))
- self.failUnlessEqual(results, builder.SUCCESS)
- self.failUnlessEqual(text, ['expected', 'failure'])
- self.failUnless(logs.startswith("EXPECTED FAILURE: "))
- self.failUnlessIn("\nTraceback ", logs)
- self.failUnless(logs.endswith("layout differs from 'SlottedClass'\n"))
-
- testname, results, text, logs = r4
- self.failUnlessEqual(testname,
- ("twisted", "conch", "test", "test_sftp",
- "TestOurServerBatchFile", "testBatchFile"))
- self.failUnlessEqual(results, builder.FAILURE)
- self.failUnlessEqual(text, ['failure'])
- self.failUnless(logs.startswith("FAILURE: "))
- self.failUnlessIn("Traceback ", logs)
- self.failUnless(logs.endswith("'testRenameFile', 'testfile1']\n"))
-
diff --git a/buildbot/buildbot/test/test_util.py b/buildbot/buildbot/test/test_util.py
deleted file mode 100644
index b375390..0000000
--- a/buildbot/buildbot/test/test_util.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# -*- test-case-name: buildbot.test.test_util -*-
-
-from twisted.trial import unittest
-
-from buildbot import util
-
-
-class Foo(util.ComparableMixin):
- compare_attrs = ["a", "b"]
-
- def __init__(self, a, b, c):
- self.a, self.b, self.c = a,b,c
-
-
-class Bar(Foo, util.ComparableMixin):
- compare_attrs = ["b", "c"]
-
-class Compare(unittest.TestCase):
- def testCompare(self):
- f1 = Foo(1, 2, 3)
- f2 = Foo(1, 2, 4)
- f3 = Foo(1, 3, 4)
- b1 = Bar(1, 2, 3)
- self.failUnless(f1 == f2)
- self.failIf(f1 == f3)
- self.failIf(f1 == b1)
diff --git a/buildbot/buildbot/test/test_vc.py b/buildbot/buildbot/test/test_vc.py
deleted file mode 100644
index 4d0c18e..0000000
--- a/buildbot/buildbot/test/test_vc.py
+++ /dev/null
@@ -1,3023 +0,0 @@
-# -*- test-case-name: buildbot.test.test_vc -*-
-
-import sys, os, time, re
-from email.Utils import mktime_tz, parsedate_tz
-
-from twisted.trial import unittest
-from twisted.internet import defer, reactor, utils, protocol, task, error
-from twisted.python import failure
-from twisted.python.procutils import which
-from twisted.web import client, static, server
-
-#defer.Deferred.debug = True
-
-from twisted.python import log
-#log.startLogging(sys.stderr)
-
-from buildbot import master, interfaces
-from buildbot.slave import bot, commands
-from buildbot.slave.commands import rmdirRecursive
-from buildbot.status.builder import SUCCESS, FAILURE
-from buildbot.process import base
-from buildbot.steps import source
-from buildbot.changes import changes
-from buildbot.sourcestamp import SourceStamp
-from buildbot.scripts import tryclient
-from buildbot.test.runutils import SignalMixin, myGetProcessOutputAndValue
-
-#step.LoggedRemoteCommand.debug = True
-
-from twisted.internet.defer import waitForDeferred, deferredGenerator
-
-# Most of these tests (all but SourceStamp) depend upon having a set of
-# repositories from which we can perform checkouts. These repositories are
-# created by the setUp method at the start of each test class. In earlier
-# versions these repositories were created offline and distributed with a
-# separate tarball named 'buildbot-test-vc-1.tar.gz'. This is no longer
-# necessary.
-
-# CVS requires a local file repository. Providing remote access is beyond
-# the feasible abilities of this test program (needs pserver or ssh).
-
-# SVN requires a local file repository. To provide remote access over HTTP
-# requires an apache server with DAV support and mod_svn, way beyond what we
-# can test from here.
-
-# Arch and Darcs both allow remote (read-only) operation with any web
-# server. We test both local file access and HTTP access (by spawning a
-# small web server to provide access to the repository files while the test
-# is running).
-
-# Perforce starts the daemon running on localhost. Unfortunately, it must
-# use a predetermined Internet-domain port number, unless we want to go
-# all-out: bind the listen socket ourselves and pretend to be inetd.
-
-config_vc = """
-from buildbot.process import factory
-from buildbot.steps import source
-from buildbot.buildslave import BuildSlave
-s = factory.s
-
-f1 = factory.BuildFactory([
- %s,
- ])
-c = {}
-c['slaves'] = [BuildSlave('bot1', 'sekrit')]
-c['schedulers'] = []
-c['builders'] = [{'name': 'vc', 'slavename': 'bot1',
- 'builddir': 'vc-dir', 'factory': f1}]
-c['slavePortnum'] = 0
-# do not compress logs in tests
-c['logCompressionLimit'] = False
-BuildmasterConfig = c
-"""
-
-p0_diff = r"""
-Index: subdir/subdir.c
-===================================================================
-RCS file: /home/warner/stuff/Projects/BuildBot/code-arch/_trial_temp/test_vc/repositories/CVS-Repository/sample/subdir/subdir.c,v
-retrieving revision 1.1.1.1
-diff -u -r1.1.1.1 subdir.c
---- subdir/subdir.c 14 Aug 2005 01:32:49 -0000 1.1.1.1
-+++ subdir/subdir.c 14 Aug 2005 01:36:15 -0000
-@@ -4,6 +4,6 @@
- int
- main(int argc, const char *argv[])
- {
-- printf("Hello subdir.\n");
-+ printf("Hello patched subdir.\n");
- return 0;
- }
-"""
-
-# this patch does not include the filename headers, so it is
-# patchlevel-neutral
-TRY_PATCH = '''
-@@ -5,6 +5,6 @@
- int
- main(int argc, const char *argv[])
- {
-- printf("Hello subdir.\\n");
-+ printf("Hello try.\\n");
- return 0;
- }
-'''
-
-MAIN_C = '''
-// this is main.c
-#include <stdio.h>
-
-int
-main(int argc, const char *argv[])
-{
- printf("Hello world.\\n");
- return 0;
-}
-'''
-
-BRANCH_C = '''
-// this is main.c
-#include <stdio.h>
-
-int
-main(int argc, const char *argv[])
-{
- printf("Hello branch.\\n");
- return 0;
-}
-'''
-
-VERSION_C = '''
-// this is version.c
-#include <stdio.h>
-
-int
-main(int argc, const char *argv[])
-{
- printf("Hello world, version=%d\\n");
- return 0;
-}
-'''
-
-SUBDIR_C = '''
-// this is subdir/subdir.c
-#include <stdio.h>
-
-int
-main(int argc, const char *argv[])
-{
- printf("Hello subdir.\\n");
- return 0;
-}
-'''
-
-TRY_C = '''
-// this is subdir/subdir.c
-#include <stdio.h>
-
-int
-main(int argc, const char *argv[])
-{
- printf("Hello try.\\n");
- return 0;
-}
-'''
-
-def qw(s):
- return s.split()
-
-class VCS_Helper:
- # this is a helper class which keeps track of whether each VC system is
- # available, and whether the repository for each has been created. There
- # is one instance of this class, at module level, shared between all test
- # cases.
-
- def __init__(self):
- self._helpers = {}
- self._isCapable = {}
- self._excuses = {}
- self._repoReady = {}
-
- def registerVC(self, name, helper):
- self._helpers[name] = helper
- self._repoReady[name] = False
-
- def skipIfNotCapable(self, name):
- """Either return None, or raise SkipTest"""
- d = self.capable(name)
- def _maybeSkip(res):
- if not res[0]:
- raise unittest.SkipTest(res[1])
- d.addCallback(_maybeSkip)
- return d
-
- def capable(self, name):
- """Return a Deferred that fires with (True,None) if this host offers
- the given VC tool, or (False,excuse) if it does not (and therefore
- the tests should be skipped)."""
-
- if self._isCapable.has_key(name):
- if self._isCapable[name]:
- return defer.succeed((True,None))
- else:
- return defer.succeed((False, self._excuses[name]))
- d = defer.maybeDeferred(self._helpers[name].capable)
- def _capable(res):
- if res[0]:
- self._isCapable[name] = True
- else:
- self._excuses[name] = res[1]
- return res
- d.addCallback(_capable)
- return d
-
- def getHelper(self, name):
- return self._helpers[name]
-
- def createRepository(self, name):
- """Return a Deferred that fires when the repository is set up."""
- if self._repoReady[name]:
- return defer.succeed(True)
- d = self._helpers[name].createRepository()
- def _ready(res):
- self._repoReady[name] = True
- d.addCallback(_ready)
- return d
-
-VCS = VCS_Helper()
-
-
-# the overall plan here:
-#
-# Each VC system is tested separately, all using the same source tree defined
-# in the 'files' dictionary above. Each VC system gets its own TestCase
-# subclass. The first test case that is run will create the repository during
-# setUp(), making two branches: 'trunk' and 'branch'. The trunk gets a copy
-# of all the files in 'files'. The variant of good.c is committed on the
-# branch.
-#
-# then testCheckout is run, which does a number of checkout/clobber/update
-# builds. These all use trunk r1. It then runs self.fix(), which modifies
-# 'fixable.c', then performs another build and makes sure the tree has been
-# updated.
-#
-# testBranch uses trunk-r1 and branch-r1, making sure that we clobber the
-# tree properly when we switch between them
-#
-# testPatch does a trunk-r1 checkout and applies a patch.
-#
-# testTryGetPatch performs a trunk-r1 checkout, modifies some files, then
-# verifies that tryclient.getSourceStamp figures out the base revision and
-# what got changed.
-
-
-# vc_create makes a repository at r1 with three files: main.c, version.c, and
-# subdir/foo.c . It also creates a branch from r1 (called b1) in which main.c
-# says "hello branch" instead of "hello world". self.trunk[] contains
-# revision stamps for everything on the trunk, and self.branch[] does the
-# same for the branch.
-
-# vc_revise() checks out a tree at HEAD, changes version.c, then checks it
-# back in. The new version stamp is appended to self.trunk[]. The tree is
-# removed afterwards.
-
-# vc_try_checkout(workdir, rev) checks out a tree at REV, then changes
-# subdir/subdir.c to say 'Hello try'
-# vc_try_finish(workdir) removes the tree and cleans up any VC state
-# necessary (like deleting the Arch archive entry).
-
-
-class BaseHelper:
- def __init__(self):
- self.trunk = []
- self.branch = []
- self.allrevs = []
-
- def capable(self):
- # this is also responsible for setting self.vcexe
- raise NotImplementedError
-
- def createBasedir(self):
- # you must call this from createRepository
- self.repbase = os.path.abspath(os.path.join("test_vc",
- "repositories"))
- if not os.path.isdir(self.repbase):
- os.makedirs(self.repbase)
-
- def createRepository(self):
- # this will only be called once per process
- raise NotImplementedError
-
- def populate(self, basedir):
- if not os.path.exists(basedir):
- os.makedirs(basedir)
- os.makedirs(os.path.join(basedir, "subdir"))
- open(os.path.join(basedir, "main.c"), "w").write(MAIN_C)
- self.version = 1
- version_c = VERSION_C % self.version
- open(os.path.join(basedir, "version.c"), "w").write(version_c)
- open(os.path.join(basedir, "main.c"), "w").write(MAIN_C)
- open(os.path.join(basedir, "subdir", "subdir.c"), "w").write(SUBDIR_C)
-
- def populate_branch(self, basedir):
- open(os.path.join(basedir, "main.c"), "w").write(BRANCH_C)
-
- def addTrunkRev(self, rev):
- self.trunk.append(rev)
- self.allrevs.append(rev)
- def addBranchRev(self, rev):
- self.branch.append(rev)
- self.allrevs.append(rev)
-
- def runCommand(self, basedir, command, failureIsOk=False,
- stdin=None, env=None):
- # all commands passed to do() should be strings or lists. If they are
- # strings, none of the arguments may have spaces. This makes the
- # commands less verbose at the expense of restricting what they can
- # specify.
- if type(command) not in (list, tuple):
- command = command.split(" ")
-
- # execute scripts through cmd.exe on windows, to avoid space in path issues
- if sys.platform == 'win32' and command[0].lower().endswith('.cmd'):
- command = [which('cmd.exe')[0], '/c', 'call'] + command
-
- DEBUG = False
- if DEBUG:
- print "do %s" % command
- print " in basedir %s" % basedir
- if stdin:
- print " STDIN:\n", stdin, "\n--STDIN DONE"
-
- if not env:
- env = os.environ.copy()
- env['LC_ALL'] = "C"
- d = myGetProcessOutputAndValue(command[0], command[1:],
- env=env, path=basedir,
- stdin=stdin)
- def check((out, err, code)):
- if DEBUG:
- print
- print "command was: %s" % command
- if out: print "out: %s" % out
- if err: print "err: %s" % err
- print "code: %s" % code
- if code != 0 and not failureIsOk:
- log.msg("command %s finished with exit code %d" %
- (command, code))
- log.msg(" and stdout %s" % (out,))
- log.msg(" and stderr %s" % (err,))
- raise RuntimeError("command %s finished with exit code %d"
- % (command, code)
- + ": see logs for stdout")
- return out
- d.addCallback(check)
- return d
-
- def do(self, basedir, command, failureIsOk=False, stdin=None, env=None):
- d = self.runCommand(basedir, command, failureIsOk=failureIsOk,
- stdin=stdin, env=env)
- return waitForDeferred(d)
-
- def dovc(self, basedir, command, failureIsOk=False, stdin=None, env=None):
- """Like do(), but the VC binary will be prepended to COMMAND."""
- if isinstance(command, (str, unicode)):
- command = [self.vcexe] + command.split(' ')
- else:
- # command is a list
- command = [self.vcexe] + command
- return self.do(basedir, command, failureIsOk, stdin, env)
-
-class VCBase(SignalMixin):
- metadir = None
- createdRepository = False
- master = None
- slave = None
- helper = None
- httpServer = None
- httpPort = None
- skip = None
- has_got_revision = False
- has_got_revision_branches_are_merged = False # for SVN
-
- def failUnlessIn(self, substring, string, msg=None):
- # trial provides a version of this that requires python-2.3 to test
- # strings.
- if msg is None:
- msg = ("did not see the expected substring '%s' in string '%s'" %
- (substring, string))
- self.failUnless(string.find(substring) != -1, msg)
-
- def setUp(self):
- d = VCS.skipIfNotCapable(self.vc_name)
- d.addCallback(self._setUp1)
- return d
-
- def _setUp1(self, res):
- self.helper = VCS.getHelper(self.vc_name)
-
- if os.path.exists("basedir"):
- rmdirRecursive("basedir")
- os.mkdir("basedir")
- self.master = master.BuildMaster("basedir")
- self.slavebase = os.path.abspath("slavebase")
- if os.path.exists(self.slavebase):
- rmdirRecursive(self.slavebase)
- os.mkdir("slavebase")
-
- d = VCS.createRepository(self.vc_name)
- return d
-
- def connectSlave(self):
- port = self.master.slavePort._port.getHost().port
- slave = bot.BuildSlave("localhost", port, "bot1", "sekrit",
- self.slavebase, keepalive=0, usePTY=False)
- self.slave = slave
- slave.startService()
- d = self.master.botmaster.waitUntilBuilderAttached("vc")
- return d
-
- def loadConfig(self, config):
- # reloading the config file causes a new 'listDirs' command to be
- # sent to the slave. To synchronize on this properly, it is easiest
- # to stop and restart the slave.
- d = defer.succeed(None)
- if self.slave:
- d = self.master.botmaster.waitUntilBuilderDetached("vc")
- self.slave.stopService()
- d.addCallback(lambda res: self.master.loadConfig(config))
- d.addCallback(lambda res: self.connectSlave())
- return d
-
- def serveHTTP(self):
- # launch an HTTP server to serve the repository files
- self.root = static.File(self.helper.repbase)
- self.site = server.Site(self.root)
- self.httpServer = reactor.listenTCP(0, self.site)
- self.httpPort = self.httpServer.getHost().port
-
- def doBuild(self, shouldSucceed=True, ss=None):
- c = interfaces.IControl(self.master)
-
- if ss is None:
- ss = SourceStamp()
- #print "doBuild(ss: b=%s rev=%s)" % (ss.branch, ss.revision)
- req = base.BuildRequest("test_vc forced build", ss, 'test_builder')
- d = req.waitUntilFinished()
- c.getBuilder("vc").requestBuild(req)
- d.addCallback(self._doBuild_1, shouldSucceed)
- return d
- def _doBuild_1(self, bs, shouldSucceed):
- r = bs.getResults()
- if r != SUCCESS and shouldSucceed:
- print
- print
- if not bs.isFinished():
- print "Hey, build wasn't even finished!"
- print "Build did not succeed:", r, bs.getText()
- for s in bs.getSteps():
- for l in s.getLogs():
- print "--- START step %s / log %s ---" % (s.getName(),
- l.getName())
- print l.getTextWithHeaders()
- print "--- STOP ---"
- print
- self.fail("build did not succeed")
- return bs
-
- def printLogs(self, bs):
- for s in bs.getSteps():
- for l in s.getLogs():
- print "--- START step %s / log %s ---" % (s.getName(),
- l.getName())
- print l.getTextWithHeaders()
- print "--- STOP ---"
- print
-
- def touch(self, d, f):
- open(os.path.join(d,f),"w").close()
- def shouldExist(self, *args):
- target = os.path.join(*args)
- self.failUnless(os.path.exists(target),
- "expected to find %s but didn't" % target)
- def shouldNotExist(self, *args):
- target = os.path.join(*args)
- self.failIf(os.path.exists(target),
- "expected to NOT find %s, but did" % target)
- def shouldContain(self, d, f, contents):
- c = open(os.path.join(d, f), "r").read()
- self.failUnlessIn(contents, c)
-
- def checkGotRevision(self, bs, expected):
- if self.has_got_revision:
- self.failUnlessEqual(bs.getProperty("got_revision"), str(expected))
-
- def checkGotRevisionIsLatest(self, bs):
- expected = self.helper.trunk[-1]
- if self.has_got_revision_branches_are_merged:
- expected = self.helper.allrevs[-1]
- self.checkGotRevision(bs, expected)
-
- def do_vctest(self, testRetry=True):
- vctype = self.vctype
- args = self.helper.vcargs
- m = self.master
- self.vcdir = os.path.join(self.slavebase, "vc-dir", "source")
- self.workdir = os.path.join(self.slavebase, "vc-dir", "build")
- # woo double-substitution
- s = "s(%s, timeout=200, workdir='build', mode='%%s'" % (vctype,)
- for k,v in args.items():
- s += ", %s=%s" % (k, repr(v))
- s += ")"
- config = config_vc % s
-
- m.loadConfig(config % 'clobber')
- m.readConfig = True
- m.startService()
-
- d = self.connectSlave()
- d.addCallback(lambda res: log.msg("testing clobber"))
- d.addCallback(self._do_vctest_clobber)
- d.addCallback(lambda res: log.msg("doing update"))
- d.addCallback(lambda res: self.loadConfig(config % 'update'))
- d.addCallback(lambda res: log.msg("testing update"))
- d.addCallback(self._do_vctest_update)
- if testRetry:
- d.addCallback(lambda res: log.msg("testing update retry"))
- d.addCallback(self._do_vctest_update_retry)
- d.addCallback(lambda res: log.msg("doing copy"))
- d.addCallback(lambda res: self.loadConfig(config % 'copy'))
- d.addCallback(lambda res: log.msg("testing copy"))
- d.addCallback(self._do_vctest_copy)
- d.addCallback(lambda res: log.msg("did copy test"))
- if self.metadir:
- d.addCallback(lambda res: log.msg("doing export"))
- d.addCallback(lambda res: self.loadConfig(config % 'export'))
- d.addCallback(lambda res: log.msg("testing export"))
- d.addCallback(self._do_vctest_export)
- d.addCallback(lambda res: log.msg("did export test"))
- return d
-
- def _do_vctest_clobber(self, res):
- d = self.doBuild() # initial checkout
- d.addCallback(self._do_vctest_clobber_1)
- return d
- def _do_vctest_clobber_1(self, bs):
- self.shouldExist(self.workdir, "main.c")
- self.shouldExist(self.workdir, "version.c")
- self.shouldExist(self.workdir, "subdir", "subdir.c")
- if self.metadir:
- self.shouldExist(self.workdir, self.metadir)
- self.failUnlessEqual(bs.getProperty("revision"), None)
- self.failUnlessEqual(bs.getProperty("branch"), None)
- self.checkGotRevisionIsLatest(bs)
-
- self.touch(self.workdir, "newfile")
- self.shouldExist(self.workdir, "newfile")
- d = self.doBuild() # rebuild clobbers workdir
- d.addCallback(self._do_vctest_clobber_2)
- return d
- def _do_vctest_clobber_2(self, res):
- self.shouldNotExist(self.workdir, "newfile")
- # do a checkout to a specific version. Mercurial-over-HTTP (when
- # either client or server is older than hg-0.9.2) cannot do this
- # directly, so it must checkout HEAD and then update back to the
- # requested revision.
- d = self.doBuild(ss=SourceStamp(revision=self.helper.trunk[0]))
- d.addCallback(self._do_vctest_clobber_3)
- return d
- def _do_vctest_clobber_3(self, bs):
- self.shouldExist(self.workdir, "main.c")
- self.shouldExist(self.workdir, "version.c")
- self.shouldExist(self.workdir, "subdir", "subdir.c")
- if self.metadir:
- self.shouldExist(self.workdir, self.metadir)
- self.failUnlessEqual(bs.getProperty("revision"), self.helper.trunk[0] or None)
- self.failUnlessEqual(bs.getProperty("branch"), None)
- self.checkGotRevision(bs, self.helper.trunk[0])
- # leave the tree at HEAD
- return self.doBuild()
-
-
- def _do_vctest_update(self, res):
- log.msg("_do_vctest_update")
- d = self.doBuild() # rebuild with update
- d.addCallback(self._do_vctest_update_1)
- return d
- def _do_vctest_update_1(self, bs):
- log.msg("_do_vctest_update_1")
- self.shouldExist(self.workdir, "main.c")
- self.shouldExist(self.workdir, "version.c")
- self.shouldContain(self.workdir, "version.c",
- "version=%d" % self.helper.version)
- if self.metadir:
- self.shouldExist(self.workdir, self.metadir)
- self.failUnlessEqual(bs.getProperty("revision"), None)
- self.checkGotRevisionIsLatest(bs)
-
- self.touch(self.workdir, "newfile")
- d = self.doBuild() # update rebuild leaves new files
- d.addCallback(self._do_vctest_update_2)
- return d
- def _do_vctest_update_2(self, bs):
- log.msg("_do_vctest_update_2")
- self.shouldExist(self.workdir, "main.c")
- self.shouldExist(self.workdir, "version.c")
- self.touch(self.workdir, "newfile")
- # now make a change to the repository and make sure we pick it up
- d = self.helper.vc_revise()
- d.addCallback(lambda res: self.doBuild())
- d.addCallback(self._do_vctest_update_3)
- return d
- def _do_vctest_update_3(self, bs):
- log.msg("_do_vctest_update_3")
- self.shouldExist(self.workdir, "main.c")
- self.shouldExist(self.workdir, "version.c")
- self.shouldContain(self.workdir, "version.c",
- "version=%d" % self.helper.version)
- self.shouldExist(self.workdir, "newfile")
- self.failUnlessEqual(bs.getProperty("revision"), None)
- self.checkGotRevisionIsLatest(bs)
-
- # now "update" to an older revision
- d = self.doBuild(ss=SourceStamp(revision=self.helper.trunk[-2]))
- d.addCallback(self._do_vctest_update_4)
- return d
- def _do_vctest_update_4(self, bs):
- log.msg("_do_vctest_update_4")
- self.shouldExist(self.workdir, "main.c")
- self.shouldExist(self.workdir, "version.c")
- self.shouldContain(self.workdir, "version.c",
- "version=%d" % (self.helper.version-1))
- self.failUnlessEqual(bs.getProperty("revision"),
- self.helper.trunk[-2] or None)
- self.checkGotRevision(bs, self.helper.trunk[-2])
-
- # now update to the newer revision
- d = self.doBuild(ss=SourceStamp(revision=self.helper.trunk[-1]))
- d.addCallback(self._do_vctest_update_5)
- return d
- def _do_vctest_update_5(self, bs):
- log.msg("_do_vctest_update_5")
- self.shouldExist(self.workdir, "main.c")
- self.shouldExist(self.workdir, "version.c")
- self.shouldContain(self.workdir, "version.c",
- "version=%d" % self.helper.version)
- self.failUnlessEqual(bs.getProperty("revision"),
- self.helper.trunk[-1] or None)
- self.checkGotRevision(bs, self.helper.trunk[-1])
-
-
- def _do_vctest_update_retry(self, res):
- # certain local changes will prevent an update from working. The
- # most common is to replace a file with a directory, or vice
- # versa. The slave code should spot the failure and do a
- # clobber/retry.
- os.unlink(os.path.join(self.workdir, "main.c"))
- os.mkdir(os.path.join(self.workdir, "main.c"))
- self.touch(os.path.join(self.workdir, "main.c"), "foo")
- self.touch(self.workdir, "newfile")
-
- d = self.doBuild() # update, but must clobber to handle the error
- d.addCallback(self._do_vctest_update_retry_1)
- return d
- def _do_vctest_update_retry_1(self, bs):
- # SVN-1.4.0 doesn't seem to have any problem with the
- # file-turned-directory issue (although older versions did). So don't
- # actually check that the tree was clobbered.. as long as the update
- # succeeded (checked by doBuild), that should be good enough.
- #self.shouldNotExist(self.workdir, "newfile")
- pass
-
- def _do_vctest_copy(self, res):
- log.msg("_do_vctest_copy 1")
- d = self.doBuild() # copy rebuild clobbers new files
- d.addCallback(self._do_vctest_copy_1)
- return d
- def _do_vctest_copy_1(self, bs):
- log.msg("_do_vctest_copy 2")
- if self.metadir:
- self.shouldExist(self.workdir, self.metadir)
- self.shouldNotExist(self.workdir, "newfile")
- self.touch(self.workdir, "newfile")
- self.touch(self.vcdir, "newvcfile")
- self.failUnlessEqual(bs.getProperty("revision"), None)
- self.checkGotRevisionIsLatest(bs)
-
- d = self.doBuild() # copy rebuild clobbers new files
- d.addCallback(self._do_vctest_copy_2)
- return d
    def _do_vctest_copy_2(self, bs):
        """After the second copy build: the build-dir file was clobbered but
        the file added to the source cache survived and was copied over."""
        log.msg("_do_vctest_copy 3")
        if self.metadir:
            self.shouldExist(self.workdir, self.metadir)
        self.shouldNotExist(self.workdir, "newfile")
        self.shouldExist(self.vcdir, "newvcfile")
        self.shouldExist(self.workdir, "newvcfile")
        self.failUnlessEqual(bs.getProperty("revision"), None)
        self.checkGotRevisionIsLatest(bs)
        # leave a stray file behind for the following test stage
        self.touch(self.workdir, "newfile")
-
- def _do_vctest_export(self, res):
- d = self.doBuild() # export rebuild clobbers new files
- d.addCallback(self._do_vctest_export_1)
- return d
    def _do_vctest_export_1(self, bs):
        """After the first export build: no VC metadata, stray file clobbered."""
        self.shouldNotExist(self.workdir, self.metadir)
        self.shouldNotExist(self.workdir, "newfile")
        self.failUnlessEqual(bs.getProperty("revision"), None)
        #self.checkGotRevisionIsLatest(bs)
        # VC 'export' is not required to have a got_revision
        self.touch(self.workdir, "newfile")

        d = self.doBuild() # export rebuild clobbers new files
        d.addCallback(self._do_vctest_export_2)
        return d
    def _do_vctest_export_2(self, bs):
        """After the second export build: still no metadata, stray file gone."""
        self.shouldNotExist(self.workdir, self.metadir)
        self.shouldNotExist(self.workdir, "newfile")
        self.failUnlessEqual(bs.getProperty("revision"), None)
        #self.checkGotRevisionIsLatest(bs)
        # VC 'export' is not required to have a got_revision
-
    def do_patch(self):
        """Build trunk HEAD with a patch attached to the SourceStamp, then
        (via the _doPatch_* chain) verify the patch applied, that unpatched
        rebuilds revert it, and that old revisions and branches patch too."""
        vctype = self.vctype
        args = self.helper.vcargs
        m = self.master
        self.vcdir = os.path.join(self.slavebase, "vc-dir", "source")
        self.workdir = os.path.join(self.slavebase, "vc-dir", "build")
        # mode is left as '%s' (hence the '%%s') so the same config template
        # can be re-loaded later with a different mode
        s = "s(%s, timeout=200, workdir='build', mode='%%s'" % (vctype,)
        for k,v in args.items():
            s += ", %s=%s" % (k, repr(v))
        s += ")"
        self.config = config_vc % s

        m.loadConfig(self.config % "clobber")
        m.readConfig = True
        m.startService()

        # patchlevel-0 diff against the current trunk head
        ss = SourceStamp(revision=self.helper.trunk[-1], patch=(0, p0_diff))

        d = self.connectSlave()
        d.addCallback(lambda res: self.doBuild(ss=ss))
        d.addCallback(self._doPatch_1)
        return d
    def _doPatch_1(self, bs):
        """Verify the patched build, then rebuild unpatched in update mode."""
        self.shouldContain(self.workdir, "version.c",
                           "version=%d" % self.helper.version)
        # make sure the file actually got patched
        subdir_c = os.path.join(self.slavebase, "vc-dir", "build",
                                "subdir", "subdir.c")
        data = open(subdir_c, "r").read()
        self.failUnlessIn("Hello patched subdir.\\n", data)
        self.failUnlessEqual(bs.getProperty("revision"),
                             self.helper.trunk[-1] or None)
        self.checkGotRevision(bs, self.helper.trunk[-1])

        # make sure that a rebuild does not use the leftover patched workdir
        d = self.master.loadConfig(self.config % "update")
        d.addCallback(lambda res: self.doBuild(ss=None))
        d.addCallback(self._doPatch_2)
        return d
    def _doPatch_2(self, bs):
        """Verify the unpatched rebuild reverted the patch, then go on to
        patch an older revision (creating one first if necessary)."""
        # make sure the file is back to its original
        subdir_c = os.path.join(self.slavebase, "vc-dir", "build",
                                "subdir", "subdir.c")
        data = open(subdir_c, "r").read()
        self.failUnlessIn("Hello subdir.\\n", data)
        self.failUnlessEqual(bs.getProperty("revision"), None)
        self.checkGotRevisionIsLatest(bs)

        # now make sure we can patch an older revision. We need at least two
        # revisions here, so we might have to create one first
        if len(self.helper.trunk) < 2:
            d = self.helper.vc_revise()
            d.addCallback(self._doPatch_3)
            return d
        return self._doPatch_3()
-
- def _doPatch_3(self, res=None):
- ss = SourceStamp(revision=self.helper.trunk[-2], patch=(0, p0_diff))
- d = self.doBuild(ss=ss)
- d.addCallback(self._doPatch_4)
- return d
    def _doPatch_4(self, bs):
        """Verify the patched old-revision build, then patch a branch build."""
        # version.c must be one revision behind HEAD
        self.shouldContain(self.workdir, "version.c",
                           "version=%d" % (self.helper.version-1))
        # and make sure the file actually got patched
        subdir_c = os.path.join(self.slavebase, "vc-dir", "build",
                                "subdir", "subdir.c")
        data = open(subdir_c, "r").read()
        self.failUnlessIn("Hello patched subdir.\\n", data)
        self.failUnlessEqual(bs.getProperty("revision"),
                             self.helper.trunk[-2] or None)
        self.checkGotRevision(bs, self.helper.trunk[-2])

        # now check that we can patch a branch
        ss = SourceStamp(branch=self.helper.branchname,
                         revision=self.helper.branch[-1],
                         patch=(0, p0_diff))
        d = self.doBuild(ss=ss)
        d.addCallback(self._doPatch_5)
        return d
    def _doPatch_5(self, bs):
        """Verify the patched branch build: branch contents plus the patch."""
        self.shouldContain(self.workdir, "version.c",
                           "version=%d" % 1)
        self.shouldContain(self.workdir, "main.c", "Hello branch.")
        subdir_c = os.path.join(self.slavebase, "vc-dir", "build",
                                "subdir", "subdir.c")
        data = open(subdir_c, "r").read()
        self.failUnlessIn("Hello patched subdir.\\n", data)
        self.failUnlessEqual(bs.getProperty("revision"),
                             self.helper.branch[-1] or None)
        self.failUnlessEqual(bs.getProperty("branch"), self.helper.branchname or None)
        self.checkGotRevision(bs, self.helper.branch[-1])
-
-
- def do_vctest_once(self, shouldSucceed):
- m = self.master
- vctype = self.vctype
- args = self.helper.vcargs
- vcdir = os.path.join(self.slavebase, "vc-dir", "source")
- workdir = os.path.join(self.slavebase, "vc-dir", "build")
- # woo double-substitution
- s = "s(%s, timeout=200, workdir='build', mode='clobber'" % (vctype,)
- for k,v in args.items():
- s += ", %s=%s" % (k, repr(v))
- s += ")"
- config = config_vc % s
-
- m.loadConfig(config)
- m.readConfig = True
- m.startService()
-
- self.connectSlave()
- d = self.doBuild(shouldSucceed) # initial checkout
- return d
-
    def do_branch(self):
        """Alternate update-mode builds between trunk and a branch, checking
        (via the _doBranch_* chain) that branch switches clobber the tree
        while same-branch rebuilds do not."""
        log.msg("do_branch")
        vctype = self.vctype
        args = self.helper.vcargs
        m = self.master
        self.vcdir = os.path.join(self.slavebase, "vc-dir", "source")
        self.workdir = os.path.join(self.slavebase, "vc-dir", "build")
        # mode stays '%s' so the template can be reused with other modes
        s = "s(%s, timeout=200, workdir='build', mode='%%s'" % (vctype,)
        for k,v in args.items():
            s += ", %s=%s" % (k, repr(v))
        s += ")"
        self.config = config_vc % s

        m.loadConfig(self.config % "update")
        m.readConfig = True
        m.startService()

        # first we do a build of the trunk
        d = self.connectSlave()
        d.addCallback(lambda res: self.doBuild(ss=SourceStamp()))
        d.addCallback(self._doBranch_1)
        return d
    def _doBranch_1(self, bs):
        """Confirm the trunk checkout, then build the branch (should clobber)."""
        log.msg("_doBranch_1")
        # make sure the checkout was of the trunk
        main_c = os.path.join(self.slavebase, "vc-dir", "build", "main.c")
        data = open(main_c, "r").read()
        self.failUnlessIn("Hello world.", data)

        # now do a checkout on the branch. The change in branch name should
        # trigger a clobber.
        self.touch(self.workdir, "newfile")
        d = self.doBuild(ss=SourceStamp(branch=self.helper.branchname))
        d.addCallback(self._doBranch_2)
        return d
    def _doBranch_2(self, bs):
        """Confirm the branch checkout clobbered; rebuild same branch (no clobber)."""
        log.msg("_doBranch_2")
        # make sure it was on the branch
        main_c = os.path.join(self.slavebase, "vc-dir", "build", "main.c")
        data = open(main_c, "r").read()
        self.failUnlessIn("Hello branch.", data)
        # and make sure the tree was clobbered
        self.shouldNotExist(self.workdir, "newfile")

        # doing another build on the same branch should not clobber the tree
        self.touch(self.workdir, "newbranchfile")
        d = self.doBuild(ss=SourceStamp(branch=self.helper.branchname))
        d.addCallback(self._doBranch_3)
        return d
    def _doBranch_3(self, bs):
        """Confirm no clobber on same-branch rebuild; switch back to trunk."""
        log.msg("_doBranch_3")
        # make sure it is still on the branch
        main_c = os.path.join(self.slavebase, "vc-dir", "build", "main.c")
        data = open(main_c, "r").read()
        self.failUnlessIn("Hello branch.", data)
        # and make sure the tree was not clobbered
        self.shouldExist(self.workdir, "newbranchfile")

        # now make sure that a non-branch checkout clobbers the tree
        d = self.doBuild(ss=SourceStamp())
        d.addCallback(self._doBranch_4)
        return d
    def _doBranch_4(self, bs):
        """Confirm the return to trunk clobbered the branch tree."""
        log.msg("_doBranch_4")
        # make sure it was on the trunk
        main_c = os.path.join(self.slavebase, "vc-dir", "build", "main.c")
        data = open(main_c, "r").read()
        self.failUnlessIn("Hello world.", data)
        self.shouldNotExist(self.workdir, "newbranchfile")
-
    def do_getpatch(self, doBranch=True):
        """Exercise 'try'-style stamp extraction: modify a developer tree,
        extract (revision, patch) with tryclient, and check the buildslave
        reproduces the tree from trunk HEAD, an older revision, and (when
        doBranch is true) a branch."""
        log.msg("do_getpatch")
        # prepare a buildslave to do checkouts
        vctype = self.vctype
        args = self.helper.vcargs
        m = self.master
        self.vcdir = os.path.join(self.slavebase, "vc-dir", "source")
        self.workdir = os.path.join(self.slavebase, "vc-dir", "build")
        # woo double-substitution
        s = "s(%s, timeout=200, workdir='build', mode='%%s'" % (vctype,)
        for k,v in args.items():
            s += ", %s=%s" % (k, repr(v))
        s += ")"
        config = config_vc % s

        m.loadConfig(config % 'clobber')
        m.readConfig = True
        m.startService()

        d = self.connectSlave()

        # then set up the "developer's tree". first we modify a tree from the
        # head of the trunk
        tmpdir = "try_workdir"
        self.trydir = os.path.join(self.helper.repbase, tmpdir)
        rmdirRecursive(self.trydir)
        d.addCallback(self.do_getpatch_trunkhead)
        d.addCallback(self.do_getpatch_trunkold)
        if doBranch:
            d.addCallback(self.do_getpatch_branch)
        d.addCallback(self.do_getpatch_finish)
        return d
-
- def do_getpatch_finish(self, res):
- log.msg("do_getpatch_finish")
- self.helper.vc_try_finish(self.trydir)
- return res
-
- def try_shouldMatch(self, filename):
- devfilename = os.path.join(self.trydir, filename)
- devfile = open(devfilename, "r").read()
- slavefilename = os.path.join(self.workdir, filename)
- slavefile = open(slavefilename, "r").read()
- self.failUnlessEqual(devfile, slavefile,
- ("slavefile (%s) contains '%s'. "
- "developer's file (%s) contains '%s'. "
- "These ought to match") %
- (slavefilename, slavefile,
- devfilename, devfile))
-
- def do_getpatch_trunkhead(self, res):
- log.msg("do_getpatch_trunkhead")
- d = self.helper.vc_try_checkout(self.trydir, self.helper.trunk[-1])
- d.addCallback(self._do_getpatch_trunkhead_1)
- return d
    def _do_getpatch_trunkhead_1(self, res):
        """Extract a SourceStamp from the modified developer tree."""
        log.msg("_do_getpatch_trunkhead_1")
        d = tryclient.getSourceStamp(self.vctype_try, self.trydir, None)
        d.addCallback(self._do_getpatch_trunkhead_2)
        return d
    def _do_getpatch_trunkhead_2(self, ss):
        """Build the extracted SourceStamp on the slave."""
        log.msg("_do_getpatch_trunkhead_2")
        d = self.doBuild(ss=ss)
        d.addCallback(self._do_getpatch_trunkhead_3)
        return d
    def _do_getpatch_trunkhead_3(self, res):
        log.msg("_do_getpatch_trunkhead_3")
        # verify that the resulting buildslave tree matches the developer's
        self.try_shouldMatch("main.c")
        self.try_shouldMatch("version.c")
        self.try_shouldMatch(os.path.join("subdir", "subdir.c"))
-
    def do_getpatch_trunkold(self, res):
        """Repeat the try test against an older trunk revision."""
        log.msg("do_getpatch_trunkold")
        # now try a tree from an older revision. We need at least two
        # revisions here, so we might have to create one first
        if len(self.helper.trunk) < 2:
            d = self.helper.vc_revise()
            d.addCallback(self._do_getpatch_trunkold_1)
            return d
        return self._do_getpatch_trunkold_1()
    def _do_getpatch_trunkold_1(self, res=None):
        """Check out the next-to-last trunk revision into the try dir."""
        log.msg("_do_getpatch_trunkold_1")
        d = self.helper.vc_try_checkout(self.trydir, self.helper.trunk[-2])
        d.addCallback(self._do_getpatch_trunkold_2)
        return d
    def _do_getpatch_trunkold_2(self, res):
        """Extract a SourceStamp from the old-revision developer tree."""
        log.msg("_do_getpatch_trunkold_2")
        d = tryclient.getSourceStamp(self.vctype_try, self.trydir, None)
        d.addCallback(self._do_getpatch_trunkold_3)
        return d
    def _do_getpatch_trunkold_3(self, ss):
        """Build the extracted old-revision SourceStamp."""
        log.msg("_do_getpatch_trunkold_3")
        d = self.doBuild(ss=ss)
        d.addCallback(self._do_getpatch_trunkold_4)
        return d
    def _do_getpatch_trunkold_4(self, res):
        log.msg("_do_getpatch_trunkold_4")
        # verify that the resulting buildslave tree matches the developer's
        self.try_shouldMatch("main.c")
        self.try_shouldMatch("version.c")
        self.try_shouldMatch(os.path.join("subdir", "subdir.c"))
-
    def do_getpatch_branch(self, res):
        """Repeat the try test against a branch checkout."""
        log.msg("do_getpatch_branch")
        # now try a tree from a branch
        d = self.helper.vc_try_checkout(self.trydir, self.helper.branch[-1],
                                        self.helper.branchname)
        d.addCallback(self._do_getpatch_branch_1)
        return d
    def _do_getpatch_branch_1(self, res):
        """Extract a SourceStamp from the branch developer tree."""
        log.msg("_do_getpatch_branch_1")
        d = tryclient.getSourceStamp(self.vctype_try, self.trydir,
                                     self.helper.try_branchname)
        d.addCallback(self._do_getpatch_branch_2)
        return d
    def _do_getpatch_branch_2(self, ss):
        """Build the extracted branch SourceStamp."""
        log.msg("_do_getpatch_branch_2")
        d = self.doBuild(ss=ss)
        d.addCallback(self._do_getpatch_branch_3)
        return d
    def _do_getpatch_branch_3(self, res):
        log.msg("_do_getpatch_branch_3")
        # verify that the resulting buildslave tree matches the developer's
        self.try_shouldMatch("main.c")
        self.try_shouldMatch("version.c")
        self.try_shouldMatch(os.path.join("subdir", "subdir.c"))
-
-
    def dumpPatch(self, patch):
        """Debug aid: write *patch* to a temp file and run lsdiff on it."""
        # this exists to help me figure out the right 'patchlevel' value
        # should be returned by tryclient.getSourceStamp
        n = self.mktemp()
        open(n,"w").write(patch)
        d = self.runCommand(".", ["lsdiff", n])
        def p(res): print "lsdiff:", res.strip().split("\n")
        d.addCallback(p)
        return d
-
-
    def tearDown(self):
        """Stop slave, master, and any HTTP server, then run tearDown2."""
        d = defer.succeed(None)
        if self.slave:
            # wait for the master to notice the slave departing, else trial
            # complains about leftover connections
            d2 = self.master.botmaster.waitUntilBuilderDetached("vc")
            d.addCallback(lambda res: self.slave.stopService())
            d.addCallback(lambda res: d2)
        if self.master:
            d.addCallback(lambda res: self.master.stopService())
        if self.httpServer:
            d.addCallback(lambda res: self.httpServer.stopListening())
            def stopHTTPTimer():
                # NOTE(review): reaches into a private twisted.web API
                from twisted.web import http
                http._logDateTimeStop() # shut down the internal timer. DUMB!
            d.addCallback(lambda res: stopHTTPTimer())
        d.addCallback(lambda res: self.tearDown2())
        return d
-
    def tearDown2(self):
        """Hook for subclasses that need extra per-test cleanup."""
        pass
-
class CVSHelper(BaseHelper):
    """Creates and revises a local CVS repository for the VC tests.

    CVS has no global revision numbers, so timestamps (see getdate) stand
    in for revisions; the sleep(2) calls keep successive timestamps apart.
    """
    branchname = "branch"
    try_branchname = "branch"

    def capable(self):
        """Return (ok, excuse) or a Deferred thereof for the cvs binary."""
        cvspaths = which('cvs')
        if not cvspaths:
            return (False, "CVS is not installed")
        # cvs-1.10 (as shipped with OS-X 10.3 "Panther") is too old for this
        # test. There is a situation where we check out a tree, make a
        # change, then commit it back, and CVS refuses to believe that we're
        # operating in a CVS tree. I tested cvs-1.12.9 and it works ok, OS-X
        # 10.4 "Tiger" comes with cvs-1.11, but I haven't tested that yet.
        # For now, skip the tests if we've got 1.10 .
        log.msg("running %s --version.." % (cvspaths[0],))
        d = utils.getProcessOutput(cvspaths[0], ["--version"],
                                   env=os.environ)
        d.addCallback(self._capable, cvspaths[0])
        return d

    def _capable(self, v, vcexe):
        """Parse `cvs --version` output; reject unidentifiable or 1.10."""
        m = re.search(r'\(CVS\) ([\d\.]+) ', v)
        if not m:
            log.msg("couldn't identify CVS version number in output:")
            log.msg("'''%s'''" % v)
            log.msg("skipping tests")
            return (False, "Found CVS but couldn't identify its version")
        ver = m.group(1)
        log.msg("found CVS version '%s'" % ver)
        if ver == "1.10":
            return (False, "Found CVS, but it is too old")
        self.vcexe = vcexe
        return (True, None)

    def getdate(self):
        """Return a local-time timestamp used as a CVS -D 'revision'."""
        # this timestamp is eventually passed to CVS in a -D argument, and
        # strftime's %z specifier doesn't seem to work reliably (I get +0000
        # where I should get +0700 under linux sometimes, and windows seems
        # to want to put a verbose 'Eastern Standard Time' in there), so
        # leave off the timezone specifier and treat this as localtime. A
        # valid alternative would be to use a hard-coded +0000 and
        # time.gmtime().
        return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

    def createRepository(self):
        """deferredGenerator: init the repo, import trunk, tag a branch."""
        self.createBasedir()
        self.cvsrep = cvsrep = os.path.join(self.repbase, "CVS-Repository")
        tmp = os.path.join(self.repbase, "cvstmp")

        w = self.dovc(self.repbase, ['-d', cvsrep, 'init'])
        yield w; w.getResult() # we must getResult() to raise any exceptions

        self.populate(tmp)
        cmd = ['-d', self.cvsrep, 'import',
               '-m', 'sample_project_files', 'sample', 'vendortag', 'start']
        w = self.dovc(tmp, cmd)
        yield w; w.getResult()
        rmdirRecursive(tmp)
        # take a timestamp as the first revision number
        time.sleep(2)
        self.addTrunkRev(self.getdate())
        time.sleep(2)

        # checkout into 'cvstmp' (== tmp) to create the branch from
        w = self.dovc(self.repbase,
                      ['-d', self.cvsrep, 'checkout', '-d', 'cvstmp', 'sample'])
        yield w; w.getResult()

        w = self.dovc(tmp, ['tag', '-b', self.branchname])
        yield w; w.getResult()
        self.populate_branch(tmp)
        w = self.dovc(tmp,
                      ['commit', '-m', 'commit_on_branch', '-r', self.branchname])
        yield w; w.getResult()
        rmdirRecursive(tmp)
        time.sleep(2)
        self.addBranchRev(self.getdate())
        time.sleep(2)
        self.vcargs = { 'cvsroot': self.cvsrep, 'cvsmodule': "sample" }
    createRepository = deferredGenerator(createRepository)


    def vc_revise(self):
        """deferredGenerator: commit a new version.c revision on trunk."""
        tmp = os.path.join(self.repbase, "cvstmp")

        w = self.dovc(self.repbase,
                      ['-d', self.cvsrep, 'checkout', '-d', 'cvstmp', 'sample'])
        yield w; w.getResult()
        self.version += 1
        version_c = VERSION_C % self.version
        open(os.path.join(tmp, "version.c"), "w").write(version_c)
        w = self.dovc(tmp,
                      ['commit', '-m', 'revised_to_%d' % self.version, 'version.c'])
        yield w; w.getResult()
        rmdirRecursive(tmp)
        # bracket the new timestamp with sleeps so -D stays unambiguous
        time.sleep(2)
        self.addTrunkRev(self.getdate())
        time.sleep(2)
    vc_revise = deferredGenerator(vc_revise)

    def vc_try_checkout(self, workdir, rev, branch=None):
        """deferredGenerator: check out rev (and branch) and modify subdir.c."""
        # 'workdir' is an absolute path
        assert os.path.abspath(workdir) == workdir
        cmd = [self.vcexe, "-d", self.cvsrep, "checkout",
               "-d", workdir,
               "-D", rev]
        if branch is not None:
            cmd.append("-r")
            cmd.append(branch)
        cmd.append("sample")
        w = self.do(self.repbase, cmd)
        yield w; w.getResult()
        open(os.path.join(workdir, "subdir", "subdir.c"), "w").write(TRY_C)
    vc_try_checkout = deferredGenerator(vc_try_checkout)

    def vc_try_finish(self, workdir):
        """Discard the developer's try-tree."""
        rmdirRecursive(workdir)
-
class CVS(VCBase, unittest.TestCase):
    """End-to-end tests of the CVS source step."""
    vc_name = "cvs"

    metadir = "CVS"
    vctype = "source.CVS"
    vctype_try = "cvs"
    # CVS gives us got_revision, but it is based entirely upon the local
    # clock, which means it is unlikely to match the timestamp taken earlier.
    # This might be enough for common use, but won't be good enough for our
    # tests to accept, so pretend it doesn't have got_revision at all.
    has_got_revision = False

    def testCheckout(self):
        return self.do_vctest()

    def testPatch(self):
        return self.do_patch()

    def testCheckoutBranch(self):
        return self.do_branch()

    def testTry(self):
        return self.do_getpatch(doBranch=False)
-
-VCS.registerVC(CVS.vc_name, CVSHelper())
-
-
class SVNHelper(BaseHelper):
    """Creates and revises a local (file://) Subversion repository for the
    VC tests; trunk and branch both live under the 'sample' directory."""
    branchname = "sample/branch"
    try_branchname = "sample/branch"

    def capable(self):
        """Return (ok, excuse) or a Deferred thereof, requiring both the
        'svn' and 'svnadmin' binaries."""
        svnpaths = which('svn')
        svnadminpaths = which('svnadmin')
        if not svnpaths:
            return (False, "SVN is not installed")
        if not svnadminpaths:
            return (False, "svnadmin is not installed")
        # we need svn to be compiled with the ra_local access
        # module
        log.msg("running svn --version..")
        env = os.environ.copy()
        env['LC_ALL'] = "C"  # force an English version banner for parsing
        d = utils.getProcessOutput(svnpaths[0], ["--version"],
                                   env=env)
        d.addCallback(self._capable, svnpaths[0], svnadminpaths[0])
        return d

    def _capable(self, v, vcexe, svnadmin):
        """Accept svn only when it supports the file: access scheme."""
        if v.find("handles 'file' schem") != -1:
            # older versions say 'schema', 1.2.0 and beyond say 'scheme'
            self.vcexe = vcexe
            self.svnadmin = svnadmin
            return (True, None)
        # BUG FIX: the original wrote
        #     ("%s found ... " + "schema, ...") % vcexe
        # but '%' binds tighter than '+', so the interpolation hit the
        # second, format-free literal and raised TypeError instead of
        # returning the excuse. Adjacent-literal concatenation lets '%'
        # format the whole message.
        excuse = ("%s found but it does not support 'file:' "
                  "schema, skipping svn tests" % vcexe)
        log.msg(excuse)
        return (False, excuse)

    def createRepository(self):
        """deferredGenerator: create the repo, import trunk, copy a branch."""
        self.createBasedir()
        self.svnrep = os.path.join(self.repbase,
                                   "SVN-Repository").replace('\\','/')
        tmp = os.path.join(self.repbase, "svntmp")
        if sys.platform == 'win32':
            # On Windows Paths do not start with a /
            self.svnurl = "file:///%s" % self.svnrep
        else:
            self.svnurl = "file://%s" % self.svnrep
        self.svnurl_trunk = self.svnurl + "/sample/trunk"
        self.svnurl_branch = self.svnurl + "/sample/branch"

        w = self.do(self.repbase, [self.svnadmin, "create", self.svnrep])
        yield w; w.getResult()

        self.populate(tmp)
        w = self.dovc(tmp,
                      ['import', '-m', 'sample_project_files', self.svnurl_trunk])
        yield w; out = w.getResult()
        rmdirRecursive(tmp)
        m = re.search(r'Committed revision (\d+)\.', out)
        assert m.group(1) == "1" # first revision is always "1"
        self.addTrunkRev(int(m.group(1)))

        # checkout into 'svntmp' (== tmp) to make the branch from
        w = self.dovc(self.repbase,
                      ['checkout', self.svnurl_trunk, 'svntmp'])
        yield w; w.getResult()

        w = self.dovc(tmp, ['cp', '-m' , 'make_branch', self.svnurl_trunk,
                            self.svnurl_branch])
        yield w; w.getResult()
        w = self.dovc(tmp, ['switch', self.svnurl_branch])
        yield w; w.getResult()
        self.populate_branch(tmp)
        w = self.dovc(tmp, ['commit', '-m', 'commit_on_branch'])
        yield w; out = w.getResult()
        rmdirRecursive(tmp)
        m = re.search(r'Committed revision (\d+)\.', out)
        self.addBranchRev(int(m.group(1)))
    createRepository = deferredGenerator(createRepository)

    def vc_revise(self):
        """deferredGenerator: commit a new version.c revision on trunk."""
        tmp = os.path.join(self.repbase, "svntmp")
        rmdirRecursive(tmp)
        log.msg("vc_revise" + self.svnurl_trunk)
        w = self.dovc(self.repbase,
                      ['checkout', self.svnurl_trunk, 'svntmp'])
        yield w; w.getResult()
        self.version += 1
        version_c = VERSION_C % self.version
        open(os.path.join(tmp, "version.c"), "w").write(version_c)
        w = self.dovc(tmp, ['commit', '-m', 'revised_to_%d' % self.version])
        yield w; out = w.getResult()
        m = re.search(r'Committed revision (\d+)\.', out)
        self.addTrunkRev(int(m.group(1)))
        rmdirRecursive(tmp)
    vc_revise = deferredGenerator(vc_revise)

    def vc_try_checkout(self, workdir, rev, branch=None):
        """deferredGenerator: check out trunk or *branch* and modify subdir.c."""
        assert os.path.abspath(workdir) == workdir
        if os.path.exists(workdir):
            rmdirRecursive(workdir)
        if not branch:
            svnurl = self.svnurl_trunk
        else:
            # N.B.: this is *not* os.path.join: SVN URLs use slashes
            # regardless of the host operating system's filepath separator
            svnurl = self.svnurl + "/" + branch
        w = self.dovc(self.repbase,
                      ['checkout', svnurl, workdir])
        yield w; w.getResult()
        open(os.path.join(workdir, "subdir", "subdir.c"), "w").write(TRY_C)
    vc_try_checkout = deferredGenerator(vc_try_checkout)

    def vc_try_finish(self, workdir):
        """Discard the developer's try-tree."""
        rmdirRecursive(workdir)
-
-
class SVN(VCBase, unittest.TestCase):
    """End-to-end tests of the SVN source step."""
    vc_name = "svn"

    metadir = ".svn"
    vctype = "source.SVN"
    vctype_try = "svn"
    has_got_revision = True
    has_got_revision_branches_are_merged = True

    def _use_base_url(self):
        # configure the baseURL/defaultBranch style of vcargs
        self.helper.vcargs = { 'baseURL': self.helper.svnurl + "/",
                               'defaultBranch': "sample/trunk",
                               }

    def testCheckout(self):
        # we verify this one with the svnurl style of vcargs. We test the
        # baseURL/defaultBranch style in testPatch and testCheckoutBranch.
        self.helper.vcargs = { 'svnurl': self.helper.svnurl_trunk }
        return self.do_vctest()

    def testPatch(self):
        self._use_base_url()
        return self.do_patch()

    def testCheckoutBranch(self):
        self._use_base_url()
        return self.do_branch()

    def testTry(self):
        # extract the base revision and patch from a modified tree, use it to
        # create the same contents on the buildslave
        self._use_base_url()
        return self.do_getpatch()

    ## can't test the username= and password= options, because we do not have an
    ## svn repository that requires authentication.
-
-VCS.registerVC(SVN.vc_name, SVNHelper())
-
-
class P4Helper(BaseHelper):
    """Creates and revises a Perforce depot for the VC tests.

    Unlike the other helpers this one runs its own server process (p4d) on
    p4port for the lifetime of the test run; see shutdown_p4d.
    """
    branchname = "branch"
    p4port = 'localhost:1666'
    pid = None
    # changelist spec template fed to 'p4 submit -i'
    base_descr = 'Change: new\nDescription: asdf\nFiles:\n'

    def capable(self):
        """Return (ok, excuse): both 'p4' and 'p4d' must be on $PATH."""
        p4paths = which('p4')
        p4dpaths = which('p4d')
        if not p4paths:
            return (False, "p4 is not installed")
        if not p4dpaths:
            return (False, "p4d is not installed")
        self.vcexe = p4paths[0]
        self.p4dexe = p4dpaths[0]
        return (True, None)

    class _P4DProtocol(protocol.ProcessProtocol):
        """Watches the spawned p4d: fires .started once the server has
        bound its socket, and .ended when the process exits."""
        def __init__(self):
            self.started = defer.Deferred()
            self.ended = defer.Deferred()

        def outReceived(self, data):
            # When it says starting, it has bound to the socket.
            if self.started:
                #
                # Make sure p4d has started. Newer versions of p4d
                # have more verbose messaging when db files don't exist, so
                # we use re.search instead of startswith.
                #
                # NOTE(review): the dots in the pattern are regex wildcards,
                # but they also match the literal '...' so this works.
                if re.search('Perforce Server starting...', data):
                    self.started.callback(None)
                else:
                    print "p4d said %r" % data
                    try:
                        raise Exception('p4d said %r' % data)
                    except:
                        self.started.errback(failure.Failure())
                # only react to the first chunk of output
                self.started = None

        def errReceived(self, data):
            print "p4d stderr: %s" % data

        def processEnded(self, status_object):
            if status_object.check(error.ProcessDone):
                self.ended.callback(None)
            else:
                self.ended.errback(status_object)

    def _start_p4d(self):
        """Spawn p4d rooted at the repository dir; return (started, ended)."""
        proto = self._P4DProtocol()
        reactor.spawnProcess(proto, self.p4dexe, ['p4d', '-p', self.p4port],
                             env=os.environ, path=self.p4rep)
        return proto.started, proto.ended

    def dop4(self, basedir, command, failureIsOk=False, stdin=None):
        """Run a p4 client command with the port and -d cwd prepended."""
        # p4 looks at $PWD instead of getcwd(), which causes confusion when
        # we spawn commands without an intervening shell (sh -c). We can
        # override this with a -d argument.
        command = "-p %s -d %s %s" % (self.p4port, basedir, command)
        return self.dovc(basedir, command, failureIsOk, stdin)

    def createRepository(self):
        """deferredGenerator: start p4d, make a client, submit trunk then
        integrate it to a branch."""
        # this is only called once per VC system, so start p4d here.

        self.createBasedir()
        tmp = os.path.join(self.repbase, "p4tmp")
        self.p4rep = os.path.join(self.repbase, 'P4-Repository')
        os.mkdir(self.p4rep)

        # Launch p4d.
        started, self.p4d_shutdown = self._start_p4d()
        w = waitForDeferred(started)
        yield w; w.getResult()

        # Create client spec.
        os.mkdir(tmp)
        clispec = 'Client: creator\n'
        clispec += 'Root: %s\n' % tmp
        clispec += 'View:\n'
        clispec += '\t//depot/... //creator/...\n'
        w = self.dop4(tmp, 'client -i', stdin=clispec)
        yield w; w.getResult()

        # Create first rev (trunk).
        self.populate(os.path.join(tmp, 'trunk'))
        files = ['main.c', 'version.c', 'subdir/subdir.c']
        w = self.dop4(tmp, "-c creator add "
                      + " ".join(['trunk/%s' % f for f in files]))
        yield w; w.getResult()
        descr = self.base_descr
        for file in files:
            descr += '\t//depot/trunk/%s\n' % file
        w = self.dop4(tmp, "-c creator submit -i", stdin=descr)
        yield w; out = w.getResult()
        m = re.search(r'Change (\d+) submitted.', out)
        assert m.group(1) == '1'
        self.addTrunkRev(m.group(1))

        # Create second rev (branch).
        w = self.dop4(tmp, '-c creator integrate '
                      + '//depot/trunk/... //depot/branch/...')
        yield w; w.getResult()
        w = self.dop4(tmp, "-c creator edit branch/main.c")
        yield w; w.getResult()
        self.populate_branch(os.path.join(tmp, 'branch'))
        descr = self.base_descr
        for file in files:
            descr += '\t//depot/branch/%s\n' % file
        w = self.dop4(tmp, "-c creator submit -i", stdin=descr)
        yield w; out = w.getResult()
        m = re.search(r'Change (\d+) submitted.', out)
        self.addBranchRev(m.group(1))
    createRepository = deferredGenerator(createRepository)

    def vc_revise(self):
        """deferredGenerator: submit a new version.c change on trunk."""
        tmp = os.path.join(self.repbase, "p4tmp")
        self.version += 1
        version_c = VERSION_C % self.version
        w = self.dop4(tmp, '-c creator edit trunk/version.c')
        yield w; w.getResult()
        open(os.path.join(tmp, "trunk/version.c"), "w").write(version_c)
        descr = self.base_descr + '\t//depot/trunk/version.c\n'
        w = self.dop4(tmp, "-c creator submit -i", stdin=descr)
        yield w; out = w.getResult()
        m = re.search(r'Change (\d+) submitted.', out)
        self.addTrunkRev(m.group(1))
    vc_revise = deferredGenerator(vc_revise)

    def shutdown_p4d(self):
        """Ask p4d to stop; resolves once the process has exited."""
        d = self.runCommand(self.repbase, '%s -p %s admin stop'
                            % (self.vcexe, self.p4port))
        return d.addCallback(lambda _: self.p4d_shutdown)
-
class P4(VCBase, unittest.TestCase):
    """End-to-end tests of the P4 source step."""
    metadir = None
    vctype = "source.P4"
    vc_name = "p4"
    has_got_revision = True

    def _standard_vcargs(self):
        # every P4 test drives the same depot layout
        return { 'p4port': self.helper.p4port,
                 'p4base': '//depot/',
                 'defaultBranch': 'trunk' }

    def tearDownClass(self):
        if self.helper:
            return self.helper.shutdown_p4d()

    def testCheckout(self):
        self.helper.vcargs = self._standard_vcargs()
        # TODO: like arch and darcs, sync does nothing when server is not
        # changed.
        return self.do_vctest(testRetry=False)

    def testCheckoutBranch(self):
        self.helper.vcargs = self._standard_vcargs()
        return self.do_branch()

    def testPatch(self):
        self.helper.vcargs = self._standard_vcargs()
        return self.do_patch()
-
-VCS.registerVC(P4.vc_name, P4Helper())
-
-
class DarcsHelper(BaseHelper):
    """Creates and revises Darcs repositories for the VC tests.

    Darcs models branches as separate repositories: trunk and branch each
    live in their own directory under Darcs-Repository/. Darcs has no
    numeric revisions; the output of 'darcs changes --context' is recorded
    as the revision identifier.
    """
    branchname = "branch"
    try_branchname = "branch"

    def capable(self):
        """Return (ok, excuse) for the darcs binary."""
        darcspaths = which('darcs')
        if not darcspaths:
            return (False, "Darcs is not installed")
        self.vcexe = darcspaths[0]
        return (True, None)

    def createRepository(self):
        """deferredGenerator: build trunk and branch repos from a temp tree."""
        self.createBasedir()
        self.darcs_base = os.path.join(self.repbase, "Darcs-Repository")
        self.rep_trunk = os.path.join(self.darcs_base, "trunk")
        self.rep_branch = os.path.join(self.darcs_base, "branch")
        tmp = os.path.join(self.repbase, "darcstmp")

        os.makedirs(self.rep_trunk)
        w = self.dovc(self.rep_trunk, ["initialize"])
        yield w; w.getResult()
        os.makedirs(self.rep_branch)
        w = self.dovc(self.rep_branch, ["initialize"])
        yield w; w.getResult()

        # record the initial import in a scratch repo, then push to trunk
        self.populate(tmp)
        w = self.dovc(tmp, qw("initialize"))
        yield w; w.getResult()
        w = self.dovc(tmp, qw("add -r ."))
        yield w; w.getResult()
        w = self.dovc(tmp, qw("record -a -m initial_import --skip-long-comment -A test@buildbot.sf.net"))
        yield w; w.getResult()
        w = self.dovc(tmp, ["push", "-a", self.rep_trunk])
        yield w; w.getResult()
        w = self.dovc(tmp, qw("changes --context"))
        yield w; out = w.getResult()
        self.addTrunkRev(out)

        # same scratch repo gains the branch changes, pushed to the branch
        self.populate_branch(tmp)
        w = self.dovc(tmp, qw("record -a --ignore-times -m commit_on_branch --skip-long-comment -A test@buildbot.sf.net"))
        yield w; w.getResult()
        w = self.dovc(tmp, ["push", "-a", self.rep_branch])
        yield w; w.getResult()
        w = self.dovc(tmp, qw("changes --context"))
        yield w; out = w.getResult()
        self.addBranchRev(out)
        rmdirRecursive(tmp)
    createRepository = deferredGenerator(createRepository)

    def vc_revise(self):
        """deferredGenerator: record a new version.c patch and push to trunk."""
        tmp = os.path.join(self.repbase, "darcstmp")
        os.makedirs(tmp)
        w = self.dovc(tmp, qw("initialize"))
        yield w; w.getResult()
        w = self.dovc(tmp, ["pull", "-a", self.rep_trunk])
        yield w; w.getResult()

        self.version += 1
        version_c = VERSION_C % self.version
        open(os.path.join(tmp, "version.c"), "w").write(version_c)
        w = self.dovc(tmp, qw("record -a --ignore-times -m revised_to_%d --skip-long-comment -A test@buildbot.sf.net" % self.version))
        yield w; w.getResult()
        w = self.dovc(tmp, ["push", "-a", self.rep_trunk])
        yield w; w.getResult()
        w = self.dovc(tmp, qw("changes --context"))
        yield w; out = w.getResult()
        self.addTrunkRev(out)
        rmdirRecursive(tmp)
    vc_revise = deferredGenerator(vc_revise)

    def vc_try_checkout(self, workdir, rev, branch=None):
        """deferredGenerator: pull trunk or *branch* into workdir, modify subdir.c."""
        assert os.path.abspath(workdir) == workdir
        if os.path.exists(workdir):
            rmdirRecursive(workdir)
        os.makedirs(workdir)
        w = self.dovc(workdir, qw("initialize"))
        yield w; w.getResult()
        if not branch:
            rep = self.rep_trunk
        else:
            rep = os.path.join(self.darcs_base, branch)
        w = self.dovc(workdir, ["pull", "-a", rep])
        yield w; w.getResult()
        open(os.path.join(workdir, "subdir", "subdir.c"), "w").write(TRY_C)
    vc_try_checkout = deferredGenerator(vc_try_checkout)

    def vc_try_finish(self, workdir):
        """Discard the developer's try-tree."""
        rmdirRecursive(workdir)
-
-
class Darcs(VCBase, unittest.TestCase):
    """End-to-end tests of the Darcs source step."""
    vc_name = "darcs"

    # Darcs has a metadir="_darcs", but it does not have an 'export'
    # mode
    metadir = None
    vctype = "source.Darcs"
    vctype_try = "darcs"
    has_got_revision = True

    def _use_base_url(self):
        # configure the baseURL/defaultBranch style of vcargs
        self.helper.vcargs = { 'baseURL': self.helper.darcs_base + "/",
                               'defaultBranch': "trunk" }

    def testCheckout(self):
        self.helper.vcargs = { 'repourl': self.helper.rep_trunk }
        # TODO: testRetry has the same problem with Darcs as it does for
        # Arch
        return self.do_vctest(testRetry=False)

    def testPatch(self):
        self._use_base_url()
        return self.do_patch()

    def testCheckoutBranch(self):
        self._use_base_url()
        return self.do_branch()

    def testCheckoutHTTP(self):
        self.serveHTTP()
        repourl = "http://localhost:%d/Darcs-Repository/trunk" % self.httpPort
        self.helper.vcargs = { 'repourl': repourl }
        return self.do_vctest(testRetry=False)

    def testTry(self):
        self._use_base_url()
        return self.do_getpatch()
-
-VCS.registerVC(Darcs.vc_name, DarcsHelper())
-
-
-class ArchCommon:
- def registerRepository(self, coordinates):
- a = self.archname
- w = self.dovc(self.repbase, "archives %s" % a)
- yield w; out = w.getResult()
- if out:
- w = self.dovc(self.repbase, "register-archive -d %s" % a)
- yield w; w.getResult()
- w = self.dovc(self.repbase, "register-archive %s" % coordinates)
- yield w; w.getResult()
- registerRepository = deferredGenerator(registerRepository)
-
- def unregisterRepository(self):
- a = self.archname
- w = self.dovc(self.repbase, "archives %s" % a)
- yield w; out = w.getResult()
- if out:
- w = self.dovc(self.repbase, "register-archive -d %s" % a)
- yield w; out = w.getResult()
- unregisterRepository = deferredGenerator(unregisterRepository)
-
-class TlaHelper(BaseHelper, ArchCommon):
- defaultbranch = "testvc--mainline--1"
- branchname = "testvc--branch--1"
- try_branchname = None # TlaExtractor can figure it out by itself
- archcmd = "tla"
-
- def capable(self):
- tlapaths = which('tla')
- if not tlapaths:
- return (False, "Arch (tla) is not installed")
- self.vcexe = tlapaths[0]
- return (True, None)
-
- def do_get(self, basedir, archive, branch, newdir):
- # the 'get' syntax is different between tla and baz. baz, while
- # claiming to honor an --archive argument, in fact ignores it. The
- # correct invocation is 'baz get archive/revision newdir'.
- if self.archcmd == "tla":
- w = self.dovc(basedir,
- "get -A %s %s %s" % (archive, branch, newdir))
- else:
- w = self.dovc(basedir,
- "get %s/%s %s" % (archive, branch, newdir))
- return w
-
- def createRepository(self):
- self.createBasedir()
- # first check to see if bazaar is around, since we'll need to know
- # later
- d = VCS.capable(Bazaar.vc_name)
- d.addCallback(self._createRepository_1)
- return d
-
- def _createRepository_1(self, res):
- has_baz = res[0]
-
- # pick a hopefully unique string for the archive name, in the form
- # test-%d@buildbot.sf.net--testvc, since otherwise multiple copies of
- # the unit tests run in the same user account will collide (since the
- # archive names are kept in the per-user ~/.arch-params/ directory).
- pid = os.getpid()
- self.archname = "test-%s-%d@buildbot.sf.net--testvc" % (self.archcmd,
- pid)
- trunk = self.defaultbranch
- branch = self.branchname
-
- repword = self.archcmd.capitalize()
- self.archrep = os.path.join(self.repbase, "%s-Repository" % repword)
- tmp = os.path.join(self.repbase, "archtmp")
- a = self.archname
-
- self.populate(tmp)
-
- w = self.dovc(tmp, "my-id", failureIsOk=True)
- yield w; res = w.getResult()
- if not res:
- # tla will fail a lot of operations if you have not set an ID
- w = self.do(tmp, [self.vcexe, "my-id",
- "Buildbot Test Suite <test@buildbot.sf.net>"])
- yield w; w.getResult()
-
- if has_baz:
- # bazaar keeps a cache of revisions, but this test creates a new
- # archive each time it is run, so the cache causes errors.
- # Disable the cache to avoid these problems. This will be
- # slightly annoying for people who run the buildbot tests under
- # the same UID as one which uses baz on a regular basis, but
- # bazaar doesn't give us a way to disable the cache just for this
- # one archive.
- cmd = "%s cache-config --disable" % VCS.getHelper('bazaar').vcexe
- w = self.do(tmp, cmd)
- yield w; w.getResult()
-
- w = waitForDeferred(self.unregisterRepository())
- yield w; w.getResult()
-
- # these commands can be run in any directory
- w = self.dovc(tmp, "make-archive -l %s %s" % (a, self.archrep))
- yield w; w.getResult()
- if self.archcmd == "tla":
- w = self.dovc(tmp, "archive-setup -A %s %s" % (a, trunk))
- yield w; w.getResult()
- w = self.dovc(tmp, "archive-setup -A %s %s" % (a, branch))
- yield w; w.getResult()
- else:
- # baz does not require an 'archive-setup' step
- pass
-
- # these commands must be run in the directory that is to be imported
- w = self.dovc(tmp, "init-tree --nested %s/%s" % (a, trunk))
- yield w; w.getResult()
- files = " ".join(["main.c", "version.c", "subdir",
- os.path.join("subdir", "subdir.c")])
- w = self.dovc(tmp, "add-id %s" % files)
- yield w; w.getResult()
-
- w = self.dovc(tmp, "import %s/%s" % (a, trunk))
- yield w; out = w.getResult()
- self.addTrunkRev("base-0")
-
- # create the branch
- if self.archcmd == "tla":
- branchstart = "%s--base-0" % trunk
- w = self.dovc(tmp, "tag -A %s %s %s" % (a, branchstart, branch))
- yield w; w.getResult()
- else:
- w = self.dovc(tmp, "branch %s" % branch)
- yield w; w.getResult()
-
- rmdirRecursive(tmp)
-
- # check out the branch
- w = self.do_get(self.repbase, a, branch, "archtmp")
- yield w; w.getResult()
- # and edit the file
- self.populate_branch(tmp)
- logfile = "++log.%s--%s" % (branch, a)
- logmsg = "Summary: commit on branch\nKeywords:\n\n"
- open(os.path.join(tmp, logfile), "w").write(logmsg)
- w = self.dovc(tmp, "commit")
- yield w; out = w.getResult()
- m = re.search(r'committed %s/%s--([\S]+)' % (a, branch),
- out)
- assert (m.group(1) == "base-0" or m.group(1).startswith("patch-"))
- self.addBranchRev(m.group(1))
-
- w = waitForDeferred(self.unregisterRepository())
- yield w; w.getResult()
- rmdirRecursive(tmp)
-
- # we unregister the repository each time, because we might have
- # changed the coordinates (since we switch from a file: URL to an
- # http: URL for various tests). The buildslave code doesn't forcibly
- # unregister the archive, so we have to do it here.
- w = waitForDeferred(self.unregisterRepository())
- yield w; w.getResult()
-
- _createRepository_1 = deferredGenerator(_createRepository_1)
-
- def vc_revise(self):
- # the fix needs to be done in a workspace that is linked to a
- # read-write version of the archive (i.e., using file-based
- # coordinates instead of HTTP ones), so we re-register the repository
- # before we begin. We unregister it when we're done to make sure the
- # build will re-register the correct one for whichever test is
- # currently being run.
-
- # except, that source.Bazaar really doesn't like it when the archive
- # gets unregistered behind its back. The slave tries to do a 'baz
- # replay' in a tree with an archive that is no longer recognized, and
- # baz aborts with a botched invariant exception. This causes
- # mode=update to fall back to clobber+get, which flunks one of the
- # tests (the 'newfile' check in _do_vctest_update_3 fails)
-
- # to avoid this, we take heroic steps here to leave the archive
- # registration in the same state as we found it.
-
- tmp = os.path.join(self.repbase, "archtmp")
- a = self.archname
-
- w = self.dovc(self.repbase, "archives %s" % a)
- yield w; out = w.getResult()
- assert out
- lines = out.split("\n")
- coordinates = lines[1].strip()
-
- # now register the read-write location
- w = waitForDeferred(self.registerRepository(self.archrep))
- yield w; w.getResult()
-
- trunk = self.defaultbranch
-
- w = self.do_get(self.repbase, a, trunk, "archtmp")
- yield w; w.getResult()
-
- # tla appears to use timestamps to determine which files have
- # changed, so wait long enough for the new file to have a different
- # timestamp
- time.sleep(2)
- self.version += 1
- version_c = VERSION_C % self.version
- open(os.path.join(tmp, "version.c"), "w").write(version_c)
-
- logfile = "++log.%s--%s" % (trunk, a)
- logmsg = "Summary: revised_to_%d\nKeywords:\n\n" % self.version
- open(os.path.join(tmp, logfile), "w").write(logmsg)
- w = self.dovc(tmp, "commit")
- yield w; out = w.getResult()
- m = re.search(r'committed %s/%s--([\S]+)' % (a, trunk),
- out)
- assert (m.group(1) == "base-0" or m.group(1).startswith("patch-"))
- self.addTrunkRev(m.group(1))
-
- # now re-register the original coordinates
- w = waitForDeferred(self.registerRepository(coordinates))
- yield w; w.getResult()
- rmdirRecursive(tmp)
- vc_revise = deferredGenerator(vc_revise)
-
- def vc_try_checkout(self, workdir, rev, branch=None):
- assert os.path.abspath(workdir) == workdir
- if os.path.exists(workdir):
- rmdirRecursive(workdir)
-
- a = self.archname
-
- # register the read-write location, if it wasn't already registered
- w = waitForDeferred(self.registerRepository(self.archrep))
- yield w; w.getResult()
-
- w = self.do_get(self.repbase, a, "testvc--mainline--1", workdir)
- yield w; w.getResult()
-
- # timestamps. ick.
- time.sleep(2)
- open(os.path.join(workdir, "subdir", "subdir.c"), "w").write(TRY_C)
- vc_try_checkout = deferredGenerator(vc_try_checkout)
-
- def vc_try_finish(self, workdir):
- rmdirRecursive(workdir)
-
-class Arch(VCBase, unittest.TestCase):
- vc_name = "tla"
-
- metadir = None
- # Arch has a metadir="{arch}", but it does not have an 'export' mode.
- vctype = "source.Arch"
- vctype_try = "tla"
- has_got_revision = True
-
- def testCheckout(self):
- # these are the coordinates of the read-write archive used by all the
- # non-HTTP tests. testCheckoutHTTP overrides these.
- self.helper.vcargs = {'url': self.helper.archrep,
- 'version': self.helper.defaultbranch }
- d = self.do_vctest(testRetry=False)
- # the current testRetry=True logic doesn't have the desired effect:
- # "update" is a no-op because arch knows that the repository hasn't
- # changed. Other VC systems will re-checkout missing files on
- # update, arch just leaves the tree untouched. TODO: come up with
- # some better test logic, probably involving a copy of the
- # repository that has a few changes checked in.
-
- return d
-
- def testCheckoutHTTP(self):
- self.serveHTTP()
- url = "http://localhost:%d/Tla-Repository" % self.httpPort
- self.helper.vcargs = { 'url': url,
- 'version': "testvc--mainline--1" }
- d = self.do_vctest(testRetry=False)
- return d
-
- def testPatch(self):
- self.helper.vcargs = {'url': self.helper.archrep,
- 'version': self.helper.defaultbranch }
- d = self.do_patch()
- return d
-
- def testCheckoutBranch(self):
- self.helper.vcargs = {'url': self.helper.archrep,
- 'version': self.helper.defaultbranch }
- d = self.do_branch()
- return d
-
- def testTry(self):
- self.helper.vcargs = {'url': self.helper.archrep,
- 'version': self.helper.defaultbranch }
- d = self.do_getpatch()
- return d
-
-VCS.registerVC(Arch.vc_name, TlaHelper())
-
-
-class BazaarHelper(TlaHelper):
- archcmd = "baz"
-
- def capable(self):
- bazpaths = which('baz')
- if not bazpaths:
- return (False, "Arch (baz) is not installed")
- self.vcexe = bazpaths[0]
- return (True, None)
-
- def setUp2(self, res):
- # we unregister the repository each time, because we might have
- # changed the coordinates (since we switch from a file: URL to an
- # http: URL for various tests). The buildslave code doesn't forcibly
- # unregister the archive, so we have to do it here.
- d = self.unregisterRepository()
- return d
-
-
-class Bazaar(Arch):
- vc_name = "bazaar"
-
- vctype = "source.Bazaar"
- vctype_try = "baz"
- has_got_revision = True
-
- fixtimer = None
-
- def testCheckout(self):
- self.helper.vcargs = {'url': self.helper.archrep,
- # Baz adds the required 'archive' argument
- 'archive': self.helper.archname,
- 'version': self.helper.defaultbranch,
- }
- d = self.do_vctest(testRetry=False)
- # the current testRetry=True logic doesn't have the desired effect:
- # "update" is a no-op because arch knows that the repository hasn't
- # changed. Other VC systems will re-checkout missing files on
- # update, arch just leaves the tree untouched. TODO: come up with
- # some better test logic, probably involving a copy of the
- # repository that has a few changes checked in.
-
- return d
-
- def testCheckoutHTTP(self):
- self.serveHTTP()
- url = "http://localhost:%d/Baz-Repository" % self.httpPort
- self.helper.vcargs = { 'url': url,
- 'archive': self.helper.archname,
- 'version': self.helper.defaultbranch,
- }
- d = self.do_vctest(testRetry=False)
- return d
-
- def testPatch(self):
- self.helper.vcargs = {'url': self.helper.archrep,
- # Baz adds the required 'archive' argument
- 'archive': self.helper.archname,
- 'version': self.helper.defaultbranch,
- }
- d = self.do_patch()
- return d
-
- def testCheckoutBranch(self):
- self.helper.vcargs = {'url': self.helper.archrep,
- # Baz adds the required 'archive' argument
- 'archive': self.helper.archname,
- 'version': self.helper.defaultbranch,
- }
- d = self.do_branch()
- return d
-
- def testTry(self):
- self.helper.vcargs = {'url': self.helper.archrep,
- # Baz adds the required 'archive' argument
- 'archive': self.helper.archname,
- 'version': self.helper.defaultbranch,
- }
- d = self.do_getpatch()
- return d
-
- def fixRepository(self):
- self.fixtimer = None
- self.site.resource = self.root
-
- def testRetry(self):
- # we want to verify that source.Source(retry=) works, and the easiest
- # way to make VC updates break (temporarily) is to break the HTTP
- # server that's providing the repository. Anything else pretty much
- # requires mutating the (read-only) BUILDBOT_TEST_VC repository, or
- # modifying the buildslave's checkout command while it's running.
-
- # this test takes a while to run, so don't bother doing it with
- # anything other than baz
-
- self.serveHTTP()
-
- # break the repository server
- from twisted.web import static
- self.site.resource = static.Data("Sorry, repository is offline",
- "text/plain")
- # and arrange to fix it again in 5 seconds, while the test is
- # running.
- self.fixtimer = reactor.callLater(5, self.fixRepository)
-
- url = "http://localhost:%d/Baz-Repository" % self.httpPort
- self.helper.vcargs = { 'url': url,
- 'archive': self.helper.archname,
- 'version': self.helper.defaultbranch,
- 'retry': (5.0, 4),
- }
- d = self.do_vctest_once(True)
- d.addCallback(self._testRetry_1)
- return d
- def _testRetry_1(self, bs):
- # make sure there was mention of the retry attempt in the logs
- l = bs.getLogs()[0]
- self.failUnlessIn("unable to access URL", l.getText(),
- "funny, VC operation didn't fail at least once")
- self.failUnlessIn("update failed, trying 4 more times after 5 seconds",
- l.getTextWithHeaders(),
- "funny, VC operation wasn't reattempted")
-
- def testRetryFails(self):
- # make sure that the build eventually gives up on a repository which
- # is completely unavailable
-
- self.serveHTTP()
-
- # break the repository server, and leave it broken
- from twisted.web import static
- self.site.resource = static.Data("Sorry, repository is offline",
- "text/plain")
-
- url = "http://localhost:%d/Baz-Repository" % self.httpPort
- self.helper.vcargs = {'url': url,
- 'archive': self.helper.archname,
- 'version': self.helper.defaultbranch,
- 'retry': (0.5, 3),
- }
- d = self.do_vctest_once(False)
- d.addCallback(self._testRetryFails_1)
- return d
- def _testRetryFails_1(self, bs):
- self.failUnlessEqual(bs.getResults(), FAILURE)
-
- def tearDown2(self):
- if self.fixtimer:
- self.fixtimer.cancel()
- # tell tla to get rid of the leftover archive this test leaves in the
- # user's 'tla archives' listing. The name of this archive is provided
- # by the repository tarball, so the following command must use the
- # same name. We could use archive= to set it explicitly, but if you
- # change it from the default, then 'tla update' won't work.
- d = self.helper.unregisterRepository()
- return d
-
-VCS.registerVC(Bazaar.vc_name, BazaarHelper())
-
-class BzrHelper(BaseHelper):
- branchname = "branch"
- try_branchname = "branch"
-
- def capable(self):
- bzrpaths = which('bzr')
- if not bzrpaths:
- return (False, "bzr is not installed")
- self.vcexe = bzrpaths[0]
- return (True, None)
-
- def get_revision_number(self, out):
- for line in out.split("\n"):
- colon = line.index(":")
- key, value = line[:colon], line[colon+2:]
- if key == "revno":
- return int(value)
- raise RuntimeError("unable to find revno: in bzr output: '%s'" % out)
-
- def createRepository(self):
- self.createBasedir()
- self.bzr_base = os.path.join(self.repbase, "Bzr-Repository")
- self.rep_trunk = os.path.join(self.bzr_base, "trunk")
- self.rep_branch = os.path.join(self.bzr_base, "branch")
- tmp = os.path.join(self.repbase, "bzrtmp")
- btmp = os.path.join(self.repbase, "bzrtmp-branch")
-
- os.makedirs(self.rep_trunk)
- w = self.dovc(self.rep_trunk, ["init"])
- yield w; w.getResult()
- w = self.dovc(self.bzr_base,
- ["branch", self.rep_trunk, self.rep_branch])
- yield w; w.getResult()
-
- w = self.dovc(self.repbase, ["checkout", self.rep_trunk, tmp])
- yield w; w.getResult()
- self.populate(tmp)
- w = self.dovc(tmp, qw("add"))
- yield w; w.getResult()
- w = self.dovc(tmp, qw("commit -m initial_import"))
- yield w; w.getResult()
- w = self.dovc(tmp, qw("version-info"))
- yield w; out = w.getResult()
- self.addTrunkRev(self.get_revision_number(out))
- rmdirRecursive(tmp)
-
- # pull all trunk revisions to the branch
- w = self.dovc(self.rep_branch, qw("pull"))
- yield w; w.getResult()
- # obtain a branch tree
- w = self.dovc(self.repbase, ["checkout", self.rep_branch, btmp])
- yield w; w.getResult()
- # modify it
- self.populate_branch(btmp)
- w = self.dovc(btmp, qw("add"))
- yield w; w.getResult()
- w = self.dovc(btmp, qw("commit -m commit_on_branch"))
- yield w; w.getResult()
- w = self.dovc(btmp, qw("version-info"))
- yield w; out = w.getResult()
- self.addBranchRev(self.get_revision_number(out))
- rmdirRecursive(btmp)
- createRepository = deferredGenerator(createRepository)
-
- def vc_revise(self):
- tmp = os.path.join(self.repbase, "bzrtmp")
- w = self.dovc(self.repbase, ["checkout", self.rep_trunk, tmp])
- yield w; w.getResult()
-
- self.version += 1
- version_c = VERSION_C % self.version
- open(os.path.join(tmp, "version.c"), "w").write(version_c)
- w = self.dovc(tmp, qw("commit -m revised_to_%d" % self.version))
- yield w; w.getResult()
- w = self.dovc(tmp, qw("version-info"))
- yield w; out = w.getResult()
- self.addTrunkRev(self.get_revision_number(out))
- rmdirRecursive(tmp)
- vc_revise = deferredGenerator(vc_revise)
-
- def vc_try_checkout(self, workdir, rev, branch=None):
- assert os.path.abspath(workdir) == workdir
- if os.path.exists(workdir):
- rmdirRecursive(workdir)
- #os.makedirs(workdir)
- if not branch:
- rep = self.rep_trunk
- else:
- rep = os.path.join(self.bzr_base, branch)
- w = self.dovc(self.bzr_base, ["checkout", rep, workdir])
- yield w; w.getResult()
- open(os.path.join(workdir, "subdir", "subdir.c"), "w").write(TRY_C)
- vc_try_checkout = deferredGenerator(vc_try_checkout)
-
- def vc_try_finish(self, workdir):
- rmdirRecursive(workdir)
-
-class Bzr(VCBase, unittest.TestCase):
- vc_name = "bzr"
-
- metadir = ".bzr"
- vctype = "source.Bzr"
- vctype_try = "bzr"
- has_got_revision = True
-
- def testCheckout(self):
- self.helper.vcargs = { 'repourl': self.helper.rep_trunk }
- d = self.do_vctest(testRetry=False)
-
- # TODO: testRetry has the same problem with Bzr as it does for
- # Arch
- return d
-
- def testPatch(self):
- self.helper.vcargs = { 'baseURL': self.helper.bzr_base + "/",
- 'defaultBranch': "trunk" }
- d = self.do_patch()
- return d
-
- def testCheckoutBranch(self):
- self.helper.vcargs = { 'baseURL': self.helper.bzr_base + "/",
- 'defaultBranch': "trunk" }
- d = self.do_branch()
- return d
-
- def testCheckoutHTTP(self):
- self.serveHTTP()
- repourl = "http://localhost:%d/Bzr-Repository/trunk" % self.httpPort
- self.helper.vcargs = { 'repourl': repourl }
- d = self.do_vctest(testRetry=False)
- return d
-
-
- def fixRepository(self):
- self.fixtimer = None
- self.site.resource = self.root
-
- def testRetry(self):
- # this test takes a while to run
- self.serveHTTP()
-
- # break the repository server
- from twisted.web import static
- self.site.resource = static.Data("Sorry, repository is offline",
- "text/plain")
- # and arrange to fix it again in 5 seconds, while the test is
- # running.
- self.fixtimer = reactor.callLater(5, self.fixRepository)
-
- repourl = "http://localhost:%d/Bzr-Repository/trunk" % self.httpPort
- self.helper.vcargs = { 'repourl': repourl,
- 'retry': (5.0, 4),
- }
- d = self.do_vctest_once(True)
- d.addCallback(self._testRetry_1)
- return d
- def _testRetry_1(self, bs):
- # make sure there was mention of the retry attempt in the logs
- l = bs.getLogs()[0]
- self.failUnlessIn("ERROR: Not a branch: ", l.getText(),
- "funny, VC operation didn't fail at least once")
- self.failUnlessIn("update failed, trying 4 more times after 5 seconds",
- l.getTextWithHeaders(),
- "funny, VC operation wasn't reattempted")
-
- def testRetryFails(self):
- # make sure that the build eventually gives up on a repository which
- # is completely unavailable
-
- self.serveHTTP()
-
- # break the repository server, and leave it broken
- from twisted.web import static
- self.site.resource = static.Data("Sorry, repository is offline",
- "text/plain")
-
- repourl = "http://localhost:%d/Bzr-Repository/trunk" % self.httpPort
- self.helper.vcargs = { 'repourl': repourl,
- 'retry': (0.5, 3),
- }
- d = self.do_vctest_once(False)
- d.addCallback(self._testRetryFails_1)
- return d
- def _testRetryFails_1(self, bs):
- self.failUnlessEqual(bs.getResults(), FAILURE)
-
-
- def testTry(self):
- self.helper.vcargs = { 'baseURL': self.helper.bzr_base + "/",
- 'defaultBranch': "trunk" }
- d = self.do_getpatch()
- return d
-
-VCS.registerVC(Bzr.vc_name, BzrHelper())
-
-
-class MercurialHelper(BaseHelper):
- branchname = "branch"
- try_branchname = "branch"
-
- def capable(self):
- hgpaths = which("hg")
- if not hgpaths:
- return (False, "Mercurial is not installed")
- self.vcexe = hgpaths[0]
- return (True, None)
-
- def extract_id(self, output):
- m = re.search(r'^(\w+)', output)
- return m.group(0)
-
- def createRepository(self):
- self.createBasedir()
- self.hg_base = os.path.join(self.repbase, "Mercurial-Repository")
- self.rep_trunk = os.path.join(self.hg_base, "trunk")
- self.rep_branch = os.path.join(self.hg_base, "branch")
- tmp = os.path.join(self.hg_base, "hgtmp")
-
- os.makedirs(self.rep_trunk)
- w = self.dovc(self.rep_trunk, "init")
- yield w; w.getResult()
- os.makedirs(self.rep_branch)
- w = self.dovc(self.rep_branch, "init")
- yield w; w.getResult()
-
- self.populate(tmp)
- w = self.dovc(tmp, "init")
- yield w; w.getResult()
- w = self.dovc(tmp, "add")
- yield w; w.getResult()
- w = self.dovc(tmp, ['commit', '-m', 'initial_import'])
- yield w; w.getResult()
- w = self.dovc(tmp, ['push', self.rep_trunk])
- # note that hg-push does not actually update the working directory
- yield w; w.getResult()
- w = self.dovc(tmp, "identify")
- yield w; out = w.getResult()
- self.addTrunkRev(self.extract_id(out))
-
- self.populate_branch(tmp)
- w = self.dovc(tmp, ['commit', '-m', 'commit_on_branch'])
- yield w; w.getResult()
- w = self.dovc(tmp, ['push', self.rep_branch])
- yield w; w.getResult()
- w = self.dovc(tmp, "identify")
- yield w; out = w.getResult()
- self.addBranchRev(self.extract_id(out))
- rmdirRecursive(tmp)
- createRepository = deferredGenerator(createRepository)
-
- def vc_revise(self):
- tmp = os.path.join(self.hg_base, "hgtmp2")
- w = self.dovc(self.hg_base, ['clone', self.rep_trunk, tmp])
- yield w; w.getResult()
-
- self.version += 1
- version_c = VERSION_C % self.version
- version_c_filename = os.path.join(tmp, "version.c")
- open(version_c_filename, "w").write(version_c)
- # hg uses timestamps to distinguish files which have changed, so we
- # force the mtime forward a little bit
- future = time.time() + 2*self.version
- os.utime(version_c_filename, (future, future))
- w = self.dovc(tmp, ['commit', '-m', 'revised_to_%d' % self.version])
- yield w; w.getResult()
- w = self.dovc(tmp, ['push', self.rep_trunk])
- yield w; w.getResult()
- w = self.dovc(tmp, "identify")
- yield w; out = w.getResult()
- self.addTrunkRev(self.extract_id(out))
- rmdirRecursive(tmp)
- vc_revise = deferredGenerator(vc_revise)
-
- def vc_try_checkout(self, workdir, rev, branch=None):
- assert os.path.abspath(workdir) == workdir
- if os.path.exists(workdir):
- rmdirRecursive(workdir)
- if branch:
- src = self.rep_branch
- else:
- src = self.rep_trunk
- w = self.dovc(self.hg_base, ['clone', src, workdir])
- yield w; w.getResult()
- try_c_filename = os.path.join(workdir, "subdir", "subdir.c")
- open(try_c_filename, "w").write(TRY_C)
- future = time.time() + 2*self.version
- os.utime(try_c_filename, (future, future))
- vc_try_checkout = deferredGenerator(vc_try_checkout)
-
- def vc_try_finish(self, workdir):
- rmdirRecursive(workdir)
-
-class MercurialServerPP(protocol.ProcessProtocol):
- def __init__(self):
- self.wait = defer.Deferred()
-
- def outReceived(self, data):
- log.msg("hg-serve-stdout: %s" % (data,))
- def errReceived(self, data):
- print "HG-SERVE-STDERR:", data
- log.msg("hg-serve-stderr: %s" % (data,))
- def processEnded(self, reason):
- log.msg("hg-serve ended: %s" % reason)
- self.wait.callback(None)
-
-
-class Mercurial(VCBase, unittest.TestCase):
- vc_name = "hg"
-
- # Mercurial has a metadir=".hg", but it does not have an 'export' mode.
- metadir = None
- vctype = "source.Mercurial"
- vctype_try = "hg"
- has_got_revision = True
- _hg_server = None
- _wait_for_server_poller = None
- _pp = None
-
- def testCheckout(self):
- self.helper.vcargs = { 'repourl': self.helper.rep_trunk }
- d = self.do_vctest(testRetry=False)
-
- # TODO: testRetry has the same problem with Mercurial as it does for
- # Arch
- return d
-
- def testPatch(self):
- self.helper.vcargs = { 'baseURL': self.helper.hg_base + "/",
- 'defaultBranch': "trunk" }
- d = self.do_patch()
- return d
-
- def testCheckoutBranch(self):
- self.helper.vcargs = { 'baseURL': self.helper.hg_base + "/",
- 'defaultBranch': "trunk" }
- d = self.do_branch()
- return d
-
- def serveHTTP(self):
- # the easiest way to publish hg over HTTP is by running 'hg serve' as
- # a child process while the test is running. (you can also use a CGI
- # script, which sounds difficult, or you can publish the files
- # directly, which isn't well documented).
-
- # grr.. 'hg serve' doesn't let you use --port=0 to mean "pick a free
- # port", instead it uses it as a signal to use the default (port
- # 8000). This means there is no way to make it choose a free port, so
- # we are forced to make it use a statically-defined one, making it
- # harder to avoid collisions.
- self.httpPort = 8300 + (os.getpid() % 200)
- args = [self.helper.vcexe,
- "serve", "--port", str(self.httpPort), "--verbose"]
-
- # in addition, hg doesn't flush its stdout, so we can't wait for the
- # "listening at" message to know when it's safe to start the test.
- # Instead, poll every second until a getPage works.
-
- self._pp = MercurialServerPP() # logs+discards everything
-
- # this serves one tree at a time, so we serve trunk. TODO: test hg's
- # in-repo branches, for which a single tree will hold all branches.
- self._hg_server = reactor.spawnProcess(self._pp, self.helper.vcexe, args,
- os.environ,
- self.helper.rep_trunk)
- log.msg("waiting for hg serve to start")
- done_d = defer.Deferred()
- def poll():
- d = client.getPage("http://localhost:%d/" % self.httpPort)
- def success(res):
- log.msg("hg serve appears to have started")
- self._wait_for_server_poller.stop()
- done_d.callback(None)
- def ignore_connection_refused(f):
- f.trap(error.ConnectionRefusedError)
- d.addCallbacks(success, ignore_connection_refused)
- d.addErrback(done_d.errback)
- return d
- self._wait_for_server_poller = task.LoopingCall(poll)
- self._wait_for_server_poller.start(0.5, True)
- return done_d
-
- def tearDown(self):
- if self._wait_for_server_poller:
- if self._wait_for_server_poller.running:
- self._wait_for_server_poller.stop()
- if self._hg_server:
- self._hg_server.loseConnection()
- try:
- self._hg_server.signalProcess("KILL")
- except error.ProcessExitedAlready:
- pass
- self._hg_server = None
- return VCBase.tearDown(self)
-
- def tearDown2(self):
- if self._pp:
- return self._pp.wait
-
- def testCheckoutHTTP(self):
- d = self.serveHTTP()
- def _started(res):
- repourl = "http://localhost:%d/" % self.httpPort
- self.helper.vcargs = { 'repourl': repourl }
- return self.do_vctest(testRetry=False)
- d.addCallback(_started)
- return d
-
- def testTry(self):
- self.helper.vcargs = { 'baseURL': self.helper.hg_base + "/",
- 'defaultBranch': "trunk" }
- d = self.do_getpatch()
- return d
-
-VCS.registerVC(Mercurial.vc_name, MercurialHelper())
-
-class MercurialInRepoHelper(MercurialHelper):
- branchname = "the_branch"
- try_branchname = "the_branch"
-
-
- def createRepository(self):
- self.createBasedir()
- self.hg_base = os.path.join(self.repbase, "Mercurial-Repository")
- self.repo = os.path.join(self.hg_base, "inrepobranch")
- tmp = os.path.join(self.hg_base, "hgtmp")
-
- os.makedirs(self.repo)
- w = self.dovc(self.repo, "init")
- yield w; w.getResult()
-
- self.populate(tmp)
- w = self.dovc(tmp, "init")
- yield w; w.getResult()
- w = self.dovc(tmp, "add")
- yield w; w.getResult()
- w = self.dovc(tmp, ['commit', '-m', 'initial_import'])
- yield w; w.getResult()
- w = self.dovc(tmp, ['push', self.repo])
- # note that hg-push does not actually update the working directory
- yield w; w.getResult()
- w = self.dovc(tmp, "identify")
- yield w; out = w.getResult()
- self.addTrunkRev(self.extract_id(out))
-
- self.populate_branch(tmp)
- w = self.dovc(tmp, ['branch', self.branchname])
- yield w; w.getResult()
- w = self.dovc(tmp, ['commit', '-m', 'commit_on_branch'])
- yield w; w.getResult()
- w = self.dovc(tmp, ['push', '-f', self.repo])
- yield w; w.getResult()
- w = self.dovc(tmp, "identify")
- yield w; out = w.getResult()
- self.addBranchRev(self.extract_id(out))
- rmdirRecursive(tmp)
- createRepository = deferredGenerator(createRepository)
-
- def vc_revise(self):
- tmp = os.path.join(self.hg_base, "hgtmp2")
- w = self.dovc(self.hg_base, ['clone', self.repo, tmp])
- yield w; w.getResult()
- w = self.dovc(tmp, ['update', '--clean', '--rev', 'default'])
- yield w; w.getResult()
-
- self.version += 1
- version_c = VERSION_C % self.version
- version_c_filename = os.path.join(tmp, "version.c")
- open(version_c_filename, "w").write(version_c)
- # hg uses timestamps to distinguish files which have changed, so we
- # force the mtime forward a little bit
- future = time.time() + 2*self.version
- os.utime(version_c_filename, (future, future))
- w = self.dovc(tmp, ['commit', '-m', 'revised_to_%d' % self.version])
- yield w; w.getResult()
- w = self.dovc(tmp, ['push', '--force', self.repo])
- yield w; w.getResult()
- w = self.dovc(tmp, "identify")
- yield w; out = w.getResult()
- self.addTrunkRev(self.extract_id(out))
- rmdirRecursive(tmp)
- vc_revise = deferredGenerator(vc_revise)
-
- def vc_try_checkout(self, workdir, rev, branch=None):
- assert os.path.abspath(workdir) == workdir
- if os.path.exists(workdir):
- rmdirRecursive(workdir)
- w = self.dovc(self.hg_base, ['clone', self.repo, workdir])
- yield w; w.getResult()
- if not branch: branch = "default"
- w = self.dovc(workdir, ['update', '--clean', '--rev', branch ])
- yield w; w.getResult()
-
- try_c_filename = os.path.join(workdir, "subdir", "subdir.c")
- open(try_c_filename, "w").write(TRY_C)
- future = time.time() + 2*self.version
- os.utime(try_c_filename, (future, future))
- vc_try_checkout = deferredGenerator(vc_try_checkout)
-
- def vc_try_finish(self, workdir):
- rmdirRecursive(workdir)
- pass
-
-
-class MercurialInRepo(Mercurial):
- vc_name = 'MercurialInRepo'
-
- def default_args(self):
- return { 'repourl': self.helper.repo,
- 'branchType': 'inrepo',
- 'defaultBranch': 'default' }
-
- def testCheckout(self):
- self.helper.vcargs = self.default_args()
- d = self.do_vctest(testRetry=False)
-
- # TODO: testRetry has the same problem with Mercurial as it does for
- # Arch
- return d
-
- def testPatch(self):
- self.helper.vcargs = self.default_args()
- d = self.do_patch()
- return d
-
- def testCheckoutBranch(self):
- self.helper.vcargs = self.default_args()
- d = self.do_branch()
- return d
-
- def serveHTTP(self):
- # the easiest way to publish hg over HTTP is by running 'hg serve' as
- # a child process while the test is running. (you can also use a CGI
- # script, which sounds difficult, or you can publish the files
- # directly, which isn't well documented).
-
- # grr.. 'hg serve' doesn't let you use --port=0 to mean "pick a free
- # port", instead it uses it as a signal to use the default (port
- # 8000). This means there is no way to make it choose a free port, so
- # we are forced to make it use a statically-defined one, making it
- # harder to avoid collisions.
- self.httpPort = 8300 + (os.getpid() % 200)
- args = [self.helper.vcexe,
- "serve", "--port", str(self.httpPort), "--verbose"]
-
- # in addition, hg doesn't flush its stdout, so we can't wait for the
- # "listening at" message to know when it's safe to start the test.
- # Instead, poll every second until a getPage works.
-
- self._pp = MercurialServerPP() # logs+discards everything
- # this serves one tree at a time, so we serve trunk. TODO: test hg's
- # in-repo branches, for which a single tree will hold all branches.
- self._hg_server = reactor.spawnProcess(self._pp, self.helper.vcexe, args,
- os.environ,
- self.helper.repo)
- log.msg("waiting for hg serve to start")
- done_d = defer.Deferred()
- def poll():
- d = client.getPage("http://localhost:%d/" % self.httpPort)
- def success(res):
- log.msg("hg serve appears to have started")
- self._wait_for_server_poller.stop()
- done_d.callback(None)
- def ignore_connection_refused(f):
- f.trap(error.ConnectionRefusedError)
- d.addCallbacks(success, ignore_connection_refused)
- d.addErrback(done_d.errback)
- return d
- self._wait_for_server_poller = task.LoopingCall(poll)
- self._wait_for_server_poller.start(0.5, True)
- return done_d
-
- def tearDown(self):
- if self._wait_for_server_poller:
- if self._wait_for_server_poller.running:
- self._wait_for_server_poller.stop()
- if self._hg_server:
- self._hg_server.loseConnection()
- try:
- self._hg_server.signalProcess("KILL")
- except error.ProcessExitedAlready:
- pass
- self._hg_server = None
- return VCBase.tearDown(self)
-
- def tearDown2(self):
- if self._pp:
- return self._pp.wait
-
- def testCheckoutHTTP(self):
- d = self.serveHTTP()
- def _started(res):
- repourl = "http://localhost:%d/" % self.httpPort
- self.helper.vcargs = self.default_args()
- self.helper.vcargs['repourl'] = repourl
- return self.do_vctest(testRetry=False)
- d.addCallback(_started)
- return d
-
- def testTry(self):
- self.helper.vcargs = self.default_args()
- d = self.do_getpatch()
- return d
-
-VCS.registerVC(MercurialInRepo.vc_name, MercurialInRepoHelper())
-
-
-class GitHelper(BaseHelper):
- branchname = "branch"
- try_branchname = "branch"
-
- def capable(self):
- gitpaths = which('git')
- if not gitpaths:
- return (False, "GIT is not installed")
- d = utils.getProcessOutput(gitpaths[0], ["--version"], env=os.environ)
- d.addCallback(self._capable, gitpaths[0])
- return d
-
- def _capable(self, v, vcexe):
- try:
- m = re.search(r'\b(\d+)\.(\d+)', v)
-
- if not m:
- raise Exception, 'no regex match'
-
- ver = tuple([int(num) for num in m.groups()])
-
- # git-1.1.3 (as shipped with Dapper) doesn't understand 'git
- # init' (it wants 'git init-db'), and fails unit tests that
- # involve branches. git-1.5.3.6 (on my debian/unstable system)
- # works. I don't know where the dividing point is: if someone can
- # figure it out (or figure out how to make buildbot support more
- # versions), please update this check.
- if ver < (1, 2):
- return (False, "Found git (%s) but it is older than 1.2.x" % vcexe)
-
- except Exception, e:
- log.msg("couldn't identify git version number in output:")
- log.msg("'''%s'''" % v)
- log.msg("because: %s" % e)
- log.msg("skipping tests")
- return (False,
- "Found git (%s) but couldn't identify its version from '%s'" % (vcexe, v))
-
- self.vcexe = vcexe
- return (True, None)
-
- def createRepository(self):
- self.createBasedir()
- self.gitrepo = os.path.join(self.repbase,
- "GIT-Repository")
- tmp = os.path.join(self.repbase, "gittmp")
-
- env = os.environ.copy()
- env['GIT_DIR'] = self.gitrepo
- w = self.dovc(self.repbase, "init", env=env)
- yield w; w.getResult()
-
- self.populate(tmp)
- w = self.dovc(tmp, "init")
- yield w; w.getResult()
- w = self.dovc(tmp, ["add", "."])
- yield w; w.getResult()
- w = self.dovc(tmp, ["config", "user.email", "buildbot-trial@localhost"])
- yield w; w.getResult()
- w = self.dovc(tmp, ["config", "user.name", "Buildbot Trial"])
- yield w; w.getResult()
- w = self.dovc(tmp, ["commit", "-m", "initial_import"])
- yield w; w.getResult()
-
- w = self.dovc(tmp, ["checkout", "-b", self.branchname])
- yield w; w.getResult()
- self.populate_branch(tmp)
- w = self.dovc(tmp, ["commit", "-a", "-m", "commit_on_branch"])
- yield w; w.getResult()
-
- w = self.dovc(tmp, ["rev-parse", "master", self.branchname])
- yield w; out = w.getResult()
- revs = out.splitlines()
- self.addTrunkRev(revs[0])
- self.addBranchRev(revs[1])
-
- w = self.dovc(tmp, ["push", self.gitrepo, "master", self.branchname])
- yield w; w.getResult()
-
- rmdirRecursive(tmp)
- createRepository = deferredGenerator(createRepository)
-
- def vc_revise(self):
- tmp = os.path.join(self.repbase, "gittmp")
- rmdirRecursive(tmp)
- log.msg("vc_revise" + self.gitrepo)
- w = self.dovc(self.repbase, ["clone", self.gitrepo, "gittmp"])
- yield w; w.getResult()
- w = self.dovc(tmp, ["config", "user.email", "buildbot-trial@localhost"])
- yield w; w.getResult()
- w = self.dovc(tmp, ["config", "user.name", "Buildbot Trial"])
- yield w; w.getResult()
-
- self.version += 1
- version_c = VERSION_C % self.version
- open(os.path.join(tmp, "version.c"), "w").write(version_c)
-
- w = self.dovc(tmp, ["commit", "-m", "revised_to_%d" % self.version,
- "version.c"])
- yield w; w.getResult()
- w = self.dovc(tmp, ["rev-parse", "master"])
- yield w; out = w.getResult()
- self.addTrunkRev(out.strip())
-
- w = self.dovc(tmp, ["push", self.gitrepo, "master"])
- yield w; out = w.getResult()
- rmdirRecursive(tmp)
- vc_revise = deferredGenerator(vc_revise)
-
- def vc_try_checkout(self, workdir, rev, branch=None):
- assert os.path.abspath(workdir) == workdir
- if os.path.exists(workdir):
- rmdirRecursive(workdir)
-
- w = self.dovc(self.repbase, ["clone", self.gitrepo, workdir])
- yield w; w.getResult()
- w = self.dovc(workdir, ["config", "user.email", "buildbot-trial@localhost"])
- yield w; w.getResult()
- w = self.dovc(workdir, ["config", "user.name", "Buildbot Trial"])
- yield w; w.getResult()
-
- if branch is not None:
- w = self.dovc(workdir, ["checkout", "-b", branch,
- "origin/%s" % branch])
- yield w; w.getResult()
-
- # Hmm...why do nobody else bother to check out the correct
- # revision?
- w = self.dovc(workdir, ["reset", "--hard", rev])
- yield w; w.getResult()
-
- try_c_filename = os.path.join(workdir, "subdir", "subdir.c")
- open(try_c_filename, "w").write(TRY_C)
- vc_try_checkout = deferredGenerator(vc_try_checkout)
-
- def vc_try_finish(self, workdir):
- rmdirRecursive(workdir)
-
-class Git(VCBase, unittest.TestCase):
- vc_name = "git"
-
- # No 'export' mode yet...
- # metadir = ".git"
- vctype = "source.Git"
- vctype_try = "git"
- has_got_revision = True
-
- def testCheckout(self):
- self.helper.vcargs = { 'repourl': self.helper.gitrepo }
- d = self.do_vctest()
- return d
-
- def testPatch(self):
- self.helper.vcargs = { 'repourl': self.helper.gitrepo,
- 'branch': "master" }
- d = self.do_patch()
- return d
-
- def testCheckoutBranch(self):
- self.helper.vcargs = { 'repourl': self.helper.gitrepo,
- 'branch': "master" }
- d = self.do_branch()
- return d
-
- def testTry(self):
- self.helper.vcargs = { 'repourl': self.helper.gitrepo,
- 'branch': "master" }
- d = self.do_getpatch()
- return d
-
-VCS.registerVC(Git.vc_name, GitHelper())
-
-
-class Sources(unittest.TestCase):
- # TODO: this needs serious rethink
- def makeChange(self, when=None, revision=None):
- if when:
- when = mktime_tz(parsedate_tz(when))
- return changes.Change("fred", [], "", when=when, revision=revision)
-
- def testCVS1(self):
- r = base.BuildRequest("forced build", SourceStamp(), 'test_builder')
- b = base.Build([r])
- s = source.CVS(cvsroot=None, cvsmodule=None)
- s.setBuild(b)
- self.failUnlessEqual(s.computeSourceRevision(b.allChanges()), None)
-
- def testCVS2(self):
- c = []
- c.append(self.makeChange("Wed, 08 Sep 2004 09:00:00 -0700"))
- c.append(self.makeChange("Wed, 08 Sep 2004 09:01:00 -0700"))
- c.append(self.makeChange("Wed, 08 Sep 2004 09:02:00 -0700"))
- r = base.BuildRequest("forced", SourceStamp(changes=c), 'test_builder')
- submitted = "Wed, 08 Sep 2004 09:04:00 -0700"
- r.submittedAt = mktime_tz(parsedate_tz(submitted))
- b = base.Build([r])
- s = source.CVS(cvsroot=None, cvsmodule=None)
- s.setBuild(b)
- self.failUnlessEqual(s.computeSourceRevision(b.allChanges()),
- "Wed, 08 Sep 2004 16:03:00 -0000")
-
- def testCVS3(self):
- c = []
- c.append(self.makeChange("Wed, 08 Sep 2004 09:00:00 -0700"))
- c.append(self.makeChange("Wed, 08 Sep 2004 09:01:00 -0700"))
- c.append(self.makeChange("Wed, 08 Sep 2004 09:02:00 -0700"))
- r = base.BuildRequest("forced", SourceStamp(changes=c), 'test_builder')
- submitted = "Wed, 08 Sep 2004 09:04:00 -0700"
- r.submittedAt = mktime_tz(parsedate_tz(submitted))
- b = base.Build([r])
- s = source.CVS(cvsroot=None, cvsmodule=None, checkoutDelay=10)
- s.setBuild(b)
- self.failUnlessEqual(s.computeSourceRevision(b.allChanges()),
- "Wed, 08 Sep 2004 16:02:10 -0000")
-
- def testCVS4(self):
- c = []
- c.append(self.makeChange("Wed, 08 Sep 2004 09:00:00 -0700"))
- c.append(self.makeChange("Wed, 08 Sep 2004 09:01:00 -0700"))
- c.append(self.makeChange("Wed, 08 Sep 2004 09:02:00 -0700"))
- r1 = base.BuildRequest("forced", SourceStamp(changes=c), 'test_builder')
- submitted = "Wed, 08 Sep 2004 09:04:00 -0700"
- r1.submittedAt = mktime_tz(parsedate_tz(submitted))
-
- c = []
- c.append(self.makeChange("Wed, 08 Sep 2004 09:05:00 -0700"))
- r2 = base.BuildRequest("forced", SourceStamp(changes=c), 'test_builder')
- submitted = "Wed, 08 Sep 2004 09:07:00 -0700"
- r2.submittedAt = mktime_tz(parsedate_tz(submitted))
-
- b = base.Build([r1, r2])
- s = source.CVS(cvsroot=None, cvsmodule=None)
- s.setBuild(b)
- self.failUnlessEqual(s.computeSourceRevision(b.allChanges()),
- "Wed, 08 Sep 2004 16:06:00 -0000")
-
- def testSVN1(self):
- r = base.BuildRequest("forced", SourceStamp(), 'test_builder')
- b = base.Build([r])
- s = source.SVN(svnurl="dummy")
- s.setBuild(b)
- self.failUnlessEqual(s.computeSourceRevision(b.allChanges()), None)
-
- def testSVN2(self):
- c = []
- c.append(self.makeChange(revision=4))
- c.append(self.makeChange(revision=10))
- c.append(self.makeChange(revision=67))
- r = base.BuildRequest("forced", SourceStamp(changes=c), 'test_builder')
- b = base.Build([r])
- s = source.SVN(svnurl="dummy")
- s.setBuild(b)
- self.failUnlessEqual(s.computeSourceRevision(b.allChanges()), 67)
-
-class Patch(VCBase, unittest.TestCase):
- def setUp(self):
- pass
-
- def tearDown(self):
- pass
-
- def testPatch(self):
- # invoke 'patch' all by itself, to see if it works the way we think
- # it should. This is intended to ferret out some windows test
- # failures.
- helper = BaseHelper()
- self.workdir = os.path.join("test_vc", "testPatch")
- helper.populate(self.workdir)
- patch = which("patch")[0]
-
- command = [patch, "-p0"]
- class FakeBuilder:
- usePTY = False
- def sendUpdate(self, status):
- pass
- c = commands.ShellCommand(FakeBuilder(), command, self.workdir,
- sendRC=False, initialStdin=p0_diff)
- d = c.start()
- d.addCallback(self._testPatch_1)
- return d
-
- def _testPatch_1(self, res):
- # make sure the file actually got patched
- subdir_c = os.path.join(self.workdir, "subdir", "subdir.c")
- data = open(subdir_c, "r").read()
- self.failUnlessIn("Hello patched subdir.\\n", data)
diff --git a/buildbot/buildbot/test/test_web.py b/buildbot/buildbot/test/test_web.py
deleted file mode 100644
index 0f353d8..0000000
--- a/buildbot/buildbot/test/test_web.py
+++ /dev/null
@@ -1,594 +0,0 @@
-# -*- test-case-name: buildbot.test.test_web -*-
-
-import os, time, shutil
-from HTMLParser import HTMLParser
-from twisted.python import components
-
-from twisted.trial import unittest
-from buildbot.test.runutils import RunMixin
-
-from twisted.internet import reactor, defer, protocol
-from twisted.internet.interfaces import IReactorUNIX
-from twisted.web import client
-
-from buildbot import master, interfaces, sourcestamp
-from buildbot.status import html, builder
-from buildbot.status.web import waterfall
-from buildbot.changes.changes import Change
-from buildbot.process import base
-from buildbot.process.buildstep import BuildStep
-from buildbot.test.runutils import setupBuildStepStatus
-
-class ConfiguredMaster(master.BuildMaster):
- """This BuildMaster variant has a static config file, provided as a
- string when it is created."""
-
- def __init__(self, basedir, config):
- self.config = config
- master.BuildMaster.__init__(self, basedir)
-
- def loadTheConfigFile(self):
- self.loadConfig(self.config)
-
-components.registerAdapter(master.Control, ConfiguredMaster,
- interfaces.IControl)
-
-
-base_config = """
-from buildbot.changes.pb import PBChangeSource
-from buildbot.status import html
-from buildbot.buildslave import BuildSlave
-from buildbot.scheduler import Scheduler
-from buildbot.process.factory import BuildFactory
-
-BuildmasterConfig = c = {
- 'change_source': PBChangeSource(),
- 'slaves': [BuildSlave('bot1name', 'bot1passwd')],
- 'schedulers': [Scheduler('name', None, 60, ['builder1'])],
- 'builders': [{'name': 'builder1', 'slavename': 'bot1name',
- 'builddir': 'builder1', 'factory': BuildFactory()}],
- 'slavePortnum': 0,
- }
-"""
-
-
-
-class DistribUNIX:
- def __init__(self, unixpath):
- from twisted.web import server, resource, distrib
- root = resource.Resource()
- self.r = r = distrib.ResourceSubscription("unix", unixpath)
- root.putChild('remote', r)
- self.p = p = reactor.listenTCP(0, server.Site(root))
- self.portnum = p.getHost().port
- def shutdown(self):
- d = defer.maybeDeferred(self.p.stopListening)
- return d
-
-class DistribTCP:
- def __init__(self, port):
- from twisted.web import server, resource, distrib
- root = resource.Resource()
- self.r = r = distrib.ResourceSubscription("localhost", port)
- root.putChild('remote', r)
- self.p = p = reactor.listenTCP(0, server.Site(root))
- self.portnum = p.getHost().port
- def shutdown(self):
- d = defer.maybeDeferred(self.p.stopListening)
- d.addCallback(self._shutdown_1)
- return d
- def _shutdown_1(self, res):
- return self.r.publisher.broker.transport.loseConnection()
-
-class SlowReader(protocol.Protocol):
- didPause = False
- count = 0
- data = ""
- def __init__(self, req):
- self.req = req
- self.d = defer.Deferred()
- def connectionMade(self):
- self.transport.write(self.req)
- def dataReceived(self, data):
- self.data += data
- self.count += len(data)
- if not self.didPause and self.count > 10*1000:
- self.didPause = True
- self.transport.pauseProducing()
- reactor.callLater(2, self.resume)
- def resume(self):
- self.transport.resumeProducing()
- def connectionLost(self, why):
- self.d.callback(None)
-
-class CFactory(protocol.ClientFactory):
- def __init__(self, p):
- self.p = p
- def buildProtocol(self, addr):
- self.p.factory = self
- return self.p
-
-def stopHTTPLog():
- # grr.
- from twisted.web import http
- http._logDateTimeStop()
-
-class BaseWeb:
- master = None
-
- def failUnlessIn(self, substr, string, note=None):
- self.failUnless(string.find(substr) != -1, note)
-
- def tearDown(self):
- stopHTTPLog()
- if self.master:
- d = self.master.stopService()
- return d
-
- def find_webstatus(self, master):
- for child in list(master):
- if isinstance(child, html.WebStatus):
- return child
-
- def find_waterfall(self, master):
- for child in list(master):
- if isinstance(child, html.Waterfall):
- return child
-
-class Ports(BaseWeb, unittest.TestCase):
-
- def test_webPortnum(self):
- # run a regular web server on a TCP socket
- config = base_config + "c['status'] = [html.WebStatus(http_port=0)]\n"
- os.mkdir("test_web1")
- self.master = m = ConfiguredMaster("test_web1", config)
- m.startService()
- # hack to find out what randomly-assigned port it is listening on
- port = self.find_webstatus(m).getPortnum()
-
- d = client.getPage("http://localhost:%d/waterfall" % port)
- def _check(page):
- #print page
- self.failUnless(page)
- d.addCallback(_check)
- return d
- test_webPortnum.timeout = 10
-
- def test_webPathname(self):
- # running a t.web.distrib server over a UNIX socket
- if not IReactorUNIX.providedBy(reactor):
- raise unittest.SkipTest("UNIX sockets not supported here")
- config = (base_config +
- "c['status'] = [html.WebStatus(distrib_port='.web-pb')]\n")
- os.mkdir("test_web2")
- self.master = m = ConfiguredMaster("test_web2", config)
- m.startService()
-
- p = DistribUNIX("test_web2/.web-pb")
-
- d = client.getPage("http://localhost:%d/remote/waterfall" % p.portnum)
- def _check(page):
- self.failUnless(page)
- d.addCallback(_check)
- def _done(res):
- d1 = p.shutdown()
- d1.addCallback(lambda x: res)
- return d1
- d.addBoth(_done)
- return d
- test_webPathname.timeout = 10
-
-
- def test_webPathname_port(self):
- # running a t.web.distrib server over TCP
- config = (base_config +
- "c['status'] = [html.WebStatus(distrib_port=0)]\n")
- os.mkdir("test_web3")
- self.master = m = ConfiguredMaster("test_web3", config)
- m.startService()
- dport = self.find_webstatus(m).getPortnum()
-
- p = DistribTCP(dport)
-
- d = client.getPage("http://localhost:%d/remote/waterfall" % p.portnum)
- def _check(page):
- self.failUnlessIn("BuildBot", page)
- d.addCallback(_check)
- def _done(res):
- d1 = p.shutdown()
- d1.addCallback(lambda x: res)
- return d1
- d.addBoth(_done)
- return d
- test_webPathname_port.timeout = 10
-
-
-class Waterfall(BaseWeb, unittest.TestCase):
- def test_waterfall(self):
- os.mkdir("test_web4")
- os.mkdir("my-maildir"); os.mkdir("my-maildir/new")
- self.robots_txt = os.path.abspath(os.path.join("test_web4",
- "robots.txt"))
- self.robots_txt_contents = "User-agent: *\nDisallow: /\n"
- f = open(self.robots_txt, "w")
- f.write(self.robots_txt_contents)
- f.close()
- # this is the right way to configure the Waterfall status
- config1 = base_config + """
-from buildbot.changes import mail
-c['change_source'] = mail.SyncmailMaildirSource('my-maildir')
-c['status'] = [html.Waterfall(http_port=0, robots_txt=%s)]
-""" % repr(self.robots_txt)
-
- self.master = m = ConfiguredMaster("test_web4", config1)
- m.startService()
- port = self.find_waterfall(m).getPortnum()
- self.port = port
- # insert an event
- m.change_svc.addChange(Change("user", ["foo.c"], "comments"))
-
- d = client.getPage("http://localhost:%d/" % port)
-
- def _check1(page):
- self.failUnless(page)
- self.failUnlessIn("current activity", page)
- self.failUnlessIn("<html", page)
- TZ = time.tzname[time.localtime()[-1]]
- self.failUnlessIn("time (%s)" % TZ, page)
-
- # phase=0 is really for debugging the waterfall layout
- return client.getPage("http://localhost:%d/?phase=0" % self.port)
- d.addCallback(_check1)
-
- def _check2(page):
- self.failUnless(page)
- self.failUnlessIn("<html", page)
-
- return client.getPage("http://localhost:%d/changes" % self.port)
- d.addCallback(_check2)
-
- def _check3(changes):
- self.failUnlessIn("<li>Syncmail mailing list in maildir " +
- "my-maildir</li>", changes)
-
- return client.getPage("http://localhost:%d/robots.txt" % self.port)
- d.addCallback(_check3)
-
- def _check4(robotstxt):
- self.failUnless(robotstxt == self.robots_txt_contents)
- d.addCallback(_check4)
-
- return d
-
- test_waterfall.timeout = 10
-
-class WaterfallSteps(unittest.TestCase):
-
- # failUnlessSubstring copied from twisted-2.1.0, because this helps us
- # maintain compatibility with python2.2.
- def failUnlessSubstring(self, substring, astring, msg=None):
- """a python2.2 friendly test to assert that substring is found in
- astring parameters follow the semantics of failUnlessIn
- """
- if astring.find(substring) == -1:
- raise self.failureException(msg or "%r not found in %r"
- % (substring, astring))
- return substring
- assertSubstring = failUnlessSubstring
-
- def test_urls(self):
- s = setupBuildStepStatus("test_web.test_urls")
- s.addURL("coverage", "http://coverage.example.org/target")
- s.addURL("icon", "http://coverage.example.org/icon.png")
- class FakeRequest:
- prepath = []
- postpath = []
- def childLink(self, name):
- return name
- req = FakeRequest()
- box = waterfall.IBox(s).getBox(req)
- td = box.td()
- e1 = '[<a href="http://coverage.example.org/target" class="BuildStep external">coverage</a>]'
- self.failUnlessSubstring(e1, td)
- e2 = '[<a href="http://coverage.example.org/icon.png" class="BuildStep external">icon</a>]'
- self.failUnlessSubstring(e2, td)
-
-
-
-geturl_config = """
-from buildbot.status import html
-from buildbot.changes import mail
-from buildbot.process import factory
-from buildbot.steps import dummy
-from buildbot.scheduler import Scheduler
-from buildbot.changes.base import ChangeSource
-from buildbot.buildslave import BuildSlave
-s = factory.s
-
-class DiscardScheduler(Scheduler):
- def addChange(self, change):
- pass
-class DummyChangeSource(ChangeSource):
- pass
-
-BuildmasterConfig = c = {}
-c['slaves'] = [BuildSlave('bot1', 'sekrit'), BuildSlave('bot2', 'sekrit')]
-c['change_source'] = DummyChangeSource()
-c['schedulers'] = [DiscardScheduler('discard', None, 60, ['b1'])]
-c['slavePortnum'] = 0
-c['status'] = [html.Waterfall(http_port=0)]
-
-f = factory.BuildFactory([s(dummy.RemoteDummy, timeout=1)])
-
-c['builders'] = [
- {'name': 'b1', 'slavenames': ['bot1','bot2'],
- 'builddir': 'b1', 'factory': f},
- ]
-c['buildbotURL'] = 'http://dummy.example.org:8010/'
-
-"""
-
-class GetURL(RunMixin, unittest.TestCase):
-
- def setUp(self):
- RunMixin.setUp(self)
- self.master.loadConfig(geturl_config)
- self.master.startService()
- d = self.connectSlave(["b1"])
- return d
-
- def tearDown(self):
- stopHTTPLog()
- return RunMixin.tearDown(self)
-
- def doBuild(self, buildername):
- br = base.BuildRequest("forced", sourcestamp.SourceStamp(), 'test_builder')
- d = br.waitUntilFinished()
- self.control.getBuilder(buildername).requestBuild(br)
- return d
-
- def assertNoURL(self, target):
- self.failUnlessIdentical(self.status.getURLForThing(target), None)
-
- def assertURLEqual(self, target, expected):
- got = self.status.getURLForThing(target)
- full_expected = "http://dummy.example.org:8010/" + expected
- self.failUnlessEqual(got, full_expected)
-
- def testMissingBase(self):
- noweb_config1 = geturl_config + "del c['buildbotURL']\n"
- d = self.master.loadConfig(noweb_config1)
- d.addCallback(self._testMissingBase_1)
- return d
- def _testMissingBase_1(self, res):
- s = self.status
- self.assertNoURL(s)
- builder_s = s.getBuilder("b1")
- self.assertNoURL(builder_s)
-
- def testBase(self):
- s = self.status
- self.assertURLEqual(s, "")
- builder_s = s.getBuilder("b1")
- self.assertURLEqual(builder_s, "builders/b1")
-
- def testChange(self):
- s = self.status
- c = Change("user", ["foo.c"], "comments")
- self.master.change_svc.addChange(c)
- # TODO: something more like s.getChanges(), requires IChange and
- # an accessor in IStatus. The HTML page exists already, though
- self.assertURLEqual(c, "changes/1")
-
- def testBuild(self):
- # first we do some stuff so we'll have things to look at.
- s = self.status
- d = self.doBuild("b1")
- # maybe check IBuildSetStatus here?
- d.addCallback(self._testBuild_1)
- return d
-
- def _testBuild_1(self, res):
- s = self.status
- builder_s = s.getBuilder("b1")
- build_s = builder_s.getLastFinishedBuild()
- self.assertURLEqual(build_s, "builders/b1/builds/0")
- # no page for builder.getEvent(-1)
- step = build_s.getSteps()[0]
- self.assertURLEqual(step, "builders/b1/builds/0/steps/remote%20dummy")
- # maybe page for build.getTestResults?
- self.assertURLEqual(step.getLogs()[0],
- "builders/b1/builds/0/steps/remote%20dummy/logs/0")
-
-
-
-class Logfile(BaseWeb, RunMixin, unittest.TestCase):
- def setUp(self):
- config = """
-from buildbot.status import html
-from buildbot.process.factory import BasicBuildFactory
-from buildbot.buildslave import BuildSlave
-f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
-BuildmasterConfig = {
- 'slaves': [BuildSlave('bot1', 'passwd1')],
- 'schedulers': [],
- 'builders': [{'name': 'builder1', 'slavename': 'bot1',
- 'builddir':'workdir', 'factory':f1}],
- 'slavePortnum': 0,
- 'status': [html.WebStatus(http_port=0)],
- }
-"""
- if os.path.exists("test_logfile"):
- shutil.rmtree("test_logfile")
- os.mkdir("test_logfile")
- self.master = m = ConfiguredMaster("test_logfile", config)
- m.startService()
- # hack to find out what randomly-assigned port it is listening on
- port = self.find_webstatus(m).getPortnum()
- self.port = port
- # insert an event
-
- req = base.BuildRequest("reason", sourcestamp.SourceStamp(), 'test_builder')
- build1 = base.Build([req])
- bs = m.status.getBuilder("builder1").newBuild()
- bs.setReason("reason")
- bs.buildStarted(build1)
-
- step1 = BuildStep(name="setup")
- step1.setBuild(build1)
- bss = bs.addStepWithName("setup")
- step1.setStepStatus(bss)
- bss.stepStarted()
-
- log1 = step1.addLog("output")
- log1.addStdout("some stdout\n")
- log1.finish()
-
- log2 = step1.addHTMLLog("error", "<html>ouch</html>")
-
- log3 = step1.addLog("big")
- log3.addStdout("big log\n")
- for i in range(1000):
- log3.addStdout("a" * 500)
- log3.addStderr("b" * 500)
- log3.finish()
-
- log4 = step1.addCompleteLog("bigcomplete",
- "big2 log\n" + "a" * 1*1000*1000)
-
- log5 = step1.addLog("mixed")
- log5.addHeader("header content")
- log5.addStdout("this is stdout content")
- log5.addStderr("errors go here")
- log5.addEntry(5, "non-standard content on channel 5")
- log5.addStderr(" and some trailing stderr")
-
- d = defer.maybeDeferred(step1.step_status.stepFinished,
- builder.SUCCESS)
- bs.buildFinished()
- return d
-
- def getLogPath(self, stepname, logname):
- return ("/builders/builder1/builds/0/steps/%s/logs/%s" %
- (stepname, logname))
-
- def getLogURL(self, stepname, logname):
- return ("http://localhost:%d" % self.port
- + self.getLogPath(stepname, logname))
-
- def test_logfile1(self):
- d = client.getPage("http://localhost:%d/" % self.port)
- def _check(page):
- self.failUnless(page)
- d.addCallback(_check)
- return d
-
- def test_logfile2(self):
- logurl = self.getLogURL("setup", "output")
- d = client.getPage(logurl)
- def _check(logbody):
- self.failUnless(logbody)
- d.addCallback(_check)
- return d
-
- def test_logfile3(self):
- logurl = self.getLogURL("setup", "output")
- d = client.getPage(logurl + "/text")
- def _check(logtext):
- self.failUnlessEqual(logtext, "some stdout\n")
- d.addCallback(_check)
- return d
-
- def test_logfile4(self):
- logurl = self.getLogURL("setup", "error")
- d = client.getPage(logurl)
- def _check(logbody):
- self.failUnlessEqual(logbody, "<html>ouch</html>")
- d.addCallback(_check)
- return d
-
- def test_logfile5(self):
- # this is log3, which is about 1MB in size, made up of alternating
- # stdout/stderr chunks. buildbot-0.6.6, when run against
- # twisted-1.3.0, fails to resume sending chunks after the client
- # stalls for a few seconds, because of a recursive doWrite() call
- # that was fixed in twisted-2.0.0
- p = SlowReader("GET %s HTTP/1.0\r\n\r\n"
- % self.getLogPath("setup", "big"))
- cf = CFactory(p)
- c = reactor.connectTCP("localhost", self.port, cf)
- d = p.d
- def _check(res):
- self.failUnlessIn("big log", p.data)
- self.failUnlessIn("a"*100, p.data)
- self.failUnless(p.count > 1*1000*1000)
- d.addCallback(_check)
- return d
-
- def test_logfile6(self):
- # this is log4, which is about 1MB in size, one big chunk.
- # buildbot-0.6.6 dies as the NetstringReceiver barfs on the
- # saved logfile, because it was using one big chunk and exceeding
- # NetstringReceiver.MAX_LENGTH
- p = SlowReader("GET %s HTTP/1.0\r\n\r\n"
- % self.getLogPath("setup", "bigcomplete"))
- cf = CFactory(p)
- c = reactor.connectTCP("localhost", self.port, cf)
- d = p.d
- def _check(res):
- self.failUnlessIn("big2 log", p.data)
- self.failUnlessIn("a"*100, p.data)
- self.failUnless(p.count > 1*1000*1000)
- d.addCallback(_check)
- return d
-
- def test_logfile7(self):
- # this is log5, with mixed content on the tree standard channels
- # as well as on channel 5
-
- class SpanParser(HTMLParser):
- '''Parser subclass to gather all the log spans from the log page'''
- def __init__(self, test):
- self.spans = []
- self.test = test
- self.inSpan = False
- HTMLParser.__init__(self)
-
- def handle_starttag(self, tag, attrs):
- if tag == 'span':
- self.inSpan = True
- cls = attrs[0]
- self.test.failUnless(cls[0] == 'class')
- self.spans.append([cls[1],''])
-
- def handle_data(self, data):
- if self.inSpan:
- self.spans[-1][1] += data
-
- def handle_endtag(self, tag):
- if tag == 'span':
- self.inSpan = False
-
- logurl = self.getLogURL("setup", "mixed")
- d = client.getPage(logurl, timeout=2)
- def _check(logbody):
- try:
- p = SpanParser(self)
- p.feed(logbody)
- p.close
- except Exception, e:
- print e
- self.failUnlessEqual(len(p.spans), 4)
- self.failUnlessEqual(p.spans[0][0], 'header')
- self.failUnlessEqual(p.spans[0][1], 'header content')
- self.failUnlessEqual(p.spans[1][0], 'stdout')
- self.failUnlessEqual(p.spans[1][1], 'this is stdout content')
- self.failUnlessEqual(p.spans[2][0], 'stderr')
- self.failUnlessEqual(p.spans[2][1], 'errors go here')
- self.failUnlessEqual(p.spans[3][0], 'stderr')
- self.failUnlessEqual(p.spans[3][1], ' and some trailing stderr')
- def _fail(err):
- pass
- d.addCallback(_check)
- d.addErrback(_fail)
- return d
diff --git a/buildbot/buildbot/test/test_webparts.py b/buildbot/buildbot/test/test_webparts.py
deleted file mode 100644
index 71dd59e..0000000
--- a/buildbot/buildbot/test/test_webparts.py
+++ /dev/null
@@ -1,141 +0,0 @@
-
-import os
-from twisted.trial import unittest
-from twisted.internet import defer
-from twisted.web import client
-from twisted.web.error import Error as WebError
-from buildbot.slave.commands import rmdirRecursive
-from buildbot.status import html
-from test_web import BaseWeb, base_config, ConfiguredMaster
-from buildbot.scripts import runner
-
-class Webparts(BaseWeb, unittest.TestCase):
-
- def find_webstatus(self, master):
- return filter(lambda child: isinstance(child, html.WebStatus),
- list(master))
-
- def startMaster(self, extraconfig):
- config = base_config + extraconfig
- rmdirRecursive("test_webparts")
- os.mkdir("test_webparts")
- runner.upgradeMaster({'basedir': "test_webparts",
- 'quiet': True,
- })
- self.master = m = ConfiguredMaster("test_webparts", config)
- m.startService()
- # hack to find out what randomly-assigned port it is listening on
- port = list(self.find_webstatus(m)[0])[0]._port.getHost().port
- self.baseurl = "http://localhost:%d/" % port
-
- def reconfigMaster(self, extraconfig):
- config = base_config + extraconfig
- d = self.master.loadConfig(config)
- def _done(res):
- m = self.master
- port = list(self.find_webstatus(m)[0])[0]._port.getHost().port
- self.baseurl = "http://localhost:%d/" % port
- d.addCallback(_done)
- return d
-
- def getAndCheck(self, url, substring, show=False):
- d = client.getPage(url)
- def _show_weberror(why):
- why.trap(WebError)
- self.fail("error for %s: %s" % (url, why))
- d.addErrback(_show_weberror)
- d.addCallback(self._getAndCheck, substring, show)
- return d
- def _getAndCheck(self, page, substring, show):
- if show:
- print page
- self.failUnlessIn(substring, page,
- "Couldn't find substring '%s' in page:\n%s" %
- (substring, page))
-
- def testInit(self):
- extraconfig = """
-from twisted.web import static
-ws = html.WebStatus(http_port=0)
-c['status'] = [ws]
-ws.putChild('child.html', static.Data('I am the child', 'text/plain'))
-"""
- self.startMaster(extraconfig)
- d = self.getAndCheck(self.baseurl + "child.html",
- "I am the child")
- return d
- testInit.timeout = 10
-
- def testStatic(self):
- extraconfig = """
-from twisted.web import static
-ws = html.WebStatus(http_port=0)
-c['status'] = [ws]
-ws.putChild('child.html', static.Data('I am the child', 'text/plain'))
-"""
- self.startMaster(extraconfig)
- os.mkdir(os.path.join("test_webparts", "public_html", "subdir"))
- f = open(os.path.join("test_webparts", "public_html", "foo.html"), "wt")
- f.write("see me foo\n")
- f.close()
- f = open(os.path.join("test_webparts", "public_html", "subdir",
- "bar.html"), "wt")
- f.write("see me subdir/bar\n")
- f.close()
- d = self.getAndCheck(self.baseurl + "child.html", "I am the child")
- d.addCallback(lambda res:
- self.getAndCheck(self.baseurl+"foo.html",
- "see me foo"))
- d.addCallback(lambda res:
- self.getAndCheck(self.baseurl+"subdir/bar.html",
- "see me subdir/bar"))
- return d
-
- def _check(self, res, suburl, substring, show=False):
- d = self.getAndCheck(self.baseurl + suburl, substring, show)
- return d
-
- def testPages(self):
- extraconfig = """
-ws = html.WebStatus(http_port=0)
-c['status'] = [ws]
-"""
- self.startMaster(extraconfig)
- d = defer.succeed(None)
- d.addCallback(self._do_page_tests)
- extraconfig2 = """
-ws = html.WebStatus(http_port=0, allowForce=True)
-c['status'] = [ws]
-"""
- d.addCallback(lambda res: self.reconfigMaster(extraconfig2))
- d.addCallback(self._do_page_tests)
- return d
-
- def _do_page_tests(self, res):
- d = defer.succeed(None)
- d.addCallback(self._check, "", "Welcome to the Buildbot")
- d.addCallback(self._check, "waterfall", "current activity")
- d.addCallback(self._check, "about", "Buildbot is a free software")
- d.addCallback(self._check, "changes", "PBChangeSource listener")
- d.addCallback(self._check, "buildslaves", "Build Slaves")
- d.addCallback(self._check, "one_line_per_build",
- "Last 20 finished builds")
- d.addCallback(self._check, "one_box_per_builder", "Latest builds")
- d.addCallback(self._check, "builders", "Builders")
- d.addCallback(self._check, "builders/builder1", "Builder: builder1")
- d.addCallback(self._check, "builders/builder1/builds", "") # dummy
- # TODO: the pages beyond here would be great to test, but that would
- # require causing a build to complete.
- #d.addCallback(self._check, "builders/builder1/builds/1", "")
- # it'd be nice to assert that the Build page has a "Stop Build" button
- #d.addCallback(self._check, "builders/builder1/builds/1/steps", "")
- #d.addCallback(self._check,
- # "builders/builder1/builds/1/steps/compile", "")
- #d.addCallback(self._check,
- # "builders/builder1/builds/1/steps/compile/logs", "")
- #d.addCallback(self._check,
- # "builders/builder1/builds/1/steps/compile/logs/stdio","")
- #d.addCallback(self._check,
- # "builders/builder1/builds/1/steps/compile/logs/stdio/text", "")
- return d
-
diff --git a/buildbot/buildbot/util.py b/buildbot/buildbot/util.py
deleted file mode 100644
index 071cf5f..0000000
--- a/buildbot/buildbot/util.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# -*- test-case-name: buildbot.test.test_util -*-
-
-from twisted.internet.defer import Deferred
-from twisted.spread import pb
-import time, re
-
-def naturalSort(l):
- """Returns a sorted copy of l, so that numbers in strings are sorted in the
- proper order.
-
- e.g. ['foo10', 'foo1', 'foo2'] will be sorted as ['foo1', 'foo2', 'foo10']
- instead of the default ['foo1', 'foo10', 'foo2']"""
- l = l[:]
- def try_int(s):
- try:
- return int(s)
- except:
- return s
- def key_func(item):
- return [try_int(s) for s in re.split('(\d+)', item)]
- l.sort(key=key_func)
- return l
-
-def now():
- #return int(time.time())
- return time.time()
-
-def earlier(old, new):
- # minimum of two things, but "None" counts as +infinity
- if old:
- if new < old:
- return new
- return old
- return new
-
-def later(old, new):
- # maximum of two things, but "None" counts as -infinity
- if old:
- if new > old:
- return new
- return old
- return new
-
-def formatInterval(eta):
- eta_parts = []
- if eta > 3600:
- eta_parts.append("%d hrs" % (eta / 3600))
- eta %= 3600
- if eta > 60:
- eta_parts.append("%d mins" % (eta / 60))
- eta %= 60
- eta_parts.append("%d secs" % eta)
- return ", ".join(eta_parts)
-
-class CancelableDeferred(Deferred):
- """I am a version of Deferred that can be canceled by calling my
- .cancel() method. After being canceled, no callbacks or errbacks will be
- executed.
- """
- def __init__(self):
- Deferred.__init__(self)
- self.canceled = 0
- def cancel(self):
- self.canceled = 1
- def _runCallbacks(self):
- if self.canceled:
- self.callbacks = []
- return
- Deferred._runCallbacks(self)
-
-def ignoreStaleRefs(failure):
- """d.addErrback(util.ignoreStaleRefs)"""
- r = failure.trap(pb.DeadReferenceError, pb.PBConnectionLost)
- return None
-
-class _None:
- pass
-
-class ComparableMixin:
- """Specify a list of attributes that are 'important'. These will be used
- for all comparison operations."""
-
- compare_attrs = []
-
- def __hash__(self):
- alist = [self.__class__] + \
- [getattr(self, name, _None) for name in self.compare_attrs]
- return hash(tuple(alist))
-
- def __cmp__(self, them):
- result = cmp(type(self), type(them))
- if result:
- return result
-
- result = cmp(self.__class__, them.__class__)
- if result:
- return result
-
- assert self.compare_attrs == them.compare_attrs
- self_list= [getattr(self, name, _None) for name in self.compare_attrs]
- them_list= [getattr(them, name, _None) for name in self.compare_attrs]
- return cmp(self_list, them_list)