Mirror of https://github.com/mozilla/gecko-dev.git (synced 2024-11-02 07:05:24 +00:00)

Commit 01d14eea87

This commit was generated by cvs2svn to track changes on a CVS vendor
branch.
51  tools/buildbot/CREDITS  (Normal file)
@@ -0,0 +1,51 @@
This is a list of everybody who has contributed to Buildbot in some way, in
no particular order. Thanks everybody!

Scott Lamb
Olivier Bonnet
Mark Hammond
Gary Granger
Marius Gedminas
Paul Winkler
John O'Duinn
JP Calderone
Zandr Milewski
Niklaus Giger
Tobi Vollebregt
John Pye
Neal Norwitz
Anthony Baxter
AllMyData.com
Clement Stenac
Kevin Turner
Steven Walter
Dobes Vandermeer
Brad Hards
Nathaniel Smith
Mark Dillavou
Thomas Vander Stichele
Bear
Brandon Philips
Nick Trout
Paul Warren
Rene Rivera
Baptiste Lepilleur
Gerald Combs
Yoz Grahame
Alexander Staubo
Elliot Murphy
Stephen Davis
Kirill Lapshin
Dave Peticolas
Andrew Bennetts
Olly Betts
Philipp Frauenfelder
James Knight
Albert Hofkamp
Brett Neely
Wade Brainerd
Nick Mathewson
Roy Rapoport
Mark Rowe
Ben Hearsum
Dave Liebreich
7331  tools/buildbot/ChangeLog  (Normal file)
File diff suppressed because it is too large.
14  tools/buildbot/MANIFEST.in  (Normal file)
@@ -0,0 +1,14 @@

include ChangeLog MANIFEST.in README README.w32 NEWS CREDITS
include docs/examples/*.cfg
include docs/buildbot.texinfo
include docs/buildbot.info
include docs/buildbot.html docs/images/*.png
include docs/epyrun docs/gen-reference
include buildbot/test/mail/* buildbot/test/subdir/*
include buildbot/scripts/sample.cfg
include buildbot/status/classic.css
include buildbot/clients/debug.glade
include buildbot/buildbot.png

include contrib/* contrib/windows/*
1917  tools/buildbot/NEWS  (Normal file)
File diff suppressed because it is too large.
30  tools/buildbot/PKG-INFO  (Normal file)
@@ -0,0 +1,30 @@
Metadata-Version: 1.0
Name: buildbot
Version: 0.7.5
Summary: BuildBot build automation system
Home-page: http://buildbot.sourceforge.net/
Author: Brian Warner
Author-email: warner-buildbot@lothar.com
License: GNU GPL
Description:
        The BuildBot is a system to automate the compile/test cycle required by
        most software projects to validate code changes. By automatically
        rebuilding and testing the tree each time something has changed, build
        problems are pinpointed quickly, before other developers are
        inconvenienced by the failure. The guilty developer can be identified
        and harassed without human intervention. By running the builds on a
        variety of platforms, developers who do not have the facilities to test
        their changes everywhere before checkin will at least know shortly
        afterwards whether they have broken the build or not. Warning counts,
        lint checks, image size, compile time, and other build parameters can
        be tracked over time, are more visible, and are therefore easier to
        improve.

Platform: UNKNOWN
Classifier: Development Status :: 4 - Beta
Classifier: Environment :: No Input/Output (Daemon)
Classifier: Environment :: Web Environment
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: GNU General Public License (GPL)
Classifier: Topic :: Software Development :: Build Tools
Classifier: Topic :: Software Development :: Testing
193  tools/buildbot/README  (Normal file)
@@ -0,0 +1,193 @@

BuildBot: build/test automation
http://buildbot.sourceforge.net/
Brian Warner <warner-buildbot @ lothar . com>


Abstract:

The BuildBot is a system to automate the compile/test cycle required by most
software projects to validate code changes. By automatically rebuilding and
testing the tree each time something has changed, build problems are
pinpointed quickly, before other developers are inconvenienced by the
failure. The guilty developer can be identified and harassed without human
intervention. By running the builds on a variety of platforms, developers
who do not have the facilities to test their changes everywhere before
checkin will at least know shortly afterwards whether they have broken the
build or not. Warning counts, lint checks, image size, compile time, and
other build parameters can be tracked over time, are more visible, and
are therefore easier to improve.

The overall goal is to reduce tree breakage and provide a platform to run
tests or code-quality checks that are too annoying or pedantic for any human
to waste their time with. Developers get immediate (and potentially public)
feedback about their changes, encouraging them to be more careful about
testing before checkin.


Features:

* run builds on a variety of slave platforms
* arbitrary build process: handles projects using C, Python, whatever
* minimal host requirements: python and Twisted
* slaves can be behind a firewall if they can still do checkout
* status delivery through web page, email, IRC, other protocols
* track builds in progress, provide estimated completion time
* flexible configuration by subclassing generic build process classes
* debug tools to force a new build, submit fake Changes, query slave status
* released under the GPL


DOCUMENTATION:

The PyCon paper has a good description of the overall architecture. It is
available in HTML form in docs/PyCon-2003/buildbot.html, or on the web page.

The User's Manual is in docs/buildbot.info, and the Installation chapter is
the best guide to use for setup instructions. The .texinfo source can also be
turned into printed documentation. An HTML representation is available on the
Buildbot home page.

REQUIREMENTS:

Python: http://www.python.org

Buildbot requires python-2.3 or later, and is primarily developed against
python-2.4 . It has *not* yet been tested against python-2.5 .

Twisted: http://twistedmatrix.com

Both the buildmaster and the buildslaves require Twisted-2.0.x or later.
As always, the most recent version is recommended. It has been tested
against Twisted-2.4.0 and Twisted SVN as of the date of release.

Twisted is delivered as a collection of subpackages. You'll need at least
"Twisted" (the core package), and you'll also want TwistedMail,
TwistedWeb, and TwistedWords (for sending email, serving a web status
page, and delivering build status via IRC, respectively). You might also
want TwistedConch (for the encrypted Manhole debug port). Note that
Twisted requires ZopeInterface to be installed as well.

ACCESSORIES:

CVSToys: http://purl.net/net/CVSToys

If your buildmaster uses FreshCVSSource to receive change notification
from a cvstoys daemon, it will require CVSToys be installed (tested with
CVSToys-1.0.10). If it doesn't use that source (i.e. if you only use
a mail-parsing change source, or the SVN notification script), you will
not need CVSToys.

INSTALLATION:

Please read the User's Manual in docs/buildbot.info or docs/buildbot.html for
complete instructions. This file only contains a brief summary.

RUNNING THE UNIT TESTS

If you would like to run the unit test suite, use a command like this:

PYTHONPATH=. trial buildbot.test

This should run up to 221 tests, depending upon what VC tools you have
installed. On my desktop machine it takes about six minutes to complete.
Nothing should fail (at least under unix), a few might be skipped. If any of
the tests fail, you should stop and investigate the cause before continuing
the installation process, as it will probably be easier to track down the bug
early. There are a few known failures under windows and OS-X, but please
report these to the mailing list so we can isolate and resolve them.

Neither CVS nor SVN supports file-based repositories on a network filesystem
(or network drives, in Windows parlance). Therefore it is recommended to run
all unit tests on local hard disks.

INSTALLING THE LIBRARIES:

The first step is to install the python libraries. This package uses the
standard 'distutils' module, so installing them is usually a matter of
doing something like:

python ./setup.py install

To test this, shift to a different directory (like /tmp), and run:

buildbot --version

If it announces the versions of Buildbot and Twisted, the install went ok.


SETTING UP A BUILD SLAVE:

If you want to run a build slave, you need to obtain the following pieces of
information from the administrator of the buildmaster you intend to connect
to:

your buildslave's name
the password assigned to your buildslave
the hostname and port number of the buildmaster, i.e. example.com:8007

You also need to pick a working directory for the buildslave. All commands
will be run inside this directory.

Now run the 'buildbot' command as follows:

buildbot create-slave WORKDIR MASTERHOST:PORT SLAVENAME PASSWORD

This will create a file called "buildbot.tac", which bundles up all the state
needed by the build slave application. Twisted has a tool called "twistd"
which knows how to load these saved applications and start running them.
twistd takes care of logging and daemonization (running the program in the
background). /usr/bin/buildbot is a front end which runs twistd for you.

Once you've set up the directory with the .tac file, you start it running
like this:

buildbot start WORKDIR

This will start the build slave in the background and finish, so you don't
need to put it in the background yourself with "&". The process ID of the
background task is written to a file called "twistd.pid", and all output from
the program is written to a log file named "twistd.log". Look in twistd.log
to make sure the buildslave has started.

To shut down the build slave, use:

buildbot stop WORKDIR


RUNNING BEHIND A NAT BOX:

Some network environments will not properly maintain a TCP connection that
appears to be idle. NAT boxes which do some form of connection tracking may
drop the port mapping if it looks like the TCP session has been idle for too
long. The buildslave attempts to turn on TCP "keepalives" (supported by
Twisted 1.0.6 and later), and if these cannot be activated, it uses
application level keepalives (which send a dummy message to the build master
on a periodic basis). The TCP keepalive is typically sent at intervals of
about 2 hours, and is configurable through the kernel. The application-level
keepalive defaults to running once every 10 minutes.

To manually turn on application-level keepalives, or to set them to use some
other interval, add "--keepalive NNN" to the 'buildbot slave' command line.
NNN is the number of seconds between keepalives. Use as large a value as your
NAT box allows to reduce the amount of unnecessary traffic on the wire. 600
seconds (10 minutes) is a reasonable value.


SETTING UP A BUILD MASTER:

Please read the user's manual for instructions. The short form is that you
use 'buildbot create-master MASTERDIR' to create the base directory, then you
edit the 'master.cfg' file to configure the buildmaster. Once this is ready,
you use 'buildbot start MASTERDIR' to launch it.

A sample configuration file will be created for you in WORKDIR/master.cfg .
There are more examples in docs/examples/, and plenty of documentation in the
user's manual. Everything is controlled by the config file.


SUPPORT:

Please send questions, bugs, patches, etc, to the buildbot-devel mailing
list reachable through http://buildbot.sourceforge.net/, so that everyone
can see them.
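To make the "edit the 'master.cfg' file" step above concrete, a stripped-down configuration might look like the sketch below. This is only an illustration under the 0.7.x-era config layout used by the bundled sample.cfg; the hostnames, slave name, passwords, ports, and builder name are hypothetical placeholders, not values from this commit.

# master.cfg -- minimal sketch (hypothetical names and passwords)
c = BuildmasterConfig = {}

c['bots'] = [("slave1", "slavepassword")]   # (name, password) for each buildslave
c['slavePortnum'] = 9989                    # port the buildslaves connect to

from buildbot.changes.freshcvs import FreshCVSSource
c['sources'] = [FreshCVSSource("cvs.example.org", 4519,
                               "freshcvs-user", "freshcvs-password",
                               prefix="myproject/")]

from buildbot.scheduler import Scheduler
c['schedulers'] = [Scheduler("quick", branch=None, treeStableTimer=60,
                             builderNames=["linux-full"])]

from buildbot.process import step, factory
s = factory.s
f = factory.BuildFactory([
    s(step.CVS, cvsroot=":pserver:anon@cvs.example.org:/cvsroot",
      cvsmodule="myproject", mode="update"),
    s(step.Compile, command=["make", "all"]),
    s(step.Test, command=["make", "check"]),
])
c['builders'] = [{'name': "linux-full", 'slavename': "slave1",
                  'builddir': "linux-full", 'factory': f}]

c['projectName'] = "MyProject"
c['buildbotURL'] = "http://buildbot.example.org/"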
95  tools/buildbot/README.w32  (Normal file)
@@ -0,0 +1,95 @@
Several users have reported success in running a buildslave under Windows.
The following list of steps might help you accomplish the same. They are a
list of what I did as a unix guy struggling to make a winXP box run the
buildbot unit tests. When I was done, most of the unit tests passed.

If you discover things that are missing or incorrect, please send your
corrections to the buildbot-devel mailing list (archives and subscription
information are available at http://buildbot.sourceforge.net).

Many thanks to Mike "Bear" Taylor for developing this list.


0. Check to make sure your PATHEXT environment variable has ";.PY" in
it -- if not, set your global environment to include it.

Control Panels / System / Advanced / Environment Variables / System variables

1. Install python -- 2.4 -- http://python.org
* run win32 installer - no special options needed so far

2. install zope interface package -- 3.0.1final --
http://www.zope.org/Products/ZopeInterface
* run win32 installer - it should auto-detect your python 2.4
installation

3. python for windows extensions -- build 203 --
http://pywin32.sourceforge.net/
* run win32 installer - it should auto-detect your python 2.4
installation

the installer complains about a missing DLL. Download mfc71.dll from the
site mentioned in the warning
(http://starship.python.net/crew/mhammond/win32/) and move it into
c:\Python24\DLLs

4. at this point, to preserve my own sanity, I grabbed cygwin.com's setup.exe
and started it. It behaves a lot like dselect. I installed bash and other
tools (but *not* python). I added C:\cygwin\bin to PATH, allowing me to
use tar, md5sum, cvs, all the usual stuff. I also installed emacs, going
from the notes at http://www.gnu.org/software/emacs/windows/ntemacs.html .
Their FAQ at http://www.gnu.org/software/emacs/windows/faq3.html#install
has a note on how to swap CapsLock and Control.

I also modified PATH (in the same place as PATHEXT) to include C:\Python24
and C:\Python24\Scripts . This will allow 'python' and (eventually) 'trial'
to work in a regular command shell.

5. twisted -- 2.0 -- http://twistedmatrix.com/projects/core/
* unpack tarball and run
python setup.py install
Note: if you want to test your setup - run:
python c:\python24\Scripts\trial.py -o -R twisted
(the -o will format the output for console and the "-R twisted" will
recursively run all unit tests)

I had to edit Twisted (core)'s setup.py, to make detectExtensions() return
an empty list before running builder._compile_helper(). Apparently the test
it uses to detect if the (optional) C modules can be compiled causes the
install process to simply quit without actually installing anything.

I installed several packages: core, Lore, Mail, Web, and Words. They all got
copied to C:\Python24\Lib\site-packages\

At this point

trial --version

works, so 'trial -o -R twisted' will run the Twisted test suite. Note that
this is not necessarily setting PYTHONPATH, so it may be running the test
suite that was installed, not the one in the current directory.

6. I used CVS to grab a copy of the latest Buildbot sources. To run the
tests, you must first add the buildbot directory to PYTHONPATH. Windows
does not appear to have a Bourne-shell-style syntax to set a variable just
for a single command, so you have to set it once and remember it will
affect all commands for the lifetime of that shell session.

set PYTHONPATH=.
trial -o -r win32 buildbot.test

To run against both buildbot-CVS and, say, Twisted-SVN, do:

set PYTHONPATH=.;C:\path to\Twisted-SVN


All commands are done using the normal cmd.exe command shell. As of
buildbot-0.6.4, only one unit test fails (test_webPathname_port) when you run
under the 'win32' reactor. (if you run under the default reactor, many of the
child-process-spawning commands fail, but test_webPathname_port passes. go
figure.)

Actually setting up a buildslave is not yet covered by this document. Patches
gladly accepted.

-Brian
4  tools/buildbot/bin/buildbot  (Executable file)
@@ -0,0 +1,4 @@
#!/usr/bin/python

from buildbot.scripts import runner
runner.run()
3  tools/buildbot/buildbot/__init__.py  (Normal file)
@@ -0,0 +1,3 @@
#! /usr/bin/python

version = "0.7.5"
BIN  tools/buildbot/buildbot/buildbot.png  (Normal file, binary, 783 B; not shown)
75  tools/buildbot/buildbot/buildset.py  (Normal file)
@@ -0,0 +1,75 @@

from buildbot.process import base
from buildbot.status import builder


class BuildSet:
    """I represent a set of potential Builds, all of the same source tree,
    across a specified list of Builders. I can represent a build of a
    specific version of the source tree (named by source.branch and
    source.revision), or a build of a certain set of Changes
    (source.changes=list)."""

    def __init__(self, builderNames, source, reason=None, bsid=None):
        """
        @param source: a L{buildbot.sourcestamp.SourceStamp}
        """
        self.builderNames = builderNames
        self.source = source
        self.reason = reason
        self.stillHopeful = True
        self.status = bss = builder.BuildSetStatus(source, reason,
                                                   builderNames, bsid)

    def waitUntilSuccess(self):
        return self.status.waitUntilSuccess()
    def waitUntilFinished(self):
        return self.status.waitUntilFinished()

    def start(self, builders):
        """This is called by the BuildMaster to actually create and submit
        the BuildRequests."""
        self.requests = []
        reqs = []

        # create the requests
        for b in builders:
            req = base.BuildRequest(self.reason, self.source, b.name)
            reqs.append((b, req))
            self.requests.append(req)
            d = req.waitUntilFinished()
            d.addCallback(self.requestFinished, req)

        # tell our status about them
        req_statuses = [req.status for req in self.requests]
        self.status.setBuildRequestStatuses(req_statuses)

        # now submit them
        for b,req in reqs:
            b.submitBuildRequest(req)

    def requestFinished(self, buildstatus, req):
        # TODO: this is where individual build status results are aggregated
        # into a BuildSet-wide status. Consider making a rule that says one
        # WARNINGS results in the overall status being WARNINGS too. The
        # current rule is that any FAILURE means FAILURE, otherwise you get
        # SUCCESS.
        self.requests.remove(req)
        results = buildstatus.getResults()
        if results == builder.FAILURE:
            self.status.setResults(results)
            if self.stillHopeful:
                # oh, cruel reality cuts deep. no joy for you. This is the
                # first failure. This flunks the overall BuildSet, so we can
                # notify success watchers that they aren't going to be happy.
                self.stillHopeful = False
                self.status.giveUpHope()
                self.status.notifySuccessWatchers()
        if not self.requests:
            # that was the last build, so we can notify finished watchers. If
            # we haven't failed by now, we can claim success.
            if self.stillHopeful:
                self.status.setResults(builder.SUCCESS)
                self.status.notifySuccessWatchers()
            self.status.notifyFinishedWatchers()
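For orientation, the BuildMaster drives this class roughly as sketched below. This is a hypothetical illustration, not code from this commit: the builder names and the SourceStamp arguments are placeholders, and the SourceStamp constructor shape is assumed from the docstring's reference to buildbot.sourcestamp.SourceStamp.

# hypothetical sketch of how the buildmaster uses a BuildSet
from buildbot.sourcestamp import SourceStamp
from buildbot.buildset import BuildSet

ss = SourceStamp(branch="HEAD", revision=None)        # build the current tip
bs = BuildSet(["linux-full", "win32-full"], ss, reason="forced build")

d = bs.waitUntilFinished()   # Deferred that fires once every BuildRequest is done
bs.start(builders)           # 'builders' are the master's Builder objects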
0  tools/buildbot/buildbot/changes/__init__.py  (Normal file)
13  tools/buildbot/buildbot/changes/base.py  (Normal file)
@@ -0,0 +1,13 @@
#! /usr/bin/python

from twisted.application import service

from buildbot.twcompat import implements
from buildbot.interfaces import IChangeSource
from buildbot import util

class ChangeSource(service.Service, util.ComparableMixin):
    if implements:
        implements(IChangeSource)
    else:
        __implements__ = IChangeSource, service.Service.__implements__
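This base class is what the concrete pollers below subclass. As a minimal, hypothetical sketch (the class name and push() method are invented for illustration), a custom change source only needs a describe() method and a way to hand Change objects to its parent ChangeMaster, following the same pattern BonsaiPoller uses:

# hypothetical minimal change source built on ChangeSource
from buildbot.changes import base, changes

class DummySource(base.ChangeSource):
    """Submit a canned Change whenever push() is called (experiments only)."""

    def describe(self):
        return "dummy change source"

    def push(self, who, files, comments):
        c = changes.Change(who, files, comments)
        self.parent.addChange(c)   # deliver the Change to the ChangeMaster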
300  tools/buildbot/buildbot/changes/bonsaipoller.py  (Normal file)
@@ -0,0 +1,300 @@
import time
from urllib import urlopen
from xml.dom import minidom, Node

from twisted.python import log, failure
from twisted.internet import defer, reactor
from twisted.internet.task import LoopingCall

from buildbot.changes import base, changes

class InvalidResultError(Exception):
    def __init__(self, value="InvalidResultError"):
        self.value = value
    def __str__(self):
        return repr(self.value)

class EmptyResult(Exception):
    pass

class NoMoreCiNodes(Exception):
    pass

class NoMoreFileNodes(Exception):
    pass

class BonsaiResult:
    """I hold a list of CiNodes"""
    def __init__(self, nodes=[]):
        self.nodes = nodes

    def __cmp__(self, other):
        if len(self.nodes) != len(other.nodes):
            return False
        for i in range(len(self.nodes)):
            if self.nodes[i].log != other.nodes[i].log \
               or self.nodes[i].who != other.nodes[i].who \
               or self.nodes[i].date != other.nodes[i].date \
               or len(self.nodes[i].files) != len(other.nodes[i].files):
                return -1

            for j in range(len(self.nodes[i].files)):
                if self.nodes[i].files[j].revision \
                   != other.nodes[i].files[j].revision \
                   or self.nodes[i].files[j].filename \
                   != other.nodes[i].files[j].filename:
                    return -1

        return 0

class CiNode:
    """I hold information about one <ci> node, including a list of files"""
    def __init__(self, log="", who="", date=0, files=[]):
        self.log = log
        self.who = who
        self.date = date
        self.files = files

class FileNode:
    """I hold information about one <f> node"""
    def __init__(self, revision="", filename=""):
        self.revision = revision
        self.filename = filename

class BonsaiParser:
    """I parse the XML result from a bonsai cvsquery."""

    def __init__(self, bonsaiQuery):
        try:
            self.dom = minidom.parse(bonsaiQuery)
        except:
            raise InvalidResultError("Malformed XML in result")

        self.ciNodes = self.dom.getElementsByTagName("ci")
        self.currentCiNode = None # filled in by _nextCiNode()
        self.fileNodes = None # filled in by _nextCiNode()
        self.currentFileNode = None # filled in by _nextFileNode()
        self.bonsaiResult = self._parseData()

    def getData(self):
        return self.bonsaiResult

    def _parseData(self):
        """Returns data from a Bonsai cvsquery in a BonsaiResult object"""
        nodes = []
        try:
            while self._nextCiNode():
                files = []
                try:
                    while self._nextFileNode():
                        files.append(FileNode(self._getRevision(),
                                              self._getFilename()))
                except NoMoreFileNodes:
                    pass
                except InvalidResultError:
                    raise
                nodes.append(CiNode(self._getLog(), self._getWho(),
                                    self._getDate(), files))

        except NoMoreCiNodes:
            pass
        except InvalidResultError, EmptyResult:
            raise

        return BonsaiResult(nodes)


    def _nextCiNode(self):
        """Iterates to the next <ci> node and fills self.fileNodes with
        child <f> nodes"""
        try:
            self.currentCiNode = self.ciNodes.pop(0)
            if len(self.currentCiNode.getElementsByTagName("files")) > 1:
                raise InvalidResultError("Multiple <files> for one <ci>")

            self.fileNodes = self.currentCiNode.getElementsByTagName("f")
        except IndexError:
            # if there were zero <ci> nodes in the result
            if not self.currentCiNode:
                raise EmptyResult
            else:
                raise NoMoreCiNodes

        return True

    def _nextFileNode(self):
        """Iterates to the next <f> node"""
        try:
            self.currentFileNode = self.fileNodes.pop(0)
        except IndexError:
            raise NoMoreFileNodes

        return True

    def _getLog(self):
        """Returns the log of the current <ci> node"""
        logs = self.currentCiNode.getElementsByTagName("log")
        if len(logs) < 1:
            raise InvalidResultError("No log present")
        elif len(logs) > 1:
            raise InvalidResultError("Multiple logs present")

        return logs[0].firstChild.data

    def _getWho(self):
        """Returns the e-mail address of the committer"""
        # convert unicode string to regular string
        return str(self.currentCiNode.getAttribute("who"))

    def _getDate(self):
        """Returns the date (unix time) of the commit"""
        # convert unicode number to regular one
        try:
            commitDate = int(self.currentCiNode.getAttribute("date"))
        except ValueError:
            raise InvalidResultError

        return commitDate

    def _getFilename(self):
        """Returns the filename of the current <f> node"""
        try:
            filename = self.currentFileNode.firstChild.data
        except AttributeError:
            raise InvalidResultError("Missing filename")

        return filename

    def _getRevision(self):
        """Returns the revision of the current <f> node"""
        rev = self.currentFileNode.getAttribute("rev")
        if rev == "":
            raise InvalidResultError("A revision was missing from a file")

        return rev


class BonsaiPoller(base.ChangeSource):
    """This source will poll a bonsai server for changes and submit
    them to the change master."""

    compare_attrs = ["bonsaiURL", "pollInterval", "tree",
                     "module", "branch", "cvsroot"]

    parent = None # filled in when we're added
    loop = None
    volatile = ['loop']
    working = False

    def __init__(self, bonsaiURL, module, branch, tree="default",
                 cvsroot="/cvsroot", pollInterval=30):
        """
        @type  bonsaiURL:    string
        @param bonsaiURL:    The base URL of the Bonsai server
                             (ie. http://bonsai.mozilla.org)
        @type  module:       string
        @param module:       The module to look for changes in. Commonly
                             this is 'all'
        @type  branch:       string
        @param branch:       The branch to look for changes in. This must
                             match the
                             'branch' option for the Scheduler.
        @type  tree:         string
        @param tree:         The tree to look for changes in. Commonly this
                             is 'all'
        @type  cvsroot:      string
        @param cvsroot:      The cvsroot of the repository. Usually this is
                             '/cvsroot'
        @type  pollInterval: int
        @param pollInterval: The time (in seconds) between queries for changes
        """

        self.bonsaiURL = bonsaiURL
        self.module = module
        self.branch = branch
        self.tree = tree
        self.cvsroot = cvsroot
        self.pollInterval = pollInterval
        self.lastChange = time.time()
        self.lastPoll = time.time()

    def startService(self):
        self.loop = LoopingCall(self.poll)
        base.ChangeSource.startService(self)

        reactor.callLater(0, self.loop.start, self.pollInterval)

    def stopService(self):
        self.loop.stop()
        return base.ChangeSource.stopService(self)

    def describe(self):
        str = ""
        str += "Getting changes from the Bonsai service running at %s " \
                % self.bonsaiURL
        str += "<br>Using tree: %s, branch: %s, and module: %s" % (self.tree, \
                self.branch, self.module)
        return str

    def poll(self):
        if self.working:
            log.msg("Not polling Bonsai because last poll is still working")
        else:
            self.working = True
            d = self._get_changes()
            d.addCallback(self._process_changes)
            d.addBoth(self._finished)
        return

    def _finished(self, res):
        assert self.working
        self.working = False

        # check for failure
        if isinstance(res, failure.Failure):
            log.msg("Bonsai poll failed: %s" % res)
        return res

    def _make_url(self):
        args = ["treeid=%s" % self.tree, "module=%s" % self.module,
                "branch=%s" % self.branch, "branchtype=match",
                "sortby=Date", "date=explicit",
                "mindate=%d" % self.lastChange,
                "maxdate=%d" % int(time.time()),
                "cvsroot=%s" % self.cvsroot, "xml=1"]
        # build the bonsai URL
        url = self.bonsaiURL
        url += "/cvsquery.cgi?"
        url += "&".join(args)

        return url

    def _get_changes(self):
        url = self._make_url()
        log.msg("Polling Bonsai tree at %s" % url)

        self.lastPoll = time.time()
        # get the page, in XML format
        return defer.maybeDeferred(urlopen, url)

    def _process_changes(self, query):
        files = []
        try:
            bp = BonsaiParser(query)
            result = bp.getData()
        except InvalidResultError, e:
            log.msg("Could not process Bonsai query: " + e.value)
            return
        except EmptyResult:
            return

        for cinode in result.nodes:
            for file in cinode.files:
                files.append(file.filename+' (revision '+file.revision+')')
            c = changes.Change(who = cinode.who,
                               files = files,
                               comments = cinode.log,
                               when = cinode.date,
                               branch = self.branch)
            self.parent.addChange(c)
            self.lastChange = self.lastPoll
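A poller like this is attached from the buildmaster's configuration. The fragment below is a hypothetical illustration only; the URL, tree, branch, and interval are placeholders, and it assumes a master.cfg that keeps its change sources in c['sources'] as in the sketch given after the README above. The constructor arguments follow the __init__ signature shown in this file.

# hypothetical master.cfg fragment attaching the Bonsai poller
from buildbot.changes.bonsaipoller import BonsaiPoller

c['sources'].append(BonsaiPoller(bonsaiURL="http://bonsai.mozilla.org",
                                 module="all", branch="HEAD",
                                 tree="Firefox", cvsroot="/cvsroot",
                                 pollInterval=300))   # poll every 5 minutes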
264  tools/buildbot/buildbot/changes/changes.py  (Normal file)
@@ -0,0 +1,264 @@
#! /usr/bin/python

from __future__ import generators
import sys, os, time
try:
    import cPickle
    pickle = cPickle
except ImportError:
    import pickle

from twisted.python import log
from twisted.internet import defer
from twisted.application import service
from twisted.web import html

from buildbot import interfaces, util
from buildbot.twcompat import implements, providedBy

html_tmpl = """
<p>Changed by: <b>%(who)s</b><br />
Changed at: <b>%(at)s</b><br />
%(branch)s
%(revision)s
<br />

Changed files:
%(files)s

Comments:
%(comments)s
</p>
"""

class Change:
    """I represent a single change to the source tree. This may involve
    several files, but they are all changed by the same person, and there is
    a change comment for the group as a whole.

    If the version control system supports sequential repository- (or
    branch-) wide change numbers (like SVN, P4, and Arch), then revision=
    should be set to that number. The highest such number will be used at
    checkout time to get the correct set of files.

    If it does not (like CVS), when= should be set to the timestamp (seconds
    since epoch, as returned by time.time()) when the change was made. when=
    will be filled in for you (to the current time) if you omit it, which is
    suitable for ChangeSources which have no way of getting more accurate
    timestamps.

    Changes should be submitted to ChangeMaster.addChange() in
    chronologically increasing order. Out-of-order changes will probably
    cause the html.Waterfall display to be corrupted."""

    if implements:
        implements(interfaces.IStatusEvent)
    else:
        __implements__ = interfaces.IStatusEvent,

    number = None

    links = []
    branch = None
    revision = None # used to create a source-stamp

    def __init__(self, who, files, comments, isdir=0, links=[],
                 revision=None, when=None, branch=None):
        self.who = who
        self.files = files
        self.comments = comments
        self.isdir = isdir
        self.links = links
        self.revision = revision
        if when is None:
            when = util.now()
        self.when = when
        self.branch = branch

    def asText(self):
        data = ""
        data += self.getFileContents()
        data += "At: %s\n" % self.getTime()
        data += "Changed By: %s\n" % self.who
        data += "Comments: %s\n\n" % self.comments
        return data

    def asHTML(self):
        links = []
        for file in self.files:
            link = filter(lambda s: s.find(file) != -1, self.links)
            if len(link) == 1:
                # could get confused
                links.append('<a href="%s"><b>%s</b></a>' % (link[0], file))
            else:
                links.append('<b>%s</b>' % file)
        revision = ""
        if self.revision:
            revision = "Revision: <b>%s</b><br />\n" % self.revision
        branch = ""
        if self.branch:
            branch = "Branch: <b>%s</b><br />\n" % self.branch

        kwargs = { 'who'     : html.escape(self.who),
                   'at'      : self.getTime(),
                   'files'   : html.UL(links) + '\n',
                   'revision': revision,
                   'branch'  : branch,
                   'comments': html.PRE(self.comments) }
        return html_tmpl % kwargs

    def getTime(self):
        if not self.when:
            return "?"
        return time.strftime("%a %d %b %Y %H:%M:%S",
                             time.localtime(self.when))

    def getTimes(self):
        return (self.when, None)

    def getText(self):
        return [html.escape(self.who)]
    def getColor(self):
        return "white"
    def getLogs(self):
        return {}

    def getFileContents(self):
        data = ""
        if len(self.files) == 1:
            if self.isdir:
                data += "Directory: %s\n" % self.files[0]
            else:
                data += "File: %s\n" % self.files[0]
        else:
            data += "Files:\n"
            for f in self.files:
                data += " %s\n" % f
        return data

class ChangeMaster(service.MultiService):

    """This is the master-side service which receives file change
    notifications from CVS. It keeps a log of these changes, enough to
    provide for the HTML waterfall display, and to tell
    temporarily-disconnected bots what they missed while they were
    offline.

    Change notifications come from two different kinds of sources. The first
    is a PB service (servicename='changemaster', perspectivename='change'),
    which provides a remote method called 'addChange', which should be
    called with a dict that has keys 'filename' and 'comments'.

    The second is a list of objects derived from the ChangeSource class.
    These are added with .addSource(), which also sets the .changemaster
    attribute in the source to point at the ChangeMaster. When the
    application begins, these will be started with .start() . At shutdown
    time, they will be terminated with .stop() . They must be persistable.
    They are expected to call self.changemaster.addChange() with Change
    objects.

    There are several different variants of the second type of source:

    - L{buildbot.changes.mail.MaildirSource} watches a maildir for CVS
      commit mail. It uses DNotify if available, or polls every 10
      seconds if not. It parses incoming mail to determine what files
      were changed.

    - L{buildbot.changes.freshcvs.FreshCVSSource} makes a PB
      connection to the CVSToys 'freshcvs' daemon and relays any
      changes it announces.

    """

    debug = False
    # todo: use Maildir class to watch for changes arriving by mail

    def __init__(self):
        service.MultiService.__init__(self)
        self.changes = []
        # self.basedir must be filled in by the parent
        self.nextNumber = 1

    def addSource(self, source):
        assert providedBy(source, interfaces.IChangeSource)
        assert providedBy(source, service.IService)
        if self.debug:
            print "ChangeMaster.addSource", source
        source.setServiceParent(self)

    def removeSource(self, source):
        assert source in self
        if self.debug:
            print "ChangeMaster.removeSource", source, source.parent
        d = defer.maybeDeferred(source.disownServiceParent)
        return d

    def addChange(self, change):
        """Deliver a file change event. The event should be a Change object.
        This method will timestamp the object as it is received."""
        log.msg("adding change, who %s, %d files, rev=%s, branch=%s, "
                "comments %s" % (change.who, len(change.files),
                                 change.revision, change.branch,
                                 change.comments))
        change.number = self.nextNumber
        self.nextNumber += 1
        self.changes.append(change)
        self.parent.addChange(change)
        # TODO: call pruneChanges after a while

    def pruneChanges(self):
        self.changes = self.changes[-100:] # or something

    def eventGenerator(self):
        for i in range(len(self.changes)-1, -1, -1):
            c = self.changes[i]
            yield c

    def getChangeNumbered(self, num):
        if not self.changes:
            return None
        first = self.changes[0].number
        if first + len(self.changes)-1 != self.changes[-1].number:
            log.msg(self,
                    "lost a change somewhere: [0] is %d, [%d] is %d" % \
                    (self.changes[0].number,
                     len(self.changes) - 1,
                     self.changes[-1].number))
            for c in self.changes:
                log.msg("c[%d]: " % c.number, c)
            return None
        offset = num - first
        log.msg(self, "offset", offset)
        return self.changes[offset]

    def __getstate__(self):
        d = service.MultiService.__getstate__(self)
        del d['parent']
        del d['services'] # lose all children
        del d['namedServices']
        return d

    def __setstate__(self, d):
        self.__dict__ = d
        # self.basedir must be set by the parent
        self.services = [] # they'll be repopulated by readConfig
        self.namedServices = {}


    def saveYourself(self):
        filename = os.path.join(self.basedir, "changes.pck")
        tmpfilename = filename + ".tmp"
        try:
            pickle.dump(self, open(tmpfilename, "wb"))
            if sys.platform == 'win32':
                # windows cannot rename a file on top of an existing one
                if os.path.exists(filename):
                    os.unlink(filename)
            os.rename(tmpfilename, filename)
        except Exception, e:
            log.msg("unable to save changes")
            log.err()

    def stopService(self):
        self.saveYourself()
        return service.MultiService.stopService(self)
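To make the Change/ChangeMaster contract concrete: a change source builds a Change from whatever it parsed and hands it to ChangeMaster.addChange(), which numbers it and passes it up to the buildmaster. The snippet below is a hypothetical illustration using the constructor shown above; the author, file, comment, and revision are placeholder values.

# hypothetical construction of a Change, as a change source would do
from buildbot.changes.changes import Change

c = Change(who="warner", files=["buildbot/changes/changes.py"],
           comments="prune old changes more aggressively",
           revision="1.42", branch=None)
print c.asText()   # "File: ...", "At: ...", "Changed By: warner", "Comments: ..."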
101  tools/buildbot/buildbot/changes/dnotify.py  (Normal file)
@@ -0,0 +1,101 @@
#! /usr/bin/python

import fcntl, signal, os

class DNotify_Handler:
    def __init__(self):
        self.watchers = {}
        self.installed = 0
    def install(self):
        if self.installed:
            return
        signal.signal(signal.SIGIO, self.fire)
        self.installed = 1
    def uninstall(self):
        if not self.installed:
            return
        signal.signal(signal.SIGIO, signal.SIG_DFL)
        self.installed = 0
    def add(self, watcher):
        self.watchers[watcher.fd] = watcher
        self.install()
    def remove(self, watcher):
        if self.watchers.has_key(watcher.fd):
            del(self.watchers[watcher.fd])
            if not self.watchers:
                self.uninstall()
    def fire(self, signum, frame):
        # this is the signal handler
        # without siginfo_t, we must fire them all
        for watcher in self.watchers.values():
            watcher.callback()

class DNotify:
    DN_ACCESS = fcntl.DN_ACCESS # a file in the directory was read
    DN_MODIFY = fcntl.DN_MODIFY # a file was modified (write,truncate)
    DN_CREATE = fcntl.DN_CREATE # a file was created
    DN_DELETE = fcntl.DN_DELETE # a file was unlinked
    DN_RENAME = fcntl.DN_RENAME # a file was renamed
    DN_ATTRIB = fcntl.DN_ATTRIB # a file had attributes changed (chmod,chown)

    handler = [None]

    def __init__(self, dirname, callback=None,
                 flags=[DN_MODIFY,DN_CREATE,DN_DELETE,DN_RENAME]):

        """This object watches a directory for changes. The .callback
        attribute should be set to a function to be run every time something
        happens to it. Be aware that it will be called more times than you
        expect."""

        if callback:
            self.callback = callback
        else:
            self.callback = self.fire
        self.dirname = dirname
        self.flags = reduce(lambda x, y: x | y, flags) | fcntl.DN_MULTISHOT
        self.fd = os.open(dirname, os.O_RDONLY)
        # ideally we would move the notification to something like SIGRTMIN,
        # (to free up SIGIO) and use sigaction to have the signal handler
        # receive a structure with the fd number. But python doesn't offer
        # either.
        if not self.handler[0]:
            self.handler[0] = DNotify_Handler()
        self.handler[0].add(self)
        fcntl.fcntl(self.fd, fcntl.F_NOTIFY, self.flags)
    def remove(self):
        self.handler[0].remove(self)
        os.close(self.fd)
    def fire(self):
        print self.dirname, "changed!"

def test_dnotify1():
    d = DNotify(".")
    while 1:
        signal.pause()

def test_dnotify2():
    # create ./foo/, create/delete files in ./ and ./foo/ while this is
    # running. Notice how both notifiers are fired when anything changes;
    # this is an unfortunate side-effect of the lack of extended sigaction
    # support in Python.
    count = [0]
    d1 = DNotify(".")
    def fire1(count=count, d1=d1):
        print "./ changed!", count[0]
        count[0] += 1
        if count[0] > 5:
            d1.remove()
            del(d1)
    # change the callback, since we can't define it until after we have the
    # dnotify object. Hmm, unless we give the dnotify to the callback.
    d1.callback = fire1
    def fire2(): print "foo/ changed!"
    d2 = DNotify("foo", fire2)
    while 1:
        signal.pause()


if __name__ == '__main__':
    test_dnotify2()
tools/buildbot/buildbot/changes/freshcvs.py
Normal file
148
tools/buildbot/buildbot/changes/freshcvs.py
Normal file
@ -0,0 +1,148 @@
|
||||
#! /usr/bin/python
|
||||
|
||||
import os.path
|
||||
|
||||
from twisted.cred import credentials
|
||||
from twisted.spread import pb
|
||||
from twisted.application.internet import TCPClient
|
||||
from twisted.python import log
|
||||
|
||||
import cvstoys.common # to make sure VersionedPatch gets registered
|
||||
|
||||
from buildbot.twcompat import implements
|
||||
from buildbot.interfaces import IChangeSource
|
||||
from buildbot.pbutil import ReconnectingPBClientFactory
|
||||
from buildbot.changes.changes import Change
|
||||
from buildbot import util
|
||||
|
||||
class FreshCVSListener(pb.Referenceable):
|
||||
def remote_notify(self, root, files, message, user):
|
||||
try:
|
||||
self.source.notify(root, files, message, user)
|
||||
except Exception, e:
|
||||
print "notify failed"
|
||||
log.err()
|
||||
|
||||
def remote_goodbye(self, message):
|
||||
pass
|
||||
|
||||
class FreshCVSConnectionFactory(ReconnectingPBClientFactory):
|
||||
|
||||
def gotPerspective(self, perspective):
|
||||
log.msg("connected to FreshCVS daemon")
|
||||
ReconnectingPBClientFactory.gotPerspective(self, perspective)
|
||||
self.source.connected = True
|
||||
# TODO: freshcvs-1.0.10 doesn't handle setFilter correctly, it will
|
||||
# be fixed in the upcoming 1.0.11 . I haven't been able to test it
|
||||
# to make sure the failure mode is survivable, so I'll just leave
|
||||
# this out for now.
|
||||
return
|
||||
if self.source.prefix is not None:
|
||||
pathfilter = "^%s" % self.source.prefix
|
||||
d = perspective.callRemote("setFilter",
|
||||
None, pathfilter, None)
|
||||
# ignore failures, setFilter didn't work in 1.0.10 and this is
|
||||
# just an optimization anyway
|
||||
d.addErrback(lambda f: None)
|
||||
|
||||
def clientConnectionLost(self, connector, reason):
|
||||
ReconnectingPBClientFactory.clientConnectionLost(self, connector,
|
||||
reason)
|
||||
self.source.connected = False
|
||||
|
||||
class FreshCVSSourceNewcred(TCPClient, util.ComparableMixin):
|
||||
"""This source will connect to a FreshCVS server associated with one or
|
||||
more CVS repositories. Each time a change is committed to a repository,
|
||||
the server will send us a message describing the change. This message is
|
||||
used to build a Change object, which is then submitted to the
|
||||
ChangeMaster.
|
||||
|
||||
This class handles freshcvs daemons which use newcred. CVSToys-1.0.9
|
||||
does not, later versions might.
|
||||
"""
|
||||
|
||||
if implements:
|
||||
implements(IChangeSource)
|
||||
else:
|
||||
__implements__ = IChangeSource, TCPClient.__implements__
|
||||
compare_attrs = ["host", "port", "username", "password", "prefix"]
|
||||
|
||||
changemaster = None # filled in when we're added
|
||||
connected = False
|
||||
|
||||
def __init__(self, host, port, user, passwd, prefix=None):
|
||||
self.host = host
|
||||
self.port = port
|
||||
self.username = user
|
||||
self.password = passwd
|
||||
if prefix is not None and not prefix.endswith("/"):
|
||||
log.msg("WARNING: prefix '%s' should probably end with a slash" \
|
||||
% prefix)
|
||||
self.prefix = prefix
|
||||
self.listener = l = FreshCVSListener()
|
||||
l.source = self
|
||||
self.factory = f = FreshCVSConnectionFactory()
|
||||
f.source = self
|
||||
self.creds = credentials.UsernamePassword(user, passwd)
|
||||
f.startLogin(self.creds, client=l)
|
||||
TCPClient.__init__(self, host, port, f)
|
||||
|
||||
def __repr__(self):
|
||||
return "<FreshCVSSource where=%s, prefix=%s>" % \
|
||||
((self.host, self.port), self.prefix)
|
||||
|
||||
def describe(self):
|
||||
online = ""
|
||||
if not self.connected:
|
||||
online = " [OFFLINE]"
|
||||
return "freshcvs %s:%s%s" % (self.host, self.port, online)
|
||||
|
||||
def notify(self, root, files, message, user):
|
||||
pathnames = []
|
||||
isdir = 0
|
||||
for f in files:
|
||||
if not isinstance(f, (cvstoys.common.VersionedPatch,
|
||||
cvstoys.common.Directory)):
|
||||
continue
|
||||
pathname, filename = f.pathname, f.filename
|
||||
#r1, r2 = getattr(f, 'r1', None), getattr(f, 'r2', None)
|
||||
if isinstance(f, cvstoys.common.Directory):
|
||||
isdir = 1
|
||||
path = os.path.join(pathname, filename)
|
||||
log.msg("FreshCVS notify '%s'" % path)
|
||||
if self.prefix:
|
||||
if path.startswith(self.prefix):
|
||||
path = path[len(self.prefix):]
|
||||
else:
|
||||
continue
|
||||
pathnames.append(path)
|
||||
if pathnames:
|
||||
# now() is close enough: FreshCVS *is* realtime, after all
|
||||
when=util.now()
|
||||
c = Change(user, pathnames, message, isdir, when=when)
|
||||
self.parent.addChange(c)
|
||||
|
||||
class FreshCVSSourceOldcred(FreshCVSSourceNewcred):
|
||||
"""This is for older freshcvs daemons (from CVSToys-1.0.9 and earlier).
|
||||
"""
|
||||
|
||||
def __init__(self, host, port, user, passwd,
|
||||
serviceName="cvstoys.notify", prefix=None):
|
||||
self.host = host
|
||||
self.port = port
|
||||
self.prefix = prefix
|
||||
self.listener = l = FreshCVSListener()
|
||||
l.source = self
|
||||
self.factory = f = FreshCVSConnectionFactory()
|
||||
f.source = self
|
||||
f.startGettingPerspective(user, passwd, serviceName, client=l)
|
||||
TCPClient.__init__(self, host, port, f)
|
||||
|
||||
def __repr__(self):
|
||||
return "<FreshCVSSourceOldcred where=%s, prefix=%s>" % \
|
||||
((self.host, self.port), self.prefix)
|
||||
|
||||
# this is suitable for CVSToys-1.0.10 and later. If you run CVSToys-1.0.9 or
|
||||
# earlier, use FreshCVSSourceOldcred instead.
|
||||
FreshCVSSource = FreshCVSSourceNewcred
|
||||
|
5
tools/buildbot/buildbot/changes/freshcvsmail.py
Normal file
5
tools/buildbot/buildbot/changes/freshcvsmail.py
Normal file
@ -0,0 +1,5 @@
|
||||
#! /usr/bin/python
|
||||
|
||||
# leftover import for compatibility
|
||||
|
||||
from buildbot.changes.mail import FCMaildirSource
|
338
tools/buildbot/buildbot/changes/mail.py
Normal file
338
tools/buildbot/buildbot/changes/mail.py
Normal file
@ -0,0 +1,338 @@
|
||||
# -*- test-case-name: buildbot.test.test_mailparse -*-
|
||||
|
||||
"""
|
||||
Parse various kinds of 'CVS notify' email.
|
||||
"""
|
||||
import os, re
|
||||
from rfc822 import Message
|
||||
|
||||
from buildbot import util
|
||||
from buildbot.twcompat import implements
|
||||
from buildbot.changes import base, changes, maildirtwisted
|
||||
|
||||
def parseFreshCVSMail(self, fd, prefix=None, sep="/"):
|
||||
"""Parse mail sent by FreshCVS"""
|
||||
# this uses rfc822.Message so it can run under python2.1 . In the future
|
||||
# it will be updated to use python2.2's "email" module.
|
||||
|
||||
m = Message(fd)
|
||||
# FreshCVS sets From: to "user CVS <user>", but the <> part may be
|
||||
# modified by the MTA (to include a local domain)
|
||||
name, addr = m.getaddr("from")
|
||||
if not name:
|
||||
return None # no From means this message isn't from FreshCVS
|
||||
cvs = name.find(" CVS")
|
||||
if cvs == -1:
|
||||
return None # this message isn't from FreshCVS
|
||||
who = name[:cvs]
|
||||
|
||||
# we take the time of receipt as the time of checkin. Not correct, but it
|
||||
# avoids the out-of-order-changes issue. See the comment in parseSyncmail
|
||||
# about using the 'Date:' header
|
||||
when = util.now()
|
||||
|
||||
files = []
|
||||
comments = ""
|
||||
isdir = 0
|
||||
lines = m.fp.readlines()
|
||||
while lines:
|
||||
line = lines.pop(0)
|
||||
if line == "Modified files:\n":
|
||||
break
|
||||
while lines:
|
||||
line = lines.pop(0)
|
||||
if line == "\n":
|
||||
break
|
||||
line = line.rstrip("\n")
|
||||
linebits = line.split(None, 1)
|
||||
file = linebits[0]
|
||||
if prefix:
|
||||
# insist that the file start with the prefix: FreshCVS sends
|
||||
# changes we don't care about too
|
||||
bits = file.split(sep)
|
||||
if bits[0] == prefix:
|
||||
file = sep.join(bits[1:])
|
||||
else:
|
||||
break
|
||||
if len(linebits) == 1:
|
||||
isdir = 1
|
||||
elif linebits[1] == "0 0":
|
||||
isdir = 1
|
||||
files.append(file)
|
||||
while lines:
|
||||
line = lines.pop(0)
|
||||
if line == "Log message:\n":
|
||||
break
|
||||
# message is terminated by "ViewCVS links:" or "Index:..." (patch)
|
||||
while lines:
|
||||
line = lines.pop(0)
|
||||
if line == "ViewCVS links:\n":
|
||||
break
|
||||
if line.find("Index: ") == 0:
|
||||
break
|
||||
comments += line
|
||||
comments = comments.rstrip() + "\n"
|
||||
|
||||
if not files:
|
||||
return None
|
||||
|
||||
change = changes.Change(who, files, comments, isdir, when=when)
|
||||
|
||||
return change
|
||||
|
||||
def parseSyncmail(self, fd, prefix=None, sep="/"):
|
||||
"""Parse messages sent by the 'syncmail' program, as suggested by the
|
||||
sourceforge.net CVS Admin documentation. Syncmail is maintained at
|
||||
syncmail.sf.net .
|
||||
"""
|
||||
# pretty much the same as freshcvs mail, not surprising since CVS is the
|
||||
# one creating most of the text
|
||||
|
||||
m = Message(fd)
|
||||
# The mail is sent from the person doing the checkin. Assume that the
|
||||
# local username is enough to identify them (this assumes a one-server
|
||||
# cvs-over-rsh environment rather than the server-dirs-shared-over-NFS
|
||||
# model)
|
||||
name, addr = m.getaddr("from")
|
||||
if not addr:
|
||||
return None # no From means this message isn't from FreshCVS
|
||||
at = addr.find("@")
|
||||
if at == -1:
|
||||
who = addr # might still be useful
|
||||
else:
|
||||
who = addr[:at]
|
||||
|
||||
# we take the time of receipt as the time of checkin. Not correct (it
|
||||
# depends upon the email latency), but it avoids the out-of-order-changes
|
||||
# issue. Also syncmail doesn't give us anything better to work with,
|
||||
# unless you count pulling the v1-vs-v2 timestamp out of the diffs, which
|
||||
# would be ugly. TODO: Pulling the 'Date:' header from the mail is a
|
||||
# possibility, and email.Utils.parsedate_tz may be useful. It should be
|
||||
# configurable, however, because there are a lot of broken clocks out
|
||||
# there.
|
||||
when = util.now()
|
||||
|
||||
subject = m.getheader("subject")
|
||||
# syncmail puts the repository-relative directory in the subject:
|
||||
# mprefix + "%(dir)s %(file)s,%(oldversion)s,%(newversion)s", where
|
||||
# 'mprefix' is something that could be added by a mailing list
|
||||
# manager.
|
||||
# this is the only reasonable way to determine the directory name
|
||||
space = subject.find(" ")
|
||||
if space != -1:
|
||||
directory = subject[:space]
|
||||
else:
|
||||
directory = subject
|
||||
|
||||
files = []
|
||||
comments = ""
|
||||
isdir = 0
|
||||
branch = None
|
||||
|
||||
lines = m.fp.readlines()
|
||||
while lines:
|
||||
line = lines.pop(0)
|
||||
|
||||
if (line == "Modified Files:\n" or
|
||||
line == "Added Files:\n" or
|
||||
line == "Removed Files:\n"):
|
||||
break
|
||||
|
||||
while lines:
|
||||
line = lines.pop(0)
|
||||
if line == "\n":
|
||||
break
|
||||
if line == "Log Message:\n":
|
||||
lines.insert(0, line)
|
||||
break
|
||||
line = line.lstrip()
|
||||
line = line.rstrip()
|
||||
# note: syncmail will send one email per directory involved in a
|
||||
# commit, with multiple files if they were in the same directory.
|
||||
# Unlike freshCVS, it makes no attempt to collect all related
|
||||
# commits into a single message.
|
||||
|
||||
# note: syncmail will report a Tag underneath the ... Files: line
|
||||
# e.g.: Tag: BRANCH-DEVEL
|
||||
|
||||
if line.startswith('Tag:'):
|
||||
branch = line.split(' ')[-1].rstrip()
|
||||
continue
|
||||
|
||||
# note: it doesn't actually make sense to use portable functions
|
||||
# like os.path.join and os.sep, because these filenames all use
|
||||
# separator conventions established by the remote CVS server (which
|
||||
# is probably running on unix), not the local buildmaster system.
|
||||
thesefiles = line.split(" ")
|
||||
for f in thesefiles:
|
||||
f = sep.join([directory, f])
|
||||
if prefix:
|
||||
# insist that the file start with the prefix: we may get
|
||||
# changes we don't care about too
|
||||
bits = f.split(sep)
|
||||
if bits[0] == prefix:
|
||||
f = sep.join(bits[1:])
|
||||
else:
|
||||
break
|
||||
# TODO: figure out how new directories are described, set .isdir
|
||||
files.append(f)
|
||||
|
||||
if not files:
|
||||
return None
|
||||
|
||||
while lines:
|
||||
line = lines.pop(0)
|
||||
if line == "Log Message:\n":
|
||||
break
|
||||
# message is terminated by "Index:..." (patch) or "--- NEW FILE.."
|
||||
# or "--- filename DELETED ---". Sigh.
|
||||
while lines:
|
||||
line = lines.pop(0)
|
||||
if line.find("Index: ") == 0:
|
||||
break
|
||||
if re.search(r"^--- NEW FILE", line):
|
||||
break
|
||||
if re.search(r" DELETED ---$", line):
|
||||
break
|
||||
comments += line
|
||||
comments = comments.rstrip() + "\n"
|
||||
|
||||
change = changes.Change(who, files, comments, isdir, when=when,
|
||||
branch=branch)
|
||||
|
||||
return change
|
||||
|
||||
# Bonsai mail parser by Stephen Davis.
|
||||
#
|
||||
# This handles changes for CVS repositories that are watched by Bonsai
|
||||
# (http://www.mozilla.org/bonsai.html)
|
||||
|
||||
# A Bonsai-formatted email message looks like:
|
||||
#
|
||||
# C|1071099907|stephend|/cvs|Sources/Scripts/buildbot|bonsai.py|1.2|||18|7
|
||||
# A|1071099907|stephend|/cvs|Sources/Scripts/buildbot|master.cfg|1.1|||18|7
|
||||
# R|1071099907|stephend|/cvs|Sources/Scripts/buildbot|BuildMaster.py|||
|
||||
# LOGCOMMENT
|
||||
# Updated bonsai parser and switched master config to buildbot-0.4.1 style.
|
||||
#
|
||||
# :ENDLOGCOMMENT
|
||||
#
|
||||
# In the first example line, stephend is the user, /cvs the repository,
|
||||
# buildbot the directory, bonsai.py the file, 1.2 the revision, no sticky
|
||||
# and branch, 18 lines added and 7 removed. All of these fields might not be
|
||||
# present (during "removes" for example).
|
||||
#
|
||||
# There may be multiple "control" lines or even none (imports, directory
|
||||
# additions) but there is one email per directory. We only care about actual
|
||||
# changes since it is presumed directory additions don't actually affect the
|
||||
# build. At least one file should need to change (the makefile, say) to
|
||||
# actually make a new directory part of the build process. That's my story
|
||||
# and I'm sticking to it.
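
# The sketch below is illustrative only and is not part of the original
# parser; the helper name is made up. It shows how one of the Bonsai
# "control" lines documented above splits into fields on '|'.
def _example_split_bonsai_control_line():
    line = ("C|1071099907|stephend|/cvs|Sources/Scripts/buildbot"
            "|bonsai.py|1.2|||18|7")
    items = line.split("|")
    # items[0]=change kind, items[1]=timestamp, items[2]=user,
    # items[3]=repository, items[4]=directory, items[5]=file,
    # items[6]=revision, items[7]=sticky, items[8]=branch,
    # items[9]/items[10]=lines added/removed
    who = items[2]
    when = int(items[1])
    path = "%s/%s" % (items[4], items[5])
    return who, when, path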
|
||||
|
||||
def parseBonsaiMail(self, fd, prefix=None, sep="/"):
|
||||
"""Parse mail sent by the Bonsai cvs loginfo script."""
|
||||
|
||||
msg = Message(fd)
|
||||
|
||||
# we don't care who the email came from b/c the cvs user is in the msg
|
||||
# text
|
||||
|
||||
who = "unknown"
|
||||
timestamp = None
|
||||
files = []
|
||||
lines = msg.fp.readlines()
|
||||
|
||||
# read the control lines (what/who/where/file/etc.)
|
||||
while lines:
|
||||
line = lines.pop(0)
|
||||
if line == "LOGCOMMENT\n":
|
||||
                break
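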
|
||||
line = line.rstrip("\n")
|
||||
|
||||
# we'd like to do the following but it won't work if the number of
|
||||
# items doesn't match so...
|
||||
# what, timestamp, user, repo, module, file = line.split( '|' )
|
||||
items = line.split('|')
|
||||
if len(items) < 6:
|
||||
# not a valid line, assume this isn't a bonsai message
|
||||
return None
|
||||
|
||||
try:
|
||||
# just grab the bottom-most timestamp, they're probably all the
|
||||
# same. TODO: I'm assuming this is relative to the epoch, but
|
||||
# this needs testing.
|
||||
timestamp = int(items[1])
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
user = items[2]
|
||||
if user:
|
||||
who = user
|
||||
|
||||
module = items[4]
|
||||
file = items[5]
|
||||
if module and file:
|
||||
path = "%s/%s" % (module, file)
|
||||
files.append(path)
|
||||
sticky = items[7]
|
||||
branch = items[8]
|
||||
|
||||
# if no files changed, return nothing
|
||||
if not files:
|
||||
return None
|
||||
|
||||
# read the comments
|
||||
comments = ""
|
||||
while lines:
|
||||
line = lines.pop(0)
|
||||
if line == ":ENDLOGCOMMENT\n":
|
||||
break
|
||||
comments += line
|
||||
comments = comments.rstrip() + "\n"
|
||||
|
||||
# return buildbot Change object
|
||||
return changes.Change(who, files, comments, when=timestamp, branch=branch)
|
||||
|
||||
|
||||
|
||||
class MaildirSource(maildirtwisted.MaildirTwisted, base.ChangeSource):
|
||||
"""This source will watch a maildir that is subscribed to a FreshCVS
|
||||
change-announcement mailing list.
|
||||
"""
|
||||
# we need our own implements() here, at least for twisted-1.3, because
|
||||
# the double-inheritance of Service shadows __implements__ from
|
||||
# ChangeSource.
|
||||
if not implements:
|
||||
__implements__ = base.ChangeSource.__implements__
|
||||
|
||||
compare_attrs = ["basedir", "newdir", "pollinterval", "parser"]
|
||||
parser = None
|
||||
name = None
|
||||
|
||||
def __init__(self, maildir, prefix=None, sep="/"):
|
||||
maildirtwisted.MaildirTwisted.__init__(self, maildir)
|
||||
self.prefix = prefix
|
||||
self.sep = sep
|
||||
|
||||
def describe(self):
|
||||
return "%s mailing list in maildir %s" % (self.name, self.basedir)
|
||||
|
||||
def messageReceived(self, filename):
|
||||
path = os.path.join(self.basedir, "new", filename)
|
||||
change = self.parser(open(path, "r"), self.prefix, self.sep)
|
||||
if change:
|
||||
self.parent.addChange(change)
|
||||
os.rename(os.path.join(self.basedir, "new", filename),
|
||||
os.path.join(self.basedir, "cur", filename))
|
||||
|
||||
class FCMaildirSource(MaildirSource):
|
||||
parser = parseFreshCVSMail
|
||||
name = "FreshCVS"
|
||||
|
||||
class SyncmailMaildirSource(MaildirSource):
|
||||
parser = parseSyncmail
|
||||
name = "Syncmail"
|
||||
|
||||
class BonsaiMaildirSource(MaildirSource):
|
||||
parser = parseBonsaiMail
|
||||
name = "Bonsai"
|
115
tools/buildbot/buildbot/changes/maildir.py
Normal file
@ -0,0 +1,115 @@
|
||||
#! /usr/bin/python
|
||||
|
||||
# This is a class which watches a maildir for new messages. It uses the
|
||||
# linux dirwatcher API (if available) to look for new files. The
|
||||
# .messageReceived method is invoked with the filename of the new message,
|
||||
# relative to the 'new' directory of the maildir.
|
||||
|
||||
# this is an abstract base class. It must be subclassed by something to
|
||||
# provide a delay function (which polls in the case that DNotify isn't
|
||||
# available) and a way to safely schedule code to run after a signal handler
|
||||
# has fired. See maildirgtk.py and maildirtwisted.py for forms that use the
|
||||
# event loops provided by Gtk+ and Twisted.
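
# A minimal sketch (illustrative only; see maildirgtk.py and maildirtwisted.py
# for the real implementations) of what a concrete subclass must supply:
#
#   class PollingMaildir(Maildir):
#       def startTimeout(self):
#           ... arrange for self.poll() to run self.pollinterval seconds from now
#       def stopTimeout(self):
#           ... cancel that pending call
#       def messageReceived(self, filename):
#           ... handle maildir/new/<filename>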
|
||||
|
||||
try:
|
||||
from dnotify import DNotify
|
||||
have_dnotify = 1
|
||||
except:
|
||||
have_dnotify = 0
|
||||
import os
|
||||
|
||||
class Maildir:
|
||||
"""This is a class which watches a maildir for new messages. Once
|
||||
started, it will run its .messageReceived method when a message is
|
||||
available.
|
||||
"""
|
||||
def __init__(self, basedir=None):
|
||||
"""Create the Maildir watcher. BASEDIR is the maildir directory (the
|
||||
one which contains new/ and tmp/)
|
||||
"""
|
||||
self.basedir = basedir
|
||||
self.files = []
|
||||
self.pollinterval = 10 # only used if we don't have DNotify
|
||||
self.running = 0
|
||||
self.dnotify = None
|
||||
|
||||
def setBasedir(self, basedir):
|
||||
self.basedir = basedir
|
||||
|
||||
def start(self):
|
||||
"""You must run start to receive any messages."""
|
||||
assert self.basedir
|
||||
self.newdir = os.path.join(self.basedir, "new")
|
||||
if self.running:
|
||||
return
|
||||
self.running = 1
|
||||
if not os.path.isdir(self.basedir) or not os.path.isdir(self.newdir):
|
||||
raise "invalid maildir '%s'" % self.basedir
|
||||
# we must hold an fd open on the directory, so we can get notified
|
||||
# when it changes.
|
||||
global have_dnotify
|
||||
if have_dnotify:
|
||||
try:
|
||||
self.dnotify = DNotify(self.newdir, self.dnotify_callback,
|
||||
[DNotify.DN_CREATE])
|
||||
except (IOError, OverflowError):
|
||||
# IOError is probably linux<2.4.19, which doesn't support
|
||||
# dnotify. OverflowError will occur on some 64-bit machines
|
||||
# because of a python bug
|
||||
print "DNotify failed, falling back to polling"
|
||||
have_dnotify = 0
|
||||
|
||||
self.poll()
|
||||
|
||||
def startTimeout(self):
|
||||
        raise NotImplementedError
|
||||
def stopTimeout(self):
|
||||
        raise NotImplementedError
|
||||
def dnotify_callback(self):
|
||||
print "callback"
|
||||
self.poll()
|
||||
        raise NotImplementedError
|
||||
|
||||
def stop(self):
|
||||
if self.dnotify:
|
||||
self.dnotify.remove()
|
||||
self.dnotify = None
|
||||
else:
|
||||
self.stopTimeout()
|
||||
self.running = 0
|
||||
|
||||
def poll(self):
|
||||
assert self.basedir
|
||||
# see what's new
|
||||
for f in self.files:
|
||||
if not os.path.isfile(os.path.join(self.newdir, f)):
|
||||
self.files.remove(f)
|
||||
newfiles = []
|
||||
for f in os.listdir(self.newdir):
|
||||
if not f in self.files:
|
||||
newfiles.append(f)
|
||||
self.files.extend(newfiles)
|
||||
# TODO: sort by ctime, then filename, since safecat uses a rather
|
||||
# fine-grained timestamp in the filename
|
||||
for n in newfiles:
|
||||
# TODO: consider catching exceptions in messageReceived
|
||||
self.messageReceived(n)
|
||||
if not have_dnotify:
|
||||
self.startTimeout()
|
||||
|
||||
def messageReceived(self, filename):
|
||||
"""Called when a new file is noticed. Override it in subclasses.
|
||||
Will receive path relative to maildir/new."""
|
||||
print filename
|
||||
|
||||
|
||||
def test1():
|
||||
m = Maildir("ddir")
|
||||
m.start()
|
||||
import signal
|
||||
while 1:
|
||||
signal.pause()
|
||||
|
||||
if __name__ == '__main__':
|
||||
test1()
|
||||
|
55
tools/buildbot/buildbot/changes/maildirgtk.py
Normal file
@ -0,0 +1,55 @@
|
||||
#! /usr/bin/python
|
||||
|
||||
# This is a class which watches a maildir for new messages. It uses the
|
||||
# linux dirwatcher API (if available) to look for new files. The
|
||||
# .messageReceived method is invoked with the filename of the new message,
|
||||
# relative to the top of the maildir (so it will look like "new/blahblah").
|
||||
|
||||
# This form uses the Gtk event loop to handle polling and signal safety
|
||||
|
||||
if __name__ == '__main__':
|
||||
import pygtk
|
||||
pygtk.require("2.0")
|
||||
|
||||
import gtk
|
||||
from maildir import Maildir
|
||||
|
||||
class MaildirGtk(Maildir):
|
||||
def __init__(self, basedir):
|
||||
Maildir.__init__(self, basedir)
|
||||
self.idler = None
|
||||
def startTimeout(self):
|
||||
self.timeout = gtk.timeout_add(self.pollinterval*1000, self.doTimeout)
|
||||
def doTimeout(self):
|
||||
self.poll()
|
||||
return gtk.TRUE # keep going
|
||||
def stopTimeout(self):
|
||||
if self.timeout:
|
||||
gtk.timeout_remove(self.timeout)
|
||||
self.timeout = None
|
||||
def dnotify_callback(self):
|
||||
# make it safe
|
||||
self.idler = gtk.idle_add(self.idlePoll)
|
||||
def idlePoll(self):
|
||||
gtk.idle_remove(self.idler)
|
||||
self.idler = None
|
||||
self.poll()
|
||||
return gtk.FALSE
|
||||
|
||||
def test1():
|
||||
class MaildirTest(MaildirGtk):
|
||||
def messageReceived(self, filename):
|
||||
print "changed:", filename
|
||||
m = MaildirTest("ddir")
|
||||
print "watching ddir/new/"
|
||||
m.start()
|
||||
#gtk.main()
|
||||
# to allow the python-side signal handler to run, we must surface from
|
||||
# gtk (which blocks on the C-side) every once in a while.
|
||||
while 1:
|
||||
gtk.mainiteration() # this will block until there is something to do
|
||||
m.stop()
|
||||
print "done"
|
||||
|
||||
if __name__ == '__main__':
|
||||
test1()
|
76
tools/buildbot/buildbot/changes/maildirtwisted.py
Normal file
@ -0,0 +1,76 @@
|
||||
#! /usr/bin/python
|
||||
|
||||
# This is a class which watches a maildir for new messages. It uses the
|
||||
# linux dirwatcher API (if available) to look for new files. The
|
||||
# .messageReceived method is invoked with the filename of the new message,
|
||||
# relative to the top of the maildir (so it will look like "new/blahblah").
|
||||
|
||||
# This version is implemented as a Twisted Python "Service". It uses the
|
||||
# twisted Reactor to handle polling and signal safety.
|
||||
|
||||
from twisted.application import service
|
||||
from twisted.internet import reactor
|
||||
from maildir import Maildir
|
||||
|
||||
class MaildirTwisted(Maildir, service.Service):
|
||||
timeout = None
|
||||
|
||||
def startService(self):
|
||||
self.start()
|
||||
service.Service.startService(self)
|
||||
def stopService(self):
|
||||
self.stop()
|
||||
service.Service.stopService(self)
|
||||
|
||||
def startTimeout(self):
|
||||
self.timeout = reactor.callLater(self.pollinterval, self.poll)
|
||||
def stopTimeout(self):
|
||||
if self.timeout:
|
||||
self.timeout.cancel()
|
||||
self.timeout = None
|
||||
|
||||
def dnotify_callback(self):
|
||||
# make it safe
|
||||
#reactor.callFromThread(self.poll)
|
||||
reactor.callLater(1, self.poll)
|
||||
# give it a moment. I found that qmail had problems when the message
|
||||
# was removed from the maildir instantly. It shouldn't, that's what
|
||||
# maildirs are made for. I wasn't able to eyeball any reason for the
|
||||
# problem, and safecat didn't behave the same way, but qmail reports
|
||||
# "Temporary_error_on_maildir_delivery" (qmail-local.c:165,
|
||||
# maildir_child() process exited with rc not in 0,2,3,4). Not sure why,
|
||||
# would have to hack qmail to investigate further, easier to just
|
||||
# wait a second before yanking the message out of new/ .
|
||||
|
||||
## def messageReceived(self, filename):
|
||||
## if self.callback:
|
||||
## self.callback(filename)
|
||||
|
||||
class MaildirService(MaildirTwisted):
|
||||
"""I watch a maildir for new messages. I should be placed as the service
|
||||
child of some MultiService instance. When running, I use the linux
|
||||
dirwatcher API (if available) or poll for new files in the 'new'
|
||||
subdirectory of my maildir path. When I discover a new message, I invoke
|
||||
my parent's .messageReceived() method with the short filename of the new
|
||||
message, so the full name of the new file can be obtained with
|
||||
os.path.join(maildir, 'new', filename). I will not move or delete the
|
||||
file on my own: the parent should do this in messageReceived().
|
||||
"""
|
||||
def messageReceived(self, filename):
|
||||
self.parent.messageReceived(filename)
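
    # Illustrative usage sketch (not part of the original module); the parent
    # MultiService here is assumed to define messageReceived(filename):
    #
    #   svc = MaildirService("/path/to/maildir")
    #   svc.setServiceParent(parent_multiservice)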
|
||||
|
||||
|
||||
def test1():
|
||||
class MaildirTest(MaildirTwisted):
|
||||
def messageReceived(self, filename):
|
||||
print "changed:", filename
|
||||
m = MaildirTest(basedir="ddir")
|
||||
print "watching ddir/new/"
|
||||
m.startService()
|
||||
reactor.run()
|
||||
print "done"
|
||||
|
||||
if __name__ == '__main__':
|
||||
test1()
|
||||
|
||||
|
306
tools/buildbot/buildbot/changes/monotone.py
Normal file
@ -0,0 +1,306 @@
|
||||
|
||||
import tempfile
|
||||
import os
|
||||
import os.path
|
||||
from cStringIO import StringIO
|
||||
|
||||
from twisted.python import log
|
||||
from twisted.application import service
|
||||
from twisted.internet import defer, protocol, error, reactor
|
||||
from twisted.internet.task import LoopingCall
|
||||
|
||||
from buildbot import util
|
||||
from buildbot.interfaces import IChangeSource
|
||||
from buildbot.changes.changes import Change
|
||||
|
||||
class _MTProtocol(protocol.ProcessProtocol):
|
||||
|
||||
def __init__(self, deferred, cmdline):
|
||||
self.cmdline = cmdline
|
||||
self.deferred = deferred
|
||||
self.s = StringIO()
|
||||
|
||||
def errReceived(self, text):
|
||||
log.msg("stderr: %s" % text)
|
||||
|
||||
def outReceived(self, text):
|
||||
log.msg("stdout: %s" % text)
|
||||
self.s.write(text)
|
||||
|
||||
def processEnded(self, reason):
|
||||
log.msg("Command %r exited with value %s" % (self.cmdline, reason))
|
||||
if isinstance(reason.value, error.ProcessDone):
|
||||
self.deferred.callback(self.s.getvalue())
|
||||
else:
|
||||
self.deferred.errback(reason)
|
||||
|
||||
class Monotone:
|
||||
"""All methods of this class return a Deferred."""
|
||||
|
||||
def __init__(self, bin, db):
|
||||
self.bin = bin
|
||||
self.db = db
|
||||
|
||||
def _run_monotone(self, args):
|
||||
d = defer.Deferred()
|
||||
cmdline = (self.bin, "--db=" + self.db) + tuple(args)
|
||||
p = _MTProtocol(d, cmdline)
|
||||
log.msg("Running command: %r" % (cmdline,))
|
||||
log.msg("wd: %s" % os.getcwd())
|
||||
reactor.spawnProcess(p, self.bin, cmdline)
|
||||
return d
|
||||
|
||||
def _process_revision_list(self, output):
|
||||
if output:
|
||||
return output.strip().split("\n")
|
||||
else:
|
||||
return []
|
||||
|
||||
def get_interface_version(self):
|
||||
d = self._run_monotone(["automate", "interface_version"])
|
||||
d.addCallback(self._process_interface_version)
|
||||
return d
|
||||
|
||||
def _process_interface_version(self, output):
|
||||
return tuple(map(int, output.strip().split(".")))
|
||||
|
||||
def db_init(self):
|
||||
return self._run_monotone(["db", "init"])
|
||||
|
||||
def db_migrate(self):
|
||||
return self._run_monotone(["db", "migrate"])
|
||||
|
||||
def pull(self, server, pattern):
|
||||
return self._run_monotone(["pull", server, pattern])
|
||||
|
||||
def get_revision(self, rid):
|
||||
return self._run_monotone(["cat", "revision", rid])
|
||||
|
||||
def get_heads(self, branch, rcfile=""):
|
||||
cmd = ["automate", "heads", branch]
|
||||
if rcfile:
|
||||
cmd += ["--rcfile=" + rcfile]
|
||||
d = self._run_monotone(cmd)
|
||||
d.addCallback(self._process_revision_list)
|
||||
return d
|
||||
|
||||
def erase_ancestors(self, revs):
|
||||
d = self._run_monotone(["automate", "erase_ancestors"] + revs)
|
||||
d.addCallback(self._process_revision_list)
|
||||
return d
|
||||
|
||||
def ancestry_difference(self, new_rev, old_revs):
|
||||
d = self._run_monotone(["automate", "ancestry_difference", new_rev]
|
||||
+ old_revs)
|
||||
d.addCallback(self._process_revision_list)
|
||||
return d
|
||||
|
||||
def descendents(self, rev):
|
||||
d = self._run_monotone(["automate", "descendents", rev])
|
||||
d.addCallback(self._process_revision_list)
|
||||
return d
|
||||
|
||||
def log(self, rev, depth=None):
|
||||
if depth is not None:
|
||||
depth_arg = ["--last=%i" % (depth,)]
|
||||
else:
|
||||
depth_arg = []
|
||||
return self._run_monotone(["log", "-r", rev] + depth_arg)
|
||||
|
||||
|
||||
class MonotoneSource(service.Service, util.ComparableMixin):
|
||||
"""This source will poll a monotone server for changes and submit them to
|
||||
the change master.
|
||||
|
||||
@param server_addr: monotone server specification (host:portno)
|
||||
|
||||
@param branch: monotone branch to watch
|
||||
|
||||
@param trusted_keys: list of keys whose code you trust
|
||||
|
||||
@param db_path: path to monotone database to pull into
|
||||
|
||||
@param pollinterval: interval in seconds between polls, defaults to 10 minutes
|
||||
@param monotone_exec: path to monotone executable, defaults to "monotone"
|
||||
"""
|
||||
|
||||
__implements__ = IChangeSource, service.Service.__implements__
|
||||
compare_attrs = ["server_addr", "trusted_keys", "db_path",
|
||||
"pollinterval", "branch", "monotone_exec"]
|
||||
|
||||
parent = None # filled in when we're added
|
||||
done_revisions = []
|
||||
last_revision = None
|
||||
loop = None
|
||||
d = None
|
||||
tmpfile = None
|
||||
monotone = None
|
||||
volatile = ["loop", "d", "tmpfile", "monotone"]
|
||||
|
||||
def __init__(self, server_addr, branch, trusted_keys, db_path,
|
||||
pollinterval=60 * 10, monotone_exec="monotone"):
|
||||
self.server_addr = server_addr
|
||||
self.branch = branch
|
||||
self.trusted_keys = trusted_keys
|
||||
self.db_path = db_path
|
||||
self.pollinterval = pollinterval
|
||||
self.monotone_exec = monotone_exec
|
||||
self.monotone = Monotone(self.monotone_exec, self.db_path)
|
||||
|
||||
def startService(self):
|
||||
self.loop = LoopingCall(self.start_poll)
|
||||
self.loop.start(self.pollinterval)
|
||||
service.Service.startService(self)
|
||||
|
||||
def stopService(self):
|
||||
self.loop.stop()
|
||||
return service.Service.stopService(self)
|
||||
|
||||
def describe(self):
|
||||
return "monotone_source %s %s" % (self.server_addr,
|
||||
self.branch)
|
||||
|
||||
def start_poll(self):
|
||||
if self.d is not None:
|
||||
log.msg("last poll still in progress, skipping next poll")
|
||||
return
|
||||
log.msg("starting poll")
|
||||
self.d = self._maybe_init_db()
|
||||
self.d.addCallback(self._do_netsync)
|
||||
self.d.addCallback(self._get_changes)
|
||||
self.d.addErrback(self._handle_error)
|
||||
|
||||
def _handle_error(self, failure):
|
||||
log.err(failure)
|
||||
self.d = None
|
||||
|
||||
def _maybe_init_db(self):
|
||||
if not os.path.exists(self.db_path):
|
||||
log.msg("init'ing db")
|
||||
return self.monotone.db_init()
|
||||
else:
|
||||
log.msg("db already exists, migrating")
|
||||
return self.monotone.db_migrate()
|
||||
|
||||
def _do_netsync(self, output):
|
||||
return self.monotone.pull(self.server_addr, self.branch)
|
||||
|
||||
def _get_changes(self, output):
|
||||
d = self._get_new_head()
|
||||
d.addCallback(self._process_new_head)
|
||||
return d
|
||||
|
||||
def _get_new_head(self):
|
||||
# This function returns a deferred that resolves to a good pick of new
|
||||
# head (or None if there is no good new head.)
|
||||
|
||||
# First need to get all new heads...
|
||||
rcfile = """function get_revision_cert_trust(signers, id, name, val)
|
||||
local trusted_signers = { %s }
|
||||
local ts_table = {}
|
||||
for k, v in pairs(trusted_signers) do ts_table[v] = 1 end
|
||||
for k, v in pairs(signers) do
|
||||
if ts_table[v] then
|
||||
return true
|
||||
end
|
||||
end
|
||||
return false
|
||||
end
|
||||
"""
|
||||
trusted_list = ", ".join(['"' + key + '"' for key in self.trusted_keys])
|
||||
# mktemp is unsafe, but mkstemp is not 2.2 compatible.
|
||||
tmpfile_name = tempfile.mktemp()
|
||||
f = open(tmpfile_name, "w")
|
||||
f.write(rcfile % trusted_list)
|
||||
f.close()
|
||||
d = self.monotone.get_heads(self.branch, tmpfile_name)
|
||||
d.addCallback(self._find_new_head, tmpfile_name)
|
||||
return d
|
||||
|
||||
def _find_new_head(self, new_heads, tmpfile_name):
|
||||
os.unlink(tmpfile_name)
|
||||
# Now get the old head's descendents...
|
||||
if self.last_revision is not None:
|
||||
d = self.monotone.descendents(self.last_revision)
|
||||
else:
|
||||
d = defer.succeed(new_heads)
|
||||
d.addCallback(self._pick_new_head, new_heads)
|
||||
return d
|
||||
|
||||
def _pick_new_head(self, old_head_descendents, new_heads):
|
||||
for r in new_heads:
|
||||
if r in old_head_descendents:
|
||||
return r
|
||||
return None
|
||||
|
||||
def _process_new_head(self, new_head):
|
||||
if new_head is None:
|
||||
log.msg("No new head")
|
||||
self.d = None
|
||||
return None
|
||||
# Okay, we have a new head; we need to get all the revisions since
|
||||
# then and create change objects for them.
|
||||
# Step 1: simplify set of processed revisions.
|
||||
d = self._simplify_revisions()
|
||||
# Step 2: get the list of new revisions
|
||||
d.addCallback(self._get_new_revisions, new_head)
|
||||
# Step 3: add a change for each
|
||||
d.addCallback(self._add_changes_for_revisions)
|
||||
# Step 4: all done
|
||||
d.addCallback(self._finish_changes, new_head)
|
||||
return d
|
||||
|
||||
def _simplify_revisions(self):
|
||||
d = self.monotone.erase_ancestors(self.done_revisions)
|
||||
d.addCallback(self._reset_done_revisions)
|
||||
return d
|
||||
|
||||
def _reset_done_revisions(self, new_done_revisions):
|
||||
self.done_revisions = new_done_revisions
|
||||
return None
|
||||
|
||||
def _get_new_revisions(self, blah, new_head):
|
||||
if self.done_revisions:
|
||||
return self.monotone.ancestry_difference(new_head,
|
||||
self.done_revisions)
|
||||
else:
|
||||
# Don't force feed the builder with every change since the
|
||||
# beginning of time when it's first started up.
|
||||
return defer.succeed([new_head])
|
||||
|
||||
def _add_changes_for_revisions(self, revs):
|
||||
d = defer.succeed(None)
|
||||
for rid in revs:
|
||||
d.addCallback(self._add_change_for_revision, rid)
|
||||
return d
|
||||
|
||||
def _add_change_for_revision(self, blah, rid):
|
||||
d = self.monotone.log(rid, 1)
|
||||
d.addCallback(self._add_change_from_log, rid)
|
||||
return d
|
||||
|
||||
def _add_change_from_log(self, log, rid):
|
||||
d = self.monotone.get_revision(rid)
|
||||
d.addCallback(self._add_change_from_log_and_revision, log, rid)
|
||||
return d
|
||||
|
||||
def _add_change_from_log_and_revision(self, revision, log, rid):
|
||||
# Stupid way to pull out everything inside quotes (which currently
|
||||
# uniquely identifies filenames inside a changeset).
|
||||
pieces = revision.split('"')
|
||||
files = []
|
||||
for i in range(len(pieces)):
|
||||
if (i % 2) == 1:
|
||||
files.append(pieces[i])
|
||||
# Also pull out author key and date
|
||||
author = "unknown author"
|
||||
pieces = log.split('\n')
|
||||
for p in pieces:
|
||||
if p.startswith("Author:"):
|
||||
author = p.split()[1]
|
||||
self.parent.addChange(Change(author, files, log, revision=rid))
|
||||
|
||||
def _finish_changes(self, blah, new_head):
|
||||
self.done_revisions.append(new_head)
|
||||
self.last_revision = new_head
|
||||
self.d = None
|
204
tools/buildbot/buildbot/changes/p4poller.py
Normal file
@ -0,0 +1,204 @@
|
||||
# -*- test-case-name: buildbot.test.test_p4poller -*-
|
||||
|
||||
# Many thanks to Dave Peticolas for contributing this module
|
||||
|
||||
import re
|
||||
import time
|
||||
|
||||
from twisted.python import log, failure
|
||||
from twisted.internet import defer, reactor
|
||||
from twisted.internet.utils import getProcessOutput
|
||||
from twisted.internet.task import LoopingCall
|
||||
|
||||
from buildbot import util
|
||||
from buildbot.changes import base, changes
|
||||
|
||||
def get_simple_split(branchfile):
|
||||
"""Splits the branchfile argument and assuming branch is
|
||||
the first path component in branchfile, will return
|
||||
branch and file else None."""
|
||||
|
||||
index = branchfile.find('/')
|
||||
if index == -1: return None, None
|
||||
branch, file = branchfile.split('/', 1)
|
||||
return branch, file
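
# Illustrative sketch, not part of the original module: what get_simple_split
# returns, and how it could be handed to P4Source (defined below) as the
# split_file= argument. The helper name, depot path, and port are made up.
def _example_get_simple_split_usage():
    assert get_simple_split("branchname/subdir/file.c") == ("branchname",
                                                            "subdir/file.c")
    assert get_simple_split("README") == (None, None)
    return P4Source(p4port="perforce.example.com:1666", p4user="buildbot",
                    p4base="//depot/", split_file=get_simple_split)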
|
||||
|
||||
class P4Source(base.ChangeSource, util.ComparableMixin):
|
||||
"""This source will poll a perforce repository for changes and submit
|
||||
them to the change master."""
|
||||
|
||||
compare_attrs = ["p4port", "p4user", "p4passwd", "p4base",
|
||||
"p4bin", "pollinterval", "histmax"]
|
||||
|
||||
changes_line_re = re.compile(
|
||||
r"Change (?P<num>\d+) on \S+ by \S+@\S+ '.+'$")
|
||||
describe_header_re = re.compile(
|
||||
r"Change \d+ by (?P<who>\S+)@\S+ on (?P<when>.+)$")
|
||||
file_re = re.compile(r"^\.\.\. (?P<path>[^#]+)#\d+ \w+$")
|
||||
datefmt = '%Y/%m/%d %H:%M:%S'
|
||||
|
||||
parent = None # filled in when we're added
|
||||
last_change = None
|
||||
loop = None
|
||||
working = False
|
||||
|
||||
def __init__(self, p4port=None, p4user=None, p4passwd=None,
|
||||
p4base='//', p4bin='p4',
|
||||
split_file=lambda branchfile: (None, branchfile),
|
||||
pollinterval=60 * 10, histmax=100):
|
||||
"""
|
||||
@type p4port: string
|
||||
@param p4port: p4 port definition (host:portno)
|
||||
@type p4user: string
|
||||
@param p4user: p4 user
|
||||
@type p4passwd: string
|
||||
@param p4passwd: p4 passwd
|
||||
@type p4base: string
|
||||
@param p4base: p4 file specification to limit a poll to
|
||||
without the trailing '...' (i.e., //)
|
||||
@type p4bin: string
|
||||
@param p4bin: path to p4 binary, defaults to just 'p4'
|
||||
@type split_file: func
|
||||
        @param split_file: splits a filename into (branch, filename).
|
||||
@type pollinterval: int
|
||||
@param pollinterval: interval in seconds between polls
|
||||
@type histmax: int
|
||||
@param histmax: maximum number of changes to look back through
|
||||
"""
|
||||
|
||||
self.p4port = p4port
|
||||
self.p4user = p4user
|
||||
self.p4passwd = p4passwd
|
||||
self.p4base = p4base
|
||||
self.p4bin = p4bin
|
||||
self.split_file = split_file
|
||||
self.pollinterval = pollinterval
|
||||
self.histmax = histmax
|
||||
self.loop = LoopingCall(self.checkp4)
|
||||
|
||||
def startService(self):
|
||||
base.ChangeSource.startService(self)
|
||||
|
||||
# Don't start the loop just yet because the reactor isn't running.
|
||||
# Give it a chance to go and install our SIGCHLD handler before
|
||||
# spawning processes.
|
||||
reactor.callLater(0, self.loop.start, self.pollinterval)
|
||||
|
||||
def stopService(self):
|
||||
self.loop.stop()
|
||||
return base.ChangeSource.stopService(self)
|
||||
|
||||
def describe(self):
|
||||
return "p4source %s %s" % (self.p4port, self.p4base)
|
||||
|
||||
def checkp4(self):
|
||||
# Our return value is only used for unit testing.
|
||||
if self.working:
|
||||
log.msg("Skipping checkp4 because last one has not finished")
|
||||
return defer.succeed(None)
|
||||
else:
|
||||
self.working = True
|
||||
d = self._get_changes()
|
||||
d.addCallback(self._process_changes)
|
||||
d.addBoth(self._finished)
|
||||
return d
|
||||
|
||||
def _finished(self, res):
|
||||
assert self.working
|
||||
self.working = False
|
||||
|
||||
# Again, the return value is only for unit testing.
|
||||
# If there's a failure, log it so it isn't lost.
|
||||
if isinstance(res, failure.Failure):
|
||||
log.msg('P4 poll failed: %s' % res)
|
||||
return res
|
||||
|
||||
def _get_changes(self):
|
||||
args = []
|
||||
if self.p4port:
|
||||
args.extend(['-p', self.p4port])
|
||||
if self.p4user:
|
||||
args.extend(['-u', self.p4user])
|
||||
if self.p4passwd:
|
||||
args.extend(['-P', self.p4passwd])
|
||||
args.extend(['changes', '-m', str(self.histmax), self.p4base + '...'])
|
||||
env = {}
|
||||
return getProcessOutput(self.p4bin, args, env)
|
||||
|
||||
def _process_changes(self, result):
|
||||
last_change = self.last_change
|
||||
changelists = []
|
||||
for line in result.split('\n'):
|
||||
line = line.strip()
|
||||
if not line: continue
|
||||
m = self.changes_line_re.match(line)
|
||||
assert m, "Unexpected 'p4 changes' output: %r" % result
|
||||
num = m.group('num')
|
||||
if last_change is None:
|
||||
log.msg('P4Poller: starting at change %s' % num)
|
||||
self.last_change = num
|
||||
return []
|
||||
if last_change == num:
|
||||
break
|
||||
changelists.append(num)
|
||||
changelists.reverse() # oldest first
|
||||
|
||||
# Retrieve each sequentially.
|
||||
d = defer.succeed(None)
|
||||
for c in changelists:
|
||||
d.addCallback(self._get_describe, c)
|
||||
d.addCallback(self._process_describe, c)
|
||||
return d
|
||||
|
||||
def _get_describe(self, dummy, num):
|
||||
args = []
|
||||
if self.p4port:
|
||||
args.extend(['-p', self.p4port])
|
||||
if self.p4user:
|
||||
args.extend(['-u', self.p4user])
|
||||
if self.p4passwd:
|
||||
args.extend(['-P', self.p4passwd])
|
||||
args.extend(['describe', '-s', num])
|
||||
env = {}
|
||||
d = getProcessOutput(self.p4bin, args, env)
|
||||
return d
|
||||
|
||||
def _process_describe(self, result, num):
|
||||
lines = result.split('\n')
|
||||
# SF#1555985: Wade Brainerd reports a stray ^M at the end of the date
|
||||
# field. The rstrip() is intended to remove that.
|
||||
lines[0] = lines[0].rstrip()
|
||||
m = self.describe_header_re.match(lines[0])
|
||||
assert m, "Unexpected 'p4 describe -s' result: %r" % result
|
||||
who = m.group('who')
|
||||
when = time.mktime(time.strptime(m.group('when'), self.datefmt))
|
||||
comments = ''
|
||||
while not lines[0].startswith('Affected files'):
|
||||
comments += lines.pop(0) + '\n'
|
||||
lines.pop(0) # affected files
|
||||
|
||||
branch_files = {} # dict for branch mapped to file(s)
|
||||
while lines:
|
||||
line = lines.pop(0).strip()
|
||||
if not line: continue
|
||||
m = self.file_re.match(line)
|
||||
assert m, "Invalid file line: %r" % line
|
||||
path = m.group('path')
|
||||
if path.startswith(self.p4base):
|
||||
branch, file = self.split_file(path[len(self.p4base):])
|
||||
if (branch == None and file == None): continue
|
||||
if branch_files.has_key(branch):
|
||||
branch_files[branch].append(file)
|
||||
else:
|
||||
branch_files[branch] = [file]
|
||||
|
||||
for branch in branch_files:
|
||||
c = changes.Change(who=who,
|
||||
files=branch_files[branch],
|
||||
comments=comments,
|
||||
revision=num,
|
||||
when=when,
|
||||
branch=branch)
|
||||
self.parent.addChange(c)
|
||||
|
||||
self.last_change = num
|
108
tools/buildbot/buildbot/changes/pb.py
Normal file
@ -0,0 +1,108 @@
|
||||
# -*- test-case-name: buildbot.test.test_changes -*-
|
||||
|
||||
from twisted.python import log
|
||||
|
||||
from buildbot.pbutil import NewCredPerspective
|
||||
from buildbot.changes import base, changes
|
||||
|
||||
class ChangePerspective(NewCredPerspective):
|
||||
|
||||
def __init__(self, changemaster, prefix):
|
||||
self.changemaster = changemaster
|
||||
self.prefix = prefix
|
||||
|
||||
def attached(self, mind):
|
||||
return self
|
||||
def detached(self, mind):
|
||||
pass
|
||||
|
||||
def perspective_addChange(self, changedict):
|
||||
log.msg("perspective_addChange called")
|
||||
pathnames = []
|
||||
prefixpaths = None
|
||||
for path in changedict['files']:
|
||||
if self.prefix:
|
||||
if not path.startswith(self.prefix):
|
||||
# this file does not start with the prefix, so ignore it
|
||||
continue
|
||||
path = path[len(self.prefix):]
|
||||
pathnames.append(path)
|
||||
|
||||
if pathnames:
|
||||
change = changes.Change(changedict['who'],
|
||||
pathnames,
|
||||
changedict['comments'],
|
||||
branch=changedict.get('branch'),
|
||||
revision=changedict.get('revision'),
|
||||
)
|
||||
self.changemaster.addChange(change)
|
||||
|
||||
class PBChangeSource(base.ChangeSource):
|
||||
compare_attrs = ["user", "passwd", "port", "prefix"]
|
||||
|
||||
def __init__(self, user="change", passwd="changepw", port=None,
|
||||
prefix=None, sep=None):
|
||||
"""I listen on a TCP port for Changes from 'buildbot sendchange'.
|
||||
|
||||
I am a ChangeSource which will accept Changes from a remote source. I
|
||||
share a TCP listening port with the buildslaves.
|
||||
|
||||
Both the 'buildbot sendchange' command and the
|
||||
contrib/svn_buildbot.py tool know how to send changes to me.
|
||||
|
||||
@type prefix: string (or None)
|
||||
@param prefix: if set, I will ignore any filenames that do not start
|
||||
with this string. Moreover I will remove this string
|
||||
from all filenames before creating the Change object
|
||||
and delivering it to the Schedulers. This is useful
|
||||
for changes coming from version control systems that
|
||||
represent branches as parent directories within the
|
||||
repository (like SVN and Perforce). Use a prefix of
|
||||
'trunk/' or 'project/branches/foobranch/' to only
|
||||
follow one branch and to get correct tree-relative
|
||||
filenames.
|
||||
|
||||
@param sep: DEPRECATED (with an axe). sep= was removed in
|
||||
buildbot-0.7.4 . Instead of using it, you should use
|
||||
prefix= with a trailing directory separator. This
|
||||
docstring (and the better-than-nothing error message
|
||||
which occurs when you use it) will be removed in 0.7.5 .
|
||||
"""
|
||||
|
||||
# sep= was removed in 0.7.4 . This more-helpful-than-nothing error
|
||||
# message will be removed in 0.7.5 .
|
||||
assert sep is None, "prefix= is now a complete string, do not use sep="
|
||||
# TODO: current limitations
|
||||
assert user == "change"
|
||||
assert passwd == "changepw"
|
||||
assert port == None
|
||||
self.user = user
|
||||
self.passwd = passwd
|
||||
self.port = port
|
||||
self.prefix = prefix
|
||||
|
||||
def describe(self):
|
||||
# TODO: when the dispatcher is fixed, report the specific port
|
||||
#d = "PB listener on port %d" % self.port
|
||||
d = "PBChangeSource listener on all-purpose slaveport"
|
||||
if self.prefix is not None:
|
||||
d += " (prefix '%s')" % self.prefix
|
||||
return d
|
||||
|
||||
def startService(self):
|
||||
base.ChangeSource.startService(self)
|
||||
# our parent is the ChangeMaster object
|
||||
# find the master's Dispatch object and register our username
|
||||
# TODO: the passwd should be registered here too
|
||||
master = self.parent.parent
|
||||
master.dispatcher.register(self.user, self)
|
||||
|
||||
def stopService(self):
|
||||
base.ChangeSource.stopService(self)
|
||||
# unregister our username
|
||||
master = self.parent.parent
|
||||
master.dispatcher.unregister(self.user)
|
||||
|
||||
def getPerspective(self):
|
||||
return ChangePerspective(self.parent, self.prefix)
|
||||
|
446
tools/buildbot/buildbot/changes/svnpoller.py
Normal file
@ -0,0 +1,446 @@
|
||||
# -*- test-case-name: buildbot.test.test_svnpoller -*-
|
||||
|
||||
# Based on the work of Dave Peticolas for the P4poll
|
||||
# Changed to svn (using xml.dom.minidom) by Niklaus Giger
|
||||
# Hacked beyond recognition by Brian Warner
|
||||
|
||||
import time
|
||||
|
||||
from twisted.python import log
|
||||
from twisted.internet import defer, reactor, utils
|
||||
from twisted.internet.task import LoopingCall
|
||||
|
||||
from buildbot import util
|
||||
from buildbot.changes import base
|
||||
from buildbot.changes.changes import Change
|
||||
|
||||
import xml.dom.minidom
|
||||
|
||||
def _assert(condition, msg):
|
||||
if condition:
|
||||
return True
|
||||
raise AssertionError(msg)
|
||||
|
||||
def dbgMsg(myString):
|
||||
log.msg(myString)
|
||||
return 1
|
||||
|
||||
# these split_file_* functions are available for use as values to the
|
||||
# split_file= argument.
|
||||
def split_file_alwaystrunk(path):
|
||||
return (None, path)
|
||||
|
||||
def split_file_branches(path):
|
||||
# turn trunk/subdir/file.c into (None, "subdir/file.c")
|
||||
# and branches/1.5.x/subdir/file.c into ("branches/1.5.x", "subdir/file.c")
|
||||
pieces = path.split('/')
|
||||
if pieces[0] == 'trunk':
|
||||
return (None, '/'.join(pieces[1:]))
|
||||
elif pieces[0] == 'branches':
|
||||
return ('/'.join(pieces[0:2]), '/'.join(pieces[2:]))
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
class SVNPoller(base.ChangeSource, util.ComparableMixin):
|
||||
"""This source will poll a Subversion repository for changes and submit
|
||||
them to the change master."""
|
||||
|
||||
compare_attrs = ["svnurl", "split_file_function",
|
||||
"svnuser", "svnpasswd",
|
||||
"pollinterval", "histmax",
|
||||
"svnbin"]
|
||||
|
||||
parent = None # filled in when we're added
|
||||
last_change = None
|
||||
loop = None
|
||||
working = False
|
||||
|
||||
def __init__(self, svnurl, split_file=None,
|
||||
svnuser=None, svnpasswd=None,
|
||||
pollinterval=10*60, histmax=100,
|
||||
svnbin='svn'):
|
||||
"""
|
||||
@type svnurl: string
|
||||
@param svnurl: the SVN URL that describes the repository and
|
||||
subdirectory to watch. If this ChangeSource should
|
||||
only pay attention to a single branch, this should
|
||||
point at the repository for that branch, like
|
||||
svn://svn.twistedmatrix.com/svn/Twisted/trunk . If it
|
||||
should follow multiple branches, point it at the
|
||||
repository directory that contains all the branches
|
||||
like svn://svn.twistedmatrix.com/svn/Twisted and also
|
||||
provide a branch-determining function.
|
||||
|
||||
Each file in the repository has a SVN URL in the form
|
||||
(SVNURL)/(BRANCH)/(FILEPATH), where (BRANCH) could be
|
||||
empty or not, depending upon your branch-determining
|
||||
function. Only files that start with (SVNURL)/(BRANCH)
|
||||
will be monitored. The Change objects that are sent to
|
||||
the Schedulers will see (FILEPATH) for each modified
|
||||
file.
|
||||
|
||||
@type split_file: callable or None
|
||||
@param split_file: a function that is called with a string of the
|
||||
form (BRANCH)/(FILEPATH) and should return a tuple
|
||||
(BRANCH, FILEPATH). This function should match
|
||||
your repository's branch-naming policy. Each
|
||||
changed file has a fully-qualified URL that can be
|
||||
split into a prefix (which equals the value of the
|
||||
'svnurl' argument) and a suffix; it is this suffix
|
||||
which is passed to the split_file function.
|
||||
|
||||
If the function returns None, the file is ignored.
|
||||
Use this to indicate that the file is not a part
|
||||
of this project.
|
||||
|
||||
For example, if your repository puts the trunk in
|
||||
trunk/... and branches are in places like
|
||||
branches/1.5/..., your split_file function could
|
||||
look like the following (this function is
|
||||
available as svnpoller.split_file_branches)::
|
||||
|
||||
pieces = path.split('/')
|
||||
if pieces[0] == 'trunk':
|
||||
return (None, '/'.join(pieces[1:]))
|
||||
elif pieces[0] == 'branches':
|
||||
return ('/'.join(pieces[0:2]),
|
||||
'/'.join(pieces[2:]))
|
||||
else:
|
||||
return None
|
||||
|
||||
If instead your repository layout puts the trunk
|
||||
for ProjectA in trunk/ProjectA/... and the 1.5
|
||||
branch in branches/1.5/ProjectA/..., your
|
||||
split_file function could look like::
|
||||
|
||||
pieces = path.split('/')
|
||||
if pieces[0] == 'trunk':
|
||||
branch = None
|
||||
pieces.pop(0) # remove 'trunk'
|
||||
elif pieces[0] == 'branches':
|
||||
pieces.pop(0) # remove 'branches'
|
||||
# grab branch name
|
||||
branch = 'branches/' + pieces.pop(0)
|
||||
else:
|
||||
return None # something weird
|
||||
projectname = pieces.pop(0)
|
||||
if projectname != 'ProjectA':
|
||||
return None # wrong project
|
||||
return (branch, '/'.join(pieces))
|
||||
|
||||
The default of split_file= is None, which
|
||||
indicates that no splitting should be done. This
|
||||
is equivalent to the following function::
|
||||
|
||||
return (None, path)
|
||||
|
||||
If you wish, you can override the split_file
|
||||
method with the same sort of function instead of
|
||||
passing in a split_file= argument.
|
||||
|
||||
|
||||
@type svnuser: string
|
||||
@param svnuser: If set, the --username option will be added to
|
||||
the 'svn log' command. You may need this to get
|
||||
access to a private repository.
|
||||
@type svnpasswd: string
|
||||
@param svnpasswd: If set, the --password option will be added.
|
||||
|
||||
@type pollinterval: int
|
||||
@param pollinterval: interval in seconds between polls. The default
|
||||
is 600 seconds (10 minutes). Smaller values
|
||||
decrease the latency between the time a change
|
||||
is recorded and the time the buildbot notices
|
||||
it, but it also increases the system load.
|
||||
|
||||
@type histmax: int
|
||||
@param histmax: maximum number of changes to look back through.
|
||||
The default is 100. Smaller values decrease
|
||||
system load, but if more than histmax changes
|
||||
are recorded between polls, the extra ones will
|
||||
be silently lost.
|
||||
|
||||
@type svnbin: string
|
||||
@param svnbin: path to svn binary, defaults to just 'svn'. Use
|
||||
this if your subversion command lives in an
|
||||
unusual location.
|
||||
"""
|
||||
|
||||
if svnurl.endswith("/"):
|
||||
svnurl = svnurl[:-1] # strip the trailing slash
|
||||
self.svnurl = svnurl
|
||||
self.split_file_function = split_file or split_file_alwaystrunk
|
||||
self.svnuser = svnuser
|
||||
self.svnpasswd = svnpasswd
|
||||
|
||||
self.svnbin = svnbin
|
||||
self.pollinterval = pollinterval
|
||||
self.histmax = histmax
|
||||
self._prefix = None
|
||||
self.overrun_counter = 0
|
||||
self.loop = LoopingCall(self.checksvn)
|
||||
|
||||
def split_file(self, path):
|
||||
# use getattr() to avoid turning this function into a bound method,
|
||||
# which would require it to have an extra 'self' argument
|
||||
f = getattr(self, "split_file_function")
|
||||
return f(path)
|
||||
|
||||
def startService(self):
|
||||
log.msg("SVNPoller(%s) starting" % self.svnurl)
|
||||
base.ChangeSource.startService(self)
|
||||
# Don't start the loop just yet because the reactor isn't running.
|
||||
# Give it a chance to go and install our SIGCHLD handler before
|
||||
# spawning processes.
|
||||
reactor.callLater(0, self.loop.start, self.pollinterval)
|
||||
|
||||
def stopService(self):
|
||||
log.msg("SVNPoller(%s) shutting down" % self.svnurl)
|
||||
self.loop.stop()
|
||||
return base.ChangeSource.stopService(self)
|
||||
|
||||
def describe(self):
|
||||
return "SVNPoller watching %s" % self.svnurl
|
||||
|
||||
def checksvn(self):
|
||||
# Our return value is only used for unit testing.
|
||||
|
||||
# we need to figure out the repository root, so we can figure out
|
||||
# repository-relative pathnames later. Each SVNURL is in the form
|
||||
# (ROOT)/(PROJECT)/(BRANCH)/(FILEPATH), where (ROOT) is something
|
||||
# like svn://svn.twistedmatrix.com/svn/Twisted (i.e. there is a
|
||||
# physical repository at /svn/Twisted on that host), (PROJECT) is
|
||||
# something like Projects/Twisted (i.e. within the repository's
|
||||
# internal namespace, everything under Projects/Twisted/ has
|
||||
# something to do with Twisted, but these directory names do not
|
||||
# actually appear on the repository host), (BRANCH) is something like
|
||||
# "trunk" or "branches/2.0.x", and (FILEPATH) is a tree-relative
|
||||
# filename like "twisted/internet/defer.py".
|
||||
|
||||
# our self.svnurl attribute contains (ROOT)/(PROJECT) combined
|
||||
# together in a way that we can't separate without svn's help. If the
|
||||
# user is not using the split_file= argument, then self.svnurl might
|
||||
# be (ROOT)/(PROJECT)/(BRANCH) . In any case, the filenames we will
|
||||
# get back from 'svn log' will be of the form
|
||||
# (PROJECT)/(BRANCH)/(FILEPATH), but we want to be able to remove
|
||||
# that (PROJECT) prefix from them. To do this without requiring the
|
||||
# user to tell us how svnurl is split into ROOT and PROJECT, we do an
|
||||
# 'svn info --xml' command at startup. This command will include a
|
||||
# <root> element that tells us ROOT. We then strip this prefix from
|
||||
# self.svnurl to determine PROJECT, and then later we strip the
|
||||
# PROJECT prefix from the filenames reported by 'svn log --xml' to
|
||||
# get a (BRANCH)/(FILEPATH) that can be passed to split_file() to
|
||||
# turn into separate BRANCH and FILEPATH values.
|
||||
|
||||
# whew.
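
        # A concrete (made-up) example of the split described above:
        #   svnurl = svn://svn.example.org/svn/repo/Projects/Foo
        #   <root> = svn://svn.example.org/svn/repo   (from 'svn info --xml')
        #   prefix = Projects/Foo
        # so a path "Projects/Foo/trunk/src/main.c" reported by 'svn log'
        # becomes "trunk/src/main.c", which a split_file function like
        # split_file_branches turns into (None, "src/main.c").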
|
||||
|
||||
if self.working:
|
||||
log.msg("SVNPoller(%s) overrun: timer fired but the previous "
|
||||
"poll had not yet finished.")
|
||||
self.overrun_counter += 1
|
||||
return defer.succeed(None)
|
||||
self.working = True
|
||||
|
||||
log.msg("SVNPoller polling")
|
||||
if not self._prefix:
|
||||
# this sets self._prefix when it finishes. It fires with
|
||||
# self._prefix as well, because that makes the unit tests easier
|
||||
# to write.
|
||||
d = self.get_root()
|
||||
d.addCallback(self.determine_prefix)
|
||||
else:
|
||||
d = defer.succeed(self._prefix)
|
||||
|
||||
d.addCallback(self.get_logs)
|
||||
d.addCallback(self.parse_logs)
|
||||
d.addCallback(self.get_new_logentries)
|
||||
d.addCallback(self.create_changes)
|
||||
d.addCallback(self.submit_changes)
|
||||
d.addBoth(self.finished)
|
||||
return d
|
||||
|
||||
def getProcessOutput(self, args):
|
||||
# this exists so we can override it during the unit tests
|
||||
d = utils.getProcessOutput(self.svnbin, args, {})
|
||||
return d
|
||||
|
||||
def get_root(self):
|
||||
args = ["info", "--xml", "--non-interactive", self.svnurl]
|
||||
if self.svnuser:
|
||||
args.extend(["--username=%s" % self.svnuser])
|
||||
if self.svnpasswd:
|
||||
args.extend(["--password=%s" % self.svnpasswd])
|
||||
d = self.getProcessOutput(args)
|
||||
return d
|
||||
|
||||
def determine_prefix(self, output):
|
||||
try:
|
||||
doc = xml.dom.minidom.parseString(output)
|
||||
except xml.parsers.expat.ExpatError:
|
||||
dbgMsg("_process_changes: ExpatError in %s" % output)
|
||||
log.msg("SVNPoller._determine_prefix_2: ExpatError in '%s'"
|
||||
% output)
|
||||
raise
|
||||
rootnodes = doc.getElementsByTagName("root")
|
||||
if not rootnodes:
|
||||
# this happens if the URL we gave was already the root. In this
|
||||
# case, our prefix is empty.
|
||||
self._prefix = ""
|
||||
return self._prefix
|
||||
rootnode = rootnodes[0]
|
||||
root = "".join([c.data for c in rootnode.childNodes])
|
||||
# root will be a unicode string
|
||||
_assert(self.svnurl.startswith(root),
|
||||
"svnurl='%s' doesn't start with <root>='%s'" %
|
||||
(self.svnurl, root))
|
||||
self._prefix = self.svnurl[len(root):]
|
||||
if self._prefix.startswith("/"):
|
||||
self._prefix = self._prefix[1:]
|
||||
log.msg("SVNPoller: svnurl=%s, root=%s, so prefix=%s" %
|
||||
(self.svnurl, root, self._prefix))
|
||||
return self._prefix
|
||||
|
||||
def get_logs(self, ignored_prefix=None):
|
||||
args = []
|
||||
args.extend(["log", "--xml", "--verbose", "--non-interactive"])
|
||||
if self.svnuser:
|
||||
args.extend(["--username=%s" % self.svnuser])
|
||||
if self.svnpasswd:
|
||||
args.extend(["--password=%s" % self.svnpasswd])
|
||||
args.extend(["--limit=%d" % (self.histmax), self.svnurl])
|
||||
d = self.getProcessOutput(args)
|
||||
return d
|
||||
|
||||
def parse_logs(self, output):
|
||||
# parse the XML output, return a list of <logentry> nodes
|
||||
try:
|
||||
doc = xml.dom.minidom.parseString(output)
|
||||
except xml.parsers.expat.ExpatError:
|
||||
dbgMsg("_process_changes: ExpatError in %s" % output)
|
||||
log.msg("SVNPoller._parse_changes: ExpatError in '%s'" % output)
|
||||
raise
|
||||
logentries = doc.getElementsByTagName("logentry")
|
||||
return logentries
|
||||
|
||||
|
||||
def _filter_new_logentries(self, logentries, last_change):
|
||||
# given a list of logentries, return a tuple of (new_last_change,
|
||||
# new_logentries), where new_logentries contains only the ones after
|
||||
# last_change
|
||||
if not logentries:
|
||||
# no entries, so last_change must stay at None
|
||||
return (None, [])
|
||||
|
||||
mostRecent = int(logentries[0].getAttribute("revision"))
|
||||
|
||||
if last_change is None:
|
||||
# if this is the first time we've been run, ignore any changes
|
||||
# that occurred before now. This prevents a build at every
|
||||
# startup.
|
||||
log.msg('svnPoller: starting at change %s' % mostRecent)
|
||||
return (mostRecent, [])
|
||||
|
||||
if last_change == mostRecent:
|
||||
# an unmodified repository will hit this case
|
||||
log.msg('svnPoller: _process_changes last %s mostRecent %s' % (
|
||||
last_change, mostRecent))
|
||||
return (mostRecent, [])
|
||||
|
||||
new_logentries = []
|
||||
for el in logentries:
|
||||
if last_change == int(el.getAttribute("revision")):
|
||||
break
|
||||
new_logentries.append(el)
|
||||
new_logentries.reverse() # return oldest first
|
||||
return (mostRecent, new_logentries)
|
||||
|
||||
def get_new_logentries(self, logentries):
|
||||
last_change = self.last_change
|
||||
(new_last_change,
|
||||
new_logentries) = self._filter_new_logentries(logentries,
|
||||
self.last_change)
|
||||
self.last_change = new_last_change
|
||||
log.msg('svnPoller: _process_changes %s .. %s' %
|
||||
(last_change, new_last_change))
|
||||
return new_logentries
|
||||
|
||||
|
||||
def _get_text(self, element, tag_name):
|
||||
child_nodes = element.getElementsByTagName(tag_name)[0].childNodes
|
||||
text = "".join([t.data for t in child_nodes])
|
||||
return text
|
||||
|
||||
def _transform_path(self, path):
|
||||
_assert(path.startswith(self._prefix),
|
||||
"filepath '%s' should start with prefix '%s'" %
|
||||
(path, self._prefix))
|
||||
relative_path = path[len(self._prefix):]
|
||||
if relative_path.startswith("/"):
|
||||
relative_path = relative_path[1:]
|
||||
where = self.split_file(relative_path)
|
||||
# 'where' is either None or (branch, final_path)
|
||||
return where
|
||||
|
||||
def create_changes(self, new_logentries):
|
||||
changes = []
|
||||
|
||||
for el in new_logentries:
|
||||
branch_files = [] # get oldest change first
|
||||
# TODO: revisit this, I think I've settled on Change.revision
|
||||
# being a string everywhere, and leaving the interpretation
|
||||
# of that string up to b.s.source.SVN methods
|
||||
revision = int(el.getAttribute("revision"))
|
||||
dbgMsg("Adding change revision %s" % (revision,))
|
||||
# TODO: the rest of buildbot may not be ready for unicode 'who'
|
||||
# values
|
||||
author = self._get_text(el, "author")
|
||||
comments = self._get_text(el, "msg")
|
||||
# there is a "date" field, but it provides localtime in the
|
||||
# repository's timezone, whereas we care about buildmaster's
|
||||
# localtime (since this will get used to position the boxes on
|
||||
# the Waterfall display, etc). So ignore the date field and use
|
||||
# our local clock instead.
|
||||
#when = self._get_text(el, "date")
|
||||
#when = time.mktime(time.strptime("%.19s" % when,
|
||||
# "%Y-%m-%dT%H:%M:%S"))
|
||||
branches = {}
|
||||
pathlist = el.getElementsByTagName("paths")[0]
|
||||
for p in pathlist.getElementsByTagName("path"):
|
||||
path = "".join([t.data for t in p.childNodes])
|
||||
                # the rest of buildbot is certainly not yet ready to handle
|
||||
# unicode filenames, because they get put in RemoteCommands
|
||||
# which get sent via PB to the buildslave, and PB doesn't
|
||||
# handle unicode.
|
||||
path = path.encode("ascii")
|
||||
if path.startswith("/"):
|
||||
path = path[1:]
|
||||
where = self._transform_path(path)
|
||||
# if 'where' is None, the file was outside any project that
|
||||
# we care about and we should ignore it
|
||||
if where:
|
||||
branch, filename = where
|
||||
if not branch in branches:
|
||||
branches[branch] = []
|
||||
branches[branch].append(filename)
|
||||
|
||||
for branch in branches:
|
||||
c = Change(who=author,
|
||||
files=branches[branch],
|
||||
comments=comments,
|
||||
revision=revision,
|
||||
branch=branch)
|
||||
changes.append(c)
|
||||
|
||||
return changes
|
||||
|
||||
def submit_changes(self, changes):
|
||||
for c in changes:
|
||||
self.parent.addChange(c)
|
||||
|
||||
def finished(self, res):
|
||||
log.msg("SVNPoller finished polling")
|
||||
dbgMsg('_finished : %s' % res)
|
||||
assert self.working
|
||||
self.working = False
|
||||
return res
|
0
tools/buildbot/buildbot/clients/__init__.py
Normal file
126
tools/buildbot/buildbot/clients/base.py
Normal file
@ -0,0 +1,126 @@
#! /usr/bin/python

import sys, re

from twisted.spread import pb
from twisted.cred import credentials, error
from twisted.internet import reactor

class StatusClient(pb.Referenceable):
    """To use this, call my .connected method with a RemoteReference to the
    buildmaster's StatusClientPerspective object.
    """

    def __init__(self, events):
        self.builders = {}
        self.events = events

    def connected(self, remote):
        print "connected"
        self.remote = remote
        remote.callRemote("subscribe", self.events, 5, self)

    def remote_builderAdded(self, buildername, builder):
        print "builderAdded", buildername

    def remote_builderRemoved(self, buildername):
        print "builderRemoved", buildername

    def remote_builderChangedState(self, buildername, state, eta):
        print "builderChangedState", buildername, state, eta

    def remote_buildStarted(self, buildername, build):
        print "buildStarted", buildername

    def remote_buildFinished(self, buildername, build, results):
        print "buildFinished", results

    def remote_buildETAUpdate(self, buildername, build, eta):
        print "ETA", buildername, eta

    def remote_stepStarted(self, buildername, build, stepname, step):
        print "stepStarted", buildername, stepname

    def remote_stepFinished(self, buildername, build, stepname, step, results):
        print "stepFinished", buildername, stepname, results

    def remote_stepETAUpdate(self, buildername, build, stepname, step,
                             eta, expectations):
        print "stepETA", buildername, stepname, eta

    def remote_logStarted(self, buildername, build, stepname, step,
                          logname, log):
        print "logStarted", buildername, stepname

    def remote_logFinished(self, buildername, build, stepname, step,
                           logname, log):
        print "logFinished", buildername, stepname

    def remote_logChunk(self, buildername, build, stepname, step, logname, log,
                        channel, text):
        ChunkTypes = ["STDOUT", "STDERR", "HEADER"]
        print "logChunk[%s]: %s" % (ChunkTypes[channel], text)

class TextClient:
    def __init__(self, master, events="steps"):
        """
        @type  events: string, one of builders, builds, steps, logs, full
        @param events: specify what level of detail should be reported.
         - 'builders': only announce new/removed Builders
         - 'builds': also announce builderChangedState, buildStarted, and
           buildFinished
         - 'steps': also announce buildETAUpdate, stepStarted, stepFinished
         - 'logs': also announce stepETAUpdate, logStarted, logFinished
         - 'full': also announce log contents
        """
        self.master = master
        self.listener = StatusClient(events)

    def run(self):
        """Start the TextClient."""
        self.startConnecting()
        reactor.run()

    def startConnecting(self):
        try:
            host, port = re.search(r'(.+):(\d+)', self.master).groups()
            port = int(port)
        except:
            print "unparseable master location '%s'" % self.master
            print " expecting something more like localhost:8007"
            raise
        cf = pb.PBClientFactory()
        creds = credentials.UsernamePassword("statusClient", "clientpw")
        d = cf.login(creds)
        reactor.connectTCP(host, port, cf)
        d.addCallbacks(self.connected, self.not_connected)
        return d
    def connected(self, ref):
        ref.notifyOnDisconnect(self.disconnected)
        self.listener.connected(ref)
    def not_connected(self, why):
        if why.check(error.UnauthorizedLogin):
            print """
Unable to login.. are you sure we are connecting to a
buildbot.status.client.PBListener port and not to the slaveport?
"""
        reactor.stop()
        return why
    def disconnected(self, ref):
        print "lost connection"
        # we can get here in one of two ways: the buildmaster has
        # disconnected us (probably because it shut itself down), or because
        # we've been SIGINT'ed. In the latter case, our reactor is already
        # shut down, but we have no easy way of detecting that. So protect
        # our attempt to shut down the reactor.
        try:
            reactor.stop()
        except RuntimeError:
            pass

if __name__ == '__main__':
    master = "localhost:8007"
    if len(sys.argv) > 1:
        master = sys.argv[1]
    c = TextClient(master)
    c.run()
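# Illustration only (not from the original file): a minimal sketch of driving
# the text-mode status client from another script. It assumes the buildmaster
# exposes a buildbot.status.client.PBListener on port 8007 using the
# statusClient/clientpw credentials hard-coded above.
from buildbot.clients.base import TextClient

watcher = TextClient("localhost:8007", events="builds")  # see __init__ for the other levels
watcher.run()  # blocks in the Twisted reactor until the connection is lost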
684
tools/buildbot/buildbot/clients/debug.glade
Normal file
@ -0,0 +1,684 @@
|
||||
<?xml version="1.0" standalone="no"?> <!--*- mode: xml -*-->
|
||||
<!DOCTYPE glade-interface SYSTEM "http://glade.gnome.org/glade-2.0.dtd">
|
||||
|
||||
<glade-interface>
|
||||
<requires lib="gnome"/>
|
||||
|
||||
<widget class="GtkWindow" id="window1">
|
||||
<property name="visible">True</property>
|
||||
<property name="title" translatable="yes">Buildbot Debug Tool</property>
|
||||
<property name="type">GTK_WINDOW_TOPLEVEL</property>
|
||||
<property name="window_position">GTK_WIN_POS_NONE</property>
|
||||
<property name="modal">False</property>
|
||||
<property name="resizable">True</property>
|
||||
<property name="destroy_with_parent">False</property>
|
||||
<property name="decorated">True</property>
|
||||
<property name="skip_taskbar_hint">False</property>
|
||||
<property name="skip_pager_hint">False</property>
|
||||
<property name="type_hint">GDK_WINDOW_TYPE_HINT_NORMAL</property>
|
||||
<property name="gravity">GDK_GRAVITY_NORTH_WEST</property>
|
||||
<property name="focus_on_map">True</property>
|
||||
<property name="urgency_hint">False</property>
|
||||
|
||||
<child>
|
||||
<widget class="GtkVBox" id="vbox1">
|
||||
<property name="visible">True</property>
|
||||
<property name="homogeneous">False</property>
|
||||
<property name="spacing">0</property>
|
||||
|
||||
<child>
|
||||
<widget class="GtkHBox" id="connection">
|
||||
<property name="visible">True</property>
|
||||
<property name="homogeneous">False</property>
|
||||
<property name="spacing">0</property>
|
||||
|
||||
<child>
|
||||
<widget class="GtkButton" id="connectbutton">
|
||||
<property name="visible">True</property>
|
||||
<property name="can_focus">True</property>
|
||||
<property name="label" translatable="yes">Connect</property>
|
||||
<property name="use_underline">True</property>
|
||||
<property name="relief">GTK_RELIEF_NORMAL</property>
|
||||
<property name="focus_on_click">True</property>
|
||||
<signal name="clicked" handler="do_connect"/>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">False</property>
|
||||
<property name="fill">False</property>
|
||||
</packing>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkLabel" id="connectlabel">
|
||||
<property name="visible">True</property>
|
||||
<property name="label" translatable="yes">Disconnected</property>
|
||||
<property name="use_underline">False</property>
|
||||
<property name="use_markup">False</property>
|
||||
<property name="justify">GTK_JUSTIFY_CENTER</property>
|
||||
<property name="wrap">False</property>
|
||||
<property name="selectable">False</property>
|
||||
<property name="xalign">0.5</property>
|
||||
<property name="yalign">0.5</property>
|
||||
<property name="xpad">0</property>
|
||||
<property name="ypad">0</property>
|
||||
<property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
|
||||
<property name="width_chars">-1</property>
|
||||
<property name="single_line_mode">False</property>
|
||||
<property name="angle">0</property>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">True</property>
|
||||
<property name="fill">True</property>
|
||||
</packing>
|
||||
</child>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">False</property>
|
||||
<property name="fill">False</property>
|
||||
</packing>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkHBox" id="commands">
|
||||
<property name="visible">True</property>
|
||||
<property name="homogeneous">False</property>
|
||||
<property name="spacing">0</property>
|
||||
|
||||
<child>
|
||||
<widget class="GtkButton" id="reload">
|
||||
<property name="visible">True</property>
|
||||
<property name="can_focus">True</property>
|
||||
<property name="label" translatable="yes">Reload .cfg</property>
|
||||
<property name="use_underline">True</property>
|
||||
<property name="relief">GTK_RELIEF_NORMAL</property>
|
||||
<property name="focus_on_click">True</property>
|
||||
<signal name="clicked" handler="do_reload" last_modification_time="Wed, 24 Sep 2003 20:47:55 GMT"/>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">False</property>
|
||||
<property name="fill">False</property>
|
||||
</packing>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkButton" id="rebuild">
|
||||
<property name="visible">True</property>
|
||||
<property name="sensitive">False</property>
|
||||
<property name="can_focus">True</property>
|
||||
<property name="label" translatable="yes">Rebuild .py</property>
|
||||
<property name="use_underline">True</property>
|
||||
<property name="relief">GTK_RELIEF_NORMAL</property>
|
||||
<property name="focus_on_click">True</property>
|
||||
<signal name="clicked" handler="do_rebuild" last_modification_time="Wed, 24 Sep 2003 20:49:18 GMT"/>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">False</property>
|
||||
<property name="fill">False</property>
|
||||
</packing>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkButton" id="button7">
|
||||
<property name="visible">True</property>
|
||||
<property name="can_focus">True</property>
|
||||
<property name="label" translatable="yes">poke IRC</property>
|
||||
<property name="use_underline">True</property>
|
||||
<property name="relief">GTK_RELIEF_NORMAL</property>
|
||||
<property name="focus_on_click">True</property>
|
||||
<signal name="clicked" handler="do_poke_irc" last_modification_time="Wed, 14 Jan 2004 22:23:59 GMT"/>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">False</property>
|
||||
<property name="fill">False</property>
|
||||
</packing>
|
||||
</child>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">True</property>
|
||||
<property name="fill">True</property>
|
||||
</packing>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkHBox" id="hbox3">
|
||||
<property name="visible">True</property>
|
||||
<property name="homogeneous">False</property>
|
||||
<property name="spacing">0</property>
|
||||
|
||||
<child>
|
||||
<widget class="GtkCheckButton" id="usebranch">
|
||||
<property name="visible">True</property>
|
||||
<property name="can_focus">True</property>
|
||||
<property name="label" translatable="yes">Branch:</property>
|
||||
<property name="use_underline">True</property>
|
||||
<property name="relief">GTK_RELIEF_NORMAL</property>
|
||||
<property name="focus_on_click">True</property>
|
||||
<property name="active">False</property>
|
||||
<property name="inconsistent">False</property>
|
||||
<property name="draw_indicator">True</property>
|
||||
<signal name="toggled" handler="on_usebranch_toggled" last_modification_time="Tue, 25 Oct 2005 01:42:45 GMT"/>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">False</property>
|
||||
<property name="fill">False</property>
|
||||
</packing>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkEntry" id="branch">
|
||||
<property name="visible">True</property>
|
||||
<property name="can_focus">True</property>
|
||||
<property name="editable">True</property>
|
||||
<property name="visibility">True</property>
|
||||
<property name="max_length">0</property>
|
||||
<property name="text" translatable="yes"></property>
|
||||
<property name="has_frame">True</property>
|
||||
<property name="invisible_char">*</property>
|
||||
<property name="activates_default">False</property>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">True</property>
|
||||
<property name="fill">True</property>
|
||||
</packing>
|
||||
</child>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">True</property>
|
||||
<property name="fill">True</property>
|
||||
</packing>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkHBox" id="hbox1">
|
||||
<property name="visible">True</property>
|
||||
<property name="homogeneous">False</property>
|
||||
<property name="spacing">0</property>
|
||||
|
||||
<child>
|
||||
<widget class="GtkCheckButton" id="userevision">
|
||||
<property name="visible">True</property>
|
||||
<property name="can_focus">True</property>
|
||||
<property name="label" translatable="yes">Revision:</property>
|
||||
<property name="use_underline">True</property>
|
||||
<property name="relief">GTK_RELIEF_NORMAL</property>
|
||||
<property name="focus_on_click">True</property>
|
||||
<property name="active">False</property>
|
||||
<property name="inconsistent">False</property>
|
||||
<property name="draw_indicator">True</property>
|
||||
<signal name="toggled" handler="on_userevision_toggled" last_modification_time="Wed, 08 Sep 2004 17:58:33 GMT"/>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">False</property>
|
||||
<property name="fill">False</property>
|
||||
</packing>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkEntry" id="revision">
|
||||
<property name="visible">True</property>
|
||||
<property name="can_focus">True</property>
|
||||
<property name="editable">True</property>
|
||||
<property name="visibility">True</property>
|
||||
<property name="max_length">0</property>
|
||||
<property name="text" translatable="yes"></property>
|
||||
<property name="has_frame">True</property>
|
||||
<property name="invisible_char">*</property>
|
||||
<property name="activates_default">False</property>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">True</property>
|
||||
<property name="fill">True</property>
|
||||
</packing>
|
||||
</child>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">True</property>
|
||||
<property name="fill">True</property>
|
||||
</packing>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkFrame" id="Commit">
|
||||
<property name="border_width">4</property>
|
||||
<property name="visible">True</property>
|
||||
<property name="label_xalign">0</property>
|
||||
<property name="label_yalign">0.5</property>
|
||||
<property name="shadow_type">GTK_SHADOW_ETCHED_IN</property>
|
||||
|
||||
<child>
|
||||
<widget class="GtkAlignment" id="alignment1">
|
||||
<property name="visible">True</property>
|
||||
<property name="xalign">0.5</property>
|
||||
<property name="yalign">0.5</property>
|
||||
<property name="xscale">1</property>
|
||||
<property name="yscale">1</property>
|
||||
<property name="top_padding">0</property>
|
||||
<property name="bottom_padding">0</property>
|
||||
<property name="left_padding">0</property>
|
||||
<property name="right_padding">0</property>
|
||||
|
||||
<child>
|
||||
<widget class="GtkVBox" id="vbox3">
|
||||
<property name="visible">True</property>
|
||||
<property name="homogeneous">False</property>
|
||||
<property name="spacing">0</property>
|
||||
|
||||
<child>
|
||||
<widget class="GtkHBox" id="commit">
|
||||
<property name="visible">True</property>
|
||||
<property name="homogeneous">False</property>
|
||||
<property name="spacing">0</property>
|
||||
|
||||
<child>
|
||||
<widget class="GtkButton" id="button2">
|
||||
<property name="visible">True</property>
|
||||
<property name="can_focus">True</property>
|
||||
<property name="label" translatable="yes">commit</property>
|
||||
<property name="use_underline">True</property>
|
||||
<property name="relief">GTK_RELIEF_NORMAL</property>
|
||||
<property name="focus_on_click">True</property>
|
||||
<signal name="clicked" handler="do_commit"/>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">False</property>
|
||||
<property name="fill">False</property>
|
||||
</packing>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkEntry" id="filename">
|
||||
<property name="visible">True</property>
|
||||
<property name="can_focus">True</property>
|
||||
<property name="editable">True</property>
|
||||
<property name="visibility">True</property>
|
||||
<property name="max_length">0</property>
|
||||
<property name="text" translatable="yes">twisted/internet/app.py</property>
|
||||
<property name="has_frame">True</property>
|
||||
<property name="invisible_char">*</property>
|
||||
<property name="activates_default">False</property>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">True</property>
|
||||
<property name="fill">True</property>
|
||||
</packing>
|
||||
</child>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">True</property>
|
||||
<property name="fill">True</property>
|
||||
</packing>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkHBox" id="hbox2">
|
||||
<property name="visible">True</property>
|
||||
<property name="homogeneous">False</property>
|
||||
<property name="spacing">0</property>
|
||||
|
||||
<child>
|
||||
<widget class="GtkLabel" id="label5">
|
||||
<property name="visible">True</property>
|
||||
<property name="label" translatable="yes">Who: </property>
|
||||
<property name="use_underline">False</property>
|
||||
<property name="use_markup">False</property>
|
||||
<property name="justify">GTK_JUSTIFY_LEFT</property>
|
||||
<property name="wrap">False</property>
|
||||
<property name="selectable">False</property>
|
||||
<property name="xalign">0.5</property>
|
||||
<property name="yalign">0.5</property>
|
||||
<property name="xpad">0</property>
|
||||
<property name="ypad">0</property>
|
||||
<property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
|
||||
<property name="width_chars">-1</property>
|
||||
<property name="single_line_mode">False</property>
|
||||
<property name="angle">0</property>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">False</property>
|
||||
<property name="fill">False</property>
|
||||
</packing>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkEntry" id="who">
|
||||
<property name="visible">True</property>
|
||||
<property name="can_focus">True</property>
|
||||
<property name="editable">True</property>
|
||||
<property name="visibility">True</property>
|
||||
<property name="max_length">0</property>
|
||||
<property name="text" translatable="yes">bob</property>
|
||||
<property name="has_frame">True</property>
|
||||
<property name="invisible_char">*</property>
|
||||
<property name="activates_default">False</property>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">True</property>
|
||||
<property name="fill">True</property>
|
||||
</packing>
|
||||
</child>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">True</property>
|
||||
<property name="fill">True</property>
|
||||
</packing>
|
||||
</child>
|
||||
</widget>
|
||||
</child>
|
||||
</widget>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkLabel" id="label4">
|
||||
<property name="visible">True</property>
|
||||
<property name="label" translatable="yes">Commit</property>
|
||||
<property name="use_underline">False</property>
|
||||
<property name="use_markup">False</property>
|
||||
<property name="justify">GTK_JUSTIFY_LEFT</property>
|
||||
<property name="wrap">False</property>
|
||||
<property name="selectable">False</property>
|
||||
<property name="xalign">0.5</property>
|
||||
<property name="yalign">0.5</property>
|
||||
<property name="xpad">2</property>
|
||||
<property name="ypad">0</property>
|
||||
<property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
|
||||
<property name="width_chars">-1</property>
|
||||
<property name="single_line_mode">False</property>
|
||||
<property name="angle">0</property>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="type">label_item</property>
|
||||
</packing>
|
||||
</child>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">True</property>
|
||||
<property name="fill">True</property>
|
||||
</packing>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkFrame" id="builderframe">
|
||||
<property name="border_width">4</property>
|
||||
<property name="visible">True</property>
|
||||
<property name="label_xalign">0</property>
|
||||
<property name="label_yalign">0.5</property>
|
||||
<property name="shadow_type">GTK_SHADOW_ETCHED_IN</property>
|
||||
|
||||
<child>
|
||||
<widget class="GtkVBox" id="vbox2">
|
||||
<property name="visible">True</property>
|
||||
<property name="homogeneous">False</property>
|
||||
<property name="spacing">0</property>
|
||||
|
||||
<child>
|
||||
<widget class="GtkHBox" id="builder">
|
||||
<property name="visible">True</property>
|
||||
<property name="homogeneous">False</property>
|
||||
<property name="spacing">3</property>
|
||||
|
||||
<child>
|
||||
<widget class="GtkLabel" id="label1">
|
||||
<property name="visible">True</property>
|
||||
<property name="label" translatable="yes">Builder:</property>
|
||||
<property name="use_underline">False</property>
|
||||
<property name="use_markup">False</property>
|
||||
<property name="justify">GTK_JUSTIFY_CENTER</property>
|
||||
<property name="wrap">False</property>
|
||||
<property name="selectable">False</property>
|
||||
<property name="xalign">0.5</property>
|
||||
<property name="yalign">0.5</property>
|
||||
<property name="xpad">0</property>
|
||||
<property name="ypad">0</property>
|
||||
<property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
|
||||
<property name="width_chars">-1</property>
|
||||
<property name="single_line_mode">False</property>
|
||||
<property name="angle">0</property>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">False</property>
|
||||
<property name="fill">False</property>
|
||||
</packing>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkEntry" id="buildname">
|
||||
<property name="visible">True</property>
|
||||
<property name="can_focus">True</property>
|
||||
<property name="editable">True</property>
|
||||
<property name="visibility">True</property>
|
||||
<property name="max_length">0</property>
|
||||
<property name="text" translatable="yes">one</property>
|
||||
<property name="has_frame">True</property>
|
||||
<property name="invisible_char">*</property>
|
||||
<property name="activates_default">False</property>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">True</property>
|
||||
<property name="fill">True</property>
|
||||
</packing>
|
||||
</child>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">True</property>
|
||||
<property name="fill">True</property>
|
||||
</packing>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkHBox" id="buildercontrol">
|
||||
<property name="visible">True</property>
|
||||
<property name="homogeneous">False</property>
|
||||
<property name="spacing">0</property>
|
||||
|
||||
<child>
|
||||
<widget class="GtkButton" id="button1">
|
||||
<property name="visible">True</property>
|
||||
<property name="can_focus">True</property>
|
||||
<property name="label" translatable="yes">Request
|
||||
Build</property>
|
||||
<property name="use_underline">True</property>
|
||||
<property name="relief">GTK_RELIEF_NORMAL</property>
|
||||
<property name="focus_on_click">True</property>
|
||||
<signal name="clicked" handler="do_build"/>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">False</property>
|
||||
<property name="fill">False</property>
|
||||
</packing>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkButton" id="button8">
|
||||
<property name="visible">True</property>
|
||||
<property name="can_focus">True</property>
|
||||
<property name="label" translatable="yes">Ping
|
||||
Builder</property>
|
||||
<property name="use_underline">True</property>
|
||||
<property name="relief">GTK_RELIEF_NORMAL</property>
|
||||
<property name="focus_on_click">True</property>
|
||||
<signal name="clicked" handler="do_ping" last_modification_time="Fri, 24 Nov 2006 05:18:51 GMT"/>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">False</property>
|
||||
<property name="fill">False</property>
|
||||
</packing>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<placeholder/>
|
||||
</child>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">True</property>
|
||||
<property name="fill">True</property>
|
||||
</packing>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkHBox" id="status">
|
||||
<property name="visible">True</property>
|
||||
<property name="homogeneous">False</property>
|
||||
<property name="spacing">0</property>
|
||||
|
||||
<child>
|
||||
<widget class="GtkLabel" id="label2">
|
||||
<property name="visible">True</property>
|
||||
<property name="label" translatable="yes">Currently:</property>
|
||||
<property name="use_underline">False</property>
|
||||
<property name="use_markup">False</property>
|
||||
<property name="justify">GTK_JUSTIFY_CENTER</property>
|
||||
<property name="wrap">False</property>
|
||||
<property name="selectable">False</property>
|
||||
<property name="xalign">0.5</property>
|
||||
<property name="yalign">0.5</property>
|
||||
<property name="xpad">7</property>
|
||||
<property name="ypad">0</property>
|
||||
<property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
|
||||
<property name="width_chars">-1</property>
|
||||
<property name="single_line_mode">False</property>
|
||||
<property name="angle">0</property>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">False</property>
|
||||
<property name="fill">False</property>
|
||||
</packing>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkButton" id="button3">
|
||||
<property name="visible">True</property>
|
||||
<property name="can_focus">True</property>
|
||||
<property name="label" translatable="yes">offline</property>
|
||||
<property name="use_underline">True</property>
|
||||
<property name="relief">GTK_RELIEF_NORMAL</property>
|
||||
<property name="focus_on_click">True</property>
|
||||
<signal name="clicked" handler="do_current_offline"/>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">False</property>
|
||||
<property name="fill">False</property>
|
||||
</packing>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkButton" id="button4">
|
||||
<property name="visible">True</property>
|
||||
<property name="can_focus">True</property>
|
||||
<property name="label" translatable="yes">idle</property>
|
||||
<property name="use_underline">True</property>
|
||||
<property name="relief">GTK_RELIEF_NORMAL</property>
|
||||
<property name="focus_on_click">True</property>
|
||||
<signal name="clicked" handler="do_current_idle"/>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">False</property>
|
||||
<property name="fill">False</property>
|
||||
</packing>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkButton" id="button5">
|
||||
<property name="visible">True</property>
|
||||
<property name="can_focus">True</property>
|
||||
<property name="label" translatable="yes">waiting</property>
|
||||
<property name="use_underline">True</property>
|
||||
<property name="relief">GTK_RELIEF_NORMAL</property>
|
||||
<property name="focus_on_click">True</property>
|
||||
<signal name="clicked" handler="do_current_waiting"/>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">False</property>
|
||||
<property name="fill">False</property>
|
||||
</packing>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkButton" id="button6">
|
||||
<property name="visible">True</property>
|
||||
<property name="can_focus">True</property>
|
||||
<property name="label" translatable="yes">building</property>
|
||||
<property name="use_underline">True</property>
|
||||
<property name="relief">GTK_RELIEF_NORMAL</property>
|
||||
<property name="focus_on_click">True</property>
|
||||
<signal name="clicked" handler="do_current_building"/>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">False</property>
|
||||
<property name="fill">False</property>
|
||||
</packing>
|
||||
</child>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">True</property>
|
||||
<property name="fill">True</property>
|
||||
</packing>
|
||||
</child>
|
||||
</widget>
|
||||
</child>
|
||||
|
||||
<child>
|
||||
<widget class="GtkLabel" id="label3">
|
||||
<property name="visible">True</property>
|
||||
<property name="label" translatable="yes">Builder</property>
|
||||
<property name="use_underline">False</property>
|
||||
<property name="use_markup">False</property>
|
||||
<property name="justify">GTK_JUSTIFY_LEFT</property>
|
||||
<property name="wrap">False</property>
|
||||
<property name="selectable">False</property>
|
||||
<property name="xalign">0.5</property>
|
||||
<property name="yalign">0.5</property>
|
||||
<property name="xpad">2</property>
|
||||
<property name="ypad">0</property>
|
||||
<property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
|
||||
<property name="width_chars">-1</property>
|
||||
<property name="single_line_mode">False</property>
|
||||
<property name="angle">0</property>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="type">label_item</property>
|
||||
</packing>
|
||||
</child>
|
||||
</widget>
|
||||
<packing>
|
||||
<property name="padding">0</property>
|
||||
<property name="expand">True</property>
|
||||
<property name="fill">True</property>
|
||||
</packing>
|
||||
</child>
|
||||
</widget>
|
||||
</child>
|
||||
</widget>
|
||||
|
||||
</glade-interface>
|
183
tools/buildbot/buildbot/clients/debug.py
Normal file
@ -0,0 +1,183 @@
|
||||
#! /usr/bin/python
|
||||
|
||||
from twisted.internet import gtk2reactor
|
||||
gtk2reactor.install()
|
||||
from twisted.internet import reactor
|
||||
from twisted.python import util
|
||||
from twisted.spread import pb
|
||||
from twisted.cred import credentials
|
||||
import gtk, gtk.glade, gnome.ui
|
||||
import sys, re
|
||||
|
||||
class DebugWidget:
|
||||
def __init__(self, master="localhost:8007", passwd="debugpw"):
|
||||
self.connected = 0
|
||||
try:
|
||||
host, port = re.search(r'(.+):(\d+)', master).groups()
|
||||
except:
|
||||
print "unparseable master location '%s'" % master
|
||||
print " expecting something more like localhost:8007"
|
||||
raise
|
||||
self.host = host
|
||||
self.port = int(port)
|
||||
self.passwd = passwd
|
||||
self.remote = None
|
||||
xml = self.xml = gtk.glade.XML(util.sibpath(__file__, "debug.glade"))
|
||||
g = xml.get_widget
|
||||
self.buildname = g('buildname')
|
||||
self.filename = g('filename')
|
||||
self.connectbutton = g('connectbutton')
|
||||
self.connectlabel = g('connectlabel')
|
||||
g('window1').connect('destroy', lambda win: gtk.main_quit())
|
||||
# put the master info in the window's titlebar
|
||||
g('window1').set_title("Buildbot Debug Tool: %s" % master)
|
||||
c = xml.signal_connect
|
||||
c('do_connect', self.do_connect)
|
||||
c('do_reload', self.do_reload)
|
||||
c('do_rebuild', self.do_rebuild)
|
||||
c('do_poke_irc', self.do_poke_irc)
|
||||
c('do_build', self.do_build)
|
||||
c('do_ping', self.do_ping)
|
||||
c('do_commit', self.do_commit)
|
||||
c('on_usebranch_toggled', self.usebranch_toggled)
|
||||
self.usebranch_toggled(g('usebranch'))
|
||||
c('on_userevision_toggled', self.userevision_toggled)
|
||||
self.userevision_toggled(g('userevision'))
|
||||
c('do_current_offline', self.do_current, "offline")
|
||||
c('do_current_idle', self.do_current, "idle")
|
||||
c('do_current_waiting', self.do_current, "waiting")
|
||||
c('do_current_building', self.do_current, "building")
|
||||
|
||||
def do_connect(self, widget):
|
||||
if self.connected:
|
||||
self.connectlabel.set_text("Disconnecting...")
|
||||
if self.remote:
|
||||
self.remote.broker.transport.loseConnection()
|
||||
else:
|
||||
self.connectlabel.set_text("Connecting...")
|
||||
f = pb.PBClientFactory()
|
||||
creds = credentials.UsernamePassword("debug", self.passwd)
|
||||
d = f.login(creds)
|
||||
reactor.connectTCP(self.host, int(self.port), f)
|
||||
d.addCallbacks(self.connect_complete, self.connect_failed)
|
||||
def connect_complete(self, ref):
|
||||
self.connectbutton.set_label("Disconnect")
|
||||
self.connectlabel.set_text("Connected")
|
||||
self.connected = 1
|
||||
self.remote = ref
|
||||
self.remote.callRemote("print", "hello cleveland")
|
||||
self.remote.notifyOnDisconnect(self.disconnected)
|
||||
def connect_failed(self, why):
|
||||
self.connectlabel.set_text("Failed")
|
||||
print why
|
||||
def disconnected(self, ref):
|
||||
self.connectbutton.set_label("Connect")
|
||||
self.connectlabel.set_text("Disconnected")
|
||||
self.connected = 0
|
||||
self.remote = None
|
||||
|
||||
def do_reload(self, widget):
|
||||
if not self.remote:
|
||||
return
|
||||
d = self.remote.callRemote("reload")
|
||||
d.addErrback(self.err)
|
||||
def do_rebuild(self, widget):
|
||||
print "Not yet implemented"
|
||||
return
|
||||
def do_poke_irc(self, widget):
|
||||
if not self.remote:
|
||||
return
|
||||
d = self.remote.callRemote("pokeIRC")
|
||||
d.addErrback(self.err)
|
||||
|
||||
def do_build(self, widget):
|
||||
if not self.remote:
|
||||
return
|
||||
name = self.buildname.get_text()
|
||||
branch = None
|
||||
if self.xml.get_widget("usebranch").get_active():
|
||||
branch = self.xml.get_widget('branch').get_text()
|
||||
if branch == '':
|
||||
branch = None
|
||||
revision = None
|
||||
if self.xml.get_widget("userevision").get_active():
|
||||
revision = self.xml.get_widget('revision').get_text()
|
||||
if revision == '':
|
||||
revision = None
|
||||
reason = "debugclient 'Request Build' button pushed"
|
||||
d = self.remote.callRemote("requestBuild",
|
||||
name, reason, branch, revision)
|
||||
d.addErrback(self.err)
|
||||
|
||||
def do_ping(self, widget):
|
||||
if not self.remote:
|
||||
return
|
||||
name = self.buildname.get_text()
|
||||
d = self.remote.callRemote("pingBuilder", name)
|
||||
d.addErrback(self.err)
|
||||
|
||||
def usebranch_toggled(self, widget):
|
||||
rev = self.xml.get_widget('branch')
|
||||
if widget.get_active():
|
||||
rev.set_sensitive(True)
|
||||
else:
|
||||
rev.set_sensitive(False)
|
||||
|
||||
def userevision_toggled(self, widget):
|
||||
rev = self.xml.get_widget('revision')
|
||||
if widget.get_active():
|
||||
rev.set_sensitive(True)
|
||||
else:
|
||||
rev.set_sensitive(False)
|
||||
|
||||
def do_commit(self, widget):
|
||||
if not self.remote:
|
||||
return
|
||||
filename = self.filename.get_text()
|
||||
who = self.xml.get_widget("who").get_text()
|
||||
|
||||
branch = None
|
||||
if self.xml.get_widget("usebranch").get_active():
|
||||
branch = self.xml.get_widget('branch').get_text()
|
||||
if branch == '':
|
||||
branch = None
|
||||
|
||||
revision = None
|
||||
if self.xml.get_widget("userevision").get_active():
|
||||
revision = self.xml.get_widget('revision').get_text()
|
||||
try:
|
||||
revision = int(revision)
|
||||
except ValueError:
|
||||
pass
|
||||
if revision == '':
|
||||
revision = None
|
||||
|
||||
kwargs = { 'revision': revision, 'who': who }
|
||||
if branch:
|
||||
kwargs['branch'] = branch
|
||||
d = self.remote.callRemote("fakeChange", filename, **kwargs)
|
||||
d.addErrback(self.err)
|
||||
|
||||
def do_current(self, widget, state):
|
||||
if not self.remote:
|
||||
return
|
||||
name = self.buildname.get_text()
|
||||
d = self.remote.callRemote("setCurrentState", name, state)
|
||||
d.addErrback(self.err)
|
||||
def err(self, failure):
|
||||
print "received error"
|
||||
failure.printTraceback()
|
||||
|
||||
|
||||
def run(self):
|
||||
reactor.run()
|
||||
|
||||
if __name__ == '__main__':
|
||||
master = "localhost:8007"
|
||||
if len(sys.argv) > 1:
|
||||
master = sys.argv[1]
|
||||
passwd = "debugpw"
|
||||
if len(sys.argv) > 2:
|
||||
passwd = sys.argv[2]
|
||||
d = DebugWidget(master, passwd)
|
||||
d.run()
|
526
tools/buildbot/buildbot/clients/gtkPanes.py
Normal file
@ -0,0 +1,526 @@
|
||||
#! /usr/bin/python
|
||||
|
||||
from twisted.internet import gtk2reactor
|
||||
gtk2reactor.install()
|
||||
|
||||
from twisted.internet import reactor
|
||||
|
||||
import sys, time
|
||||
|
||||
import pygtk
|
||||
pygtk.require("2.0")
|
||||
import gobject, gtk
|
||||
assert(gtk.Window) # in gtk1 it's gtk.GtkWindow
|
||||
|
||||
from twisted.spread import pb
|
||||
|
||||
#from buildbot.clients.base import Builder, Client
|
||||
from buildbot.clients.base import TextClient
|
||||
from buildbot.util import now
|
||||
|
||||
'''
|
||||
class Pane:
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
class OneRow(Pane):
|
||||
"""This is a one-row status bar. It has one square per Builder, and that
|
||||
square is either red, yellow, or green. """
|
||||
|
||||
def __init__(self):
|
||||
Pane.__init__(self)
|
||||
self.widget = gtk.VBox(gtk.FALSE, 2)
|
||||
self.nameBox = gtk.HBox(gtk.TRUE)
|
||||
self.statusBox = gtk.HBox(gtk.TRUE)
|
||||
self.widget.add(self.nameBox)
|
||||
self.widget.add(self.statusBox)
|
||||
self.widget.show_all()
|
||||
self.builders = []
|
||||
|
||||
def getWidget(self):
|
||||
return self.widget
|
||||
def addBuilder(self, builder):
|
||||
print "OneRow.addBuilder"
|
||||
# todo: ordering. Should follow the order in which they were added
|
||||
# to the original BotMaster
|
||||
self.builders.append(builder)
|
||||
# add the name to the left column, and a label (with background) to
|
||||
# the right
|
||||
name = gtk.Label(builder.name)
|
||||
status = gtk.Label('??')
|
||||
status.set_size_request(64,64)
|
||||
box = gtk.EventBox()
|
||||
box.add(status)
|
||||
name.show()
|
||||
box.show_all()
|
||||
self.nameBox.add(name)
|
||||
self.statusBox.add(box)
|
||||
builder.haveSomeWidgets([name, status, box])
|
||||
|
||||
class R2Builder(Builder):
|
||||
def start(self):
|
||||
self.nameSquare.set_text(self.name)
|
||||
self.statusSquare.set_text("???")
|
||||
self.subscribe()
|
||||
def haveSomeWidgets(self, widgets):
|
||||
self.nameSquare, self.statusSquare, self.statusBox = widgets
|
||||
|
||||
def remote_newLastBuildStatus(self, event):
|
||||
color = None
|
||||
if event:
|
||||
text = "\n".join(event.text)
|
||||
color = event.color
|
||||
else:
|
||||
text = "none"
|
||||
self.statusSquare.set_text(text)
|
||||
if color:
|
||||
print "color", color
|
||||
self.statusBox.modify_bg(gtk.STATE_NORMAL,
|
||||
gtk.gdk.color_parse(color))
|
||||
|
||||
def remote_currentlyOffline(self):
|
||||
self.statusSquare.set_text("offline")
|
||||
def remote_currentlyIdle(self):
|
||||
self.statusSquare.set_text("idle")
|
||||
def remote_currentlyWaiting(self, seconds):
|
||||
self.statusSquare.set_text("waiting")
|
||||
def remote_currentlyInterlocked(self):
|
||||
self.statusSquare.set_text("interlocked")
|
||||
def remote_currentlyBuilding(self, eta):
|
||||
self.statusSquare.set_text("building")
|
||||
|
||||
|
||||
class CompactRow(Pane):
|
||||
def __init__(self):
|
||||
Pane.__init__(self)
|
||||
self.widget = gtk.VBox(gtk.FALSE, 3)
|
||||
self.nameBox = gtk.HBox(gtk.TRUE, 2)
|
||||
self.lastBuildBox = gtk.HBox(gtk.TRUE, 2)
|
||||
self.statusBox = gtk.HBox(gtk.TRUE, 2)
|
||||
self.widget.add(self.nameBox)
|
||||
self.widget.add(self.lastBuildBox)
|
||||
self.widget.add(self.statusBox)
|
||||
self.widget.show_all()
|
||||
self.builders = []
|
||||
|
||||
def getWidget(self):
|
||||
return self.widget
|
||||
|
||||
def addBuilder(self, builder):
|
||||
self.builders.append(builder)
|
||||
|
||||
name = gtk.Label(builder.name)
|
||||
name.show()
|
||||
self.nameBox.add(name)
|
||||
|
||||
last = gtk.Label('??')
|
||||
last.set_size_request(64,64)
|
||||
lastbox = gtk.EventBox()
|
||||
lastbox.add(last)
|
||||
lastbox.show_all()
|
||||
self.lastBuildBox.add(lastbox)
|
||||
|
||||
status = gtk.Label('??')
|
||||
status.set_size_request(64,64)
|
||||
statusbox = gtk.EventBox()
|
||||
statusbox.add(status)
|
||||
statusbox.show_all()
|
||||
self.statusBox.add(statusbox)
|
||||
|
||||
builder.haveSomeWidgets([name, last, lastbox, status, statusbox])
|
||||
|
||||
def removeBuilder(self, name, builder):
|
||||
self.nameBox.remove(builder.nameSquare)
|
||||
self.lastBuildBox.remove(builder.lastBuildBox)
|
||||
self.statusBox.remove(builder.statusBox)
|
||||
self.builders.remove(builder)
|
||||
|
||||
class CompactBuilder(Builder):
|
||||
def setup(self):
|
||||
self.timer = None
|
||||
self.text = []
|
||||
self.eta = None
|
||||
def start(self):
|
||||
self.nameSquare.set_text(self.name)
|
||||
self.statusSquare.set_text("???")
|
||||
self.subscribe()
|
||||
def haveSomeWidgets(self, widgets):
|
||||
(self.nameSquare,
|
||||
self.lastBuildSquare, self.lastBuildBox,
|
||||
self.statusSquare, self.statusBox) = widgets
|
||||
|
||||
def remote_currentlyOffline(self):
|
||||
self.eta = None
|
||||
self.stopTimer()
|
||||
self.statusSquare.set_text("offline")
|
||||
self.statusBox.modify_bg(gtk.STATE_NORMAL,
|
||||
gtk.gdk.color_parse("red"))
|
||||
def remote_currentlyIdle(self):
|
||||
self.eta = None
|
||||
self.stopTimer()
|
||||
self.statusSquare.set_text("idle")
|
||||
def remote_currentlyWaiting(self, seconds):
|
||||
self.nextBuild = now() + seconds
|
||||
self.startTimer(self.updateWaiting)
|
||||
def remote_currentlyInterlocked(self):
|
||||
self.stopTimer()
|
||||
self.statusSquare.set_text("interlocked")
|
||||
def startTimer(self, func):
|
||||
# the func must clear self.timer and return gtk.FALSE when the event
|
||||
# has arrived
|
||||
self.stopTimer()
|
||||
self.timer = gtk.timeout_add(1000, func)
|
||||
func()
|
||||
def stopTimer(self):
|
||||
if self.timer:
|
||||
gtk.timeout_remove(self.timer)
|
||||
self.timer = None
|
||||
def updateWaiting(self):
|
||||
when = self.nextBuild
|
||||
if now() < when:
|
||||
next = time.strftime("%H:%M:%S", time.localtime(when))
|
||||
secs = "[%d seconds]" % (when - now())
|
||||
self.statusSquare.set_text("waiting\n%s\n%s" % (next, secs))
|
||||
return gtk.TRUE # restart timer
|
||||
else:
|
||||
# done
|
||||
self.statusSquare.set_text("waiting\n[RSN]")
|
||||
self.timer = None
|
||||
return gtk.FALSE
|
||||
|
||||
def remote_currentlyBuilding(self, eta):
|
||||
self.stopTimer()
|
||||
self.statusSquare.set_text("building")
|
||||
if eta:
|
||||
d = eta.callRemote("subscribe", self, 5)
|
||||
|
||||
def remote_newLastBuildStatus(self, event):
|
||||
color = None
|
||||
if event:
|
||||
text = "\n".join(event.text)
|
||||
color = event.color
|
||||
else:
|
||||
text = "none"
|
||||
if not color: color = "gray"
|
||||
self.lastBuildSquare.set_text(text)
|
||||
self.lastBuildBox.modify_bg(gtk.STATE_NORMAL,
|
||||
gtk.gdk.color_parse(color))
|
||||
|
||||
def remote_newEvent(self, event):
|
||||
assert(event.__class__ == GtkUpdatingEvent)
|
||||
self.current = event
|
||||
event.builder = self
|
||||
self.text = event.text
|
||||
if not self.text: self.text = ["idle"]
|
||||
self.eta = None
|
||||
self.stopTimer()
|
||||
self.updateText()
|
||||
color = event.color
|
||||
if not color: color = "gray"
|
||||
self.statusBox.modify_bg(gtk.STATE_NORMAL,
|
||||
gtk.gdk.color_parse(color))
|
||||
|
||||
def updateCurrent(self):
|
||||
text = self.current.text
|
||||
if text:
|
||||
self.text = text
|
||||
self.updateText()
|
||||
color = self.current.color
|
||||
if color:
|
||||
self.statusBox.modify_bg(gtk.STATE_NORMAL,
|
||||
gtk.gdk.color_parse(color))
|
||||
def updateText(self):
|
||||
etatext = []
|
||||
if self.eta:
|
||||
etatext = [time.strftime("%H:%M:%S", time.localtime(self.eta))]
|
||||
if now() > self.eta:
|
||||
etatext += ["RSN"]
|
||||
else:
|
||||
seconds = self.eta - now()
|
||||
etatext += ["[%d secs]" % seconds]
|
||||
text = "\n".join(self.text + etatext)
|
||||
self.statusSquare.set_text(text)
|
||||
def updateTextTimer(self):
|
||||
self.updateText()
|
||||
return gtk.TRUE # restart timer
|
||||
|
||||
def remote_progress(self, seconds):
|
||||
if seconds == None:
|
||||
self.eta = None
|
||||
else:
|
||||
self.eta = now() + seconds
|
||||
self.startTimer(self.updateTextTimer)
|
||||
self.updateText()
|
||||
def remote_finished(self, eta):
|
||||
self.eta = None
|
||||
self.stopTimer()
|
||||
self.updateText()
|
||||
eta.callRemote("unsubscribe", self)
|
||||
'''
|
||||
|
||||
class Box:
|
||||
def __init__(self, text="?"):
|
||||
self.text = text
|
||||
self.box = gtk.EventBox()
|
||||
self.label = gtk.Label(text)
|
||||
self.box.add(self.label)
|
||||
self.box.set_size_request(64,64)
|
||||
self.timer = None
|
||||
|
||||
def getBox(self):
|
||||
return self.box
|
||||
|
||||
def setText(self, text):
|
||||
self.text = text
|
||||
self.label.set_text(text)
|
||||
|
||||
def setColor(self, color):
|
||||
if not color:
|
||||
return
|
||||
self.box.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse(color))
|
||||
|
||||
def setETA(self, eta):
|
||||
if eta:
|
||||
self.when = now() + eta
|
||||
self.startTimer()
|
||||
else:
|
||||
self.stopTimer()
|
||||
|
||||
def startTimer(self):
|
||||
self.stopTimer()
|
||||
self.timer = gobject.timeout_add(1000, self.update)
|
||||
self.update()
|
||||
|
||||
def stopTimer(self):
|
||||
if self.timer:
|
||||
gobject.source_remove(self.timer)
|
||||
self.timer = None
|
||||
self.label.set_text(self.text)
|
||||
|
||||
def update(self):
|
||||
if now() < self.when:
|
||||
next = time.strftime("%H:%M:%S", time.localtime(self.when))
|
||||
secs = "[%d secs]" % (self.when - now())
|
||||
self.label.set_text("%s\n%s\n%s" % (self.text, next, secs))
|
||||
return True # restart timer
|
||||
else:
|
||||
# done
|
||||
self.label.set_text("%s\n[soon]\n[overdue]" % (self.text,))
|
||||
self.timer = None
|
||||
return False
|
||||
|
||||
|
||||
|
||||
class ThreeRowBuilder:
|
||||
def __init__(self, name, ref):
|
||||
self.name = name
|
||||
|
||||
self.last = Box()
|
||||
self.current = Box()
|
||||
self.step = Box("idle")
|
||||
self.step.setColor("white")
|
||||
|
||||
self.ref = ref
|
||||
|
||||
def getBoxes(self):
|
||||
return self.last.getBox(), self.current.getBox(), self.step.getBox()
|
||||
|
||||
def getLastBuild(self):
|
||||
d = self.ref.callRemote("getLastFinishedBuild")
|
||||
d.addCallback(self.gotLastBuild)
|
||||
def gotLastBuild(self, build):
|
||||
if build:
|
||||
build.callRemote("getText").addCallback(self.gotLastText)
|
||||
build.callRemote("getColor").addCallback(self.gotLastColor)
|
||||
|
||||
def gotLastText(self, text):
|
||||
self.last.setText("\n".join(text))
|
||||
def gotLastColor(self, color):
|
||||
self.last.setColor(color)
|
||||
|
||||
def getState(self):
|
||||
self.ref.callRemote("getState").addCallback(self.gotState)
|
||||
def gotState(self, res):
|
||||
state, ETA, builds = res
|
||||
# state is one of: offline, idle, waiting, interlocked, building
|
||||
# TODO: ETA is going away, you have to look inside the builds to get
|
||||
# that value
|
||||
currentmap = {"offline": "red",
|
||||
"idle": "white",
|
||||
"waiting": "yellow",
|
||||
"interlocked": "yellow",
|
||||
"building": "yellow",}
|
||||
text = state
|
||||
self.current.setColor(currentmap[state])
|
||||
if ETA is not None:
|
||||
text += "\nETA=%s secs" % ETA
|
||||
self.current.setText(state)
|
||||
|
||||
def buildStarted(self, build):
|
||||
print "[%s] buildStarted" % (self.name,)
|
||||
self.current.setColor("yellow")
|
||||
|
||||
def buildFinished(self, build, results):
|
||||
print "[%s] buildFinished: %s" % (self.name, results)
|
||||
self.gotLastBuild(build)
|
||||
self.current.setColor("white")
|
||||
self.current.stopTimer()
|
||||
|
||||
def buildETAUpdate(self, eta):
|
||||
print "[%s] buildETAUpdate: %s" % (self.name, eta)
|
||||
self.current.setETA(eta)
|
||||
|
||||
|
||||
def stepStarted(self, stepname, step):
|
||||
print "[%s] stepStarted: %s" % (self.name, stepname)
|
||||
self.step.setText(stepname)
|
||||
self.step.setColor("yellow")
|
||||
def stepFinished(self, stepname, step, results):
|
||||
print "[%s] stepFinished: %s %s" % (self.name, stepname, results)
|
||||
self.step.setText("idle")
|
||||
self.step.setColor("white")
|
||||
self.step.stopTimer()
|
||||
def stepETAUpdate(self, stepname, eta):
|
||||
print "[%s] stepETAUpdate: %s %s" % (self.name, stepname, eta)
|
||||
self.step.setETA(eta)
|
||||
|
||||
|
||||
class ThreeRowClient(pb.Referenceable):
|
||||
def __init__(self, window):
|
||||
self.window = window
|
||||
self.buildernames = []
|
||||
self.builders = {}
|
||||
|
||||
def connected(self, ref):
|
||||
print "connected"
|
||||
self.ref = ref
|
||||
self.pane = gtk.VBox(False, 2)
|
||||
self.table = gtk.Table(1+3, 1)
|
||||
self.pane.add(self.table)
|
||||
self.window.vb.add(self.pane)
|
||||
self.pane.show_all()
|
||||
ref.callRemote("subscribe", "logs", 5, self)
|
||||
|
||||
def removeTable(self):
|
||||
for child in self.table.get_children():
|
||||
self.table.remove(child)
|
||||
self.pane.remove(self.table)
|
||||
|
||||
def makeTable(self):
|
||||
columns = len(self.builders)
|
||||
self.table = gtk.Table(2, columns)
|
||||
self.pane.add(self.table)
|
||||
for i in range(len(self.buildernames)):
|
||||
name = self.buildernames[i]
|
||||
b = self.builders[name]
|
||||
last,current,step = b.getBoxes()
|
||||
self.table.attach(gtk.Label(name), i, i+1, 0, 1)
|
||||
self.table.attach(last, i, i+1, 1, 2,
|
||||
xpadding=1, ypadding=1)
|
||||
self.table.attach(current, i, i+1, 2, 3,
|
||||
xpadding=1, ypadding=1)
|
||||
self.table.attach(step, i, i+1, 3, 4,
|
||||
xpadding=1, ypadding=1)
|
||||
self.table.show_all()
|
||||
|
||||
def rebuildTable(self):
|
||||
self.removeTable()
|
||||
self.makeTable()
|
||||
|
||||
def remote_builderAdded(self, buildername, builder):
|
||||
print "builderAdded", buildername
|
||||
assert buildername not in self.buildernames
|
||||
self.buildernames.append(buildername)
|
||||
|
||||
b = ThreeRowBuilder(buildername, builder)
|
||||
self.builders[buildername] = b
|
||||
self.rebuildTable()
|
||||
b.getLastBuild()
|
||||
b.getState()
|
||||
|
||||
def remote_builderRemoved(self, buildername):
|
||||
del self.builders[buildername]
|
||||
self.buildernames.remove(buildername)
|
||||
self.rebuildTable()
|
||||
|
||||
def remote_builderChangedState(self, name, state, eta):
|
||||
self.builders[name].gotState((state, eta, None))
|
||||
def remote_buildStarted(self, name, build):
|
||||
self.builders[name].buildStarted(build)
|
||||
def remote_buildFinished(self, name, build, results):
|
||||
self.builders[name].buildFinished(build, results)
|
||||
|
||||
def remote_buildETAUpdate(self, name, build, eta):
|
||||
self.builders[name].buildETAUpdate(eta)
|
||||
def remote_stepStarted(self, name, build, stepname, step):
|
||||
self.builders[name].stepStarted(stepname, step)
|
||||
def remote_stepFinished(self, name, build, stepname, step, results):
|
||||
self.builders[name].stepFinished(stepname, step, results)
|
||||
|
||||
def remote_stepETAUpdate(self, name, build, stepname, step,
|
||||
eta, expectations):
|
||||
# expectations is a list of (metricname, current_value,
|
||||
# expected_value) tuples, so that we could show individual progress
|
||||
# meters for each metric
|
||||
self.builders[name].stepETAUpdate(stepname, eta)
|
||||
|
||||
def remote_logStarted(self, buildername, build, stepname, step,
|
||||
logname, log):
|
||||
pass
|
||||
|
||||
def remote_logFinished(self, buildername, build, stepname, step,
|
||||
logname, log):
|
||||
pass
|
||||
|
||||
|
||||
class GtkClient(TextClient):
|
||||
ClientClass = ThreeRowClient
|
||||
|
||||
def __init__(self, master):
|
||||
self.master = master
|
||||
|
||||
w = gtk.Window()
|
||||
self.w = w
|
||||
#w.set_size_request(64,64)
|
||||
w.connect('destroy', lambda win: gtk.main_quit())
|
||||
self.vb = gtk.VBox(False, 2)
|
||||
self.status = gtk.Label("unconnected")
|
||||
self.vb.add(self.status)
|
||||
self.listener = self.ClientClass(self)
|
||||
w.add(self.vb)
|
||||
w.show_all()
|
||||
|
||||
def connected(self, ref):
|
||||
self.status.set_text("connected")
|
||||
TextClient.connected(self, ref)
|
||||
|
||||
"""
|
||||
def addBuilder(self, name, builder):
|
||||
Client.addBuilder(self, name, builder)
|
||||
self.pane.addBuilder(builder)
|
||||
def removeBuilder(self, name):
|
||||
self.pane.removeBuilder(name, self.builders[name])
|
||||
Client.removeBuilder(self, name)
|
||||
|
||||
def startConnecting(self, master):
|
||||
self.master = master
|
||||
Client.startConnecting(self, master)
|
||||
self.status.set_text("connecting to %s.." % master)
|
||||
def connected(self, remote):
|
||||
Client.connected(self, remote)
|
||||
self.status.set_text(self.master)
|
||||
remote.notifyOnDisconnect(self.disconnected)
|
||||
def disconnected(self, remote):
|
||||
self.status.set_text("disconnected, will retry")
|
||||
"""
|
||||
|
||||
def main():
|
||||
master = "localhost:8007"
|
||||
if len(sys.argv) > 1:
|
||||
master = sys.argv[1]
|
||||
c = GtkClient(master)
|
||||
c.run()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
38
tools/buildbot/buildbot/clients/sendchange.py
Normal file
@ -0,0 +1,38 @@
from twisted.spread import pb
from twisted.cred import credentials
from twisted.internet import reactor

class Sender:
    def __init__(self, master, user):
        self.user = user
        self.host, self.port = master.split(":")
        self.port = int(self.port)

    def send(self, branch, revision, comments, files):
        change = {'who': self.user, 'files': files, 'comments': comments,
                  'branch': branch, 'revision': revision}

        f = pb.PBClientFactory()
        d = f.login(credentials.UsernamePassword("change", "changepw"))
        reactor.connectTCP(self.host, self.port, f)
        d.addCallback(self.addChange, change)
        return d

    def addChange(self, remote, change):
        d = remote.callRemote('addChange', change)
        d.addCallback(lambda res: remote.broker.transport.loseConnection())
        return d

    def printSuccess(self, res):
        print "change sent successfully"
    def printFailure(self, why):
        print "change NOT sent"
        print why

    def stop(self, res):
        reactor.stop()
        return res

    def run(self):
        reactor.run()
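# Illustration only (not from the original file): a sketch of using Sender by
# hand. The master address, user, and change details are placeholder values;
# the master is assumed to run a PBChangeSource accepting the change/changepw
# credentials used above.
from buildbot.clients.sendchange import Sender

s = Sender("localhost:9989", "alice")
d = s.send(branch=None, revision="1234",
           comments="example commit message", files=["README"])
d.addCallbacks(s.printSuccess, s.printFailure)
d.addBoth(s.stop)   # stop the reactor once a result arrives
s.run()             # runs the reactor until stop() fires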
103
tools/buildbot/buildbot/dnotify.py
Normal file
@ -0,0 +1,103 @@
#! /usr/bin/python

# spiv wants this

import fcntl, signal

class DNotify_Handler:
    def __init__(self):
        self.watchers = {}
        self.installed = 0
    def install(self):
        if self.installed:
            return
        signal.signal(signal.SIGIO, self.fire)
        self.installed = 1
    def uninstall(self):
        if not self.installed:
            return
        signal.signal(signal.SIGIO, signal.SIG_DFL)
        self.installed = 0
    def add(self, watcher):
        self.watchers[watcher.fd.fileno()] = watcher
        self.install()
    def remove(self, watcher):
        if self.watchers.has_key(watcher.fd.fileno()):
            del(self.watchers[watcher.fd.fileno()])
            if not self.watchers:
                self.uninstall()
    def fire(self, signum, frame):
        # this is the signal handler
        # without siginfo_t, we must fire them all
        for watcher in self.watchers.values():
            watcher.callback()

class DNotify:
    DN_ACCESS = fcntl.DN_ACCESS # a file in the directory was read
    DN_MODIFY = fcntl.DN_MODIFY # a file was modified (write,truncate)
    DN_CREATE = fcntl.DN_CREATE # a file was created
    DN_DELETE = fcntl.DN_DELETE # a file was unlinked
    DN_RENAME = fcntl.DN_RENAME # a file was renamed
    DN_ATTRIB = fcntl.DN_ATTRIB # a file had attributes changed (chmod,chown)

    handler = [None]

    def __init__(self, dirname, callback=None,
                 flags=[DN_MODIFY,DN_CREATE,DN_DELETE,DN_RENAME]):

        """This object watches a directory for changes. The .callback
        attribute should be set to a function to be run every time something
        happens to it. Be aware that it will be called more times than you
        expect."""

        if callback:
            self.callback = callback
        else:
            self.callback = self.fire
        self.dirname = dirname
        self.flags = reduce(lambda x, y: x | y, flags) | fcntl.DN_MULTISHOT
        self.fd = open(dirname, "r")
        # ideally we would move the notification to something like SIGRTMIN,
        # (to free up SIGIO) and use sigaction to have the signal handler
        # receive a structure with the fd number. But python doesn't offer
        # either.
        if not self.handler[0]:
            self.handler[0] = DNotify_Handler()
        self.handler[0].add(self)
        fcntl.fcntl(self.fd, fcntl.F_NOTIFY, self.flags)
    def remove(self):
        self.handler[0].remove(self)
        self.fd.close()
    def fire(self):
        print self.dirname, "changed!"

def test_dnotify1():
    d = DNotify(".")
    while 1:
        signal.pause()

def test_dnotify2():
    # create ./foo/, create/delete files in ./ and ./foo/ while this is
    # running. Notice how both notifiers are fired when anything changes;
    # this is an unfortunate side-effect of the lack of extended sigaction
    # support in Python.
    count = [0]
    d1 = DNotify(".")
    def fire1(count=count, d1=d1):
        print "./ changed!", count[0]
        count[0] += 1
        if count[0] > 5:
            d1.remove()
            del(d1)
    # change the callback, since we can't define it until after we have the
    # dnotify object. Hmm, unless we give the dnotify to the callback.
    d1.callback = fire1
    def fire2(): print "foo/ changed!"
    d2 = DNotify("foo", fire2)
    while 1:
        signal.pause()


if __name__ == '__main__':
    test_dnotify2()
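The DNotify docstring warns that the callback "will be called more times than you expect", since every watcher fires on every SIGIO. A small debouncing wrapper is one way to coalesce those bursts; this is only a sketch, assuming a Twisted reactor is running (as it is in the buildmaster), and the 0.5-second quiet period is an arbitrary choice.

    # Sketch: coalesce bursts of dnotify callbacks behind a short quiet period.
    # Assumes the DNotify class above and a running Twisted reactor.
    from twisted.internet import reactor

    class DebouncedWatcher:
        def __init__(self, dirname, handler, delay=0.5):
            self.handler = handler
            self.delay = delay
            self.timer = None
            self.dn = DNotify(dirname, self._fired)

        def _fired(self):
            # restart the timer on every notification; the handler only runs
            # once the directory has been quiet for 'delay' seconds
            if self.timer and self.timer.active():
                self.timer.cancel()
            self.timer = reactor.callLater(self.delay, self.handler)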
918
tools/buildbot/buildbot/interfaces.py
Normal file
918
tools/buildbot/buildbot/interfaces.py
Normal file
@ -0,0 +1,918 @@
#! /usr/bin/python

"""Interface documentation.

Define the interfaces that are implemented by various buildbot classes.
"""

from buildbot.twcompat import Interface

# exceptions that can be raised while trying to start a build
class NoSlaveError(Exception):
    pass
class BuilderInUseError(Exception):
    pass
class BuildSlaveTooOldError(Exception):
    pass

class IChangeSource(Interface):
    """Object which feeds Change objects to the changemaster. When files or
    directories are changed and the version control system provides some
    kind of notification, this object should turn it into a Change object
    and pass it through::

        self.changemaster.addChange(change)
    """

    def start():
        """Called when the buildmaster starts. Can be used to establish
        connections to VC daemons or begin polling."""

    def stop():
        """Called when the buildmaster shuts down. Connections should be
        terminated, polling timers should be canceled."""

    def describe():
        """Should return a string which briefly describes this source. This
        string will be displayed in an HTML status page."""

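IChangeSource is the hook for feeding Changes into the changemaster. The sketch below shows what a minimal polling implementation could look like; only the start/stop/describe methods and the changemaster.addChange() call come from the interface text above, while the poll callable, the interval, and the way the changemaster reference gets attached are assumptions for illustration.

    # Sketch of a polling change source. 'poll' is any callable returning a
    # list of Change objects; how self.changemaster is set (normally by the
    # changemaster when the source is attached) is assumed here.
    from twisted.internet import task

    class PollingChangeSource:
        def __init__(self, poll, interval=60):
            self.poll = poll
            self.interval = interval
            self.changemaster = None   # attached by the changemaster (assumed)
            self.loop = task.LoopingCall(self._doPoll)

        def start(self):
            self.loop.start(self.interval)

        def stop(self):
            self.loop.stop()

        def describe(self):
            return "PollingChangeSource (every %d seconds)" % self.interval

        def _doPoll(self):
            for change in self.poll():
                self.changemaster.addChange(change)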
class IScheduler(Interface):
|
||||
"""I watch for Changes in the source tree and decide when to trigger
|
||||
Builds. I create BuildSet objects and submit them to the BuildMaster. I
|
||||
am a service, and the BuildMaster is always my parent."""
|
||||
|
||||
def addChange(change):
|
||||
"""A Change has just been dispatched by one of the ChangeSources.
|
||||
Each Scheduler will receive this Change. I may decide to start a
|
||||
build as a result, or I might choose to ignore it."""
|
||||
|
||||
def listBuilderNames():
|
||||
"""Return a list of strings indicating the Builders that this
|
||||
Scheduler might feed."""
|
||||
|
||||
def getPendingBuildTimes():
|
||||
"""Return a list of timestamps for any builds that are waiting in the
|
||||
tree-stable-timer queue. This is only relevant for Change-based
|
||||
schedulers, all others can just return an empty list."""
|
||||
# TODO: it might be nice to make this into getPendingBuildSets, which
|
||||
# would let someone subscribe to the buildset being finished.
|
||||
# However, the Scheduler doesn't actually create the buildset until
|
||||
# it gets submitted, so doing this would require some major rework.
|
||||
|
||||
class IUpstreamScheduler(Interface):
|
||||
"""This marks an IScheduler as being eligible for use as the 'upstream='
|
||||
argument to a buildbot.scheduler.Dependent instance."""
|
||||
|
||||
def subscribeToSuccessfulBuilds(target):
|
||||
"""Request that the target callbable be invoked after every
|
||||
successful buildset. The target will be called with a single
|
||||
argument: the SourceStamp used by the successful builds."""
|
||||
|
||||
def listBuilderNames():
|
||||
"""Return a list of strings indicating the Builders that this
|
||||
Scheduler might feed."""
|
||||
|
||||
class ISourceStamp(Interface):
|
||||
pass
|
||||
|
||||
class IEmailSender(Interface):
|
||||
"""I know how to send email, and can be used by other parts of the
|
||||
Buildbot to contact developers."""
|
||||
pass
|
||||
|
||||
class IEmailLookup(Interface):
|
||||
def getAddress(user):
|
||||
"""Turn a User-name string into a valid email address. Either return
|
||||
a string (with an @ in it), None (to indicate that the user cannot
|
||||
be reached by email), or a Deferred which will fire with the same."""
|
||||
|
||||
class IStatus(Interface):
|
||||
"""I am an object, obtainable from the buildmaster, which can provide
|
||||
status information."""
|
||||
|
||||
def getProjectName():
|
||||
"""Return the name of the project that this Buildbot is working
|
||||
for."""
|
||||
def getProjectURL():
|
||||
"""Return the URL of this Buildbot's project."""
|
||||
def getBuildbotURL():
|
||||
"""Return the URL of the top-most Buildbot status page, or None if
|
||||
this Buildbot does not provide a web status page."""
|
||||
def getURLForThing(thing):
|
||||
"""Return the URL of a page which provides information on 'thing',
|
||||
which should be an object that implements one of the status
|
||||
interfaces defined in L{buildbot.interfaces}. Returns None if no
|
||||
suitable page is available (or if no Waterfall is running)."""
|
||||
|
||||
def getSchedulers():
|
||||
"""Return a list of ISchedulerStatus objects for all
|
||||
currently-registered Schedulers."""
|
||||
|
||||
def getBuilderNames(categories=None):
|
||||
"""Return a list of the names of all current Builders."""
|
||||
def getBuilder(name):
|
||||
"""Return the IBuilderStatus object for a given named Builder."""
|
||||
def getSlave(name):
|
||||
"""Return the ISlaveStatus object for a given named buildslave."""
|
||||
|
||||
def getBuildSets():
|
||||
"""Return a list of active (non-finished) IBuildSetStatus objects."""
|
||||
|
||||
def subscribe(receiver):
|
||||
"""Register an IStatusReceiver to receive new status events. The
|
||||
receiver will immediately be sent a set of 'builderAdded' messages
|
||||
for all current builders. It will receive further 'builderAdded' and
|
||||
'builderRemoved' messages as the config file is reloaded and builders
|
||||
come and go. It will also receive 'buildsetSubmitted' messages for
|
||||
all outstanding BuildSets (and each new BuildSet that gets
|
||||
submitted). No additional messages will be sent unless the receiver
|
||||
asks for them by calling .subscribe on the IBuilderStatus objects
|
||||
which accompany the addedBuilder message."""
|
||||
|
||||
def unsubscribe(receiver):
|
||||
"""Unregister an IStatusReceiver. No further status messgaes will be
|
||||
delivered."""
|
||||
|
||||
class IBuildSetStatus(Interface):
|
||||
"""I represent a set of Builds, each run on a separate Builder but all
|
||||
using the same source tree."""
|
||||
|
||||
def getSourceStamp():
|
||||
pass
|
||||
def getReason():
|
||||
pass
|
||||
def getID():
|
||||
"""Return the BuildSet's ID string, if any. The 'try' feature uses a
|
||||
random string as a BuildSetID to relate submitted jobs with the
|
||||
resulting BuildSet."""
|
||||
def getResponsibleUsers():
|
||||
pass # not implemented
|
||||
def getInterestedUsers():
|
||||
pass # not implemented
|
||||
def getBuilderNames():
|
||||
"""Return a list of the names of all Builders on which this set will
|
||||
do builds."""
|
||||
def getBuildRequests():
|
||||
"""Return a list of IBuildRequestStatus objects that represent my
|
||||
component Builds. This list might correspond to the Builders named by
|
||||
getBuilderNames(), but if builder categories are used, or 'Builder
|
||||
Aliases' are implemented, then they may not."""
|
||||
def isFinished():
|
||||
pass
|
||||
def waitUntilSuccess():
|
||||
"""Return a Deferred that fires (with this IBuildSetStatus object)
|
||||
when the outcome of the BuildSet is known, i.e., upon the first
|
||||
failure, or after all builds complete successfully."""
|
||||
def waitUntilFinished():
|
||||
"""Return a Deferred that fires (with this IBuildSetStatus object)
|
||||
when all builds have finished."""
|
||||
def getResults():
|
||||
pass
|
||||
|
||||
class IBuildRequestStatus(Interface):
|
||||
"""I represent a request to build a particular set of source code on a
|
||||
particular Builder. These requests may be merged by the time they are
|
||||
finally turned into a Build."""
|
||||
|
||||
def getSourceStamp():
|
||||
pass
|
||||
def getBuilderName():
|
||||
pass
|
||||
def getBuilds():
|
||||
"""Return a list of IBuildStatus objects for each Build that has been
|
||||
started in an attempt to satisfy this BuildRequest."""
|
||||
|
||||
def subscribe(observer):
|
||||
"""Register a callable that will be invoked (with a single
|
||||
IBuildStatus object) for each Build that is created to satisfy this
|
||||
request. There may be multiple Builds created in an attempt to handle
|
||||
the request: they may be interrupted by the user or abandoned due to
|
||||
a lost slave. The last Build (the one which actually gets to run to
|
||||
completion) is said to 'satisfy' the BuildRequest. The observer will
|
||||
be called once for each of these Builds, both old and new."""
|
||||
def unsubscribe(observer):
|
||||
"""Unregister the callable that was registered with subscribe()."""
|
||||
|
||||
|
||||
class ISlaveStatus(Interface):
|
||||
def getName():
|
||||
"""Return the name of the build slave."""
|
||||
|
||||
def getAdmin():
|
||||
"""Return a string with the slave admin's contact data."""
|
||||
|
||||
def getHost():
|
||||
"""Return a string with the slave host info."""
|
||||
|
||||
def isConnected():
|
||||
"""Return True if the slave is currently online, False if not."""
|
||||
|
||||
class ISchedulerStatus(Interface):
|
||||
def getName():
|
||||
"""Return the name of this Scheduler (a string)."""
|
||||
|
||||
def getPendingBuildsets():
|
||||
"""Return an IBuildSet for all BuildSets that are pending. These
|
||||
BuildSets are waiting for their tree-stable-timers to expire."""
|
||||
# TODO: this is not implemented anywhere
|
||||
|
||||
|
||||
class IBuilderStatus(Interface):
|
||||
def getName():
|
||||
"""Return the name of this Builder (a string)."""
|
||||
|
||||
def getState():
|
||||
# TODO: this isn't nearly as meaningful as it used to be
|
||||
"""Return a tuple (state, builds) for this Builder. 'state' is the
|
||||
so-called 'big-status', indicating overall status (as opposed to
|
||||
which step is currently running). It is a string, one of 'offline',
|
||||
'idle', or 'building'. 'builds' is a list of IBuildStatus objects
|
||||
(possibly empty) representing the currently active builds."""
|
||||
|
||||
def getSlaves():
|
||||
"""Return a list of ISlaveStatus objects for the buildslaves that are
|
||||
used by this builder."""
|
||||
|
||||
def getPendingBuilds():
|
||||
"""Return an IBuildRequestStatus object for all upcoming builds
|
||||
(those which are ready to go but which are waiting for a buildslave
|
||||
to be available)."""
|
||||
|
||||
def getCurrentBuilds():
|
||||
"""Return a list containing an IBuildStatus object for each build
|
||||
currently in progress."""
|
||||
# again, we could probably provide an object for 'waiting' and
|
||||
# 'interlocked' too, but things like the Change list might still be
|
||||
# subject to change
|
||||
|
||||
def getLastFinishedBuild():
|
||||
"""Return the IBuildStatus object representing the last finished
|
||||
build, which may be None if the builder has not yet finished any
|
||||
builds."""
|
||||
|
||||
def getBuild(number):
|
||||
"""Return an IBuildStatus object for a historical build. Each build
|
||||
is numbered (starting at 0 when the Builder is first added),
|
||||
getBuild(n) will retrieve the Nth such build. getBuild(-n) will
|
||||
retrieve a recent build, with -1 being the most recent build
|
||||
started. If the Builder is idle, this will be the same as
|
||||
getLastFinishedBuild(). If the Builder is active, it will be an
|
||||
unfinished build. This method will return None if the build is no
|
||||
longer available. Older builds are likely to have less information
|
||||
stored: Logs are the first to go, then Steps."""
|
||||
|
||||
def getEvent(number):
|
||||
"""Return an IStatusEvent object for a recent Event. Builders
|
||||
connecting and disconnecting are events, as are ping attempts.
|
||||
getEvent(-1) will return the most recent event. Events are numbered,
|
||||
but it probably doesn't make sense to ever do getEvent(+n)."""
|
||||
|
||||
def subscribe(receiver):
|
||||
"""Register an IStatusReceiver to receive new status events. The
|
||||
receiver will be given builderChangedState, buildStarted, and
|
||||
buildFinished messages."""
|
||||
|
||||
def unsubscribe(receiver):
|
||||
"""Unregister an IStatusReceiver. No further status messgaes will be
|
||||
delivered."""
|
||||
|
||||
class IBuildStatus(Interface):
|
||||
"""I represent the status of a single Build/BuildRequest. It could be
|
||||
in-progress or finished."""
|
||||
|
||||
def getBuilder():
|
||||
"""
|
||||
Return the BuilderStatus that owns this build.
|
||||
|
||||
@rtype: implementor of L{IBuilderStatus}
|
||||
"""
|
||||
|
||||
def isFinished():
|
||||
"""Return a boolean. True means the build has finished, False means
|
||||
it is still running."""
|
||||
|
||||
def waitUntilFinished():
|
||||
"""Return a Deferred that will fire when the build finishes. If the
|
||||
build has already finished, this deferred will fire right away. The
|
||||
callback is given this IBuildStatus instance as an argument."""
|
||||
|
||||
def getProperty(propname):
|
||||
"""Return the value of the build property with the given name."""
|
||||
|
||||
def getReason():
|
||||
"""Return a string that indicates why the build was run. 'changes',
|
||||
'forced', and 'periodic' are the most likely values. 'try' will be
|
||||
added in the future."""
|
||||
|
||||
def getSourceStamp():
|
||||
"""Return a tuple of (branch, revision, patch) which can be used to
|
||||
re-create the source tree that this build used. 'branch' is a string
|
||||
with a VC-specific meaning, or None to indicate that the checkout
|
||||
step used its default branch. 'revision' is a string, the sort you
|
||||
would pass to 'cvs co -r REVISION'. 'patch' is either None, or a
|
||||
(level, diff) tuple which represents a patch that should be applied
|
||||
with 'patch -pLEVEL < DIFF' from the directory created by the
|
||||
checkout operation.
|
||||
|
||||
This method will return None if the source information is no longer
|
||||
available."""
|
||||
# TODO: it should be possible to expire the patch but still remember
|
||||
# that the build was r123+something.
|
||||
|
||||
# TODO: change this to return the actual SourceStamp instance, and
|
||||
# remove getChanges()
|
||||
|
||||
def getChanges():
|
||||
"""Return a list of Change objects which represent which source
|
||||
changes went into the build."""
|
||||
|
||||
def getResponsibleUsers():
|
||||
"""Return a list of Users who are to blame for the changes that went
|
||||
into this build. If anything breaks (at least anything that wasn't
|
||||
already broken), blame them. Specifically, this is the set of users
|
||||
who were responsible for the Changes that went into this build. Each
|
||||
User is a string, corresponding to their name as known by the VC
|
||||
repository."""
|
||||
|
||||
def getInterestedUsers():
|
||||
"""Return a list of Users who will want to know about the results of
|
||||
this build. This is a superset of getResponsibleUsers(): it adds
|
||||
people who are interested in this build but who did not actually
|
||||
make the Changes that went into it (build sheriffs, code-domain
|
||||
owners)."""
|
||||
|
||||
def getNumber():
|
||||
"""Within each builder, each Build has a number. Return it."""
|
||||
|
||||
def getPreviousBuild():
|
||||
"""Convenience method. Returns None if the previous build is
|
||||
unavailable."""
|
||||
|
||||
def getSteps():
|
||||
"""Return a list of IBuildStepStatus objects. For invariant builds
|
||||
(those which always use the same set of Steps), this should always
|
||||
return the complete list, however some of the steps may not have
|
||||
started yet (step.getTimes()[0] will be None). For variant builds,
|
||||
this may not be complete (asking again later may give you more of
|
||||
them)."""
|
||||
|
||||
def getTimes():
|
||||
"""Returns a tuple of (start, end). 'start' and 'end' are the times
|
||||
(seconds since the epoch) when the Build started and finished. If
|
||||
the build is still running, 'end' will be None."""
|
||||
|
||||
# while the build is running, the following methods make sense.
|
||||
# Afterwards they return None
|
||||
|
||||
def getETA():
|
||||
"""Returns the number of seconds from now in which the build is
|
||||
expected to finish, or None if we can't make a guess. This guess will
|
||||
be refined over time."""
|
||||
|
||||
def getCurrentStep():
|
||||
"""Return an IBuildStepStatus object representing the currently
|
||||
active step."""
|
||||
|
||||
# Once you know the build has finished, the following methods are legal.
|
||||
# Before this build has finished, they all return None.
|
||||
|
||||
def getSlavename():
|
||||
"""Return the name of the buildslave which handled this build."""
|
||||
|
||||
def getText():
|
||||
"""Returns a list of strings to describe the build. These are
|
||||
intended to be displayed in a narrow column. If more space is
|
||||
available, the caller should join them together with spaces before
|
||||
presenting them to the user."""
|
||||
|
||||
def getColor():
|
||||
"""Returns a single string with the color that should be used to
|
||||
display the build. 'green', 'orange', or 'red' are the most likely
|
||||
ones."""
|
||||
|
||||
def getResults():
|
||||
"""Return a constant describing the results of the build: one of the
|
||||
constants in buildbot.status.builder: SUCCESS, WARNINGS, or
|
||||
FAILURE."""
|
||||
|
||||
def getLogs():
|
||||
"""Return a list of logs that describe the build as a whole. Some
|
||||
steps will contribute their logs, while others are less important
|
||||
and will only be accessible through the IBuildStepStatus objects.
|
||||
Each log is an object which implements the IStatusLog interface."""
|
||||
|
||||
def getTestResults():
|
||||
"""Return a dictionary that maps test-name tuples to ITestResult
|
||||
objects. This may return an empty or partially-filled dictionary
|
||||
until the build has completed."""
|
||||
|
||||
# subscription interface
|
||||
|
||||
def subscribe(receiver, updateInterval=None):
|
||||
"""Register an IStatusReceiver to receive new status events. The
|
||||
receiver will be given stepStarted and stepFinished messages. If
|
||||
'updateInterval' is non-None, buildETAUpdate messages will be sent
|
||||
every 'updateInterval' seconds."""
|
||||
|
||||
def unsubscribe(receiver):
|
||||
"""Unregister an IStatusReceiver. No further status messgaes will be
|
||||
delivered."""
|
||||
|
||||
class ITestResult(Interface):
|
||||
"""I describe the results of a single unit test."""
|
||||
|
||||
def getName():
|
||||
"""Returns a tuple of strings which make up the test name. Tests may
|
||||
be arranged in a hierarchy, so looking for common prefixes may be
|
||||
useful."""
|
||||
|
||||
def getResults():
|
||||
"""Returns a constant describing the results of the test: SUCCESS,
|
||||
WARNINGS, FAILURE."""
|
||||
|
||||
def getText():
|
||||
"""Returns a list of short strings which describe the results of the
|
||||
test in slightly more detail. Suggested components include
|
||||
'failure', 'error', 'passed', 'timeout'."""
|
||||
|
||||
def getLogs():
|
||||
# in flux, it may be possible to provide more structured information
|
||||
# like python Failure instances
|
||||
"""Returns a dictionary of test logs. The keys are strings like
|
||||
'stdout', 'log', 'exceptions'. The values are strings."""
|
||||
|
||||
|
||||
class IBuildStepStatus(Interface):
|
||||
"""I hold status for a single BuildStep."""
|
||||
|
||||
def getName():
|
||||
"""Returns a short string with the name of this step. This string
|
||||
may have spaces in it."""
|
||||
|
||||
def getBuild():
|
||||
"""Returns the IBuildStatus object which contains this step."""
|
||||
|
||||
def getTimes():
|
||||
"""Returns a tuple of (start, end). 'start' and 'end' are the times
|
||||
(seconds since the epoch) when the Step started and finished. If the
|
||||
step has not yet started, 'start' will be None. If the step is still
|
||||
running, 'end' will be None."""
|
||||
|
||||
def getExpectations():
|
||||
"""Returns a list of tuples (name, current, target). Each tuple
|
||||
describes a single axis along which the step's progress can be
|
||||
measured. 'name' is a string which describes the axis itself, like
|
||||
'filesCompiled' or 'tests run' or 'bytes of output'. 'current' is a
|
||||
number with the progress made so far, while 'target' is the value
|
||||
that we expect (based upon past experience) to get to when the build
|
||||
is finished.
|
||||
|
||||
'current' will change over time until the step is finished. It is
|
||||
'None' until the step starts. When the build is finished, 'current'
|
||||
may or may not equal 'target' (which is merely the expectation based
|
||||
upon previous builds)."""
|
||||
|
||||
def getURLs():
|
||||
"""Returns a dictionary of URLs. Each key is a link name (a short
|
||||
string, like 'results' or 'coverage'), and each value is a URL. These
|
||||
links will be displayed along with the LogFiles.
|
||||
"""
|
||||
|
||||
def getLogs():
|
||||
"""Returns a list of IStatusLog objects. If the step has not yet
|
||||
finished, this list may be incomplete (asking again later may give
|
||||
you more of them)."""
|
||||
|
||||
|
||||
def isFinished():
|
||||
"""Return a boolean. True means the step has finished, False means it
|
||||
is still running."""
|
||||
|
||||
def waitUntilFinished():
|
||||
"""Return a Deferred that will fire when the step finishes. If the
|
||||
step has already finished, this deferred will fire right away. The
|
||||
callback is given this IBuildStepStatus instance as an argument."""
|
||||
|
||||
# while the step is running, the following methods make sense.
|
||||
# Afterwards they return None
|
||||
|
||||
def getETA():
|
||||
"""Returns the number of seconds from now in which the step is
|
||||
expected to finish, or None if we can't make a guess. This guess will
|
||||
be refined over time."""
|
||||
|
||||
# Once you know the step has finished, the following methods are legal.
|
||||
# Before this step has finished, they all return None.
|
||||
|
||||
def getText():
|
||||
"""Returns a list of strings which describe the step. These are
|
||||
intended to be displayed in a narrow column. If more space is
|
||||
available, the caller should join them together with spaces before
|
||||
presenting them to the user."""
|
||||
|
||||
def getColor():
|
||||
"""Returns a single string with the color that should be used to
|
||||
display this step. 'green', 'orange', 'red' and 'yellow' are the
|
||||
most likely ones."""
|
||||
|
||||
def getResults():
|
||||
"""Return a tuple describing the results of the step: (result,
|
||||
strings). 'result' is one of the constants in
|
||||
buildbot.status.builder: SUCCESS, WARNINGS, FAILURE, or SKIPPED.
|
||||
'strings' is an optional list of strings that the step wants to
|
||||
append to the overall build's results. These strings are usually
|
||||
more terse than the ones returned by getText(): in particular,
|
||||
successful Steps do not usually contribute any text to the overall
|
||||
build."""
|
||||
|
||||
# subscription interface
|
||||
|
||||
def subscribe(receiver, updateInterval=10):
|
||||
"""Register an IStatusReceiver to receive new status events. The
|
||||
receiver will be given logStarted and logFinished messages. It will
|
||||
also be given a ETAUpdate message every 'updateInterval' seconds."""
|
||||
|
||||
def unsubscribe(receiver):
|
||||
"""Unregister an IStatusReceiver. No further status messgaes will be
|
||||
delivered."""
|
||||
|
||||
class IStatusEvent(Interface):
|
||||
"""I represent a Builder Event, something non-Build related that can
|
||||
happen to a Builder."""
|
||||
|
||||
def getTimes():
|
||||
"""Returns a tuple of (start, end) like IBuildStepStatus, but end==0
|
||||
indicates that this is a 'point event', which has no duration.
|
||||
SlaveConnect/Disconnect are point events. Ping is not: it starts
|
||||
when requested and ends when the response (positive or negative) is
|
||||
returned"""
|
||||
|
||||
def getText():
|
||||
"""Returns a list of strings which describe the event. These are
|
||||
intended to be displayed in a narrow column. If more space is
|
||||
available, the caller should join them together with spaces before
|
||||
presenting them to the user."""
|
||||
|
||||
def getColor():
|
||||
"""Returns a single string with the color that should be used to
|
||||
display this event. 'red' and 'yellow' are the most likely ones."""
|
||||
|
||||
|
||||
LOG_CHANNEL_STDOUT = 0
|
||||
LOG_CHANNEL_STDERR = 1
|
||||
LOG_CHANNEL_HEADER = 2
|
||||
|
||||
class IStatusLog(Interface):
|
||||
"""I represent a single Log, which is a growing list of text items that
|
||||
contains some kind of output for a single BuildStep. I might be finished,
|
||||
in which case this list has stopped growing.
|
||||
|
||||
Each Log has a name, usually something boring like 'log' or 'output'.
|
||||
These names are not guaranteed to be unique, however they are usually
|
||||
chosen to be useful within the scope of a single step (i.e. the Compile
|
||||
step might produce both 'log' and 'warnings'). The name may also have
|
||||
spaces. If you want something more globally meaningful, at least within a
|
||||
given Build, try::
|
||||
|
||||
'%s.%s' % (log.getStep().getName(), log.getName())
|
||||
|
||||
The Log can be presented as plain text, or it can be accessed as a list
|
||||
of items, each of which has a channel indicator (header, stdout, stderr)
|
||||
and a text chunk. An HTML display might represent the interleaved
|
||||
channels with different styles, while a straight download-the-text
|
||||
interface would just want to retrieve a big string.
|
||||
|
||||
The 'header' channel is used by ShellCommands to prepend a note about
|
||||
which command is about to be run ('running command FOO in directory
|
||||
DIR'), and append another note giving the exit code of the process.
|
||||
|
||||
Logs can be streaming: if the Log has not yet finished, you can
|
||||
subscribe to receive new chunks as they are added.
|
||||
|
||||
A ShellCommand will have a Log associated with it that gathers stdout
|
||||
and stderr. Logs may also be created by parsing command output or
|
||||
through other synthetic means (grepping for all the warnings in a
|
||||
compile log, or listing all the test cases that are going to be run).
|
||||
Such synthetic Logs are usually finished as soon as they are created."""
|
||||
|
||||
|
||||
def getName():
|
||||
"""Returns a short string with the name of this log, probably 'log'.
|
||||
"""
|
||||
|
||||
def getStep():
|
||||
"""Returns the IBuildStepStatus which owns this log."""
|
||||
# TODO: can there be non-Step logs?
|
||||
|
||||
def isFinished():
|
||||
"""Return a boolean. True means the log has finished and is closed,
|
||||
False means it is still open and new chunks may be added to it."""
|
||||
|
||||
def waitUntilFinished():
|
||||
"""Return a Deferred that will fire when the log is closed. If the
|
||||
log has already finished, this deferred will fire right away. The
|
||||
callback is given this IStatusLog instance as an argument."""
|
||||
|
||||
def subscribe(receiver, catchup):
|
||||
"""Register an IStatusReceiver to receive chunks (with logChunk) as
|
||||
data is added to the Log. If you use this, you will also want to use
|
||||
waitUntilFinished to find out when the listener can be retired.
|
||||
Subscribing to a closed Log is a no-op.
|
||||
|
||||
If 'catchup' is True, the receiver will immediately be sent a series
|
||||
of logChunk messages to bring it up to date with the partially-filled
|
||||
log. This allows a status client to join a Log already in progress
|
||||
without missing any data. If the Log has already finished, it is too
|
||||
late to catch up: just do getText() instead.
|
||||
|
||||
If the Log is very large, the receiver will be called many times with
|
||||
a lot of data. There is no way to throttle this data. If the receiver
|
||||
is planning on sending the data on to somewhere else, over a narrow
|
||||
connection, you can get a throttleable subscription by using
|
||||
C{subscribeConsumer} instead."""
|
||||
|
||||
def unsubscribe(receiver):
|
||||
"""Remove a receiver previously registered with subscribe(). Attempts
|
||||
to remove a receiver which was not previously registered is a no-op.
|
||||
"""
|
||||
|
||||
def subscribeConsumer(consumer):
|
||||
"""Register an L{IStatusLogConsumer} to receive all chunks of the
|
||||
logfile, including all the old entries and any that will arrive in
|
||||
the future. The consumer will first have their C{registerProducer}
|
||||
method invoked with a reference to an object that can be told
|
||||
C{pauseProducing}, C{resumeProducing}, and C{stopProducing}. Then the
|
||||
consumer's C{writeChunk} method will be called repeatedly with each
|
||||
(channel, text) tuple in the log, starting with the very first. The
|
||||
consumer will be notified with C{finish} when the log has been
|
||||
exhausted (which can only happen when the log is finished). Note that
|
||||
a small amount of data could be written via C{writeChunk} even after
|
||||
C{pauseProducing} has been called.
|
||||
|
||||
To unsubscribe the consumer, use C{producer.stopProducing}."""
|
||||
|
||||
# once the log has finished, the following methods make sense. They can
|
||||
# be called earlier, but they will only return the contents of the log up
|
||||
# to the point at which they were called. You will lose items that are
|
||||
# added later. Use C{subscribe} or C{subscribeConsumer} to avoid missing
|
||||
# anything.
|
||||
|
||||
def hasContents():
|
||||
"""Returns True if the LogFile still has contents available. Returns
|
||||
False for logs that have been pruned. Clients should test this before
|
||||
offering to show the contents of any log."""
|
||||
|
||||
def getText():
|
||||
"""Return one big string with the contents of the Log. This merges
|
||||
all non-header chunks together."""
|
||||
|
||||
def readlines(channel=LOG_CHANNEL_STDOUT):
|
||||
"""Read lines from one channel of the logfile. This returns an
|
||||
iterator that will provide single lines of text (including the
|
||||
trailing newline).
|
||||
"""
|
||||
|
||||
def getTextWithHeaders():
|
||||
"""Return one big string with the contents of the Log. This merges
|
||||
all chunks (including headers) together."""
|
||||
|
||||
def getChunks():
|
||||
"""Generate a list of (channel, text) tuples. 'channel' is a number,
|
||||
0 for stdout, 1 for stderr, 2 for header. (note that stderr is merged
|
||||
into stdout if PTYs are in use)."""
|
||||
|
||||
class IStatusLogConsumer(Interface):
|
||||
"""I am an object which can be passed to IStatusLog.subscribeConsumer().
|
||||
I represent a target for writing the contents of an IStatusLog. This
|
||||
differs from a regular IStatusReceiver in that it can pause the producer.
|
||||
This makes it more suitable for use in streaming data over network
|
||||
sockets, such as an HTTP request. Note that the consumer can only pause
|
||||
the producer until it has caught up with all the old data. After that
|
||||
point, C{pauseProducing} is ignored and all new output from the log is
|
||||
sent directly to the consumer."""
|
||||
|
||||
def registerProducer(producer, streaming):
|
||||
"""A producer is being hooked up to this consumer. The consumer only
|
||||
has to handle a single producer. It should send .pauseProducing and
|
||||
.resumeProducing messages to the producer when it wants to stop or
|
||||
resume the flow of data. 'streaming' will be set to True because the
|
||||
producer is always a PushProducer.
|
||||
"""
|
||||
|
||||
def unregisterProducer():
|
||||
"""The previously-registered producer has been removed. No further
|
||||
pauseProducing or resumeProducing calls should be made. The consumer
|
||||
should delete its reference to the Producer so it can be released."""
|
||||
|
||||
def writeChunk(chunk):
|
||||
"""A chunk (i.e. a tuple of (channel, text)) is being written to the
|
||||
consumer."""
|
||||
|
||||
def finish():
|
||||
"""The log has finished sending chunks to the consumer."""
|
||||
|
||||
class IStatusReceiver(Interface):
|
||||
"""I am an object which can receive build status updates. I may be
|
||||
subscribed to an IStatus, an IBuilderStatus, or an IBuildStatus."""
|
||||
|
||||
def buildsetSubmitted(buildset):
|
||||
"""A new BuildSet has been submitted to the buildmaster.
|
||||
|
||||
@type buildset: implementor of L{IBuildSetStatus}
|
||||
"""
|
||||
|
||||
def builderAdded(builderName, builder):
|
||||
"""
|
||||
A new Builder has just been added. This method may return an
|
||||
IStatusReceiver (probably 'self') which will be subscribed to receive
|
||||
builderChangedState and buildStarted/Finished events.
|
||||
|
||||
@type builderName: string
|
||||
@type builder: L{buildbot.status.builder.BuilderStatus}
|
||||
@rtype: implementor of L{IStatusReceiver}
|
||||
"""
|
||||
|
||||
def builderChangedState(builderName, state):
|
||||
"""Builder 'builderName' has changed state. The possible values for
|
||||
'state' are 'offline', 'idle', and 'building'."""
|
||||
|
||||
def buildStarted(builderName, build):
|
||||
"""Builder 'builderName' has just started a build. The build is an
|
||||
object which implements IBuildStatus, and can be queried for more
|
||||
information.
|
||||
|
||||
This method may return an IStatusReceiver (it could even return
|
||||
'self'). If it does so, stepStarted and stepFinished methods will be
|
||||
invoked on the object for the steps of this one build. This is a
|
||||
convenient way to subscribe to all build steps without missing any.
|
||||
This receiver will automatically be unsubscribed when the build
|
||||
finishes.
|
||||
|
||||
It can also return a tuple of (IStatusReceiver, interval), in which
|
||||
case buildETAUpdate messages are sent every 'interval' seconds, in
|
||||
addition to the stepStarted and stepFinished messages."""
|
||||
|
||||
def buildETAUpdate(build, ETA):
|
||||
"""This is a periodic update on the progress this Build has made
|
||||
towards completion."""
|
||||
|
||||
def stepStarted(build, step):
|
||||
"""A step has just started. 'step' is the IBuildStepStatus which
|
||||
represents the step: it can be queried for more information.
|
||||
|
||||
This method may return an IStatusReceiver (it could even return
|
||||
'self'). If it does so, logStarted and logFinished methods will be
|
||||
invoked on the object for logs created by this one step. This
|
||||
receiver will be automatically unsubscribed when the step finishes.
|
||||
|
||||
Alternatively, the method may return a tuple of an IStatusReceiver
|
||||
and an integer named 'updateInterval'. In addition to
|
||||
logStarted/logFinished messages, it will also receive stepETAUpdate
|
||||
messages about every updateInterval seconds."""
|
||||
|
||||
def stepETAUpdate(build, step, ETA, expectations):
|
||||
"""This is a periodic update on the progress this Step has made
|
||||
towards completion. It gets an ETA (in seconds from the present) of
|
||||
when the step ought to be complete, and a list of expectation tuples
|
||||
(as returned by IBuildStepStatus.getExpectations) with more detailed
|
||||
information."""
|
||||
|
||||
def logStarted(build, step, log):
|
||||
"""A new Log has been started, probably because a step has just
|
||||
started running a shell command. 'log' is the IStatusLog object
|
||||
which can be queried for more information.
|
||||
|
||||
This method may return an IStatusReceiver (such as 'self'), in which
|
||||
case the target's logChunk method will be invoked as text is added to
|
||||
the logfile. This receiver will automatically be unsubscribed when the
|
||||
log finishes."""
|
||||
|
||||
def logChunk(build, step, log, channel, text):
|
||||
"""Some text has been added to this log. 'channel' is one of
|
||||
LOG_CHANNEL_STDOUT, LOG_CHANNEL_STDERR, or LOG_CHANNEL_HEADER, as
|
||||
defined in IStatusLog.getChunks."""
|
||||
|
||||
def logFinished(build, step, log):
|
||||
"""A Log has been closed."""
|
||||
|
||||
def stepFinished(build, step, results):
|
||||
"""A step has just finished. 'results' is the result tuple described
|
||||
in IBuildStepStatus.getResults."""
|
||||
|
||||
def buildFinished(builderName, build, results):
|
||||
"""
|
||||
A build has just finished. 'results' is the result tuple described
|
||||
in L{IBuildStatus.getResults}.
|
||||
|
||||
@type builderName: string
|
||||
@type build: L{buildbot.status.builder.BuildStatus}
|
||||
@type results: tuple
|
||||
"""
|
||||
|
||||
def builderRemoved(builderName):
|
||||
"""The Builder has been removed."""
|
||||
|
||||
class IControl(Interface):
|
||||
def addChange(change):
|
||||
"""Add a change to all builders. Each Builder will decide for
|
||||
themselves whether the change is interesting or not, and may initiate
|
||||
a build as a result."""
|
||||
|
||||
def submitBuildSet(buildset):
|
||||
"""Submit a BuildSet object, which will eventually be run on all of
|
||||
the builders listed therein."""
|
||||
|
||||
def getBuilder(name):
|
||||
"""Retrieve the IBuilderControl object for the given Builder."""
|
||||
|
||||
class IBuilderControl(Interface):
|
||||
def requestBuild(request):
|
||||
"""Queue a L{buildbot.process.base.BuildRequest} object for later
|
||||
building."""
|
||||
|
||||
def requestBuildSoon(request):
|
||||
"""Submit a BuildRequest like requestBuild, but raise a
|
||||
L{buildbot.interfaces.NoSlaveError} if no slaves are currently
|
||||
available, so it cannot be used to queue a BuildRequest in the hopes
|
||||
that a slave will eventually connect. This method is appropriate for
|
||||
use by things like the web-page 'Force Build' button."""
|
||||
|
||||
def resubmitBuild(buildStatus, reason="<rebuild, no reason given>"):
|
||||
"""Rebuild something we've already built before. This submits a
|
||||
BuildRequest to our Builder using the same SourceStamp as the earlier
|
||||
build. This has no effect (but may eventually raise an exception) if
|
||||
this Build has not yet finished."""
|
||||
|
||||
def getPendingBuilds():
|
||||
"""Return a list of L{IBuildRequestControl} objects for this Builder.
|
||||
Each one corresponds to a pending build that has not yet started (due
|
||||
to a scarcity of build slaves). These upcoming builds can be canceled
|
||||
through the control object."""
|
||||
|
||||
def getBuild(number):
|
||||
"""Attempt to return an IBuildControl object for the given build.
|
||||
Returns None if no such object is available. This will only work for
|
||||
the build that is currently in progress: once the build finishes,
|
||||
there is nothing to control anymore."""
|
||||
|
||||
def ping(timeout=30):
|
||||
"""Attempt to contact the slave and see if it is still alive. This
|
||||
returns a Deferred which fires with either True (the slave is still
|
||||
alive) or False (the slave did not respond). As a side effect, adds
|
||||
an event to this builder's column in the waterfall display
|
||||
containing the results of the ping."""
|
||||
# TODO: this ought to live in ISlaveControl, maybe with disconnect()
|
||||
# or something. However the event that is emitted is most useful in
|
||||
# the Builder column, so it kinda fits here too.
|
||||
|
||||
class IBuildRequestControl(Interface):
|
||||
def subscribe(observer):
|
||||
"""Register a callable that will be invoked (with a single
|
||||
IBuildControl object) for each Build that is created to satisfy this
|
||||
request. There may be multiple Builds created in an attempt to handle
|
||||
the request: they may be interrupted by the user or abandoned due to
|
||||
a lost slave. The last Build (the one which actually gets to run to
|
||||
completion) is said to 'satisfy' the BuildRequest. The observer will
|
||||
be called once for each of these Builds, both old and new."""
|
||||
def unsubscribe(observer):
|
||||
"""Unregister the callable that was registered with subscribe()."""
|
||||
def cancel():
|
||||
"""Remove the build from the pending queue. Has no effect if the
|
||||
build has already been started."""
|
||||
|
||||
class IBuildControl(Interface):
|
||||
def getStatus():
|
||||
"""Return an IBuildStatus object for the Build that I control."""
|
||||
def stopBuild(reason="<no reason given>"):
|
||||
"""Halt the build. This has no effect if the build has already
|
||||
finished."""
|
||||
|
||||
class ILogFile(Interface):
|
||||
"""This is the internal interface to a LogFile, used by the BuildStep to
|
||||
write data into the log.
|
||||
"""
|
||||
def addStdout(data):
|
||||
pass
|
||||
def addStderr(data):
|
||||
pass
|
||||
def addHeader(data):
|
||||
pass
|
||||
def finish():
|
||||
"""The process that is feeding the log file has finished, and no
|
||||
further data will be added. This closes the logfile."""
|
||||
|
||||
class ILogObserver(Interface):
|
||||
"""Objects which provide this interface can be used in a BuildStep to
|
||||
watch the output of a LogFile and parse it incrementally.
|
||||
"""
|
||||
|
||||
# internal methods
|
||||
def setStep(step):
|
||||
pass
|
||||
def setLog(log):
|
||||
pass
|
||||
|
||||
# methods called by the LogFile
|
||||
def logChunk(build, step, log, channel, text):
|
||||
pass
|
||||
|
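Most of these interfaces are consumed by status plugins. As a concrete illustration, here is a sketch of a console status target that implements a few of the IStatusReceiver methods and walks the IBuildStatus/IStatusLog accessors documented above; how the IStatus handle is obtained (shown as master.getStatus()) is assumed, everything else follows the interface docstrings.

    # Sketch: a console status receiver built against the interfaces above.
    class ConsoleStatusReceiver:
        """Prints one line per finished build; returning self from
        builderAdded/buildStarted subscribes it to further events, as the
        IStatusReceiver docstrings describe."""

        def builderAdded(self, builderName, builder):
            print "watching builder", builderName
            return self    # receive this builder's state/build events

        def builderChangedState(self, builderName, state):
            print builderName, "is now", state

        def buildStarted(self, builderName, build):
            return self    # also receive stepStarted/stepFinished messages

        def stepFinished(self, build, step, results):
            print "  step", step.getName(), "->", results

        def buildFinished(self, builderName, build, results):
            print builderName, "build #%d finished:" % build.getNumber(), results
            for log in build.getLogs():
                if log.hasContents():
                    print "    log %s: %d bytes" % (log.getName(),
                                                    len(log.getText()))

    # usage sketch:
    #   status = master.getStatus()        # an IStatus provider (assumed handle)
    #   status.subscribe(ConsoleStatusReceiver())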
148
tools/buildbot/buildbot/locks.py
Normal file
148
tools/buildbot/buildbot/locks.py
Normal file
@ -0,0 +1,148 @@
# -*- test-case-name: buildbot.test.test_locks -*-

from twisted.python import log
from twisted.internet import reactor, defer
from buildbot import util

if False: # for debugging
    def debuglog(msg):
        log.msg(msg)
else:
    def debuglog(msg):
        pass

class BaseLock:
    description = "<BaseLock>"

    def __init__(self, name, maxCount=1):
        self.name = name
        self.waiting = []
        self.owners = []
        self.maxCount = maxCount

    def __repr__(self):
        return self.description

    def isAvailable(self):
        debuglog("%s isAvailable: self.owners=%r" % (self, self.owners))
        return len(self.owners) < self.maxCount

    def claim(self, owner):
        debuglog("%s claim(%s)" % (self, owner))
        assert owner is not None
        assert len(self.owners) < self.maxCount, "ask for isAvailable() first"
        self.owners.append(owner)
        debuglog(" %s is claimed" % (self,))

    def release(self, owner):
        debuglog("%s release(%s)" % (self, owner))
        assert owner in self.owners
        self.owners.remove(owner)
        # who can we wake up?
        if self.waiting:
            d = self.waiting.pop(0)
            reactor.callLater(0, d.callback, self)

    def waitUntilMaybeAvailable(self, owner):
        """Fire when the lock *might* be available. The caller will need to
        check with isAvailable() when the deferred fires. This loose form is
        used to avoid deadlocks. If we were interested in a stronger form,
        this would be named 'waitUntilAvailable', and the deferred would fire
        after the lock had been claimed.
        """
        debuglog("%s waitUntilAvailable(%s)" % (self, owner))
        if self.isAvailable():
            return defer.succeed(self)
        d = defer.Deferred()
        self.waiting.append(d)
        return d


class RealMasterLock(BaseLock):
    def __init__(self, lockid):
        BaseLock.__init__(self, lockid.name, lockid.maxCount)
        self.description = "<MasterLock(%s, %s)>" % (self.name, self.maxCount)

    def getLock(self, slave):
        return self

class RealSlaveLock:
    def __init__(self, lockid):
        self.name = lockid.name
        self.maxCount = lockid.maxCount
        self.maxCountForSlave = lockid.maxCountForSlave
        self.description = "<SlaveLock(%s, %s, %s)>" % (self.name,
                                                        self.maxCount,
                                                        self.maxCountForSlave)
        self.locks = {}

    def __repr__(self):
        return self.description

    def getLock(self, slavebuilder):
        slavename = slavebuilder.slave.slavename
        if not self.locks.has_key(slavename):
            maxCount = self.maxCountForSlave.get(slavename,
                                                 self.maxCount)
            lock = self.locks[slavename] = BaseLock(self.name, maxCount)
            desc = "<SlaveLock(%s, %s)[%s] %d>" % (self.name, maxCount,
                                                   slavename, id(lock))
            lock.description = desc
            self.locks[slavename] = lock
        return self.locks[slavename]


# master.cfg should only reference the following MasterLock and SlaveLock
# classes. They are identifiers that will be turned into real Locks later,
# via the BotMaster.getLockByID method.

class MasterLock(util.ComparableMixin):
    """I am a semaphore that limits the number of simultaneous actions.

    Builds and BuildSteps can declare that they wish to claim me as they run.
    Only a limited number of such builds or steps will be able to run
    simultaneously. By default this number is one, but my maxCount parameter
    can be raised to allow two or three or more operations to happen at the
    same time.

    Use this to protect a resource that is shared among all builders and all
    slaves, for example to limit the load on a common SVN repository.
    """

    compare_attrs = ['name', 'maxCount']
    lockClass = RealMasterLock
    def __init__(self, name, maxCount=1):
        self.name = name
        self.maxCount = maxCount

class SlaveLock(util.ComparableMixin):
    """I am a semaphore that limits simultaneous actions on each buildslave.

    Builds and BuildSteps can declare that they wish to claim me as they run.
    Only a limited number of such builds or steps will be able to run
    simultaneously on any given buildslave. By default this number is one,
    but my maxCount parameter can be raised to allow two or three or more
    operations to happen on a single buildslave at the same time.

    Use this to protect a resource that is shared among all the builds taking
    place on each slave, for example to limit CPU or memory load on an
    underpowered machine.

    Each buildslave will get an independent copy of this semaphore. By
    default each copy will use the same owner count (set with maxCount), but
    you can provide maxCountForSlave with a dictionary that maps slavename to
    owner count, to allow some slaves more parallelism than others.

    """

    compare_attrs = ['name', 'maxCount', '_maxCountForSlaveList']
    lockClass = RealSlaveLock
    def __init__(self, name, maxCount=1, maxCountForSlave={}):
        self.name = name
        self.maxCount = maxCount
        self.maxCountForSlave = maxCountForSlave
        # for comparison purposes, turn this dictionary into a stably-sorted
        # list of tuples
        self._maxCountForSlaveList = self.maxCountForSlave.items()
        self._maxCountForSlaveList.sort()
        self._maxCountForSlaveList = tuple(self._maxCountForSlaveList)
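In master.cfg only the MasterLock and SlaveLock identifiers are used; the buildmaster turns them into the Real*Lock objects above. A brief configuration sketch follows, with placeholder names and the builder/step wiring abbreviated (the exact 'locks=' spelling on steps and builders is assumed from the surrounding documentation rather than shown here):

    # Sketch: declaring lock identifiers in master.cfg (placeholder names).
    from buildbot.locks import MasterLock, SlaveLock

    svn_lock = MasterLock("svn")          # one claim at a time, master-wide
    cpu_lock = SlaveLock("cpu", maxCount=1,
                         maxCountForSlave={'fast-box': 2})  # per-slave limit

    # e.g. a build step would then be configured with locks=[cpu_lock],
    # and an SVN checkout step with locks=[svn_lock].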
265
tools/buildbot/buildbot/manhole.py
Normal file
265
tools/buildbot/buildbot/manhole.py
Normal file
@ -0,0 +1,265 @@
|
||||
|
||||
import os.path
|
||||
import binascii, base64
|
||||
from twisted.python import log
|
||||
from twisted.application import service, strports
|
||||
from twisted.cred import checkers, portal
|
||||
from twisted.conch import manhole, telnet, manhole_ssh, checkers as conchc
|
||||
from twisted.conch.insults import insults
|
||||
from twisted.internet import protocol
|
||||
|
||||
from buildbot.util import ComparableMixin
|
||||
from zope.interface import implements # requires Twisted-2.0 or later
|
||||
|
||||
# makeTelnetProtocol and _TelnetRealm are for the TelnetManhole
|
||||
|
||||
class makeTelnetProtocol:
|
||||
# this curries the 'portal' argument into a later call to
|
||||
# TelnetTransport()
|
||||
def __init__(self, portal):
|
||||
self.portal = portal
|
||||
|
||||
def __call__(self):
|
||||
auth = telnet.AuthenticatingTelnetProtocol
|
||||
return telnet.TelnetTransport(auth, self.portal)
|
||||
|
||||
class _TelnetRealm:
|
||||
implements(portal.IRealm)
|
||||
|
||||
def __init__(self, namespace_maker):
|
||||
self.namespace_maker = namespace_maker
|
||||
|
||||
def requestAvatar(self, avatarId, *interfaces):
|
||||
if telnet.ITelnetProtocol in interfaces:
|
||||
namespace = self.namespace_maker()
|
||||
p = telnet.TelnetBootstrapProtocol(insults.ServerProtocol,
|
||||
manhole.ColoredManhole,
|
||||
namespace)
|
||||
return (telnet.ITelnetProtocol, p, lambda: None)
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class chainedProtocolFactory:
|
||||
# this curries the 'namespace' argument into a later call to
|
||||
# chainedProtocolFactory()
|
||||
def __init__(self, namespace):
|
||||
self.namespace = namespace
|
||||
|
||||
def __call__(self):
|
||||
return insults.ServerProtocol(manhole.ColoredManhole, self.namespace)
|
||||
|
||||
class AuthorizedKeysChecker(conchc.SSHPublicKeyDatabase):
|
||||
"""Accept connections using SSH keys from a given file.
|
||||
|
||||
SSHPublicKeyDatabase takes the username that the prospective client has
|
||||
requested and attempts to get a ~/.ssh/authorized_keys file for that
|
||||
username. This requires root access, so it isn't as useful as you'd
|
||||
like.
|
||||
|
||||
Instead, this subclass looks for keys in a single file, given as an
|
||||
argument. This file is typically kept in the buildmaster's basedir. The
|
||||
file should have 'ssh-dss ....' lines in it, just like authorized_keys.
|
||||
"""
|
||||
|
||||
def __init__(self, authorized_keys_file):
|
||||
self.authorized_keys_file = os.path.expanduser(authorized_keys_file)
|
||||
|
||||
def checkKey(self, credentials):
|
||||
f = open(self.authorized_keys_file)
|
||||
for l in f.readlines():
|
||||
l2 = l.split()
|
||||
if len(l2) < 2:
|
||||
continue
|
||||
try:
|
||||
if base64.decodestring(l2[1]) == credentials.blob:
|
||||
return 1
|
||||
except binascii.Error:
|
||||
continue
|
||||
return 0
|
||||
|
||||
|
||||
class _BaseManhole(service.MultiService):
|
||||
"""This provides remote access to a python interpreter (a read/exec/print
|
||||
loop) embedded in the buildmaster via an internal SSH server. This allows
|
||||
detailed inspection of the buildmaster state. It is of most use to
|
||||
buildbot developers. Connect to this by running an ssh client.
|
||||
"""
|
||||
|
||||
def __init__(self, port, checker, using_ssh=True):
|
||||
"""
|
||||
@type port: string or int
|
||||
@param port: what port should the Manhole listen on? This is a
|
||||
strports specification string, like 'tcp:12345' or
|
||||
'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
|
||||
simple tcp port.
|
||||
|
||||
@type checker: an object providing the
|
||||
L{twisted.cred.checkers.ICredentialsChecker} interface
|
||||
@param checker: if provided, this checker is used to authenticate the
|
||||
client instead of using the username/password scheme. You must either
|
||||
provide a username/password or a Checker. Some useful values are::
|
||||
import twisted.cred.checkers as credc
|
||||
import twisted.conch.checkers as conchc
|
||||
c = credc.AllowAnonymousAccess # completely open
|
||||
c = credc.FilePasswordDB(passwd_filename) # file of name:passwd
|
||||
c = conchc.UNIXPasswordDatabase # getpwnam() (probably /etc/passwd)
|
||||
|
||||
@type using_ssh: bool
|
||||
@param using_ssh: If True, accept SSH connections. If False, accept
|
||||
regular unencrypted telnet connections.
|
||||
"""
|
||||
|
||||
# unfortunately, these don't work unless we're running as root
|
||||
#c = credc.PluggableAuthenticationModulesChecker: PAM
|
||||
#c = conchc.SSHPublicKeyDatabase() # ~/.ssh/authorized_keys
|
||||
# and I can't get UNIXPasswordDatabase to work
|
||||
|
||||
service.MultiService.__init__(self)
|
||||
if type(port) is int:
|
||||
port = "tcp:%d" % port
|
||||
self.port = port # for comparison later
|
||||
self.checker = checker # to maybe compare later
|
||||
|
||||
def makeNamespace():
|
||||
# close over 'self' so we can get access to .parent later
|
||||
master = self.parent
|
||||
namespace = {
|
||||
'master': master,
|
||||
'status': master.getStatus(),
|
||||
}
|
||||
return namespace
|
||||
|
||||
def makeProtocol():
|
||||
namespace = makeNamespace()
|
||||
p = insults.ServerProtocol(manhole.ColoredManhole, namespace)
|
||||
return p
|
||||
|
||||
self.using_ssh = using_ssh
|
||||
if using_ssh:
|
||||
r = manhole_ssh.TerminalRealm()
|
||||
r.chainedProtocolFactory = makeProtocol
|
||||
p = portal.Portal(r, [self.checker])
|
||||
f = manhole_ssh.ConchFactory(p)
|
||||
else:
|
||||
r = _TelnetRealm(makeNamespace)
|
||||
p = portal.Portal(r, [self.checker])
|
||||
f = protocol.ServerFactory()
|
||||
f.protocol = makeTelnetProtocol(p)
|
||||
s = strports.service(self.port, f)
|
||||
s.setServiceParent(self)
|
||||
|
||||
|
||||
def startService(self):
|
||||
service.MultiService.startService(self)
|
||||
if self.using_ssh:
|
||||
via = "via SSH"
|
||||
else:
|
||||
via = "via telnet"
|
||||
log.msg("Manhole listening %s on port %s" % (via, self.port))
|
||||
|
||||
|
||||
class TelnetManhole(_BaseManhole, ComparableMixin):
|
||||
"""This Manhole accepts unencrypted (telnet) connections, and requires a
|
||||
username and password authorize access. You are encouraged to use the
|
||||
encrypted ssh-based manhole classes instead."""
|
||||
|
||||
compare_attrs = ["port", "username", "password"]
|
||||
|
||||
def __init__(self, port, username, password):
|
||||
"""
|
||||
@type port: string or int
|
||||
@param port: what port should the Manhole listen on? This is a
|
||||
strports specification string, like 'tcp:12345' or
|
||||
'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
|
||||
simple tcp port.
|
||||
|
||||
@param username:
|
||||
@param password: username= and password= form a pair of strings to
|
||||
use when authenticating the remote user.
|
||||
"""
|
||||
|
||||
self.username = username
|
||||
self.password = password
|
||||
|
||||
c = checkers.InMemoryUsernamePasswordDatabaseDontUse()
|
||||
c.addUser(username, password)
|
||||
|
||||
_BaseManhole.__init__(self, port, c, using_ssh=False)
|
||||
|
||||
class PasswordManhole(_BaseManhole, ComparableMixin):
|
||||
"""This Manhole accepts encrypted (ssh) connections, and requires a
|
||||
username and password to authorize access.
|
||||
"""
|
||||
|
||||
compare_attrs = ["port", "username", "password"]
|
||||
|
||||
def __init__(self, port, username, password):
|
||||
"""
|
||||
@type port: string or int
|
||||
@param port: what port should the Manhole listen on? This is a
|
||||
strports specification string, like 'tcp:12345' or
|
||||
'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
|
||||
simple tcp port.
|
||||
|
||||
@param username:
|
||||
@param password: username= and password= form a pair of strings to
|
||||
use when authenticating the remote user.
|
||||
"""
|
||||
|
||||
self.username = username
|
||||
self.password = password
|
||||
|
||||
c = checkers.InMemoryUsernamePasswordDatabaseDontUse()
|
||||
c.addUser(username, password)
|
||||
|
||||
_BaseManhole.__init__(self, port, c)
|
||||
|
||||
class AuthorizedKeysManhole(_BaseManhole, ComparableMixin):
|
||||
"""This Manhole accepts ssh connections, and requires that the
|
||||
prospective client have an ssh private key that matches one of the public
|
||||
keys in our authorized_keys file. It is created with the name of a file
|
||||
that contains the public keys that we will accept."""
|
||||
|
||||
compare_attrs = ["port", "keyfile"]
|
||||
|
||||
def __init__(self, port, keyfile):
|
||||
"""
|
||||
@type port: string or int
|
||||
@param port: what port should the Manhole listen on? This is a
|
||||
strports specification string, like 'tcp:12345' or
|
||||
'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
|
||||
simple tcp port.
|
||||
|
||||
@param keyfile: the name of a file (relative to the buildmaster's
|
||||
basedir) that contains SSH public keys of authorized
|
||||
users, one per line. This is the exact same format
|
||||
as used by sshd in ~/.ssh/authorized_keys .
|
||||
"""
|
||||
|
||||
# TODO: expanduser this, and make it relative to the buildmaster's
|
||||
# basedir
|
||||
self.keyfile = keyfile
|
||||
c = AuthorizedKeysChecker(keyfile)
|
||||
_BaseManhole.__init__(self, port, c)
|
||||
|
||||
class ArbitraryCheckerManhole(_BaseManhole, ComparableMixin):
|
||||
"""This Manhole accepts ssh connections, but uses an arbitrary
|
||||
user-supplied 'checker' object to perform authentication."""
|
||||
|
||||
compare_attrs = ["port", "checker"]
|
||||
|
||||
def __init__(self, port, checker):
|
||||
"""
|
||||
@type port: string or int
|
||||
@param port: what port should the Manhole listen on? This is a
|
||||
strports specification string, like 'tcp:12345' or
|
||||
'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
|
||||
simple tcp port.
|
||||
|
||||
@param checker: an instance of a twisted.cred 'checker' which will
|
||||
perform authentication
|
||||
"""
|
||||
|
||||
_BaseManhole.__init__(self, port, checker)
|
||||
|
||||
|
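These Manhole classes are normally instantiated from the buildmaster's master.cfg. A minimal sketch, assuming the usual BuildmasterConfig dict and a 'manhole' entry (neither is defined in this file); the port strings use the strports format described in the constructors above, and the credentials and key file name are placeholders:

from buildbot import manhole

c = BuildmasterConfig = {}
# SSH-encrypted, password-protected access, bound to localhost only
c['manhole'] = manhole.PasswordManhole("tcp:9999:interface=127.0.0.1",
                                       "admin", "s3cret")
# or: accept any SSH key listed in a local authorized_keys-style file
#c['manhole'] = manhole.AuthorizedKeysManhole("tcp:9999", "manhole_keys")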
1027
tools/buildbot/buildbot/master.py
Normal file
File diff suppressed because it is too large
Load Diff
147
tools/buildbot/buildbot/pbutil.py
Normal file
@ -0,0 +1,147 @@
|
||||
|
||||
"""Base classes handy for use with PB clients.
|
||||
"""
|
||||
|
||||
from twisted.spread import pb
|
||||
|
||||
from twisted.spread.pb import PBClientFactory
|
||||
from twisted.internet import protocol
|
||||
from twisted.python import log
|
||||
|
||||
class NewCredPerspective(pb.Avatar):
|
||||
def attached(self, mind):
|
||||
return self
|
||||
def detached(self, mind):
|
||||
pass
|
||||
|
||||
class ReconnectingPBClientFactory(PBClientFactory,
|
||||
protocol.ReconnectingClientFactory):
|
||||
"""Reconnecting client factory for PB brokers.
|
||||
|
||||
Like PBClientFactory, but if the connection fails or is lost, the factory
|
||||
will attempt to reconnect.
|
||||
|
||||
Instead of using f.getRootObject (which gives a Deferred that can only
|
||||
be fired once), override the gotRootObject method.
|
||||
|
||||
Instead of using the newcred f.login (which is also one-shot), call
|
||||
f.startLogin() with the credentials and client, and override the
|
||||
gotPerspective method.
|
||||
|
||||
Instead of using the oldcred f.getPerspective (also one-shot), call
|
||||
f.startGettingPerspective() with the same arguments, and override
|
||||
gotPerspective.
|
||||
|
||||
gotRootObject and gotPerspective will be called each time the object is
|
||||
received (once per successful connection attempt). You will probably want
|
||||
to use obj.notifyOnDisconnect to find out when the connection is lost.
|
||||
|
||||
If an authorization error occurs, failedToGetPerspective() will be
|
||||
invoked.
|
||||
|
||||
To use me, subclass, then hand an instance to a connector (like
|
||||
TCPClient).
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
PBClientFactory.__init__(self)
|
||||
self._doingLogin = False
|
||||
self._doingGetPerspective = False
|
||||
|
||||
def clientConnectionFailed(self, connector, reason):
|
||||
PBClientFactory.clientConnectionFailed(self, connector, reason)
|
||||
# Twisted-1.3 erroneously abandons the connection on non-UserErrors.
|
||||
# To avoid this bug, don't upcall, and implement the correct version
|
||||
# of the method here.
|
||||
if self.continueTrying:
|
||||
self.connector = connector
|
||||
self.retry()
|
||||
|
||||
def clientConnectionLost(self, connector, reason):
|
||||
PBClientFactory.clientConnectionLost(self, connector, reason,
|
||||
reconnecting=True)
|
||||
RCF = protocol.ReconnectingClientFactory
|
||||
RCF.clientConnectionLost(self, connector, reason)
|
||||
|
||||
def clientConnectionMade(self, broker):
|
||||
self.resetDelay()
|
||||
PBClientFactory.clientConnectionMade(self, broker)
|
||||
if self._doingLogin:
|
||||
self.doLogin(self._root)
|
||||
if self._doingGetPerspective:
|
||||
self.doGetPerspective(self._root)
|
||||
self.gotRootObject(self._root)
|
||||
|
||||
def __getstate__(self):
|
||||
# this should get folded into ReconnectingClientFactory
|
||||
d = self.__dict__.copy()
|
||||
d['connector'] = None
|
||||
d['_callID'] = None
|
||||
return d
|
||||
|
||||
# oldcred methods
|
||||
|
||||
def getPerspective(self, *args):
|
||||
raise RuntimeError, "getPerspective is one-shot: use startGettingPerspective instead"
|
||||
|
||||
def startGettingPerspective(self, username, password, serviceName,
|
||||
perspectiveName=None, client=None):
|
||||
self._doingGetPerspective = True
|
||||
if perspectiveName is None:
|
||||
perspectiveName = username
|
||||
self._oldcredArgs = (username, password, serviceName,
|
||||
perspectiveName, client)
|
||||
|
||||
def doGetPerspective(self, root):
|
||||
# oldcred getPerspective()
|
||||
(username, password,
|
||||
serviceName, perspectiveName, client) = self._oldcredArgs
|
||||
d = self._cbAuthIdentity(root, username, password)
|
||||
d.addCallback(self._cbGetPerspective,
|
||||
serviceName, perspectiveName, client)
|
||||
d.addCallbacks(self.gotPerspective, self.failedToGetPerspective)
|
||||
|
||||
|
||||
# newcred methods
|
||||
|
||||
def login(self, *args):
|
||||
raise RuntimeError, "login is one-shot: use startLogin instead"
|
||||
|
||||
def startLogin(self, credentials, client=None):
|
||||
self._credentials = credentials
|
||||
self._client = client
|
||||
self._doingLogin = True
|
||||
|
||||
def doLogin(self, root):
|
||||
# newcred login()
|
||||
d = self._cbSendUsername(root, self._credentials.username,
|
||||
self._credentials.password, self._client)
|
||||
d.addCallbacks(self.gotPerspective, self.failedToGetPerspective)
|
||||
|
||||
|
||||
# methods to override
|
||||
|
||||
def gotPerspective(self, perspective):
|
||||
"""The remote avatar or perspective (obtained each time this factory
|
||||
connects) is now available."""
|
||||
pass
|
||||
|
||||
def gotRootObject(self, root):
|
||||
"""The remote root object (obtained each time this factory connects)
|
||||
is now available. This method will be called each time the connection
|
||||
is established and the object reference is retrieved."""
|
||||
pass
|
||||
|
||||
def failedToGetPerspective(self, why):
|
||||
"""The login process failed, most likely because of an authorization
|
||||
failure (bad password), but it is also possible that we lost the new
|
||||
connection before we managed to send our credentials.
|
||||
"""
|
||||
log.msg("ReconnectingPBClientFactory.failedToGetPerspective")
|
||||
if why.check(pb.PBConnectionLost):
|
||||
log.msg("we lost the brand-new connection")
|
||||
# retrying might help here, let clientConnectionLost decide
|
||||
return
|
||||
# probably authorization
|
||||
self.stopTrying() # logging in harder won't help
|
||||
log.err(why)
|
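The docstring above gives the recipe for using ReconnectingPBClientFactory: subclass it, call startLogin() with your credentials, override gotPerspective(), and hand the factory to a connector such as TCPClient. An illustrative sketch along those lines; the hostname, port, and credentials are placeholders:

from twisted.application import internet
from twisted.cred import credentials
from twisted.python import log
from buildbot.pbutil import ReconnectingPBClientFactory

class MyClientFactory(ReconnectingPBClientFactory):
    def gotPerspective(self, perspective):
        # called after every successful (re)connection
        log.msg("connected, got perspective %s" % perspective)

f = MyClientFactory()
f.startLogin(credentials.UsernamePassword("bot1", "passwd"))
client = internet.TCPClient("buildmaster.example.org", 9989, f)
# in a .tac file: client.setServiceParent(application)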
0
tools/buildbot/buildbot/process/__init__.py
Normal file
574
tools/buildbot/buildbot/process/base.py
Normal file
@ -0,0 +1,574 @@
|
||||
# -*- test-case-name: buildbot.test.test_step -*-
|
||||
|
||||
import types
|
||||
|
||||
from twisted.python import log
|
||||
from twisted.python.failure import Failure
|
||||
from twisted.internet import reactor, defer, error
|
||||
|
||||
from buildbot import interfaces
|
||||
from buildbot.twcompat import implements
|
||||
from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE, EXCEPTION
|
||||
from buildbot.status.builder import Results, BuildRequestStatus
|
||||
from buildbot.status.progress import BuildProgress
|
||||
|
||||
class BuildRequest:
|
||||
"""I represent a request to a specific Builder to run a single build.
|
||||
|
||||
I have a SourceStamp which specifies what sources I will build. This may
|
||||
specify a specific revision of the source tree (so source.branch,
|
||||
source.revision, and source.patch are used). The .patch attribute is
|
||||
either None or a tuple of (patchlevel, diff), consisting of a number to
|
||||
use in 'patch -pN', and a unified-format context diff.
|
||||
|
||||
Alternatively, the SourceStamp may specify a set of Changes to be built,
|
||||
contained in source.changes. In this case, I may be mergeable with other
|
||||
BuildRequests on the same branch.
|
||||
|
||||
I may be part of a BuildSet, in which case I will report status results
|
||||
to it.
|
||||
|
||||
I am paired with a BuildRequestStatus object, to which I feed status
|
||||
information.
|
||||
|
||||
@type source: a L{buildbot.sourcestamp.SourceStamp} instance.
|
||||
@ivar source: the source code that this BuildRequest uses
|
||||
|
||||
@type reason: string
|
||||
@ivar reason: the reason this Build is being requested. Schedulers
|
||||
provide this, but for forced builds the user requesting the
|
||||
build will provide a string.
|
||||
|
||||
@ivar status: the IBuildStatus object which tracks our status
|
||||
|
||||
@ivar submittedAt: a timestamp (seconds since epoch) when this request
|
||||
was submitted to the Builder. This is used by the CVS
|
||||
step to compute a checkout timestamp.
|
||||
"""
|
||||
|
||||
source = None
|
||||
builder = None
|
||||
startCount = 0 # how many times we have tried to start this build
|
||||
|
||||
if implements:
|
||||
implements(interfaces.IBuildRequestControl)
|
||||
else:
|
||||
__implements__ = interfaces.IBuildRequestControl,
|
||||
|
||||
def __init__(self, reason, source, builderName=None):
|
||||
# TODO: remove the =None on builderName, it is there so I don't have
|
||||
# to change a lot of tests that create BuildRequest objects
|
||||
assert interfaces.ISourceStamp(source, None)
|
||||
self.reason = reason
|
||||
self.source = source
|
||||
self.start_watchers = []
|
||||
self.finish_watchers = []
|
||||
self.status = BuildRequestStatus(source, builderName)
|
||||
|
||||
def canBeMergedWith(self, other):
|
||||
return self.source.canBeMergedWith(other.source)
|
||||
|
||||
def mergeWith(self, others):
|
||||
return self.source.mergeWith([o.source for o in others])
|
||||
|
||||
def mergeReasons(self, others):
|
||||
"""Return a reason for the merged build request."""
|
||||
reasons = []
|
||||
for req in [self] + others:
|
||||
if req.reason and req.reason not in reasons:
|
||||
reasons.append(req.reason)
|
||||
return ", ".join(reasons)
|
||||
|
||||
def waitUntilFinished(self):
|
||||
"""Get a Deferred that will fire (with a
|
||||
L{buildbot.interfaces.IBuildStatus} instance) when the build
|
||||
finishes."""
|
||||
d = defer.Deferred()
|
||||
self.finish_watchers.append(d)
|
||||
return d
|
||||
|
||||
# these are called by the Builder
|
||||
|
||||
def requestSubmitted(self, builder):
|
||||
# the request has been placed on the queue
|
||||
self.builder = builder
|
||||
|
||||
def buildStarted(self, build, buildstatus):
|
||||
"""This is called by the Builder when a Build has been started in the
|
||||
hopes of satisfying this BuildRequest. It may be called multiple
|
||||
times, since interrupted builds and lost buildslaves may force
|
||||
multiple Builds to be run until the fate of the BuildRequest is known
|
||||
for certain."""
|
||||
for o in self.start_watchers[:]:
|
||||
# these observers get the IBuildControl
|
||||
o(build)
|
||||
# while these get the IBuildStatus
|
||||
self.status.buildStarted(buildstatus)
|
||||
|
||||
def finished(self, buildstatus):
|
||||
"""This is called by the Builder when the BuildRequest has been
|
||||
retired. This happens when its Build has either succeeded (yay!) or
|
||||
failed (boo!). TODO: If it is halted due to an exception (oops!), or
|
||||
some other retryable error, C{finished} will not be called yet."""
|
||||
|
||||
for w in self.finish_watchers:
|
||||
w.callback(buildstatus)
|
||||
self.finish_watchers = []
|
||||
|
||||
# IBuildRequestControl
|
||||
|
||||
def subscribe(self, observer):
|
||||
self.start_watchers.append(observer)
|
||||
def unsubscribe(self, observer):
|
||||
self.start_watchers.remove(observer)
|
||||
|
||||
def cancel(self):
|
||||
"""Cancel this request. This can only be successful if the Build has
|
||||
not yet been started.
|
||||
|
||||
@return: a boolean indicating if the cancel was successful."""
|
||||
if self.builder:
|
||||
return self.builder.cancelBuildRequest(self)
|
||||
return False
|
||||
|
||||
|
||||
class Build:
|
||||
"""I represent a single build by a single bot. Specialized Builders can
|
||||
use subclasses of Build to hold status information unique to those build
|
||||
processes.
|
||||
|
||||
I control B{how} the build proceeds. The actual build is broken up into a
|
||||
series of steps, saved in the .buildSteps[] array as a list of
|
||||
L{buildbot.process.step.BuildStep} objects. Each step is a single remote
|
||||
command, possibly a shell command.
|
||||
|
||||
During the build, I put status information into my C{BuildStatus}
|
||||
gatherer.
|
||||
|
||||
After the build, I go away.
|
||||
|
||||
I can be used by a factory by setting buildClass on
|
||||
L{buildbot.process.factory.BuildFactory}
|
||||
|
||||
@ivar request: the L{BuildRequest} that triggered me
|
||||
@ivar build_status: the L{buildbot.status.builder.BuildStatus} that
|
||||
collects our status
|
||||
"""
|
||||
|
||||
if implements:
|
||||
implements(interfaces.IBuildControl)
|
||||
else:
|
||||
__implements__ = interfaces.IBuildControl,
|
||||
|
||||
workdir = "build"
|
||||
build_status = None
|
||||
reason = "changes"
|
||||
finished = False
|
||||
results = None
|
||||
|
||||
def __init__(self, requests):
|
||||
self.requests = requests
|
||||
for req in self.requests:
|
||||
req.startCount += 1
|
||||
self.locks = []
|
||||
# build a source stamp
|
||||
self.source = requests[0].mergeWith(requests[1:])
|
||||
self.reason = requests[0].mergeReasons(requests[1:])
|
||||
|
||||
#self.abandoned = False
|
||||
|
||||
self.progress = None
|
||||
self.currentStep = None
|
||||
self.slaveEnvironment = {}
|
||||
|
||||
def setBuilder(self, builder):
|
||||
"""
|
||||
Set the given builder as our builder.
|
||||
|
||||
@type builder: L{buildbot.process.builder.Builder}
|
||||
"""
|
||||
self.builder = builder
|
||||
|
||||
def setLocks(self, locks):
|
||||
self.locks = locks
|
||||
|
||||
def getSourceStamp(self):
|
||||
return self.source
|
||||
|
||||
def setProperty(self, propname, value):
|
||||
"""Set a property on this build. This may only be called after the
|
||||
build has started, so that it has a BuildStatus object where the
|
||||
properties can live."""
|
||||
self.build_status.setProperty(propname, value)
|
||||
|
||||
def getProperty(self, propname):
|
||||
return self.build_status.properties[propname]
|
||||
|
||||
|
||||
def allChanges(self):
|
||||
return self.source.changes
|
||||
|
||||
def allFiles(self):
|
||||
# return a list of all source files that were changed
|
||||
files = []
|
||||
havedirs = 0
|
||||
for c in self.allChanges():
|
||||
for f in c.files:
|
||||
files.append(f)
|
||||
if c.isdir:
|
||||
havedirs = 1
|
||||
return files
|
||||
|
||||
def __repr__(self):
|
||||
return "<Build %s>" % (self.builder.name,)
|
||||
|
||||
def __getstate__(self):
|
||||
d = self.__dict__.copy()
|
||||
if d.has_key('remote'):
|
||||
del d['remote']
|
||||
return d
|
||||
|
||||
def blamelist(self):
|
||||
blamelist = []
|
||||
for c in self.allChanges():
|
||||
if c.who not in blamelist:
|
||||
blamelist.append(c.who)
|
||||
blamelist.sort()
|
||||
return blamelist
|
||||
|
||||
def changesText(self):
|
||||
changetext = ""
|
||||
for c in self.allChanges():
|
||||
changetext += "-" * 60 + "\n\n" + c.asText() + "\n"
|
||||
# consider sorting these by number
|
||||
return changetext
|
||||
|
||||
def setSteps(self, steps):
|
||||
"""Set a list of StepFactories, which are generally just class
|
||||
objects which derive from step.BuildStep . These are used to create
|
||||
the Steps themselves when the Build starts (as opposed to when it is
|
||||
first created). By creating the steps later, their __init__ method
|
||||
will have access to things like build.allFiles() ."""
|
||||
self.stepFactories = steps # tuples of (factory, kwargs)
|
||||
for s in steps:
|
||||
pass
|
||||
|
||||
|
||||
|
||||
|
||||
useProgress = True
|
||||
|
||||
def getSlaveCommandVersion(self, command, oldversion=None):
|
||||
return self.slavebuilder.getSlaveCommandVersion(command, oldversion)
|
||||
def getSlaveName(self):
|
||||
return self.slavebuilder.slave.slavename
|
||||
|
||||
def setupStatus(self, build_status):
|
||||
self.build_status = build_status
|
||||
self.setProperty("buildername", self.builder.name)
|
||||
self.setProperty("buildnumber", self.build_status.number)
|
||||
self.setProperty("branch", self.source.branch)
|
||||
self.setProperty("revision", self.source.revision)
|
||||
|
||||
def setupSlaveBuilder(self, slavebuilder):
|
||||
self.slavebuilder = slavebuilder
|
||||
self.slavename = slavebuilder.slave.slavename
|
||||
self.build_status.setSlavename(self.slavename)
|
||||
self.setProperty("slavename", self.slavename)
|
||||
|
||||
def startBuild(self, build_status, expectations, slavebuilder):
|
||||
"""This method sets up the build, then starts it by invoking the
|
||||
first Step. It returns a Deferred which will fire when the build
|
||||
finishes. This Deferred is guaranteed to never errback."""
|
||||
|
||||
# we are taking responsibility for watching the connection to the
|
||||
# remote. This responsibility was held by the Builder until our
|
||||
# startBuild was called, and will not return to them until we fire
|
||||
# the Deferred returned by this method.
|
||||
|
||||
log.msg("%s.startBuild" % self)
|
||||
self.setupStatus(build_status)
|
||||
# now that we have a build_status, we can set properties
|
||||
self.setupSlaveBuilder(slavebuilder)
|
||||
|
||||
# convert all locks into their real forms
|
||||
self.locks = [self.builder.botmaster.getLockByID(l)
|
||||
for l in self.locks]
|
||||
# then narrow SlaveLocks down to the right slave
|
||||
self.locks = [l.getLock(self.slavebuilder) for l in self.locks]
|
||||
self.remote = slavebuilder.remote
|
||||
self.remote.notifyOnDisconnect(self.lostRemote)
|
||||
d = self.deferred = defer.Deferred()
|
||||
def _release_slave(res):
|
||||
self.slavebuilder.buildFinished()
|
||||
return res
|
||||
d.addCallback(_release_slave)
|
||||
|
||||
try:
|
||||
self.setupBuild(expectations) # create .steps
|
||||
except:
|
||||
# the build hasn't started yet, so log the exception as a point
|
||||
# event instead of flunking the build. TODO: associate this
|
||||
# failure with the build instead. this involves doing
|
||||
# self.build_status.buildStarted() from within the exception
|
||||
# handler
|
||||
log.msg("Build.setupBuild failed")
|
||||
log.err(Failure())
|
||||
self.builder.builder_status.addPointEvent(["setupBuild",
|
||||
"exception"],
|
||||
color="purple")
|
||||
self.finished = True
|
||||
self.results = FAILURE
|
||||
self.deferred = None
|
||||
d.callback(self)
|
||||
return d
|
||||
|
||||
self.build_status.buildStarted(self)
|
||||
self.acquireLocks().addCallback(self._startBuild_2)
|
||||
return d
|
||||
|
||||
def acquireLocks(self, res=None):
|
||||
log.msg("acquireLocks(step %s, locks %s)" % (self, self.locks))
|
||||
if not self.locks:
|
||||
return defer.succeed(None)
|
||||
for lock in self.locks:
|
||||
if not lock.isAvailable():
|
||||
log.msg("Build %s waiting for lock %s" % (self, lock))
|
||||
d = lock.waitUntilMaybeAvailable(self)
|
||||
d.addCallback(self.acquireLocks)
|
||||
return d
|
||||
# all locks are available, claim them all
|
||||
for lock in self.locks:
|
||||
lock.claim(self)
|
||||
return defer.succeed(None)
|
||||
|
||||
def _startBuild_2(self, res):
|
||||
self.startNextStep()
|
||||
|
||||
def setupBuild(self, expectations):
|
||||
# create the actual BuildSteps. If there are any name collisions, we
|
||||
# add a count to the loser until it is unique.
|
||||
self.steps = []
|
||||
self.stepStatuses = {}
|
||||
stepnames = []
|
||||
sps = []
|
||||
|
||||
for factory, args in self.stepFactories:
|
||||
args = args.copy()
|
||||
if not args.has_key("workdir"):
|
||||
args['workdir'] = self.workdir
|
||||
try:
|
||||
step = factory(build=self, **args)
|
||||
except:
|
||||
log.msg("error while creating step, factory=%s, args=%s"
|
||||
% (factory, args))
|
||||
raise
|
||||
name = step.name
|
||||
count = 1
|
||||
while name in stepnames and count < 100:
|
||||
count += 1
|
||||
name = step.name + "_%d" % count
|
||||
if name in stepnames:
|
||||
raise RuntimeError("duplicate step '%s'" % step.name)
|
||||
step.name = name
|
||||
stepnames.append(name)
|
||||
self.steps.append(step)
|
||||
|
||||
# tell the BuildStatus about the step. This will create a
|
||||
# BuildStepStatus and bind it to the Step.
|
||||
step_status = self.build_status.addStepWithName(name)
|
||||
step.setStepStatus(step_status)
|
||||
|
||||
sp = None
|
||||
if self.useProgress:
|
||||
# XXX: maybe bail if step.progressMetrics is empty? or skip
|
||||
# progress for that one step (i.e. "it is fast"), or have a
|
||||
# separate "variable" flag that makes us bail on progress
|
||||
# tracking
|
||||
sp = step.setupProgress()
|
||||
if sp:
|
||||
sps.append(sp)
|
||||
|
||||
# Create a buildbot.status.progress.BuildProgress object. This is
|
||||
# called once at startup to figure out how to build the long-term
|
||||
# Expectations object, and again at the start of each build to get a
|
||||
# fresh BuildProgress object to track progress for that individual
|
||||
# build. TODO: revisit at-startup call
|
||||
|
||||
if self.useProgress:
|
||||
self.progress = BuildProgress(sps)
|
||||
if self.progress and expectations:
|
||||
self.progress.setExpectationsFrom(expectations)
|
||||
|
||||
# we are now ready to set up our BuildStatus.
|
||||
self.build_status.setSourceStamp(self.source)
|
||||
self.build_status.setReason(self.reason)
|
||||
self.build_status.setBlamelist(self.blamelist())
|
||||
self.build_status.setProgress(self.progress)
|
||||
|
||||
self.results = [] # list of FAILURE, SUCCESS, WARNINGS, SKIPPED
|
||||
self.result = SUCCESS # overall result, may downgrade after each step
|
||||
self.text = [] # list of text string lists (text2)
|
||||
|
||||
def getNextStep(self):
|
||||
"""This method is called to obtain the next BuildStep for this build.
|
||||
When it returns None (or raises a StopIteration exception), the build
|
||||
is complete."""
|
||||
if not self.steps:
|
||||
return None
|
||||
return self.steps.pop(0)
|
||||
|
||||
def startNextStep(self):
|
||||
try:
|
||||
s = self.getNextStep()
|
||||
except StopIteration:
|
||||
s = None
|
||||
if not s:
|
||||
return self.allStepsDone()
|
||||
self.currentStep = s
|
||||
d = defer.maybeDeferred(s.startStep, self.remote)
|
||||
d.addCallback(self._stepDone, s)
|
||||
d.addErrback(self.buildException)
|
||||
|
||||
def _stepDone(self, results, step):
|
||||
self.currentStep = None
|
||||
if self.finished:
|
||||
return # build was interrupted, don't keep building
|
||||
terminate = self.stepDone(results, step) # interpret/merge results
|
||||
if terminate:
|
||||
return self.allStepsDone()
|
||||
self.startNextStep()
|
||||
|
||||
def stepDone(self, result, step):
|
||||
"""This method is called when the BuildStep completes. It is passed a
|
||||
status object from the BuildStep and is responsible for merging the
|
||||
Step's results into those of the overall Build."""
|
||||
|
||||
terminate = False
|
||||
text = None
|
||||
if type(result) == types.TupleType:
|
||||
result, text = result
|
||||
assert type(result) == type(SUCCESS)
|
||||
log.msg(" step '%s' complete: %s" % (step.name, Results[result]))
|
||||
self.results.append(result)
|
||||
if text:
|
||||
self.text.extend(text)
|
||||
if not self.remote:
|
||||
terminate = True
|
||||
if result == FAILURE:
|
||||
if step.warnOnFailure:
|
||||
if self.result != FAILURE:
|
||||
self.result = WARNINGS
|
||||
if step.flunkOnFailure:
|
||||
self.result = FAILURE
|
||||
if step.haltOnFailure:
|
||||
self.result = FAILURE
|
||||
terminate = True
|
||||
elif result == WARNINGS:
|
||||
if step.warnOnWarnings:
|
||||
if self.result != FAILURE:
|
||||
self.result = WARNINGS
|
||||
if step.flunkOnWarnings:
|
||||
self.result = FAILURE
|
||||
elif result == EXCEPTION:
|
||||
self.result = EXCEPTION
|
||||
terminate = True
|
||||
return terminate
|
||||
|
||||
def lostRemote(self, remote=None):
|
||||
# the slave went away. There are several possible reasons for this,
|
||||
# and they aren't necessarily fatal. For now, kill the build, but
|
||||
# TODO: see if we can resume the build when it reconnects.
|
||||
log.msg("%s.lostRemote" % self)
|
||||
self.remote = None
|
||||
if self.currentStep:
|
||||
# this should cause the step to finish.
|
||||
log.msg(" stopping currentStep", self.currentStep)
|
||||
self.currentStep.interrupt(Failure(error.ConnectionLost()))
|
||||
|
||||
def stopBuild(self, reason="<no reason given>"):
|
||||
# the idea here is to let the user cancel a build because, e.g.,
|
||||
# they realized they committed a bug and they don't want to waste
|
||||
# the time building something that they know will fail. Another
|
||||
# reason might be to abandon a stuck build. We want to mark the
|
||||
# build as failed quickly rather than waiting for the slave's
|
||||
# timeout to kill it on its own.
|
||||
|
||||
log.msg(" %s: stopping build: %s" % (self, reason))
|
||||
if self.finished:
|
||||
return
|
||||
# TODO: include 'reason' in this point event
|
||||
self.builder.builder_status.addPointEvent(['interrupt'])
|
||||
self.currentStep.interrupt(reason)
|
||||
if 0:
|
||||
# TODO: maybe let its deferred do buildFinished
|
||||
if self.currentStep and self.currentStep.progress:
|
||||
# XXX: really .fail or something
|
||||
self.currentStep.progress.finish()
|
||||
text = ["stopped", reason]
|
||||
self.buildFinished(text, "red", FAILURE)
|
||||
|
||||
def allStepsDone(self):
|
||||
if self.result == FAILURE:
|
||||
color = "red"
|
||||
text = ["failed"]
|
||||
elif self.result == WARNINGS:
|
||||
color = "orange"
|
||||
text = ["warnings"]
|
||||
elif self.result == EXCEPTION:
|
||||
color = "purple"
|
||||
text = ["exception"]
|
||||
else:
|
||||
color = "green"
|
||||
text = ["build", "successful"]
|
||||
text.extend(self.text)
|
||||
return self.buildFinished(text, color, self.result)
|
||||
|
||||
def buildException(self, why):
|
||||
log.msg("%s.buildException" % self)
|
||||
log.err(why)
|
||||
self.buildFinished(["build", "exception"], "purple", FAILURE)
|
||||
|
||||
def buildFinished(self, text, color, results):
|
||||
"""This method must be called when the last Step has completed. It
|
||||
marks the Build as complete and returns the Builder to the 'idle'
|
||||
state.
|
||||
|
||||
It takes three arguments which describe the overall build status:
|
||||
text, color, results. 'results' is one of SUCCESS, WARNINGS, or
|
||||
FAILURE.
|
||||
|
||||
If 'results' is SUCCESS or WARNINGS, we will permit any dependent
|
||||
builds to start. If it is 'FAILURE', those builds will be
|
||||
abandoned."""
|
||||
|
||||
self.finished = True
|
||||
if self.remote:
|
||||
self.remote.dontNotifyOnDisconnect(self.lostRemote)
|
||||
self.results = results
|
||||
|
||||
log.msg(" %s: build finished" % self)
|
||||
self.build_status.setText(text)
|
||||
self.build_status.setColor(color)
|
||||
self.build_status.setResults(results)
|
||||
self.build_status.buildFinished()
|
||||
if self.progress:
|
||||
# XXX: also test a 'timing consistent' flag?
|
||||
log.msg(" setting expectations for next time")
|
||||
self.builder.setExpectations(self.progress)
|
||||
reactor.callLater(0, self.releaseLocks)
|
||||
self.deferred.callback(self)
|
||||
self.deferred = None
|
||||
|
||||
def releaseLocks(self):
|
||||
log.msg("releaseLocks(%s): %s" % (self, self.locks))
|
||||
for lock in self.locks:
|
||||
lock.release(self)
|
||||
|
||||
# IBuildControl
|
||||
|
||||
def getStatus(self):
|
||||
return self.build_status
|
||||
|
||||
# stopBuild is defined earlier
|
||||
|
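For orientation, a short sketch of how the classes above fit together: a SourceStamp describes what to build, a BuildRequest wraps it with a reason, and the request is handed to a Builder (whose submitBuildRequest() appears in builder.py below). The branch, revision, and builder name are placeholders; SourceStamp's positional (branch, revision, patch, changes) order is taken from its use in builder.py:

from twisted.python import log
from buildbot import sourcestamp
from buildbot.process.base import BuildRequest

ss = sourcestamp.SourceStamp("trunk", "1234")        # branch, revision
req = BuildRequest("forced: testing a fix", ss, builderName="full-linux")
d = req.waitUntilFinished()     # Deferred that fires with an IBuildStatus
d.addCallback(lambda build_status: log.msg("build done: %s" % build_status))
# a Builder then picks it up via builder.submitBuildRequest(req)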
697
tools/buildbot/buildbot/process/builder.py
Normal file
@ -0,0 +1,697 @@
|
||||
#! /usr/bin/python
|
||||
|
||||
import warnings
|
||||
|
||||
from twisted.python import log, components
|
||||
from twisted.spread import pb
|
||||
from twisted.internet import reactor, defer
|
||||
|
||||
from buildbot import interfaces, sourcestamp
|
||||
from buildbot.twcompat import implements
|
||||
from buildbot.status.progress import Expectations
|
||||
from buildbot.util import now
|
||||
from buildbot.process import base
|
||||
|
||||
(ATTACHING, # slave attached, still checking hostinfo/etc
|
||||
IDLE, # idle, available for use
|
||||
PINGING, # build about to start, making sure it is still alive
|
||||
BUILDING, # build is running
|
||||
) = range(4)
|
||||
|
||||
class SlaveBuilder(pb.Referenceable):
|
||||
"""I am the master-side representative for one of the
|
||||
L{buildbot.slave.bot.SlaveBuilder} objects that lives in a remote
|
||||
buildbot. When a remote builder connects, I query it for command versions
|
||||
and then make it available to any Builds that are ready to run. """
|
||||
|
||||
def __init__(self):
|
||||
self.ping_watchers = []
|
||||
self.state = ATTACHING
|
||||
self.remote = None
|
||||
|
||||
def setBuilder(self, b):
|
||||
self.builder = b
|
||||
self.builder_name = b.name
|
||||
|
||||
def getSlaveCommandVersion(self, command, oldversion=None):
|
||||
if self.remoteCommands is None:
|
||||
# the slave is 0.5.0 or earlier
|
||||
return oldversion
|
||||
return self.remoteCommands.get(command)
|
||||
|
||||
def isAvailable(self):
|
||||
if self.state == IDLE:
|
||||
return True
|
||||
return False
|
||||
|
||||
def attached(self, slave, remote, commands):
|
||||
self.slave = slave
|
||||
self.remote = remote
|
||||
self.remoteCommands = commands # maps command name to version
|
||||
log.msg("Buildslave %s attached to %s" % (slave.slavename,
|
||||
self.builder_name))
|
||||
d = self.remote.callRemote("setMaster", self)
|
||||
d.addErrback(self._attachFailure, "Builder.setMaster")
|
||||
d.addCallback(self._attached2)
|
||||
return d
|
||||
|
||||
def _attached2(self, res):
|
||||
d = self.remote.callRemote("print", "attached")
|
||||
d.addErrback(self._attachFailure, "Builder.print 'attached'")
|
||||
d.addCallback(self._attached3)
|
||||
return d
|
||||
|
||||
def _attached3(self, res):
|
||||
# now we say they're really attached
|
||||
self.state = IDLE
|
||||
return self
|
||||
|
||||
def _attachFailure(self, why, where):
|
||||
assert isinstance(where, str)
|
||||
log.msg(where)
|
||||
log.err(why)
|
||||
return why
|
||||
|
||||
def detached(self):
|
||||
log.msg("Buildslave %s detached from %s" % (self.slave.slavename,
|
||||
self.builder_name))
|
||||
self.slave = None
|
||||
self.remote = None
|
||||
self.remoteCommands = None
|
||||
|
||||
def buildStarted(self):
|
||||
self.state = BUILDING
|
||||
|
||||
def buildFinished(self):
|
||||
self.state = IDLE
|
||||
reactor.callLater(0, self.builder.maybeStartBuild)
|
||||
|
||||
def ping(self, timeout, status=None):
|
||||
"""Ping the slave to make sure it is still there. Returns a Deferred
|
||||
that fires with True if it is.
|
||||
|
||||
@param status: if you point this at a BuilderStatus, a 'pinging'
|
||||
event will be pushed.
|
||||
"""
|
||||
|
||||
self.state = PINGING
|
||||
newping = not self.ping_watchers
|
||||
d = defer.Deferred()
|
||||
self.ping_watchers.append(d)
|
||||
if newping:
|
||||
if status:
|
||||
event = status.addEvent(["pinging"], "yellow")
|
||||
d2 = defer.Deferred()
|
||||
d2.addCallback(self._pong_status, event)
|
||||
self.ping_watchers.insert(0, d2)
|
||||
# I think it will make the tests run smoother if the status
|
||||
# is updated before the ping completes
|
||||
Ping().ping(self.remote, timeout).addCallback(self._pong)
|
||||
|
||||
return d
|
||||
|
||||
def _pong(self, res):
|
||||
watchers, self.ping_watchers = self.ping_watchers, []
|
||||
for d in watchers:
|
||||
d.callback(res)
|
||||
|
||||
def _pong_status(self, res, event):
|
||||
if res:
|
||||
event.text = ["ping", "success"]
|
||||
event.color = "green"
|
||||
else:
|
||||
event.text = ["ping", "failed"]
|
||||
event.color = "red"
|
||||
event.finish()
|
||||
|
||||
class Ping:
|
||||
running = False
|
||||
timer = None
|
||||
|
||||
def ping(self, remote, timeout):
|
||||
assert not self.running
|
||||
self.running = True
|
||||
log.msg("sending ping")
|
||||
self.d = defer.Deferred()
|
||||
# TODO: add a distinct 'ping' command on the slave.. using 'print'
|
||||
# for this purpose is kind of silly.
|
||||
remote.callRemote("print", "ping").addCallbacks(self._pong,
|
||||
self._ping_failed,
|
||||
errbackArgs=(remote,))
|
||||
|
||||
# We use either our own timeout or the (long) TCP timeout to detect
|
||||
# silently-missing slaves. This might happen because of a NAT
|
||||
# timeout or a routing loop. If the slave just shuts down (and we
|
||||
# somehow missed the FIN), we should get a "connection refused"
|
||||
# message.
|
||||
self.timer = reactor.callLater(timeout, self._ping_timeout, remote)
|
||||
return self.d
|
||||
|
||||
def _ping_timeout(self, remote):
|
||||
log.msg("ping timeout")
|
||||
# force the BotPerspective to disconnect, since this indicates that
|
||||
# the bot is unreachable.
|
||||
del self.timer
|
||||
remote.broker.transport.loseConnection()
|
||||
# the forcibly-lost connection will now cause the ping to fail
|
||||
|
||||
def _stopTimer(self):
|
||||
if not self.running:
|
||||
return
|
||||
self.running = False
|
||||
|
||||
if self.timer:
|
||||
self.timer.cancel()
|
||||
del self.timer
|
||||
|
||||
def _pong(self, res):
|
||||
log.msg("ping finished: success")
|
||||
self._stopTimer()
|
||||
self.d.callback(True)
|
||||
|
||||
def _ping_failed(self, res, remote):
|
||||
log.msg("ping finished: failure")
|
||||
self._stopTimer()
|
||||
# the slave has some sort of internal error, disconnect them. If we
|
||||
# don't, we'll requeue a build and ping them again right away,
|
||||
# creating a nasty loop.
|
||||
remote.broker.transport.loseConnection()
|
||||
# TODO: except, if they actually did manage to get this far, they'll
|
||||
# probably reconnect right away, and we'll do this game again. Maybe
|
||||
# it would be better to leave them in the PINGING state.
|
||||
self.d.callback(False)
|
||||
|
||||
|
||||
class Builder(pb.Referenceable):
|
||||
"""I manage all Builds of a given type.
|
||||
|
||||
Each Builder is created by an entry in the config file (the c['builders']
|
||||
list), with a number of parameters.
|
||||
|
||||
One of these parameters is the L{buildbot.process.factory.BuildFactory}
|
||||
object that is associated with this Builder. The factory is responsible
|
||||
for creating new L{Build<buildbot.process.base.Build>} objects. Each
|
||||
Build object defines when and how the build is performed, so a new
|
||||
Factory or Builder should be defined to control this behavior.
|
||||
|
||||
The Builder holds on to a number of L{base.BuildRequest} objects in a
|
||||
list named C{.buildable}. Incoming BuildRequest objects will be added to
|
||||
this list, or (if possible) merged into an existing request. When a slave
|
||||
becomes available, I will use my C{BuildFactory} to turn the request into
|
||||
a new C{Build} object. The C{BuildRequest} is forgotten, the C{Build}
|
||||
goes into C{.building} while it runs. Once the build finishes, I will
|
||||
discard it.
|
||||
|
||||
I maintain a list of available SlaveBuilders, one for each connected
|
||||
slave that the C{slavenames} parameter says we can use. Some of these
|
||||
will be idle, some of them will be busy running builds for me. If there
|
||||
are multiple slaves, I can run multiple builds at once.
|
||||
|
||||
I also manage forced builds, progress expectation (ETA) management, and
|
||||
some status delivery chores.
|
||||
|
||||
I am persisted in C{BASEDIR/BUILDERNAME/builder}, so I can remember how
|
||||
long a build usually takes to run (in my C{expectations} attribute). This
|
||||
pickle also includes the L{buildbot.status.builder.BuilderStatus} object,
|
||||
which remembers the set of historic builds.
|
||||
|
||||
@type buildable: list of L{buildbot.process.base.BuildRequest}
|
||||
@ivar buildable: BuildRequests that are ready to build, but which are
|
||||
waiting for a buildslave to be available.
|
||||
|
||||
@type building: list of L{buildbot.process.base.Build}
|
||||
@ivar building: Builds that are actively running
|
||||
|
||||
"""
|
||||
|
||||
expectations = None # this is created the first time we get a good build
|
||||
START_BUILD_TIMEOUT = 10
|
||||
|
||||
def __init__(self, setup, builder_status):
|
||||
"""
|
||||
@type setup: dict
|
||||
@param setup: builder setup data, as stored in
|
||||
BuildmasterConfig['builders']. Contains name,
|
||||
slavename(s), builddir, factory, locks.
|
||||
@type builder_status: L{buildbot.status.builder.BuilderStatus}
|
||||
"""
|
||||
self.name = setup['name']
|
||||
self.slavenames = []
|
||||
if setup.has_key('slavename'):
|
||||
self.slavenames.append(setup['slavename'])
|
||||
if setup.has_key('slavenames'):
|
||||
self.slavenames.extend(setup['slavenames'])
|
||||
self.builddir = setup['builddir']
|
||||
self.buildFactory = setup['factory']
|
||||
self.locks = setup.get("locks", [])
|
||||
if setup.has_key('periodicBuildTime'):
|
||||
raise ValueError("periodicBuildTime can no longer be defined as"
|
||||
" part of the Builder: use scheduler.Periodic"
|
||||
" instead")
|
||||
|
||||
# build/wannabuild slots: Build objects move along this sequence
|
||||
self.buildable = []
|
||||
self.building = []
|
||||
|
||||
# buildslaves which have connected but which are not yet available.
|
||||
# These are always in the ATTACHING state.
|
||||
self.attaching_slaves = []
|
||||
|
||||
# buildslaves at our disposal. Each SlaveBuilder instance has a
|
||||
# .state that is IDLE, PINGING, or BUILDING. "PINGING" is used when a
|
||||
# Build is about to start, to make sure that they're still alive.
|
||||
self.slaves = []
|
||||
|
||||
self.builder_status = builder_status
|
||||
self.builder_status.setSlavenames(self.slavenames)
|
||||
|
||||
# for testing, to help synchronize tests
|
||||
self.watchers = {'attach': [], 'detach': [], 'detach_all': [],
|
||||
'idle': []}
|
||||
|
||||
def setBotmaster(self, botmaster):
|
||||
self.botmaster = botmaster
|
||||
|
||||
def compareToSetup(self, setup):
|
||||
diffs = []
|
||||
setup_slavenames = []
|
||||
if setup.has_key('slavename'):
|
||||
setup_slavenames.append(setup['slavename'])
|
||||
setup_slavenames.extend(setup.get('slavenames', []))
|
||||
if setup_slavenames != self.slavenames:
|
||||
diffs.append('slavenames changed from %s to %s' \
|
||||
% (self.slavenames, setup_slavenames))
|
||||
if setup['builddir'] != self.builddir:
|
||||
diffs.append('builddir changed from %s to %s' \
|
||||
% (self.builddir, setup['builddir']))
|
||||
if setup['factory'] != self.buildFactory: # compare objects
|
||||
diffs.append('factory changed')
|
||||
oldlocks = [(lock.__class__, lock.name)
|
||||
for lock in setup.get('locks',[])]
|
||||
newlocks = [(lock.__class__, lock.name)
|
||||
for lock in self.locks]
|
||||
if oldlocks != newlocks:
|
||||
diffs.append('locks changed from %s to %s' % (oldlocks, newlocks))
|
||||
return diffs
|
||||
|
||||
def __repr__(self):
|
||||
return "<Builder '%s' at %d>" % (self.name, id(self))
|
||||
|
||||
|
||||
def submitBuildRequest(self, req):
|
||||
req.submittedAt = now()
|
||||
self.buildable.append(req)
|
||||
req.requestSubmitted(self)
|
||||
self.builder_status.addBuildRequest(req.status)
|
||||
self.maybeStartBuild()
|
||||
|
||||
def cancelBuildRequest(self, req):
|
||||
if req in self.buildable:
|
||||
self.buildable.remove(req)
|
||||
self.builder_status.removeBuildRequest(req.status)
|
||||
return True
|
||||
return False
|
||||
|
||||
def __getstate__(self):
|
||||
d = self.__dict__.copy()
|
||||
# TODO: note that d['buildable'] can contain Deferreds
|
||||
del d['building'] # TODO: move these back to .buildable?
|
||||
del d['slaves']
|
||||
return d
|
||||
|
||||
def __setstate__(self, d):
|
||||
self.__dict__ = d
|
||||
self.building = []
|
||||
self.slaves = []
|
||||
|
||||
def consumeTheSoulOfYourPredecessor(self, old):
|
||||
"""Suck the brain out of an old Builder.
|
||||
|
||||
This takes all the runtime state from an existing Builder and moves
|
||||
it into ourselves. This is used when a Builder is changed in the
|
||||
master.cfg file: the new Builder has a different factory, but we want
|
||||
all the builds that were queued for the old one to get processed by
|
||||
the new one. Any builds which are already running will keep running.
|
||||
The new Builder will get as many of the old SlaveBuilder objects as
|
||||
it wants."""
|
||||
|
||||
log.msg("consumeTheSoulOfYourPredecessor: %s feeding upon %s" %
|
||||
(self, old))
|
||||
# we claim all the pending builds, removing them from the old
|
||||
# Builder's queue. This ensures that the old Builder will not start
|
||||
# any new work.
|
||||
log.msg(" stealing %s buildrequests" % len(old.buildable))
|
||||
self.buildable.extend(old.buildable)
|
||||
old.buildable = []
|
||||
|
||||
# old.building is not migrated: it keeps track of builds which were
|
||||
# in progress in the old Builder. When those builds finish, the old
|
||||
# Builder will be notified, not us. However, since the old
|
||||
# SlaveBuilder will point to us, it is our maybeStartBuild() that
|
||||
# will be triggered.
|
||||
if old.building:
|
||||
self.builder_status.setBigState("building")
|
||||
|
||||
# Our set of slavenames may be different. Steal any of the old
|
||||
# buildslaves that we want to keep using.
|
||||
for sb in old.slaves[:]:
|
||||
if sb.slave.slavename in self.slavenames:
|
||||
log.msg(" stealing buildslave %s" % sb)
|
||||
self.slaves.append(sb)
|
||||
old.slaves.remove(sb)
|
||||
sb.setBuilder(self)
|
||||
|
||||
# old.attaching_slaves:
|
||||
# these SlaveBuilders are waiting on a sequence of calls:
|
||||
# remote.setMaster and remote.print . When these two complete,
|
||||
# old._attached will be fired, which will add a 'connect' event to
|
||||
# the builder_status and try to start a build. However, we've pulled
|
||||
# everything out of the old builder's queue, so it will have no work
|
||||
# to do. The outstanding remote.setMaster/print call will be holding
|
||||
# the last reference to the old builder, so it will disappear just
|
||||
# after that response comes back.
|
||||
#
|
||||
# The BotMaster will ask the slave to re-set their list of Builders
|
||||
# shortly after this function returns, which will cause our
|
||||
# attached() method to be fired with a bunch of references to remote
|
||||
# SlaveBuilders, some of which we already have (by stealing them
|
||||
# from the old Builder), some of which will be new. The new ones
|
||||
# will be re-attached.
|
||||
|
||||
# Therefore, we don't need to do anything about old.attaching_slaves
|
||||
|
||||
return # all done
|
||||
|
||||
def fireTestEvent(self, name, with=None):
|
||||
if with is None:
|
||||
with = self
|
||||
watchers = self.watchers[name]
|
||||
self.watchers[name] = []
|
||||
for w in watchers:
|
||||
reactor.callLater(0, w.callback, with)
|
||||
|
||||
def attached(self, slave, remote, commands):
|
||||
"""This is invoked by the BotPerspective when the self.slavename bot
|
||||
registers their builder.
|
||||
|
||||
@type slave: L{buildbot.master.BotPerspective}
|
||||
@param slave: the BotPerspective that represents the buildslave as a
|
||||
whole
|
||||
@type remote: L{twisted.spread.pb.RemoteReference}
|
||||
@param remote: a reference to the L{buildbot.slave.bot.SlaveBuilder}
|
||||
@type commands: dict: string -> string, or None
|
||||
@param commands: provides the slave's version of each RemoteCommand
|
||||
|
||||
@rtype: L{twisted.internet.defer.Deferred}
|
||||
@return: a Deferred that fires (with 'self') when the slave-side
|
||||
builder is fully attached and ready to accept commands.
|
||||
"""
|
||||
for s in self.attaching_slaves + self.slaves:
|
||||
if s.slave == slave:
|
||||
# already attached to them. This is fairly common, since
|
||||
# attached() gets called each time we receive the builder
|
||||
# list from the slave, and we ask for it each time we add or
|
||||
# remove a builder. So if the slave is hosting builders
|
||||
# A,B,C, and the config file changes A, we'll remove A and
|
||||
# re-add it, triggering two builder-list requests, getting
|
||||
# two redundant calls to attached() for B, and another two
|
||||
# for C.
|
||||
#
|
||||
# Therefore, when we see that we're already attached, we can
|
||||
# just ignore it. TODO: build a diagram of the state
|
||||
# transitions here, I'm concerned about sb.attached() failing
|
||||
# and leaving sb.state stuck at 'ATTACHING', and about
|
||||
# the detached() message arriving while there's some
|
||||
# transition pending such that the response to the transition
|
||||
# re-vivifies sb
|
||||
return defer.succeed(self)
|
||||
|
||||
sb = SlaveBuilder()
|
||||
sb.setBuilder(self)
|
||||
self.attaching_slaves.append(sb)
|
||||
d = sb.attached(slave, remote, commands)
|
||||
d.addCallback(self._attached)
|
||||
d.addErrback(self._not_attached, slave)
|
||||
return d
|
||||
|
||||
def _attached(self, sb):
|
||||
# TODO: make this .addSlaveEvent(slave.slavename, ['connect']) ?
|
||||
self.builder_status.addPointEvent(['connect', sb.slave.slavename])
|
||||
self.attaching_slaves.remove(sb)
|
||||
self.slaves.append(sb)
|
||||
reactor.callLater(0, self.maybeStartBuild)
|
||||
|
||||
self.fireTestEvent('attach')
|
||||
return self
|
||||
|
||||
def _not_attached(self, why, slave):
|
||||
# already log.err'ed by SlaveBuilder._attachFailure
|
||||
# TODO: make this .addSlaveEvent?
|
||||
# TODO: remove from self.slaves (except that detached() should get
|
||||
# run first, right?)
|
||||
self.builder_status.addPointEvent(['failed', 'connect',
|
||||
slave.slave.slavename])
|
||||
# TODO: add an HTMLLogFile of the exception
|
||||
self.fireTestEvent('attach', why)
|
||||
|
||||
def detached(self, slave):
|
||||
"""This is called when the connection to the bot is lost."""
|
||||
log.msg("%s.detached" % self, slave.slavename)
|
||||
for sb in self.attaching_slaves + self.slaves:
|
||||
if sb.slave == slave:
|
||||
break
|
||||
else:
|
||||
log.msg("WEIRD: Builder.detached(%s) (%s)"
|
||||
" not in attaching_slaves(%s)"
|
||||
" or slaves(%s)" % (slave, slave.slavename,
|
||||
self.attaching_slaves,
|
||||
self.slaves))
|
||||
return
|
||||
if sb.state == BUILDING:
|
||||
# the Build's .lostRemote method (invoked by a notifyOnDisconnect
|
||||
# handler) will cause the Build to be stopped, probably right
|
||||
# after the notifyOnDisconnect that invoked us finishes running.
|
||||
|
||||
# TODO: should failover to a new Build
|
||||
#self.retryBuild(sb.build)
|
||||
pass
|
||||
|
||||
if sb in self.attaching_slaves:
|
||||
self.attaching_slaves.remove(sb)
|
||||
if sb in self.slaves:
|
||||
self.slaves.remove(sb)
|
||||
|
||||
# TODO: make this .addSlaveEvent?
|
||||
self.builder_status.addPointEvent(['disconnect', slave.slavename])
|
||||
sb.detached() # inform the SlaveBuilder that their slave went away
|
||||
self.updateBigStatus()
|
||||
self.fireTestEvent('detach')
|
||||
if not self.slaves:
|
||||
self.fireTestEvent('detach_all')
|
||||
|
||||
def updateBigStatus(self):
|
||||
if not self.slaves:
|
||||
self.builder_status.setBigState("offline")
|
||||
elif self.building:
|
||||
self.builder_status.setBigState("building")
|
||||
else:
|
||||
self.builder_status.setBigState("idle")
|
||||
self.fireTestEvent('idle')
|
||||
|
||||
def maybeStartBuild(self):
|
||||
log.msg("maybeStartBuild %s: %s %s" %
|
||||
(self, self.buildable, self.slaves))
|
||||
if not self.buildable:
|
||||
self.updateBigStatus()
|
||||
return # nothing to do
|
||||
# find the first idle slave
|
||||
for sb in self.slaves:
|
||||
if sb.isAvailable():
|
||||
break
|
||||
else:
|
||||
log.msg("%s: want to start build, but we don't have a remote"
|
||||
% self)
|
||||
self.updateBigStatus()
|
||||
return
|
||||
|
||||
# there is something to build, and there is a slave on which to build
|
||||
# it. Grab the oldest request, see if we can merge it with anything
|
||||
# else.
|
||||
req = self.buildable.pop(0)
|
||||
self.builder_status.removeBuildRequest(req.status)
|
||||
mergers = []
|
||||
for br in self.buildable[:]:
|
||||
if req.canBeMergedWith(br):
|
||||
self.buildable.remove(br)
|
||||
self.builder_status.removeBuildRequest(br.status)
|
||||
mergers.append(br)
|
||||
requests = [req] + mergers
|
||||
|
||||
# Create a new build from our build factory and set ourself as the
|
||||
# builder.
|
||||
build = self.buildFactory.newBuild(requests)
|
||||
build.setBuilder(self)
|
||||
build.setLocks(self.locks)
|
||||
|
||||
# start it
|
||||
self.startBuild(build, sb)
|
||||
|
||||
def startBuild(self, build, sb):
|
||||
"""Start a build on the given slave.
|
||||
@param build: the L{base.Build} to start
|
||||
@param sb: the L{SlaveBuilder} which will host this build
|
||||
|
||||
@return: a Deferred which fires with a
|
||||
L{buildbot.interfaces.IBuildControl} that can be used to stop the
|
||||
Build, or to access a L{buildbot.interfaces.IBuildStatus} which will
|
||||
watch the Build as it runs. """
|
||||
|
||||
self.building.append(build)
|
||||
self.updateBigStatus()
|
||||
|
||||
log.msg("starting build %s.. pinging the slave" % build)
|
||||
# ping the slave to make sure they're still there. If they've fallen
|
||||
# off the map (due to a NAT timeout or something), this will fail in
|
||||
# a couple of minutes, depending upon the TCP timeout. TODO: consider
|
||||
# making this time out faster, or at least characterize the likely
|
||||
# duration.
|
||||
d = sb.ping(self.START_BUILD_TIMEOUT)
|
||||
d.addCallback(self._startBuild_1, build, sb)
|
||||
return d
|
||||
|
||||
def _startBuild_1(self, res, build, sb):
|
||||
if not res:
|
||||
return self._startBuildFailed("slave ping failed", build, sb)
|
||||
# The buildslave is ready to go. sb.buildStarted() sets its state to
|
||||
# BUILDING (so we won't try to use it for any other builds). This
|
||||
# gets set back to IDLE by the Build itself when it finishes.
|
||||
sb.buildStarted()
|
||||
d = sb.remote.callRemote("startBuild")
|
||||
d.addCallbacks(self._startBuild_2, self._startBuildFailed,
|
||||
callbackArgs=(build,sb), errbackArgs=(build,sb))
|
||||
return d
|
||||
|
||||
def _startBuild_2(self, res, build, sb):
|
||||
# create the BuildStatus object that goes with the Build
|
||||
bs = self.builder_status.newBuild()
|
||||
|
||||
# start the build. This will first set up the steps, then tell the
|
||||
# BuildStatus that it has started, which will announce it to the
|
||||
# world (through our BuilderStatus object, which is its parent).
|
||||
# Finally it will start the actual build process.
|
||||
d = build.startBuild(bs, self.expectations, sb)
|
||||
d.addCallback(self.buildFinished, sb)
|
||||
d.addErrback(log.err) # this shouldn't happen. if it does, the slave
|
||||
# will be wedged
|
||||
for req in build.requests:
|
||||
req.buildStarted(build, bs)
|
||||
return build # this is the IBuildControl
|
||||
|
||||
def _startBuildFailed(self, why, build, sb):
|
||||
# put the build back on the buildable list
|
||||
log.msg("I tried to tell the slave that the build %s started, but "
|
||||
"remote_startBuild failed: %s" % (build, why))
|
||||
# release the slave. This will queue a call to maybeStartBuild, which
|
||||
# will fire after other notifyOnDisconnect handlers have marked the
|
||||
# slave as disconnected (so we don't try to use it again).
|
||||
sb.buildFinished()
|
||||
|
||||
log.msg("re-queueing the BuildRequest")
|
||||
self.building.remove(build)
|
||||
for req in build.requests:
|
||||
self.buildable.insert(0, req) # the interrupted build gets first
|
||||
# priority
|
||||
self.builder_status.addBuildRequest(req.status)
|
||||
|
||||
|
||||
def buildFinished(self, build, sb):
|
||||
"""This is called when the Build has finished (either success or
|
||||
failure). Any exceptions during the build are reported with
|
||||
results=FAILURE, not with an errback."""
|
||||
|
||||
# by the time we get here, the Build has already released the slave
|
||||
# (which queues a call to maybeStartBuild)
|
||||
|
||||
self.building.remove(build)
|
||||
for req in build.requests:
|
||||
req.finished(build.build_status)
|
||||
|
||||
def setExpectations(self, progress):
|
||||
"""Mark the build as successful and update expectations for the next
|
||||
build. Only call this when the build did not fail in any way that
|
||||
would invalidate the time expectations generated by it. (if the
|
||||
compile failed and thus terminated early, we can't use the last
|
||||
build to predict how long the next one will take).
|
||||
"""
|
||||
if self.expectations:
|
||||
self.expectations.update(progress)
|
||||
else:
|
||||
# the first time we get a good build, create our Expectations
|
||||
# based upon its results
|
||||
self.expectations = Expectations(progress)
|
||||
log.msg("new expectations: %s seconds" % \
|
||||
self.expectations.expectedBuildTime())
|
||||
|
||||
def shutdownSlave(self):
|
||||
if self.remote:
|
||||
self.remote.callRemote("shutdown")
|
||||
|
||||
|
||||
class BuilderControl(components.Adapter):
|
||||
if implements:
|
||||
implements(interfaces.IBuilderControl)
|
||||
else:
|
||||
__implements__ = interfaces.IBuilderControl,
|
||||
|
||||
def requestBuild(self, req):
|
||||
"""Submit a BuildRequest to this Builder."""
|
||||
self.original.submitBuildRequest(req)
|
||||
|
||||
def requestBuildSoon(self, req):
|
||||
"""Submit a BuildRequest like requestBuild, but raise a
|
||||
L{buildbot.interfaces.NoSlaveError} if no slaves are currently
|
||||
available, so it cannot be used to queue a BuildRequest in the hopes
|
||||
that a slave will eventually connect. This method is appropriate for
|
||||
use by things like the web-page 'Force Build' button."""
|
||||
if not self.original.slaves:
|
||||
raise interfaces.NoSlaveError
|
||||
self.requestBuild(req)
|
||||
|
||||
def resubmitBuild(self, bs, reason="<rebuild, no reason given>"):
|
||||
if not bs.isFinished():
|
||||
return
|
||||
branch, revision, patch = bs.getSourceStamp()
|
||||
changes = bs.getChanges()
|
||||
ss = sourcestamp.SourceStamp(branch, revision, patch, changes)
|
||||
req = base.BuildRequest(reason, ss, self.original.name)
|
||||
self.requestBuild(req)
|
||||
|
||||
def getPendingBuilds(self):
|
||||
# return IBuildRequestControl objects
|
||||
raise NotImplementedError
|
||||
|
||||
def getBuild(self, number):
|
||||
for b in self.original.building:
|
||||
if b.build_status.number == number:
|
||||
return b
|
||||
return None
|
||||
|
||||
def ping(self, timeout=30):
|
||||
if not self.original.slaves:
|
||||
self.original.builder_status.addPointEvent(["ping", "no slave"],
|
||||
"red")
|
||||
return defer.succeed(False) # interfaces.NoSlaveError
|
||||
dl = []
|
||||
for s in self.original.slaves:
|
||||
dl.append(s.ping(timeout, self.original.builder_status))
|
||||
d = defer.DeferredList(dl)
|
||||
d.addCallback(self._gatherPingResults)
|
||||
return d
|
||||
|
||||
def _gatherPingResults(self, res):
|
||||
for ignored,success in res:
|
||||
if not success:
|
||||
return False
|
||||
return True
|
||||
|
||||
components.registerAdapter(BuilderControl, Builder, interfaces.IBuilderControl)
|
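The setup dict documented in Builder.__init__ above (name, slavename(s), builddir, factory, locks) is the same dict that master.cfg places in BuildmasterConfig['builders']. A minimal sketch; the builder and slave names are placeholders, and the SVN step keywords (svnurl, mode) are assumed from that step's usual signature rather than shown in this excerpt:

from buildbot.process import factory
from buildbot.steps.source import SVN
from buildbot.steps.shell import Compile, Test

f = factory.BuildFactory()
f.addStep(SVN, svnurl="http://svn.example.org/trunk", mode="update")
f.addStep(Compile, command=["make", "all"])
f.addStep(Test, command=["make", "check"])

b1 = {'name': "full-linux",
      'slavename': "bot1",
      'builddir': "full-linux",
      'factory': f,
      }
# c['builders'] = [b1]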
1063
tools/buildbot/buildbot/process/buildstep.py
Normal file
File diff suppressed because it is too large
Load Diff
179
tools/buildbot/buildbot/process/factory.py
Normal file
@ -0,0 +1,179 @@
|
||||
# -*- test-case-name: buildbot.test.test_step -*-
|
||||
|
||||
from buildbot import util
|
||||
from buildbot.process.base import Build
|
||||
from buildbot.process.buildstep import BuildStep
|
||||
from buildbot.steps.source import CVS, SVN
|
||||
from buildbot.steps.shell import Configure, Compile, Test
|
||||
|
||||
# deprecated, use BuildFactory.addStep
|
||||
def s(steptype, **kwargs):
|
||||
# convenience function for master.cfg files, to create step
|
||||
# specification tuples
|
||||
return (steptype, kwargs)
|
||||
|
||||
class BuildFactory(util.ComparableMixin):
|
||||
"""
|
||||
@cvar buildClass: class to use when creating builds
|
||||
@type buildClass: L{buildbot.process.base.Build}
|
||||
"""
|
||||
buildClass = Build
|
||||
useProgress = 1
|
||||
compare_attrs = ['buildClass', 'steps', 'useProgress']
|
||||
|
||||
def __init__(self, steps=None):
|
||||
if steps is None:
|
||||
steps = []
|
||||
self.steps = steps
|
||||
|
||||
def newBuild(self, request):
|
||||
"""Create a new Build instance.
|
||||
@param request: a L{base.BuildRequest} describing what is to be built
|
||||
"""
|
||||
b = self.buildClass(request)
|
||||
b.useProgress = self.useProgress
|
||||
b.setSteps(self.steps)
|
||||
return b
|
||||
|
||||
def addStep(self, steptype, **kwargs):
|
||||
self.steps.append((steptype, kwargs))
|
||||
|
||||
|
||||
# BuildFactory subclasses for common build tools
|
||||
|
||||
class GNUAutoconf(BuildFactory):
|
||||
def __init__(self, source, configure="./configure",
|
||||
configureEnv={},
|
||||
configureFlags=[],
|
||||
compile=["make", "all"],
|
||||
test=["make", "check"]):
|
||||
assert isinstance(source, tuple)
|
||||
assert issubclass(source[0], BuildStep)
|
||||
BuildFactory.__init__(self, [source])
|
||||
if configure is not None:
|
||||
# we either need to wind up with a string (which will be
|
||||
# space-split), or with a list of strings (which will not). The
|
||||
# list of strings is the preferred form.
|
||||
if type(configure) is str:
|
||||
if configureFlags:
|
||||
assert not " " in configure # please use list instead
|
||||
command = [configure] + configureFlags
|
||||
else:
|
||||
command = configure
|
||||
else:
|
||||
assert isinstance(configure, (list, tuple))
|
||||
command = configure + configureFlags
|
||||
self.addStep(Configure, command=command, env=configureEnv)
|
||||
if compile is not None:
|
||||
self.addStep(Compile, command=compile)
|
||||
if test is not None:
|
||||
self.addStep(Test, command=test)
|
||||
|
||||
class CPAN(BuildFactory):
|
||||
def __init__(self, source, perl="perl"):
|
||||
assert isinstance(source, tuple)
|
||||
assert issubclass(source[0], BuildStep)
|
||||
BuildFactory.__init__(self, [source])
|
||||
self.addStep(Configure, command=[perl, "Makefile.PL"])
|
||||
self.addStep(Compile, command=["make"])
|
||||
self.addStep(Test, command=["make", "test"])
|
||||
|
||||
class Distutils(BuildFactory):
|
||||
def __init__(self, source, python="python", test=None):
|
||||
assert isinstance(source, tuple)
|
||||
assert issubclass(source[0], BuildStep)
|
||||
BuildFactory.__init__(self, [source])
|
||||
self.addStep(Compile, command=[python, "./setup.py", "build"])
|
||||
if test is not None:
|
||||
self.addStep(Test, command=test)
|
||||
|
||||
class Trial(BuildFactory):
|
||||
"""Build a python module that uses distutils and trial. Set 'tests' to
|
||||
the module in which the tests can be found, or set useTestCaseNames=True
|
||||
to always have trial figure out which tests to run (based upon which
|
||||
files have been changed).
|
||||
|
||||
See docs/factories.xhtml for usage samples. Not all of the Trial
|
||||
BuildStep options are available here, only the most commonly used ones.
|
||||
To get complete access, you will need to create a custom
|
||||
BuildFactory."""
|
||||
|
||||
trial = "trial"
|
||||
randomly = False
|
||||
recurse = False
|
||||
|
||||
def __init__(self, source,
|
||||
buildpython=["python"], trialpython=[], trial=None,
|
||||
testpath=".", randomly=None, recurse=None,
|
||||
tests=None, useTestCaseNames=False, env=None):
|
||||
BuildFactory.__init__(self, [source])
|
||||
assert isinstance(source, tuple)
|
||||
assert issubclass(source[0], BuildStep)
|
||||
assert tests or useTestCaseNames, "must use one or the other"
|
||||
if trial is not None:
|
||||
self.trial = trial
|
||||
if randomly is not None:
|
||||
self.randomly = randomly
|
||||
if recurse is not None:
|
||||
self.recurse = recurse
|
||||
|
||||
from buildbot.steps.python_twisted import Trial
|
||||
buildcommand = buildpython + ["./setup.py", "build"]
|
||||
self.addStep(Compile, command=buildcommand, env=env)
|
||||
self.addStep(Trial,
|
||||
python=trialpython, trial=self.trial,
|
||||
testpath=testpath,
|
||||
tests=tests, testChanges=useTestCaseNames,
|
||||
randomly=self.randomly,
|
||||
recurse=self.recurse,
|
||||
env=env,
|
||||
)
|
||||
|
||||
|
||||
# compatibility classes, will go away. Note that these only offer
|
||||
# compatibility at the constructor level: if you have subclassed these
|
||||
# factories, your subclasses are unlikely to still work correctly.
|
||||
|
||||
ConfigurableBuildFactory = BuildFactory
|
||||
|
||||
class BasicBuildFactory(GNUAutoconf):
|
||||
# really a "GNU Autoconf-created tarball -in-CVS tree" builder
|
||||
|
||||
def __init__(self, cvsroot, cvsmodule,
|
||||
configure=None, configureEnv={},
|
||||
compile="make all",
|
||||
test="make check", cvsCopy=False):
|
||||
mode = "clobber"
|
||||
if cvsCopy:
|
||||
mode = "copy"
|
||||
source = s(CVS, cvsroot=cvsroot, cvsmodule=cvsmodule, mode=mode)
|
||||
GNUAutoconf.__init__(self, source,
|
||||
configure=configure, configureEnv=configureEnv,
|
||||
compile=compile,
|
||||
test=test)
|
||||
|
||||
class QuickBuildFactory(BasicBuildFactory):
|
||||
useProgress = False
|
||||
|
||||
def __init__(self, cvsroot, cvsmodule,
|
||||
configure=None, configureEnv={},
|
||||
compile="make all",
|
||||
test="make check", cvsCopy=False):
|
||||
mode = "update"
|
||||
source = s(CVS, cvsroot=cvsroot, cvsmodule=cvsmodule, mode=mode)
|
||||
GNUAutoconf.__init__(self, source,
|
||||
configure=configure, configureEnv=configureEnv,
|
||||
compile=compile,
|
||||
test=test)
|
||||
|
||||
class BasicSVN(GNUAutoconf):
|
||||
|
||||
def __init__(self, svnurl,
|
||||
configure=None, configureEnv={},
|
||||
compile="make all",
|
||||
test="make check"):
|
||||
source = s(SVN, svnurl=svnurl, mode="update")
|
||||
GNUAutoconf.__init__(self, source,
|
||||
configure=configure, configureEnv=configureEnv,
|
||||
compile=compile,
|
||||
test=test)
|
4
tools/buildbot/buildbot/process/maxq.py
Normal file
@ -0,0 +1,4 @@

# legacy compatibility
from buildbot.steps.maxq import MaxQ

119
tools/buildbot/buildbot/process/process_twisted.py
Normal file
@ -0,0 +1,119 @@
|
||||
#! /usr/bin/python
|
||||
|
||||
# Build classes specific to the Twisted codebase
|
||||
|
||||
from buildbot.process.base import Build
|
||||
from buildbot.process.factory import BuildFactory
|
||||
from buildbot.steps import shell
|
||||
from buildbot.steps.python_twisted import HLint, ProcessDocs, BuildDebs, \
|
||||
Trial, RemovePYCs
|
||||
|
||||
class TwistedBuild(Build):
|
||||
workdir = "Twisted" # twisted's bin/trial expects to live in here
|
||||
def isFileImportant(self, filename):
|
||||
if filename.startswith("doc/fun/"):
|
||||
return 0
|
||||
if filename.startswith("sandbox/"):
|
||||
return 0
|
||||
return 1
|
||||
|
||||
class TwistedTrial(Trial):
|
||||
tests = "twisted"
|
||||
# the Trial in Twisted >=2.1.0 has --recurse on by default, and -to
|
||||
# turned into --reporter=bwverbose .
|
||||
recurse = False
|
||||
trialMode = ["--reporter=bwverbose"]
|
||||
testpath = None
|
||||
trial = "./bin/trial"
|
||||
|
||||
class TwistedBaseFactory(BuildFactory):
|
||||
buildClass = TwistedBuild
|
||||
# bin/trial expects its parent directory to be named "Twisted": it uses
|
||||
# this to add the local tree to PYTHONPATH during tests
|
||||
workdir = "Twisted"
|
||||
|
||||
def __init__(self, source):
|
||||
BuildFactory.__init__(self, [source])
|
||||
|
||||
class QuickTwistedBuildFactory(TwistedBaseFactory):
|
||||
treeStableTimer = 30
|
||||
useProgress = 0
|
||||
|
||||
def __init__(self, source, python="python"):
|
||||
TwistedBaseFactory.__init__(self, source)
|
||||
if type(python) is str:
|
||||
python = [python]
|
||||
self.addStep(HLint, python=python[0])
|
||||
self.addStep(RemovePYCs)
|
||||
for p in python:
|
||||
cmd = [p, "setup.py", "build_ext", "-i"]
|
||||
self.addStep(shell.Compile, command=cmd, flunkOnFailure=True)
|
||||
self.addStep(TwistedTrial, python=p, testChanges=True)
|
||||
|
||||
class FullTwistedBuildFactory(TwistedBaseFactory):
|
||||
treeStableTimer = 5*60
|
||||
|
||||
def __init__(self, source, python="python",
|
||||
processDocs=False, runTestsRandomly=False,
|
||||
compileOpts=[], compileOpts2=[]):
|
||||
TwistedBaseFactory.__init__(self, source)
|
||||
if processDocs:
|
||||
self.addStep(ProcessDocs)
|
||||
|
||||
if type(python) == str:
|
||||
python = [python]
|
||||
assert isinstance(compileOpts, list)
|
||||
assert isinstance(compileOpts2, list)
|
||||
cmd = (python + compileOpts + ["setup.py", "build_ext"]
|
||||
+ compileOpts2 + ["-i"])
|
||||
|
||||
self.addStep(shell.Compile, command=cmd, flunkOnFailure=True)
|
||||
self.addStep(RemovePYCs)
|
||||
self.addStep(TwistedTrial, python=python, randomly=runTestsRandomly)
|
||||
|
||||
class TwistedDebsBuildFactory(TwistedBaseFactory):
|
||||
treeStableTimer = 10*60
|
||||
|
||||
def __init__(self, source, python="python"):
|
||||
TwistedBaseFactory.__init__(self, source)
|
||||
self.addStep(ProcessDocs, haltOnFailure=True)
|
||||
self.addStep(BuildDebs, warnOnWarnings=True)
|
||||
|
||||
class TwistedReactorsBuildFactory(TwistedBaseFactory):
|
||||
treeStableTimer = 5*60
|
||||
|
||||
def __init__(self, source,
|
||||
python="python", compileOpts=[], compileOpts2=[],
|
||||
reactors=None):
|
||||
TwistedBaseFactory.__init__(self, source)
|
||||
|
||||
if type(python) == str:
|
||||
python = [python]
|
||||
assert isinstance(compileOpts, list)
|
||||
assert isinstance(compileOpts2, list)
|
||||
cmd = (python + compileOpts + ["setup.py", "build_ext"]
|
||||
+ compileOpts2 + ["-i"])
|
||||
|
||||
self.addStep(shell.Compile, command=cmd, warnOnFailure=True)
|
||||
|
||||
if reactors == None:
|
||||
reactors = [
|
||||
'gtk2',
|
||||
'gtk',
|
||||
#'kqueue',
|
||||
'poll',
|
||||
'c',
|
||||
'qt',
|
||||
#'win32',
|
||||
]
|
||||
for reactor in reactors:
|
||||
flunkOnFailure = 1
|
||||
warnOnFailure = 0
|
||||
#if reactor in ['c', 'qt', 'win32']:
|
||||
# # these are buggy, so tolerate failures for now
|
||||
# flunkOnFailure = 0
|
||||
# warnOnFailure = 1
|
||||
self.addStep(RemovePYCs) # TODO: why?
|
||||
self.addStep(TwistedTrial, name=reactor, python=python,
|
||||
reactor=reactor, flunkOnFailure=flunkOnFailure,
|
||||
warnOnFailure=warnOnFailure)
|
16
tools/buildbot/buildbot/process/step.py
Normal file
@ -0,0 +1,16 @@
# -*- test-case-name: buildbot.test.test_steps.ReorgCompatibility -*-

# legacy compatibility

import warnings
warnings.warn("buildbot.process.step is deprecated. Please import things like ShellCommand from one of the buildbot.steps.* modules instead.",
              DeprecationWarning)

from buildbot.steps.shell import ShellCommand, WithProperties, TreeSize, Configure, Compile, Test
from buildbot.steps.source import CVS, SVN, Darcs, Git, Arch, Bazaar, Mercurial, P4, P4Sync
from buildbot.steps.dummy import Dummy, FailingDummy, RemoteDummy

from buildbot.process.buildstep import LogObserver, LogLineObserver
from buildbot.process.buildstep import RemoteShellCommand
from buildbot.process.buildstep import BuildStep, LoggingBuildStep

6
tools/buildbot/buildbot/process/step_twisted.py
Normal file
@ -0,0 +1,6 @@
# -*- test-case-name: buildbot.test.test_twisted -*-

# legacy compatibility

from buildbot.steps.python_twisted import HLint, Trial, ProcessDocs, BuildDebs
from buildbot.steps.python_twisted import RemovePYCs
162
tools/buildbot/buildbot/process/step_twisted2.py
Normal file
@ -0,0 +1,162 @@
|
||||
#! /usr/bin/python
|
||||
|
||||
from buildbot.status import tests
|
||||
from buildbot.process.step import SUCCESS, FAILURE, BuildStep
|
||||
from buildbot.process.step_twisted import RunUnitTests
|
||||
|
||||
from zope.interface import implements
|
||||
from twisted.python import log, failure
|
||||
from twisted.spread import jelly
|
||||
from twisted.pb.tokens import BananaError
|
||||
from twisted.web.html import PRE
|
||||
from twisted.web.error import NoResource
|
||||
|
||||
class Null: pass
|
||||
ResultTypes = Null()
|
||||
ResultTypeNames = ["SKIP",
|
||||
"EXPECTED_FAILURE", "FAILURE", "ERROR",
|
||||
"UNEXPECTED_SUCCESS", "SUCCESS"]
|
||||
try:
|
||||
from twisted.trial import reporter # introduced in Twisted-1.0.5
|
||||
# extract the individual result types
|
||||
for name in ResultTypeNames:
|
||||
setattr(ResultTypes, name, getattr(reporter, name))
|
||||
except ImportError:
|
||||
from twisted.trial import unittest # Twisted-1.0.4 has them here
|
||||
for name in ResultTypeNames:
|
||||
setattr(ResultTypes, name, getattr(unittest, name))
|
||||
|
||||
log._keepErrors = 0
|
||||
from twisted.trial import remote # for trial/jelly parsing
|
||||
|
||||
import StringIO
|
||||
|
||||
class OneJellyTest(tests.OneTest):
|
||||
def html(self, request):
|
||||
tpl = "<HTML><BODY>\n\n%s\n\n</body></html>\n"
|
||||
pptpl = "<HTML><BODY>\n\n<pre>%s</pre>\n\n</body></html>\n"
|
||||
t = request.postpath[0] # one of 'short', 'long' #, or 'html'
|
||||
if isinstance(self.results, failure.Failure):
|
||||
# it would be nice to remove unittest functions from the
|
||||
# traceback like unittest.format_exception() does.
|
||||
if t == 'short':
|
||||
s = StringIO.StringIO()
|
||||
self.results.printTraceback(s)
|
||||
return pptpl % PRE(s.getvalue())
|
||||
elif t == 'long':
|
||||
s = StringIO.StringIO()
|
||||
self.results.printDetailedTraceback(s)
|
||||
return pptpl % PRE(s.getvalue())
|
||||
#elif t == 'html':
|
||||
# return tpl % formatFailure(self.results)
|
||||
# ACK! source lines aren't stored in the Failure, rather,
|
||||
# formatFailure pulls them (by filename) from the local
|
||||
# disk. Feh. Even printTraceback() won't work. Double feh.
|
||||
return NoResource("No such mode '%s'" % t)
|
||||
if self.results == None:
|
||||
return tpl % "No results to show: test probably passed."
|
||||
# maybe results are plain text?
|
||||
return pptpl % PRE(self.results)
|
||||
|
||||
class TwistedJellyTestResults(tests.TestResults):
|
||||
oneTestClass = OneJellyTest
|
||||
def describeOneTest(self, testname):
|
||||
return "%s: %s\n" % (testname, self.tests[testname][0])
|
||||
|
||||
class RunUnitTestsJelly(RunUnitTests):
|
||||
"""I run the unit tests with the --jelly option, which generates
|
||||
machine-parseable results as the tests are run.
|
||||
"""
|
||||
trialMode = "--jelly"
|
||||
implements(remote.IRemoteReporter)
|
||||
|
||||
ourtypes = { ResultTypes.SKIP: tests.SKIP,
|
||||
ResultTypes.EXPECTED_FAILURE: tests.EXPECTED_FAILURE,
|
||||
ResultTypes.FAILURE: tests.FAILURE,
|
||||
ResultTypes.ERROR: tests.ERROR,
|
||||
ResultTypes.UNEXPECTED_SUCCESS: tests.UNEXPECTED_SUCCESS,
|
||||
ResultTypes.SUCCESS: tests.SUCCESS,
|
||||
}
|
||||
|
||||
def __getstate__(self):
|
||||
#d = RunUnitTests.__getstate__(self)
|
||||
d = self.__dict__.copy()
|
||||
# Banana subclasses are Ephemeral
|
||||
if d.has_key("decoder"):
|
||||
del d['decoder']
|
||||
return d
|
||||
def start(self):
|
||||
self.decoder = remote.DecodeReport(self)
|
||||
# don't accept anything unpleasant from the (untrusted) build slave
|
||||
# The jellied stream may have Failures, but everything inside should
|
||||
# be a string
|
||||
security = jelly.SecurityOptions()
|
||||
security.allowBasicTypes()
|
||||
security.allowInstancesOf(failure.Failure)
|
||||
self.decoder.taster = security
|
||||
self.results = TwistedJellyTestResults()
|
||||
RunUnitTests.start(self)
|
||||
|
||||
def logProgress(self, progress):
|
||||
# XXX: track number of tests
|
||||
BuildStep.logProgress(self, progress)
|
||||
|
||||
def addStdout(self, data):
|
||||
if not self.decoder:
|
||||
return
|
||||
try:
|
||||
self.decoder.dataReceived(data)
|
||||
except BananaError:
|
||||
self.decoder = None
|
||||
log.msg("trial --jelly output unparseable, traceback follows")
|
||||
log.deferr()
|
||||
|
||||
def remote_start(self, expectedTests, times=None):
|
||||
print "remote_start", expectedTests
|
||||
def remote_reportImportError(self, name, aFailure, times=None):
|
||||
pass
|
||||
def remote_reportStart(self, testClass, method, times=None):
|
||||
print "reportStart", testClass, method
|
||||
|
||||
def remote_reportResults(self, testClass, method, resultType, results,
|
||||
times=None):
|
||||
print "reportResults", testClass, method, resultType
|
||||
which = testClass + "." + method
|
||||
self.results.addTest(which,
|
||||
self.ourtypes.get(resultType, tests.UNKNOWN),
|
||||
results)
|
||||
|
||||
def finished(self, rc):
|
||||
# give self.results to our Build object
|
||||
self.build.testsFinished(self.results)
|
||||
total = self.results.countTests()
|
||||
count = self.results.countFailures()
|
||||
result = SUCCESS
|
||||
if total == None:
|
||||
result = (FAILURE, ['tests%s' % self.rtext(' (%s)')])
|
||||
if count:
|
||||
result = (FAILURE, ["%d tes%s%s" % (count,
|
||||
(count == 1 and 't' or 'ts'),
|
||||
self.rtext(' (%s)'))])
|
||||
return self.stepComplete(result)
|
||||
def finishStatus(self, result):
|
||||
total = self.results.countTests()
|
||||
count = self.results.countFailures()
|
||||
color = "green"
|
||||
text = []
|
||||
if count == 0:
|
||||
text.extend(["%d %s" % \
|
||||
(total,
|
||||
total == 1 and "test" or "tests"),
|
||||
"passed"])
|
||||
else:
|
||||
text.append("tests")
|
||||
text.append("%d %s" % \
|
||||
(count,
|
||||
count == 1 and "failure" or "failures"))
|
||||
color = "red"
|
||||
self.updateCurrentActivity(color=color, text=text)
|
||||
self.addFileToCurrentActivity("tests", self.results)
|
||||
#self.finishStatusSummary()
|
||||
self.finishCurrentActivity()
|
||||
|
699
tools/buildbot/buildbot/scheduler.py
Normal file
@ -0,0 +1,699 @@
|
||||
# -*- test-case-name: buildbot.test.test_dependencies -*-
|
||||
|
||||
import time, os.path
|
||||
|
||||
from twisted.internet import reactor
|
||||
from twisted.application import service, internet, strports
|
||||
from twisted.python import log, runtime
|
||||
from twisted.protocols import basic
|
||||
from twisted.cred import portal, checkers
|
||||
from twisted.spread import pb
|
||||
|
||||
from buildbot import interfaces, buildset, util, pbutil
|
||||
from buildbot.status import builder
|
||||
from buildbot.twcompat import implements, providedBy
|
||||
from buildbot.sourcestamp import SourceStamp
|
||||
from buildbot.changes import maildirtwisted
|
||||
|
||||
|
||||
class BaseScheduler(service.MultiService, util.ComparableMixin):
|
||||
if implements:
|
||||
implements(interfaces.IScheduler)
|
||||
else:
|
||||
__implements__ = (interfaces.IScheduler,
|
||||
service.MultiService.__implements__)
|
||||
|
||||
def __init__(self, name):
|
||||
service.MultiService.__init__(self)
|
||||
self.name = name
|
||||
|
||||
def __repr__(self):
|
||||
# TODO: why can't id() return a positive number? %d is ugly.
|
||||
return "<Scheduler '%s' at %d>" % (self.name, id(self))
|
||||
|
||||
def submit(self, bs):
|
||||
self.parent.submitBuildSet(bs)
|
||||
|
||||
def addChange(self, change):
|
||||
pass
|
||||
|
||||
class BaseUpstreamScheduler(BaseScheduler):
|
||||
if implements:
|
||||
implements(interfaces.IUpstreamScheduler)
|
||||
else:
|
||||
__implements__ = (interfaces.IUpstreamScheduler,
|
||||
BaseScheduler.__implements__)
|
||||
|
||||
def __init__(self, name):
|
||||
BaseScheduler.__init__(self, name)
|
||||
self.successWatchers = []
|
||||
|
||||
def subscribeToSuccessfulBuilds(self, watcher):
|
||||
self.successWatchers.append(watcher)
|
||||
def unsubscribeToSuccessfulBuilds(self, watcher):
|
||||
self.successWatchers.remove(watcher)
|
||||
|
||||
def submit(self, bs):
|
||||
d = bs.waitUntilFinished()
|
||||
d.addCallback(self.buildSetFinished)
|
||||
self.parent.submitBuildSet(bs)
|
||||
|
||||
def buildSetFinished(self, bss):
|
||||
if not self.running:
|
||||
return
|
||||
if bss.getResults() == builder.SUCCESS:
|
||||
ss = bss.getSourceStamp()
|
||||
for w in self.successWatchers:
|
||||
w(ss)
|
||||
|
||||
|
||||
class Scheduler(BaseUpstreamScheduler):
|
||||
"""The default Scheduler class will run a build after some period of time
|
||||
called the C{treeStableTimer}, on a given set of Builders. It only pays
|
||||
attention to a single branch. You can provide a C{fileIsImportant}
|
||||
function which will evaluate each Change to decide whether or not it
|
||||
should trigger a new build.
|
||||
"""
|
||||
|
||||
fileIsImportant = None
|
||||
compare_attrs = ('name', 'treeStableTimer', 'builderNames', 'branch',
|
||||
'fileIsImportant')
|
||||
|
||||
def __init__(self, name, branch, treeStableTimer, builderNames,
|
||||
fileIsImportant=None):
|
||||
"""
|
||||
@param name: the name of this Scheduler
|
||||
@param branch: The branch name that the Scheduler should pay
|
||||
attention to. Any Change that is not on this branch
|
||||
will be ignored. It can be set to None to only pay
|
||||
attention to the default branch.
|
||||
@param treeStableTimer: the duration, in seconds, for which the tree
|
||||
must remain unchanged before a build will be
|
||||
triggered. This is intended to avoid builds
|
||||
of partially-committed fixes.
|
||||
@param builderNames: a list of Builder names. When this Scheduler
|
||||
decides to start a set of builds, they will be
|
||||
run on the Builders named by this list.
|
||||
|
||||
@param fileIsImportant: A callable which takes one argument (a Change
|
||||
instance) and returns True if the change is
|
||||
worth building, and False if it is not.
|
||||
Unimportant Changes are accumulated until the
|
||||
build is triggered by an important change.
|
||||
The default value of None means that all
|
||||
Changes are important.
|
||||
"""
|
||||
|
||||
BaseUpstreamScheduler.__init__(self, name)
|
||||
self.treeStableTimer = treeStableTimer
|
||||
errmsg = ("The builderNames= argument to Scheduler must be a list "
|
||||
"of Builder description names (i.e. the 'name' key of the "
|
||||
"Builder specification dictionary)")
|
||||
assert isinstance(builderNames, (list, tuple)), errmsg
|
||||
for b in builderNames:
|
||||
assert isinstance(b, str), errmsg
|
||||
self.builderNames = builderNames
|
||||
self.branch = branch
|
||||
if fileIsImportant:
|
||||
assert callable(fileIsImportant)
|
||||
self.fileIsImportant = fileIsImportant
|
||||
|
||||
self.importantChanges = []
|
||||
self.unimportantChanges = []
|
||||
self.nextBuildTime = None
|
||||
self.timer = None
|
||||
|
||||
def listBuilderNames(self):
|
||||
return self.builderNames
|
||||
|
||||
def getPendingBuildTimes(self):
|
||||
if self.nextBuildTime is not None:
|
||||
return [self.nextBuildTime]
|
||||
return []
|
||||
|
||||
def addChange(self, change):
|
||||
if change.branch != self.branch:
|
||||
log.msg("%s ignoring off-branch %s" % (self, change))
|
||||
return
|
||||
if not self.fileIsImportant:
|
||||
self.addImportantChange(change)
|
||||
elif self.fileIsImportant(change):
|
||||
self.addImportantChange(change)
|
||||
else:
|
||||
self.addUnimportantChange(change)
|
||||
|
||||
def addImportantChange(self, change):
|
||||
log.msg("%s: change is important, adding %s" % (self, change))
|
||||
self.importantChanges.append(change)
|
||||
self.nextBuildTime = max(self.nextBuildTime,
|
||||
change.when + self.treeStableTimer)
|
||||
self.setTimer(self.nextBuildTime)
|
||||
|
||||
def addUnimportantChange(self, change):
|
||||
log.msg("%s: change is not important, adding %s" % (self, change))
|
||||
self.unimportantChanges.append(change)
|
||||
|
||||
def setTimer(self, when):
|
||||
log.msg("%s: setting timer to %s" %
|
||||
(self, time.strftime("%H:%M:%S", time.localtime(when))))
|
||||
now = util.now()
|
||||
if when < now:
|
||||
when = now + 1
|
||||
if self.timer:
|
||||
self.timer.cancel()
|
||||
self.timer = reactor.callLater(when - now, self.fireTimer)
|
||||
|
||||
def stopTimer(self):
|
||||
if self.timer:
|
||||
self.timer.cancel()
|
||||
self.timer = None
|
||||
|
||||
def fireTimer(self):
|
||||
# clear out our state
|
||||
self.timer = None
|
||||
self.nextBuildTime = None
|
||||
changes = self.importantChanges + self.unimportantChanges
|
||||
self.importantChanges = []
|
||||
self.unimportantChanges = []
|
||||
|
||||
# create a BuildSet, submit it to the BuildMaster
|
||||
bs = buildset.BuildSet(self.builderNames,
|
||||
SourceStamp(changes=changes))
|
||||
self.submit(bs)
|
||||
|
||||
def stopService(self):
|
||||
self.stopTimer()
|
||||
return service.MultiService.stopService(self)
|
||||
|
||||
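As a usage sketch (the builder name, timer value, and the docs-only importance policy are illustrative assumptions), a master.cfg would instantiate this class and hand it to the buildmaster roughly like this:

from buildbot.scheduler import Scheduler

def important(change):
    # treat documentation-only changes as unimportant; anything else triggers a build
    for f in change.files:
        if not f.startswith("docs/"):
            return True
    return False

quick = Scheduler(name="quick", branch=None, treeStableTimer=60,
                  builderNames=["quick-linux"], fileIsImportant=important)
c['schedulers'] = [quick]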
|
||||
class AnyBranchScheduler(BaseUpstreamScheduler):
|
||||
"""This Scheduler will handle changes on a variety of branches. It will
|
||||
accumulate Changes for each branch separately. It works by creating a
|
||||
separate Scheduler for each new branch it sees."""
|
||||
|
||||
schedulerFactory = Scheduler
|
||||
fileIsImportant = None
|
||||
|
||||
compare_attrs = ('name', 'branches', 'treeStableTimer', 'builderNames',
|
||||
'fileIsImportant')
|
||||
|
||||
def __init__(self, name, branches, treeStableTimer, builderNames,
|
||||
fileIsImportant=None):
|
||||
"""
|
||||
@param name: the name of this Scheduler
|
||||
@param branches: The branch names that the Scheduler should pay
|
||||
attention to. Any Change that is not on one of these
|
||||
branches will be ignored. It can be set to None to
|
||||
accept changes from any branch. Don't use [] (an
|
||||
empty list), because that means we don't pay
|
||||
attention to *any* branches, so we'll never build
|
||||
anything.
|
||||
@param treeStableTimer: the duration, in seconds, for which the tree
|
||||
must remain unchanged before a build will be
|
||||
triggered. This is intended to avoid builds
|
||||
of partially-committed fixes.
|
||||
@param builderNames: a list of Builder names. When this Scheduler
|
||||
decides to start a set of builds, they will be
|
||||
run on the Builders named by this list.
|
||||
|
||||
@param fileIsImportant: A callable which takes one argument (a Change
|
||||
instance) and returns True if the change is
|
||||
worth building, and False if it is not.
|
||||
Unimportant Changes are accumulated until the
|
||||
build is triggered by an important change.
|
||||
The default value of None means that all
|
||||
Changes are important.
|
||||
"""
|
||||
|
||||
BaseUpstreamScheduler.__init__(self, name)
|
||||
self.treeStableTimer = treeStableTimer
|
||||
for b in builderNames:
|
||||
assert isinstance(b, str)
|
||||
self.builderNames = builderNames
|
||||
self.branches = branches
|
||||
if self.branches == []:
|
||||
log.msg("AnyBranchScheduler %s: branches=[], so we will ignore "
|
||||
"all branches, and never trigger any builds. Please set "
|
||||
"branches=None to mean 'all branches'" % self)
|
||||
# consider raising an exception here, to make this warning more
|
||||
# prominent, but I can vaguely imagine situations where you might
|
||||
# want to comment out branches temporarily and wouldn't
|
||||
# appreciate it being treated as an error.
|
||||
if fileIsImportant:
|
||||
assert callable(fileIsImportant)
|
||||
self.fileIsImportant = fileIsImportant
|
||||
self.schedulers = {} # one per branch
|
||||
|
||||
def __repr__(self):
|
||||
return "<AnyBranchScheduler '%s'>" % self.name
|
||||
|
||||
def listBuilderNames(self):
|
||||
return self.builderNames
|
||||
|
||||
def getPendingBuildTimes(self):
|
||||
bts = []
|
||||
for s in self.schedulers.values():
|
||||
if s.nextBuildTime is not None:
|
||||
bts.append(s.nextBuildTime)
|
||||
return bts
|
||||
|
||||
def addChange(self, change):
|
||||
branch = change.branch
|
||||
if self.branches is not None and branch not in self.branches:
|
||||
log.msg("%s ignoring off-branch %s" % (self, change))
|
||||
return
|
||||
s = self.schedulers.get(branch)
|
||||
if not s:
|
||||
if branch:
|
||||
name = self.name + "." + branch
|
||||
else:
|
||||
name = self.name + ".<default>"
|
||||
s = self.schedulerFactory(name, branch,
|
||||
self.treeStableTimer,
|
||||
self.builderNames,
|
||||
self.fileIsImportant)
|
||||
s.successWatchers = self.successWatchers
|
||||
s.setServiceParent(self)
|
||||
# TODO: does this result in schedulers that stack up forever?
|
||||
# When I make the persistify-pass, think about this some more.
|
||||
self.schedulers[branch] = s
|
||||
s.addChange(change)
|
||||
|
||||
def submitBuildSet(self, bs):
|
||||
self.parent.submitBuildSet(bs)
|
||||
|
||||
|
||||
class Dependent(BaseUpstreamScheduler):
|
||||
"""This scheduler runs some set of 'downstream' builds when the
|
||||
'upstream' scheduler has completed successfully."""
|
||||
|
||||
compare_attrs = ('name', 'upstream', 'builders')
|
||||
|
||||
def __init__(self, name, upstream, builderNames):
|
||||
assert providedBy(upstream, interfaces.IUpstreamScheduler)
|
||||
BaseUpstreamScheduler.__init__(self, name)
|
||||
self.upstream = upstream
|
||||
self.builderNames = builderNames
|
||||
|
||||
def listBuilderNames(self):
|
||||
return self.builderNames
|
||||
|
||||
def getPendingBuildTimes(self):
|
||||
# report the upstream's value
|
||||
return self.upstream.getPendingBuildTimes()
|
||||
|
||||
def startService(self):
|
||||
service.MultiService.startService(self)
|
||||
self.upstream.subscribeToSuccessfulBuilds(self.upstreamBuilt)
|
||||
|
||||
def stopService(self):
|
||||
d = service.MultiService.stopService(self)
|
||||
self.upstream.unsubscribeToSuccessfulBuilds(self.upstreamBuilt)
|
||||
return d
|
||||
|
||||
def upstreamBuilt(self, ss):
|
||||
bs = buildset.BuildSet(self.builderNames, ss)
|
||||
self.submit(bs)
|
||||
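A brief sketch of how an upstream/downstream pair might be wired together in master.cfg; the scheduler and builder names are illustrative:

from buildbot.scheduler import Scheduler, Dependent

full = Scheduler(name="full", branch=None, treeStableTimer=5*60,
                 builderNames=["full-linux", "full-win32"])
packaging = Dependent(name="packaging", upstream=full,
                      builderNames=["make-tarballs"])
c['schedulers'] = [full, packaging]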
|
||||
|
||||
|
||||
class Periodic(BaseUpstreamScheduler):
|
||||
"""Instead of watching for Changes, this Scheduler can just start a build
|
||||
at fixed intervals. The C{periodicBuildTimer} parameter sets the number
|
||||
of seconds to wait between such periodic builds. The first build will be
|
||||
run immediately."""
|
||||
|
||||
# TODO: consider having this watch another (changed-based) scheduler and
|
||||
# merely enforce a minimum time between builds.
|
||||
|
||||
compare_attrs = ('name', 'builderNames', 'periodicBuildTimer', 'branch')
|
||||
|
||||
def __init__(self, name, builderNames, periodicBuildTimer,
|
||||
branch=None):
|
||||
BaseUpstreamScheduler.__init__(self, name)
|
||||
self.builderNames = builderNames
|
||||
self.periodicBuildTimer = periodicBuildTimer
|
||||
self.branch = branch
|
||||
self.reason = ("The Periodic scheduler named '%s' triggered this build"
|
||||
% name)
|
||||
self.timer = internet.TimerService(self.periodicBuildTimer,
|
||||
self.doPeriodicBuild)
|
||||
self.timer.setServiceParent(self)
|
||||
|
||||
def listBuilderNames(self):
|
||||
return self.builderNames
|
||||
|
||||
def getPendingBuildTimes(self):
|
||||
# TODO: figure out when self.timer is going to fire next and report
|
||||
# that
|
||||
return []
|
||||
|
||||
def doPeriodicBuild(self):
|
||||
bs = buildset.BuildSet(self.builderNames,
|
||||
SourceStamp(branch=self.branch),
|
||||
self.reason)
|
||||
self.submit(bs)
|
||||
|
||||
|
||||
|
||||
class Nightly(BaseUpstreamScheduler):
|
||||
"""Imitate 'cron' scheduling. This can be used to schedule a nightly
|
||||
build, or one which runs at certain times of the day, week, or month.
|
||||
|
||||
Pass some subset of minute, hour, dayOfMonth, month, and dayOfWeek; each
|
||||
may be a single number or a list of valid values. The builds will be
|
||||
triggered whenever the current time matches these values. Wildcards are
|
||||
represented by a '*' string. All fields default to a wildcard except
|
||||
'minute', so with no fields this defaults to a build every hour, on the
|
||||
hour.
|
||||
|
||||
For example, the following master.cfg clause will cause a build to be
|
||||
started every night at 3:00am::
|
||||
|
||||
s = Nightly('nightly', ['builder1', 'builder2'], hour=3, minute=0)
|
||||
c['schedulers'].append(s)
|
||||
|
||||
This scheduler will perform a build each monday morning at 6:23am and
|
||||
again at 8:23am::
|
||||
|
||||
s = Nightly('BeforeWork', ['builder1'],
|
||||
dayOfWeek=0, hour=[6,8], minute=23)
|
||||
|
||||
The following runs a build every two hours::
|
||||
|
||||
s = Nightly('every2hours', ['builder1'], hour=range(0, 24, 2))
|
||||
|
||||
And this one will run only on December 24th::
|
||||
|
||||
s = Nightly('SleighPreflightCheck', ['flying_circuits', 'radar'],
|
||||
month=12, dayOfMonth=24, hour=12, minute=0)
|
||||
|
||||
For dayOfWeek and dayOfMonth, builds are triggered if the date matches
|
||||
either of them. All time values are compared against the tuple returned
|
||||
by time.localtime(), so month and dayOfMonth numbers start at 1, not
|
||||
zero. dayOfWeek=0 is Monday, dayOfWeek=6 is Sunday.
|
||||
"""
|
||||
|
||||
compare_attrs = ('name', 'builderNames',
|
||||
'minute', 'hour', 'dayOfMonth', 'month',
|
||||
'dayOfWeek', 'branch')
|
||||
|
||||
def __init__(self, name, builderNames, minute=0, hour='*',
|
||||
dayOfMonth='*', month='*', dayOfWeek='*',
|
||||
branch=None):
|
||||
# Setting minute=0 really makes this an 'Hourly' scheduler. This
|
||||
# seemed like a better default than minute='*', which would result in
|
||||
# a build every 60 seconds.
|
||||
BaseUpstreamScheduler.__init__(self, name)
|
||||
self.builderNames = builderNames
|
||||
self.minute = minute
|
||||
self.hour = hour
|
||||
self.dayOfMonth = dayOfMonth
|
||||
self.month = month
|
||||
self.dayOfWeek = dayOfWeek
|
||||
self.branch = branch
|
||||
self.delayedRun = None
|
||||
self.nextRunTime = None
|
||||
self.reason = ("The Nightly scheduler named '%s' triggered this build"
|
||||
% name)
|
||||
|
||||
def addTime(self, timetuple, secs):
|
||||
return time.localtime(time.mktime(timetuple)+secs)
|
||||
def findFirstValueAtLeast(self, values, value, default=None):
|
||||
for v in values:
|
||||
if v >= value: return v
|
||||
return default
|
||||
|
||||
def setTimer(self):
|
||||
self.nextRunTime = self.calculateNextRunTime()
|
||||
self.delayedRun = reactor.callLater(self.nextRunTime - time.time(),
|
||||
self.doPeriodicBuild)
|
||||
|
||||
def startService(self):
|
||||
BaseUpstreamScheduler.startService(self)
|
||||
self.setTimer()
|
||||
|
||||
def stopService(self):
|
||||
BaseUpstreamScheduler.stopService(self)
|
||||
self.delayedRun.cancel()
|
||||
|
||||
def isRunTime(self, timetuple):
|
||||
def check(ourvalue, value):
|
||||
if ourvalue == '*': return True
|
||||
if isinstance(ourvalue, int): return value == ourvalue
|
||||
return (value in ourvalue)
|
||||
|
||||
if not check(self.minute, timetuple[4]):
|
||||
#print 'bad minute', timetuple[4], self.minute
|
||||
return False
|
||||
|
||||
if not check(self.hour, timetuple[3]):
|
||||
#print 'bad hour', timetuple[3], self.hour
|
||||
return False
|
||||
|
||||
if not check(self.month, timetuple[1]):
|
||||
#print 'bad month', timetuple[1], self.month
|
||||
return False
|
||||
|
||||
if self.dayOfMonth != '*' and self.dayOfWeek != '*':
|
||||
# They specified both day(s) of month AND day(s) of week.
|
||||
# This means that we only have to match one of the two. If
|
||||
# neither one matches, this time is not the right time.
|
||||
if not (check(self.dayOfMonth, timetuple[2]) or
|
||||
check(self.dayOfWeek, timetuple[6])):
|
||||
#print 'bad day'
|
||||
return False
|
||||
else:
|
||||
if not check(self.dayOfMonth, timetuple[2]):
|
||||
#print 'bad day of month'
|
||||
return False
|
||||
|
||||
if not check(self.dayOfWeek, timetuple[6]):
|
||||
#print 'bad day of week'
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def calculateNextRunTime(self):
|
||||
return self.calculateNextRunTimeFrom(time.time())
|
||||
|
||||
def calculateNextRunTimeFrom(self, now):
|
||||
dateTime = time.localtime(now)
|
||||
|
||||
# Remove seconds by advancing to at least the next minute
|
||||
dateTime = self.addTime(dateTime, 60-dateTime[5])
|
||||
|
||||
# Now we just keep adding minutes until we find something that matches
|
||||
|
||||
# It's not an efficient algorithm, but it'll *work* for now
|
||||
yearLimit = dateTime[0]+2
|
||||
while not self.isRunTime(dateTime):
|
||||
dateTime = self.addTime(dateTime, 60)
|
||||
#print 'Trying', time.asctime(dateTime)
|
||||
assert dateTime[0] < yearLimit, 'Something is wrong with this code'
|
||||
return time.mktime(dateTime)
|
||||
|
||||
def listBuilderNames(self):
|
||||
return self.builderNames
|
||||
|
||||
def getPendingBuildTimes(self):
|
||||
# TODO: figure out when self.timer is going to fire next and report
|
||||
# that
|
||||
if self.nextRunTime is None: return []
|
||||
return [self.nextRunTime]
|
||||
|
||||
def doPeriodicBuild(self):
|
||||
# Schedule the next run
|
||||
self.setTimer()
|
||||
|
||||
# And trigger a build
|
||||
bs = buildset.BuildSet(self.builderNames,
|
||||
SourceStamp(branch=self.branch),
|
||||
self.reason)
|
||||
self.submit(bs)
|
||||
|
||||
def addChange(self, change):
|
||||
pass
|
||||
|
||||
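Since calculateNextRunTimeFrom() depends only on the clock and the constructor arguments, it can be exercised on its own without starting the service; a small sketch (the scheduler and builder names are illustrative):

import time
from buildbot.scheduler import Nightly

n = Nightly('nightly', ['builder1'], hour=3, minute=0)
next_run = n.calculateNextRunTimeFrom(time.time())
print "next nightly build at", time.asctime(time.localtime(next_run))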
|
||||
|
||||
class TryBase(service.MultiService, util.ComparableMixin):
|
||||
if implements:
|
||||
implements(interfaces.IScheduler)
|
||||
else:
|
||||
__implements__ = (interfaces.IScheduler,
|
||||
service.MultiService.__implements__)
|
||||
|
||||
def __init__(self, name, builderNames):
|
||||
service.MultiService.__init__(self)
|
||||
self.name = name
|
||||
self.builderNames = builderNames
|
||||
|
||||
def listBuilderNames(self):
|
||||
return self.builderNames
|
||||
|
||||
def getPendingBuildTimes(self):
|
||||
# we can't predict what the developers are going to do in the future
|
||||
return []
|
||||
|
||||
def addChange(self, change):
|
||||
# Try schedulers ignore Changes
|
||||
pass
|
||||
|
||||
|
||||
class BadJobfile(Exception):
|
||||
pass
|
||||
|
||||
class JobFileScanner(basic.NetstringReceiver):
|
||||
def __init__(self):
|
||||
self.strings = []
|
||||
self.transport = self # so transport.loseConnection works
|
||||
self.error = False
|
||||
|
||||
def stringReceived(self, s):
|
||||
self.strings.append(s)
|
||||
|
||||
def loseConnection(self):
|
||||
self.error = True
|
||||
|
||||
class Try_Jobdir(TryBase):
|
||||
compare_attrs = ["name", "builderNames", "jobdir"]
|
||||
|
||||
def __init__(self, name, builderNames, jobdir):
|
||||
TryBase.__init__(self, name, builderNames)
|
||||
self.jobdir = jobdir
|
||||
self.watcher = maildirtwisted.MaildirService()
|
||||
self.watcher.setServiceParent(self)
|
||||
|
||||
def setServiceParent(self, parent):
|
||||
self.watcher.setBasedir(os.path.join(parent.basedir, self.jobdir))
|
||||
TryBase.setServiceParent(self, parent)
|
||||
|
||||
def parseJob(self, f):
|
||||
# jobfiles are serialized build requests. Each is a list of
|
||||
# serialized netstrings, in the following order:
|
||||
# "1", the version number of this format
|
||||
# buildsetID, arbitrary string, used to find the buildSet later
|
||||
# branch name, "" for default-branch
|
||||
# base revision
|
||||
# patchlevel, usually "1"
|
||||
# patch
|
||||
# builderNames...
|
||||
p = JobFileScanner()
|
||||
p.dataReceived(f.read())
|
||||
if p.error:
|
||||
raise BadJobfile("unable to parse netstrings")
|
||||
s = p.strings
|
||||
ver = s.pop(0)
|
||||
if ver != "1":
|
||||
raise BadJobfile("unknown version '%s'" % ver)
|
||||
buildsetID, branch, baserev, patchlevel, diff = s[:5]
|
||||
builderNames = s[5:]
|
||||
if branch == "":
|
||||
branch = None
|
||||
patchlevel = int(patchlevel)
|
||||
patch = (patchlevel, diff)
|
||||
ss = SourceStamp(branch, baserev, patch)
|
||||
return builderNames, ss, buildsetID
|
||||
|
||||
def messageReceived(self, filename):
|
||||
md = os.path.join(self.parent.basedir, self.jobdir)
|
||||
if runtime.platformType == "posix":
|
||||
# open the file before moving it, because I'm afraid that once
|
||||
# it's in cur/, someone might delete it at any moment
|
||||
path = os.path.join(md, "new", filename)
|
||||
f = open(path, "r")
|
||||
os.rename(os.path.join(md, "new", filename),
|
||||
os.path.join(md, "cur", filename))
|
||||
else:
|
||||
# do this backwards under windows, because you can't move a file
|
||||
# that somebody is holding open. This was causing a Permission
|
||||
# Denied error on bear's win32-twisted1.3 buildslave.
|
||||
os.rename(os.path.join(md, "new", filename),
|
||||
os.path.join(md, "cur", filename))
|
||||
path = os.path.join(md, "cur", filename)
|
||||
f = open(path, "r")
|
||||
|
||||
try:
|
||||
builderNames, ss, bsid = self.parseJob(f)
|
||||
except BadJobfile:
|
||||
log.msg("%s reports a bad jobfile in %s" % (self, filename))
|
||||
log.err()
|
||||
return
|
||||
# compare builderNames against self.builderNames
|
||||
# TODO: think about this some more.. why bother restricting it?
|
||||
# perhaps self.builderNames should be used as the default list
|
||||
# instead of being used as a restriction?
|
||||
for b in builderNames:
|
||||
if not b in self.builderNames:
|
||||
log.msg("%s got jobfile %s with builder %s" % (self,
|
||||
filename, b))
|
||||
log.msg(" but that wasn't in our list: %s"
|
||||
% (self.builderNames,))
|
||||
return
|
||||
|
||||
reason = "'try' job"
|
||||
bs = buildset.BuildSet(builderNames, ss, reason=reason, bsid=bsid)
|
||||
self.parent.submitBuildSet(bs)
|
||||
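For reference, a jobfile in the version-1 format that parseJob() accepts could be produced roughly like this; the netstring helper, the field values, and the jobdir path are illustrative assumptions (the real files are normally written by the 'buildbot try' client):

def netstring(s):
    return "%d:%s," % (len(s), s)

fields = ["1",                            # jobfile format version
          "buildset-1234",                # arbitrary buildset ID
          "",                             # branch ("" means the default branch)
          "4321",                         # base revision the patch applies to
          "1",                            # patchlevel
          "--- a/foo.c\n+++ b/foo.c\n",   # the patch body
          "quick-linux"]                  # one or more builder names
f = open("jobdir/new/job-1234", "w")
f.write("".join([netstring(s) for s in fields]))
f.close()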
|
||||
class Try_Userpass(TryBase):
|
||||
compare_attrs = ["name", "builderNames", "port", "userpass"]
|
||||
|
||||
if implements:
|
||||
implements(portal.IRealm)
|
||||
else:
|
||||
__implements__ = (portal.IRealm,
|
||||
TryBase.__implements__)
|
||||
|
||||
def __init__(self, name, builderNames, port, userpass):
|
||||
TryBase.__init__(self, name, builderNames)
|
||||
if type(port) is int:
|
||||
port = "tcp:%d" % port
|
||||
self.port = port
|
||||
self.userpass = userpass
|
||||
c = checkers.InMemoryUsernamePasswordDatabaseDontUse()
|
||||
for user,passwd in self.userpass:
|
||||
c.addUser(user, passwd)
|
||||
|
||||
p = portal.Portal(self)
|
||||
p.registerChecker(c)
|
||||
f = pb.PBServerFactory(p)
|
||||
s = strports.service(port, f)
|
||||
s.setServiceParent(self)
|
||||
|
||||
def getPort(self):
|
||||
# utility method for tests: figure out which TCP port we just opened.
|
||||
return self.services[0]._port.getHost().port
|
||||
|
||||
def requestAvatar(self, avatarID, mind, interface):
|
||||
log.msg("%s got connection from user %s" % (self, avatarID))
|
||||
assert interface == pb.IPerspective
|
||||
p = Try_Userpass_Perspective(self, avatarID)
|
||||
return (pb.IPerspective, p, lambda: None)
|
||||
|
||||
def submitBuildSet(self, bs):
|
||||
return self.parent.submitBuildSet(bs)
|
||||
|
||||
class Try_Userpass_Perspective(pbutil.NewCredPerspective):
|
||||
def __init__(self, parent, username):
|
||||
self.parent = parent
|
||||
self.username = username
|
||||
|
||||
def perspective_try(self, branch, revision, patch, builderNames):
|
||||
log.msg("user %s requesting build on builders %s" % (self.username,
|
||||
builderNames))
|
||||
for b in builderNames:
|
||||
if not b in self.parent.builderNames:
|
||||
log.msg("%s got job with builder %s" % (self, b))
|
||||
log.msg(" but that wasn't in our list: %s"
|
||||
% (self.parent.builderNames,))
|
||||
return
|
||||
ss = SourceStamp(branch, revision, patch)
|
||||
reason = "'try' job from user %s" % self.username
|
||||
bs = buildset.BuildSet(builderNames, ss, reason=reason)
|
||||
self.parent.submitBuildSet(bs)
|
||||
|
||||
# return a remotely-usable BuildSetStatus object
|
||||
from buildbot.status.client import makeRemote
|
||||
return makeRemote(bs.status)
|
||||
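And a hypothetical master.cfg entry for the PB-based variant; the port number and credentials are illustrative:

from buildbot.scheduler import Try_Userpass

s = Try_Userpass("try", builderNames=["quick-linux"],
                 port=8031, userpass=[("alice", "sekrit")])
c['schedulers'].append(s)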
|
0
tools/buildbot/buildbot/scripts/__init__.py
Normal file
95
tools/buildbot/buildbot/scripts/logwatcher.py
Normal file
@ -0,0 +1,95 @@
|
||||
|
||||
import os
|
||||
from twisted.python.failure import Failure
|
||||
from twisted.internet import task, defer, reactor
|
||||
from twisted.protocols.basic import LineOnlyReceiver
|
||||
|
||||
class FakeTransport:
|
||||
disconnecting = False
|
||||
|
||||
class BuildmasterTimeoutError(Exception):
|
||||
pass
|
||||
class BuildslaveTimeoutError(Exception):
|
||||
pass
|
||||
class ReconfigError(Exception):
|
||||
pass
|
||||
class BuildSlaveDetectedError(Exception):
|
||||
pass
|
||||
|
||||
class LogWatcher(LineOnlyReceiver):
|
||||
POLL_INTERVAL = 0.1
|
||||
TIMEOUT_DELAY = 5.0
|
||||
delimiter = os.linesep
|
||||
|
||||
def __init__(self, logfile):
|
||||
self.logfile = logfile
|
||||
self.in_reconfig = False
|
||||
self.transport = FakeTransport()
|
||||
self.f = None
|
||||
self.processtype = "buildmaster"
|
||||
|
||||
def start(self):
|
||||
# return a Deferred that fires when the reconfig process has
|
||||
# finished. It errbacks with TimeoutError if the finish line has not
|
||||
# been seen within 5 seconds, and with ReconfigError if the error
|
||||
# line was seen. If the logfile could not be opened, it errbacks with
|
||||
# an IOError.
|
||||
self.running = True
|
||||
d = defer.maybeDeferred(self._start)
|
||||
return d
|
||||
|
||||
def _start(self):
|
||||
self.d = defer.Deferred()
|
||||
try:
|
||||
self.f = open(self.logfile, "rb")
|
||||
self.f.seek(0, 2) # start watching from the end
|
||||
except IOError:
|
||||
pass
|
||||
reactor.callLater(self.TIMEOUT_DELAY, self.timeout)
|
||||
self.poller = task.LoopingCall(self.poll)
|
||||
self.poller.start(self.POLL_INTERVAL)
|
||||
return self.d
|
||||
|
||||
def timeout(self):
|
||||
if self.processtype == "buildmaster":
|
||||
self.d.errback(BuildmasterTimeoutError())
|
||||
else:
|
||||
self.d.errback(BuildslaveTimeoutError())
|
||||
|
||||
def finished(self, results):
|
||||
self.running = False
|
||||
self.in_reconfig = False
|
||||
self.d.callback(results)
|
||||
|
||||
def lineReceived(self, line):
|
||||
if not self.running:
|
||||
return
|
||||
if "Log opened." in line:
|
||||
self.in_reconfig = True
|
||||
if "loading configuration from" in line:
|
||||
self.in_reconfig = True
|
||||
if "Creating BuildSlave" in line:
|
||||
self.processtype = "buildslave"
|
||||
|
||||
if self.in_reconfig:
|
||||
print line
|
||||
|
||||
if "message from master: attached" in line:
|
||||
return self.finished("buildslave")
|
||||
if "I will keep using the previous config file" in line:
|
||||
return self.finished(Failure(ReconfigError()))
|
||||
if "configuration update complete" in line:
|
||||
return self.finished("buildmaster")
|
||||
|
||||
def poll(self):
|
||||
if not self.f:
|
||||
try:
|
||||
self.f = open(self.logfile, "rb")
|
||||
except IOError:
|
||||
return
|
||||
while True:
|
||||
data = self.f.read(1000)
|
||||
if not data:
|
||||
return
|
||||
self.dataReceived(data)
|
||||
|
63
tools/buildbot/buildbot/scripts/reconfig.py
Normal file
@ -0,0 +1,63 @@
|
||||
|
||||
import os, signal
|
||||
from twisted.internet import reactor
|
||||
|
||||
from buildbot.scripts.logwatcher import LogWatcher, BuildmasterTimeoutError, \
|
||||
ReconfigError
|
||||
|
||||
class Reconfigurator:
|
||||
def run(self, config):
|
||||
|
||||
basedir = config['basedir']
|
||||
quiet = config['quiet']
|
||||
os.chdir(basedir)
|
||||
f = open("twistd.pid", "rt")
|
||||
self.pid = int(f.read().strip())
|
||||
if quiet:
|
||||
os.kill(self.pid, signal.SIGHUP)
|
||||
return
|
||||
|
||||
# keep reading twistd.log. Display all messages between "loading
|
||||
# configuration from ..." and "configuration update complete" or
|
||||
# "I will keep using the previous config file instead.", or until
|
||||
# 5 seconds have elapsed.
|
||||
|
||||
self.sent_signal = False
|
||||
lw = LogWatcher("twistd.log")
|
||||
d = lw.start()
|
||||
d.addCallbacks(self.success, self.failure)
|
||||
reactor.callLater(0.2, self.sighup)
|
||||
reactor.run()
|
||||
|
||||
def sighup(self):
|
||||
if self.sent_signal:
|
||||
return
|
||||
print "sending SIGHUP to process %d" % self.pid
|
||||
self.sent_signal = True
|
||||
os.kill(self.pid, signal.SIGHUP)
|
||||
|
||||
def success(self, res):
|
||||
print """
|
||||
Reconfiguration appears to have completed successfully.
|
||||
"""
|
||||
reactor.stop()
|
||||
|
||||
def failure(self, why):
|
||||
if why.check(BuildmasterTimeoutError):
|
||||
print "Never saw reconfiguration finish."
|
||||
elif why.check(ReconfigError):
|
||||
print """
|
||||
Reconfiguration failed. Please inspect the master.cfg file for errors,
|
||||
correct them, then try 'buildbot reconfig' again.
|
||||
"""
|
||||
elif why.check(IOError):
|
||||
# we were probably unable to open the file in the first place
|
||||
self.sighup()
|
||||
else:
|
||||
print "Error while following twistd.log: %s" % why
|
||||
reactor.stop()
|
||||
|
||||
def reconfig(config):
|
||||
r = Reconfigurator()
|
||||
r.run(config)
|
||||
|
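The 'buildbot reconfig' subcommand ends up calling this helper with the parsed options; an equivalent direct invocation would look roughly like the following (the basedir is illustrative):

from buildbot.scripts.reconfig import reconfig
reconfig({'basedir': '/home/buildbot/master', 'quiet': False})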
735
tools/buildbot/buildbot/scripts/runner.py
Normal file
@ -0,0 +1,735 @@
|
||||
# -*- test-case-name: buildbot.test.test_runner -*-
|
||||
|
||||
# N.B.: don't import anything that might pull in a reactor yet. Some of our
|
||||
# subcommands want to load modules that need the gtk reactor.
|
||||
import os, sys, stat, re, time
|
||||
from twisted.python import usage, util, runtime
|
||||
|
||||
# this is mostly just a front-end for mktap, twistd, and kill(1), but in the
|
||||
# future it will also provide an interface to some developer tools that talk
|
||||
# directly to a remote buildmaster (like 'try' and a status client)
|
||||
|
||||
# the create/start/stop commands should all be run as the same user,
|
||||
# preferably a separate 'buildbot' account.
|
||||
|
||||
class MakerBase(usage.Options):
|
||||
optFlags = [
|
||||
['help', 'h', "Display this message"],
|
||||
["quiet", "q", "Do not emit the commands being run"],
|
||||
]
|
||||
|
||||
#["basedir", "d", None, "Base directory for the buildmaster"],
|
||||
opt_h = usage.Options.opt_help
|
||||
|
||||
def parseArgs(self, *args):
|
||||
if len(args) > 0:
|
||||
self['basedir'] = args[0]
|
||||
else:
|
||||
self['basedir'] = None
|
||||
if len(args) > 1:
|
||||
raise usage.UsageError("I wasn't expecting so many arguments")
|
||||
|
||||
def postOptions(self):
|
||||
if self['basedir'] is None:
|
||||
raise usage.UsageError("<basedir> parameter is required")
|
||||
self['basedir'] = os.path.abspath(self['basedir'])
|
||||
|
||||
makefile_sample = """# -*- makefile -*-
|
||||
|
||||
# This is a simple makefile which lives in a buildmaster/buildslave
|
||||
# directory (next to the buildbot.tac file). It allows you to start/stop the
|
||||
# master or slave by doing 'make start' or 'make stop'.
|
||||
|
||||
# The 'reconfig' target will tell a buildmaster to reload its config file.
|
||||
|
||||
start:
|
||||
twistd --no_save -y buildbot.tac
|
||||
|
||||
stop:
|
||||
kill `cat twistd.pid`
|
||||
|
||||
reconfig:
|
||||
kill -HUP `cat twistd.pid`
|
||||
|
||||
log:
|
||||
tail -f twistd.log
|
||||
"""
|
||||
|
||||
class Maker:
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.basedir = config['basedir']
|
||||
self.force = config['force']
|
||||
self.quiet = config['quiet']
|
||||
|
||||
def mkdir(self):
|
||||
if os.path.exists(self.basedir):
|
||||
if not self.quiet:
|
||||
print "updating existing installation"
|
||||
return
|
||||
if not self.quiet: print "mkdir", self.basedir
|
||||
os.mkdir(self.basedir)
|
||||
|
||||
def mkinfo(self):
|
||||
path = os.path.join(self.basedir, "info")
|
||||
if not os.path.exists(path):
|
||||
if not self.quiet: print "mkdir", path
|
||||
os.mkdir(path)
|
||||
created = False
|
||||
admin = os.path.join(path, "admin")
|
||||
if not os.path.exists(admin):
|
||||
if not self.quiet:
|
||||
print "Creating info/admin, you need to edit it appropriately"
|
||||
f = open(admin, "wt")
|
||||
f.write("Your Name Here <admin@youraddress.invalid>\n")
|
||||
f.close()
|
||||
created = True
|
||||
host = os.path.join(path, "host")
|
||||
if not os.path.exists(host):
|
||||
if not self.quiet:
|
||||
print "Creating info/host, you need to edit it appropriately"
|
||||
f = open(host, "wt")
|
||||
f.write("Please put a description of this build host here\n")
|
||||
f.close()
|
||||
created = True
|
||||
if created and not self.quiet:
|
||||
print "Please edit the files in %s appropriately." % path
|
||||
|
||||
def chdir(self):
|
||||
if not self.quiet: print "chdir", self.basedir
|
||||
os.chdir(self.basedir)
|
||||
|
||||
def makeTAC(self, contents, secret=False):
|
||||
tacfile = "buildbot.tac"
|
||||
if os.path.exists(tacfile):
|
||||
oldcontents = open(tacfile, "rt").read()
|
||||
if oldcontents == contents:
|
||||
if not self.quiet:
|
||||
print "buildbot.tac already exists and is correct"
|
||||
return
|
||||
if not self.quiet:
|
||||
print "not touching existing buildbot.tac"
|
||||
print "creating buildbot.tac.new instead"
|
||||
tacfile = "buildbot.tac.new"
|
||||
f = open(tacfile, "wt")
|
||||
f.write(contents)
|
||||
f.close()
|
||||
if secret:
|
||||
os.chmod(tacfile, 0600)
|
||||
|
||||
def makefile(self):
|
||||
target = "Makefile.sample"
|
||||
if os.path.exists(target):
|
||||
oldcontents = open(target, "rt").read()
|
||||
if oldcontents == makefile_sample:
|
||||
if not self.quiet:
|
||||
print "Makefile.sample already exists and is correct"
|
||||
return
|
||||
if not self.quiet:
|
||||
print "replacing Makefile.sample"
|
||||
else:
|
||||
if not self.quiet:
|
||||
print "creating Makefile.sample"
|
||||
f = open(target, "wt")
|
||||
f.write(makefile_sample)
|
||||
f.close()
|
||||
|
||||
def sampleconfig(self, source):
|
||||
target = "master.cfg.sample"
|
||||
config_sample = open(source, "rt").read()
|
||||
if os.path.exists(target):
|
||||
oldcontents = open(target, "rt").read()
|
||||
if oldcontents == config_sample:
|
||||
if not self.quiet:
|
||||
print "master.cfg.sample already exists and is up-to-date"
|
||||
return
|
||||
if not self.quiet:
|
||||
print "replacing master.cfg.sample"
|
||||
else:
|
||||
if not self.quiet:
|
||||
print "creating master.cfg.sample"
|
||||
f = open(target, "wt")
|
||||
f.write(config_sample)
|
||||
f.close()
|
||||
os.chmod(target, 0600)
|
||||
|
||||
class MasterOptions(MakerBase):
|
||||
optFlags = [
|
||||
["force", "f",
|
||||
"Re-use an existing directory (will not overwrite master.cfg file)"],
|
||||
]
|
||||
optParameters = [
|
||||
["config", "c", "master.cfg", "name of the buildmaster config file"],
|
||||
]
|
||||
def getSynopsis(self):
|
||||
return "Usage: buildbot create-master [options] <basedir>"
|
||||
|
||||
longdesc = """
|
||||
This command creates a buildmaster working directory and buildbot.tac
|
||||
file. The master will live in <dir> and create various files there.
|
||||
|
||||
At runtime, the master will read a configuration file (named
|
||||
'master.cfg' by default) in its basedir. This file should contain python
|
||||
code which eventually defines a dictionary named 'BuildmasterConfig'.
|
||||
The elements of this dictionary are used to configure the Buildmaster.
|
||||
See doc/config.xhtml for details about what can be controlled through
|
||||
this interface."""
|
||||
|
||||
masterTAC = """
|
||||
from twisted.application import service
|
||||
from buildbot.master import BuildMaster
|
||||
|
||||
basedir = r'%(basedir)s'
|
||||
configfile = r'%(config)s'
|
||||
|
||||
application = service.Application('buildmaster')
|
||||
BuildMaster(basedir, configfile).setServiceParent(application)
|
||||
|
||||
"""
|
||||
|
||||
def createMaster(config):
|
||||
m = Maker(config)
|
||||
m.mkdir()
|
||||
m.chdir()
|
||||
contents = masterTAC % config
|
||||
m.makeTAC(contents)
|
||||
m.sampleconfig(util.sibpath(__file__, "sample.cfg"))
|
||||
m.makefile()
|
||||
|
||||
if not m.quiet: print "buildmaster configured in %s" % m.basedir
|
||||
|
||||
class SlaveOptions(MakerBase):
|
||||
optFlags = [
|
||||
["force", "f", "Re-use an existing directory"],
|
||||
]
|
||||
optParameters = [
|
||||
# ["name", "n", None, "Name for this build slave"],
|
||||
# ["passwd", "p", None, "Password for this build slave"],
|
||||
# ["basedir", "d", ".", "Base directory to use"],
|
||||
# ["master", "m", "localhost:8007",
|
||||
# "Location of the buildmaster (host:port)"],
|
||||
|
||||
["keepalive", "k", 600,
|
||||
"Interval at which keepalives should be sent (in seconds)"],
|
||||
["usepty", None, 1,
|
||||
"(1 or 0) child processes should be run in a pty"],
|
||||
["umask", None, "None",
|
||||
"controls permissions of generated files. Use --umask=022 to be world-readable"],
|
||||
]
|
||||
|
||||
longdesc = """
|
||||
This command creates a buildslave working directory and buildbot.tac
|
||||
file. The bot will use the <name> and <passwd> arguments to authenticate
|
||||
itself when connecting to the master. All commands are run in a
|
||||
build-specific subdirectory of <basedir>. <master> is a string of the
|
||||
form 'hostname:port', and specifies where the buildmaster can be reached.
|
||||
|
||||
<name>, <passwd>, and <master> will be provided by the buildmaster
|
||||
administrator for your bot. You must choose <basedir> yourself.
|
||||
"""
|
||||
|
||||
def getSynopsis(self):
|
||||
return "Usage: buildbot create-slave [options] <basedir> <master> <name> <passwd>"
|
||||
|
||||
def parseArgs(self, *args):
|
||||
if len(args) < 4:
|
||||
raise usage.UsageError("command needs more arguments")
|
||||
basedir, master, name, passwd = args
|
||||
self['basedir'] = basedir
|
||||
self['master'] = master
|
||||
self['name'] = name
|
||||
self['passwd'] = passwd
|
||||
|
||||
def postOptions(self):
|
||||
MakerBase.postOptions(self)
|
||||
self['usepty'] = int(self['usepty'])
|
||||
self['keepalive'] = int(self['keepalive'])
|
||||
if self['master'].find(":") == -1:
|
||||
raise usage.UsageError("--master must be in the form host:portnum")
|
||||
|
||||
slaveTAC = """
|
||||
from twisted.application import service
|
||||
from buildbot.slave.bot import BuildSlave
|
||||
|
||||
basedir = r'%(basedir)s'
|
||||
host = '%(host)s'
|
||||
port = %(port)d
|
||||
slavename = '%(name)s'
|
||||
passwd = '%(passwd)s'
|
||||
keepalive = %(keepalive)d
|
||||
usepty = %(usepty)d
|
||||
umask = %(umask)s
|
||||
|
||||
application = service.Application('buildslave')
|
||||
s = BuildSlave(host, port, slavename, passwd, basedir, keepalive, usepty,
|
||||
umask=umask)
|
||||
s.setServiceParent(application)
|
||||
|
||||
"""
|
||||
|
||||
def createSlave(config):
|
||||
m = Maker(config)
|
||||
m.mkdir()
|
||||
m.chdir()
|
||||
try:
|
||||
master = config['master']
|
||||
host, port = re.search(r'(.+):(\d+)', master).groups()
|
||||
config['host'] = host
|
||||
config['port'] = int(port)
|
||||
except:
|
||||
print "unparseable master location '%s'" % master
|
||||
print " expecting something more like localhost:8007"
|
||||
raise
|
||||
contents = slaveTAC % config
|
||||
|
||||
m.makeTAC(contents, secret=True)
|
||||
|
||||
m.makefile()
|
||||
m.mkinfo()
|
||||
|
||||
if not m.quiet: print "buildslave configured in %s" % m.basedir
|
||||
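# Illustrative usage, not part of the original source (all values invented),
# matching the synopsis in SlaveOptions above:
#
#   buildbot create-slave /home/bb/slave buildmaster.example.com:9989 bot1name bot1passwd
#
# createSlave() splits the host:port argument with the regex above, fills in
# the slaveTAC template, and writes it out via makeTAC(secret=True).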
|
||||
|
||||
|
||||
def stop(config, signame="TERM", wait=False):
|
||||
import signal
|
||||
basedir = config['basedir']
|
||||
quiet = config['quiet']
|
||||
os.chdir(basedir)
|
||||
f = open("twistd.pid", "rt")
|
||||
pid = int(f.read().strip())
|
||||
signum = getattr(signal, "SIG"+signame)
|
||||
timer = 0
|
||||
os.kill(pid, signum)
|
||||
if not wait:
|
||||
if not quiet:
|
||||
print "sent SIG%s to process" % signame
|
||||
return
|
||||
time.sleep(0.1)
|
||||
while timer < 5:
|
||||
# poll once per second until the process goes away, up to 5 seconds
|
||||
try:
|
||||
os.kill(pid, 0)
|
||||
except OSError:
|
||||
if not quiet:
|
||||
print "buildbot process %d is dead" % pid
|
||||
return
|
||||
timer += 1
|
||||
time.sleep(1)
|
||||
if not quiet:
|
||||
print "never saw process go away"
|
||||
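# Minimal sketch of what stop() boils down to, not part of the original
# source (it omits the quiet flag and the wait/poll loop shown above):
#
#   import os, signal
#   pid = int(open("twistd.pid", "rt").read().strip())
#   os.kill(pid, signal.SIGTERM)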
|
||||
def restart(config):
|
||||
quiet = config['quiet']
|
||||
from buildbot.scripts.startup import start
|
||||
stop(config, wait=True)
|
||||
if not quiet:
|
||||
print "now restarting buildbot process.."
|
||||
start(config)
|
||||
|
||||
|
||||
def loadOptions(filename="options", here=None, home=None):
|
||||
"""Find the .buildbot/FILENAME file. Crawl from the current directory up
|
||||
towards the root, and also look in ~/.buildbot . The first directory
|
||||
that's owned by the user and has the file we're looking for wins. Windows
|
||||
skips the owned-by-user test.
|
||||
|
||||
@rtype: dict
|
||||
@return: a dictionary of names defined in the options file. If no options
|
||||
file was found, return an empty dict.
|
||||
"""
|
||||
|
||||
if here is None:
|
||||
here = os.getcwd()
|
||||
here = os.path.abspath(here)
|
||||
|
||||
if home is None:
|
||||
if runtime.platformType == 'win32':
|
||||
home = os.path.join(os.environ['APPDATA'], "buildbot")
|
||||
else:
|
||||
home = os.path.expanduser("~/.buildbot")
|
||||
|
||||
searchpath = []
|
||||
toomany = 20
|
||||
while True:
|
||||
searchpath.append(os.path.join(here, ".buildbot"))
|
||||
next = os.path.dirname(here)
|
||||
if next == here:
|
||||
break # we've hit the root
|
||||
here = next
|
||||
toomany -= 1 # just in case
|
||||
if toomany == 0:
|
||||
raise ValueError("Hey, I seem to have wandered up into the "
|
||||
"infinite glories of the heavens. Oops.")
|
||||
searchpath.append(home)
|
||||
|
||||
localDict = {}
|
||||
|
||||
for d in searchpath:
|
||||
if os.path.isdir(d):
|
||||
if runtime.platformType != 'win32':
|
||||
if os.stat(d)[stat.ST_UID] != os.getuid():
|
||||
print "skipping %s because you don't own it" % d
|
||||
continue # security, skip other people's directories
|
||||
optfile = os.path.join(d, filename)
|
||||
if os.path.exists(optfile):
|
||||
try:
|
||||
f = open(optfile, "r")
|
||||
options = f.read()
|
||||
exec options in localDict
|
||||
except:
|
||||
print "error while reading %s" % optfile
|
||||
raise
|
||||
break
|
||||
|
||||
for k in localDict.keys():
|
||||
if k.startswith("__"):
|
||||
del localDict[k]
|
||||
return localDict
|
||||
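# Illustrative sketch of an options file, not part of the original source.
# loadOptions() simply exec's the file, so ~/.buildbot/options (or a
# .buildbot/options in a parent directory) is plain Python that assigns
# names. The keys below are the ones consulted by this module and by the
# 'try' client later in this commit; the values are invented:
#
#   master = "buildmaster.example.com:9989"        # debugclient / sendchange
#   masterstatus = "buildmaster.example.com:9988"  # statuslog / statusgui
#   username = "alice"
#   debugPassword = "debugpw"
#   try_connect = "ssh"
#   try_builders = ["full-linux", "full-win32"]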
|
||||
class StartOptions(MakerBase):
|
||||
optFlags = [
|
||||
['quiet', 'q', "Don't display startup log messages"],
|
||||
]
|
||||
def getSynopsis(self):
|
||||
return "Usage: buildbot start <basedir>"
|
||||
|
||||
class StopOptions(MakerBase):
|
||||
def getSynopsis(self):
|
||||
return "Usage: buildbot stop <basedir>"
|
||||
|
||||
class ReconfigOptions(MakerBase):
|
||||
optFlags = [
|
||||
['quiet', 'q', "Don't display log messages about reconfiguration"],
|
||||
]
|
||||
def getSynopsis(self):
|
||||
return "Usage: buildbot reconfig <basedir>"
|
||||
|
||||
|
||||
|
||||
class RestartOptions(MakerBase):
|
||||
optFlags = [
|
||||
['quiet', 'q', "Don't display startup log messages"],
|
||||
]
|
||||
def getSynopsis(self):
|
||||
return "Usage: buildbot restart <basedir>"
|
||||
|
||||
class DebugClientOptions(usage.Options):
|
||||
optFlags = [
|
||||
['help', 'h', "Display this message"],
|
||||
]
|
||||
optParameters = [
|
||||
["master", "m", None,
|
||||
"Location of the buildmaster's slaveport (host:port)"],
|
||||
["passwd", "p", None, "Debug password to use"],
|
||||
]
|
||||
|
||||
def parseArgs(self, *args):
|
||||
if len(args) > 0:
|
||||
self['master'] = args[0]
|
||||
if len(args) > 1:
|
||||
self['passwd'] = args[1]
|
||||
if len(args) > 2:
|
||||
raise usage.UsageError("I wasn't expecting so many arguments")
|
||||
|
||||
def debugclient(config):
|
||||
from buildbot.clients import debug
|
||||
opts = loadOptions()
|
||||
|
||||
master = config.get('master')
|
||||
if not master:
|
||||
master = opts.get('master')
|
||||
if master is None:
|
||||
raise usage.UsageError("master must be specified: on the command "
|
||||
"line or in ~/.buildbot/options")
|
||||
|
||||
passwd = config.get('passwd')
|
||||
if not passwd:
|
||||
passwd = opts.get('debugPassword')
|
||||
if passwd is None:
|
||||
raise usage.UsageError("passwd must be specified: on the command "
|
||||
"line or in ~/.buildbot/options")
|
||||
|
||||
d = debug.DebugWidget(master, passwd)
|
||||
d.run()
|
||||
|
||||
class StatusClientOptions(usage.Options):
|
||||
optFlags = [
|
||||
['help', 'h', "Display this message"],
|
||||
]
|
||||
optParameters = [
|
||||
["master", "m", None,
|
||||
"Location of the buildmaster's status port (host:port)"],
|
||||
]
|
||||
|
||||
def parseArgs(self, *args):
|
||||
if len(args) > 0:
|
||||
self['master'] = args[0]
|
||||
if len(args) > 1:
|
||||
raise usage.UsageError("I wasn't expecting so many arguments")
|
||||
|
||||
def statuslog(config):
|
||||
from buildbot.clients import base
|
||||
opts = loadOptions()
|
||||
master = config.get('master')
|
||||
if not master:
|
||||
master = opts.get('masterstatus')
|
||||
if master is None:
|
||||
raise usage.UsageError("master must be specified: on the command "
|
||||
"line or in ~/.buildbot/options")
|
||||
c = base.TextClient(master)
|
||||
c.run()
|
||||
|
||||
def statusgui(config):
|
||||
from buildbot.clients import gtkPanes
|
||||
opts = loadOptions()
|
||||
master = config.get('master')
|
||||
if not master:
|
||||
master = opts.get('masterstatus')
|
||||
if master is None:
|
||||
raise usage.UsageError("master must be specified: on the command "
|
||||
"line or in ~/.buildbot/options")
|
||||
c = gtkPanes.GtkClient(master)
|
||||
c.run()
|
||||
|
||||
class SendChangeOptions(usage.Options):
|
||||
optParameters = [
|
||||
("master", "m", None,
|
||||
"Location of the buildmaster's PBListener (host:port)"),
|
||||
("username", "u", None, "Username performing the commit"),
|
||||
("branch", "b", None, "Branch specifier"),
|
||||
("revision", "r", None, "Revision specifier (string)"),
|
||||
("revision_number", "n", None, "Revision specifier (integer)"),
|
||||
("revision_file", None, None, "Filename containing revision spec"),
|
||||
("comments", "m", None, "log message"),
|
||||
("logfile", "F", None,
|
||||
"Read the log messages from this file (- for stdin)"),
|
||||
]
|
||||
def getSynopsis(self):
|
||||
return "Usage: buildbot sendchange [options] filenames.."
|
||||
def parseArgs(self, *args):
|
||||
self['files'] = args
|
||||
|
||||
|
||||
def sendchange(config, runReactor=False):
|
||||
"""Send a single change to the buildmaster's PBChangeSource. The
|
||||
connection will be dropped as soon as the Change has been sent."""
|
||||
from buildbot.clients.sendchange import Sender
|
||||
|
||||
opts = loadOptions()
|
||||
user = config.get('username', opts.get('username'))
|
||||
master = config.get('master', opts.get('master'))
|
||||
branch = config.get('branch', opts.get('branch'))
|
||||
revision = config.get('revision')
|
||||
# SVN and P4 use numeric revisions
|
||||
if config.get("revision_number"):
|
||||
revision = int(config['revision_number'])
|
||||
if config.get("revision_file"):
|
||||
revision = open(config["revision_file"],"r").read()
|
||||
|
||||
comments = config.get('comments')
|
||||
if not comments and config.get('logfile'):
|
||||
if config['logfile'] == "-":
|
||||
f = sys.stdin
|
||||
else:
|
||||
f = open(config['logfile'], "rt")
|
||||
comments = f.read()
|
||||
if comments is None:
|
||||
comments = ""
|
||||
|
||||
files = config.get('files', [])
|
||||
|
||||
assert user, "you must provide a username"
|
||||
assert master, "you must provide the master location"
|
||||
|
||||
s = Sender(master, user)
|
||||
d = s.send(branch, revision, comments, files)
|
||||
if runReactor:
|
||||
d.addCallbacks(s.printSuccess, s.printFailure)
|
||||
d.addBoth(s.stop)
|
||||
s.run()
|
||||
return d
|
||||
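# Illustrative invocation, not part of the original source (values invented):
#
#   buildbot sendchange --master buildmaster.example.com:9989 \
#       --username alice --branch trunk --revision 1234 \
#       --comments "fix the frobnicator" src/frob.c src/frob.h
#
# The positional arguments become the Change's file list; --revision_number
# or --revision_file may stand in for --revision, and --logfile for
# --comments.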
|
||||
|
||||
class ForceOptions(usage.Options):
|
||||
optParameters = [
|
||||
["builder", None, None, "which Builder to start"],
|
||||
["branch", None, None, "which branch to build"],
|
||||
["revision", None, None, "which revision to build"],
|
||||
["reason", None, None, "the reason for starting the build"],
|
||||
]
|
||||
|
||||
def parseArgs(self, *args):
|
||||
args = list(args)
|
||||
if len(args) > 0:
|
||||
if self['builder'] is not None:
|
||||
raise usage.UsageError("--builder provided in two ways")
|
||||
self['builder'] = args.pop(0)
|
||||
if len(args) > 0:
|
||||
if self['reason'] is not None:
|
||||
raise usage.UsageError("--reason provided in two ways")
|
||||
self['reason'] = " ".join(args)
|
||||
|
||||
|
||||
class TryOptions(usage.Options):
|
||||
optParameters = [
|
||||
["connect", "c", None,
|
||||
"how to reach the buildmaster, either 'ssh' or 'pb'"],
|
||||
# for ssh, use --tryhost, --username, and --trydir
|
||||
["tryhost", None, None,
|
||||
"the hostname (used by ssh) for the buildmaster"],
|
||||
["trydir", None, None,
|
||||
"the directory (on the tryhost) where tryjobs are deposited"],
|
||||
["username", "u", None, "Username performing the trial build"],
|
||||
# for PB, use --master, --username, and --passwd
|
||||
["master", "m", None,
|
||||
"Location of the buildmaster's PBListener (host:port)"],
|
||||
["passwd", None, None, "password for PB authentication"],
|
||||
|
||||
["vc", None, None,
|
||||
"The VC system in use, one of: cvs,svn,tla,baz,darcs"],
|
||||
["branch", None, None,
|
||||
"The branch in use, for VC systems that can't figure it out"
|
||||
" themselves"],
|
||||
|
||||
["builder", "b", None,
|
||||
"Run the trial build on this Builder. Can be used multiple times."],
|
||||
]
|
||||
|
||||
optFlags = [
|
||||
["wait", None, "wait until the builds have finished"],
|
||||
]
|
||||
|
||||
def __init__(self):
|
||||
super(TryOptions, self).__init__()
|
||||
self['builders'] = []
|
||||
|
||||
def opt_builder(self, option):
|
||||
self['builders'].append(option)
|
||||
|
||||
def getSynopsis(self):
|
||||
return "Usage: buildbot try [options]"
|
||||
|
||||
def doTry(config):
|
||||
from buildbot.scripts import tryclient
|
||||
t = tryclient.Try(config)
|
||||
t.run()
|
||||
|
||||
class TryServerOptions(usage.Options):
|
||||
optParameters = [
|
||||
["jobdir", None, None, "the jobdir (maildir) for submitting jobs"],
|
||||
]
|
||||
|
||||
def doTryServer(config):
|
||||
import md5
|
||||
jobdir = os.path.expanduser(config["jobdir"])
|
||||
job = sys.stdin.read()
|
||||
# now do a 'safecat'-style write to jobdir/tmp, then move atomically to
|
||||
# jobdir/new . Rather than come up with a unique name randomly, I'm just
|
||||
# going to MD5 the contents and prepend a timestamp.
|
||||
timestring = "%d" % time.time()
|
||||
jobhash = md5.new(job).hexdigest()
|
||||
fn = "%s-%s" % (timestring, jobhash)
|
||||
tmpfile = os.path.join(jobdir, "tmp", fn)
|
||||
newfile = os.path.join(jobdir, "new", fn)
|
||||
f = open(tmpfile, "w")
|
||||
f.write(job)
|
||||
f.close()
|
||||
os.rename(tmpfile, newfile)
|
||||
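# Illustrative result, not part of the original source (values invented): a
# job read from stdin at unix time 1177000000 is first written to
#
#   <jobdir>/tmp/1177000000-<md5 hexdigest of the job>
#
# and then os.rename()d into <jobdir>/new/, so the buildmaster's maildir
# watcher only ever sees complete files.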
|
||||
|
||||
class Options(usage.Options):
|
||||
synopsis = "Usage: buildbot <command> [command options]"
|
||||
|
||||
subCommands = [
|
||||
# the following are all admin commands
|
||||
['create-master', None, MasterOptions,
|
||||
"Create and populate a directory for a new buildmaster"],
|
||||
['create-slave', None, SlaveOptions,
|
||||
"Create and populate a directory for a new buildslave"],
|
||||
['start', None, StartOptions, "Start a buildmaster or buildslave"],
|
||||
['stop', None, StopOptions, "Stop a buildmaster or buildslave"],
|
||||
['restart', None, RestartOptions,
|
||||
"Restart a buildmaster or buildslave"],
|
||||
|
||||
['reconfig', None, ReconfigOptions,
|
||||
"SIGHUP a buildmaster to make it re-read the config file"],
|
||||
['sighup', None, ReconfigOptions,
|
||||
"SIGHUP a buildmaster to make it re-read the config file"],
|
||||
|
||||
['sendchange', None, SendChangeOptions,
|
||||
"Send a change to the buildmaster"],
|
||||
|
||||
['debugclient', None, DebugClientOptions,
|
||||
"Launch a small debug panel GUI"],
|
||||
|
||||
['statuslog', None, StatusClientOptions,
|
||||
"Emit current builder status to stdout"],
|
||||
['statusgui', None, StatusClientOptions,
|
||||
"Display a small window showing current builder status"],
|
||||
|
||||
#['force', None, ForceOptions, "Run a build"],
|
||||
['try', None, TryOptions, "Run a build with your local changes"],
|
||||
|
||||
['tryserver', None, TryServerOptions,
|
||||
"buildmaster-side 'try' support function, not for users"],
|
||||
|
||||
# TODO: 'watch'
|
||||
]
|
||||
|
||||
def opt_version(self):
|
||||
import buildbot
|
||||
print "Buildbot version: %s" % buildbot.version
|
||||
usage.Options.opt_version(self)
|
||||
|
||||
def opt_verbose(self):
|
||||
from twisted.python import log
|
||||
log.startLogging(sys.stderr)
|
||||
|
||||
def postOptions(self):
|
||||
if not hasattr(self, 'subOptions'):
|
||||
raise usage.UsageError("must specify a command")
|
||||
|
||||
|
||||
def run():
|
||||
config = Options()
|
||||
try:
|
||||
config.parseOptions()
|
||||
except usage.error, e:
|
||||
print "%s: %s" % (sys.argv[0], e)
|
||||
print
|
||||
c = getattr(config, 'subOptions', config)
|
||||
print str(c)
|
||||
sys.exit(1)
|
||||
|
||||
command = config.subCommand
|
||||
so = config.subOptions
|
||||
|
||||
if command == "create-master":
|
||||
createMaster(so)
|
||||
elif command == "create-slave":
|
||||
createSlave(so)
|
||||
elif command == "start":
|
||||
from buildbot.scripts.startup import start
|
||||
start(so)
|
||||
elif command == "stop":
|
||||
stop(so, wait=True)
|
||||
elif command == "restart":
|
||||
restart(so)
|
||||
elif command == "reconfig" or command == "sighup":
|
||||
from buildbot.scripts.reconfig import Reconfigurator
|
||||
Reconfigurator().run(so)
|
||||
elif command == "sendchange":
|
||||
sendchange(so, True)
|
||||
elif command == "debugclient":
|
||||
debugclient(so)
|
||||
elif command == "statuslog":
|
||||
statuslog(so)
|
||||
elif command == "statusgui":
|
||||
statusgui(so)
|
||||
elif command == "try":
|
||||
doTry(so)
|
||||
elif command == "tryserver":
|
||||
doTryServer(so)
|
||||
|
||||
|
175
tools/buildbot/buildbot/scripts/sample.cfg
Normal file
@ -0,0 +1,175 @@
|
||||
# -*- python -*-
|
||||
# ex: set syntax=python:
|
||||
|
||||
# This is a sample buildmaster config file. It must be installed as
|
||||
# 'master.cfg' in your buildmaster's base directory (although the filename
|
||||
# can be changed with the --basedir option to 'mktap buildbot master').
|
||||
|
||||
# It has one job: define a dictionary named BuildmasterConfig. This
|
||||
# dictionary has a variety of keys to control different aspects of the
|
||||
# buildmaster. They are documented in docs/config.xhtml .
|
||||
|
||||
|
||||
# This is the dictionary that the buildmaster pays attention to. We also use
|
||||
# a shorter alias to save typing.
|
||||
c = BuildmasterConfig = {}
|
||||
|
||||
####### BUILDSLAVES
|
||||
|
||||
# the 'bots' list defines the set of allowable buildslaves. Each element is a
|
||||
# tuple of bot-name and bot-password. These correspond to values given to the
|
||||
# buildslave's mktap invocation.
|
||||
c['bots'] = [("bot1name", "bot1passwd")]
|
||||
|
||||
|
||||
# 'slavePortnum' defines the TCP port to listen on. This must match the value
|
||||
# configured into the buildslaves (with their --master option)
|
||||
|
||||
c['slavePortnum'] = 9989
|
||||
|
||||
|
||||
####### CHANGESOURCES
|
||||
|
||||
# the 'sources' list tells the buildmaster how it should find out about
|
||||
# source code changes. Any class which implements IChangeSource can be added
|
||||
# to this list: there are several in buildbot/changes/*.py to choose from.
|
||||
|
||||
c['sources'] = []
|
||||
|
||||
# For example, if you had CVSToys installed on your repository, and your
|
||||
# CVSROOT/freshcfg file had an entry like this:
|
||||
#pb = ConfigurationSet([
|
||||
# (None, None, None, PBService(userpass=('foo', 'bar'), port=4519)),
|
||||
# ])
|
||||
|
||||
# then you could use the following buildmaster Change Source to subscribe to
|
||||
# the FreshCVS daemon and be notified on every commit:
|
||||
#
|
||||
#from buildbot.changes.freshcvs import FreshCVSSource
|
||||
#fc_source = FreshCVSSource("cvs.example.com", 4519, "foo", "bar")
|
||||
#c['sources'].append(fc_source)
|
||||
|
||||
# or, use a PBChangeSource, and then have your repository's commit script run
|
||||
# 'buildbot sendchange', or contrib/svn_buildbot.py, or
|
||||
# contrib/arch_buildbot.py :
|
||||
#
|
||||
#from buildbot.changes.pb import PBChangeSource
|
||||
#c['sources'].append(PBChangeSource())
|
||||
|
||||
|
||||
####### SCHEDULERS
|
||||
|
||||
## configure the Schedulers
|
||||
|
||||
from buildbot.scheduler import Scheduler
|
||||
c['schedulers'] = []
|
||||
c['schedulers'].append(Scheduler(name="all", branch=None,
|
||||
treeStableTimer=2*60,
|
||||
builderNames=["buildbot-full"]))
|
||||
|
||||
|
||||
####### BUILDERS
|
||||
|
||||
# the 'builders' list defines the Builders. Each one is configured with a
|
||||
# dictionary, using the following keys:
|
||||
# name (required): the name used to describe this builder
|
||||
# slavename (required): which slave to use, must appear in c['bots']
|
||||
# builddir (required): which subdirectory to run the builder in
|
||||
# factory (required): a BuildFactory to define how the build is run
|
||||
# periodicBuildTime (optional): if set, force a build every N seconds
|
||||
|
||||
# buildbot/process/factory.py provides several BuildFactory classes you can
|
||||
# start with, which implement build processes for common targets (GNU
|
||||
# autoconf projects, CPAN perl modules, etc). The factory.BuildFactory is the
|
||||
# base class, and is configured with a series of BuildSteps. When the build
|
||||
# is run, the appropriate buildslave is told to execute each Step in turn.
|
||||
|
||||
# the first BuildStep is typically responsible for obtaining a copy of the
|
||||
# sources. There are source-obtaining Steps in buildbot/process/step.py for
|
||||
# CVS, SVN, and others.
|
||||
|
||||
cvsroot = ":pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot"
|
||||
cvsmodule = "buildbot"
|
||||
|
||||
builders = []
|
||||
|
||||
from buildbot.process import factory
|
||||
from buildbot.steps.source import CVS
|
||||
from buildbot.steps.shell import Compile
|
||||
from buildbot.steps.python_twisted import Trial
|
||||
f1 = factory.BuildFactory()
|
||||
f1.addStep(CVS,
|
||||
cvsroot=cvsroot, cvsmodule=cvsmodule, login="",
|
||||
mode="copy")
|
||||
f1.addStep(Compile, command=["./setup.py", "build"])
|
||||
f1.addStep(Trial, testpath=".")
|
||||
|
||||
b1 = {'name': "buildbot-full",
|
||||
'slavename': "bot1name",
|
||||
'builddir': "full",
|
||||
'factory': f1,
|
||||
}
|
||||
c['builders'] = [b1]
|
||||
|
||||
|
||||
####### STATUS TARGETS
|
||||
|
||||
# 'status' is a list of Status Targets. The results of each build will be
|
||||
# pushed to these targets. buildbot/status/*.py has a variety to choose from,
|
||||
# including web pages, email senders, and IRC bots.
|
||||
|
||||
c['status'] = []
|
||||
|
||||
from buildbot.status import html
|
||||
c['status'].append(html.Waterfall(http_port=8010))
|
||||
|
||||
# from buildbot.status import mail
|
||||
# c['status'].append(mail.MailNotifier(fromaddr="buildbot@localhost",
|
||||
# extraRecipients=["builds@example.com"],
|
||||
# sendToInterestedUsers=False))
|
||||
#
|
||||
# from buildbot.status import words
|
||||
# c['status'].append(words.IRC(host="irc.example.com", nick="bb",
|
||||
# channels=["#example"]))
|
||||
#
|
||||
# from buildbot.status import client
|
||||
# c['status'].append(client.PBListener(9988))
|
||||
|
||||
|
||||
####### DEBUGGING OPTIONS
|
||||
|
||||
# if you set 'debugPassword', then you can connect to the buildmaster with
|
||||
# the diagnostic tool in contrib/debugclient.py . From this tool, you can
|
||||
# manually force builds and inject changes, which may be useful for testing
|
||||
# your buildmaster without actually committing changes to your repository (or
|
||||
# before you have a functioning 'sources' set up). The debug tool uses the
|
||||
# same port number as the slaves do: 'slavePortnum'.
|
||||
|
||||
#c['debugPassword'] = "debugpassword"
|
||||
|
||||
# if you set 'manhole', you can ssh into the buildmaster and get an
|
||||
# interactive python shell, which may be useful for debugging buildbot
|
||||
# internals. It is probably only useful for buildbot developers. You can also
|
||||
# use an authorized_keys file, or plain telnet.
|
||||
#from buildbot import manhole
|
||||
#c['manhole'] = manhole.PasswordManhole("tcp:9999:interface=127.0.0.1",
|
||||
# "admin", "password")
|
||||
|
||||
|
||||
####### PROJECT IDENTITY
|
||||
|
||||
# the 'projectName' string will be used to describe the project that this
|
||||
# buildbot is working on. For example, it is used as the title of the
|
||||
# waterfall HTML page. The 'projectURL' string will be used to provide a link
|
||||
# from buildbot HTML pages to your project's home page.
|
||||
|
||||
c['projectName'] = "Buildbot"
|
||||
c['projectURL'] = "http://buildbot.sourceforge.net/"
|
||||
|
||||
# the 'buildbotURL' string should point to the location where the buildbot's
|
||||
# internal web server (usually the html.Waterfall page) is visible. This
|
||||
# typically uses the port number set in the Waterfall 'status' entry, but
|
||||
# with an externally-visible host name which the buildbot cannot figure out
|
||||
# without some help.
|
||||
|
||||
c['buildbotURL'] = "http://localhost:8010/"
|
118
tools/buildbot/buildbot/scripts/startup.py
Normal file
@ -0,0 +1,118 @@
|
||||
|
||||
import os, sys, time
|
||||
|
||||
class Follower:
|
||||
def follow(self):
|
||||
from twisted.internet import reactor
|
||||
from buildbot.scripts.reconfig import LogWatcher
|
||||
self.rc = 0
|
||||
print "Following twistd.log until startup finished.."
|
||||
lw = LogWatcher("twistd.log")
|
||||
d = lw.start()
|
||||
d.addCallbacks(self._success, self._failure)
|
||||
reactor.run()
|
||||
return self.rc
|
||||
|
||||
def _success(self, processtype):
|
||||
from twisted.internet import reactor
|
||||
print "The %s appears to have (re)started correctly." % processtype
|
||||
self.rc = 0
|
||||
reactor.stop()
|
||||
|
||||
def _failure(self, why):
|
||||
from twisted.internet import reactor
|
||||
from buildbot.scripts.logwatcher import BuildmasterTimeoutError, \
|
||||
ReconfigError, BuildslaveTimeoutError, BuildSlaveDetectedError
|
||||
if why.check(BuildmasterTimeoutError):
|
||||
print """
|
||||
The buildmaster took more than 5 seconds to start, so we were unable to
|
||||
confirm that it started correctly. Please 'tail twistd.log' and look for a
|
||||
line that says 'configuration update complete' to verify correct startup.
|
||||
"""
|
||||
elif why.check(BuildslaveTimeoutError):
|
||||
print """
|
||||
The buildslave took more than 5 seconds to start and/or connect to the
|
||||
buildmaster, so we were unable to confirm that it started and connected
|
||||
correctly. Please 'tail twistd.log' and look for a line that says 'message
|
||||
from master: attached' to verify correct startup. If you see a bunch of
|
||||
messages like 'will retry in 6 seconds', your buildslave might not have the
|
||||
correct hostname or portnumber for the buildmaster, or the buildmaster might
|
||||
not be running. If you see messages like
|
||||
'Failure: twisted.cred.error.UnauthorizedLogin'
|
||||
then your buildslave might be using the wrong botname or password. Please
|
||||
correct these problems and then restart the buildslave.
|
||||
"""
|
||||
elif why.check(ReconfigError):
|
||||
print """
|
||||
The buildmaster appears to have encountered an error in the master.cfg config
|
||||
file during startup. It is probably running with an empty configuration right
|
||||
now. Please inspect and fix master.cfg, then restart the buildmaster.
|
||||
"""
|
||||
elif why.check(BuildSlaveDetectedError):
|
||||
print """
|
||||
Buildslave is starting up, not following logfile.
|
||||
"""
|
||||
else:
|
||||
print """
|
||||
Unable to confirm that the buildmaster started correctly. You may need to
|
||||
stop it, fix the config file, and restart.
|
||||
"""
|
||||
print why
|
||||
self.rc = 1
|
||||
reactor.stop()
|
||||
|
||||
|
||||
def start(config):
|
||||
os.chdir(config['basedir'])
|
||||
if config['quiet']:
|
||||
return launch(config)
|
||||
|
||||
# we probably can't do this os.fork under windows
|
||||
from twisted.python.runtime import platformType
|
||||
if platformType == "win32":
|
||||
return launch(config)
|
||||
|
||||
# fork a child to launch the daemon, while the parent process tails the
|
||||
# logfile
|
||||
if os.fork():
|
||||
# this is the parent
|
||||
rc = Follower().follow()
|
||||
sys.exit(rc)
|
||||
# this is the child: give the logfile-watching parent a chance to start
|
||||
# watching it before we start the daemon
|
||||
time.sleep(0.2)
|
||||
launch(config)
|
||||
|
||||
def launch(config):
|
||||
sys.path.insert(0, os.path.abspath(os.getcwd()))
|
||||
if os.path.exists("/usr/bin/make") and os.path.exists("Makefile.buildbot"):
|
||||
# Preferring the Makefile lets slave admins do useful things like set
|
||||
# up environment variables for the buildslave.
|
||||
cmd = "make -f Makefile.buildbot start"
|
||||
if not config['quiet']:
|
||||
print cmd
|
||||
os.system(cmd)
|
||||
else:
|
||||
# see if we can launch the application without actually having to
|
||||
# spawn twistd, since spawning processes correctly is a real hassle
|
||||
# on windows.
|
||||
from twisted.python.runtime import platformType
|
||||
argv = ["twistd",
|
||||
"--no_save",
|
||||
"--logfile=twistd.log", # windows doesn't use the same default
|
||||
"--python=buildbot.tac"]
|
||||
if platformType == "win32":
|
||||
argv.append("--reactor=win32")
|
||||
sys.argv = argv
|
||||
|
||||
# this is copied from bin/twistd. twisted-1.3.0 uses twistw, while
|
||||
# twisted-2.0.0 uses _twistw.
|
||||
if platformType == "win32":
|
||||
try:
|
||||
from twisted.scripts._twistw import run
|
||||
except ImportError:
|
||||
from twisted.scripts.twistw import run
|
||||
else:
|
||||
from twisted.scripts.twistd import run
|
||||
run()
|
||||
|
578
tools/buildbot/buildbot/scripts/tryclient.py
Normal file
@ -0,0 +1,578 @@
|
||||
# -*- test-case-name: buildbot.test.test_scheduler,buildbot.test.test_vc -*-
|
||||
|
||||
import sys, os, re, time, random
|
||||
from twisted.internet import utils, protocol, defer, reactor, task
|
||||
from twisted.spread import pb
|
||||
from twisted.cred import credentials
|
||||
from twisted.python import log
|
||||
|
||||
from buildbot.sourcestamp import SourceStamp
|
||||
from buildbot.scripts import runner
|
||||
from buildbot.util import now
|
||||
from buildbot.status import builder
|
||||
from buildbot.twcompat import which
|
||||
|
||||
class SourceStampExtractor:
|
||||
|
||||
def __init__(self, treetop, branch):
|
||||
self.treetop = treetop
|
||||
self.branch = branch
|
||||
self.exe = which(self.vcexe)[0]
|
||||
|
||||
def dovc(self, cmd):
|
||||
"""This accepts the arguments of a command, without the actual
|
||||
command itself."""
|
||||
env = os.environ.copy()
|
||||
env['LC_ALL'] = "C"
|
||||
return utils.getProcessOutput(self.exe, cmd, env=env,
|
||||
path=self.treetop)
|
||||
|
||||
def get(self):
|
||||
"""Return a Deferred that fires with a SourceStamp instance."""
|
||||
d = self.getBaseRevision()
|
||||
d.addCallback(self.getPatch)
|
||||
d.addCallback(self.done)
|
||||
return d
|
||||
def readPatch(self, res, patchlevel):
|
||||
self.patch = (patchlevel, res)
|
||||
def done(self, res):
|
||||
# TODO: figure out the branch too
|
||||
ss = SourceStamp(self.branch, self.baserev, self.patch)
|
||||
return ss
|
||||
|
||||
class CVSExtractor(SourceStampExtractor):
|
||||
patchlevel = 0
|
||||
vcexe = "cvs"
|
||||
def getBaseRevision(self):
|
||||
# this depends upon our local clock and the repository's clock being
|
||||
# reasonably synchronized with each other. We express everything in
|
||||
# UTC because the '%z' format specifier for strftime doesn't always
|
||||
# work.
|
||||
self.baserev = time.strftime("%Y-%m-%d %H:%M:%S +0000",
|
||||
time.gmtime(now()))
|
||||
return defer.succeed(None)
|
||||
|
||||
def getPatch(self, res):
|
||||
# the -q tells CVS to not announce each directory as it works
|
||||
if self.branch is not None:
|
||||
# 'cvs diff' won't take both -r and -D at the same time (it
|
||||
# ignores the -r). As best I can tell, there is no way to make
|
||||
# cvs give you a diff relative to a timestamp on the non-trunk
|
||||
# branch. A bare 'cvs diff' will tell you about the changes
|
||||
# relative to your checked-out versions, but I know of no way to
|
||||
# find out what those checked-out versions are.
|
||||
raise RuntimeError("Sorry, CVS 'try' builds don't work with "
|
||||
"branches")
|
||||
args = ['-q', 'diff', '-u', '-D', self.baserev]
|
||||
d = self.dovc(args)
|
||||
d.addCallback(self.readPatch, self.patchlevel)
|
||||
return d
|
||||
|
||||
class SVNExtractor(SourceStampExtractor):
|
||||
patchlevel = 0
|
||||
vcexe = "svn"
|
||||
|
||||
def getBaseRevision(self):
|
||||
d = self.dovc(["status", "-u"])
|
||||
d.addCallback(self.parseStatus)
|
||||
return d
|
||||
def parseStatus(self, res):
|
||||
# svn shows the base revision for each file that has been modified or
|
||||
# which needs an update. You can update each file to a different
|
||||
# version, so each file is displayed with its individual base
|
||||
# revision. It also shows the repository-wide latest revision number
|
||||
# on the last line ("Status against revision: \d+").
|
||||
|
||||
# for our purposes, we use the latest revision number as the "base"
|
||||
# revision, and get a diff against that. This means we will get
|
||||
# reverse-diffs for local files that need updating, but the resulting
|
||||
# tree will still be correct. The only weirdness is that the baserev
|
||||
# that we emit may be different than the version of the tree that we
|
||||
# first checked out.
|
||||
|
||||
# to do this differently would probably involve scanning the revision
|
||||
# numbers to find the max (or perhaps the min) revision, and then
|
||||
# using that as a base.
|
||||
|
||||
for line in res.split("\n"):
|
||||
m = re.search(r'^Status against revision:\s+(\d+)', line)
|
||||
if m:
|
||||
self.baserev = int(m.group(1))
|
||||
return
|
||||
raise IndexError("Could not find 'Status against revision' in "
|
||||
"SVN output: %s" % res)
|
||||
def getPatch(self, res):
|
||||
d = self.dovc(["diff", "-r%d" % self.baserev])
|
||||
d.addCallback(self.readPatch, self.patchlevel)
|
||||
return d
|
||||
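# Illustrative example, not part of the original source: 'svn status -u'
# ends with a line such as
#
#   Status against revision:   6403
#
# so parseStatus() above sets self.baserev = 6403 and getPatch() then runs
# 'svn diff -r6403' against that revision.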
|
||||
class BazExtractor(SourceStampExtractor):
|
||||
vcexe = "baz"
|
||||
def getBaseRevision(self):
|
||||
d = self.dovc(["tree-id"])
|
||||
d.addCallback(self.parseStatus)
|
||||
return d
|
||||
def parseStatus(self, res):
|
||||
tid = res.strip()
|
||||
slash = tid.index("/")
|
||||
dd = tid.rindex("--")
|
||||
self.branch = tid[slash+1:dd]
|
||||
self.baserev = tid[dd+2:]
|
||||
def getPatch(self, res):
|
||||
d = self.dovc(["diff"])
|
||||
d.addCallback(self.readPatch, 1)
|
||||
return d
|
||||
|
||||
class TlaExtractor(SourceStampExtractor):
|
||||
vcexe = "tla"
|
||||
def getBaseRevision(self):
|
||||
# 'tla logs --full' gives us ARCHIVE/BRANCH--REVISION
|
||||
# 'tla logs' gives us REVISION
|
||||
d = self.dovc(["logs", "--full", "--reverse"])
|
||||
d.addCallback(self.parseStatus)
|
||||
return d
|
||||
def parseStatus(self, res):
|
||||
tid = res.split("\n")[0].strip()
|
||||
slash = tid.index("/")
|
||||
dd = tid.rindex("--")
|
||||
self.branch = tid[slash+1:dd]
|
||||
self.baserev = tid[dd+2:]
|
||||
|
||||
def getPatch(self, res):
|
||||
d = self.dovc(["changes", "--diffs"])
|
||||
d.addCallback(self.readPatch, 1)
|
||||
return d
|
||||
|
||||
class MercurialExtractor(SourceStampExtractor):
|
||||
patchlevel = 1
|
||||
vcexe = "hg"
|
||||
def getBaseRevision(self):
|
||||
d = self.dovc(["identify"])
|
||||
d.addCallback(self.parseStatus)
|
||||
return d
|
||||
def parseStatus(self, output):
|
||||
m = re.search(r'^(\w+)', output)
|
||||
self.baserev = m.group(0)
|
||||
def getPatch(self, res):
|
||||
d = self.dovc(["diff"])
|
||||
d.addCallback(self.readPatch, self.patchlevel)
|
||||
return d
|
||||
|
||||
class DarcsExtractor(SourceStampExtractor):
|
||||
patchlevel = 1
|
||||
vcexe = "darcs"
|
||||
def getBaseRevision(self):
|
||||
d = self.dovc(["changes", "--context"])
|
||||
d.addCallback(self.parseStatus)
|
||||
return d
|
||||
def parseStatus(self, res):
|
||||
self.baserev = res # the whole context file
|
||||
def getPatch(self, res):
|
||||
d = self.dovc(["diff", "-u"])
|
||||
d.addCallback(self.readPatch, self.patchlevel)
|
||||
return d
|
||||
|
||||
def getSourceStamp(vctype, treetop, branch=None):
|
||||
if vctype == "cvs":
|
||||
e = CVSExtractor(treetop, branch)
|
||||
elif vctype == "svn":
|
||||
e = SVNExtractor(treetop, branch)
|
||||
elif vctype == "baz":
|
||||
e = BazExtractor(treetop, branch)
|
||||
elif vctype == "tla":
|
||||
e = TlaExtractor(treetop, branch)
|
||||
elif vctype == "hg":
|
||||
e = MercurialExtractor(treetop, branch)
|
||||
elif vctype == "darcs":
|
||||
e = DarcsExtractor(treetop, branch)
|
||||
else:
|
||||
raise KeyError("unknown vctype '%s'" % vctype)
|
||||
return e.get()
|
||||
|
||||
|
||||
def ns(s):
|
||||
return "%d:%s," % (len(s), s)
|
||||
|
||||
def createJobfile(bsid, branch, baserev, patchlevel, diff, builderNames):
|
||||
job = ""
|
||||
job += ns("1")
|
||||
job += ns(bsid)
|
||||
job += ns(branch)
|
||||
job += ns(str(baserev))
|
||||
job += ns("%d" % patchlevel)
|
||||
job += ns(diff)
|
||||
for bn in builderNames:
|
||||
job += ns(bn)
|
||||
return job
|
||||
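# Worked example of the netstring-style framing above, not part of the
# original source (values invented): ns("trunk") == "5:trunk,", and
#
#   createJobfile("1177-42", "trunk", "100", 0, "<diff>", ["b1"])
#
# returns "1:1,7:1177-42,5:trunk,3:100,1:0,6:<diff>,2:b1,"; the leading
# "1:1," is the jobfile format version written by the literal ns("1").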
|
||||
def getTopdir(topfile, start=None):
|
||||
"""walk upwards from the current directory until we find this topfile"""
|
||||
if not start:
|
||||
start = os.getcwd()
|
||||
here = start
|
||||
toomany = 20
|
||||
while toomany > 0:
|
||||
if os.path.exists(os.path.join(here, topfile)):
|
||||
return here
|
||||
next = os.path.dirname(here)
|
||||
if next == here:
|
||||
break # we've hit the root
|
||||
here = next
|
||||
toomany -= 1
|
||||
raise ValueError("Unable to find topfile '%s' anywhere from %s upwards"
|
||||
% (topfile, start))
|
||||
|
||||
class RemoteTryPP(protocol.ProcessProtocol):
|
||||
def __init__(self, job):
|
||||
self.job = job
|
||||
self.d = defer.Deferred()
|
||||
def connectionMade(self):
|
||||
self.transport.write(self.job)
|
||||
self.transport.closeStdin()
|
||||
def outReceived(self, data):
|
||||
sys.stdout.write(data)
|
||||
def errReceived(self, data):
|
||||
sys.stderr.write(data)
|
||||
def processEnded(self, status_object):
|
||||
sig = status_object.value.signal
|
||||
rc = status_object.value.exitCode
|
||||
if sig != None or rc != 0:
|
||||
self.d.errback(RuntimeError("remote 'buildbot tryserver' failed"
|
||||
": sig=%s, rc=%s" % (sig, rc)))
|
||||
return
|
||||
self.d.callback((sig, rc))
|
||||
|
||||
class BuildSetStatusGrabber:
|
||||
retryCount = 5 # how many times do we try to grab the BuildSetStatus?
|
||||
retryDelay = 3 # seconds to wait between attempts
|
||||
|
||||
def __init__(self, status, bsid):
|
||||
self.status = status
|
||||
self.bsid = bsid
|
||||
|
||||
def grab(self):
|
||||
# return a Deferred that either fires with the BuildSetStatus
|
||||
# reference or errbacks because we were unable to grab it
|
||||
self.d = defer.Deferred()
|
||||
# wait a second before querying to give the master's maildir watcher
|
||||
# a chance to see the job
|
||||
reactor.callLater(1, self.go)
|
||||
return self.d
|
||||
|
||||
def go(self, dummy=None):
|
||||
if self.retryCount == 0:
|
||||
raise RuntimeError("couldn't find matching buildset")
|
||||
self.retryCount -= 1
|
||||
d = self.status.callRemote("getBuildSets")
|
||||
d.addCallback(self._gotSets)
|
||||
|
||||
def _gotSets(self, buildsets):
|
||||
for bs,bsid in buildsets:
|
||||
if bsid == self.bsid:
|
||||
# got it
|
||||
self.d.callback(bs)
|
||||
return
|
||||
d = defer.Deferred()
|
||||
d.addCallback(self.go)
|
||||
reactor.callLater(self.retryDelay, d.callback, None)
|
||||
|
||||
|
||||
class Try(pb.Referenceable):
|
||||
buildsetStatus = None
|
||||
quiet = False
|
||||
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.opts = runner.loadOptions()
|
||||
self.connect = self.getopt('connect', 'try_connect')
|
||||
assert self.connect, "you must specify a connect style: ssh or pb"
|
||||
self.builderNames = self.getopt('builders', 'try_builders')
|
||||
assert self.builderNames, "no builders! use --builder or " \
|
||||
"try_builders=[names..] in .buildbot/options"
|
||||
|
||||
def getopt(self, config_name, options_name, default=None):
|
||||
value = self.config.get(config_name)
|
||||
if value is None or value == []:
|
||||
value = self.opts.get(options_name)
|
||||
if value is None or value == []:
|
||||
value = default
|
||||
return value
|
||||
|
||||
def createJob(self):
|
||||
# returns a Deferred which fires when the job parameters have been
|
||||
# created
|
||||
opts = self.opts
|
||||
# generate a random (unique) string. It would make sense to add a
|
||||
# hostname and process ID here, but a) I suspect that would cause
|
||||
# windows portability problems, and b) really this is good enough
|
||||
self.bsid = "%d-%s" % (time.time(), random.randint(0, 1000000))
|
||||
|
||||
# common options
|
||||
vc = self.getopt("vc", "try_vc")
|
||||
branch = self.getopt("branch", "try_branch")
|
||||
|
||||
if vc in ("cvs", "svn"):
|
||||
# we need to find the tree-top
|
||||
topdir = self.getopt("try_topdir", "try_topdir")
|
||||
if topdir:
|
||||
treedir = os.path.expanduser(topdir)
|
||||
else:
|
||||
topfile = self.getopt("try-topfile", "try_topfile")
|
||||
treedir = getTopdir(topfile)
|
||||
else:
|
||||
treedir = os.getcwd()
|
||||
d = getSourceStamp(vc, treedir, branch)
|
||||
d.addCallback(self._createJob_1)
|
||||
return d
|
||||
def _createJob_1(self, ss):
|
||||
self.sourcestamp = ss
|
||||
if self.connect == "ssh":
|
||||
patchlevel, diff = ss.patch
|
||||
self.jobfile = createJobfile(self.bsid,
|
||||
ss.branch or "", ss.revision,
|
||||
patchlevel, diff,
|
||||
self.builderNames)
|
||||
|
||||
def deliverJob(self):
|
||||
# returns a Deferred that fires when the job has been delivered
|
||||
opts = self.opts
|
||||
|
||||
if self.connect == "ssh":
|
||||
tryhost = self.getopt("tryhost", "try_host")
|
||||
tryuser = self.getopt("username", "try_username")
|
||||
trydir = self.getopt("trydir", "try_dir")
|
||||
|
||||
argv = ["ssh", "-l", tryuser, tryhost,
|
||||
"buildbot", "tryserver", "--jobdir", trydir]
|
||||
# now run this command and feed the contents of 'job' into stdin
|
||||
|
||||
pp = RemoteTryPP(self.jobfile)
|
||||
p = reactor.spawnProcess(pp, argv[0], argv, os.environ)
|
||||
d = pp.d
|
||||
return d
|
||||
if self.connect == "pb":
|
||||
user = self.getopt("username", "try_username")
|
||||
passwd = self.getopt("passwd", "try_password")
|
||||
master = self.getopt("master", "try_master")
|
||||
tryhost, tryport = master.split(":")
|
||||
tryport = int(tryport)
|
||||
f = pb.PBClientFactory()
|
||||
d = f.login(credentials.UsernamePassword(user, passwd))
|
||||
reactor.connectTCP(tryhost, tryport, f)
|
||||
d.addCallback(self._deliverJob_pb)
|
||||
return d
|
||||
raise RuntimeError("unknown connecttype '%s', should be 'ssh' or 'pb'"
|
||||
% self.connect)
|
||||
|
||||
def _deliverJob_pb(self, remote):
|
||||
ss = self.sourcestamp
|
||||
d = remote.callRemote("try",
|
||||
ss.branch, ss.revision, ss.patch,
|
||||
self.builderNames)
|
||||
d.addCallback(self._deliverJob_pb2)
|
||||
return d
|
||||
def _deliverJob_pb2(self, status):
|
||||
self.buildsetStatus = status
|
||||
return status
|
||||
|
||||
def getStatus(self):
|
||||
# returns a Deferred that fires when the builds have finished, and
|
||||
# may emit status messages while we wait
|
||||
wait = bool(self.getopt("wait", "try_wait", False))
|
||||
if not wait:
|
||||
# TODO: emit the URL where they can follow the builds. This
|
||||
# requires contacting the Status server over PB and doing
|
||||
# getURLForThing() on the BuildSetStatus. To get URLs for
|
||||
# individual builds would require we wait for the builds to
|
||||
# start.
|
||||
print "not waiting for builds to finish"
|
||||
return
|
||||
d = self.running = defer.Deferred()
|
||||
if self.buildsetStatus:
|
||||
self._getStatus_1()
|
||||
# contact the status port
|
||||
# we're probably using the ssh style
|
||||
master = self.getopt("master", "masterstatus")
|
||||
host, port = master.split(":")
|
||||
port = int(port)
|
||||
self.announce("contacting the status port at %s:%d" % (host, port))
|
||||
f = pb.PBClientFactory()
|
||||
creds = credentials.UsernamePassword("statusClient", "clientpw")
|
||||
d = f.login(creds)
|
||||
reactor.connectTCP(host, port, f)
|
||||
d.addCallback(self._getStatus_ssh_1)
|
||||
return self.running
|
||||
|
||||
def _getStatus_ssh_1(self, remote):
|
||||
# find a remotereference to the corresponding BuildSetStatus object
|
||||
self.announce("waiting for job to be accepted")
|
||||
g = BuildSetStatusGrabber(remote, self.bsid)
|
||||
d = g.grab()
|
||||
d.addCallback(self._getStatus_1)
|
||||
return d
|
||||
|
||||
def _getStatus_1(self, res=None):
|
||||
if res:
|
||||
self.buildsetStatus = res
|
||||
# gather the set of BuildRequests
|
||||
d = self.buildsetStatus.callRemote("getBuildRequests")
|
||||
d.addCallback(self._getStatus_2)
|
||||
|
||||
def _getStatus_2(self, brs):
|
||||
self.builderNames = []
|
||||
self.buildRequests = {}
|
||||
|
||||
# self.builds holds the current BuildStatus object for each one
|
||||
self.builds = {}
|
||||
|
||||
# self.outstanding holds the list of builderNames which haven't
|
||||
# finished yet
|
||||
self.outstanding = []
|
||||
|
||||
# self.results holds the list of build results. It holds a tuple of
|
||||
# (result, text)
|
||||
self.results = {}
|
||||
|
||||
# self.currentStep holds the name of the Step that each build is
|
||||
# currently running
|
||||
self.currentStep = {}
|
||||
|
||||
# self.ETA holds the expected finishing time (absolute time since
|
||||
# epoch)
|
||||
self.ETA = {}
|
||||
|
||||
for n,br in brs:
|
||||
self.builderNames.append(n)
|
||||
self.buildRequests[n] = br
|
||||
self.builds[n] = None
|
||||
self.outstanding.append(n)
|
||||
self.results[n] = [None,None]
|
||||
self.currentStep[n] = None
|
||||
self.ETA[n] = None
|
||||
# get new Builds for this buildrequest. We follow each one until
|
||||
# it finishes or is interrupted.
|
||||
br.callRemote("subscribe", self)
|
||||
|
||||
# now that those queries are in transit, we can start the
|
||||
# display-status-every-30-seconds loop
|
||||
self.printloop = task.LoopingCall(self.printStatus)
|
||||
self.printloop.start(3, now=False)
|
||||
|
||||
|
||||
# these methods are invoked by the status objects we've subscribed to
|
||||
|
||||
def remote_newbuild(self, bs, builderName):
|
||||
if self.builds[builderName]:
|
||||
self.builds[builderName].callRemote("unsubscribe", self)
|
||||
self.builds[builderName] = bs
|
||||
bs.callRemote("subscribe", self, 20)
|
||||
d = bs.callRemote("waitUntilFinished")
|
||||
d.addCallback(self._build_finished, builderName)
|
||||
|
||||
def remote_stepStarted(self, buildername, build, stepname, step):
|
||||
self.currentStep[buildername] = stepname
|
||||
|
||||
def remote_stepFinished(self, buildername, build, stepname, step, results):
|
||||
pass
|
||||
|
||||
def remote_buildETAUpdate(self, buildername, build, eta):
|
||||
self.ETA[buildername] = now() + eta
|
||||
|
||||
def _build_finished(self, bs, builderName):
|
||||
# we need to collect status from the newly-finished build. We don't
|
||||
# remove the build from self.outstanding until we've collected
|
||||
# everything we want.
|
||||
self.builds[builderName] = None
|
||||
self.ETA[builderName] = None
|
||||
self.currentStep[builderName] = "finished"
|
||||
d = bs.callRemote("getResults")
|
||||
d.addCallback(self._build_finished_2, bs, builderName)
|
||||
return d
|
||||
def _build_finished_2(self, results, bs, builderName):
|
||||
self.results[builderName][0] = results
|
||||
d = bs.callRemote("getText")
|
||||
d.addCallback(self._build_finished_3, builderName)
|
||||
return d
|
||||
def _build_finished_3(self, text, builderName):
|
||||
self.results[builderName][1] = text
|
||||
|
||||
self.outstanding.remove(builderName)
|
||||
if not self.outstanding:
|
||||
# all done
|
||||
return self.statusDone()
|
||||
|
||||
def printStatus(self):
|
||||
names = self.buildRequests.keys()
|
||||
names.sort()
|
||||
for n in names:
|
||||
if n not in self.outstanding:
|
||||
# the build is finished, and we have results
|
||||
code,text = self.results[n]
|
||||
t = builder.Results[code]
|
||||
if text:
|
||||
t += " (%s)" % " ".join(text)
|
||||
elif self.builds[n]:
|
||||
t = self.currentStep[n] or "building"
|
||||
if self.ETA[n]:
|
||||
t += " [ETA %ds]" % (self.ETA[n] - now())
|
||||
else:
|
||||
t = "no build"
|
||||
self.announce("%s: %s" % (n, t))
|
||||
self.announce("")
|
||||
|
||||
def statusDone(self):
|
||||
self.printloop.stop()
|
||||
print "All Builds Complete"
|
||||
# TODO: include a URL for all failing builds
|
||||
names = self.buildRequests.keys()
|
||||
names.sort()
|
||||
happy = True
|
||||
for n in names:
|
||||
code,text = self.results[n]
|
||||
t = "%s: %s" % (n, builder.Results[code])
|
||||
if text:
|
||||
t += " (%s)" % " ".join(text)
|
||||
print t
|
||||
if code != builder.SUCCESS:
|
||||
happy = False
|
||||
|
||||
if happy:
|
||||
self.exitcode = 0
|
||||
else:
|
||||
self.exitcode = 1
|
||||
self.running.callback(self.exitcode)
|
||||
|
||||
def announce(self, message):
|
||||
if not self.quiet:
|
||||
print message
|
||||
|
||||
def run(self):
|
||||
# we can't do spawnProcess until we're inside reactor.run(), so get
|
||||
# funky
|
||||
print "using '%s' connect method" % self.connect
|
||||
self.exitcode = 0
|
||||
d = defer.Deferred()
|
||||
d.addCallback(lambda res: self.createJob())
|
||||
d.addCallback(lambda res: self.announce("job created"))
|
||||
d.addCallback(lambda res: self.deliverJob())
|
||||
d.addCallback(lambda res: self.announce("job has been delivered"))
|
||||
d.addCallback(lambda res: self.getStatus())
|
||||
d.addErrback(log.err)
|
||||
d.addCallback(self.cleanup)
|
||||
d.addCallback(lambda res: reactor.stop())
|
||||
|
||||
reactor.callLater(0, d.callback, None)
|
||||
reactor.run()
|
||||
sys.exit(self.exitcode)
|
||||
|
||||
def logErr(self, why):
|
||||
log.err(why)
|
||||
print "error during 'try' processing"
|
||||
print why
|
||||
|
||||
def cleanup(self, res=None):
|
||||
if self.buildsetStatus:
|
||||
self.buildsetStatus.broker.transport.loseConnection()
|
||||
|
||||
|
||||
|
0
tools/buildbot/buildbot/slave/__init__.py
Normal file
500
tools/buildbot/buildbot/slave/bot.py
Normal file
@ -0,0 +1,500 @@
|
||||
#! /usr/bin/python
|
||||
|
||||
import os.path
|
||||
|
||||
from twisted.spread import pb
|
||||
from twisted.python import log
|
||||
from twisted.internet import reactor, defer
|
||||
from twisted.application import service, internet
|
||||
from twisted.cred import credentials
|
||||
|
||||
from buildbot.util import now
|
||||
from buildbot.pbutil import ReconnectingPBClientFactory
|
||||
from buildbot.slave import registry
|
||||
# make sure the standard commands get registered
|
||||
from buildbot.slave import commands
|
||||
|
||||
class NoCommandRunning(pb.Error):
|
||||
pass
|
||||
class WrongCommandRunning(pb.Error):
|
||||
pass
|
||||
class UnknownCommand(pb.Error):
|
||||
pass
|
||||
|
||||
class Master:
|
||||
def __init__(self, host, port, username, password):
|
||||
self.host = host
|
||||
self.port = port
|
||||
self.username = username
|
||||
self.password = password
|
||||
|
||||
class SlaveBuild:
|
||||
|
||||
"""This is an object that can hold state from one step to another in the
|
||||
same build. All SlaveCommands have access to it.
|
||||
"""
|
||||
def __init__(self, builder):
|
||||
self.builder = builder
|
||||
|
||||
class SlaveBuilder(pb.Referenceable, service.Service):
|
||||
|
||||
"""This is the local representation of a single Builder: it handles a
|
||||
single kind of build (like an all-warnings build). It has a name and a
|
||||
home directory. The rest of its behavior is determined by the master.
|
||||
"""
|
||||
|
||||
stopCommandOnShutdown = True
|
||||
|
||||
# remote is a ref to the Builder object on the master side, and is set
|
||||
# when they attach. We use it to detect when the connection to the master
|
||||
# is severed.
|
||||
remote = None
|
||||
|
||||
# .build points to a SlaveBuild object, a new one for each build
|
||||
build = None
|
||||
|
||||
# .command points to a SlaveCommand instance, and is set while the step
|
||||
# is running. We use it to implement the stopBuild method.
|
||||
command = None
|
||||
|
||||
# .remoteStep is a ref to the master-side BuildStep object, and is set
|
||||
# when the step is started
|
||||
remoteStep = None
|
||||
|
||||
def __init__(self, name, not_really):
|
||||
#service.Service.__init__(self) # Service has no __init__ method
|
||||
self.setName(name)
|
||||
self.not_really = not_really
|
||||
|
||||
def __repr__(self):
|
||||
return "<SlaveBuilder '%s' at %d>" % (self.name, id(self))
|
||||
|
||||
def setServiceParent(self, parent):
|
||||
service.Service.setServiceParent(self, parent)
|
||||
self.bot = self.parent
|
||||
# note that self.parent will go away when the buildmaster's config
|
||||
# file changes and this Builder is removed (possibly because it has
|
||||
# been changed, so the Builder will be re-added again in a moment).
|
||||
# This may occur during a build, while a step is running.
|
||||
|
||||
def setBuilddir(self, builddir):
|
||||
assert self.parent
|
||||
self.builddir = builddir
|
||||
self.basedir = os.path.join(self.bot.basedir, self.builddir)
|
||||
if not os.path.isdir(self.basedir):
|
||||
os.mkdir(self.basedir)
|
||||
|
||||
def stopService(self):
|
||||
service.Service.stopService(self)
|
||||
if self.stopCommandOnShutdown:
|
||||
self.stopCommand()
|
||||
|
||||
def activity(self):
|
||||
bot = self.parent
|
||||
if bot:
|
||||
buildslave = bot.parent
|
||||
if buildslave:
|
||||
bf = buildslave.bf
|
||||
bf.activity()
|
||||
|
||||
def remote_setMaster(self, remote):
|
||||
self.remote = remote
|
||||
self.remote.notifyOnDisconnect(self.lostRemote)
|
||||
def remote_print(self, message):
|
||||
log.msg("SlaveBuilder.remote_print(%s): message from master: %s" %
|
||||
(self.name, message))
|
||||
if message == "ping":
|
||||
return self.remote_ping()
|
||||
|
||||
def remote_ping(self):
|
||||
log.msg("SlaveBuilder.remote_ping(%s)" % self)
|
||||
if self.bot and self.bot.parent:
|
||||
debugOpts = self.bot.parent.debugOpts
|
||||
if debugOpts.get("stallPings"):
|
||||
log.msg(" debug_stallPings")
|
||||
timeout, timers = debugOpts["stallPings"]
|
||||
d = defer.Deferred()
|
||||
t = reactor.callLater(timeout, d.callback, None)
|
||||
timers.append(t)
|
||||
return d
|
||||
if debugOpts.get("failPingOnce"):
|
||||
log.msg(" debug_failPingOnce")
|
||||
class FailPingError(pb.Error): pass
|
||||
del debugOpts['failPingOnce']
|
||||
raise FailPingError("debug_failPingOnce means we should fail")
|
||||
|
||||
def lostRemote(self, remote):
|
||||
log.msg("lost remote")
|
||||
self.remote = None
|
||||
|
||||
def lostRemoteStep(self, remotestep):
|
||||
log.msg("lost remote step")
|
||||
self.remoteStep = None
|
||||
if self.stopCommandOnShutdown:
|
||||
self.stopCommand()
|
||||
|
||||
# the following are Commands that can be invoked by the master-side
|
||||
# Builder
|
||||
def remote_startBuild(self):
|
||||
"""This is invoked before the first step of any new build is run. It
|
||||
creates a new SlaveBuild object, which holds slave-side state from
|
||||
one step to the next."""
|
||||
self.build = SlaveBuild(self)
|
||||
log.msg("%s.startBuild" % self)
|
||||
|
||||
def remote_startCommand(self, stepref, stepId, command, args):
|
||||
"""
|
||||
This gets invoked by L{buildbot.process.step.RemoteCommand.start}, as
|
||||
part of various master-side BuildSteps, to start various commands
|
||||
that actually do the build. I return nothing. Eventually I will call
|
||||
.commandComplete() to notify the master-side RemoteCommand that I'm
|
||||
done.
|
||||
"""
|
||||
|
||||
self.activity()
|
||||
|
||||
if self.command:
|
||||
log.msg("leftover command, dropping it")
|
||||
self.stopCommand()
|
||||
|
||||
try:
|
||||
factory, version = registry.commandRegistry[command]
|
||||
except KeyError:
|
||||
raise UnknownCommand, "unrecognized SlaveCommand '%s'" % command
|
||||
self.command = factory(self, stepId, args)
|
||||
|
||||
log.msg(" startCommand:%s [id %s]" % (command,stepId))
|
||||
self.remoteStep = stepref
|
||||
self.remoteStep.notifyOnDisconnect(self.lostRemoteStep)
|
||||
d = self.command.doStart()
|
||||
d.addCallback(lambda res: None)
|
||||
d.addBoth(self.commandComplete)
|
||||
return None
|
||||
|
||||
def remote_interruptCommand(self, stepId, why):
|
||||
"""Halt the current step."""
|
||||
log.msg("asked to interrupt current command: %s" % why)
|
||||
self.activity()
|
||||
if not self.command:
|
||||
# TODO: just log it, a race could result in their interrupting a
|
||||
# command that wasn't actually running
|
||||
log.msg(" .. but none was running")
|
||||
return
|
||||
self.command.doInterrupt()
|
||||
|
||||
|
||||
def stopCommand(self):
|
||||
"""Make any currently-running command die, with no further status
|
||||
output. This is used when the buildslave is shutting down or the
|
||||
connection to the master has been lost. Interrupt the command,
|
||||
silence it, and then forget about it."""
|
||||
if not self.command:
|
||||
return
|
||||
log.msg("stopCommand: halting current command %s" % self.command)
|
||||
self.command.doInterrupt() # shut up! and die!
|
||||
self.command = None # forget you!
|
||||
|
||||
# sendUpdate is invoked by the Commands we spawn
|
||||
def sendUpdate(self, data):
|
||||
"""This sends the status update to the master-side
|
||||
L{buildbot.process.step.RemoteCommand} object, giving it a sequence
|
||||
number in the process. It adds the update to a queue, and asks the
|
||||
master to acknowledge the update so it can be removed from that
|
||||
queue."""
|
||||
|
||||
if not self.running:
|
||||
# .running comes from service.Service, and says whether the
|
||||
# service is running or not. If we aren't running, don't send any
|
||||
# status messages.
|
||||
return
|
||||
# the update[1]=0 comes from the leftover 'updateNum', which the
|
||||
# master still expects to receive. Provide it to avoid significant
|
||||
# interoperability issues between new slaves and old masters.
|
||||
if self.remoteStep:
|
||||
update = [data, 0]
|
||||
updates = [update]
|
||||
d = self.remoteStep.callRemote("update", updates)
|
||||
d.addCallback(self.ackUpdate)
|
||||
d.addErrback(self._ackFailed, "SlaveBuilder.sendUpdate")
|
||||
|
||||
def ackUpdate(self, acknum):
|
||||
self.activity() # update the "last activity" timer
|
||||
|
||||
def ackComplete(self, dummy):
|
||||
self.activity() # update the "last activity" timer
|
||||
|
||||
def _ackFailed(self, why, where):
|
||||
log.msg("SlaveBuilder._ackFailed:", where)
|
||||
#log.err(why) # we don't really care
|
||||
|
||||
|
||||
# this is fired by the Deferred attached to each Command
|
||||
def commandComplete(self, failure):
|
||||
if failure:
|
||||
log.msg("SlaveBuilder.commandFailed", self.command)
|
||||
log.err(failure)
|
||||
# failure, if present, is a failure.Failure. To send it across
|
||||
# the wire, we must turn it into a pb.CopyableFailure.
|
||||
failure = pb.CopyableFailure(failure)
|
||||
failure.unsafeTracebacks = True
|
||||
else:
|
||||
# failure is None
|
||||
log.msg("SlaveBuilder.commandComplete", self.command)
|
||||
self.command = None
|
||||
if not self.running:
|
||||
log.msg(" but we weren't running, quitting silently")
|
||||
return
|
||||
if self.remoteStep:
|
||||
self.remoteStep.dontNotifyOnDisconnect(self.lostRemoteStep)
|
||||
d = self.remoteStep.callRemote("complete", failure)
|
||||
d.addCallback(self.ackComplete)
|
||||
d.addErrback(self._ackFailed, "sendComplete")
|
||||
self.remoteStep = None
|
||||
|
||||
|
||||
def remote_shutdown(self):
|
||||
print "slave shutting down on command from master"
|
||||
reactor.stop()
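
# Editor's note: an illustrative sketch, not part of the original source.
# sendUpdate() above wraps each status dict as [data, 0] (the trailing 0 is
# the legacy 'updateNum' that old masters still expect) and ships a list of
# those pairs to the master's RemoteCommand via a remote "update" call;
# commandComplete() then sends "complete" with either None or a
# pb.CopyableFailure. A hypothetical master-side stub that just prints what
# arrives could look like this:

from twisted.internet import defer

class PrintingRemoteStep:
    def callRemote(self, name, *args):
        # e.g. name == "update",   args == ([[{'stdout': '...'}, 0]],)
        #      name == "complete", args == (None,)
        print "callRemote(%s): %r" % (name, args)
        return defer.succeed(0)   # the ack consumed by ackUpdate()/ackComplete()
    def notifyOnDisconnect(self, cb):
        pass
    def dontNotifyOnDisconnect(self, cb):
        pass
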
|
||||
|
||||
|
||||
class Bot(pb.Referenceable, service.MultiService):
|
||||
"""I represent the slave-side bot."""
|
||||
usePTY = None
|
||||
name = "bot"
|
||||
|
||||
def __init__(self, basedir, usePTY, not_really=0):
|
||||
service.MultiService.__init__(self)
|
||||
self.basedir = basedir
|
||||
self.usePTY = usePTY
|
||||
self.not_really = not_really
|
||||
self.builders = {}
|
||||
|
||||
def startService(self):
|
||||
assert os.path.isdir(self.basedir)
|
||||
service.MultiService.startService(self)
|
||||
|
||||
def remote_getDirs(self):
|
||||
return filter(lambda d: os.path.isdir(d), os.listdir(self.basedir))
|
||||
|
||||
def remote_getCommands(self):
|
||||
commands = {}
|
||||
for name, (factory, version) in registry.commandRegistry.items():
|
||||
commands[name] = version
|
||||
return commands
|
||||
|
||||
def remote_setBuilderList(self, wanted):
|
||||
retval = {}
|
||||
wanted_dirs = []
|
||||
for (name, builddir) in wanted:
|
||||
wanted_dirs.append(builddir)
|
||||
b = self.builders.get(name, None)
|
||||
if b:
|
||||
if b.builddir != builddir:
|
||||
log.msg("changing builddir for builder %s from %s to %s" \
|
||||
% (name, b.builddir, builddir))
|
||||
b.setBuilddir(builddir)
|
||||
else:
|
||||
b = SlaveBuilder(name, self.not_really)
|
||||
b.usePTY = self.usePTY
|
||||
b.setServiceParent(self)
|
||||
b.setBuilddir(builddir)
|
||||
self.builders[name] = b
|
||||
retval[name] = b
|
||||
for name in self.builders.keys():
|
||||
if not name in map(lambda a: a[0], wanted):
|
||||
log.msg("removing old builder %s" % name)
|
||||
self.builders[name].disownServiceParent()
|
||||
del(self.builders[name])
|
||||
|
||||
for d in os.listdir(self.basedir):
|
||||
if os.path.isdir(d):
|
||||
if d not in wanted_dirs:
|
||||
log.msg("I have a leftover directory '%s' that is not "
|
||||
"being used by the buildmaster: you can delete "
|
||||
"it now" % d)
|
||||
return retval
|
||||
|
||||
def remote_print(self, message):
|
||||
log.msg("message from master:", message)
|
||||
|
||||
def remote_getSlaveInfo(self):
|
||||
"""This command retrieves data from the files in SLAVEDIR/info/* and
|
||||
sends the contents to the buildmaster. These are used to describe
|
||||
the slave and its configuration, and should be created and
|
||||
maintained by the slave administrator. They will be retrieved each
|
||||
time the master-slave connection is established.
|
||||
"""
|
||||
|
||||
files = {}
|
||||
basedir = os.path.join(self.basedir, "info")
|
||||
if not os.path.isdir(basedir):
|
||||
return files
|
||||
for f in os.listdir(basedir):
|
||||
filename = os.path.join(basedir, f)
|
||||
if os.path.isfile(filename):
|
||||
files[f] = open(filename, "r").read()
|
||||
return files
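
# Editor's note: an illustrative sketch, not part of the original source.
# remote_getSlaveInfo() above simply reads every regular file under
# SLAVEDIR/info/ and returns {filename: contents}. A slave administrator
# could seed that directory like this (the file names and contents are
# examples only):

import os

def write_slave_info(basedir):
    infodir = os.path.join(basedir, "info")
    if not os.path.isdir(infodir):
        os.mkdir(infodir)
    open(os.path.join(infodir, "admin"), "w").write("Your Name <you@example.com>\n")
    open(os.path.join(infodir, "host"), "w").write("build box in the closet\n")
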
|
||||
|
||||
class BotFactory(ReconnectingPBClientFactory):
    # 'keepaliveInterval' serves two purposes. The first is to keep the
    # connection alive: it guarantees that there will be at least some
    # traffic once every 'keepaliveInterval' seconds, which may help keep an
    # interposed NAT gateway from dropping the address mapping because it
    # thinks the connection has been abandoned. The second is to put an upper
    # limit on how long the buildmaster might have gone away before we notice
    # it. For this second purpose, we insist upon seeing *some* evidence of
    # the buildmaster at least once every 'keepaliveInterval' seconds.
    keepaliveInterval = None # None = do not use keepalives

    # 'keepaliveTimeout' seconds before the interval expires, we will send a
    # keepalive request, both to add some traffic to the connection, and to
    # prompt a response from the master in case all our builders are idle. We
    # don't insist upon receiving a timely response from this message: a slow
    # link might put the request at the wrong end of a large build message.
    keepaliveTimeout = 30 # how long we will go without a response

    keepaliveTimer = None
    activityTimer = None
    lastActivity = 0
    unsafeTracebacks = 1
    perspective = None

def __init__(self, keepaliveInterval, keepaliveTimeout):
|
||||
ReconnectingPBClientFactory.__init__(self)
|
||||
self.keepaliveInterval = keepaliveInterval
|
||||
self.keepaliveTimeout = keepaliveTimeout
|
||||
|
||||
def startedConnecting(self, connector):
|
||||
ReconnectingPBClientFactory.startedConnecting(self, connector)
|
||||
self.connector = connector
|
||||
|
||||
def gotPerspective(self, perspective):
|
||||
ReconnectingPBClientFactory.gotPerspective(self, perspective)
|
||||
self.perspective = perspective
|
||||
try:
|
||||
perspective.broker.transport.setTcpKeepAlive(1)
|
||||
except:
|
||||
log.msg("unable to set SO_KEEPALIVE")
|
||||
if not self.keepaliveInterval:
|
||||
self.keepaliveInterval = 10*60
|
||||
self.activity()
|
||||
if self.keepaliveInterval:
|
||||
log.msg("sending application-level keepalives every %d seconds" \
|
||||
% self.keepaliveInterval)
|
||||
self.startTimers()
|
||||
|
||||
def clientConnectionFailed(self, connector, reason):
|
||||
self.connector = None
|
||||
ReconnectingPBClientFactory.clientConnectionFailed(self,
|
||||
connector, reason)
|
||||
|
||||
def clientConnectionLost(self, connector, reason):
|
||||
self.connector = None
|
||||
self.stopTimers()
|
||||
self.perspective = None
|
||||
ReconnectingPBClientFactory.clientConnectionLost(self,
|
||||
connector, reason)
|
||||
|
||||
def startTimers(self):
|
||||
assert self.keepaliveInterval
|
||||
assert not self.keepaliveTimer
|
||||
assert not self.activityTimer
|
||||
# Insist that doKeepalive fires before checkActivity. Really, it
|
||||
# needs to happen at least one RTT beforehand.
|
||||
assert self.keepaliveInterval > self.keepaliveTimeout
|
||||
|
||||
# arrange to send a keepalive a little while before our deadline
|
||||
when = self.keepaliveInterval - self.keepaliveTimeout
|
||||
self.keepaliveTimer = reactor.callLater(when, self.doKeepalive)
|
||||
# and check for activity too
|
||||
self.activityTimer = reactor.callLater(self.keepaliveInterval,
|
||||
self.checkActivity)
|
||||
|
||||
def stopTimers(self):
|
||||
if self.keepaliveTimer:
|
||||
self.keepaliveTimer.cancel()
|
||||
self.keepaliveTimer = None
|
||||
if self.activityTimer:
|
||||
self.activityTimer.cancel()
|
||||
self.activityTimer = None
|
||||
|
||||
def activity(self, res=None):
|
||||
self.lastActivity = now()
|
||||
|
||||
def doKeepalive(self):
|
||||
# send the keepalive request. If it fails outright, the connection
|
||||
# was already dropped, so just log and ignore.
|
||||
self.keepaliveTimer = None
|
||||
log.msg("sending app-level keepalive")
|
||||
d = self.perspective.callRemote("keepalive")
|
||||
d.addCallback(self.activity)
|
||||
d.addErrback(self.keepaliveLost)
|
||||
|
||||
def keepaliveLost(self, f):
|
||||
log.msg("BotFactory.keepaliveLost")
|
||||
|
||||
def checkActivity(self):
|
||||
self.activityTimer = None
|
||||
if self.lastActivity + self.keepaliveInterval < now():
|
||||
log.msg("BotFactory.checkActivity: nothing from master for "
|
||||
"%d secs" % (now() - self.lastActivity))
|
||||
self.perspective.broker.transport.loseConnection()
|
||||
return
|
||||
self.startTimers()
|
||||
|
||||
def stopFactory(self):
|
||||
ReconnectingPBClientFactory.stopFactory(self)
|
||||
self.stopTimers()
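
# Editor's note: an illustrative sketch, not part of the original source.
# The two timers in BotFactory cooperate like this: the keepalive request is
# sent 'keepaliveTimeout' seconds *before* the activity check fires, so a
# quiet but healthy connection still shows some traffic in every interval.
# The numbers below match the defaults used above (keepaliveInterval falls
# back to 10*60 in gotPerspective, keepaliveTimeout defaults to 30):

def keepalive_schedule(keepaliveInterval=600, keepaliveTimeout=30):
    send_keepalive_at = keepaliveInterval - keepaliveTimeout   # doKeepalive()
    check_activity_at = keepaliveInterval                      # checkActivity()
    return send_keepalive_at, check_activity_at

# keepalive_schedule() -> (570, 600): the request at t=570s should produce a
# response (activity) well before the t=600s check would drop the connection.
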
|
||||
|
||||
|
||||
class BuildSlave(service.MultiService):
    botClass = Bot

    # debugOpts is a dictionary used during unit tests.

    # debugOpts['stallPings'] can be set to a tuple of (timeout, []). Any
    # calls to remote_print will stall for 'timeout' seconds before
    # returning. The DelayedCalls used to implement this are stashed in the
    # list so they can be cancelled later.

    # debugOpts['failPingOnce'] can be set to True to make the slaveping fail
    # exactly once.

def __init__(self, host, port, name, passwd, basedir, keepalive,
|
||||
usePTY, keepaliveTimeout=30, umask=None, debugOpts={}):
|
||||
log.msg("Creating BuildSlave")
|
||||
service.MultiService.__init__(self)
|
||||
self.debugOpts = debugOpts.copy()
|
||||
bot = self.botClass(basedir, usePTY)
|
||||
bot.setServiceParent(self)
|
||||
self.bot = bot
|
||||
if keepalive == 0:
|
||||
keepalive = None
|
||||
self.umask = umask
|
||||
bf = self.bf = BotFactory(keepalive, keepaliveTimeout)
|
||||
bf.startLogin(credentials.UsernamePassword(name, passwd), client=bot)
|
||||
self.connection = c = internet.TCPClient(host, port, bf)
|
||||
c.setServiceParent(self)
|
||||
|
||||
def waitUntilDisconnected(self):
|
||||
# utility method for testing. Returns a Deferred that will fire when
|
||||
# we lose the connection to the master.
|
||||
if not self.bf.perspective:
|
||||
return defer.succeed(None)
|
||||
d = defer.Deferred()
|
||||
self.bf.perspective.notifyOnDisconnect(lambda res: d.callback(None))
|
||||
return d
|
||||
|
||||
def startService(self):
|
||||
if self.umask is not None:
|
||||
os.umask(self.umask)
|
||||
service.MultiService.startService(self)
|
||||
|
||||
def stopService(self):
|
||||
self.bf.continueTrying = 0
|
||||
self.bf.stopTrying()
|
||||
service.MultiService.stopService(self)
|
||||
# now kill the TCP connection
|
||||
# twisted >2.0.1 does this for us, and leaves _connection=None
|
||||
if self.connection._connection:
|
||||
self.connection._connection.disconnect()
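
# Editor's note: an illustrative sketch, not part of the original source.
# Tying the pieces of this module together: BuildSlave builds a Bot (the
# pb.Referenceable the master talks to), wraps it in a BotFactory (the
# reconnecting PB client with keepalives), and attaches a TCPClient to
# itself. The host, credentials, and paths below are placeholders.

from twisted.application import service

application = service.Application("buildslave")     # as a .tac file would
s = BuildSlave("buildmaster.example.com", 9989,      # master host and port
               "slavename", "slavepassword",         # PB credentials
               "/path/to/SLAVEDIR",                  # basedir with info/ etc.
               keepalive=600, usePTY=False)
s.setServiceParent(application)
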
|
2191
tools/buildbot/buildbot/slave/commands.py
Normal file
File diff suppressed because it is too large
Load Diff
57
tools/buildbot/buildbot/slave/interfaces.py
Normal file
@ -0,0 +1,57 @@
#! /usr/bin/python

from buildbot.twcompat import Interface

class ISlaveCommand(Interface):
    """This interface is implemented by all of the buildslave's Command
    subclasses. It specifies how the buildslave can start, interrupt, and
    query the various Commands running on behalf of the buildmaster."""

    def __init__(builder, stepId, args):
        """Create the Command. 'builder' is a reference to the parent
        buildbot.bot.SlaveBuilder instance, which will be used to send status
        updates (by calling builder.sendStatus). 'stepId' is a random string
        which helps correlate slave logs with the master. 'args' is a dict of
        arguments that comes from the master-side BuildStep, with contents
        that are specific to the individual Command subclass.

        This method is not intended to be subclassed."""

    def setup(args):
        """This method is provided for subclasses to override, to extract
        parameters from the 'args' dictionary. The default implementation does
        nothing. It will be called from __init__."""

    def start():
        """Begin the command, and return a Deferred.

        While the command runs, it should send status updates to the
        master-side BuildStep by calling self.sendStatus(status). The
        'status' argument is typically a dict with keys like 'stdout',
        'stderr', and 'rc'.

        When the step completes, it should fire the Deferred (the results are
        not used). If an exception occurs during execution, it may also
        errback the deferred; however, any reasonable errors should be trapped
        and indicated with a non-zero 'rc' status rather than raising an
        exception. Exceptions should indicate problems within the buildbot
        itself, not problems in the project being tested.
        """

    def interrupt():
        """This is called to tell the Command that the build is being stopped
        and therefore the command should be terminated as quickly as
        possible. The command may continue to send status updates, up to and
        including an 'rc' end-of-command update (which should indicate an
        error condition). The Command's deferred should still be fired when
        the command has finally completed.

        If the build is being stopped because the slave is shutting down or
        because the connection to the buildmaster has been lost, the status
        updates will simply be discarded. The Command does not need to be
        aware of this.

        Child shell processes should be killed. Simple ShellCommand classes
        can just insert a header line indicating that the process will be
        killed, then os.kill() the child."""

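
# Editor's note: an illustrative sketch, not part of the original source.
# A minimal object honoring the ISlaveCommand contract described above:
# accept (builder, stepId, args), send status dicts while running, return a
# Deferred from start(), and stop quickly when interrupted. Real slave
# commands subclass buildbot.slave.commands.Command, which provides the
# sendStatus() wrapper; this stand-alone version calls the SlaveBuilder's
# sendUpdate() directly just to show the shape of the protocol.

from twisted.internet import defer

class NoOpCommand:
    def __init__(self, builder, stepId, args):
        self.builder = builder
        self.stepId = stepId
        self.interrupted = False
        self.setup(args)

    def setup(self, args):
        pass                      # subclasses pull their parameters out here

    def start(self):
        # do no real work: report success via an 'rc' status and finish
        self.builder.sendUpdate({'rc': 0})
        return defer.succeed(None)

    def interrupt(self):
        # a real command would kill its child process here
        self.interrupted = True
        self.builder.sendUpdate({'rc': 1})
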
18
tools/buildbot/buildbot/slave/registry.py
Normal file
@ -0,0 +1,18 @@
#! /usr/bin/python

commandRegistry = {}

def registerSlaveCommand(name, factory, version):
    """
    Register a slave command with the registry, making it available in slaves.

    @type  name: string
    @param name: name under which the slave command will be registered; used
                 for L{buildbot.slave.bot.SlaveBuilder.remote_startCommand}

    @type  factory: L{buildbot.slave.commands.Command}
    @type  version: string
    @param version: version string of the factory code
    """
    assert not commandRegistry.has_key(name)
    commandRegistry[name] = (factory, version)
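
# Editor's note: an illustrative sketch, not part of the original source.
# A command module registers its factory once at import time, and
# SlaveBuilder.remote_startCommand() later resolves the name through
# commandRegistry. 'sleep' and SleepCommand are invented for illustration;
# the real factories live in buildbot.slave.commands.

class SleepCommand:
    """Would implement the ISlaveCommand shape (see interfaces.py)."""
    def __init__(self, builder, stepId, args):
        self.builder, self.stepId, self.args = builder, stepId, args

registerSlaveCommand("sleep", SleepCommand, "0.1")

# dispatch, as remote_startCommand() does it:
factory, version = commandRegistry["sleep"]
# cmd = factory(slavebuilder, stepId, args); d = cmd.doStart()
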
175
tools/buildbot/buildbot/slave/trial.py
Normal file
@ -0,0 +1,175 @@
|
||||
# -*- test-case-name: buildbot.test.test_trial.TestRemoteReporter -*-
|
||||
|
||||
import types, time
|
||||
import zope.interface as zi
|
||||
|
||||
from twisted.spread import pb
|
||||
from twisted.internet import reactor, defer
|
||||
from twisted.python import reflect, failure, log, usage, util
|
||||
from twisted.trial import registerAdapter, adaptWithDefault, reporter, runner
|
||||
from twisted.trial.interfaces import ITestMethod, ITestSuite, ITestRunner, \
|
||||
IJellied, IUnjellied, IRemoteReporter
|
||||
from twisted.application import strports
|
||||
|
||||
|
||||
class RemoteTestAny(object, util.FancyStrMixin):
|
||||
def __init__(self, original):
|
||||
self.original = original
|
||||
|
||||
def __getattr__(self, attr):
|
||||
if attr not in self.original:
|
||||
raise AttributeError, "%s has no attribute %s" % (self.__str__(), attr)
|
||||
return self.original[attr]
|
||||
|
||||
|
||||
class RemoteTestMethod(RemoteTestAny):
|
||||
zi.implements(ITestMethod)
|
||||
|
||||
class RemoteTestSuite(RemoteTestAny):
|
||||
zi.implements(ITestSuite)
|
||||
|
||||
|
||||
class RemoteReporter(reporter.Reporter):
|
||||
zi.implements(IRemoteReporter)
|
||||
pbroot = None
|
||||
|
||||
def __init__(self, stream=None, tbformat=None, args=None):
|
||||
super(RemoteReporter, self).__init__(stream, tbformat, args)
|
||||
|
||||
def setUpReporter(self):
|
||||
factory = pb.PBClientFactory()
|
||||
|
||||
self.pbcnx = reactor.connectTCP("localhost", self.args, factory)
|
||||
assert self.pbcnx is not None
|
||||
|
||||
def _cb(root):
|
||||
self.pbroot = root
|
||||
return root
|
||||
|
||||
return factory.getRootObject().addCallback(_cb
|
||||
).addErrback(log.err)
|
||||
|
||||
def tearDownReporter(self):
|
||||
def _disconnected(passthru):
|
||||
log.msg(sekritHQ='_disconnected, passthru: %r' % (passthru,))
|
||||
return passthru
|
||||
|
||||
d = defer.Deferred().addCallback(_disconnected
|
||||
).addErrback(log.err)
|
||||
|
||||
self.pbroot.notifyOnDisconnect(d.callback)
|
||||
self.pbcnx.transport.loseConnection()
|
||||
return d
|
||||
|
||||
def reportImportError(self, name, fail):
|
||||
pass
|
||||
|
||||
def startTest(self, method):
|
||||
return self.pbroot.callRemote('startTest', IJellied(method))
|
||||
|
||||
def endTest(self, method):
|
||||
return self.pbroot.callRemote('endTest', IJellied(method))
|
||||
|
||||
def startSuite(self, arg):
|
||||
return self.pbroot.callRemote('startSuite', IJellied(arg))
|
||||
|
||||
def endSuite(self, suite):
|
||||
return self.pbroot.callRemote('endSuite', IJellied(suite))
|
||||
|
||||
|
||||
# -- Adapters --
|
||||
|
||||
def jellyList(L):
|
||||
return [IJellied(i) for i in L]
|
||||
|
||||
def jellyTuple(T):
|
||||
return tuple(IJellied(list(T)))
|
||||
|
||||
def jellyDict(D):
|
||||
def _clean(*a):
|
||||
return tuple(map(lambda x: adaptWithDefault(IJellied, x, None), a))
|
||||
return dict([_clean(k, v) for k, v in D.iteritems()])
|
||||
|
||||
def jellyTimingInfo(d, timed):
|
||||
for attr in ('startTime', 'endTime'):
|
||||
d[attr] = getattr(timed, attr, 0.0)
|
||||
return d
|
||||
|
||||
def _logFormatter(eventDict):
|
||||
#XXX: this is pretty weak, it's basically the guts of
|
||||
# t.p.log.FileLogObserver.emit, but then again, that's been pretty
|
||||
# stable over the past few releases....
|
||||
edm = eventDict['message']
|
||||
if not edm:
|
||||
if eventDict['isError'] and eventDict.has_key('failure'):
|
||||
text = eventDict['failure'].getTraceback()
|
||||
elif eventDict.has_key('format'):
|
||||
try:
|
||||
text = eventDict['format'] % eventDict
|
||||
except:
|
||||
try:
|
||||
text = ('Invalid format string in log message: %s'
|
||||
% eventDict)
|
||||
except:
|
||||
text = 'UNFORMATTABLE OBJECT WRITTEN TO LOG, MESSAGE LOST'
|
||||
else:
|
||||
# we don't know how to log this
|
||||
return
|
||||
else:
|
||||
text = ' '.join(map(str, edm))
|
||||
|
||||
timeStr = time.strftime("%Y/%m/%d %H:%M %Z", time.localtime(eventDict['time']))
|
||||
fmtDict = {'system': eventDict['system'], 'text': text.replace("\n", "\n\t")}
|
||||
msgStr = " [%(system)s] %(text)s\n" % fmtDict
|
||||
return "%s%s" % (timeStr, msgStr)
|
||||
|
||||
def jellyTestMethod(testMethod):
|
||||
"""@param testMethod: an object that implements L{twisted.trial.interfaces.ITestMethod}"""
|
||||
d = {}
|
||||
for attr in ('status', 'todo', 'skip', 'stdout', 'stderr',
|
||||
'name', 'fullName', 'runs', 'errors', 'failures', 'module'):
|
||||
d[attr] = getattr(testMethod, attr)
|
||||
|
||||
q = None
|
||||
try:
|
||||
q = reflect.qual(testMethod.klass)
|
||||
except TypeError:
|
||||
# XXX: This may be incorrect somehow
|
||||
q = "%s.%s" % (testMethod.module, testMethod.klass.__name__)
|
||||
d['klass'] = q
|
||||
|
||||
d['logevents'] = [_logFormatter(event) for event in testMethod.logevents]
|
||||
|
||||
jellyTimingInfo(d, testMethod)
|
||||
|
||||
return d
|
||||
|
||||
def jellyTestRunner(testRunner):
|
||||
"""@param testRunner: an object that implements L{twisted.trial.interfaces.ITestRunner}"""
|
||||
d = dict(testMethods=[IJellied(m) for m in testRunner.testMethods])
|
||||
jellyTimingInfo(d, testRunner)
|
||||
return d
|
||||
|
||||
def jellyTestSuite(testSuite):
|
||||
d = {}
|
||||
for attr in ('tests', 'runners', 'couldNotImport'):
|
||||
d[attr] = IJellied(getattr(testSuite, attr))
|
||||
|
||||
jellyTimingInfo(d, testSuite)
|
||||
return d
|
||||
|
||||
|
||||
|
||||
for a, o, i in [(jellyTuple, types.TupleType, IJellied),
|
||||
(jellyTestMethod, ITestMethod, IJellied),
|
||||
(jellyList, types.ListType, IJellied),
|
||||
(jellyTestSuite, ITestSuite, IJellied),
|
||||
(jellyTestRunner, ITestRunner, IJellied),
|
||||
(jellyDict, types.DictType, IJellied),
|
||||
(RemoteTestMethod, types.DictType, ITestMethod),
|
||||
(RemoteTestSuite, types.DictType, ITestSuite)]:
|
||||
registerAdapter(a, o, i)
|
||||
|
||||
for t in [types.StringType, types.IntType, types.FloatType, failure.Failure]:
|
||||
zi.classImplements(t, IJellied)
|
||||
|
85
tools/buildbot/buildbot/sourcestamp.py
Normal file
@ -0,0 +1,85 @@
|
||||
|
||||
from buildbot import util, interfaces
|
||||
from buildbot.twcompat import implements
|
||||
|
||||
class SourceStamp(util.ComparableMixin):
|
||||
"""This is a tuple of (branch, revision, patchspec, changes).
|
||||
|
||||
C{branch} is always valid, although it may be None to let the Source
|
||||
step use its default branch. There are four possibilities for the
|
||||
remaining elements:
|
||||
- (revision=REV, patchspec=None, changes=None): build REV
|
||||
- (revision=REV, patchspec=(LEVEL, DIFF), changes=None): checkout REV,
|
||||
then apply a patch to the source, with C{patch -pPATCHLEVEL <DIFF}.
|
||||
- (revision=None, patchspec=None, changes=[CHANGES]): let the Source
|
||||
step check out the latest revision indicated by the given Changes.
|
||||
CHANGES is a list of L{buildbot.changes.changes.Change} instances,
|
||||
and all must be on the same branch.
|
||||
- (revision=None, patchspec=None, changes=None): build the latest code
|
||||
from the given branch.
|
||||
"""
|
||||
|
||||
# all four of these are publically visible attributes
|
||||
branch = None
|
||||
revision = None
|
||||
patch = None
|
||||
changes = []
|
||||
|
||||
compare_attrs = ('branch', 'revision', 'patch', 'changes')
|
||||
|
||||
if implements:
|
||||
implements(interfaces.ISourceStamp)
|
||||
else:
|
||||
__implements__ = interfaces.ISourceStamp,
|
||||
|
||||
def __init__(self, branch=None, revision=None, patch=None,
|
||||
changes=None):
|
||||
self.branch = branch
|
||||
self.revision = revision
|
||||
self.patch = patch
|
||||
if changes:
|
||||
self.changes = changes
|
||||
self.branch = changes[0].branch
|
||||
|
||||
def canBeMergedWith(self, other):
|
||||
if other.branch != self.branch:
|
||||
return False # the builds are completely unrelated
|
||||
|
||||
if self.changes and other.changes:
|
||||
# TODO: consider not merging these. It's a tradeoff between
|
||||
# minimizing the number of builds and obtaining finer-grained
|
||||
# results.
|
||||
return True
|
||||
elif self.changes and not other.changes:
|
||||
return False # we're using changes, they aren't
|
||||
elif not self.changes and other.changes:
|
||||
return False # they're using changes, we aren't
|
||||
|
||||
if self.patch or other.patch:
|
||||
return False # you can't merge patched builds with anything
|
||||
if self.revision == other.revision:
|
||||
# both builds are using the same specific revision, so they can
|
||||
# be merged. It might be the case that revision==None, so they're
|
||||
# both building HEAD.
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
    def mergeWith(self, others):
        """Generate a SourceStamp for the merger of me and all the other
        BuildRequests. This is called by a Build when it starts, to figure
        out what its sourceStamp should be."""

        # either we're all building the same thing (changes==None), or we're
        # all building changes (which can be merged)
        changes = []
        changes.extend(self.changes)
        for req in others:
            assert self.canBeMergedWith(req) # should have been checked already
            changes.extend(req.changes)
        newsource = SourceStamp(branch=self.branch,
                                revision=self.revision,
                                patch=self.patch,
                                changes=changes)
        return newsource

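
# Editor's note: an illustrative sketch, not part of the original source.
# The merge rules above in practice: change-driven stamps on the same branch
# merge, while anything carrying an explicit revision or patch refuses to.
# The Change objects are faked with a minimal stand-in purely for
# illustration.

class FakeChange:
    def __init__(self, branch):
        self.branch = branch

a = SourceStamp(changes=[FakeChange("trunk")])
b = SourceStamp(changes=[FakeChange("trunk")])
c = SourceStamp(branch="trunk", revision="1234", patch=(0, "--- diff ..."))

assert a.canBeMergedWith(b)          # same branch, both change-driven
assert not a.canBeMergedWith(c)      # c pins a revision and carries a patch
merged = a.mergeWith([b])            # one stamp covering both requests
assert merged.branch == "trunk"
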
0
tools/buildbot/buildbot/status/__init__.py
Normal file
70
tools/buildbot/buildbot/status/base.py
Normal file
@ -0,0 +1,70 @@
|
||||
#! /usr/bin/python
|
||||
|
||||
from twisted.application import service
|
||||
from buildbot.twcompat import implements
|
||||
|
||||
from buildbot.interfaces import IStatusReceiver
|
||||
from buildbot import util, pbutil
|
||||
|
||||
class StatusReceiver:
|
||||
if implements:
|
||||
implements(IStatusReceiver)
|
||||
else:
|
||||
__implements__ = IStatusReceiver,
|
||||
|
||||
def buildsetSubmitted(self, buildset):
|
||||
pass
|
||||
|
||||
def builderAdded(self, builderName, builder):
|
||||
pass
|
||||
|
||||
def builderChangedState(self, builderName, state):
|
||||
pass
|
||||
|
||||
def buildStarted(self, builderName, build):
|
||||
pass
|
||||
|
||||
def buildETAUpdate(self, build, ETA):
|
||||
pass
|
||||
|
||||
def stepStarted(self, build, step):
|
||||
pass
|
||||
|
||||
def stepETAUpdate(self, build, step, ETA, expectations):
|
||||
pass
|
||||
|
||||
def logStarted(self, build, step, log):
|
||||
pass
|
||||
|
||||
def logChunk(self, build, step, log, channel, text):
|
||||
pass
|
||||
|
||||
def logFinished(self, build, step, log):
|
||||
pass
|
||||
|
||||
def stepFinished(self, build, step, results):
|
||||
pass
|
||||
|
||||
def buildFinished(self, builderName, build, results):
|
||||
pass
|
||||
|
||||
def builderRemoved(self, builderName):
|
||||
pass
|
||||
|
||||
class StatusReceiverMultiService(StatusReceiver, service.MultiService,
|
||||
util.ComparableMixin):
|
||||
if implements:
|
||||
implements(IStatusReceiver)
|
||||
else:
|
||||
__implements__ = IStatusReceiver, service.MultiService.__implements__
|
||||
|
||||
def __init__(self):
|
||||
service.MultiService.__init__(self)
|
||||
|
||||
|
||||
class StatusReceiverPerspective(StatusReceiver, pbutil.NewCredPerspective):
|
||||
if implements:
|
||||
implements(IStatusReceiver)
|
||||
else:
|
||||
__implements__ = (IStatusReceiver,
|
||||
pbutil.NewCredPerspective.__implements__)
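
# Editor's note: an illustrative sketch, not part of the original source.
# StatusReceiver is a base class of no-op callbacks, so a status plugin only
# overrides the events it cares about. A toy receiver that counts finished
# builds (invented example):

from twisted.python import log

class BuildCounter(StatusReceiver):
    def __init__(self):
        self.finished = 0
    def buildFinished(self, builderName, build, results):
        self.finished += 1
        log.msg("%d builds finished so far" % self.finished)
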
|
1942
tools/buildbot/buildbot/status/builder.py
Normal file
File diff suppressed because it is too large
Load Diff
39
tools/buildbot/buildbot/status/classic.css
Normal file
@ -0,0 +1,39 @@
|
||||
a:visited {
|
||||
color: #800080;
|
||||
}
|
||||
|
||||
td.Event, td.BuildStep, td.Activity, td.Change, td.Time, td.Builder {
|
||||
border-top: 1px solid;
|
||||
border-right: 1px solid;
|
||||
}
|
||||
|
||||
/* Activity states */
|
||||
.offline {
|
||||
background-color: red;
|
||||
}
|
||||
.idle {
|
||||
background-color: white;
|
||||
}
|
||||
.waiting {
|
||||
background-color: yellow;
|
||||
}
|
||||
.building {
|
||||
background-color: yellow;
|
||||
}
|
||||
|
||||
/* LastBuild, BuildStep states */
|
||||
.success {
|
||||
background-color: #72ff75;
|
||||
}
|
||||
.failure {
|
||||
background-color: red;
|
||||
}
|
||||
.warnings {
|
||||
background-color: #ff8000;
|
||||
}
|
||||
.exception {
|
||||
background-color: #c000c0;
|
||||
}
|
||||
.start,.running {
|
||||
background-color: yellow;
|
||||
}
|
572
tools/buildbot/buildbot/status/client.py
Normal file
@ -0,0 +1,572 @@
|
||||
# -*- test-case-name: buildbot.test.test_status -*-
|
||||
|
||||
from twisted.spread import pb
|
||||
from twisted.python import log, components
|
||||
from twisted.internet import reactor
|
||||
from twisted.application import strports
|
||||
from twisted.cred import portal, checkers
|
||||
|
||||
from buildbot import interfaces
|
||||
from buildbot.twcompat import Interface, implements
|
||||
from buildbot.status import builder, base
|
||||
from buildbot.changes import changes
|
||||
|
||||
class IRemote(Interface):
|
||||
pass
|
||||
|
||||
def makeRemote(obj):
|
||||
# we want IRemote(None) to be None, but you can't really do that with
|
||||
# adapters, so we fake it
|
||||
if obj is None:
|
||||
return None
|
||||
return IRemote(obj)
|
||||
|
||||
|
||||
class RemoteBuildSet(pb.Referenceable):
|
||||
def __init__(self, buildset):
|
||||
self.b = buildset
|
||||
|
||||
def remote_getSourceStamp(self):
|
||||
return self.b.getSourceStamp()
|
||||
|
||||
def remote_getReason(self):
|
||||
return self.b.getReason()
|
||||
|
||||
def remote_getID(self):
|
||||
return self.b.getID()
|
||||
|
||||
def remote_getBuilderNames(self):
|
||||
return self.b.getBuilderNames()
|
||||
|
||||
def remote_getBuildRequests(self):
|
||||
"""Returns a list of (builderName, BuildRequest) tuples."""
|
||||
return [(br.getBuilderName(), IRemote(br))
|
||||
for br in self.b.getBuildRequests()]
|
||||
|
||||
def remote_isFinished(self):
|
||||
return self.b.isFinished()
|
||||
|
||||
def remote_waitUntilSuccess(self):
|
||||
d = self.b.waitUntilSuccess()
|
||||
d.addCallback(lambda res: self)
|
||||
return d
|
||||
|
||||
def remote_waitUntilFinished(self):
|
||||
d = self.b.waitUntilFinished()
|
||||
d.addCallback(lambda res: self)
|
||||
return d
|
||||
|
||||
def remote_getResults(self):
|
||||
return self.b.getResults()
|
||||
|
||||
components.registerAdapter(RemoteBuildSet,
|
||||
interfaces.IBuildSetStatus, IRemote)
|
||||
|
||||
|
||||
class RemoteBuilder(pb.Referenceable):
|
||||
def __init__(self, builder):
|
||||
self.b = builder
|
||||
|
||||
def remote_getName(self):
|
||||
return self.b.getName()
|
||||
|
||||
def remote_getState(self):
|
||||
state, builds = self.b.getState()
|
||||
return (state,
|
||||
None, # TODO: remove leftover ETA
|
||||
[makeRemote(b) for b in builds])
|
||||
|
||||
def remote_getSlaves(self):
|
||||
return [IRemote(s) for s in self.b.getSlaves()]
|
||||
|
||||
def remote_getLastFinishedBuild(self):
|
||||
return makeRemote(self.b.getLastFinishedBuild())
|
||||
|
||||
def remote_getCurrentBuilds(self):
|
||||
return [IRemote(b) for b in self.b.getCurrentBuilds()]
|
||||
|
||||
def remote_getBuild(self, number):
|
||||
return makeRemote(self.b.getBuild(number))
|
||||
|
||||
def remote_getEvent(self, number):
|
||||
return IRemote(self.b.getEvent(number))
|
||||
|
||||
components.registerAdapter(RemoteBuilder,
|
||||
interfaces.IBuilderStatus, IRemote)
|
||||
|
||||
|
||||
class RemoteBuildRequest(pb.Referenceable):
|
||||
def __init__(self, buildreq):
|
||||
self.b = buildreq
|
||||
self.observers = []
|
||||
|
||||
def remote_getSourceStamp(self):
|
||||
return self.b.getSourceStamp()
|
||||
|
||||
def remote_getBuilderName(self):
|
||||
return self.b.getBuilderName()
|
||||
|
||||
def remote_subscribe(self, observer):
|
||||
"""The observer's remote_newbuild method will be called (with two
|
||||
arguments: the RemoteBuild object, and our builderName) for each new
|
||||
Build that is created to handle this BuildRequest."""
|
||||
self.observers.append(observer)
|
||||
def send(bs):
|
||||
d = observer.callRemote("newbuild",
|
||||
IRemote(bs), self.b.getBuilderName())
|
||||
d.addErrback(lambda err: None)
|
||||
reactor.callLater(0, self.b.subscribe, send)
|
||||
|
||||
def remote_unsubscribe(self, observer):
|
||||
# PB (well, at least oldpb) doesn't re-use RemoteReference instances,
|
||||
# so sending the same object across the wire twice will result in two
|
||||
# separate objects that compare as equal ('a is not b' and 'a == b').
|
||||
# That means we can't use a simple 'self.observers.remove(observer)'
|
||||
# here.
|
||||
for o in self.observers:
|
||||
if o == observer:
|
||||
self.observers.remove(o)
|
||||
|
||||
components.registerAdapter(RemoteBuildRequest,
|
||||
interfaces.IBuildRequestStatus, IRemote)
|
||||
|
||||
class RemoteBuild(pb.Referenceable):
|
||||
def __init__(self, build):
|
||||
self.b = build
|
||||
self.observers = []
|
||||
|
||||
def remote_getBuilderName(self):
|
||||
return self.b.getBuilder().getName()
|
||||
|
||||
def remote_getNumber(self):
|
||||
return self.b.getNumber()
|
||||
|
||||
def remote_getReason(self):
|
||||
return self.b.getReason()
|
||||
|
||||
def remote_getChanges(self):
|
||||
return [IRemote(c) for c in self.b.getChanges()]
|
||||
|
||||
def remote_getResponsibleUsers(self):
|
||||
return self.b.getResponsibleUsers()
|
||||
|
||||
def remote_getSteps(self):
|
||||
return [IRemote(s) for s in self.b.getSteps()]
|
||||
|
||||
def remote_getTimes(self):
|
||||
return self.b.getTimes()
|
||||
|
||||
def remote_isFinished(self):
|
||||
return self.b.isFinished()
|
||||
|
||||
def remote_waitUntilFinished(self):
|
||||
# the Deferred returned by callRemote() will fire when this build is
|
||||
# finished
|
||||
d = self.b.waitUntilFinished()
|
||||
d.addCallback(lambda res: self)
|
||||
return d
|
||||
|
||||
def remote_getETA(self):
|
||||
return self.b.getETA()
|
||||
|
||||
def remote_getCurrentStep(self):
|
||||
return makeRemote(self.b.getCurrentStep())
|
||||
|
||||
def remote_getText(self):
|
||||
return self.b.getText()
|
||||
|
||||
def remote_getColor(self):
|
||||
return self.b.getColor()
|
||||
|
||||
def remote_getResults(self):
|
||||
return self.b.getResults()
|
||||
|
||||
def remote_getLogs(self):
|
||||
logs = {}
|
||||
for name,log in self.b.getLogs().items():
|
||||
logs[name] = IRemote(log)
|
||||
return logs
|
||||
|
||||
def remote_subscribe(self, observer, updateInterval=None):
|
||||
"""The observer will have remote_stepStarted(buildername, build,
|
||||
stepname, step), remote_stepFinished(buildername, build, stepname,
|
||||
step, results), and maybe remote_buildETAUpdate(buildername, build,
|
||||
eta)) messages sent to it."""
|
||||
self.observers.append(observer)
|
||||
s = BuildSubscriber(observer)
|
||||
self.b.subscribe(s, updateInterval)
|
||||
|
||||
def remote_unsubscribe(self, observer):
|
||||
# TODO: is the observer automatically unsubscribed when the build
|
||||
# finishes? Or are they responsible for unsubscribing themselves
|
||||
# anyway? How do we avoid a race condition here?
|
||||
for o in self.observers:
|
||||
if o == observer:
|
||||
self.observers.remove(o)
|
||||
|
||||
|
||||
components.registerAdapter(RemoteBuild,
|
||||
interfaces.IBuildStatus, IRemote)
|
||||
|
||||
class BuildSubscriber:
|
||||
def __init__(self, observer):
|
||||
self.observer = observer
|
||||
|
||||
def buildETAUpdate(self, build, eta):
|
||||
self.observer.callRemote("buildETAUpdate",
|
||||
build.getBuilder().getName(),
|
||||
IRemote(build),
|
||||
eta)
|
||||
|
||||
def stepStarted(self, build, step):
|
||||
self.observer.callRemote("stepStarted",
|
||||
build.getBuilder().getName(),
|
||||
IRemote(build),
|
||||
step.getName(), IRemote(step))
|
||||
return None
|
||||
|
||||
def stepFinished(self, build, step, results):
|
||||
self.observer.callRemote("stepFinished",
|
||||
build.getBuilder().getName(),
|
||||
IRemote(build),
|
||||
step.getName(), IRemote(step),
|
||||
results)
|
||||
|
||||
|
||||
class RemoteBuildStep(pb.Referenceable):
|
||||
def __init__(self, step):
|
||||
self.s = step
|
||||
|
||||
def remote_getName(self):
|
||||
return self.s.getName()
|
||||
|
||||
def remote_getBuild(self):
|
||||
return IRemote(self.s.getBuild())
|
||||
|
||||
def remote_getTimes(self):
|
||||
return self.s.getTimes()
|
||||
|
||||
def remote_getExpectations(self):
|
||||
return self.s.getExpectations()
|
||||
|
||||
def remote_getLogs(self):
|
||||
logs = {}
|
||||
for name,log in self.s.getLogs().items():
|
||||
logs[name] = IRemote(log)
|
||||
return logs
|
||||
|
||||
def remote_isFinished(self):
|
||||
return self.s.isFinished()
|
||||
|
||||
def remote_waitUntilFinished(self):
|
||||
return self.s.waitUntilFinished() # returns a Deferred
|
||||
|
||||
def remote_getETA(self):
|
||||
return self.s.getETA()
|
||||
|
||||
def remote_getText(self):
|
||||
return self.s.getText()
|
||||
|
||||
def remote_getColor(self):
|
||||
return self.s.getColor()
|
||||
|
||||
def remote_getResults(self):
|
||||
return self.s.getResults()
|
||||
|
||||
components.registerAdapter(RemoteBuildStep,
|
||||
interfaces.IBuildStepStatus, IRemote)
|
||||
|
||||
class RemoteSlave:
|
||||
def __init__(self, slave):
|
||||
self.s = slave
|
||||
|
||||
def remote_getName(self):
|
||||
return self.s.getName()
|
||||
def remote_getAdmin(self):
|
||||
return self.s.getAdmin()
|
||||
def remote_getHost(self):
|
||||
return self.s.getHost()
|
||||
def remote_isConnected(self):
|
||||
return self.s.isConnected()
|
||||
|
||||
components.registerAdapter(RemoteSlave,
|
||||
interfaces.ISlaveStatus, IRemote)
|
||||
|
||||
class RemoteEvent:
    def __init__(self, event):
        self.e = event

    def remote_getTimes(self):
        return self.e.getTimes()
    def remote_getText(self):
        return self.e.getText()
    def remote_getColor(self):
        return self.e.getColor()
|
||||
|
||||
components.registerAdapter(RemoteEvent,
|
||||
interfaces.IStatusEvent, IRemote)
|
||||
|
||||
class RemoteLog(pb.Referenceable):
|
||||
def __init__(self, log):
|
||||
self.l = log
|
||||
|
||||
def remote_getName(self):
|
||||
return self.l.getName()
|
||||
|
||||
def remote_isFinished(self):
|
||||
return self.l.isFinished()
|
||||
def remote_waitUntilFinished(self):
|
||||
d = self.l.waitUntilFinished()
|
||||
d.addCallback(lambda res: self)
|
||||
return d
|
||||
|
||||
def remote_getText(self):
|
||||
return self.l.getText()
|
||||
def remote_getTextWithHeaders(self):
|
||||
return self.l.getTextWithHeaders()
|
||||
def remote_getChunks(self):
|
||||
return self.l.getChunks()
|
||||
# TODO: subscription interface
|
||||
|
||||
components.registerAdapter(RemoteLog, builder.LogFile, IRemote)
|
||||
# TODO: something similar for builder.HTMLLogfile ?
|
||||
|
||||
class RemoteChange:
|
||||
def __init__(self, change):
|
||||
self.c = change
|
||||
|
||||
def getWho(self):
|
||||
return self.c.who
|
||||
def getFiles(self):
|
||||
return self.c.files
|
||||
def getComments(self):
|
||||
return self.c.comments
|
||||
|
||||
components.registerAdapter(RemoteChange, changes.Change, IRemote)
|
||||
|
||||
|
||||
class StatusClientPerspective(base.StatusReceiverPerspective):
|
||||
|
||||
subscribed = None
|
||||
client = None
|
||||
|
||||
def __init__(self, status):
|
||||
self.status = status # the IStatus
|
||||
self.subscribed_to_builders = [] # Builders to which we're subscribed
|
||||
self.subscribed_to = [] # everything else we're subscribed to
|
||||
|
||||
def __getstate__(self):
|
||||
d = self.__dict__.copy()
|
||||
d['client'] = None
|
||||
return d
|
||||
|
||||
def attached(self, mind):
|
||||
#log.msg("StatusClientPerspective.attached")
|
||||
return self
|
||||
|
||||
def detached(self, mind):
|
||||
log.msg("PB client detached")
|
||||
self.client = None
|
||||
for name in self.subscribed_to_builders:
|
||||
log.msg(" unsubscribing from Builder(%s)" % name)
|
||||
self.status.getBuilder(name).unsubscribe(self)
|
||||
for s in self.subscribed_to:
|
||||
log.msg(" unsubscribe from %s" % s)
|
||||
s.unsubscribe(self)
|
||||
self.subscribed = None
|
||||
|
||||
def perspective_subscribe(self, mode, interval, target):
|
||||
"""The remote client wishes to subscribe to some set of events.
|
||||
'target' will be sent remote messages when these events happen.
|
||||
'mode' indicates which events are desired: it is a string with one
|
||||
of the following values:
|
||||
|
||||
'builders': builderAdded, builderRemoved
|
||||
'builds': those plus builderChangedState, buildStarted, buildFinished
|
||||
'steps': all those plus buildETAUpdate, stepStarted, stepFinished
|
||||
'logs': all those plus stepETAUpdate, logStarted, logFinished
|
||||
'full': all those plus logChunk (with the log contents)
|
||||
|
||||
|
||||
Messages are defined by buildbot.interfaces.IStatusReceiver .
|
||||
'interval' is used to specify how frequently ETAUpdate messages
|
||||
should be sent.
|
||||
|
||||
Raising or lowering the subscription level will take effect starting
|
||||
with the next build or step."""
|
||||
|
||||
assert mode in ("builders", "builds", "steps", "logs", "full")
|
||||
assert target
|
||||
log.msg("PB subscribe(%s)" % mode)
|
||||
|
||||
self.client = target
|
||||
self.subscribed = mode
|
||||
self.interval = interval
|
||||
self.subscribed_to.append(self.status)
|
||||
# wait a moment before subscribing, so the new-builder messages
|
||||
# won't appear before this remote method finishes
|
||||
reactor.callLater(0, self.status.subscribe, self)
|
||||
return None
|
||||
|
||||
def perspective_unsubscribe(self):
|
||||
log.msg("PB unsubscribe")
|
||||
self.status.unsubscribe(self)
|
||||
self.subscribed_to.remove(self.status)
|
||||
self.client = None
|
||||
|
||||
def perspective_getBuildSets(self):
|
||||
"""This returns tuples of (buildset, bsid), because that is much more
|
||||
convenient for tryclient."""
|
||||
return [(IRemote(s), s.getID()) for s in self.status.getBuildSets()]
|
||||
|
||||
def perspective_getBuilderNames(self):
|
||||
return self.status.getBuilderNames()
|
||||
|
||||
def perspective_getBuilder(self, name):
|
||||
b = self.status.getBuilder(name)
|
||||
return IRemote(b)
|
||||
|
||||
def perspective_getSlave(self, name):
|
||||
s = self.status.getSlave(name)
|
||||
return IRemote(s)
|
||||
|
||||
# IStatusReceiver methods, invoked if we've subscribed
|
||||
|
||||
# mode >= builder
|
||||
def builderAdded(self, name, builder):
|
||||
self.client.callRemote("builderAdded", name, IRemote(builder))
|
||||
if self.subscribed in ("builds", "steps", "logs", "full"):
|
||||
self.subscribed_to_builders.append(name)
|
||||
return self
|
||||
return None
|
||||
|
||||
def builderChangedState(self, name, state):
|
||||
self.client.callRemote("builderChangedState", name, state, None)
|
||||
# TODO: remove leftover ETA argument
|
||||
|
||||
def builderRemoved(self, name):
|
||||
if name in self.subscribed_to_builders:
|
||||
self.subscribed_to_builders.remove(name)
|
||||
self.client.callRemote("builderRemoved", name)
|
||||
|
||||
def buildsetSubmitted(self, buildset):
|
||||
# TODO: deliver to client, somehow
|
||||
pass
|
||||
|
||||
# mode >= builds
|
||||
def buildStarted(self, name, build):
|
||||
self.client.callRemote("buildStarted", name, IRemote(build))
|
||||
if self.subscribed in ("steps", "logs", "full"):
|
||||
self.subscribed_to.append(build)
|
||||
return (self, self.interval)
|
||||
return None
|
||||
|
||||
def buildFinished(self, name, build, results):
|
||||
if build in self.subscribed_to:
|
||||
# we might have joined during the build
|
||||
self.subscribed_to.remove(build)
|
||||
self.client.callRemote("buildFinished",
|
||||
name, IRemote(build), results)
|
||||
|
||||
# mode >= steps
|
||||
def buildETAUpdate(self, build, eta):
|
||||
self.client.callRemote("buildETAUpdate",
|
||||
build.getBuilder().getName(), IRemote(build),
|
||||
eta)
|
||||
|
||||
def stepStarted(self, build, step):
|
||||
# we add some information here so the client doesn't have to do an
|
||||
# extra round-trip
|
||||
self.client.callRemote("stepStarted",
|
||||
build.getBuilder().getName(), IRemote(build),
|
||||
step.getName(), IRemote(step))
|
||||
if self.subscribed in ("logs", "full"):
|
||||
self.subscribed_to.append(step)
|
||||
return (self, self.interval)
|
||||
return None
|
||||
|
||||
def stepFinished(self, build, step, results):
|
||||
self.client.callRemote("stepFinished",
|
||||
build.getBuilder().getName(), IRemote(build),
|
||||
step.getName(), IRemote(step),
|
||||
results)
|
||||
if step in self.subscribed_to:
|
||||
# eventually (through some new subscription method) we could
|
||||
# join in the middle of the step
|
||||
self.subscribed_to.remove(step)
|
||||
|
||||
# mode >= logs
|
||||
def stepETAUpdate(self, build, step, ETA, expectations):
|
||||
self.client.callRemote("stepETAUpdate",
|
||||
build.getBuilder().getName(), IRemote(build),
|
||||
step.getName(), IRemote(step),
|
||||
ETA, expectations)
|
||||
|
||||
def logStarted(self, build, step, log):
|
||||
# TODO: make the HTMLLog adapter
|
||||
rlog = IRemote(log, None)
|
||||
if not rlog:
|
||||
print "hey, couldn't adapt %s to IRemote" % log
|
||||
self.client.callRemote("logStarted",
|
||||
build.getBuilder().getName(), IRemote(build),
|
||||
step.getName(), IRemote(step),
|
||||
log.getName(), IRemote(log, None))
|
||||
if self.subscribed in ("full",):
|
||||
self.subscribed_to.append(log)
|
||||
return self
|
||||
return None
|
||||
|
||||
def logFinished(self, build, step, log):
|
||||
self.client.callRemote("logFinished",
|
||||
build.getBuilder().getName(), IRemote(build),
|
||||
step.getName(), IRemote(step),
|
||||
log.getName(), IRemote(log, None))
|
||||
if log in self.subscribed_to:
|
||||
self.subscribed_to.remove(log)
|
||||
|
||||
# mode >= full
|
||||
def logChunk(self, build, step, log, channel, text):
|
||||
self.client.callRemote("logChunk",
|
||||
build.getBuilder().getName(), IRemote(build),
|
||||
step.getName(), IRemote(step),
|
||||
log.getName(), IRemote(log),
|
||||
channel, text)
|
||||
|
||||
|
||||
class PBListener(base.StatusReceiverMultiService):
|
||||
"""I am a listener for PB-based status clients."""
|
||||
|
||||
compare_attrs = ["port", "cred"]
|
||||
if implements:
|
||||
implements(portal.IRealm)
|
||||
else:
|
||||
__implements__ = (portal.IRealm,
|
||||
base.StatusReceiverMultiService.__implements__)
|
||||
|
||||
def __init__(self, port, user="statusClient", passwd="clientpw"):
|
||||
base.StatusReceiverMultiService.__init__(self)
|
||||
if type(port) is int:
|
||||
port = "tcp:%d" % port
|
||||
self.port = port
|
||||
self.cred = (user, passwd)
|
||||
p = portal.Portal(self)
|
||||
c = checkers.InMemoryUsernamePasswordDatabaseDontUse()
|
||||
c.addUser(user, passwd)
|
||||
p.registerChecker(c)
|
||||
f = pb.PBServerFactory(p)
|
||||
s = strports.service(port, f)
|
||||
s.setServiceParent(self)
|
||||
|
||||
def setServiceParent(self, parent):
|
||||
base.StatusReceiverMultiService.setServiceParent(self, parent)
|
||||
self.setup()
|
||||
|
||||
def setup(self):
|
||||
self.status = self.parent.getStatus()
|
||||
|
||||
def requestAvatar(self, avatarID, mind, interface):
|
||||
assert interface == pb.IPerspective
|
||||
p = StatusClientPerspective(self.status)
|
||||
p.attached(mind) # perhaps .callLater(0) ?
|
||||
return (pb.IPerspective, p,
|
||||
lambda p=p,mind=mind: p.detached(mind))
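
# Editor's note: an illustrative sketch, not part of the original source.
# A minimal PB status client for the PBListener above: log in with the
# configured user/password, then ask for the 'builds' subscription level
# described in perspective_subscribe(). The host, port, and credentials are
# example values (the user/password shown are PBListener's defaults).

from twisted.spread import pb
from twisted.cred import credentials
from twisted.internet import reactor

class Watcher(pb.Referenceable):
    def remote_builderAdded(self, name, builder):
        print "builder added:", name
    def remote_builderRemoved(self, name):
        print "builder removed:", name
    def remote_builderChangedState(self, name, state, eta):
        print "builder %s is now %s" % (name, state)
    def remote_buildStarted(self, name, build):
        print "build started on", name
    def remote_buildFinished(self, name, build, results):
        print "build finished on %s: %s" % (name, results)

def connected(perspective, watcher):
    return perspective.callRemote("subscribe", "builds", 0, watcher)

watcher = Watcher()
f = pb.PBClientFactory()
reactor.connectTCP("localhost", 9988, f)        # port is an example value
d = f.login(credentials.UsernamePassword("statusClient", "clientpw"),
            client=watcher)
d.addCallback(connected, watcher)
# reactor.run()
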
|
1762
tools/buildbot/buildbot/status/html.py
Normal file
File diff suppressed because it is too large
Load Diff
362
tools/buildbot/buildbot/status/mail.py
Normal file
@ -0,0 +1,362 @@
|
||||
# -*- test-case-name: buildbot.test.test_status -*-
|
||||
|
||||
# the email.MIMEMultipart module is only available in python-2.2.2 and later
|
||||
|
||||
from email.Message import Message
|
||||
from email.Utils import formatdate
|
||||
from email.MIMEText import MIMEText
|
||||
try:
|
||||
from email.MIMEMultipart import MIMEMultipart
|
||||
canDoAttachments = True
|
||||
except ImportError:
|
||||
canDoAttachments = False
|
||||
import urllib
|
||||
|
||||
from twisted.internet import defer
|
||||
try:
|
||||
from twisted.mail.smtp import sendmail # Twisted-2.0
|
||||
except ImportError:
|
||||
from twisted.protocols.smtp import sendmail # Twisted-1.3
|
||||
from twisted.python import log
|
||||
|
||||
from buildbot import interfaces, util
|
||||
from buildbot.twcompat import implements, providedBy
|
||||
from buildbot.status import base
|
||||
from buildbot.status.builder import FAILURE, SUCCESS, WARNINGS
|
||||
|
||||
|
||||
class Domain(util.ComparableMixin):
|
||||
if implements:
|
||||
implements(interfaces.IEmailLookup)
|
||||
else:
|
||||
__implements__ = interfaces.IEmailLookup
|
||||
compare_attrs = ["domain"]
|
||||
|
||||
def __init__(self, domain):
|
||||
assert "@" not in domain
|
||||
self.domain = domain
|
||||
|
||||
def getAddress(self, name):
|
||||
return name + "@" + self.domain
|
||||
|
||||
|
||||
class MailNotifier(base.StatusReceiverMultiService):
|
||||
"""This is a status notifier which sends email to a list of recipients
|
||||
upon the completion of each build. It can be configured to only send out
|
||||
mail for certain builds, and only send messages when the build fails, or
|
||||
when it transitions from success to failure. It can also be configured to
|
||||
include various build logs in each message.
|
||||
|
||||
By default, the message will be sent to the Interested Users list, which
|
||||
includes all developers who made changes in the build. You can add
|
||||
additional recipients with the extraRecipients argument.
|
||||
|
||||
To get a simple one-message-per-build (say, for a mailing list), use
|
||||
sendToInterestedUsers=False, extraRecipients=['listaddr@example.org']
|
||||
|
||||
Each MailNotifier sends mail to a single set of recipients. To send
|
||||
different kinds of mail to different recipients, use multiple
|
||||
MailNotifiers.
|
||||
"""
|
||||
|
||||
if implements:
|
||||
implements(interfaces.IEmailSender)
|
||||
else:
|
||||
__implements__ = (interfaces.IEmailSender,
|
||||
base.StatusReceiverMultiService.__implements__)
|
||||
|
||||
compare_attrs = ["extraRecipients", "lookup", "fromaddr", "mode",
|
||||
"categories", "builders", "addLogs", "relayhost",
|
||||
"subject", "sendToInterestedUsers"]
|
||||
|
||||
def __init__(self, fromaddr, mode="all", categories=None, builders=None,
|
||||
addLogs=False, relayhost="localhost",
|
||||
subject="buildbot %(result)s in %(builder)s",
|
||||
lookup=None, extraRecipients=[],
|
||||
sendToInterestedUsers=True):
|
||||
"""
|
||||
@type fromaddr: string
|
||||
@param fromaddr: the email address to be used in the 'From' header.
|
||||
@type sendToInterestedUsers: boolean
|
||||
@param sendToInterestedUsers: if True (the default), send mail to all
|
||||
of the Interested Users. If False, only
|
||||
send mail to the extraRecipients list.
|
||||
|
||||
@type extraRecipients: tuple of string
|
||||
@param extraRecipients: a list of email addresses to which messages
|
||||
should be sent (in addition to the
|
||||
InterestedUsers list, which includes any
|
||||
developers who made Changes that went into this
|
||||
build). It is a good idea to create a small
|
||||
mailing list and deliver to that, then let
|
||||
subscribers come and go as they please.
|
||||
|
||||
@type subject: string
|
||||
@param subject: a string to be used as the subject line of the message.
|
||||
%(builder)s will be replaced with the name of the
|
||||
builder which provoked the message.
|
||||
|
||||
@type mode: string (defaults to all)
|
||||
@param mode: one of:
|
||||
- 'all': send mail about all builds, passing and failing
|
||||
- 'failing': only send mail about builds which fail
|
||||
- 'problem': only send mail about a build which failed
|
||||
when the previous build passed
|
||||
|
||||
@type builders: list of strings
|
||||
@param builders: a list of builder names for which mail should be
|
||||
sent. Defaults to None (send mail for all builds).
|
||||
Use either builders or categories, but not both.
|
||||
|
||||
@type categories: list of strings
|
||||
@param categories: a list of category names to serve status
|
||||
information for. Defaults to None (all
|
||||
categories). Use either builders or categories,
|
||||
but not both.
|
||||
|
||||
@type addLogs: boolean.
|
||||
@param addLogs: if True, include all build logs as attachments to the
|
||||
messages. These can be quite large. This can also be
|
||||
set to a list of log names, to send a subset of the
|
||||
logs. Defaults to False.
|
||||
|
||||
@type relayhost: string
|
||||
@param relayhost: the host to which the outbound SMTP connection
|
||||
should be made. Defaults to 'localhost'
|
||||
|
||||
@type lookup: implementor of {IEmailLookup}
|
||||
@param lookup: object which provides IEmailLookup, which is
|
||||
responsible for mapping User names (which come from
|
||||
the VC system) into valid email addresses. If not
|
||||
provided, the notifier will only be able to send mail
|
||||
to the addresses in the extraRecipients list. Most of
|
||||
the time you can use a simple Domain instance. As a
|
||||
shortcut, you can pass as string: this will be
|
||||
treated as if you had provided Domain(str). For
|
||||
example, lookup='twistedmatrix.com' will allow mail
|
||||
to be sent to all developers whose SVN usernames
|
||||
match their twistedmatrix.com account names.
|
||||
"""
|
||||
|
||||
base.StatusReceiverMultiService.__init__(self)
|
||||
assert isinstance(extraRecipients, (list, tuple))
|
||||
for r in extraRecipients:
|
||||
assert isinstance(r, str)
|
||||
assert "@" in r # require full email addresses, not User names
|
||||
self.extraRecipients = extraRecipients
|
||||
self.sendToInterestedUsers = sendToInterestedUsers
|
||||
self.fromaddr = fromaddr
|
||||
self.mode = mode
|
||||
self.categories = categories
|
||||
self.builders = builders
|
||||
self.addLogs = addLogs
|
||||
self.relayhost = relayhost
|
||||
self.subject = subject
|
||||
if lookup is not None:
|
||||
if type(lookup) is str:
|
||||
lookup = Domain(lookup)
|
||||
assert providedBy(lookup, interfaces.IEmailLookup)
|
||||
self.lookup = lookup
|
||||
self.watched = []
|
||||
self.status = None
|
||||
|
||||
# you should either limit on builders or categories, not both
|
||||
if self.builders != None and self.categories != None:
|
||||
log.err("Please specify only builders to ignore or categories to include")
|
||||
raise # FIXME: the asserts above do not raise some Exception either
|
||||
|
||||
def setServiceParent(self, parent):
|
||||
"""
|
||||
@type parent: L{buildbot.master.BuildMaster}
|
||||
"""
|
||||
base.StatusReceiverMultiService.setServiceParent(self, parent)
|
||||
self.setup()
|
||||
|
||||
def setup(self):
|
||||
self.status = self.parent.getStatus()
|
||||
self.status.subscribe(self)
|
||||
|
||||
def disownServiceParent(self):
|
||||
self.status.unsubscribe(self)
|
||||
for w in self.watched:
|
||||
w.unsubscribe(self)
|
||||
return base.StatusReceiverMultiService.disownServiceParent(self)
|
||||
|
||||
def builderAdded(self, name, builder):
|
||||
# only subscribe to builders we are interested in
|
||||
if self.categories != None and builder.category not in self.categories:
|
||||
return None
|
||||
|
||||
self.watched.append(builder)
|
||||
return self # subscribe to this builder
|
||||
|
||||
def builderRemoved(self, name):
|
||||
pass
|
||||
|
||||
def builderChangedState(self, name, state):
|
||||
pass
|
||||
def buildStarted(self, name, build):
|
||||
pass
|
||||
def buildFinished(self, name, build, results):
|
||||
# here is where we actually do something.
|
||||
builder = build.getBuilder()
|
||||
if self.builders is not None and name not in self.builders:
|
||||
return # ignore this build
|
||||
if self.categories is not None and \
|
||||
builder.category not in self.categories:
|
||||
return # ignore this build
|
||||
|
||||
if self.mode == "failing" and results != FAILURE:
|
||||
return
|
||||
if self.mode == "problem":
|
||||
if results != FAILURE:
|
||||
return
|
||||
prev = build.getPreviousBuild()
|
||||
if prev and prev.getResults() == FAILURE:
|
||||
return
|
||||
# for testing purposes, buildMessage returns a Deferred that fires
|
||||
# when the mail has been sent. To help unit tests, we return that
|
||||
# Deferred here even though the normal IStatusReceiver.buildFinished
|
||||
# signature doesn't do anything with it. If that changes (if
|
||||
# .buildFinished's return value becomes significant), we need to
|
||||
# rearrange this.
|
||||
return self.buildMessage(name, build, results)
|
||||
|
||||
def buildMessage(self, name, build, results):
|
||||
text = ""
|
||||
if self.mode == "all":
|
||||
text += "The Buildbot has finished a build of %s.\n" % name
|
||||
elif self.mode == "failing":
|
||||
text += "The Buildbot has detected a failed build of %s.\n" % name
|
||||
else:
|
||||
text += "The Buildbot has detected a new failure of %s.\n" % name
|
||||
buildurl = self.status.getURLForThing(build)
|
||||
if buildurl:
|
||||
text += "Full details are available at:\n %s\n" % buildurl
|
||||
text += "\n"
|
||||
|
||||
url = self.status.getBuildbotURL()
|
||||
if url:
|
||||
text += "Buildbot URL: %s\n\n" % urllib.quote(url, '/:')
|
||||
|
||||
text += "Buildslave for this Build: %s\n\n" % build.getSlavename()
|
||||
text += "Build Reason: %s\n" % build.getReason()
|
||||
|
||||
patch = None
|
||||
ss = build.getSourceStamp()
|
||||
if ss is None:
|
||||
source = "unavailable"
|
||||
else:
|
||||
branch, revision, patch = ss
|
||||
source = ""
|
||||
if branch:
|
||||
source += "[branch %s] " % branch
|
||||
if revision:
|
||||
source += revision
|
||||
else:
|
||||
source += "HEAD"
|
||||
if patch is not None:
|
||||
source += " (plus patch)"
|
||||
text += "Build Source Stamp: %s\n" % source
|
||||
|
||||
text += "Blamelist: %s\n" % ",".join(build.getResponsibleUsers())
|
||||
|
||||
# TODO: maybe display changes here? or in an attachment?
|
||||
text += "\n"
|
||||
|
||||
t = build.getText()
|
||||
if t:
|
||||
t = ": " + " ".join(t)
|
||||
else:
|
||||
t = ""
|
||||
|
||||
if results == SUCCESS:
|
||||
text += "Build succeeded!\n"
|
||||
res = "success"
|
||||
elif results == WARNINGS:
|
||||
text += "Build Had Warnings%s\n" % t
|
||||
res = "warnings"
|
||||
else:
|
||||
text += "BUILD FAILED%s\n" % t
|
||||
res = "failure"
|
||||
|
||||
if self.addLogs and build.getLogs():
|
||||
text += "Logs are attached.\n"
|
||||
|
||||
# TODO: it would be nice to provide a URL for the specific build
|
||||
# here. That involves some coordination with html.Waterfall .
|
||||
# Ideally we could do:
|
||||
# helper = self.parent.getServiceNamed("html")
|
||||
# if helper:
|
||||
# url = helper.getURLForBuild(build)
|
||||
|
||||
text += "\n"
|
||||
text += "sincerely,\n"
|
||||
text += " -The Buildbot\n"
|
||||
text += "\n"
|
||||
|
||||
haveAttachments = False
|
||||
if patch or self.addLogs:
|
||||
haveAttachments = True
|
||||
if not canDoAttachments:
|
||||
log.msg("warning: I want to send mail with attachments, "
|
||||
"but this python is too old to have "
|
||||
"email.MIMEMultipart . Please upgrade to python-2.3 "
|
||||
"or newer to enable addLogs=True")
|
||||
|
||||
if haveAttachments and canDoAttachments:
|
||||
m = MIMEMultipart()
|
||||
m.attach(MIMEText(text))
|
||||
else:
|
||||
m = Message()
|
||||
m.set_payload(text)
|
||||
|
||||
m['Date'] = formatdate(localtime=True)
|
||||
m['Subject'] = self.subject % { 'result': res,
|
||||
'builder': name,
|
||||
}
|
||||
m['From'] = self.fromaddr
|
||||
# m['To'] is added later
|
||||
|
||||
if patch:
|
||||
a = MIMEText(patch)
|
||||
a.add_header('Content-Disposition', "attachment",
|
||||
filename="source patch")
|
||||
m.attach(a)
|
||||
if self.addLogs:
|
||||
for log in build.getLogs():
|
||||
name = "%s.%s" % (log.getStep().getName(),
|
||||
log.getName())
|
||||
a = MIMEText(log.getText())
|
||||
a.add_header('Content-Disposition', "attachment",
|
||||
filename=name)
|
||||
m.attach(a)
|
||||
|
||||
# now, who is this message going to?
|
||||
dl = []
|
||||
recipients = self.extraRecipients[:]
|
||||
if self.sendToInterestedUsers and self.lookup:
|
||||
for u in build.getInterestedUsers():
|
||||
d = defer.maybeDeferred(self.lookup.getAddress, u)
|
||||
d.addCallback(recipients.append)
|
||||
dl.append(d)
|
||||
d = defer.DeferredList(dl)
|
||||
d.addCallback(self._gotRecipients, recipients, m)
|
||||
return d
|
||||
|
||||
def _gotRecipients(self, res, rlist, m):
|
||||
recipients = []
|
||||
for r in rlist:
|
||||
if r is not None and r not in recipients:
|
||||
recipients.append(r)
|
||||
recipients.sort()
|
||||
m['To'] = ", ".join(recipients)
|
||||
return self.sendMessage(m, recipients)
|
||||
|
||||
def sendMessage(self, m, recipients):
|
||||
s = m.as_string()
|
||||
ds = []
|
||||
log.msg("sending mail (%d bytes) to" % len(s), recipients)
|
||||
for recip in recipients:
|
||||
ds.append(sendmail(self.relayhost, self.fromaddr, recip, s))
|
||||
return defer.DeferredList(ds)
|
308
tools/buildbot/buildbot/status/progress.py
Normal file
@ -0,0 +1,308 @@
|
||||
# -*- test-case-name: buildbot.test.test_status -*-
|
||||
|
||||
from twisted.internet import reactor
|
||||
from twisted.spread import pb
|
||||
from twisted.python import log
|
||||
from buildbot import util
|
||||
|
||||
class StepProgress:
|
||||
"""I keep track of how much progress a single BuildStep has made.
|
||||
|
||||
Progress is measured along various axes. Time consumed is one that is
|
||||
available for all steps. Amount of command output is another, and may be
|
||||
better quantified by scanning the output for markers to derive number of
|
||||
files compiled, directories walked, tests run, etc.
|
||||
|
||||
I am created when the build begins, and given to a BuildProgress object
|
||||
so it can track the overall progress of the whole build.
|
||||
|
||||
"""
|
||||
|
||||
startTime = None
|
||||
stopTime = None
|
||||
expectedTime = None
|
||||
buildProgress = None
|
||||
debug = False
|
||||
|
||||
def __init__(self, name, metricNames):
|
||||
self.name = name
|
||||
self.progress = {}
|
||||
self.expectations = {}
|
||||
for m in metricNames:
|
||||
self.progress[m] = None
|
||||
self.expectations[m] = None
|
||||
|
||||
def setBuildProgress(self, bp):
|
||||
self.buildProgress = bp
|
||||
|
||||
def setExpectations(self, metrics):
|
||||
"""The step can call this to explicitly set a target value for one
|
||||
of its metrics. E.g., ShellCommands knows how many commands it will
|
||||
execute, so it could set the 'commands' expectation."""
|
||||
for metric, value in metrics.items():
|
||||
self.expectations[metric] = value
|
||||
self.buildProgress.newExpectations()
|
||||
|
||||
def setExpectedTime(self, seconds):
|
||||
self.expectedTime = seconds
|
||||
self.buildProgress.newExpectations()
|
||||
|
||||
def start(self):
|
||||
if self.debug: print "StepProgress.start[%s]" % self.name
|
||||
self.startTime = util.now()
|
||||
|
||||
def setProgress(self, metric, value):
|
||||
"""The step calls this as progress is made along various axes."""
|
||||
if self.debug:
|
||||
print "setProgress[%s][%s] = %s" % (self.name, metric, value)
|
||||
self.progress[metric] = value
|
||||
if self.debug:
|
||||
r = self.remaining()
|
||||
print " step remaining:", r
|
||||
self.buildProgress.newProgress()
|
||||
|
||||
def finish(self):
|
||||
"""This stops the 'time' metric and marks the step as finished
|
||||
overall. It should be called after the last .setProgress has been
|
||||
done for each axis."""
|
||||
if self.debug: print "StepProgress.finish[%s]" % self.name
|
||||
self.stopTime = util.now()
|
||||
self.buildProgress.stepFinished(self.name)
|
||||
|
||||
def totalTime(self):
|
||||
if self.startTime != None and self.stopTime != None:
|
||||
return self.stopTime - self.startTime
|
||||
|
||||
def remaining(self):
|
||||
if self.startTime == None:
|
||||
return self.expectedTime
|
||||
if self.stopTime != None:
|
||||
return 0 # already finished
|
||||
# TODO: replace this with cleverness that graphs each metric vs.
|
||||
# time, then finds the inverse function. Will probably need to save
|
||||
# a timestamp with each setProgress update, when finished, go back
|
||||
# and find the 2% transition points, then save those 50 values in a
|
||||
# list. On the next build, do linear interpolation between the two
|
||||
# closest samples to come up with a percentage represented by that
|
||||
# metric.
|
||||
|
||||
# TODO: If no other metrics are available, just go with elapsed
|
||||
# time. Given the non-time-uniformity of text output from most
|
||||
# steps, this would probably be better than the text-percentage
|
||||
# scheme currently implemented.
|
||||
|
||||
percentages = []
|
||||
for metric, value in self.progress.items():
|
||||
expectation = self.expectations[metric]
|
||||
if value != None and expectation != None:
|
||||
p = 1.0 * value / expectation
|
||||
percentages.append(p)
|
||||
if percentages:
|
||||
avg = reduce(lambda x,y: x+y, percentages) / len(percentages)
|
||||
if avg > 1.0:
|
||||
# overdue
|
||||
avg = 1.0
|
||||
if avg < 0.0:
|
||||
avg = 0.0
|
||||
if percentages and self.expectedTime != None:
|
||||
return self.expectedTime - (avg * self.expectedTime)
|
||||
if self.expectedTime is not None:
|
||||
# fall back to pure time
|
||||
return self.expectedTime - (util.now() - self.startTime)
|
||||
return None # no idea
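    # Worked example (hypothetical numbers): with expectedTime=100s and a
    # single 'output' metric whose expectation is 2000 lines, seeing 500
    # lines gives a completion fraction of 500/2000 = 0.25, so remaining()
    # returns 100 - (0.25 * 100) = 75 seconds.  When no metric has both a
    # value and an expectation, it falls back to expectedTime minus the
    # elapsed wall-clock time, or None if there is no expectation at all.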
|
||||
|
||||
|
||||
class WatcherState:
|
||||
def __init__(self, interval):
|
||||
self.interval = interval
|
||||
self.timer = None
|
||||
self.needUpdate = 0
|
||||
|
||||
class BuildProgress(pb.Referenceable):
|
||||
"""I keep track of overall build progress. I hold a list of StepProgress
|
||||
objects.
|
||||
"""
|
||||
|
||||
def __init__(self, stepProgresses):
|
||||
self.steps = {}
|
||||
for s in stepProgresses:
|
||||
self.steps[s.name] = s
|
||||
s.setBuildProgress(self)
|
||||
self.finishedSteps = []
|
||||
self.watchers = {}
|
||||
self.debug = 0
|
||||
|
||||
def setExpectationsFrom(self, exp):
|
||||
"""Set our expectations from the builder's Expectations object."""
|
||||
for name, metrics in exp.steps.items():
|
||||
s = self.steps[name]
|
||||
s.setExpectedTime(exp.times[name])
|
||||
s.setExpectations(exp.steps[name])
|
||||
|
||||
def newExpectations(self):
|
||||
"""Call this when one of the steps has changed its expectations.
|
||||
This should trigger us to update our ETA value and notify any
|
||||
subscribers."""
|
||||
pass # subscribers are not implemented: they just poll
|
||||
|
||||
def stepFinished(self, stepname):
|
||||
assert(stepname not in self.finishedSteps)
|
||||
self.finishedSteps.append(stepname)
|
||||
if len(self.finishedSteps) == len(self.steps.keys()):
|
||||
self.sendLastUpdates()
|
||||
|
||||
def newProgress(self):
|
||||
r = self.remaining()
|
||||
if self.debug:
|
||||
print " remaining:", r
|
||||
if r != None:
|
||||
self.sendAllUpdates()
|
||||
|
||||
def remaining(self):
|
||||
# sum eta of all steps
|
||||
sum = 0
|
||||
for name, step in self.steps.items():
|
||||
rem = step.remaining()
|
||||
if rem == None:
|
||||
return None # not sure
|
||||
sum += rem
|
||||
return sum
|
||||
def eta(self):
|
||||
left = self.remaining()
|
||||
if left == None:
|
||||
return None # not sure
|
||||
done = util.now() + left
|
||||
return done
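    # Example: three steps whose remaining() values are 40, 0 and 20 seconds
    # give remaining() == 60 and eta() == util.now() + 60; if any step cannot
    # estimate, both return None.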
|
||||
|
||||
|
||||
def remote_subscribe(self, remote, interval=5):
|
||||
# [interval, timer, needUpdate]
|
||||
# don't send an update more than once per interval
|
||||
self.watchers[remote] = WatcherState(interval)
|
||||
remote.notifyOnDisconnect(self.removeWatcher)
|
||||
self.updateWatcher(remote)
|
||||
self.startTimer(remote)
|
||||
log.msg("BuildProgress.remote_subscribe(%s)" % remote)
|
||||
def remote_unsubscribe(self, remote):
|
||||
# TODO: this doesn't work. I think 'remote' will always be different
|
||||
# than the object that appeared in _subscribe.
|
||||
log.msg("BuildProgress.remote_unsubscribe(%s)" % remote)
|
||||
self.removeWatcher(remote)
|
||||
#remote.dontNotifyOnDisconnect(self.removeWatcher)
|
||||
def removeWatcher(self, remote):
|
||||
#log.msg("removeWatcher(%s)" % remote)
|
||||
try:
|
||||
timer = self.watchers[remote].timer
|
||||
if timer:
|
||||
timer.cancel()
|
||||
del self.watchers[remote]
|
||||
except KeyError:
|
||||
log.msg("Weird, removeWatcher on non-existent subscriber:",
|
||||
remote)
|
||||
def sendAllUpdates(self):
|
||||
for r in self.watchers.keys():
|
||||
self.updateWatcher(r)
|
||||
def updateWatcher(self, remote):
|
||||
# an update wants to go to this watcher. Send it if we can, otherwise
|
||||
# queue it for later
|
||||
w = self.watchers[remote]
|
||||
if not w.timer:
|
||||
# no timer, so send update now and start the timer
|
||||
self.sendUpdate(remote)
|
||||
self.startTimer(remote)
|
||||
else:
|
||||
# timer is running, just mark as needing an update
|
||||
w.needUpdate = 1
|
||||
def startTimer(self, remote):
|
||||
w = self.watchers[remote]
|
||||
timer = reactor.callLater(w.interval, self.watcherTimeout, remote)
|
||||
w.timer = timer
|
||||
def sendUpdate(self, remote, last=0):
|
||||
self.watchers[remote].needUpdate = 0
|
||||
#text = self.asText() # TODO: not text, duh
|
||||
try:
|
||||
remote.callRemote("progress", self.remaining())
|
||||
if last:
|
||||
remote.callRemote("finished", self)
|
||||
except:
|
||||
log.deferr()
|
||||
self.removeWatcher(remote)
|
||||
|
||||
def watcherTimeout(self, remote):
|
||||
w = self.watchers.get(remote, None)
|
||||
if not w:
|
||||
return # went away
|
||||
w.timer = None
|
||||
if w.needUpdate:
|
||||
self.sendUpdate(remote)
|
||||
self.startTimer(remote)
|
||||
def sendLastUpdates(self):
|
||||
for remote in self.watchers.keys():
|
||||
self.sendUpdate(remote, 1)
|
||||
self.removeWatcher(remote)
|
||||
|
||||
|
||||
class Expectations:
|
||||
debug = False
|
||||
# decay=1.0 ignores all but the last build
|
||||
# 0.9 is short time constant. 0.1 is very long time constant
|
||||
# TODO: let decay be specified per-metric
|
||||
decay = 0.5
|
||||
|
||||
def __init__(self, buildprogress):
|
||||
"""Create us from a successful build. We will expect each step to
|
||||
take as long as it did in that build."""
|
||||
|
||||
# .steps maps stepname to dict2
|
||||
# dict2 maps metricname to final end-of-step value
|
||||
self.steps = {}
|
||||
|
||||
# .times maps stepname to per-step elapsed time
|
||||
self.times = {}
|
||||
|
||||
for name, step in buildprogress.steps.items():
|
||||
self.steps[name] = {}
|
||||
for metric, value in step.progress.items():
|
||||
self.steps[name][metric] = value
|
||||
self.times[name] = None
|
||||
if step.startTime is not None and step.stopTime is not None:
|
||||
self.times[name] = step.stopTime - step.startTime
|
||||
|
||||
def wavg(self, old, current):
|
||||
if old is None:
|
||||
return current
|
||||
if current is None:
|
||||
return old
|
||||
else:
|
||||
return (current * self.decay) + (old * (1 - self.decay))
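    # Worked example: with the default decay=0.5, old=100s and current=60s
    # give 60*0.5 + 100*0.5 = 80s; a second 60s build then gives
    # 60*0.5 + 80*0.5 = 70s, so the estimate converges geometrically on the
    # recent build times (decay=1.0 would track only the last build).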
|
||||
|
||||
def update(self, buildprogress):
|
||||
for name, stepprogress in buildprogress.steps.items():
|
||||
old = self.times[name]
|
||||
current = stepprogress.totalTime()
|
||||
if current == None:
|
||||
log.msg("Expectations.update: current[%s] was None!" % name)
|
||||
continue
|
||||
new = self.wavg(old, current)
|
||||
self.times[name] = new
|
||||
if self.debug:
|
||||
print "new expected time[%s] = %s, old %s, cur %s" % \
|
||||
(name, new, old, current)
|
||||
|
||||
for metric, current in stepprogress.progress.items():
|
||||
old = self.steps[name][metric]
|
||||
new = self.wavg(old, current)
|
||||
if self.debug:
|
||||
print "new expectation[%s][%s] = %s, old %s, cur %s" % \
|
||||
(name, metric, new, old, current)
|
||||
self.steps[name][metric] = new
|
||||
|
||||
def expectedBuildTime(self):
|
||||
if None in self.times.values():
|
||||
return None
|
||||
#return sum(self.times.values())
|
||||
# python-2.2 doesn't have 'sum'. TODO: drop python-2.2 support
|
||||
s = 0
|
||||
for v in self.times.values():
|
||||
s += v
|
||||
return s
|
75
tools/buildbot/buildbot/status/tests.py
Normal file
@ -0,0 +1,75 @@
|
||||
#! /usr/bin/python
|
||||
|
||||
from twisted.web import resource
|
||||
from twisted.web.error import NoResource
|
||||
from twisted.web.html import PRE
|
||||
|
||||
# these are our test result types. Steps are responsible for mapping results
|
||||
# into these values.
|
||||
SKIP, EXPECTED_FAILURE, FAILURE, ERROR, UNEXPECTED_SUCCESS, SUCCESS = \
|
||||
"skip", "expected failure", "failure", "error", "unexpected success", \
|
||||
"success"
|
||||
UNKNOWN = "unknown" # catch-all
|
||||
|
||||
|
||||
class OneTest(resource.Resource):
|
||||
isLeaf = 1
|
||||
def __init__(self, parent, testName, results):
|
||||
self.parent = parent
|
||||
self.testName = testName
|
||||
self.resultType, self.results = results
|
||||
|
||||
def render(self, request):
|
||||
request.setHeader("content-type", "text/html")
|
||||
if request.method == "HEAD":
|
||||
request.setHeader("content-length", len(self.html(request)))
|
||||
return ''
|
||||
return self.html(request)
|
||||
|
||||
def html(self, request):
|
||||
# turn ourselves into HTML
|
||||
raise NotImplementedError
|
||||
|
||||
class TestResults(resource.Resource):
|
||||
oneTestClass = OneTest
|
||||
def __init__(self):
|
||||
resource.Resource.__init__(self)
|
||||
self.tests = {}
|
||||
def addTest(self, testName, resultType, results=None):
|
||||
self.tests[testName] = (resultType, results)
|
||||
# TODO: .setName and .delete should be used on our Swappable
|
||||
def countTests(self):
|
||||
return len(self.tests)
|
||||
def countFailures(self):
|
||||
failures = 0
|
||||
for t in self.tests.values():
|
||||
if t[0] in (FAILURE, ERROR):
|
||||
failures += 1
|
||||
return failures
|
||||
def summary(self):
|
||||
"""Return a short list of text strings as a summary, suitable for
|
||||
inclusion in an Event"""
|
||||
return ["some", "tests"]
|
||||
def describeOneTest(self, testname):
|
||||
return "%s: %s\n" % (testname, self.tests[testname][0])
|
||||
def html(self):
|
||||
data = "<html>\n<head><title>Test Results</title></head>\n"
|
||||
data += "<body>\n"
|
||||
data += "<pre>\n"
|
||||
tests = self.tests.keys()
|
||||
tests.sort()
|
||||
for testname in tests:
|
||||
data += self.describeOneTest(testname)
|
||||
data += "</pre>\n"
|
||||
data += "</body></html>\n"
|
||||
return data
|
||||
def render(self, request):
|
||||
request.setHeader("content-type", "text/html")
|
||||
if request.method == "HEAD":
|
||||
request.setHeader("content-length", len(self.html()))
|
||||
return ''
|
||||
return self.html()
|
||||
def getChild(self, path, request):
|
||||
if self.tests.has_key(path):
|
||||
return self.oneTestClass(self, path, self.tests[path])
|
||||
return NoResource("No such test '%s'" % path)
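# Minimal usage sketch (the test names are made up):
#
#   r = TestResults()
#   r.addTest("test_foo", SUCCESS)
#   r.addTest("test_bar", FAILURE, results="traceback ...")
#   r.countTests()     # -> 2
#   r.countFailures()  # -> 1
#
# Rendering r over HTTP yields a <pre> block with one describeOneTest() line
# per test; a child path like /test_bar is handed to oneTestClass.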
|
176
tools/buildbot/buildbot/status/tinderbox.py
Normal file
@ -0,0 +1,176 @@
|
||||
|
||||
from email.Message import Message
|
||||
from email.Utils import formatdate
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
from buildbot import interfaces
|
||||
from buildbot.twcompat import implements
|
||||
from buildbot.status import base, mail
|
||||
from buildbot.status.builder import SUCCESS, WARNINGS
|
||||
|
||||
import zlib, bz2, base64
|
||||
|
||||
# TODO: docs, maybe a test of some sort just to make sure it actually imports
|
||||
# and can format email without raising an exception.
|
||||
|
||||
class TinderboxMailNotifier(mail.MailNotifier):
|
||||
"""This is a Tinderbox status notifier. It can send e-mail to a number of
|
||||
different tinderboxes or people. E-mails are sent at the beginning and
|
||||
upon completion of each build. It can be configured to send out e-mails
|
||||
for only certain builds.
|
||||
|
||||
The most basic usage is as follows::
|
||||
TinderboxMailNotifier(fromaddr="buildbot@localhost",
|
||||
tree="MyTinderboxTree",
|
||||
extraRecipients=["tinderboxdaemon@host.org"])
|
||||
|
||||
The builder name (as specified in master.cfg) is used as the "build"
|
||||
tinderbox option.
|
||||
|
||||
"""
|
||||
if implements:
|
||||
implements(interfaces.IEmailSender)
|
||||
else:
|
||||
__implements__ = (interfaces.IEmailSender,
|
||||
base.StatusReceiverMultiService.__implements__)
|
||||
|
||||
compare_attrs = ["extraRecipients", "fromaddr", "categories", "builders",
|
||||
"addLogs", "relayhost", "subject", "binaryURL", "tree",
|
||||
"logCompression"]
|
||||
|
||||
def __init__(self, fromaddr, tree, extraRecipients,
|
||||
categories=None, builders=None, relayhost="localhost",
|
||||
subject="buildbot %(result)s in %(builder)s", binaryURL="",
|
||||
logCompression=""):
|
||||
"""
|
||||
@type fromaddr: string
|
||||
@param fromaddr: the email address to be used in the 'From' header.
|
||||
|
||||
@type tree: string
|
||||
@param tree: The Tinderbox tree to post to.
|
||||
|
||||
@type extraRecipients: tuple of string
|
||||
@param extraRecipients: E-mail addresses of recipients. This should at
|
||||
least include the tinderbox daemon.
|
||||
|
||||
@type categories: list of strings
|
||||
@param categories: a list of category names to serve status
|
||||
information for. Defaults to None (all
|
||||
categories). Use either builders or categories,
|
||||
but not both.
|
||||
|
||||
@type builders: list of strings
|
||||
@param builders: a list of builder names for which mail should be
|
||||
sent. Defaults to None (send mail for all builds).
|
||||
Use either builders or categories, but not both.
|
||||
|
||||
@type relayhost: string
|
||||
@param relayhost: the host to which the outbound SMTP connection
|
||||
should be made. Defaults to 'localhost'
|
||||
|
||||
@type subject: string
|
||||
@param subject: a string to be used as the subject line of the message.
|
||||
%(builder)s will be replaced with the name of the
|
||||
                        builder which provoked the message.
|
||||
This parameter is not significant for the tinderbox
|
||||
daemon.
|
||||
|
||||
@type binaryURL: string
|
||||
@param binaryURL: If specified, this should be the location where final
|
||||
binary for a build is located.
|
||||
(ie. http://www.myproject.org/nightly/08-08-2006.tgz)
|
||||
It will be posted to the Tinderbox.
|
||||
|
||||
@type logCompression: string
|
||||
@param logCompression: The type of compression to use on the log.
|
||||
Valid options are"bzip2" and "gzip". gzip is
|
||||
only known to work on Python 2.4 and above.
|
||||
"""
|
||||
|
||||
mail.MailNotifier.__init__(self, fromaddr, categories=categories,
|
||||
builders=builders, relayhost=relayhost,
|
||||
subject=subject,
|
||||
extraRecipients=extraRecipients,
|
||||
sendToInterestedUsers=False)
|
||||
self.tree = tree
|
||||
self.binaryURL = binaryURL
|
||||
self.logCompression = logCompression
|
||||
|
||||
def buildStarted(self, name, build):
|
||||
self.buildMessage(name, build, "building")
|
||||
|
||||
def buildMessage(self, name, build, results):
|
||||
text = ""
|
||||
res = ""
|
||||
# shortform
|
||||
t = "tinderbox:"
|
||||
|
||||
text += "%s tree: %s\n" % (t, self.tree)
|
||||
# the start time
|
||||
        # getTimes() returns a fractional time that tinderbox doesn't understand
|
||||
text += "%s builddate: %s\n" % (t, int(build.getTimes()[0]))
|
||||
text += "%s status: " % t
|
||||
|
||||
if results == "building":
|
||||
res = "building"
|
||||
text += res
|
||||
elif results == SUCCESS:
|
||||
res = "success"
|
||||
text += res
|
||||
elif results == WARNINGS:
|
||||
res = "testfailed"
|
||||
text += res
|
||||
else:
|
||||
res += "busted"
|
||||
text += res
|
||||
|
||||
text += "\n";
|
||||
|
||||
text += "%s build: %s\n" % (t, name)
|
||||
text += "%s errorparser: unix\n" % t # always use the unix errorparser
|
||||
|
||||
# if the build just started...
|
||||
if results == "building":
|
||||
text += "%s END\n" % t
|
||||
# if the build finished...
|
||||
else:
|
||||
text += "%s binaryurl: %s\n" % (t, self.binaryURL)
|
||||
text += "%s logcompression: %s\n" % (t, self.logCompression)
|
||||
|
||||
# logs will always be appended
|
||||
tinderboxLogs = ""
|
||||
for log in build.getLogs():
|
||||
l = ""
|
||||
logEncoding = ""
|
||||
if self.logCompression == "bzip2":
|
||||
compressedLog = bz2.compress(log.getText())
|
||||
l = base64.encodestring(compressedLog)
|
||||
logEncoding = "base64";
|
||||
elif self.logCompression == "gzip":
|
||||
compressedLog = zlib.compress(log.getText())
|
||||
l = base64.encodestring(compressedLog)
|
||||
logEncoding = "base64";
|
||||
else:
|
||||
l = log.getText()
|
||||
tinderboxLogs += l
|
||||
|
||||
text += "%s logencoding: %s\n" % (t, logEncoding)
|
||||
text += "%s END\n\n" % t
|
||||
text += tinderboxLogs
|
||||
text += "\n"
|
||||
|
||||
m = Message()
|
||||
m.set_payload(text)
|
||||
|
||||
m['Date'] = formatdate(localtime=True)
|
||||
m['Subject'] = self.subject % { 'result': res,
|
||||
'builder': name,
|
||||
}
|
||||
m['From'] = self.fromaddr
|
||||
# m['To'] is added later
|
||||
|
||||
d = defer.DeferredList([])
|
||||
d.addCallback(self._gotRecipients, self.extraRecipients, m)
|
||||
return d
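    # Sketch of the log handling above, assuming logCompression="bzip2":
    # each log body is compressed and then base64-encoded before being
    # appended after the "END" marker, roughly
    #
    #   encoded = base64.encodestring(bz2.compress(log.getText()))
    #
    # and the mail reports "logencoding: base64".  With logCompression left
    # empty the raw text is appended and the logencoding field stays blank.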
|
||||
|
614
tools/buildbot/buildbot/status/words.py
Normal file
@ -0,0 +1,614 @@
|
||||
#! /usr/bin/python
|
||||
|
||||
# code to deliver build status through twisted.words (instant messaging
|
||||
# protocols: irc, etc)
|
||||
|
||||
import re, shlex
|
||||
|
||||
from twisted.internet import protocol, reactor
|
||||
try:
|
||||
# Twisted-2.0
|
||||
from twisted.words.protocols import irc
|
||||
except ImportError:
|
||||
# Twisted-1.3
|
||||
from twisted.protocols import irc
|
||||
from twisted.python import log, failure
|
||||
from twisted.application import internet
|
||||
|
||||
from buildbot import interfaces, util
|
||||
from buildbot import version
|
||||
from buildbot.sourcestamp import SourceStamp
|
||||
from buildbot.process.base import BuildRequest
|
||||
from buildbot.status import base
|
||||
from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE, EXCEPTION
|
||||
from buildbot.scripts.runner import ForceOptions
|
||||
|
||||
class UsageError(ValueError):
|
||||
def __init__(self, string = "Invalid usage", *more):
|
||||
ValueError.__init__(self, string, *more)
|
||||
|
||||
class IrcBuildRequest:
|
||||
hasStarted = False
|
||||
timer = None
|
||||
|
||||
def __init__(self, parent, reply):
|
||||
self.parent = parent
|
||||
self.reply = reply
|
||||
self.timer = reactor.callLater(5, self.soon)
|
||||
|
||||
def soon(self):
|
||||
del self.timer
|
||||
if not self.hasStarted:
|
||||
self.parent.reply(self.reply,
|
||||
"The build has been queued, I'll give a shout"
|
||||
" when it starts")
|
||||
|
||||
def started(self, c):
|
||||
self.hasStarted = True
|
||||
if self.timer:
|
||||
self.timer.cancel()
|
||||
del self.timer
|
||||
s = c.getStatus()
|
||||
eta = s.getETA()
|
||||
response = "build #%d forced" % s.getNumber()
|
||||
if eta is not None:
|
||||
response = "build forced [ETA %s]" % self.parent.convertTime(eta)
|
||||
self.parent.reply(self.reply, response)
|
||||
self.parent.reply(self.reply,
|
||||
"I'll give a shout when the build finishes")
|
||||
d = s.waitUntilFinished()
|
||||
d.addCallback(self.parent.buildFinished, self.reply)
|
||||
|
||||
|
||||
class IrcStatusBot(irc.IRCClient):
|
||||
silly = {
|
||||
"What happen ?": "Somebody set up us the bomb.",
|
||||
"It's You !!": ["How are you gentlemen !!",
|
||||
"All your base are belong to us.",
|
||||
"You are on the way to destruction."],
|
||||
"What you say !!": ["You have no chance to survive make your time.",
|
||||
"HA HA HA HA ...."],
|
||||
}
|
||||
def __init__(self, nickname, password, channels, status, categories):
|
||||
"""
|
||||
@type nickname: string
|
||||
@param nickname: the nickname by which this bot should be known
|
||||
@type password: string
|
||||
@param password: the password to use for identifying with Nickserv
|
||||
@type channels: list of strings
|
||||
@param channels: the bot will maintain a presence in these channels
|
||||
@type status: L{buildbot.status.builder.Status}
|
||||
@param status: the build master's Status object, through which the
|
||||
bot retrieves all status information
|
||||
"""
|
||||
self.nickname = nickname
|
||||
self.channels = channels
|
||||
self.password = password
|
||||
self.status = status
|
||||
self.categories = categories
|
||||
self.counter = 0
|
||||
self.hasQuit = 0
|
||||
|
||||
def signedOn(self):
|
||||
if self.password:
|
||||
self.msg("Nickserv", "IDENTIFY " + self.password)
|
||||
for c in self.channels:
|
||||
self.join(c)
|
||||
def joined(self, channel):
|
||||
log.msg("I have joined", channel)
|
||||
def left(self, channel):
|
||||
log.msg("I have left", channel)
|
||||
def kickedFrom(self, channel, kicker, message):
|
||||
log.msg("I have been kicked from %s by %s: %s" % (channel,
|
||||
kicker,
|
||||
message))
|
||||
|
||||
# input
|
||||
def privmsg(self, user, channel, message):
|
||||
user = user.split('!', 1)[0] # rest is ~user@hostname
|
||||
# channel is '#twisted' or 'buildbot' (for private messages)
|
||||
channel = channel.lower()
|
||||
#print "privmsg:", user, channel, message
|
||||
if channel == self.nickname:
|
||||
# private message
|
||||
message = "%s: %s" % (self.nickname, message)
|
||||
reply = user
|
||||
else:
|
||||
reply = channel
|
||||
if message.startswith("%s:" % self.nickname):
|
||||
message = message[len("%s:" % self.nickname):]
|
||||
|
||||
message = message.lstrip()
|
||||
if self.silly.has_key(message):
|
||||
return self.doSilly(user, reply, message)
|
||||
|
||||
parts = message.split(' ', 1)
|
||||
if len(parts) == 1:
|
||||
parts = parts + ['']
|
||||
cmd, args = parts
|
||||
log.msg("irc command", cmd)
|
||||
|
||||
meth = self.getCommandMethod(cmd)
|
||||
        if not meth and message and message[-1] == '!':
|
||||
meth = self.command_EXCITED
|
||||
|
||||
error = None
|
||||
try:
|
||||
if meth:
|
||||
meth(user, reply, args.strip())
|
||||
except UsageError, e:
|
||||
self.reply(reply, str(e))
|
||||
except:
|
||||
f = failure.Failure()
|
||||
log.err(f)
|
||||
error = "Something bad happened (see logs): %s" % f.type
|
||||
|
||||
if error:
|
||||
try:
|
||||
self.reply(reply, error)
|
||||
except:
|
||||
log.err()
|
||||
|
||||
#self.say(channel, "count %d" % self.counter)
|
||||
self.counter += 1
|
||||
def reply(self, dest, message):
|
||||
# maybe self.notice(dest, message) instead?
|
||||
self.msg(dest, message)
|
||||
|
||||
def getCommandMethod(self, command):
|
||||
meth = getattr(self, 'command_' + command.upper(), None)
|
||||
return meth
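    # Command dispatch is purely by name: "status" maps to command_STATUS,
    # "force" to command_FORCE, and so on.  Adding a new IRC command is just
    # a matter of defining a command_FOO method, optionally with a .usage
    # attribute so that 'help foo' and 'commands' can describe it.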
|
||||
|
||||
def getBuilder(self, which):
|
||||
try:
|
||||
b = self.status.getBuilder(which)
|
||||
except KeyError:
|
||||
raise UsageError, "no such builder '%s'" % which
|
||||
return b
|
||||
|
||||
def getControl(self, which):
|
||||
if not self.control:
|
||||
raise UsageError("builder control is not enabled")
|
||||
try:
|
||||
bc = self.control.getBuilder(which)
|
||||
except KeyError:
|
||||
raise UsageError("no such builder '%s'" % which)
|
||||
return bc
|
||||
|
||||
def getAllBuilders(self):
|
||||
"""
|
||||
@rtype: list of L{buildbot.process.builder.Builder}
|
||||
"""
|
||||
names = self.status.getBuilderNames(categories=self.categories)
|
||||
names.sort()
|
||||
builders = [self.status.getBuilder(n) for n in names]
|
||||
return builders
|
||||
|
||||
def convertTime(self, seconds):
|
||||
if seconds < 60:
|
||||
return "%d seconds" % seconds
|
||||
minutes = int(seconds / 60)
|
||||
seconds = seconds - 60*minutes
|
||||
if minutes < 60:
|
||||
return "%dm%02ds" % (minutes, seconds)
|
||||
hours = int(minutes / 60)
|
||||
minutes = minutes - 60*hours
|
||||
return "%dh%02dm%02ds" % (hours, minutes, seconds)
|
||||
|
||||
def doSilly(self, user, reply, message):
|
||||
response = self.silly[message]
|
||||
if type(response) != type([]):
|
||||
response = [response]
|
||||
when = 0.5
|
||||
for r in response:
|
||||
reactor.callLater(when, self.reply, reply, r)
|
||||
when += 2.5
|
||||
|
||||
def command_HELLO(self, user, reply, args):
|
||||
self.reply(reply, "yes?")
|
||||
|
||||
def command_VERSION(self, user, reply, args):
|
||||
self.reply(reply, "buildbot-%s at your service" % version)
|
||||
|
||||
def command_LIST(self, user, reply, args):
|
||||
args = args.split()
|
||||
if len(args) == 0:
|
||||
raise UsageError, "try 'list builders'"
|
||||
if args[0] == 'builders':
|
||||
builders = self.getAllBuilders()
|
||||
str = "Configured builders: "
|
||||
for b in builders:
|
||||
str += b.name
|
||||
state = b.getState()[0]
|
||||
if state == 'offline':
|
||||
str += "[offline]"
|
||||
str += " "
|
||||
            str = str.rstrip()
|
||||
self.reply(reply, str)
|
||||
return
|
||||
command_LIST.usage = "list builders - List configured builders"
|
||||
|
||||
def command_STATUS(self, user, reply, args):
|
||||
args = args.split()
|
||||
if len(args) == 0:
|
||||
which = "all"
|
||||
elif len(args) == 1:
|
||||
which = args[0]
|
||||
else:
|
||||
raise UsageError, "try 'status <builder>'"
|
||||
if which == "all":
|
||||
builders = self.getAllBuilders()
|
||||
for b in builders:
|
||||
self.emit_status(reply, b.name)
|
||||
return
|
||||
self.emit_status(reply, which)
|
||||
command_STATUS.usage = "status [<which>] - List status of a builder (or all builders)"
|
||||
|
||||
def command_WATCH(self, user, reply, args):
|
||||
args = args.split()
|
||||
if len(args) != 1:
|
||||
raise UsageError("try 'watch <builder>'")
|
||||
which = args[0]
|
||||
b = self.getBuilder(which)
|
||||
builds = b.getCurrentBuilds()
|
||||
if not builds:
|
||||
self.reply(reply, "there are no builds currently running")
|
||||
return
|
||||
for build in builds:
|
||||
assert not build.isFinished()
|
||||
d = build.waitUntilFinished()
|
||||
d.addCallback(self.buildFinished, reply)
|
||||
r = "watching build %s #%d until it finishes" \
|
||||
% (which, build.getNumber())
|
||||
eta = build.getETA()
|
||||
if eta is not None:
|
||||
r += " [%s]" % self.convertTime(eta)
|
||||
r += ".."
|
||||
self.reply(reply, r)
|
||||
command_WATCH.usage = "watch <which> - announce the completion of an active build"
|
||||
|
||||
def buildFinished(self, b, reply):
|
||||
results = {SUCCESS: "Success",
|
||||
WARNINGS: "Warnings",
|
||||
FAILURE: "Failure",
|
||||
EXCEPTION: "Exception",
|
||||
}
|
||||
|
||||
# only notify about builders we are interested in
|
||||
builder = b.getBuilder()
|
||||
log.msg('builder %r in category %s finished' % (builder,
|
||||
builder.category))
|
||||
if (self.categories != None and
|
||||
builder.category not in self.categories):
|
||||
return
|
||||
|
||||
r = "Hey! build %s #%d is complete: %s" % \
|
||||
(b.getBuilder().getName(),
|
||||
b.getNumber(),
|
||||
results.get(b.getResults(), "??"))
|
||||
r += " [%s]" % " ".join(b.getText())
|
||||
self.reply(reply, r)
|
||||
buildurl = self.status.getURLForThing(b)
|
||||
if buildurl:
|
||||
self.reply(reply, "Build details are at %s" % buildurl)
|
||||
|
||||
def command_FORCE(self, user, reply, args):
|
||||
args = shlex.split(args) # TODO: this requires python2.3 or newer
|
||||
if args.pop(0) != "build":
|
||||
raise UsageError("try 'force build WHICH <REASON>'")
|
||||
opts = ForceOptions()
|
||||
opts.parseOptions(args)
|
||||
|
||||
which = opts['builder']
|
||||
branch = opts['branch']
|
||||
revision = opts['revision']
|
||||
reason = opts['reason']
|
||||
|
||||
# keep weird stuff out of the branch and revision strings. TODO:
|
||||
# centralize this somewhere.
|
||||
if branch and not re.match(r'^[\w\.\-\/]*$', branch):
|
||||
log.msg("bad branch '%s'" % branch)
|
||||
self.reply(reply, "sorry, bad branch '%s'" % branch)
|
||||
return
|
||||
if revision and not re.match(r'^[\w\.\-\/]*$', revision):
|
||||
log.msg("bad revision '%s'" % revision)
|
||||
self.reply(reply, "sorry, bad revision '%s'" % revision)
|
||||
return
|
||||
|
||||
bc = self.getControl(which)
|
||||
|
||||
who = None # TODO: if we can authenticate that a particular User
|
||||
# asked for this, use User Name instead of None so they'll
|
||||
# be informed of the results.
|
||||
# TODO: or, monitor this build and announce the results through the
|
||||
# 'reply' argument.
|
||||
r = "forced: by IRC user <%s>: %s" % (user, reason)
|
||||
# TODO: maybe give certain users the ability to request builds of
|
||||
# certain branches
|
||||
s = SourceStamp(branch=branch, revision=revision)
|
||||
req = BuildRequest(r, s, which)
|
||||
try:
|
||||
bc.requestBuildSoon(req)
|
||||
except interfaces.NoSlaveError:
|
||||
self.reply(reply,
|
||||
"sorry, I can't force a build: all slaves are offline")
|
||||
return
|
||||
ireq = IrcBuildRequest(self, reply)
|
||||
req.subscribe(ireq.started)
|
||||
|
||||
|
||||
command_FORCE.usage = "force build <which> <reason> - Force a build"
|
||||
|
||||
def command_STOP(self, user, reply, args):
|
||||
args = args.split(None, 2)
|
||||
if len(args) < 3 or args[0] != 'build':
|
||||
raise UsageError, "try 'stop build WHICH <REASON>'"
|
||||
which = args[1]
|
||||
reason = args[2]
|
||||
|
||||
buildercontrol = self.getControl(which)
|
||||
|
||||
who = None
|
||||
r = "stopped: by IRC user <%s>: %s" % (user, reason)
|
||||
|
||||
# find an in-progress build
|
||||
builderstatus = self.getBuilder(which)
|
||||
builds = builderstatus.getCurrentBuilds()
|
||||
if not builds:
|
||||
self.reply(reply, "sorry, no build is currently running")
|
||||
return
|
||||
for build in builds:
|
||||
num = build.getNumber()
|
||||
|
||||
# obtain the BuildControl object
|
||||
buildcontrol = buildercontrol.getBuild(num)
|
||||
|
||||
# make it stop
|
||||
buildcontrol.stopBuild(r)
|
||||
|
||||
self.reply(reply, "build %d interrupted" % num)
|
||||
|
||||
command_STOP.usage = "stop build <which> <reason> - Stop a running build"
|
||||
|
||||
def emit_status(self, reply, which):
|
||||
b = self.getBuilder(which)
|
||||
str = "%s: " % which
|
||||
state, builds = b.getState()
|
||||
str += state
|
||||
if state == "idle":
|
||||
last = b.getLastFinishedBuild()
|
||||
if last:
|
||||
start,finished = last.getTimes()
|
||||
str += ", last build %s secs ago: %s" % \
|
||||
(int(util.now() - finished), " ".join(last.getText()))
|
||||
if state == "building":
|
||||
t = []
|
||||
for build in builds:
|
||||
step = build.getCurrentStep()
|
||||
s = "(%s)" % " ".join(step.getText())
|
||||
ETA = build.getETA()
|
||||
if ETA is not None:
|
||||
s += " [ETA %s]" % self.convertTime(ETA)
|
||||
t.append(s)
|
||||
str += ", ".join(t)
|
||||
self.reply(reply, str)
|
||||
|
||||
def emit_last(self, reply, which):
|
||||
last = self.getBuilder(which).getLastFinishedBuild()
|
||||
if not last:
|
||||
str = "(no builds run since last restart)"
|
||||
else:
|
||||
start,finish = last.getTimes()
|
||||
str = "%s secs ago: " % (int(util.now() - finish))
|
||||
str += " ".join(last.getText())
|
||||
self.reply(reply, "last build [%s]: %s" % (which, str))
|
||||
|
||||
def command_LAST(self, user, reply, args):
|
||||
args = args.split()
|
||||
if len(args) == 0:
|
||||
which = "all"
|
||||
elif len(args) == 1:
|
||||
which = args[0]
|
||||
else:
|
||||
raise UsageError, "try 'last <builder>'"
|
||||
if which == "all":
|
||||
builders = self.getAllBuilders()
|
||||
for b in builders:
|
||||
self.emit_last(reply, b.name)
|
||||
return
|
||||
self.emit_last(reply, which)
|
||||
command_LAST.usage = "last <which> - list last build status for builder <which>"
|
||||
|
||||
def build_commands(self):
|
||||
commands = []
|
||||
for k in self.__class__.__dict__.keys():
|
||||
if k.startswith('command_'):
|
||||
commands.append(k[8:].lower())
|
||||
commands.sort()
|
||||
return commands
|
||||
|
||||
def command_HELP(self, user, reply, args):
|
||||
args = args.split()
|
||||
if len(args) == 0:
|
||||
self.reply(reply, "Get help on what? (try 'help <foo>', or 'commands' for a command list)")
|
||||
return
|
||||
command = args[0]
|
||||
meth = self.getCommandMethod(command)
|
||||
if not meth:
|
||||
raise UsageError, "no such command '%s'" % command
|
||||
usage = getattr(meth, 'usage', None)
|
||||
if usage:
|
||||
self.reply(reply, "Usage: %s" % usage)
|
||||
else:
|
||||
self.reply(reply, "No usage info for '%s'" % command)
|
||||
command_HELP.usage = "help <command> - Give help for <command>"
|
||||
|
||||
def command_SOURCE(self, user, reply, args):
|
||||
banner = "My source can be found at http://buildbot.sourceforge.net/"
|
||||
self.reply(reply, banner)
|
||||
|
||||
def command_COMMANDS(self, user, reply, args):
|
||||
commands = self.build_commands()
|
||||
str = "buildbot commands: " + ", ".join(commands)
|
||||
self.reply(reply, str)
|
||||
command_COMMANDS.usage = "commands - List available commands"
|
||||
|
||||
def command_DESTROY(self, user, reply, args):
|
||||
self.me(reply, "readies phasers")
|
||||
|
||||
def command_DANCE(self, user, reply, args):
|
||||
reactor.callLater(1.0, self.reply, reply, "0-<")
|
||||
reactor.callLater(3.0, self.reply, reply, "0-/")
|
||||
reactor.callLater(3.5, self.reply, reply, "0-\\")
|
||||
|
||||
def command_EXCITED(self, user, reply, args):
|
||||
# like 'buildbot: destroy the sun!'
|
||||
self.reply(reply, "What you say!")
|
||||
|
||||
def action(self, user, channel, data):
|
||||
#log.msg("action: %s,%s,%s" % (user, channel, data))
|
||||
user = user.split('!', 1)[0] # rest is ~user@hostname
|
||||
# somebody did an action (/me actions)
|
||||
if data.endswith("s buildbot"):
|
||||
words = data.split()
|
||||
verb = words[-2]
|
||||
timeout = 4
|
||||
if verb == "kicks":
|
||||
response = "%s back" % verb
|
||||
timeout = 1
|
||||
else:
|
||||
response = "%s %s too" % (verb, user)
|
||||
reactor.callLater(timeout, self.me, channel, response)
|
||||
# userJoined(self, user, channel)
|
||||
|
||||
# output
|
||||
# self.say(channel, message) # broadcast
|
||||
# self.msg(user, message) # unicast
|
||||
# self.me(channel, action) # send action
|
||||
# self.away(message='')
|
||||
# self.quit(message='')
|
||||
|
||||
class ThrottledClientFactory(protocol.ClientFactory):
|
||||
lostDelay = 2
|
||||
failedDelay = 60
|
||||
def clientConnectionLost(self, connector, reason):
|
||||
reactor.callLater(self.lostDelay, connector.connect)
|
||||
def clientConnectionFailed(self, connector, reason):
|
||||
reactor.callLater(self.failedDelay, connector.connect)
|
||||
|
||||
class IrcStatusFactory(ThrottledClientFactory):
|
||||
protocol = IrcStatusBot
|
||||
|
||||
status = None
|
||||
control = None
|
||||
shuttingDown = False
|
||||
p = None
|
||||
|
||||
def __init__(self, nickname, password, channels, categories):
|
||||
#ThrottledClientFactory.__init__(self) # doesn't exist
|
||||
self.status = None
|
||||
self.nickname = nickname
|
||||
self.password = password
|
||||
self.channels = channels
|
||||
self.categories = categories
|
||||
|
||||
def __getstate__(self):
|
||||
d = self.__dict__.copy()
|
||||
del d['p']
|
||||
return d
|
||||
|
||||
def shutdown(self):
|
||||
self.shuttingDown = True
|
||||
if self.p:
|
||||
self.p.quit("buildmaster reconfigured: bot disconnecting")
|
||||
|
||||
def buildProtocol(self, address):
|
||||
p = self.protocol(self.nickname, self.password,
|
||||
self.channels, self.status,
|
||||
self.categories)
|
||||
p.factory = self
|
||||
p.status = self.status
|
||||
p.control = self.control
|
||||
self.p = p
|
||||
return p
|
||||
|
||||
# TODO: I think a shutdown that occurs while the connection is being
|
||||
# established will make this explode
|
||||
|
||||
def clientConnectionLost(self, connector, reason):
|
||||
if self.shuttingDown:
|
||||
log.msg("not scheduling reconnection attempt")
|
||||
return
|
||||
ThrottledClientFactory.clientConnectionLost(self, connector, reason)
|
||||
|
||||
def clientConnectionFailed(self, connector, reason):
|
||||
if self.shuttingDown:
|
||||
log.msg("not scheduling reconnection attempt")
|
||||
return
|
||||
ThrottledClientFactory.clientConnectionFailed(self, connector, reason)
|
||||
|
||||
|
||||
class IRC(base.StatusReceiverMultiService):
|
||||
"""I am an IRC bot which can be queried for status information. I
|
||||
connect to a single IRC server and am known by a single nickname on that
|
||||
server, however I can join multiple channels."""
|
||||
|
||||
compare_attrs = ["host", "port", "nick", "password",
|
||||
"channels", "allowForce",
|
||||
"categories"]
|
||||
|
||||
def __init__(self, host, nick, channels, port=6667, allowForce=True,
|
||||
categories=None, password=None):
|
||||
base.StatusReceiverMultiService.__init__(self)
|
||||
|
||||
assert allowForce in (True, False) # TODO: implement others
|
||||
|
||||
# need to stash these so we can detect changes later
|
||||
self.host = host
|
||||
self.port = port
|
||||
self.nick = nick
|
||||
self.channels = channels
|
||||
self.password = password
|
||||
self.allowForce = allowForce
|
||||
self.categories = categories
|
||||
|
||||
# need to stash the factory so we can give it the status object
|
||||
self.f = IrcStatusFactory(self.nick, self.password,
|
||||
self.channels, self.categories)
|
||||
|
||||
c = internet.TCPClient(host, port, self.f)
|
||||
c.setServiceParent(self)
|
||||
|
||||
def setServiceParent(self, parent):
|
||||
base.StatusReceiverMultiService.setServiceParent(self, parent)
|
||||
self.f.status = parent.getStatus()
|
||||
if self.allowForce:
|
||||
self.f.control = interfaces.IControl(parent)
|
||||
|
||||
def stopService(self):
|
||||
# make sure the factory will stop reconnecting
|
||||
self.f.shutdown()
|
||||
return base.StatusReceiverMultiService.stopService(self)
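    # Typical master.cfg usage (a sketch: the server, nick and channel are
    # placeholders, and the standard c['status'] list is assumed):
    #
    #   from buildbot.status import words
    #   c['status'].append(words.IRC(host="irc.example.net", nick="bbot",
    #                                channels=["#builds"], allowForce=False))
    #
    # With allowForce=False the factory never receives an IControl object,
    # so the 'force' and 'stop' commands reply that builder control is not
    # enabled.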
|
||||
|
||||
|
||||
def main():
    # minimal stand-alone smoke test: connect a status-less bot to a local
    # IRC server (the server, channels, and nickname here are placeholders)
    f = IrcStatusFactory(nickname="buildbot", password=None,
                         channels=["#private", "#other"], categories=None)
    reactor.connectTCP("localhost", 6667, f)
    reactor.run()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
||||
## buildbot: list builders
|
||||
# buildbot: watch quick
|
||||
# print notification when current build in 'quick' finishes
|
||||
## buildbot: status
|
||||
## buildbot: status full-2.3
|
||||
## building, not, % complete, ETA
|
||||
## buildbot: force build full-2.3 "reason"
|
0
tools/buildbot/buildbot/steps/__init__.py
Normal file
100
tools/buildbot/buildbot/steps/dummy.py
Normal file
@ -0,0 +1,100 @@
|
||||
|
||||
from twisted.internet import reactor
|
||||
from buildbot.process.buildstep import BuildStep, LoggingBuildStep
|
||||
from buildbot.process.buildstep import LoggedRemoteCommand
|
||||
from buildbot.status.builder import SUCCESS, FAILURE
|
||||
|
||||
# these classes are used internally by buildbot unit tests
|
||||
|
||||
class Dummy(BuildStep):
|
||||
"""I am a dummy no-op step, which runs entirely on the master, and simply
|
||||
waits 5 seconds before finishing with SUCCESS
|
||||
"""
|
||||
|
||||
haltOnFailure = True
|
||||
name = "dummy"
|
||||
|
||||
def __init__(self, timeout=5, **kwargs):
|
||||
"""
|
||||
@type timeout: int
|
||||
@param timeout: the number of seconds to delay before completing
|
||||
"""
|
||||
BuildStep.__init__(self, **kwargs)
|
||||
self.timeout = timeout
|
||||
self.timer = None
|
||||
|
||||
def start(self):
|
||||
self.step_status.setColor("yellow")
|
||||
self.step_status.setText(["delay", "%s secs" % self.timeout])
|
||||
self.timer = reactor.callLater(self.timeout, self.done)
|
||||
|
||||
def interrupt(self, reason):
|
||||
if self.timer:
|
||||
self.timer.cancel()
|
||||
self.timer = None
|
||||
self.step_status.setColor("red")
|
||||
self.step_status.setText(["delay", "interrupted"])
|
||||
self.finished(FAILURE)
|
||||
|
||||
def done(self):
|
||||
self.step_status.setColor("green")
|
||||
self.finished(SUCCESS)
|
||||
|
||||
class FailingDummy(Dummy):
|
||||
"""I am a dummy no-op step that 'runs' master-side and finishes (with a
|
||||
FAILURE status) after 5 seconds."""
|
||||
|
||||
name = "failing dummy"
|
||||
|
||||
def start(self):
|
||||
self.step_status.setColor("yellow")
|
||||
self.step_status.setText(["boom", "%s secs" % self.timeout])
|
||||
self.timer = reactor.callLater(self.timeout, self.done)
|
||||
|
||||
def done(self):
|
||||
self.step_status.setColor("red")
|
||||
self.finished(FAILURE)
|
||||
|
||||
class RemoteDummy(LoggingBuildStep):
|
||||
"""I am a dummy no-op step that runs on the remote side and
|
||||
simply waits 5 seconds before completing with success.
|
||||
See L{buildbot.slave.commands.DummyCommand}
|
||||
"""
|
||||
|
||||
haltOnFailure = True
|
||||
name = "remote dummy"
|
||||
|
||||
def __init__(self, timeout=5, **kwargs):
|
||||
"""
|
||||
@type timeout: int
|
||||
@param timeout: the number of seconds to delay
|
||||
"""
|
||||
LoggingBuildStep.__init__(self, **kwargs)
|
||||
self.timeout = timeout
|
||||
self.description = ["remote", "delay", "%s secs" % timeout]
|
||||
|
||||
def describe(self, done=False):
|
||||
return self.description
|
||||
|
||||
def start(self):
|
||||
args = {'timeout': self.timeout}
|
||||
cmd = LoggedRemoteCommand("dummy", args)
|
||||
self.startCommand(cmd)
|
||||
|
||||
class Wait(LoggingBuildStep):
|
||||
"""I start a command on the slave that waits for the unit test to
|
||||
tell it when to finish.
|
||||
"""
|
||||
|
||||
name = "wait"
|
||||
def __init__(self, handle, **kwargs):
|
||||
LoggingBuildStep.__init__(self, **kwargs)
|
||||
self.handle = handle
|
||||
|
||||
def describe(self, done=False):
|
||||
return ["wait: %s" % self.handle]
|
||||
|
||||
def start(self):
|
||||
args = {'handle': (self.handle, self.build.reason)}
|
||||
cmd = LoggedRemoteCommand("dummy.wait", args)
|
||||
self.startCommand(cmd)
|
46
tools/buildbot/buildbot/steps/maxq.py
Normal file
@ -0,0 +1,46 @@
|
||||
from buildbot.steps.shell import ShellCommand
|
||||
from buildbot.status import event, builder
|
||||
|
||||
class MaxQ(ShellCommand):
|
||||
flunkOnFailure = True
|
||||
name = "maxq"
|
||||
|
||||
def __init__(self, testdir=None, **kwargs):
|
||||
if not testdir:
|
||||
raise TypeError("please pass testdir")
|
||||
command = 'run_maxq.py %s' % (testdir,)
|
||||
ShellCommand.__init__(self, command=command, **kwargs)
|
||||
|
||||
def startStatus(self):
|
||||
evt = event.Event("yellow", ['running', 'maxq', 'tests'],
|
||||
files={'log': self.log})
|
||||
self.setCurrentActivity(evt)
|
||||
|
||||
|
||||
def finished(self, rc):
|
||||
self.failures = 0
|
||||
if rc:
|
||||
self.failures = 1
|
||||
output = self.log.getAll()
|
||||
self.failures += output.count('\nTEST FAILURE:')
|
||||
|
||||
result = (builder.SUCCESS, ['maxq'])
|
||||
|
||||
if self.failures:
|
||||
result = (builder.FAILURE,
|
||||
[str(self.failures), 'maxq', 'failures'])
|
||||
|
||||
return self.stepComplete(result)
|
||||
|
||||
def finishStatus(self, result):
|
||||
if self.failures:
|
||||
color = "red"
|
||||
text = ["maxq", "failed"]
|
||||
else:
|
||||
color = "green"
|
||||
text = ['maxq', 'tests']
|
||||
self.updateCurrentActivity(color=color, text=text)
|
||||
self.finishStatusSummary()
|
||||
self.finishCurrentActivity()
|
||||
|
||||
|
112
tools/buildbot/buildbot/steps/python.py
Normal file
@ -0,0 +1,112 @@
|
||||
|
||||
from buildbot.status.builder import SUCCESS, FAILURE, WARNINGS
|
||||
from buildbot.steps.shell import ShellCommand
|
||||
|
||||
try:
|
||||
import cStringIO
|
||||
StringIO = cStringIO.StringIO
|
||||
except ImportError:
|
||||
from StringIO import StringIO
|
||||
|
||||
|
||||
class BuildEPYDoc(ShellCommand):
|
||||
name = "epydoc"
|
||||
command = ["make", "epydocs"]
|
||||
description = ["building", "epydocs"]
|
||||
descriptionDone = ["epydoc"]
|
||||
|
||||
def createSummary(self, log):
|
||||
import_errors = 0
|
||||
warnings = 0
|
||||
errors = 0
|
||||
|
||||
for line in StringIO(log.getText()):
|
||||
if line.startswith("Error importing "):
|
||||
import_errors += 1
|
||||
if line.find("Warning: ") != -1:
|
||||
warnings += 1
|
||||
if line.find("Error: ") != -1:
|
||||
errors += 1
|
||||
|
||||
self.descriptionDone = self.descriptionDone[:]
|
||||
if import_errors:
|
||||
self.descriptionDone.append("ierr=%d" % import_errors)
|
||||
if warnings:
|
||||
self.descriptionDone.append("warn=%d" % warnings)
|
||||
if errors:
|
||||
self.descriptionDone.append("err=%d" % errors)
|
||||
|
||||
self.import_errors = import_errors
|
||||
self.warnings = warnings
|
||||
self.errors = errors
|
||||
|
||||
def evaluateCommand(self, cmd):
|
||||
if cmd.rc != 0:
|
||||
return FAILURE
|
||||
if self.warnings or self.errors:
|
||||
return WARNINGS
|
||||
return SUCCESS
|
||||
|
||||
|
||||
class PyFlakes(ShellCommand):
|
||||
name = "pyflakes"
|
||||
command = ["make", "pyflakes"]
|
||||
description = ["running", "pyflakes"]
|
||||
descriptionDone = ["pyflakes"]
|
||||
flunkOnFailure = False
|
||||
flunkingIssues = ["undefined"] # any pyflakes lines like this cause FAILURE
|
||||
|
||||
MESSAGES = ("unused", "undefined", "redefs", "import*", "misc")
|
||||
|
||||
def createSummary(self, log):
|
||||
counts = {}
|
||||
summaries = {}
|
||||
for m in self.MESSAGES:
|
||||
counts[m] = 0
|
||||
summaries[m] = []
|
||||
|
||||
first = True
|
||||
for line in StringIO(log.getText()).readlines():
|
||||
# the first few lines might contain echoed commands from a 'make
|
||||
# pyflakes' step, so don't count these as warnings. Stop ignoring
|
||||
# the initial lines as soon as we see one with a colon.
|
||||
if first:
|
||||
if line.find(":") != -1:
|
||||
# there's the colon, this is the first real line
|
||||
first = False
|
||||
# fall through and parse the line
|
||||
else:
|
||||
# skip this line, keep skipping non-colon lines
|
||||
continue
|
||||
if line.find("imported but unused") != -1:
|
||||
m = "unused"
|
||||
elif line.find("*' used; unable to detect undefined names") != -1:
|
||||
m = "import*"
|
||||
elif line.find("undefined name") != -1:
|
||||
m = "undefined"
|
||||
elif line.find("redefinition of unused") != -1:
|
||||
m = "redefs"
|
||||
else:
|
||||
m = "misc"
|
||||
summaries[m].append(line)
|
||||
counts[m] += 1
|
||||
|
||||
self.descriptionDone = self.descriptionDone[:]
|
||||
for m in self.MESSAGES:
|
||||
if counts[m]:
|
||||
self.descriptionDone.append("%s=%d" % (m, counts[m]))
|
||||
self.addCompleteLog(m, "".join(summaries[m]))
|
||||
self.setProperty("pyflakes-%s" % m, counts[m])
|
||||
self.setProperty("pyflakes-total", sum(counts.values()))
|
||||
|
||||
|
||||
def evaluateCommand(self, cmd):
|
||||
if cmd.rc != 0:
|
||||
return FAILURE
|
||||
for m in self.flunkingIssues:
|
||||
if self.getProperty("pyflakes-%s" % m):
|
||||
return FAILURE
|
||||
if self.getProperty("pyflakes-total"):
|
||||
return WARNINGS
|
||||
return SUCCESS
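    # Classification example (hypothetical pyflakes output lines):
    #
    #   "foo.py:10: 'os' imported but unused"  -> counted as "unused"
    #   "foo.py:22: undefined name 'bar'"      -> counted as "undefined"
    #
    # Since flunkingIssues defaults to ["undefined"], the second line makes
    # the step FAIL even when the command exits with rc == 0, while unused
    # imports alone only downgrade the result to WARNINGS.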
|
||||
|
806
tools/buildbot/buildbot/steps/python_twisted.py
Normal file
@ -0,0 +1,806 @@
|
||||
# -*- test-case-name: buildbot.test.test_twisted -*-
|
||||
|
||||
from twisted.python import log
|
||||
|
||||
from buildbot.status import tests, builder
|
||||
from buildbot.status.builder import SUCCESS, FAILURE, WARNINGS, SKIPPED
|
||||
from buildbot.process.buildstep import LogLineObserver, OutputProgressObserver
|
||||
from buildbot.process.buildstep import RemoteShellCommand
|
||||
from buildbot.steps.shell import ShellCommand
|
||||
|
||||
try:
|
||||
import cStringIO
|
||||
StringIO = cStringIO
|
||||
except ImportError:
|
||||
import StringIO
|
||||
import re
|
||||
|
||||
# BuildSteps that are specific to the Twisted source tree
|
||||
|
||||
class HLint(ShellCommand):
|
||||
"""I run a 'lint' checker over a set of .xhtml files. Any deviations
|
||||
from recommended style is flagged and put in the output log.
|
||||
|
||||
This step looks at .changes in the parent Build to extract a list of
|
||||
Lore XHTML files to check."""
|
||||
|
||||
name = "hlint"
|
||||
description = ["running", "hlint"]
|
||||
descriptionDone = ["hlint"]
|
||||
warnOnWarnings = True
|
||||
warnOnFailure = True
|
||||
# TODO: track time, but not output
|
||||
warnings = 0
|
||||
|
||||
def __init__(self, python=None, **kwargs):
|
||||
ShellCommand.__init__(self, **kwargs)
|
||||
self.python = python
|
||||
|
||||
def start(self):
|
||||
# create the command
|
||||
htmlFiles = {}
|
||||
for f in self.build.allFiles():
|
||||
if f.endswith(".xhtml") and not f.startswith("sandbox/"):
|
||||
htmlFiles[f] = 1
|
||||
# remove duplicates
|
||||
hlintTargets = htmlFiles.keys()
|
||||
hlintTargets.sort()
|
||||
if not hlintTargets:
|
||||
return SKIPPED
|
||||
self.hlintFiles = hlintTargets
|
||||
c = []
|
||||
if self.python:
|
||||
c.append(self.python)
|
||||
c += ["bin/lore", "-p", "--output", "lint"] + self.hlintFiles
|
||||
self.setCommand(c)
|
||||
|
||||
# add an extra log file to show the .html files we're checking
|
||||
self.addCompleteLog("files", "\n".join(self.hlintFiles)+"\n")
|
||||
|
||||
ShellCommand.start(self)
|
||||
|
||||
def commandComplete(self, cmd):
|
||||
# TODO: remove the 'files' file (a list of .xhtml files that were
|
||||
# submitted to hlint) because it is available in the logfile and
|
||||
# mostly exists to give the user an idea of how long the step will
|
||||
# take anyway.
|
||||
lines = cmd.logs['stdio'].getText().split("\n")
|
||||
warningLines = filter(lambda line:':' in line, lines)
|
||||
if warningLines:
|
||||
self.addCompleteLog("warnings", "".join(warningLines))
|
||||
warnings = len(warningLines)
|
||||
self.warnings = warnings
|
||||
|
||||
def evaluateCommand(self, cmd):
|
||||
# warnings are in stdout, rc is always 0, unless the tools break
|
||||
if cmd.rc != 0:
|
||||
return FAILURE
|
||||
if self.warnings:
|
||||
return WARNINGS
|
||||
return SUCCESS
|
||||
|
||||
def getText2(self, cmd, results):
|
||||
if cmd.rc != 0:
|
||||
return ["hlint"]
|
||||
return ["%d hlin%s" % (self.warnings,
|
||||
self.warnings == 1 and 't' or 'ts')]
|
||||
|
||||
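# --- Illustrative sketch; not part of the imported file. --------------------
# Attaching the HLint step above to a build.  It derives its file list from
# the Changes in the parent Build, so only the optional python= argument is
# shown; the factory wiring assumes the 0.7.x-era master.cfg API.
from buildbot.process import factory
from buildbot.steps.python_twisted import HLint

f = factory.BuildFactory()
f.addStep(HLint, workdir="Twisted", python="python2.4")
# -----------------------------------------------------------------------------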
def countFailedTests(output):
|
||||
# start scanning 10kb from the end, because there might be a few kb of
|
||||
# import exception tracebacks between the total/time line and the errors
|
||||
# line
|
||||
chunk = output[-10000:]
|
||||
lines = chunk.split("\n")
|
||||
lines.pop() # blank line at end
|
||||
# lines[-3] is "Ran NN tests in 0.242s"
|
||||
# lines[-2] is blank
|
||||
# lines[-1] is 'OK' or 'FAILED (failures=1, errors=12)'
|
||||
# or 'FAILED (failures=1)'
|
||||
# or "PASSED (skips=N, successes=N)" (for Twisted-2.0)
|
||||
# there might be other lines dumped here. Scan all the lines.
|
||||
res = {'total': None,
|
||||
'failures': 0,
|
||||
'errors': 0,
|
||||
'skips': 0,
|
||||
'expectedFailures': 0,
|
||||
'unexpectedSuccesses': 0,
|
||||
}
|
||||
for l in lines:
|
||||
out = re.search(r'Ran (\d+) tests', l)
|
||||
if out:
|
||||
res['total'] = int(out.group(1))
|
||||
if (l.startswith("OK") or
|
||||
l.startswith("FAILED ") or
|
||||
l.startswith("PASSED")):
|
||||
# the extra space on FAILED_ is to distinguish the overall
|
||||
# status from an individual test which failed. The lack of a
|
||||
# space on the OK is because it may be printed without any
|
||||
# additional text (if there are no skips,etc)
|
||||
out = re.search(r'failures=(\d+)', l)
|
||||
if out: res['failures'] = int(out.group(1))
|
||||
out = re.search(r'errors=(\d+)', l)
|
||||
if out: res['errors'] = int(out.group(1))
|
||||
out = re.search(r'skips=(\d+)', l)
|
||||
if out: res['skips'] = int(out.group(1))
|
||||
out = re.search(r'expectedFailures=(\d+)', l)
|
||||
if out: res['expectedFailures'] = int(out.group(1))
|
||||
out = re.search(r'unexpectedSuccesses=(\d+)', l)
|
||||
if out: res['unexpectedSuccesses'] = int(out.group(1))
|
||||
# successes= is a Twisted-2.0 addition, and is not currently used
|
||||
out = re.search(r'successes=(\d+)', l)
|
||||
if out: res['successes'] = int(out.group(1))
|
||||
|
||||
return res
|
||||
|
||||
|
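# --- Worked example; not part of the imported file. -------------------------
# What countFailedTests() extracts from the tail of a trial run.  The sample
# output is made up, but follows the formats listed in the comments above.
_sample_output = (
    "buildbot.test.test_config.ConfigTest.testBuilders ... [OK]\n"
    "Ran 12 tests in 0.242s\n"
    "\n"
    "FAILED (failures=1, errors=2, skips=3)\n"
)
# countFailedTests(_sample_output) returns:
#   {'total': 12, 'failures': 1, 'errors': 2, 'skips': 3,
#    'expectedFailures': 0, 'unexpectedSuccesses': 0}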
||||
class TrialTestCaseCounter(LogLineObserver):
|
||||
_line_re = re.compile(r'^([\w\.]+) \.\.\. \[([^\]]+)\]$')
|
||||
numTests = 0
|
||||
finished = False
|
||||
|
||||
def outLineReceived(self, line):
|
||||
# different versions of Twisted emit different per-test lines with
|
||||
# the bwverbose reporter.
|
||||
# 2.0.0: testSlave (buildbot.test.test_runner.Create) ... [OK]
|
||||
# 2.1.0: buildbot.test.test_runner.Create.testSlave ... [OK]
|
||||
# 2.4.0: buildbot.test.test_runner.Create.testSlave ... [OK]
|
||||
# Let's just handle the most recent version, since it's the easiest.
|
||||
|
||||
if self.finished:
|
||||
return
|
||||
if line.startswith("=" * 40):
|
||||
self.finished = True
|
||||
return
|
||||
|
||||
m = self._line_re.search(line.strip())
|
||||
if m:
|
||||
testname, result = m.groups()
|
||||
self.numTests += 1
|
||||
self.step.setProgress('tests', self.numTests)
|
||||
|
||||
|
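# --- Illustrative check; not part of the imported file. ---------------------
# The per-test regexp above only matches the Twisted-2.1.0+ bwverbose format;
# the older "testSlave (package.Class) ... [OK]" form is deliberately
# ignored, as the comments in outLineReceived() explain.
if __debug__:
    _m = TrialTestCaseCounter._line_re.search(
        "buildbot.test.test_runner.Create.testSlave ... [OK]")
    assert _m.groups() == ("buildbot.test.test_runner.Create.testSlave", "OK")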
||||
UNSPECIFIED=() # since None is a valid choice
|
||||
|
||||
class Trial(ShellCommand):
|
||||
"""I run a unit test suite using 'trial', a unittest-like testing
|
||||
framework that comes with Twisted. Trial is used to implement Twisted's
|
||||
own unit tests, and is the unittest-framework of choice for many projects
|
||||
that use Twisted internally.
|
||||
|
||||
Projects that use trial typically have all their test cases in a 'test'
|
||||
subdirectory of their top-level library directory. I.e. for my package
|
||||
'petmail', the tests are in 'petmail/test/test_*.py'. More complicated
|
||||
packages (like Twisted itself) may have multiple test directories, like
|
||||
'twisted/test/test_*.py' for the core functionality and
|
||||
'twisted/mail/test/test_*.py' for the email-specific tests.
|
||||
|
||||
To run trial tests, you run the 'trial' executable and tell it where the
|
||||
test cases are located. The most common way of doing this is with a
|
||||
module name. For petmail, I would run 'trial petmail.test' and it would
|
||||
locate all the test_*.py files under petmail/test/, running every test
|
||||
case it could find in them. Unlike the unittest.py that comes with
|
||||
Python, you do not run the test_foo.py as a script; you always let trial
|
||||
do the importing and running. The 'tests' parameter controls which tests
|
||||
trial will run: it can be a string or a list of strings.
|
||||
|
||||
You can also use a higher-level module name and pass the --recursive flag
|
||||
to trial: this will search recursively within the named module to find
|
||||
all test cases. For large multiple-test-directory projects like Twisted,
|
||||
this means you can avoid specifying all the test directories explicitly.
|
||||
Something like 'trial --recursive twisted' will pick up everything.
|
||||
|
||||
To find these test cases, you must set a PYTHONPATH that allows something
|
||||
like 'import petmail.test' to work. For packages that don't use a
|
||||
separate top-level 'lib' directory, PYTHONPATH=. will work, and will use
|
||||
the test cases (and the code they are testing) in-place.
|
||||
PYTHONPATH=build/lib or PYTHONPATH=build/lib.$ARCH are also useful when
|
||||
you do a 'setup.py build' step first. The 'testpath' attribute of this
|
||||
class controls what PYTHONPATH= is set to.
|
||||
|
||||
Trial has the ability (through the --testmodule flag) to run only the set
|
||||
of test cases named by special 'test-case-name' tags in source files. We
|
||||
can get the list of changed source files from our parent Build and
|
||||
provide them to trial, thus running the minimal set of test cases needed
|
||||
to cover the Changes. This is useful for quick builds, especially in
|
||||
trees with a lot of test cases. The 'testChanges' parameter controls this
|
||||
feature: if set, it will override 'tests'.
|
||||
|
||||
The trial executable itself is typically just 'trial' (which is usually
|
||||
found on your $PATH as /usr/bin/trial), but it can be overridden with the
|
||||
'trial' parameter. This is useful for Twisted's own unittests, which want
|
||||
to use the copy of bin/trial that comes with the sources. (when bin/trial
|
||||
discovers that it is living in a subdirectory named 'Twisted', it assumes
|
||||
it is being run from the source tree and adds that parent directory to
|
||||
PYTHONPATH. Therefore the canonical way to run Twisted's own unittest
|
||||
suite is './bin/trial twisted.test' rather than 'PYTHONPATH=.
|
||||
/usr/bin/trial twisted.test', especially handy when /usr/bin/trial has
|
||||
not yet been installed).
|
||||
|
||||
To influence the version of python being used for the tests, or to add
|
||||
flags to the command, set the 'python' parameter. This can be a string
|
||||
(like 'python2.2') or a list (like ['python2.3', '-Wall']).
|
||||
|
||||
Trial creates and switches into a directory named _trial_temp/ before
|
||||
running the tests, and sends the twisted log (which includes all
|
||||
exceptions) to a file named test.log . This file will be pulled up to
|
||||
the master where it can be seen as part of the status output.
|
||||
|
||||
There are some class attributes which may be usefully overridden
|
||||
by subclasses. 'trialMode' and 'trialArgs' can influence the trial
|
||||
command line.
|
||||
"""
|
||||
|
||||
name = "trial"
|
||||
progressMetrics = ('output', 'tests', 'test.log')
|
||||
# note: the slash only works on unix buildslaves, of course, but we have
|
||||
# no way to know what the buildslave uses as a separator. TODO: figure
|
||||
# out something clever.
|
||||
logfiles = {"test.log": "_trial_temp/test.log"}
|
||||
# we use test.log to track Progress at the end of __init__()
|
||||
|
||||
flunkOnFailure = True
|
||||
python = None
|
||||
trial = "trial"
|
||||
trialMode = ["--reporter=bwverbose"] # requires Twisted-2.1.0 or newer
|
||||
# for Twisted-2.0.0 or 1.3.0, use ["-o"] instead
|
||||
trialArgs = []
|
||||
testpath = UNSPECIFIED # required (but can be None)
|
||||
testChanges = False # TODO: needs better name
|
||||
recurse = False
|
||||
reactor = None
|
||||
randomly = False
|
||||
tests = None # required
|
||||
|
||||
def __init__(self, reactor=UNSPECIFIED, python=None, trial=None,
|
||||
testpath=UNSPECIFIED,
|
||||
tests=None, testChanges=None,
|
||||
recurse=None, randomly=None,
|
||||
trialMode=None, trialArgs=None,
|
||||
**kwargs):
|
||||
"""
|
||||
@type testpath: string
|
||||
@param testpath: use in PYTHONPATH when running the tests. If
|
||||
None, do not set PYTHONPATH. Setting this to '.' will
|
||||
cause the source files to be used in-place.
|
||||
|
||||
@type python: string (without spaces) or list
|
||||
@param python: which python executable to use. Will form the start of
|
||||
the argv array that will launch trial. If you use this,
|
||||
you should set 'trial' to an explicit path (like
|
||||
/usr/bin/trial or ./bin/trial). Defaults to None, which
|
||||
leaves it out entirely (running 'trial args' instead of
|
||||
'python ./bin/trial args'). Likely values are 'python',
|
||||
['python2.2'], ['python', '-Wall'], etc.
|
||||
|
||||
@type trial: string
|
||||
@param trial: which 'trial' executable to run.
|
||||
Defaults to 'trial', which will cause $PATH to be
|
||||
searched and probably find /usr/bin/trial . If you set
|
||||
'python', this should be set to an explicit path (because
|
||||
'python2.3 trial' will not work).
|
||||
|
||||
@type trialMode: list of strings
|
||||
@param trialMode: a list of arguments to pass to trial, specifically
|
||||
to set the reporting mode. This defaults to
|
||||
['--reporter=bwverbose'], which works with the trial
|
||||
that comes with Twisted-2.1.0 or newer. For the
|
||||
trial in Twisted-2.0.x or 1.3.0, pass ['-to'] or
|
||||
['-o'] here instead.
|
||||
|
||||
@type trialArgs: list of strings
|
||||
@param trialArgs: a list of arguments to pass to trial, available to
|
||||
turn on any extra flags you like. Defaults to [].
|
||||
|
||||
@type tests: list of strings
|
||||
@param tests: a list of test modules to run, like
|
||||
['twisted.test.test_defer', 'twisted.test.test_process'].
|
||||
If this is a string, it will be converted into a one-item
|
||||
list.
|
||||
|
||||
@type testChanges: boolean
|
||||
@param testChanges: if True, ignore the 'tests' parameter and instead
|
||||
ask the Build for all the files that make up the
|
||||
Changes going into this build. Pass these filenames
|
||||
to trial and ask it to look for test-case-name
|
||||
tags, running just the tests necessary to cover the
|
||||
changes.
|
||||
|
||||
@type recurse: boolean
|
||||
@param recurse: If True, pass the --recurse option to trial, allowing
|
||||
test cases to be found in deeper subdirectories of the
|
||||
modules listed in 'tests'. This does not appear to be
|
||||
necessary when using testChanges.
|
||||
|
||||
@type reactor: string
|
||||
@param reactor: which reactor to use, like 'gtk' or 'java'. If not
|
||||
provided, the Twisted's usual platform-dependent
|
||||
default is used.
|
||||
|
||||
@type randomly: boolean
|
||||
@param randomly: if True, add the --random=0 argument, which instructs
|
||||
trial to run the unit tests in a random order each
|
||||
time. This occasionally catches problems that might be
|
||||
masked when one module always runs before another
|
||||
(like failing to make registerAdapter calls before
|
||||
lookups are done).
|
||||
|
||||
@type kwargs: dict
|
||||
@param kwargs: parameters. The following parameters are inherited from
|
||||
L{ShellCommand} and may be useful to set: workdir,
|
||||
haltOnFailure, flunkOnWarnings, flunkOnFailure,
|
||||
warnOnWarnings, warnOnFailure, want_stdout, want_stderr,
|
||||
timeout.
|
||||
"""
|
||||
ShellCommand.__init__(self, **kwargs)
|
||||
|
||||
if python:
|
||||
self.python = python
|
||||
if self.python is not None:
|
||||
if type(self.python) is str:
|
||||
self.python = [self.python]
|
||||
for s in self.python:
|
||||
if " " in s:
|
||||
# this is not strictly an error, but I suspect more
|
||||
# people will accidentally try to use python="python2.3
|
||||
# -Wall" than will use embedded spaces in a python flag
|
||||
log.msg("python= component '%s' has spaces")
|
||||
log.msg("To add -Wall, use python=['python', '-Wall']")
|
||||
why = "python= value has spaces, probably an error"
|
||||
raise ValueError(why)
|
||||
|
||||
if trial:
|
||||
self.trial = trial
|
||||
if " " in self.trial:
|
||||
raise ValueError("trial= value has spaces")
|
||||
if trialMode is not None:
|
||||
self.trialMode = trialMode
|
||||
if trialArgs is not None:
|
||||
self.trialArgs = trialArgs
|
||||
|
||||
if testpath is not UNSPECIFIED:
|
||||
self.testpath = testpath
|
||||
if self.testpath is UNSPECIFIED:
|
||||
raise ValueError("You must specify testpath= (it can be None)")
|
||||
assert isinstance(self.testpath, str) or self.testpath is None
|
||||
|
||||
if reactor is not UNSPECIFIED:
|
||||
self.reactor = reactor
|
||||
|
||||
if tests is not None:
|
||||
self.tests = tests
|
||||
if type(self.tests) is str:
|
||||
self.tests = [self.tests]
|
||||
if testChanges is not None:
|
||||
self.testChanges = testChanges
|
||||
#self.recurse = True # not sure this is necessary
|
||||
|
||||
if not self.testChanges and self.tests is None:
|
||||
raise ValueError("Must either set testChanges= or provide tests=")
|
||||
|
||||
if recurse is not None:
|
||||
self.recurse = recurse
|
||||
if randomly is not None:
|
||||
self.randomly = randomly
|
||||
|
||||
# build up most of the command, then stash it until start()
|
||||
command = []
|
||||
if self.python:
|
||||
command.extend(self.python)
|
||||
command.append(self.trial)
|
||||
command.extend(self.trialMode)
|
||||
if self.recurse:
|
||||
command.append("--recurse")
|
||||
if self.reactor:
|
||||
command.append("--reactor=%s" % reactor)
|
||||
if self.randomly:
|
||||
command.append("--random=0")
|
||||
command.extend(self.trialArgs)
|
||||
self.command = command
|
||||
|
||||
if self.reactor:
|
||||
self.description = ["testing", "(%s)" % self.reactor]
|
||||
self.descriptionDone = ["tests"]
|
||||
# commandComplete adds (reactorname) to self.text
|
||||
else:
|
||||
self.description = ["testing"]
|
||||
self.descriptionDone = ["tests"]
|
||||
|
||||
# this counter will feed Progress along the 'test cases' metric
|
||||
self.addLogObserver('stdio', TrialTestCaseCounter())
|
||||
# this one just measures bytes of output in _trial_temp/test.log
|
||||
self.addLogObserver('test.log', OutputProgressObserver('test.log'))
|
||||
|
||||
def setupEnvironment(self, cmd):
|
||||
ShellCommand.setupEnvironment(self, cmd)
|
||||
if self.testpath != None:
|
||||
e = cmd.args['env']
|
||||
if e is None:
|
||||
cmd.args['env'] = {'PYTHONPATH': self.testpath}
|
||||
else:
|
||||
# TODO: somehow, each build causes another copy of
|
||||
# self.testpath to get prepended
|
||||
if e.get('PYTHONPATH', "") == "":
|
||||
e['PYTHONPATH'] = self.testpath
|
||||
else:
|
||||
e['PYTHONPATH'] = self.testpath + ":" + e['PYTHONPATH']
|
||||
try:
|
||||
p = cmd.args['env']['PYTHONPATH']
|
||||
if type(p) is not str:
|
||||
log.msg("hey, not a string:", p)
|
||||
assert False
|
||||
except (KeyError, TypeError):
|
||||
# KeyError if args doesn't have ['env']
|
||||
# KeyError if args['env'] doesn't have ['PYTHONPATH']
|
||||
# TypeError if args is None
|
||||
pass
|
||||
|
||||
def start(self):
|
||||
# now that self.build.allFiles() is nailed down, finish building the
|
||||
# command
|
||||
if self.testChanges:
|
||||
for f in self.build.allFiles():
|
||||
if f.endswith(".py"):
|
||||
self.command.append("--testmodule=%s" % f)
|
||||
else:
|
||||
self.command.extend(self.tests)
|
||||
log.msg("Trial.start: command is", self.command)
|
||||
|
||||
# if our slave is too old to understand logfiles=, fetch them
|
||||
# manually. This is a fallback for the Twisted buildbot and some old
|
||||
# buildslaves.
|
||||
self._needToPullTestDotLog = False
|
||||
if self.slaveVersionIsOlderThan("shell", "2.1"):
|
||||
log.msg("Trial: buildslave %s is too old to accept logfiles=" %
|
||||
self.getSlaveName())
|
||||
log.msg(" falling back to 'cat _trial_temp/test.log' instead")
|
||||
self.logfiles = {}
|
||||
self._needToPullTestDotLog = True
|
||||
|
||||
ShellCommand.start(self)
|
||||
|
||||
|
||||
def commandComplete(self, cmd):
|
||||
if not self._needToPullTestDotLog:
|
||||
return self._gotTestDotLog(cmd)
|
||||
|
||||
# if the buildslave was too old, pull test.log now
|
||||
catcmd = ["cat", "_trial_temp/test.log"]
|
||||
c2 = RemoteShellCommand(command=catcmd, workdir=self.workdir)
|
||||
loog = self.addLog("test.log")
|
||||
c2.useLog(loog, True, logfileName="stdio")
|
||||
self.cmd = c2 # to allow interrupts
|
||||
d = c2.run(self, self.remote)
|
||||
d.addCallback(lambda res: self._gotTestDotLog(cmd))
|
||||
return d
|
||||
|
||||
def rtext(self, fmt='%s'):
|
||||
if self.reactor:
|
||||
rtext = fmt % self.reactor
|
||||
return rtext.replace("reactor", "")
|
||||
return ""
|
||||
|
||||
def _gotTestDotLog(self, cmd):
|
||||
# figure out all status, then let the various hook functions return
|
||||
# different pieces of it
|
||||
|
||||
# 'cmd' is the original trial command, so cmd.logs['stdio'] is the
|
||||
# trial output. We don't have access to test.log from here.
|
||||
output = cmd.logs['stdio'].getText()
|
||||
counts = countFailedTests(output)
|
||||
|
||||
total = counts['total']
|
||||
failures, errors = counts['failures'], counts['errors']
|
||||
parsed = (total != None)
|
||||
text = []
|
||||
text2 = ""
|
||||
|
||||
if cmd.rc == 0:
|
||||
if parsed:
|
||||
results = SUCCESS
|
||||
if total:
|
||||
text += ["%d %s" % \
|
||||
(total,
|
||||
total == 1 and "test" or "tests"),
|
||||
"passed"]
|
||||
else:
|
||||
text += ["no tests", "run"]
|
||||
else:
|
||||
results = FAILURE
|
||||
text += ["testlog", "unparseable"]
|
||||
text2 = "tests"
|
||||
else:
|
||||
# something failed
|
||||
results = FAILURE
|
||||
if parsed:
|
||||
text.append("tests")
|
||||
if failures:
|
||||
text.append("%d %s" % \
|
||||
(failures,
|
||||
failures == 1 and "failure" or "failures"))
|
||||
if errors:
|
||||
text.append("%d %s" % \
|
||||
(errors,
|
||||
errors == 1 and "error" or "errors"))
|
||||
count = failures + errors
|
||||
text2 = "%d tes%s" % (count, (count == 1 and 't' or 'ts'))
|
||||
else:
|
||||
text += ["tests", "failed"]
|
||||
text2 = "tests"
|
||||
|
||||
if counts['skips']:
|
||||
text.append("%d %s" % \
|
||||
(counts['skips'],
|
||||
counts['skips'] == 1 and "skip" or "skips"))
|
||||
if counts['expectedFailures']:
|
||||
text.append("%d %s" % \
|
||||
(counts['expectedFailures'],
|
||||
counts['expectedFailures'] == 1 and "todo"
|
||||
or "todos"))
|
||||
if 0: # TODO
|
||||
results = WARNINGS
|
||||
if not text2:
|
||||
text2 = "todo"
|
||||
|
||||
if 0:
|
||||
# ignore unexpectedSuccesses for now, but it should really mark
|
||||
# the build WARNING
|
||||
if counts['unexpectedSuccesses']:
|
||||
text.append("%d surprises" % counts['unexpectedSuccesses'])
|
||||
results = WARNINGS
|
||||
if not text2:
|
||||
text2 = "tests"
|
||||
|
||||
if self.reactor:
|
||||
text.append(self.rtext('(%s)'))
|
||||
if text2:
|
||||
text2 = "%s %s" % (text2, self.rtext('(%s)'))
|
||||
|
||||
self.results = results
|
||||
self.text = text
|
||||
self.text2 = [text2]
|
||||
|
||||
def addTestResult(self, testname, results, text, tlog):
|
||||
if self.reactor is not None:
|
||||
testname = (self.reactor,) + testname
|
||||
tr = builder.TestResult(testname, results, text, logs={'log': tlog})
|
||||
#self.step_status.build.addTestResult(tr)
|
||||
self.build.build_status.addTestResult(tr)
|
||||
|
||||
def createSummary(self, loog):
|
||||
output = loog.getText()
|
||||
problems = ""
|
||||
sio = StringIO.StringIO(output)
|
||||
warnings = {}
|
||||
while 1:
|
||||
line = sio.readline()
|
||||
if line == "":
|
||||
break
|
||||
if line.find(" exceptions.DeprecationWarning: ") != -1:
|
||||
# no source
|
||||
warning = line # TODO: consider stripping basedir prefix here
|
||||
warnings[warning] = warnings.get(warning, 0) + 1
|
||||
elif (line.find(" DeprecationWarning: ") != -1 or
|
||||
line.find(" UserWarning: ") != -1):
|
||||
# next line is the source
|
||||
warning = line + sio.readline()
|
||||
warnings[warning] = warnings.get(warning, 0) + 1
|
||||
elif line.find("Warning: ") != -1:
|
||||
warning = line
|
||||
warnings[warning] = warnings.get(warning, 0) + 1
|
||||
|
||||
if line.find("=" * 60) == 0 or line.find("-" * 60) == 0:
|
||||
problems += line
|
||||
problems += sio.read()
|
||||
break
|
||||
|
||||
if problems:
|
||||
self.addCompleteLog("problems", problems)
|
||||
# now parse the problems for per-test results
|
||||
pio = StringIO.StringIO(problems)
|
||||
pio.readline() # eat the first separator line
|
||||
testname = None
|
||||
done = False
|
||||
while not done:
|
||||
while 1:
|
||||
line = pio.readline()
|
||||
if line == "":
|
||||
done = True
|
||||
break
|
||||
if line.find("=" * 60) == 0:
|
||||
break
|
||||
if line.find("-" * 60) == 0:
|
||||
# the last case has --- as a separator before the
|
||||
# summary counts are printed
|
||||
done = True
|
||||
break
|
||||
if testname is None:
|
||||
# the first line after the === is like:
|
||||
# EXPECTED FAILURE: testLackOfTB (twisted.test.test_failure.FailureTestCase)
|
||||
# SKIPPED: testRETR (twisted.test.test_ftp.TestFTPServer)
|
||||
# FAILURE: testBatchFile (twisted.conch.test.test_sftp.TestOurServerBatchFile)
|
||||
r = re.search(r'^([^:]+): (\w+) \(([\w\.]+)\)', line)
|
||||
if not r:
|
||||
# TODO: cleanup, if there are no problems,
|
||||
# we hit here
|
||||
continue
|
||||
result, name, case = r.groups()
|
||||
testname = tuple(case.split(".") + [name])
|
||||
results = {'SKIPPED': SKIPPED,
|
||||
'EXPECTED FAILURE': SUCCESS,
|
||||
'UNEXPECTED SUCCESS': WARNINGS,
|
||||
'FAILURE': FAILURE,
|
||||
'ERROR': FAILURE,
|
||||
'SUCCESS': SUCCESS, # not reported
|
||||
}.get(result, WARNINGS)
|
||||
text = result.lower().split()
|
||||
loog = line
|
||||
# the next line is all dashes
|
||||
loog += pio.readline()
|
||||
else:
|
||||
# the rest goes into the log
|
||||
loog += line
|
||||
if testname:
|
||||
self.addTestResult(testname, results, text, loog)
|
||||
testname = None
|
||||
|
||||
if warnings:
|
||||
lines = warnings.keys()
|
||||
lines.sort()
|
||||
self.addCompleteLog("warnings", "".join(lines))
|
||||
|
||||
def evaluateCommand(self, cmd):
|
||||
return self.results
|
||||
|
||||
def getText(self, cmd, results):
|
||||
return self.text
|
||||
def getText2(self, cmd, results):
|
||||
return self.text2
|
||||
|
||||
|
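# --- Illustrative sketch; not part of the imported file. --------------------
# Two typical ways of configuring the Trial step documented above, written
# against the assumed 0.7.x-era master.cfg API.  The module names of the
# project under test ('petmail.test') are hypothetical.
from buildbot.process import factory
from buildbot.steps.python_twisted import Trial

f = factory.BuildFactory()
# run an explicit list of test modules, using the source tree in-place
f.addStep(Trial, workdir="build", testpath=".",
          tests=["petmail.test"])
# or: let trial pick only the tests that cover the files changed in this
# build, using the bin/trial that ships with the tree being tested
#f.addStep(Trial, workdir="Twisted", testpath=None,
#          trial="./bin/trial", testChanges=True)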
||||
class ProcessDocs(ShellCommand):
|
||||
"""I build all docs. This requires some LaTeX packages to be installed.
|
||||
It will result in the full documentation book (dvi, pdf, etc).
|
||||
|
||||
"""
|
||||
|
||||
name = "process-docs"
|
||||
warnOnWarnings = 1
|
||||
command = ["admin/process-docs"]
|
||||
description = ["processing", "docs"]
|
||||
descriptionDone = ["docs"]
|
||||
# TODO: track output and time
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
"""
|
||||
@type workdir: string
|
||||
@keyword workdir: the workdir to start from: must be the base of the
|
||||
Twisted tree
|
||||
|
||||
@type results: triple of (int, int, string)
|
||||
@keyword results: [rc, warnings, output]
|
||||
- rc==0 if all files were converted successfully.
|
||||
- warnings is a count of hlint warnings.
|
||||
- output is the verbose output of the command.
|
||||
"""
|
||||
ShellCommand.__init__(self, **kwargs)
|
||||
|
||||
def createSummary(self, log):
|
||||
output = log.getText()
|
||||
# hlint warnings are of the format: 'WARNING: file:line:col: stuff'
|
||||
# latex warnings start with "WARNING: LaTeX Warning: stuff", but
|
||||
# sometimes wrap around to a second line.
|
||||
lines = output.split("\n")
|
||||
warningLines = []
|
||||
wantNext = False
|
||||
for line in lines:
|
||||
wantThis = wantNext
|
||||
wantNext = False
|
||||
if line.startswith("WARNING: "):
|
||||
wantThis = True
|
||||
wantNext = True
|
||||
if wantThis:
|
||||
warningLines.append(line)
|
||||
|
||||
if warningLines:
|
||||
self.addCompleteLog("warnings", "\n".join(warningLines) + "\n")
|
||||
self.warnings = len(warningLines)
|
||||
|
||||
def evaluateCommand(self, cmd):
|
||||
if cmd.rc != 0:
|
||||
return FAILURE
|
||||
if self.warnings:
|
||||
return WARNINGS
|
||||
return SUCCESS
|
||||
|
||||
def getText(self, cmd, results):
|
||||
if results == SUCCESS:
|
||||
return ["docs", "successful"]
|
||||
if results == WARNINGS:
|
||||
return ["docs",
|
||||
"%d warnin%s" % (self.warnings,
|
||||
self.warnings == 1 and 'g' or 'gs')]
|
||||
if results == FAILURE:
|
||||
return ["docs", "failed"]
|
||||
|
||||
def getText2(self, cmd, results):
|
||||
if results == WARNINGS:
|
||||
return ["%d do%s" % (self.warnings,
|
||||
self.warnings == 1 and 'c' or 'cs')]
|
||||
return ["docs"]
|
||||
|
||||
|
||||
|
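# --- Worked example; not part of the imported file. -------------------------
# The wantNext flag in ProcessDocs.createSummary() also keeps the line after
# a "WARNING: ..." line, because LaTeX warnings can wrap.  Given output like:
#     WARNING: LaTeX Warning: Reference `foo' on page 3
#     undefined on input line 10.
#     Output written on book.dvi
# the first two lines land in the 'warnings' log, the third does not, and
# self.warnings ends up as 2.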
||||
class BuildDebs(ShellCommand):
|
||||
"""I build the .deb packages."""
|
||||
|
||||
name = "debuild"
|
||||
flunkOnFailure = 1
|
||||
command = ["debuild", "-uc", "-us"]
|
||||
description = ["building", "debs"]
|
||||
descriptionDone = ["debs"]
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
"""
|
||||
@type workdir: string
|
||||
@keyword workdir: the workdir to start from (must be the base of the
|
||||
Twisted tree)
|
||||
@type results: double of [int, string]
|
||||
@keyword results: [rc, output].
|
||||
- rc == 0 if all .debs were created successfully
|
||||
- output: string with any errors or warnings
|
||||
"""
|
||||
ShellCommand.__init__(self, **kwargs)
|
||||
|
||||
def commandComplete(self, cmd):
|
||||
errors, warnings = 0, 0
|
||||
output = cmd.logs['stdio'].getText()
|
||||
summary = ""
|
||||
sio = StringIO.StringIO(output)
|
||||
for line in sio.readlines():
|
||||
if line.find("E: ") == 0:
|
||||
summary += line
|
||||
errors += 1
|
||||
if line.find("W: ") == 0:
|
||||
summary += line
|
||||
warnings += 1
|
||||
if summary:
|
||||
self.addCompleteLog("problems", summary)
|
||||
self.errors = errors
|
||||
self.warnings = warnings
|
||||
|
||||
def evaluateCommand(self, cmd):
|
||||
if cmd.rc != 0:
|
||||
return FAILURE
|
||||
if self.errors:
|
||||
return FAILURE
|
||||
if self.warnings:
|
||||
return WARNINGS
|
||||
return SUCCESS
|
||||
|
||||
def getText(self, cmd, results):
|
||||
text = ["debuild"]
|
||||
if cmd.rc != 0:
|
||||
text.append("failed")
|
||||
errors, warnings = self.errors, self.warnings
|
||||
if warnings or errors:
|
||||
text.append("lintian:")
|
||||
if warnings:
|
||||
text.append("%d warnin%s" % (warnings,
|
||||
warnings == 1 and 'g' or 'gs'))
|
||||
if errors:
|
||||
text.append("%d erro%s" % (errors,
|
||||
errors == 1 and 'r' or 'rs'))
|
||||
return text
|
||||
|
||||
def getText2(self, cmd, results):
|
||||
if cmd.rc != 0:
|
||||
return ["debuild"]
|
||||
if self.errors or self.warnings:
|
||||
return ["%d lintian" % (self.errors + self.warnings)]
|
||||
return []
|
||||
|
||||
class RemovePYCs(ShellCommand):
|
||||
name = "remove-.pyc"
|
||||
command = 'find . -name "*.pyc" | xargs rm'
|
||||
description = ["removing", ".pyc", "files"]
|
||||
descriptionDone = ["remove", ".pycs"]
|
273
tools/buildbot/buildbot/steps/shell.py
Normal file
@ -0,0 +1,273 @@
|
||||
# -*- test-case-name: buildbot.test.test_steps,buildbot.test.test_properties -*-
|
||||
|
||||
import types, re
|
||||
from twisted.python import log
|
||||
from buildbot import util
|
||||
from buildbot.process.buildstep import LoggingBuildStep, RemoteShellCommand
|
||||
from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE
|
||||
|
||||
class _BuildPropertyDictionary:
|
||||
def __init__(self, build):
|
||||
self.build = build
|
||||
def __getitem__(self, name):
|
||||
p = self.build.getProperty(name)
|
||||
if p is None:
|
||||
p = ""
|
||||
return p
|
||||
|
||||
class WithProperties(util.ComparableMixin):
|
||||
"""This is a marker class, used in ShellCommand's command= argument to
|
||||
indicate that we want to interpolate a build property.
|
||||
"""
|
||||
|
||||
compare_attrs = ('fmtstring', 'args')
|
||||
|
||||
def __init__(self, fmtstring, *args):
|
||||
self.fmtstring = fmtstring
|
||||
self.args = args
|
||||
|
||||
def render(self, build):
|
||||
if self.args:
|
||||
strings = []
|
||||
for name in self.args:
|
||||
p = build.getProperty(name)
|
||||
if p is None:
|
||||
p = ""
|
||||
strings.append(p)
|
||||
s = self.fmtstring % tuple(strings)
|
||||
else:
|
||||
s = self.fmtstring % _BuildPropertyDictionary(build)
|
||||
return s
|
||||
|
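# --- Illustrative sketch; not part of the imported file. --------------------
# Using WithProperties to splice a build property into a shell command.  The
# 'got_revision' property is one that the source steps set (see
# Source.commandComplete in steps/source.py); the tar filename is made up.
#
#   ShellCommand(workdir="build",
#                command=["tar", "czf",
#                         WithProperties("dist-r%s.tar.gz", "got_revision"),
#                         "source"])
#
# With positional property names, render() applies fmtstring % (values...);
# with no extra args, dictionary-style "%(propname)s" interpolation is used.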
||||
class ShellCommand(LoggingBuildStep):
|
||||
"""I run a single shell command on the buildslave. I return FAILURE if
|
||||
the exit code of that command is non-zero, SUCCESS otherwise. To change
|
||||
this behavior, override my .evaluateCommand method.
|
||||
|
||||
By default, a failure of this step will mark the whole build as FAILURE.
|
||||
To override this, give me an argument of flunkOnFailure=False .
|
||||
|
||||
I create a single Log named 'log' which contains the output of the
|
||||
command. To create additional summary Logs, override my .createSummary
|
||||
method.
|
||||
|
||||
The shell command I run (a list of argv strings) can be provided in
|
||||
several ways:
|
||||
- a class-level .command attribute
|
||||
- a command= parameter to my constructor (overrides .command)
|
||||
- set explicitly with my .setCommand() method (overrides both)
|
||||
|
||||
@ivar command: a list of argv strings (or WithProperties instances).
|
||||
This will be used by start() to create a
|
||||
RemoteShellCommand instance.
|
||||
|
||||
@ivar logfiles: a dict mapping log NAMEs to workdir-relative FILENAMEs
|
||||
of their corresponding logfiles. The contents of the file
|
||||
named FILENAME will be put into a LogFile named NAME, in
|
||||
something approximating real-time. (note that logfiles=
|
||||
is actually handled by our parent class LoggingBuildStep)
|
||||
|
||||
"""
|
||||
|
||||
name = "shell"
|
||||
description = None # set this to a list of short strings to override
|
||||
descriptionDone = None # alternate description when the step is complete
|
||||
command = None # set this to a command, or set in kwargs
|
||||
# logfiles={} # you can also set 'logfiles' to a dictionary, and it
|
||||
# will be merged with any logfiles= argument passed in
|
||||
# to __init__
|
||||
|
||||
# override this on a specific ShellCommand if you want to let it fail
|
||||
# without dooming the entire build to a status of FAILURE
|
||||
flunkOnFailure = True
|
||||
|
||||
def __init__(self, workdir,
|
||||
description=None, descriptionDone=None,
|
||||
command=None,
|
||||
**kwargs):
|
||||
# most of our arguments get passed through to the RemoteShellCommand
|
||||
# that we create, but first strip out the ones that we pass to
|
||||
# BuildStep (like haltOnFailure and friends), and a couple that we
|
||||
# consume ourselves.
|
||||
self.workdir = workdir # required by RemoteShellCommand
|
||||
if description:
|
||||
self.description = description
|
||||
if isinstance(self.description, str):
|
||||
self.description = [self.description]
|
||||
if descriptionDone:
|
||||
self.descriptionDone = descriptionDone
|
||||
if isinstance(self.descriptionDone, str):
|
||||
self.descriptionDone = [self.descriptionDone]
|
||||
if command:
|
||||
self.command = command
|
||||
|
||||
# pull out the ones that LoggingBuildStep wants, then upcall
|
||||
buildstep_kwargs = {}
|
||||
for k in kwargs.keys()[:]:
|
||||
if k in self.__class__.parms:
|
||||
buildstep_kwargs[k] = kwargs[k]
|
||||
del kwargs[k]
|
||||
LoggingBuildStep.__init__(self, **buildstep_kwargs)
|
||||
|
||||
# everything left over goes to the RemoteShellCommand
|
||||
kwargs['workdir'] = workdir # including a copy of 'workdir'
|
||||
self.remote_kwargs = kwargs
|
||||
|
||||
|
||||
def setCommand(self, command):
|
||||
self.command = command
|
||||
|
||||
def describe(self, done=False):
|
||||
"""Return a list of short strings to describe this step, for the
|
||||
status display. This uses the first few words of the shell command.
|
||||
You can replace this by setting .description in your subclass, or by
|
||||
overriding this method to describe the step better.
|
||||
|
||||
@type done: boolean
|
||||
@param done: whether the command is complete or not, to improve the
|
||||
way the command is described. C{done=False} is used
|
||||
while the command is still running, so a single
|
||||
imperfect-tense verb is appropriate ('compiling',
|
||||
'testing', ...) C{done=True} is used when the command
|
||||
has finished, and the default getText() method adds some
|
||||
text, so a simple noun is appropriate ('compile',
|
||||
'tests' ...)
|
||||
"""
|
||||
|
||||
if done and self.descriptionDone is not None:
|
||||
return self.descriptionDone
|
||||
if self.description is not None:
|
||||
return self.description
|
||||
|
||||
words = self.command
|
||||
# TODO: handle WithProperties here
|
||||
if isinstance(words, types.StringTypes):
|
||||
words = words.split()
|
||||
if len(words) < 1:
|
||||
return ["???"]
|
||||
if len(words) == 1:
|
||||
return ["'%s'" % words[0]]
|
||||
if len(words) == 2:
|
||||
return ["'%s" % words[0], "%s'" % words[1]]
|
||||
return ["'%s" % words[0], "%s" % words[1], "...'"]
|
||||
|
||||
def _interpolateProperties(self, command):
|
||||
# interpolate any build properties into our command
|
||||
if not isinstance(command, (list, tuple)):
|
||||
return command
|
||||
command_argv = []
|
||||
for argv in command:
|
||||
if isinstance(argv, WithProperties):
|
||||
command_argv.append(argv.render(self.build))
|
||||
else:
|
||||
command_argv.append(argv)
|
||||
return command_argv
|
||||
|
||||
def setupEnvironment(self, cmd):
|
||||
# merge in anything from Build.slaveEnvironment . Earlier steps
|
||||
# (perhaps ones which compile libraries or sub-projects that need to
|
||||
# be referenced by later steps) can add keys to
|
||||
# self.build.slaveEnvironment to affect later steps.
|
||||
slaveEnv = self.build.slaveEnvironment
|
||||
if slaveEnv:
|
||||
if cmd.args['env'] is None:
|
||||
cmd.args['env'] = {}
|
||||
cmd.args['env'].update(slaveEnv)
|
||||
# note that each RemoteShellCommand gets its own copy of the
|
||||
# dictionary, so we shouldn't be affecting anyone but ourselves.
|
||||
|
||||
def checkForOldSlaveAndLogfiles(self):
|
||||
if not self.logfiles:
|
||||
return # doesn't matter
|
||||
if not self.slaveVersionIsOlderThan("shell", "2.1"):
|
||||
return # slave is new enough
|
||||
# this buildslave is too old and will ignore the 'logfiles'
|
||||
# argument. You'll either have to pull the logfiles manually
|
||||
# (say, by using 'cat' in a separate RemoteShellCommand) or
|
||||
# upgrade the buildslave.
|
||||
msg1 = ("Warning: buildslave %s is too old "
|
||||
"to understand logfiles=, ignoring it."
|
||||
% self.getSlaveName())
|
||||
msg2 = "You will have to pull this logfile (%s) manually."
|
||||
log.msg(msg1)
|
||||
for logname,remotefilename in self.logfiles.items():
|
||||
newlog = self.addLog(logname)
|
||||
newlog.addHeader(msg1 + "\n")
|
||||
newlog.addHeader(msg2 % remotefilename + "\n")
|
||||
newlog.finish()
|
||||
# now prevent setupLogfiles() from adding them
|
||||
self.logfiles = {}
|
||||
|
||||
def start(self):
|
||||
# this block is specific to ShellCommands. subclasses that don't need
|
||||
# to set up an argv array, an environment, or extra logfiles= (like
|
||||
# the Source subclasses) can just skip straight to startCommand()
|
||||
command = self._interpolateProperties(self.command)
|
||||
assert isinstance(command, (list, tuple, str))
|
||||
# create the actual RemoteShellCommand instance now
|
||||
kwargs = self.remote_kwargs
|
||||
kwargs['command'] = command
|
||||
kwargs['logfiles'] = self.logfiles
|
||||
cmd = RemoteShellCommand(**kwargs)
|
||||
self.setupEnvironment(cmd)
|
||||
self.checkForOldSlaveAndLogfiles()
|
||||
|
||||
self.startCommand(cmd)
|
||||
|
||||
|
||||
|
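# --- Illustrative sketch; not part of the imported file. --------------------
# A minimal ShellCommand subclass along the lines described in the class
# docstring above (and similar in spirit to the Configure/Compile/Test
# classes further down): the command comes from a class attribute, and
# createSummary()/evaluateCommand() are overridden for custom log handling.
# The 'make lint' target and the warning pattern are made-up examples.
class LintCheck(ShellCommand):
    name = "lint"
    description = ["linting"]
    descriptionDone = ["lint"]
    command = ["make", "lint"]
    flunkOnFailure = False
    warnings = 0

    def createSummary(self, log):
        # pull out anything that looks like a warning into its own logfile
        lines = [l for l in log.getText().split("\n") if "warning:" in l]
        if lines:
            self.addCompleteLog("warnings", "\n".join(lines) + "\n")
        self.warnings = len(lines)

    def evaluateCommand(self, cmd):
        if cmd.rc != 0:
            return FAILURE
        if self.warnings:
            return WARNINGS
        return SUCCESS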
||||
class TreeSize(ShellCommand):
|
||||
name = "treesize"
|
||||
command = ["du", "-s", "."]
|
||||
kb = None
|
||||
|
||||
def commandComplete(self, cmd):
|
||||
out = cmd.log.getText()
|
||||
m = re.search(r'^(\d+)', out)
|
||||
if m:
|
||||
self.kb = int(m.group(1))
|
||||
|
||||
def evaluateCommand(self, cmd):
|
||||
if cmd.rc != 0:
|
||||
return FAILURE
|
||||
if self.kb is None:
|
||||
return WARNINGS # not sure how 'du' could fail, but whatever
|
||||
return SUCCESS
|
||||
|
||||
def getText(self, cmd, results):
|
||||
if self.kb is not None:
|
||||
return ["treesize", "%d kb" % self.kb]
|
||||
return ["treesize", "unknown"]
|
||||
|
||||
class Configure(ShellCommand):
|
||||
|
||||
name = "configure"
|
||||
haltOnFailure = 1
|
||||
description = ["configuring"]
|
||||
descriptionDone = ["configure"]
|
||||
command = ["./configure"]
|
||||
|
||||
class Compile(ShellCommand):
|
||||
|
||||
name = "compile"
|
||||
haltOnFailure = 1
|
||||
description = ["compiling"]
|
||||
descriptionDone = ["compile"]
|
||||
command = ["make", "all"]
|
||||
|
||||
OFFprogressMetrics = ('output',)
|
||||
# things to track: number of files compiled, number of directories
|
||||
# traversed (assuming 'make' is being used)
|
||||
|
||||
def createSummary(self, cmd):
|
||||
# TODO: grep for the characteristic GCC warning/error lines and
|
||||
# assemble them into a pair of buffers
|
||||
pass
|
||||
|
||||
class Test(ShellCommand):
|
||||
|
||||
name = "test"
|
||||
warnOnFailure = 1
|
||||
description = ["testing"]
|
||||
descriptionDone = ["test"]
|
||||
command = ["make", "test"]
|
932
tools/buildbot/buildbot/steps/source.py
Normal file
@ -0,0 +1,932 @@
|
||||
# -*- test-case-name: buildbot.test.test_vc -*-
|
||||
|
||||
import warnings
|
||||
from email.Utils import formatdate
|
||||
from twisted.python import log
|
||||
from buildbot.process.buildstep import LoggingBuildStep, LoggedRemoteCommand
|
||||
from buildbot.interfaces import BuildSlaveTooOldError
|
||||
from buildbot.status.builder import SKIPPED
|
||||
|
||||
|
||||
class Source(LoggingBuildStep):
|
||||
"""This is a base class to generate a source tree in the buildslave.
|
||||
Each version control system has a specialized subclass, and is expected
|
||||
to override __init__ and implement computeSourceRevision() and
|
||||
startVC(). The class as a whole builds up the self.args dictionary, then
|
||||
starts a LoggedRemoteCommand with those arguments.
|
||||
"""
|
||||
|
||||
# if the checkout fails, there's no point in doing anything else
|
||||
haltOnFailure = True
|
||||
notReally = False
|
||||
|
||||
branch = None # the default branch, should be set in __init__
|
||||
|
||||
def __init__(self, workdir, mode='update', alwaysUseLatest=False,
|
||||
timeout=20*60, retry=None, **kwargs):
|
||||
"""
|
||||
@type workdir: string
|
||||
@param workdir: local directory (relative to the Builder's root)
|
||||
where the tree should be placed
|
||||
|
||||
@type mode: string
|
||||
@param mode: the kind of VC operation that is desired:
|
||||
- 'update': specifies that the checkout/update should be
|
||||
performed directly into the workdir. Each build is performed
|
||||
in the same directory, allowing for incremental builds. This
|
||||
minimizes disk space, bandwidth, and CPU time. However, it
|
||||
may encounter problems if the build process does not handle
|
||||
dependencies properly (if you must sometimes do a 'clean
|
||||
build' to make sure everything gets compiled), or if source
|
||||
files are deleted but generated files can influence test
|
||||
behavior (e.g. python's .pyc files), or when source
|
||||
directories are deleted but generated files prevent CVS from
|
||||
removing them.
|
||||
|
||||
- 'copy': specifies that the source-controlled workspace
|
||||
should be maintained in a separate directory (called the
|
||||
'copydir'), using checkout or update as necessary. For each
|
||||
build, a new workdir is created with a copy of the source
|
||||
tree (rm -rf workdir; cp -r copydir workdir). This doubles
|
||||
the disk space required, but keeps the bandwidth low
|
||||
(update instead of a full checkout). A full 'clean' build
|
||||
is performed each time. This avoids any generated-file
|
||||
build problems, but is still occasionally vulnerable to
|
||||
problems such as a CVS repository being manually rearranged
|
||||
(causing CVS errors on update) which are not an issue with
|
||||
a full checkout.
|
||||
|
||||
- 'clobber': specifies that the working directory should be
|
||||
deleted each time, necessitating a full checkout for each
|
||||
build. This ensures a clean build from a complete checkout,
|
||||
avoiding any of the problems described above, but is
|
||||
bandwidth intensive, as the whole source tree must be
|
||||
pulled down for each build.
|
||||
|
||||
- 'export': is like 'clobber', except that e.g. the 'cvs
|
||||
export' command is used to create the working directory.
|
||||
This command removes all VC metadata files (the
|
||||
CVS/.svn/{arch} directories) from the tree, which is
|
||||
sometimes useful for creating source tarballs (to avoid
|
||||
including the metadata in the tar file). Not all VC systems
|
||||
support export.
|
||||
|
||||
@type alwaysUseLatest: boolean
|
||||
@param alwaysUseLatest: whether to always update to the most
|
||||
recent available sources for this build.
|
||||
|
||||
Normally the Source step asks its Build for a list of all
|
||||
Changes that are supposed to go into the build, then computes a
|
||||
'source stamp' (revision number or timestamp) that will cause
|
||||
exactly that set of changes to be present in the checked out
|
||||
tree. This is turned into, e.g., 'cvs update -D timestamp', or
|
||||
'svn update -r revnum'. If alwaysUseLatest=True, bypass this
|
||||
computation and always update to the latest available sources
|
||||
for each build.
|
||||
|
||||
The source stamp helps avoid a race condition in which someone
|
||||
commits a change after the master has decided to start a build
|
||||
but before the slave finishes checking out the sources. At best
|
||||
this results in a build which contains more changes than the
|
||||
buildmaster thinks it has (possibly resulting in the wrong
|
||||
person taking the blame for any problems that result), at worst
|
||||
it can result in an incoherent set of sources (splitting a
|
||||
non-atomic commit) which may not build at all.
|
||||
|
||||
@type retry: tuple of ints (delay, repeats) (or None)
|
||||
@param retry: if provided, VC update failures are re-attempted up
|
||||
to REPEATS times, with DELAY seconds between each
|
||||
attempt. Some users have slaves with poor connectivity
|
||||
to their VC repository, and they say that up to 80% of
|
||||
their build failures are due to transient network
|
||||
failures that could be handled by simply retrying a
|
||||
couple times.
|
||||
|
||||
"""
|
||||
|
||||
LoggingBuildStep.__init__(self, **kwargs)
|
||||
|
||||
assert mode in ("update", "copy", "clobber", "export")
|
||||
if retry:
|
||||
delay, repeats = retry
|
||||
assert isinstance(repeats, int)
|
||||
assert repeats > 0
|
||||
self.args = {'mode': mode,
|
||||
'workdir': workdir,
|
||||
'timeout': timeout,
|
||||
'retry': retry,
|
||||
'patch': None, # set during .start
|
||||
}
|
||||
self.alwaysUseLatest = alwaysUseLatest
|
||||
|
||||
# Compute defaults for descriptions:
|
||||
description = ["updating"]
|
||||
descriptionDone = ["update"]
|
||||
if mode == "clobber":
|
||||
description = ["checkout"]
|
||||
# because "checking out" takes too much space
|
||||
descriptionDone = ["checkout"]
|
||||
elif mode == "export":
|
||||
description = ["exporting"]
|
||||
descriptionDone = ["export"]
|
||||
self.description = description
|
||||
self.descriptionDone = descriptionDone
|
||||
|
||||
def describe(self, done=False):
|
||||
if done:
|
||||
return self.descriptionDone
|
||||
return self.description
|
||||
|
||||
def computeSourceRevision(self, changes):
|
||||
"""Each subclass must implement this method to do something more
|
||||
precise than -rHEAD every time. For version control systems that use
|
||||
repository-wide change numbers (SVN, P4), this can simply take the
|
||||
maximum such number from all the changes involved in this build. For
|
||||
systems that do not (CVS), it needs to create a timestamp based upon
|
||||
the latest Change, the Build's treeStableTimer, and an optional
|
||||
self.checkoutDelay value."""
|
||||
return None
|
||||
|
||||
def start(self):
|
||||
if self.notReally:
|
||||
log.msg("faking %s checkout/update" % self.name)
|
||||
self.step_status.setColor("green")
|
||||
self.step_status.setText(["fake", self.name, "successful"])
|
||||
self.addCompleteLog("log",
|
||||
"Faked %s checkout/update 'successful'\n" \
|
||||
% self.name)
|
||||
return SKIPPED
|
||||
|
||||
# what source stamp would this build like to use?
|
||||
s = self.build.getSourceStamp()
|
||||
# if branch is None, then use the Step's "default" branch
|
||||
branch = s.branch or self.branch
|
||||
# if revision is None, use the latest sources (-rHEAD)
|
||||
revision = s.revision
|
||||
if not revision and not self.alwaysUseLatest:
|
||||
revision = self.computeSourceRevision(s.changes)
|
||||
# if patch is None, then do not patch the tree after checkout
|
||||
|
||||
# 'patch' is None or a tuple of (patchlevel, diff)
|
||||
patch = s.patch
|
||||
|
||||
self.startVC(branch, revision, patch)
|
||||
|
||||
def commandComplete(self, cmd):
|
||||
got_revision = None
|
||||
if cmd.updates.has_key("got_revision"):
|
||||
got_revision = cmd.updates["got_revision"][-1]
|
||||
self.setProperty("got_revision", got_revision)
|
||||
|
||||
|
||||
|
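# --- Illustrative sketch; not part of the imported file. --------------------
# Choosing a mode= and retry= for a concrete Source subclass (the CVS and
# SVN classes defined below).  The repository URL is a placeholder and the
# factory call assumes the 0.7.x-era master.cfg API.
#
#   f.addStep(SVN, workdir="build", mode="copy",
#             baseURL="http://svn.example.org/repo/", defaultBranch="trunk",
#             retry=(10, 2))   # wait 10s, retry a failed VC op up to 2 times
#
# mode="copy" keeps a pristine checkout in a separate copydir and copies it
# into the workdir for every build: clean builds at the cost of disk space.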
||||
class CVS(Source):
|
||||
"""I do CVS checkout/update operations.
|
||||
|
||||
Note: if you are doing anonymous/pserver CVS operations, you will need
|
||||
to manually do a 'cvs login' on each buildslave before the slave has any
|
||||
hope of success. XXX: fix this: take a cvs password as an argument and
|
||||
figure out how to do a 'cvs login' on each build
|
||||
"""
|
||||
|
||||
name = "cvs"
|
||||
|
||||
#progressMetrics = ('output',)
|
||||
#
|
||||
# additional things to track: update gives one stderr line per directory
|
||||
# (starting with 'cvs server: Updating ') (and is fairly stable if files
|
||||
# is empty), export gives one line per directory (starting with 'cvs
|
||||
# export: Updating ') and another line per file (starting with U). Would
|
||||
# be nice to track these, requires grepping LogFile data for lines,
|
||||
# parsing each line. Might be handy to have a hook in LogFile that gets
|
||||
# called with each complete line.
|
||||
|
||||
def __init__(self, cvsroot, cvsmodule,
|
||||
global_options=[], branch=None, checkoutDelay=None,
|
||||
login=None,
|
||||
clobber=0, export=0, copydir=None,
|
||||
**kwargs):
|
||||
|
||||
"""
|
||||
@type cvsroot: string
|
||||
@param cvsroot: CVS Repository from which the source tree should
|
||||
be obtained. '/home/warner/Repository' for local
|
||||
or NFS-reachable repositories,
|
||||
':pserver:anon@foo.com:/cvs' for anonymous CVS,
|
||||
'user@host.com:/cvs' for non-anonymous CVS or
|
||||
CVS over ssh. Lots of possibilities, check the
|
||||
CVS documentation for more.
|
||||
|
||||
@type cvsmodule: string
|
||||
@param cvsmodule: subdirectory of CVS repository that should be
|
||||
retrieved
|
||||
|
||||
@type login: string or None
|
||||
@param login: if not None, a string which will be provided as a
|
||||
password to the 'cvs login' command, used when a
|
||||
:pserver: method is used to access the repository.
|
||||
This login is only needed once, but must be run
|
||||
each time (just before the CVS operation) because
|
||||
there is no way for the buildslave to tell whether
|
||||
it was previously performed or not.
|
||||
|
||||
@type branch: string
|
||||
@param branch: the default branch name, will be used in a '-r'
|
||||
argument to specify which branch of the source tree
|
||||
should be used for this checkout. Defaults to None,
|
||||
which means to use 'HEAD'.
|
||||
|
||||
@type checkoutDelay: int or None
|
||||
@param checkoutDelay: if not None, the number of seconds to put
|
||||
between the last known Change and the
|
||||
timestamp given to the -D argument. This
|
||||
defaults to exactly half of the parent
|
||||
Build's .treeStableTimer, but it could be
|
||||
set to something else if your CVS change
|
||||
notification has particularly weird
|
||||
latency characteristics.
|
||||
|
||||
@type global_options: list of strings
|
||||
@param global_options: these arguments are inserted in the cvs
|
||||
command line, before the
|
||||
'checkout'/'update' command word. See
|
||||
'cvs --help-options' for a list of what
|
||||
may be accepted here. ['-r'] will make
|
||||
the checked out files read only. ['-r',
|
||||
'-R'] will also assume the repository is
|
||||
read-only (I assume this means it won't
|
||||
use locks to ensure atomic access to the
|
||||
,v files)."""
|
||||
|
||||
self.checkoutDelay = checkoutDelay
|
||||
self.branch = branch
|
||||
|
||||
if not kwargs.has_key('mode') and (clobber or export or copydir):
|
||||
# deal with old configs
|
||||
warnings.warn("Please use mode=, not clobber/export/copydir",
|
||||
DeprecationWarning)
|
||||
if export:
|
||||
kwargs['mode'] = "export"
|
||||
elif clobber:
|
||||
kwargs['mode'] = "clobber"
|
||||
elif copydir:
|
||||
kwargs['mode'] = "copy"
|
||||
else:
|
||||
kwargs['mode'] = "update"
|
||||
|
||||
Source.__init__(self, **kwargs)
|
||||
|
||||
self.args.update({'cvsroot': cvsroot,
|
||||
'cvsmodule': cvsmodule,
|
||||
'global_options': global_options,
|
||||
'login': login,
|
||||
})
|
||||
|
||||
def computeSourceRevision(self, changes):
|
||||
if not changes:
|
||||
return None
|
||||
lastChange = max([c.when for c in changes])
|
||||
if self.checkoutDelay is not None:
|
||||
when = lastChange + self.checkoutDelay
|
||||
else:
|
||||
lastSubmit = max([r.submittedAt for r in self.build.requests])
|
||||
when = (lastChange + lastSubmit) / 2
|
||||
return formatdate(when)
|
||||
|
||||
def startVC(self, branch, revision, patch):
|
||||
if self.slaveVersionIsOlderThan("cvs", "1.39"):
|
||||
# the slave doesn't know to avoid re-using the same sourcedir
|
||||
# when the branch changes. We have no way of knowing which branch
|
||||
# the last build used, so if we're using a non-default branch and
|
||||
# either 'update' or 'copy' modes, it is safer to refuse to
|
||||
# build, and tell the user they need to upgrade the buildslave.
|
||||
if (branch != self.branch
|
||||
and self.args['mode'] in ("update", "copy")):
|
||||
m = ("This buildslave (%s) does not know about multiple "
|
||||
"branches, and using mode=%s would probably build the "
|
||||
"wrong tree. "
|
||||
"Refusing to build. Please upgrade the buildslave to "
|
||||
"buildbot-0.7.0 or newer." % (self.build.slavename,
|
||||
self.args['mode']))
|
||||
log.msg(m)
|
||||
raise BuildSlaveTooOldError(m)
|
||||
|
||||
if branch is None:
|
||||
branch = "HEAD"
|
||||
self.args['branch'] = branch
|
||||
self.args['revision'] = revision
|
||||
self.args['patch'] = patch
|
||||
|
||||
if self.args['branch'] == "HEAD" and self.args['revision']:
|
||||
# special case. 'cvs update -r HEAD -D today' gives no files
|
||||
# TODO: figure out why, see if it applies to -r BRANCH
|
||||
self.args['branch'] = None
|
||||
|
||||
# deal with old slaves
|
||||
warnings = []
|
||||
slavever = self.slaveVersion("cvs", "old")
|
||||
|
||||
if slavever == "old":
|
||||
# 0.5.0
|
||||
if self.args['mode'] == "export":
|
||||
self.args['export'] = 1
|
||||
elif self.args['mode'] == "clobber":
|
||||
self.args['clobber'] = 1
|
||||
elif self.args['mode'] == "copy":
|
||||
self.args['copydir'] = "source"
|
||||
self.args['tag'] = self.args['branch']
|
||||
assert not self.args['patch'] # 0.5.0 slave can't do patch
|
||||
|
||||
cmd = LoggedRemoteCommand("cvs", self.args)
|
||||
self.startCommand(cmd, warnings)
|
||||
|
||||
|
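# --- Illustrative sketch; not part of the imported file. --------------------
# A typical anonymous-pserver configuration of the CVS step above.  The
# repository location and module name are placeholders, and the factory call
# assumes the 0.7.x-era master.cfg API.
#
#   f.addStep(CVS,
#             cvsroot=":pserver:anonymous@cvs.example.org:/cvsroot/project",
#             cvsmodule="project",
#             workdir="build", mode="update",
#             login="")        # empty password handed to 'cvs login'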
||||
class SVN(Source):
    """I perform Subversion checkout/update operations."""

    name = 'svn'

    def __init__(self, svnurl=None, baseURL=None, defaultBranch=None,
                 directory=None, **kwargs):
        """
        @type svnurl: string
        @param svnurl: the URL which points to the Subversion server,
                       combining the access method (HTTP, ssh, local file),
                       the repository host/port, the repository path, the
                       sub-tree within the repository, and the branch to
                       check out. Using C{svnurl} does not enable builds of
                       alternate branches: use C{baseURL} to enable this.
                       Use exactly one of C{svnurl} and C{baseURL}.

        @param baseURL: if branches are enabled, this is the base URL to
                        which a branch name will be appended. It should
                        probably end in a slash. Use exactly one of
                        C{svnurl} and C{baseURL}.

        @param defaultBranch: if branches are enabled, this is the branch
                              to use if the Build does not specify one
                              explicitly. It will simply be appended
                              to C{baseURL} and the result handed to
                              the SVN command.
        """

        if not kwargs.has_key('workdir') and directory is not None:
            # deal with old configs
            warnings.warn("Please use workdir=, not directory=",
                          DeprecationWarning)
            kwargs['workdir'] = directory

        self.svnurl = svnurl
        self.baseURL = baseURL
        self.branch = defaultBranch

        Source.__init__(self, **kwargs)

        if not svnurl and not baseURL:
            raise ValueError("you must use exactly one of svnurl and baseURL")


    def computeSourceRevision(self, changes):
        if not changes:
            return None
        lastChange = max([int(c.revision) for c in changes])
        return lastChange

    def startVC(self, branch, revision, patch):

        # handle old slaves
        warnings = []
        slavever = self.slaveVersion("svn", "old")
        if not slavever:
            m = "slave does not have the 'svn' command"
            raise BuildSlaveTooOldError(m)

        if self.slaveVersionIsOlderThan("svn", "1.39"):
            # the slave doesn't know to avoid re-using the same sourcedir
            # when the branch changes. We have no way of knowing which branch
            # the last build used, so if we're using a non-default branch and
            # either 'update' or 'copy' modes, it is safer to refuse to
            # build, and tell the user they need to upgrade the buildslave.
            if (branch != self.branch
                and self.args['mode'] in ("update", "copy")):
                m = ("This buildslave (%s) does not know about multiple "
                     "branches, and using mode=%s would probably build the "
                     "wrong tree. "
                     "Refusing to build. Please upgrade the buildslave to "
                     "buildbot-0.7.0 or newer." % (self.build.slavename,
                                                   self.args['mode']))
                raise BuildSlaveTooOldError(m)

        if slavever == "old":
            # 0.5.0 compatibility
            if self.args['mode'] in ("clobber", "copy"):
                # TODO: use some shell commands to make up for the
                # deficiency, by blowing away the old directory first (thus
                # forcing a full checkout)
                warnings.append("WARNING: this slave can only do SVN updates"
                                ", not mode=%s\n" % self.args['mode'])
                log.msg("WARNING: this slave only does mode=update")
            if self.args['mode'] == "export":
                raise BuildSlaveTooOldError("old slave does not have "
                                            "mode=export")
            self.args['directory'] = self.args['workdir']
            if revision is not None:
                # 0.5.0 can only do HEAD. We have no way of knowing whether
                # the requested revision is HEAD or not, and for
                # slowly-changing trees this will probably do the right
                # thing, so let it pass with a warning
                m = ("WARNING: old slave can only update to HEAD, not "
                     "revision=%s" % revision)
                log.msg(m)
                warnings.append(m + "\n")
                revision = "HEAD" # interprets this key differently
            if patch:
                raise BuildSlaveTooOldError("old slave can't do patch")

        if self.svnurl:
            assert not branch # we need baseURL= to use branches
            self.args['svnurl'] = self.svnurl
        else:
            self.args['svnurl'] = self.baseURL + branch
        self.args['revision'] = revision
        self.args['patch'] = patch

        revstuff = []
        if branch is not None and branch != self.branch:
            revstuff.append("[branch]")
        if revision is not None:
            revstuff.append("r%s" % revision)
        self.description.extend(revstuff)
        self.descriptionDone.extend(revstuff)

        cmd = LoggedRemoteCommand("svn", self.args)
        self.startCommand(cmd, warnings)

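# Illustrative usage sketch (not part of the original file): a master.cfg
# fragment showing the two mutually-exclusive ways to point the SVN step at
# a repository. 's' is the step-description helper used in the buildbot
# docs; the URLs and branch name below are hypothetical.
#
#   # fixed, single-branch checkout:
#   s(SVN, mode="update",
#     svnurl="http://svn.example.org/repos/proj/trunk")
#   # branch-aware checkouts, branch name appended to baseURL:
#   s(SVN, mode="copy",
#     baseURL="http://svn.example.org/repos/proj/",
#     defaultBranch="trunk")
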
class Darcs(Source):
    """Check out a source tree from a Darcs repository at 'repourl'.

    To the best of my knowledge, Darcs has no concept of file modes. This
    means the eXecute-bit will be cleared on all source files. As a result,
    you may need to invoke configuration scripts with something like:

    C{s(step.Configure, command=['/bin/sh', './configure'])}
    """

    name = "darcs"

    def __init__(self, repourl=None, baseURL=None, defaultBranch=None,
                 **kwargs):
        """
        @type repourl: string
        @param repourl: the URL which points at the Darcs repository. This
                        is used as the default branch. Using C{repourl} does
                        not enable builds of alternate branches: use
                        C{baseURL} to enable this. Use either C{repourl} or
                        C{baseURL}, not both.

        @param baseURL: if branches are enabled, this is the base URL to
                        which a branch name will be appended. It should
                        probably end in a slash. Use exactly one of
                        C{repourl} and C{baseURL}.

        @param defaultBranch: if branches are enabled, this is the branch
                              to use if the Build does not specify one
                              explicitly. It will simply be appended to
                              C{baseURL} and the result handed to the
                              'darcs pull' command.
        """
        self.repourl = repourl
        self.baseURL = baseURL
        self.branch = defaultBranch
        Source.__init__(self, **kwargs)
        assert kwargs['mode'] != "export", \
               "Darcs does not have an 'export' mode"
        if (not repourl and not baseURL) or (repourl and baseURL):
            raise ValueError("you must provide exactly one of repourl and"
                             " baseURL")

    def startVC(self, branch, revision, patch):
        slavever = self.slaveVersion("darcs")
        if not slavever:
            m = "slave is too old, does not know about darcs"
            raise BuildSlaveTooOldError(m)

        if self.slaveVersionIsOlderThan("darcs", "1.39"):
            if revision:
                # TODO: revisit this once we implement computeSourceRevision
                m = "0.6.6 slaves can't handle args['revision']"
                raise BuildSlaveTooOldError(m)

            # the slave doesn't know to avoid re-using the same sourcedir
            # when the branch changes. We have no way of knowing which branch
            # the last build used, so if we're using a non-default branch and
            # either 'update' or 'copy' modes, it is safer to refuse to
            # build, and tell the user they need to upgrade the buildslave.
            if (branch != self.branch
                and self.args['mode'] in ("update", "copy")):
                m = ("This buildslave (%s) does not know about multiple "
                     "branches, and using mode=%s would probably build the "
                     "wrong tree. "
                     "Refusing to build. Please upgrade the buildslave to "
                     "buildbot-0.7.0 or newer." % (self.build.slavename,
                                                   self.args['mode']))
                raise BuildSlaveTooOldError(m)

        if self.repourl:
            assert not branch # we need baseURL= to use branches
            self.args['repourl'] = self.repourl
        else:
            self.args['repourl'] = self.baseURL + branch
        self.args['revision'] = revision
        self.args['patch'] = patch

        revstuff = []
        if branch is not None and branch != self.branch:
            revstuff.append("[branch]")
        self.description.extend(revstuff)
        self.descriptionDone.extend(revstuff)

        cmd = LoggedRemoteCommand("darcs", self.args)
        self.startCommand(cmd)

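# Illustrative sketch (not part of the original file): because Darcs clears
# the eXecute bit, the docstring above suggests running configure through an
# explicit shell. A hypothetical pair of step descriptions:
#
#   s(Darcs, mode="update",
#     repourl="http://darcs.example.org/proj/trunk"),
#   s(step.Configure, command=['/bin/sh', './configure']),
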
class Git(Source):
    """Check out a source tree from a git repository 'repourl'."""

    name = "git"

    def __init__(self, repourl, **kwargs):
        """
        @type repourl: string
        @param repourl: the URL which points at the git repository
        """
        self.branch = None # TODO
        Source.__init__(self, **kwargs)
        self.args['repourl'] = repourl

    def startVC(self, branch, revision, patch):
        self.args['branch'] = branch
        self.args['revision'] = revision
        self.args['patch'] = patch
        slavever = self.slaveVersion("git")
        if not slavever:
            raise BuildSlaveTooOldError("slave is too old, does not know "
                                        "about git")
        cmd = LoggedRemoteCommand("git", self.args)
        self.startCommand(cmd)


class Arch(Source):
    """Check out a source tree from an Arch repository named 'archive'
    available at 'url'. 'version' specifies which version number (development
    line) will be used for the checkout: this is mostly equivalent to a
    branch name. This version uses the 'tla' tool to do the checkout, to use
    'baz' see L{Bazaar} instead.
    """

    name = "arch"
    # TODO: slaves >0.6.6 will accept args['build-config'], so use it

    def __init__(self, url, version, archive=None, **kwargs):
        """
        @type url: string
        @param url: the Arch coordinates of the repository. This is
                    typically an http:// URL, but could also be the absolute
                    pathname of a local directory instead.

        @type version: string
        @param version: the category--branch--version to check out. This is
                        the default branch. If a build specifies a different
                        branch, it will be used instead of this.

        @type archive: string
        @param archive: The archive name. If provided, it must match the one
                        that comes from the repository. If not, the
                        repository's default will be used.
        """
        self.branch = version
        Source.__init__(self, **kwargs)
        self.args.update({'url': url,
                          'archive': archive,
                          })

    def computeSourceRevision(self, changes):
        # in Arch, fully-qualified revision numbers look like:
        #  arch@buildbot.sourceforge.net--2004/buildbot--dev--0--patch-104
        # For any given builder, all of this is fixed except the patch-104.
        # The Change might have any part of the fully-qualified string, so we
        # just look for the last part. We return the "patch-NN" string.
        if not changes:
            return None
        lastChange = None
        for c in changes:
            if not c.revision:
                continue
            if c.revision.endswith("--base-0"):
                rev = 0
            else:
                i = c.revision.rindex("patch")
                rev = int(c.revision[i+len("patch-"):])
            lastChange = max(lastChange, rev)
        if lastChange is None:
            return None
        if lastChange == 0:
            return "base-0"
        return "patch-%d" % lastChange

    def checkSlaveVersion(self, cmd, branch):
        warnings = []
        slavever = self.slaveVersion(cmd)
        if not slavever:
            m = "slave is too old, does not know about %s" % cmd
            raise BuildSlaveTooOldError(m)

        # slave 1.28 and later understand 'revision'
        if self.slaveVersionIsOlderThan(cmd, "1.28"):
            if not self.alwaysUseLatest:
                # we don't know whether our requested revision is the latest
                # or not. If the tree does not change very quickly, this will
                # probably build the right thing, so emit a warning rather
                # than refuse to build at all
                m = "WARNING, buildslave is too old to use a revision"
                log.msg(m)
                warnings.append(m + "\n")

        if self.slaveVersionIsOlderThan(cmd, "1.39"):
            # the slave doesn't know to avoid re-using the same sourcedir
            # when the branch changes. We have no way of knowing which branch
            # the last build used, so if we're using a non-default branch and
            # either 'update' or 'copy' modes, it is safer to refuse to
            # build, and tell the user they need to upgrade the buildslave.
            if (branch != self.branch
                and self.args['mode'] in ("update", "copy")):
                m = ("This buildslave (%s) does not know about multiple "
                     "branches, and using mode=%s would probably build the "
                     "wrong tree. "
                     "Refusing to build. Please upgrade the buildslave to "
                     "buildbot-0.7.0 or newer." % (self.build.slavename,
                                                   self.args['mode']))
                log.msg(m)
                raise BuildSlaveTooOldError(m)

        return warnings

    def startVC(self, branch, revision, patch):
        self.args['version'] = branch
        self.args['revision'] = revision
        self.args['patch'] = patch
        warnings = self.checkSlaveVersion("arch", branch)

        revstuff = []
        if branch is not None and branch != self.branch:
            revstuff.append("[branch]")
        if revision is not None:
            revstuff.append("patch%s" % revision)
        self.description.extend(revstuff)
        self.descriptionDone.extend(revstuff)

        cmd = LoggedRemoteCommand("arch", self.args)
        self.startCommand(cmd, warnings)

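# Illustrative sketch (not part of the original file): an Arch checkout of a
# hypothetical category--branch--version line, leaving 'archive' unset so the
# repository's default archive name is used.
#
#   s(Arch, mode="copy",
#     url="http://arch.example.org/archives/proj",
#     version="proj--dev--1.0")
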
class Bazaar(Arch):
    """Bazaar is an alternative client for Arch repositories. baz is mostly
    compatible with tla, but archive registration is slightly different."""

    # TODO: slaves >0.6.6 will accept args['build-config'], so use it

    def __init__(self, url, version, archive, **kwargs):
        """
        @type url: string
        @param url: the Arch coordinates of the repository. This is
                    typically an http:// URL, but could also be the absolute
                    pathname of a local directory instead.

        @type version: string
        @param version: the category--branch--version to check out

        @type archive: string
        @param archive: The archive name (required). This must always match
                        the one that comes from the repository, otherwise the
                        buildslave will attempt to get sources from the wrong
                        archive.
        """
        self.branch = version
        Source.__init__(self, **kwargs)
        self.args.update({'url': url,
                          'archive': archive,
                          })

    def startVC(self, branch, revision, patch):
        self.args['version'] = branch
        self.args['revision'] = revision
        self.args['patch'] = patch
        warnings = self.checkSlaveVersion("bazaar", branch)

        revstuff = []
        if branch is not None and branch != self.branch:
            revstuff.append("[branch]")
        if revision is not None:
            revstuff.append("patch%s" % revision)
        self.description.extend(revstuff)
        self.descriptionDone.extend(revstuff)

        cmd = LoggedRemoteCommand("bazaar", self.args)
        self.startCommand(cmd, warnings)

class Mercurial(Source):
    """Check out a source tree from a mercurial repository 'repourl'."""

    name = "hg"

    def __init__(self, repourl=None, baseURL=None, defaultBranch=None,
                 **kwargs):
        """
        @type repourl: string
        @param repourl: the URL which points at the Mercurial repository.
                        This is used as the default branch. Using C{repourl}
                        does not enable builds of alternate branches: use
                        C{baseURL} to enable this. Use either C{repourl} or
                        C{baseURL}, not both.

        @param baseURL: if branches are enabled, this is the base URL to
                        which a branch name will be appended. It should
                        probably end in a slash. Use exactly one of
                        C{repourl} and C{baseURL}.

        @param defaultBranch: if branches are enabled, this is the branch
                              to use if the Build does not specify one
                              explicitly. It will simply be appended to
                              C{baseURL} and the result handed to the
                              'hg clone' command.
        """
        self.repourl = repourl
        self.baseURL = baseURL
        self.branch = defaultBranch
        Source.__init__(self, **kwargs)
        if (not repourl and not baseURL) or (repourl and baseURL):
            raise ValueError("you must provide exactly one of repourl and"
                             " baseURL")

    def startVC(self, branch, revision, patch):
        slavever = self.slaveVersion("hg")
        if not slavever:
            raise BuildSlaveTooOldError("slave is too old, does not know "
                                        "about hg")

        if self.repourl:
            assert not branch # we need baseURL= to use branches
            self.args['repourl'] = self.repourl
        else:
            self.args['repourl'] = self.baseURL + branch
        self.args['revision'] = revision
        self.args['patch'] = patch

        revstuff = []
        if branch is not None and branch != self.branch:
            revstuff.append("[branch]")
        self.description.extend(revstuff)
        self.descriptionDone.extend(revstuff)

        cmd = LoggedRemoteCommand("hg", self.args)
        self.startCommand(cmd)

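# Illustrative sketch (not part of the original file): Mercurial configured
# for branch-aware builds; the chosen branch is appended to baseURL, so the
# hypothetical URL below deliberately ends in a slash.
#
#   s(Mercurial, mode="update",
#     baseURL="http://hg.example.org/repos/",
#     defaultBranch="proj-trunk")
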
class P4(Source):
    """ P4 is a class for accessing perforce revision control"""
    name = "p4"

    def __init__(self, p4base, defaultBranch=None, p4port=None, p4user=None,
                 p4passwd=None, p4extra_views=[],
                 p4client='buildbot_%(slave)s_%(builder)s', **kwargs):
        """
        @type p4base: string
        @param p4base: A view into a perforce depot, typically
                       "//depot/proj/"

        @type defaultBranch: string
        @param defaultBranch: Identify a branch to build by default. Perforce
                              is a view based branching system. So, the branch
                              is normally the name after the base. For example,
                              branch=1.0 is view=//depot/proj/1.0/...
                              branch=1.1 is view=//depot/proj/1.1/...

        @type p4port: string
        @param p4port: Specify the perforce server to connect to, in the
                       format <host>:<port>. Example "perforce.example.com:1666"

        @type p4user: string
        @param p4user: The perforce user to run the command as.

        @type p4passwd: string
        @param p4passwd: The password for the perforce user.

        @type p4extra_views: list of tuples
        @param p4extra_views: Extra views to be added to
                              the client that is being used.

        @type p4client: string
        @param p4client: The perforce client to use for this buildslave.
        """

        self.branch = defaultBranch
        Source.__init__(self, **kwargs)
        self.args['p4port'] = p4port
        self.args['p4user'] = p4user
        self.args['p4passwd'] = p4passwd
        self.args['p4base'] = p4base
        self.args['p4extra_views'] = p4extra_views
        self.args['p4client'] = p4client % {
            'slave': self.build.slavename,
            'builder': self.build.builder.name,
        }

    def computeSourceRevision(self, changes):
        if not changes:
            return None
        lastChange = max([int(c.revision) for c in changes])
        return lastChange

    def startVC(self, branch, revision, patch):
        slavever = self.slaveVersion("p4")
        assert slavever, "slave is too old, does not know about p4"
        args = dict(self.args)
        args['branch'] = branch or self.branch
        args['revision'] = revision
        args['patch'] = patch
        cmd = LoggedRemoteCommand("p4", args)
        self.startCommand(cmd)

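# Illustrative sketch (not part of the original file): a P4 step where the
# branch is the path component after p4base, so branch "1.0" maps to the view
# //depot/proj/1.0/... . Server, user, and password are hypothetical.
#
#   s(P4, mode="copy",
#     p4base="//depot/proj/", defaultBranch="1.0",
#     p4port="perforce.example.com:1666",
#     p4user="buildbot", p4passwd="secret")
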
class P4Sync(Source):
    """This is a partial solution for using a P4 source repository. You are
    required to manually set up each build slave with a useful P4
    environment, which means setting various per-slave environment variables,
    and creating a P4 client specification which maps the right files into
    the slave's working directory. Once you have done that, this step merely
    performs a 'p4 sync' to update that workspace with the newest files.

    Each slave needs the following environment:

     - PATH: the 'p4' binary must be on the slave's PATH
     - P4USER: each slave needs a distinct user account
     - P4CLIENT: each slave needs a distinct client specification

    You should use 'p4 client' (?) to set up a client view spec which maps
    the desired files into $SLAVEBASE/$BUILDERBASE/source .
    """

    name = "p4sync"

    def __init__(self, p4port, p4user, p4passwd, p4client, **kwargs):
        assert kwargs['mode'] == "copy", "P4Sync can only be used in mode=copy"
        self.branch = None
        Source.__init__(self, **kwargs)
        self.args['p4port'] = p4port
        self.args['p4user'] = p4user
        self.args['p4passwd'] = p4passwd
        self.args['p4client'] = p4client

    def computeSourceRevision(self, changes):
        if not changes:
            return None
        lastChange = max([int(c.revision) for c in changes])
        return lastChange

    def startVC(self, branch, revision, patch):
        slavever = self.slaveVersion("p4sync")
        assert slavever, "slave is too old, does not know about p4"
        cmd = LoggedRemoteCommand("p4sync", self.args)
        self.startCommand(cmd)

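# Illustrative sketch (not part of the original file): P4Sync assumes each
# slave was prepared by hand (the 'p4' binary on PATH, a per-slave P4USER and
# P4CLIENT, and a client spec mapping the depot into the builder's 'source'
# directory), so the step itself only needs the connection details. All
# values below are hypothetical.
#
#   s(P4Sync, mode="copy",
#     p4port="perforce.example.com:1666",
#     p4user="buildslave1", p4passwd="secret",
#     p4client="buildslave1-proj")
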
class Monotone(Source):
    """Check out a revision from a monotone server at 'server_addr',
    branch 'branch'. 'revision' specifies which revision id to check
    out.

    This step will first create a local database, if necessary, and then pull
    the contents of the server into the database. Then it will do the
    checkout/update from this database."""

    name = "monotone"

    def __init__(self, server_addr, branch, db_path="monotone.db",
                 monotone="monotone",
                 **kwargs):
        Source.__init__(self, **kwargs)
        self.args.update({"server_addr": server_addr,
                          "branch": branch,
                          "db_path": db_path,
                          "monotone": monotone})

    def computeSourceRevision(self, changes):
        if not changes:
            return None
        return changes[-1].revision

    def startVC(self):
        slavever = self.slaveVersion("monotone")
        assert slavever, "slave is too old, does not know about monotone"
        cmd = LoggedRemoteCommand("monotone", self.args)
        self.startCommand(cmd)

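# Illustrative sketch (not part of the original file): a Monotone step that
# pulls a hypothetical branch from a hypothetical server into the step's
# local database before checking out.
#
#   s(Monotone, mode="update",
#     server_addr="monotone.example.org",
#     branch="org.example.proj")
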
277
tools/buildbot/buildbot/steps/transfer.py
Normal file
@ -0,0 +1,277 @@
# -*- test-case-name: buildbot.test.test_transfer -*-

import os.path
from twisted.internet import reactor
from twisted.spread import pb
from twisted.python import log
from buildbot.process.buildstep import RemoteCommand, BuildStep
from buildbot.process.buildstep import SUCCESS, FAILURE
from buildbot.interfaces import BuildSlaveTooOldError


class _FileWriter(pb.Referenceable):
    """
    Helper class that acts as a file-object with write access
    """

    def __init__(self, destfile, maxsize, mode):
        self.destfile = destfile
        self.fp = open(destfile, "w")
        if mode is not None:
            os.chmod(destfile, mode)
        self.remaining = maxsize

    def remote_write(self, data):
        """
        Called from the remote slave to write L{data} to L{fp} within the
        boundaries of L{maxsize}

        @type data: C{string}
        @param data: String of data to write
        """
        if self.remaining is not None:
            if len(data) > self.remaining:
                data = data[:self.remaining]
            self.fp.write(data)
            self.remaining = self.remaining - len(data)
        else:
            self.fp.write(data)

    def remote_close(self):
        """
        Called by the remote slave to state that no more data will be
        transferred
        """
        self.fp.close()
        self.fp = None

    def __del__(self):
        # unclean shutdown, the file is probably truncated, so delete it
        # altogether rather than deliver a corrupted file
        fp = getattr(self, "fp", None)
        if fp:
            fp.close()
            os.unlink(self.destfile)


class StatusRemoteCommand(RemoteCommand):
    def __init__(self, remote_command, args):
        RemoteCommand.__init__(self, remote_command, args)

        self.rc = None
        self.stderr = ''

    def remoteUpdate(self, update):
        #log.msg('StatusRemoteCommand: update=%r' % update)
        if 'rc' in update:
            self.rc = update['rc']
        if 'stderr' in update:
            self.stderr = self.stderr + update['stderr'] + '\n'


class FileUpload(BuildStep):
    """
    Build step to transfer a file from the slave to the master.

    arguments:

    - ['slavesrc']   filename of source file at slave, relative to workdir
    - ['masterdest'] filename of destination file at master
    - ['workdir']    string with slave working directory relative to builder
                     base dir, default 'build'
    - ['maxsize']    maximum size of the file, default None (=unlimited)
    - ['blocksize']  maximum size of each block being transferred
    - ['mode']       file access mode for the resulting master-side file.
                     The default (=None) is to leave it up to the umask of
                     the buildmaster process.

    """

    name = 'upload'

    def __init__(self, build, slavesrc, masterdest,
                 workdir="build", maxsize=None, blocksize=16*1024, mode=None,
                 **buildstep_kwargs):
        BuildStep.__init__(self, build, **buildstep_kwargs)

        self.slavesrc = slavesrc
        self.masterdest = masterdest
        self.workdir = workdir
        self.maxsize = maxsize
        self.blocksize = blocksize
        assert isinstance(mode, (int, type(None)))
        self.mode = mode

    def start(self):
        version = self.slaveVersion("uploadFile")
        if not version:
            m = "slave is too old, does not know about uploadFile"
            raise BuildSlaveTooOldError(m)

        source = self.slavesrc
        masterdest = self.masterdest
        # we rely upon the fact that the buildmaster runs chdir'ed into its
        # basedir to make sure that relative paths in masterdest are expanded
        # properly. TODO: maybe pass the master's basedir all the way down
        # into the BuildStep so we can do this better.
        target = os.path.expanduser(masterdest)
        log.msg("FileUpload started, from slave %r to master %r"
                % (source, target))

        self.step_status.setColor('yellow')
        self.step_status.setText(['uploading', os.path.basename(source)])

        # we use maxsize to limit the amount of data on both sides
        fileWriter = _FileWriter(self.masterdest, self.maxsize, self.mode)

        # default arguments
        args = {
            'slavesrc': source,
            'workdir': self.workdir,
            'writer': fileWriter,
            'maxsize': self.maxsize,
            'blocksize': self.blocksize,
            }

        self.cmd = StatusRemoteCommand('uploadFile', args)
        d = self.runCommand(self.cmd)
        d.addCallback(self.finished).addErrback(self.failed)

    def finished(self, result):
        if self.cmd.stderr != '':
            self.addCompleteLog('stderr', self.cmd.stderr)

        if self.cmd.rc is None or self.cmd.rc == 0:
            self.step_status.setColor('green')
            return BuildStep.finished(self, SUCCESS)
        self.step_status.setColor('red')
        return BuildStep.finished(self, FAILURE)

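# Illustrative sketch (not part of the original file): a hypothetical
# master.cfg fragment using FileUpload to copy a build artifact from the
# slave's workdir up to the buildmaster; paths and maxsize are made up.
#
#   from buildbot.steps.transfer import FileUpload
#   s(FileUpload, slavesrc="dist/proj.tar.gz",
#     masterdest="~/uploads/proj.tar.gz",
#     maxsize=10*1024*1024)
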
class _FileReader(pb.Referenceable):
    """
    Helper class that acts as a file-object with read access
    """

    def __init__(self, fp):
        self.fp = fp

    def remote_read(self, maxlength):
        """
        Called from the remote slave to read at most L{maxlength} bytes of data

        @type maxlength: C{integer}
        @param maxlength: Maximum number of data bytes that can be returned

        @return: Data read from L{fp}
        @rtype: C{string} of bytes read from file
        """
        if self.fp is None:
            return ''

        data = self.fp.read(maxlength)
        return data

    def remote_close(self):
        """
        Called by the remote slave to state that no more data will be
        transferred
        """
        if self.fp is not None:
            self.fp.close()
            self.fp = None


class FileDownload(BuildStep):
    """
    Download the first 'maxsize' bytes of a file from the buildmaster to the
    buildslave, and set the mode of the resulting file.

    Arguments::

     ['mastersrc'] filename of source file at master
     ['slavedest'] filename of destination file at slave
     ['workdir']   string with slave working directory relative to builder
                   base dir, default 'build'
     ['maxsize']   maximum size of the file, default None (=unlimited)
     ['blocksize'] maximum size of each block being transferred
     ['mode']      use this to set the access permissions of the resulting
                   buildslave-side file. This is traditionally an octal
                   integer, like 0644 to be world-readable (but not
                   world-writable), or 0600 to only be readable by
                   the buildslave account, or 0755 to be world-executable.
                   The default (=None) is to leave it up to the umask of
                   the buildslave process.

    """

    name = 'download'

    def __init__(self, build, mastersrc, slavedest,
                 workdir="build", maxsize=None, blocksize=16*1024, mode=None,
                 **buildstep_kwargs):
        BuildStep.__init__(self, build, **buildstep_kwargs)

        self.mastersrc = mastersrc
        self.slavedest = slavedest
        self.workdir = workdir
        self.maxsize = maxsize
        self.blocksize = blocksize
        assert isinstance(mode, (int, type(None)))
        self.mode = mode

    def start(self):
        version = self.slaveVersion("downloadFile")
        if not version:
            m = "slave is too old, does not know about downloadFile"
            raise BuildSlaveTooOldError(m)

        # we are currently in the buildmaster's basedir, so any non-absolute
        # paths will be interpreted relative to that
        source = os.path.expanduser(self.mastersrc)
        slavedest = self.slavedest
        log.msg("FileDownload started, from master %r to slave %r" %
                (source, slavedest))

        self.step_status.setColor('yellow')
        self.step_status.setText(['downloading', "to",
                                  os.path.basename(slavedest)])

        # set up the structures needed to read the file
        try:
            fp = open(source, 'r')
        except IOError:
            # if the file does not exist, bail out with an error
            self.addCompleteLog('stderr',
                                'File %r not available at master' % source)
            # TODO: once BuildStep.start() gets rewritten to use
            # maybeDeferred, just re-raise the exception here.
            reactor.callLater(0, BuildStep.finished, self, FAILURE)
            return
        fileReader = _FileReader(fp)

        # default arguments
        args = {
            'slavedest': self.slavedest,
            'maxsize': self.maxsize,
            'reader': fileReader,
            'blocksize': self.blocksize,
            'workdir': self.workdir,
            'mode': self.mode,
            }

        self.cmd = StatusRemoteCommand('downloadFile', args)
        d = self.runCommand(self.cmd)
        d.addCallback(self.finished).addErrback(self.failed)

    def finished(self, result):
        if self.cmd.stderr != '':
            self.addCompleteLog('stderr', self.cmd.stderr)

        if self.cmd.rc is None or self.cmd.rc == 0:
            self.step_status.setColor('green')
            return BuildStep.finished(self, SUCCESS)
        self.step_status.setColor('red')
        return BuildStep.finished(self, FAILURE)

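# Illustrative sketch (not part of the original file): FileDownload is the
# mirror image, pushing a master-side file down to the slave before the build
# uses it. The filenames and mode below are hypothetical.
#
#   from buildbot.steps.transfer import FileDownload
#   s(FileDownload, mastersrc="~/configs/proj-site.cfg",
#     slavedest="site.cfg", mode=0644)
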
0
tools/buildbot/buildbot/test/__init__.py
Normal file
12
tools/buildbot/buildbot/test/emit.py
Normal file
@ -0,0 +1,12 @@
#! /usr/bin/python

import os, sys

sys.stdout.write("this is stdout\n")
sys.stderr.write("this is stderr\n")
if os.environ.has_key("EMIT_TEST"):
    sys.stdout.write("EMIT_TEST: %s\n" % os.environ["EMIT_TEST"])
open("log1.out","wt").write("this is log1\n")

rc = int(sys.argv[1])
sys.exit(rc)
44
tools/buildbot/buildbot/test/emitlogs.py
Normal file
@ -0,0 +1,44 @@
#! /usr/bin/python

import sys, time, os.path, StringIO

mode = 0
if len(sys.argv) > 1:
    mode = int(sys.argv[1])

if mode == 0:
    log2 = open("log2.out", "wt")
    log3 = open("log3.out", "wt")
elif mode == 1:
    # delete the logfiles first, and wait a moment to exercise a failure path
    if os.path.exists("log2.out"):
        os.unlink("log2.out")
    if os.path.exists("log3.out"):
        os.unlink("log3.out")
    time.sleep(2)
    log2 = open("log2.out", "wt")
    log3 = open("log3.out", "wt")
elif mode == 2:
    # don't create the logfiles at all
    log2 = StringIO.StringIO()
    log3 = StringIO.StringIO()

def write(i):
    log2.write("this is log2 %d\n" % i)
    log2.flush()
    log3.write("this is log3 %d\n" % i)
    log3.flush()
    sys.stdout.write("this is stdout %d\n" % i)
    sys.stdout.flush()

write(0)
time.sleep(1)
write(1)
sys.stdin.read(1)
write(2)

log2.close()
log3.close()

sys.exit(0)
68
tools/buildbot/buildbot/test/mail/msg1
Normal file
@ -0,0 +1,68 @@
|
||||
Return-Path: <twisted-commits-admin@twistedmatrix.com>
|
||||
Delivered-To: warner-twistedcvs@luther.lothar.com
|
||||
Received: (qmail 11151 invoked by uid 1000); 11 Jan 2003 17:10:04 -0000
|
||||
Delivered-To: warner-twistedcvs@lothar.com
|
||||
Received: (qmail 1548 invoked by uid 13574); 11 Jan 2003 17:06:39 -0000
|
||||
Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
|
||||
by 130.94.181.6 (qmail-ldap-1.03) with SMTP
|
||||
for <warner-twistedcvs@lothar.com>; 11 Jan 2003 17:06:39 -0000
|
||||
Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
|
||||
by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
|
||||
id 18XP0U-0002Mq-00; Sat, 11 Jan 2003 11:01:14 -0600
|
||||
Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
|
||||
id 18XP02-0002MN-00
|
||||
for <twisted-commits@twistedmatrix.com>; Sat, 11 Jan 2003 11:00:46 -0600
|
||||
To: twisted-commits@twistedmatrix.com
|
||||
From: moshez CVS <moshez@twistedmatrix.com>
|
||||
Reply-To: twisted-python@twistedmatrix.com
|
||||
X-Mailer: CVSToys
|
||||
From: moshez CVS <moshez@twistedmatrix.com>
|
||||
Reply-To: twisted-python@twistedmatrix.com
|
||||
Message-Id: <E18XP02-0002MN-00@pyramid.twistedmatrix.com>
|
||||
Subject: [Twisted-commits] Instance massenger, apparently
|
||||
Sender: twisted-commits-admin@twistedmatrix.com
|
||||
Errors-To: twisted-commits-admin@twistedmatrix.com
|
||||
X-BeenThere: twisted-commits@twistedmatrix.com
|
||||
X-Mailman-Version: 2.0.11
|
||||
Precedence: bulk
|
||||
List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
|
||||
List-Post: <mailto:twisted-commits@twistedmatrix.com>
|
||||
List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
|
||||
<mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
|
||||
List-Id: <twisted-commits.twistedmatrix.com>
|
||||
List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
|
||||
<mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
|
||||
List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
|
||||
Date: Sat, 11 Jan 2003 11:00:46 -0600
|
||||
Status:
|
||||
|
||||
Modified files:
|
||||
Twisted/debian/python-twisted.menu.in 1.3 1.4
|
||||
|
||||
Log message:
|
||||
Instance massenger, apparently
|
||||
|
||||
|
||||
ViewCVS links:
|
||||
http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/debian/python-twisted.menu.in.diff?r1=text&tr1=1.3&r2=text&tr2=1.4&cvsroot=Twisted
|
||||
|
||||
Index: Twisted/debian/python-twisted.menu.in
|
||||
diff -u Twisted/debian/python-twisted.menu.in:1.3 Twisted/debian/python-twisted.menu.in:1.4
|
||||
--- Twisted/debian/python-twisted.menu.in:1.3 Sat Dec 28 10:02:12 2002
|
||||
+++ Twisted/debian/python-twisted.menu.in Sat Jan 11 09:00:44 2003
|
||||
@@ -1,7 +1,7 @@
|
||||
?package(python@VERSION@-twisted):\
|
||||
needs=x11\
|
||||
section="Apps/Net"\
|
||||
-title="Twisted Instant Messenger (@VERSION@)"\
|
||||
+title="Twisted Instance Messenger (@VERSION@)"\
|
||||
command="/usr/bin/t-im@VERSION@"
|
||||
|
||||
?package(python@VERSION@-twisted):\
|
||||
|
||||
.
|
||||
|
||||
_______________________________________________
|
||||
Twisted-commits mailing list
|
||||
Twisted-commits@twistedmatrix.com
|
||||
http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
|
101
tools/buildbot/buildbot/test/mail/msg2
Normal file
@ -0,0 +1,101 @@
|
||||
Return-Path: <twisted-commits-admin@twistedmatrix.com>
|
||||
Delivered-To: warner-twistedcvs@luther.lothar.com
|
||||
Received: (qmail 32220 invoked by uid 1000); 14 Jan 2003 21:50:04 -0000
|
||||
Delivered-To: warner-twistedcvs@lothar.com
|
||||
Received: (qmail 7923 invoked by uid 13574); 14 Jan 2003 21:49:48 -0000
|
||||
Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
|
||||
by 130.94.181.6 (qmail-ldap-1.03) with SMTP
|
||||
for <warner-twistedcvs@lothar.com>; 14 Jan 2003 21:49:48 -0000
|
||||
Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
|
||||
by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
|
||||
id 18YYr0-0005en-00; Tue, 14 Jan 2003 15:44:14 -0600
|
||||
Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
|
||||
id 18YYq7-0005eQ-00
|
||||
for <twisted-commits@twistedmatrix.com>; Tue, 14 Jan 2003 15:43:19 -0600
|
||||
To: twisted-commits@twistedmatrix.com
|
||||
From: itamarst CVS <itamarst@twistedmatrix.com>
|
||||
Reply-To: twisted-python@twistedmatrix.com
|
||||
X-Mailer: CVSToys
|
||||
From: itamarst CVS <itamarst@twistedmatrix.com>
|
||||
Reply-To: twisted-python@twistedmatrix.com
|
||||
Message-Id: <E18YYq7-0005eQ-00@pyramid.twistedmatrix.com>
|
||||
Subject: [Twisted-commits] submit formmethod now subclass of Choice
|
||||
Sender: twisted-commits-admin@twistedmatrix.com
|
||||
Errors-To: twisted-commits-admin@twistedmatrix.com
|
||||
X-BeenThere: twisted-commits@twistedmatrix.com
|
||||
X-Mailman-Version: 2.0.11
|
||||
Precedence: bulk
|
||||
List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
|
||||
List-Post: <mailto:twisted-commits@twistedmatrix.com>
|
||||
List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
|
||||
<mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
|
||||
List-Id: <twisted-commits.twistedmatrix.com>
|
||||
List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
|
||||
<mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
|
||||
List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
|
||||
Date: Tue, 14 Jan 2003 15:43:19 -0600
|
||||
Status:
|
||||
|
||||
Modified files:
|
||||
Twisted/twisted/web/woven/form.py 1.20 1.21
|
||||
Twisted/twisted/python/formmethod.py 1.12 1.13
|
||||
|
||||
Log message:
|
||||
submit formmethod now subclass of Choice
|
||||
|
||||
|
||||
ViewCVS links:
|
||||
http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/twisted/web/woven/form.py.diff?r1=text&tr1=1.20&r2=text&tr2=1.21&cvsroot=Twisted
|
||||
http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/twisted/python/formmethod.py.diff?r1=text&tr1=1.12&r2=text&tr2=1.13&cvsroot=Twisted
|
||||
|
||||
Index: Twisted/twisted/web/woven/form.py
|
||||
diff -u Twisted/twisted/web/woven/form.py:1.20 Twisted/twisted/web/woven/form.py:1.21
|
||||
--- Twisted/twisted/web/woven/form.py:1.20 Tue Jan 14 12:07:29 2003
|
||||
+++ Twisted/twisted/web/woven/form.py Tue Jan 14 13:43:16 2003
|
||||
@@ -140,8 +140,8 @@
|
||||
|
||||
def input_submit(self, request, content, arg):
|
||||
div = content.div()
|
||||
- for value in arg.buttons:
|
||||
- div.input(type="submit", name=arg.name, value=value)
|
||||
+ for tag, value, desc in arg.choices:
|
||||
+ div.input(type="submit", name=arg.name, value=tag)
|
||||
div.text(" ")
|
||||
if arg.reset:
|
||||
div.input(type="reset")
|
||||
|
||||
Index: Twisted/twisted/python/formmethod.py
|
||||
diff -u Twisted/twisted/python/formmethod.py:1.12 Twisted/twisted/python/formmethod.py:1.13
|
||||
--- Twisted/twisted/python/formmethod.py:1.12 Tue Jan 14 12:07:30 2003
|
||||
+++ Twisted/twisted/python/formmethod.py Tue Jan 14 13:43:17 2003
|
||||
@@ -180,19 +180,13 @@
|
||||
return 1
|
||||
|
||||
|
||||
-class Submit(Argument):
|
||||
+class Submit(Choice):
|
||||
"""Submit button or a reasonable facsimile thereof."""
|
||||
|
||||
- def __init__(self, name, buttons=["Submit"], reset=0, shortDesc=None, longDesc=None):
|
||||
- Argument.__init__(self, name, shortDesc=shortDesc, longDesc=longDesc)
|
||||
- self.buttons = buttons
|
||||
+ def __init__(self, name, choices=[("Submit", "submit", "Submit form")],
|
||||
+ reset=0, shortDesc=None, longDesc=None):
|
||||
+ Choice.__init__(self, name, choices=choices, shortDesc=shortDesc, longDesc=longDesc)
|
||||
self.reset = reset
|
||||
-
|
||||
- def coerce(self, val):
|
||||
- if val in self.buttons:
|
||||
- return val
|
||||
- else:
|
||||
- raise InputError, "no such action"
|
||||
|
||||
|
||||
class PresentationHint:
|
||||
|
||||
.
|
||||
|
||||
_______________________________________________
|
||||
Twisted-commits mailing list
|
||||
Twisted-commits@twistedmatrix.com
|
||||
http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
|
97
tools/buildbot/buildbot/test/mail/msg3
Normal file
@ -0,0 +1,97 @@
|
||||
Return-Path: <twisted-commits-admin@twistedmatrix.com>
|
||||
Delivered-To: warner-twistedcvs@luther.lothar.com
|
||||
Received: (qmail 32220 invoked by uid 1000); 14 Jan 2003 21:50:04 -0000
|
||||
Delivered-To: warner-twistedcvs@lothar.com
|
||||
Received: (qmail 7923 invoked by uid 13574); 14 Jan 2003 21:49:48 -0000
|
||||
Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
|
||||
by 130.94.181.6 (qmail-ldap-1.03) with SMTP
|
||||
for <warner-twistedcvs@lothar.com>; 14 Jan 2003 21:49:48 -0000
|
||||
Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
|
||||
by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
|
||||
id 18YYr0-0005en-00; Tue, 14 Jan 2003 15:44:14 -0600
|
||||
Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
|
||||
id 18YYq7-0005eQ-00
|
||||
for <twisted-commits@twistedmatrix.com>; Tue, 14 Jan 2003 15:43:19 -0600
|
||||
To: twisted-commits@twistedmatrix.com
|
||||
From: itamarst CVS <itamarst@twistedmatrix.com>
|
||||
Reply-To: twisted-python@twistedmatrix.com
|
||||
X-Mailer: CVSToys
|
||||
From: itamarst CVS <itamarst@twistedmatrix.com>
|
||||
Reply-To: twisted-python@twistedmatrix.com
|
||||
Message-Id: <E18YYq7-0005eQ-00@pyramid.twistedmatrix.com>
|
||||
Subject: [Twisted-commits] submit formmethod now subclass of Choice
|
||||
Sender: twisted-commits-admin@twistedmatrix.com
|
||||
Errors-To: twisted-commits-admin@twistedmatrix.com
|
||||
X-BeenThere: twisted-commits@twistedmatrix.com
|
||||
X-Mailman-Version: 2.0.11
|
||||
Precedence: bulk
|
||||
List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
|
||||
List-Post: <mailto:twisted-commits@twistedmatrix.com>
|
||||
List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
|
||||
<mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
|
||||
List-Id: <twisted-commits.twistedmatrix.com>
|
||||
List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
|
||||
<mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
|
||||
List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
|
||||
Date: Tue, 14 Jan 2003 15:43:19 -0600
|
||||
Status:
|
||||
|
||||
Modified files:
|
||||
Twisted/twisted/web/woven/form.py 1.20 1.21
|
||||
Twisted/twisted/python/formmethod.py 1.12 1.13
|
||||
|
||||
Log message:
|
||||
submit formmethod now subclass of Choice
|
||||
|
||||
|
||||
Index: Twisted/twisted/web/woven/form.py
|
||||
diff -u Twisted/twisted/web/woven/form.py:1.20 Twisted/twisted/web/woven/form.py:1.21
|
||||
--- Twisted/twisted/web/woven/form.py:1.20 Tue Jan 14 12:07:29 2003
|
||||
+++ Twisted/twisted/web/woven/form.py Tue Jan 14 13:43:16 2003
|
||||
@@ -140,8 +140,8 @@
|
||||
|
||||
def input_submit(self, request, content, arg):
|
||||
div = content.div()
|
||||
- for value in arg.buttons:
|
||||
- div.input(type="submit", name=arg.name, value=value)
|
||||
+ for tag, value, desc in arg.choices:
|
||||
+ div.input(type="submit", name=arg.name, value=tag)
|
||||
div.text(" ")
|
||||
if arg.reset:
|
||||
div.input(type="reset")
|
||||
|
||||
Index: Twisted/twisted/python/formmethod.py
|
||||
diff -u Twisted/twisted/python/formmethod.py:1.12 Twisted/twisted/python/formmethod.py:1.13
|
||||
--- Twisted/twisted/python/formmethod.py:1.12 Tue Jan 14 12:07:30 2003
|
||||
+++ Twisted/twisted/python/formmethod.py Tue Jan 14 13:43:17 2003
|
||||
@@ -180,19 +180,13 @@
|
||||
return 1
|
||||
|
||||
|
||||
-class Submit(Argument):
|
||||
+class Submit(Choice):
|
||||
"""Submit button or a reasonable facsimile thereof."""
|
||||
|
||||
- def __init__(self, name, buttons=["Submit"], reset=0, shortDesc=None, longDesc=None):
|
||||
- Argument.__init__(self, name, shortDesc=shortDesc, longDesc=longDesc)
|
||||
- self.buttons = buttons
|
||||
+ def __init__(self, name, choices=[("Submit", "submit", "Submit form")],
|
||||
+ reset=0, shortDesc=None, longDesc=None):
|
||||
+ Choice.__init__(self, name, choices=choices, shortDesc=shortDesc, longDesc=longDesc)
|
||||
self.reset = reset
|
||||
-
|
||||
- def coerce(self, val):
|
||||
- if val in self.buttons:
|
||||
- return val
|
||||
- else:
|
||||
- raise InputError, "no such action"
|
||||
|
||||
|
||||
class PresentationHint:
|
||||
|
||||
.
|
||||
|
||||
_______________________________________________
|
||||
Twisted-commits mailing list
|
||||
Twisted-commits@twistedmatrix.com
|
||||
http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
|
45
tools/buildbot/buildbot/test/mail/msg4
Normal file
@ -0,0 +1,45 @@
|
||||
Return-Path: <twisted-commits-admin@twistedmatrix.com>
|
||||
Delivered-To: warner-twistedcvs@luther.lothar.com
|
||||
Received: (qmail 32220 invoked by uid 1000); 14 Jan 2003 21:50:04 -0000
|
||||
Delivered-To: warner-twistedcvs@lothar.com
|
||||
Received: (qmail 7923 invoked by uid 13574); 14 Jan 2003 21:49:48 -0000
|
||||
Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
|
||||
by 130.94.181.6 (qmail-ldap-1.03) with SMTP
|
||||
for <warner-twistedcvs@lothar.com>; 14 Jan 2003 21:49:48 -0000
|
||||
Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
|
||||
by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
|
||||
id 18YYr0-0005en-00; Tue, 14 Jan 2003 15:44:14 -0600
|
||||
Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
|
||||
id 18YYq7-0005eQ-00
|
||||
for <twisted-commits@twistedmatrix.com>; Tue, 14 Jan 2003 15:43:19 -0600
|
||||
To: twisted-commits@twistedmatrix.com
|
||||
From: itamarst CVS <itamarst@twistedmatrix.com>
|
||||
Reply-To: twisted-python@twistedmatrix.com
|
||||
X-Mailer: CVSToys
|
||||
From: itamarst CVS <itamarst@twistedmatrix.com>
|
||||
Reply-To: twisted-python@twistedmatrix.com
|
||||
Message-Id: <E18YYq7-0005eQ-00@pyramid.twistedmatrix.com>
|
||||
Subject: [Twisted-commits] submit formmethod now subclass of Choice
|
||||
Sender: twisted-commits-admin@twistedmatrix.com
|
||||
Errors-To: twisted-commits-admin@twistedmatrix.com
|
||||
X-BeenThere: twisted-commits@twistedmatrix.com
|
||||
X-Mailman-Version: 2.0.11
|
||||
Precedence: bulk
|
||||
List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
|
||||
List-Post: <mailto:twisted-commits@twistedmatrix.com>
|
||||
List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
|
||||
<mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
|
||||
List-Id: <twisted-commits.twistedmatrix.com>
|
||||
List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
|
||||
<mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
|
||||
List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
|
||||
Date: Tue, 14 Jan 2003 15:43:19 -0600
|
||||
Status:
|
||||
|
||||
Modified files:
|
||||
Twisted/twisted/web/woven/form.py 1.20 1.21
|
||||
Twisted/twisted/python/formmethod.py 1.12 1.13
|
||||
|
||||
Log message:
|
||||
submit formmethod now subclass of Choice
|
||||
|
54
tools/buildbot/buildbot/test/mail/msg5
Normal file
@ -0,0 +1,54 @@
|
||||
Return-Path: <twisted-commits-admin@twistedmatrix.com>
|
||||
Delivered-To: warner-twistedcvs@luther.lothar.com
|
||||
Received: (qmail 5865 invoked by uid 1000); 17 Jan 2003 07:00:04 -0000
|
||||
Delivered-To: warner-twistedcvs@lothar.com
|
||||
Received: (qmail 40460 invoked by uid 13574); 17 Jan 2003 06:51:55 -0000
|
||||
Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
|
||||
by 130.94.181.6 (qmail-ldap-1.03) with SMTP
|
||||
for <warner-twistedcvs@lothar.com>; 17 Jan 2003 06:51:55 -0000
|
||||
Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
|
||||
by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
|
||||
id 18ZQGk-0003WL-00; Fri, 17 Jan 2003 00:46:22 -0600
|
||||
Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
|
||||
id 18ZQFy-0003VP-00
|
||||
for <twisted-commits@twistedmatrix.com>; Fri, 17 Jan 2003 00:45:34 -0600
|
||||
To: twisted-commits@twistedmatrix.com
|
||||
From: etrepum CVS <etrepum@twistedmatrix.com>
|
||||
Reply-To: twisted-python@twistedmatrix.com
|
||||
X-Mailer: CVSToys
|
||||
From: etrepum CVS <etrepum@twistedmatrix.com>
|
||||
Reply-To: twisted-python@twistedmatrix.com
|
||||
Message-Id: <E18ZQFy-0003VP-00@pyramid.twistedmatrix.com>
|
||||
Subject: [Twisted-commits] Directory /cvs/Twisted/doc/examples/cocoaDemo added to the repository
|
||||
Sender: twisted-commits-admin@twistedmatrix.com
|
||||
Errors-To: twisted-commits-admin@twistedmatrix.com
|
||||
X-BeenThere: twisted-commits@twistedmatrix.com
|
||||
X-Mailman-Version: 2.0.11
|
||||
Precedence: bulk
|
||||
List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
|
||||
List-Post: <mailto:twisted-commits@twistedmatrix.com>
|
||||
List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
|
||||
<mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
|
||||
List-Id: <twisted-commits.twistedmatrix.com>
|
||||
List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
|
||||
<mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
|
||||
List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
|
||||
Date: Fri, 17 Jan 2003 00:45:34 -0600
|
||||
Status:
|
||||
|
||||
Modified files:
|
||||
Twisted/doc/examples/cocoaDemo 0 0
|
||||
|
||||
Log message:
|
||||
Directory /cvs/Twisted/doc/examples/cocoaDemo added to the repository
|
||||
|
||||
|
||||
ViewCVS links:
|
||||
http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo.diff?r1=text&tr1=NONE&r2=text&tr2=NONE&cvsroot=Twisted
|
||||
|
||||
.
|
||||
|
||||
_______________________________________________
|
||||
Twisted-commits mailing list
|
||||
Twisted-commits@twistedmatrix.com
|
||||
http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
|
70
tools/buildbot/buildbot/test/mail/msg6
Normal file
@ -0,0 +1,70 @@
|
||||
Return-Path: <twisted-commits-admin@twistedmatrix.com>
|
||||
Delivered-To: warner-twistedcvs@luther.lothar.com
|
||||
Received: (qmail 7252 invoked by uid 1000); 17 Jan 2003 07:10:04 -0000
|
||||
Delivered-To: warner-twistedcvs@lothar.com
|
||||
Received: (qmail 43115 invoked by uid 13574); 17 Jan 2003 07:07:57 -0000
|
||||
Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
|
||||
by 130.94.181.6 (qmail-ldap-1.03) with SMTP
|
||||
for <warner-twistedcvs@lothar.com>; 17 Jan 2003 07:07:57 -0000
|
||||
Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
|
||||
by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
|
||||
id 18ZQW6-0003dA-00; Fri, 17 Jan 2003 01:02:14 -0600
|
||||
Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
|
||||
id 18ZQV7-0003cm-00
|
||||
for <twisted-commits@twistedmatrix.com>; Fri, 17 Jan 2003 01:01:13 -0600
|
||||
To: twisted-commits@twistedmatrix.com
|
||||
From: etrepum CVS <etrepum@twistedmatrix.com>
|
||||
Reply-To: twisted-python@twistedmatrix.com
|
||||
X-Mailer: CVSToys
|
||||
From: etrepum CVS <etrepum@twistedmatrix.com>
|
||||
Reply-To: twisted-python@twistedmatrix.com
|
||||
Message-Id: <E18ZQV7-0003cm-00@pyramid.twistedmatrix.com>
|
||||
Subject: [Twisted-commits] Cocoa (OS X) clone of the QT demo, using polling reactor
|
||||
Sender: twisted-commits-admin@twistedmatrix.com
|
||||
Errors-To: twisted-commits-admin@twistedmatrix.com
|
||||
X-BeenThere: twisted-commits@twistedmatrix.com
|
||||
X-Mailman-Version: 2.0.11
|
||||
Precedence: bulk
|
||||
List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
|
||||
List-Post: <mailto:twisted-commits@twistedmatrix.com>
|
||||
List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
|
||||
<mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
|
||||
List-Id: <twisted-commits.twistedmatrix.com>
|
||||
List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
|
||||
<mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
|
||||
List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
|
||||
Date: Fri, 17 Jan 2003 01:01:13 -0600
|
||||
Status:
|
||||
|
||||
Modified files:
|
||||
Twisted/doc/examples/cocoaDemo/MyAppDelegate.py None 1.1
|
||||
Twisted/doc/examples/cocoaDemo/__main__.py None 1.1
|
||||
Twisted/doc/examples/cocoaDemo/bin-python-main.m None 1.1
|
||||
Twisted/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings None 1.1
|
||||
Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib None 1.1
|
||||
Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib None 1.1
|
||||
Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib None 1.1
|
||||
Twisted/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj None 1.1
|
||||
|
||||
Log message:
|
||||
Cocoa (OS X) clone of the QT demo, using polling reactor
|
||||
|
||||
Requires pyobjc ( http://pyobjc.sourceforge.net ), it's not much different than the template project. The reactor is iterated periodically by a repeating NSTimer.
|
||||
|
||||
|
||||
ViewCVS links:
|
||||
http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/MyAppDelegate.py.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
|
||||
http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/__main__.py.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
|
||||
http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/bin-python-main.m.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
|
||||
http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
|
||||
http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
|
||||
http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
|
||||
http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
|
||||
http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
|
||||
|
||||
.
|
||||
|
||||
_______________________________________________
|
||||
Twisted-commits mailing list
|
||||
Twisted-commits@twistedmatrix.com
|
||||
http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
|
68
tools/buildbot/buildbot/test/mail/msg7
Normal file
@ -0,0 +1,68 @@
|
||||
Return-Path: <twisted-commits-admin@twistedmatrix.com>
|
||||
Delivered-To: warner-twistedcvs@luther.lothar.com
|
||||
Received: (qmail 8665 invoked by uid 1000); 17 Jan 2003 08:00:03 -0000
|
||||
Delivered-To: warner-twistedcvs@lothar.com
|
||||
Received: (qmail 50728 invoked by uid 13574); 17 Jan 2003 07:51:14 -0000
|
||||
Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
|
||||
by 130.94.181.6 (qmail-ldap-1.03) with SMTP
|
||||
for <warner-twistedcvs@lothar.com>; 17 Jan 2003 07:51:14 -0000
|
||||
Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
|
||||
by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
|
||||
id 18ZRBm-0003pN-00; Fri, 17 Jan 2003 01:45:18 -0600
|
||||
Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
|
||||
id 18ZRBQ-0003ou-00
|
||||
for <twisted-commits@twistedmatrix.com>; Fri, 17 Jan 2003 01:44:56 -0600
|
||||
To: twisted-commits@twistedmatrix.com
|
||||
From: etrepum CVS <etrepum@twistedmatrix.com>
|
||||
Reply-To: twisted-python@twistedmatrix.com
|
||||
X-Mailer: CVSToys
|
||||
From: etrepum CVS <etrepum@twistedmatrix.com>
|
||||
Reply-To: twisted-python@twistedmatrix.com
|
||||
Message-Id: <E18ZRBQ-0003ou-00@pyramid.twistedmatrix.com>
|
||||
Subject: [Twisted-commits] Directories break debian build script, waiting for reasonable fix
|
||||
Sender: twisted-commits-admin@twistedmatrix.com
|
||||
Errors-To: twisted-commits-admin@twistedmatrix.com
|
||||
X-BeenThere: twisted-commits@twistedmatrix.com
|
||||
X-Mailman-Version: 2.0.11
|
||||
Precedence: bulk
|
||||
List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
|
||||
List-Post: <mailto:twisted-commits@twistedmatrix.com>
|
||||
List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
|
||||
<mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
|
||||
List-Id: <twisted-commits.twistedmatrix.com>
|
||||
List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
|
||||
<mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
|
||||
List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
|
||||
Date: Fri, 17 Jan 2003 01:44:56 -0600
|
||||
Status:
|
||||
|
||||
Modified files:
|
||||
Twisted/doc/examples/cocoaDemo/MyAppDelegate.py 1.1 None
|
||||
Twisted/doc/examples/cocoaDemo/__main__.py 1.1 None
|
||||
Twisted/doc/examples/cocoaDemo/bin-python-main.m 1.1 None
|
||||
Twisted/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings 1.1 None
|
||||
Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib 1.1 None
|
||||
Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib 1.1 None
|
||||
Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib 1.1 None
|
||||
Twisted/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj 1.1 None
|
||||
|
||||
Log message:
|
||||
Directories break debian build script, waiting for reasonable fix
|
||||
|
||||
|
||||
ViewCVS links:
|
||||
http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/MyAppDelegate.py.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
|
||||
http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/__main__.py.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
|
||||
http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/bin-python-main.m.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
|
||||
http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
|
||||
http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
|
||||
http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
|
||||
http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
|
||||
http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
|
||||
|
||||
.
|
||||
|
||||
_______________________________________________
|
||||
Twisted-commits mailing list
|
||||
Twisted-commits@twistedmatrix.com
|
||||
http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
|
61
tools/buildbot/buildbot/test/mail/msg8
Normal file
@ -0,0 +1,61 @@
|
||||
Return-Path: <twisted-commits-admin@twistedmatrix.com>
|
||||
Delivered-To: warner-twistedcvs@luther.lothar.com
|
||||
Received: (qmail 10804 invoked by uid 1000); 19 Jan 2003 14:10:03 -0000
|
||||
Delivered-To: warner-twistedcvs@lothar.com
|
||||
Received: (qmail 6704 invoked by uid 13574); 19 Jan 2003 14:00:20 -0000
|
||||
Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
|
||||
by 130.94.181.6 (qmail-ldap-1.03) with SMTP
|
||||
for <warner-twistedcvs@lothar.com>; 19 Jan 2003 14:00:20 -0000
|
||||
Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
|
||||
by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
|
||||
id 18aFtx-0002WS-00; Sun, 19 Jan 2003 07:54:17 -0600
|
||||
Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
|
||||
id 18aFtH-0002W3-00
|
||||
for <twisted-commits@twistedmatrix.com>; Sun, 19 Jan 2003 07:53:35 -0600
|
||||
To: twisted-commits@twistedmatrix.com
|
||||
From: acapnotic CVS <acapnotic@twistedmatrix.com>
|
||||
X-Mailer: CVSToys
|
||||
Message-Id: <E18aFtH-0002W3-00@pyramid.twistedmatrix.com>
|
||||
Subject: [Twisted-commits] it doesn't work with invalid syntax
|
||||
Sender: twisted-commits-admin@twistedmatrix.com
|
||||
Errors-To: twisted-commits-admin@twistedmatrix.com
|
||||
X-BeenThere: twisted-commits@twistedmatrix.com
|
||||
X-Mailman-Version: 2.0.11
|
||||
Precedence: bulk
|
||||
List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
|
||||
List-Post: <mailto:twisted-commits@twistedmatrix.com>
|
||||
List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
|
||||
<mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
|
||||
List-Id: <twisted-commits.twistedmatrix.com>
|
||||
List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
|
||||
<mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
|
||||
List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
|
||||
Date: Sun, 19 Jan 2003 07:53:35 -0600
|
||||
Status:
|
||||
|
||||
Modified files:
|
||||
CVSROOT/freshCfg 1.16 1.17
|
||||
|
||||
Log message:
|
||||
it doesn't work with invalid syntax
|
||||
|
||||
|
||||
Index: CVSROOT/freshCfg
|
||||
diff -u CVSROOT/freshCfg:1.16 CVSROOT/freshCfg:1.17
|
||||
--- CVSROOT/freshCfg:1.16 Sun Jan 19 05:52:34 2003
|
||||
+++ CVSROOT/freshCfg Sun Jan 19 05:53:34 2003
|
||||
@@ -27,7 +27,7 @@
|
||||
('/cvs', '^Reality', None, MailNotification(['reality-commits'])),
|
||||
('/cvs', '^Twistby', None, MailNotification(['acapnotic'])),
|
||||
('/cvs', '^CVSToys', None,
|
||||
- MailNotification(['CVSToys-list']
|
||||
+ MailNotification(['CVSToys-list'],
|
||||
"http://twistedmatrix.com/users/jh.twistd/"
|
||||
"viewcvs/cgi/viewcvs.cgi/",
|
||||
replyTo="cvstoys-list@twistedmatrix.com"),)
|
||||
|
||||
|
||||
_______________________________________________
|
||||
Twisted-commits mailing list
|
||||
Twisted-commits@twistedmatrix.com
|
||||
http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
|
18
tools/buildbot/buildbot/test/mail/msg9
Normal file
@ -0,0 +1,18 @@
|
||||
From twisted-python@twistedmatrix.com Fri Dec 26 07:25:13 2003
|
||||
From: twisted-python@twistedmatrix.com (exarkun CVS)
|
||||
Date: Fri, 26 Dec 2003 00:25:13 -0700
|
||||
Subject: [Twisted-commits] Directory /cvs/Twisted/sandbox/exarkun/persist-plugin added to the repository
|
||||
Message-ID: <E1AZmLR-0000Tl-00@wolfwood>
|
||||
|
||||
Modified files:
|
||||
Twisted/sandbox/exarkun/persist-plugin
|
||||
|
||||
Log message:
|
||||
Directory /cvs/Twisted/sandbox/exarkun/persist-plugin added to the repository
|
||||
|
||||
|
||||
ViewCVS links:
|
||||
http://cvs.twistedmatrix.com/cvs/sandbox/exarkun/persist-plugin?cvsroot=Twisted
|
||||
|
||||
|
||||
|
152
tools/buildbot/buildbot/test/mail/syncmail.1
Normal file
@ -0,0 +1,152 @@
|
||||
Return-Path: <warner@users.sourceforge.net>
|
||||
Delivered-To: warner-sourceforge@luther.lothar.com
|
||||
Received: (qmail 23758 invoked by uid 1000); 28 Jul 2003 07:22:14 -0000
|
||||
Delivered-To: warner-sourceforge@lothar.com
|
||||
Received: (qmail 62715 invoked by uid 13574); 28 Jul 2003 07:22:03 -0000
|
||||
Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
|
||||
by 130.94.181.6 (qmail-ldap-1.03) with SMTP
|
||||
for <warner-sourceforge@lothar.com>; 28 Jul 2003 07:22:03 -0000
|
||||
Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
|
||||
by sc8-sf-list1.sourceforge.net with esmtp
|
||||
(Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
|
||||
id 19h2KY-0004Nr-00
|
||||
for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
|
||||
Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
|
||||
by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
|
||||
id 19h2KY-0001rv-00
|
||||
for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
|
||||
Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
|
||||
by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
|
||||
id 19h2KY-0003r4-00
|
||||
for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
|
||||
From: warner@users.sourceforge.net
|
||||
To: warner@users.sourceforge.net
|
||||
Subject: buildbot/buildbot/changes freshcvsmail.py,1.2,1.3
|
||||
Message-Id: <E19h2KY-0003r4-00@sc8-pr-cvs1.sourceforge.net>
|
||||
Date: Mon, 28 Jul 2003 00:22:02 -0700
|
||||
Status:
|
||||
|
||||
Update of /cvsroot/buildbot/buildbot/buildbot/changes
|
||||
In directory sc8-pr-cvs1:/tmp/cvs-serv14795/buildbot/changes
|
||||
|
||||
Modified Files:
|
||||
freshcvsmail.py
|
||||
Log Message:
|
||||
remove leftover code, leave a temporary compatibility import. Note! Start
|
||||
importing FCMaildirSource from changes.mail instead of changes.freshcvsmail
|
||||
|
||||
|
||||
Index: freshcvsmail.py
|
||||
===================================================================
|
||||
RCS file: /cvsroot/buildbot/buildbot/buildbot/changes/freshcvsmail.py,v
|
||||
retrieving revision 1.2
|
||||
retrieving revision 1.3
|
||||
diff -C2 -d -r1.2 -r1.3
|
||||
*** freshcvsmail.py 27 Jul 2003 18:54:08 -0000 1.2
|
||||
--- freshcvsmail.py 28 Jul 2003 07:22:00 -0000 1.3
|
||||
***************
|
||||
*** 1,96 ****
|
||||
#! /usr/bin/python
|
||||
|
||||
! from buildbot.interfaces import IChangeSource
|
||||
! from buildbot.changes.maildirtwisted import MaildirTwisted
|
||||
! from buildbot.changes.changes import Change
|
||||
! from rfc822 import Message
|
||||
! import os, os.path
|
||||
!
|
||||
! def parseFreshCVSMail(fd, prefix=None):
|
||||
! """Parse mail sent by FreshCVS"""
|
||||
! # this uses rfc822.Message so it can run under python2.1 . In the future
|
||||
! # it will be updated to use python2.2's "email" module.
|
||||
!
|
||||
! m = Message(fd)
|
||||
! # FreshCVS sets From: to "user CVS <user>", but the <> part may be
|
||||
! # modified by the MTA (to include a local domain)
|
||||
! name, addr = m.getaddr("from")
|
||||
! if not name:
|
||||
! return None # no From means this message isn't from FreshCVS
|
||||
! cvs = name.find(" CVS")
|
||||
! if cvs == -1:
|
||||
! return None # this message isn't from FreshCVS
|
||||
! who = name[:cvs]
|
||||
!
|
||||
! # we take the time of receipt as the time of checkin. Not correct,
|
||||
! # but it avoids the out-of-order-changes issue
|
||||
! #when = m.getdate() # and convert from 9-tuple, and handle timezone
|
||||
!
|
||||
! files = []
|
||||
! comments = ""
|
||||
! isdir = 0
|
||||
! lines = m.fp.readlines()
|
||||
! while lines:
|
||||
! line = lines.pop(0)
|
||||
! if line == "Modified files:\n":
|
||||
! break
|
||||
! while lines:
|
||||
! line = lines.pop(0)
|
||||
! if line == "\n":
|
||||
! break
|
||||
! line = line.rstrip("\n")
|
||||
! file, junk = line.split(None, 1)
|
||||
! if prefix:
|
||||
! # insist that the file start with the prefix: FreshCVS sends
|
||||
! # changes we don't care about too
|
||||
! bits = file.split(os.sep)
|
||||
! if bits[0] == prefix:
|
||||
! file = apply(os.path.join, bits[1:])
|
||||
! else:
|
||||
! break
|
||||
! if junk == "0 0":
|
||||
! isdir = 1
|
||||
! files.append(file)
|
||||
! while lines:
|
||||
! line = lines.pop(0)
|
||||
! if line == "Log message:\n":
|
||||
! break
|
||||
! # message is terminated by "ViewCVS links:" or "Index:..." (patch)
|
||||
! while lines:
|
||||
! line = lines.pop(0)
|
||||
! if line == "ViewCVS links:\n":
|
||||
! break
|
||||
! if line.find("Index: ") == 0:
|
||||
! break
|
||||
! comments += line
|
||||
! comments = comments.rstrip() + "\n"
|
||||
!
|
||||
! if not files:
|
||||
! return None
|
||||
!
|
||||
! change = Change(who, files, comments, isdir)
|
||||
!
|
||||
! return change
|
||||
!
|
||||
!
|
||||
!
|
||||
! class FCMaildirSource(MaildirTwisted):
|
||||
! """This source will watch a maildir that is subscribed to a FreshCVS
|
||||
! change-announcement mailing list.
|
||||
! """
|
||||
!
|
||||
! __implements__ = IChangeSource,
|
||||
|
||||
! def __init__(self, maildir, prefix=None):
|
||||
! MaildirTwisted.__init__(self, maildir)
|
||||
! self.changemaster = None # filled in when added
|
||||
! self.prefix = prefix
|
||||
! def describe(self):
|
||||
! return "FreshCVS mailing list in maildir %s" % self.maildir.where
|
||||
! def messageReceived(self, filename):
|
||||
! path = os.path.join(self.basedir, "new", filename)
|
||||
! change = parseFreshCVSMail(open(path, "r"), self.prefix)
|
||||
! if change:
|
||||
! self.changemaster.addChange(change)
|
||||
! os.rename(os.path.join(self.basedir, "new", filename),
|
||||
! os.path.join(self.basedir, "cur", filename))
|
||||
--- 1,5 ----
|
||||
#! /usr/bin/python
|
||||
|
||||
! # leftover import for compatibility
|
||||
|
||||
! from buildbot.changes.mail import FCMaildirSource
|
||||
|
||||
|
56
tools/buildbot/buildbot/test/mail/syncmail.2
Normal file
@ -0,0 +1,56 @@
|
||||
Return-Path: <warner@users.sourceforge.net>
|
||||
Delivered-To: warner-sourceforge@luther.lothar.com
|
||||
Received: (qmail 23221 invoked by uid 1000); 28 Jul 2003 06:53:15 -0000
|
||||
Delivered-To: warner-sourceforge@lothar.com
|
||||
Received: (qmail 58537 invoked by uid 13574); 28 Jul 2003 06:53:09 -0000
|
||||
Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
|
||||
by 130.94.181.6 (qmail-ldap-1.03) with SMTP
|
||||
for <warner-sourceforge@lothar.com>; 28 Jul 2003 06:53:09 -0000
|
||||
Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
|
||||
by sc8-sf-list1.sourceforge.net with esmtp
|
||||
(Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
|
||||
id 19h1sb-0003nw-00
|
||||
for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:09 -0700
|
||||
Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
|
||||
by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
|
||||
id 19h1sa-00018t-00
|
||||
for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:08 -0700
|
||||
Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
|
||||
by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
|
||||
id 19h1sa-0002mX-00
|
||||
for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:08 -0700
|
||||
From: warner@users.sourceforge.net
|
||||
To: warner@users.sourceforge.net
|
||||
Subject: buildbot ChangeLog,1.93,1.94
|
||||
Message-Id: <E19h1sa-0002mX-00@sc8-pr-cvs1.sourceforge.net>
|
||||
Date: Sun, 27 Jul 2003 23:53:08 -0700
|
||||
Status:
|
||||
|
||||
Update of /cvsroot/buildbot/buildbot
|
||||
In directory sc8-pr-cvs1:/tmp/cvs-serv10689
|
||||
|
||||
Modified Files:
|
||||
ChangeLog
|
||||
Log Message:
|
||||
* NEWS: started adding new features
|
||||
|
||||
|
||||
Index: ChangeLog
|
||||
===================================================================
|
||||
RCS file: /cvsroot/buildbot/buildbot/ChangeLog,v
|
||||
retrieving revision 1.93
|
||||
retrieving revision 1.94
|
||||
diff -C2 -d -r1.93 -r1.94
|
||||
*** ChangeLog 27 Jul 2003 22:53:27 -0000 1.93
|
||||
--- ChangeLog 28 Jul 2003 06:53:06 -0000 1.94
|
||||
***************
|
||||
*** 1,4 ****
|
||||
--- 1,6 ----
|
||||
2003-07-27 Brian Warner <warner@lothar.com>
|
||||
|
||||
+ * NEWS: started adding new features
|
||||
+
|
||||
* buildbot/changes/mail.py: start work on Syncmail parser, move
|
||||
mail sources into their own file
|
||||
|
||||
|
39
tools/buildbot/buildbot/test/mail/syncmail.3
Normal file
@ -0,0 +1,39 @@
|
||||
Return-Path: <warner@users.sourceforge.net>
|
||||
Delivered-To: warner-sourceforge@luther.lothar.com
|
||||
Received: (qmail 23196 invoked by uid 1000); 28 Jul 2003 06:51:53 -0000
|
||||
Delivered-To: warner-sourceforge@lothar.com
|
||||
Received: (qmail 58269 invoked by uid 13574); 28 Jul 2003 06:51:46 -0000
|
||||
Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
|
||||
by 130.94.181.6 (qmail-ldap-1.03) with SMTP
|
||||
for <warner-sourceforge@lothar.com>; 28 Jul 2003 06:51:46 -0000
|
||||
Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
|
||||
by sc8-sf-list1.sourceforge.net with esmtp
|
||||
(Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
|
||||
id 19h1rF-00027s-00
|
||||
for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:46 -0700
|
||||
Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
|
||||
by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
|
||||
id 19h1rF-00017O-00
|
||||
for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:45 -0700
|
||||
Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
|
||||
by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
|
||||
id 19h1rF-0002jg-00
|
||||
for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:45 -0700
|
||||
From: warner@users.sourceforge.net
|
||||
To: warner@users.sourceforge.net
|
||||
Subject: CVSROOT syncmail,1.1,NONE
|
||||
Message-Id: <E19h1rF-0002jg-00@sc8-pr-cvs1.sourceforge.net>
|
||||
Date: Sun, 27 Jul 2003 23:51:45 -0700
|
||||
Status:
|
||||
|
||||
Update of /cvsroot/buildbot/CVSROOT
|
||||
In directory sc8-pr-cvs1:/tmp/cvs-serv10515
|
||||
|
||||
Removed Files:
|
||||
syncmail
|
||||
Log Message:
|
||||
nevermind
|
||||
|
||||
--- syncmail DELETED ---
|
||||
|
||||
|
290
tools/buildbot/buildbot/test/mail/syncmail.4
Normal file
@ -0,0 +1,290 @@
|
||||
Return-Path: <warner@users.sourceforge.net>
|
||||
Delivered-To: warner-sourceforge@luther.lothar.com
|
||||
Received: (qmail 24111 invoked by uid 1000); 28 Jul 2003 08:01:54 -0000
|
||||
Delivered-To: warner-sourceforge@lothar.com
|
||||
Received: (qmail 68756 invoked by uid 13574); 28 Jul 2003 08:01:46 -0000
|
||||
Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
|
||||
by 130.94.181.6 (qmail-ldap-1.03) with SMTP
|
||||
for <warner-sourceforge@lothar.com>; 28 Jul 2003 08:01:46 -0000
|
||||
Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
|
||||
by sc8-sf-list1.sourceforge.net with esmtp
|
||||
(Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
|
||||
id 19h2wz-00029d-00
|
||||
for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 01:01:45 -0700
|
||||
Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
|
||||
by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
|
||||
id 19h2wz-0002XB-00
|
||||
for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 01:01:45 -0700
|
||||
Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
|
||||
by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
|
||||
id 19h2wz-0005a9-00
|
||||
for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 01:01:45 -0700
|
||||
From: warner@users.sourceforge.net
|
||||
To: warner@users.sourceforge.net
|
||||
Subject: buildbot/test/mail syncmail.1,NONE,1.1 syncmail.2,NONE,1.1 syncmail.3,NONE,1.1
|
||||
Message-Id: <E19h2wz-0005a9-00@sc8-pr-cvs1.sourceforge.net>
|
||||
Date: Mon, 28 Jul 2003 01:01:45 -0700
|
||||
Status:
|
||||
|
||||
Update of /cvsroot/buildbot/buildbot/test/mail
|
||||
In directory sc8-pr-cvs1:/tmp/cvs-serv21445
|
||||
|
||||
Added Files:
|
||||
syncmail.1 syncmail.2 syncmail.3
|
||||
Log Message:
|
||||
test cases for syncmail parser
|
||||
|
||||
--- NEW FILE: syncmail.1 ---
|
||||
Return-Path: <warner@users.sourceforge.net>
|
||||
Delivered-To: warner-sourceforge@luther.lothar.com
|
||||
Received: (qmail 23758 invoked by uid 1000); 28 Jul 2003 07:22:14 -0000
|
||||
Delivered-To: warner-sourceforge@lothar.com
|
||||
Received: (qmail 62715 invoked by uid 13574); 28 Jul 2003 07:22:03 -0000
|
||||
Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
|
||||
by 130.94.181.6 (qmail-ldap-1.03) with SMTP
|
||||
for <warner-sourceforge@lothar.com>; 28 Jul 2003 07:22:03 -0000
|
||||
Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
|
||||
by sc8-sf-list1.sourceforge.net with esmtp
|
||||
(Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
|
||||
id 19h2KY-0004Nr-00
|
||||
for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
|
||||
Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
|
||||
by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
|
||||
id 19h2KY-0001rv-00
|
||||
for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
|
||||
Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
|
||||
by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
|
||||
id 19h2KY-0003r4-00
|
||||
for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
|
||||
From: warner@users.sourceforge.net
|
||||
To: warner@users.sourceforge.net
|
||||
Subject: buildbot/buildbot/changes freshcvsmail.py,1.2,1.3
|
||||
Message-Id: <E19h2KY-0003r4-00@sc8-pr-cvs1.sourceforge.net>
|
||||
Date: Mon, 28 Jul 2003 00:22:02 -0700
|
||||
Status:
|
||||
|
||||
Update of /cvsroot/buildbot/buildbot/buildbot/changes
|
||||
In directory sc8-pr-cvs1:/tmp/cvs-serv14795/buildbot/changes
|
||||
|
||||
Modified Files:
|
||||
freshcvsmail.py
|
||||
Log Message:
|
||||
remove leftover code, leave a temporary compatibility import. Note! Start
|
||||
importing FCMaildirSource from changes.mail instead of changes.freshcvsmail
|
||||
|
||||
|
||||
Index: freshcvsmail.py
|
||||
===================================================================
|
||||
RCS file: /cvsroot/buildbot/buildbot/buildbot/changes/freshcvsmail.py,v
|
||||
retrieving revision 1.2
|
||||
retrieving revision 1.3
|
||||
diff -C2 -d -r1.2 -r1.3
|
||||
*** freshcvsmail.py 27 Jul 2003 18:54:08 -0000 1.2
|
||||
--- freshcvsmail.py 28 Jul 2003 07:22:00 -0000 1.3
|
||||
***************
|
||||
*** 1,96 ****
|
||||
#! /usr/bin/python
|
||||
|
||||
! from buildbot.interfaces import IChangeSource
|
||||
! from buildbot.changes.maildirtwisted import MaildirTwisted
|
||||
! from buildbot.changes.changes import Change
|
||||
! from rfc822 import Message
|
||||
! import os, os.path
|
||||
!
|
||||
! def parseFreshCVSMail(fd, prefix=None):
|
||||
! """Parse mail sent by FreshCVS"""
|
||||
! # this uses rfc822.Message so it can run under python2.1 . In the future
|
||||
! # it will be updated to use python2.2's "email" module.
|
||||
!
|
||||
! m = Message(fd)
|
||||
! # FreshCVS sets From: to "user CVS <user>", but the <> part may be
|
||||
! # modified by the MTA (to include a local domain)
|
||||
! name, addr = m.getaddr("from")
|
||||
! if not name:
|
||||
! return None # no From means this message isn't from FreshCVS
|
||||
! cvs = name.find(" CVS")
|
||||
! if cvs == -1:
|
||||
! return None # this message isn't from FreshCVS
|
||||
! who = name[:cvs]
|
||||
!
|
||||
! # we take the time of receipt as the time of checkin. Not correct,
|
||||
! # but it avoids the out-of-order-changes issue
|
||||
! #when = m.getdate() # and convert from 9-tuple, and handle timezone
|
||||
!
|
||||
! files = []
|
||||
! comments = ""
|
||||
! isdir = 0
|
||||
! lines = m.fp.readlines()
|
||||
! while lines:
|
||||
! line = lines.pop(0)
|
||||
! if line == "Modified files:\n":
|
||||
! break
|
||||
! while lines:
|
||||
! line = lines.pop(0)
|
||||
! if line == "\n":
|
||||
! break
|
||||
! line = line.rstrip("\n")
|
||||
! file, junk = line.split(None, 1)
|
||||
! if prefix:
|
||||
! # insist that the file start with the prefix: FreshCVS sends
|
||||
! # changes we don't care about too
|
||||
! bits = file.split(os.sep)
|
||||
! if bits[0] == prefix:
|
||||
! file = apply(os.path.join, bits[1:])
|
||||
! else:
|
||||
! break
|
||||
! if junk == "0 0":
|
||||
! isdir = 1
|
||||
! files.append(file)
|
||||
! while lines:
|
||||
! line = lines.pop(0)
|
||||
! if line == "Log message:\n":
|
||||
! break
|
||||
! # message is terminated by "ViewCVS links:" or "Index:..." (patch)
|
||||
! while lines:
|
||||
! line = lines.pop(0)
|
||||
! if line == "ViewCVS links:\n":
|
||||
! break
|
||||
! if line.find("Index: ") == 0:
|
||||
! break
|
||||
! comments += line
|
||||
! comments = comments.rstrip() + "\n"
|
||||
!
|
||||
! if not files:
|
||||
! return None
|
||||
!
|
||||
! change = Change(who, files, comments, isdir)
|
||||
!
|
||||
! return change
|
||||
!
|
||||
!
|
||||
!
|
||||
! class FCMaildirSource(MaildirTwisted):
|
||||
! """This source will watch a maildir that is subscribed to a FreshCVS
|
||||
! change-announcement mailing list.
|
||||
! """
|
||||
!
|
||||
! __implements__ = IChangeSource,
|
||||
|
||||
! def __init__(self, maildir, prefix=None):
|
||||
! MaildirTwisted.__init__(self, maildir)
|
||||
! self.changemaster = None # filled in when added
|
||||
! self.prefix = prefix
|
||||
! def describe(self):
|
||||
! return "FreshCVS mailing list in maildir %s" % self.maildir.where
|
||||
! def messageReceived(self, filename):
|
||||
! path = os.path.join(self.basedir, "new", filename)
|
||||
! change = parseFreshCVSMail(open(path, "r"), self.prefix)
|
||||
! if change:
|
||||
! self.changemaster.addChange(change)
|
||||
! os.rename(os.path.join(self.basedir, "new", filename),
|
||||
! os.path.join(self.basedir, "cur", filename))
|
||||
--- 1,5 ----
|
||||
#! /usr/bin/python
|
||||
|
||||
! # leftover import for compatibility
|
||||
|
||||
! from buildbot.changes.mail import FCMaildirSource
|
||||
|
||||
|
||||
|
||||
--- NEW FILE: syncmail.2 ---
|
||||
Return-Path: <warner@users.sourceforge.net>
|
||||
Delivered-To: warner-sourceforge@luther.lothar.com
|
||||
Received: (qmail 23221 invoked by uid 1000); 28 Jul 2003 06:53:15 -0000
|
||||
Delivered-To: warner-sourceforge@lothar.com
|
||||
Received: (qmail 58537 invoked by uid 13574); 28 Jul 2003 06:53:09 -0000
|
||||
Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
|
||||
by 130.94.181.6 (qmail-ldap-1.03) with SMTP
|
||||
for <warner-sourceforge@lothar.com>; 28 Jul 2003 06:53:09 -0000
|
||||
Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
|
||||
by sc8-sf-list1.sourceforge.net with esmtp
|
||||
(Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
|
||||
id 19h1sb-0003nw-00
|
||||
for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:09 -0700
|
||||
Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
|
||||
by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
|
||||
id 19h1sa-00018t-00
|
||||
for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:08 -0700
|
||||
Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
|
||||
by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
|
||||
id 19h1sa-0002mX-00
|
||||
for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:08 -0700
|
||||
From: warner@users.sourceforge.net
|
||||
To: warner@users.sourceforge.net
|
||||
Subject: buildbot ChangeLog,1.93,1.94
|
||||
Message-Id: <E19h1sa-0002mX-00@sc8-pr-cvs1.sourceforge.net>
|
||||
Date: Sun, 27 Jul 2003 23:53:08 -0700
|
||||
Status:
|
||||
|
||||
Update of /cvsroot/buildbot/buildbot
|
||||
In directory sc8-pr-cvs1:/tmp/cvs-serv10689
|
||||
|
||||
Modified Files:
|
||||
ChangeLog
|
||||
Log Message:
|
||||
* NEWS: started adding new features
|
||||
|
||||
|
||||
Index: ChangeLog
|
||||
===================================================================
|
||||
RCS file: /cvsroot/buildbot/buildbot/ChangeLog,v
|
||||
retrieving revision 1.93
|
||||
retrieving revision 1.94
|
||||
diff -C2 -d -r1.93 -r1.94
|
||||
*** ChangeLog 27 Jul 2003 22:53:27 -0000 1.93
|
||||
--- ChangeLog 28 Jul 2003 06:53:06 -0000 1.94
|
||||
***************
|
||||
*** 1,4 ****
|
||||
--- 1,6 ----
|
||||
2003-07-27 Brian Warner <warner@lothar.com>
|
||||
|
||||
+ * NEWS: started adding new features
|
||||
+
|
||||
* buildbot/changes/mail.py: start work on Syncmail parser, move
|
||||
mail sources into their own file
|
||||
|
||||
|
||||
|
||||
--- NEW FILE: syncmail.3 ---
|
||||
Return-Path: <warner@users.sourceforge.net>
|
||||
Delivered-To: warner-sourceforge@luther.lothar.com
|
||||
Received: (qmail 23196 invoked by uid 1000); 28 Jul 2003 06:51:53 -0000
|
||||
Delivered-To: warner-sourceforge@lothar.com
|
||||
Received: (qmail 58269 invoked by uid 13574); 28 Jul 2003 06:51:46 -0000
|
||||
Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
|
||||
by 130.94.181.6 (qmail-ldap-1.03) with SMTP
|
||||
for <warner-sourceforge@lothar.com>; 28 Jul 2003 06:51:46 -0000
|
||||
Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
|
||||
by sc8-sf-list1.sourceforge.net with esmtp
|
||||
(Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
|
||||
id 19h1rF-00027s-00
|
||||
for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:46 -0700
|
||||
Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
|
||||
by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
|
||||
id 19h1rF-00017O-00
|
||||
for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:45 -0700
|
||||
Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
|
||||
by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
|
||||
id 19h1rF-0002jg-00
|
||||
for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:45 -0700
|
||||
From: warner@users.sourceforge.net
|
||||
To: warner@users.sourceforge.net
|
||||
Subject: CVSROOT syncmail,1.1,NONE
|
||||
Message-Id: <E19h1rF-0002jg-00@sc8-pr-cvs1.sourceforge.net>
|
||||
Date: Sun, 27 Jul 2003 23:51:45 -0700
|
||||
Status:
|
||||
|
||||
Update of /cvsroot/buildbot/CVSROOT
|
||||
In directory sc8-pr-cvs1:/tmp/cvs-serv10515
|
||||
|
||||
Removed Files:
|
||||
syncmail
|
||||
Log Message:
|
||||
nevermind
|
||||
|
||||
--- syncmail DELETED ---
|
||||
|
||||
|
||||
|
||||
|
70
tools/buildbot/buildbot/test/mail/syncmail.5
Normal file
@ -0,0 +1,70 @@
|
||||
From thomas@otto.amantes Mon Feb 21 17:46:45 2005
|
||||
Return-Path: <thomas@otto.amantes>
|
||||
Received: from otto.amantes (otto.amantes [127.0.0.1]) by otto.amantes
|
||||
(8.13.1/8.13.1) with ESMTP id j1LGkjr3011986 for <thomas@localhost>; Mon,
|
||||
21 Feb 2005 17:46:45 +0100
|
||||
Message-Id: <200502211646.j1LGkjr3011986@otto.amantes>
|
||||
From: Thomas Vander Stichele <thomas@otto.amantes>
|
||||
To: thomas@otto.amantes
|
||||
Subject: test1 s
|
||||
Date: Mon, 21 Feb 2005 16:46:45 +0000
|
||||
X-Mailer: Python syncmail $Revision: 1.1 $
|
||||
<http://sf.net/projects/cvs-syncmail>
|
||||
Content-Transfer-Encoding: 8bit
|
||||
Mime-Version: 1.0
|
||||
|
||||
Update of /home/cvs/test/test1
|
||||
In directory otto.amantes:/home/thomas/dev/tests/cvs/test1
|
||||
|
||||
Added Files:
|
||||
Tag: BRANCH-DEVEL
|
||||
MANIFEST Makefile.am autogen.sh configure.in
|
||||
Log Message:
|
||||
stuff on the branch
|
||||
|
||||
--- NEW FILE: Makefile.am ---
|
||||
SUBDIRS = src
|
||||
|
||||
# normally I wouldn't distribute autogen.sh and friends with a tarball
|
||||
# but this one is specifically distributed for demonstration purposes
|
||||
|
||||
EXTRA_DIST = autogen.sh
|
||||
|
||||
# target for making the "import this into svn" tarball
|
||||
test:
|
||||
mkdir test
|
||||
for a in `cat MANIFEST`; do \
|
||||
cp -pr $$a test/$$a; done
|
||||
tar czf test.tar.gz test
|
||||
rm -rf test
|
||||
|
||||
--- NEW FILE: MANIFEST ---
|
||||
MANIFEST
|
||||
autogen.sh
|
||||
configure.in
|
||||
Makefile.am
|
||||
src
|
||||
src/Makefile.am
|
||||
src/test.c
|
||||
|
||||
--- NEW FILE: autogen.sh ---
|
||||
#!/bin/sh
|
||||
|
||||
set -x
|
||||
|
||||
aclocal && \
|
||||
autoheader && \
|
||||
autoconf && \
|
||||
automake -a --foreign && \
|
||||
./configure $@
|
||||
|
||||
--- NEW FILE: configure.in ---
|
||||
dnl configure.ac for version macro
|
||||
AC_INIT
|
||||
|
||||
AM_CONFIG_HEADER(config.h)
|
||||
|
||||
AM_INIT_AUTOMAKE(test, 0.0.0)
|
||||
AC_PROG_CC
|
||||
|
||||
AC_OUTPUT(Makefile src/Makefile)
|
416
tools/buildbot/buildbot/test/runutils.py
Normal file
416
tools/buildbot/buildbot/test/runutils.py
Normal file
@ -0,0 +1,416 @@
|
||||
|
||||
import signal
|
||||
import shutil, os, errno
|
||||
from twisted.internet import defer, reactor
|
||||
from twisted.python import log, util
|
||||
|
||||
from buildbot import master, interfaces
|
||||
from buildbot.twcompat import maybeWait
|
||||
from buildbot.slave import bot
|
||||
from buildbot.process.builder import Builder
|
||||
from buildbot.process.base import BuildRequest, Build
|
||||
from buildbot.process.buildstep import BuildStep
|
||||
from buildbot.sourcestamp import SourceStamp
|
||||
from buildbot.status import builder
|
||||
|
||||
class MyBot(bot.Bot):
|
||||
def remote_getSlaveInfo(self):
|
||||
return self.parent.info
|
||||
|
||||
class MyBuildSlave(bot.BuildSlave):
|
||||
botClass = MyBot
|
||||
|
||||
def rmtree(d):
|
||||
try:
|
||||
shutil.rmtree(d, ignore_errors=1)
|
||||
except OSError, e:
|
||||
# stupid 2.2 appears to ignore ignore_errors
|
||||
if e.errno != errno.ENOENT:
|
||||
raise
|
||||
|
||||
class RunMixin:
|
||||
master = None
|
||||
|
||||
def rmtree(self, d):
|
||||
rmtree(d)
|
||||
|
||||
def setUp(self):
|
||||
self.slaves = {}
|
||||
self.rmtree("basedir")
|
||||
os.mkdir("basedir")
|
||||
self.master = master.BuildMaster("basedir")
|
||||
self.status = self.master.getStatus()
|
||||
self.control = interfaces.IControl(self.master)
|
||||
|
||||
def connectOneSlave(self, slavename, opts={}):
|
||||
port = self.master.slavePort._port.getHost().port
|
||||
self.rmtree("slavebase-%s" % slavename)
|
||||
os.mkdir("slavebase-%s" % slavename)
|
||||
slave = MyBuildSlave("localhost", port, slavename, "sekrit",
|
||||
"slavebase-%s" % slavename,
|
||||
keepalive=0, usePTY=1, debugOpts=opts)
|
||||
slave.info = {"admin": "one"}
|
||||
self.slaves[slavename] = slave
|
||||
slave.startService()
|
||||
|
||||
def connectSlave(self, builders=["dummy"], slavename="bot1",
|
||||
opts={}):
|
||||
# connect buildslave 'slavename' and wait for it to connect to all of
|
||||
# the given builders
|
||||
dl = []
|
||||
# initiate call for all of them, before waiting on result,
|
||||
# otherwise we might miss some
|
||||
for b in builders:
|
||||
dl.append(self.master.botmaster.waitUntilBuilderAttached(b))
|
||||
d = defer.DeferredList(dl)
|
||||
self.connectOneSlave(slavename, opts)
|
||||
return d
|
||||
|
||||
def connectSlaves(self, slavenames, builders):
|
||||
dl = []
|
||||
# initiate call for all of them, before waiting on result,
|
||||
# otherwise we might miss some
|
||||
for b in builders:
|
||||
dl.append(self.master.botmaster.waitUntilBuilderAttached(b))
|
||||
d = defer.DeferredList(dl)
|
||||
for name in slavenames:
|
||||
self.connectOneSlave(name)
|
||||
return d
|
||||
|
||||
def connectSlave2(self):
|
||||
# this takes over for bot1, so it has to share the slavename
|
||||
port = self.master.slavePort._port.getHost().port
|
||||
self.rmtree("slavebase-bot2")
|
||||
os.mkdir("slavebase-bot2")
|
||||
# this uses bot1, really
|
||||
slave = MyBuildSlave("localhost", port, "bot1", "sekrit",
|
||||
"slavebase-bot2", keepalive=0, usePTY=1)
|
||||
slave.info = {"admin": "two"}
|
||||
self.slaves['bot2'] = slave
|
||||
slave.startService()
|
||||
|
||||
def connectSlaveFastTimeout(self):
|
||||
# this slave has a very fast keepalive timeout
|
||||
port = self.master.slavePort._port.getHost().port
|
||||
self.rmtree("slavebase-bot1")
|
||||
os.mkdir("slavebase-bot1")
|
||||
slave = MyBuildSlave("localhost", port, "bot1", "sekrit",
|
||||
"slavebase-bot1", keepalive=2, usePTY=1,
|
||||
keepaliveTimeout=1)
|
||||
slave.info = {"admin": "one"}
|
||||
self.slaves['bot1'] = slave
|
||||
slave.startService()
|
||||
d = self.master.botmaster.waitUntilBuilderAttached("dummy")
|
||||
return d
|
||||
|
||||
# things to start builds
|
||||
def requestBuild(self, builder):
|
||||
# returns a Deferred that fires with an IBuildStatus object when the
|
||||
# build is finished
|
||||
req = BuildRequest("forced build", SourceStamp())
|
||||
self.control.getBuilder(builder).requestBuild(req)
|
||||
return req.waitUntilFinished()
|
||||
|
||||
def failUnlessBuildSucceeded(self, bs):
|
||||
if bs.getResults() != builder.SUCCESS:
|
||||
log.msg("failUnlessBuildSucceeded noticed that the build failed")
|
||||
self.logBuildResults(bs)
|
||||
self.failUnless(bs.getResults() == builder.SUCCESS)
|
||||
return bs # useful for chaining
|
||||
|
||||
def logBuildResults(self, bs):
|
||||
# emit the build status and the contents of all logs to test.log
|
||||
log.msg("logBuildResults starting")
|
||||
log.msg(" bs.getResults() == %s" % builder.Results[bs.getResults()])
|
||||
log.msg(" bs.isFinished() == %s" % bs.isFinished())
|
||||
for s in bs.getSteps():
|
||||
for l in s.getLogs():
|
||||
log.msg("--- START step %s / log %s ---" % (s.getName(),
|
||||
l.getName()))
|
||||
if not l.getName().endswith(".html"):
|
||||
log.msg(l.getTextWithHeaders())
|
||||
log.msg("--- STOP ---")
|
||||
log.msg("logBuildResults finished")
|
||||
|
||||
def tearDown(self):
|
||||
log.msg("doing tearDown")
|
||||
d = self.shutdownAllSlaves()
|
||||
d.addCallback(self._tearDown_1)
|
||||
d.addCallback(self._tearDown_2)
|
||||
return maybeWait(d)
|
||||
def _tearDown_1(self, res):
|
||||
if self.master:
|
||||
return defer.maybeDeferred(self.master.stopService)
|
||||
def _tearDown_2(self, res):
|
||||
self.master = None
|
||||
log.msg("tearDown done")
|
||||
|
||||
|
||||
# various forms of slave death
|
||||
|
||||
def shutdownAllSlaves(self):
|
||||
# the slave has disconnected normally: they SIGINT'ed it, or it shut
|
||||
# down willingly. This will kill child processes and give them a
|
||||
# chance to finish up. We return a Deferred that will fire when
|
||||
# everything is finished shutting down.
|
||||
|
||||
log.msg("doing shutdownAllSlaves")
|
||||
dl = []
|
||||
for slave in self.slaves.values():
|
||||
dl.append(slave.waitUntilDisconnected())
|
||||
dl.append(defer.maybeDeferred(slave.stopService))
|
||||
d = defer.DeferredList(dl)
|
||||
d.addCallback(self._shutdownAllSlavesDone)
|
||||
return d
|
||||
def _shutdownAllSlavesDone(self, res):
|
||||
for name in self.slaves.keys():
|
||||
del self.slaves[name]
|
||||
return self.master.botmaster.waitUntilBuilderFullyDetached("dummy")
|
||||
|
||||
def shutdownSlave(self, slavename, buildername):
|
||||
# this slave has disconnected normally: they SIGINT'ed it, or it shut
|
||||
# down willingly. This will kill child processes and give them a
|
||||
# chance to finish up. We return a Deferred that will fire when
|
||||
# everything is finished shutting down, and the given Builder knows
|
||||
# that the slave has gone away.
|
||||
|
||||
s = self.slaves[slavename]
|
||||
dl = [self.master.botmaster.waitUntilBuilderDetached(buildername),
|
||||
s.waitUntilDisconnected()]
|
||||
d = defer.DeferredList(dl)
|
||||
d.addCallback(self._shutdownSlave_done, slavename)
|
||||
s.stopService()
|
||||
return d
|
||||
def _shutdownSlave_done(self, res, slavename):
|
||||
del self.slaves[slavename]
|
||||
|
||||
def killSlave(self):
|
||||
# the slave has died, its host sent a FIN. The .notifyOnDisconnect
|
||||
# callbacks will terminate the current step, so the build should be
|
||||
# flunked (no further steps should be started).
|
||||
self.slaves['bot1'].bf.continueTrying = 0
|
||||
bot = self.slaves['bot1'].getServiceNamed("bot")
|
||||
broker = bot.builders["dummy"].remote.broker
|
||||
broker.transport.loseConnection()
|
||||
del self.slaves['bot1']
|
||||
|
||||
def disappearSlave(self, slavename="bot1", buildername="dummy"):
|
||||
# the slave's host has vanished off the net, leaving the connection
|
||||
# dangling. This will be detected quickly by app-level keepalives or
|
||||
# a ping, or slowly by TCP timeouts.
|
||||
|
||||
# simulate this by replacing the slave Broker's .dataReceived method
|
||||
# with one that just throws away all data.
|
||||
def discard(data):
|
||||
pass
|
||||
bot = self.slaves[slavename].getServiceNamed("bot")
|
||||
broker = bot.builders[buildername].remote.broker
|
||||
broker.dataReceived = discard # seal its ears
|
||||
broker.transport.write = discard # and take away its voice
|
||||
|
||||
def ghostSlave(self):
|
||||
# the slave thinks it has lost the connection, and initiated a
|
||||
# reconnect. The master doesn't yet realize it has lost the previous
|
||||
# connection, and sees two connections at once.
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
def setupBuildStepStatus(basedir):
|
||||
"""Return a BuildStep with a suitable BuildStepStatus object, ready to
|
||||
use."""
|
||||
os.mkdir(basedir)
|
||||
botmaster = None
|
||||
s0 = builder.Status(botmaster, basedir)
|
||||
s1 = s0.builderAdded("buildername", "buildername")
|
||||
s2 = builder.BuildStatus(s1, 1)
|
||||
s3 = builder.BuildStepStatus(s2)
|
||||
s3.setName("foostep")
|
||||
s3.started = True
|
||||
s3.stepStarted()
|
||||
return s3
|
||||
|
||||
def fake_slaveVersion(command, oldversion=None):
|
||||
from buildbot.slave.registry import commandRegistry
|
||||
return commandRegistry[command]
|
||||
|
||||
def makeBuildStep(basedir, step_class=BuildStep, **kwargs):
|
||||
bss = setupBuildStepStatus(basedir)
|
||||
|
||||
ss = SourceStamp()
|
||||
setup = {'name': "builder1", "slavename": "bot1",
|
||||
'builddir': "builddir", 'factory': None}
|
||||
b0 = Builder(setup, bss.getBuild().getBuilder())
|
||||
br = BuildRequest("reason", ss)
|
||||
b = Build([br])
|
||||
b.setBuilder(b0)
|
||||
s = step_class(build=b, **kwargs)
|
||||
s.setStepStatus(bss)
|
||||
b.setupStatus(bss.getBuild())
|
||||
s.slaveVersion = fake_slaveVersion
|
||||
return s
|
||||
|
||||
|
||||
def findDir():
|
||||
# the same directory that holds this script
|
||||
return util.sibpath(__file__, ".")
|
||||
|
||||
class SignalMixin:
|
||||
sigchldHandler = None
|
||||
|
||||
def setUpClass(self):
|
||||
# make sure SIGCHLD handler is installed, as it should be on
|
||||
# reactor.run(). problem is reactor may not have been run when this
|
||||
# test runs.
|
||||
if hasattr(reactor, "_handleSigchld") and hasattr(signal, "SIGCHLD"):
|
||||
self.sigchldHandler = signal.signal(signal.SIGCHLD,
|
||||
reactor._handleSigchld)
|
||||
|
||||
def tearDownClass(self):
|
||||
if self.sigchldHandler:
|
||||
signal.signal(signal.SIGCHLD, self.sigchldHandler)
|
||||
|
||||
# these classes are used to test SlaveCommands in isolation
|
||||
|
||||
class FakeSlaveBuilder:
|
||||
debug = False
|
||||
def __init__(self, usePTY, basedir):
|
||||
self.updates = []
|
||||
self.basedir = basedir
|
||||
self.usePTY = usePTY
|
||||
|
||||
def sendUpdate(self, data):
|
||||
if self.debug:
|
||||
print "FakeSlaveBuilder.sendUpdate", data
|
||||
self.updates.append(data)
|
||||
|
||||
|
||||
class SlaveCommandTestBase(SignalMixin):
|
||||
usePTY = False
|
||||
|
||||
def setUpBuilder(self, basedir):
|
||||
if not os.path.exists(basedir):
|
||||
os.mkdir(basedir)
|
||||
self.builder = FakeSlaveBuilder(self.usePTY, basedir)
|
||||
|
||||
def startCommand(self, cmdclass, args):
|
||||
stepId = 0
|
||||
self.cmd = c = cmdclass(self.builder, stepId, args)
|
||||
c.running = True
|
||||
d = c.doStart()
|
||||
return d
|
||||
|
||||
def collectUpdates(self, res=None):
|
||||
logs = {}
|
||||
for u in self.builder.updates:
|
||||
for k in u.keys():
|
||||
if k == "log":
|
||||
logname,data = u[k]
|
||||
oldlog = logs.get(("log",logname), "")
|
||||
logs[("log",logname)] = oldlog + data
|
||||
elif k == "rc":
|
||||
pass
|
||||
else:
|
||||
logs[k] = logs.get(k, "") + u[k]
|
||||
return logs
|
||||
|
||||
def findRC(self):
|
||||
for u in self.builder.updates:
|
||||
if "rc" in u:
|
||||
return u["rc"]
|
||||
return None
|
||||
|
||||
def printStderr(self):
|
||||
for u in self.builder.updates:
|
||||
if "stderr" in u:
|
||||
print u["stderr"]
|
||||
|
||||
# ----------------------------------------
|
||||
|
||||
class LocalWrapper:
|
||||
# r = pb.Referenceable()
|
||||
# w = LocalWrapper(r)
|
||||
# now you can do things like w.callRemote()
|
||||
def __init__(self, target):
|
||||
self.target = target
|
||||
|
||||
def callRemote(self, name, *args, **kwargs):
|
||||
d = defer.maybeDeferred(self._callRemote, name, *args, **kwargs)
|
||||
return d
|
||||
|
||||
def _callRemote(self, name, *args, **kwargs):
|
||||
method = getattr(self.target, "remote_"+name)
|
||||
return method(*args, **kwargs)
|
||||
|
||||
def notifyOnDisconnect(self, observer):
|
||||
pass
|
||||
def dontNotifyOnDisconnect(self, observer):
|
||||
pass
|
||||
|
||||
|
||||
class LocalSlaveBuilder(bot.SlaveBuilder):
|
||||
"""I am object that behaves like a pb.RemoteReference, but in fact I
|
||||
invoke methods locally."""
|
||||
_arg_filter = None
|
||||
|
||||
def setArgFilter(self, filter):
|
||||
self._arg_filter = filter
|
||||
|
||||
def remote_startCommand(self, stepref, stepId, command, args):
|
||||
if self._arg_filter:
|
||||
args = self._arg_filter(args)
|
||||
# stepref should be a RemoteReference to the RemoteCommand
|
||||
return bot.SlaveBuilder.remote_startCommand(self,
|
||||
LocalWrapper(stepref),
|
||||
stepId, command, args)
|
||||
|
||||
class StepTester:
|
||||
"""Utility class to exercise BuildSteps and RemoteCommands, without
|
||||
really using a Build or a Bot. No networks are used.
|
||||
|
||||
Use this as follows::
|
||||
|
||||
class MyTest(StepTester, unittest.TestCase):
|
||||
def testOne(self):
|
||||
self.slavebase = 'testOne.slave'
|
||||
self.masterbase = 'testOne.master'
|
||||
sb = self.makeSlaveBuilder()
|
||||
step = self.makeStep(stepclass, **kwargs)
|
||||
d = self.runStep(step)
|
||||
d.addCallback(_checkResults)
|
||||
return d
|
||||
"""
|
||||
|
||||
#slavebase = "slavebase"
|
||||
slavebuilderbase = "slavebuilderbase"
|
||||
#masterbase = "masterbase"
|
||||
|
||||
def makeSlaveBuilder(self):
|
||||
os.mkdir(self.slavebase)
|
||||
os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase))
|
||||
b = bot.Bot(self.slavebase, False)
|
||||
b.startService()
|
||||
sb = LocalSlaveBuilder("slavebuildername", False)
|
||||
sb.setArgFilter(self.filterArgs)
|
||||
sb.usePTY = False
|
||||
sb.setServiceParent(b)
|
||||
sb.setBuilddir(self.slavebuilderbase)
|
||||
self.remote = LocalWrapper(sb)
|
||||
return sb
|
||||
|
||||
workdir = "build"
|
||||
def makeStep(self, factory, **kwargs):
|
||||
if not kwargs.has_key("workdir"):
|
||||
kwargs['workdir'] = self.workdir
|
||||
step = makeBuildStep(self.masterbase, factory, **kwargs)
|
||||
return step
|
||||
|
||||
def runStep(self, step):
|
||||
d = defer.maybeDeferred(step.startStep, self.remote)
|
||||
return d
|
||||
|
||||
def wrap(self, target):
|
||||
return LocalWrapper(target)
|
||||
|
||||
def filterArgs(self, args):
|
||||
# this can be overridden
|
||||
return args
|