Web   ·   Wiki   ·   Activities   ·   Blog   ·   Lists   ·   Chat   ·   Meeting   ·   Bugs   ·   Git   ·   Translate   ·   Archive   ·   People   ·   Donate
summary · refs · log · tree · commit · diff · stats
diff options
context:
space:
mode:
author Sascha Silbe <sascha-org-sugar-git@silbe.org> 2009-03-20 19:57:15 (GMT)
committer Sascha Silbe <sascha-org-sugar-git@silbe.org> 2009-03-20 19:57:15 (GMT)
commit 5be6898fbeb754a0b9ec44d9a9ecb990c618b803 (patch)
tree 4c35c8cf2a2af53ca0b9094e7e857402581c9671
parent fcf8dc1e73f8da5a67639a89940fd10744d03915 (diff)
add (unpacked) buildbot 0.7.10p1 sources directly in sugar-jhbuild tree for usage on sunjammer
-rw-r--r--buildbot/COPYING339
-rw-r--r--buildbot/CREDITS83
-rw-r--r--buildbot/MANIFEST.in14
-rw-r--r--buildbot/NEWS2591
-rw-r--r--buildbot/PKG-INFO30
-rw-r--r--buildbot/README201
-rw-r--r--buildbot/README.w3295
-rwxr-xr-xbuildbot/bin/buildbot4
-rw-r--r--buildbot/buildbot.egg-info/PKG-INFO30
-rw-r--r--buildbot/buildbot.egg-info/SOURCES.txt215
-rw-r--r--buildbot/buildbot.egg-info/dependency_links.txt1
-rw-r--r--buildbot/buildbot.egg-info/requires.txt1
-rw-r--r--buildbot/buildbot.egg-info/top_level.txt1
-rw-r--r--buildbot/buildbot/__init__.py1
-rw-r--r--buildbot/buildbot/buildbot.pngbin0 -> 783 bytes
-rw-r--r--buildbot/buildbot/buildset.py81
-rw-r--r--buildbot/buildbot/buildslave.py688
-rw-r--r--buildbot/buildbot/changes/__init__.py0
-rw-r--r--buildbot/buildbot/changes/base.py10
-rw-r--r--buildbot/buildbot/changes/bonsaipoller.py320
-rw-r--r--buildbot/buildbot/changes/changes.py288
-rw-r--r--buildbot/buildbot/changes/dnotify.py100
-rw-r--r--buildbot/buildbot/changes/freshcvs.py144
-rw-r--r--buildbot/buildbot/changes/hgbuildbot.py114
-rw-r--r--buildbot/buildbot/changes/mail.py458
-rw-r--r--buildbot/buildbot/changes/maildir.py116
-rw-r--r--buildbot/buildbot/changes/monotone.py305
-rw-r--r--buildbot/buildbot/changes/p4poller.py207
-rw-r--r--buildbot/buildbot/changes/pb.py108
-rw-r--r--buildbot/buildbot/changes/svnpoller.py463
-rw-r--r--buildbot/buildbot/clients/__init__.py0
-rw-r--r--buildbot/buildbot/clients/base.py125
-rw-r--r--buildbot/buildbot/clients/debug.glade684
-rw-r--r--buildbot/buildbot/clients/debug.py181
-rw-r--r--buildbot/buildbot/clients/gtkPanes.py532
-rw-r--r--buildbot/buildbot/clients/sendchange.py48
-rw-r--r--buildbot/buildbot/dnotify.py102
-rw-r--r--buildbot/buildbot/ec2buildslave.py283
-rw-r--r--buildbot/buildbot/interfaces.py1123
-rw-r--r--buildbot/buildbot/locks.py247
-rw-r--r--buildbot/buildbot/manhole.py265
-rw-r--r--buildbot/buildbot/master.py965
-rw-r--r--buildbot/buildbot/pbutil.py147
-rw-r--r--buildbot/buildbot/process/__init__.py0
-rw-r--r--buildbot/buildbot/process/base.py627
-rw-r--r--buildbot/buildbot/process/builder.py874
-rw-r--r--buildbot/buildbot/process/buildstep.py1097
-rw-r--r--buildbot/buildbot/process/factory.py182
-rw-r--r--buildbot/buildbot/process/process_twisted.py118
-rw-r--r--buildbot/buildbot/process/properties.py157
-rw-r--r--buildbot/buildbot/process/step_twisted2.py159
-rw-r--r--buildbot/buildbot/scheduler.py837
-rw-r--r--buildbot/buildbot/scripts/__init__.py0
-rw-r--r--buildbot/buildbot/scripts/checkconfig.py53
-rw-r--r--buildbot/buildbot/scripts/logwatcher.py97
-rw-r--r--buildbot/buildbot/scripts/reconfig.py69
-rw-r--r--buildbot/buildbot/scripts/runner.py1023
-rw-r--r--buildbot/buildbot/scripts/sample.cfg175
-rw-r--r--buildbot/buildbot/scripts/startup.py128
-rw-r--r--buildbot/buildbot/scripts/tryclient.py707
-rw-r--r--buildbot/buildbot/slave/__init__.py0
-rw-r--r--buildbot/buildbot/slave/bot.py510
-rw-r--r--buildbot/buildbot/slave/commands.py2788
-rw-r--r--buildbot/buildbot/slave/interfaces.py56
-rw-r--r--buildbot/buildbot/slave/registry.py17
-rw-r--r--buildbot/buildbot/sourcestamp.py95
-rw-r--r--buildbot/buildbot/status/__init__.py0
-rw-r--r--buildbot/buildbot/status/base.py69
-rw-r--r--buildbot/buildbot/status/builder.py2182
-rw-r--r--buildbot/buildbot/status/client.py564
-rw-r--r--buildbot/buildbot/status/html.py6
-rw-r--r--buildbot/buildbot/status/mail.py524
-rw-r--r--buildbot/buildbot/status/progress.py308
-rw-r--r--buildbot/buildbot/status/tests.py73
-rw-r--r--buildbot/buildbot/status/tinderbox.py223
-rw-r--r--buildbot/buildbot/status/web/__init__.py0
-rw-r--r--buildbot/buildbot/status/web/about.py33
-rw-r--r--buildbot/buildbot/status/web/base.py421
-rw-r--r--buildbot/buildbot/status/web/baseweb.py614
-rw-r--r--buildbot/buildbot/status/web/build.py302
-rw-r--r--buildbot/buildbot/status/web/builder.py312
-rw-r--r--buildbot/buildbot/status/web/changes.py41
-rw-r--r--buildbot/buildbot/status/web/classic.css78
-rw-r--r--buildbot/buildbot/status/web/feeds.py359
-rw-r--r--buildbot/buildbot/status/web/grid.py252
-rw-r--r--buildbot/buildbot/status/web/index.html32
-rw-r--r--buildbot/buildbot/status/web/logs.py171
-rw-r--r--buildbot/buildbot/status/web/robots.txt9
-rw-r--r--buildbot/buildbot/status/web/slaves.py181
-rw-r--r--buildbot/buildbot/status/web/step.py97
-rw-r--r--buildbot/buildbot/status/web/tests.py64
-rw-r--r--buildbot/buildbot/status/web/waterfall.py962
-rw-r--r--buildbot/buildbot/status/web/xmlrpc.py203
-rw-r--r--buildbot/buildbot/status/words.py875
-rw-r--r--buildbot/buildbot/steps/__init__.py0
-rw-r--r--buildbot/buildbot/steps/dummy.py100
-rw-r--r--buildbot/buildbot/steps/master.py76
-rw-r--r--buildbot/buildbot/steps/maxq.py44
-rw-r--r--buildbot/buildbot/steps/package/__init__.py11
-rw-r--r--buildbot/buildbot/steps/package/rpm/__init__.py15
-rw-r--r--buildbot/buildbot/steps/package/rpm/rpmbuild.py144
-rw-r--r--buildbot/buildbot/steps/package/rpm/rpmlint.py51
-rw-r--r--buildbot/buildbot/steps/package/rpm/rpmspec.py67
-rw-r--r--buildbot/buildbot/steps/python.py187
-rw-r--r--buildbot/buildbot/steps/python_twisted.py804
-rw-r--r--buildbot/buildbot/steps/shell.py487
-rw-r--r--buildbot/buildbot/steps/source.py1107
-rw-r--r--buildbot/buildbot/steps/transfer.py465
-rw-r--r--buildbot/buildbot/steps/trigger.py122
-rw-r--r--buildbot/buildbot/test/__init__.py0
-rw-r--r--buildbot/buildbot/test/emit.py11
-rw-r--r--buildbot/buildbot/test/emitlogs.py42
-rw-r--r--buildbot/buildbot/test/mail/freshcvs.168
-rw-r--r--buildbot/buildbot/test/mail/freshcvs.2101
-rw-r--r--buildbot/buildbot/test/mail/freshcvs.397
-rw-r--r--buildbot/buildbot/test/mail/freshcvs.445
-rw-r--r--buildbot/buildbot/test/mail/freshcvs.554
-rw-r--r--buildbot/buildbot/test/mail/freshcvs.670
-rw-r--r--buildbot/buildbot/test/mail/freshcvs.768
-rw-r--r--buildbot/buildbot/test/mail/freshcvs.861
-rw-r--r--buildbot/buildbot/test/mail/freshcvs.918
-rw-r--r--buildbot/buildbot/test/mail/svn-commit.167
-rw-r--r--buildbot/buildbot/test/mail/svn-commit.21218
-rw-r--r--buildbot/buildbot/test/mail/syncmail.1152
-rw-r--r--buildbot/buildbot/test/mail/syncmail.256
-rw-r--r--buildbot/buildbot/test/mail/syncmail.339
-rw-r--r--buildbot/buildbot/test/mail/syncmail.4290
-rw-r--r--buildbot/buildbot/test/mail/syncmail.570
-rw-r--r--buildbot/buildbot/test/runutils.py516
-rw-r--r--buildbot/buildbot/test/sleep.py8
-rw-r--r--buildbot/buildbot/test/subdir/emit.py11
-rw-r--r--buildbot/buildbot/test/test__versions.py16
-rw-r--r--buildbot/buildbot/test/test_bonsaipoller.py244
-rw-r--r--buildbot/buildbot/test/test_buildreq.py182
-rw-r--r--buildbot/buildbot/test/test_buildstep.py144
-rw-r--r--buildbot/buildbot/test/test_changes.py243
-rw-r--r--buildbot/buildbot/test/test_config.py1277
-rw-r--r--buildbot/buildbot/test/test_control.py104
-rw-r--r--buildbot/buildbot/test/test_dependencies.py166
-rw-r--r--buildbot/buildbot/test/test_ec2buildslave.py552
-rw-r--r--buildbot/buildbot/test/test_limitlogs.py94
-rw-r--r--buildbot/buildbot/test/test_locks.py495
-rw-r--r--buildbot/buildbot/test/test_maildir.py92
-rw-r--r--buildbot/buildbot/test/test_mailparse.py293
-rw-r--r--buildbot/buildbot/test/test_mergerequests.py196
-rw-r--r--buildbot/buildbot/test/test_p4poller.py213
-rw-r--r--buildbot/buildbot/test/test_package_rpm.py132
-rw-r--r--buildbot/buildbot/test/test_properties.py274
-rw-r--r--buildbot/buildbot/test/test_reconfig.py91
-rw-r--r--buildbot/buildbot/test/test_run.py1199
-rw-r--r--buildbot/buildbot/test/test_runner.py392
-rw-r--r--buildbot/buildbot/test/test_scheduler.py348
-rw-r--r--buildbot/buildbot/test/test_shell.py138
-rw-r--r--buildbot/buildbot/test/test_slavecommand.py294
-rw-r--r--buildbot/buildbot/test/test_slaves.py991
-rw-r--r--buildbot/buildbot/test/test_status.py1631
-rw-r--r--buildbot/buildbot/test/test_steps.py788
-rw-r--r--buildbot/buildbot/test/test_svnpoller.py476
-rw-r--r--buildbot/buildbot/test/test_transfer.py721
-rw-r--r--buildbot/buildbot/test/test_twisted.py219
-rw-r--r--buildbot/buildbot/test/test_util.py26
-rw-r--r--buildbot/buildbot/test/test_vc.py3023
-rw-r--r--buildbot/buildbot/test/test_web.py594
-rw-r--r--buildbot/buildbot/test/test_webparts.py141
-rw-r--r--buildbot/buildbot/util.py102
-rw-r--r--buildbot/contrib/CSS/sample1.css53
-rw-r--r--buildbot/contrib/CSS/sample2.css53
-rw-r--r--buildbot/contrib/OS-X/README23
-rw-r--r--buildbot/contrib/OS-X/net.sourceforge.buildbot.master.plist42
-rw-r--r--buildbot/contrib/OS-X/net.sourceforge.buildbot.slave.plist36
-rw-r--r--buildbot/contrib/README.txt44
-rwxr-xr-xbuildbot/contrib/arch_buildbot.py76
-rwxr-xr-xbuildbot/contrib/bb_applet.py413
-rwxr-xr-xbuildbot/contrib/bzr_buildbot.py467
-rwxr-xr-xbuildbot/contrib/darcs_buildbot.py173
-rwxr-xr-xbuildbot/contrib/fakechange.py82
-rwxr-xr-xbuildbot/contrib/generate_changelog.py71
-rwxr-xr-xbuildbot/contrib/git_buildbot.py285
-rwxr-xr-xbuildbot/contrib/hg_buildbot.py49
-rwxr-xr-xbuildbot/contrib/run_maxq.py47
-rwxr-xr-xbuildbot/contrib/svn_buildbot.py260
-rwxr-xr-xbuildbot/contrib/svn_watcher.py107
-rwxr-xr-xbuildbot/contrib/svnpoller.py100
-rwxr-xr-xbuildbot/contrib/viewcvspoll.py99
-rw-r--r--buildbot/contrib/windows/buildbot.bat1
-rw-r--r--buildbot/contrib/windows/buildbot2.bat98
-rwxr-xr-xbuildbot/contrib/windows/buildbot_service.py536
-rwxr-xr-xbuildbot/contrib/windows/setup.py83
-rw-r--r--buildbot/docs/buildbot.html9606
-rw-r--r--buildbot/docs/buildbot.info192
-rw-r--r--buildbot/docs/buildbot.info-17278
-rw-r--r--buildbot/docs/buildbot.info-21654
-rw-r--r--buildbot/docs/buildbot.texinfo8807
-rw-r--r--buildbot/docs/epyrun195
-rw-r--r--buildbot/docs/examples/hello.cfg92
-rw-r--r--buildbot/docs/examples/twisted_master.cfg329
-rw-r--r--buildbot/docs/gen-reference1
-rw-r--r--buildbot/docs/hexnut32.pngbin0 -> 1899 bytes
-rw-r--r--buildbot/docs/hexnut48.pngbin0 -> 3582 bytes
-rw-r--r--buildbot/docs/hexnut64.pngbin0 -> 5850 bytes
-rw-r--r--buildbot/docs/images/master.pngbin0 -> 34670 bytes
-rw-r--r--buildbot/docs/images/master.svg508
-rw-r--r--buildbot/docs/images/master.txt34
-rw-r--r--buildbot/docs/images/overview.pngbin0 -> 38814 bytes
-rw-r--r--buildbot/docs/images/overview.svg396
-rw-r--r--buildbot/docs/images/overview.txt23
-rw-r--r--buildbot/docs/images/slavebuilder.pngbin0 -> 49534 bytes
-rw-r--r--buildbot/docs/images/slavebuilder.svg593
-rw-r--r--buildbot/docs/images/slavebuilder.txt31
-rw-r--r--buildbot/docs/images/slaves.pngbin0 -> 46854 bytes
-rw-r--r--buildbot/docs/images/slaves.svg336
-rw-r--r--buildbot/docs/images/slaves.txt27
-rw-r--r--buildbot/docs/images/status.pngbin0 -> 62173 bytes
-rw-r--r--buildbot/docs/images/status.svg853
-rw-r--r--buildbot/docs/images/status.txt32
-rw-r--r--buildbot/setup.cfg5
-rw-r--r--buildbot/setup.py127
217 files changed, 87384 insertions, 0 deletions
diff --git a/buildbot/COPYING b/buildbot/COPYING
new file mode 100644
index 0000000..d511905
--- /dev/null
+++ b/buildbot/COPYING
@@ -0,0 +1,339 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/buildbot/CREDITS b/buildbot/CREDITS
new file mode 100644
index 0000000..94188b1
--- /dev/null
+++ b/buildbot/CREDITS
@@ -0,0 +1,83 @@
+This is a list of everybody who has contributed to Buildbot in some way, in
+no particular order. Thanks everybody!
+
+Aaron Hsieh
+Albert Hofkamp
+Alexander Lorenz
+Alexander Staubo
+AllMyData.com
+Andrew Bennetts
+Anthony Baxter
+Axel Hecht
+Baptiste Lepilleur
+Bear
+Ben Hearsum
+Benoit Sigoure
+Brad Hards
+Brandon Philips
+Brett Neely
+Charles Lepple
+Chad Metcalf
+Christian Unger
+Clement Stenac
+Dan Locks
+Dave Liebreich
+Dave Peticolas
+Dobes Vandermeer
+Dustin Mitchell
+Dustin Sallings
+Elliot Murphy
+Fabrice Crestois
+Gary Granger
+Gary Poster
+Gerald Combs
+Greg Ward
+Grig Gheorghiu
+Haavard Skinnemoen
+Igor Slepchin
+JP Calderone
+James Knight
+Jerome Davann
+John Backstrand
+John O'Duinn
+John Pye
+John Saxton
+Jose Dapena Paz
+Kevin Turner
+Kirill Lapshin
+Marcus Lindblom
+Marius Gedminas
+Mark Dillavou
+Mark Hammond
+Mark Pauley
+Mark Rowe
+Mateusz Loskot
+Nathaniel Smith
+Neal Norwitz
+Nick Mathewson
+Nick Trout
+Niklaus Giger
+Neil Hemingway
+Olivier Bonnet
+Olly Betts
+Paul Warren
+Paul Winkler
+Phil Thompson
+Philipp Frauenfelder
+Rene Rivera
+Riccardo Magliocchetti
+Rob Helmer
+Roch Gadsdon
+Roy Rapoport
+Scott Lamb
+Stephen Davis
+Steve 'Ashcrow' Milner
+Steven Walter
+Ted Mielczarek
+Thomas Vander Stichele
+Tobi Vollebregt
+Wade Brainerd
+Yoz Grahame
+Zandr Milewski
+chops
+zooko
diff --git a/buildbot/MANIFEST.in b/buildbot/MANIFEST.in
new file mode 100644
index 0000000..698be38
--- /dev/null
+++ b/buildbot/MANIFEST.in
@@ -0,0 +1,14 @@
+
+include MANIFEST.in README README.w32 NEWS CREDITS COPYING
+include docs/examples/*.cfg
+include docs/buildbot.texinfo docs/buildbot.info* docs/buildbot.html
+include docs/*.png docs/images/*.png docs/images/*.svg docs/images/*.txt
+include docs/epyrun docs/gen-reference
+include buildbot/test/mail/* buildbot/test/subdir/*
+include buildbot/scripts/sample.cfg
+include buildbot/status/web/*.css buildbot/status/web/*.html
+include buildbot/status/web/robots.txt
+include buildbot/clients/debug.glade
+include buildbot/buildbot.png
+
+include contrib/* contrib/windows/* contrib/OS-X/* contrib/CSS/*
diff --git a/buildbot/NEWS b/buildbot/NEWS
new file mode 100644
index 0000000..e12b3a8
--- /dev/null
+++ b/buildbot/NEWS
@@ -0,0 +1,2591 @@
+User visible changes in Buildbot. -*- outline -*-
+
+* Release 0.7.10p1 (2 Mar 2009)
+
+This is a bugfix release for 0.7.10, fixing a few minor bugs:
+
+** Bugs Fixed
+
+*** add a missing method to the IRC status plugin
+
+*** add RPM-related buildsteps to setup.py
+
+* Release 0.7.10 (25 Feb 2009)
+
+This release is mainly a collection of user-submitted patches since
+the last release.
+
+** New Features
+
+*** Environment variables in a builder (#100)
+
+It is useful to be able to pass environment variables to all steps in a
+builder. This is now possible by adding { .. 'env': { 'var' : 'value' }, ... }
+to the builder specification.
+
+*** IRC status plugin improvements (#330, #357, #378, #280, #381, #411, #368)
+
+*** usePTY specified in master.cfg, defaults to False (#158, #255)
+
+Using a pty has some benefits in terms of supporting "Stop Build", but causes
+numerous problems with simpler jobs which can be killed by a SIGHUP when their
+standard input is closed. With this change, PTYs are not used by default,
+although you can enable them either on slaves (with the --usepty option to
+create-slave) or on the master.
+
+*** More information about buildslaves via the web plugin (#110)
+
+A new page, rooted at /buildslave/$SLAVENAME, gives extensive information about
+the buildslave.
+
+*** More flexible merging of requests (#415)
+
+The optional c['mergeRequests'] configuration parameter takes a function
+which can decide whether two requests are mergeable.
+
+*** Steps can be made to run even if the build has halted (#414)
+
+Adding alwaysRun=True to a step will cause it to run even if some other step
+has failed and has haltOnFailure=True.
+
+*** Compress buildstep logfiles (#26)
+
+Logs for each buildstep, which can take a lot of space on a busy buildmaster,
+are automatically compressed after the step has finished.
+
+*** Support for "latent" buildslaves
+
+The buildslaves that are started on-demand are called "latent" buildslaves.
+Buildbot ships with an abstract base class for building latent buildslaves, and
+a concrete implementation for AWS EC2.
+
+*** Customized MailNotifier messages (#175)
+
+MailNotifier now takes an optional function to build the notification message,
+allowing ultimate site-level control over the format of buildbot's notification
+emails.
+
+*** Nightly scheduler support for building only if changes have occurred
+
+With the addition of onlyIfChanged=True, the Nightly scheduler will not schedule
+a new build if no changes have been made since its last scheduled build.
+
+*** Add ATOM/RSS feeds to WebStatus (#372)
+
+Two new pages, /atom and /rss, provide feeds of build events to any feed
+reader. These paths take the same "category" and "branch" arguments as the
+waterfall and grid.
+
+*** Add categories to Schedulers and Changes (#182)
+
+This allows a moderate amount of support for multiple projects built in a
+single buildmaster.
+
+*** Gracefully shut down a buildslave after its build is complete
+
+The /buildslaves/$SLAVENAME pages have a "Gracefully Shutdown" button which
+will cause the corresponding slave to shut itself down when it finishes its
+current build. This is a good way to do work on a slave without causing a
+spurious build failure.
+
+*** SVN source steps can send usernames and passwords (#41)
+
+Adding username="foo" and/or password="bar" to an SVN step will cause
+--username and --password arguments to be passed to 'svn' on the slave side.
+Passwords are suitably obfuscated in logfiles.
+
+** New Steps
+
+*** DirectoryUpload (#393)
+
+This step uploads an entire directory to the master, and can be useful when a
+build creates several products (e.g., a client and server package).
+
+*** MasterShellCommand
+
+This step runs a shell command on the server, and can be useful for
+post-processing build products, or performing other maintenance tasks on the
+master.
+
+*** PyLint (#259)
+
+A PyLint step is available to complement the existing PyFlakes step.
+
+** Bugs Fixed
+
+*** Process output from new versions of Test::Harness (#346)
+
+*** Fixes to the try client and scheduler
+
+*** Remove redundant loop in MailNotifier (#315)
+
+*** Display correct $PWD in logfiles (#179)
+
+*** Do not assume a particular python version on Windows (#401)
+
+*** Sort files in changes (#402)
+
+*** Sort buildslaves lexically (#416)
+
+*** Send properties to all builds initiated by AnyBranchScheduler
+
+*** Dependent Schedulers are more robust to reconfiguration (#35)
+
+*** Fix properties handling in triggered builds (#392)
+
+*** Use "call" on Windows to avoid errors (#417)
+
+*** Support setDefaultWorkdir in FileUpload and FileDownload (#209)
+
+*** Support WithProperties in FileUpload and FileDownload (#210)
+
+*** Fix a bug where changes could be lost on a master crash (#202)
+
+*** Remove color settings from non-presentation code (#251)
+
+*** Fix builders which stopped working after a PING (#349, #85)
+
+*** Isolate Python exceptions in status plugins (#388)
+
+*** Notify about slaves missing at master startup (#302)
+
+*** Fix tracebacks in web display after a reconfig (#176)
+
+** Version-Control Changes
+
+*** Many Mercurial fixes
+
+ - Inrepo branch support finalized (source step + changegroup hook + test case)
+ (#65 #185 #187)
+
+ - Reduced amount of full clones by separating clone with update into
+ clone/pull/update steps (#186, #227) (see #412 for future work here)
+
+ - Fixed mercurial changegroup hook to work with Mercurial 1.1 API (#181, #380)
+
+*** Many git fixes
+
+*** Add got_revision to Perforce support (#127)
+
+*** Use "git foo" everywhere instead of deprecated "git-foo"
+
+** Minor Changes
+
+*** factory.addSteps (#317)
+
+If you have a common list of steps that are included in multiple factories, you
+can use f.addSteps(steplist) to add them all at once.
+
+*** Twisted logfile rotation and cleanup (#108)
+
+By default, Buildbot now rotates and cleans up the (potentially voluminous)
+twistd.log files.
+
+*** Prioritize build requests based on the time they were submitted (#334)
+
+Balancing of load is a bit more fair, although not true load balancing.
+
+* Release 0.7.9 (15 Sep 2008)
+
+** New Features
+
+*** Configurable public_html directory (#162)
+
+The public_html/ directory, which provides static content for the WebStatus()
+HTTP server, is now configurable. The default location is still the
+public_html/ subdirectory of the buildmaster's base directory, but you can
+change this by passing a suitable argument when creating the WebStatus()
+instance in your master.cfg file:
+
+ c['status'].append( WebStatus(8080, public_html="/var/www/buildbot") )
+
+*** Lock access modes (#313)
+
+Albert Hofkamp added code to provide two distinct access modes to Locks:
+"counting" and "exclusive". Locks can accept a configurable number of
+"counting"-mode users, or a single "exclusive"-mode. For example, a Lock is
+defined with maxCount=3, and then a 'compile' BuildStep uses this lock in
+counting mode, while a 'cleanup' BuildStep uses this lock in exclusive mode.
+Then, there can be one, two, or three simultaneous Builds in the compile step
+(as long as there are no builds in the cleanup step). Only one build can be
+in the cleanup step at a time, and if there is such a build in the cleanup
+step, then the compile steps in other builds will wait for it to finish.
+Please see the "Interlocks" section of the user's manual for more details.
+
+** Bugs Fixed
+
+*** Buildslave missing_timeout= fired too quickly (#211)
+
+By providing a missing_timeout= argument when creating the BuildSlave
+instance, you can ask the buildmaster to send email if a buildslave is
+disconnected for too long. A bug in the previous version caused this
+notification to be sent too soon, rather than waiting until the timeout
+period expired. This should be fixed now.
+
+*** Test command display fixed (#332)
+
+In the previous version, a steps.shell.Test step would display the parsed
+test results (in the step's box on the waterfall display) in lieu of any
+other descriptive text the step might provide. In this release, these two
+pieces of information are combined.
+
+** Minor Changes
+
+The buildmaster's version is logged to its twistd.log file at startup. The
+buildslave does the same, to its own logfile.
+
+Remote commands now record how long each command took. The "elapsedTime="
+message will appear in the step's main logfile.
+
+The "buildbot restart" command no longer fails if the buildbot wasn't already
+running.
+
+The FileUpload and FileDownload steps now create their target directories
+(and any missing intermediate directories) before writing to the destination
+file.
+
+The per-build and per-step web pages now show the start, finish, and elapsed
+time of their build or step.
+
+If a Subversion-based build is started with a mixture of Changes that specify
+particular numeric revisions and "HEAD" Changes (which indicate that a trunk
+checkout is desired), the build will use a trunk checkout. Previously this
+would probably cause an error. It is not clear how this situation might
+arise.
+
+** Compatibility With Other Tools
+
+The mercurial commit hook (buildbot.changes.hgbuildbot) in the previous
+version doesn't work with hg-1.0 or later (it uses an API function that was
+present in the hg-0.9.5 release, but was removed from hg-1.0). This
+incompatibility has been fixed: the new version of buildbot should be
+compatible with hg-1.0 and newer (and it probably retains compatibility with
+hg-0.9.5 and earlier too). (#328)
+
+The Git tool has traditionally provided two ways to run each command, either
+as subcommands of /usr/bin/git (like "git checkout"), or as individual tools
+(like /usr/bin/git-checkout). The latter form is being removed in the
+upcoming 1.6 Git release. Previous versions of Buildbot have used the
+git-checkout form, and will break when Git is upgraded to 1.6 or beyond. The
+new Buildbot release switches to the subcommand form. Note that this is a
+change on the buildslave side.
+
+The Git checkout command will now use the default branch (as set in the
+steps.source.Git() step definition) if the changes that it is building do not
+specify some other branch to build. (#340)
+
+** Deprecation Schedule
+
+No features have been deprecated in this release, and no deprecated features
+have been removed. As a reminder, the following deprecated features are
+scheduled for removal in an upcoming release:
+
+c['sources'] (plural) was replaced by c['change_source'] (singular) in 0.7.6,
+and will be removed by 0.8.0.
+
+c['bots'] was replaced by c['buildslaves'] in 0.7.6, and will be removed by
+0.8.0 . c['bots'] only accepts BuildSlave instances, not name/passwd tuples.
+
+The html.Waterfall status target was replaced by html.WebStatus in 0.7.6, and
+will be removed by 0.8.0.
+
+
+* Release 0.7.8 (24 Jul 2008)
+
+** New features
+
+The IRC bot will respond to three new commands: 'notify' subscribes the
+channel (or the sender, if the command is sent as a private "/msg") to hear
+about build events. 'join' tells the bot to join some new IRC channel.
+'leave' tells it to leave a channel. See the "IRC Bot" section of the User's
+Manual for details. (#171)
+
+Build Steps now have "statistics", in addition to logfiles. These are used to
+count things like how many tests passed or failed. There are methods to sum
+these counters across all steps and display the results in the Build status.
+The Waterfall display now shows the count of failed tests on the top-most box
+in each column, using this mechanism.
+
+The new buildbot.steps.shell.PerlModuleTest step was added, to run Perl unit
+tests. This is a wrapper around the regular ShellCommand that parses the
+output of the standard perl unit test system and counts how many tests
+passed/failed/etc. The results are put into the step's summary text, and a
+count of tests passed/failed/skipped are tracked in the step's statistics.
+The factory.CPAN build factory has been updated to use this, so configuring a
+Buildbot to test a perl module available from CPAN should be as easy as:
+
+ s = source.CVS(cvsroot, cvsmodule)
+ f = factory.CPAN(s)
+
+Build Properties have been generalized: they remain associated with a single
+Build, but the properties can be set from a variety of sources. In previous
+releases, the Build itself would set properties like 'buildername', 'branch',
+and 'revision' (the latter two indicating which version of the source code it
+was trying to get), and the source-checkout BuildSteps would set a property
+named 'got_revision' (to indicate what version of the source code it actually
+got). In this release, the 'scheduler' property is set to indicate which
+Scheduler caused the build to be started. In addition, the config file can
+specify properties to be set on all Builds, or on all Builds for a specific
+Builder. All these properties are available for interpolation into
+ShellCommands and environment variables by using the WithProperties() marker.
+
+It may be easier to implement simple build parameterization (e.g. to upload
+generated binaries to a specific directory, or to only perform long-running
+tests on a nightly build instead of upon every checkin) by using these Build
+Properties than to write custom BuildSteps.
+
+** Other improvements
+
+The /buildslaves web page shows which slaves are currently running builds.
+Offline slaves are displayed in bold.
+
+Buildbot's setup.py now provides metadata to setuptools (if installed): an
+entry_points script was added, and a dependency upon twisted-2.4.x or newer
+was declared. This makes it more likely that 'easy_install buildbot' will
+work.
+
+The MailNotifier class acquired a mode="passing" flag: in this mode, the
+buildbot will only send mail about passing builds (versus only on failing
+builds, or only on builds which failed when the previous build had passed).
+
+** Bugs fixed
+
+Don't display force/stop build buttons when build control is disabled (#246)
+
+When a build is waiting on a lock, don't claim that it has started (#107)
+
+Make SVN mode=copy tolerate symlinks on freebsd, "cp -rp" -> "cp -RPp" (#86)
+
+The svnpoller changesource now ignores branch deletion (#261)
+
+The Git unit tests should run even if the user has not told Git about their
+username/email.
+
+The WebStatus /xmlrpc server's getStatus() method was renamed to the
+more-accurate getLastBuildResults().
+
+The TinderboxMailNotifier status output acquired a useChangeTime= argument.
+
+The bonsaipoller changesource got some fixes.
+
+** Deprecation Schedule
+
+No features have been deprecated in this release, and no deprecated features
+have been removed. As a reminder, the following deprecated features are
+scheduled for removal in an upcoming release:
+
+c['sources'] (plural) was replaced by c['change_source'] (singular) in 0.7.6,
+and will be removed by 0.8.0.
+
+c['bots'] was replaced by c['buildslaves'] in 0.7.6, and will be removed by
+0.8.0 . c['bots'] only accepts BuildSlave instances, not name/passwd tuples.
+
+The html.Waterfall status target was replaced by html.WebStatus in 0.7.6, and
+will be removed by 0.8.0.
+
+
+
+* Release 0.7.7 (29 Mar 2008)
+
+** Things You Need To Know
+
+*** builder names must not start with an underscore (`_').
+
+These are now reserved for internal buildbot purposes, such as the magic
+"_all" pseudo-builder that the web pages use to allow force-build buttons
+that start builds on all Builders at once.
+
+** New Features
+
+*** "buildbot checkconfig"
+
+The "buildbot checkconfig" command will look at your master.cfg file and tell
+you if there are any problems with it. This can be used to test potential
+changes to your config file before submitting them to the running
+buildmaster. This is particularly useful to run just before doing "buildbot
+restart", since the restart will fail if the config file has an error. By
+running "buildbot checkconfig master.cfg && buildbot restart", you'll only
+perform the restart if the config file was ok. Many thanks to Ben Hearsum for
+the patch.
+
+*** Waterfall "?category=FOO" query-arguments
+
+The Waterfall page now accepts one or more "category=" query arguments in the
+URL, to filter the display by categories. These behave a lot like the
+"builder=" query argument. Thanks to Jerome Davann for the patch.
+
+** Bugs Fixed
+
+Many bugs were fixed, and many minor features were added. Many thanks to
+Dustin Mitchell who fixed and coordinated many of these. Here is a terse
+list, for more details, please see the Trac page for the 0.7.7 release, at
+http://buildbot.net/trac/query?status=closed&milestone=0.7.7 :
+
+Many of the URLs generated by the buildbot were wrong.
+Display of last-heard-from timestamps on the buildslaves web page were wrong.
+Asking an IRC bot about a build waiting on a Lock should no longer crash.
+Same for the web viewer.
+Stop treating the encouraged info/ directory as leftover.
+Add more force/stop build buttons.
+Timestamps displayed on the waterfall now handle daylight savings properly.
+p4poller no longer quits after a single failure.
+Improved Git support, including 'try', branch, and revisions.
+Buildslaves now use 'git', not 'cogito'.
+Make older hg client/servers handle specific-revision builds properly.
+Fix twisted.scripts._twistw problem on twisted-2.5.0 and windows.
+Fix workdir= and env= on ShellCommands
+Fix logfile-watching in 'buildbot start' on OS-X.
+Fix ShellCommand crashes when the program emits >640kB of output per chunk.
+New WarningCountingShellCommand step.
+Fix TreeSize step.
+Fix transfer.FileUpload/FileDownload crashes for large files.
+Make 'buildbot reconfig' on windows tell you that it doesn't work.
+Add a To: header to the mail sent by the slave-missing timeout.
+Disable usePTY= for most unit tests, it makes some debian systems flunk tests.
+Add 'absolute source stamps'
+Add 'triggerable schedulers', and a buildstep to trigger them.
+Remove buildbot.changes.freshcvsmail
+Add new XMLRPC methods: getAllBuilders, getStatus, getLastBuilds.
+Accept WithProperties in more places: env=, workdir=, others.
+Use --no-auth-cache with SVN commands to avoid clobbering shared svn state.
+Add hours/minutes/seconds in the waterfall's ETA display.
+Trial: count Doctest lines too.
+ShellCommand: record more info in the headers: stdin closing, PTY usage.
+Make it possible to stop builds across reconfig boundaries.
+SVN revision numbers are now passed as strings, which was breaking MailNotifier
+
+** Deprecation Schedule
+
+The changes.freshcvsmail change source was replaced by
+changes.mail.FCMaildirSource in 0.7.6, and has been removed in 0.7.7 .
+
+c['sources'] (plural) was replaced by c['change_source'] (singular) in 0.7.6,
+and will be removed by 0.8.0.
+
+c['bots'] was replaced by c['buildslaves'] in 0.7.6, and will be removed by
+0.8.0 . c['bots'] only accepts BuildSlave instances, not name/passwd tuples.
+
+The html.Waterfall status target was replaced by html.WebStatus in 0.7.6, and
+will be removed by 0.8.0.
+
+
+* Release 0.7.6 (30 Sep 2007)
+
+** Things You Need To Know
+
+*** 'buildbot upgrade-master'
+
+Each time you install a new version of Buildbot, you should run the new
+'buildbot upgrade-master' command on each of your pre-existing buildmasters.
+This will add files and fix (or at least detect) incompatibilities between
+your old config and the new code.
+
+*** new WebStatus page
+
+The Waterfall has been replaced by the more general WebStatus display,
+described below. WebStatus serves static files from a new public_html/
+directory that lives in the buildmaster's basedir. Files like index.html,
+buildbot.css, and robots.txt are served directly from that directory, so any
+modifications you wish to make should be made to those files. In particular,
+any custom CSS you've written should be copied into public_html/buildbot.css.
+The 'upgrade-master' command will populate this directory for you.
+
+The old Waterfall page is deprecated, but it should continue to work for
+another few releases. It is now a subclass of WebStatus which just replaces
+the default root URL with another copy of the /waterfall resource.
+
+*** Compatibility: Python-2.3 or newer, Twisted-2.0 or newer
+
+No compatibility losses here, buildbot-0.7.6 is compatible with the same
+versions of python and twisted that 0.7.5 was.
+
+Buildbot is tested on a regular basis (http://buildbot.buildbot.net) against
+nearly a full matrix of Python-(2.3,2.4,2.5) * Twisted-(2.0,2.1,2.2,2.4,2.5).
+
+*** New Buildbot Home Page
+
+Buildbot has moved to a new Trac instance at http://buildbot.net/ , and all
+new bugs and tickets should be filed there. The old sourceforge bugs at
+http://buildbot.sf.net/ will slowly be migrated over. Mailing lists are still
+managed at sourceforge, and downloads are still available there.
+
+*** Changed/Deprecated master.cfg Keys and Classes
+
+c['sources'] (plural) has been replaced by c['change_source'] (singular).
+
+c['bots'] has been replaced by c['buildslaves'], and it expects a list of
+BuildSlave instances instead of tuples. See below for more details.
+
+The 'freshcvsmail' change source has been deprecated, and will be removed in
+the next release.
+
+The html.Waterfall status target has been deprecated, and replaced by
+html.WebStatus .
+
+** New Features
+
+*** WebStatus
+
+The new WebStatus display is a superset of the old Waterfall. It contains a
+waterfall as a sub-page, but it also contains pages with more compact
+representations of recent build status. The "one_line_per_build" page
+contains just that, and "one_box_per_builder" shows just the information from
+the top of the waterfall page (last-finished-build and current-activity).
+
+The initial page (when you hit the root of the web site) is served from
+index.html, and provides links to the Waterfall as well as the other pages.
+
+Most of these pages can be filtered by adding query arguments to the URL.
+Adding "?builder=XYZ" will cause the page to only show results for the given
+builder. Adding "?builder=XYZ&builder=ABC" will show results for either
+builder. "?branch=trunk" will limit the results to builds that involved code
+from the trunk.
+
+The /waterfall page has arguments to hide those annoying "buildslave
+connected" messages, to start and end at arbitrary times, and to auto-refresh
+at a chosen interval (with a hardcoded minimum of 15 seconds). It also has a
+"help" page with forms that will help you add all of these nifty filtering
+arguments.
+
+The recommended practice is to modify the index.html file to include links to
+the filtered pages that you find most useful.
+
+Note that WebStatus defaults to allowForce=False, meaning that the display
+will not offer or accept "Force Build" or "Stop Build" controls. (The old
+Waterfall defaults to allowForce=True).
+
+The new WebStatus pages try very hard to use only relative links, making life
+better when the Buildbot sits behind an HTTP reverse proxy.
+
+In addition, there is a rudimentary XMLRPC server run by the WebStatus
+object. It only has two methods so far, but it will acquire more in the
+future. The first customer of this is a project to add a buildbot plugin to
+Trac.
+
+*** BuildFactory.addStep(Step(args))
+
+BuildFactories can be set up either with a complete list of steps, or by
+calling the .addStep() method repeatedly. The preferred way to provide a step
+is by instantiating it, rather than giving a class/kwargs pair. This gives
+the BuildStep class a chance to examine the arguments (and complain about
+anything it doesn't like) while the config file is being read and problems
+are being logged. For example, the old-style:
+
+ from buildbot.process.factory import BuildFactory, s
+ steps = [s(CVS, cvsroot="blah", mode="copy"),
+ s(Compile, command=["make", "all"]),
+ s(Test, command=["make", "test"]),
+ ]
+ f = BuildFactory(steps)
+
+is now:
+
+ f = BuildFactory()
+ f.addStep( CVS(cvsroot="blah", mode="copy") )
+ f.addStep( Compile(command=["make", "all"]) )
+ f.addStep( Test(command=["make", "test"]) )
+
+Authors of BuildStep subclasses which override __init__ to add new arguments
+must register them with self.addFactoryArguments(**newargs) to make sure that
+those classes will work with this new style, otherwise the new arguments will
+be lost.
+
+Using class/kwargs pairs is deprecated, and will be removed in a future
+release.
+
+
+*** BuildSlave instances, max_builds=, notify_on_missing=
+
+Buildslave specification has changed a lot in this release. The old config:
+
+ c['bots'] = [ ("bot1name", "bot1passwd"),
+ ("bot2name", "bot2passwd") ]
+
+is now:
+
+ from buildbot.buildslave import BuildSlave
+ c['slaves'] = [ BuildSlave("bot1name", "bot1passwd"),
+ BuildSlave("bot2name", "bot2passwd") ]
+
+This new form gives us the ability to add new controls. The first is
+"max_builds=", which imposes a concurrency limit that is like the usual
+SlaveLock, but gives the buildmaster the opportunity to find a different
+slave to run the build. (the buildslave is chosen before the SlaveLock is
+claimed, so pure SlaveLocks don't let you take full advantage of build
+farms).
+
+The other addition is "notify_on_missing=", which accepts an email address
+(or list of addresses), and sends a message when the buildslave has been
+disconnected for more than an hour (configurable with missing_timeout=). This
+may be useful when you expect that the buildslave hosts should be available
+most of the time, and want to investigate the reasons that it went offline.
+
+
+** Other Improvements
+
+The IRC bot has been refactored to make it easier to add instant-messaging
+status delivery in the future. The IM plugins are not yet written, though.
+
+When multiple buildslaves are available for a given build, one of them will
+be picked at random. In previous releases, the first one on the list was
+always picked. This helps to add a certain measure of load-balancing. More
+improvements will be made in the future.
+
+When the buildslave does a VC checkout step that requires clobbering the
+build directory (i.e. in all modes except for 'update'), the buildslave will
+first set the permissions on all build files to allow their deletion, before
+it attempts to delete them. This should fix some problems in which a build
+process left non-user-writable files lying around (frequently a result of
+enthusiastic unit tests).
+
+The BuildStep's workdir= argument can now accept a WithProperties()
+specification, allowing greater control over the workdir.
+
+Support for the 'Bazaar' version control system (/usr/bin/bzr) has been
+added, using the buildbot.steps.source.Bzr class. This is a replacement for
+the old 'Arch' (/usr/bin/tla and /usr/bin/baz) systems, which are still
+supported by Buildbot with the source.Arch and source.Bazaar classes,
+respectively. Unfortunately the old baz system claimed the 'Bazaar' classname
+early, so the new system must use source.Bzr instead of the desired
+source.Bazaar . A future release might change this.
+
+A rudimentary Gnome Panel applet is provided in contrib/bb_applet.py, which
+provides 'buildbot statusgui' -like colored status boxes inside the panel.
+Installing it is a bit tricky, though.
+
+The 'buildbot try' command now accepts a '--diff=foo.patch' argument, to let
+you provide a pre-computed patch. This makes it easier to test out patches
+that you've looked over for safety, without first applying them to your local
+source tree.
+
+A new Mercurial change source was added, hg_buildbot.py, which runs as an
+in-process post-commit hook. This gives us access to much more information
+about the change, as well as being much faster.
+
+The email-based changesource have been refactored, to make it easier to write
+new mail parsers. A parser for the SVN "commit-email.pl" script has been
+added.
+
+** Bugs Fixed
+
+Far too many to count. Please see
+http://buildbot.net/trac/query?status=closed&milestone=0.7.6 for a partial
+list of tickets closed for this release, and the ChangeLog for a complete
+list of all changes since 0.7.5 .
+
+
+* Release 0.7.5 (10 Dec 2006)
+
+** Things You Need To Know
+
+*** The Great BuildStep Renaming
+
+All BuildSteps have moved! They used to be classes in buildbot.process.step,
+but now they all have separate modules in buildbot.steps.* . They have been
+split out into separate categories: for example, the source checkout steps
+are now buildbot.steps.source.CVS, buildbot.steps.source.Darcs, etc. The most
+commonly used one is probably buildbot.steps.shell.ShellCommand . The
+python-specific steps are in buildbot.steps.python, and the Twisted-specific
+steps are in buildbot.steps.python_twisted .
+
+You will need to update your master.cfg files to use the new names. The old
+names are deprecated and will be removed altogether in the next release.
+
+*** Compatibility
+
+Buildbot now requires python-2.3 or later. Buildbot now requires
+Twisted-2.0.0 or later. Support for earlier versions of both has finally been
+removed. If you discover it works with unsupported versions, please return
+your Buildbot to the factory for repairs :-).
+
+Buildbot has *not* yet been tested against the recent python-2.5 release. It
+has been tested against the latest SVN version of Twisted, but only in
+conjunction with python-2.4 .
+
+** new features
+
+*** reconfiguring a Builder no longer causes a disconnect/reconnect cycle
+
+This means that sending SIGHUP to the master or running 'buildbot reconfig
+MASTERDIR' command no longer interrupts any current builds, nor does it lose
+pending builds like it did before. This involved a fairly substantial
+refactoring of the various internal BotPerspective/BotMaster/Builder classes.
+Note that reconfiguring Schedulers still loses any Changes that were waiting
+for the tree to become stable: hopefully this will be fixed in the next
+release.
+
+*** 'buildbot start/restart/reconfig' now show logs until startup is complete
+
+These commands now have additional code to follow twistd.log and display all
+the lines that are emitted from the beginning of the start/reconfig action
+until it has completed. This gives you a chance to see any problems detected
+in the config file without needing to manually look in twistd.log or use
+another shell to 'tail -f' it. This also makes it clear which config file is
+being used. This functionality is not available under windows.
+
+In addition, if any problems are detected during 'start' or 'restart' (but
+not reconfig), the buildbot command will terminate with a non-zero exit
+status, making it easier to use in scripts. Closes SF#1517975.
+
+*** Locks now take maxCount=N to allow multiple simultaneous owners
+
+This allows Locks to be non-exclusive but still limit maximum concurrency.
+Thanks to James Knight for the patch. Closes SF#1434997.
+
+*** filetransfer steps
+
+buildbot.steps.transfer.FileUpload is a buildstep that will move files from
+the slave to the master. Likewise, FileDownload will move files from the
+master down to the buildslave. Many thanks to Albert Hofkamp for contributing
+these classes. Closes SF#1504631.
+
+*** pyflakes step
+
+buildbot.steps.python.PyFlakes will run the simple 'pyflakes' static analysis
+tool and parse the results to tell you about undefined names, unused imports,
+etc. You'll need to tell it how to run pyflakes, usually with something like
+command=["pyflakes", "src/packagedir"] or the like. The default command is
+"make pyflakes", which assumes that you have a suitable target in your
+top-level Makefile.
+
+*** Monotone support
+
+Nathaniel Smith has contributed initial support for the Monotone version
+control system. The code still needs docs and tests, but on the other hand it
+has been in use by the Monotone buildbot for a long time now, so it is
+probably fairly stable.
+
+*** Tinderbox support
+
+Ben Hearsum and the Mozilla crew have contributed some classes to allow
+Buildbot to work with Tinderbox clients. One piece is
+buildbot.changes.bonsaipoller.BonsaiPoller, which is a ChangeSource that
+polls a Bonsai server (which is a kind of web-based viewcvs CGI script) to
+discover source code changes. The other piece is
+buildbot.status.tinderbox.TinderboxMailNotifier, which is a status plugin
+that sends email in the same format as Tinderbox does, which allows a number
+of Tinderbox tools to be driven by Buildbot instead.
+
+*** SVN Poller
+
+Niklaus Giger contributed a ChangeSource (buildbot.changes.svnpoller) which
+polls a remote SVN repository on a periodic basis. This is useful when, for
+whatever reason, you cannot add a post-commit hook script to the repository.
+This obsoletes the external contrib/svn_watcher.py script.
+
+** notes for plugin developers
+
+*** IStatusLog.readlines()
+
+This new method makes it easier for a status plugin (or a
+BuildStep.createSummary method) to walk through a StatusLog one line at a
+time. For example, if you wanted to create an extra logfile that just
+contained all the GCC warnings from the main log, you could use the
+following:
+
+ def createSummary(self, log):
+ warnings = []
+ for line in log.readlines():
+ if "warning:" in line:
+ warnings.append(line)
+ self.addCompleteLog('warnings', "".join(warnings))
+
+The "BuildStep LogFiles" section of the user's manual contains more
+information. This method is not particularly memory-efficient yet (it reads
+the whole logfile into memory first, then splits it into lines); this will be
+improved in a future release.
+
+** bug fixes
+
+*** Update source.SVN to work with the new SVN-1.4.0
+
+The latest subversion changed the behavior in an unusual situation which
+caused the unit tests to fail. This was unlikely to cause a problem in actual
+usage, but the tests have been updated to pass with the new version.
+
+*** update svn_buildbot.py to avoid mangling filenames
+
+Older versions of this script were stripping the wrong number of columns from
+the output of 'svnlook changed', and would sometimes mangle filenames. This
+has been fixed. Closes SF#1545146.
+
+*** logfiles= caused subsequent build failures under Windows
+
+Earlier versions of buildbot didn't explicitly close any logfiles= file
+handles when the build finished. On windows (where you cannot delete a file
+that someone else is reading), this could cause the next build to fail as the
+source checkout step was unable to delete the old working directory. This has
+been fixed. Closes SF#1568415.
+
+*** logfiles= didn't work on OS-X
+
+Macintosh OS-X has a different behavior when reading files that have reached
+EOF, the result was that logfiles= sometimes didn't work. Thanks to Mark Rowe
+for the patch.
+
+** other changes
+
+The 'buildbot sighup MASTERDIR' command has been replaced with 'buildbot
+reconfig MASTERDIR', since that seems to be a slightly more meaningful name.
+The 'sighup' form will remain as an alias.
+
+
+* Release 0.7.4 (23 Aug 2006)
+
+** Things You Need To Know
+
+The PBChangeSource's prefix= argument has changed, you probably need to add a
+slash now. This is mostly used by sites which use Subversion and
+svn_buildbot.py.
+
+The subcommands that are used to create a buildmaster or a buildslave have
+changed. They used to be called 'buildbot master' and 'buildbot slave'. Now
+they are called 'buildbot create-master' and 'buildbot create-slave'. Zipf's
+Law suggests that these are more appropriate names for these
+infrequently-used commands.
+
+The syntax for the c['manhole'] feature has changed.
+
+** new features
+
+*** full Perforce support
+
+SF#1473939: large patch from Scott Lamb, with docs and unit tests! This
+includes both the step.P4 source-checkout BuildStep, and the changes.p4poller
+ChangeSource you'll want to feed it. P4 is now supported just as well as all
+the other VC systems. Thanks Scott!
+
+*** SSH-based Manhole
+
+The 'manhole' feature allows buildbot developers to get access to a python
+read/eval/print loop (REPL) inside the buildmaster through a network
+connection. Previously, this ran over unencrypted telnet, using a simple
+username/password for access control. The new release defaults to encrypted
+SSH access, using either username/password or an authorized_keys file (just
+like sshd). There also exists an unencrypted telnet form, but its use is
+discouraged. The syntax for setting up a manhole has changed, so master.cfg
+files that use them must be updated. The "Debug options" section in the
+user's manual provides a complete description.
+
+*** Multiple Logfiles
+
+BuildSteps can watch multiple log files in realtime, not just stdout/stderr.
+This works in a similar fashion to 'tail -f': the file is polled once per
+second, and any new data is sent to the buildmaster.
+
+This requires a buildslave running 0.7.4 or later, and a warning message is
+produced if used against an old buildslave (which will otherwise produce no
+data). Use "logfiles={'name': 'filename'}" to take advantage of this feature
+from master.cfg, and see the "ShellCommand" section of the user's manual for
+full documentation.
+
+The 'Trial' buildstep has been updated to use this, to display
+_trial_temp/test.log in realtime. It also knows to fall back to the previous
+"cat" command if the buildslave is too old.
+
+*** BuildStep URLs
+
+BuildSteps can now add arbitrary URLs which will be displayed on the
+Waterfall page in the same place that Logs are presented. This is intended to
+provide a link to generated HTML pages, such as the output of a code coverage
+tool. The step is responsible for somehow uploading the HTML to a web server:
+this feature merely provides an easy way to present the HREF link to the
+user. See the "BuildStep URLs" section of the user's manual for details and
+examples.
+
+*** LogObservers
+
+BuildSteps can now attach LogObservers to various logfiles, allowing them to
+get real-time log output. They can use this to watch for progress-indicating
+events (like counting the number of files compiled, or the number of tests
+which have run), and update both ETA/progress-tracking and step text. This
+allows for more accurate ETA information, and more information passed to the
+user about how much of the process has completed.
+
+The 'Trial' buildstep has been updated to use this for progress tracking, by
+counting how many test cases have run.
+
+** new documentation
+
+What classes are useful in your master.cfg file? A table of them has been
+added to the user's manual, in a section called "Index of Useful Classes".
+
+Want a list of all the keys in master.cfg? Look in the "Index of master.cfg
+keys" section.
+
+A number of pretty diagrams have been added to the "System Architecture"
+portion of the manual, explaining how all the buildbot pieces fit together.
+
+An HTML form of the user's manual is now shipped in the source tarball. This
+makes it a bit bigger: sorry about that. The old PyCon-2003 paper has been
+removed from the distribution, as it is mostly supplanted by the user's
+manual by this point.
+
+** bugfixes
+
+SF#1217699 + SF#1381867: The prefix= argument to PBChangeSource has been
+changed: now it does just a simple string-prefix match and strip. The
+previous behavior was buggy and unhelpful. NOTE: if you were using prefix=
+before, you probably need to add a slash to the end of it.
+
+SF#1398174: ignore SVN property changes better, fixed by Olivier Bonnet
+
+SF#1452801: don't double-escape the build URL, fixed by Olivier Bonnet
+
+SF#1401121: add support for running py2exe on windows, by Mark Hammond
+
+reloading unchanged config files with WithProperties shouldn't change anything.
+
+All svn commands now include --non-interactive so they won't ask for
+passwords. Instead, the command will fail if it cannot be performed without
+user input.
+
+Deprecation warnings with newer versions of Twisted have been hushed.
+
+** compatibility
+
+I haven't actually removed support for Twisted-1.3.0 yet, but I'd like to.
+
+The step_twisted default value for --reporter matches modern Twisteds,
+though, and won't work under 1.3.0.
+
+ShellCommand.flunkOnFailure now defaults to True, so any shell command which
+fails counts as a build failure. Set this to False if you don't want this
+behavior.
+
+** minor features
+
+contrib/darcs_buildbot.py contains a new script suitable for use in a darcs
+commit-hook.
+
+Hovering a cursor over the yellow "Build #123" box in the Waterfall display
+will pop up an HTML tooltip to show the reason for the build. Thanks to Zandr
+Milewski for the suggestion.
+
+contrib/CSS/*.css now contains several contributed stylesheets to make the
+Waterfall display a bit less ugly. Thanks to John O'Duinn for gathering them.
+
+ShellCommand and its derivatives can now accept either a string or a list of
+strings in the description= and descriptionDone= arguments. Thanks to Paul
+Winkler for the catch.
+
+
+* Release 0.7.3 (23 May 2006)
+
+** compatibility
+
+This release is compatible with Twisted-1.3.0, but the next one will not be.
+Please upgrade to at least Twisted-2.0.x soon, as the next buildbot release
+will require it.
+
+** new features
+
+*** Mercurial support
+
+Support for Mercurial version control system (http://selenic.com/mercurial)
+has been added. This adds a buildbot.process.step.Mercurial BuildStep. A
+suitable hook script to deliver changes to the buildmaster is still missing.
+
+*** 'buildbot restart' command
+
+The 'buildbot restart BASEDIR' command will perform a 'buildbot stop' and
+'buildbot start', and will attempt to wait for the buildbot process to shut
+down in between. This is useful when you need to upgrade the code on your
+buildmaster or buildslave and want to take it down for a minimum amount of
+time.
+
+*** build properties
+
+Each build now has a set of named "Build Properties", which can be set by
+steps and interpolated into ShellCommands. The 'revision' and 'got_revision'
+properties are the most interesting ones available at this point, and can be
+used e.g. to get the VC revision number into the filename of a generated
+tarball. See the user's manual section entitled "Build Properties" for more
+details.
+
+** minor features
+
+*** IRC now takes password= argument
+
+Useful for letting your bot claim a persistent identity.
+
+*** svn_buildbot.py is easier to modify to understand branches
+*** BuildFactory has a new .addStep method
+*** p4poller has new arguments
+*** new contrib scripts: viewcvspoll, svnpoller, svn_watcher
+
+These poll an external VC repository to watch for changes, as opposed to
+adding a hook script to the repository that pushes changes into the
+buildmaster. This means higher latency but may be easier to configure,
+especially if you do not have authority on the repository host.
+
+*** VC build property 'got_revision'
+
+The 'got_revision' property reports what revision a VC step actually
+acquired, which may be useful to know when building from HEAD.
+
+*** improved CSS in Waterfall
+
+The Waterfall display has a few new class= tags, which may make it easier to
+write custom CSS to make it look prettier.
+
+*** robots_txt= argument in Waterfall
+
+You can now pass a filename to the robots_txt= argument, which will be served
+as the "robots.txt" file. This can be used to discourage search engine
+spiders from crawling through the numerous build-status pages.
+
+** bugfixes
+
+*** tests more likely to pass on non-English systems
+
+The unit test suite now sets $LANG='C' to make subcommands emit error
+messages in english instead of whatever native language is in use on the
+host. This improves the chances that the unit tests will pass on such
+systems. This affects certain VC-related subcommands too.
+
+test_vc was assuming that the system time was expressed with a numeric
+timezone, which is not always the case, especially under windows. This
+probably works better now than it did before. This only affects the CVS
+tests.
+
+'buildbot try' (for CVS) now uses UTC instead of the local timezone. The
+'got_revision' property is also expressed in UTC. Both should help deal with
+buggy versions of CVS that don't parse numeric timezones properly.
+
+
+* Release 0.7.2 (17 Feb 2006)
+
+** new features
+
+*** all TCP port numbers in config file now accept a strports string
+
+Sometimes it is useful to restrict certain TCP ports that the buildmaster
+listens on to use specific network interfaces. In particular, if the
+buildmaster and SVN repository live on the same machine, you may want to
+restrict the PBChangeSource to only listen on the loopback interface,
+ensuring that no external entities can inject Changes into the buildbot.
+Likewise, if you are using something like Apache's reverse-proxy feature to
+provide access to the buildmaster's HTML status page, you might want to hide
+the real Waterfall port by having it only bind to the loopback interface.
+
+To accomplish this, use a string like "tcp:12345:interface=127.0.0.1" instead
+of a number like 12345. These strings are called "strports specification
+strings", and are documented in twisted's twisted.application.strports module
+(you can probably type 'pydoc twisted.application.strports' to see this
+documentation). Pretty much everywhere the buildbot takes a port number will
+now accept a strports spec, and any bare numbers are translated into TCP port
+numbers (listening on all network interfaces) for compatibility.
+
+*** buildslave --umask control
+
+Twisted's daemonization utility (/usr/bin/twistd) automatically sets the
+umask to 077, which means that all files generated by both the buildmaster
+and the buildslave will only be readable by the account under which the
+respective daemon is running. This makes it unnecessarily difficult to share
+build products (e.g. by symlinking ~/public_html/current_docs/ to a directory
+within the slave's build directory where each build puts the results of a
+"make docs" step).
+
+The 'buildbot slave <PARAMS>' command now accepts a --umask argument, which
+can be used to override the umask set by twistd. If you create the buildslave
+with '--umask=022', then all build products will be world-readable, making it
+easier for other processes (run under other accounts) to access them.
+
+** bug fixes
+
+The 0.7.1 release had a bug whereby reloading the config file could break all
+configured Schedulers, causing them to raise an exception when new changes
+arrived but not actually schedule a new build. This has been fixed.
+
+Fixed a bug which caused the AnyBranchScheduler to explode when branch==None.
+Thanks to Kevin Turner for the catch. I also think I fixed a bug whereby the
+TryScheduler would explode when it was given a Change (which it is supposed
+to simply ignore).
+
+The Waterfall display now does more quoting of names (including Builder
+names, BuildStep names, etc), so it is more likely that these names can
+contain unusual characters like spaces, quotes, and slashes. There may still
+be some problems with these kinds of names, however.. please report any bugs
+to the mailing list.
+
+
+* Release 0.7.1 (26 Nov 2005)
+
+** new features
+
+*** scheduler.Nightly
+
+Dobes Vandermeer contributed a cron-style 'Nightly' scheduler. Unlike the
+more-primitive Periodic class (which only lets you specify the duration
+between build attempts), Nightly lets you schedule builds for specific times
+of day, week, month, or year. The interface is very much like the crontab(5)
+file. See the buildbot.scheduler.Nightly docstring for complete details.
+
+** minor new features
+
+*** step.Trial can work with Trial from Twisted >2.1.0
+
+The 'Trial' step now accepts the trialMode= argument, which should be a list
+of strings to be added to trial's argv array. This defaults to ["-to"], which
+is appropriate for the Trial that ships in Twisted-2.1.0 and earlier, and
+tells Trial to emit non-colorized verbose output. To use this step with
+trials from later versions of Twisted, this should be changed to
+["--reporter=bwverbose"].
+
+In addition, you can now set other Trial command-line parameters through the
+trialArgs= argument. This is a list of strings, and defaults to an empty list.
+
+*** Added a 'resubmit this build' button to the web page
+
+*** Make the VC-checkout step's description more useful
+
+Added the word "[branch]" to the VC step's description (used in the Step's
+box on the Waterfall page, among others) when we're checking out a
+non-default branch. Also add "rNNN" where appropriate to indicate which
+revision is being checked out. Thanks to Brad Hards and Nathaniel Smith for
+the suggestion.
+
+** bugs fixed
+
+Several patches from Dobes Vandermeer: Escape the URLs in email, in case they
+have spaces and such. Fill otherwise-empty <td> elements, as a workaround for
+buggy browsers that might optimize them away. Also use binary mode when
+opening status pickle files, to make windows work better. The
+AnyBranchScheduler now works even when you don't provide a fileIsImportant=
+argument.
+
+Stringify the base revision before stuffing it into a 'try' jobfile, helping
+SVN and Arch implement 'try' builds better. Thanks to Steven Walter for the
+patch.
+
+Fix the compare_attrs list in PBChangeSource, FreshCVSSource, and Waterfall.
+Before this, certain changes to these objects in the master.cfg file were
+ignored, such that you would have to stop and re-start the buildmaster to
+make them take effect.
+
+The config file is now loaded serially, shutting down old (or replaced)
+Status/ChangeSource plugins before starting new ones. This fixes a bug in
+which changing an aspect of, say, the Waterfall display would cause an
+exception as both old and new instances fight over the same TCP port. This
+should also fix a bug whereby new Periodic Schedulers could fire a build
+before the Builders have finished being added.
+
+There was a bug in the way Locks were handled when the config file was
+reloaded: changing one Builder (but not the others) and reloading master.cfg
+would result in multiple instances of the same Lock object, so the Locks
+would fail to prevent simultaneous execution of Builds or Steps. This has
+been fixed.
+
+** other changes
+
+For a long time, certain StatusReceiver methods (like buildStarted and
+stepStarted) have been able to return another StatusReceiver instance
+(usually 'self') to indicate that they wish to subscribe to events within the
+new object. For example, if the buildStarted() method returns 'self', the
+status receiver will also receive events for the new build, like
+stepStarted() and buildETAUpdate(). Returning a 'self' from buildStarted() is
+equivalent to calling build.subscribe(self).
+
+Starting with buildbot-0.7.1, this auto-subscribe convenience will also
+register to automatically unsubscribe the target when the build or step has
+finished, just as if build.unsubscribe(self) had been called. Also, the
+unsubscribe() method has been changed to not explode if the same receiver is
+unsubscribed multiple times. (note that it will still explode if the same
+receiver is *subscribed* multiple times, so please continue to refrain from
+doing that).
+
+
+* Release 0.7.0 (24 Oct 2005)
+
+** new features
+
+*** new c['schedulers'] config-file element (REQUIRED)
+
+The code which decides exactly *when* a build is performed has been massively
+refactored, enabling much more flexible build scheduling. YOU MUST UPDATE
+your master.cfg files to match: in general this will merely require you to
+add an appropriate c['schedulers'] entry. Any old ".treeStableTime" settings
+on the BuildFactory instances will now be ignored. The user's manual has
+complete details with examples of how the new Scheduler classes work.
+
+*** c['interlocks'] removed, Locks and Dependencies now separate items
+
+The c['interlocks'] config element has been removed, and its functionality
+replaced with two separate objects. Locks are used to tell the buildmaster
+that certain Steps or Builds should not run at the same time as other Steps
+or Builds (useful for test suites that require exclusive access to some
+external resource: of course the real fix is to fix the tests, because
+otherwise your developers will be suffering from the same limitations). The
+Lock object is created in the config file and then referenced by a Step
+specification tuple or by the 'locks' key of the Builder specification
+dictionary. Locks come in two flavors: MasterLocks are buildmaster-wide,
+while SlaveLocks are specific to a single buildslave.
+
+When you want to have one Build run or not run depending upon whether some
+other set of Builds have passed or failed, you use a special kind of
+Scheduler defined in the scheduler.Dependent class. This scheduler watches an
+upstream Scheduler for builds of a given source version to complete, and only
+fires off its own Builders when all of the upstream's Builders have built
+that version successfully.
+
+Both features are fully documented in the user's manual.
+
+*** 'buildbot try'
+
+The 'try' feature has finally been added. There is some configuration
+involved, both in the buildmaster config and on the developer's side, but
+once in place this allows the developer to type 'buildbot try' in their
+locally-modified tree and to be given a report of what would happen if their
+changes were to be committed. This works by computing a (base revision,
+patch) tuple that describes the developer's tree, sending that to the
+buildmaster, then running a build with that source on a given set of
+Builders. The 'buildbot try' tool then emits status messages until the builds
+have finished.
+
+'try' exists to allow developers to run cross-platform tests on their code
+before committing it, reducing the chances they will inconvenience other
+developers by breaking the build. The UI is still clunky, but expect it to
+change and improve over the next few releases.
+
+Instructions for developers who want to use 'try' (and the configuration
+changes necessary to enable its use) are in the user's manual.
+
+*** Build-On-Branch
+
+When suitably configured, the buildbot can be used to build trees from a
+variety of related branches. You can set up Schedulers to build a tree using
+whichever branch was last changed, or users can request builds of specific
+branches through IRC, the web page, or (eventually) the CLI 'buildbot force'
+subcommand.
+
+The IRC 'force' command now takes --branch and --revision arguments (not that
+they always make sense). Likewise the HTML 'force build' button now has an
+input field for branch and revision. Your build's source-checkout step must
+be suitably configured to support this: for SVN it involves giving both a
+base URL and a default branch. Other VC systems are configured differently.
+The ChangeSource must also provide branch information: the 'buildbot
+sendchange' command now takes a --branch argument to help hook script writers
+accomplish this.
+
+*** Multiple slaves per Builder
+
+You can now attach multiple buildslaves to each Builder. This can provide
+redundancy or primitive load-balancing among many machines equally capable of
+running the build. To use this, define a key in the Builder specification
+dictionary named 'slavenames' with a list of buildslave names (instead of the
+usual 'slavename' that contains just a single slavename).
+
+*** minor new features
+
+The IRC and email status-reporting facilities now provide more specific URLs
+for particular builds, in addition to the generic buildmaster home page. The
+HTML per-build page now has more information.
+
+The Twisted-specific test classes have been modified to match the argument
+syntax preferred by Trial as of Twisted-2.1.0 and newer. The generic trial
+steps are still suitable for the Trial that comes with older versions of
+Twisted, but may produce deprecation warnings or errors when used with the
+latest Trial.
+
+** bugs fixed
+
+DNotify, used by the maildir-watching ChangeSources, had problems on some
+64-bit systems relating to signed-vs-unsigned constants and the DN_MULTISHOT
+flag. A workaround was provided by Brad Hards.
+
+The web status page should now be valid XHTML, thanks to a patch by Brad
+Hards. The charset parameter is specified to be UTF-8, so VC comments,
+builder names, etc, should probably all be in UTF-8 to be displayed properly.
+
+** creeping version dependencies
+
+The IRC 'force build' command now requires python2.3 (for the shlex.split
+function).
+
+
+* Release 0.6.6 (23 May 2005)
+
+** bugs fixed
+
+The 'sendchange', 'stop', and 'sighup' subcommands were broken, simple bugs
+that were not caught by the test suite. Sorry.
+
+The 'buildbot master' command now uses "raw" strings to create .tac files
+that will still function under windows (since we must put directory names
+that contain backslashes into that file).
+
+The keep-on-disk behavior added in 0.6.5 included the ability to upgrade old
+in-pickle LogFile instances. This upgrade function was not added to the
+HTMLLogFile class, so an exception would be raised when attempting to load or
+display any build with one of these logs (which are normally used only for
+showing build exceptions). This has been fixed.
+
+Several unnecessary imports were removed, so the Buildbot should function
+normally with just Twisted-2.0.0's "Core" module installed. (of course you
+will need TwistedWeb, TwistedWords, and/or TwistedMail if you use status
+targets that require them). The test suite should skip all tests that cannot
+be run because of missing Twisted modules.
+
+The master/slave's basedir is now prepended to sys.path before starting the
+daemon. This used to happen implicitly (as a result of twistd's setup
+preamble), but 0.6.5 internalized the invocation of twistd and did not copy
+this behavior. This change restores the ability to access "private.py"-style
+modules in the basedir from the master.cfg file with a simple "import
+private" statement. Thanks to Thomas Vander Stichele for the catch.
+
+
+* Release 0.6.5 (18 May 2005)
+
+** deprecated config keys removed
+
+The 'webPortnum', 'webPathname', 'irc', and 'manholePort' config-file keys,
+which were deprecated in the previous release, have now been removed. In
+addition, Builders must now always be configured with dictionaries: the
+support for configuring them with tuples has been removed.
+
+** master/slave creation and startup changed
+
+The buildbot no longer uses .tap files to store serialized representations of
+the buildmaster/buildslave applications. Instead, this release now uses .tac
+files, which are human-readable scripts that create new instances (rather
+than .tap files, which were pickles of pre-created instances). 'mktap
+buildbot' is gone.
+
+You will need to update your buildbot directories to handle this. The
+procedure is the same as creating a new buildmaster or buildslave: use
+'buildbot master BASEDIR' or 'buildbot slave BASEDIR ARGS..'. This will
+create a 'buildbot.tac' file in the target directory. The 'buildbot start
+BASEDIR' will use twistd to start the application.
+
+The 'buildbot start' command now looks for a Makefile.buildbot, and if it
+finds one (and /usr/bin/make exists), it will use it to start the application
+instead of calling twistd directly. This allows you to customize startup,
+perhaps by adding environment variables. The setup commands create a sample
+file in Makefile.sample, but you must copy this to Makefile.buildbot to
+actually use it. The previous release looked for a bare 'Makefile', and also
+installed a 'Makefile', so you were always using the customized approach,
+even if you didn't ask for it. That old Makefile launched the .tap file, so
+changing names was also necessary to make sure that the new 'buildbot start'
+doesn't try to run the old .tap file.
+
+'buildbot stop' now uses os.kill instead of spawning an external process,
+making it more likely to work under windows. It waits up to 5 seconds for the
+daemon to go away, so you can now do 'buildbot stop BASEDIR; buildbot start
+BASEDIR' with less risk of launching the new daemon before the old one has
+fully shut down. Likewise, 'buildbot start' imports twistd's internals
+directly instead of spawning an external copy, so it should work better under
+windows.
+
+** new documentation
+
+All of the old Lore-based documents were converted into a new Texinfo-format
+manual, and considerable new text was added to describe the installation
+process. The docs are not yet complete, but they're slowly shaping up to form
+a proper user's manual.
+
+** new features
+
+Arch checkouts can now use precise revision stamps instead of always using
+the latest revision. A separate Source step for using Bazaar (an alternative
+Arch client) instead of 'tla' was added. A Source step for Cogito (the new
+linux kernel VC system) was contributed by Brandon Philips. All Source steps
+now accept a retry= argument to indicate that failing VC checkouts should be
+retried a few times (SF#1200395), note that this requires an updated
+buildslave.
+
+The 'buildbot sendchange' command was added, to be used in VC hook scripts to
+send changes at a pb.PBChangeSource . contrib/arch_buildbot.py was added to
+use this tool; it should be installed using the 'Arch meta hook' scheme.
+
+Changes can now accept a branch= parameter, and Builders have an
+isBranchImportant() test that acts like isFileImportant(). Thanks to Thomas
+Vander Stichele. Note: I renamed his tag= to branch=, in anticipation of an
+upcoming feature to build specific branches. "tag" seemed too CVS-centric.
+
+LogFiles have been rewritten to stream the incoming data directly to disk
+rather than keeping a copy in memory all the time (SF#1200392). This
+drastically reduces the buildmaster's memory requirements and makes 100MB+
+log files feasible. The log files are stored next to the serialized Builds,
+in files like BASEDIR/builder-dir/12-log-compile-output, so you'll want a
+cron job to delete old ones just like you do with old Builds. Old-style
+Builds from 0.6.4 and earlier are converted when they are first read, so the
+first load of the Waterfall display after updating to this release may take
+quite some time.
+
+** build process updates
+
+BuildSteps can now return a status of EXCEPTION, which terminates the build
+right away. This allows exceptions to be caught right away, but still make
+sure the build stops quickly.
+
+** bug fixes
+
+Some more windows incompatibilities were fixed. The test suite now has two
+failing tests remaining, both of which appear to be Twisted issues that
+should not affect normal operation.
+
+The test suite no longer raises any deprecation warnings when run against
+twisted-2.0 (except for the ones which come from Twisted itself).
+
+
+* Release 0.6.4 (28 Apr 2005)
+
+** major bugs fixed
+
+The 'buildbot' tool in 0.6.3, when used to create a new buildmaster, failed
+unless it found a 'changes.pck' file. As this file is created by a running
+buildmaster, this made 0.6.3 completely unusable for first-time
+installations. This has been fixed.
+
+** minor bugs fixed
+
+The IRC bot had a bug wherein asking it to watch a certain builder (the "I'll
+give a shout when the build finishes" message) would cause an exception, so
+it would not, in fact, shout. The HTML page had an exception in the "change
+sources" page (reached by following the "Changes" link at the top of the
+column that shows the names of committers). Re-loading the config file while
+builders were already attached would result in a benign error message. The
+server side of the PBListener status client had an exception when providing
+information about a non-existent Build (e.g., when the client asks for the
+Build that is currently running, and the server says "None").
+
+These bugs have all been fixed.
+
+The unit tests now pass under python2.2; they were failing before because of
+some 2.3isms that crept in. More unit tests which failed under windows now
+pass, only one (test_webPathname_port) is still failing.
+
+** 'buildbot' tool looks for a .buildbot/options file
+
+The 'statusgui' and the 'debugclient' subcommands can both look for a
+.buildbot/ directory, and an 'options' file therein, to extract default
+values for the location of the buildmaster. This directory is searched in the
+current directory, its parent, etc, all the way up to the filesystem root
+(assuming you own the directories in question). It also looks in ~/.buildbot/
+for this file. This feature allows you to put a .buildbot at the top of your
+working tree, telling any 'buildbot' invocations you perform therein how to
+get to the buildmaster associated with that tree's project.
+
+Windows users get something similar, using %APPDATA%/buildbot instead of
+~/.buildbot .
+
+** windows ShellCommands are launched with 'cmd.exe'
+
+The buildslave has been modified to run all list-based ShellCommands by
+prepending [os.environ['COMSPEC'], '/c'] to the argv list before execution.
+This should allow the buildslave's PATH to be searched for commands,
+improving the chances that it can run the same 'trial -o foo' commands as a
+unix buildslave. The potential downside is that spaces in argv elements might
+be re-parsed, or quotes might be re-interpreted. The consensus on the mailing
+list was that this is a useful thing to do, but please report any problems
+you encounter with it.
+
+** minor features
+
+The Waterfall display now shows the buildbot's home timezone at the top of
+the timestamp column. The default favicon.ico is now much nicer-looking (it
+is generated with Blender.. the icon.blend file is available in CVS in
+docs/images/ should you care to play with it).
+
+
+
+* Release 0.6.3 (25 Apr 2005)
+
+** 'buildbot' tool gets more uses
+
+The 'buildbot' executable has acquired three new subcommands. 'buildbot
+debugclient' brings up the small remote-control panel that connects to a
+buildmaster (via the slave port and the c['debugPassword']). This tool,
+formerly in contrib/debugclient.py, lets you reload the config file, force
+builds, and simulate inbound commit messages. It requires gtk2, glade, and
+the python bindings for both to be installed.
+
+'buildbot statusgui' brings up a live status client, formerly available by
+running buildbot/clients/gtkPanes.py as a program. This connects to the PB
+status port that you create with:
+
+ c['status'].append(client.PBListener(portnum))
+
+and shows two boxes per Builder, one for the last build, one for current
+activity. These boxes are updated in realtime. The effect is primitive, but
+is intended as an example of what's possible with the PB status interface.
+
+'buildbot statuslog' provides a text-based running log of buildmaster events.
+
+Note: command names are subject to change. These should get much more useful
+over time.
+
+** web page has a favicon
+
+When constructing the html.Waterfall instance, you can provide the filename
+of an image that will be provided when the "favicon.ico" resource is
+requested. Many web browsers display this as an icon next to the URL or
+bookmark. A goofy little default icon is included.
+
+** web page has CSS
+
+Thanks to Thomas Vander Stichele, the Waterfall page is now themable through
+CSS. The default CSS is located in buildbot/status/classic.css, and creates a
+page that is mostly identical to the old, non-CSS based table.
+
+You can specify a different CSS file to use by passing it as the css=
+argument to html.Waterfall(). See the docstring for Waterfall for some more
+details.
+
+** builder "categories"
+
+Thomas has added code which places each Builder in an optional "category".
+The various status targets (Waterfall, IRC, MailNotifier) can accept a list
+of categories, and they will ignore any activity in builders outside this
+list. This makes it easy to create some Builders which are "experimental" or
+otherwise not yet ready for the world to see, or indicate that certain
+builders should not harass developers when their tests fail, perhaps because
+the build slaves for them are not yet fully functional.
+
+** Deprecated features
+
+*** defining Builders with tuples is deprecated
+
+For a long time, the preferred way to define builders in the config file has
+been with a dictionary. The less-flexible old style of a 4-item tuple (name,
+slavename, builddir, factory) is now officially deprecated (i.e., it will
+emit a warning if you use it), and will be removed in the next release.
+Dictionaries are more flexible: additional keys like periodicBuildTime are
+simply unavailable to tuple-defined builders.
+
+Note: it is a good idea to watch the logfile (usually in twistd.log) when you
+first start the buildmaster, or whenever you reload the config file. Any
+warnings or errors in the config file will be found there.
+
+*** c['webPortnum'], c['webPathname'], c['irc'] are deprecated
+
+All status reporters should be defined in the c['status'] array, using
+buildbot.status.html.Waterfall or buildbot.status.words.IRC . These have been
+deprecated for a while, but this is fair warning that these keys will be
+removed in the next release.
+
+*** c['manholePort'] is deprecated
+
+Again, this has been deprecated for a while, in favor of:
+
+ c['manhole'] = master.Manhole(port, username, password)
+
+The preferred syntax will eventually let us use other, better kinds of debug
+shells, such as the experimental curses-based ones in the Twisted sandbox
+(which would offer command-line editing and history).
+
+** bug fixes
+
+The waterfall page has been improved a bit. A circular-reference bug in the
+web page's TextLog class was fixed, which caused a major memory leak in a
+long-running buildmaster with large logfiles that are viewed frequently.
+Modifying the config file in a way which only changed a builder's base
+directory now works correctly. The 'buildbot' command tries to create
+slightly more useful master/slave directories, adding a Makefile entry to
+re-create the .tap file, and removing global-read permissions from the files
+that may contain buildslave passwords.
+
+** twisted-2.0.0 compatibility
+
+Both buildmaster and buildslave should run properly under Twisted-2.0 . There
+are still some warnings about deprecated functions, some of which could be
+fixed, but there are others that would require removing compatibility with
+Twisted-1.3, and I don't expect to do that until 2.0 has been out and stable
+for at least several months. The unit tests should pass under 2.0, whereas
+the previous buildbot release had tests which could hang when run against the
+new "trial" framework in 2.0.
+
+The Twisted-specific steps (including Trial) have been updated to match 2.0
+functionality.
+
+** win32 compatibility
+
+Thanks to Nick Trout, more compatibility fixes have been incorporated,
+improving the chances that the unit tests will pass on windows systems. There
+are still some problems, and a step-by-step "running buildslaves on windows"
+document would be greatly appreciated.
+
+** API docs
+
+Thanks to Thomas Vander Stichele, most of the docstrings have been converted
+to epydoc format. There is a utility in docs/gen-reference to turn these into
+a tree of cross-referenced HTML pages. Eventually these docs will be
+auto-generated and somehow published on the buildbot web page.
+
+
+
+* Release 0.6.2 (13 Dec 2004)
+
+** new features
+
+It is now possible to interrupt a running build. Both the web page and the
+IRC bot feature 'stop build' commands, which can be used to interrupt the
+current BuildStep and accelerate the termination of the overall Build. The
+status reporting for these still leaves something to be desired (an
+'interrupt' event is pushed into the column, and the reason for the interrupt
+is added to a pseudo-logfile for the step that was stopped, but if you only
+look at the top-level status it appears that the build failed on its own).
+
+Builds are also halted if the connection to the buildslave is lost. On the
+slave side, any active commands are halted if the connection to the
+buildmaster is lost.
+
+** minor new features
+
+The IRC log bot now reports ETA times in a MMSS format like "2m45s" instead
+of the clunky "165 seconds".
+
+** bug fixes
+
+*** Slave Disconnect
+
+Slave disconnects should be handled better now: the current build should be
+abandoned properly. Earlier versions could get into weird states where the
+build failed to finish, clogging the builder forever (or at least until the
+buildmaster was restarted).
+
+In addition, there are weird network conditions which could cause a
+buildslave to attempt to connect twice to the same buildmaster. This can
+happen when the slave is sending large logfiles over a slow link, while using
+short keepalive timeouts. The buildmaster has been fixed to allow the second
+connection attempt to take precedence over the first, so that the older
+connection is jettisoned to make way for the newer one.
+
+In addition, the buildslave has been fixed to be less twitchy about timeouts.
+There are now two parameters: keepaliveInterval (which is controlled by the
+mktap 'keepalive' argument), and keepaliveTimeout (which requires editing the
+.py source to change from the default of 30 seconds). The slave expects to
+see *something* from the master at least once every keepaliveInterval
+seconds, and will try to provoke a response (by sending a keepalive request)
+'keepaliveTimeout' seconds before the end of this interval just in case there
+was no regular traffic. Any kind of traffic will qualify, including
+acknowledgements of normal build-status updates.
+
+The net result is that, as long as any given PB message can be sent over the
+wire in less than 'keepaliveTimeout' seconds, the slave should not mistakenly
+disconnect because of a timeout. There will be traffic on the wire at least
+every 'keepaliveInterval' seconds, which is what you want to pay attention to
+if you're trying to keep an intervening NAT box from dropping what it thinks
+is an abandoned connection. A quiet loss of connection will be detected
+within 'keepaliveInterval' seconds.
+
+*** Large Logfiles
+
+The web page rendering code has been fixed to deliver large logfiles in
+pieces, using a producer/consumer apparatus. This avoids the large spike in
+memory consumption when the log file body was linearized into a single string
+and then buffered in the socket's application-side transmit buffer. This
+should also avoid the 640k single-string limit for web.distrib servers that
+could be hit by large (>640k) logfiles.
+
+
+
+* Release 0.6.1 (23 Nov 2004)
+
+** win32 improvements/bugfixes
+
+Several changes have gone in to improve portability to non-unix systems. It
+should be possible to run a build slave under windows without major issues
+(although step-by-step documentation is still greatly desired: check the
+mailing list for suggestions from current win32 users).
+
+*** PBChangeSource: use configurable directory separator, not os.sep
+
+The PBChangeSource, which listens on a TCP socket for change notices
+delivered from tools like contrib/svn_buildbot.py, was splitting source
+filenames with os.sep . This is inappropriate, because those file names are
+coming from the VC repository, not the local filesystem, and the repository
+host may be running a different OS (with a different separator convention)
+than the buildmaster host. In particular, a win32 buildmaster using a CVS
+repository running on a unix box would be confused.
+
+PBChangeSource now takes a sep= argument to indicate the separator character
+to use.
+
+*** build saving should work better
+
+windows cannot do the atomic os.rename() trick that unix can, so under win32
+the buildmaster falls back to save/delete-old/rename, which carries a slight
+risk of losing a saved build log (if the system were to crash between the
+delete-old and the rename).
+
+** new features
+
+*** test-result tracking
+
+Work has begun on fine-grained test-result handling. The eventual goal is to
+be able to track individual tests over time, and create problem reports when
+a test starts failing (which then are resolved when the test starts passing
+again). The first step towards this is an ITestResult interface, and code in
+the TrialTestParser to create such results for all non-passing tests (the
+ones for which Trial emits exception tracebacks).
+
+These test results are currently displayed in a tree-like display in a page
+accessible from each Build's page (follow the numbered link in the yellow
+box at the start of each build to get there).
+
+This interface is still in flux, as it really wants to be able to accommodate
+things like compiler warnings and tests that are skipped because of missing
+libraries or unsupported architectures.
+
+** bug fixes
+
+*** VC updates should survive temporary failures
+
+Some VC systems (CVS and SVN in particular) get upset when files are turned
+into directories or vice versa, or when repository items are moved without
+the knowledge of the VC system. The usual symptom is that a 'cvs update'
+fails where a fresh checkout succeeds.
+
+To avoid having to manually intervene, the build slaves' VC commands have
+been refactored to respond to update failures by deleting the tree and
+attempting a full checkout. This may cause some unnecessary effort when,
+e.g., the CVS server falls off the net, but in the normal case it will only
+come into play when one of these can't-cope situations arises.
+
+*** forget about an existing build when the slave detaches
+
+If the slave was lost during a build, the master did not clear the
+.currentBuild reference, making that builder unavailable for later builds.
+This has been fixed, so that losing a slave should be handled better. This
+area still needs some work, I think it's still possible to get both the
+slave and the master wedged by breaking the connection at just the right
+time. Eventually I want to be able to resume interrupted builds (especially
+when the interruption is the result of a network failure and not because the
+slave or the master actually died).
+
+*** large logfiles now consume less memory
+
+Build logs are stored as lists of (type,text) chunks, so that
+stdout/stderr/headers can be displayed differently (if they were
+distinguishable when they were generated: stdout and stderr are merged when
+usePTY=1). For multi-megabyte logfiles, a large list with many short strings
+could incur a large overhead. The new behavior is to merge same-type string
+chunks together as they are received, aiming for a chunk size of about 10kb,
+which should bring the overhead down to a more reasonable level.
+
+There remains an issue with actually delivering large logfiles over, say,
+the HTML interface. The string chunks must be merged together into a single
+string before delivery, which causes a spike in the memory usage when the
+logfile is viewed. This can also break twisted.web.distrib -type servers,
+where the underlying PB protocol imposes a 640k limit on the size of
+strings. This will be fixed (with a proper Producer/Consumer scheme) in the
+next release.
+
+
+* Release 0.6.0 (30 Sep 2004)
+
+** new features
+
+*** /usr/bin/buildbot control tool
+
+There is now an executable named 'buildbot'. For now, this just provides a
+convenient front-end to mktap/twistd/kill, but eventually it will provide
+access to other client functionality (like the 'try' builds, and a status
+client). Assuming you put your buildbots in /var/lib/buildbot/master/FOO,
+you can do 'buildbot create-master /var/lib/buildbot/master/FOO' and it will
+create the .tap file and set up a sample master.cfg for you. Later,
+'buildbot start /var/lib/buildbot/master/FOO' will start the daemon.
+
+
+*** build status now saved in external files, -shutdown.tap unnecessary
+
+The status rewrite included a change to save all build status in a set of
+external files. These files, one per build, are put in a subdirectory of the
+master's basedir (named according to the 'builddir' parameter of the Builder
+configuration dictionary). This helps keep the buildmaster's memory
+consumption small: the (potentially large) build logs are kept on disk
+instead of in RAM. There is a small cache (2 builds per builder) kept in
+memory, but everything else lives on disk.
+
+The big change is that the buildmaster now keeps *all* status in these
+files. It is no longer necessary to preserve the buildbot-shutdown.tap file
+to run a persistent buildmaster. The buildmaster may be launched with
+'twistd -f buildbot.tap' each time, in fact the '-n' option can be added to
+prevent twistd from automatically creating the -shutdown.tap file.
+
+There is still one lingering bug with this change: the Expectations object
+for each builder (which records how long the various steps took, to provide
+an ETA value for the next time) is not yet saved. The result is that the
+first build after a restart will not provide an ETA value.
+
+0.6.0 keeps status in a single file per build, as opposed to 0.5.0 which
+kept status in many subdirectories (one layer for builds, another for steps,
+and a third for logs). 0.6.0 will detect and delete these subdirectories as
+it overwrites them.
+
+The saved builds are optional. To prevent disk usage from growing without
+bounds, you may want to set up a cron job to run 'find' and delete any which
+are too old. The status displays will happily survive without those saved
+build objects.
+
+The set of recorded Changes is kept in a similar file named 'changes.pck'.
+
+
+*** source checkout now uses timestamp/revision
+
+Source checkouts are now performed with an appropriate -D TIMESTAMP (for
+CVS) or -r REVISION (for SVN) marker to obtain the exact sources that were
+specified by the most recent Change going into the current Build. This
+avoids a race condition in which a change might be committed after the build
+has started but before the source checkout has completed, resulting in a
+mismatched set of source files. Such changes are now ignored.
+
+This works by keeping track of repository-wide revision/transaction numbers
+(for version control systems that offer them, like SVN). The checkout or
+update is performed with the highest such revision number. For CVS (which
+does not have them), the timestamp of each commit message is used, and a -D
+argument is created to place the checkout squarely in the middle of the "tree
+stable timer"'s window.
+
+This also provides the infrastructure for the upcoming 'try' feature. All
+source-checkout commands can now obtain a base revision marker and a patch
+from the Build, allowing certain builds to be performed on something other
+than the most recent sources.
+
+See source.xhtml and steps.xhtml for details.
+
+
+*** Darcs and Arch support added
+
+There are now build steps which retrieve a source tree from Darcs and Arch
+repositories. See steps.xhtml for details.
+
+Preliminary P4 support has been added, thanks to code from Dave Peticolas.
+You must manually set up each build slave with an appropriate P4CLIENT: all
+buildbot does is run 'p4 sync' at the appropriate times.
+
+
+*** Status reporting rewritten
+
+Status reporting was completely revamped. The config file now accepts a
+BuildmasterConfig['status'] entry, with a list of objects that perform status
+delivery. The old config file entries which controlled the web status port
+and the IRC bot have been deprecated in favor of adding instances to
+['status']. The following status-delivery classes have been implemented, all
+in the 'buildbot.status' package:
+
+ client.PBListener(port, username, passwd)
+ html.Waterfall(http_port, distrib_port)
+ mail.MailNotifier(fromaddr, mode, extraRecipients..)
+ words.IRC(host, nick, channels)
+
+See the individual docstrings for details about how to use each one. You can
+create new status-delivery objects by following the interfaces found in the
+buildbot.interfaces module.
+
+
+*** BuildFactory configuration process changed
+
+The basic BuildFactory class is now defined in buildbot.process.factory
+rather than buildbot.process.base, so you will have to update your config
+files. factory.BuildFactory is the base class, which accepts a list of Steps
+to run. See docs/factories.xhtml for details.
+
+There are now easier-to-use BuildFactory classes for projects which use GNU
+Autoconf, perl's MakeMaker (CPAN), python's distutils (but no unit tests),
+and Twisted's Trial. Each one takes a separate 'source' Step to obtain the
+source tree, and then fills in the rest of the Steps for you.
+
+
+*** CVS/SVN VC steps unified, simplified
+
+The confusing collection of arguments for the CVS step ('clobber=',
+'copydir=', and 'export=') have been removed in favor of a single 'mode'
+argument. This argument describes how you want to use the sources: whether
+you want to update and compile everything in the same tree (mode='update'),
+or do a fresh checkout and full build each time (mode='clobber'), or
+something in between.
+
+The SVN (Subversion) step has been unified and accepts the same mode=
+parameter as CVS. New version control steps will obey the same interface.
+
+Most of the old configuration arguments have been removed. You will need to
+update your configuration files to use the new arguments. See
+docs/steps.xhtml for a description of all the new parameters.
+
+
+*** Preliminary Debian packaging added
+
+Thanks to the contributions of Kirill Lapshin, we can now produce .deb
+installer packages. These are still experimental, but they include init.d
+startup/shutdown scripts, which use the new /usr/bin/buildbot to invoke
+twistd. Create your buildmasters in /var/lib/buildbot/master/FOO, and your
+slaves in /var/lib/buildbot/slave/BAR, then put FOO and BAR in the
+appropriate places in /etc/default/buildbot . After that, the buildmasters
+and slaves will be started at every boot.
+
+Pre-built .debs are not yet distributed. Use 'debuild -uc -us' from the
+source directory to create them.
+
+
+** minor features
+
+
+*** Source Stamps
+
+Each build now has a "source stamp" which describes what sources it used. The
+idea is that the sources for this particular build can be completely
+regenerated from the stamp. The stamp is a tuple of (revision, patch), where
+the revision depends on the VC system being used (for CVS it is either a
+revision tag like "BUILDBOT-0_5_0" or a datestamp like "2004/07/23", for
+Subversion it is a revision number like 11455). This must be combined with
+information from the Builder that is constant across all builds (something to
+point at the repository, and possibly a branch indicator for CVS and other VC
+systems that don't fold this into the repository string).
+
+The patch is an optional unified diff file, ready to be applied by running
+'patch -p0 <PATCH' from inside the workdir. This provides support for the
+'try' feature that will eventually allow developers to run buildbot tests on
+their code before checking it in.
+
+
+*** SIGHUP causes the buildmaster's configuration file to be re-read
+
+*** IRC bot now has 'watch' command
+
+You can now tell the buildbot's IRC bot to 'watch <buildername>' on a builder
+which is currently performing a build. When that build is finished, the
+buildbot will make an announcement (including the results of the build).
+
+The IRC 'force build' command will also announce when the resulting build has
+completed.
+
+
+*** the 'force build' option on HTML and IRC status targets can be disabled
+
+The html.Waterfall display and the words.IRC bot may be constructed with an
+allowForce=False argument, which removes the ability to force a build through
+these interfaces. Future versions will be able to restrict this build-forcing
+capability to authenticated users. The per-builder HTML page no longer
+displays the 'Force Build' buttons if it does not have this ability. Thanks
+to Fred Drake for code and design suggestions.
+
+
+*** master now takes 'projectName' and 'projectURL' settings
+
+These strings allow the buildbot to describe what project it is working for.
+At the moment they are only displayed on the Waterfall page, but in the next
+release they will be retrievable from the IRC bot as well.
+
+
+*** survive recent (SVN) Twisted versions
+
+The buildbot should run correctly (albeit with plenty of noisy deprecation
+warnings) under the upcoming Twisted-2.0 release.
+
+
+*** work-in-progress realtime Trial results acquisition
+
+Jonathan Simms (<slyphon>) has been working on 'retrial', a rewrite of
+Twisted's unit test framework that will most likely be available in
+Twisted-2.0 . Although it is not yet complete, the buildbot will be able to
+use retrial in such a way that build status is reported on a per-test basis,
+in real time. This will be the beginning of fine-grained test tracking and
+Problem management, described in docs/users.xhtml .
+
+
+* Release 0.5.0 (22 Jul 2004)
+
+** new features
+
+*** web.distrib servers via TCP
+
+The 'webPathname' config option, which specifies a UNIX socket on which to
+publish the waterfall HTML page (for use by 'mktap web -u' or equivalent),
+now accepts a numeric port number. This publishes the same thing via TCP,
+allowing the parent web server to live on a separate machine.
+
+This config option could be named better, but it will go away altogether in
+a few releases, when status delivery is unified. It will be replaced with a
+WebStatusTarget object, and the config file will simply contain a list of
+various kinds of status targets.
+
+*** 'master.cfg' filename is configurable
+
+The buildmaster can use a config file named something other than
+"master.cfg". Use the --config=foo.cfg option to mktap to control this.
+
+*** FreshCVSSource now uses newcred (CVSToys >= 1.0.10)
+
+The FreshCVSSource class now defaults to speaking to freshcvs daemons from
+modern CVSToys releases. If you need to use the buildbot with a daemon from
+CVSToys-1.0.9 or earlier, use FreshCVSSourceOldcred instead. Note that the
+new form only requires host/port/username/passwd: the "serviceName"
+parameter is no longer meaningful.
+
+*** Builders are now configured with a dictionary, not a tuple
+
+The preferred way to set up a Builder in master.cfg is to provide a
+dictionary with various keys, rather than a (non-extensible) 4-tuple. See
+docs/config.xhtml for details. The old tuple-way is still supported for now,
+it will probably be deprecated in the next release and removed altogether in
+the following one.
+
+*** .periodicBuildTime is now exposed to the config file
+
+To set a builder to run at periodic intervals, simply add a
+'periodicBuildTime' key to its master.cfg dictionary. Again, see
+docs/config.xhtml for details.
+
+*** svn_buildbot.py adds --include, --exclude
+
+The commit trigger script now gives you more control over which files are
+sent to the buildmaster and which are not.
+
+*** usePTY is controllable at slave mktap time
+
+The buildslaves usually run their child processes in a pty, which creates a
+process group for all the children, which makes it much easier to kill them
+all at once (i.e. if a test hangs). However this causes problems on some
+systems. Rather than hacking slavecommand.py to disable the use of these
+ptys, you can now create the slave's .tap file with --usepty=0 at mktap
+time.
+
+** Twisted changes
+
+A summary of warnings (e.g. DeprecationWarnings) is provided as part of the
+test-case summarizer. The summarizer also counts Skips, expectedFailures,
+and unexpectedSuccesses, displaying the counts on the test step's event box.
+
+The RunUnitTests step now uses "trial -R twisted" instead of "trial
+twisted.test", which is a bit cleaner. All .pyc files are deleted before
+starting trial, to avoid getting tripped up by deleted .py files.
+
+** documentation
+
+docs/config.xhtml now describes the syntax and allowed contents of the
+'master.cfg' configuration file.
+
+** bugfixes
+
+Interlocks had a race condition that could cause the lock to get stuck
+forever.
+
+FreshCVSSource has a prefix= argument that was moderately broken (it used to
+only work if the prefix was a single directory component). It now works with
+subdirectories.
+
+The buildmaster used to complain when it saw the "info" directory in a
+slave's workspace. This directory is used to publish information about the
+slave host and its administrator, and is not a leftover build directory as
+the complaint suggested. This complaint has been silenced.
+
+
+* Release 0.4.3 (30 Apr 2004)
+
+** PBChangeSource made explicit
+
+In 0.4.2 and before, an internal interface was available which allowed
+special clients to inject changes into the Buildmaster. This interface is
+used by the contrib/svn_buildbot.py script. The interface has been extracted
+into a proper PBChangeSource object, which should be created in the
+master.cfg file just like the other kinds of ChangeSources. See
+docs/sources.xhtml for details.
+
+If you were implicitly using this change source (for example, if you use
+Subversion and the svn_buildbot.py script), you *must* add this source to
+your master.cfg file, or changes will not be delivered and no builds will be
+triggered.
+
+The PBChangeSource accepts the same "prefix" argument as all other
+ChangeSources. For a SVN repository that follows the recommended practice of
+using "trunk/" for the trunk revisions, you probably want to construct the
+source like this:
+
+ source = PBChangeSource(prefix="trunk")
+
+to make sure that the Builders are given sensible (trunk-relative)
+filenames for each changed source file.
+
+** Twisted changes
+
+*** step_twisted.RunUnitTests can change "bin/trial"
+
+The twisted RunUnitTests step was enhanced to let you run something other
+than "bin/trial", making it easier to use a buildbot on projects which use
+Twisted but aren't actually Twisted itself.
+
+*** Twisted now uses Subversion
+
+Now that Twisted has moved from CVS to SVN, the Twisted build processes have
+been modified to perform source checkouts from the Subversion repository.
+
+** minor feature additions
+
+*** display Changes with HTML
+
+Changes are displayed with a bit more pizazz, and a links= argument was
+added to allow things like ViewCVS links to be added to the display
+(although it is not yet clear how this argument should be used: the
+interface remains subject to change until it has been documented).
+
+*** display ShellCommand logs with HTML
+
+Headers are in blue, stderr is in red (unless usePTY=1 in which case stderr
+and stdout are indistinguishable). A link is provided which returns the same
+contents as plain text (by appending "?text=1" to the URL).
+
+*** buildslaves send real tracebacks upon error
+
+The .unsafeTracebacks option has been turned on for the buildslaves,
+allowing them to send a full stack trace when an exception occurs, which is
+logged in the buildmaster's twistd.log file. This makes it much easier to
+determine what went wrong on the slave side.
+
+*** BasicBuildFactory refactored
+
+The BasicBuildFactory class was refactored to make it easier to create
+derivative classes, in particular the BasicSVN variant.
+
+*** "ping buildslave" web button added
+
+There is now a button on the "builder information" page that lets a web user
+initiate a ping of the corresponding build slave (right next to the button
+that lets them force a build). This was added to help track down a problem
+with the slave keepalives.
+
+** bugs fixed:
+
+You can now have multiple BuildSteps with the same name (the names are used
+as hash keys in the data structure that helps determine ETA values for each
+step, the new code creates unique key names if necessary to avoid
+collisions). This means that, for example, you do not have to create a
+BuildStep subclass just to have two Compile steps in the same process.
+
+If CVSToys is not installed, the tests that depend upon it are skipped.
+
+Some tests in 0.4.2 failed because of a missing set of test files, they are
+now included in the tarball properly.
+
+Slave keepalives should work better now in the face of silent connection
+loss (such as when an intervening NAT box times out the association), the
+connection should be reestablished in minutes instead of hours.
+
+Shell commands on the slave are invoked with an argument list instead of the
+ugly and error-prone split-on-spaces approach. If the ShellCommand is given
+a string (instead of a list), it will fall back to splitting on spaces.
+Shell commands should work on win32 now (using COMSPEC instead of /bin/sh).
+
+Buildslaves under w32 should theoretically work now, and one was running for
+the Twisted buildbot for a while until the machine had to be returned.
+
+The "header" lines in ShellCommand logs (which include the first line, that
+displays the command being run, and the last, which shows its exit status)
+are now generated by the buildslave side instead of the local (buildmaster)
+side. This can provide better error handling and is generally cleaner.
+However, if you have an old buildslave (running 0.4.2 or earlier) and a new
+buildmaster, then neither end will generate these header lines.
+
+CVSCommand was improved, in certain situations 0.4.2 would perform
+unnecessary checkouts (when an update would have sufficed). Thanks to Johan
+Dahlin for the patches. The status output was fixed as well, so that
+failures in CVS and SVN commands (such as not being able to find the 'svn'
+executable) make the step status box red.
+
+Subversion support was refactored to make it behave more like CVS. This is a
+work in progress and will be improved in the next release.
+
+
+* Release 0.4.2 (08 Jan 2004)
+
+** test suite updated
+
+The test suite has been completely moved over to Twisted's "Trial"
+framework, and all tests now pass. To run the test suite (consisting of 64
+tests, probably covering about 30% of BuildBot's logic), do this:
+
+ PYTHONPATH=. trial -v buildbot.test
+
+** Mail parsers updated
+
+Several bugs in the mail-parsing code were fixed, allowing a buildmaster to
+be triggered by mail sent out by a CVS repository. (The Twisted Buildbot is
+now using this to trigger builds, as their CVS server machine is having some
+difficulties with FreshCVS). The FreshCVS mail format for directory
+additions appears to have changed recently: the new parser should handle
+both old and new-style messages.
+
+A parser for Bonsai commit messages (buildbot.changes.mail.parseBonsaiMail)
+was contributed by Stephen Davis. Thanks Stephen!
+
+** CVS "global options" now available
+
+The CVS build step can now accept a list of "global options" to give to the
+cvs command. These go before the "update"/"checkout" word, and are described
+fully by "cvs --help-options". Two useful ones might be "-r", which causes
+checked-out files to be read-only, and "-R", which assumes the repository is
+read-only (perhaps by not attempting to write to lock files).
+
+
+* Release 0.4.1 (09 Dec 2003)
+
+** MaildirSources fixed
+
+Several bugs in MaildirSource made them unusable. These have been fixed (for
+real this time). The Twisted buildbot is using an FCMaildirSource while they
+fix some FreshCVS daemon problems, which provided the encouragement for
+getting these bugs fixed.
+
+In addition, the use of DNotify (only available under linux) was somehow
+broken, possibly by changes in some recent version of Python. It appears to
+be working again now (against both python-2.3.3c1 and python-2.2.1).
+
+** master.cfg can use 'basedir' variable
+
+As documented in the sample configuration file (but not actually implemented
+until now), a variable named 'basedir' is inserted into the namespace used
+by master.cfg . This can be used with something like:
+
+ os.path.join(basedir, "maildir")
+
+to obtain a master-basedir-relative location.
+
+
+* Release 0.4.0 (05 Dec 2003)
+
+** newapp
+
+I've moved the codebase to Twisted's new 'application' framework, which
+drastically cleans up service startup/shutdown just like newcred did for
+authorization. This is mostly an internal change, but the interface to
+IChangeSources was modified, so in the off chance that someone has written a
+custom change source, it may have to be updated to the new scheme.
+
+The most user-visible consequence of this change is that now both
+buildmasters and buildslaves are generated with the standard Twisted 'mktap'
+utility. Basic documentation is in the README file.
+
+Both buildmaster and buildslave .tap files need to be re-generated to run
+under the new code. I have not figured out the styles.Versioned upgrade path
+well enough to avoid this yet. Sorry.
+
+This also means that both buildslaves and the buildmaster require
+Twisted-1.1.0 or later.
+
+** reloadable master.cfg
+
+Most aspects of a buildmaster are now controlled by a configuration file
+which can be re-read at runtime without losing build history. This feature
+makes the buildmaster *much* easier to maintain.
+
+In the previous release, you would create the buildmaster by writing a
+program to define the Builders and ChangeSources and such, then run it to
+create the .tap file. In the new release, you use 'mktap' to create the .tap
+file, and the only parameter you give it is the base directory to use. Each
+time the buildmaster starts, it will look for a file named 'master.cfg' in
+that directory and parse it as a python script. That script must define a
+dictionary named 'BuildmasterConfig' with various keys to define the
+builders, the known slaves, what port to use for the web server, what IRC
+channels to connect to, etc.
+
+This config file can be re-read at runtime, and the buildmaster will compute
+the differences and add/remove services as necessary. The re-reading is
+currently triggered through the debug port (contrib/debugclient.py is the
+debug port client), but future releases will add the ability to trigger the
+reconfiguration by IRC command, web page button, and probably a local UNIX
+socket (with a helper script to trigger a rebuild locally).
+
+docs/examples/twisted_master.cfg contains a sample configuration file, which
+also lists all the keys that can be set.
+
+There may be some bugs lurking, such as re-configuring the buildmaster while
+a build is running. It needs more testing.
+
+** MaxQ support
+
+Radix contributed some support scripts to run MaxQ test scripts. MaxQ
+(http://maxq.tigris.org/) is a web testing tool that allows you to record
+HTTP sessions and play them back.
+
+** Builders can now wait on multiple Interlocks
+
+The "Interlock" code has been enhanced to allow multiple builders to wait on
+each one. This was done to support the new config-file syntax for specifying
+Interlocks (in which each interlock is a tuple of A and [B], where A is the
+builder the Interlock depends upon, and [B] is a list of builders that
+depend upon the Interlock).
+
+"Interlock" is misnamed. In the next release it will be changed to
+"Dependency", because that's what it really expresses. A new class (probably
+called Interlock) will be created to express the notion that two builders
+should not run at the same time, useful when multiple builders are run on
+the same machine and thrashing results when several CPU- or disk- intensive
+compiles are done simultaneously.
+
+** FreshCVSSource can now handle newcred-enabled FreshCVS daemons
+
+There are now two FreshCVSSource classes: FreshCVSSourceNewcred talks to
+newcred daemons, and FreshCVSSourceOldcred talks to oldcred ones. Mind you,
+FreshCVS doesn't yet do newcred, but when it does, we'll be ready.
+
+'FreshCVSSource' maps to the oldcred form for now. That will probably change
+when the current release of CVSToys supports newcred by default.
+
+** usePTY=1 on posix buildslaves
+
+When a buildslave is running under POSIX (i.e. pretty much everything except
+windows), child processes are created with a pty instead of separate
+stdin/stdout/stderr pipes. This makes it more likely that a hanging build
+(when killed off by the timeout code) will have all its sub-children cleaned
+up. Non-pty children would tend to leave subprocesses running because the
+buildslave was only able to kill off the top-level process (typically
+'make').
+
+Windows doesn't have any concept of ptys, so non-posix systems do not try to
+enable them.
+
+** mail parsers should actually work now
+
+The email parsing functions (FCMaildirSource and SyncmailMaildirSource) were
+broken because of my confused understanding of how python class methods
+work. These sources should be functional now.
+
+** more irc bot sillyness
+
+The IRC bot can now perform half of the famous AYBABTO scene.
+
+
+* Release 0.3.5 (19 Sep 2003)
+
+** newcred
+
+Buildbot has moved to "newcred", a new authorization framework provided by
+Twisted, which is a good bit cleaner and easier to work with than the
+"oldcred" scheme in older versions. This causes both buildmaster and
+buildslaves to depend upon Twisted 1.0.7 or later. The interface to
+'makeApp' has changed somewhat (the multiple kinds of remote connections all
+use the same TCP port now).
+
+Old buildslaves will get "_PortalWrapper instance has no attribute
+'remote_username'" errors when they try to connect. They must be upgraded.
+
+The FreshCVSSource uses PB to connect to the CVSToys server. This has been
+upgraded to use newcred too. If you get errors (TODO: what do they look
+like?) in the log when the buildmaster tries to connect, you need to upgrade
+your FreshCVS service or use the 'useOldcred' argument when creating your
+FreshCVSSource. This is a temporary hack to allow the buildmaster to talk to
+oldcred CVSToys servers. Using it will trigger deprecation warnings. It will
+go away eventually.
+
+In conjunction with this change, makeApp() now accepts a password which can
+be applied to the debug service.
+
+** new features
+
+*** "copydir" for CVS checkouts
+
+The CVS build step can now accept a "copydir" parameter, which should be a
+directory name like "source" or "orig". If provided, the CVS checkout is
+done once into this directory, then copied into the actual working directory
+for compilation etc. Later updates are done in place in the copydir, then
+the workdir is replaced with a copy.
+
+This reduces CVS bandwidth (update instead of full checkout) at the expense
+of twice the disk space (two copies of the tree).
+
+*** Subversion (SVN) support
+
+Radix (Christopher Armstrong) contributed early support for building
+Subversion-based trees. The new 'SVN' buildstep behaves roughly like the
+'CVS' buildstep, and the contrib/svn_buildbot.py script can be used as a
+checkin trigger to feed changes to a running buildmaster.
+
+** notable bugfixes
+
+*** .tap file generation
+
+We no longer set the .tap filename, because the buildmaster/buildslave
+service might be added to an existing .tap file and we shouldn't presume to
+own the whole thing. You may want to manually rename the "buildbot.tap" file
+to something more meaningful (like "buildslave-bot1.tap").
+
+*** IRC reconnect
+
+If the IRC server goes away (it was restarted, or the network connection was
+lost), the buildmaster will now schedule a reconnect attempt.
+
+*** w32 buildslave fixes
+
+An "rm -rf" was turned into shutil.rmtree on non-posix systems.
+
+
+* Release 0.3.4 (28 Jul 2003)
+
+** IRC client
+
+The buildmaster can now join a set of IRC channels and respond to simple
+queries about builder status.
+
+** slave information
+
+The build slaves can now report information from a set of info/* files in
+the slave base directory to the buildmaster. This will be used by the slave
+administrator to announce details about the system hosting the slave,
+contact information, etc. For now, info/admin should contain the name/email
+of the person who is responsible for the buildslave, and info/host should
+describe the system hosting the build slave (OS version, CPU speed, memory,
+etc). The contents of these files are made available through the waterfall
+display.
+
+** change notification email parsers
+
+A parser for Syncmail (syncmail.sourceforge.net) was added. SourceForge
+provides examples of setting up syncmail to deliver CVS commit messages to
+mailing lists, so hopefully this will make it easier for sourceforge-hosted
+projects to set up a buildbot.
+
+email processors were moved into buildbot.changes.mail . FCMaildirSource was
+moved, and the compatibility location (buildbot.changes.freshcvsmail) will
+go away in the next release.
+
+** w32 buildslave ought to work
+
+Some non-portable code was changed to make it more likely that the
+buildslave will run under windows. The Twisted buildbot now has a
+(more-or-less) working w32 buildslave.
+
+
+* Release 0.3.3 (21 May 2003):
+
+** packaging changes
+
+*** include doc/examples in the release. Oops again.
+
+** network changes
+
+*** add keepalives to deal with NAT boxes
+
+Some NAT boxes drop port mappings if the TCP connection looks idle for too
+long (maybe 30 minutes?). Add application-level keepalives (dummy commands
+sent from slave to master every 10 minutes) to appease the NAT box and keep
+our connection alive. Enable this with --keepalive in the slave mktap
+command line. Check the README for more details.
+
+** UI changes
+
+*** allow slaves to trigger any build that they host
+
+Added an internal function to ask the buildmaster to start one of their
+builds. Must be triggered with a debugger or manhole on the slave side for
+now, will add a better UI later.
+
+*** allow web page viewers to trigger any build
+
+Added a button to the per-build page (linked by the build names on the third
+row of the waterfall page) to allow viewers to manually trigger builds.
+There is a field for them to indicate who they are and why they are
+triggering the build. It is possible to abuse this, but for now the benefits
+outweigh the damage that could be done (worst case, someone can make your
+machine run builds continuously).
+
+** generic buildprocess changes
+
+*** don't queue multiple builds for offline slaves
+
+If a slave is not online when a build is ready to run, that build is queued
+so the slave will run it when it next connects. However, the buildmaster
+used to queue every such build, so the poor slave machine would be subject
+to tens or hundreds of builds in a row when they finally did come online.
+The buildmaster has been changed to merge these multiple builds into a
+single one.
+
+*** bump ShellCommand default timeout to 20 minutes
+
+Used for testing out the win32 twisted builder. I will probably revert this
+in the next release.
+
+*** split args in ShellCommand ourselves instead of using /bin/sh
+
+This should remove the need for /bin/sh on the slave side, improving the
+chances that the buildslave can run on win32.
+
+*** add configureEnv argument to Configure step, pass env dict to slave
+
+Allows build processes to do things like 'CFLAGS=-O0 ./configure' without
+using /bin/sh to set the environment variable.
+
+** Twisted buildprocess changes
+
+*** warn instead of flunk the build when cReactor or qtreactor tests fail
+
+These two always fail. For now, downgrade those failures to a warning
+(orange box instead of red).
+
+*** don't use 'clobber' on remote builds
+
+Builds that run on remote machines (freebsd, OS-X) now use 'cvs update'
+instead of clobbering their trees and doing a fresh checkout. The multiple
+simultaneous CVS checkouts were causing a strain on Glyph's upstream
+bandwidth.
+
+*** use trial --testmodule instead of our own test-case-name grepper
+
+The Twisted coding/testing convention has developers put 'test-case-name'
+tags (emacs local variables, actually) in source files to indicate which
+test cases should be run to exercise that code. Twisted's unit-test
+framework just acquired an argument to look for these tags itself. Use that
+instead of the extra FindUnitTestsForFiles build step we were doing before.
+Removes a good bit of code from buildbot and into Twisted where it really
+belongs.
+
+
+* Release 0.3.2 (07 May 2003):
+
+** packaging changes
+
+*** fix major packaging bug: none of the buildbot/* subdirectories were
+included in the 0.3.1 release. Sorry, I'm still figuring out distutils
+here..
+
+** internal changes
+
+*** use pb.Cacheable to update Events in remote status client. much cleaner.
+
+*** start to clean up BuildProcess->status.builder interface
+
+** bug fixes
+
+*** waterfall display was missing a <tr>, causing it to be misrendered in most
+browsers (except the one I was testing it with, of course)
+
+*** URL without trailing slash (when served in a twisted-web distributed
+server, with a url like "http://twistedmatrix.com/~warner.twistd") should do
+redirect to URL-with-trailing-slash, otherwise internal hrefs are broken.
+
+*** remote status clients: forget RemoteReferences at shutdown, removes
+warnings about "persisting Ephemerals"
+
+** Twisted buildprocess updates:
+
+*** match build process as of twisted-1.0.5
+**** use python2.2 everywhere now that twisted rejects python2.1
+**** look for test-result constants in multiple places
+*** move experimental 'trial --jelly' code to separate module
+*** add FreeBSD builder
+*** catch rc!=0 in HLint step
+*** remove RunUnitTestsRandomly, use randomly=1 parameter instead
+*** parameterize ['twisted.test'] default test case to make subclassing easier
+*** ignore internal distutils warnings in python2.3 builder
+
+
+* Release 0.3.1 (29 Apr 2003):
+
+** First release.
+
+** Features implemented:
+
+ change notification from FreshCVS server or parsed maildir contents
+
+ timed builds
+
+ basic builds, configure/compile/test
+
+ some Twisted-specific build steps: docs, unit tests, debuild
+
+ status reporting via web page
+
+** Features still experimental/unpolished
+
+ status reporting via PB client
diff --git a/buildbot/PKG-INFO b/buildbot/PKG-INFO
new file mode 100644
index 0000000..99384a2
--- /dev/null
+++ b/buildbot/PKG-INFO
@@ -0,0 +1,30 @@
+Metadata-Version: 1.0
+Name: buildbot
+Version: 0.7.10p1
+Summary: BuildBot build automation system
+Home-page: http://buildbot.net/
+Author: Brian Warner
+Author-email: warner-buildbot@lothar.com
+License: GNU GPL
+Description:
+ The BuildBot is a system to automate the compile/test cycle required by
+ most software projects to validate code changes. By automatically
+ rebuilding and testing the tree each time something has changed, build
+ problems are pinpointed quickly, before other developers are
+ inconvenienced by the failure. The guilty developer can be identified
+ and harassed without human intervention. By running the builds on a
+ variety of platforms, developers who do not have the facilities to test
+ their changes everywhere before checkin will at least know shortly
+ afterwards whether they have broken the build or not. Warning counts,
+ lint checks, image size, compile time, and other build parameters can
+ be tracked over time, are more visible, and are therefore easier to
+ improve.
+
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: Environment :: No Input/Output (Daemon)
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: GNU General Public License (GPL)
+Classifier: Topic :: Software Development :: Build Tools
+Classifier: Topic :: Software Development :: Testing
diff --git a/buildbot/README b/buildbot/README
new file mode 100644
index 0000000..15a6024
--- /dev/null
+++ b/buildbot/README
@@ -0,0 +1,201 @@
+
+BuildBot: build/test automation
+ http://buildbot.net
+ Brian Warner <warner-buildbot @ lothar . com>
+
+
+Abstract:
+
+The BuildBot is a system to automate the compile/test cycle required by most
+software projects to validate code changes. By automatically rebuilding and
+testing the tree each time something has changed, build problems are
+pinpointed quickly, before other developers are inconvenienced by the
+failure. The guilty developer can be identified and harassed without human
+intervention. By running the builds on a variety of platforms, developers
+who do not have the facilities to test their changes everywhere before
+checkin will at least know shortly afterwards whether they have broken the
+build or not. Warning counts, lint checks, image size, compile time, and
+other build parameters can be tracked over time, are more visible, and
+are therefore easier to improve.
+
+The overall goal is to reduce tree breakage and provide a platform to run
+tests or code-quality checks that are too annoying or pedantic for any human
+to waste their time with. Developers get immediate (and potentially public)
+feedback about their changes, encouraging them to be more careful about
+testing before checkin.
+
+
+Features:
+
+ * run builds on a variety of slave platforms
+ * arbitrary build process: handles projects using C, Python, whatever
+ * minimal host requirements: python and Twisted
+ * slaves can be behind a firewall if they can still do checkout
+ * status delivery through web page, email, IRC, other protocols
+ * track builds in progress, provide estimated completion time
+ * flexible configuration by subclassing generic build process classes
+ * debug tools to force a new build, submit fake Changes, query slave status
+ * released under the GPL
+
+
+DOCUMENTATION:
+
+The PyCon paper has a good description of the overall architecture. It is
+available in HTML form in docs/PyCon-2003/buildbot.html, or on the web page.
+
+The User's Manual is in docs/buildbot.info, and the Installation chapter is
+the best guide to use for setup instructions. The .texinfo source can also be
+turned into printed documentation. An HTML representation is available on the
+Buildbot home page.
+
+REQUIREMENTS:
+
+ Python: http://www.python.org
+
+ Buildbot requires python-2.3 or later, and is primarily developed against
+ python-2.4 . It is also tested against python-2.5 .
+
+ Twisted: http://twistedmatrix.com
+
+ Both the buildmaster and the buildslaves require Twisted-2.0.x or later.
+ As always, the most recent version is recommended. It has been tested
+ against Twisted-2.5.0, Twisted-8.0.1, Twisted-8.1.0, and Twisted SVN as of
+ the date of release.
+
+ Certain versions of Twisted are delivered as a collection of subpackages.
+ You'll need at least "Twisted" (the core package), and you'll also want
+ TwistedMail, TwistedWeb, and TwistedWords (for sending email, serving a
+ web status page, and delivering build status via IRC, respectively). You
+ might also want TwistedConch (for the encrypted Manhole debug port). Note
+ that Twisted requires ZopeInterface to be installed as well.
+
+INSTALLATION:
+
+Please read the User's Manual in docs/buildbot.info or docs/buildbot.html for
+complete instructions. This file only contains a brief summary.
+
+ RUNNING THE UNIT TESTS
+
+If you would like to run the unit test suite, use a command like this:
+
+ PYTHONPATH=. trial buildbot.test
+
+This should run up to 221 tests, depending upon what VC tools you have
+installed. On my desktop machine it takes about six minutes to complete.
+Nothing should fail (at least under unix), a few might be skipped. If any of
+the tests fail, you should stop and investigate the cause before continuing
+the installation process, as it will probably be easier to track down the bug
+early. There are a few known failures under windows and OS-X, but please
+report these to the mailing list so we can isolate and resolve them.
+
+Neither CVS nor SVN supports file-based repositories on network filesystems
+(or network drives in Windows parlance). Therefore it is recommended to run
+all unit tests on local hard disks.
+
+ INSTALLING THE LIBRARIES:
+
+The first step is to install the python libraries. This package uses the
+standard 'distutils' module, so installing them is usually a matter of
+doing something like:
+
+ python ./setup.py install
+
+To test this, shift to a different directory (like /tmp), and run:
+
+ buildbot --version
+
+If it announces the versions of Buildbot and Twisted, the install went ok.
+
+
+ SETTING UP A BUILD SLAVE:
+
+If you want to run a build slave, you need to obtain the following pieces of
+information from the administrator of the buildmaster you intend to connect
+to:
+
+ your buildslave's name
+ the password assigned to your buildslave
+ the hostname and port number of the buildmaster, i.e. example.com:8007
+
+You also need to pick a working directory for the buildslave. All commands
+will be run inside this directory.
+
+Now run the 'buildbot' command as follows:
+
+ buildbot create-slave WORKDIR MASTERHOST:PORT SLAVENAME PASSWORD
+
+This will create a file called "buildbot.tac", which bundles up all the state
+needed by the build slave application. Twisted has a tool called "twistd"
+which knows how to load these saved applications and start running them.
+twistd takes care of logging and daemonization (running the program in the
+background). /usr/bin/buildbot is a front end which runs twistd for you.
+
+Once you've set up the directory with the .tac file, you start it running
+like this:
+
+ buildbot start WORKDIR
+
+This will start the build slave in the background and finish, so you don't
+need to put it in the background yourself with "&". The process ID of the
+background task is written to a file called "twistd.pid", and all output from
+the program is written to a log file named "twistd.log". Look in twistd.log
+to make sure the buildslave has started.
+
+To shut down the build slave, use:
+
+ buildbot stop WORKDIR
+
+
+ RUNNING BEHIND A NAT BOX:
+
+Some network environments will not properly maintain a TCP connection that
+appears to be idle. NAT boxes which do some form of connection tracking may
+drop the port mapping if it looks like the TCP session has been idle for too
+long. The buildslave attempts to turn on TCP "keepalives" (supported by
+Twisted 1.0.6 and later), and if these cannot be activated, it uses
+application level keepalives (which send a dummy message to the build master
+on a periodic basis). The TCP keepalive is typically sent at intervals of
+about 2 hours, and is configurable through the kernel. The application-level
+keepalive defaults to running once every 10 minutes.
+
+To manually turn on application-level keepalives, or to set them to use some
+other interval, add "--keepalive NNN" to the 'buildbot slave' command line.
+NNN is the number of seconds between keepalives. Use as large a value as your
+NAT box allows to reduce the amount of unnecessary traffic on the wire. 600
+seconds (10 minutes) is a reasonable value.
+
+
+ SETTING UP A BUILD MASTER:
+
+Please read the user's manual for instructions. The short form is that you
+use 'buildbot create-master MASTERDIR' to create the base directory, then you
+edit the 'master.cfg' file to configure the buildmaster. Once this is ready,
+you use 'buildbot start MASTERDIR' to launch it.
+
+A sample configuration file will be created for you in WORKDIR/master.cfg .
+There are more examples in docs/examples/, and plenty of documentation in the
+user's manual. Everything is controlled by the config file.
+
+
+SUPPORT:
+
+ Please send questions, bugs, patches, etc, to the buildbot-devel mailing
+ list reachable through http://buildbot.net/, so that everyone can see them.
+
+
+COPYING:
+
+ Buildbot is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, version 2.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+ Public License for more details.
+
+ For full details, please see the file named COPYING in the top directory
+ of the source tree. You should have received a copy of the GNU General
+ Public License along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+
diff --git a/buildbot/README.w32 b/buildbot/README.w32
new file mode 100644
index 0000000..de54c97
--- /dev/null
+++ b/buildbot/README.w32
@@ -0,0 +1,95 @@
+Several users have reported success in running a buildslave under Windows.
+The following list of steps might help you accomplish the same. They are a
+list of what I did as a unix guy struggling to make a winXP box run the
+buildbot unit tests. When I was done, most of the unit tests passed.
+
+If you discover things that are missing or incorrect, please send your
+corrections to the buildbot-devel mailing list (archives and subscription
+information are available at http://buildbot.sourceforge.net).
+
+Many thanks to Mike "Bear" Taylor for developing this list.
+
+
+0. Check to make sure your PATHEXT environment variable has ";.PY" in
+it -- if not set your global environment to include it.
+
+ Control Panels / System / Advanced / Environment Variables / System variables
+
+1. Install python -- 2.4 -- http://python.org
+ * run win32 installer - no special options needed so far
+
+2. install zope interface package -- 3.0.1final --
+http://www.zope.org/Products/ZopeInterface
+ * run win32 installer - it should auto-detect your python 2.4
+ installation
+
+3. python for windows extensions -- build 203 --
+http://pywin32.sourceforge.net/
+ * run win32 installer - it should auto-detect your python 2.4
+ installation
+
+ the installer complains about a missing DLL. Download mfc71.dll from the
+ site mentioned in the warning
+ (http://starship.python.net/crew/mhammond/win32/) and move it into
+ c:\Python24\DLLs
+
+4. at this point, to preserve my own sanity, I grabbed cygwin.com's setup.exe
+ and started it. It behaves a lot like dselect. I installed bash and other
+ tools (but *not* python). I added C:\cygwin\bin to PATH, allowing me to
+ use tar, md5sum, cvs, all the usual stuff. I also installed emacs, going
+ from the notes at http://www.gnu.org/software/emacs/windows/ntemacs.html .
+ Their FAQ at http://www.gnu.org/software/emacs/windows/faq3.html#install
+ has a note on how to swap CapsLock and Control.
+
+ I also modified PATH (in the same place as PATHEXT) to include C:\Python24
+ and C:\Python24\Scripts . This will allow 'python' and (eventually) 'trial'
+ to work in a regular command shell.
+
+5. twisted -- 2.0 -- http://twistedmatrix.com/projects/core/
+ * unpack tarball and run
+ python setup.py install
+ Note: if you want to test your setup - run:
+ python c:\python24\Scripts\trial.py -o -R twisted
+ (the -o will format the output for console and the "-R twisted" will
+ recursively run all unit tests)
+
+ I had to edit Twisted (core)'s setup.py, to make detectExtensions() return
+ an empty list before running builder._compile_helper(). Apparently the test
+ it uses to detect if the (optional) C modules can be compiled causes the
+ install process to simply quit without actually installing anything.
+
+ I installed several packages: core, Lore, Mail, Web, and Words. They all got
+ copied to C:\Python24\Lib\site-packages\
+
+ At this point
+
+ trial --version
+
+ works, so 'trial -o -R twisted' will run the Twisted test suite. Note that
+ this is not necessarily setting PYTHONPATH, so it may be running the test
+ suite that was installed, not the one in the current directory.
+
+6. I used CVS to grab a copy of the latest Buildbot sources. To run the
+ tests, you must first add the buildbot directory to PYTHONPATH. Windows
+ does not appear to have a Bourne-shell-style syntax to set a variable just
+ for a single command, so you have to set it once and remember it will
+ affect all commands for the lifetime of that shell session.
+
+ set PYTHONPATH=.
+ trial -o -r win32 buildbot.test
+
+ To run against both buildbot-CVS and, say, Twisted-SVN, do:
+
+ set PYTHONPATH=.;C:\path to\Twisted-SVN
+
+
+All commands are done using the normal cmd.exe command shell. As of
+buildbot-0.6.4, only one unit test fails (test_webPathname_port) when you run
+under the 'win32' reactor. (if you run under the default reactor, many of the
+child-process-spawning commands fail, but test_webPathname_port passes. go
+figure.)
+
+Actually setting up a buildslave is not yet covered by this document. Patches
+gladly accepted.
+
+ -Brian
diff --git a/buildbot/bin/buildbot b/buildbot/bin/buildbot
new file mode 100755
index 0000000..7295b00
--- /dev/null
+++ b/buildbot/bin/buildbot
@@ -0,0 +1,4 @@
+#!/usr/bin/env python
+
+from buildbot.scripts import runner
+runner.run()
diff --git a/buildbot/buildbot.egg-info/PKG-INFO b/buildbot/buildbot.egg-info/PKG-INFO
new file mode 100644
index 0000000..99384a2
--- /dev/null
+++ b/buildbot/buildbot.egg-info/PKG-INFO
@@ -0,0 +1,30 @@
+Metadata-Version: 1.0
+Name: buildbot
+Version: 0.7.10p1
+Summary: BuildBot build automation system
+Home-page: http://buildbot.net/
+Author: Brian Warner
+Author-email: warner-buildbot@lothar.com
+License: GNU GPL
+Description:
+ The BuildBot is a system to automate the compile/test cycle required by
+ most software projects to validate code changes. By automatically
+ rebuilding and testing the tree each time something has changed, build
+ problems are pinpointed quickly, before other developers are
+ inconvenienced by the failure. The guilty developer can be identified
+ and harassed without human intervention. By running the builds on a
+ variety of platforms, developers who do not have the facilities to test
+ their changes everywhere before checkin will at least know shortly
+ afterwards whether they have broken the build or not. Warning counts,
+ lint checks, image size, compile time, and other build parameters can
+ be tracked over time, are more visible, and are therefore easier to
+ improve.
+
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: Environment :: No Input/Output (Daemon)
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: GNU General Public License (GPL)
+Classifier: Topic :: Software Development :: Build Tools
+Classifier: Topic :: Software Development :: Testing
diff --git a/buildbot/buildbot.egg-info/SOURCES.txt b/buildbot/buildbot.egg-info/SOURCES.txt
new file mode 100644
index 0000000..300a2da
--- /dev/null
+++ b/buildbot/buildbot.egg-info/SOURCES.txt
@@ -0,0 +1,215 @@
+COPYING
+CREDITS
+MANIFEST.in
+NEWS
+README
+README.w32
+setup.py
+bin/buildbot
+buildbot/__init__.py
+buildbot/buildbot.png
+buildbot/buildset.py
+buildbot/buildslave.py
+buildbot/dnotify.py
+buildbot/ec2buildslave.py
+buildbot/interfaces.py
+buildbot/locks.py
+buildbot/manhole.py
+buildbot/master.py
+buildbot/pbutil.py
+buildbot/scheduler.py
+buildbot/sourcestamp.py
+buildbot/util.py
+buildbot.egg-info/PKG-INFO
+buildbot.egg-info/SOURCES.txt
+buildbot.egg-info/dependency_links.txt
+buildbot.egg-info/requires.txt
+buildbot.egg-info/top_level.txt
+buildbot/changes/__init__.py
+buildbot/changes/base.py
+buildbot/changes/bonsaipoller.py
+buildbot/changes/changes.py
+buildbot/changes/dnotify.py
+buildbot/changes/freshcvs.py
+buildbot/changes/hgbuildbot.py
+buildbot/changes/mail.py
+buildbot/changes/maildir.py
+buildbot/changes/monotone.py
+buildbot/changes/p4poller.py
+buildbot/changes/pb.py
+buildbot/changes/svnpoller.py
+buildbot/clients/__init__.py
+buildbot/clients/base.py
+buildbot/clients/debug.glade
+buildbot/clients/debug.py
+buildbot/clients/gtkPanes.py
+buildbot/clients/sendchange.py
+buildbot/process/__init__.py
+buildbot/process/base.py
+buildbot/process/builder.py
+buildbot/process/buildstep.py
+buildbot/process/factory.py
+buildbot/process/process_twisted.py
+buildbot/process/properties.py
+buildbot/process/step_twisted2.py
+buildbot/scripts/__init__.py
+buildbot/scripts/checkconfig.py
+buildbot/scripts/logwatcher.py
+buildbot/scripts/reconfig.py
+buildbot/scripts/runner.py
+buildbot/scripts/sample.cfg
+buildbot/scripts/startup.py
+buildbot/scripts/tryclient.py
+buildbot/slave/__init__.py
+buildbot/slave/bot.py
+buildbot/slave/commands.py
+buildbot/slave/interfaces.py
+buildbot/slave/registry.py
+buildbot/status/__init__.py
+buildbot/status/base.py
+buildbot/status/builder.py
+buildbot/status/client.py
+buildbot/status/html.py
+buildbot/status/mail.py
+buildbot/status/progress.py
+buildbot/status/tests.py
+buildbot/status/tinderbox.py
+buildbot/status/words.py
+buildbot/status/web/__init__.py
+buildbot/status/web/about.py
+buildbot/status/web/base.py
+buildbot/status/web/baseweb.py
+buildbot/status/web/build.py
+buildbot/status/web/builder.py
+buildbot/status/web/changes.py
+buildbot/status/web/classic.css
+buildbot/status/web/feeds.py
+buildbot/status/web/grid.py
+buildbot/status/web/index.html
+buildbot/status/web/logs.py
+buildbot/status/web/robots.txt
+buildbot/status/web/slaves.py
+buildbot/status/web/step.py
+buildbot/status/web/tests.py
+buildbot/status/web/waterfall.py
+buildbot/status/web/xmlrpc.py
+buildbot/steps/__init__.py
+buildbot/steps/dummy.py
+buildbot/steps/master.py
+buildbot/steps/maxq.py
+buildbot/steps/python.py
+buildbot/steps/python_twisted.py
+buildbot/steps/shell.py
+buildbot/steps/source.py
+buildbot/steps/transfer.py
+buildbot/steps/trigger.py
+buildbot/steps/package/__init__.py
+buildbot/steps/package/rpm/__init__.py
+buildbot/steps/package/rpm/rpmbuild.py
+buildbot/steps/package/rpm/rpmlint.py
+buildbot/steps/package/rpm/rpmspec.py
+buildbot/test/__init__.py
+buildbot/test/emit.py
+buildbot/test/emitlogs.py
+buildbot/test/runutils.py
+buildbot/test/sleep.py
+buildbot/test/test__versions.py
+buildbot/test/test_bonsaipoller.py
+buildbot/test/test_buildreq.py
+buildbot/test/test_buildstep.py
+buildbot/test/test_changes.py
+buildbot/test/test_config.py
+buildbot/test/test_control.py
+buildbot/test/test_dependencies.py
+buildbot/test/test_ec2buildslave.py
+buildbot/test/test_limitlogs.py
+buildbot/test/test_locks.py
+buildbot/test/test_maildir.py
+buildbot/test/test_mailparse.py
+buildbot/test/test_mergerequests.py
+buildbot/test/test_p4poller.py
+buildbot/test/test_package_rpm.py
+buildbot/test/test_properties.py
+buildbot/test/test_reconfig.py
+buildbot/test/test_run.py
+buildbot/test/test_runner.py
+buildbot/test/test_scheduler.py
+buildbot/test/test_shell.py
+buildbot/test/test_slavecommand.py
+buildbot/test/test_slaves.py
+buildbot/test/test_status.py
+buildbot/test/test_steps.py
+buildbot/test/test_svnpoller.py
+buildbot/test/test_transfer.py
+buildbot/test/test_twisted.py
+buildbot/test/test_util.py
+buildbot/test/test_vc.py
+buildbot/test/test_web.py
+buildbot/test/test_webparts.py
+buildbot/test/mail/freshcvs.1
+buildbot/test/mail/freshcvs.2
+buildbot/test/mail/freshcvs.3
+buildbot/test/mail/freshcvs.4
+buildbot/test/mail/freshcvs.5
+buildbot/test/mail/freshcvs.6
+buildbot/test/mail/freshcvs.7
+buildbot/test/mail/freshcvs.8
+buildbot/test/mail/freshcvs.9
+buildbot/test/mail/svn-commit.1
+buildbot/test/mail/svn-commit.2
+buildbot/test/mail/syncmail.1
+buildbot/test/mail/syncmail.2
+buildbot/test/mail/syncmail.3
+buildbot/test/mail/syncmail.4
+buildbot/test/mail/syncmail.5
+buildbot/test/subdir/emit.py
+contrib/README.txt
+contrib/arch_buildbot.py
+contrib/bb_applet.py
+contrib/bzr_buildbot.py
+contrib/darcs_buildbot.py
+contrib/fakechange.py
+contrib/generate_changelog.py
+contrib/git_buildbot.py
+contrib/hg_buildbot.py
+contrib/run_maxq.py
+contrib/svn_buildbot.py
+contrib/svn_watcher.py
+contrib/svnpoller.py
+contrib/viewcvspoll.py
+contrib/CSS/sample1.css
+contrib/CSS/sample2.css
+contrib/OS-X/README
+contrib/OS-X/net.sourceforge.buildbot.master.plist
+contrib/OS-X/net.sourceforge.buildbot.slave.plist
+contrib/windows/buildbot.bat
+contrib/windows/buildbot2.bat
+contrib/windows/buildbot_service.py
+contrib/windows/setup.py
+docs/buildbot.html
+docs/buildbot.info
+docs/buildbot.info-1
+docs/buildbot.info-2
+docs/buildbot.texinfo
+docs/epyrun
+docs/gen-reference
+docs/hexnut32.png
+docs/hexnut48.png
+docs/hexnut64.png
+docs/examples/hello.cfg
+docs/examples/twisted_master.cfg
+docs/images/master.png
+docs/images/master.svg
+docs/images/master.txt
+docs/images/overview.png
+docs/images/overview.svg
+docs/images/overview.txt
+docs/images/slavebuilder.png
+docs/images/slavebuilder.svg
+docs/images/slavebuilder.txt
+docs/images/slaves.png
+docs/images/slaves.svg
+docs/images/slaves.txt
+docs/images/status.png
+docs/images/status.svg
+docs/images/status.txt \ No newline at end of file
diff --git a/buildbot/buildbot.egg-info/dependency_links.txt b/buildbot/buildbot.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/buildbot/buildbot.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/buildbot/buildbot.egg-info/requires.txt b/buildbot/buildbot.egg-info/requires.txt
new file mode 100644
index 0000000..de1910f
--- /dev/null
+++ b/buildbot/buildbot.egg-info/requires.txt
@@ -0,0 +1 @@
+twisted >= 2.0.0 \ No newline at end of file
diff --git a/buildbot/buildbot.egg-info/top_level.txt b/buildbot/buildbot.egg-info/top_level.txt
new file mode 100644
index 0000000..8683f0a
--- /dev/null
+++ b/buildbot/buildbot.egg-info/top_level.txt
@@ -0,0 +1 @@
+buildbot
diff --git a/buildbot/buildbot/__init__.py b/buildbot/buildbot/__init__.py
new file mode 100644
index 0000000..b691f8b
--- /dev/null
+++ b/buildbot/buildbot/__init__.py
@@ -0,0 +1 @@
+version = "0.7.10p1"
diff --git a/buildbot/buildbot/buildbot.png b/buildbot/buildbot/buildbot.png
new file mode 100644
index 0000000..387ba15
--- /dev/null
+++ b/buildbot/buildbot/buildbot.png
Binary files differ
diff --git a/buildbot/buildbot/buildset.py b/buildbot/buildbot/buildset.py
new file mode 100644
index 0000000..fe59f74
--- /dev/null
+++ b/buildbot/buildbot/buildset.py
@@ -0,0 +1,81 @@
+from buildbot.process import base
+from buildbot.status import builder
+from buildbot.process.properties import Properties
+
+
+class BuildSet:
+ """I represent a set of potential Builds, all of the same source tree,
+ across a specified list of Builders. I can represent a build of a
+ specific version of the source tree (named by source.branch and
+ source.revision), or a build of a certain set of Changes
+ (source.changes=list)."""
+
+ def __init__(self, builderNames, source, reason=None, bsid=None,
+ properties=None):
+ """
+ @param source: a L{buildbot.sourcestamp.SourceStamp}
+ """
+ self.builderNames = builderNames
+ self.source = source
+ self.reason = reason
+
+ self.properties = Properties()
+ if properties: self.properties.updateFromProperties(properties)
+
+ self.stillHopeful = True
+ self.status = bss = builder.BuildSetStatus(source, reason,
+ builderNames, bsid)
+
+ def waitUntilSuccess(self):
+ return self.status.waitUntilSuccess()
+ def waitUntilFinished(self):
+ return self.status.waitUntilFinished()
+
+ def start(self, builders):
+ """This is called by the BuildMaster to actually create and submit
+ the BuildRequests."""
+ self.requests = []
+ reqs = []
+
+ # create the requests
+ for b in builders:
+ req = base.BuildRequest(self.reason, self.source, b.name,
+ properties=self.properties)
+ reqs.append((b, req))
+ self.requests.append(req)
+ d = req.waitUntilFinished()
+ d.addCallback(self.requestFinished, req)
+
+ # tell our status about them
+ req_statuses = [req.status for req in self.requests]
+ self.status.setBuildRequestStatuses(req_statuses)
+
+ # now submit them
+ for b,req in reqs:
+ b.submitBuildRequest(req)
+
+ def requestFinished(self, buildstatus, req):
+ # TODO: this is where individual build status results are aggregated
+ # into a BuildSet-wide status. Consider making a rule that says one
+ # WARNINGS results in the overall status being WARNINGS too. The
+ # current rule is that any FAILURE means FAILURE, otherwise you get
+ # SUCCESS.
+ self.requests.remove(req)
+ results = buildstatus.getResults()
+ if results == builder.FAILURE:
+ self.status.setResults(results)
+ if self.stillHopeful:
+ # oh, cruel reality cuts deep. no joy for you. This is the
+ # first failure. This flunks the overall BuildSet, so we can
+ # notify success watchers that they aren't going to be happy.
+ self.stillHopeful = False
+ self.status.giveUpHope()
+ self.status.notifySuccessWatchers()
+ if not self.requests:
+ # that was the last build, so we can notify finished watchers. If
+ # we haven't failed by now, we can claim success.
+ if self.stillHopeful:
+ self.status.setResults(builder.SUCCESS)
+ self.status.notifySuccessWatchers()
+ self.status.notifyFinishedWatchers()
+
diff --git a/buildbot/buildbot/buildslave.py b/buildbot/buildbot/buildslave.py
new file mode 100644
index 0000000..bd41813
--- /dev/null
+++ b/buildbot/buildbot/buildslave.py
@@ -0,0 +1,688 @@
+# Portions copyright Canonical Ltd. 2009
+
+import time
+from email.Message import Message
+from email.Utils import formatdate
+from zope.interface import implements
+from twisted.python import log
+from twisted.internet import defer, reactor
+from twisted.application import service
+import twisted.spread.pb
+
+from buildbot.pbutil import NewCredPerspective
+from buildbot.status.builder import SlaveStatus
+from buildbot.status.mail import MailNotifier
+from buildbot.interfaces import IBuildSlave, ILatentBuildSlave
+from buildbot.process.properties import Properties
+
+
+class AbstractBuildSlave(NewCredPerspective, service.MultiService):
+ """This is the master-side representative for a remote buildbot slave.
+ There is exactly one for each slave described in the config file (the
+ c['slaves'] list). When buildbots connect in (.attach), they get a
+ reference to this instance. The BotMaster object is stashed as the
+ .botmaster attribute. The BotMaster is also our '.parent' Service.
+
+ I represent a build slave -- a remote machine capable of
+ running builds. I am instantiated by the configuration file, and can be
+ subclassed to add extra functionality."""
+
+ implements(IBuildSlave)
+
+ def __init__(self, name, password, max_builds=None,
+ notify_on_missing=[], missing_timeout=3600,
+ properties={}):
+ """
+ @param name: botname this machine will supply when it connects
+ @param password: password this machine will supply when
+ it connects
+ @param max_builds: maximum number of simultaneous builds that will
+ be run concurrently on this buildslave (the
+ default is None for no limit)
+ @param properties: properties that will be applied to builds run on
+ this slave
+ @type properties: dictionary
+ """
+ service.MultiService.__init__(self)
+ self.slavename = name
+ self.password = password
+ self.botmaster = None # no buildmaster yet
+ self.slave_status = SlaveStatus(name)
+ self.slave = None # a RemoteReference to the Bot, when connected
+ self.slave_commands = None
+ self.slavebuilders = {}
+ self.max_builds = max_builds
+
+ self.properties = Properties()
+ self.properties.update(properties, "BuildSlave")
+ self.properties.setProperty("slavename", name, "BuildSlave")
+
+ self.lastMessageReceived = 0
+ if isinstance(notify_on_missing, str):
+ notify_on_missing = [notify_on_missing]
+ self.notify_on_missing = notify_on_missing
+ for i in notify_on_missing:
+ assert isinstance(i, str)
+ self.missing_timeout = missing_timeout
+ self.missing_timer = None
+
+ def update(self, new):
+ """
+ Given a new BuildSlave, configure this one identically. Because
+ BuildSlave objects are remotely referenced, we can't replace them
+ without disconnecting the slave, yet there's no reason to do that.
+ """
+ # the reconfiguration logic should guarantee this:
+ assert self.slavename == new.slavename
+ assert self.password == new.password
+ assert self.__class__ == new.__class__
+ self.max_builds = new.max_builds
+
+ def __repr__(self):
+ if self.botmaster:
+ builders = self.botmaster.getBuildersForSlave(self.slavename)
+ return "<%s '%s', current builders: %s>" % \
+ (self.__class__.__name__, self.slavename,
+ ','.join(map(lambda b: b.name, builders)))
+ else:
+ return "<%s '%s', (no builders yet)>" % \
+ (self.__class__.__name__, self.slavename)
+
+ def setBotmaster(self, botmaster):
+ assert not self.botmaster, "BuildSlave already has a botmaster"
+ self.botmaster = botmaster
+ self.startMissingTimer()
+
+ def stopMissingTimer(self):
+ if self.missing_timer:
+ self.missing_timer.cancel()
+ self.missing_timer = None
+
+ def startMissingTimer(self):
+ if self.notify_on_missing and self.missing_timeout and self.parent:
+ self.stopMissingTimer() # in case it's already running
+ self.missing_timer = reactor.callLater(self.missing_timeout,
+ self._missing_timer_fired)
+
+ def _missing_timer_fired(self):
+ self.missing_timer = None
+ # notify people, but only if we're still in the config
+ if not self.parent:
+ return
+
+ buildmaster = self.botmaster.parent
+ status = buildmaster.getStatus()
+ text = "The Buildbot working for '%s'\n" % status.getProjectName()
+ text += ("has noticed that the buildslave named %s went away\n" %
+ self.slavename)
+ text += "\n"
+ text += ("It last disconnected at %s (buildmaster-local time)\n" %
+ time.ctime(time.time() - self.missing_timeout)) # approx
+ text += "\n"
+ text += "The admin on record (as reported by BUILDSLAVE:info/admin)\n"
+ text += "was '%s'.\n" % self.slave_status.getAdmin()
+ text += "\n"
+ text += "Sincerely,\n"
+ text += " The Buildbot\n"
+ text += " %s\n" % status.getProjectURL()
+ subject = "Buildbot: buildslave %s was lost" % self.slavename
+ return self._mail_missing_message(subject, text)
+
+
+ def updateSlave(self):
+ """Called to add or remove builders after the slave has connected.
+
+ @return: a Deferred that indicates when an attached slave has
+ accepted the new builders and/or released the old ones."""
+ if self.slave:
+ return self.sendBuilderList()
+ else:
+ return defer.succeed(None)
+
+ def updateSlaveStatus(self, buildStarted=None, buildFinished=None):
+ if buildStarted:
+ self.slave_status.buildStarted(buildStarted)
+ if buildFinished:
+ self.slave_status.buildFinished(buildFinished)
+
+ def attached(self, bot):
+ """This is called when the slave connects.
+
+ @return: a Deferred that fires with a suitable pb.IPerspective to
+ give to the slave (i.e. 'self')"""
+
+ if self.slave:
+ # uh-oh, we've got a duplicate slave. The most likely
+ # explanation is that the slave is behind a slow link, thinks we
+ # went away, and has attempted to reconnect, so we've got two
+ # "connections" from the same slave, but the previous one is
+ # stale. Give the new one precedence.
+ log.msg("duplicate slave %s replacing old one" % self.slavename)
+
+ # just in case we've got two identically-configured slaves,
+ # report the IP addresses of both so someone can resolve the
+ # squabble
+ tport = self.slave.broker.transport
+ log.msg("old slave was connected from", tport.getPeer())
+ log.msg("new slave is from", bot.broker.transport.getPeer())
+ d = self.disconnect()
+ else:
+ d = defer.succeed(None)
+ # now we go through a sequence of calls, gathering information, then
+ # tell the Botmaster that it can finally give this slave to all the
+ # Builders that care about it.
+
+ # we accumulate slave information in this 'state' dictionary, then
+ # set it atomically if we make it far enough through the process
+ state = {}
+
+ # Reset graceful shutdown status
+ self.slave_status.setGraceful(False)
+ # We want to know when the graceful shutdown flag changes
+ self.slave_status.addGracefulWatcher(self._gracefulChanged)
+
+ def _log_attachment_on_slave(res):
+ d1 = bot.callRemote("print", "attached")
+ d1.addErrback(lambda why: None)
+ return d1
+ d.addCallback(_log_attachment_on_slave)
+
+ def _get_info(res):
+ d1 = bot.callRemote("getSlaveInfo")
+ def _got_info(info):
+ log.msg("Got slaveinfo from '%s'" % self.slavename)
+ # TODO: info{} might have other keys
+ state["admin"] = info.get("admin")
+ state["host"] = info.get("host")
+ def _info_unavailable(why):
+ # maybe an old slave, doesn't implement remote_getSlaveInfo
+ log.msg("BuildSlave.info_unavailable")
+ log.err(why)
+ d1.addCallbacks(_got_info, _info_unavailable)
+ return d1
+ d.addCallback(_get_info)
+
+ def _get_commands(res):
+ d1 = bot.callRemote("getCommands")
+ def _got_commands(commands):
+ state["slave_commands"] = commands
+ def _commands_unavailable(why):
+ # probably an old slave
+ log.msg("BuildSlave._commands_unavailable")
+ if why.check(AttributeError):
+ return
+ log.err(why)
+ d1.addCallbacks(_got_commands, _commands_unavailable)
+ return d1
+ d.addCallback(_get_commands)
+
+ def _accept_slave(res):
+ self.slave_status.setAdmin(state.get("admin"))
+ self.slave_status.setHost(state.get("host"))
+ self.slave_status.setConnected(True)
+ self.slave_commands = state.get("slave_commands")
+ self.slave = bot
+ log.msg("bot attached")
+ self.messageReceivedFromSlave()
+ self.stopMissingTimer()
+
+ return self.updateSlave()
+ d.addCallback(_accept_slave)
+
+ # Finally, the slave gets a reference to this BuildSlave. They
+ # receive this later, after we've started using them.
+ d.addCallback(lambda res: self)
+ return d
+
+ def messageReceivedFromSlave(self):
+ now = time.time()
+ self.lastMessageReceived = now
+ self.slave_status.setLastMessageReceived(now)
+
+ def detached(self, mind):
+ self.slave = None
+ self.slave_status.removeGracefulWatcher(self._gracefulChanged)
+ self.slave_status.setConnected(False)
+ log.msg("BuildSlave.detached(%s)" % self.slavename)
+
+ def disconnect(self):
+ """Forcibly disconnect the slave.
+
+ This severs the TCP connection and returns a Deferred that will fire
+ (with None) when the connection is probably gone.
+
+ If the slave is still alive, they will probably try to reconnect
+ again in a moment.
+
+ This is called in two circumstances. The first is when a slave is
+ removed from the config file. In this case, when they try to
+ reconnect, they will be rejected as an unknown slave. The second is
+ when we wind up with two connections for the same slave, in which
+ case we disconnect the older connection.
+ """
+
+ if not self.slave:
+ return defer.succeed(None)
+ log.msg("disconnecting old slave %s now" % self.slavename)
+ # When this Deferred fires, we'll be ready to accept the new slave
+ return self._disconnect(self.slave)
+
+ def _disconnect(self, slave):
+ # all kinds of teardown will happen as a result of
+ # loseConnection(), but it happens after a reactor iteration or
+ # two. Hook the actual disconnect so we can know when it is safe
+ # to connect the new slave. We have to wait one additional
+ # iteration (with callLater(0)) to make sure the *other*
+ # notifyOnDisconnect handlers have had a chance to run.
+ d = defer.Deferred()
+
+ # notifyOnDisconnect runs the callback with one argument, the
+ # RemoteReference being disconnected.
+ def _disconnected(rref):
+ reactor.callLater(0, d.callback, None)
+ slave.notifyOnDisconnect(_disconnected)
+ tport = slave.broker.transport
+ # this is the polite way to request that a socket be closed
+ tport.loseConnection()
+ try:
+ # but really we don't want to wait for the transmit queue to
+ # drain. The remote end is unlikely to ACK the data, so we'd
+ # probably have to wait for a (20-minute) TCP timeout.
+ #tport._closeSocket()
+ # however, doing _closeSocket (whether before or after
+ # loseConnection) somehow prevents the notifyOnDisconnect
+ # handlers from being run. Bummer.
+ tport.offset = 0
+ tport.dataBuffer = ""
+ except:
+ # however, these hacks are pretty internal, so don't blow up if
+ # they fail or are unavailable
+ log.msg("failed to accelerate the shutdown process")
+ pass
+ log.msg("waiting for slave to finish disconnecting")
+
+ return d
+
+ def sendBuilderList(self):
+ our_builders = self.botmaster.getBuildersForSlave(self.slavename)
+ blist = [(b.name, b.builddir) for b in our_builders]
+ d = self.slave.callRemote("setBuilderList", blist)
+ return d
+
+ def perspective_keepalive(self):
+ pass
+
+ def addSlaveBuilder(self, sb):
+ if sb.builder_name not in self.slavebuilders:
+ log.msg("%s adding %s" % (self, sb))
+ elif sb is not self.slavebuilders[sb.builder_name]:
+ log.msg("%s replacing %s" % (self, sb))
+ else:
+ return
+ self.slavebuilders[sb.builder_name] = sb
+
+ def removeSlaveBuilder(self, sb):
+ try:
+ del self.slavebuilders[sb.builder_name]
+ except KeyError:
+ pass
+ else:
+ log.msg("%s removed %s" % (self, sb))
+
+ def canStartBuild(self):
+ """
+ I am called when a build is requested to see if this buildslave
+ can start a build. This function can be used to limit overall
+ concurrency on the buildslave.
+ """
+ # If we're waiting to shutdown gracefully, then we shouldn't
+ # accept any new jobs.
+ if self.slave_status.getGraceful():
+ return False
+
+ if self.max_builds:
+ active_builders = [sb for sb in self.slavebuilders.values()
+ if sb.isBusy()]
+ if len(active_builders) >= self.max_builds:
+ return False
+ return True
+
+ def _mail_missing_message(self, subject, text):
+ # first, see if we have a MailNotifier we can use. This gives us a
+ # fromaddr and a relayhost.
+ buildmaster = self.botmaster.parent
+ for st in buildmaster.statusTargets:
+ if isinstance(st, MailNotifier):
+ break
+ else:
+ # if not, they get a default MailNotifier, which always uses SMTP
+ # to localhost and uses a dummy fromaddr of "buildbot".
+ log.msg("buildslave-missing msg using default MailNotifier")
+ st = MailNotifier("buildbot")
+ # now construct the mail
+
+ m = Message()
+ m.set_payload(text)
+ m['Date'] = formatdate(localtime=True)
+ m['Subject'] = subject
+ m['From'] = st.fromaddr
+ recipients = self.notify_on_missing
+ m['To'] = ", ".join(recipients)
+ d = st.sendMessage(m, recipients)
+ # return the Deferred for testing purposes
+ return d
+
+ def _gracefulChanged(self, graceful):
+ """This is called when our graceful shutdown setting changes"""
+ if graceful:
+ active_builders = [sb for sb in self.slavebuilders.values()
+ if sb.isBusy()]
+ if len(active_builders) == 0:
+ # Shut down!
+ self.shutdown()
+
+ def shutdown(self):
+ """Shutdown the slave"""
+ # Look for a builder with a remote reference to the client side
+ # slave. If we can find one, then call "shutdown" on the remote
+ # builder, which will cause the slave buildbot process to exit.
+ d = None
+ for b in self.slavebuilders.values():
+ if b.remote:
+ d = b.remote.callRemote("shutdown")
+ break
+
+ if d:
+ log.msg("Shutting down slave: %s" % self.slavename)
+ # The remote shutdown call will not complete successfully since the
+ # buildbot process exits almost immediately after getting the
+ # shutdown request.
+ # Here we look at the reason why the remote call failed, and if
+ # it's because the connection was lost, that means the slave
+ # shutdown as expected.
+ def _errback(why):
+ if why.check(twisted.spread.pb.PBConnectionLost):
+ log.msg("Lost connection to %s" % self.slavename)
+ else:
+ log.err("Unexpected error when trying to shutdown %s" % self.slavename)
+ d.addErrback(_errback)
+ return d
+ log.err("Couldn't find remote builder to shut down slave")
+ return defer.succeed(None)
+
+class BuildSlave(AbstractBuildSlave):
+
+ def sendBuilderList(self):
+ d = AbstractBuildSlave.sendBuilderList(self)
+ def _sent(slist):
+ dl = []
+ for name, remote in slist.items():
+ # use get() since we might have changed our mind since then
+ b = self.botmaster.builders.get(name)
+ if b:
+ d1 = b.attached(self, remote, self.slave_commands)
+ dl.append(d1)
+ return defer.DeferredList(dl)
+ def _set_failed(why):
+ log.msg("BuildSlave.sendBuilderList (%s) failed" % self)
+ log.err(why)
+ # TODO: hang up on them?, without setBuilderList we can't use
+ # them
+ d.addCallbacks(_sent, _set_failed)
+ return d
+
+ def detached(self, mind):
+ AbstractBuildSlave.detached(self, mind)
+ self.botmaster.slaveLost(self)
+ self.startMissingTimer()
+
+ def buildFinished(self, sb):
+ """This is called when a build on this slave is finished."""
+ # If we're gracefully shutting down, and we have no more active
+ # builders, then it's safe to disconnect
+ if self.slave_status.getGraceful():
+ active_builders = [sb for sb in self.slavebuilders.values()
+ if sb.isBusy()]
+ if len(active_builders) == 0:
+ # Shut down!
+ return self.shutdown()
+ return defer.succeed(None)
+
+class AbstractLatentBuildSlave(AbstractBuildSlave):
+ """A build slave that will start up a slave instance when needed.
+
+ To use, subclass and implement start_instance and stop_instance.
+
+ See ec2buildslave.py for a concrete example. Also see the stub example in
+ test/test_slaves.py.
+ """
+
+ implements(ILatentBuildSlave)
+
+ substantiated = False
+ substantiation_deferred = None
+ build_wait_timer = None
+ _start_result = _shutdown_callback_handle = None
+
+ def __init__(self, name, password, max_builds=None,
+ notify_on_missing=[], missing_timeout=60*20,
+ build_wait_timeout=60*10,
+ properties={}):
+ AbstractBuildSlave.__init__(
+ self, name, password, max_builds, notify_on_missing,
+ missing_timeout, properties)
+ self.building = set()
+ self.build_wait_timeout = build_wait_timeout
+
+ def start_instance(self):
+ # responsible for starting instance that will try to connect with
+ # this master. Should return deferred. Problems should use an
+ # errback.
+ raise NotImplementedError
+
+ def stop_instance(self, fast=False):
+ # responsible for shutting down instance.
+ raise NotImplementedError
+
+ def substantiate(self, sb):
+ if self.substantiated:
+ self._clearBuildWaitTimer()
+ self._setBuildWaitTimer()
+ return defer.succeed(self)
+ if self.substantiation_deferred is None:
+ if self.parent and not self.missing_timer:
+ # start timer. if timer times out, fail deferred
+ self.missing_timer = reactor.callLater(
+ self.missing_timeout,
+ self._substantiation_failed, defer.TimeoutError())
+ self.substantiation_deferred = defer.Deferred()
+ if self.slave is None:
+ self._substantiate() # start up instance
+ # else: we're waiting for an old one to detach. the _substantiate
+ # will be done in ``detached`` below.
+ return self.substantiation_deferred
+
+ def _substantiate(self):
+ # register event trigger
+ d = self.start_instance()
+ self._shutdown_callback_handle = reactor.addSystemEventTrigger(
+ 'before', 'shutdown', self._soft_disconnect, fast=True)
+ def stash_reply(result):
+ self._start_result = result
+ def clean_up(failure):
+ if self.missing_timer is not None:
+ self.missing_timer.cancel()
+ self._substantiation_failed(failure)
+ if self._shutdown_callback_handle is not None:
+ handle = self._shutdown_callback_handle
+ del self._shutdown_callback_handle
+ reactor.removeSystemEventTrigger(handle)
+ return failure
+ d.addCallbacks(stash_reply, clean_up)
+ return d
+
+ def attached(self, bot):
+ if self.substantiation_deferred is None:
+ log.msg('Slave %s received connection while not trying to '
+ 'substantiate. Disconnecting.' % (self.slavename,))
+ self._disconnect(bot)
+ return defer.fail()
+ return AbstractBuildSlave.attached(self, bot)
+
+ def detached(self, mind):
+ AbstractBuildSlave.detached(self, mind)
+ if self.substantiation_deferred is not None:
+ self._substantiate()
+
+ def _substantiation_failed(self, failure):
+ d = self.substantiation_deferred
+ self.substantiation_deferred = None
+ self.missing_timer = None
+ d.errback(failure)
+ self.insubstantiate()
+ # notify people, but only if we're still in the config
+ if not self.parent or not self.notify_on_missing:
+ return
+
+        status = self.botmaster.parent.getStatus()
+ text = "The Buildbot working for '%s'\n" % status.getProjectName()
+ text += ("has noticed that the latent buildslave named %s \n" %
+ self.slavename)
+ text += "never substantiated after a request\n"
+ text += "\n"
+ text += ("The request was made at %s (buildmaster-local time)\n" %
+ time.ctime(time.time() - self.missing_timeout)) # approx
+ text += "\n"
+ text += "Sincerely,\n"
+ text += " The Buildbot\n"
+ text += " %s\n" % status.getProjectURL()
+ subject = "Buildbot: buildslave %s never substantiated" % self.slavename
+ return self._mail_missing_message(subject, text)
+
+ def buildStarted(self, sb):
+ assert self.substantiated
+ self._clearBuildWaitTimer()
+ self.building.add(sb.builder_name)
+
+ def buildFinished(self, sb):
+ self.building.remove(sb.builder_name)
+ if not self.building:
+ self._setBuildWaitTimer()
+
+ def _clearBuildWaitTimer(self):
+ if self.build_wait_timer is not None:
+ if self.build_wait_timer.active():
+ self.build_wait_timer.cancel()
+ self.build_wait_timer = None
+
+ def _setBuildWaitTimer(self):
+ self._clearBuildWaitTimer()
+ self.build_wait_timer = reactor.callLater(
+ self.build_wait_timeout, self._soft_disconnect)
+
def insubstantiate(self, fast=False):
    """Shut the latent instance down and reset substantiation state.

    Returns the Deferred from stop_instance().
    """
    self._clearBuildWaitTimer()
    d = self.stop_instance(fast)
    if self._shutdown_callback_handle is not None:
        handle = self._shutdown_callback_handle
        # NOTE(review): 'del' removes only the instance attribute; later
        # 'is not None' checks stay safe only if a class-level default
        # exists -- confirm against the class definition (not visible
        # in this chunk).
        del self._shutdown_callback_handle
        reactor.removeSystemEventTrigger(handle)
    self.substantiated = False
    self.building.clear() # just to be sure
    return d
+
def _soft_disconnect(self, fast=False):
    """Disconnect and insubstantiate without removing the slave from
    its builders; returns a Deferred for the whole teardown."""
    d = AbstractBuildSlave.disconnect(self)
    if self.slave is not None:
        # this could be called when the slave needs to shut down, such as
        # in BotMaster.removeSlave, *or* when a new slave requests a
        # connection when we already have a slave. It's not clear what to
        # do in the second case: this shouldn't happen, and if it
        # does...if it's a latent slave, shutting down will probably kill
        # something we want...but we can't know what the status is. So,
        # here, we just do what should be appropriate for the first case,
        # and put our heads in the sand for the second, at least for now.
        # The best solution to the odd situation is removing it as a
        # possibility: make the master in charge of connecting to the
        # slave, rather than vice versa. TODO.
        d = defer.DeferredList([d, self.insubstantiate(fast)])
    else:
        if self.substantiation_deferred is not None:
            # unlike the previous block, we don't expect this situation when
            # ``attached`` calls ``disconnect``, only when we get a simple
            # request to "go away".
            # NOTE(review): errback() is called without an argument, which
            # only works inside an active 'except' block -- confirm this
            # is the intended call path.
            self.substantiation_deferred.errback()
            self.substantiation_deferred = None
            if self.missing_timer:
                self.missing_timer.cancel()
                self.missing_timer = None
        self.stop_instance()
    return d
+
def disconnect(self):
    """Forcibly disconnect the slave and remove it from all builders.

    Returns the soft-disconnect Deferred so callers can wait for the
    teardown to complete.
    """
    d = self._soft_disconnect()
    # this removes the slave from all builders. It won't come back
    # without a restart (or maybe a sighup)
    self.botmaster.slaveLost(self)
    # BUGFIX: 'd' was computed and silently dropped; return it so the
    # caller can synchronize on the disconnect (callers that ignore the
    # return value are unaffected).
    return d
+
def stopService(self):
    """Stop the service, soft-disconnecting any live instance first."""
    res = defer.maybeDeferred(AbstractBuildSlave.stopService, self)
    if self.slave is None:
        return res
    return defer.DeferredList([res, self._soft_disconnect()])
+
def updateSlave(self):
    """Called to add or remove builders after the slave has connected.

    Also called after botmaster's builders are initially set.

    @return: a Deferred that indicates when an attached slave has
    accepted the new builders and/or released the old ones."""
    # register with any builder that doesn't know about us yet; the
    # base-class call then pushes the updated builder list to the slave
    for b in self.botmaster.getBuildersForSlave(self.slavename):
        if b.name not in self.slavebuilders:
            b.addLatentSlave(self)
    return AbstractBuildSlave.updateSlave(self)
+
def sendBuilderList(self):
    """Push the builder list to the slave, attach each slavebuilder,
    then complete the pending substantiation Deferred."""
    d = AbstractBuildSlave.sendBuilderList(self)
    def _sent(slist):
        # attach every (builder, slavebuilder) pair the slave accepted
        dl = []
        for name, remote in slist.items():
            # use get() since we might have changed our mind since then.
            # we're checking on the builder in addition to the
            # slavebuilders out of a bit of paranoia.
            b = self.botmaster.builders.get(name)
            sb = self.slavebuilders.get(name)
            if b and sb:
                d1 = sb.attached(self, remote, self.slave_commands)
                dl.append(d1)
        return defer.DeferredList(dl)
    def _set_failed(why):
        log.msg("BuildSlave.sendBuilderList (%s) failed" % self)
        log.err(why)
        # TODO: hang up on them?, without setBuilderList we can't use
        # them
        if self.substantiation_deferred:
            self.substantiation_deferred.errback()
            self.substantiation_deferred = None
        if self.missing_timer:
            self.missing_timer.cancel()
            self.missing_timer = None
        # TODO: maybe log? send an email?
        return why
    d.addCallbacks(_sent, _set_failed)
    def _substantiated(res):
        # the slave is now fully usable; release whoever requested it
        self.substantiated = True
        if self.substantiation_deferred:
            d = self.substantiation_deferred
            # NOTE(review): 'del' here vs '= None' elsewhere -- safe only
            # if a class-level default exists; confirm.
            del self.substantiation_deferred
            res = self._start_result
            del self._start_result
            d.callback(res)
        # note that the missing_timer is already handled within
        # ``attached``
        if not self.building:
            self._setBuildWaitTimer()
    d.addCallback(_substantiated)
    return d
diff --git a/buildbot/buildbot/changes/__init__.py b/buildbot/buildbot/changes/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/buildbot/buildbot/changes/__init__.py
diff --git a/buildbot/buildbot/changes/base.py b/buildbot/buildbot/changes/base.py
new file mode 100644
index 0000000..72c45bf
--- /dev/null
+++ b/buildbot/buildbot/changes/base.py
@@ -0,0 +1,10 @@
+
+from zope.interface import implements
+from twisted.application import service
+
+from buildbot.interfaces import IChangeSource
+from buildbot import util
+
class ChangeSource(service.Service, util.ComparableMixin):
    # Base class for all change sources: a twisted Service declaring the
    # IChangeSource interface. Concrete pollers/listeners subclass this
    # and deliver Change objects to their parent (the ChangeMaster).
    implements(IChangeSource)
diff --git a/buildbot/buildbot/changes/bonsaipoller.py b/buildbot/buildbot/changes/bonsaipoller.py
new file mode 100644
index 0000000..2e319bb
--- /dev/null
+++ b/buildbot/buildbot/changes/bonsaipoller.py
@@ -0,0 +1,320 @@
+import time
+from xml.dom import minidom
+
+from twisted.python import log, failure
+from twisted.internet import reactor
+from twisted.internet.task import LoopingCall
+from twisted.web.client import getPage
+
+from buildbot.changes import base, changes
+
class InvalidResultError(Exception):
    """Raised when a bonsai query result cannot be parsed."""

    def __init__(self, value="InvalidResultError"):
        # callers read the message back via e.value
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
+
class EmptyResult(Exception):
    # Raised when the bonsai query returned no <ci> nodes at all.
    pass

class NoMoreCiNodes(Exception):
    # Internal iteration sentinel: all <ci> nodes have been consumed.
    pass

class NoMoreFileNodes(Exception):
    # Internal iteration sentinel: all <f> nodes of the current <ci>
    # node have been consumed.
    pass
+
class BonsaiResult:
    """I hold a list of CiNodes"""
    def __init__(self, nodes=None):
        # BUGFIX: 'nodes=[]' was a shared mutable default argument;
        # every BonsaiResult built without arguments aliased one list.
        self.nodes = nodes if nodes is not None else []

    def __cmp__(self, other):
        """cmp-style comparison: 0 when equal, nonzero otherwise."""
        if len(self.nodes) != len(other.nodes):
            # BUGFIX: this used to 'return False', but False == 0, so
            # results of different lengths compared as *equal*.
            return -1
        for i in range(len(self.nodes)):
            if self.nodes[i].log != other.nodes[i].log \
              or self.nodes[i].who != other.nodes[i].who \
              or self.nodes[i].date != other.nodes[i].date \
              or len(self.nodes[i].files) != len(other.nodes[i].files):
                return -1

            for j in range(len(self.nodes[i].files)):
                if self.nodes[i].files[j].revision \
                  != other.nodes[i].files[j].revision \
                  or self.nodes[i].files[j].filename \
                  != other.nodes[i].files[j].filename:
                    return -1

        return 0
+
class CiNode:
    """I hold information about one <ci> node, including a list of files"""
    def __init__(self, log="", who="", date=0, files=None):
        self.log = log
        self.who = who
        self.date = date
        # BUGFIX: 'files=[]' was a shared mutable default argument; all
        # CiNodes built without files aliased one list.
        self.files = files if files is not None else []
+
class FileNode:
    """I hold information about one <f> node"""
    def __init__(self, revision="", filename=""):
        # plain value object: one revision string and one path
        self.revision, self.filename = revision, filename
+
class BonsaiParser:
    """I parse the XML result from a bonsai cvsquery."""

    def __init__(self, data):
        try:
            # this is a fix for non-ascii characters
            # because bonsai does not give us an encoding to work with
            # it impossible to be 100% sure what to decode it as but latin1 covers
            # the broadest base
            data = data.decode("latin1")
            data = data.encode("ascii", "replace")
            self.dom = minidom.parseString(data)
            log.msg(data)
        except Exception:
            # BUGFIX: was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit.
            raise InvalidResultError("Malformed XML in result")

        self.ciNodes = self.dom.getElementsByTagName("ci")
        self.currentCiNode = None # filled in by _nextCiNode()
        self.fileNodes = None # filled in by _nextCiNode()
        self.currentFileNode = None # filled in by _nextFileNode()
        self.bonsaiResult = self._parseData()

    def getData(self):
        """Return the parsed BonsaiResult."""
        return self.bonsaiResult

    def _parseData(self):
        """Returns data from a Bonsai cvsquery in a BonsaiResult object"""
        nodes = []
        try:
            while self._nextCiNode():
                files = []
                try:
                    while self._nextFileNode():
                        files.append(FileNode(self._getRevision(),
                                              self._getFilename()))
                except NoMoreFileNodes:
                    pass
                except InvalidResultError:
                    raise
                cinode = CiNode(self._getLog(), self._getWho(),
                                self._getDate(), files)
                # hack around bonsai xml output bug for empty check-in comments
                if not cinode.log and nodes and \
                   not nodes[-1].log and \
                   cinode.who == nodes[-1].who and \
                   cinode.date == nodes[-1].date:
                    nodes[-1].files += cinode.files
                else:
                    nodes.append(cinode)

        except NoMoreCiNodes:
            pass
        except (InvalidResultError, EmptyResult):
            # BUGFIX: 'except InvalidResultError, EmptyResult:' bound the
            # caught InvalidResultError to the name EmptyResult instead of
            # catching both exception types.
            raise

        return BonsaiResult(nodes)


    def _nextCiNode(self):
        """Iterates to the next <ci> node and fills self.fileNodes with
           child <f> nodes"""
        try:
            self.currentCiNode = self.ciNodes.pop(0)
            if len(self.currentCiNode.getElementsByTagName("files")) > 1:
                raise InvalidResultError("Multiple <files> for one <ci>")

            self.fileNodes = self.currentCiNode.getElementsByTagName("f")
        except IndexError:
            # if there was zero <ci> nodes in the result
            if not self.currentCiNode:
                raise EmptyResult
            else:
                raise NoMoreCiNodes

        return True

    def _nextFileNode(self):
        """Iterates to the next <f> node"""
        try:
            self.currentFileNode = self.fileNodes.pop(0)
        except IndexError:
            raise NoMoreFileNodes

        return True

    def _getLog(self):
        """Returns the log of the current <ci> node"""
        logs = self.currentCiNode.getElementsByTagName("log")
        if len(logs) < 1:
            raise InvalidResultError("No log present")
        elif len(logs) > 1:
            raise InvalidResultError("Multiple logs present")

        # catch empty check-in comments
        if logs[0].firstChild:
            return logs[0].firstChild.data
        return ''

    def _getWho(self):
        """Returns the e-mail address of the committer"""
        # convert unicode string to regular string
        return str(self.currentCiNode.getAttribute("who"))

    def _getDate(self):
        """Returns the date (unix time) of the commit"""
        # convert unicode number to regular one
        try:
            commitDate = int(self.currentCiNode.getAttribute("date"))
        except ValueError:
            raise InvalidResultError

        return commitDate

    def _getFilename(self):
        """Returns the filename of the current <f> node"""
        try:
            filename = self.currentFileNode.firstChild.data
        except AttributeError:
            raise InvalidResultError("Missing filename")

        return filename

    def _getRevision(self):
        """Returns the 'rev' attribute of the current <f> node."""
        return self.currentFileNode.getAttribute("rev")
+
+
+class BonsaiPoller(base.ChangeSource):
+ """This source will poll a bonsai server for changes and submit
+ them to the change master."""
+
+ compare_attrs = ["bonsaiURL", "pollInterval", "tree",
+ "module", "branch", "cvsroot"]
+
+ parent = None # filled in when we're added
+ loop = None
+ volatile = ['loop']
+ working = False
+
+ def __init__(self, bonsaiURL, module, branch, tree="default",
+ cvsroot="/cvsroot", pollInterval=30):
+ """
+ @type bonsaiURL: string
+ @param bonsaiURL: The base URL of the Bonsai server
+ (ie. http://bonsai.mozilla.org)
+ @type module: string
+ @param module: The module to look for changes in. Commonly
+ this is 'all'
+ @type branch: string
+ @param branch: The branch to look for changes in. This must
+ match the
+ 'branch' option for the Scheduler.
+ @type tree: string
+ @param tree: The tree to look for changes in. Commonly this
+ is 'all'
+ @type cvsroot: string
+ @param cvsroot: The cvsroot of the repository. Usually this is
+ '/cvsroot'
+ @type pollInterval: int
+ @param pollInterval: The time (in seconds) between queries for
+ changes
+ """
+
+ self.bonsaiURL = bonsaiURL
+ self.module = module
+ self.branch = branch
+ self.tree = tree
+ self.cvsroot = cvsroot
+ self.pollInterval = pollInterval
+ self.lastChange = time.time()
+ self.lastPoll = time.time()
+
+ def startService(self):
+ self.loop = LoopingCall(self.poll)
+ base.ChangeSource.startService(self)
+
+ reactor.callLater(0, self.loop.start, self.pollInterval)
+
+ def stopService(self):
+ self.loop.stop()
+ return base.ChangeSource.stopService(self)
+
+ def describe(self):
+ str = ""
+ str += "Getting changes from the Bonsai service running at %s " \
+ % self.bonsaiURL
+ str += "<br>Using tree: %s, branch: %s, and module: %s" % (self.tree, \
+ self.branch, self.module)
+ return str
+
+ def poll(self):
+ if self.working:
+ log.msg("Not polling Bonsai because last poll is still working")
+ else:
+ self.working = True
+ d = self._get_changes()
+ d.addCallback(self._process_changes)
+ d.addCallbacks(self._finished_ok, self._finished_failure)
+ return
+
+ def _finished_ok(self, res):
+ assert self.working
+ self.working = False
+
+ # check for failure -- this is probably never hit but the twisted docs
+ # are not clear enough to be sure. it is being kept "just in case"
+ if isinstance(res, failure.Failure):
+ log.msg("Bonsai poll failed: %s" % res)
+ return res
+
+ def _finished_failure(self, res):
+ log.msg("Bonsai poll failed: %s" % res)
+ assert self.working
+ self.working = False
+ return None # eat the failure
+
+ def _make_url(self):
+ args = ["treeid=%s" % self.tree, "module=%s" % self.module,
+ "branch=%s" % self.branch, "branchtype=match",
+ "sortby=Date", "date=explicit",
+ "mindate=%d" % self.lastChange,
+ "maxdate=%d" % int(time.time()),
+ "cvsroot=%s" % self.cvsroot, "xml=1"]
+ # build the bonsai URL
+ url = self.bonsaiURL
+ url += "/cvsquery.cgi?"
+ url += "&".join(args)
+
+ return url
+
+ def _get_changes(self):
+ url = self._make_url()
+ log.msg("Polling Bonsai tree at %s" % url)
+
+ self.lastPoll = time.time()
+ # get the page, in XML format
+ return getPage(url, timeout=self.pollInterval)
+
+ def _process_changes(self, query):
+ try:
+ bp = BonsaiParser(query)
+ result = bp.getData()
+ except InvalidResultError, e:
+ log.msg("Could not process Bonsai query: " + e.value)
+ return
+ except EmptyResult:
+ return
+
+ for cinode in result.nodes:
+ files = [file.filename + ' (revision '+file.revision+')'
+ for file in cinode.files]
+ c = changes.Change(who = cinode.who,
+ files = files,
+ comments = cinode.log,
+ when = cinode.date,
+ branch = self.branch)
+ self.parent.addChange(c)
+ self.lastChange = self.lastPoll
diff --git a/buildbot/buildbot/changes/changes.py b/buildbot/buildbot/changes/changes.py
new file mode 100644
index 0000000..7d399e0
--- /dev/null
+++ b/buildbot/buildbot/changes/changes.py
@@ -0,0 +1,288 @@
+
+import sys, os, time
+from cPickle import dump
+
+from zope.interface import implements
+from twisted.python import log
+from twisted.internet import defer
+from twisted.application import service
+from twisted.web import html
+
+from buildbot import interfaces, util
+
# %-style template used by Change.asHTML(); the keys (who/at/branch/
# revision/files/comments) are filled from the kwargs dict built there.
html_tmpl = """
<p>Changed by: <b>%(who)s</b><br />
Changed at: <b>%(at)s</b><br />
%(branch)s
%(revision)s
<br />

Changed files:
%(files)s

Comments:
%(comments)s
</p>
"""
+
class Change:
    """I represent a single change to the source tree. This may involve
    several files, but they are all changed by the same person, and there is
    a change comment for the group as a whole.

    If the version control system supports sequential repository- (or
    branch-) wide change numbers (like SVN, P4, and Arch), then revision=
    should be set to that number. The highest such number will be used at
    checkout time to get the correct set of files.

    If it does not (like CVS), when= should be set to the timestamp (seconds
    since epoch, as returned by time.time()) when the change was made. when=
    will be filled in for you (to the current time) if you omit it, which is
    suitable for ChangeSources which have no way of getting more accurate
    timestamps.

    Changes should be submitted to ChangeMaster.addChange() in
    chronologically increasing order. Out-of-order changes will probably
    cause the html.Waterfall display to be corrupted."""

    implements(interfaces.IStatusEvent)

    number = None

    # class-level defaults; __init__ always rebinds these per instance
    links = []
    branch = None
    revision = None # used to create a source-stamp

    def __init__(self, who, files, comments, isdir=0, links=None,
                 revision=None, when=None, branch=None, category=None):
        """Record one commit; see the class docstring for the semantics
        of revision= and when=."""
        self.who = who
        self.comments = comments
        self.isdir = isdir
        # BUGFIX: 'links=[]' was a shared mutable default argument, so
        # every Change created without links aliased the same list (and
        # the class attribute above); give each instance its own list.
        self.links = links if links is not None else []
        self.revision = revision
        if when is None:
            when = util.now()
        self.when = when
        self.branch = branch
        self.category = category

        # keep a sorted list of the files, for easier display
        self.files = files[:]
        self.files.sort()

    def asText(self):
        """Plain-text rendering (used for e.g. mail notifications)."""
        data = ""
        data += self.getFileContents()
        data += "At: %s\n" % self.getTime()
        data += "Changed By: %s\n" % self.who
        data += "Comments: %s\n\n" % self.comments
        return data

    def asHTML(self):
        """HTML rendering via the html_tmpl module template."""
        links = []
        for file in self.files:
            # NOTE(review): py2 filter() returns a list; this would need
            # list(filter(...)) under py3.
            link = filter(lambda s: s.find(file) != -1, self.links)
            if len(link) == 1:
                # could get confused
                links.append('<a href="%s"><b>%s</b></a>' % (link[0], file))
            else:
                links.append('<b>%s</b>' % file)
        revision = ""
        if self.revision:
            revision = "Revision: <b>%s</b><br />\n" % self.revision
        branch = ""
        if self.branch:
            branch = "Branch: <b>%s</b><br />\n" % self.branch

        kwargs = { 'who'     : html.escape(self.who),
                   'at'      : self.getTime(),
                   'files'   : html.UL(links) + '\n',
                   'revision': revision,
                   'branch'  : branch,
                   'comments': html.PRE(self.comments) }
        return html_tmpl % kwargs

    def get_HTML_box(self, url):
        """Return the contents of a TD cell for the waterfall display.

        @param url: the URL that points to an HTML page that will render
        using our asHTML method. The Change is free to use this or ignore it
        as it pleases.

        @return: the HTML that will be put inside the table cell. Typically
        this is just a single href named after the author of the change and
        pointing at the passed-in 'url'.
        """
        who = self.getShortAuthor()
        if self.comments is None:
            title = ""
        else:
            title = html.escape(self.comments)
        return '<a href="%s" title="%s">%s</a>' % (url,
                                                   title,
                                                   html.escape(who))

    def getShortAuthor(self):
        return self.who

    def getTime(self):
        """Human-readable local time of the change, or '?' if unknown."""
        if not self.when:
            return "?"
        return time.strftime("%a %d %b %Y %H:%M:%S",
                             time.localtime(self.when))

    def getTimes(self):
        # IStatusEvent: a Change is instantaneous, so no end time
        return (self.when, None)

    def getText(self):
        return [html.escape(self.who)]
    def getLogs(self):
        return {}

    def getFileContents(self):
        """Plain-text list of the changed files/directory."""
        data = ""
        if len(self.files) == 1:
            if self.isdir:
                data += "Directory: %s\n" % self.files[0]
            else:
                data += "File: %s\n" % self.files[0]
        else:
            data += "Files:\n"
            for f in self.files:
                data += " %s\n" % f
        return data
+
class ChangeMaster(service.MultiService):

    """This is the master-side service which receives file change
    notifications from CVS. It keeps a log of these changes, enough to
    provide for the HTML waterfall display, and to tell
    temporarily-disconnected bots what they missed while they were
    offline.

    Change notifications come from two different kinds of sources. The first
    is a PB service (servicename='changemaster', perspectivename='change'),
    which provides a remote method called 'addChange', which should be
    called with a dict that has keys 'filename' and 'comments'.

    The second is a list of objects derived from the ChangeSource class.
    These are added with .addSource(), which also sets the .changemaster
    attribute in the source to point at the ChangeMaster. When the
    application begins, these will be started with .start() . At shutdown
    time, they will be terminated with .stop() . They must be persistable.
    They are expected to call self.changemaster.addChange() with Change
    objects.

    There are several different variants of the second type of source:

     - L{buildbot.changes.mail.MaildirSource} watches a maildir for CVS
       commit mail. It uses DNotify if available, or polls every 10
       seconds if not. It parses incoming mail to determine what files
       were changed.

     - L{buildbot.changes.freshcvs.FreshCVSSource} makes a PB
       connection to the CVSToys 'freshcvs' daemon and relays any
       changes it announces.

    """

    implements(interfaces.IEventSource)

    debug = False
    # todo: use Maildir class to watch for changes arriving by mail

    def __init__(self):
        service.MultiService.__init__(self)
        self.changes = []
        # self.basedir must be filled in by the parent
        self.nextNumber = 1

    def addSource(self, source):
        # attaching the source as a child service ties its start/stop
        # lifecycle to ours
        assert interfaces.IChangeSource.providedBy(source)
        assert service.IService.providedBy(source)
        if self.debug:
            print "ChangeMaster.addSource", source
        source.setServiceParent(self)

    def removeSource(self, source):
        assert source in self
        if self.debug:
            print "ChangeMaster.removeSource", source, source.parent
        d = defer.maybeDeferred(source.disownServiceParent)
        return d

    def addChange(self, change):
        """Deliver a file change event. The event should be a Change object.
        This method will timestamp the object as it is received."""
        log.msg("adding change, who %s, %d files, rev=%s, branch=%s, "
                "comments %s, category %s" % (change.who, len(change.files),
                                              change.revision, change.branch,
                                              change.comments, change.category))
        change.number = self.nextNumber
        self.nextNumber += 1
        self.changes.append(change)
        self.parent.addChange(change)
        # TODO: call pruneChanges after a while

    def pruneChanges(self):
        self.changes = self.changes[-100:] # or something

    def eventGenerator(self, branches=[]):
        # newest-first iteration over stored changes, optionally
        # restricted to the given branches
        for i in range(len(self.changes)-1, -1, -1):
            c = self.changes[i]
            if not branches or c.branch in branches:
                yield c

    def getChangeNumbered(self, num):
        # Change numbers are assigned sequentially, so the wanted entry
        # should sit at a fixed offset from the first stored change.
        if not self.changes:
            return None
        first = self.changes[0].number
        if first + len(self.changes)-1 != self.changes[-1].number:
            log.msg(self,
                    "lost a change somewhere: [0] is %d, [%d] is %d" % \
                    (self.changes[0].number,
                     len(self.changes) - 1,
                     self.changes[-1].number))
            for c in self.changes:
                log.msg("c[%d]: " % c.number, c)
            return None
        offset = num - first
        log.msg(self, "offset", offset)
        return self.changes[offset]

    def __getstate__(self):
        # pickled into changes.pck; parent/children are re-attached on load
        d = service.MultiService.__getstate__(self)
        del d['parent']
        del d['services'] # lose all children
        del d['namedServices']
        return d

    def __setstate__(self, d):
        self.__dict__ = d
        # self.basedir must be set by the parent
        self.services = [] # they'll be repopulated by readConfig
        self.namedServices = {}


    def saveYourself(self):
        # NOTE(review): the temp file handle is never explicitly closed;
        # CPython's refcounting closes it, but this leaks on other
        # runtimes -- consider closing before the rename.
        filename = os.path.join(self.basedir, "changes.pck")
        tmpfilename = filename + ".tmp"
        try:
            dump(self, open(tmpfilename, "wb"))
            if sys.platform == 'win32':
                # windows cannot rename a file on top of an existing one
                if os.path.exists(filename):
                    os.unlink(filename)
            os.rename(tmpfilename, filename)
        except Exception, e:
            log.msg("unable to save changes")
            log.err()

    def stopService(self):
        self.saveYourself()
        return service.MultiService.stopService(self)
+
class TestChangeMaster(ChangeMaster):
    """A ChangeMaster for use in tests that does not save itself"""
    def stopService(self):
        # skip ChangeMaster.stopService's saveYourself() call
        return service.MultiService.stopService(self)
diff --git a/buildbot/buildbot/changes/dnotify.py b/buildbot/buildbot/changes/dnotify.py
new file mode 100644
index 0000000..0674248
--- /dev/null
+++ b/buildbot/buildbot/changes/dnotify.py
@@ -0,0 +1,100 @@
+
+import fcntl, signal, os
+
class DNotify_Handler:
    """Multiplexes the single process-wide SIGIO handler among any
    number of DNotify watchers (one per watched directory fd)."""
    def __init__(self):
        self.watchers = {}   # fd -> watcher object
        self.installed = 0
    def install(self):
        # install the SIGIO handler lazily, on the first watcher
        if self.installed:
            return
        signal.signal(signal.SIGIO, self.fire)
        self.installed = 1
    def uninstall(self):
        if not self.installed:
            return
        signal.signal(signal.SIGIO, signal.SIG_DFL)
        self.installed = 0
    def add(self, watcher):
        self.watchers[watcher.fd] = watcher
        self.install()
    def remove(self, watcher):
        # idiom fix: 'in' instead of the deprecated dict.has_key()
        if watcher.fd in self.watchers:
            del(self.watchers[watcher.fd])
            if not self.watchers:
                self.uninstall()
    def fire(self, signum, frame):
        # this is the signal handler
        # without siginfo_t, we must fire them all
        for watcher in self.watchers.values():
            watcher.callback()
+
class DNotify:
    # Linux dnotify (fcntl F_NOTIFY) wrapper; these flags select which
    # directory events trigger the notification.
    DN_ACCESS = fcntl.DN_ACCESS # a file in the directory was read
    DN_MODIFY = fcntl.DN_MODIFY # a file was modified (write,truncate)
    DN_CREATE = fcntl.DN_CREATE # a file was created
    DN_DELETE = fcntl.DN_DELETE # a file was unlinked
    DN_RENAME = fcntl.DN_RENAME # a file was renamed
    DN_ATTRIB = fcntl.DN_ATTRIB # a file had attributes changed (chmod,chown)

    # one shared DNotify_Handler, boxed in a list so instances mutate the
    # class-level slot rather than rebinding it
    handler = [None]

    def __init__(self, dirname, callback=None,
                 flags=[DN_MODIFY,DN_CREATE,DN_DELETE,DN_RENAME]):

        """This object watches a directory for changes. The .callback
        attribute should be set to a function to be run every time something
        happens to it. Be aware that it will be called more times than you
        expect."""

        if callback:
            self.callback = callback
        else:
            self.callback = self.fire
        self.dirname = dirname
        # OR all requested flags together; DN_MULTISHOT keeps the
        # notification armed after the first event
        self.flags = reduce(lambda x, y: x | y, flags) | fcntl.DN_MULTISHOT
        self.fd = os.open(dirname, os.O_RDONLY)
        # ideally we would move the notification to something like SIGRTMIN,
        # (to free up SIGIO) and use sigaction to have the signal handler
        # receive a structure with the fd number. But python doesn't offer
        # either.
        if not self.handler[0]:
            self.handler[0] = DNotify_Handler()
        self.handler[0].add(self)
        fcntl.fcntl(self.fd, fcntl.F_NOTIFY, self.flags)
    def remove(self):
        # stop watching and release the directory fd
        self.handler[0].remove(self)
        os.close(self.fd)
    def fire(self):
        print self.dirname, "changed!"
+
def test_dnotify1():
    # manual smoke test: watch the cwd and block forever, printing on
    # each change (run by hand, not by any test harness)
    d = DNotify(".")
    while 1:
        signal.pause()
+
def test_dnotify2():
    # create ./foo/, create/delete files in ./ and ./foo/ while this is
    # running. Notice how both notifiers are fired when anything changes;
    # this is an unfortunate side-effect of the lack of extended sigaction
    # support in Python.
    count = [0]
    d1 = DNotify(".")
    def fire1(count=count, d1=d1):
        print "./ changed!", count[0]
        count[0] += 1
        if count[0] > 5:
            d1.remove()
            del(d1)
    # change the callback, since we can't define it until after we have the
    # dnotify object. Hmm, unless we give the dnotify to the callback.
    d1.callback = fire1
    def fire2(): print "foo/ changed!"
    d2 = DNotify("foo", fire2)
    while 1:
        signal.pause()
+
+
+if __name__ == '__main__':
+ test_dnotify2()
+
diff --git a/buildbot/buildbot/changes/freshcvs.py b/buildbot/buildbot/changes/freshcvs.py
new file mode 100644
index 0000000..53a2ac4
--- /dev/null
+++ b/buildbot/buildbot/changes/freshcvs.py
@@ -0,0 +1,144 @@
+
+import os.path
+
+from zope.interface import implements
+from twisted.cred import credentials
+from twisted.spread import pb
+from twisted.application.internet import TCPClient
+from twisted.python import log
+
+import cvstoys.common # to make sure VersionedPatch gets registered
+
+from buildbot.interfaces import IChangeSource
+from buildbot.pbutil import ReconnectingPBClientFactory
+from buildbot.changes.changes import Change
+from buildbot import util
+
class FreshCVSListener(pb.Referenceable):
    # Remote object handed to the freshcvs daemon; the daemon invokes
    # remote_notify for every commit it sees. .source is set by the
    # owning FreshCVSSource.
    def remote_notify(self, root, files, message, user):
        try:
            self.source.notify(root, files, message, user)
        except Exception, e:
            print "notify failed"
            log.err()

    def remote_goodbye(self, message):
        pass
+
class FreshCVSConnectionFactory(ReconnectingPBClientFactory):
    # PB client factory that tracks connection state on the owning
    # source (.source.connected) and reconnects automatically.

    def gotPerspective(self, perspective):
        log.msg("connected to FreshCVS daemon")
        ReconnectingPBClientFactory.gotPerspective(self, perspective)
        self.source.connected = True
        # TODO: freshcvs-1.0.10 doesn't handle setFilter correctly, it will
        # be fixed in the upcoming 1.0.11 . I haven't been able to test it
        # to make sure the failure mode is survivable, so I'll just leave
        # this out for now.
        return
        # NOTE: everything below the 'return' above is intentionally dead
        # code, kept for when setFilter works again.
        if self.source.prefix is not None:
            pathfilter = "^%s" % self.source.prefix
            d = perspective.callRemote("setFilter",
                                       None, pathfilter, None)
            # ignore failures, setFilter didn't work in 1.0.10 and this is
            # just an optimization anyway
            d.addErrback(lambda f: None)

    def clientConnectionLost(self, connector, reason):
        ReconnectingPBClientFactory.clientConnectionLost(self, connector,
                                                         reason)
        self.source.connected = False
+
class FreshCVSSourceNewcred(TCPClient, util.ComparableMixin):
    """This source will connect to a FreshCVS server associated with one or
    more CVS repositories. Each time a change is committed to a repository,
    the server will send us a message describing the change. This message is
    used to build a Change object, which is then submitted to the
    ChangeMaster.

    This class handles freshcvs daemons which use newcred. CVSToys-1.0.9
    does not, later versions might.
    """

    implements(IChangeSource)
    compare_attrs = ["host", "port", "username", "password", "prefix"]

    changemaster = None # filled in when we're added
    connected = False

    def __init__(self, host, port, user, passwd, prefix=None):
        # wire up listener <-> factory <-> this source, then let
        # TCPClient manage the actual connection lifecycle
        self.host = host
        self.port = port
        self.username = user
        self.password = passwd
        if prefix is not None and not prefix.endswith("/"):
            log.msg("WARNING: prefix '%s' should probably end with a slash" \
                    % prefix)
        self.prefix = prefix
        self.listener = l = FreshCVSListener()
        l.source = self
        self.factory = f = FreshCVSConnectionFactory()
        f.source = self
        self.creds = credentials.UsernamePassword(user, passwd)
        f.startLogin(self.creds, client=l)
        TCPClient.__init__(self, host, port, f)

    def __repr__(self):
        return "<FreshCVSSource where=%s, prefix=%s>" % \
               ((self.host, self.port), self.prefix)

    def describe(self):
        # one-line status for the waterfall display
        online = ""
        if not self.connected:
            online = " [OFFLINE]"
        return "freshcvs %s:%s%s" % (self.host, self.port, online)

    def notify(self, root, files, message, user):
        # translate a freshcvs notification into a Change, filtering and
        # stripping self.prefix from each path
        pathnames = []
        isdir = 0
        for f in files:
            if not isinstance(f, (cvstoys.common.VersionedPatch,
                                  cvstoys.common.Directory)):
                continue
            pathname, filename = f.pathname, f.filename
            #r1, r2 = getattr(f, 'r1', None), getattr(f, 'r2', None)
            if isinstance(f, cvstoys.common.Directory):
                isdir = 1
            path = os.path.join(pathname, filename)
            log.msg("FreshCVS notify '%s'" % path)
            if self.prefix:
                if path.startswith(self.prefix):
                    path = path[len(self.prefix):]
                else:
                    continue
            pathnames.append(path)
        if pathnames:
            # now() is close enough: FreshCVS *is* realtime, after all
            when=util.now()
            c = Change(user, pathnames, message, isdir, when=when)
            self.parent.addChange(c)
+
class FreshCVSSourceOldcred(FreshCVSSourceNewcred):
    """This is for older freshcvs daemons (from CVSToys-1.0.9 and earlier).
    """

    def __init__(self, host, port, user, passwd,
                 serviceName="cvstoys.notify", prefix=None):
        # same wiring as the newcred variant, but using the oldcred
        # perspective-broker login path
        self.host = host
        self.port = port
        self.prefix = prefix
        self.listener = l = FreshCVSListener()
        l.source = self
        self.factory = f = FreshCVSConnectionFactory()
        f.source = self
        f.startGettingPerspective(user, passwd, serviceName, client=l)
        TCPClient.__init__(self, host, port, f)

    def __repr__(self):
        return "<FreshCVSSourceOldcred where=%s, prefix=%s>" % \
               ((self.host, self.port), self.prefix)
+
+# this is suitable for CVSToys-1.0.10 and later. If you run CVSToys-1.0.9 or
+# earlier, use FreshCVSSourceOldcred instead.
+FreshCVSSource = FreshCVSSourceNewcred
+
diff --git a/buildbot/buildbot/changes/hgbuildbot.py b/buildbot/buildbot/changes/hgbuildbot.py
new file mode 100644
index 0000000..1f4ed34
--- /dev/null
+++ b/buildbot/buildbot/changes/hgbuildbot.py
@@ -0,0 +1,114 @@
+# hgbuildbot.py - mercurial hooks for buildbot
+#
+# Copyright 2007 Frederic Leroy <fredo@starox.org>
+#
+# This software may be used and distributed according to the terms
+# of the GNU General Public License, incorporated herein by reference.
+
+# hook extension to send change notifications to buildbot when a changeset is
+# brought into the repository from elsewhere.
+#
+# default mode is to use mercurial branch
+#
+# to use, configure hgbuildbot in .hg/hgrc like this:
+#
+# [hooks]
+# changegroup = python:buildbot.changes.hgbuildbot.hook
+#
+# [hgbuildbot]
+# # config items go in here
+#
+# config items:
+#
+# REQUIRED:
+# master = host:port # host to send buildbot changes
+#
+# OPTIONAL:
+# branchtype = inrepo|dirname # dirname: branch = name of directory
+# # containing the repository
+# #
+# # inrepo: branch = mercurial branch
+#
+# branch = branchname # if set, branch is always branchname
+
+import os
+
+from mercurial.i18n import gettext as _
+from mercurial.node import bin, hex, nullid
+from mercurial.context import workingctx
+
+# mercurial's on-demand-importing hacks interfere with the:
+#from zope.interface import Interface
+# that Twisted needs to do, so disable it.
+try:
+ from mercurial import demandimport
+ demandimport.disable()
+except ImportError:
+ pass
+
+from buildbot.clients import sendchange
+from twisted.internet import defer, reactor
+
+
+def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
+    """Mercurial 'changegroup' hook: send one buildbot Change per incoming
+    changeset to the buildmaster named by the [hgbuildbot] master config
+    item (see the module header for the config format).
+
+    Any hook type other than 'changegroup' just logs a message and returns.
+    """
+    # read config parameters
+    master = ui.config('hgbuildbot', 'master')
+    if master:
+        branchtype = ui.config('hgbuildbot', 'branchtype')
+        branch = ui.config('hgbuildbot', 'branch')
+    else:
+        ui.write("* You must add a [hgbuildbot] section to .hg/hgrc in "
+                 "order to use buildbot hook\n")
+        return
+
+    if branch is None:
+        if branchtype is not None:
+            if branchtype == 'dirname':
+                # branch = name of the directory containing the repository
+                branch = os.path.basename(os.getcwd())
+            if branchtype == 'inrepo':
+                # branch = the mercurial in-repo branch name
+                branch = workingctx(repo).branch()
+
+    if hooktype == 'changegroup':
+        s = sendchange.Sender(master, None)
+        d = defer.Deferred()
+        reactor.callLater(0, d.callback, None)
+        # process changesets
+        def _send(res, c):
+            ui.status("rev %s sent\n" % c['revision'])
+            return s.send(c['branch'], c['revision'], c['comments'],
+                          c['files'], c['username'])
+
+        try: # first try Mercurial 1.1+ api
+            start = repo[node].rev()
+            end = len(repo)
+        except TypeError: # else fall back to old api
+            start = repo.changelog.rev(bin(node))
+            end = repo.changelog.count()
+
+        # a changegroup delivers [node..tip]; queue one send per revision
+        for rev in xrange(start, end):
+            # send changeset
+            node = repo.changelog.node(rev)
+            manifest, user, (time, timezone), files, desc, extra = repo.changelog.read(node)
+            parents = filter(lambda p: not p == nullid, repo.changelog.parents(node))
+            if branchtype == 'inrepo':
+                branch = extra['branch']
+            # merges don't always contain files, but at least one file is required by buildbot
+            if len(parents) > 1 and not files:
+                files = ["merge"]
+            change = {
+                'master': master,
+                'username': user,
+                'revision': hex(node),
+                'comments': desc,
+                'files': files,
+                'branch': branch
+            }
+            d.addCallback(_send, change)
+
+        d.addCallbacks(s.printSuccess, s.printFailure)
+        d.addBoth(s.stop)
+        s.run()
+    else:
+        ui.status(_('hgbuildbot: hook %s not supported\n') % hooktype)
+        return
+
diff --git a/buildbot/buildbot/changes/mail.py b/buildbot/buildbot/changes/mail.py
new file mode 100644
index 0000000..7d86d47
--- /dev/null
+++ b/buildbot/buildbot/changes/mail.py
@@ -0,0 +1,458 @@
+# -*- test-case-name: buildbot.test.test_mailparse -*-
+
+"""
+Parse various kinds of 'CVS notify' email.
+"""
+import os, re
+from email import message_from_file
+from email.Utils import parseaddr
+from email.Iterators import body_line_iterator
+
+from zope.interface import implements
+from twisted.python import log
+from buildbot import util
+from buildbot.interfaces import IChangeSource
+from buildbot.changes import changes
+from buildbot.changes.maildir import MaildirService
+
+class MaildirSource(MaildirService, util.ComparableMixin):
+    """This source will watch a maildir that is subscribed to a FreshCVS
+    change-announcement mailing list.
+    """
+    implements(IChangeSource)
+
+    compare_attrs = ["basedir", "pollinterval"]
+    name = None  # subclasses set a descriptive name used by describe()
+
+    def __init__(self, maildir, prefix=None):
+        """@param maildir: path of the maildir to watch
+        @param prefix: optional pathname prefix; files outside it are
+                       ignored by the subclass parsers, and it is stripped
+                       from reported filenames
+        """
+        MaildirService.__init__(self, maildir)
+        self.prefix = prefix
+        if prefix and not prefix.endswith("/"):
+            # bugfix: the original format string had two %s placeholders
+            # but no arguments, so the literal template was logged; supply
+            # the arguments
+            log.msg("%s: you probably want your prefix=('%s') to end with "
+                    "a slash" % (self, prefix))
+
+    def describe(self):
+        return "%s mailing list in maildir %s" % (self.name, self.basedir)
+
+    def messageReceived(self, filename):
+        """Parse one newly-arrived message, report any Change it yields to
+        our parent, then move the message from new/ to cur/."""
+        path = os.path.join(self.basedir, "new", filename)
+        change = self.parse_file(open(path, "r"), self.prefix)
+        if change:
+            self.parent.addChange(change)
+        os.rename(os.path.join(self.basedir, "new", filename),
+                  os.path.join(self.basedir, "cur", filename))
+
+    def parse_file(self, fd, prefix=None):
+        """Read a message from an open file object and delegate to the
+        subclass-provided parse() method."""
+        m = message_from_file(fd)
+        return self.parse(m, prefix)
+
+class FCMaildirSource(MaildirSource):
+    name = "FreshCVS"
+
+    def parse(self, m, prefix=None):
+        """Parse mail sent by FreshCVS"""
+
+        # FreshCVS sets From: to "user CVS <user>", but the <> part may be
+        # modified by the MTA (to include a local domain)
+        name, addr = parseaddr(m["from"])
+        if not name:
+            return None # no From means this message isn't from FreshCVS
+        cvs = name.find(" CVS")
+        if cvs == -1:
+            return None # this message isn't from FreshCVS
+        who = name[:cvs]
+
+        # we take the time of receipt as the time of checkin. Not correct,
+        # but it avoids the out-of-order-changes issue. See the comment in
+        # parseSyncmail about using the 'Date:' header
+        when = util.now()
+
+        files = []
+        comments = ""
+        isdir = 0
+        lines = list(body_line_iterator(m))
+        # skip the preamble, up to the "Modified files:" section
+        while lines:
+            line = lines.pop(0)
+            if line == "Modified files:\n":
+                break
+        # collect filenames until the blank line that ends the section
+        while lines:
+            line = lines.pop(0)
+            if line == "\n":
+                break
+            line = line.rstrip("\n")
+            linebits = line.split(None, 1)
+            file = linebits[0]
+            if prefix:
+                # insist that the file start with the prefix: FreshCVS sends
+                # changes we don't care about too
+                if file.startswith(prefix):
+                    file = file[len(prefix):]
+                else:
+                    continue
+            # an entry with no version pair, or with "0 0", is treated as a
+            # directory
+            if len(linebits) == 1:
+                isdir = 1
+            elif linebits[1] == "0 0":
+                isdir = 1
+            files.append(file)
+        # skip ahead to the "Log message:" section
+        while lines:
+            line = lines.pop(0)
+            if line == "Log message:\n":
+                break
+        # message is terminated by "ViewCVS links:" or "Index:..." (patch)
+        while lines:
+            line = lines.pop(0)
+            if line == "ViewCVS links:\n":
+                break
+            if line.find("Index: ") == 0:
+                break
+            comments += line
+        comments = comments.rstrip() + "\n"
+
+        if not files:
+            return None
+
+        change = changes.Change(who, files, comments, isdir, when=when)
+
+        return change
+
+class SyncmailMaildirSource(MaildirSource):
+    name = "Syncmail"
+
+    def parse(self, m, prefix=None):
+        """Parse messages sent by the 'syncmail' program, as suggested by the
+        sourceforge.net CVS Admin documentation. Syncmail is maintained at
+        syncmail.sf.net .
+        """
+        # pretty much the same as freshcvs mail, not surprising since CVS is
+        # the one creating most of the text
+
+        # The mail is sent from the person doing the checkin. Assume that the
+        # local username is enough to identify them (this assumes a one-server
+        # cvs-over-rsh environment rather than the server-dirs-shared-over-NFS
+        # model)
+        name, addr = parseaddr(m["from"])
+        if not addr:
+            return None # no From means this message isn't from FreshCVS
+        at = addr.find("@")
+        if at == -1:
+            who = addr # might still be useful
+        else:
+            who = addr[:at]
+
+        # we take the time of receipt as the time of checkin. Not correct (it
+        # depends upon the email latency), but it avoids the
+        # out-of-order-changes issue. Also syncmail doesn't give us anything
+        # better to work with, unless you count pulling the v1-vs-v2
+        # timestamp out of the diffs, which would be ugly. TODO: Pulling the
+        # 'Date:' header from the mail is a possibility, and
+        # email.Utils.parsedate_tz may be useful. It should be configurable,
+        # however, because there are a lot of broken clocks out there.
+        when = util.now()
+
+        subject = m["subject"]
+        # syncmail puts the repository-relative directory in the subject:
+        # mprefix + "%(dir)s %(file)s,%(oldversion)s,%(newversion)s", where
+        # 'mprefix' is something that could be added by a mailing list
+        # manager.
+        # this is the only reasonable way to determine the directory name
+        space = subject.find(" ")
+        if space != -1:
+            directory = subject[:space]
+        else:
+            directory = subject
+
+        files = []
+        comments = ""
+        isdir = 0
+        branch = None
+
+        lines = list(body_line_iterator(m))
+        # skip the preamble, up to the file-list section
+        while lines:
+            line = lines.pop(0)
+
+            if (line == "Modified Files:\n" or
+                line == "Added Files:\n" or
+                line == "Removed Files:\n"):
+                break
+
+        # accumulate filenames until the section ends
+        while lines:
+            line = lines.pop(0)
+            if line == "\n":
+                break
+            if line == "Log Message:\n":
+                # put it back so the comment-seeking loop below finds it
+                lines.insert(0, line)
+                break
+            line = line.lstrip()
+            line = line.rstrip()
+            # note: syncmail will send one email per directory involved in a
+            # commit, with multiple files if they were in the same directory.
+            # Unlike freshCVS, it makes no attempt to collect all related
+            # commits into a single message.
+
+            # note: syncmail will report a Tag underneath the ... Files: line
+            # e.g.: Tag: BRANCH-DEVEL
+
+            if line.startswith('Tag:'):
+                branch = line.split(' ')[-1].rstrip()
+                continue
+
+            thesefiles = line.split(" ")
+            for f in thesefiles:
+                f = directory + "/" + f
+                if prefix:
+                    # insist that the file start with the prefix: we may get
+                    # changes we don't care about too
+                    if f.startswith(prefix):
+                        f = f[len(prefix):]
+                    else:
+                        continue
+                        # NOTE(review): this 'break' is unreachable (it
+                        # immediately follows 'continue'); looks like
+                        # leftover code -- confirm before removing
+                        break
+                # TODO: figure out how new directories are described, set
+                # .isdir
+                files.append(f)
+
+        if not files:
+            return None
+
+        # skip ahead to the checkin comment
+        while lines:
+            line = lines.pop(0)
+            if line == "Log Message:\n":
+                break
+        # message is terminated by "Index:..." (patch) or "--- NEW FILE.."
+        # or "--- filename DELETED ---". Sigh.
+        while lines:
+            line = lines.pop(0)
+            if line.find("Index: ") == 0:
+                break
+            if re.search(r"^--- NEW FILE", line):
+                break
+            if re.search(r" DELETED ---$", line):
+                break
+            comments += line
+        comments = comments.rstrip() + "\n"
+
+        change = changes.Change(who, files, comments, isdir, when=when,
+                                branch=branch)
+
+        return change
+
+# Bonsai mail parser by Stephen Davis.
+#
+# This handles changes for CVS repositories that are watched by Bonsai
+# (http://www.mozilla.org/bonsai.html)
+
+# A Bonsai-formatted email message looks like:
+#
+# C|1071099907|stephend|/cvs|Sources/Scripts/buildbot|bonsai.py|1.2|||18|7
+# A|1071099907|stephend|/cvs|Sources/Scripts/buildbot|master.cfg|1.1|||18|7
+# R|1071099907|stephend|/cvs|Sources/Scripts/buildbot|BuildMaster.py|||
+# LOGCOMMENT
+# Updated bonsai parser and switched master config to buildbot-0.4.1 style.
+#
+# :ENDLOGCOMMENT
+#
+# In the first example line, stephend is the user, /cvs the repository,
+# buildbot the directory, bonsai.py the file, 1.2 the revision, no sticky
+# and branch, 18 lines added and 7 removed. All of these fields might not be
+# present (during "removes" for example).
+#
+# There may be multiple "control" lines or even none (imports, directory
+# additions) but there is one email per directory. We only care about actual
+# changes since it is presumed directory additions don't actually affect the
+# build. At least one file should need to change (the makefile, say) to
+# actually make a new directory part of the build process. That's my story
+# and I'm sticking to it.
+
+class BonsaiMaildirSource(MaildirSource):
+    name = "Bonsai"
+
+    def parse(self, m, prefix=None):
+        """Parse mail sent by the Bonsai cvs loginfo script."""
+
+        # we don't care who the email came from b/c the cvs user is in the
+        # msg text
+
+        who = "unknown"
+        timestamp = None
+        files = []
+        lines = list(body_line_iterator(m))
+
+        # read the control lines (what/who/where/file/etc.)
+        while lines:
+            line = lines.pop(0)
+            if line == "LOGCOMMENT\n":
+                break;
+            line = line.rstrip("\n")
+
+            # we'd like to do the following but it won't work if the number of
+            # items doesn't match so...
+            # what, timestamp, user, repo, module, file = line.split( '|' )
+            items = line.split('|')
+            if len(items) < 6:
+                # not a valid line, assume this isn't a bonsai message
+                return None
+
+            try:
+                # just grab the bottom-most timestamp, they're probably all the
+                # same. TODO: I'm assuming this is relative to the epoch, but
+                # this needs testing.
+                timestamp = int(items[1])
+            except ValueError:
+                pass
+
+            user = items[2]
+            if user:
+                who = user
+
+            module = items[4]
+            file = items[5]
+            if module and file:
+                path = "%s/%s" % (module, file)
+                files.append(path)
+            # NOTE(review): the length check above only guarantees 6 fields,
+            # so items[7]/items[8] may raise IndexError on short control
+            # lines (e.g. removals) -- confirm against real bonsai mail
+            sticky = items[7]
+            branch = items[8]
+
+        # if no files changed, return nothing
+        if not files:
+            return None
+
+        # read the comments
+        comments = ""
+        while lines:
+            line = lines.pop(0)
+            if line == ":ENDLOGCOMMENT\n":
+                break
+            comments += line
+        comments = comments.rstrip() + "\n"
+
+        # return buildbot Change object
+        return changes.Change(who, files, comments, when=timestamp,
+                              branch=branch)
+
+# svn "commit-email.pl" handler. The format is very similar to freshcvs mail;
+# here's a sample:
+
+# From: username [at] apache.org [slightly obfuscated to avoid spam here]
+# To: commits [at] spamassassin.apache.org
+# Subject: svn commit: r105955 - in spamassassin/trunk: . lib/Mail
+# ...
+#
+# Author: username
+# Date: Sat Nov 20 00:17:49 2004 [note: TZ = local tz on server!]
+# New Revision: 105955
+#
+# Modified: [also Removed: and Added:]
+# [filename]
+# ...
+# Log:
+# [log message]
+# ...
+#
+#
+# Modified: spamassassin/trunk/lib/Mail/SpamAssassin.pm
+# [unified diff]
+#
+# [end of mail]
+
+class SVNCommitEmailMaildirSource(MaildirSource):
+    name = "SVN commit-email.pl"
+
+    def parse(self, m, prefix=None):
+        """Parse messages sent by the svn 'commit-email.pl' trigger.
+        """
+
+        # The mail is sent from the person doing the checkin. Assume that the
+        # local username is enough to identify them (this assumes a one-server
+        # cvs-over-rsh environment rather than the server-dirs-shared-over-NFS
+        # model)
+        name, addr = parseaddr(m["from"])
+        if not addr:
+            return None # no From means this message isn't from FreshCVS
+        at = addr.find("@")
+        if at == -1:
+            who = addr # might still be useful
+        else:
+            who = addr[:at]
+
+        # we take the time of receipt as the time of checkin. Not correct (it
+        # depends upon the email latency), but it avoids the
+        # out-of-order-changes issue. Also syncmail doesn't give us anything
+        # better to work with, unless you count pulling the v1-vs-v2
+        # timestamp out of the diffs, which would be ugly. TODO: Pulling the
+        # 'Date:' header from the mail is a possibility, and
+        # email.Utils.parsedate_tz may be useful. It should be configurable,
+        # however, because there are a lot of broken clocks out there.
+        when = util.now()
+
+        files = []
+        comments = ""
+        isdir = 0
+        lines = list(body_line_iterator(m))
+        rev = None
+        # header section: pick out the author and the new revision number
+        while lines:
+            line = lines.pop(0)
+
+            # "Author: jmason"
+            match = re.search(r"^Author: (\S+)", line)
+            if match:
+                who = match.group(1)
+
+            # "New Revision: 105955"
+            match = re.search(r"^New Revision: (\d+)", line)
+            if match:
+                rev = match.group(1)
+
+            # possible TODO: use "Date: ..." data here instead of time of
+            # commit message receipt, above. however, this timestamp is
+            # specified *without* a timezone, in the server's local TZ, so to
+            # be accurate buildbot would need a config setting to specify the
+            # source server's expected TZ setting! messy.
+
+            # this stanza ends with the "Log:"
+            if (line == "Log:\n"):
+                break
+
+        # commit message is terminated by the file-listing section
+        while lines:
+            line = lines.pop(0)
+            if (line == "Modified:\n" or
+                line == "Added:\n" or
+                line == "Removed:\n"):
+                break
+            comments += line
+        comments = comments.rstrip() + "\n"
+
+        # file-listing section: filenames until the blank line
+        while lines:
+            line = lines.pop(0)
+            if line == "\n":
+                break
+            if line.find("Modified:\n") == 0:
+                continue # ignore this line
+            if line.find("Added:\n") == 0:
+                continue # ignore this line
+            if line.find("Removed:\n") == 0:
+                continue # ignore this line
+            line = line.strip()
+
+            thesefiles = line.split(" ")
+            for f in thesefiles:
+                if prefix:
+                    # insist that the file start with the prefix: we may get
+                    # changes we don't care about too
+                    if f.startswith(prefix):
+                        f = f[len(prefix):]
+                    else:
+                        log.msg("ignored file from svn commit: prefix '%s' "
+                                "does not match filename '%s'" % (prefix, f))
+                        continue
+
+                # TODO: figure out how new directories are described, set
+                # .isdir
+                files.append(f)
+
+        if not files:
+            log.msg("no matching files found, ignoring commit")
+            return None
+
+        return changes.Change(who, files, comments, when=when, revision=rev)
+
diff --git a/buildbot/buildbot/changes/maildir.py b/buildbot/buildbot/changes/maildir.py
new file mode 100644
index 0000000..2e4a706
--- /dev/null
+++ b/buildbot/buildbot/changes/maildir.py
@@ -0,0 +1,116 @@
+
+# This is a class which watches a maildir for new messages. It uses the
+# linux dirwatcher API (if available) to look for new files. The
+# .messageReceived method is invoked with the filename of the new message,
+# relative to the top of the maildir (so it will look like "new/blahblah").
+
+import os
+from twisted.python import log
+from twisted.application import service, internet
+from twisted.internet import reactor
+dnotify = None
+try:
+    import dnotify
+except Exception:
+    # was a bare "except:"; keep the best-effort fallback to polling
+    # (the dnotify module may presumably fail with more than ImportError
+    # on some platforms -- TODO confirm) but stop swallowing SystemExit
+    # and KeyboardInterrupt.
+    # I'm not actually sure this log message gets recorded
+    log.msg("unable to import dnotify, so Maildir will use polling instead")
+
+class NoSuchMaildir(Exception):
+    """Raised by MaildirService.startService when the configured basedir
+    (or its new/ subdirectory) does not exist."""
+    pass
+
+class MaildirService(service.MultiService):
+    """I watch a maildir for new messages. I should be placed as the service
+    child of some MultiService instance. When running, I use the linux
+    dirwatcher API (if available) or poll for new files in the 'new'
+    subdirectory of my maildir path. When I discover a new message, I invoke
+    my .messageReceived() method with the short filename of the new message,
+    so the full name of the new file can be obtained with
+    os.path.join(maildir, 'new', filename). messageReceived() should be
+    overridden by a subclass to do something useful. I will not move or
+    delete the file on my own: the subclass's messageReceived() should
+    probably do that.
+    """
+    pollinterval = 10  # only used if we don't have DNotify
+
+    def __init__(self, basedir=None):
+        """Create the Maildir watcher. BASEDIR is the maildir directory (the
+        one which contains new/ and tmp/)
+        """
+        service.MultiService.__init__(self)
+        self.basedir = basedir
+        self.files = []      # filenames in new/ already handed to messageReceived
+        self.dnotify = None  # DNotify watcher, when available
+
+    def setBasedir(self, basedir):
+        # some users of MaildirService (scheduler.Try_Jobdir, in particular)
+        # don't know their basedir until setServiceParent, since it is
+        # relative to the buildmaster's basedir. So let them set it late. We
+        # don't actually need it until our own startService.
+        self.basedir = basedir
+
+    def startService(self):
+        """Validate the maildir, then start watching new/: dnotify when the
+        module imported successfully, otherwise a polling TimerService.
+
+        Raises NoSuchMaildir if basedir or basedir/new is missing."""
+        service.MultiService.startService(self)
+        self.newdir = os.path.join(self.basedir, "new")
+        if not os.path.isdir(self.basedir) or not os.path.isdir(self.newdir):
+            raise NoSuchMaildir("invalid maildir '%s'" % self.basedir)
+        try:
+            if dnotify:
+                # we must hold an fd open on the directory, so we can get
+                # notified when it changes.
+                self.dnotify = dnotify.DNotify(self.newdir,
+                                               self.dnotify_callback,
+                                               [dnotify.DNotify.DN_CREATE])
+        except (IOError, OverflowError):
+            # IOError is probably linux<2.4.19, which doesn't support
+            # dnotify. OverflowError will occur on some 64-bit machines
+            # because of a python bug
+            log.msg("DNotify failed, falling back to polling")
+        if not self.dnotify:
+            t = internet.TimerService(self.pollinterval, self.poll)
+            t.setServiceParent(self)
+        # do an initial sweep so pre-existing messages are noticed promptly
+        self.poll()
+
+    def dnotify_callback(self):
+        log.msg("dnotify noticed something, now polling")
+
+        # give it a moment. I found that qmail had problems when the message
+        # was removed from the maildir instantly. It shouldn't, that's what
+        # maildirs are made for. I wasn't able to eyeball any reason for the
+        # problem, and safecat didn't behave the same way, but qmail reports
+        # "Temporary_error_on_maildir_delivery" (qmail-local.c:165,
+        # maildir_child() process exited with rc not in 0,2,3,4). Not sure
+        # why, and I'd have to hack qmail to investigate further, so it's
+        # easier to just wait a second before yanking the message out of new/
+
+        reactor.callLater(0.1, self.poll)
+
+
+    def stopService(self):
+        if self.dnotify:
+            self.dnotify.remove()
+            self.dnotify = None
+        return service.MultiService.stopService(self)
+
+    def poll(self):
+        """Scan new/ and invoke messageReceived() once for each file we
+        have not seen before."""
+        assert self.basedir
+        # forget about any files which have disappeared from new/.
+        # bugfix: the original called self.files.remove(f) while iterating
+        # over self.files, which silently skips the element following each
+        # removed one; rebuild the list instead.
+        self.files = [f for f in self.files
+                      if os.path.isfile(os.path.join(self.newdir, f))]
+        newfiles = []
+        for f in os.listdir(self.newdir):
+            if not f in self.files:
+                newfiles.append(f)
+        self.files.extend(newfiles)
+        # TODO: sort by ctime, then filename, since safecat uses a rather
+        # fine-grained timestamp in the filename
+        for n in newfiles:
+            # TODO: consider catching exceptions in messageReceived
+            self.messageReceived(n)
+
+    def messageReceived(self, filename):
+        """Called when a new file is noticed. Will call
+        self.parent.messageReceived() with a path relative to maildir/new.
+        Should probably be overridden in subclasses."""
+        self.parent.messageReceived(filename)
+
diff --git a/buildbot/buildbot/changes/monotone.py b/buildbot/buildbot/changes/monotone.py
new file mode 100644
index 0000000..302c1c5
--- /dev/null
+++ b/buildbot/buildbot/changes/monotone.py
@@ -0,0 +1,305 @@
+
+import tempfile
+import os
+from cStringIO import StringIO
+
+from twisted.python import log
+from twisted.application import service
+from twisted.internet import defer, protocol, error, reactor
+from twisted.internet.task import LoopingCall
+
+from buildbot import util
+from buildbot.interfaces import IChangeSource
+from buildbot.changes.changes import Change
+
+class _MTProtocol(protocol.ProcessProtocol):
+    """ProcessProtocol that accumulates a monotone command's stdout and,
+    when the process ends, fires the given Deferred with the collected
+    output (on clean exit) or with the failure reason."""
+
+    def __init__(self, deferred, cmdline):
+        self.cmdline = cmdline
+        self.deferred = deferred
+        self.s = StringIO()  # accumulates stdout
+
+    def errReceived(self, text):
+        # stderr is logged but not captured in the result
+        log.msg("stderr: %s" % text)
+
+    def outReceived(self, text):
+        log.msg("stdout: %s" % text)
+        self.s.write(text)
+
+    def processEnded(self, reason):
+        log.msg("Command %r exited with value %s" % (self.cmdline, reason))
+        if isinstance(reason.value, error.ProcessDone):
+            self.deferred.callback(self.s.getvalue())
+        else:
+            self.deferred.errback(reason)
+
+class Monotone:
+    """Thin asynchronous wrapper around the 'monotone' command-line tool.
+    All methods of this class return a Deferred."""
+
+    def __init__(self, bin, db):
+        # bin: path to the monotone executable; db: path to the database
+        self.bin = bin
+        self.db = db
+
+    def _run_monotone(self, args):
+        # spawn "monotone --db=<db> <args...>"; the Deferred fires with the
+        # command's stdout, or errbacks if the process exits uncleanly
+        d = defer.Deferred()
+        cmdline = (self.bin, "--db=" + self.db) + tuple(args)
+        p = _MTProtocol(d, cmdline)
+        log.msg("Running command: %r" % (cmdline,))
+        log.msg("wd: %s" % os.getcwd())
+        reactor.spawnProcess(p, self.bin, cmdline)
+        return d
+
+    def _process_revision_list(self, output):
+        # output is one revision id per line; empty output -> empty list
+        if output:
+            return output.strip().split("\n")
+        else:
+            return []
+
+    def get_interface_version(self):
+        d = self._run_monotone(["automate", "interface_version"])
+        d.addCallback(self._process_interface_version)
+        return d
+
+    def _process_interface_version(self, output):
+        # e.g. "4.1\n" -> (4, 1)
+        return tuple(map(int, output.strip().split(".")))
+
+    def db_init(self):
+        return self._run_monotone(["db", "init"])
+
+    def db_migrate(self):
+        return self._run_monotone(["db", "migrate"])
+
+    def pull(self, server, pattern):
+        return self._run_monotone(["pull", server, pattern])
+
+    def get_revision(self, rid):
+        # full text of the named revision
+        return self._run_monotone(["cat", "revision", rid])
+
+    def get_heads(self, branch, rcfile=""):
+        # heads of 'branch'; an optional rcfile restricts trusted keys
+        cmd = ["automate", "heads", branch]
+        if rcfile:
+            cmd += ["--rcfile=" + rcfile]
+        d = self._run_monotone(cmd)
+        d.addCallback(self._process_revision_list)
+        return d
+
+    def erase_ancestors(self, revs):
+        d = self._run_monotone(["automate", "erase_ancestors"] + revs)
+        d.addCallback(self._process_revision_list)
+        return d
+
+    def ancestry_difference(self, new_rev, old_revs):
+        # revisions reachable from new_rev but not from any of old_revs
+        d = self._run_monotone(["automate", "ancestry_difference", new_rev]
+                               + old_revs)
+        d.addCallback(self._process_revision_list)
+        return d
+
+    def descendents(self, rev):
+        d = self._run_monotone(["automate", "descendents", rev])
+        d.addCallback(self._process_revision_list)
+        return d
+
+    def log(self, rev, depth=None):
+        if depth is not None:
+            depth_arg = ["--last=%i" % (depth,)]
+        else:
+            depth_arg = []
+        return self._run_monotone(["log", "-r", rev] + depth_arg)
+
+
+class MonotoneSource(service.Service, util.ComparableMixin):
+    """This source will poll a monotone server for changes and submit them to
+    the change master.
+
+    @param server_addr: monotone server specification (host:portno)
+
+    @param branch: monotone branch to watch
+
+    @param trusted_keys: list of keys whose code you trust
+
+    @param db_path: path to monotone database to pull into
+
+    @param pollinterval: interval in seconds between polls, defaults to 10 minutes
+    @param monotone_exec: path to monotone executable, defaults to "monotone"
+    """
+
+    __implements__ = IChangeSource, service.Service.__implements__
+    compare_attrs = ["server_addr", "trusted_keys", "db_path",
+                     "pollinterval", "branch", "monotone_exec"]
+
+    parent = None # filled in when we're added
+    # NOTE(review): done_revisions is a class-level mutable list and
+    # _finish_changes() append()s to it, so this state is shared by every
+    # MonotoneSource instance in the process -- looks like it should be
+    # assigned per-instance in __init__; confirm before using multiple
+    # instances
+    done_revisions = []
+    last_revision = None
+    loop = None
+    d = None       # non-None while a poll is in flight (used to skip overlap)
+    tmpfile = None
+    monotone = None
+    volatile = ["loop", "d", "tmpfile", "monotone"]
+
+    def __init__(self, server_addr, branch, trusted_keys, db_path,
+                 pollinterval=60 * 10, monotone_exec="monotone"):
+        self.server_addr = server_addr
+        self.branch = branch
+        self.trusted_keys = trusted_keys
+        self.db_path = db_path
+        self.pollinterval = pollinterval
+        self.monotone_exec = monotone_exec
+        self.monotone = Monotone(self.monotone_exec, self.db_path)
+
+    def startService(self):
+        self.loop = LoopingCall(self.start_poll)
+        self.loop.start(self.pollinterval)
+        service.Service.startService(self)
+
+    def stopService(self):
+        self.loop.stop()
+        return service.Service.stopService(self)
+
+    def describe(self):
+        return "monotone_source %s %s" % (self.server_addr,
+                                          self.branch)
+
+    def start_poll(self):
+        # poll pipeline: ensure db exists -> pull from server -> compute
+        # and submit changes; self.d guards against overlapping polls
+        if self.d is not None:
+            log.msg("last poll still in progress, skipping next poll")
+            return
+        log.msg("starting poll")
+        self.d = self._maybe_init_db()
+        self.d.addCallback(self._do_netsync)
+        self.d.addCallback(self._get_changes)
+        self.d.addErrback(self._handle_error)
+
+    def _handle_error(self, failure):
+        log.err(failure)
+        self.d = None
+
+    def _maybe_init_db(self):
+        if not os.path.exists(self.db_path):
+            log.msg("init'ing db")
+            return self.monotone.db_init()
+        else:
+            log.msg("db already exists, migrating")
+            return self.monotone.db_migrate()
+
+    def _do_netsync(self, output):
+        return self.monotone.pull(self.server_addr, self.branch)
+
+    def _get_changes(self, output):
+        d = self._get_new_head()
+        d.addCallback(self._process_new_head)
+        return d
+
+    def _get_new_head(self):
+        # This function returns a deferred that resolves to a good pick of new
+        # head (or None if there is no good new head.)
+
+        # First need to get all new heads...
+        rcfile = """function get_revision_cert_trust(signers, id, name, val)
+                      local trusted_signers = { %s }
+                      local ts_table = {}
+                      for k, v in pairs(trusted_signers) do ts_table[v] = 1 end
+                      for k, v in pairs(signers) do
+                          if ts_table[v] then
+                              return true
+                          end
+                      end
+                      return false
+                  end
+        """
+        trusted_list = ", ".join(['"' + key + '"' for key in self.trusted_keys])
+        # mktemp is unsafe, but mkstemp is not 2.2 compatible.
+        tmpfile_name = tempfile.mktemp()
+        f = open(tmpfile_name, "w")
+        f.write(rcfile % trusted_list)
+        f.close()
+        d = self.monotone.get_heads(self.branch, tmpfile_name)
+        d.addCallback(self._find_new_head, tmpfile_name)
+        return d
+
+    def _find_new_head(self, new_heads, tmpfile_name):
+        os.unlink(tmpfile_name)
+        # Now get the old head's descendents...
+        if self.last_revision is not None:
+            d = self.monotone.descendents(self.last_revision)
+        else:
+            d = defer.succeed(new_heads)
+        d.addCallback(self._pick_new_head, new_heads)
+        return d
+
+    def _pick_new_head(self, old_head_descendents, new_heads):
+        # prefer a head that descends from the head we used last time
+        for r in new_heads:
+            if r in old_head_descendents:
+                return r
+        return None
+
+    def _process_new_head(self, new_head):
+        if new_head is None:
+            log.msg("No new head")
+            self.d = None
+            return None
+        # Okay, we have a new head; we need to get all the revisions since
+        # then and create change objects for them.
+        # Step 1: simplify set of processed revisions.
+        d = self._simplify_revisions()
+        # Step 2: get the list of new revisions
+        d.addCallback(self._get_new_revisions, new_head)
+        # Step 3: add a change for each
+        d.addCallback(self._add_changes_for_revisions)
+        # Step 4: all done
+        d.addCallback(self._finish_changes, new_head)
+        return d
+
+    def _simplify_revisions(self):
+        d = self.monotone.erase_ancestors(self.done_revisions)
+        d.addCallback(self._reset_done_revisions)
+        return d
+
+    def _reset_done_revisions(self, new_done_revisions):
+        self.done_revisions = new_done_revisions
+        return None
+
+    def _get_new_revisions(self, blah, new_head):
+        if self.done_revisions:
+            return self.monotone.ancestry_difference(new_head,
+                                                     self.done_revisions)
+        else:
+            # Don't force feed the builder with every change since the
+            # beginning of time when it's first started up.
+            return defer.succeed([new_head])
+
+    def _add_changes_for_revisions(self, revs):
+        # chain one _add_change_for_revision per revision, in order
+        d = defer.succeed(None)
+        for rid in revs:
+            d.addCallback(self._add_change_for_revision, rid)
+        return d
+
+    def _add_change_for_revision(self, blah, rid):
+        d = self.monotone.log(rid, 1)
+        d.addCallback(self._add_change_from_log, rid)
+        return d
+
+    def _add_change_from_log(self, log, rid):
+        d = self.monotone.get_revision(rid)
+        d.addCallback(self._add_change_from_log_and_revision, log, rid)
+        return d
+
+    def _add_change_from_log_and_revision(self, revision, log, rid):
+        # Stupid way to pull out everything inside quotes (which currently
+        # uniquely identifies filenames inside a changeset).
+        pieces = revision.split('"')
+        files = []
+        for i in range(len(pieces)):
+            if (i % 2) == 1:
+                files.append(pieces[i])
+        # Also pull out author key and date
+        author = "unknown author"
+        pieces = log.split('\n')
+        for p in pieces:
+            if p.startswith("Author:"):
+                author = p.split()[1]
+        self.parent.addChange(Change(author, files, log, revision=rid))
+
+    def _finish_changes(self, blah, new_head):
+        self.done_revisions.append(new_head)
+        self.last_revision = new_head
+        self.d = None
diff --git a/buildbot/buildbot/changes/p4poller.py b/buildbot/buildbot/changes/p4poller.py
new file mode 100644
index 0000000..a313343
--- /dev/null
+++ b/buildbot/buildbot/changes/p4poller.py
@@ -0,0 +1,207 @@
+# -*- test-case-name: buildbot.test.test_p4poller -*-
+
+# Many thanks to Dave Peticolas for contributing this module
+
+import re
+import time
+
+from twisted.python import log, failure
+from twisted.internet import defer, reactor
+from twisted.internet.utils import getProcessOutput
+from twisted.internet.task import LoopingCall
+
+from buildbot import util
+from buildbot.changes import base, changes
+
def get_simple_split(branchfile):
    """Split 'branchfile' into (branch, filename), treating the first
    path component as the branch name.

    Returns (None, None) when 'branchfile' contains no '/' separator,
    i.e. when there is no branch component at all.
    """
    # One membership test replaces the original find()-then-split() double
    # scan; also avoids shadowing the builtin 'file'.
    if '/' not in branchfile:
        return None, None
    # Split on the first '/' only: everything after it is the
    # branch-relative filename (which may itself contain slashes).
    branch, filename = branchfile.split('/', 1)
    return branch, filename
+
class P4Source(base.ChangeSource, util.ComparableMixin):
    """This source will poll a perforce repository for changes and submit
    them to the change master."""

    compare_attrs = ["p4port", "p4user", "p4passwd", "p4base",
                     "p4bin", "pollinterval"]

    # One line of 'p4 changes' output; captures the changelist number.
    changes_line_re = re.compile(
            r"Change (?P<num>\d+) on \S+ by \S+@\S+ '.+'$")
    # Header line of 'p4 describe -s'; captures submitter and date.
    describe_header_re = re.compile(
            r"Change \d+ by (?P<who>\S+)@\S+ on (?P<when>.+)$")
    # One '... <depotpath>#<rev> <action>' file line from 'p4 describe -s'.
    file_re = re.compile(r"^\.\.\. (?P<path>[^#]+)#\d+ \w+$")
    # Date format emitted by 'p4 describe'.
    datefmt = '%Y/%m/%d %H:%M:%S'

    parent = None # filled in when we're added
    last_change = None  # number of the newest changelist already processed
    loop = None  # LoopingCall driving the periodic polls
    working = False  # True while a poll is in flight (overrun guard)

    def __init__(self, p4port=None, p4user=None, p4passwd=None,
                 p4base='//', p4bin='p4',
                 split_file=lambda branchfile: (None, branchfile),
                 pollinterval=60 * 10, histmax=None):
        """
        @type  p4port: string
        @param p4port: p4 port definition (host:portno)
        @type  p4user: string
        @param p4user: p4 user
        @type  p4passwd: string
        @param p4passwd: p4 passwd
        @type  p4base: string
        @param p4base: p4 file specification to limit a poll to
                       without the trailing '...' (i.e., //)
        @type  p4bin: string
        @param p4bin: path to p4 binary, defaults to just 'p4'
        @type  split_file: func
        @param split_file: splits a filename into branch and filename.
        @type  pollinterval: int
        @param pollinterval: interval in seconds between polls
        @type  histmax: int
        @param histmax: (obsolete) maximum number of changes to look back through.
                        ignored; accepted for backwards compatibility.
        """

        self.p4port = p4port
        self.p4user = p4user
        self.p4passwd = p4passwd
        self.p4base = p4base
        self.p4bin = p4bin
        self.split_file = split_file
        self.pollinterval = pollinterval
        self.loop = LoopingCall(self.checkp4)

    def startService(self):
        """Twisted service hook: schedule the polling loop to begin."""
        base.ChangeSource.startService(self)

        # Don't start the loop just yet because the reactor isn't running.
        # Give it a chance to go and install our SIGCHLD handler before
        # spawning processes.
        reactor.callLater(0, self.loop.start, self.pollinterval)

    def stopService(self):
        """Twisted service hook: stop the polling loop."""
        self.loop.stop()
        return base.ChangeSource.stopService(self)

    def describe(self):
        """One-line human-readable description of this change source."""
        return "p4source %s %s" % (self.p4port, self.p4base)

    def checkp4(self):
        """Run one poll: list new changelists and turn them into Changes.

        If the previous poll has not finished yet, skip this one (and log
        that fact) rather than running two polls concurrently.
        """
        # Our return value is only used for unit testing.
        if self.working:
            log.msg("Skipping checkp4 because last one has not finished")
            return defer.succeed(None)
        else:
            self.working = True
            d = self._get_changes()
            d.addCallback(self._process_changes)
            d.addBoth(self._finished)
            return d

    def _finished(self, res):
        """Clear the 'working' flag; log (and swallow) any Failure."""
        assert self.working
        self.working = False

        # Again, the return value is only for unit testing.
        # If there's a failure, log it so it isn't lost.
        if isinstance(res, failure.Failure):
            log.msg('P4 poll failed: %s' % res)
            return None
        return res

    def _get_changes(self):
        """Spawn 'p4 changes'; returns a Deferred firing with its stdout.

        On the first poll only the single most recent changelist is
        requested (-m 1); afterwards everything after last_change is
        requested via a '@N,now' revision range.
        """
        args = []
        if self.p4port:
            args.extend(['-p', self.p4port])
        if self.p4user:
            args.extend(['-u', self.p4user])
        if self.p4passwd:
            args.extend(['-P', self.p4passwd])
        args.extend(['changes'])
        if self.last_change is not None:
            args.extend(['%s...@%d,now' % (self.p4base, self.last_change+1)])
        else:
            args.extend(['-m', '1', '%s...' % (self.p4base,)])
        # NOTE(review): the subprocess runs with an empty environment --
        # P4CONFIG/P4TICKETS from the daemon's environment are not passed.
        env = {}
        return getProcessOutput(self.p4bin, args, env)

    def _process_changes(self, result):
        """Parse 'p4 changes' output, then describe each new changelist
        sequentially (oldest first).

        On the very first poll this only records the newest changelist
        number and returns [], so startup does not trigger builds for
        ancient history.
        """
        last_change = self.last_change
        changelists = []
        for line in result.split('\n'):
            line = line.strip()
            if not line: continue
            m = self.changes_line_re.match(line)
            assert m, "Unexpected 'p4 changes' output: %r" % result
            num = int(m.group('num'))
            if last_change is None:
                log.msg('P4Poller: starting at change %d' % num)
                self.last_change = num
                return []
            changelists.append(num)
        changelists.reverse() # oldest first

        # Retrieve each sequentially.
        d = defer.succeed(None)
        for c in changelists:
            d.addCallback(self._get_describe, c)
            d.addCallback(self._process_describe, c)
        return d

    def _get_describe(self, dummy, num):
        """Spawn 'p4 describe -s <num>'; returns a Deferred with stdout.
        'dummy' is the ignored result of the previous callback.
        """
        args = []
        if self.p4port:
            args.extend(['-p', self.p4port])
        if self.p4user:
            args.extend(['-u', self.p4user])
        if self.p4passwd:
            args.extend(['-P', self.p4passwd])
        args.extend(['describe', '-s', str(num)])
        env = {}
        d = getProcessOutput(self.p4bin, args, env)
        return d

    def _process_describe(self, result, num):
        """Parse 'p4 describe -s' output and submit one Change per branch.

        Files under p4base are split into (branch, file) via split_file();
        files outside p4base or splitting to (None, None) are ignored.
        Finally records 'num' as the newest processed changelist.
        """
        lines = result.split('\n')
        # SF#1555985: Wade Brainerd reports a stray ^M at the end of the date
        # field. The rstrip() is intended to remove that.
        lines[0] = lines[0].rstrip()
        m = self.describe_header_re.match(lines[0])
        assert m, "Unexpected 'p4 describe -s' result: %r" % result
        who = m.group('who')
        when = time.mktime(time.strptime(m.group('when'), self.datefmt))
        # Everything up to the 'Affected files' marker is the change
        # description.
        comments = ''
        while not lines[0].startswith('Affected files'):
            comments += lines.pop(0) + '\n'
        lines.pop(0) # affected files

        branch_files = {} # dict for branch mapped to file(s)
        while lines:
            line = lines.pop(0).strip()
            if not line: continue
            m = self.file_re.match(line)
            assert m, "Invalid file line: %r" % line
            path = m.group('path')
            if path.startswith(self.p4base):
                branch, file = self.split_file(path[len(self.p4base):])
                if (branch == None and file == None): continue
                if branch_files.has_key(branch):
                    branch_files[branch].append(file)
                else:
                    branch_files[branch] = [file]

        for branch in branch_files:
            c = changes.Change(who=who,
                               files=branch_files[branch],
                               comments=comments,
                               revision=num,
                               when=when,
                               branch=branch)
            self.parent.addChange(c)

        self.last_change = num
diff --git a/buildbot/buildbot/changes/pb.py b/buildbot/buildbot/changes/pb.py
new file mode 100644
index 0000000..91a1a22
--- /dev/null
+++ b/buildbot/buildbot/changes/pb.py
@@ -0,0 +1,108 @@
+# -*- test-case-name: buildbot.test.test_changes -*-
+
+from twisted.python import log
+
+from buildbot.pbutil import NewCredPerspective
+from buildbot.changes import base, changes
+
+class ChangePerspective(NewCredPerspective):
+
+ def __init__(self, changemaster, prefix):
+ self.changemaster = changemaster
+ self.prefix = prefix
+
+ def attached(self, mind):
+ return self
+ def detached(self, mind):
+ pass
+
+ def perspective_addChange(self, changedict):
+ log.msg("perspective_addChange called")
+ pathnames = []
+ prefixpaths = None
+ for path in changedict['files']:
+ if self.prefix:
+ if not path.startswith(self.prefix):
+ # this file does not start with the prefix, so ignore it
+ continue
+ path = path[len(self.prefix):]
+ pathnames.append(path)
+
+ if pathnames:
+ change = changes.Change(changedict['who'],
+ pathnames,
+ changedict['comments'],
+ branch=changedict.get('branch'),
+ revision=changedict.get('revision'),
+ category=changedict.get('category'),
+ )
+ self.changemaster.addChange(change)
+
+class PBChangeSource(base.ChangeSource):
+ compare_attrs = ["user", "passwd", "port", "prefix"]
+
+ def __init__(self, user="change", passwd="changepw", port=None,
+ prefix=None, sep=None):
+ """I listen on a TCP port for Changes from 'buildbot sendchange'.
+
+ I am a ChangeSource which will accept Changes from a remote source. I
+ share a TCP listening port with the buildslaves.
+
+ The 'buildbot sendchange' command, the contrib/svn_buildbot.py tool,
+ and the contrib/bzr_buildbot.py tool know how to send changes to me.
+
+ @type prefix: string (or None)
+ @param prefix: if set, I will ignore any filenames that do not start
+ with this string. Moreover I will remove this string
+ from all filenames before creating the Change object
+ and delivering it to the Schedulers. This is useful
+ for changes coming from version control systems that
+ represent branches as parent directories within the
+ repository (like SVN and Perforce). Use a prefix of
+ 'trunk/' or 'project/branches/foobranch/' to only
+ follow one branch and to get correct tree-relative
+ filenames.
+
+ @param sep: DEPRECATED (with an axe). sep= was removed in
+ buildbot-0.7.4 . Instead of using it, you should use
+ prefix= with a trailing directory separator. This
+ docstring (and the better-than-nothing error message
+ which occurs when you use it) will be removed in 0.7.5 .
+ """
+
+ # sep= was removed in 0.7.4 . This more-helpful-than-nothing error
+ # message will be removed in 0.7.5 .
+ assert sep is None, "prefix= is now a complete string, do not use sep="
+ # TODO: current limitations
+ assert user == "change"
+ assert passwd == "changepw"
+ assert port == None
+ self.user = user
+ self.passwd = passwd
+ self.port = port
+ self.prefix = prefix
+
+ def describe(self):
+ # TODO: when the dispatcher is fixed, report the specific port
+ #d = "PB listener on port %d" % self.port
+ d = "PBChangeSource listener on all-purpose slaveport"
+ if self.prefix is not None:
+ d += " (prefix '%s')" % self.prefix
+ return d
+
+ def startService(self):
+ base.ChangeSource.startService(self)
+ # our parent is the ChangeMaster object
+ # find the master's Dispatch object and register our username
+ # TODO: the passwd should be registered here too
+ master = self.parent.parent
+ master.dispatcher.register(self.user, self)
+
+ def stopService(self):
+ base.ChangeSource.stopService(self)
+ # unregister our username
+ master = self.parent.parent
+ master.dispatcher.unregister(self.user)
+
+ def getPerspective(self):
+ return ChangePerspective(self.parent, self.prefix)
diff --git a/buildbot/buildbot/changes/svnpoller.py b/buildbot/buildbot/changes/svnpoller.py
new file mode 100644
index 0000000..223c8b5
--- /dev/null
+++ b/buildbot/buildbot/changes/svnpoller.py
@@ -0,0 +1,463 @@
+# -*- test-case-name: buildbot.test.test_svnpoller -*-
+
+# Based on the work of Dave Peticolas for the P4poll
+# Changed to svn (using xml.dom.minidom) by Niklaus Giger
+# Hacked beyond recognition by Brian Warner
+
+from twisted.python import log
+from twisted.internet import defer, reactor, utils
+from twisted.internet.task import LoopingCall
+
+from buildbot import util
+from buildbot.changes import base
+from buildbot.changes.changes import Change
+
+import xml.dom.minidom
+
+def _assert(condition, msg):
+ if condition:
+ return True
+ raise AssertionError(msg)
+
def dbgMsg(myString):
    """Log a debugging message via twisted's log and return 1, so the
    call can also be used where an expression is required (e.g. inside
    a boolean context).
    """
    log.msg(myString)
    return 1
+
+# these split_file_* functions are available for use as values to the
+# split_file= argument.
def split_file_alwaystrunk(path):
    """split_file= helper for repositories without branches: every file
    is reported on the default (None) branch with its path unchanged.
    """
    return (None, path)
+
def split_file_branches(path):
    """split_file= helper for the conventional trunk/branches layout.

    'trunk/subdir/file.c'          -> (None, 'subdir/file.c')
    'branches/1.5.x/subdir/file.c' -> ('branches/1.5.x', 'subdir/file.c')
    anything else                  -> None (file is ignored)
    """
    components = path.split('/')
    head = components[0]
    if head == 'trunk':
        return (None, '/'.join(components[1:]))
    if head == 'branches':
        # The branch name is 'branches/<name>': the first two components
        # joined back together.
        return ('/'.join(components[:2]), '/'.join(components[2:]))
    return None
+
+
class SVNPoller(base.ChangeSource, util.ComparableMixin):
    """This source will poll a Subversion repository for changes and submit
    them to the change master."""

    compare_attrs = ["svnurl", "split_file_function",
                     "svnuser", "svnpasswd",
                     "pollinterval", "histmax",
                     "svnbin"]

    parent = None # filled in when we're added
    last_change = None  # newest SVN revision number already processed
    loop = None  # LoopingCall driving the periodic polls
    working = False  # True while a poll is in flight (overrun guard)

    def __init__(self, svnurl, split_file=None,
                 svnuser=None, svnpasswd=None,
                 pollinterval=10*60, histmax=100,
                 svnbin='svn'):
        """
        @type  svnurl: string
        @param svnurl: the SVN URL that describes the repository and
                       subdirectory to watch. If this ChangeSource should
                       only pay attention to a single branch, this should
                       point at the repository for that branch, like
                       svn://svn.twistedmatrix.com/svn/Twisted/trunk . If it
                       should follow multiple branches, point it at the
                       repository directory that contains all the branches
                       like svn://svn.twistedmatrix.com/svn/Twisted and also
                       provide a branch-determining function.

                       Each file in the repository has a SVN URL in the form
                       (SVNURL)/(BRANCH)/(FILEPATH), where (BRANCH) could be
                       empty or not, depending upon your branch-determining
                       function. Only files that start with (SVNURL)/(BRANCH)
                       will be monitored. The Change objects that are sent to
                       the Schedulers will see (FILEPATH) for each modified
                       file.

        @type  split_file: callable or None
        @param split_file: a function that is called with a string of the
                           form (BRANCH)/(FILEPATH) and should return a tuple
                           (BRANCH, FILEPATH). This function should match
                           your repository's branch-naming policy. Each
                           changed file has a fully-qualified URL that can be
                           split into a prefix (which equals the value of the
                           'svnurl' argument) and a suffix; it is this suffix
                           which is passed to the split_file function.

                           If the function returns None, the file is ignored.
                           Use this to indicate that the file is not a part
                           of this project.

                           For example, if your repository puts the trunk in
                           trunk/... and branches are in places like
                           branches/1.5/..., your split_file function could
                           look like the following (this function is
                           available as svnpoller.split_file_branches)::

                            pieces = path.split('/')
                            if pieces[0] == 'trunk':
                                return (None, '/'.join(pieces[1:]))
                            elif pieces[0] == 'branches':
                                return ('/'.join(pieces[0:2]),
                                        '/'.join(pieces[2:]))
                            else:
                                return None

                           If instead your repository layout puts the trunk
                           for ProjectA in trunk/ProjectA/... and the 1.5
                           branch in branches/1.5/ProjectA/..., your
                           split_file function could look like::

                            pieces = path.split('/')
                            if pieces[0] == 'trunk':
                                branch = None
                                pieces.pop(0) # remove 'trunk'
                            elif pieces[0] == 'branches':
                                pieces.pop(0) # remove 'branches'
                                # grab branch name
                                branch = 'branches/' + pieces.pop(0)
                            else:
                                return None # something weird
                            projectname = pieces.pop(0)
                            if projectname != 'ProjectA':
                                return None # wrong project
                            return (branch, '/'.join(pieces))

                           The default of split_file= is None, which
                           indicates that no splitting should be done. This
                           is equivalent to the following function::

                            return (None, path)

                           If you wish, you can override the split_file
                           method with the same sort of function instead of
                           passing in a split_file= argument.


        @type  svnuser: string
        @param svnuser: If set, the --username option will be added to
                        the 'svn log' command. You may need this to get
                        access to a private repository.
        @type  svnpasswd: string
        @param svnpasswd: If set, the --password option will be added.

        @type  pollinterval: int
        @param pollinterval: interval in seconds between polls. The default
                             is 600 seconds (10 minutes). Smaller values
                             decrease the latency between the time a change
                             is recorded and the time the buildbot notices
                             it, but it also increases the system load.

        @type  histmax: int
        @param histmax: maximum number of changes to look back through.
                        The default is 100. Smaller values decrease
                        system load, but if more than histmax changes
                        are recorded between polls, the extra ones will
                        be silently lost.

        @type  svnbin: string
        @param svnbin: path to svn binary, defaults to just 'svn'. Use
                       this if your subversion command lives in an
                       unusual location.
        """

        if svnurl.endswith("/"):
            svnurl = svnurl[:-1] # strip the trailing slash
        self.svnurl = svnurl
        self.split_file_function = split_file or split_file_alwaystrunk
        self.svnuser = svnuser
        self.svnpasswd = svnpasswd

        self.svnbin = svnbin
        self.pollinterval = pollinterval
        self.histmax = histmax
        self._prefix = None  # (PROJECT) part of svnurl; computed lazily
        self.overrun_counter = 0  # how many polls were skipped as overruns
        self.loop = LoopingCall(self.checksvn)

    def split_file(self, path):
        """Split a (BRANCH)/(FILEPATH) string via the configured function."""
        # use getattr() to avoid turning this function into a bound method,
        # which would require it to have an extra 'self' argument
        f = getattr(self, "split_file_function")
        return f(path)

    def startService(self):
        """Twisted service hook: schedule the polling loop to begin."""
        log.msg("SVNPoller(%s) starting" % self.svnurl)
        base.ChangeSource.startService(self)
        # Don't start the loop just yet because the reactor isn't running.
        # Give it a chance to go and install our SIGCHLD handler before
        # spawning processes.
        reactor.callLater(0, self.loop.start, self.pollinterval)

    def stopService(self):
        """Twisted service hook: stop the polling loop."""
        log.msg("SVNPoller(%s) shutting down" % self.svnurl)
        self.loop.stop()
        return base.ChangeSource.stopService(self)

    def describe(self):
        """One-line human-readable description of this change source."""
        return "SVNPoller watching %s" % self.svnurl

    def checksvn(self):
        """Run one poll: fetch 'svn log', parse it, and submit Changes."""
        # Our return value is only used for unit testing.

        # we need to figure out the repository root, so we can figure out
        # repository-relative pathnames later. Each SVNURL is in the form
        # (ROOT)/(PROJECT)/(BRANCH)/(FILEPATH), where (ROOT) is something
        # like svn://svn.twistedmatrix.com/svn/Twisted (i.e. there is a
        # physical repository at /svn/Twisted on that host), (PROJECT) is
        # something like Projects/Twisted (i.e. within the repository's
        # internal namespace, everything under Projects/Twisted/ has
        # something to do with Twisted, but these directory names do not
        # actually appear on the repository host), (BRANCH) is something like
        # "trunk" or "branches/2.0.x", and (FILEPATH) is a tree-relative
        # filename like "twisted/internet/defer.py".

        # our self.svnurl attribute contains (ROOT)/(PROJECT) combined
        # together in a way that we can't separate without svn's help. If the
        # user is not using the split_file= argument, then self.svnurl might
        # be (ROOT)/(PROJECT)/(BRANCH) . In any case, the filenames we will
        # get back from 'svn log' will be of the form
        # (PROJECT)/(BRANCH)/(FILEPATH), but we want to be able to remove
        # that (PROJECT) prefix from them. To do this without requiring the
        # user to tell us how svnurl is split into ROOT and PROJECT, we do an
        # 'svn info --xml' command at startup. This command will include a
        # <root> element that tells us ROOT. We then strip this prefix from
        # self.svnurl to determine PROJECT, and then later we strip the
        # PROJECT prefix from the filenames reported by 'svn log --xml' to
        # get a (BRANCH)/(FILEPATH) that can be passed to split_file() to
        # turn into separate BRANCH and FILEPATH values.

        # whew.

        if self.working:
            log.msg("SVNPoller(%s) overrun: timer fired but the previous "
                    "poll had not yet finished." % self.svnurl)
            self.overrun_counter += 1
            return defer.succeed(None)
        self.working = True

        log.msg("SVNPoller polling")
        if not self._prefix:
            # this sets self._prefix when it finishes. It fires with
            # self._prefix as well, because that makes the unit tests easier
            # to write.
            d = self.get_root()
            d.addCallback(self.determine_prefix)
        else:
            d = defer.succeed(self._prefix)

        d.addCallback(self.get_logs)
        d.addCallback(self.parse_logs)
        d.addCallback(self.get_new_logentries)
        d.addCallback(self.create_changes)
        d.addCallback(self.submit_changes)
        d.addCallbacks(self.finished_ok, self.finished_failure)
        return d

    def getProcessOutput(self, args):
        """Spawn self.svnbin with 'args'; returns a Deferred with stdout."""
        # this exists so we can override it during the unit tests
        d = utils.getProcessOutput(self.svnbin, args, {})
        return d

    def get_root(self):
        """Run 'svn info --xml' on svnurl; returns a Deferred with the XML."""
        args = ["info", "--xml", "--non-interactive", self.svnurl]
        if self.svnuser:
            args.extend(["--username=%s" % self.svnuser])
        if self.svnpasswd:
            args.extend(["--password=%s" % self.svnpasswd])
        d = self.getProcessOutput(args)
        return d

    def determine_prefix(self, output):
        """Parse 'svn info --xml' output and record the (PROJECT) prefix,
        i.e. the part of svnurl below the repository <root>. Fires with
        the prefix (which is also stored as self._prefix).
        """
        try:
            doc = xml.dom.minidom.parseString(output)
        except xml.parsers.expat.ExpatError:
            dbgMsg("_process_changes: ExpatError in %s" % output)
            log.msg("SVNPoller._determine_prefix_2: ExpatError in '%s'"
                    % output)
            raise
        rootnodes = doc.getElementsByTagName("root")
        if not rootnodes:
            # this happens if the URL we gave was already the root. In this
            # case, our prefix is empty.
            self._prefix = ""
            return self._prefix
        rootnode = rootnodes[0]
        root = "".join([c.data for c in rootnode.childNodes])
        # root will be a unicode string
        _assert(self.svnurl.startswith(root),
                "svnurl='%s' doesn't start with <root>='%s'" %
                (self.svnurl, root))
        self._prefix = self.svnurl[len(root):]
        if self._prefix.startswith("/"):
            self._prefix = self._prefix[1:]
        log.msg("SVNPoller: svnurl=%s, root=%s, so prefix=%s" %
                (self.svnurl, root, self._prefix))
        return self._prefix

    def get_logs(self, ignored_prefix=None):
        """Run 'svn log --xml' (limited to histmax entries); returns a
        Deferred firing with the XML output.
        """
        args = []
        args.extend(["log", "--xml", "--verbose", "--non-interactive"])
        if self.svnuser:
            args.extend(["--username=%s" % self.svnuser])
        if self.svnpasswd:
            args.extend(["--password=%s" % self.svnpasswd])
        args.extend(["--limit=%d" % (self.histmax), self.svnurl])
        d = self.getProcessOutput(args)
        return d

    def parse_logs(self, output):
        """Parse the XML output of 'svn log', returning the list of
        <logentry> DOM nodes (newest first, as svn emits them).
        """
        try:
            doc = xml.dom.minidom.parseString(output)
        except xml.parsers.expat.ExpatError:
            dbgMsg("_process_changes: ExpatError in %s" % output)
            log.msg("SVNPoller._parse_changes: ExpatError in '%s'" % output)
            raise
        logentries = doc.getElementsByTagName("logentry")
        return logentries


    def _filter_new_logentries(self, logentries, last_change):
        # given a list of logentries, return a tuple of (new_last_change,
        # new_logentries), where new_logentries contains only the ones after
        # last_change
        if not logentries:
            # no entries, so last_change must stay at None
            return (None, [])

        mostRecent = int(logentries[0].getAttribute("revision"))

        if last_change is None:
            # if this is the first time we've been run, ignore any changes
            # that occurred before now. This prevents a build at every
            # startup.
            log.msg('svnPoller: starting at change %s' % mostRecent)
            return (mostRecent, [])

        if last_change == mostRecent:
            # an unmodified repository will hit this case
            log.msg('svnPoller: _process_changes last %s mostRecent %s' % (
                    last_change, mostRecent))
            return (mostRecent, [])

        new_logentries = []
        for el in logentries:
            if last_change == int(el.getAttribute("revision")):
                break
            new_logentries.append(el)
        new_logentries.reverse() # return oldest first
        return (mostRecent, new_logentries)

    def get_new_logentries(self, logentries):
        """Filter 'logentries' down to those newer than last_change, and
        advance last_change to the newest revision seen.
        """
        last_change = self.last_change
        (new_last_change,
         new_logentries) = self._filter_new_logentries(logentries,
                                                       self.last_change)
        self.last_change = new_last_change
        log.msg('svnPoller: _process_changes %s .. %s' %
                (last_change, new_last_change))
        return new_logentries


    def _get_text(self, element, tag_name):
        """Best-effort extraction of the text of a child element; returns
        '<unknown>' if the tag is missing or unreadable.
        """
        # NOTE(review): the bare except keeps parsing best-effort -- any
        # failure (missing tag, odd DOM shape) yields the placeholder.
        try:
            child_nodes = element.getElementsByTagName(tag_name)[0].childNodes
            text = "".join([t.data for t in child_nodes])
        except:
            text = "<unknown>"
        return text

    def _transform_path(self, path):
        """Strip the (PROJECT) prefix from a repository-relative path and
        split the remainder into (branch, final_path) via split_file().
        Returns None if split_file() rejects the file.
        """
        _assert(path.startswith(self._prefix),
                "filepath '%s' should start with prefix '%s'" %
                (path, self._prefix))
        relative_path = path[len(self._prefix):]
        if relative_path.startswith("/"):
            relative_path = relative_path[1:]
        where = self.split_file(relative_path)
        # 'where' is either None or (branch, final_path)
        return where

    def create_changes(self, new_logentries):
        """Turn <logentry> DOM nodes (oldest first) into Change objects,
        one Change per (revision, branch) pair.
        """
        changes = []

        for el in new_logentries:
            revision = str(el.getAttribute("revision"))
            dbgMsg("Adding change revision %s" % (revision,))
            # TODO: the rest of buildbot may not be ready for unicode 'who'
            # values
            author = self._get_text(el, "author")
            comments = self._get_text(el, "msg")
            # there is a "date" field, but it provides localtime in the
            # repository's timezone, whereas we care about buildmaster's
            # localtime (since this will get used to position the boxes on
            # the Waterfall display, etc). So ignore the date field and use
            # our local clock instead.
            #when = self._get_text(el, "date")
            #when = time.mktime(time.strptime("%.19s" % when,
            #                                 "%Y-%m-%dT%H:%M:%S"))
            branches = {}
            pathlist = el.getElementsByTagName("paths")[0]
            for p in pathlist.getElementsByTagName("path"):
                action = p.getAttribute("action")
                path = "".join([t.data for t in p.childNodes])
                # the rest of buildbot is certainly not yet ready to handle
                # unicode filenames, because they get put in RemoteCommands
                # which get sent via PB to the buildslave, and PB doesn't
                # handle unicode.
                path = path.encode("ascii")
                if path.startswith("/"):
                    path = path[1:]
                where = self._transform_path(path)

                # if 'where' is None, the file was outside any project that
                # we care about and we should ignore it
                if where:
                    branch, filename = where
                    if branch not in branches:
                        branches[branch] = { 'files': []}
                    branches[branch]['files'].append(filename)

                    # remember the first action seen for this branch; it is
                    # used below to detect whole-branch deletions
                    if not branches[branch].has_key('action'):
                        branches[branch]['action'] = action

            for branch in branches.keys():
                action = branches[branch]['action']
                files = branches[branch]['files']
                number_of_files_changed = len(files)

                # a single deleted path with an empty branch-relative name
                # means the branch directory itself was removed
                if action == u'D' and number_of_files_changed == 1 and files[0] == '':
                    log.msg("Ignoring deletion of branch '%s'" % branch)
                else:
                    c = Change(who=author,
                               files=files,
                               comments=comments,
                               revision=revision,
                               branch=branch)
                    changes.append(c)

        return changes

    def submit_changes(self, changes):
        """Hand each Change to our parent ChangeMaster."""
        for c in changes:
            self.parent.addChange(c)

    def finished_ok(self, res):
        """Successful end of a poll: clear the 'working' flag."""
        log.msg("SVNPoller finished polling")
        dbgMsg('_finished : %s' % res)
        assert self.working
        self.working = False
        return res

    def finished_failure(self, f):
        """Failed end of a poll: log, clear 'working', and swallow the
        failure so the LoopingCall keeps running.
        """
        log.msg("SVNPoller failed")
        dbgMsg('_finished : %s' % f)
        assert self.working
        self.working = False
        return None # eat the failure
diff --git a/buildbot/buildbot/clients/__init__.py b/buildbot/buildbot/clients/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/buildbot/buildbot/clients/__init__.py
diff --git a/buildbot/buildbot/clients/base.py b/buildbot/buildbot/clients/base.py
new file mode 100644
index 0000000..6d9e46c
--- /dev/null
+++ b/buildbot/buildbot/clients/base.py
@@ -0,0 +1,125 @@
+
+import sys, re
+
+from twisted.spread import pb
+from twisted.cred import credentials, error
+from twisted.internet import reactor
+
class StatusClient(pb.Referenceable):
    """To use this, call my .connected method with a RemoteReference to the
    buildmaster's StatusClientPerspective object.

    I simply print a line for every status event the buildmaster sends me;
    the level of detail is chosen by the 'events' string.
    """

    def __init__(self, events):
        # events: detail-level string handed to the master's 'subscribe'
        # call (see TextClient docstring for the accepted values)
        self.builders = {}
        self.events = events

    def connected(self, remote):
        # Subscribe for status events, asking for ETA updates every 5
        # seconds.
        print "connected"
        self.remote = remote
        remote.callRemote("subscribe", self.events, 5, self)

    # Each remote_* method below is invoked by the buildmaster over PB when
    # the corresponding status event occurs; this client just prints it.

    def remote_builderAdded(self, buildername, builder):
        print "builderAdded", buildername

    def remote_builderRemoved(self, buildername):
        print "builderRemoved", buildername

    def remote_builderChangedState(self, buildername, state, eta):
        print "builderChangedState", buildername, state, eta

    def remote_buildStarted(self, buildername, build):
        print "buildStarted", buildername

    def remote_buildFinished(self, buildername, build, results):
        print "buildFinished", results

    def remote_buildETAUpdate(self, buildername, build, eta):
        print "ETA", buildername, eta

    def remote_stepStarted(self, buildername, build, stepname, step):
        print "stepStarted", buildername, stepname

    def remote_stepFinished(self, buildername, build, stepname, step, results):
        print "stepFinished", buildername, stepname, results

    def remote_stepETAUpdate(self, buildername, build, stepname, step,
                             eta, expectations):
        print "stepETA", buildername, stepname, eta

    def remote_logStarted(self, buildername, build, stepname, step,
                          logname, log):
        print "logStarted", buildername, stepname

    def remote_logFinished(self, buildername, build, stepname, step,
                           logname, log):
        print "logFinished", buildername, stepname

    def remote_logChunk(self, buildername, build, stepname, step, logname, log,
                        channel, text):
        # channel indexes into the three buildbot log channels
        ChunkTypes = ["STDOUT", "STDERR", "HEADER"]
        print "logChunk[%s]: %s" % (ChunkTypes[channel], text)
+
class TextClient:
    def __init__(self, master, events="steps"):
        """
        @param master: location of the buildmaster's PBListener port, as a
                       'host:port' string
        @type  events: string, one of builders, builds, steps, logs, full
        @param events: specify what level of detail should be reported.
         - 'builders': only announce new/removed Builders
         - 'builds': also announce builderChangedState, buildStarted, and
           buildFinished
         - 'steps': also announce buildETAUpdate, stepStarted, stepFinished
         - 'logs': also announce stepETAUpdate, logStarted, logFinished
         - 'full': also announce log contents
        """
        self.master = master
        self.listener = StatusClient(events)

    def run(self):
        """Start the TextClient."""
        self.startConnecting()
        reactor.run()

    def startConnecting(self):
        """Parse self.master into host/port, open the PB connection, and
        log in with the well-known status-client credentials. Returns the
        login Deferred.
        """
        try:
            host, port = re.search(r'(.+):(\d+)', self.master).groups()
            port = int(port)
        except:
            print "unparseable master location '%s'" % self.master
            print " expecting something more like localhost:8007"
            raise
        cf = pb.PBClientFactory()
        creds = credentials.UsernamePassword("statusClient", "clientpw")
        d = cf.login(creds)
        reactor.connectTCP(host, port, cf)
        d.addCallbacks(self.connected, self.not_connected)
        return d
    def connected(self, ref):
        # Login succeeded: watch for disconnects and subscribe our listener.
        ref.notifyOnDisconnect(self.disconnected)
        self.listener.connected(ref)
    def not_connected(self, why):
        # Login failed: explain the most common mistake (wrong port), then
        # shut down, passing the failure through.
        if why.check(error.UnauthorizedLogin):
            print """
Unable to login.. are you sure we are connecting to a
buildbot.status.client.PBListener port and not to the slaveport?
"""
        reactor.stop()
        return why
    def disconnected(self, ref):
        print "lost connection"
        # we can get here in one of two ways: the buildmaster has
        # disconnected us (probably because it shut itself down), or because
        # we've been SIGINT'ed. In the latter case, our reactor is already
        # shut down, but we have no easy way of detecting that. So protect
        # our attempt to shut down the reactor.
        try:
            reactor.stop()
        except RuntimeError:
            pass
+
if __name__ == '__main__':
    # Usage: base.py [master-host:port]
    master = "localhost:8007"
    if len(sys.argv) > 1:
        master = sys.argv[1]
    # Bug fix: 'master' was parsed from argv but never passed along --
    # TextClient() without arguments raises a TypeError because its
    # 'master' parameter is required.
    c = TextClient(master)
    c.run()
diff --git a/buildbot/buildbot/clients/debug.glade b/buildbot/buildbot/clients/debug.glade
new file mode 100644
index 0000000..40468bb
--- /dev/null
+++ b/buildbot/buildbot/clients/debug.glade
@@ -0,0 +1,684 @@
+<?xml version="1.0" standalone="no"?> <!--*- mode: xml -*-->
+<!DOCTYPE glade-interface SYSTEM "http://glade.gnome.org/glade-2.0.dtd">
+
+<glade-interface>
+<requires lib="gnome"/>
+
+<widget class="GtkWindow" id="window1">
+ <property name="visible">True</property>
+ <property name="title" translatable="yes">Buildbot Debug Tool</property>
+ <property name="type">GTK_WINDOW_TOPLEVEL</property>
+ <property name="window_position">GTK_WIN_POS_NONE</property>
+ <property name="modal">False</property>
+ <property name="resizable">True</property>
+ <property name="destroy_with_parent">False</property>
+ <property name="decorated">True</property>
+ <property name="skip_taskbar_hint">False</property>
+ <property name="skip_pager_hint">False</property>
+ <property name="type_hint">GDK_WINDOW_TYPE_HINT_NORMAL</property>
+ <property name="gravity">GDK_GRAVITY_NORTH_WEST</property>
+ <property name="focus_on_map">True</property>
+ <property name="urgency_hint">False</property>
+
+ <child>
+ <widget class="GtkVBox" id="vbox1">
+ <property name="visible">True</property>
+ <property name="homogeneous">False</property>
+ <property name="spacing">0</property>
+
+ <child>
+ <widget class="GtkHBox" id="connection">
+ <property name="visible">True</property>
+ <property name="homogeneous">False</property>
+ <property name="spacing">0</property>
+
+ <child>
+ <widget class="GtkButton" id="connectbutton">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="label" translatable="yes">Connect</property>
+ <property name="use_underline">True</property>
+ <property name="relief">GTK_RELIEF_NORMAL</property>
+ <property name="focus_on_click">True</property>
+ <signal name="clicked" handler="do_connect"/>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">False</property>
+ <property name="fill">False</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkLabel" id="connectlabel">
+ <property name="visible">True</property>
+ <property name="label" translatable="yes">Disconnected</property>
+ <property name="use_underline">False</property>
+ <property name="use_markup">False</property>
+ <property name="justify">GTK_JUSTIFY_CENTER</property>
+ <property name="wrap">False</property>
+ <property name="selectable">False</property>
+ <property name="xalign">0.5</property>
+ <property name="yalign">0.5</property>
+ <property name="xpad">0</property>
+ <property name="ypad">0</property>
+ <property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
+ <property name="width_chars">-1</property>
+ <property name="single_line_mode">False</property>
+ <property name="angle">0</property>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ </packing>
+ </child>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">False</property>
+ <property name="fill">False</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkHBox" id="commands">
+ <property name="visible">True</property>
+ <property name="homogeneous">False</property>
+ <property name="spacing">0</property>
+
+ <child>
+ <widget class="GtkButton" id="reload">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="label" translatable="yes">Reload .cfg</property>
+ <property name="use_underline">True</property>
+ <property name="relief">GTK_RELIEF_NORMAL</property>
+ <property name="focus_on_click">True</property>
+ <signal name="clicked" handler="do_reload" last_modification_time="Wed, 24 Sep 2003 20:47:55 GMT"/>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">False</property>
+ <property name="fill">False</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkButton" id="rebuild">
+ <property name="visible">True</property>
+ <property name="sensitive">False</property>
+ <property name="can_focus">True</property>
+ <property name="label" translatable="yes">Rebuild .py</property>
+ <property name="use_underline">True</property>
+ <property name="relief">GTK_RELIEF_NORMAL</property>
+ <property name="focus_on_click">True</property>
+ <signal name="clicked" handler="do_rebuild" last_modification_time="Wed, 24 Sep 2003 20:49:18 GMT"/>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">False</property>
+ <property name="fill">False</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkButton" id="button7">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="label" translatable="yes">poke IRC</property>
+ <property name="use_underline">True</property>
+ <property name="relief">GTK_RELIEF_NORMAL</property>
+ <property name="focus_on_click">True</property>
+ <signal name="clicked" handler="do_poke_irc" last_modification_time="Wed, 14 Jan 2004 22:23:59 GMT"/>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">False</property>
+ <property name="fill">False</property>
+ </packing>
+ </child>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkHBox" id="hbox3">
+ <property name="visible">True</property>
+ <property name="homogeneous">False</property>
+ <property name="spacing">0</property>
+
+ <child>
+ <widget class="GtkCheckButton" id="usebranch">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="label" translatable="yes">Branch:</property>
+ <property name="use_underline">True</property>
+ <property name="relief">GTK_RELIEF_NORMAL</property>
+ <property name="focus_on_click">True</property>
+ <property name="active">False</property>
+ <property name="inconsistent">False</property>
+ <property name="draw_indicator">True</property>
+ <signal name="toggled" handler="on_usebranch_toggled" last_modification_time="Tue, 25 Oct 2005 01:42:45 GMT"/>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">False</property>
+ <property name="fill">False</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkEntry" id="branch">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="editable">True</property>
+ <property name="visibility">True</property>
+ <property name="max_length">0</property>
+ <property name="text" translatable="yes"></property>
+ <property name="has_frame">True</property>
+ <property name="invisible_char">*</property>
+ <property name="activates_default">False</property>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ </packing>
+ </child>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkHBox" id="hbox1">
+ <property name="visible">True</property>
+ <property name="homogeneous">False</property>
+ <property name="spacing">0</property>
+
+ <child>
+ <widget class="GtkCheckButton" id="userevision">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="label" translatable="yes">Revision:</property>
+ <property name="use_underline">True</property>
+ <property name="relief">GTK_RELIEF_NORMAL</property>
+ <property name="focus_on_click">True</property>
+ <property name="active">False</property>
+ <property name="inconsistent">False</property>
+ <property name="draw_indicator">True</property>
+ <signal name="toggled" handler="on_userevision_toggled" last_modification_time="Wed, 08 Sep 2004 17:58:33 GMT"/>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">False</property>
+ <property name="fill">False</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkEntry" id="revision">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="editable">True</property>
+ <property name="visibility">True</property>
+ <property name="max_length">0</property>
+ <property name="text" translatable="yes"></property>
+ <property name="has_frame">True</property>
+ <property name="invisible_char">*</property>
+ <property name="activates_default">False</property>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ </packing>
+ </child>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkFrame" id="Commit">
+ <property name="border_width">4</property>
+ <property name="visible">True</property>
+ <property name="label_xalign">0</property>
+ <property name="label_yalign">0.5</property>
+ <property name="shadow_type">GTK_SHADOW_ETCHED_IN</property>
+
+ <child>
+ <widget class="GtkAlignment" id="alignment1">
+ <property name="visible">True</property>
+ <property name="xalign">0.5</property>
+ <property name="yalign">0.5</property>
+ <property name="xscale">1</property>
+ <property name="yscale">1</property>
+ <property name="top_padding">0</property>
+ <property name="bottom_padding">0</property>
+ <property name="left_padding">0</property>
+ <property name="right_padding">0</property>
+
+ <child>
+ <widget class="GtkVBox" id="vbox3">
+ <property name="visible">True</property>
+ <property name="homogeneous">False</property>
+ <property name="spacing">0</property>
+
+ <child>
+ <widget class="GtkHBox" id="commit">
+ <property name="visible">True</property>
+ <property name="homogeneous">False</property>
+ <property name="spacing">0</property>
+
+ <child>
+ <widget class="GtkButton" id="button2">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="label" translatable="yes">commit</property>
+ <property name="use_underline">True</property>
+ <property name="relief">GTK_RELIEF_NORMAL</property>
+ <property name="focus_on_click">True</property>
+ <signal name="clicked" handler="do_commit"/>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">False</property>
+ <property name="fill">False</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkEntry" id="filename">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="editable">True</property>
+ <property name="visibility">True</property>
+ <property name="max_length">0</property>
+ <property name="text" translatable="yes">twisted/internet/app.py</property>
+ <property name="has_frame">True</property>
+ <property name="invisible_char">*</property>
+ <property name="activates_default">False</property>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ </packing>
+ </child>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkHBox" id="hbox2">
+ <property name="visible">True</property>
+ <property name="homogeneous">False</property>
+ <property name="spacing">0</property>
+
+ <child>
+ <widget class="GtkLabel" id="label5">
+ <property name="visible">True</property>
+ <property name="label" translatable="yes">Who: </property>
+ <property name="use_underline">False</property>
+ <property name="use_markup">False</property>
+ <property name="justify">GTK_JUSTIFY_LEFT</property>
+ <property name="wrap">False</property>
+ <property name="selectable">False</property>
+ <property name="xalign">0.5</property>
+ <property name="yalign">0.5</property>
+ <property name="xpad">0</property>
+ <property name="ypad">0</property>
+ <property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
+ <property name="width_chars">-1</property>
+ <property name="single_line_mode">False</property>
+ <property name="angle">0</property>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">False</property>
+ <property name="fill">False</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkEntry" id="who">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="editable">True</property>
+ <property name="visibility">True</property>
+ <property name="max_length">0</property>
+ <property name="text" translatable="yes">bob</property>
+ <property name="has_frame">True</property>
+ <property name="invisible_char">*</property>
+ <property name="activates_default">False</property>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ </packing>
+ </child>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ </packing>
+ </child>
+ </widget>
+ </child>
+ </widget>
+ </child>
+
+ <child>
+ <widget class="GtkLabel" id="label4">
+ <property name="visible">True</property>
+ <property name="label" translatable="yes">Commit</property>
+ <property name="use_underline">False</property>
+ <property name="use_markup">False</property>
+ <property name="justify">GTK_JUSTIFY_LEFT</property>
+ <property name="wrap">False</property>
+ <property name="selectable">False</property>
+ <property name="xalign">0.5</property>
+ <property name="yalign">0.5</property>
+ <property name="xpad">2</property>
+ <property name="ypad">0</property>
+ <property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
+ <property name="width_chars">-1</property>
+ <property name="single_line_mode">False</property>
+ <property name="angle">0</property>
+ </widget>
+ <packing>
+ <property name="type">label_item</property>
+ </packing>
+ </child>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkFrame" id="builderframe">
+ <property name="border_width">4</property>
+ <property name="visible">True</property>
+ <property name="label_xalign">0</property>
+ <property name="label_yalign">0.5</property>
+ <property name="shadow_type">GTK_SHADOW_ETCHED_IN</property>
+
+ <child>
+ <widget class="GtkVBox" id="vbox2">
+ <property name="visible">True</property>
+ <property name="homogeneous">False</property>
+ <property name="spacing">0</property>
+
+ <child>
+ <widget class="GtkHBox" id="builder">
+ <property name="visible">True</property>
+ <property name="homogeneous">False</property>
+ <property name="spacing">3</property>
+
+ <child>
+ <widget class="GtkLabel" id="label1">
+ <property name="visible">True</property>
+ <property name="label" translatable="yes">Builder:</property>
+ <property name="use_underline">False</property>
+ <property name="use_markup">False</property>
+ <property name="justify">GTK_JUSTIFY_CENTER</property>
+ <property name="wrap">False</property>
+ <property name="selectable">False</property>
+ <property name="xalign">0.5</property>
+ <property name="yalign">0.5</property>
+ <property name="xpad">0</property>
+ <property name="ypad">0</property>
+ <property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
+ <property name="width_chars">-1</property>
+ <property name="single_line_mode">False</property>
+ <property name="angle">0</property>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">False</property>
+ <property name="fill">False</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkEntry" id="buildname">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="editable">True</property>
+ <property name="visibility">True</property>
+ <property name="max_length">0</property>
+ <property name="text" translatable="yes">one</property>
+ <property name="has_frame">True</property>
+ <property name="invisible_char">*</property>
+ <property name="activates_default">False</property>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ </packing>
+ </child>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkHBox" id="buildercontrol">
+ <property name="visible">True</property>
+ <property name="homogeneous">False</property>
+ <property name="spacing">0</property>
+
+ <child>
+ <widget class="GtkButton" id="button1">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="label" translatable="yes">Request
+Build</property>
+ <property name="use_underline">True</property>
+ <property name="relief">GTK_RELIEF_NORMAL</property>
+ <property name="focus_on_click">True</property>
+ <signal name="clicked" handler="do_build"/>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">False</property>
+ <property name="fill">False</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkButton" id="button8">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="label" translatable="yes">Ping
+Builder</property>
+ <property name="use_underline">True</property>
+ <property name="relief">GTK_RELIEF_NORMAL</property>
+ <property name="focus_on_click">True</property>
+ <signal name="clicked" handler="do_ping" last_modification_time="Fri, 24 Nov 2006 05:18:51 GMT"/>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">False</property>
+ <property name="fill">False</property>
+ </packing>
+ </child>
+
+ <child>
+ <placeholder/>
+ </child>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkHBox" id="status">
+ <property name="visible">True</property>
+ <property name="homogeneous">False</property>
+ <property name="spacing">0</property>
+
+ <child>
+ <widget class="GtkLabel" id="label2">
+ <property name="visible">True</property>
+ <property name="label" translatable="yes">Currently:</property>
+ <property name="use_underline">False</property>
+ <property name="use_markup">False</property>
+ <property name="justify">GTK_JUSTIFY_CENTER</property>
+ <property name="wrap">False</property>
+ <property name="selectable">False</property>
+ <property name="xalign">0.5</property>
+ <property name="yalign">0.5</property>
+ <property name="xpad">7</property>
+ <property name="ypad">0</property>
+ <property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
+ <property name="width_chars">-1</property>
+ <property name="single_line_mode">False</property>
+ <property name="angle">0</property>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">False</property>
+ <property name="fill">False</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkButton" id="button3">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="label" translatable="yes">offline</property>
+ <property name="use_underline">True</property>
+ <property name="relief">GTK_RELIEF_NORMAL</property>
+ <property name="focus_on_click">True</property>
+ <signal name="clicked" handler="do_current_offline"/>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">False</property>
+ <property name="fill">False</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkButton" id="button4">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="label" translatable="yes">idle</property>
+ <property name="use_underline">True</property>
+ <property name="relief">GTK_RELIEF_NORMAL</property>
+ <property name="focus_on_click">True</property>
+ <signal name="clicked" handler="do_current_idle"/>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">False</property>
+ <property name="fill">False</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkButton" id="button5">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="label" translatable="yes">waiting</property>
+ <property name="use_underline">True</property>
+ <property name="relief">GTK_RELIEF_NORMAL</property>
+ <property name="focus_on_click">True</property>
+ <signal name="clicked" handler="do_current_waiting"/>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">False</property>
+ <property name="fill">False</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkButton" id="button6">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="label" translatable="yes">building</property>
+ <property name="use_underline">True</property>
+ <property name="relief">GTK_RELIEF_NORMAL</property>
+ <property name="focus_on_click">True</property>
+ <signal name="clicked" handler="do_current_building"/>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">False</property>
+ <property name="fill">False</property>
+ </packing>
+ </child>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ </packing>
+ </child>
+ </widget>
+ </child>
+
+ <child>
+ <widget class="GtkLabel" id="label3">
+ <property name="visible">True</property>
+ <property name="label" translatable="yes">Builder</property>
+ <property name="use_underline">False</property>
+ <property name="use_markup">False</property>
+ <property name="justify">GTK_JUSTIFY_LEFT</property>
+ <property name="wrap">False</property>
+ <property name="selectable">False</property>
+ <property name="xalign">0.5</property>
+ <property name="yalign">0.5</property>
+ <property name="xpad">2</property>
+ <property name="ypad">0</property>
+ <property name="ellipsize">PANGO_ELLIPSIZE_NONE</property>
+ <property name="width_chars">-1</property>
+ <property name="single_line_mode">False</property>
+ <property name="angle">0</property>
+ </widget>
+ <packing>
+ <property name="type">label_item</property>
+ </packing>
+ </child>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ </packing>
+ </child>
+ </widget>
+ </child>
+</widget>
+
+</glade-interface>
diff --git a/buildbot/buildbot/clients/debug.py b/buildbot/buildbot/clients/debug.py
new file mode 100644
index 0000000..5413765
--- /dev/null
+++ b/buildbot/buildbot/clients/debug.py
@@ -0,0 +1,181 @@
+
+from twisted.internet import gtk2reactor
+gtk2reactor.install()
+from twisted.internet import reactor
+from twisted.python import util
+from twisted.spread import pb
+from twisted.cred import credentials
+import gtk.glade
+import sys, re
+
+class DebugWidget:
+ def __init__(self, master="localhost:8007", passwd="debugpw"):
+ self.connected = 0
+ try:
+ host, port = re.search(r'(.+):(\d+)', master).groups()
+ except:
+ print "unparseable master location '%s'" % master
+ print " expecting something more like localhost:8007"
+ raise
+ self.host = host
+ self.port = int(port)
+ self.passwd = passwd
+ self.remote = None
+ xml = self.xml = gtk.glade.XML(util.sibpath(__file__, "debug.glade"))
+ g = xml.get_widget
+ self.buildname = g('buildname')
+ self.filename = g('filename')
+ self.connectbutton = g('connectbutton')
+ self.connectlabel = g('connectlabel')
+ g('window1').connect('destroy', lambda win: gtk.main_quit())
+ # put the master info in the window's titlebar
+ g('window1').set_title("Buildbot Debug Tool: %s" % master)
+ c = xml.signal_connect
+ c('do_connect', self.do_connect)
+ c('do_reload', self.do_reload)
+ c('do_rebuild', self.do_rebuild)
+ c('do_poke_irc', self.do_poke_irc)
+ c('do_build', self.do_build)
+ c('do_ping', self.do_ping)
+ c('do_commit', self.do_commit)
+ c('on_usebranch_toggled', self.usebranch_toggled)
+ self.usebranch_toggled(g('usebranch'))
+ c('on_userevision_toggled', self.userevision_toggled)
+ self.userevision_toggled(g('userevision'))
+ c('do_current_offline', self.do_current, "offline")
+ c('do_current_idle', self.do_current, "idle")
+ c('do_current_waiting', self.do_current, "waiting")
+ c('do_current_building', self.do_current, "building")
+
+ def do_connect(self, widget):
+ if self.connected:
+ self.connectlabel.set_text("Disconnecting...")
+ if self.remote:
+ self.remote.broker.transport.loseConnection()
+ else:
+ self.connectlabel.set_text("Connecting...")
+ f = pb.PBClientFactory()
+ creds = credentials.UsernamePassword("debug", self.passwd)
+ d = f.login(creds)
+ reactor.connectTCP(self.host, int(self.port), f)
+ d.addCallbacks(self.connect_complete, self.connect_failed)
+ def connect_complete(self, ref):
+ self.connectbutton.set_label("Disconnect")
+ self.connectlabel.set_text("Connected")
+ self.connected = 1
+ self.remote = ref
+ self.remote.callRemote("print", "hello cleveland")
+ self.remote.notifyOnDisconnect(self.disconnected)
+ def connect_failed(self, why):
+ self.connectlabel.set_text("Failed")
+ print why
+ def disconnected(self, ref):
+ self.connectbutton.set_label("Connect")
+ self.connectlabel.set_text("Disconnected")
+ self.connected = 0
+ self.remote = None
+
+ def do_reload(self, widget):
+ if not self.remote:
+ return
+ d = self.remote.callRemote("reload")
+ d.addErrback(self.err)
+ def do_rebuild(self, widget):
+ print "Not yet implemented"
+ return
+ def do_poke_irc(self, widget):
+ if not self.remote:
+ return
+ d = self.remote.callRemote("pokeIRC")
+ d.addErrback(self.err)
+
+ def do_build(self, widget):
+ if not self.remote:
+ return
+ name = self.buildname.get_text()
+ branch = None
+ if self.xml.get_widget("usebranch").get_active():
+ branch = self.xml.get_widget('branch').get_text()
+ if branch == '':
+ branch = None
+ revision = None
+ if self.xml.get_widget("userevision").get_active():
+ revision = self.xml.get_widget('revision').get_text()
+ if revision == '':
+ revision = None
+ reason = "debugclient 'Request Build' button pushed"
+ properties = {}
+ d = self.remote.callRemote("requestBuild",
+ name, reason, branch, revision, properties)
+ d.addErrback(self.err)
+
+ def do_ping(self, widget):
+ if not self.remote:
+ return
+ name = self.buildname.get_text()
+ d = self.remote.callRemote("pingBuilder", name)
+ d.addErrback(self.err)
+
+ def usebranch_toggled(self, widget):
+ rev = self.xml.get_widget('branch')
+ if widget.get_active():
+ rev.set_sensitive(True)
+ else:
+ rev.set_sensitive(False)
+
+ def userevision_toggled(self, widget):
+ rev = self.xml.get_widget('revision')
+ if widget.get_active():
+ rev.set_sensitive(True)
+ else:
+ rev.set_sensitive(False)
+
+ def do_commit(self, widget):
+ if not self.remote:
+ return
+ filename = self.filename.get_text()
+ who = self.xml.get_widget("who").get_text()
+
+ branch = None
+ if self.xml.get_widget("usebranch").get_active():
+ branch = self.xml.get_widget('branch').get_text()
+ if branch == '':
+ branch = None
+
+ revision = None
+ if self.xml.get_widget("userevision").get_active():
+ revision = self.xml.get_widget('revision').get_text()
+ try:
+ revision = int(revision)
+ except ValueError:
+ pass
+ if revision == '':
+ revision = None
+
+ kwargs = { 'revision': revision, 'who': who }
+ if branch:
+ kwargs['branch'] = branch
+ d = self.remote.callRemote("fakeChange", filename, **kwargs)
+ d.addErrback(self.err)
+
+ def do_current(self, widget, state):
+ if not self.remote:
+ return
+ name = self.buildname.get_text()
+ d = self.remote.callRemote("setCurrentState", name, state)
+ d.addErrback(self.err)
+ def err(self, failure):
+ print "received error:", failure
+
+ def run(self):
+ reactor.run()
+
if __name__ == '__main__':
    # optional argv: [master-location [debug-password]]
    args = sys.argv[1:]
    master = args[0] if len(args) > 0 else "localhost:8007"
    passwd = args[1] if len(args) > 1 else "debugpw"
    d = DebugWidget(master, passwd)
    d.run()
diff --git a/buildbot/buildbot/clients/gtkPanes.py b/buildbot/buildbot/clients/gtkPanes.py
new file mode 100644
index 0000000..8acba1b
--- /dev/null
+++ b/buildbot/buildbot/clients/gtkPanes.py
@@ -0,0 +1,532 @@
+
+from twisted.internet import gtk2reactor
+gtk2reactor.install()
+
+import sys, time
+
+import pygtk
+pygtk.require("2.0")
+import gobject, gtk
+assert(gtk.Window) # in gtk1 it's gtk.GtkWindow
+
+from twisted.spread import pb
+
+#from buildbot.clients.base import Builder, Client
+from buildbot.clients.base import TextClient
+from buildbot.util import now
+
+from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE, EXCEPTION
+
+'''
+class Pane:
+ def __init__(self):
+ pass
+
+class OneRow(Pane):
+ """This is a one-row status bar. It has one square per Builder, and that
+ square is either red, yellow, or green. """
+
+ def __init__(self):
+ Pane.__init__(self)
+ self.widget = gtk.VBox(gtk.FALSE, 2)
+ self.nameBox = gtk.HBox(gtk.TRUE)
+ self.statusBox = gtk.HBox(gtk.TRUE)
+ self.widget.add(self.nameBox)
+ self.widget.add(self.statusBox)
+ self.widget.show_all()
+ self.builders = []
+
+ def getWidget(self):
+ return self.widget
+ def addBuilder(self, builder):
+ print "OneRow.addBuilder"
+ # todo: ordering. Should follow the order in which they were added
+ # to the original BotMaster
+ self.builders.append(builder)
+ # add the name to the left column, and a label (with background) to
+ # the right
+ name = gtk.Label(builder.name)
+ status = gtk.Label('??')
+ status.set_size_request(64,64)
+ box = gtk.EventBox()
+ box.add(status)
+ name.show()
+ box.show_all()
+ self.nameBox.add(name)
+ self.statusBox.add(box)
+ builder.haveSomeWidgets([name, status, box])
+
+class R2Builder(Builder):
+ def start(self):
+ self.nameSquare.set_text(self.name)
+ self.statusSquare.set_text("???")
+ self.subscribe()
+ def haveSomeWidgets(self, widgets):
+ self.nameSquare, self.statusSquare, self.statusBox = widgets
+
+ def remote_newLastBuildStatus(self, event):
+ color = None
+ if event:
+ text = "\n".join(event.text)
+ color = event.color
+ else:
+ text = "none"
+ self.statusSquare.set_text(text)
+ if color:
+ print "color", color
+ self.statusBox.modify_bg(gtk.STATE_NORMAL,
+ gtk.gdk.color_parse(color))
+
+ def remote_currentlyOffline(self):
+ self.statusSquare.set_text("offline")
+ def remote_currentlyIdle(self):
+ self.statusSquare.set_text("idle")
+ def remote_currentlyWaiting(self, seconds):
+ self.statusSquare.set_text("waiting")
+ def remote_currentlyInterlocked(self):
+ self.statusSquare.set_text("interlocked")
+ def remote_currentlyBuilding(self, eta):
+ self.statusSquare.set_text("building")
+
+
+class CompactRow(Pane):
+ def __init__(self):
+ Pane.__init__(self)
+ self.widget = gtk.VBox(gtk.FALSE, 3)
+ self.nameBox = gtk.HBox(gtk.TRUE, 2)
+ self.lastBuildBox = gtk.HBox(gtk.TRUE, 2)
+ self.statusBox = gtk.HBox(gtk.TRUE, 2)
+ self.widget.add(self.nameBox)
+ self.widget.add(self.lastBuildBox)
+ self.widget.add(self.statusBox)
+ self.widget.show_all()
+ self.builders = []
+
+ def getWidget(self):
+ return self.widget
+
+ def addBuilder(self, builder):
+ self.builders.append(builder)
+
+ name = gtk.Label(builder.name)
+ name.show()
+ self.nameBox.add(name)
+
+ last = gtk.Label('??')
+ last.set_size_request(64,64)
+ lastbox = gtk.EventBox()
+ lastbox.add(last)
+ lastbox.show_all()
+ self.lastBuildBox.add(lastbox)
+
+ status = gtk.Label('??')
+ status.set_size_request(64,64)
+ statusbox = gtk.EventBox()
+ statusbox.add(status)
+ statusbox.show_all()
+ self.statusBox.add(statusbox)
+
+ builder.haveSomeWidgets([name, last, lastbox, status, statusbox])
+
+ def removeBuilder(self, name, builder):
+ self.nameBox.remove(builder.nameSquare)
+ self.lastBuildBox.remove(builder.lastBuildBox)
+ self.statusBox.remove(builder.statusBox)
+ self.builders.remove(builder)
+
+class CompactBuilder(Builder):
+ def setup(self):
+ self.timer = None
+ self.text = []
+ self.eta = None
+ def start(self):
+ self.nameSquare.set_text(self.name)
+ self.statusSquare.set_text("???")
+ self.subscribe()
+ def haveSomeWidgets(self, widgets):
+ (self.nameSquare,
+ self.lastBuildSquare, self.lastBuildBox,
+ self.statusSquare, self.statusBox) = widgets
+
+ def remote_currentlyOffline(self):
+ self.eta = None
+ self.stopTimer()
+ self.statusSquare.set_text("offline")
+ self.statusBox.modify_bg(gtk.STATE_NORMAL,
+ gtk.gdk.color_parse("red"))
+ def remote_currentlyIdle(self):
+ self.eta = None
+ self.stopTimer()
+ self.statusSquare.set_text("idle")
+ def remote_currentlyWaiting(self, seconds):
+ self.nextBuild = now() + seconds
+ self.startTimer(self.updateWaiting)
+ def remote_currentlyInterlocked(self):
+ self.stopTimer()
+ self.statusSquare.set_text("interlocked")
+ def startTimer(self, func):
+ # the func must clear self.timer and return gtk.FALSE when the event
+ # has arrived
+ self.stopTimer()
+ self.timer = gtk.timeout_add(1000, func)
+ func()
+ def stopTimer(self):
+ if self.timer:
+ gtk.timeout_remove(self.timer)
+ self.timer = None
+ def updateWaiting(self):
+ when = self.nextBuild
+ if now() < when:
+ next = time.strftime("%H:%M:%S", time.localtime(when))
+ secs = "[%d seconds]" % (when - now())
+ self.statusSquare.set_text("waiting\n%s\n%s" % (next, secs))
+ return gtk.TRUE # restart timer
+ else:
+ # done
+ self.statusSquare.set_text("waiting\n[RSN]")
+ self.timer = None
+ return gtk.FALSE
+
+ def remote_currentlyBuilding(self, eta):
+ self.stopTimer()
+ self.statusSquare.set_text("building")
+ if eta:
+ d = eta.callRemote("subscribe", self, 5)
+
+ def remote_newLastBuildStatus(self, event):
+ color = None
+ if event:
+ text = "\n".join(event.text)
+ color = event.color
+ else:
+ text = "none"
+ if not color: color = "gray"
+ self.lastBuildSquare.set_text(text)
+ self.lastBuildBox.modify_bg(gtk.STATE_NORMAL,
+ gtk.gdk.color_parse(color))
+
+ def remote_newEvent(self, event):
+ assert(event.__class__ == GtkUpdatingEvent)
+ self.current = event
+ event.builder = self
+ self.text = event.text
+ if not self.text: self.text = ["idle"]
+ self.eta = None
+ self.stopTimer()
+ self.updateText()
+ color = event.color
+ if not color: color = "gray"
+ self.statusBox.modify_bg(gtk.STATE_NORMAL,
+ gtk.gdk.color_parse(color))
+
+ def updateCurrent(self):
+ text = self.current.text
+ if text:
+ self.text = text
+ self.updateText()
+ color = self.current.color
+ if color:
+ self.statusBox.modify_bg(gtk.STATE_NORMAL,
+ gtk.gdk.color_parse(color))
+ def updateText(self):
+ etatext = []
+ if self.eta:
+ etatext = [time.strftime("%H:%M:%S", time.localtime(self.eta))]
+ if now() > self.eta:
+ etatext += ["RSN"]
+ else:
+ seconds = self.eta - now()
+ etatext += ["[%d secs]" % seconds]
+ text = "\n".join(self.text + etatext)
+ self.statusSquare.set_text(text)
+ def updateTextTimer(self):
+ self.updateText()
+ return gtk.TRUE # restart timer
+
+ def remote_progress(self, seconds):
+ if seconds == None:
+ self.eta = None
+ else:
+ self.eta = now() + seconds
+ self.startTimer(self.updateTextTimer)
+ self.updateText()
+ def remote_finished(self, eta):
+ self.eta = None
+ self.stopTimer()
+ self.updateText()
+ eta.callRemote("unsubscribe", self)
+'''
+
+class Box:
+ def __init__(self, text="?"):
+ self.text = text
+ self.box = gtk.EventBox()
+ self.label = gtk.Label(text)
+ self.box.add(self.label)
+ self.box.set_size_request(64,64)
+ self.timer = None
+
+ def getBox(self):
+ return self.box
+
+ def setText(self, text):
+ self.text = text
+ self.label.set_text(text)
+
+ def setColor(self, color):
+ if not color:
+ return
+ self.box.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse(color))
+
+ def setETA(self, eta):
+ if eta:
+ self.when = now() + eta
+ self.startTimer()
+ else:
+ self.stopTimer()
+
+ def startTimer(self):
+ self.stopTimer()
+ self.timer = gobject.timeout_add(1000, self.update)
+ self.update()
+
+ def stopTimer(self):
+ if self.timer:
+ gobject.source_remove(self.timer)
+ self.timer = None
+ self.label.set_text(self.text)
+
+ def update(self):
+ if now() < self.when:
+ next = time.strftime("%H:%M:%S", time.localtime(self.when))
+ secs = "[%d secs]" % (self.when - now())
+ self.label.set_text("%s\n%s\n%s" % (self.text, next, secs))
+ return True # restart timer
+ else:
+ # done
+ self.label.set_text("%s\n[soon]\n[overdue]" % (self.text,))
+ self.timer = None
+ return False
+
+
+
+class ThreeRowBuilder:
+ def __init__(self, name, ref):
+ self.name = name
+
+ self.last = Box()
+ self.current = Box()
+ self.step = Box("idle")
+ self.step.setColor("white")
+
+ self.ref = ref
+
+ def getBoxes(self):
+ return self.last.getBox(), self.current.getBox(), self.step.getBox()
+
+ def getLastBuild(self):
+ d = self.ref.callRemote("getLastFinishedBuild")
+ d.addCallback(self.gotLastBuild)
+ def gotLastBuild(self, build):
+ if build:
+ build.callRemote("getText").addCallback(self.gotLastText)
+ build.callRemote("getResults").addCallback(self.gotLastResult)
+
+ def gotLastText(self, text):
+ print "Got text", text
+ self.last.setText("\n".join(text))
+
+ def gotLastResult(self, result):
+ colormap = {SUCCESS: 'green',
+ FAILURE: 'red',
+ WARNINGS: 'orange',
+ EXCEPTION: 'purple',
+ }
+ self.last.setColor(colormap[result])
+
+ def getState(self):
+ self.ref.callRemote("getState").addCallback(self.gotState)
+ def gotState(self, res):
+ state, ETA, builds = res
+ # state is one of: offline, idle, waiting, interlocked, building
+ # TODO: ETA is going away, you have to look inside the builds to get
+ # that value
+ currentmap = {"offline": "red",
+ "idle": "white",
+ "waiting": "yellow",
+ "interlocked": "yellow",
+ "building": "yellow",}
+ text = state
+ self.current.setColor(currentmap[state])
+ if ETA is not None:
+ text += "\nETA=%s secs" % ETA
+ self.current.setText(state)
+
+ def buildStarted(self, build):
+ print "[%s] buildStarted" % (self.name,)
+ self.current.setColor("yellow")
+
+ def buildFinished(self, build, results):
+ print "[%s] buildFinished: %s" % (self.name, results)
+ self.gotLastBuild(build)
+ self.current.setColor("white")
+ self.current.stopTimer()
+
+ def buildETAUpdate(self, eta):
+ print "[%s] buildETAUpdate: %s" % (self.name, eta)
+ self.current.setETA(eta)
+
+
+ def stepStarted(self, stepname, step):
+ print "[%s] stepStarted: %s" % (self.name, stepname)
+ self.step.setText(stepname)
+ self.step.setColor("yellow")
+ def stepFinished(self, stepname, step, results):
+ print "[%s] stepFinished: %s %s" % (self.name, stepname, results)
+ self.step.setText("idle")
+ self.step.setColor("white")
+ self.step.stopTimer()
+ def stepETAUpdate(self, stepname, eta):
+ print "[%s] stepETAUpdate: %s %s" % (self.name, stepname, eta)
+ self.step.setETA(eta)
+
+
+class ThreeRowClient(pb.Referenceable):
+    # Remote-status client: one ThreeRowBuilder (name label plus three
+    # stacked boxes) per builder, laid out as columns of a gtk.Table.
+    def __init__(self, window):
+        self.window = window
+        self.buildernames = []  # preserves column ordering
+        self.builders = {}      # buildername -> ThreeRowBuilder
+
+    def connected(self, ref):
+        # Called with the remote status object after the PB login succeeds.
+        print "connected"
+        self.ref = ref
+        self.pane = gtk.VBox(False, 2)
+        self.table = gtk.Table(1+3, 1)
+        self.pane.add(self.table)
+        self.window.vb.add(self.pane)
+        self.pane.show_all()
+        ref.callRemote("subscribe", "logs", 5, self)
+
+    def removeTable(self):
+        # detach every child widget before dropping the table itself
+        for child in self.table.get_children():
+            self.table.remove(child)
+        self.pane.remove(self.table)
+
+    def makeTable(self):
+        columns = len(self.builders)
+        self.table = gtk.Table(2, columns)
+        self.pane.add(self.table)
+        # one column per builder: row 0 is the name, rows 1-3 the boxes
+        for i in range(len(self.buildernames)):
+            name = self.buildernames[i]
+            b = self.builders[name]
+            last,current,step = b.getBoxes()
+            self.table.attach(gtk.Label(name), i, i+1, 0, 1)
+            self.table.attach(last, i, i+1, 1, 2,
+                              xpadding=1, ypadding=1)
+            self.table.attach(current, i, i+1, 2, 3,
+                              xpadding=1, ypadding=1)
+            self.table.attach(step, i, i+1, 3, 4,
+                              xpadding=1, ypadding=1)
+        self.table.show_all()
+
+    def rebuildTable(self):
+        self.removeTable()
+        self.makeTable()
+
+    def remote_builderAdded(self, buildername, builder):
+        print "builderAdded", buildername
+        assert buildername not in self.buildernames
+        self.buildernames.append(buildername)
+
+        b = ThreeRowBuilder(buildername, builder)
+        self.builders[buildername] = b
+        self.rebuildTable()
+        # pull this builder's history and current state from the master
+        b.getLastBuild()
+        b.getState()
+
+    def remote_builderRemoved(self, buildername):
+        del self.builders[buildername]
+        self.buildernames.remove(buildername)
+        self.rebuildTable()
+
+    # The remaining remote_* methods are thin dispatchers from master
+    # status events to the matching per-builder widget object.
+    def remote_builderChangedState(self, name, state, eta):
+        self.builders[name].gotState((state, eta, None))
+    def remote_buildStarted(self, name, build):
+        self.builders[name].buildStarted(build)
+    def remote_buildFinished(self, name, build, results):
+        self.builders[name].buildFinished(build, results)
+
+    def remote_buildETAUpdate(self, name, build, eta):
+        self.builders[name].buildETAUpdate(eta)
+    def remote_stepStarted(self, name, build, stepname, step):
+        self.builders[name].stepStarted(stepname, step)
+    def remote_stepFinished(self, name, build, stepname, step, results):
+        self.builders[name].stepFinished(stepname, step, results)
+
+    def remote_stepETAUpdate(self, name, build, stepname, step,
+                             eta, expectations):
+        # expectations is a list of (metricname, current_value,
+        # expected_value) tuples, so that we could show individual progress
+        # meters for each metric
+        self.builders[name].stepETAUpdate(stepname, eta)
+
+    def remote_logStarted(self, buildername, build, stepname, step,
+                          logname, log):
+        pass
+
+    def remote_logFinished(self, buildername, build, stepname, step,
+                           logname, log):
+        pass
+
+
+class GtkClient(TextClient):
+    # GUI variant of TextClient: same PB connection machinery, but status
+    # is rendered by a ThreeRowClient inside a gtk.Window.
+    ClientClass = ThreeRowClient
+
+    def __init__(self, master):
+        # NOTE(review): TextClient.__init__ is deliberately not invoked;
+        # presumably its listener setup is replaced by the gtk widgets below
+        # — confirm against buildbot.clients.base.
+        self.master = master
+
+        w = gtk.Window()
+        self.w = w
+        #w.set_size_request(64,64)
+        w.connect('destroy', lambda win: gtk.main_quit())
+        self.vb = gtk.VBox(False, 2)
+        self.status = gtk.Label("unconnected")
+        self.vb.add(self.status)
+        self.listener = self.ClientClass(self)
+        w.add(self.vb)
+        w.show_all()
+
+    def connected(self, ref):
+        self.status.set_text("connected")
+        TextClient.connected(self, ref)
+
+"""
+ def addBuilder(self, name, builder):
+ Client.addBuilder(self, name, builder)
+ self.pane.addBuilder(builder)
+ def removeBuilder(self, name):
+ self.pane.removeBuilder(name, self.builders[name])
+ Client.removeBuilder(self, name)
+
+ def startConnecting(self, master):
+ self.master = master
+ Client.startConnecting(self, master)
+ self.status.set_text("connecting to %s.." % master)
+ def connected(self, remote):
+ Client.connected(self, remote)
+ self.status.set_text(self.master)
+ remote.notifyOnDisconnect(self.disconnected)
+ def disconnected(self, remote):
+ self.status.set_text("disconnected, will retry")
+"""
+
+def main():
+ master = "localhost:8007"
+ if len(sys.argv) > 1:
+ master = sys.argv[1]
+ c = GtkClient(master)
+ c.run()
+
+if __name__ == '__main__':
+ main()
+
diff --git a/buildbot/buildbot/clients/sendchange.py b/buildbot/buildbot/clients/sendchange.py
new file mode 100644
index 0000000..0ea4ba6
--- /dev/null
+++ b/buildbot/buildbot/clients/sendchange.py
@@ -0,0 +1,48 @@
+
+from twisted.spread import pb
+from twisted.cred import credentials
+from twisted.internet import reactor
+
+class Sender:
+    # Pushes Changes into a buildmaster's change port over Perspective
+    # Broker, logging in with the fixed "change"/"changepw" credentials.
+    def __init__(self, master, user=None):
+        self.user = user
+        # master is a "host:port" string
+        self.host, self.port = master.split(":")
+        self.port = int(self.port)
+        self.num_changes = 0
+
+    def send(self, branch, revision, comments, files, user=None, category=None):
+        # Connect, log in, and deliver one change dict; returns a Deferred
+        # that fires after the master has acknowledged the change.
+        if user is None:
+            user = self.user
+        change = {'who': user, 'files': files, 'comments': comments,
+                  'branch': branch, 'revision': revision, 'category': category}
+        self.num_changes += 1
+
+        f = pb.PBClientFactory()
+        d = f.login(credentials.UsernamePassword("change", "changepw"))
+        reactor.connectTCP(self.host, self.port, f)
+        d.addCallback(self.addChange, change)
+        return d
+
+    def addChange(self, remote, change):
+        d = remote.callRemote('addChange', change)
+        # drop the connection as soon as the master acknowledges
+        d.addCallback(lambda res: remote.broker.transport.loseConnection())
+        return d
+
+    def printSuccess(self, res):
+        # callback helper for command-line front-ends
+        if self.num_changes > 1:
+            print "%d changes sent successfully" % self.num_changes
+        elif self.num_changes == 1:
+            print "change sent successfully"
+        else:
+            print "no changes to send"
+
+    def printFailure(self, why):
+        # errback helper for command-line front-ends
+        print "change(s) NOT sent, something went wrong:"
+        print why
+
+    def stop(self, res):
+        # callback helper: shut down the reactor, passing the result through
+        reactor.stop()
+        return res
+
+    def run(self):
+        reactor.run()
diff --git a/buildbot/buildbot/dnotify.py b/buildbot/buildbot/dnotify.py
new file mode 100644
index 0000000..d23d600
--- /dev/null
+++ b/buildbot/buildbot/dnotify.py
@@ -0,0 +1,102 @@
+
+# spiv wants this
+
+import fcntl, signal
+
+class DNotify_Handler:
+    # Multiplexes the single process-wide SIGIO disposition among all
+    # active DNotify watchers (dnotify cannot report which fd fired).
+    def __init__(self):
+        self.watchers = {}   # fd number -> DNotify instance
+        self.installed = 0
+    def install(self):
+        if self.installed:
+            return
+        signal.signal(signal.SIGIO, self.fire)
+        self.installed = 1
+    def uninstall(self):
+        if not self.installed:
+            return
+        # restore the default disposition once nobody is watching
+        signal.signal(signal.SIGIO, signal.SIG_DFL)
+        self.installed = 0
+    def add(self, watcher):
+        self.watchers[watcher.fd.fileno()] = watcher
+        self.install()
+    def remove(self, watcher):
+        if self.watchers.has_key(watcher.fd.fileno()):
+            del(self.watchers[watcher.fd.fileno()])
+            if not self.watchers:
+                self.uninstall()
+    def fire(self, signum, frame):
+        # this is the signal handler
+        # without siginfo_t, we must fire them all
+        for watcher in self.watchers.values():
+            watcher.callback()
+
+class DNotify:
+    DN_ACCESS = fcntl.DN_ACCESS # a file in the directory was read
+    DN_MODIFY = fcntl.DN_MODIFY # a file was modified (write,truncate)
+    DN_CREATE = fcntl.DN_CREATE # a file was created
+    DN_DELETE = fcntl.DN_DELETE # a file was unlinked
+    DN_RENAME = fcntl.DN_RENAME # a file was renamed
+    DN_ATTRIB = fcntl.DN_ATTRIB # a file had attributes changed (chmod,chown)
+
+    # shared singleton slot for the SIGIO multiplexer; a one-element list
+    # so instances can rebind it without a class-attribute assignment
+    handler = [None]
+
+    def __init__(self, dirname, callback=None,
+                 flags=[DN_MODIFY,DN_CREATE,DN_DELETE,DN_RENAME]):
+
+        """This object watches a directory for changes. The .callback
+        attribute should be set to a function to be run every time something
+        happens to it. Be aware that it will be called more times than you
+        expect."""
+
+        if callback:
+            self.callback = callback
+        else:
+            self.callback = self.fire
+        self.dirname = dirname
+        # OR the requested events together; DN_MULTISHOT keeps the
+        # notification armed after the first event fires
+        self.flags = reduce(lambda x, y: x | y, flags) | fcntl.DN_MULTISHOT
+        self.fd = open(dirname, "r")
+        # ideally we would move the notification to something like SIGRTMIN,
+        # (to free up SIGIO) and use sigaction to have the signal handler
+        # receive a structure with the fd number. But python doesn't offer
+        # either.
+        if not self.handler[0]:
+            self.handler[0] = DNotify_Handler()
+        self.handler[0].add(self)
+        fcntl.fcntl(self.fd, fcntl.F_NOTIFY, self.flags)
+    def remove(self):
+        # detach from the shared handler and release the directory fd
+        self.handler[0].remove(self)
+        self.fd.close()
+    def fire(self):
+        # default callback when none was supplied
+        print self.dirname, "changed!"
+
+def test_dnotify1():
+    # manual smoke test: watch the current directory forever
+    d = DNotify(".")
+    while 1:
+        signal.pause()
+
+def test_dnotify2():
+    # create ./foo/, create/delete files in ./ and ./foo/ while this is
+    # running. Notice how both notifiers are fired when anything changes;
+    # this is an unfortunate side-effect of the lack of extended sigaction
+    # support in Python.
+    count = [0]
+    d1 = DNotify(".")
+    def fire1(count=count, d1=d1):
+        print "./ changed!", count[0]
+        count[0] += 1
+        if count[0] > 5:
+            # stop watching after a handful of events
+            d1.remove()
+            del(d1)
+    # change the callback, since we can't define it until after we have the
+    # dnotify object. Hmm, unless we give the dnotify to the callback.
+    d1.callback = fire1
+    def fire2(): print "foo/ changed!"
+    d2 = DNotify("foo", fire2)
+    while 1:
+        signal.pause()
+
+
+if __name__ == '__main__':
+    test_dnotify2()
+
diff --git a/buildbot/buildbot/ec2buildslave.py b/buildbot/buildbot/ec2buildslave.py
new file mode 100644
index 0000000..6a1f42d
--- /dev/null
+++ b/buildbot/buildbot/ec2buildslave.py
@@ -0,0 +1,283 @@
+"""A LatentSlave that uses EC2 to instantiate the slaves on demand.
+
+Tested with Python boto 1.5c
+"""
+
+# Portions copyright Canonical Ltd. 2009
+
+import cStringIO
+import os
+import re
+import time
+import urllib
+
+import boto
+import boto.exception
+from twisted.internet import defer, threads
+from twisted.python import log
+
+from buildbot.buildslave import AbstractLatentBuildSlave
+from buildbot import interfaces
+
+PENDING = 'pending'
+RUNNING = 'running'
+SHUTTINGDOWN = 'shutting-down'
+TERMINATED = 'terminated'
+
+class EC2LatentBuildSlave(AbstractLatentBuildSlave):
+    """A latent buildslave that boots an EC2 instance on demand and shuts
+    it down again via the AbstractLatentBuildSlave lifecycle hooks."""
+
+    instance = image = None  # populated once an instance/AMI is resolved
+    _poll_resolution = 5 # hook point for tests
+
+ def __init__(self, name, password, instance_type, ami=None,
+ valid_ami_owners=None, valid_ami_location_regex=None,
+ elastic_ip=None, identifier=None, secret_identifier=None,
+ aws_id_file_path=None,
+ keypair_name='latent_buildbot_slave',
+ security_name='latent_buildbot_slave',
+ max_builds=None, notify_on_missing=[], missing_timeout=60*20,
+ build_wait_timeout=60*10, properties={}):
+ AbstractLatentBuildSlave.__init__(
+ self, name, password, max_builds, notify_on_missing,
+ missing_timeout, build_wait_timeout, properties)
+ if not ((ami is not None) ^
+ (valid_ami_owners is not None or
+ valid_ami_location_regex is not None)):
+ raise ValueError(
+ 'You must provide either a specific ami, or one or both of '
+ 'valid_ami_location_regex and valid_ami_owners')
+ self.ami = ami
+ if valid_ami_owners is not None:
+ if isinstance(valid_ami_owners, (int, long)):
+ valid_ami_owners = (valid_ami_owners,)
+ else:
+ for element in valid_ami_owners:
+ if not isinstance(element, (int, long)):
+ raise ValueError(
+ 'valid_ami_owners should be int or iterable '
+ 'of ints', element)
+ if valid_ami_location_regex is not None:
+ if not isinstance(valid_ami_location_regex, basestring):
+ raise ValueError(
+ 'valid_ami_location_regex should be a string')
+ else:
+ # verify that regex will compile
+ re.compile(valid_ami_location_regex)
+ self.valid_ami_owners = valid_ami_owners
+ self.valid_ami_location_regex = valid_ami_location_regex
+ self.instance_type = instance_type
+ self.keypair_name = keypair_name
+ self.security_name = security_name
+ if identifier is None:
+ assert secret_identifier is None, (
+ 'supply both or neither of identifier, secret_identifier')
+ if aws_id_file_path is None:
+ home = os.environ['HOME']
+ aws_id_file_path = os.path.join(home, '.ec2', 'aws_id')
+ if not os.path.exists(aws_id_file_path):
+ raise ValueError(
+ "Please supply your AWS access key identifier and secret "
+ "access key identifier either when instantiating this %s "
+ "or in the %s file (on two lines).\n" %
+ (self.__class__.__name__, aws_id_file_path))
+ aws_file = open(aws_id_file_path, 'r')
+ try:
+ identifier = aws_file.readline().strip()
+ secret_identifier = aws_file.readline().strip()
+ finally:
+ aws_file.close()
+ else:
+ assert (aws_id_file_path is None,
+ 'if you supply the identifier and secret_identifier, '
+ 'do not specify the aws_id_file_path')
+ assert (secret_identifier is not None,
+ 'supply both or neither of identifier, secret_identifier')
+ # Make the EC2 connection.
+ self.conn = boto.connect_ec2(identifier, secret_identifier)
+
+ # Make a keypair
+ #
+ # We currently discard the keypair data because we don't need it.
+ # If we do need it in the future, we will always recreate the keypairs
+ # because there is no way to
+ # programmatically retrieve the private key component, unless we
+ # generate it and store it on the filesystem, which is an unnecessary
+ # usage requirement.
+ try:
+ key_pair = self.conn.get_all_key_pairs(keypair_name)[0]
+ # key_pair.delete() # would be used to recreate
+ except boto.exception.EC2ResponseError, e:
+ if e.code != 'InvalidKeyPair.NotFound':
+ if e.code == 'AuthFailure':
+ print ('POSSIBLE CAUSES OF ERROR:\n'
+ ' Did you sign up for EC2?\n'
+ ' Did you put a credit card number in your AWS '
+ 'account?\n'
+ 'Please doublecheck before reporting a problem.\n')
+ raise
+ # make one; we would always do this, and stash the result, if we
+ # needed the key (for instance, to SSH to the box). We'd then
+ # use paramiko to use the key to connect.
+ self.conn.create_key_pair(keypair_name)
+
+ # create security group
+ try:
+ group = self.conn.get_all_security_groups(security_name)[0]
+ except boto.exception.EC2ResponseError, e:
+ if e.code == 'InvalidGroup.NotFound':
+ self.security_group = self.conn.create_security_group(
+ security_name,
+ 'Authorization to access the buildbot instance.')
+ # Authorize the master as necessary
+ # TODO this is where we'd open the hole to do the reverse pb
+ # connect to the buildbot
+ # ip = urllib.urlopen(
+ # 'http://checkip.amazonaws.com').read().strip()
+ # self.security_group.authorize('tcp', 22, 22, '%s/32' % ip)
+ # self.security_group.authorize('tcp', 80, 80, '%s/32' % ip)
+ else:
+ raise
+
+ # get the image
+ if self.ami is not None:
+ self.image = self.conn.get_image(self.ami)
+ else:
+ # verify we have access to at least one acceptable image
+ discard = self.get_image()
+
+ # get the specified elastic IP, if any
+ if elastic_ip is not None:
+ elastic_ip = self.conn.get_all_addresses([elastic_ip])[0]
+ self.elastic_ip = elastic_ip
+
+    def get_image(self):
+        """Return the boto Image to boot: the configured AMI if one was
+        given, otherwise the highest-sorting match among the owner/location
+        constraints (last element after sorting the candidate list)."""
+        if self.image is not None:
+            return self.image
+        if self.valid_ami_location_regex:
+            level = 0
+            options = []
+            get_match = re.compile(self.valid_ami_location_regex).match
+            for image in self.conn.get_all_images(
+                    owners=self.valid_ami_owners):
+                # gather sorting data
+                match = get_match(image.location)
+                if match:
+                    alpha_sort = int_sort = None
+                    if level < 2:
+                        try:
+                            alpha_sort = match.group(1)
+                        except IndexError:
+                            level = 2
+                        else:
+                            if level == 0:
+                                try:
+                                    int_sort = int(alpha_sort)
+                                except ValueError:
+                                    level = 1
+                    options.append([int_sort, alpha_sort,
+                                    image.location, image.id, image])
+            if level:
+                # some candidate lacked a numeric (or any) group(1):
+                # degrade the sort key so all candidates stay comparable
+                log.msg('sorting images at level %d' % level)
+                options = [candidate[level:] for candidate in options]
+        else:
+            options = [(image.location, image.id, image) for image
+                       in self.conn.get_all_images(
+                           owners=self.valid_ami_owners)]
+        options.sort()
+        log.msg('sorted images (last is chosen): %s' %
+                (', '.join(
+                    '%s (%s)' % (candidate[-1].id, candidate[-1].location)
+                    for candidate in options)))
+        if not options:
+            raise ValueError('no available images match constraints')
+        return options[-1][-1]
+
+    @property
+    def dns(self):
+        # public DNS name of the running instance, or None before start
+        if self.instance is None:
+            return None
+        return self.instance.public_dns_name
+
+    def start_instance(self):
+        # AbstractLatentBuildSlave hook: boot the instance in a worker
+        # thread (boto calls block) and return a Deferred for the result.
+        if self.instance is not None:
+            raise ValueError('instance active')
+        return threads.deferToThread(self._start_instance)
+
+    def _start_instance(self):
+        # Runs in a worker thread: launch the AMI and poll until the
+        # instance leaves the 'pending' state.
+        image = self.get_image()
+        reservation = image.run(
+            key_name=self.keypair_name, security_groups=[self.security_name],
+            instance_type=self.instance_type)
+        self.instance = reservation.instances[0]
+        log.msg('%s %s starting instance %s' %
+                (self.__class__.__name__, self.slavename, self.instance.id))
+        duration = 0
+        interval = self._poll_resolution
+        while self.instance.state == PENDING:
+            time.sleep(interval)
+            duration += interval
+            if duration % 60 == 0:
+                # progress note once a minute while we wait
+                log.msg('%s %s has waited %d minutes for instance %s' %
+                        (self.__class__.__name__, self.slavename, duration//60,
+                         self.instance.id))
+            self.instance.update()
+        if self.instance.state == RUNNING:
+            self.output = self.instance.get_console_output()
+            minutes = duration//60
+            seconds = duration%60
+            log.msg('%s %s instance %s started on %s '
+                    'in about %d minutes %d seconds (%s)' %
+                    (self.__class__.__name__, self.slavename,
+                     self.instance.id, self.dns, minutes, seconds,
+                     self.output.output))
+            if self.elastic_ip is not None:
+                self.instance.use_ip(self.elastic_ip)
+            return [self.instance.id,
+                    image.id,
+                    '%02d:%02d:%02d' % (minutes//60, minutes%60, seconds)]
+        else:
+            # instance went straight to shutting-down/terminated
+            log.msg('%s %s failed to start instance %s (%s)' %
+                    (self.__class__.__name__, self.slavename,
+                     self.instance.id, self.instance.state))
+            raise interfaces.LatentBuildSlaveFailedToSubstantiate(
+                self.instance.id, self.instance.state)
+
+    def stop_instance(self, fast=False):
+        # AbstractLatentBuildSlave hook: terminate the instance (if any)
+        # in a worker thread; 'fast' returns without awaiting full shutdown.
+        if self.instance is None:
+            # be gentle. Something may just be trying to alert us that an
+            # instance never attached, and it's because, somehow, we never
+            # started.
+            return defer.succeed(None)
+        instance = self.instance
+        # clear bookkeeping up front so a subsequent start can proceed
+        self.output = self.instance = None
+        return threads.deferToThread(
+            self._stop_instance, instance, fast)
+
+    def _stop_instance(self, instance, fast):
+        # Runs in a worker thread: release the elastic IP (if any), request
+        # a stop, and poll until the goal state is reached.
+        if self.elastic_ip is not None:
+            self.conn.disassociate_address(self.elastic_ip.public_ip)
+        instance.update()
+        if instance.state not in (SHUTTINGDOWN, TERMINATED):
+            instance.stop()
+            log.msg('%s %s terminating instance %s' %
+                    (self.__class__.__name__, self.slavename, instance.id))
+        duration = 0
+        interval = self._poll_resolution
+        if fast:
+            # settle for 'shutting-down' instead of full termination
+            goal = (SHUTTINGDOWN, TERMINATED)
+            instance.update()
+        else:
+            goal = (TERMINATED,)
+        while instance.state not in goal:
+            time.sleep(interval)
+            duration += interval
+            if duration % 60 == 0:
+                log.msg(
+                    '%s %s has waited %d minutes for instance %s to end' %
+                    (self.__class__.__name__, self.slavename, duration//60,
+                     instance.id))
+            instance.update()
+        log.msg('%s %s instance %s %s '
+                'after about %d minutes %d seconds' %
+                (self.__class__.__name__, self.slavename,
+                 instance.id, goal, duration//60, duration%60))
diff --git a/buildbot/buildbot/interfaces.py b/buildbot/buildbot/interfaces.py
new file mode 100644
index 0000000..e510d05
--- /dev/null
+++ b/buildbot/buildbot/interfaces.py
@@ -0,0 +1,1123 @@
+
+"""Interface documentation.
+
+Define the interfaces that are implemented by various buildbot classes.
+"""
+
+from zope.interface import Interface, Attribute
+
+# exceptions that can be raised while trying to start a build
+class NoSlaveError(Exception):
+ pass
+class BuilderInUseError(Exception):
+ pass
+class BuildSlaveTooOldError(Exception):
+ pass
+class LatentBuildSlaveFailedToSubstantiate(Exception):
+ pass
+
+# other exceptions
+class BuildbotNotRunningError(Exception):
+ pass
+
+class IChangeSource(Interface):
+ """Object which feeds Change objects to the changemaster. When files or
+ directories are changed and the version control system provides some
+ kind of notification, this object should turn it into a Change object
+ and pass it through::
+
+ self.changemaster.addChange(change)
+ """
+
+ def start():
+ """Called when the buildmaster starts. Can be used to establish
+ connections to VC daemons or begin polling."""
+
+ def stop():
+ """Called when the buildmaster shuts down. Connections should be
+ terminated, polling timers should be canceled."""
+
+ def describe():
+ """Should return a string which briefly describes this source. This
+ string will be displayed in an HTML status page."""
+
+class IScheduler(Interface):
+ """I watch for Changes in the source tree and decide when to trigger
+ Builds. I create BuildSet objects and submit them to the BuildMaster. I
+ am a service, and the BuildMaster is always my parent.
+
+ @ivar properties: properties to be applied to all builds started by this
+ scheduler
+ @type properties: L{buildbot.process.properties.Properties}
+ """
+
+ def addChange(change):
+ """A Change has just been dispatched by one of the ChangeSources.
+ Each Scheduler will receive this Change. I may decide to start a
+ build as a result, or I might choose to ignore it."""
+
+ def listBuilderNames():
+ """Return a list of strings indicating the Builders that this
+ Scheduler might feed."""
+
+ def getPendingBuildTimes():
+ """Return a list of timestamps for any builds that are waiting in the
+ tree-stable-timer queue. This is only relevant for Change-based
+ schedulers, all others can just return an empty list."""
+ # TODO: it might be nice to make this into getPendingBuildSets, which
+ # would let someone subscribe to the buildset being finished.
+ # However, the Scheduler doesn't actually create the buildset until
+ # it gets submitted, so doing this would require some major rework.
+
+class IUpstreamScheduler(Interface):
+ """This marks an IScheduler as being eligible for use as the 'upstream='
+ argument to a buildbot.scheduler.Dependent instance."""
+
+ def subscribeToSuccessfulBuilds(target):
+ """Request that the target callbable be invoked after every
+ successful buildset. The target will be called with a single
+ argument: the SourceStamp used by the successful builds."""
+
+ def listBuilderNames():
+ """Return a list of strings indicating the Builders that this
+ Scheduler might feed."""
+
+class IDownstreamScheduler(Interface):
+ """This marks an IScheduler to be listening to other schedulers.
+ On reconfigs, these might get notified to check if their upstream
+ scheduler are still the same."""
+
+ def checkUpstreamScheduler():
+ """Check if the upstream scheduler is still alive, and if not,
+ get a new upstream object from the master."""
+
+
+class ISourceStamp(Interface):
+ """
+ @cvar branch: branch from which source was drawn
+ @type branch: string or None
+
+ @cvar revision: revision of the source, or None to use CHANGES
+ @type revision: varies depending on VC
+
+ @cvar patch: patch applied to the source, or None if no patch
+ @type patch: None or tuple (level diff)
+
+ @cvar changes: the source step should check out the latest revision
+ in the given changes
+ @type changes: tuple of L{buildbot.changes.changes.Change} instances,
+ all of which are on the same branch
+ """
+
+ def canBeMergedWith(self, other):
+ """
+ Can this SourceStamp be merged with OTHER?
+ """
+
+ def mergeWith(self, others):
+ """Generate a SourceStamp for the merger of me and all the other
+ BuildRequests. This is called by a Build when it starts, to figure
+ out what its sourceStamp should be."""
+
+ def getAbsoluteSourceStamp(self, got_revision):
+ """Get a new SourceStamp object reflecting the actual revision found
+ by a Source step."""
+
+ def getText(self):
+ """Returns a list of strings to describe the stamp. These are
+ intended to be displayed in a narrow column. If more space is
+ available, the caller should join them together with spaces before
+ presenting them to the user."""
+
+class IEmailSender(Interface):
+ """I know how to send email, and can be used by other parts of the
+ Buildbot to contact developers."""
+ pass
+
+class IEmailLookup(Interface):
+ def getAddress(user):
+ """Turn a User-name string into a valid email address. Either return
+ a string (with an @ in it), None (to indicate that the user cannot
+ be reached by email), or a Deferred which will fire with the same."""
+
+class IStatus(Interface):
+ """I am an object, obtainable from the buildmaster, which can provide
+ status information."""
+
+ def getProjectName():
+ """Return the name of the project that this Buildbot is working
+ for."""
+ def getProjectURL():
+ """Return the URL of this Buildbot's project."""
+ def getBuildbotURL():
+ """Return the URL of the top-most Buildbot status page, or None if
+ this Buildbot does not provide a web status page."""
+ def getURLForThing(thing):
+ """Return the URL of a page which provides information on 'thing',
+ which should be an object that implements one of the status
+ interfaces defined in L{buildbot.interfaces}. Returns None if no
+ suitable page is available (or if no Waterfall is running)."""
+
+ def getChangeSources():
+ """Return a list of IChangeSource objects."""
+
+ def getChange(number):
+ """Return an IChange object."""
+
+ def getSchedulers():
+ """Return a list of ISchedulerStatus objects for all
+ currently-registered Schedulers."""
+
+ def getBuilderNames(categories=None):
+ """Return a list of the names of all current Builders."""
+ def getBuilder(name):
+ """Return the IBuilderStatus object for a given named Builder. Raises
+ KeyError if there is no Builder by that name."""
+
+ def getSlaveNames():
+ """Return a list of buildslave names, suitable for passing to
+ getSlave()."""
+ def getSlave(name):
+ """Return the ISlaveStatus object for a given named buildslave."""
+
+ def getBuildSets():
+ """Return a list of active (non-finished) IBuildSetStatus objects."""
+
+ def generateFinishedBuilds(builders=[], branches=[],
+ num_builds=None, finished_before=None,
+ max_search=200):
+ """Return a generator that will produce IBuildStatus objects each
+ time you invoke its .next() method, starting with the most recent
+ finished build and working backwards.
+
+ @param builders: this is a list of Builder names, and the generator
+ will only produce builds that ran on the given
+ Builders. If the list is empty, produce builds from
+ all Builders.
+
+ @param branches: this is a list of branch names, and the generator
+ will only produce builds that used the given
+ branches. If the list is empty, produce builds from
+ all branches.
+
+ @param num_builds: the generator will stop after providing this many
+ builds. The default of None means to produce as
+ many builds as possible.
+
+ @type finished_before: int: a timestamp, seconds since the epoch
+ @param finished_before: if provided, do not produce any builds that
+ finished after the given timestamp.
+
+ @type max_search: int
+ @param max_search: this method may have to examine a lot of builds
+ to find some that match the search parameters,
+ especially if there aren't any matching builds.
+ This argument imposes a hard limit on the number
+ of builds that will be examined within any given
+ Builder.
+ """
+
+ def subscribe(receiver):
+ """Register an IStatusReceiver to receive new status events. The
+ receiver will immediately be sent a set of 'builderAdded' messages
+ for all current builders. It will receive further 'builderAdded' and
+ 'builderRemoved' messages as the config file is reloaded and builders
+ come and go. It will also receive 'buildsetSubmitted' messages for
+ all outstanding BuildSets (and each new BuildSet that gets
+ submitted). No additional messages will be sent unless the receiver
+ asks for them by calling .subscribe on the IBuilderStatus objects
+ which accompany the addedBuilder message."""
+
+ def unsubscribe(receiver):
+ """Unregister an IStatusReceiver. No further status messgaes will be
+ delivered."""
+
+class IBuildSetStatus(Interface):
+ """I represent a set of Builds, each run on a separate Builder but all
+ using the same source tree."""
+
+ def getSourceStamp():
+ """Return a SourceStamp object which can be used to re-create
+ the source tree that this build used.
+
+ This method will return None if the source information is no longer
+ available."""
+ pass
+ def getReason():
+ pass
+ def getID():
+ """Return the BuildSet's ID string, if any. The 'try' feature uses a
+ random string as a BuildSetID to relate submitted jobs with the
+ resulting BuildSet."""
+ def getResponsibleUsers():
+ pass # not implemented
+ def getInterestedUsers():
+ pass # not implemented
+ def getBuilderNames():
+ """Return a list of the names of all Builders on which this set will
+ do builds."""
+ def getBuildRequests():
+ """Return a list of IBuildRequestStatus objects that represent my
+ component Builds. This list might correspond to the Builders named by
+ getBuilderNames(), but if builder categories are used, or 'Builder
+ Aliases' are implemented, then they may not."""
+ def isFinished():
+ pass
+ def waitUntilSuccess():
+ """Return a Deferred that fires (with this IBuildSetStatus object)
+ when the outcome of the BuildSet is known, i.e., upon the first
+ failure, or after all builds complete successfully."""
+ def waitUntilFinished():
+ """Return a Deferred that fires (with this IBuildSetStatus object)
+ when all builds have finished."""
+ def getResults():
+ pass
+
+class IBuildRequestStatus(Interface):
+ """I represent a request to build a particular set of source code on a
+ particular Builder. These requests may be merged by the time they are
+ finally turned into a Build."""
+
+ def getSourceStamp():
+ """Return a SourceStamp object which can be used to re-create
+ the source tree that this build used. This method will
+ return an absolute SourceStamp if possible, and its results
+ may change as the build progresses. Specifically, a "HEAD"
+ build may later be more accurately specified by an absolute
+ SourceStamp with the specific revision information.
+
+ This method will return None if the source information is no longer
+ available."""
+ pass
+ def getBuilderName():
+ pass
+ def getBuilds():
+ """Return a list of IBuildStatus objects for each Build that has been
+ started in an attempt to satisfy this BuildRequest."""
+
+ def subscribe(observer):
+ """Register a callable that will be invoked (with a single
+ IBuildStatus object) for each Build that is created to satisfy this
+ request. There may be multiple Builds created in an attempt to handle
+ the request: they may be interrupted by the user or abandoned due to
+ a lost slave. The last Build (the one which actually gets to run to
+ completion) is said to 'satisfy' the BuildRequest. The observer will
+ be called once for each of these Builds, both old and new."""
+ def unsubscribe(observer):
+ """Unregister the callable that was registered with subscribe()."""
+ def getSubmitTime():
+ """Return the time when this request was submitted"""
+ def setSubmitTime(t):
+ """Sets the time when this request was submitted"""
+
+
+class ISlaveStatus(Interface):
+ def getName():
+ """Return the name of the build slave."""
+
+ def getAdmin():
+ """Return a string with the slave admin's contact data."""
+
+ def getHost():
+ """Return a string with the slave host info."""
+
+ def isConnected():
+ """Return True if the slave is currently online, False if not."""
+
+ def lastMessageReceived():
+ """Return a timestamp (seconds since epoch) indicating when the most
+ recent message was received from the buildslave."""
+
+class ISchedulerStatus(Interface):
+ def getName():
+ """Return the name of this Scheduler (a string)."""
+
+ def getPendingBuildsets():
+ """Return an IBuildSet for all BuildSets that are pending. These
+ BuildSets are waiting for their tree-stable-timers to expire."""
+ # TODO: this is not implemented anywhere
+
+
+class IBuilderStatus(Interface):
+ def getName():
+ """Return the name of this Builder (a string)."""
+
+ def getState():
+ # TODO: this isn't nearly as meaningful as it used to be
+ """Return a tuple (state, builds) for this Builder. 'state' is the
+ so-called 'big-status', indicating overall status (as opposed to
+ which step is currently running). It is a string, one of 'offline',
+ 'idle', or 'building'. 'builds' is a list of IBuildStatus objects
+ (possibly empty) representing the currently active builds."""
+
+ def getSlaves():
+ """Return a list of ISlaveStatus objects for the buildslaves that are
+ used by this builder."""
+
+ def getPendingBuilds():
+ """Return an IBuildRequestStatus object for all upcoming builds
+ (those which are ready to go but which are waiting for a buildslave
+ to be available)."""
+
+ def getCurrentBuilds():
+ """Return a list containing an IBuildStatus object for each build
+ currently in progress."""
+ # again, we could probably provide an object for 'waiting' and
+ # 'interlocked' too, but things like the Change list might still be
+ # subject to change
+
+ def getLastFinishedBuild():
+ """Return the IBuildStatus object representing the last finished
+ build, which may be None if the builder has not yet finished any
+ builds."""
+
+ def getBuild(number):
+ """Return an IBuildStatus object for a historical build. Each build
+ is numbered (starting at 0 when the Builder is first added),
+ getBuild(n) will retrieve the Nth such build. getBuild(-n) will
+ retrieve a recent build, with -1 being the most recent build
+ started. If the Builder is idle, this will be the same as
+ getLastFinishedBuild(). If the Builder is active, it will be an
+ unfinished build. This method will return None if the build is no
+ longer available. Older builds are likely to have less information
+ stored: Logs are the first to go, then Steps."""
+
+ def getEvent(number):
+ """Return an IStatusEvent object for a recent Event. Builders
+ connecting and disconnecting are events, as are ping attempts.
+ getEvent(-1) will return the most recent event. Events are numbered,
+ but it probably doesn't make sense to ever do getEvent(+n)."""
+
+ def generateFinishedBuilds(branches=[],
+ num_builds=None,
+ max_buildnum=None, finished_before=None,
+ max_search=200,
+ ):
+ """Return a generator that will produce IBuildStatus objects each
+ time you invoke its .next() method, starting with the most recent
+ finished build, then the previous build, and so on back to the oldest
+ build available.
+
+ @param branches: this is a list of branch names, and the generator
+ will only produce builds that involve the given
+ branches. If the list is empty, the generator will
+ produce all builds regardless of what branch they
+ used.
+
+ @param num_builds: if provided, the generator will stop after
+ providing this many builds. The default of None
+ means to produce as many builds as possible.
+
+ @param max_buildnum: if provided, the generator will start by
+ providing the build with this number, or the
+ highest-numbered preceding build (i.e. the
+ generator will not produce any build numbered
+ *higher* than max_buildnum). The default of None
+ means to start with the most recent finished
+ build. -1 means the same as None. -2 means to
+ start with the next-most-recent completed build,
+ etc.
+
+ @type finished_before: int: a timestamp, seconds since the epoch
+ @param finished_before: if provided, do not produce any builds that
+ finished after the given timestamp.
+
+ @type max_search: int
+ @param max_search: this method may have to examine a lot of builds
+ to find some that match the search parameters,
+ especially if there aren't any matching builds.
+ This argument imposes a hard limit on the number
+ of builds that will be examined.
+ """
+
+ def subscribe(receiver):
+ """Register an IStatusReceiver to receive new status events. The
+ receiver will be given builderChangedState, buildStarted, and
+ buildFinished messages."""
+
+ def unsubscribe(receiver):
+ """Unregister an IStatusReceiver. No further status messgaes will be
+ delivered."""
+
+class IEventSource(Interface):
+ def eventGenerator(branches=[]):
+ """This function creates a generator which will yield all of this
+ object's status events, starting with the most recent and progressing
+ backwards in time. These events provide the IStatusEvent interface.
+ At the moment they are all instances of buildbot.status.builder.Event
+ or buildbot.status.builder.BuildStepStatus .
+
+ @param branches: a list of branch names. The generator should only
+ return events that are associated with these branches. If the list is
+ empty, events for all branches should be returned (i.e. an empty list
+ means 'accept all' rather than 'accept none').
+ """
+
+class IBuildStatus(Interface):
+ """I represent the status of a single Build/BuildRequest. It could be
+ in-progress or finished."""
+
+ def getBuilder():
+ """
+ Return the BuilderStatus that owns this build.
+
+ @rtype: implementor of L{IBuilderStatus}
+ """
+
+ def isFinished():
+ """Return a boolean. True means the build has finished, False means
+ it is still running."""
+
+ def waitUntilFinished():
+ """Return a Deferred that will fire when the build finishes. If the
+ build has already finished, this deferred will fire right away. The
+ callback is given this IBuildStatus instance as an argument."""
+
+ def getProperty(propname):
+ """Return the value of the build property with the given name. Raises
+ KeyError if there is no such property on this build."""
+
+ def getReason():
+ """Return a string that indicates why the build was run. 'changes',
+ 'forced', and 'periodic' are the most likely values. 'try' will be
+ added in the future."""
+
+ def getSourceStamp():
+ """Return a SourceStamp object which can be used to re-create
+ the source tree that this build used.
+
+ This method will return None if the source information is no longer
+ available."""
+ # TODO: it should be possible to expire the patch but still remember
+ # that the build was r123+something.
+
+ def getChanges():
+ """Return a list of Change objects which represent which source
+ changes went into the build."""
+
+ def getResponsibleUsers():
+ """Return a list of Users who are to blame for the changes that went
+ into this build. If anything breaks (at least anything that wasn't
+ already broken), blame them. Specifically, this is the set of users
+ who were responsible for the Changes that went into this build. Each
+ User is a string, corresponding to their name as known by the VC
+ repository."""
+
+ def getInterestedUsers():
+ """Return a list of Users who will want to know about the results of
+ this build. This is a superset of getResponsibleUsers(): it adds
+ people who are interested in this build but who did not actually
+ make the Changes that went into it (build sheriffs, code-domain
+ owners)."""
+
+ def getNumber():
+ """Within each builder, each Build has a number. Return it."""
+
+ def getPreviousBuild():
+ """Convenience method. Returns None if the previous build is
+ unavailable."""
+
+ def getSteps():
+ """Return a list of IBuildStepStatus objects. For invariant builds
+ (those which always use the same set of Steps), this should always
+ return the complete list, however some of the steps may not have
+ started yet (step.getTimes()[0] will be None). For variant builds,
+ this may not be complete (asking again later may give you more of
+ them)."""
+
+ def getTimes():
+ """Returns a tuple of (start, end). 'start' and 'end' are the times
+ (seconds since the epoch) when the Build started and finished. If
+ the build is still running, 'end' will be None."""
+
+ # while the build is running, the following methods make sense.
+ # Afterwards they return None
+
+ def getETA():
+ """Returns the number of seconds from now in which the build is
+ expected to finish, or None if we can't make a guess. This guess will
+ be refined over time."""
+
+ def getCurrentStep():
+ """Return an IBuildStepStatus object representing the currently
+ active step."""
+
+ # Once you know the build has finished, the following methods are legal.
+ # Before this build has finished, they all return None.
+
+ def getSlavename():
+ """Return the name of the buildslave which handled this build."""
+
+ def getText():
+ """Returns a list of strings to describe the build. These are
+ intended to be displayed in a narrow column. If more space is
+ available, the caller should join them together with spaces before
+ presenting them to the user."""
+
+ def getResults():
+ """Return a constant describing the results of the build: one of the
+ constants in buildbot.status.builder: SUCCESS, WARNINGS, or
+ FAILURE."""
+
+ def getLogs():
+ """Return a list of logs that describe the build as a whole. Some
+ steps will contribute their logs, while others are less important
+ and will only be accessible through the IBuildStepStatus objects.
+ Each log is an object which implements the IStatusLog interface."""
+
+ def getTestResults():
+ """Return a dictionary that maps test-name tuples to ITestResult
+ objects. This may return an empty or partially-filled dictionary
+ until the build has completed."""
+
+ # subscription interface
+
+ def subscribe(receiver, updateInterval=None):
+ """Register an IStatusReceiver to receive new status events. The
+ receiver will be given stepStarted and stepFinished messages. If
+ 'updateInterval' is non-None, buildETAUpdate messages will be sent
+ every 'updateInterval' seconds."""
+
+ def unsubscribe(receiver):
+ """Unregister an IStatusReceiver. No further status messgaes will be
+ delivered."""
+
+class ITestResult(Interface):
+ """I describe the results of a single unit test."""
+
+ def getName():
+ """Returns a tuple of strings which make up the test name. Tests may
+ be arranged in a hierarchy, so looking for common prefixes may be
+ useful."""
+
+ def getResults():
+ """Returns a constant describing the results of the test: SUCCESS,
+ WARNINGS, FAILURE."""
+
+ def getText():
+ """Returns a list of short strings which describe the results of the
+ test in slightly more detail. Suggested components include
+ 'failure', 'error', 'passed', 'timeout'."""
+
+ def getLogs():
+ # in flux, it may be possible to provide more structured information
+ # like python Failure instances
+ """Returns a dictionary of test logs. The keys are strings like
+ 'stdout', 'log', 'exceptions'. The values are strings."""
+
+
+class IBuildStepStatus(Interface):
+ """I hold status for a single BuildStep."""
+
+ def getName():
+ """Returns a short string with the name of this step. This string
+ may have spaces in it."""
+
+ def getBuild():
+ """Returns the IBuildStatus object which contains this step."""
+
+ def getTimes():
+ """Returns a tuple of (start, end). 'start' and 'end' are the times
+ (seconds since the epoch) when the Step started and finished. If the
+ step has not yet started, 'start' will be None. If the step is still
+ running, 'end' will be None."""
+
+ def getExpectations():
+ """Returns a list of tuples (name, current, target). Each tuple
+ describes a single axis along which the step's progress can be
+ measured. 'name' is a string which describes the axis itself, like
+ 'filesCompiled' or 'tests run' or 'bytes of output'. 'current' is a
+ number with the progress made so far, while 'target' is the value
+ that we expect (based upon past experience) to get to when the build
+ is finished.
+
+ 'current' will change over time until the step is finished. It is
+ 'None' until the step starts. When the build is finished, 'current'
+ may or may not equal 'target' (which is merely the expectation based
+ upon previous builds)."""
+
+ def getURLs():
+ """Returns a dictionary of URLs. Each key is a link name (a short
+ string, like 'results' or 'coverage'), and each value is a URL. These
+ links will be displayed along with the LogFiles.
+ """
+
+ def getLogs():
+ """Returns a list of IStatusLog objects. If the step has not yet
+ finished, this list may be incomplete (asking again later may give
+ you more of them)."""
+
+
+ def isFinished():
+ """Return a boolean. True means the step has finished, False means it
+ is still running."""
+
+ def waitUntilFinished():
+ """Return a Deferred that will fire when the step finishes. If the
+ step has already finished, this deferred will fire right away. The
+ callback is given this IBuildStepStatus instance as an argument."""
+
+ # while the step is running, the following methods make sense.
+ # Afterwards they return None
+
+ def getETA():
+ """Returns the number of seconds from now in which the step is
+ expected to finish, or None if we can't make a guess. This guess will
+ be refined over time."""
+
+ # Once you know the step has finished, the following methods are legal.
+ # Before this step has finished, they all return None.
+
+ def getText():
+ """Returns a list of strings which describe the step. These are
+ intended to be displayed in a narrow column. If more space is
+ available, the caller should join them together with spaces before
+ presenting them to the user."""
+
+ def getResults():
+ """Return a tuple describing the results of the step: (result,
+ strings). 'result' is one of the constants in
+ buildbot.status.builder: SUCCESS, WARNINGS, FAILURE, or SKIPPED.
+ 'strings' is an optional list of strings that the step wants to
+ append to the overall build's results. These strings are usually
+ more terse than the ones returned by getText(): in particular,
+ successful Steps do not usually contribute any text to the overall
+ build."""
+
+ # subscription interface
+
+ def subscribe(receiver, updateInterval=10):
+ """Register an IStatusReceiver to receive new status events. The
+ receiver will be given logStarted and logFinished messages. It will
+ also be given a ETAUpdate message every 'updateInterval' seconds."""
+
+ def unsubscribe(receiver):
+ """Unregister an IStatusReceiver. No further status messgaes will be
+ delivered."""
+
+class IStatusEvent(Interface):
+ """I represent a Builder Event, something non-Build related that can
+ happen to a Builder."""
+
+ def getTimes():
+ """Returns a tuple of (start, end) like IBuildStepStatus, but end==0
+ indicates that this is a 'point event', which has no duration.
+ SlaveConnect/Disconnect are point events. Ping is not: it starts
+ when requested and ends when the response (positive or negative) is
+ returned"""
+
+ def getText():
+ """Returns a list of strings which describe the event. These are
+ intended to be displayed in a narrow column. If more space is
+ available, the caller should join them together with spaces before
+ presenting them to the user."""
+
+
+LOG_CHANNEL_STDOUT = 0
+LOG_CHANNEL_STDERR = 1
+LOG_CHANNEL_HEADER = 2
+
+class IStatusLog(Interface):
+ """I represent a single Log, which is a growing list of text items that
+ contains some kind of output for a single BuildStep. I might be finished,
+ in which case this list has stopped growing.
+
+ Each Log has a name, usually something boring like 'log' or 'output'.
+ These names are not guaranteed to be unique, however they are usually
+ chosen to be useful within the scope of a single step (i.e. the Compile
+ step might produce both 'log' and 'warnings'). The name may also have
+ spaces. If you want something more globally meaningful, at least within a
+ given Build, try::
+
+ '%s.%s' % (log.getStep().getName(), log.getName())
+
+ The Log can be presented as plain text, or it can be accessed as a list
+ of items, each of which has a channel indicator (header, stdout, stderr)
+ and a text chunk. An HTML display might represent the interleaved
+ channels with different styles, while a straight download-the-text
+ interface would just want to retrieve a big string.
+
+ The 'header' channel is used by ShellCommands to prepend a note about
+ which command is about to be run ('running command FOO in directory
+ DIR'), and append another note giving the exit code of the process.
+
+ Logs can be streaming: if the Log has not yet finished, you can
+ subscribe to receive new chunks as they are added.
+
+ A ShellCommand will have a Log associated with it that gathers stdout
+ and stderr. Logs may also be created by parsing command output or
+ through other synthetic means (grepping for all the warnings in a
+ compile log, or listing all the test cases that are going to be run).
+ Such synthetic Logs are usually finished as soon as they are created."""
+
+
+ def getName():
+ """Returns a short string with the name of this log, probably 'log'.
+ """
+
+ def getStep():
+ """Returns the IBuildStepStatus which owns this log."""
+ # TODO: can there be non-Step logs?
+
+ def isFinished():
+ """Return a boolean. True means the log has finished and is closed,
+ False means it is still open and new chunks may be added to it."""
+
+ def waitUntilFinished():
+ """Return a Deferred that will fire when the log is closed. If the
+ log has already finished, this deferred will fire right away. The
+ callback is given this IStatusLog instance as an argument."""
+
+ def subscribe(receiver, catchup):
+ """Register an IStatusReceiver to receive chunks (with logChunk) as
+ data is added to the Log. If you use this, you will also want to use
+ waitUntilFinished to find out when the listener can be retired.
+ Subscribing to a closed Log is a no-op.
+
+ If 'catchup' is True, the receiver will immediately be sent a series
+ of logChunk messages to bring it up to date with the partially-filled
+ log. This allows a status client to join a Log already in progress
+ without missing any data. If the Log has already finished, it is too
+ late to catch up: just do getText() instead.
+
+ If the Log is very large, the receiver will be called many times with
+ a lot of data. There is no way to throttle this data. If the receiver
+ is planning on sending the data on to somewhere else, over a narrow
+ connection, you can get a throttleable subscription by using
+ C{subscribeConsumer} instead."""
+
+ def unsubscribe(receiver):
+ """Remove a receiver previously registered with subscribe(). Attempts
+ to remove a receiver which was not previously registered is a no-op.
+ """
+
+ def subscribeConsumer(consumer):
+ """Register an L{IStatusLogConsumer} to receive all chunks of the
+ logfile, including all the old entries and any that will arrive in
+ the future. The consumer will first have their C{registerProducer}
+ method invoked with a reference to an object that can be told
+ C{pauseProducing}, C{resumeProducing}, and C{stopProducing}. Then the
+ consumer's C{writeChunk} method will be called repeatedly with each
+ (channel, text) tuple in the log, starting with the very first. The
+ consumer will be notified with C{finish} when the log has been
+ exhausted (which can only happen when the log is finished). Note that
+ a small amount of data could be written via C{writeChunk} even after
+ C{pauseProducing} has been called.
+
+ To unsubscribe the consumer, use C{producer.stopProducing}."""
+
+ # once the log has finished, the following methods make sense. They can
+ # be called earlier, but they will only return the contents of the log up
+ # to the point at which they were called. You will lose items that are
+ # added later. Use C{subscribe} or C{subscribeConsumer} to avoid missing
+ # anything.
+
+ def hasContents():
+ """Returns True if the LogFile still has contents available. Returns
+ False for logs that have been pruned. Clients should test this before
+ offering to show the contents of any log."""
+
+ def getText():
+ """Return one big string with the contents of the Log. This merges
+ all non-header chunks together."""
+
+ def readlines(channel=LOG_CHANNEL_STDOUT):
+ """Read lines from one channel of the logfile. This returns an
+ iterator that will provide single lines of text (including the
+ trailing newline).
+ """
+
+ def getTextWithHeaders():
+ """Return one big string with the contents of the Log. This merges
+ all chunks (including headers) together."""
+
+ def getChunks():
+ """Generate a list of (channel, text) tuples. 'channel' is a number,
+ 0 for stdout, 1 for stderr, 2 for header. (note that stderr is merged
+ into stdout if PTYs are in use)."""
+
+class IStatusLogConsumer(Interface):
+ """I am an object which can be passed to IStatusLog.subscribeConsumer().
+ I represent a target for writing the contents of an IStatusLog. This
+ differs from a regular IStatusReceiver in that it can pause the producer.
+ This makes it more suitable for use in streaming data over network
+ sockets, such as an HTTP request. Note that the consumer can only pause
+ the producer until it has caught up with all the old data. After that
+ point, C{pauseProducing} is ignored and all new output from the log is
+ sent directly to the consumer."""
+
+ def registerProducer(producer, streaming):
+ """A producer is being hooked up to this consumer. The consumer only
+ has to handle a single producer. It should send .pauseProducing and
+ .resumeProducing messages to the producer when it wants to stop or
+ resume the flow of data. 'streaming' will be set to True because the
+ producer is always a PushProducer.
+ """
+
+ def unregisterProducer():
+ """The previously-registered producer has been removed. No further
+ pauseProducing or resumeProducing calls should be made. The consumer
+ should delete its reference to the Producer so it can be released."""
+
+ def writeChunk(chunk):
+ """A chunk (i.e. a tuple of (channel, text)) is being written to the
+ consumer."""
+
+ def finish():
+ """The log has finished sending chunks to the consumer."""
+
+class IStatusReceiver(Interface):
+ """I am an object which can receive build status updates. I may be
+ subscribed to an IStatus, an IBuilderStatus, or an IBuildStatus."""
+
+ def buildsetSubmitted(buildset):
+ """A new BuildSet has been submitted to the buildmaster.
+
+ @type buildset: implementor of L{IBuildSetStatus}
+ """
+
+ def requestSubmitted(request):
+ """A new BuildRequest has been submitted to the buildmaster.
+
+ @type request: implementor of L{IBuildRequestStatus}
+ """
+
+ def builderAdded(builderName, builder):
+ """
+ A new Builder has just been added. This method may return an
+ IStatusReceiver (probably 'self') which will be subscribed to receive
+ builderChangedState and buildStarted/Finished events.
+
+ @type builderName: string
+ @type builder: L{buildbot.status.builder.BuilderStatus}
+ @rtype: implementor of L{IStatusReceiver}
+ """
+
+ def builderChangedState(builderName, state):
+ """Builder 'builderName' has changed state. The possible values for
+ 'state' are 'offline', 'idle', and 'building'."""
+
+ def buildStarted(builderName, build):
+ """Builder 'builderName' has just started a build. The build is an
+ object which implements IBuildStatus, and can be queried for more
+ information.
+
+ This method may return an IStatusReceiver (it could even return
+ 'self'). If it does so, stepStarted and stepFinished methods will be
+ invoked on the object for the steps of this one build. This is a
+ convenient way to subscribe to all build steps without missing any.
+ This receiver will automatically be unsubscribed when the build
+ finishes.
+
+ It can also return a tuple of (IStatusReceiver, interval), in which
+ case buildETAUpdate messages are sent every 'interval' seconds, in
+ addition to the stepStarted and stepFinished messages."""
+
+ def buildETAUpdate(build, ETA):
+ """This is a periodic update on the progress this Build has made
+ towards completion."""
+
+ def stepStarted(build, step):
+ """A step has just started. 'step' is the IBuildStepStatus which
+ represents the step: it can be queried for more information.
+
+ This method may return an IStatusReceiver (it could even return
+ 'self'). If it does so, logStarted and logFinished methods will be
+ invoked on the object for logs created by this one step. This
+ receiver will be automatically unsubscribed when the step finishes.
+
+ Alternatively, the method may return a tuple of an IStatusReceiver
+ and an integer named 'updateInterval'. In addition to
+ logStarted/logFinished messages, it will also receive stepETAUpdate
+ messages about every updateInterval seconds."""
+
+ def stepTextChanged(build, step, text):
+ """The text for a step has been updated.
+
+ This is called when calling setText() on the step status, and
+ hands in the text list."""
+
+ def stepText2Changed(build, step, text2):
+ """The text2 for a step has been updated.
+
+ This is called when calling setText2() on the step status, and
+ hands in text2 list."""
+
+ def stepETAUpdate(build, step, ETA, expectations):
+ """This is a periodic update on the progress this Step has made
+ towards completion. It gets an ETA (in seconds from the present) of
+ when the step ought to be complete, and a list of expectation tuples
+ (as returned by IBuildStepStatus.getExpectations) with more detailed
+ information."""
+
+ def logStarted(build, step, log):
+ """A new Log has been started, probably because a step has just
+ started running a shell command. 'log' is the IStatusLog object
+ which can be queried for more information.
+
+ This method may return an IStatusReceiver (such as 'self'), in which
+ case the target's logChunk method will be invoked as text is added to
+ the logfile. This receiver will automatically be unsubscribed when the
+ log finishes."""
+
+ def logChunk(build, step, log, channel, text):
+ """Some text has been added to this log. 'channel' is one of
+ LOG_CHANNEL_STDOUT, LOG_CHANNEL_STDERR, or LOG_CHANNEL_HEADER, as
+ defined in IStatusLog.getChunks."""
+
+ def logFinished(build, step, log):
+ """A Log has been closed."""
+
+ def stepFinished(build, step, results):
+ """A step has just finished. 'results' is the result tuple described
+ in IBuildStepStatus.getResults."""
+
+ def buildFinished(builderName, build, results):
+ """
+ A build has just finished. 'results' is the result tuple described
+ in L{IBuildStatus.getResults}.
+
+ @type builderName: string
+ @type build: L{buildbot.status.builder.BuildStatus}
+ @type results: tuple
+ """
+
+ def builderRemoved(builderName):
+ """The Builder has been removed."""
+
+class IControl(Interface):
+ def addChange(change):
+ """Add a change to all builders. Each Builder will decide for
+ themselves whether the change is interesting or not, and may initiate
+ a build as a result."""
+
+ def submitBuildSet(buildset):
+ """Submit a BuildSet object, which will eventually be run on all of
+ the builders listed therein."""
+
+ def getBuilder(name):
+ """Retrieve the IBuilderControl object for the given Builder."""
+
+class IBuilderControl(Interface):
+ def requestBuild(request):
+ """Queue a L{buildbot.process.base.BuildRequest} object for later
+ building."""
+
+ def requestBuildSoon(request):
+ """Submit a BuildRequest like requestBuild, but raise a
+ L{buildbot.interfaces.NoSlaveError} if no slaves are currently
+ available, so it cannot be used to queue a BuildRequest in the hopes
+ that a slave will eventually connect. This method is appropriate for
+ use by things like the web-page 'Force Build' button."""
+
+ def resubmitBuild(buildStatus, reason="<rebuild, no reason given>"):
+ """Rebuild something we've already built before. This submits a
+ BuildRequest to our Builder using the same SourceStamp as the earlier
+ build. This has no effect (but may eventually raise an exception) if
+ this Build has not yet finished."""
+
+ def getPendingBuilds():
+ """Return a list of L{IBuildRequestControl} objects for this Builder.
+ Each one corresponds to a pending build that has not yet started (due
+ to a scarcity of build slaves). These upcoming builds can be canceled
+ through the control object."""
+
+ def getBuild(number):
+ """Attempt to return an IBuildControl object for the given build.
+ Returns None if no such object is available. This will only work for
+ the build that is currently in progress: once the build finishes,
+ there is nothing to control anymore."""
+
+ def ping(timeout=30):
+ """Attempt to contact the slave and see if it is still alive. This
+ returns a Deferred which fires with either True (the slave is still
+ alive) or False (the slave did not respond). As a side effect, adds
+ an event to this builder's column in the waterfall display
+ containing the results of the ping."""
+ # TODO: this ought to live in ISlaveControl, maybe with disconnect()
+ # or something. However the event that is emitted is most useful in
+ # the Builder column, so it kinda fits here too.
+
+class IBuildRequestControl(Interface):
+ def subscribe(observer):
+ """Register a callable that will be invoked (with a single
+ IBuildControl object) for each Build that is created to satisfy this
+ request. There may be multiple Builds created in an attempt to handle
+ the request: they may be interrupted by the user or abandoned due to
+ a lost slave. The last Build (the one which actually gets to run to
+ completion) is said to 'satisfy' the BuildRequest. The observer will
+ be called once for each of these Builds, both old and new."""
+ def unsubscribe(observer):
+ """Unregister the callable that was registered with subscribe()."""
+ def cancel():
+ """Remove the build from the pending queue. Has no effect if the
+ build has already been started."""
+
+class IBuildControl(Interface):
+ def getStatus():
+ """Return an IBuildStatus object for the Build that I control."""
+ def stopBuild(reason="<no reason given>"):
+ """Halt the build. This has no effect if the build has already
+ finished."""
+
+class ILogFile(Interface):
+ """This is the internal interface to a LogFile, used by the BuildStep to
+ write data into the log.
+ """
+ def addStdout(data):
+ pass
+ def addStderr(data):
+ pass
+ def addHeader(data):
+ pass
+ def finish():
+ """The process that is feeding the log file has finished, and no
+ further data will be added. This closes the logfile."""
+
+class ILogObserver(Interface):
+ """Objects which provide this interface can be used in a BuildStep to
+ watch the output of a LogFile and parse it incrementally.
+ """
+
+ # internal methods
+ def setStep(step):
+ pass
+ def setLog(log):
+ pass
+
+ # methods called by the LogFile
+ def logChunk(build, step, log, channel, text):
+ pass
+
+class IBuildSlave(Interface):
+ # this is a marker interface for the BuildSlave class
+ pass
+
+class ILatentBuildSlave(IBuildSlave):
+ """A build slave that is not always running, but can run when requested.
+ """
+ substantiated = Attribute('Substantiated',
+ 'Whether the latent build slave is currently '
+ 'substantiated with a real instance.')
+
+ def substantiate():
+ """Request that the slave substantiate with a real instance.
+
+ Returns a deferred that will callback when a real instance has
+ attached."""
+
+ # there is an insubstantiate too, but that is not used externally ATM.
+
+ def buildStarted(sb):
+ """Inform the latent build slave that a build has started.
+
+ ``sb`` is a LatentSlaveBuilder as defined in buildslave.py. The sb
+ is the one for whom the build started.
+ """
+
+ def buildFinished(sb):
+ """Inform the latent build slave that a build has finished.
+
+ ``sb`` is a LatentSlaveBuilder as defined in buildslave.py. The sb
+ is the one for whom the build finished.
+ """
diff --git a/buildbot/buildbot/locks.py b/buildbot/buildbot/locks.py
new file mode 100644
index 0000000..6599d1d
--- /dev/null
+++ b/buildbot/buildbot/locks.py
@@ -0,0 +1,247 @@
+# -*- test-case-name: buildbot.test.test_locks -*-
+
+from twisted.python import log
+from twisted.internet import reactor, defer
+from buildbot import util
+
+if False: # for debugging
+ debuglog = log.msg
+else:
+ debuglog = lambda m: None
+
+class BaseLock:
+ """
+ Class handling claiming and releasing of L{self}, and keeping track of
+ current and waiting owners.
+
+ @note: Ideally, we'd like to maintain FIFO order. The place to do that
+ would be the L{isAvailable()} function. However, this function is
+ called by builds/steps both for the first time, and after waking
+ them up by L{self} from the L{self.waiting} queue. There is
+ currently no way of distinguishing between them.
+ """
+ description = "<BaseLock>"
+
+ def __init__(self, name, maxCount=1):
+ self.name = name # Name of the lock
+ self.waiting = [] # Current queue, tuples (LockAccess, deferred)
+ self.owners = [] # Current owners, tuples (owner, LockAccess)
+ self.maxCount=maxCount # maximal number of counting owners
+
+ def __repr__(self):
+ return self.description
+
+ def _getOwnersCount(self):
+ """ Return the number of current exclusive and counting owners.
+
+ @return: Tuple (number exclusive owners, number counting owners)
+ """
+ num_excl, num_counting = 0, 0
+ for owner in self.owners:
+ if owner[1].mode == 'exclusive':
+ num_excl = num_excl + 1
+ else: # mode == 'counting'
+ num_counting = num_counting + 1
+
+ assert (num_excl == 1 and num_counting == 0) \
+ or (num_excl == 0 and num_counting <= self.maxCount)
+ return num_excl, num_counting
+
+
+ def isAvailable(self, access):
+ """ Return a boolean whether the lock is available for claiming """
+ debuglog("%s isAvailable(%s): self.owners=%r"
+ % (self, access, self.owners))
+ num_excl, num_counting = self._getOwnersCount()
+ if access.mode == 'counting':
+ # Wants counting access
+ return num_excl == 0 and num_counting < self.maxCount
+ else:
+ # Wants exclusive access
+ return num_excl == 0 and num_counting == 0
+
+ def claim(self, owner, access):
+ """ Claim the lock (lock must be available) """
+ debuglog("%s claim(%s, %s)" % (self, owner, access.mode))
+ assert owner is not None
+ assert self.isAvailable(access), "ask for isAvailable() first"
+
+ assert isinstance(access, LockAccess)
+ assert access.mode in ['counting', 'exclusive']
+ self.owners.append((owner, access))
+ debuglog(" %s is claimed '%s'" % (self, access.mode))
+
+ def release(self, owner, access):
+ """ Release the lock """
+ assert isinstance(access, LockAccess)
+
+ debuglog("%s release(%s, %s)" % (self, owner, access.mode))
+ entry = (owner, access)
+ assert entry in self.owners
+ self.owners.remove(entry)
+ # who can we wake up?
+ # After an exclusive access, we may need to wake up several waiting.
+ # Break out of the loop when the first waiting client should not be awakened.
+ num_excl, num_counting = self._getOwnersCount()
+ while len(self.waiting) > 0:
+ access, d = self.waiting[0]
+ if access.mode == 'counting':
+ if num_excl > 0 or num_counting == self.maxCount:
+ break
+ else:
+ num_counting = num_counting + 1
+ else:
+ # access.mode == 'exclusive'
+ if num_excl > 0 or num_counting > 0:
+ break
+ else:
+ num_excl = num_excl + 1
+
+ del self.waiting[0]
+ reactor.callLater(0, d.callback, self)
+
+ def waitUntilMaybeAvailable(self, owner, access):
+ """Fire when the lock *might* be available. The caller will need to
+ check with isAvailable() when the deferred fires. This loose form is
+ used to avoid deadlocks. If we were interested in a stronger form,
+ this would be named 'waitUntilAvailable', and the deferred would fire
+ after the lock had been claimed.
+ """
+ debuglog("%s waitUntilAvailable(%s)" % (self, owner))
+ assert isinstance(access, LockAccess)
+ if self.isAvailable(access):
+ return defer.succeed(self)
+ d = defer.Deferred()
+ self.waiting.append((access, d))
+ return d
+
+
+class RealMasterLock(BaseLock):
+ def __init__(self, lockid):
+ BaseLock.__init__(self, lockid.name, lockid.maxCount)
+ self.description = "<MasterLock(%s, %s)>" % (self.name, self.maxCount)
+
+ def getLock(self, slave):
+ return self
+
+class RealSlaveLock:
+ def __init__(self, lockid):
+ self.name = lockid.name
+ self.maxCount = lockid.maxCount
+ self.maxCountForSlave = lockid.maxCountForSlave
+ self.description = "<SlaveLock(%s, %s, %s)>" % (self.name,
+ self.maxCount,
+ self.maxCountForSlave)
+ self.locks = {}
+
+ def __repr__(self):
+ return self.description
+
+ def getLock(self, slavebuilder):
+ slavename = slavebuilder.slave.slavename
+ if not self.locks.has_key(slavename):
+ maxCount = self.maxCountForSlave.get(slavename,
+ self.maxCount)
+ lock = self.locks[slavename] = BaseLock(self.name, maxCount)
+ desc = "<SlaveLock(%s, %s)[%s] %d>" % (self.name, maxCount,
+ slavename, id(lock))
+ lock.description = desc
+ self.locks[slavename] = lock
+ return self.locks[slavename]
+
+
+class LockAccess:
+ """ I am an object representing a way to access a lock.
+
+ @param lockid: LockId instance that should be accessed.
+ @type lockid: A MasterLock or SlaveLock instance.
+
+ @param mode: Mode of accessing the lock.
+ @type mode: A string, either 'counting' or 'exclusive'.
+ """
+ def __init__(self, lockid, mode):
+ self.lockid = lockid
+ self.mode = mode
+
+ assert isinstance(lockid, (MasterLock, SlaveLock))
+ assert mode in ['counting', 'exclusive']
+
+
+class BaseLockId(util.ComparableMixin):
+ """ Abstract base class for LockId classes.
+
+ Sets up the 'access()' function for the LockId's available to the user
+ (MasterLock and SlaveLock classes).
+ Derived classes should add
+ - Comparison with the L{util.ComparableMixin} via the L{compare_attrs}
+ class variable.
+ - Link to the actual lock class should be added with the L{lockClass}
+ class variable.
+ """
+ def access(self, mode):
+ """ Express how the lock should be accessed """
+ assert mode in ['counting', 'exclusive']
+ return LockAccess(self, mode)
+
+ def defaultAccess(self):
+ """ For buildbot 0.7.7 compatibility: When user doesn't specify an access
+ mode, this one is chosen.
+ """
+ return self.access('counting')
+
+
+
+# master.cfg should only reference the following MasterLock and SlaveLock
+# classes. They are identifiers that will be turned into real Locks later,
+# via the BotMaster.getLockByID method.
+
+class MasterLock(BaseLockId):
+ """I am a semaphore that limits the number of simultaneous actions.
+
+ Builds and BuildSteps can declare that they wish to claim me as they run.
+ Only a limited number of such builds or steps will be able to run
+ simultaneously. By default this number is one, but my maxCount parameter
+ can be raised to allow two or three or more operations to happen at the
+ same time.
+
+ Use this to protect a resource that is shared among all builders and all
+ slaves, for example to limit the load on a common SVN repository.
+ """
+
+ compare_attrs = ['name', 'maxCount']
+ lockClass = RealMasterLock
+ def __init__(self, name, maxCount=1):
+ self.name = name
+ self.maxCount = maxCount
+
+class SlaveLock(BaseLockId):
+ """I am a semaphore that limits simultaneous actions on each buildslave.
+
+ Builds and BuildSteps can declare that they wish to claim me as they run.
+ Only a limited number of such builds or steps will be able to run
+ simultaneously on any given buildslave. By default this number is one,
+ but my maxCount parameter can be raised to allow two or three or more
+ operations to happen on a single buildslave at the same time.
+
+ Use this to protect a resource that is shared among all the builds taking
+ place on each slave, for example to limit CPU or memory load on an
+ underpowered machine.
+
+ Each buildslave will get an independent copy of this semaphore. By
+ default each copy will use the same owner count (set with maxCount), but
+ you can provide maxCountForSlave with a dictionary that maps slavename to
+ owner count, to allow some slaves more parallelism than others.
+
+ """
+
+ compare_attrs = ['name', 'maxCount', '_maxCountForSlaveList']
+ lockClass = RealSlaveLock
+ def __init__(self, name, maxCount=1, maxCountForSlave={}):
+ self.name = name
+ self.maxCount = maxCount
+ self.maxCountForSlave = maxCountForSlave
+ # for comparison purposes, turn this dictionary into a stably-sorted
+ # list of tuples
+ self._maxCountForSlaveList = self.maxCountForSlave.items()
+ self._maxCountForSlaveList.sort()
+ self._maxCountForSlaveList = tuple(self._maxCountForSlaveList)
diff --git a/buildbot/buildbot/manhole.py b/buildbot/buildbot/manhole.py
new file mode 100644
index 0000000..e5479b3
--- /dev/null
+++ b/buildbot/buildbot/manhole.py
@@ -0,0 +1,265 @@
+
+import os.path
+import binascii, base64
+from twisted.python import log
+from twisted.application import service, strports
+from twisted.cred import checkers, portal
+from twisted.conch import manhole, telnet, manhole_ssh, checkers as conchc
+from twisted.conch.insults import insults
+from twisted.internet import protocol
+
+from buildbot.util import ComparableMixin
+from zope.interface import implements # requires Twisted-2.0 or later
+
+# makeTelnetProtocol and _TelnetRealm are for the TelnetManhole
+
+class makeTelnetProtocol:
+ # this curries the 'portal' argument into a later call to
+ # TelnetTransport()
+ def __init__(self, portal):
+ self.portal = portal
+
+ def __call__(self):
+ auth = telnet.AuthenticatingTelnetProtocol
+ return telnet.TelnetTransport(auth, self.portal)
+
+class _TelnetRealm:
+ implements(portal.IRealm)
+
+ def __init__(self, namespace_maker):
+ self.namespace_maker = namespace_maker
+
+ def requestAvatar(self, avatarId, *interfaces):
+ if telnet.ITelnetProtocol in interfaces:
+ namespace = self.namespace_maker()
+ p = telnet.TelnetBootstrapProtocol(insults.ServerProtocol,
+ manhole.ColoredManhole,
+ namespace)
+ return (telnet.ITelnetProtocol, p, lambda: None)
+ raise NotImplementedError()
+
+
+class chainedProtocolFactory:
+ # this curries the 'namespace' argument into a later call to
+ # chainedProtocolFactory()
+ def __init__(self, namespace):
+ self.namespace = namespace
+
+ def __call__(self):
+ return insults.ServerProtocol(manhole.ColoredManhole, self.namespace)
+
+class AuthorizedKeysChecker(conchc.SSHPublicKeyDatabase):
+ """Accept connections using SSH keys from a given file.
+
+ SSHPublicKeyDatabase takes the username that the prospective client has
+ requested and attempts to get a ~/.ssh/authorized_keys file for that
+ username. This requires root access, so it isn't as useful as you'd
+ like.
+
+ Instead, this subclass looks for keys in a single file, given as an
+ argument. This file is typically kept in the buildmaster's basedir. The
+ file should have 'ssh-dss ....' lines in it, just like authorized_keys.
+ """
+
+ def __init__(self, authorized_keys_file):
+ self.authorized_keys_file = os.path.expanduser(authorized_keys_file)
+
+ def checkKey(self, credentials):
+ f = open(self.authorized_keys_file)
+ for l in f.readlines():
+ l2 = l.split()
+ if len(l2) < 2:
+ continue
+ try:
+ if base64.decodestring(l2[1]) == credentials.blob:
+ return 1
+ except binascii.Error:
+ continue
+ return 0
+
+
+class _BaseManhole(service.MultiService):
+ """This provides remote access to a python interpreter (a read/exec/print
+ loop) embedded in the buildmaster via an internal SSH server. This allows
+ detailed inspection of the buildmaster state. It is of most use to
+ buildbot developers. Connect to this by running an ssh client.
+ """
+
+ def __init__(self, port, checker, using_ssh=True):
+ """
+ @type port: string or int
+ @param port: what port should the Manhole listen on? This is a
+ strports specification string, like 'tcp:12345' or
+ 'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
+ simple tcp port.
+
+ @type checker: an object providing the
+ L{twisted.cred.checkers.ICredentialsChecker} interface
+ @param checker: if provided, this checker is used to authenticate the
+ client instead of using the username/password scheme. You must either
+ provide a username/password or a Checker. Some useful values are::
+ import twisted.cred.checkers as credc
+ import twisted.conch.checkers as conchc
+ c = credc.AllowAnonymousAccess # completely open
+ c = credc.FilePasswordDB(passwd_filename) # file of name:passwd
+ c = conchc.UNIXPasswordDatabase # getpwnam() (probably /etc/passwd)
+
+ @type using_ssh: bool
+ @param using_ssh: If True, accept SSH connections. If False, accept
+ regular unencrypted telnet connections.
+ """
+
+ # unfortunately, these don't work unless we're running as root
+ #c = credc.PluggableAuthenticationModulesChecker: PAM
+ #c = conchc.SSHPublicKeyDatabase() # ~/.ssh/authorized_keys
+ # and I can't get UNIXPasswordDatabase to work
+
+ service.MultiService.__init__(self)
+ if type(port) is int:
+ port = "tcp:%d" % port
+ self.port = port # for comparison later
+ self.checker = checker # to maybe compare later
+
+ def makeNamespace():
+ # close over 'self' so we can get access to .parent later
+ master = self.parent
+ namespace = {
+ 'master': master,
+ 'status': master.getStatus(),
+ }
+ return namespace
+
+ def makeProtocol():
+ namespace = makeNamespace()
+ p = insults.ServerProtocol(manhole.ColoredManhole, namespace)
+ return p
+
+ self.using_ssh = using_ssh
+ if using_ssh:
+ r = manhole_ssh.TerminalRealm()
+ r.chainedProtocolFactory = makeProtocol
+ p = portal.Portal(r, [self.checker])
+ f = manhole_ssh.ConchFactory(p)
+ else:
+ r = _TelnetRealm(makeNamespace)
+ p = portal.Portal(r, [self.checker])
+ f = protocol.ServerFactory()
+ f.protocol = makeTelnetProtocol(p)
+ s = strports.service(self.port, f)
+ s.setServiceParent(self)
+
+
+ def startService(self):
+ service.MultiService.startService(self)
+ if self.using_ssh:
+ via = "via SSH"
+ else:
+ via = "via telnet"
+ log.msg("Manhole listening %s on port %s" % (via, self.port))
+
+
+class TelnetManhole(_BaseManhole, ComparableMixin):
+ """This Manhole accepts unencrypted (telnet) connections, and requires a
+ username and password to authorize access. You are encouraged to use the
+ encrypted ssh-based manhole classes instead."""
+
+ compare_attrs = ["port", "username", "password"]
+
+ def __init__(self, port, username, password):
+ """
+ @type port: string or int
+ @param port: what port should the Manhole listen on? This is a
+ strports specification string, like 'tcp:12345' or
+ 'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
+ simple tcp port.
+
+ @param username:
+ @param password: username= and password= form a pair of strings to
+ use when authenticating the remote user.
+ """
+
+ self.username = username
+ self.password = password
+
+ c = checkers.InMemoryUsernamePasswordDatabaseDontUse()
+ c.addUser(username, password)
+
+ _BaseManhole.__init__(self, port, c, using_ssh=False)
+
+class PasswordManhole(_BaseManhole, ComparableMixin):
+ """This Manhole accepts encrypted (ssh) connections, and requires a
+ username and password to authorize access.
+ """
+
+ compare_attrs = ["port", "username", "password"]
+
+ def __init__(self, port, username, password):
+ """
+ @type port: string or int
+ @param port: what port should the Manhole listen on? This is a
+ strports specification string, like 'tcp:12345' or
+ 'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
+ simple tcp port.
+
+ @param username:
+ @param password: username= and password= form a pair of strings to
+ use when authenticating the remote user.
+ """
+
+ self.username = username
+ self.password = password
+
+ c = checkers.InMemoryUsernamePasswordDatabaseDontUse()
+ c.addUser(username, password)
+
+ _BaseManhole.__init__(self, port, c)
+
+class AuthorizedKeysManhole(_BaseManhole, ComparableMixin):
+ """This Manhole accepts ssh connections, and requires that the
+ prospective client have an ssh private key that matches one of the public
+ keys in our authorized_keys file. It is created with the name of a file
+ that contains the public keys that we will accept."""
+
+ compare_attrs = ["port", "keyfile"]
+
+ def __init__(self, port, keyfile):
+ """
+ @type port: string or int
+ @param port: what port should the Manhole listen on? This is a
+ strports specification string, like 'tcp:12345' or
+ 'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
+ simple tcp port.
+
+ @param keyfile: the name of a file (relative to the buildmaster's
+ basedir) that contains SSH public keys of authorized
+ users, one per line. This is the exact same format
+ as used by sshd in ~/.ssh/authorized_keys .
+ """
+
+ # TODO: expanduser this, and make it relative to the buildmaster's
+ # basedir
+ self.keyfile = keyfile
+ c = AuthorizedKeysChecker(keyfile)
+ _BaseManhole.__init__(self, port, c)
+
+class ArbitraryCheckerManhole(_BaseManhole, ComparableMixin):
+ """This Manhole accepts ssh connections, but uses an arbitrary
+ user-supplied 'checker' object to perform authentication."""
+
+ compare_attrs = ["port", "checker"]
+
+ def __init__(self, port, checker):
+ """
+ @type port: string or int
+ @param port: what port should the Manhole listen on? This is a
+ strports specification string, like 'tcp:12345' or
+ 'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
+ simple tcp port.
+
+ @param checker: an instance of a twisted.cred 'checker' which will
+ perform authentication
+ """
+
+ _BaseManhole.__init__(self, port, checker)
+
+
diff --git a/buildbot/buildbot/master.py b/buildbot/buildbot/master.py
new file mode 100644
index 0000000..2a07c0b
--- /dev/null
+++ b/buildbot/buildbot/master.py
@@ -0,0 +1,965 @@
+# -*- test-case-name: buildbot.test.test_run -*-
+
+import os
+signal = None
+try:
+ import signal
+except ImportError:
+ pass
+from cPickle import load
+import warnings
+
+from zope.interface import implements
+from twisted.python import log, components
+from twisted.internet import defer, reactor
+from twisted.spread import pb
+from twisted.cred import portal, checkers
+from twisted.application import service, strports
+from twisted.persisted import styles
+
+import buildbot
+# sibling imports
+from buildbot.util import now
+from buildbot.pbutil import NewCredPerspective
+from buildbot.process.builder import Builder, IDLE
+from buildbot.process.base import BuildRequest
+from buildbot.status.builder import Status
+from buildbot.changes.changes import Change, ChangeMaster, TestChangeMaster
+from buildbot.sourcestamp import SourceStamp
+from buildbot.buildslave import BuildSlave
+from buildbot import interfaces, locks
+from buildbot.process.properties import Properties
+
+########################################
+
+class BotMaster(service.MultiService):
+
+ """This is the master-side service which manages remote buildbot slaves.
+ It provides them with BuildSlaves, and distributes file change
+ notification messages to them.
+ """
+
+ debug = 0
+
+ def __init__(self):
+ service.MultiService.__init__(self)
+ self.builders = {}
+ self.builderNames = []
+ # builders maps Builder names to instances of bb.p.builder.Builder,
+ # which is the master-side object that defines and controls a build.
+ # They are added by calling botmaster.addBuilder() from the startup
+ # code.
+
+ # self.slaves contains a ready BuildSlave instance for each
+ # potential buildslave, i.e. all the ones listed in the config file.
+ # If the slave is connected, self.slaves[slavename].slave will
+ # contain a RemoteReference to their Bot instance. If it is not
+ # connected, that attribute will hold None.
+ self.slaves = {} # maps slavename to BuildSlave
+ self.statusClientService = None
+ self.watchers = {}
+
+ # self.locks holds the real Lock instances
+ self.locks = {}
+
+ # self.mergeRequests is the callable override for merging build
+ # requests
+ self.mergeRequests = None
+
+ # these four are convenience functions for testing
+
+ def waitUntilBuilderAttached(self, name):
+ b = self.builders[name]
+ #if b.slaves:
+ # return defer.succeed(None)
+ d = defer.Deferred()
+ b.watchers['attach'].append(d)
+ return d
+
+ def waitUntilBuilderDetached(self, name):
+ b = self.builders.get(name)
+ if not b or not b.slaves:
+ return defer.succeed(None)
+ d = defer.Deferred()
+ b.watchers['detach'].append(d)
+ return d
+
+ def waitUntilBuilderFullyDetached(self, name):
+ b = self.builders.get(name)
+ # TODO: this looks too deeply inside the Builder object
+ if not b or not b.slaves:
+ return defer.succeed(None)
+ d = defer.Deferred()
+ b.watchers['detach_all'].append(d)
+ return d
+
+ def waitUntilBuilderIdle(self, name):
+ b = self.builders[name]
+ # TODO: this looks way too deeply inside the Builder object
+ for sb in b.slaves:
+ if sb.state != IDLE:
+ d = defer.Deferred()
+ b.watchers['idle'].append(d)
+ return d
+ return defer.succeed(None)
+
+ def loadConfig_Slaves(self, new_slaves):
+ old_slaves = [c for c in list(self)
+ if interfaces.IBuildSlave.providedBy(c)]
+
+ # identify added/removed slaves. For each slave we construct a tuple
+ # of (name, password, class), and we consider the slave to be already
+ # present if the tuples match. (we include the class to make sure
+ # that BuildSlave(name,pw) is different than
+ # SubclassOfBuildSlave(name,pw) ). If the password or class has
+ # changed, we will remove the old version of the slave and replace it
+ # with a new one. If anything else has changed, we just update the
+ # old BuildSlave instance in place. If the name has changed, of
+ # course, it looks exactly the same as deleting one slave and adding
+ # an unrelated one.
+ old_t = {}
+ for s in old_slaves:
+ old_t[(s.slavename, s.password, s.__class__)] = s
+ new_t = {}
+ for s in new_slaves:
+ new_t[(s.slavename, s.password, s.__class__)] = s
+ removed = [old_t[t]
+ for t in old_t
+ if t not in new_t]
+ added = [new_t[t]
+ for t in new_t
+ if t not in old_t]
+ remaining_t = [t
+ for t in new_t
+ if t in old_t]
+ # removeSlave will hang up on the old bot
+ dl = []
+ for s in removed:
+ dl.append(self.removeSlave(s))
+ d = defer.DeferredList(dl, fireOnOneErrback=True)
+ def _add(res):
+ for s in added:
+ self.addSlave(s)
+ for t in remaining_t:
+ old_t[t].update(new_t[t])
+ d.addCallback(_add)
+ return d
+
+ def addSlave(self, s):
+ s.setServiceParent(self)
+ s.setBotmaster(self)
+ self.slaves[s.slavename] = s
+
+ def removeSlave(self, s):
+ # TODO: technically, disownServiceParent could return a Deferred
+ s.disownServiceParent()
+ d = self.slaves[s.slavename].disconnect()
+ del self.slaves[s.slavename]
+ return d
+
+ def slaveLost(self, bot):
+ for name, b in self.builders.items():
+ if bot.slavename in b.slavenames:
+ b.detached(bot)
+
+ def getBuildersForSlave(self, slavename):
+ return [b
+ for b in self.builders.values()
+ if slavename in b.slavenames]
+
+ def getBuildernames(self):
+ return self.builderNames
+
+ def getBuilders(self):
+ allBuilders = [self.builders[name] for name in self.builderNames]
+ return allBuilders
+
+ def setBuilders(self, builders):
+ self.builders = {}
+ self.builderNames = []
+ for b in builders:
+ for slavename in b.slavenames:
+ # this is actually validated earlier
+ assert slavename in self.slaves
+ self.builders[b.name] = b
+ self.builderNames.append(b.name)
+ b.setBotmaster(self)
+ d = self._updateAllSlaves()
+ return d
+
+ def _updateAllSlaves(self):
+ """Notify all buildslaves about changes in their Builders."""
+ dl = [s.updateSlave() for s in self.slaves.values()]
+ return defer.DeferredList(dl)
+
+ def maybeStartAllBuilds(self):
+ builders = self.builders.values()
+ def _sortfunc(b1, b2):
+ t1 = b1.getOldestRequestTime()
+ t2 = b2.getOldestRequestTime()
+ # If t1 or t2 is None, then there are no build requests,
+ # so sort it at the end
+ if t1 is None:
+ return 1
+ if t2 is None:
+ return -1
+ return cmp(t1, t2)
+ builders.sort(cmp=_sortfunc)
+ for b in builders:
+ b.maybeStartBuild()
+
+ def shouldMergeRequests(self, builder, req1, req2):
+ """Determine whether two BuildRequests should be merged for
+ the given builder.
+
+ """
+ if self.mergeRequests is not None:
+ return self.mergeRequests(builder, req1, req2)
+ return req1.canBeMergedWith(req2)
+
+ def getPerspective(self, slavename):
+ return self.slaves[slavename]
+
+ def shutdownSlaves(self):
+ # TODO: make this into a bot method rather than a builder method
+ for b in self.slaves.values():
+ b.shutdownSlave()
+
+ def stopService(self):
+ for b in self.builders.values():
+ b.builder_status.addPointEvent(["master", "shutdown"])
+ b.builder_status.saveYourself()
+ return service.Service.stopService(self)
+
+ def getLockByID(self, lockid):
+ """Convert a Lock identifier into an actual Lock instance.
+ @param lockid: a locks.MasterLock or locks.SlaveLock instance
+ @return: a locks.RealMasterLock or locks.RealSlaveLock instance
+ """
+ assert isinstance(lockid, (locks.MasterLock, locks.SlaveLock))
+ if not lockid in self.locks:
+ self.locks[lockid] = lockid.lockClass(lockid)
+ # if the master.cfg file has changed maxCount= on the lock, the next
+ # time a build is started, they'll get a new RealLock instance. Note
+ # that this requires that MasterLock and SlaveLock (marker) instances
+ # be hashable and that they should compare properly.
+ return self.locks[lockid]
+
+########################################
+
+
+
+class DebugPerspective(NewCredPerspective):
+ def attached(self, mind):
+ return self
+ def detached(self, mind):
+ pass
+
+ def perspective_requestBuild(self, buildername, reason, branch, revision, properties={}):
+ c = interfaces.IControl(self.master)
+ bc = c.getBuilder(buildername)
+ ss = SourceStamp(branch, revision)
+ bpr = Properties()
+ bpr.update(properties, "remote requestBuild")
+ br = BuildRequest(reason, ss, builderName=buildername, properties=bpr)
+ bc.requestBuild(br)
+
+ def perspective_pingBuilder(self, buildername):
+ c = interfaces.IControl(self.master)
+ bc = c.getBuilder(buildername)
+ bc.ping()
+
+ def perspective_fakeChange(self, file, revision=None, who="fakeUser",
+ branch=None):
+ change = Change(who, [file], "some fake comments\n",
+ branch=branch, revision=revision)
+ c = interfaces.IControl(self.master)
+ c.addChange(change)
+
+ def perspective_setCurrentState(self, buildername, state):
+ builder = self.botmaster.builders.get(buildername)
+ if not builder: return
+ if state == "offline":
+ builder.statusbag.currentlyOffline()
+ if state == "idle":
+ builder.statusbag.currentlyIdle()
+ if state == "waiting":
+ builder.statusbag.currentlyWaiting(now()+10)
+ if state == "building":
+ builder.statusbag.currentlyBuilding(None)
+ def perspective_reload(self):
+ print "doing reload of the config file"
+ self.master.loadTheConfigFile()
+ def perspective_pokeIRC(self):
+ print "saying something on IRC"
+ from buildbot.status import words
+ for s in self.master:
+ if isinstance(s, words.IRC):
+ bot = s.f
+ for channel in bot.channels:
+ print " channel", channel
+ bot.p.msg(channel, "Ow, quit it")
+
+ def perspective_print(self, msg):
+ print "debug", msg
+
+class Dispatcher(styles.Versioned):
+ implements(portal.IRealm)
+ persistenceVersion = 2
+
+ def __init__(self):
+ self.names = {}
+
+ def upgradeToVersion1(self):
+ self.master = self.botmaster.parent
+ def upgradeToVersion2(self):
+ self.names = {}
+
+ def register(self, name, afactory):
+ self.names[name] = afactory
+ def unregister(self, name):
+ del self.names[name]
+
+ def requestAvatar(self, avatarID, mind, interface):
+ assert interface == pb.IPerspective
+ afactory = self.names.get(avatarID)
+ if afactory:
+ p = afactory.getPerspective()
+ elif avatarID == "debug":
+ p = DebugPerspective()
+ p.master = self.master
+ p.botmaster = self.botmaster
+ elif avatarID == "statusClient":
+ p = self.statusClientService.getPerspective()
+ else:
+ # it must be one of the buildslaves: no other names will make it
+ # past the checker
+ p = self.botmaster.getPerspective(avatarID)
+
+ if not p:
+ raise ValueError("no perspective for '%s'" % avatarID)
+
+ d = defer.maybeDeferred(p.attached, mind)
+ d.addCallback(self._avatarAttached, mind)
+ return d
+
+ def _avatarAttached(self, p, mind):
+ return (pb.IPerspective, p, lambda p=p,mind=mind: p.detached(mind))
+
+########################################
+
+# service hierarchy:
+# BuildMaster
+# BotMaster
+# ChangeMaster
+# all IChangeSource objects
+# StatusClientService
+# TCPClient(self.ircFactory)
+# TCPServer(self.slaveFactory) -> dispatcher.requestAvatar
+# TCPServer(self.site)
+# UNIXServer(ResourcePublisher(self.site))
+
+
+class BuildMaster(service.MultiService, styles.Versioned):
+ debug = 0
+ persistenceVersion = 3
+ manhole = None
+ debugPassword = None
+ projectName = "(unspecified)"
+ projectURL = None
+ buildbotURL = None
+ change_svc = None
+ properties = Properties()
+
+ def __init__(self, basedir, configFileName="master.cfg"):
+ service.MultiService.__init__(self)
+ self.setName("buildmaster")
+ self.basedir = basedir
+ self.configFileName = configFileName
+
+ # the dispatcher is the realm in which all inbound connections are
+ # looked up: slave builders, change notifications, status clients, and
+ # the debug port
+ dispatcher = Dispatcher()
+ dispatcher.master = self
+ self.dispatcher = dispatcher
+ self.checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
+ # the checker starts with no user/passwd pairs: they are added later
+ p = portal.Portal(dispatcher)
+ p.registerChecker(self.checker)
+ self.slaveFactory = pb.PBServerFactory(p)
+ self.slaveFactory.unsafeTracebacks = True # let them see exceptions
+
+ self.slavePortnum = None
+ self.slavePort = None
+
+ self.botmaster = BotMaster()
+ self.botmaster.setName("botmaster")
+ self.botmaster.setServiceParent(self)
+ dispatcher.botmaster = self.botmaster
+
+ self.status = Status(self.botmaster, self.basedir)
+
+ self.statusTargets = []
+
+ # this ChangeMaster is a dummy, only used by tests. In the real
+ # buildmaster, where the BuildMaster instance is activated
+ # (startService is called) by twistd, this attribute is overwritten.
+ self.useChanges(TestChangeMaster())
+
+ self.readConfig = False
+
+ def upgradeToVersion1(self):
+ self.dispatcher = self.slaveFactory.root.portal.realm
+
+ def upgradeToVersion2(self): # post-0.4.3
+ self.webServer = self.webTCPPort
+ del self.webTCPPort
+ self.webDistribServer = self.webUNIXPort
+ del self.webUNIXPort
+ self.configFileName = "master.cfg"
+
+ def upgradeToVersion3(self):
+ # post 0.6.3, solely to deal with the 0.6.3 breakage. Starting with
+ # 0.6.5 I intend to do away with .tap files altogether
+ self.services = []
+ self.namedServices = {}
+ del self.change_svc
+
+ def startService(self):
+ service.MultiService.startService(self)
+ self.loadChanges() # must be done before loading the config file
+ if not self.readConfig:
+ # TODO: consider catching exceptions during this call to
+ # loadTheConfigFile and bailing (reactor.stop) if it fails,
+ # since without a config file we can't do anything except reload
+ # the config file, and it would be nice for the user to discover
+ # this quickly.
+ self.loadTheConfigFile()
+ if signal and hasattr(signal, "SIGHUP"):
+ signal.signal(signal.SIGHUP, self._handleSIGHUP)
+ for b in self.botmaster.builders.values():
+ b.builder_status.addPointEvent(["master", "started"])
+ b.builder_status.saveYourself()
+
+ def useChanges(self, changes):
+ if self.change_svc:
+ # TODO: can return a Deferred
+ self.change_svc.disownServiceParent()
+ self.change_svc = changes
+ self.change_svc.basedir = self.basedir
+ self.change_svc.setName("changemaster")
+ self.dispatcher.changemaster = self.change_svc
+ self.change_svc.setServiceParent(self)
+
+ def loadChanges(self):
+ filename = os.path.join(self.basedir, "changes.pck")
+ try:
+ changes = load(open(filename, "rb"))
+ styles.doUpgrade()
+ except IOError:
+ log.msg("changes.pck missing, using new one")
+ changes = ChangeMaster()
+ except EOFError:
+ log.msg("corrupted changes.pck, using new one")
+ changes = ChangeMaster()
+ self.useChanges(changes)
+
+ def _handleSIGHUP(self, *args):
+ reactor.callLater(0, self.loadTheConfigFile)
+
+ def getStatus(self):
+ """
+ @rtype: L{buildbot.status.builder.Status}
+ """
+ return self.status
+
+ def loadTheConfigFile(self, configFile=None):
+ if not configFile:
+ configFile = os.path.join(self.basedir, self.configFileName)
+
+ log.msg("Creating BuildMaster -- buildbot.version: %s" % buildbot.version)
+ log.msg("loading configuration from %s" % configFile)
+ configFile = os.path.expanduser(configFile)
+
+ try:
+ f = open(configFile, "r")
+ except IOError, e:
+ log.msg("unable to open config file '%s'" % configFile)
+ log.msg("leaving old configuration in place")
+ log.err(e)
+ return
+
+ try:
+ self.loadConfig(f)
+ except:
+ log.msg("error during loadConfig")
+ log.err()
+ log.msg("The new config file is unusable, so I'll ignore it.")
+ log.msg("I will keep using the previous config file instead.")
+ f.close()
+
+ def loadConfig(self, f):
+ """Internal function to load a specific configuration file. Any
+ errors in the file will be signalled by raising an exception.
+
+ @return: a Deferred that will fire (with None) when the configuration
+ changes have been completed. This may involve a round-trip to each
+ buildslave that was involved."""
+
+ localDict = {'basedir': os.path.expanduser(self.basedir)}
+ try:
+ exec f in localDict
+ except:
+ log.msg("error while parsing config file")
+ raise
+
+ try:
+ config = localDict['BuildmasterConfig']
+ except KeyError:
+ log.err("missing config dictionary")
+ log.err("config file must define BuildmasterConfig")
+ raise
+
+ known_keys = ("bots", "slaves",
+ "sources", "change_source",
+ "schedulers", "builders", "mergeRequests",
+ "slavePortnum", "debugPassword", "logCompressionLimit",
+ "manhole", "status", "projectName", "projectURL",
+ "buildbotURL", "properties"
+ )
+ for k in config.keys():
+ if k not in known_keys:
+ log.msg("unknown key '%s' defined in config dictionary" % k)
+
+ try:
+ # required
+ schedulers = config['schedulers']
+ builders = config['builders']
+ for k in builders:
+ if k['name'].startswith("_"):
+ errmsg = ("builder names must not start with an "
+ "underscore: " + k['name'])
+ log.err(errmsg)
+ raise ValueError(errmsg)
+
+ slavePortnum = config['slavePortnum']
+ #slaves = config['slaves']
+ #change_source = config['change_source']
+
+ # optional
+ debugPassword = config.get('debugPassword')
+ manhole = config.get('manhole')
+ status = config.get('status', [])
+ projectName = config.get('projectName')
+ projectURL = config.get('projectURL')
+ buildbotURL = config.get('buildbotURL')
+ properties = config.get('properties', {})
+ logCompressionLimit = config.get('logCompressionLimit')
+ if logCompressionLimit is not None and not \
+ isinstance(logCompressionLimit, int):
+ raise ValueError("logCompressionLimit needs to be bool or int")
+ mergeRequests = config.get('mergeRequests')
+ if mergeRequests is not None and not callable(mergeRequests):
+ raise ValueError("mergeRequests must be a callable")
+
+ except KeyError, e:
+ log.msg("config dictionary is missing a required parameter")
+ log.msg("leaving old configuration in place")
+ raise
+
+ #if "bots" in config:
+ # raise KeyError("c['bots'] is no longer accepted")
+
+ slaves = config.get('slaves', [])
+ if "bots" in config:
+ m = ("c['bots'] is deprecated as of 0.7.6 and will be "
+ "removed by 0.8.0 . Please use c['slaves'] instead.")
+ log.msg(m)
+ warnings.warn(m, DeprecationWarning)
+ for name, passwd in config['bots']:
+ slaves.append(BuildSlave(name, passwd))
+
+ if "bots" not in config and "slaves" not in config:
+ log.msg("config dictionary must have either 'bots' or 'slaves'")
+ log.msg("leaving old configuration in place")
+ raise KeyError("must have either 'bots' or 'slaves'")
+
+ #if "sources" in config:
+ # raise KeyError("c['sources'] is no longer accepted")
+
+ change_source = config.get('change_source', [])
+ if isinstance(change_source, (list, tuple)):
+ change_sources = change_source
+ else:
+ change_sources = [change_source]
+ if "sources" in config:
+ m = ("c['sources'] is deprecated as of 0.7.6 and will be "
+ "removed by 0.8.0 . Please use c['change_source'] instead.")
+ log.msg(m)
+ warnings.warn(m, DeprecationWarning)
+ for s in config['sources']:
+ change_sources.append(s)
+
+ # do some validation first
+ for s in slaves:
+ assert interfaces.IBuildSlave.providedBy(s)
+ if s.slavename in ("debug", "change", "status"):
+ raise KeyError(
+ "reserved name '%s' used for a bot" % s.slavename)
+ if config.has_key('interlocks'):
+ raise KeyError("c['interlocks'] is no longer accepted")
+
+ assert isinstance(change_sources, (list, tuple))
+ for s in change_sources:
+ assert interfaces.IChangeSource(s, None)
+ # this assertion catches c['schedulers'] = Scheduler(), since
+ # Schedulers are service.MultiServices and thus iterable.
+ errmsg = "c['schedulers'] must be a list of Scheduler instances"
+ assert isinstance(schedulers, (list, tuple)), errmsg
+ for s in schedulers:
+ assert interfaces.IScheduler(s, None), errmsg
+ assert isinstance(status, (list, tuple))
+ for s in status:
+ assert interfaces.IStatusReceiver(s, None)
+
+ slavenames = [s.slavename for s in slaves]
+ buildernames = []
+ dirnames = []
+ for b in builders:
+ if type(b) is tuple:
+ raise ValueError("builder %s must be defined with a dict, "
+ "not a tuple" % b[0])
+ if b.has_key('slavename') and b['slavename'] not in slavenames:
+ raise ValueError("builder %s uses undefined slave %s" \
+ % (b['name'], b['slavename']))
+ for n in b.get('slavenames', []):
+ if n not in slavenames:
+ raise ValueError("builder %s uses undefined slave %s" \
+ % (b['name'], n))
+ if b['name'] in buildernames:
+ raise ValueError("duplicate builder name %s"
+ % b['name'])
+ buildernames.append(b['name'])
+ if b['builddir'] in dirnames:
+ raise ValueError("builder %s reuses builddir %s"
+ % (b['name'], b['builddir']))
+ dirnames.append(b['builddir'])
+
+ unscheduled_buildernames = buildernames[:]
+ schedulernames = []
+ for s in schedulers:
+ for b in s.listBuilderNames():
+ assert b in buildernames, \
+ "%s uses unknown builder %s" % (s, b)
+ if b in unscheduled_buildernames:
+ unscheduled_buildernames.remove(b)
+
+ if s.name in schedulernames:
+ # TODO: schedulers share a namespace with other Service
+ # children of the BuildMaster node, like status plugins, the
+ # Manhole, the ChangeMaster, and the BotMaster (although most
+ # of these don't have names)
+ msg = ("Schedulers must have unique names, but "
+ "'%s' was a duplicate" % (s.name,))
+ raise ValueError(msg)
+ schedulernames.append(s.name)
+
+ if unscheduled_buildernames:
+ log.msg("Warning: some Builders have no Schedulers to drive them:"
+ " %s" % (unscheduled_buildernames,))
+
+ # assert that all locks used by the Builds and their Steps are
+ # uniquely named.
+ lock_dict = {}
+ for b in builders:
+ for l in b.get('locks', []):
+ if isinstance(l, locks.LockAccess): # User specified access to the lock
+ l = l.lockid
+ if lock_dict.has_key(l.name):
+ if lock_dict[l.name] is not l:
+ raise ValueError("Two different locks (%s and %s) "
+ "share the name %s"
+ % (l, lock_dict[l.name], l.name))
+ else:
+ lock_dict[l.name] = l
+ # TODO: this will break with any BuildFactory that doesn't use a
+ # .steps list, but I think the verification step is more
+ # important.
+ for s in b['factory'].steps:
+ for l in s[1].get('locks', []):
+ if isinstance(l, locks.LockAccess): # User specified access to the lock
+ l = l.lockid
+ if lock_dict.has_key(l.name):
+ if lock_dict[l.name] is not l:
+ raise ValueError("Two different locks (%s and %s)"
+ " share the name %s"
+ % (l, lock_dict[l.name], l.name))
+ else:
+ lock_dict[l.name] = l
+
+ if not isinstance(properties, dict):
+ raise ValueError("c['properties'] must be a dictionary")
+
+ # slavePortnum supposed to be a strports specification
+ if type(slavePortnum) is int:
+ slavePortnum = "tcp:%d" % slavePortnum
+
+ # now we're committed to implementing the new configuration, so do
+ # it atomically
+ # TODO: actually, this is spread across a couple of Deferreds, so it
+ # really isn't atomic.
+
+ d = defer.succeed(None)
+
+ self.projectName = projectName
+ self.projectURL = projectURL
+ self.buildbotURL = buildbotURL
+
+ self.properties = Properties()
+ self.properties.update(properties, self.configFileName)
+ if logCompressionLimit is not None:
+ self.status.logCompressionLimit = logCompressionLimit
+ if mergeRequests is not None:
+ self.botmaster.mergeRequests = mergeRequests
+
+ # self.slaves: Disconnect any that were attached and removed from the
+ # list. Update self.checker with the new list of passwords, including
+ # debug/change/status.
+ d.addCallback(lambda res: self.loadConfig_Slaves(slaves))
+
+ # self.debugPassword
+ if debugPassword:
+ self.checker.addUser("debug", debugPassword)
+ self.debugPassword = debugPassword
+
+ # self.manhole
+ if manhole != self.manhole:
+ # changing
+ if self.manhole:
+ # disownServiceParent may return a Deferred
+ d.addCallback(lambda res: self.manhole.disownServiceParent())
+ def _remove(res):
+ self.manhole = None
+ return res
+ d.addCallback(_remove)
+ if manhole:
+ def _add(res):
+ self.manhole = manhole
+ manhole.setServiceParent(self)
+ d.addCallback(_add)
+
+ # add/remove self.botmaster.builders to match builders. The
+ # botmaster will handle startup/shutdown issues.
+ d.addCallback(lambda res: self.loadConfig_Builders(builders))
+
+ d.addCallback(lambda res: self.loadConfig_status(status))
+
+ # Schedulers are added after Builders in case they start right away
+ d.addCallback(lambda res: self.loadConfig_Schedulers(schedulers))
+ # and Sources go after Schedulers for the same reason
+ d.addCallback(lambda res: self.loadConfig_Sources(change_sources))
+
+ # self.slavePort
+ if self.slavePortnum != slavePortnum:
+ if self.slavePort:
+ def closeSlavePort(res):
+ d1 = self.slavePort.disownServiceParent()
+ self.slavePort = None
+ return d1
+ d.addCallback(closeSlavePort)
+ if slavePortnum is not None:
+ def openSlavePort(res):
+ self.slavePort = strports.service(slavePortnum,
+ self.slaveFactory)
+ self.slavePort.setServiceParent(self)
+ d.addCallback(openSlavePort)
+ log.msg("BuildMaster listening on port %s" % slavePortnum)
+ self.slavePortnum = slavePortnum
+
+ log.msg("configuration update started")
+ def _done(res):
+ self.readConfig = True
+ log.msg("configuration update complete")
+ d.addCallback(_done)
+ d.addCallback(lambda res: self.botmaster.maybeStartAllBuilds())
+ return d
+
+ def loadConfig_Slaves(self, new_slaves):
+ # set up the Checker with the names and passwords of all valid bots
+ self.checker.users = {} # violates abstraction, oh well
+ for s in new_slaves:
+ self.checker.addUser(s.slavename, s.password)
+ self.checker.addUser("change", "changepw")
+ # let the BotMaster take care of the rest
+ return self.botmaster.loadConfig_Slaves(new_slaves)
+
+ def loadConfig_Sources(self, sources):
+ if not sources:
+ log.msg("warning: no ChangeSources specified in c['change_source']")
+ # shut down any that were removed, start any that were added
+ deleted_sources = [s for s in self.change_svc if s not in sources]
+ added_sources = [s for s in sources if s not in self.change_svc]
+ dl = [self.change_svc.removeSource(s) for s in deleted_sources]
+ def addNewOnes(res):
+ [self.change_svc.addSource(s) for s in added_sources]
+ d = defer.DeferredList(dl, fireOnOneErrback=1, consumeErrors=0)
+ d.addCallback(addNewOnes)
+ return d
+
+ def allSchedulers(self):
+ return [child for child in self
+ if interfaces.IScheduler.providedBy(child)]
+
+
+ def loadConfig_Schedulers(self, newschedulers):
+ oldschedulers = self.allSchedulers()
+ removed = [s for s in oldschedulers if s not in newschedulers]
+ added = [s for s in newschedulers if s not in oldschedulers]
+ dl = [defer.maybeDeferred(s.disownServiceParent) for s in removed]
+ def addNewOnes(res):
+ log.msg("adding %d new schedulers, removed %d" %
+ (len(added), len(dl)))
+ for s in added:
+ s.setServiceParent(self)
+ d = defer.DeferredList(dl, fireOnOneErrback=1)
+ d.addCallback(addNewOnes)
+ if removed or added:
+ # notify Downstream schedulers to potentially pick up
+ # new schedulers now that we have removed and added some
+ def updateDownstreams(res):
+ log.msg("notifying downstream schedulers of changes")
+ for s in newschedulers:
+ if interfaces.IDownstreamScheduler.providedBy(s):
+ s.checkUpstreamScheduler()
+ d.addCallback(updateDownstreams)
+ return d
+
+ def loadConfig_Builders(self, newBuilderData):
+ somethingChanged = False
+ newList = {}
+ newBuilderNames = []
+ allBuilders = self.botmaster.builders.copy()
+ for data in newBuilderData:
+ name = data['name']
+ newList[name] = data
+ newBuilderNames.append(name)
+
+ # identify all that were removed
+ for oldname in self.botmaster.getBuildernames():
+ if oldname not in newList:
+ log.msg("removing old builder %s" % oldname)
+ del allBuilders[oldname]
+ somethingChanged = True
+ # announce the change
+ self.status.builderRemoved(oldname)
+
+ # everything in newList is either unchanged, changed, or new
+ for name, data in newList.items():
+ old = self.botmaster.builders.get(name)
+ basedir = data['builddir'] # used on both master and slave
+ #name, slave, builddir, factory = data
+ if not old: # new
+ # category added after 0.6.2
+ category = data.get('category', None)
+ log.msg("adding new builder %s for category %s" %
+ (name, category))
+ statusbag = self.status.builderAdded(name, basedir, category)
+ builder = Builder(data, statusbag)
+ allBuilders[name] = builder
+ somethingChanged = True
+ elif old.compareToSetup(data):
+ # changed: try to minimize the disruption and only modify the
+ # pieces that really changed
+ diffs = old.compareToSetup(data)
+ log.msg("updating builder %s: %s" % (name, "\n".join(diffs)))
+
+ statusbag = old.builder_status
+ statusbag.saveYourself() # seems like a good idea
+ # TODO: if the basedir was changed, we probably need to make
+ # a new statusbag
+ new_builder = Builder(data, statusbag)
+ new_builder.consumeTheSoulOfYourPredecessor(old)
+ # that migrates any retained slavebuilders too
+
+ # point out that the builder was updated. On the Waterfall,
+ # this will appear just after any currently-running builds.
+ statusbag.addPointEvent(["config", "updated"])
+
+ allBuilders[name] = new_builder
+ somethingChanged = True
+ else:
+ # unchanged: leave it alone
+ log.msg("builder %s is unchanged" % name)
+ pass
+
+ if somethingChanged:
+ sortedAllBuilders = [allBuilders[name] for name in newBuilderNames]
+ d = self.botmaster.setBuilders(sortedAllBuilders)
+ return d
+ return None
+
+ def loadConfig_status(self, status):
+ dl = []
+
+ # remove old ones
+ for s in self.statusTargets[:]:
+ if not s in status:
+ log.msg("removing IStatusReceiver", s)
+ d = defer.maybeDeferred(s.disownServiceParent)
+ dl.append(d)
+ self.statusTargets.remove(s)
+ # after those are finished going away, add new ones
+ def addNewOnes(res):
+ for s in status:
+ if not s in self.statusTargets:
+ log.msg("adding IStatusReceiver", s)
+ s.setServiceParent(self)
+ self.statusTargets.append(s)
+ d = defer.DeferredList(dl, fireOnOneErrback=1)
+ d.addCallback(addNewOnes)
+ return d
+
+
+ def addChange(self, change):
+ for s in self.allSchedulers():
+ s.addChange(change)
+
+ def submitBuildSet(self, bs):
+ # determine the set of Builders to use
+ builders = []
+ for name in bs.builderNames:
+ b = self.botmaster.builders.get(name)
+ if b:
+ if b not in builders:
+ builders.append(b)
+ continue
+ # TODO: add aliases like 'all'
+ raise KeyError("no such builder named '%s'" % name)
+
+ # now tell the BuildSet to create BuildRequests for all those
+ # Builders and submit them
+ bs.start(builders)
+ self.status.buildsetSubmitted(bs.status)
+
+
+class Control:
+ implements(interfaces.IControl)
+
+ def __init__(self, master):
+ self.master = master
+
+ def addChange(self, change):
+ self.master.change_svc.addChange(change)
+
+ def submitBuildSet(self, bs):
+ self.master.submitBuildSet(bs)
+
+ def getBuilder(self, name):
+ b = self.master.botmaster.builders[name]
+ return interfaces.IBuilderControl(b)
+
+components.registerAdapter(Control, BuildMaster, interfaces.IControl)
+
+# so anybody who can get a handle on the BuildMaster can cause a build with:
+# IControl(master).getBuilder("full-2.3").requestBuild(buildrequest)
diff --git a/buildbot/buildbot/pbutil.py b/buildbot/buildbot/pbutil.py
new file mode 100644
index 0000000..bc85a01
--- /dev/null
+++ b/buildbot/buildbot/pbutil.py
@@ -0,0 +1,147 @@
+
+"""Base classes handy for use with PB clients.
+"""
+
+from twisted.spread import pb
+
+from twisted.spread.pb import PBClientFactory
+from twisted.internet import protocol
+from twisted.python import log
+
+class NewCredPerspective(pb.Avatar):
+ def attached(self, mind):
+ return self
+ def detached(self, mind):
+ pass
+
+class ReconnectingPBClientFactory(PBClientFactory,
+ protocol.ReconnectingClientFactory):
+ """Reconnecting client factory for PB brokers.
+
+ Like PBClientFactory, but if the connection fails or is lost, the factory
+ will attempt to reconnect.
+
+ Instead of using f.getRootObject (which gives a Deferred that can only
+ be fired once), override the gotRootObject method.
+
+ Instead of using the newcred f.login (which is also one-shot), call
+ f.startLogin() with the credentials and client, and override the
+ gotPerspective method.
+
+ Instead of using the oldcred f.getPerspective (also one-shot), call
+ f.startGettingPerspective() with the same arguments, and override
+ gotPerspective.
+
+ gotRootObject and gotPerspective will be called each time the object is
+ received (once per successful connection attempt). You will probably want
+ to use obj.notifyOnDisconnect to find out when the connection is lost.
+
+ If an authorization error occurs, failedToGetPerspective() will be
+ invoked.
+
+ To use me, subclass, then hand an instance to a connector (like
+ TCPClient).
+ """
+
+ def __init__(self):
+ PBClientFactory.__init__(self)
+ self._doingLogin = False
+ self._doingGetPerspective = False
+
+ def clientConnectionFailed(self, connector, reason):
+ PBClientFactory.clientConnectionFailed(self, connector, reason)
+ # Twisted-1.3 erroneously abandons the connection on non-UserErrors.
+ # To avoid this bug, don't upcall, and implement the correct version
+ # of the method here.
+ if self.continueTrying:
+ self.connector = connector
+ self.retry()
+
+ def clientConnectionLost(self, connector, reason):
+ PBClientFactory.clientConnectionLost(self, connector, reason,
+ reconnecting=True)
+ RCF = protocol.ReconnectingClientFactory
+ RCF.clientConnectionLost(self, connector, reason)
+
+ def clientConnectionMade(self, broker):
+ self.resetDelay()
+ PBClientFactory.clientConnectionMade(self, broker)
+ if self._doingLogin:
+ self.doLogin(self._root)
+ if self._doingGetPerspective:
+ self.doGetPerspective(self._root)
+ self.gotRootObject(self._root)
+
+ def __getstate__(self):
+ # this should get folded into ReconnectingClientFactory
+ d = self.__dict__.copy()
+ d['connector'] = None
+ d['_callID'] = None
+ return d
+
+ # oldcred methods
+
+ def getPerspective(self, *args):
+ raise RuntimeError, "getPerspective is one-shot: use startGettingPerspective instead"
+
+ def startGettingPerspective(self, username, password, serviceName,
+ perspectiveName=None, client=None):
+ self._doingGetPerspective = True
+ if perspectiveName == None:
+ perspectiveName = username
+ self._oldcredArgs = (username, password, serviceName,
+ perspectiveName, client)
+
+ def doGetPerspective(self, root):
+ # oldcred getPerspective()
+ (username, password,
+ serviceName, perspectiveName, client) = self._oldcredArgs
+ d = self._cbAuthIdentity(root, username, password)
+ d.addCallback(self._cbGetPerspective,
+ serviceName, perspectiveName, client)
+ d.addCallbacks(self.gotPerspective, self.failedToGetPerspective)
+
+
+ # newcred methods
+
+ def login(self, *args):
+ raise RuntimeError, "login is one-shot: use startLogin instead"
+
+ def startLogin(self, credentials, client=None):
+ self._credentials = credentials
+ self._client = client
+ self._doingLogin = True
+
+ def doLogin(self, root):
+ # newcred login()
+ d = self._cbSendUsername(root, self._credentials.username,
+ self._credentials.password, self._client)
+ d.addCallbacks(self.gotPerspective, self.failedToGetPerspective)
+
+
+ # methods to override
+
+ def gotPerspective(self, perspective):
+ """The remote avatar or perspective (obtained each time this factory
+ connects) is now available."""
+ pass
+
+ def gotRootObject(self, root):
+ """The remote root object (obtained each time this factory connects)
+ is now available. This method will be called each time the connection
+ is established and the object reference is retrieved."""
+ pass
+
+ def failedToGetPerspective(self, why):
+ """The login process failed, most likely because of an authorization
+ failure (bad password), but it is also possible that we lost the new
+ connection before we managed to send our credentials.
+ """
+ log.msg("ReconnectingPBClientFactory.failedToGetPerspective")
+ if why.check(pb.PBConnectionLost):
+ log.msg("we lost the brand-new connection")
+ # retrying might help here, let clientConnectionLost decide
+ return
+ # probably authorization
+ self.stopTrying() # logging in harder won't help
+ log.err(why)
diff --git a/buildbot/buildbot/process/__init__.py b/buildbot/buildbot/process/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/buildbot/buildbot/process/__init__.py
diff --git a/buildbot/buildbot/process/base.py b/buildbot/buildbot/process/base.py
new file mode 100644
index 0000000..8eaa940
--- /dev/null
+++ b/buildbot/buildbot/process/base.py
@@ -0,0 +1,627 @@
+# -*- test-case-name: buildbot.test.test_step -*-
+
+import types
+
+from zope.interface import implements
+from twisted.python import log
+from twisted.python.failure import Failure
+from twisted.internet import reactor, defer, error
+
+from buildbot import interfaces, locks
+from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE, EXCEPTION
+from buildbot.status.builder import Results, BuildRequestStatus
+from buildbot.status.progress import BuildProgress
+from buildbot.process.properties import Properties
+
+class BuildRequest:
+ """I represent a request to a specific Builder to run a single build.
+
+ I have a SourceStamp which specifies what sources I will build. This may
+ specify a specific revision of the source tree (so source.branch,
+ source.revision, and source.patch are used). The .patch attribute is
+ either None or a tuple of (patchlevel, diff), consisting of a number to
+ use in 'patch -pN', and a unified-format context diff.
+
+ Alternatively, the SourceStamp may specify a set of Changes to be built,
+ contained in source.changes. In this case, I may be mergeable with other
+ BuildRequests on the same branch.
+
+ I may be part of a BuildSet, in which case I will report status results
+ to it.
+
+ I am paired with a BuildRequestStatus object, to which I feed status
+ information.
+
+ @type source: a L{buildbot.sourcestamp.SourceStamp} instance.
+ @ivar source: the source code that this BuildRequest uses
+
+ @type reason: string
+ @ivar reason: the reason this Build is being requested. Schedulers
+ provide this, but for forced builds the user requesting the
+ build will provide a string.
+
+ @type properties: Properties object
+ @ivar properties: properties that should be applied to this build
+ 'owner' property is used by Build objects to collect
+ the list returned by getInterestedUsers
+
+ @ivar status: the IBuildStatus object which tracks our status
+
+ @ivar submittedAt: a timestamp (seconds since epoch) when this request
+ was submitted to the Builder. This is used by the CVS
+ step to compute a checkout timestamp, as well as the
+ master to prioritize build requests from oldest to
+ newest.
+ """
+
+ source = None
+ builder = None
+ startCount = 0 # how many times we have tried to start this build
+ submittedAt = None
+
+ implements(interfaces.IBuildRequestControl)
+
+ def __init__(self, reason, source, builderName, properties=None):
+ assert interfaces.ISourceStamp(source, None)
+ self.reason = reason
+ self.source = source
+
+ self.properties = Properties()
+ if properties:
+ self.properties.updateFromProperties(properties)
+
+ self.start_watchers = []
+ self.finish_watchers = []
+ self.status = BuildRequestStatus(source, builderName)
+
+ def canBeMergedWith(self, other):
+ return self.source.canBeMergedWith(other.source)
+
+ def mergeWith(self, others):
+ return self.source.mergeWith([o.source for o in others])
+
+ def mergeReasons(self, others):
+ """Return a reason for the merged build request."""
+ reasons = []
+ for req in [self] + others:
+ if req.reason and req.reason not in reasons:
+ reasons.append(req.reason)
+ return ", ".join(reasons)
+
+ def waitUntilFinished(self):
+ """Get a Deferred that will fire (with a
+ L{buildbot.interfaces.IBuildStatus} instance) when the build
+ finishes."""
+ d = defer.Deferred()
+ self.finish_watchers.append(d)
+ return d
+
+ # these are called by the Builder
+
+ def requestSubmitted(self, builder):
+ # the request has been placed on the queue
+ self.builder = builder
+
+ def buildStarted(self, build, buildstatus):
+ """This is called by the Builder when a Build has been started in the
+ hopes of satisfying this BuildRequest. It may be called multiple
+ times, since interrupted builds and lost buildslaves may force
+ multiple Builds to be run until the fate of the BuildRequest is known
+ for certain."""
+ for o in self.start_watchers[:]:
+ # these observers get the IBuildControl
+ o(build)
+ # while these get the IBuildStatus
+ self.status.buildStarted(buildstatus)
+
+ def finished(self, buildstatus):
+ """This is called by the Builder when the BuildRequest has been
+ retired. This happens when its Build has either succeeded (yay!) or
+ failed (boo!). TODO: If it is halted due to an exception (oops!), or
+ some other retryable error, C{finished} will not be called yet."""
+
+ for w in self.finish_watchers:
+ w.callback(buildstatus)
+ self.finish_watchers = []
+
+ # IBuildRequestControl
+
+ def subscribe(self, observer):
+ self.start_watchers.append(observer)
+ def unsubscribe(self, observer):
+ self.start_watchers.remove(observer)
+
+ def cancel(self):
+ """Cancel this request. This can only be successful if the Build has
+ not yet been started.
+
+ @return: a boolean indicating if the cancel was successful."""
+ if self.builder:
+ return self.builder.cancelBuildRequest(self)
+ return False
+
+ def setSubmitTime(self, t):
+ self.submittedAt = t
+ self.status.setSubmitTime(t)
+
+ def getSubmitTime(self):
+ return self.submittedAt
+
+
+class Build:
+ """I represent a single build by a single slave. Specialized Builders can
+ use subclasses of Build to hold status information unique to those build
+ processes.
+
+ I control B{how} the build proceeds. The actual build is broken up into a
+ series of steps, saved in the .buildSteps[] array as a list of
+ L{buildbot.process.step.BuildStep} objects. Each step is a single remote
+ command, possibly a shell command.
+
+ During the build, I put status information into my C{BuildStatus}
+ gatherer.
+
+ After the build, I go away.
+
+ I can be used by a factory by setting buildClass on
+ L{buildbot.process.factory.BuildFactory}
+
+ @ivar requests: the list of L{BuildRequest}s that triggered me
+ @ivar build_status: the L{buildbot.status.builder.BuildStatus} that
+ collects our status
+ """
+
+ implements(interfaces.IBuildControl)
+
+ workdir = "build"
+ build_status = None
+ reason = "changes"
+ finished = False
+ results = None
+
+ def __init__(self, requests):
+ self.requests = requests
+ for req in self.requests:
+ req.startCount += 1
+ self.locks = []
+ # build a source stamp
+ self.source = requests[0].mergeWith(requests[1:])
+ self.reason = requests[0].mergeReasons(requests[1:])
+
+ self.progress = None
+ self.currentStep = None
+ self.slaveEnvironment = {}
+
+ self.terminate = False
+
+ def setBuilder(self, builder):
+ """
+ Set the given builder as our builder.
+
+ @type builder: L{buildbot.process.builder.Builder}
+ """
+ self.builder = builder
+
+ def setLocks(self, locks):
+ self.locks = locks
+
+ def setSlaveEnvironment(self, env):
+ self.slaveEnvironment = env
+
+ def getSourceStamp(self):
+ return self.source
+
+ def setProperty(self, propname, value, source):
+ """Set a property on this build. This may only be called after the
+ build has started, so that it has a BuildStatus object where the
+ properties can live."""
+ self.build_status.setProperty(propname, value, source)
+
+ def getProperties(self):
+ return self.build_status.getProperties()
+
+ def getProperty(self, propname):
+ return self.build_status.getProperty(propname)
+
+ def allChanges(self):
+ return self.source.changes
+
+ def allFiles(self):
+ # return a list of all source files that were changed
+ files = []
+ havedirs = 0
+ for c in self.allChanges():
+ for f in c.files:
+ files.append(f)
+ if c.isdir:
+ havedirs = 1
+ return files
+
+ def __repr__(self):
+ return "<Build %s>" % (self.builder.name,)
+
+ def __getstate__(self):
+ d = self.__dict__.copy()
+ if d.has_key('remote'):
+ del d['remote']
+ return d
+
+ def blamelist(self):
+ blamelist = []
+ for c in self.allChanges():
+ if c.who not in blamelist:
+ blamelist.append(c.who)
+ blamelist.sort()
+ return blamelist
+
+ def changesText(self):
+ changetext = ""
+ for c in self.allChanges():
+ changetext += "-" * 60 + "\n\n" + c.asText() + "\n"
+ # consider sorting these by number
+ return changetext
+
+ def setStepFactories(self, step_factories):
+ """Set a list of 'step factories', which are tuples of (class,
+ kwargs), where 'class' is generally a subclass of step.BuildStep .
+ These are used to create the Steps themselves when the Build starts
+ (as opposed to when it is first created). By creating the steps
+ later, their __init__ method will have access to things like
+ build.allFiles() ."""
+ self.stepFactories = list(step_factories)
+
+
+
+ useProgress = True
+
+ def getSlaveCommandVersion(self, command, oldversion=None):
+ return self.slavebuilder.getSlaveCommandVersion(command, oldversion)
+ def getSlaveName(self):
+ return self.slavebuilder.slave.slavename
+
+ def setupProperties(self):
+ props = self.getProperties()
+
+ # start with global properties from the configuration
+ buildmaster = self.builder.botmaster.parent
+ props.updateFromProperties(buildmaster.properties)
+
+ # get any properties from requests (this is the path through
+ # which schedulers will send us properties)
+ for rq in self.requests:
+ props.updateFromProperties(rq.properties)
+
+ # now set some properties of our own, corresponding to the
+ # build itself
+ props.setProperty("buildername", self.builder.name, "Build")
+ props.setProperty("buildnumber", self.build_status.number, "Build")
+ props.setProperty("branch", self.source.branch, "Build")
+ props.setProperty("revision", self.source.revision, "Build")
+
+ def setupSlaveBuilder(self, slavebuilder):
+ self.slavebuilder = slavebuilder
+
+ # navigate our way back to the L{buildbot.buildslave.BuildSlave}
+ # object that came from the config, and get its properties
+ buildslave_properties = slavebuilder.slave.properties
+ self.getProperties().updateFromProperties(buildslave_properties)
+
+ self.slavename = slavebuilder.slave.slavename
+ self.build_status.setSlavename(self.slavename)
+
+ def startBuild(self, build_status, expectations, slavebuilder):
+ """This method sets up the build, then starts it by invoking the
+ first Step. It returns a Deferred which will fire when the build
+ finishes. This Deferred is guaranteed to never errback."""
+
+ # we are taking responsibility for watching the connection to the
+ # remote. This responsibility was held by the Builder until our
+ # startBuild was called, and will not return to them until we fire
+ # the Deferred returned by this method.
+
+ log.msg("%s.startBuild" % self)
+ self.build_status = build_status
+ # now that we have a build_status, we can set properties
+ self.setupProperties()
+ self.setupSlaveBuilder(slavebuilder)
+ slavebuilder.slave.updateSlaveStatus(buildStarted=build_status)
+
+ # convert all locks into their real forms
+ lock_list = []
+ for access in self.locks:
+ if not isinstance(access, locks.LockAccess):
+ # Buildbot 0.7.7 compatibility: user did not specify access
+ access = access.defaultAccess()
+ lock = self.builder.botmaster.getLockByID(access.lockid)
+ lock_list.append((lock, access))
+ self.locks = lock_list
+ # then narrow SlaveLocks down to the right slave
+ self.locks = [(l.getLock(self.slavebuilder), la)
+ for l, la in self.locks]
+ self.remote = slavebuilder.remote
+ self.remote.notifyOnDisconnect(self.lostRemote)
+ d = self.deferred = defer.Deferred()
+ def _release_slave(res, slave, bs):
+ self.slavebuilder.buildFinished()
+ slave.updateSlaveStatus(buildFinished=bs)
+ return res
+ d.addCallback(_release_slave, self.slavebuilder.slave, build_status)
+
+ try:
+ self.setupBuild(expectations) # create .steps
+ except:
+ # the build hasn't started yet, so log the exception as a point
+ # event instead of flunking the build. TODO: associate this
+ # failure with the build instead. this involves doing
+ # self.build_status.buildStarted() from within the exception
+ # handler
+ log.msg("Build.setupBuild failed")
+ log.err(Failure())
+ self.builder.builder_status.addPointEvent(["setupBuild",
+ "exception"])
+ self.finished = True
+ self.results = FAILURE
+ self.deferred = None
+ d.callback(self)
+ return d
+
+ self.acquireLocks().addCallback(self._startBuild_2)
+ return d
+
+ def acquireLocks(self, res=None):
+ log.msg("acquireLocks(step %s, locks %s)" % (self, self.locks))
+ if not self.locks:
+ return defer.succeed(None)
+ for lock, access in self.locks:
+ if not lock.isAvailable(access):
+ log.msg("Build %s waiting for lock %s" % (self, lock))
+ d = lock.waitUntilMaybeAvailable(self, access)
+ d.addCallback(self.acquireLocks)
+ return d
+ # all locks are available, claim them all
+ for lock, access in self.locks:
+ lock.claim(self, access)
+ return defer.succeed(None)
+
+ def _startBuild_2(self, res):
+ self.build_status.buildStarted(self)
+ self.startNextStep()
+
+ def setupBuild(self, expectations):
+ # create the actual BuildSteps. If there are any name collisions, we
+ # add a count to the loser until it is unique.
+ self.steps = []
+ self.stepStatuses = {}
+ stepnames = []
+ sps = []
+
+ for factory, args in self.stepFactories:
+ args = args.copy()
+ try:
+ step = factory(**args)
+ except:
+ log.msg("error while creating step, factory=%s, args=%s"
+ % (factory, args))
+ raise
+ step.setBuild(self)
+ step.setBuildSlave(self.slavebuilder.slave)
+ step.setDefaultWorkdir(self.workdir)
+ name = step.name
+ count = 1
+ while name in stepnames and count < 1000:
+ count += 1
+ name = step.name + "_%d" % count
+ if count == 1000:
+ raise RuntimeError("reached 1000 steps with base name" + \
+ "%s, bailing" % step.name)
+ elif name in stepnames:
+ raise RuntimeError("duplicate step '%s'" % step.name)
+ step.name = name
+ stepnames.append(name)
+ self.steps.append(step)
+
+ # tell the BuildStatus about the step. This will create a
+ # BuildStepStatus and bind it to the Step.
+ step_status = self.build_status.addStepWithName(name)
+ step.setStepStatus(step_status)
+
+ sp = None
+ if self.useProgress:
+ # XXX: maybe bail if step.progressMetrics is empty? or skip
+ # progress for that one step (i.e. "it is fast"), or have a
+ # separate "variable" flag that makes us bail on progress
+ # tracking
+ sp = step.setupProgress()
+ if sp:
+ sps.append(sp)
+
+ # Create a buildbot.status.progress.BuildProgress object. This is
+ # called once at startup to figure out how to build the long-term
+ # Expectations object, and again at the start of each build to get a
+ # fresh BuildProgress object to track progress for that individual
+ # build. TODO: revisit at-startup call
+
+ if self.useProgress:
+ self.progress = BuildProgress(sps)
+ if self.progress and expectations:
+ self.progress.setExpectationsFrom(expectations)
+
+ # we are now ready to set up our BuildStatus.
+ self.build_status.setSourceStamp(self.source)
+ self.build_status.setRequests([req.status for req in self.requests])
+ self.build_status.setReason(self.reason)
+ self.build_status.setBlamelist(self.blamelist())
+ self.build_status.setProgress(self.progress)
+
+ # gather owners from build requests
+ owners = [r.properties['owner'] for r in self.requests
+ if r.properties.has_key('owner')]
+ if owners: self.setProperty('owners', owners, self.reason)
+
+ self.results = [] # list of FAILURE, SUCCESS, WARNINGS, SKIPPED
+ self.result = SUCCESS # overall result, may downgrade after each step
+ self.text = [] # list of text string lists (text2)
+
+ def getNextStep(self):
+ """This method is called to obtain the next BuildStep for this build.
+ When it returns None (or raises a StopIteration exception), the build
+ is complete."""
+ if not self.steps:
+ return None
+ if self.terminate:
+ while True:
+ s = self.steps.pop(0)
+ if s.alwaysRun:
+ return s
+ if not self.steps:
+ return None
+ else:
+ return self.steps.pop(0)
+
+ def startNextStep(self):
+ try:
+ s = self.getNextStep()
+ except StopIteration:
+ s = None
+ if not s:
+ return self.allStepsDone()
+ self.currentStep = s
+ d = defer.maybeDeferred(s.startStep, self.remote)
+ d.addCallback(self._stepDone, s)
+ d.addErrback(self.buildException)
+
+ def _stepDone(self, results, step):
+ self.currentStep = None
+ if self.finished:
+ return # build was interrupted, don't keep building
+ terminate = self.stepDone(results, step) # interpret/merge results
+ if terminate:
+ self.terminate = True
+ return self.startNextStep()
+
+ def stepDone(self, result, step):
+ """This method is called when the BuildStep completes. It is passed a
+ status object from the BuildStep and is responsible for merging the
+ Step's results into those of the overall Build."""
+
+ terminate = False
+ text = None
+ if type(result) == types.TupleType:
+ result, text = result
+ assert type(result) == type(SUCCESS)
+ log.msg(" step '%s' complete: %s" % (step.name, Results[result]))
+ self.results.append(result)
+ if text:
+ self.text.extend(text)
+ if not self.remote:
+ terminate = True
+ if result == FAILURE:
+ if step.warnOnFailure:
+ if self.result != FAILURE:
+ self.result = WARNINGS
+ if step.flunkOnFailure:
+ self.result = FAILURE
+ if step.haltOnFailure:
+ terminate = True
+ elif result == WARNINGS:
+ if step.warnOnWarnings:
+ if self.result != FAILURE:
+ self.result = WARNINGS
+ if step.flunkOnWarnings:
+ self.result = FAILURE
+ elif result == EXCEPTION:
+ self.result = EXCEPTION
+ terminate = True
+ return terminate
+
+ def lostRemote(self, remote=None):
+ # the slave went away. There are several possible reasons for this,
+ # and they aren't necessarily fatal. For now, kill the build, but
+ # TODO: see if we can resume the build when it reconnects.
+ log.msg("%s.lostRemote" % self)
+ self.remote = None
+ if self.currentStep:
+ # this should cause the step to finish.
+ log.msg(" stopping currentStep", self.currentStep)
+ self.currentStep.interrupt(Failure(error.ConnectionLost()))
+
+ def stopBuild(self, reason="<no reason given>"):
+ # the idea here is to let the user cancel a build because, e.g.,
+ # they realized they committed a bug and they don't want to waste
+ # the time building something that they know will fail. Another
+ # reason might be to abandon a stuck build. We want to mark the
+ # build as failed quickly rather than waiting for the slave's
+ # timeout to kill it on its own.
+
+ log.msg(" %s: stopping build: %s" % (self, reason))
+ if self.finished:
+ return
+ # TODO: include 'reason' in this point event
+ self.builder.builder_status.addPointEvent(['interrupt'])
+ self.currentStep.interrupt(reason)
+ if 0:
+ # TODO: maybe let its deferred do buildFinished
+ if self.currentStep and self.currentStep.progress:
+ # XXX: really .fail or something
+ self.currentStep.progress.finish()
+ text = ["stopped", reason]
+ self.buildFinished(text, FAILURE)
+
+ def allStepsDone(self):
+ if self.result == FAILURE:
+ text = ["failed"]
+ elif self.result == WARNINGS:
+ text = ["warnings"]
+ elif self.result == EXCEPTION:
+ text = ["exception"]
+ else:
+ text = ["build", "successful"]
+ text.extend(self.text)
+ return self.buildFinished(text, self.result)
+
+ def buildException(self, why):
+ log.msg("%s.buildException" % self)
+ log.err(why)
+ self.buildFinished(["build", "exception"], FAILURE)
+
+ def buildFinished(self, text, results):
+ """This method must be called when the last Step has completed. It
+ marks the Build as complete and returns the Builder to the 'idle'
+ state.
+
+ It takes two arguments which describe the overall build status:
+ text, results. 'results' is one of SUCCESS, WARNINGS, or FAILURE.
+
+ If 'results' is SUCCESS or WARNINGS, we will permit any dependent
+ builds to start. If it is 'FAILURE', those builds will be
+ abandoned."""
+
+ self.finished = True
+ if self.remote:
+ self.remote.dontNotifyOnDisconnect(self.lostRemote)
+ self.results = results
+
+ log.msg(" %s: build finished" % self)
+ self.build_status.setText(text)
+ self.build_status.setResults(results)
+ self.build_status.buildFinished()
+ if self.progress and results == SUCCESS:
+ # XXX: also test a 'timing consistent' flag?
+ log.msg(" setting expectations for next time")
+ self.builder.setExpectations(self.progress)
+ reactor.callLater(0, self.releaseLocks)
+ self.deferred.callback(self)
+ self.deferred = None
+
+ def releaseLocks(self):
+ log.msg("releaseLocks(%s): %s" % (self, self.locks))
+ for lock, access in self.locks:
+ lock.release(self, access)
+
+ # IBuildControl
+
+ def getStatus(self):
+ return self.build_status
+
+ # stopBuild is defined earlier
+
diff --git a/buildbot/buildbot/process/builder.py b/buildbot/buildbot/process/builder.py
new file mode 100644
index 0000000..cb26ccb
--- /dev/null
+++ b/buildbot/buildbot/process/builder.py
@@ -0,0 +1,874 @@
+
+import random, weakref
+from zope.interface import implements
+from twisted.python import log, components
+from twisted.spread import pb
+from twisted.internet import reactor, defer
+
+from buildbot import interfaces
+from buildbot.status.progress import Expectations
+from buildbot.util import now
+from buildbot.process import base
+
+(ATTACHING, # slave attached, still checking hostinfo/etc
+ IDLE, # idle, available for use
+ PINGING, # build about to start, making sure it is still alive
+ BUILDING, # build is running
+ LATENT, # latent slave is not substantiated; similar to idle
+ ) = range(5)
+
+
+class AbstractSlaveBuilder(pb.Referenceable):
+ """I am the master-side representative for one of the
+ L{buildbot.slave.bot.SlaveBuilder} objects that lives in a remote
+ buildbot. When a remote builder connects, I query it for command versions
+ and then make it available to any Builds that are ready to run. """
+
+ def __init__(self):
+ self.ping_watchers = []
+ self.state = None # set in subclass
+ self.remote = None
+ self.slave = None
+ self.builder_name = None
+
+ def __repr__(self):
+ r = ["<", self.__class__.__name__]
+ if self.builder_name:
+ r.extend([" builder=", self.builder_name])
+ if self.slave:
+ r.extend([" slave=", self.slave.slavename])
+ r.append(">")
+ return ''.join(r)
+
+ def setBuilder(self, b):
+ self.builder = b
+ self.builder_name = b.name
+
+ def getSlaveCommandVersion(self, command, oldversion=None):
+ if self.remoteCommands is None:
+ # the slave is 0.5.0 or earlier
+ return oldversion
+ return self.remoteCommands.get(command)
+
+ def isAvailable(self):
+ # if this SlaveBuilder is busy, then it's definitely not available
+ if self.isBusy():
+ return False
+
+ # otherwise, check in with the BuildSlave
+ if self.slave:
+ return self.slave.canStartBuild()
+
+ # no slave? not very available.
+ return False
+
+ def isBusy(self):
+ return self.state not in (IDLE, LATENT)
+
+ def buildStarted(self):
+ self.state = BUILDING
+
+ def buildFinished(self):
+ self.state = IDLE
+ reactor.callLater(0, self.builder.botmaster.maybeStartAllBuilds)
+
+ def attached(self, slave, remote, commands):
+ """
+ @type slave: L{buildbot.buildslave.BuildSlave}
+ @param slave: the BuildSlave that represents the buildslave as a
+ whole
+ @type remote: L{twisted.spread.pb.RemoteReference}
+ @param remote: a reference to the L{buildbot.slave.bot.SlaveBuilder}
+ @type commands: dict: string -> string, or None
+ @param commands: provides the slave's version of each RemoteCommand
+ """
+ self.state = ATTACHING
+ self.remote = remote
+ self.remoteCommands = commands # maps command name to version
+ if self.slave is None:
+ self.slave = slave
+ self.slave.addSlaveBuilder(self)
+ else:
+ assert self.slave == slave
+ log.msg("Buildslave %s attached to %s" % (slave.slavename,
+ self.builder_name))
+ d = self.remote.callRemote("setMaster", self)
+ d.addErrback(self._attachFailure, "Builder.setMaster")
+ d.addCallback(self._attached2)
+ return d
+
+ def _attached2(self, res):
+ d = self.remote.callRemote("print", "attached")
+ d.addErrback(self._attachFailure, "Builder.print 'attached'")
+ d.addCallback(self._attached3)
+ return d
+
+ def _attached3(self, res):
+ # now we say they're really attached
+ self.state = IDLE
+ return self
+
+ def _attachFailure(self, why, where):
+ assert isinstance(where, str)
+ log.msg(where)
+ log.err(why)
+ return why
+
+ def ping(self, timeout, status=None):
+ """Ping the slave to make sure it is still there. Returns a Deferred
+ that fires with True if it is.
+
+ @param status: if you point this at a BuilderStatus, a 'pinging'
+ event will be pushed.
+ """
+ oldstate = self.state
+ self.state = PINGING
+ newping = not self.ping_watchers
+ d = defer.Deferred()
+ self.ping_watchers.append(d)
+ if newping:
+ if status:
+ event = status.addEvent(["pinging"])
+ d2 = defer.Deferred()
+ d2.addCallback(self._pong_status, event)
+ self.ping_watchers.insert(0, d2)
+ # I think it will make the tests run smoother if the status
+ # is updated before the ping completes
+ Ping().ping(self.remote, timeout).addCallback(self._pong)
+
+ def reset_state(res):
+ if self.state == PINGING:
+ self.state = oldstate
+ return res
+ d.addCallback(reset_state)
+ return d
+
+ def _pong(self, res):
+ watchers, self.ping_watchers = self.ping_watchers, []
+ for d in watchers:
+ d.callback(res)
+
+ def _pong_status(self, res, event):
+ if res:
+ event.text = ["ping", "success"]
+ else:
+ event.text = ["ping", "failed"]
+ event.finish()
+
+ def detached(self):
+ log.msg("Buildslave %s detached from %s" % (self.slave.slavename,
+ self.builder_name))
+ if self.slave:
+ self.slave.removeSlaveBuilder(self)
+ self.slave = None
+ self.remote = None
+ self.remoteCommands = None
+
+
class Ping:
    """Single-use liveness probe for one buildslave connection.

    ping() issues a no-op remote call and returns a Deferred that fires
    True if the slave answers, or False if the call fails.  A timeout
    forcibly drops the TCP connection, which in turn makes the pending
    remote call errback, so the Deferred always fires eventually.
    """
    # running: True between ping() and the first pong/failure.
    running = False
    # timer: reactor.callLater handle for the timeout, or None (class
    # default, also what self.timer resolves to after 'del self.timer').
    timer = None

    def ping(self, remote, timeout):
        """Probe *remote*; return a Deferred firing True (alive) or False.

        Each Ping instance may be used only once (asserted below).
        """
        assert not self.running
        self.running = True
        log.msg("sending ping")
        self.d = defer.Deferred()
        # TODO: add a distinct 'ping' command on the slave.. using 'print'
        # for this purpose is kind of silly.
        remote.callRemote("print", "ping").addCallbacks(self._pong,
                                                        self._ping_failed,
                                                        errbackArgs=(remote,))

        # We use either our own timeout or the (long) TCP timeout to detect
        # silently-missing slaves. This might happen because of a NAT
        # timeout or a routing loop. If the slave just shuts down (and we
        # somehow missed the FIN), we should get a "connection refused"
        # message.
        self.timer = reactor.callLater(timeout, self._ping_timeout, remote)
        return self.d

    def _ping_timeout(self, remote):
        log.msg("ping timeout")
        # force the BuildSlave to disconnect, since this indicates that
        # the bot is unreachable.
        # Deleting the instance attribute exposes the class-level
        # 'timer = None', so _stopTimer's 'if self.timer' stays safe.
        del self.timer
        remote.broker.transport.loseConnection()
        # the forcibly-lost connection will now cause the ping to fail

    def _stopTimer(self):
        # Idempotent: only the first of pong/failure gets past this guard,
        # so self.d is fired at most once.
        if not self.running:
            return
        self.running = False

        if self.timer:
            self.timer.cancel()
            del self.timer

    def _pong(self, res):
        log.msg("ping finished: success")
        self._stopTimer()
        self.d.callback(True)

    def _ping_failed(self, res, remote):
        log.msg("ping finished: failure")
        self._stopTimer()
        # the slave has some sort of internal error, disconnect them. If we
        # don't, we'll requeue a build and ping them again right away,
        # creating a nasty loop.
        remote.broker.transport.loseConnection()
        # TODO: except, if they actually did manage to get this far, they'll
        # probably reconnect right away, and we'll do this game again. Maybe
        # it would be better to leave them in the PINGING state.
        self.d.callback(False)
+
+
class SlaveBuilder(AbstractSlaveBuilder):
    """A regular (always-connected) slave's view of one Builder.

    The state starts at ATTACHING and returns there whenever the slave
    detaches; the build machinery moves it through PINGING/BUILDING/IDLE.
    """

    def __init__(self):
        AbstractSlaveBuilder.__init__(self)
        self.state = ATTACHING

    def detached(self):
        # AbstractSlaveBuilder.detached already logs the event, calls
        # self.slave.removeSlaveBuilder(self), and clears .slave/.remote/
        # .remoteCommands.  The duplicate 'if self.slave:
        # removeSlaveBuilder' branch that used to follow here was dead
        # code (self.slave is always None by this point) and was removed.
        AbstractSlaveBuilder.detached(self)
        self.state = ATTACHING

    def buildFinished(self):
        # Call the slave's buildFinished if we can; the slave may be waiting
        # to do a graceful shutdown and needs to know when it's idle.
        # After, we check to see if we can start other builds.
        self.state = IDLE
        if self.slave:
            d = self.slave.buildFinished(self)
            d.addCallback(lambda x: reactor.callLater(0, self.builder.botmaster.maybeStartAllBuilds))
        else:
            reactor.callLater(0, self.builder.botmaster.maybeStartAllBuilds)
+
+
class LatentSlaveBuilder(AbstractSlaveBuilder):
    """SlaveBuilder for an on-demand ("latent") buildslave.

    Unlike SlaveBuilder, the slave object is known up front (passed to
    __init__) but may not actually be running; substantiate() is used to
    spin it up just before a build starts.  The state rests at LATENT
    whenever the slave is not attached.
    """
    def __init__(self, slave, builder):
        AbstractSlaveBuilder.__init__(self)
        self.slave = slave
        self.state = LATENT
        self.setBuilder(builder)
        self.slave.addSlaveBuilder(self)
        log.msg("Latent buildslave %s attached to %s" % (slave.slavename,
                                                         self.builder_name))

    def substantiate(self, build):
        """Ask the latent slave to start up; return its Deferred.

        If the slave is not yet substantiated, a 'substantiating' event is
        shown in the builder status and finished when the Deferred fires
        (success text may include the substantiation result).
        """
        d = self.slave.substantiate(self)
        if not self.slave.substantiated:
            event = self.builder.builder_status.addEvent(
                ["substantiating"])
            def substantiated(res):
                msg = ["substantiate", "success"]
                # basestring is Python 2 only; res may be a plain string
                # or a sequence of status words.
                if isinstance(res, basestring):
                    msg.append(res)
                elif isinstance(res, (tuple, list)):
                    msg.extend(res)
                event.text = msg
                event.finish()
                return res
            def substantiation_failed(res):
                event.text = ["substantiate", "failed"]
                # TODO add log of traceback to event
                event.finish()
                return res
            d.addCallbacks(substantiated, substantiation_failed)
        return d

    def detached(self):
        # Base class clears .slave/.remote; a latent builder goes back to
        # LATENT (not ATTACHING) since the slave can be re-substantiated.
        AbstractSlaveBuilder.detached(self)
        self.state = LATENT

    def buildStarted(self):
        AbstractSlaveBuilder.buildStarted(self)
        # keep the latent slave informed so it can manage its own lifetime
        self.slave.buildStarted(self)

    def buildFinished(self):
        AbstractSlaveBuilder.buildFinished(self)
        self.slave.buildFinished(self)

    def _attachFailure(self, why, where):
        # fall back to LATENT before delegating the error handling
        self.state = LATENT
        return AbstractSlaveBuilder._attachFailure(self, why, where)

    def ping(self, timeout, status=None):
        # An un-substantiated latent slave is trivially "reachable": report
        # success without touching the network.
        if not self.slave.substantiated:
            if status:
                status.addEvent(["ping", "latent"]).finish()
            return defer.succeed(True)
        return AbstractSlaveBuilder.ping(self, timeout, status)
+
+
class Builder(pb.Referenceable):
    """I manage all Builds of a given type.

    Each Builder is created by an entry in the config file (the c['builders']
    list), with a number of parameters.

    One of these parameters is the L{buildbot.process.factory.BuildFactory}
    object that is associated with this Builder. The factory is responsible
    for creating new L{Build<buildbot.process.base.Build>} objects. Each
    Build object defines when and how the build is performed, so a new
    Factory or Builder should be defined to control this behavior.

    The Builder holds on to a number of L{base.BuildRequest} objects in a
    list named C{.buildable}. Incoming BuildRequest objects will be added to
    this list, or (if possible) merged into an existing request. When a slave
    becomes available, I will use my C{BuildFactory} to turn the request into
    a new C{Build} object. The C{BuildRequest} is forgotten, the C{Build}
    goes into C{.building} while it runs. Once the build finishes, I will
    discard it.

    I maintain a list of available SlaveBuilders, one for each connected
    slave that the C{slavenames} parameter says we can use. Some of these
    will be idle, some of them will be busy running builds for me. If there
    are multiple slaves, I can run multiple builds at once.

    I also manage forced builds, progress expectation (ETA) management, and
    some status delivery chores.

    I am persisted in C{BASEDIR/BUILDERNAME/builder}, so I can remember how
    long a build usually takes to run (in my C{expectations} attribute). This
    pickle also includes the L{buildbot.status.builder.BuilderStatus} object,
    which remembers the set of historic builds.

    @type buildable: list of L{buildbot.process.base.BuildRequest}
    @ivar buildable: BuildRequests that are ready to build, but which are
                     waiting for a buildslave to be available.

    @type building: list of L{buildbot.process.base.Build}
    @ivar building: Builds that are actively running

    @type slaves: list of L{buildbot.buildslave.BuildSlave} objects
    @ivar slaves: the slaves currently available for building
    """

    expectations = None # this is created the first time we get a good build
    START_BUILD_TIMEOUT = 10
    CHOOSE_SLAVES_RANDOMLY = True # disabled for determinism during tests

    def __init__(self, setup, builder_status):
        """
        @type setup: dict
        @param setup: builder setup data, as stored in
                      BuildmasterConfig['builders'].  Contains name,
                      slavename(s), builddir, factory, locks.
        @type builder_status: L{buildbot.status.builder.BuilderStatus}
        """
        self.name = setup['name']
        # accept either a single 'slavename' or a list of 'slavenames'
        self.slavenames = []
        if setup.has_key('slavename'):
            self.slavenames.append(setup['slavename'])
        if setup.has_key('slavenames'):
            self.slavenames.extend(setup['slavenames'])
        self.builddir = setup['builddir']
        self.buildFactory = setup['factory']
        self.locks = setup.get("locks", [])
        self.env = setup.get('env', {})
        assert isinstance(self.env, dict)
        if setup.has_key('periodicBuildTime'):
            raise ValueError("periodicBuildTime can no longer be defined as"
                             " part of the Builder: use scheduler.Periodic"
                             " instead")

        # build/wannabuild slots: Build objects move along this sequence
        self.buildable = []
        self.building = []
        # old_building holds active builds that were stolen from a predecessor
        self.old_building = weakref.WeakKeyDictionary()

        # buildslaves which have connected but which are not yet available.
        # These are always in the ATTACHING state.
        self.attaching_slaves = []

        # buildslaves at our disposal. Each SlaveBuilder instance has a
        # .state that is IDLE, PINGING, or BUILDING. "PINGING" is used when a
        # Build is about to start, to make sure that they're still alive.
        self.slaves = []

        self.builder_status = builder_status
        self.builder_status.setSlavenames(self.slavenames)

        # for testing, to help synchronize tests
        self.watchers = {'attach': [], 'detach': [], 'detach_all': [],
                         'idle': []}

    def setBotmaster(self, botmaster):
        # Record the owning BotMaster; consulted in maybeStartBuild for
        # request-merging decisions (shouldMergeRequests).
        self.botmaster = botmaster

    def compareToSetup(self, setup):
        """Return a list of human-readable strings describing how the given
        config-file setup dict differs from this Builder's current
        configuration.  An empty list means no relevant differences."""
        diffs = []
        setup_slavenames = []
        if setup.has_key('slavename'):
            setup_slavenames.append(setup['slavename'])
        setup_slavenames.extend(setup.get('slavenames', []))
        if setup_slavenames != self.slavenames:
            diffs.append('slavenames changed from %s to %s' \
                         % (self.slavenames, setup_slavenames))
        if setup['builddir'] != self.builddir:
            diffs.append('builddir changed from %s to %s' \
                         % (self.builddir, setup['builddir']))
        if setup['factory'] != self.buildFactory: # compare objects
            diffs.append('factory changed')
        oldlocks = [(lock.__class__, lock.name)
                    for lock in self.locks]
        newlocks = [(lock.__class__, lock.name)
                    for lock in setup.get('locks',[])]
        if oldlocks != newlocks:
            diffs.append('locks changed from %s to %s' % (oldlocks, newlocks))
        return diffs

    def __repr__(self):
        return "<Builder '%s' at %d>" % (self.name, id(self))

    def getOldestRequestTime(self):
        """Returns the timestamp of the oldest build request for this builder.

        If there are no build requests, None is returned."""
        if self.buildable:
            return self.buildable[0].getSubmitTime()
        else:
            return None

    def submitBuildRequest(self, req):
        """Queue a BuildRequest and try to start it immediately."""
        req.setSubmitTime(now())
        self.buildable.append(req)
        req.requestSubmitted(self)
        self.builder_status.addBuildRequest(req.status)
        self.maybeStartBuild()

    def cancelBuildRequest(self, req):
        """Remove a still-queued request; return True if it was found."""
        if req in self.buildable:
            self.buildable.remove(req)
            self.builder_status.removeBuildRequest(req.status)
            return True
        return False

    def __getstate__(self):
        # pickle support: drop runtime-only state (active builds, attached
        # slaves) -- it is rebuilt in __setstate__ / on reconnect.
        d = self.__dict__.copy()
        # TODO: note that d['buildable'] can contain Deferreds
        del d['building'] # TODO: move these back to .buildable?
        del d['slaves']
        return d

    def __setstate__(self, d):
        self.__dict__ = d
        self.building = []
        self.slaves = []

    def consumeTheSoulOfYourPredecessor(self, old):
        """Suck the brain out of an old Builder.

        This takes all the runtime state from an existing Builder and moves
        it into ourselves. This is used when a Builder is changed in the
        master.cfg file: the new Builder has a different factory, but we want
        all the builds that were queued for the old one to get processed by
        the new one. Any builds which are already running will keep running.
        The new Builder will get as many of the old SlaveBuilder objects as
        it wants."""

        log.msg("consumeTheSoulOfYourPredecessor: %s feeding upon %s" %
                (self, old))
        # we claim all the pending builds, removing them from the old
        # Builder's queue. This insures that the old Builder will not start
        # any new work.
        log.msg(" stealing %s buildrequests" % len(old.buildable))
        self.buildable.extend(old.buildable)
        old.buildable = []

        # old.building (i.e. builds which are still running) is not migrated
        # directly: it keeps track of builds which were in progress in the
        # old Builder. When those builds finish, the old Builder will be
        # notified, not us. However, since the old SlaveBuilder will point to
        # us, it is our maybeStartBuild() that will be triggered.
        if old.building:
            self.builder_status.setBigState("building")
        # however, we do grab a weakref to the active builds, so that our
        # BuilderControl can see them and stop them. We use a weakref because
        # we aren't the one to get notified, so there isn't a convenient
        # place to remove it from self.building .
        for b in old.building:
            self.old_building[b] = None
        for b in old.old_building:
            self.old_building[b] = None

        # Our set of slavenames may be different. Steal any of the old
        # buildslaves that we want to keep using.
        for sb in old.slaves[:]:
            if sb.slave.slavename in self.slavenames:
                log.msg(" stealing buildslave %s" % sb)
                self.slaves.append(sb)
                old.slaves.remove(sb)
                sb.setBuilder(self)

        # old.attaching_slaves:
        #  these SlaveBuilders are waiting on a sequence of calls:
        #  remote.setMaster and remote.print . When these two complete,
        #  old._attached will be fired, which will add a 'connect' event to
        #  the builder_status and try to start a build. However, we've pulled
        #  everything out of the old builder's queue, so it will have no work
        #  to do. The outstanding remote.setMaster/print call will be holding
        #  the last reference to the old builder, so it will disappear just
        #  after that response comes back.
        #
        #  The BotMaster will ask the slave to re-set their list of Builders
        #  shortly after this function returns, which will cause our
        #  attached() method to be fired with a bunch of references to remote
        #  SlaveBuilders, some of which we already have (by stealing them
        #  from the old Builder), some of which will be new. The new ones
        #  will be re-attached.

        #  Therefore, we don't need to do anything about old.attaching_slaves

        return # all done

    def getBuild(self, number):
        """Return the active (or inherited old) Build with the given status
        number, or None if no such build is running."""
        for b in self.building:
            if b.build_status.number == number:
                return b
        for b in self.old_building.keys():
            if b.build_status.number == number:
                return b
        return None

    def fireTestEvent(self, name, fire_with=None):
        # test-support hook: fire and clear the deferreds registered under
        # self.watchers[name], passing fire_with (default: this Builder).
        if fire_with is None:
            fire_with = self
        watchers = self.watchers[name]
        self.watchers[name] = []
        for w in watchers:
            reactor.callLater(0, w.callback, fire_with)

    def addLatentSlave(self, slave):
        """Register an ILatentBuildSlave with this Builder (idempotent)."""
        assert interfaces.ILatentBuildSlave.providedBy(slave)
        for s in self.slaves:
            if s == slave:
                break
        else:
            sb = LatentSlaveBuilder(slave, self)
            self.builder_status.addPointEvent(
                ['added', 'latent', slave.slavename])
            self.slaves.append(sb)
            reactor.callLater(0, self.maybeStartBuild)

    def attached(self, slave, remote, commands):
        """This is invoked by the BuildSlave when the self.slavename bot
        registers their builder.

        @type slave: L{buildbot.buildslave.BuildSlave}
        @param slave: the BuildSlave that represents the buildslave as a whole
        @type remote: L{twisted.spread.pb.RemoteReference}
        @param remote: a reference to the L{buildbot.slave.bot.SlaveBuilder}
        @type commands: dict: string -> string, or None
        @param commands: provides the slave's version of each RemoteCommand

        @rtype: L{twisted.internet.defer.Deferred}
        @return: a Deferred that fires (with 'self') when the slave-side
                 builder is fully attached and ready to accept commands.
        """
        for s in self.attaching_slaves + self.slaves:
            if s.slave == slave:
                # already attached to them. This is fairly common, since
                # attached() gets called each time we receive the builder
                # list from the slave, and we ask for it each time we add or
                # remove a builder. So if the slave is hosting builders
                # A,B,C, and the config file changes A, we'll remove A and
                # re-add it, triggering two builder-list requests, getting
                # two redundant calls to attached() for B, and another two
                # for C.
                #
                # Therefore, when we see that we're already attached, we can
                # just ignore it. TODO: build a diagram of the state
                # transitions here, I'm concerned about sb.attached() failing
                # and leaving sb.state stuck at 'ATTACHING', and about
                # the detached() message arriving while there's some
                # transition pending such that the response to the transition
                # re-vivifies sb
                return defer.succeed(self)

        sb = SlaveBuilder()
        sb.setBuilder(self)
        self.attaching_slaves.append(sb)
        d = sb.attached(slave, remote, commands)
        d.addCallback(self._attached)
        d.addErrback(self._not_attached, slave)
        return d

    def _attached(self, sb):
        # slave-side setup finished: promote sb from attaching_slaves to
        # slaves and see whether queued work can now start.
        # TODO: make this .addSlaveEvent(slave.slavename, ['connect']) ?
        self.builder_status.addPointEvent(['connect', sb.slave.slavename])
        self.attaching_slaves.remove(sb)
        self.slaves.append(sb)
        reactor.callLater(0, self.maybeStartBuild)

        self.fireTestEvent('attach')
        return self

    def _not_attached(self, why, slave):
        # already log.err'ed by SlaveBuilder._attachFailure
        # TODO: make this .addSlaveEvent?
        # TODO: remove from self.slaves (except that detached() should get
        #       run first, right?)
        self.builder_status.addPointEvent(['failed', 'connect',
                                           slave.slave.slavename])
        # TODO: add an HTMLLogFile of the exception
        self.fireTestEvent('attach', why)

    def detached(self, slave):
        """This is called when the connection to the bot is lost."""
        log.msg("%s.detached" % self, slave.slavename)
        for sb in self.attaching_slaves + self.slaves:
            if sb.slave == slave:
                break
        else:
            log.msg("WEIRD: Builder.detached(%s) (%s)"
                    " not in attaching_slaves(%s)"
                    " or slaves(%s)" % (slave, slave.slavename,
                                        self.attaching_slaves,
                                        self.slaves))
            return
        if sb.state == BUILDING:
            # the Build's .lostRemote method (invoked by a notifyOnDisconnect
            # handler) will cause the Build to be stopped, probably right
            # after the notifyOnDisconnect that invoked us finishes running.

            # TODO: should failover to a new Build
            #self.retryBuild(sb.build)
            pass

        if sb in self.attaching_slaves:
            self.attaching_slaves.remove(sb)
        if sb in self.slaves:
            self.slaves.remove(sb)

        # TODO: make this .addSlaveEvent?
        self.builder_status.addPointEvent(['disconnect', slave.slavename])
        sb.detached() # inform the SlaveBuilder that their slave went away
        self.updateBigStatus()
        self.fireTestEvent('detach')
        if not self.slaves:
            self.fireTestEvent('detach_all')

    def updateBigStatus(self):
        # mirror our current activity into the status object:
        # offline (no slaves) / building (active builds) / idle
        if not self.slaves:
            self.builder_status.setBigState("offline")
        elif self.building:
            self.builder_status.setBigState("building")
        else:
            self.builder_status.setBigState("idle")
            self.fireTestEvent('idle')

    def maybeStartBuild(self):
        """If there is queued work and an available slave, pop the oldest
        request, merge any compatible requests into it, and start a Build.
        Harmless no-op otherwise."""
        log.msg("maybeStartBuild %s: %s %s" %
                (self, self.buildable, self.slaves))
        if not self.buildable:
            self.updateBigStatus()
            return # nothing to do

        # pick an idle slave
        available_slaves = [sb for sb in self.slaves if sb.isAvailable()]
        if not available_slaves:
            log.msg("%s: want to start build, but we don't have a remote"
                    % self)
            self.updateBigStatus()
            return
        if self.CHOOSE_SLAVES_RANDOMLY:
            # TODO prefer idle over latent? maybe other sorting preferences?
            sb = random.choice(available_slaves)
        else:
            sb = available_slaves[0]

        # there is something to build, and there is a slave on which to build
        # it. Grab the oldest request, see if we can merge it with anything
        # else.
        req = self.buildable.pop(0)
        self.builder_status.removeBuildRequest(req.status)
        mergers = []
        botmaster = self.botmaster
        for br in self.buildable[:]:
            if botmaster.shouldMergeRequests(self, req, br):
                self.buildable.remove(br)
                self.builder_status.removeBuildRequest(br.status)
                mergers.append(br)
        requests = [req] + mergers

        # Create a new build from our build factory and set ourself as the
        # builder.
        build = self.buildFactory.newBuild(requests)
        build.setBuilder(self)
        build.setLocks(self.locks)
        if len(self.env) > 0:
            build.setSlaveEnvironment(self.env)

        # start it
        self.startBuild(build, sb)

    def startBuild(self, build, sb):
        """Start a build on the given slave.
        @param build: the L{base.Build} to start
        @param sb: the L{SlaveBuilder} which will host this build

        @return: a Deferred which fires with a
        L{buildbot.interfaces.IBuildControl} that can be used to stop the
        Build, or to access a L{buildbot.interfaces.IBuildStatus} which will
        watch the Build as it runs. """

        self.building.append(build)
        self.updateBigStatus()
        if isinstance(sb, LatentSlaveBuilder):
            # latent slaves must be spun up first; ping only after that
            log.msg("starting build %s.. substantiating the slave %s" %
                    (build, sb))
            d = sb.substantiate(build)
            def substantiated(res):
                return sb.ping(self.START_BUILD_TIMEOUT)
            def substantiation_failed(res):
                self.builder_status.addPointEvent(
                    ['removing', 'latent', sb.slave.slavename])
                sb.slave.disconnect()
                # TODO: should failover to a new Build
                #self.retryBuild(sb.build)
            d.addCallbacks(substantiated, substantiation_failed)
        else:
            log.msg("starting build %s.. pinging the slave %s" % (build, sb))
            d = sb.ping(self.START_BUILD_TIMEOUT)
        # ping the slave to make sure they're still there. If they're fallen
        # off the map (due to a NAT timeout or something), this will fail in
        # a couple of minutes, depending upon the TCP timeout. TODO: consider
        # making this time out faster, or at least characterize the likely
        # duration.
        d.addCallback(self._startBuild_1, build, sb)
        return d

    def _startBuild_1(self, res, build, sb):
        # step 1: the ping (or latent substantiation) finished; res is its
        # True/False verdict.
        if not res:
            return self._startBuildFailed("slave ping failed", build, sb)
        # The buildslave is ready to go. sb.buildStarted() sets its state to
        # BUILDING (so we won't try to use it for any other builds). This
        # gets set back to IDLE by the Build itself when it finishes.
        sb.buildStarted()
        d = sb.remote.callRemote("startBuild")
        d.addCallbacks(self._startBuild_2, self._startBuildFailed,
                       callbackArgs=(build,sb), errbackArgs=(build,sb))
        return d

    def _startBuild_2(self, res, build, sb):
        # step 2: the slave acknowledged remote startBuild; kick off the
        # master-side Build and announce it to the status hierarchy.
        # create the BuildStatus object that goes with the Build
        bs = self.builder_status.newBuild()

        # start the build. This will first set up the steps, then tell the
        # BuildStatus that it has started, which will announce it to the
        # world (through our BuilderStatus object, which is its parent).
        # Finally it will start the actual build process.
        d = build.startBuild(bs, self.expectations, sb)
        d.addCallback(self.buildFinished, sb)
        d.addErrback(log.err) # this shouldn't happen. if it does, the slave
                              # will be wedged
        for req in build.requests:
            req.buildStarted(build, bs)
        return build # this is the IBuildControl

    def _startBuildFailed(self, why, build, sb):
        # put the build back on the buildable list
        log.msg("I tried to tell the slave that the build %s started, but "
                "remote_startBuild failed: %s" % (build, why))
        # release the slave. This will queue a call to maybeStartBuild, which
        # will fire after other notifyOnDisconnect handlers have marked the
        # slave as disconnected (so we don't try to use it again).
        sb.buildFinished()

        log.msg("re-queueing the BuildRequest")
        self.building.remove(build)
        for req in build.requests:
            self.buildable.insert(0, req) # the interrupted build gets first
                                          # priority
            self.builder_status.addBuildRequest(req.status)


    def buildFinished(self, build, sb):
        """This is called when the Build has finished (either success or
        failure). Any exceptions during the build are reported with
        results=FAILURE, not with an errback."""

        # by the time we get here, the Build has already released the slave
        # (which queues a call to maybeStartBuild)

        self.building.remove(build)
        for req in build.requests:
            req.finished(build.build_status)

    def setExpectations(self, progress):
        """Mark the build as successful and update expectations for the next
        build. Only call this when the build did not fail in any way that
        would invalidate the time expectations generated by it. (if the
        compile failed and thus terminated early, we can't use the last
        build to predict how long the next one will take).
        """
        if self.expectations:
            self.expectations.update(progress)
        else:
            # the first time we get a good build, create our Expectations
            # based upon its results
            self.expectations = Expectations(progress)
        log.msg("new expectations: %s seconds" % \
                self.expectations.expectedBuildTime())

    def shutdownSlave(self):
        # NOTE(review): Builder never assigns self.remote anywhere in this
        # file (SlaveBuilder objects hold .remote); calling this as written
        # would raise AttributeError. Looks misplaced -- confirm intent
        # before relying on it.
        if self.remote:
            self.remote.callRemote("shutdown")
+
+
class BuilderControl(components.Adapter):
    """Adapter exposing the IBuilderControl interface on a Builder.

    The wrapped Builder is available as self.original (courtesy of
    components.Adapter).
    """
    implements(interfaces.IBuilderControl)

    def requestBuild(self, req):
        """Hand a BuildRequest straight to the wrapped Builder's queue."""
        self.original.submitBuildRequest(req)

    def requestBuildSoon(self, req):
        """Queue a BuildRequest only if a slave is currently attached.

        Raises L{buildbot.interfaces.NoSlaveError} when no slaves are
        connected, so the request cannot linger in the queue waiting for
        one to appear. Appropriate for things like the web page's
        'Force Build' button."""
        if not self.original.slaves:
            raise interfaces.NoSlaveError
        self.requestBuild(req)

    def resubmitBuild(self, bs, reason="<rebuild, no reason given>"):
        """Re-queue a finished build using its original source stamp.

        Silently ignored while the build is still running."""
        if not bs.isFinished():
            return

        stamp = bs.getSourceStamp(absolute=True)
        rebuild_req = base.BuildRequest(reason, stamp, self.original.name)
        self.requestBuild(rebuild_req)

    def getPendingBuilds(self):
        # return IBuildRequestControl objects
        raise NotImplementedError

    def getBuild(self, number):
        """Look up an active build by number on the wrapped Builder."""
        return self.original.getBuild(number)

    def ping(self, timeout=30):
        """Ping every attached slave.

        Returns a Deferred firing True only when every slave answers its
        ping; False if there are no slaves, or any ping fails."""
        slaves = self.original.slaves
        if not slaves:
            self.original.builder_status.addPointEvent(["ping", "no slave"])
            return defer.succeed(False) # interfaces.NoSlaveError
        pings = [each.ping(timeout, self.original.builder_status)
                 for each in slaves]
        d = defer.DeferredList(pings)
        d.addCallback(self._gatherPingResults)
        return d

    def _gatherPingResults(self, res):
        # each entry is a (flag, ping_result) pair from the DeferredList;
        # the overall verdict is the conjunction of the ping results.
        return all(ping_result for ignored, ping_result in res)
+
+components.registerAdapter(BuilderControl, Builder, interfaces.IBuilderControl)
diff --git a/buildbot/buildbot/process/buildstep.py b/buildbot/buildbot/process/buildstep.py
new file mode 100644
index 0000000..2cfc157
--- /dev/null
+++ b/buildbot/buildbot/process/buildstep.py
@@ -0,0 +1,1097 @@
+# -*- test-case-name: buildbot.test.test_steps -*-
+
+from zope.interface import implements
+from twisted.internet import reactor, defer, error
+from twisted.protocols import basic
+from twisted.spread import pb
+from twisted.python import log
+from twisted.python.failure import Failure
+from twisted.web.util import formatFailure
+
+from buildbot import interfaces, locks
+from buildbot.status import progress
+from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE, SKIPPED, \
+ EXCEPTION
+
+"""
+BuildStep and RemoteCommand classes for master-side representation of the
+build process
+"""
+
class RemoteCommand(pb.Referenceable):
    """
    I represent a single command to be run on the slave. I handle the details
    of reliably gathering status updates from the slave (acknowledging each),
    and (eventually, in a future release) recovering from interrupted builds.
    This is the master-side object that is known to the slave-side
    L{buildbot.slave.bot.SlaveBuilder}, to which status updates are sent.

    My command should be started by calling .run(), which returns a
    Deferred that will fire when the command has finished, or will
    errback if an exception is raised.

    Typically __init__ or run() will set up self.remote_command to be a
    string which corresponds to one of the SlaveCommands registered in
    the buildslave, and self.args to a dictionary of arguments that will
    be passed to the SlaveCommand instance.

    start, remoteUpdate, and remoteComplete are available to be overridden

    @type  commandCounter: list of one int
    @cvar  commandCounter: provides a unique value for each
                           RemoteCommand executed across all slaves
    @type  active: boolean
    @ivar  active: whether the command is currently running
    """
    commandCounter = [0] # we use a list as a poor man's singleton
    active = False

    def __init__(self, remote_command, args):
        """
        @type  remote_command: string
        @param remote_command: remote command to start.  This will be
                               passed to
                               L{buildbot.slave.bot.SlaveBuilder.remote_startCommand}
                               and needs to have been registered
                               slave-side by
                               L{buildbot.slave.registry.registerSlaveCommand}
        @type  args: dict
        @param args: arguments to send to the remote command
        """

        self.remote_command = remote_command
        self.args = args

    def __getstate__(self):
        # pickle support: drop the live pb reference, which cannot (and
        # must not) be serialized.
        # (fixed: the local used to be named 'dict', shadowing the builtin,
        # and used the deprecated has_key().)
        state = self.__dict__.copy()
        # Remove the remote ref: if necessary (only for resumed builds), it
        # will be reattached at resume time
        if "remote" in state:
            del state["remote"]
        return state

    def run(self, step, remote):
        """Start the command on *remote* on behalf of *step*.

        @return: a Deferred that fires (with this RemoteCommand) when the
                 command finishes, or errbacks on failure."""
        self.active = True
        self.step = step
        self.remote = remote
        c = self.commandCounter[0]
        self.commandCounter[0] += 1
        #self.commandID = "%d %d" % (c, random.randint(0, 1000000))
        self.commandID = "%d" % c
        log.msg("%s: RemoteCommand.run [%s]" % (self, self.commandID))
        self.deferred = defer.Deferred()

        d = defer.maybeDeferred(self.start)

        # _finished is called with an error for unknown commands, errors
        # that occur while the command is starting (including OSErrors in
        # exec()), StaleBroker (when the connection was lost before we
        # started), and pb.PBConnectionLost (when the slave isn't responding
        # over this connection, perhaps it had a power failure, or NAT
        # weirdness). If this happens, self.deferred is fired right away.
        d.addErrback(self._finished)

        # Connections which are lost while the command is running are caught
        # when our parent Step calls our .lostRemote() method.
        return self.deferred

    def start(self):
        """
        Tell the slave to start executing the remote command.

        @rtype:   L{twisted.internet.defer.Deferred}
        @returns: a deferred that will fire when the remote command is
                  done (with None as the result)
        """
        # This method only initiates the remote command.
        # We will receive remote_update messages as the command runs.
        # We will get a single remote_complete when it finishes.
        # We should fire self.deferred when the command is done.
        d = self.remote.callRemote("startCommand", self, self.commandID,
                                   self.remote_command, self.args)
        return d

    def interrupt(self, why):
        """Ask the remote command to halt; may forcibly finish the command
        if the connection is already gone or the command is inactive.

        @param why: reason for the interrupt (usually a Failure)
        @return: a Deferred for the delivery of the interrupt request, or
                 None if no remote call was made."""
        # TODO: consider separating this into interrupt() and stop(), where
        # stop() unconditionally calls _finished, but interrupt() merely
        # asks politely for the command to stop soon.

        log.msg("RemoteCommand.interrupt", self, why)
        if not self.active:
            log.msg(" but this RemoteCommand is already inactive")
            return
        if not self.remote:
            log.msg(" but our .remote went away")
            return
        if isinstance(why, Failure) and why.check(error.ConnectionLost):
            log.msg("RemoteCommand.disconnect: lost slave")
            self.remote = None
            self._finished(why)
            return

        # tell the remote command to halt. Returns a Deferred that will fire
        # when the interrupt command has been delivered.

        d = defer.maybeDeferred(self.remote.callRemote, "interruptCommand",
                                self.commandID, str(why))
        # the slave may not have remote_interruptCommand
        d.addErrback(self._interruptFailed)
        return d

    def _interruptFailed(self, why):
        log.msg("RemoteCommand._interruptFailed", self)
        # TODO: forcibly stop the Command now, since we can't stop it
        # cleanly
        return None

    def remote_update(self, updates):
        """
        I am called by the slave's L{buildbot.slave.bot.SlaveBuilder} so
        I can receive updates from the running remote command.

        @type  updates: list of [object, int]
        @param updates: list of updates from the remote command

        @return: the highest update number seen, so the slave can retire
                 everything up to that point.
        """
        # NOTE(review): self.buildslave is not assigned anywhere in this
        # class; presumably the surrounding step/slave machinery attaches
        # it before updates can arrive -- confirm.
        self.buildslave.messageReceivedFromSlave()
        max_updatenum = 0
        for (update, num) in updates:
            #log.msg("update[%d]:" % num)
            try:
                if self.active: # ignore late updates
                    self.remoteUpdate(update)
            except:
                # deliberately broad: any exception raised by remoteUpdate
                # must terminate the build, while the slave still gets an
                # ack so it can retire the update.
                # log failure, terminate build, let slave retire the update
                self._finished(Failure())
                # TODO: what if multiple updates arrive? should
                # skip the rest but ack them all
            if num > max_updatenum:
                max_updatenum = num
        return max_updatenum

    def remoteUpdate(self, update):
        raise NotImplementedError("You must implement this in a subclass")

    def remote_complete(self, failure=None):
        """
        Called by the slave's L{buildbot.slave.bot.SlaveBuilder} to
        notify me the remote command has finished.

        @type  failure: L{twisted.python.failure.Failure} or None

        @rtype: None
        """
        self.buildslave.messageReceivedFromSlave()
        # call the real remoteComplete a moment later, but first return an
        # acknowledgement so the slave can retire the completion message.
        if self.active:
            reactor.callLater(0, self._finished, failure)
        return None

    def _finished(self, failure=None):
        # Final bookkeeping: run remoteComplete and then fire self.deferred
        # (set up in run()) with this RemoteCommand or a Failure.
        self.active = False
        # call .remoteComplete. If it raises an exception, or returns the
        # Failure that we gave it, our self.deferred will be errbacked. If
        # it does not (either it ate the Failure or there the step finished
        # normally and it didn't raise a new exception), self.deferred will
        # be callbacked.
        d = defer.maybeDeferred(self.remoteComplete, failure)
        # arrange for the callback to get this RemoteCommand instance
        # instead of just None
        d.addCallback(lambda r: self)
        # this fires the original deferred we returned from .run(),
        # with self as the result, or a failure
        d.addBoth(self.deferred.callback)

    def remoteComplete(self, maybeFailure):
        """Subclasses can override this.

        This is called when the RemoteCommand has finished. 'maybeFailure'
        will be None if the command completed normally, or a Failure
        instance in one of the following situations:

         - the slave was lost before the command was started
         - the slave didn't respond to the startCommand message
         - the slave raised an exception while starting the command
           (bad command name, bad args, OSError from missing executable)
         - the slave raised an exception while finishing the command
           (they send back a remote_complete message with a Failure payload)

        and also (for now):
         -  slave disconnected while the command was running

        This method should do cleanup, like closing log files. It should
        normally return the 'failure' argument, so that any exceptions will
        be propagated to the Step. If it wants to consume them, return None
        instead."""

        return maybeFailure
+
class LoggedRemoteCommand(RemoteCommand):
    """

    I am a L{RemoteCommand} which gathers output from the remote command into
    one or more local log files. My C{self.logs} dictionary contains
    references to these L{buildbot.status.builder.LogFile} instances. Any
    stdout/stderr/header updates from the slave will be put into
    C{self.logs['stdio']}, if it exists. If the remote command uses other log
    files, they will go into other entries in C{self.logs}.

    If you want to use stdout or stderr, you should create a LogFile named
    'stdio' and pass it to my useLog() message. Otherwise stdout/stderr will
    be ignored, which is probably not what you want.

    Unless you tell me otherwise, when my command completes I will close all
    the LogFiles that I know about.

    @ivar logs: maps logname to a LogFile instance
    @ivar _closeWhenFinished: maps logname to a boolean. If true, this
                              LogFile will be closed when the RemoteCommand
                              finishes. LogFiles which are shared between
                              multiple RemoteCommands should use False here.

    """

    # exit code of the remote command, filled in by remoteUpdate()
    rc = None
    debug = False

    def __init__(self, *args, **kwargs):
        self.logs = {}
        self._closeWhenFinished = {}
        RemoteCommand.__init__(self, *args, **kwargs)

    def __repr__(self):
        return "<RemoteCommand '%s' at %d>" % (self.remote_command, id(self))

    def useLog(self, loog, closeWhenFinished=False, logfileName=None):
        """Start routing messages from a remote logfile to a local LogFile

        I take a local ILogFile instance in 'loog', and arrange to route
        remote log messages for the logfile named 'logfileName' into it. By
        default this logfileName comes from the ILogFile itself (using the
        name by which the ILogFile will be displayed), but the 'logfileName'
        argument can be used to override this. For example, if
        logfileName='stdio', this logfile will collect text from the stdout
        and stderr of the command.

        @param loog: an instance which implements ILogFile
        @param closeWhenFinished: a boolean, set to False if the logfile
                                  will be shared between multiple
                                  RemoteCommands. If True, the logfile will
                                  be closed when this ShellCommand is done
                                  with it.
        @param logfileName: a string, which indicates which remote log file
                            should be routed into this ILogFile. This should
                            match one of the keys of the logfiles= argument
                            to ShellCommand.

        """

        assert interfaces.ILogFile.providedBy(loog)
        if not logfileName:
            logfileName = loog.getName()
        # each remote log name may only be routed to one local LogFile
        assert logfileName not in self.logs
        self.logs[logfileName] = loog
        self._closeWhenFinished[logfileName] = closeWhenFinished

    def start(self):
        log.msg("LoggedRemoteCommand.start")
        if 'stdio' not in self.logs:
            log.msg("LoggedRemoteCommand (%s) is running a command, but "
                    "it isn't being logged to anything. This seems unusual."
                    % self)
        # accumulates any non-stream updates (see remoteUpdate)
        self.updates = {}
        return RemoteCommand.start(self)

    def addStdout(self, data):
        if 'stdio' in self.logs:
            self.logs['stdio'].addStdout(data)
    def addStderr(self, data):
        if 'stdio' in self.logs:
            self.logs['stdio'].addStderr(data)
    def addHeader(self, data):
        if 'stdio' in self.logs:
            self.logs['stdio'].addHeader(data)

    def addToLog(self, logname, data):
        if logname in self.logs:
            self.logs[logname].addStdout(data)
        else:
            log.msg("%s.addToLog: no such log %s" % (self, logname))

    def remoteUpdate(self, update):
        # Dispatch a status-update dictionary from the slave into the
        # appropriate local LogFiles. Uses 'in' rather than the
        # deprecated dict.has_key(), consistent with start() above.
        if self.debug:
            for k,v in update.items():
                log.msg("Update[%s]: %s" % (k,v))
        if 'stdout' in update:
            # 'stdout': data
            self.addStdout(update['stdout'])
        if 'stderr' in update:
            # 'stderr': data
            self.addStderr(update['stderr'])
        if 'header' in update:
            # 'header': data
            self.addHeader(update['header'])
        if 'log' in update:
            # 'log': (logname, data)
            logname, data = update['log']
            self.addToLog(logname, data)
        if 'rc' in update:
            rc = self.rc = update['rc']
            log.msg("%s rc=%s" % (self, rc))
            self.addHeader("program finished with exit code %d\n" % rc)

        # anything that isn't a recognized stream update is accumulated
        # for later inspection by the Step
        for k in update:
            if k not in ('stdout', 'stderr', 'header', 'rc'):
                self.updates.setdefault(k, []).append(update[k])

    def remoteComplete(self, maybeFailure):
        # close (or annotate) every log we own, then propagate the failure
        for name,loog in self.logs.items():
            if self._closeWhenFinished[name]:
                if maybeFailure:
                    loog.addHeader("\nremoteFailed: %s" % maybeFailure)
                else:
                    log.msg("closing log %s" % loog)
                loog.finish()
        return maybeFailure
+
+
class LogObserver:
    """Watches a LogFile and feeds its stdout/stderr chunks to handlers.

    Subclasses override outReceived/errReceived to process the text."""
    implements(interfaces.ILogObserver)

    def setStep(self, step):
        # remember the owning BuildStep (handlers may want to report to it)
        self.step = step

    def setLog(self, loog):
        assert interfaces.IStatusLog.providedBy(loog)
        # NOTE(review): the second argument presumably requests delivery of
        # already-recorded chunks as well — confirm against IStatusLog
        loog.subscribe(self, True)

    def logChunk(self, build, step, log, channel, text):
        # route each chunk to the handler matching its channel
        if channel == interfaces.LOG_CHANNEL_STDOUT:
            self.outReceived(text)
            return
        if channel == interfaces.LOG_CHANNEL_STDERR:
            self.errReceived(text)

    # TODO: add a logEnded method? er, stepFinished?

    def outReceived(self, data):
        """Called with each chunk of stdout data. Override this in
        your observer."""
        pass

    def errReceived(self, data):
        """Called with each chunk of stderr data. Override this in
        your observer."""
        pass
+
+
class LogLineObserver(LogObserver):
    """A LogObserver that delivers whole lines instead of raw chunks.

    Uses twisted's LineOnlyReceiver to split the stdout and stderr
    streams; override outLineReceived/errLineReceived to consume lines."""

    def __init__(self):
        # The parsers consult transport.disconnecting, and we stand in as
        # their transport, so publish that attribute on ourselves.
        self.disconnecting = False
        self.stdoutParser = self._makeLineParser(self.outLineReceived)
        self.stderrParser = self._makeLineParser(self.errLineReceived)

    def _makeLineParser(self, lineHandler):
        # build one LineOnlyReceiver whose complete lines go to lineHandler
        parser = basic.LineOnlyReceiver()
        parser.delimiter = "\n"
        parser.lineReceived = lineHandler
        parser.transport = self # for the .disconnecting attribute
        return parser

    def setMaxLineLength(self, max_length):
        """
        Set the maximum line length: lines longer than max_length are
        dropped. Default is 16384 bytes. Use sys.maxint for effective
        infinity.
        """
        self.stdoutParser.MAX_LENGTH = max_length
        self.stderrParser.MAX_LENGTH = max_length

    def outReceived(self, data):
        # feed raw stdout chunks into the line splitter
        self.stdoutParser.dataReceived(data)

    def errReceived(self, data):
        # feed raw stderr chunks into the line splitter
        self.stderrParser.dataReceived(data)

    def outLineReceived(self, line):
        """Called with each complete stdout line (without the delimiter).
        Override this in your observer."""
        pass

    def errLineReceived(self, line):
        """Called with each complete stderr line (without the delimiter).
        Override this in your observer."""
        pass
+
+
class RemoteShellCommand(LoggedRemoteCommand):
    """This class helps you run a shell command on the build slave. It will
    accumulate all the command's output into a Log named 'stdio'. When the
    command is finished, it will fire a Deferred. You can then check the
    results of the command and parse the output however you like."""

    def __init__(self, workdir, command, env=None,
                 want_stdout=1, want_stderr=1,
                 timeout=20*60, logfiles=None, usePTY="slave-config"):
        """
        @type workdir: string
        @param workdir: directory where the command ought to run,
                        relative to the Builder's home directory. Defaults to
                        '.': the same as the Builder's homedir. This should
                        probably be '.' for the initial 'cvs checkout'
                        command (which creates a workdir), and the Build-wide
                        workdir for all subsequent commands (including
                        compiles and 'cvs update').

        @type command: list of strings (or string)
        @param command: the shell command to run, like 'make all' or
                        'cvs update'. This should be a list or tuple
                        which can be used directly as the argv array.
                        For backwards compatibility, if this is a
                        string, the text will be given to '/bin/sh -c
                        %s'.

        @type env: dict of string->string
        @param env: environment variables to add or change for the
                    slave. Each command gets a separate
                    environment; all inherit the slave's initial
                    one. TODO: make it possible to delete some or
                    all of the slave's environment.

        @type want_stdout: bool
        @param want_stdout: defaults to True. Set to False if stdout should
                            be thrown away. Do this to avoid storing or
                            sending large amounts of useless data.

        @type want_stderr: bool
        @param want_stderr: False if stderr should be thrown away

        @type timeout: int
        @param timeout: tell the remote that if the command fails to
                        produce any output for this number of seconds,
                        the command is hung and should be killed. Use
                        None to disable the timeout.

        @type logfiles: dict of string->string
        @param logfiles: maps additional local logfile names to remote
                         filenames whose contents should be routed back
                         (see LoggedRemoteCommand.useLog). Defaults to {}.

        @param usePTY: whether to run the command in a pty. The default
                       "slave-config" presumably defers to the
                       buildslave's own setting — TODO confirm.
        """

        # Use a None sentinel rather than a mutable {} default: the dict
        # ends up stored in self.args, so a shared default could leak
        # state between RemoteShellCommand instances.
        if logfiles is None:
            logfiles = {}
        self.command = command # stash .command, set it later
        if env is not None:
            # avoid mutating the original master.cfg dictionary. Each
            # ShellCommand gets its own copy, any start() methods won't be
            # able to modify the original.
            env = env.copy()
        args = {'workdir': workdir,
                'env': env,
                'want_stdout': want_stdout,
                'want_stderr': want_stderr,
                'logfiles': logfiles,
                'timeout': timeout,
                'usePTY': usePTY,
                }
        LoggedRemoteCommand.__init__(self, "shell", args)

    def start(self):
        self.args['command'] = self.command
        if self.remote_command == "shell":
            # non-ShellCommand slavecommands are responsible for doing this
            # fixup themselves
            if self.step.slaveVersion("shell", "old") == "old":
                self.args['dir'] = self.args['workdir']
        what = "command '%s' in dir '%s'" % (self.args['command'],
                                             self.args['workdir'])
        log.msg(what)
        return LoggedRemoteCommand.start(self)

    def __repr__(self):
        return "<RemoteShellCommand '%s'>" % repr(self.command)
+
class BuildStep:
    """
    I represent a single step of the build process. This step may involve
    zero or more commands to be run in the build slave, as well as arbitrary
    processing on the master side. Regardless of how many slave commands are
    run, the BuildStep will result in a single status value.

    The step is started by calling startStep(), which returns a Deferred that
    fires when the step finishes. See C{startStep} for a description of the
    results provided by that Deferred.

    __init__ and start are good methods to override. Don't forget to upcall
    BuildStep.__init__ or bad things will happen.

    To launch a RemoteCommand, pass it to .runCommand and wait on the
    Deferred it returns.

    Each BuildStep generates status as it runs. This status data is fed to
    the L{buildbot.status.builder.BuildStepStatus} listener that sits in
    C{self.step_status}. It can also feed progress data (like how much text
    is output by a shell command) to the
    L{buildbot.status.progress.StepProgress} object that lives in
    C{self.progress}, by calling C{self.setProgress(metric, value)} as it
    runs.

    @type build: L{buildbot.process.base.Build}
    @ivar build: the parent Build which is executing this step

    @type progress: L{buildbot.status.progress.StepProgress}
    @ivar progress: tracks ETA for the step

    @type step_status: L{buildbot.status.builder.BuildStepStatus}
    @ivar step_status: collects output status
    """

    # these parameters are used by the parent Build object to decide how to
    # interpret our results. haltOnFailure will affect the build process
    # immediately, the others will be taken into consideration when
    # determining the overall build status.
    #
    # steps that are marked as alwaysRun will be run regardless of the outcome
    # of previous steps (especially steps with haltOnFailure=True)
    haltOnFailure = False
    flunkOnWarnings = False
    flunkOnFailure = False
    warnOnWarnings = False
    warnOnFailure = False
    alwaysRun = False

    # 'parms' holds a list of all the parameters we care about, to allow
    # users to instantiate a subclass of BuildStep with a mixture of
    # arguments, some of which are for us, some of which are for the subclass
    # (or a delegate of the subclass, like how ShellCommand delivers many
    # arguments to the RemoteShellCommand that it creates). Such delegating
    # subclasses will use this list to figure out which arguments are meant
    # for us and which should be given to someone else.
    parms = ['name', 'locks',
             'haltOnFailure',
             'flunkOnWarnings',
             'flunkOnFailure',
             'warnOnWarnings',
             'warnOnFailure',
             'alwaysRun',
             'progressMetrics',
             ]

    name = "generic"
    locks = []
    progressMetrics = () # 'time' is implicit
    useProgress = True # set to False if step is really unpredictable
    build = None
    step_status = None
    progress = None

    def __init__(self, **kwargs):
        # snapshot the full kwargs (before the step-specific parameters are
        # deleted below) so getStepFactory() can re-create this step exactly
        self.factory = (self.__class__, dict(kwargs))
        for p in self.__class__.parms:
            if p in kwargs:
                setattr(self, p, kwargs[p])
                del kwargs[p]
        if kwargs:
            why = "%s.__init__ got unexpected keyword argument(s) %s" \
                  % (self, kwargs.keys())
            raise TypeError(why)
        self._pendingLogObservers = []

    def setBuild(self, build):
        # subclasses which wish to base their behavior upon qualities of the
        # Build (e.g. use the list of changed files to run unit tests only on
        # code which has been modified) should do so here. The Build is not
        # available during __init__, but setBuild() will be called just
        # afterwards.
        self.build = build

    def setBuildSlave(self, buildslave):
        self.buildslave = buildslave

    def setDefaultWorkdir(self, workdir):
        # The Build calls this just after __init__(). ShellCommand
        # and variants use a slave-side workdir, but some other steps
        # do not. Subclasses which use a workdir should use the value
        # set by this method unless they were constructed with
        # something more specific.
        pass

    def addFactoryArguments(self, **kwargs):
        # record extra constructor arguments so getStepFactory() is complete
        self.factory[1].update(kwargs)

    def getStepFactory(self):
        return self.factory

    def setStepStatus(self, step_status):
        self.step_status = step_status

    def setupProgress(self):
        # create a StepProgress unless this step opted out via useProgress
        if self.useProgress:
            sp = progress.StepProgress(self.name, self.progressMetrics)
            self.progress = sp
            self.step_status.setProgress(sp)
            return sp
        return None

    def setProgress(self, metric, value):
        """BuildSteps can call self.setProgress() to announce progress along
        some metric."""
        if self.progress:
            self.progress.setProgress(metric, value)

    def getProperty(self, propname):
        return self.build.getProperty(propname)

    def setProperty(self, propname, value, source="Step"):
        self.build.setProperty(propname, value, source)

    def startStep(self, remote):
        """Begin the step. This returns a Deferred that will fire when the
        step finishes.

        This deferred fires with a tuple of (result, [extra text]), although
        older steps used to return just the 'result' value, so the receiving
        L{base.Build} needs to be prepared to handle that too. C{result} is
        one of the SUCCESS/WARNINGS/FAILURE/SKIPPED constants from
        L{buildbot.status.builder}, and the extra text is a list of short
        strings which should be appended to the Build's text results. This
        text allows a test-case step which fails to append B{17 tests} to the
        Build's status, in addition to marking the build as failing.

        The deferred will errback if the step encounters an exception,
        including an exception on the slave side (or if the slave goes away
        altogether). Failures in shell commands (rc!=0) will B{not} cause an
        errback, in general the BuildStep will evaluate the results and
        decide whether to treat it as a WARNING or FAILURE.

        @type remote: L{twisted.spread.pb.RemoteReference}
        @param remote: a reference to the slave's
                       L{buildbot.slave.bot.SlaveBuilder} instance where any
                       RemoteCommands may be run
        """

        self.remote = remote
        self.deferred = defer.Deferred()
        # convert all locks into their real form
        lock_list = []
        for access in self.locks:
            if not isinstance(access, locks.LockAccess):
                # Buildbot 0.7.7 compability: user did not specify access
                access = access.defaultAccess()
            lock = self.build.builder.botmaster.getLockByID(access.lockid)
            lock_list.append((lock, access))
        self.locks = lock_list
        # then narrow SlaveLocks down to the slave that this build is being
        # run on
        self.locks = [(l.getLock(self.build.slavebuilder), la) for l, la in self.locks]
        for l, la in self.locks:
            if l in self.build.locks:
                log.msg("Hey, lock %s is claimed by both a Step (%s) and the"
                        " parent Build (%s)" % (l, self, self.build))
                raise RuntimeError("lock claimed by both Step and Build")
        d = self.acquireLocks()
        d.addCallback(self._startStep_2)
        return self.deferred

    def acquireLocks(self, res=None):
        # claim every lock in self.locks, waiting (and re-entering this
        # method via callback) whenever one is unavailable
        log.msg("acquireLocks(step %s, locks %s)" % (self, self.locks))
        if not self.locks:
            return defer.succeed(None)
        for lock, access in self.locks:
            if not lock.isAvailable(access):
                log.msg("step %s waiting for lock %s" % (self, lock))
                d = lock.waitUntilMaybeAvailable(self, access)
                d.addCallback(self.acquireLocks)
                return d
        # all locks are available, claim them all
        for lock, access in self.locks:
            lock.claim(self, access)
        return defer.succeed(None)

    def _startStep_2(self, res):
        # locks are held: report the start and invoke the subclass's start()
        if self.progress:
            self.progress.start()
        self.step_status.stepStarted()
        try:
            skip = self.start()
            if skip == SKIPPED:
                # this return value from self.start is a shortcut
                # to finishing the step immediately
                reactor.callLater(0, self.finished, SKIPPED)
        except:
            log.msg("BuildStep.startStep exception in .start")
            self.failed(Failure())

    def start(self):
        """Begin the step. Override this method and add code to do local
        processing, fire off remote commands, etc.

        To spawn a command in the buildslave, create a RemoteCommand instance
        and run it with self.runCommand::

          c = RemoteCommandFoo(args)
          d = self.runCommand(c)
          d.addCallback(self.fooDone).addErrback(self.failed)

        As the step runs, it should send status information to the
        BuildStepStatus::

          self.step_status.setText(['compile', 'failed'])
          self.step_status.setText2(['4', 'warnings'])

        To have some code parse stdio (or other log stream) in realtime, add
        a LogObserver subclass. This observer can use self.step.setProgress()
        to provide better progress notification to the step.::

          self.addLogObserver('stdio', MyLogObserver())

        To add a LogFile, use self.addLog. Make sure it gets closed when it
        finishes. When giving a Logfile to a RemoteShellCommand, just ask it
        to close the log when the command completes::

          log = self.addLog('output')
          cmd = RemoteShellCommand(args)
          cmd.useLog(log, closeWhenFinished=True)

        You can also create complete Logfiles with generated text in a single
        step::

          self.addCompleteLog('warnings', text)

        When the step is done, it should call self.finished(result). 'result'
        will be provided to the L{buildbot.process.base.Build}, and should be
        one of the constants defined above: SUCCESS, WARNINGS, FAILURE, or
        SKIPPED.

        If the step encounters an exception, it should call self.failed(why).
        'why' should be a Failure object. This automatically fails the whole
        build with an exception. It is a good idea to add self.failed as an
        errback to any Deferreds you might obtain.

        If the step decides it does not need to be run, start() can return
        the constant SKIPPED. This fires the callback immediately: it is not
        necessary to call .finished yourself. This can also indicate to the
        status-reporting mechanism that this step should not be displayed."""

        raise NotImplementedError("your subclass must implement this method")

    def interrupt(self, reason):
        """Halt the command, either because the user has decided to cancel
        the build ('reason' is a string), or because the slave has
        disconnected ('reason' is a ConnectionLost Failure). Any further
        local processing should be skipped, and the Step completed with an
        error status. The results text should say something useful like
        ['step', 'interrupted'] or ['remote', 'lost']"""
        pass

    def releaseLocks(self):
        log.msg("releaseLocks(%s): %s" % (self, self.locks))
        for lock, access in self.locks:
            lock.release(self, access)

    def finished(self, results):
        if self.progress:
            self.progress.finish()
        self.step_status.stepFinished(results)
        self.releaseLocks()
        self.deferred.callback(results)

    def failed(self, why):
        # if isinstance(why, pb.CopiedFailure): # a remote exception might
        # only have short traceback, so formatFailure is not as useful as
        # you'd like (no .frames, so no traceback is displayed)
        log.msg("BuildStep.failed, traceback follows")
        log.err(why)
        try:
            if self.progress:
                self.progress.finish()
            self.addHTMLLog("err.html", formatFailure(why))
            self.addCompleteLog("err.text", why.getTraceback())
            # could use why.getDetailedTraceback() for more information
            self.step_status.setText([self.name, "exception"])
            self.step_status.setText2([self.name])
            self.step_status.stepFinished(EXCEPTION)
        except:
            # deliberately broad: failure processing must never prevent the
            # build from finishing
            log.msg("exception during failure processing")
            log.err()
            # the progress stuff may still be whacked (the StepStatus may
            # think that it is still running), but the build overall will now
            # finish
        try:
            self.releaseLocks()
        except:
            log.msg("exception while releasing locks")
            log.err()

        log.msg("BuildStep.failed now firing callback")
        self.deferred.callback(EXCEPTION)

    # utility methods that BuildSteps may find useful

    def slaveVersion(self, command, oldversion=None):
        """Return the version number of the given slave command. For the
        commands defined in buildbot.slave.commands, this is the value of
        'cvs_ver' at the top of that file. Non-existent commands will return
        a value of None. Buildslaves running buildbot-0.5.0 or earlier did
        not respond to the version query: commands on those slaves will
        return a value of OLDVERSION, so you can distinguish between old
        buildslaves and missing commands.

        If you know that <=0.5.0 buildslaves have the command you want (CVS
        and SVN existed back then, but none of the other VC systems), then it
        makes sense to call this with oldversion='old'. If the command you
        want is newer than that, just leave oldversion= unspecified, and the
        command will return None for a buildslave that does not implement the
        command.
        """
        return self.build.getSlaveCommandVersion(command, oldversion)

    def slaveVersionIsOlderThan(self, command, minversion):
        """Return True if the slave's version of 'command' is older than
        'minversion' (a dotted version string), or if the slave does not
        know the command at all."""
        sv = self.build.getSlaveCommandVersion(command, None)
        if sv is None:
            return True
        # the version we get back is a string form of the CVS version number
        # of the slave's buildbot/slave/commands.py, something like 1.39 .
        # This might change in the future (I might move away from CVS), but
        # if so I'll keep updating that string with suitably-comparable
        # values.
        #
        # Compare components numerically: a plain string comparison would
        # wrongly claim that "1.10" is older than "1.9".
        try:
            sv_parts = tuple(int(p) for p in sv.split("."))
            min_parts = tuple(int(p) for p in minversion.split("."))
        except ValueError:
            # non-numeric component: fall back to string comparison
            sv_parts = sv.split(".")
            min_parts = minversion.split(".")
        if sv_parts < min_parts:
            return True
        return False

    def getSlaveName(self):
        return self.build.getSlaveName()

    def addLog(self, name):
        loog = self.step_status.addLog(name)
        self._connectPendingLogObservers()
        return loog

    def getLog(self, name):
        for l in self.step_status.getLogs():
            if l.getName() == name:
                return l
        raise KeyError("no log named '%s'" % (name,))

    def addCompleteLog(self, name, text):
        """Create a LogFile named 'name', fill it with 'text' (written in
        chunkSize pieces), and close it."""
        log.msg("addCompleteLog(%s)" % name)
        loog = self.step_status.addLog(name)
        size = loog.chunkSize
        for start in range(0, len(text), size):
            loog.addStdout(text[start:start+size])
        loog.finish()
        self._connectPendingLogObservers()

    def addHTMLLog(self, name, html):
        log.msg("addHTMLLog(%s)" % name)
        self.step_status.addHTMLLog(name, html)
        self._connectPendingLogObservers()

    def addLogObserver(self, logname, observer):
        assert interfaces.ILogObserver.providedBy(observer)
        observer.setStep(self)
        self._pendingLogObservers.append((logname, observer))
        self._connectPendingLogObservers()

    def _connectPendingLogObservers(self):
        # attach any queued observers whose named log now exists; observers
        # for logs that have not been created yet remain pending
        if not self._pendingLogObservers:
            return
        if not self.step_status:
            return
        current_logs = {}
        for loog in self.step_status.getLogs():
            current_logs[loog.getName()] = loog
        for logname, observer in self._pendingLogObservers[:]:
            if logname in current_logs:
                observer.setLog(current_logs[logname])
                self._pendingLogObservers.remove((logname, observer))

    def addURL(self, name, url):
        """Add a BuildStep URL to this step.

        An HREF to this URL will be added to any HTML representations of this
        step. This allows a step to provide links to external web pages,
        perhaps to provide detailed HTML code coverage results or other forms
        of build status.
        """
        self.step_status.addURL(name, url)

    def runCommand(self, c):
        c.buildslave = self.buildslave
        d = c.run(self, self.remote)
        return d
+
+
class OutputProgressObserver(LogObserver):
    """Feeds the cumulative length of a log's output into the step's
    progress tracking, under a single named metric."""

    # running total of text seen so far
    length = 0

    def __init__(self, name):
        # the progress metric this observer reports under
        self.name = name

    def logChunk(self, build, step, log, channel, text):
        # every chunk (stdout, stderr, or header) counts toward the total
        self.length = self.length + len(text)
        self.step.setProgress(self.name, self.length)
+
class LoggingBuildStep(BuildStep):
    """This is an abstract base class, suitable for inheritance by all
    BuildSteps that invoke RemoteCommands which emit stdout/stderr messages.
    """

    progressMetrics = ('output',)
    logfiles = {}

    parms = BuildStep.parms + ['logfiles']

    def __init__(self, logfiles=None, *args, **kwargs):
        BuildStep.__init__(self, *args, **kwargs)
        # None sentinel instead of a mutable {} default, which would be
        # shared between every instance of this class
        if logfiles is None:
            logfiles = {}
        self.addFactoryArguments(logfiles=logfiles)
        # merge a class-level 'logfiles' attribute with one passed in as an
        # argument
        self.logfiles = self.logfiles.copy()
        self.logfiles.update(logfiles)
        self.addLogObserver('stdio', OutputProgressObserver("output"))

    def describe(self, done=False):
        raise NotImplementedError("implement this in a subclass")

    def startCommand(self, cmd, errorMessages=None):
        """
        @param cmd: a suitable RemoteCommand which will be launched, with
                    all output being put into our self.stdio_log LogFile
        @param errorMessages: optional list of header strings to prepend
                              to the stdio log
        """
        if errorMessages is None:
            # avoid a shared mutable [] default
            errorMessages = []
        log.msg("ShellCommand.startCommand(cmd=%s)" % (cmd,))
        log.msg("  cmd.args = %r" % (cmd.args))
        self.cmd = cmd # so we can interrupt it
        self.step_status.setText(self.describe(False))

        # stdio is the first log
        self.stdio_log = stdio_log = self.addLog("stdio")
        cmd.useLog(stdio_log, True)
        for em in errorMessages:
            stdio_log.addHeader(em)
            # TODO: consider setting up self.stdio_log earlier, and have the
            # code that passes in errorMessages instead call
            # self.stdio_log.addHeader() directly.

        # there might be other logs
        self.setupLogfiles(cmd, self.logfiles)

        d = self.runCommand(cmd) # might raise ConnectionLost
        d.addCallback(lambda res: self.commandComplete(cmd))
        d.addCallback(lambda res: self.createSummary(cmd.logs['stdio']))
        d.addCallback(lambda res: self.evaluateCommand(cmd)) # returns results
        def _gotResults(results):
            self.setStatus(cmd, results)
            return results
        d.addCallback(_gotResults) # returns results
        d.addCallbacks(self.finished, self.checkDisconnect)
        d.addErrback(self.failed)

    def setupLogfiles(self, cmd, logfiles):
        """Set up any additional logfiles= logs.
        """
        for logname,remotefilename in logfiles.items():
            # tell the BuildStepStatus to add a LogFile
            newlog = self.addLog(logname)
            # and tell the LoggedRemoteCommand to feed it
            cmd.useLog(newlog, True)

    def interrupt(self, reason):
        # TODO: consider adding an INTERRUPTED or STOPPED status to use
        # instead of FAILURE, might make the text a bit more clear.
        # 'reason' can be a Failure, or text
        self.addCompleteLog('interrupt', str(reason))
        d = self.cmd.interrupt(reason)
        return d

    def checkDisconnect(self, f):
        # only a lost slave is handled here; any other failure propagates
        f.trap(error.ConnectionLost)
        self.step_status.setText(self.describe(True) +
                                 ["failed", "slave", "lost"])
        self.step_status.setText2(["failed", "slave", "lost"])
        return self.finished(FAILURE)

    # to refine the status output, override one or more of the following
    # methods. Change as little as possible: start with the first ones on
    # this list and only proceed further if you have to
    #
    # createSummary: add additional Logfiles with summarized results
    # evaluateCommand: decides whether the step was successful or not
    #
    # getText: create the final per-step text strings
    # describeText2: create the strings added to the overall build status
    #
    # getText2: only adds describeText2() when the step affects build status
    #
    # setStatus: handles all status updating

    # commandComplete is available for general-purpose post-completion work.
    # It is a good place to do one-time parsing of logfiles, counting
    # warnings and errors. It should probably stash such counts in places
    # like self.warnings so they can be picked up later by your getText
    # method.

    # TODO: most of this stuff should really be on BuildStep rather than
    # ShellCommand. That involves putting the status-setup stuff in
    # .finished, which would make it hard to turn off.

    def commandComplete(self, cmd):
        """This is a general-purpose hook method for subclasses. It will be
        called after the remote command has finished, but before any of the
        other hook functions are called."""
        pass

    def createSummary(self, log):
        """To create summary logs, do something like this:
        warnings = grep('^Warning:', log.getText())
        self.addCompleteLog('warnings', warnings)
        """
        pass

    def evaluateCommand(self, cmd):
        """Decide whether the command was SUCCESS, WARNINGS, or FAILURE.
        Override this to, say, declare WARNINGS if there is any stderr
        activity, or to say that rc!=0 is not actually an error."""

        if cmd.rc != 0:
            return FAILURE
        # if cmd.log.getStderr(): return WARNINGS
        return SUCCESS

    def getText(self, cmd, results):
        if results == SUCCESS:
            return self.describe(True)
        elif results == WARNINGS:
            return self.describe(True) + ["warnings"]
        else:
            return self.describe(True) + ["failed"]

    def getText2(self, cmd, results):
        """We have decided to add a short note about ourselves to the overall
        build description, probably because something went wrong. Return a
        short list of short strings. If your subclass counts test failures or
        warnings of some sort, this is a good place to announce the count."""
        # return ["%d warnings" % warningcount]
        # return ["%d tests" % len(failedTests)]
        return [self.name]

    def maybeGetText2(self, cmd, results):
        # only annotate the overall build when this step actually affects
        # its outcome
        if results == SUCCESS:
            # successful steps do not add anything to the build's text
            pass
        elif results == WARNINGS:
            if (self.flunkOnWarnings or self.warnOnWarnings):
                # we're affecting the overall build, so tell them why
                return self.getText2(cmd, results)
        else:
            if (self.haltOnFailure or self.flunkOnFailure
                or self.warnOnFailure):
                # we're affecting the overall build, so tell them why
                return self.getText2(cmd, results)
        return []

    def setStatus(self, cmd, results):
        # this is good enough for most steps, but it can be overridden to
        # get more control over the displayed text
        self.step_status.setText(self.getText(cmd, results))
        self.step_status.setText2(self.maybeGetText2(cmd, results))
+
# (WithProperties used to be available in this module)
from buildbot.process.properties import WithProperties
# reference the imported name once so pyflakes does not flag it as an
# unused import, then discard the temporary
_hush_pyflakes = [WithProperties]
del _hush_pyflakes
+
diff --git a/buildbot/buildbot/process/factory.py b/buildbot/buildbot/process/factory.py
new file mode 100644
index 0000000..37551d9
--- /dev/null
+++ b/buildbot/buildbot/process/factory.py
@@ -0,0 +1,182 @@
+# -*- test-case-name: buildbot.test.test_step -*-
+
+from buildbot import util
+from buildbot.process.base import Build
+from buildbot.process.buildstep import BuildStep
+from buildbot.steps.source import CVS, SVN
+from buildbot.steps.shell import Configure, Compile, Test, PerlModuleTest
+
+# deprecated, use BuildFactory.addStep
def s(steptype, **kwargs):
    """Deprecated convenience helper for master.cfg files (use
    BuildFactory.addStep instead): wraps a step class and its keyword
    arguments into the (steptype, kwargs) specification tuple."""
    return (steptype, kwargs)
+
class BuildFactory(util.ComparableMixin):
    """Produces Build objects from a list of step factories.

    @cvar buildClass: class to use when creating builds
    @type buildClass: L{buildbot.process.base.Build}
    """
    buildClass = Build
    useProgress = 1
    compare_attrs = ['buildClass', 'steps', 'useProgress']

    def __init__(self, steps=None):
        self.steps = []
        for step in (steps or []):
            self.steps.append(self._makeStepFactory(step))

    def _makeStepFactory(self, step_or_factory):
        # BuildStep instances know how to produce their own factory tuple;
        # anything else is assumed to already be in factory form.
        if isinstance(step_or_factory, BuildStep):
            return step_or_factory.getStepFactory()
        return step_or_factory

    def newBuild(self, request):
        """Create a new Build instance.
        @param request: a L{base.BuildRequest} describing what is to be built
        """
        build = self.buildClass(request)
        build.useProgress = self.useProgress
        build.setStepFactories(self.steps)
        return build

    def addStep(self, step_or_factory, **kwargs):
        """Append one step: either a BuildStep instance or a step class
        plus its constructor keyword arguments."""
        if isinstance(step_or_factory, BuildStep):
            entry = step_or_factory.getStepFactory()
        else:
            entry = (step_or_factory, dict(kwargs))
        self.steps.append(entry)

    def addSteps(self, steps):
        """Append several BuildStep instances at once."""
        self.steps.extend([step.getStepFactory() for step in steps])
+
+# BuildFactory subclasses for common build tools
+
class GNUAutoconf(BuildFactory):
    """Classic './configure && make all && make check' build.

    @param source: step specification for the source checkout
    @param configure: configure command (string or list of strings);
        None skips the configure step entirely
    @param configureEnv: extra environment dict for the configure step
    @param configureFlags: extra argv appended to the configure command
    @param compile: compile command, or None to skip that step
    @param test: test command, or None to skip that step
    """
    def __init__(self, source, configure="./configure",
                 configureEnv=None,
                 configureFlags=None,
                 compile=["make", "all"],
                 test=["make", "check"]):
        BuildFactory.__init__(self, [source])
        # None replaces the old mutable defaults ({} and []), which were
        # shared across instances; compile/test keep their list defaults
        # because None is meaningful there (it means "skip the step").
        if configureEnv is None:
            configureEnv = {}
        if configureFlags is None:
            configureFlags = []
        if configure is not None:
            # we either need to wind up with a string (which will be
            # space-split), or with a list of strings (which will not). The
            # list of strings is the preferred form.
            if isinstance(configure, str):
                if configureFlags:
                    assert " " not in configure  # please use list instead
                    command = [configure] + configureFlags
                else:
                    command = configure
            else:
                assert isinstance(configure, (list, tuple))
                command = configure + configureFlags
            self.addStep(Configure, command=command, env=configureEnv)
        if compile is not None:
            self.addStep(Compile, command=compile)
        if test is not None:
            self.addStep(Test, command=test)
+
class CPAN(BuildFactory):
    """'perl Makefile.PL' / 'make' / 'make test' build for CPAN modules."""
    def __init__(self, source, perl="perl"):
        BuildFactory.__init__(self, [source])
        step_specs = [
            (Configure, [perl, "Makefile.PL"]),
            (Compile, ["make"]),
            (PerlModuleTest, ["make", "test"]),
        ]
        for step_class, argv in step_specs:
            self.addStep(step_class, command=argv)
+
class Distutils(BuildFactory):
    """'python ./setup.py build', optionally followed by a test command."""
    def __init__(self, source, python="python", test=None):
        BuildFactory.__init__(self, [source])
        build_cmd = [python, "./setup.py", "build"]
        self.addStep(Compile, command=build_cmd)
        if test is None:
            return
        self.addStep(Test, command=test)
+
class Trial(BuildFactory):
    """Build a python module that uses distutils and trial. Set 'tests' to
    the module in which the tests can be found, or set useTestCaseNames=True
    to always have trial figure out which tests to run (based upon which
    files have been changed).

    See docs/factories.xhtml for usage samples. Not all of the Trial
    BuildStep options are available here, only the most commonly used ones.
    To get complete access, you will need to create a custom
    BuildFactory."""

    trial = "trial"
    randomly = False
    recurse = False

    def __init__(self, source,
                 buildpython=None, trialpython=None, trial=None,
                 testpath=".", randomly=None, recurse=None,
                 tests=None, useTestCaseNames=False, env=None):
        BuildFactory.__init__(self, [source])
        assert tests or useTestCaseNames, "must use one or the other"
        # None replaces the old mutable defaults (["python"] and []),
        # which were shared between calls.
        if buildpython is None:
            buildpython = ["python"]
        if trialpython is None:
            trialpython = []
        if trial is not None:
            self.trial = trial
        if randomly is not None:
            self.randomly = randomly
        if recurse is not None:
            self.recurse = recurse

        # local import (presumably to avoid an import cycle with
        # buildbot.steps -- TODO confirm)
        from buildbot.steps.python_twisted import Trial
        buildcommand = buildpython + ["./setup.py", "build"]
        self.addStep(Compile, command=buildcommand, env=env)
        self.addStep(Trial,
                     python=trialpython, trial=self.trial,
                     testpath=testpath,
                     tests=tests, testChanges=useTestCaseNames,
                     randomly=self.randomly,
                     recurse=self.recurse,
                     env=env,
                     )
+
+
+# compatibility classes, will go away. Note that these only offer
+# compatibility at the constructor level: if you have subclassed these
+# factories, your subclasses are unlikely to still work correctly.
+
+ConfigurableBuildFactory = BuildFactory
+
class BasicBuildFactory(GNUAutoconf):
    # really a "GNU Autoconf-created tarball -in-CVS tree" builder
    """GNU-Autoconf-style tree checked out of CVS (clobber or copy mode)."""

    def __init__(self, cvsroot, cvsmodule,
                 configure=None, configureEnv=None,
                 compile="make all",
                 test="make check", cvsCopy=False):
        # None replaces the old shared mutable default ({})
        if configureEnv is None:
            configureEnv = {}
        mode = "clobber"
        if cvsCopy:
            mode = "copy"
        source = s(CVS, cvsroot=cvsroot, cvsmodule=cvsmodule, mode=mode)
        GNUAutoconf.__init__(self, source,
                             configure=configure, configureEnv=configureEnv,
                             compile=compile,
                             test=test)
+
class QuickBuildFactory(BasicBuildFactory):
    """Like BasicBuildFactory, but checks out with mode='update' and
    disables progress tracking for a faster turnaround."""
    useProgress = False

    def __init__(self, cvsroot, cvsmodule,
                 configure=None, configureEnv=None,
                 compile="make all",
                 test="make check", cvsCopy=False):
        # None replaces the old shared mutable default ({}).
        # NOTE(review): cvsCopy is accepted for signature compatibility but
        # is unused here -- the checkout mode is always "update".
        if configureEnv is None:
            configureEnv = {}
        source = s(CVS, cvsroot=cvsroot, cvsmodule=cvsmodule, mode="update")
        # deliberately bypasses BasicBuildFactory.__init__ so the checkout
        # mode is "update" instead of clobber/copy
        GNUAutoconf.__init__(self, source,
                             configure=configure, configureEnv=configureEnv,
                             compile=compile,
                             test=test)
+
class BasicSVN(GNUAutoconf):
    """GNU-Autoconf-style tree checked out of Subversion (mode='update')."""

    def __init__(self, svnurl,
                 configure=None, configureEnv=None,
                 compile="make all",
                 test="make check"):
        # None replaces the old shared mutable default ({})
        if configureEnv is None:
            configureEnv = {}
        source = s(SVN, svnurl=svnurl, mode="update")
        GNUAutoconf.__init__(self, source,
                             configure=configure, configureEnv=configureEnv,
                             compile=compile,
                             test=test)
diff --git a/buildbot/buildbot/process/process_twisted.py b/buildbot/buildbot/process/process_twisted.py
new file mode 100644
index 0000000..36d6fc5
--- /dev/null
+++ b/buildbot/buildbot/process/process_twisted.py
@@ -0,0 +1,118 @@
+
+# Build classes specific to the Twisted codebase
+
+from buildbot.process.base import Build
+from buildbot.process.factory import BuildFactory
+from buildbot.steps import shell
+from buildbot.steps.python_twisted import HLint, ProcessDocs, BuildDebs, \
+ Trial, RemovePYCs
+
class TwistedBuild(Build):
    # twisted's bin/trial expects to live in here
    workdir = "Twisted"

    def isFileImportant(self, filename):
        """Changes under doc/fun/ or sandbox/ never trigger a build."""
        for ignored_prefix in ("doc/fun/", "sandbox/"):
            if filename.startswith(ignored_prefix):
                return 0
        return 1
+
class TwistedTrial(Trial):
    # run the whole "twisted" package's tests by default
    tests = "twisted"
    # the Trial in Twisted >=2.1.0 has --recurse on by default, and -to
    # turned into --reporter=bwverbose .
    recurse = False
    trialMode = ["--reporter=bwverbose"]
    # no explicit testpath: bin/trial adds the local tree to PYTHONPATH
    # itself (see TwistedBaseFactory.workdir)
    testpath = None
    trial = "./bin/trial"
+
class TwistedBaseFactory(BuildFactory):
    """Base factory for Twisted builds: uses TwistedBuild for its builds
    and checks the source out into a workdir named "Twisted"."""
    buildClass = TwistedBuild
    # bin/trial expects its parent directory to be named "Twisted": it uses
    # this to add the local tree to PYTHONPATH during tests
    workdir = "Twisted"

    def __init__(self, source):
        BuildFactory.__init__(self, [source])
+
class QuickTwistedBuildFactory(TwistedBaseFactory):
    """Fast developer turnaround: hlint, in-place build_ext for each
    interpreter, then trial on only the changed tests."""
    treeStableTimer = 30
    useProgress = 0

    def __init__(self, source, python="python"):
        TwistedBaseFactory.__init__(self, source)
        if type(python) is str:
            python = [python]
        # hlint only needs a single interpreter
        self.addStep(HLint, python=python[0])
        self.addStep(RemovePYCs)
        for interpreter in python:
            build_cmd = [interpreter, "setup.py", "build_ext", "-i"]
            self.addStep(shell.Compile, command=build_cmd,
                         flunkOnFailure=True)
            self.addStep(TwistedTrial, python=interpreter, testChanges=True)
+
class FullTwistedBuildFactory(TwistedBaseFactory):
    """Complete Twisted build: optional doc processing, in-place
    build_ext, then the full trial run."""
    treeStableTimer = 5*60

    def __init__(self, source, python="python",
                 processDocs=False, runTestsRandomly=False,
                 compileOpts=None, compileOpts2=None):
        TwistedBaseFactory.__init__(self, source)
        # None replaces the old shared mutable defaults ([])
        if compileOpts is None:
            compileOpts = []
        if compileOpts2 is None:
            compileOpts2 = []
        if processDocs:
            self.addStep(ProcessDocs)

        if type(python) == str:
            python = [python]
        assert isinstance(compileOpts, list)
        assert isinstance(compileOpts2, list)
        # compileOpts go before setup.py, compileOpts2 between build_ext
        # and -i
        cmd = (python + compileOpts + ["setup.py", "build_ext"]
               + compileOpts2 + ["-i"])

        self.addStep(shell.Compile, command=cmd, flunkOnFailure=True)
        self.addStep(RemovePYCs)
        self.addStep(TwistedTrial, python=python, randomly=runTestsRandomly)
+
class TwistedDebsBuildFactory(TwistedBaseFactory):
    """Process the Twisted docs, then build the Debian packages."""
    treeStableTimer = 10*60

    # NOTE(review): 'python' is accepted but never used below -- confirm
    # whether it should be forwarded to ProcessDocs/BuildDebs.
    def __init__(self, source, python="python"):
        TwistedBaseFactory.__init__(self, source)
        self.addStep(ProcessDocs, haltOnFailure=True)
        self.addStep(BuildDebs, warnOnWarnings=True)
+
class TwistedReactorsBuildFactory(TwistedBaseFactory):
    """Build once, then run the Twisted test suite once per reactor."""
    treeStableTimer = 5*60

    def __init__(self, source,
                 python="python", compileOpts=None, compileOpts2=None,
                 reactors=None):
        TwistedBaseFactory.__init__(self, source)
        # None replaces the old shared mutable defaults ([])
        if compileOpts is None:
            compileOpts = []
        if compileOpts2 is None:
            compileOpts2 = []

        if type(python) == str:
            python = [python]
        assert isinstance(compileOpts, list)
        assert isinstance(compileOpts2, list)
        cmd = (python + compileOpts + ["setup.py", "build_ext"]
               + compileOpts2 + ["-i"])

        # compile failures only warn here: an unbuildable extension should
        # not mask per-reactor test results
        self.addStep(shell.Compile, command=cmd, warnOnFailure=True)

        if reactors is None:  # 'is None', not '== None'
            reactors = [
                'gtk2',
                'gtk',
                #'kqueue',
                'poll',
                'c',
                'qt',
                #'win32',
                ]
        for reactor in reactors:
            flunkOnFailure = 1
            warnOnFailure = 0
            #if reactor in ['c', 'qt', 'win32']:
            #    # these are buggy, so tolerate failures for now
            #    flunkOnFailure = 0
            #    warnOnFailure = 1
            self.addStep(RemovePYCs) # TODO: why?
            self.addStep(TwistedTrial, name=reactor, python=python,
                         reactor=reactor, flunkOnFailure=flunkOnFailure,
                         warnOnFailure=warnOnFailure)
diff --git a/buildbot/buildbot/process/properties.py b/buildbot/buildbot/process/properties.py
new file mode 100644
index 0000000..2d07db9
--- /dev/null
+++ b/buildbot/buildbot/process/properties.py
@@ -0,0 +1,157 @@
+import re
+import weakref
+from buildbot import util
+
class Properties(util.ComparableMixin):
    """
    I represent a set of properties that can be interpolated into various
    strings in buildsteps.

    @ivar properties: dictionary mapping property values to tuples
        (value, source), where source is a string identifying the source
        of the property.

    Objects of this class can be read like a dictionary -- in this case,
    only the property value is returned.

    As a special case, a property value of None is returned as an empty
    string when used as a mapping.
    """

    compare_attrs = ('properties',)

    def __init__(self, **kwargs):
        """
        @param kwargs: initial property values (for testing)
        """
        self.properties = {}
        # pmap exposes these properties as a %-format mapping for
        # WithProperties rendering (see render() below)
        self.pmap = PropertyMap(self)
        if kwargs: self.update(kwargs, "TEST")

    def __getstate__(self):
        # pmap holds a weakref back to us; drop it when pickling and
        # rebuild it in __setstate__
        d = self.__dict__.copy()
        del d['pmap']
        return d

    def __setstate__(self, d):
        self.__dict__ = d
        self.pmap = PropertyMap(self)

    def __getitem__(self, name):
        """Just get the value for this property."""
        rv = self.properties[name][0]
        return rv

    def has_key(self, name):
        # dict-style membership test (Python 2 idiom)
        return self.properties.has_key(name)

    def getProperty(self, name, default=None):
        """Get the value for the given property."""
        return self.properties.get(name, (default,))[0]

    def getPropertySource(self, name):
        """Return the source string recorded for the given property."""
        return self.properties[name][1]

    def asList(self):
        """Return the properties as a sorted list of (name, value, source)"""
        l = [ (k, v[0], v[1]) for k,v in self.properties.items() ]
        l.sort()
        return l

    def __repr__(self):
        return repr(dict([ (k,v[0]) for k,v in self.properties.iteritems() ]))

    def setProperty(self, name, value, source):
        self.properties[name] = (value, source)

    def update(self, dict, source):
        """Update this object from a dictionary, with an explicit source specified."""
        for k, v in dict.items():
            self.properties[k] = (v, source)

    def updateFromProperties(self, other):
        """Update this object from another Properties object; the other
        object's values (and sources) win on conflicting names."""
        self.properties.update(other.properties)

    def render(self, value):
        """
        Return a variant of value that has any WithProperties objects
        substituted. This recurses into Python's compound data types.
        """
        # we use isinstance to detect Python's standard data types, and call
        # this function recursively for the values in those types
        if isinstance(value, (str, unicode)):
            return value
        elif isinstance(value, WithProperties):
            return value.render(self.pmap)
        elif isinstance(value, list):
            return [ self.render(e) for e in value ]
        elif isinstance(value, tuple):
            return tuple([ self.render(e) for e in value ])
        elif isinstance(value, dict):
            return dict([ (self.render(k), self.render(v)) for k,v in value.iteritems() ])
        else:
            return value
+
class PropertyMap:
    """
    Privately-used mapping object to implement WithProperties'
    substitutions, including the rendering of None as ''.

    Supports two colon modifiers in keys:
      %(prop:-repl)s -> prop's value if prop is set, otherwise repl
      %(prop:+repl)s -> repl if prop is set, otherwise ''
    """
    colon_minus_re = re.compile(r"(.*):-(.*)")
    colon_plus_re = re.compile(r"(.*):\+(.*)")

    def __init__(self, properties):
        # a weakref avoids a reference cycle with the owning Properties
        self.properties = weakref.ref(properties)

    def __getitem__(self, key):
        props = self.properties()
        assert props is not None

        minus_match = self.colon_minus_re.match(key)
        if minus_match is not None:
            # %(prop:-repl)s: prop if set, else the fallback text
            name, fallback = minus_match.group(1, 2)
            if props.has_key(name):
                value = props[name]
            else:
                value = fallback
        else:
            plus_match = self.colon_plus_re.match(key)
            if plus_match is not None:
                # %(prop:+repl)s: replacement if prop is set, else ''
                name, replacement = plus_match.group(1, 2)
                if props.has_key(name):
                    value = replacement
                else:
                    value = ''
            else:
                value = props[key]

        # render missing/None values as the empty string
        if value is None:
            value = ''
        return value
+
class WithProperties(util.ComparableMixin):
    """
    Marker class, used fairly widely, indicating that a string argument
    should have build properties interpolated into it before use.
    """

    compare_attrs = ('fmtstring', 'args')

    def __init__(self, fmtstring, *args):
        self.fmtstring = fmtstring
        self.args = args

    def render(self, pmap):
        """Interpolate positionally if args were given, otherwise let the
        %-operator pull named values out of the mapping."""
        if not self.args:
            return self.fmtstring % pmap
        values = tuple([pmap[name] for name in self.args])
        return self.fmtstring % values
diff --git a/buildbot/buildbot/process/step_twisted2.py b/buildbot/buildbot/process/step_twisted2.py
new file mode 100644
index 0000000..bc58315
--- /dev/null
+++ b/buildbot/buildbot/process/step_twisted2.py
@@ -0,0 +1,159 @@
+
+from buildbot.status import tests
+from buildbot.process.step import SUCCESS, FAILURE, BuildStep
+from buildbot.process.step_twisted import RunUnitTests
+
+from zope.interface import implements
+from twisted.python import log, failure
+from twisted.spread import jelly
+from twisted.pb.tokens import BananaError
+from twisted.web.html import PRE
+from twisted.web.error import NoResource
+
class Null: pass
# ResultTypes is a plain namespace object: one attribute per trial result
# constant (SKIP, FAILURE, ...), filled in below from whichever twisted
# module provides them.
ResultTypes = Null()
ResultTypeNames = ["SKIP",
                   "EXPECTED_FAILURE", "FAILURE", "ERROR",
                   "UNEXPECTED_SUCCESS", "SUCCESS"]
try:
    from twisted.trial import reporter # introduced in Twisted-1.0.5
    # extract the individual result types
    for name in ResultTypeNames:
        setattr(ResultTypes, name, getattr(reporter, name))
except ImportError:
    from twisted.trial import unittest # Twisted-1.0.4 has them here
    for name in ResultTypeNames:
        setattr(ResultTypes, name, getattr(unittest, name))

# NOTE(review): disables twisted.python.log's error retention -- presumably
# because the jelly stream below carries Failures of its own; confirm.
log._keepErrors = 0
from twisted.trial import remote # for trial/jelly parsing

import StringIO
+
class OneJellyTest(tests.OneTest):
    def html(self, request):
        """Render this test's results as an HTML page.

        request.postpath[0] selects the display mode: 'short' or 'long'
        traceback for Failure results; other modes yield NoResource.
        """
        tpl = "<HTML><BODY>\n\n%s\n\n</body></html>\n"
        pptpl = "<HTML><BODY>\n\n<pre>%s</pre>\n\n</body></html>\n"
        t = request.postpath[0] # one of 'short', 'long' #, or 'html'
        if isinstance(self.results, failure.Failure):
            # it would be nice to remove unittest functions from the
            # traceback like unittest.format_exception() does.
            if t == 'short':
                s = StringIO.StringIO()
                self.results.printTraceback(s)
                return pptpl % PRE(s.getvalue())
            elif t == 'long':
                s = StringIO.StringIO()
                self.results.printDetailedTraceback(s)
                return pptpl % PRE(s.getvalue())
            #elif t == 'html':
            #    return tpl % formatFailure(self.results)
            # ACK! source lines aren't stored in the Failure, rather,
            # formatFailure pulls them (by filename) from the local
            # disk. Feh. Even printTraceback() won't work. Double feh.
            return NoResource("No such mode '%s'" % t)
        if self.results == None:
            return tpl % "No results to show: test probably passed."
        # maybe results are plain text?
        return pptpl % PRE(self.results)
+
class TwistedJellyTestResults(tests.TestResults):
    # each individual test is rendered by OneJellyTest
    oneTestClass = OneJellyTest
    def describeOneTest(self, testname):
        # NOTE(review): assumes self.tests[testname][0] holds the result
        # type -- confirm against tests.TestResults.addTest
        return "%s: %s\n" % (testname, self.tests[testname][0])
+
class RunUnitTestsJelly(RunUnitTests):
    """I run the unit tests with the --jelly option, which generates
    machine-parseable results as the tests are run.
    """
    trialMode = "--jelly"
    implements(remote.IRemoteReporter)

    # map trial's result-type constants to our local status constants
    ourtypes = { ResultTypes.SKIP: tests.SKIP,
                 ResultTypes.EXPECTED_FAILURE: tests.EXPECTED_FAILURE,
                 ResultTypes.FAILURE: tests.FAILURE,
                 ResultTypes.ERROR: tests.ERROR,
                 ResultTypes.UNEXPECTED_SUCCESS: tests.UNEXPECTED_SUCCESS,
                 ResultTypes.SUCCESS: tests.SUCCESS,
                 }

    def __getstate__(self):
        #d = RunUnitTests.__getstate__(self)
        d = self.__dict__.copy()
        # Banana subclasses are Ephemeral: drop the decoder when pickling
        if d.has_key("decoder"):
            del d['decoder']
        return d

    def start(self):
        """Set up the jelly decoder, then run the step as usual."""
        self.decoder = remote.DecodeReport(self)
        # don't accept anything unpleasant from the (untrusted) build slave
        # The jellied stream may have Failures, but everything inside should
        # be a string
        security = jelly.SecurityOptions()
        security.allowBasicTypes()
        security.allowInstancesOf(failure.Failure)
        self.decoder.taster = security
        self.results = TwistedJellyTestResults()
        RunUnitTests.start(self)

    def logProgress(self, progress):
        # XXX: track number of tests
        BuildStep.logProgress(self, progress)

    def addStdout(self, data):
        # feed stdout into the jelly decoder; stop parsing permanently on
        # the first undecodable chunk
        if not self.decoder:
            return
        try:
            self.decoder.dataReceived(data)
        except BananaError:
            self.decoder = None
            log.msg("trial --jelly output unparseable, traceback follows")
            log.deferr()

    # IRemoteReporter callbacks, invoked by the decoder as the jelly
    # stream is parsed
    def remote_start(self, expectedTests, times=None):
        print "remote_start", expectedTests
    def remote_reportImportError(self, name, aFailure, times=None):
        pass
    def remote_reportStart(self, testClass, method, times=None):
        print "reportStart", testClass, method

    def remote_reportResults(self, testClass, method, resultType, results,
                             times=None):
        print "reportResults", testClass, method, resultType
        which = testClass + "." + method
        self.results.addTest(which,
                             self.ourtypes.get(resultType, tests.UNKNOWN),
                             results)

    def finished(self, rc):
        # give self.results to our Build object
        self.build.testsFinished(self.results)
        total = self.results.countTests()
        count = self.results.countFailures()
        result = SUCCESS
        if total == None:
            result = (FAILURE, ['tests%s' % self.rtext(' (%s)')])
        if count:
            result = (FAILURE, ["%d tes%s%s" % (count,
                                                (count == 1 and 't' or 'ts'),
                                                self.rtext(' (%s)'))])
        return self.stepComplete(result)
    def finishStatus(self, result):
        # summarize pass/fail counts for the status display
        total = self.results.countTests()
        count = self.results.countFailures()
        text = []
        if count == 0:
            text.extend(["%d %s" % \
                         (total,
                          total == 1 and "test" or "tests"),
                         "passed"])
        else:
            text.append("tests")
            text.append("%d %s" % \
                        (count,
                         count == 1 and "failure" or "failures"))
        self.updateCurrentActivity(text=text)
        self.addFileToCurrentActivity("tests", self.results)
        #self.finishStatusSummary()
        self.finishCurrentActivity()
+
diff --git a/buildbot/buildbot/scheduler.py b/buildbot/buildbot/scheduler.py
new file mode 100644
index 0000000..4341617
--- /dev/null
+++ b/buildbot/buildbot/scheduler.py
@@ -0,0 +1,837 @@
+# -*- test-case-name: buildbot.test.test_dependencies -*-
+
+import time, os.path
+
+from zope.interface import implements
+from twisted.internet import reactor
+from twisted.application import service, internet, strports
+from twisted.python import log, runtime
+from twisted.protocols import basic
+from twisted.cred import portal, checkers
+from twisted.spread import pb
+
+from buildbot import interfaces, buildset, util, pbutil
+from buildbot.status import builder
+from buildbot.sourcestamp import SourceStamp
+from buildbot.changes.maildir import MaildirService
+from buildbot.process.properties import Properties
+
+
class BaseScheduler(service.MultiService, util.ComparableMixin):
    """
    A Scheduler creates BuildSets and submits them to the BuildMaster.

    @ivar name: name of the scheduler

    @ivar properties: additional properties specified in this
        scheduler's configuration
    @type properties: Properties object
    """
    implements(interfaces.IScheduler)

    def __init__(self, name, properties=None):
        """
        @param name: name for this scheduler

        @param properties: properties to be propagated from this scheduler
        @type properties: dict
        """
        service.MultiService.__init__(self)
        self.name = name
        self.properties = Properties()
        # None replaces the old shared mutable default ({})
        if properties is None:
            properties = {}
        self.properties.update(properties, "Scheduler")
        self.properties.setProperty("scheduler", name, "Scheduler")

    def __repr__(self):
        # TODO: why can't id() return a positive number? %d is ugly.
        return "<Scheduler '%s' at %d>" % (self.name, id(self))

    def submitBuildSet(self, bs):
        """Hand a BuildSet up to the BuildMaster (our service parent)."""
        self.parent.submitBuildSet(bs)

    def addChange(self, change):
        """Default: ignore changes; subclasses override to react to them."""
        pass
+
class BaseUpstreamScheduler(BaseScheduler):
    """Base class for schedulers that downstream schedulers can subscribe
    to: registered watchers are called with the SourceStamp of every
    buildset that finishes with SUCCESS."""
    implements(interfaces.IUpstreamScheduler)

    def __init__(self, name, properties=None):
        # None replaces the old shared mutable default ({}); normalize
        # here so the superclass always receives a real dict
        if properties is None:
            properties = {}
        BaseScheduler.__init__(self, name, properties)
        # callables fired with the SourceStamp of each successful buildset
        self.successWatchers = []

    def subscribeToSuccessfulBuilds(self, watcher):
        self.successWatchers.append(watcher)

    def unsubscribeToSuccessfulBuilds(self, watcher):
        self.successWatchers.remove(watcher)

    def submitBuildSet(self, bs):
        # watch the buildset so we can notify watchers when it finishes
        d = bs.waitUntilFinished()
        d.addCallback(self.buildSetFinished)
        BaseScheduler.submitBuildSet(self, bs)

    def buildSetFinished(self, bss):
        if not self.running:
            return
        if bss.getResults() == builder.SUCCESS:
            ss = bss.getSourceStamp()
            for w in self.successWatchers:
                w(ss)
+
+
class Scheduler(BaseUpstreamScheduler):
    """The default Scheduler class will run a build after some period of time
    called the C{treeStableTimer}, on a given set of Builders. It only pays
    attention to a single branch. You can provide a C{fileIsImportant}
    function which will evaluate each Change to decide whether or not it
    should trigger a new build.
    """

    # None means "all changes are important" (see addChange)
    fileIsImportant = None
    compare_attrs = ('name', 'treeStableTimer', 'builderNames', 'branch',
                     'fileIsImportant', 'properties', 'categories')

    def __init__(self, name, branch, treeStableTimer, builderNames,
                 fileIsImportant=None, properties={}, categories=None):
        """
        @param name: the name of this Scheduler
        @param branch: The branch name that the Scheduler should pay
                       attention to. Any Change that is not on this branch
                       will be ignored. It can be set to None to only pay
                       attention to the default branch.
        @param treeStableTimer: the duration, in seconds, for which the tree
                                must remain unchanged before a build will be
                                triggered. This is intended to avoid builds
                                of partially-committed fixes.
        @param builderNames: a list of Builder names. When this Scheduler
                             decides to start a set of builds, they will be
                             run on the Builders named by this list.

        @param fileIsImportant: A callable which takes one argument (a Change
                                instance) and returns True if the change is
                                worth building, and False if it is not.
                                Unimportant Changes are accumulated until the
                                build is triggered by an important change.
                                The default value of None means that all
                                Changes are important.

        @param properties: properties to apply to all builds started from this
                           scheduler
        @param categories: A list of categories of changes to accept
        """

        BaseUpstreamScheduler.__init__(self, name, properties)
        self.treeStableTimer = treeStableTimer
        errmsg = ("The builderNames= argument to Scheduler must be a list "
                  "of Builder description names (i.e. the 'name' key of the "
                  "Builder specification dictionary)")
        assert isinstance(builderNames, (list, tuple)), errmsg
        for b in builderNames:
            assert isinstance(b, str), errmsg
        self.builderNames = builderNames
        self.branch = branch
        if fileIsImportant:
            assert callable(fileIsImportant)
            self.fileIsImportant = fileIsImportant

        # Changes accumulated while waiting for the tree to stabilize
        self.importantChanges = []
        self.unimportantChanges = []
        self.nextBuildTime = None
        self.timer = None
        self.categories = categories

    def listBuilderNames(self):
        return self.builderNames

    def getPendingBuildTimes(self):
        if self.nextBuildTime is not None:
            return [self.nextBuildTime]
        return []

    def addChange(self, change):
        # filter by branch and (optionally) category, then classify the
        # change as important or unimportant
        if change.branch != self.branch:
            log.msg("%s ignoring off-branch %s" % (self, change))
            return
        if self.categories is not None and change.category not in self.categories:
            log.msg("%s ignoring non-matching categories %s" % (self, change))
            return
        if not self.fileIsImportant:
            self.addImportantChange(change)
        elif self.fileIsImportant(change):
            self.addImportantChange(change)
        else:
            self.addUnimportantChange(change)

    def addImportantChange(self, change):
        log.msg("%s: change is important, adding %s" % (self, change))
        self.importantChanges.append(change)
        # restart the stable-tree countdown from this change's timestamp
        self.nextBuildTime = max(self.nextBuildTime,
                                 change.when + self.treeStableTimer)
        self.setTimer(self.nextBuildTime)

    def addUnimportantChange(self, change):
        # unimportant changes ride along but never (re)start the timer
        log.msg("%s: change is not important, adding %s" % (self, change))
        self.unimportantChanges.append(change)

    def setTimer(self, when):
        log.msg("%s: setting timer to %s" %
                (self, time.strftime("%H:%M:%S", time.localtime(when))))
        now = util.now()
        if when < now:
            when = now
        if self.timer:
            self.timer.cancel()
        self.timer = reactor.callLater(when - now, self.fireTimer)

    def stopTimer(self):
        if self.timer:
            self.timer.cancel()
            self.timer = None

    def fireTimer(self):
        # clear out our state
        self.timer = None
        self.nextBuildTime = None
        changes = self.importantChanges + self.unimportantChanges
        self.importantChanges = []
        self.unimportantChanges = []

        # create a BuildSet, submit it to the BuildMaster
        bs = buildset.BuildSet(self.builderNames,
                               SourceStamp(changes=changes),
                               properties=self.properties)
        self.submitBuildSet(bs)

    def stopService(self):
        self.stopTimer()
        return service.MultiService.stopService(self)
+
+
class AnyBranchScheduler(BaseUpstreamScheduler):
    """This Scheduler will handle changes on a variety of branches. It will
    accumulate Changes for each branch separately. It works by creating a
    separate Scheduler for each new branch it sees."""

    schedulerFactory = Scheduler
    # None means "all changes are important" (forwarded to child Schedulers)
    fileIsImportant = None

    compare_attrs = ('name', 'branches', 'treeStableTimer', 'builderNames',
                     'fileIsImportant', 'properties')

    def __init__(self, name, branches, treeStableTimer, builderNames,
                 fileIsImportant=None, properties={}):
        """
        @param name: the name of this Scheduler
        @param branches: The branch names that the Scheduler should pay
                         attention to. Any Change that is not on one of these
                         branches will be ignored. It can be set to None to
                         accept changes from any branch. Don't use [] (an
                         empty list), because that means we don't pay
                         attention to *any* branches, so we'll never build
                         anything.
        @param treeStableTimer: the duration, in seconds, for which the tree
                                must remain unchanged before a build will be
                                triggered. This is intended to avoid builds
                                of partially-committed fixes.
        @param builderNames: a list of Builder names. When this Scheduler
                             decides to start a set of builds, they will be
                             run on the Builders named by this list.

        @param fileIsImportant: A callable which takes one argument (a Change
                                instance) and returns True if the change is
                                worth building, and False if it is not.
                                Unimportant Changes are accumulated until the
                                build is triggered by an important change.
                                The default value of None means that all
                                Changes are important.

        @param properties: properties to apply to all builds started from this
                           scheduler
        """

        BaseUpstreamScheduler.__init__(self, name, properties)
        self.treeStableTimer = treeStableTimer
        for b in builderNames:
            assert isinstance(b, str)
        self.builderNames = builderNames
        self.branches = branches
        if self.branches == []:
            log.msg("AnyBranchScheduler %s: branches=[], so we will ignore "
                    "all branches, and never trigger any builds. Please set "
                    "branches=None to mean 'all branches'" % self)
            # consider raising an exception here, to make this warning more
            # prominent, but I can vaguely imagine situations where you might
            # want to comment out branches temporarily and wouldn't
            # appreciate it being treated as an error.
        if fileIsImportant:
            assert callable(fileIsImportant)
            self.fileIsImportant = fileIsImportant
        self.schedulers = {} # one per branch

    def __repr__(self):
        return "<AnyBranchScheduler '%s'>" % self.name

    def listBuilderNames(self):
        return self.builderNames

    def getPendingBuildTimes(self):
        # collect pending build times from all per-branch schedulers
        bts = []
        for s in self.schedulers.values():
            if s.nextBuildTime is not None:
                bts.append(s.nextBuildTime)
        return bts

    def buildSetFinished(self, bss):
        # we don't care if a build has finished; one of the per-branch builders
        # will take care of it, instead.
        pass

    def addChange(self, change):
        branch = change.branch
        if self.branches is not None and branch not in self.branches:
            log.msg("%s ignoring off-branch %s" % (self, change))
            return
        s = self.schedulers.get(branch)
        if not s:
            # lazily create a child Scheduler for this branch
            if branch:
                name = self.name + "." + branch
            else:
                name = self.name + ".<default>"
            s = self.schedulerFactory(name, branch,
                                      self.treeStableTimer,
                                      self.builderNames,
                                      self.fileIsImportant)
            s.successWatchers = self.successWatchers
            s.setServiceParent(self)
            s.properties = self.properties
            # TODO: does this result in schedulers that stack up forever?
            # When I make the persistify-pass, think about this some more.
            self.schedulers[branch] = s
        s.addChange(change)
+
+
class Dependent(BaseUpstreamScheduler):
    """This scheduler runs some set of 'downstream' builds when the
    'upstream' scheduler has completed successfully."""
    implements(interfaces.IDownstreamScheduler)

    compare_attrs = ('name', 'upstream', 'builderNames', 'properties')

    def __init__(self, name, upstream, builderNames, properties=None):
        assert interfaces.IUpstreamScheduler.providedBy(upstream)
        # None replaces the old shared mutable default ({})
        if properties is None:
            properties = {}
        BaseUpstreamScheduler.__init__(self, name, properties)
        self.upstream = upstream
        self.builderNames = builderNames

    def listBuilderNames(self):
        return self.builderNames

    def getPendingBuildTimes(self):
        # report the upstream's value
        return self.upstream.getPendingBuildTimes()

    def startService(self):
        service.MultiService.startService(self)
        self.upstream.subscribeToSuccessfulBuilds(self.upstreamBuilt)

    def stopService(self):
        d = service.MultiService.stopService(self)
        self.upstream.unsubscribeToSuccessfulBuilds(self.upstreamBuilt)
        return d

    def upstreamBuilt(self, ss):
        """Triggered by the upstream scheduler: build the same SourceStamp."""
        bs = buildset.BuildSet(self.builderNames, ss,
                               properties=self.properties)
        self.submitBuildSet(bs)

    def checkUpstreamScheduler(self):
        # find our *active* upstream scheduler (which may not be
        # self.upstream!) by name
        up_name = self.upstream.name
        upstream = None
        for s in self.parent.allSchedulers():
            if s.name == up_name and interfaces.IUpstreamScheduler.providedBy(s):
                upstream = s
        if not upstream:
            log.msg("ERROR: Couldn't find upstream scheduler of name <%s>" %
                    up_name)
            # BUGFIX: the original fell through here and crashed below with
            # an AttributeError on None; bail out instead.
            return

        # if it's already correct, we're good to go
        if upstream is self.upstream:
            return

        # otherwise, associate with the new upstream. We also keep listening
        # to the old upstream, in case it's in the middle of a build
        upstream.subscribeToSuccessfulBuilds(self.upstreamBuilt)
        self.upstream = upstream
        log.msg("Dependent <%s> connected to new Upstream <%s>" %
                (self.name, up_name))
+
class Periodic(BaseUpstreamScheduler):
    """Instead of watching for Changes, this Scheduler can just start a build
    at fixed intervals. The C{periodicBuildTimer} parameter sets the number
    of seconds to wait between such periodic builds. The first build will be
    run immediately."""

    # TODO: consider having this watch another (changed-based) scheduler and
    # merely enforce a minimum time between builds.

    compare_attrs = ('name', 'builderNames', 'periodicBuildTimer', 'branch', 'properties')

    def __init__(self, name, builderNames, periodicBuildTimer,
                 branch=None, properties=None):
        # None replaces the old shared mutable default ({})
        if properties is None:
            properties = {}
        BaseUpstreamScheduler.__init__(self, name, properties)
        self.builderNames = builderNames
        self.periodicBuildTimer = periodicBuildTimer
        self.branch = branch
        self.reason = ("The Periodic scheduler named '%s' triggered this build"
                       % name)
        # TimerService fires once on startup (hence "first build run
        # immediately") and then every periodicBuildTimer seconds
        self.timer = internet.TimerService(self.periodicBuildTimer,
                                           self.doPeriodicBuild)
        self.timer.setServiceParent(self)

    def listBuilderNames(self):
        return self.builderNames

    def getPendingBuildTimes(self):
        # TODO: figure out when self.timer is going to fire next and report
        # that
        return []

    def doPeriodicBuild(self):
        """Submit one BuildSet for the configured builders and branch."""
        bs = buildset.BuildSet(self.builderNames,
                               SourceStamp(branch=self.branch),
                               self.reason,
                               properties=self.properties)
        self.submitBuildSet(bs)
+
+
+
class Nightly(BaseUpstreamScheduler):
    """Imitate 'cron' scheduling. This can be used to schedule a nightly
    build, or one which runs at certain times of the day, week, or month.

    Pass some subset of minute, hour, dayOfMonth, month, and dayOfWeek; each
    may be a single number or a list of valid values. The builds will be
    triggered whenever the current time matches these values. Wildcards are
    represented by a '*' string. All fields default to a wildcard except
    'minute', so with no fields this defaults to a build every hour, on the
    hour.

    For example, the following master.cfg clause will cause a build to be
    started every night at 3:00am::

     s = Nightly('nightly', ['builder1', 'builder2'], hour=3, minute=0)
     c['schedulers'].append(s)

    This scheduler will perform a build each monday morning at 6:23am and
    again at 8:23am::

     s = Nightly('BeforeWork', ['builder1'],
                 dayOfWeek=0, hour=[6,8], minute=23)

    The following runs a build every two hours::

     s = Nightly('every2hours', ['builder1'], hour=range(0, 24, 2))

    And this one will run only on December 24th::

     s = Nightly('SleighPreflightCheck', ['flying_circuits', 'radar'],
                 month=12, dayOfMonth=24, hour=12, minute=0)

    For dayOfWeek and dayOfMonth, builds are triggered if the date matches
    either of them. All time values are compared against the tuple returned
    by time.localtime(), so month and dayOfMonth numbers start at 1, not
    zero. dayOfWeek=0 is Monday, dayOfWeek=6 is Sunday.

    onlyIfChanged functionality::

     s = Nightly('nightly', ['builder1', 'builder2'],
                 hour=3, minute=0, onlyIfChanged=True)

    When the flag is True (False by default), the build is triggered only if
    the date matches and if the branch has changed.

    The fileIsImportant parameter is implemented as defined in class
    Scheduler.
    """

    compare_attrs = ('name', 'builderNames',
                     'minute', 'hour', 'dayOfMonth', 'month',
                     'dayOfWeek', 'branch', 'onlyIfChanged',
                     'fileIsImportant', 'properties')

    def __init__(self, name, builderNames, minute=0, hour='*',
                 dayOfMonth='*', month='*', dayOfWeek='*',
                 branch=None, fileIsImportant=None, onlyIfChanged=False, properties={}):
        # Setting minute=0 really makes this an 'Hourly' scheduler. This
        # seemed like a better default than minute='*', which would result in
        # a build every 60 seconds.
        BaseUpstreamScheduler.__init__(self, name, properties)
        self.builderNames = builderNames
        self.minute = minute
        self.hour = hour
        self.dayOfMonth = dayOfMonth
        self.month = month
        self.dayOfWeek = dayOfWeek
        self.branch = branch
        self.onlyIfChanged = onlyIfChanged
        # delayedRun: IDelayedCall for the next scheduled build (set by
        # setTimer); nextRunTime: its epoch-seconds fire time
        self.delayedRun = None
        self.nextRunTime = None
        self.reason = ("The Nightly scheduler named '%s' triggered this build"
                       % name)

        # Changes accumulated since the last run; only consulted when
        # onlyIfChanged is True
        self.importantChanges = []
        self.unimportantChanges = []
        self.fileIsImportant = None
        if fileIsImportant:
            assert callable(fileIsImportant)
            self.fileIsImportant = fileIsImportant

    def addTime(self, timetuple, secs):
        # advance a time.localtime()-style tuple by 'secs' seconds
        return time.localtime(time.mktime(timetuple)+secs)
    def findFirstValueAtLeast(self, values, value, default=None):
        # return the first element of 'values' that is >= 'value', else default
        for v in values:
            if v >= value: return v
        return default

    def setTimer(self):
        # (re)arm the reactor timer for the next matching cron time
        self.nextRunTime = self.calculateNextRunTime()
        self.delayedRun = reactor.callLater(self.nextRunTime - time.time(),
                                            self.doPeriodicBuild)

    def startService(self):
        BaseUpstreamScheduler.startService(self)
        self.setTimer()

    def stopService(self):
        BaseUpstreamScheduler.stopService(self)
        # NOTE(review): self.delayedRun is None until startService() has run;
        # stopping a never-started scheduler would raise here -- confirm
        self.delayedRun.cancel()

    def isRunTime(self, timetuple):
        # True if 'timetuple' (a time.localtime() tuple) matches our cron spec
        def check(ourvalue, value):
            # '*' matches anything; an int must match exactly; otherwise
            # ourvalue is assumed to be a sequence of acceptable values
            if ourvalue == '*': return True
            if isinstance(ourvalue, int): return value == ourvalue
            return (value in ourvalue)

        if not check(self.minute, timetuple[4]):
            #print 'bad minute', timetuple[4], self.minute
            return False

        if not check(self.hour, timetuple[3]):
            #print 'bad hour', timetuple[3], self.hour
            return False

        if not check(self.month, timetuple[1]):
            #print 'bad month', timetuple[1], self.month
            return False

        if self.dayOfMonth != '*' and self.dayOfWeek != '*':
            # They specified both day(s) of month AND day(s) of week.
            # This means that we only have to match one of the two. If
            # neither one matches, this time is not the right time.
            if not (check(self.dayOfMonth, timetuple[2]) or
                    check(self.dayOfWeek, timetuple[6])):
                #print 'bad day'
                return False
        else:
            if not check(self.dayOfMonth, timetuple[2]):
                #print 'bad day of month'
                return False

            if not check(self.dayOfWeek, timetuple[6]):
                #print 'bad day of week'
                return False

        return True

    def calculateNextRunTime(self):
        return self.calculateNextRunTimeFrom(time.time())

    def calculateNextRunTimeFrom(self, now):
        # return the epoch time of the first cron match strictly after 'now'
        dateTime = time.localtime(now)

        # Remove seconds by advancing to at least the next minute
        dateTime = self.addTime(dateTime, 60-dateTime[5])

        # Now we just keep adding minutes until we find something that matches

        # It's not an efficient algorithm, but it'll *work* for now
        yearLimit = dateTime[0]+2
        while not self.isRunTime(dateTime):
            dateTime = self.addTime(dateTime, 60)
            #print 'Trying', time.asctime(dateTime)
            assert dateTime[0] < yearLimit, 'Something is wrong with this code'
        return time.mktime(dateTime)

    def listBuilderNames(self):
        return self.builderNames

    def getPendingBuildTimes(self):
        # TODO: figure out when self.timer is going to fire next and report
        # that
        if self.nextRunTime is None: return []
        return [self.nextRunTime]

    def doPeriodicBuild(self):
        # Schedule the next run
        self.setTimer()

        if self.onlyIfChanged:
            if len(self.importantChanges) > 0:
                changes = self.importantChanges + self.unimportantChanges
                # And trigger a build
                log.msg("Nightly Scheduler <%s>: triggering build" % self.name)
                bs = buildset.BuildSet(self.builderNames,
                                       SourceStamp(changes=changes),
                                       self.reason,
                                       properties=self.properties)
                self.submitBuildSet(bs)
                # Reset the change lists
                self.importantChanges = []
                self.unimportantChanges = []
            else:
                log.msg("Nightly Scheduler <%s>: skipping build - No important change" % self.name)
        else:
            # And trigger a build
            bs = buildset.BuildSet(self.builderNames,
                                   SourceStamp(branch=self.branch),
                                   self.reason,
                                   properties=self.properties)
            self.submitBuildSet(bs)

    def addChange(self, change):
        # only record Changes when onlyIfChanged is in effect; otherwise the
        # timer alone decides when to build
        if self.onlyIfChanged:
            if change.branch != self.branch:
                # NOTE(review): '%d' assumes change.revision is numeric; a VC
                # with string revisions would raise here -- confirm
                log.msg("Nightly Scheduler <%s>: ignoring change %d on off-branch %s" % (self.name, change.revision, change.branch))
                return
            if not self.fileIsImportant:
                self.addImportantChange(change)
            elif self.fileIsImportant(change):
                self.addImportantChange(change)
            else:
                self.addUnimportantChange(change)
        else:
            log.msg("Nightly Scheduler <%s>: no add change" % self.name)
            pass

    def addImportantChange(self, change):
        log.msg("Nightly Scheduler <%s>: change %s from %s is important, adding it" % (self.name, change.revision, change.who))
        self.importantChanges.append(change)

    def addUnimportantChange(self, change):
        log.msg("Nightly Scheduler <%s>: change %s from %s is not important, adding it" % (self.name, change.revision, change.who))
        self.unimportantChanges.append(change)
+
+
class TryBase(BaseScheduler):
    """Common base for the 'try' schedulers: holds the list of builders
    that try jobs may target and validates user-supplied builder lists."""

    def __init__(self, name, builderNames, properties={}):
        BaseScheduler.__init__(self, name, properties)
        self.builderNames = builderNames

    def listBuilderNames(self):
        return self.builderNames

    def getPendingBuildTimes(self):
        # we can't predict what the developers are going to do in the future
        return []

    def addChange(self, change):
        # Try schedulers ignore Changes
        pass

    def processBuilderList(self, builderNames):
        # self.builderNames is the configured list of builders available for
        # try. An explicit user-supplied list must be restricted to that
        # configured list; an empty list means "build on all of them".
        if not builderNames:
            return self.builderNames
        for candidate in builderNames:
            if not candidate in self.builderNames:
                log.msg("%s got with builder %s" % (self, candidate))
                log.msg(" but that wasn't in our list: %s"
                        % (self.builderNames,))
                return []
        return builderNames
+
# raised by Try_Jobdir.parseJob when a jobfile cannot be parsed
class BadJobfile(Exception):
    pass
+
class JobFileScanner(basic.NetstringReceiver):
    # Decodes a jobfile (a sequence of netstrings) without a real network
    # connection: feed the raw bytes to dataReceived(), then read .strings
    # and .error.
    def __init__(self):
        self.strings = []
        self.transport = self # so transport.loseConnection works
        self.error = False

    def stringReceived(self, s):
        # collect each decoded netstring in order
        self.strings.append(s)

    def loseConnection(self):
        # NetstringReceiver "drops the connection" on a parse error; since we
        # are our own transport, just record that the input was malformed
        self.error = True
+
class Try_Jobdir(TryBase):
    """A TryBase scheduler that watches a maildir-style 'jobdir' for
    serialized build requests dropped off by the 'buildbot try' command."""

    compare_attrs = ( 'name', 'builderNames', 'jobdir', 'properties' )

    def __init__(self, name, builderNames, jobdir, properties={}):
        TryBase.__init__(self, name, builderNames, properties)
        self.jobdir = jobdir
        self.watcher = MaildirService()
        self.watcher.setServiceParent(self)

    def setServiceParent(self, parent):
        self.watcher.setBasedir(os.path.join(parent.basedir, self.jobdir))
        TryBase.setServiceParent(self, parent)

    def parseJob(self, f):
        # jobfiles are serialized build requests. Each is a list of
        # serialized netstrings, in the following order:
        #  "1", the version number of this format
        #  buildsetID, arbitrary string, used to find the buildSet later
        #  branch name, "" for default-branch
        #  base revision, "" for HEAD
        #  patchlevel, usually "1"
        #  patch
        #  builderNames...
        # Raises BadJobfile on any malformed input.
        p = JobFileScanner()
        p.dataReceived(f.read())
        if p.error:
            raise BadJobfile("unable to parse netstrings")
        s = p.strings
        ver = s.pop(0)
        if ver != "1":
            raise BadJobfile("unknown version '%s'" % ver)
        if len(s) < 5:
            # BUGFIX: a truncated jobfile used to escape as a bare
            # ValueError from the tuple-unpack below; report it as a
            # BadJobfile so messageReceived() handles it gracefully.
            raise BadJobfile("too few netstrings in jobfile")
        buildsetID, branch, baserev, patchlevel, diff = s[:5]
        builderNames = s[5:]
        if branch == "":
            branch = None
        if baserev == "":
            baserev = None
        patchlevel = int(patchlevel)
        patch = (patchlevel, diff)
        ss = SourceStamp(branch, baserev, patch)
        return builderNames, ss, buildsetID

    def messageReceived(self, filename):
        md = os.path.join(self.parent.basedir, self.jobdir)
        if runtime.platformType == "posix":
            # open the file before moving it, because I'm afraid that once
            # it's in cur/, someone might delete it at any moment
            path = os.path.join(md, "new", filename)
            f = open(path, "r")
            os.rename(os.path.join(md, "new", filename),
                      os.path.join(md, "cur", filename))
        else:
            # do this backwards under windows, because you can't move a file
            # that somebody is holding open. This was causing a Permission
            # Denied error on bear's win32-twisted1.3 buildslave.
            os.rename(os.path.join(md, "new", filename),
                      os.path.join(md, "cur", filename))
            path = os.path.join(md, "cur", filename)
            f = open(path, "r")

        try:
            try:
                builderNames, ss, bsid = self.parseJob(f)
            except BadJobfile:
                log.msg("%s reports a bad jobfile in %s" % (self, filename))
                log.err()
                return
        finally:
            # BUGFIX: the jobfile handle was previously never closed
            f.close()
        # Validate/fixup the builder names.
        builderNames = self.processBuilderList(builderNames)
        if not builderNames:
            return
        reason = "'try' job"
        bs = buildset.BuildSet(builderNames, ss, reason=reason,
                               bsid=bsid, properties=self.properties)
        self.submitBuildSet(bs)
+
class Try_Userpass(TryBase):
    """A TryBase scheduler that accepts 'try' jobs over a PB connection
    authenticated against a static list of (username, password) pairs."""

    compare_attrs = ( 'name', 'builderNames', 'port', 'userpass', 'properties' )
    implements(portal.IRealm)

    def __init__(self, name, builderNames, port, userpass, properties={}):
        TryBase.__init__(self, name, builderNames, properties)
        if type(port) is int:
            port = "tcp:%d" % port
        self.port = port
        self.userpass = userpass

        # build an in-memory credentials checker from the configured pairs
        checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
        for user, passwd in self.userpass:
            checker.addUser(user, passwd)

        # we are our own realm: requestAvatar() below hands out perspectives
        auth_portal = portal.Portal(self)
        auth_portal.registerChecker(checker)
        factory = pb.PBServerFactory(auth_portal)
        listener = strports.service(port, factory)
        listener.setServiceParent(self)

    def getPort(self):
        # utility method for tests: figure out which TCP port we just opened.
        return self.services[0]._port.getHost().port

    def requestAvatar(self, avatarID, mind, interface):
        log.msg("%s got connection from user %s" % (self, avatarID))
        assert interface == pb.IPerspective
        avatar = Try_Userpass_Perspective(self, avatarID)
        return (pb.IPerspective, avatar, lambda: None)
+
class Try_Userpass_Perspective(pbutil.NewCredPerspective):
    """PB perspective handed to an authenticated 'try' user: accepts build
    requests and returns a remote status object for each one."""

    def __init__(self, parent, username):
        self.parent = parent
        self.username = username

    def perspective_try(self, branch, revision, patch, builderNames, properties={}):
        log.msg("user %s requesting build on builders %s" % (self.username,
                                                             builderNames))
        # Validate/fixup the builder names.
        builderNames = self.parent.processBuilderList(builderNames)
        if not builderNames:
            return

        stamp = SourceStamp(branch, revision, patch)
        reason = "'try' job from user %s" % self.username

        # roll the specified props in with our inherited props
        merged = Properties()
        merged.updateFromProperties(self.parent.properties)
        merged.update(properties, "try build")

        bs = buildset.BuildSet(builderNames,
                               stamp,
                               reason=reason,
                               properties=merged)
        self.parent.submitBuildSet(bs)

        # return a remotely-usable BuildSetStatus object
        from buildbot.status.client import makeRemote
        return makeRemote(bs.status)
+
class Triggerable(BaseUpstreamScheduler):
    """This scheduler doesn't do anything until it is triggered by a Trigger
    step in a factory. In general, that step will not complete until all of
    the builds that I fire have finished.
    """

    compare_attrs = ('name', 'builderNames', 'properties')

    def __init__(self, name, builderNames, properties={}):
        BaseUpstreamScheduler.__init__(self, name, properties)
        self.builderNames = builderNames

    def listBuilderNames(self):
        return self.builderNames

    def getPendingBuildTimes(self):
        # builds only happen when someone triggers us, so there is nothing
        # pending to report
        return []

    def trigger(self, ss, set_props=None):
        """Trigger this scheduler. Returns a deferred that will fire when the
        buildset is finished.
        """
        # compose the buildset properties: start from our own, then let the
        # triggering build override anything it wants to
        props = Properties()
        props.updateFromProperties(self.properties)
        if set_props:
            props.updateFromProperties(set_props)

        bs = buildset.BuildSet(self.builderNames, ss, properties=props)
        finished_d = bs.waitUntilFinished()
        self.submitBuildSet(bs)
        return finished_d
diff --git a/buildbot/buildbot/scripts/__init__.py b/buildbot/buildbot/scripts/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/buildbot/buildbot/scripts/__init__.py
diff --git a/buildbot/buildbot/scripts/checkconfig.py b/buildbot/buildbot/scripts/checkconfig.py
new file mode 100644
index 0000000..44dd7bc
--- /dev/null
+++ b/buildbot/buildbot/scripts/checkconfig.py
@@ -0,0 +1,53 @@
+import sys
+import os
+from shutil import copy, rmtree
+from tempfile import mkdtemp
+from os.path import isfile
+import traceback
+
+from buildbot import master
+
class ConfigLoader(master.BuildMaster):
    """Load and validate a master.cfg without starting a buildmaster.

    The config file (plus any plain files sitting next to it) is copied
    into a temporary directory first, because loadConfig() creates
    directories and compiles .py files as a side effect.
    """
    def __init__(self, configFileName="master.cfg"):
        master.BuildMaster.__init__(self, ".", configFileName)
        origdir = os.getcwd()  # renamed from 'dir', which shadowed a builtin
        # Use a temporary directory since loadConfig() creates a bunch of
        # directories and compiles .py files
        tempdir = mkdtemp()
        copy(configFileName, tempdir)
        for entry in os.listdir("."):
            # Any code in a subdirectory will _not_ be copied! This is a bug
            if isfile(entry):
                copy(entry, tempdir)

        configFile = None
        try:
            os.chdir(tempdir)
            # Add the temp directory to the library path so local modules work
            sys.path.append(tempdir)
            configFile = open(configFileName, "r")
            self.loadConfig(configFile)
        finally:
            # BUGFIX: the old code referenced configFile before assignment
            # when open() itself failed, never closed the file on success,
            # and duplicated the cwd/tempdir cleanup in two places. A single
            # finally block restores the cwd, closes the file if it was
            # opened, and removes the temp tree on every path.
            os.chdir(origdir)
            if configFile is not None:
                configFile.close()
            rmtree(tempdir)
+
if __name__ == '__main__':
    # Command-line entry point: "checkconfig.py [master.cfg]". Exit code 0
    # on a valid config, 2 if the file can't be opened, 1 on a config error.
    try:
        if len(sys.argv) > 1:
            c = ConfigLoader(sys.argv[1])
        else:
            c = ConfigLoader()
    except IOError:
        print >> sys.stderr, "Could not open config file"
        sys.exit(2)
    except:
        print >> sys.stderr, "Error in config file:"
        t, v, tb = sys.exc_info()
        # BUGFIX: print_exception() writes to the given file and returns
        # None; the old code also printed that None to stderr.
        traceback.print_exception(t, v, tb, file=sys.stderr)
        sys.exit(1)
diff --git a/buildbot/buildbot/scripts/logwatcher.py b/buildbot/buildbot/scripts/logwatcher.py
new file mode 100644
index 0000000..e959afb
--- /dev/null
+++ b/buildbot/buildbot/scripts/logwatcher.py
@@ -0,0 +1,97 @@
+
+import os
+from twisted.python.failure import Failure
+from twisted.internet import defer, reactor, protocol, error
+from twisted.protocols.basic import LineOnlyReceiver
+
class FakeTransport:
    # Stand-in transport assigned to the LineOnlyReceiver below so that
    # transport.disconnecting checks succeed without a real connection.
    disconnecting = False

# errback reasons used by LogWatcher / its callers:
class BuildmasterTimeoutError(Exception):
    pass
class BuildslaveTimeoutError(Exception):
    pass
class ReconfigError(Exception):
    pass
class BuildSlaveDetectedError(Exception):
    pass
+
class TailProcess(protocol.ProcessProtocol):
    # ProcessProtocol for the spawned 'tail -f': forwards stdout to the
    # owning LogWatcher ('lw' is assigned by LogWatcher.__init__) and
    # echoes anything on stderr.
    def outReceived(self, data):
        self.lw.dataReceived(data)
    def errReceived(self, data):
        print "ERR: '%s'" % (data,)
+
+
class LogWatcher(LineOnlyReceiver):
    # Follows a twistd.log with /usr/bin/tail and watches for the marker
    # lines that indicate a reconfig (or buildslave attach) finishing.
    POLL_INTERVAL = 0.1
    TIMEOUT_DELAY = 10.0
    delimiter = os.linesep

    def __init__(self, logfile):
        self.logfile = logfile
        self.in_reconfig = False
        # we parse tail's output ourselves, so fake the transport
        self.transport = FakeTransport()
        self.pp = TailProcess()
        self.pp.lw = self
        self.processtype = "buildmaster"
        self.timer = None

    def start(self):
        # return a Deferred that fires when the reconfig process has
        # finished. It errbacks with TimeoutError if the finish line has not
        # been seen within 10 seconds, and with ReconfigError if the error
        # line was seen. If the logfile could not be opened, it errbacks with
        # an IOError.
        self.p = reactor.spawnProcess(self.pp, "/usr/bin/tail",
                                      ("tail", "-f", "-n", "0", self.logfile),
                                      env=os.environ,
                                      )
        self.running = True
        d = defer.maybeDeferred(self._start)
        return d

    def _start(self):
        self.d = defer.Deferred()
        self.timer = reactor.callLater(self.TIMEOUT_DELAY, self.timeout)
        return self.d

    def timeout(self):
        # no marker line seen within TIMEOUT_DELAY
        self.timer = None
        if self.processtype == "buildmaster":
            e = BuildmasterTimeoutError()
        else:
            e = BuildslaveTimeoutError()
        self.finished(Failure(e))

    def finished(self, results):
        # tear down the tail process and the timeout, then fire the Deferred
        # with either a result string or a Failure
        try:
            self.p.signalProcess("KILL")
        except error.ProcessExitedAlready:
            pass
        if self.timer:
            self.timer.cancel()
            self.timer = None
        self.running = False
        self.in_reconfig = False
        self.d.callback(results)

    def lineReceived(self, line):
        if not self.running:
            return
        if "Log opened." in line:
            self.in_reconfig = True
        if "loading configuration from" in line:
            self.in_reconfig = True
        if "Creating BuildSlave" in line:
            self.processtype = "buildslave"

        # echo the log lines while a reconfig is in progress
        if self.in_reconfig:
            print line

        if "message from master: attached" in line:
            return self.finished("buildslave")
        if "I will keep using the previous config file" in line:
            return self.finished(Failure(ReconfigError()))
        if "configuration update complete" in line:
            return self.finished("buildmaster")
diff --git a/buildbot/buildbot/scripts/reconfig.py b/buildbot/buildbot/scripts/reconfig.py
new file mode 100644
index 0000000..104214b
--- /dev/null
+++ b/buildbot/buildbot/scripts/reconfig.py
@@ -0,0 +1,69 @@
+
+import os, signal, platform
+from twisted.internet import reactor
+
+from buildbot.scripts.logwatcher import LogWatcher, BuildmasterTimeoutError, \
+ ReconfigError
+
+class Reconfigurator:
+ def run(self, config):
+ # Returns "Microsoft" for Vista and "Windows" for other versions
+ if platform.system() in ("Windows", "Microsoft"):
+ print "Reconfig (through SIGHUP) is not supported on Windows."
+ print "The 'buildbot debugclient' tool can trigger a reconfig"
+ print "remotely, but requires Gtk+ libraries to run."
+ return
+
+ basedir = config['basedir']
+ quiet = config['quiet']
+ os.chdir(basedir)
+ f = open("twistd.pid", "rt")
+ self.pid = int(f.read().strip())
+ if quiet:
+ os.kill(self.pid, signal.SIGHUP)
+ return
+
+ # keep reading twistd.log. Display all messages between "loading
+ # configuration from ..." and "configuration update complete" or
+ # "I will keep using the previous config file instead.", or until
+ # 10 seconds have elapsed.
+
+ self.sent_signal = False
+ lw = LogWatcher("twistd.log")
+ d = lw.start()
+ d.addCallbacks(self.success, self.failure)
+ reactor.callLater(0.2, self.sighup)
+ reactor.run()
+
+ def sighup(self):
+ if self.sent_signal:
+ return
+ print "sending SIGHUP to process %d" % self.pid
+ self.sent_signal = True
+ os.kill(self.pid, signal.SIGHUP)
+
+ def success(self, res):
+ print """
+Reconfiguration appears to have completed successfully.
+"""
+ reactor.stop()
+
+ def failure(self, why):
+ if why.check(BuildmasterTimeoutError):
+ print "Never saw reconfiguration finish."
+ elif why.check(ReconfigError):
+ print """
+Reconfiguration failed. Please inspect the master.cfg file for errors,
+correct them, then try 'buildbot reconfig' again.
+"""
+ elif why.check(IOError):
+ # we were probably unable to open the file in the first place
+ self.sighup()
+ else:
+ print "Error while following twistd.log: %s" % why
+ reactor.stop()
+
def reconfig(config):
    # entry point used by the 'buildbot reconfig' subcommand
    Reconfigurator().run(config)
+
diff --git a/buildbot/buildbot/scripts/runner.py b/buildbot/buildbot/scripts/runner.py
new file mode 100644
index 0000000..4e22dbc
--- /dev/null
+++ b/buildbot/buildbot/scripts/runner.py
@@ -0,0 +1,1023 @@
+# -*- test-case-name: buildbot.test.test_runner -*-
+
+# N.B.: don't import anything that might pull in a reactor yet. Some of our
+# subcommands want to load modules that need the gtk reactor.
+import os, sys, stat, re, time
+import traceback
+from twisted.python import usage, util, runtime
+
+from buildbot.interfaces import BuildbotNotRunningError
+
+# this is mostly just a front-end for mktap, twistd, and kill(1), but in the
+# future it will also provide an interface to some developer tools that talk
+# directly to a remote buildmaster (like 'try' and a status client)
+
+# the create/start/stop commands should all be run as the same user,
+# preferably a separate 'buildbot' account.
+
class MakerBase(usage.Options):
    """Shared option handling for the maker-style subcommands: --help/-h,
    --quiet/-q, and a single required <basedir> positional argument."""

    optFlags = [
        ['help', 'h', "Display this message"],
        ["quiet", "q", "Do not emit the commands being run"],
        ]

    #["basedir", "d", None, "Base directory for the buildmaster"],
    opt_h = usage.Options.opt_help

    def parseArgs(self, *args):
        # record the basedir (or None); at most one positional is accepted
        if args:
            self['basedir'] = args[0]
        else:
            self['basedir'] = None
        if len(args) > 1:
            raise usage.UsageError("I wasn't expecting so many arguments")

    def postOptions(self):
        basedir = self['basedir']
        if basedir is None:
            raise usage.UsageError("<basedir> parameter is required")
        self['basedir'] = os.path.abspath(basedir)
+
# Template written out by Maker.makefile() as "Makefile.sample" in a new
# basedir; gives start/stop/reconfig/log convenience targets.
makefile_sample = """# -*- makefile -*-

# This is a simple makefile which lives in a buildmaster/buildslave
# directory (next to the buildbot.tac file). It allows you to start/stop the
# master or slave by doing 'make start' or 'make stop'.

# The 'reconfig' target will tell a buildmaster to reload its config file.

start:
	twistd --no_save -y buildbot.tac

stop:
	kill `cat twistd.pid`

reconfig:
	kill -HUP `cat twistd.pid`

log:
	tail -f twistd.log
"""
+
class Maker:
    """Helper that populates/refreshes a buildmaster or buildslave basedir:
    creates the directory, info files, buildbot.tac, Makefile.sample,
    master.cfg.sample and public_html/, and can sanity-check master.cfg."""

    def __init__(self, config):
        # config: parsed command-line options (a MakerBase-style dict)
        self.config = config
        self.basedir = config['basedir']
        self.force = config.get('force', False)
        self.quiet = config['quiet']

    def mkdir(self):
        # create the basedir, or leave an existing one in place
        if os.path.exists(self.basedir):
            if not self.quiet:
                print "updating existing installation"
            return
        if not self.quiet: print "mkdir", self.basedir
        os.mkdir(self.basedir)

    def mkinfo(self):
        # create info/admin and info/host with placeholder contents the
        # user is expected to edit
        path = os.path.join(self.basedir, "info")
        if not os.path.exists(path):
            if not self.quiet: print "mkdir", path
            os.mkdir(path)
        created = False
        admin = os.path.join(path, "admin")
        if not os.path.exists(admin):
            if not self.quiet:
                print "Creating info/admin, you need to edit it appropriately"
            f = open(admin, "wt")
            f.write("Your Name Here <admin@youraddress.invalid>\n")
            f.close()
            created = True
        host = os.path.join(path, "host")
        if not os.path.exists(host):
            if not self.quiet:
                print "Creating info/host, you need to edit it appropriately"
            f = open(host, "wt")
            f.write("Please put a description of this build host here\n")
            f.close()
            created = True
        if created and not self.quiet:
            print "Please edit the files in %s appropriately." % path

    def chdir(self):
        if not self.quiet: print "chdir", self.basedir
        os.chdir(self.basedir)

    def makeTAC(self, contents, secret=False):
        # write buildbot.tac; if one already exists with different contents,
        # write buildbot.tac.new instead of clobbering it
        tacfile = "buildbot.tac"
        if os.path.exists(tacfile):
            oldcontents = open(tacfile, "rt").read()
            if oldcontents == contents:
                if not self.quiet:
                    print "buildbot.tac already exists and is correct"
                return
            if not self.quiet:
                print "not touching existing buildbot.tac"
                print "creating buildbot.tac.new instead"
            tacfile = "buildbot.tac.new"
        f = open(tacfile, "wt")
        f.write(contents)
        f.close()
        if secret:
            # the tac file may embed a slave password
            os.chmod(tacfile, 0600)

    def makefile(self):
        # write (or refresh) Makefile.sample from the module-level template
        target = "Makefile.sample"
        if os.path.exists(target):
            oldcontents = open(target, "rt").read()
            if oldcontents == makefile_sample:
                if not self.quiet:
                    print "Makefile.sample already exists and is correct"
                return
            if not self.quiet:
                print "replacing Makefile.sample"
        else:
            if not self.quiet:
                print "creating Makefile.sample"
        f = open(target, "wt")
        f.write(makefile_sample)
        f.close()

    def sampleconfig(self, source):
        # copy 'source' into master.cfg.sample, refreshing a stale copy
        target = "master.cfg.sample"
        config_sample = open(source, "rt").read()
        if os.path.exists(target):
            oldcontents = open(target, "rt").read()
            if oldcontents == config_sample:
                if not self.quiet:
                    print "master.cfg.sample already exists and is up-to-date"
                return
            if not self.quiet:
                print "replacing master.cfg.sample"
        else:
            if not self.quiet:
                print "creating master.cfg.sample"
        f = open(target, "wt")
        f.write(config_sample)
        f.close()
        os.chmod(target, 0600)

    def public_html(self, index_html, buildbot_css, robots_txt):
        # populate public_html/ from the given source files; an existing
        # directory is left completely untouched
        webdir = os.path.join(self.basedir, "public_html")
        if os.path.exists(webdir):
            if not self.quiet:
                print "public_html/ already exists: not replacing"
            return
        else:
            os.mkdir(webdir)
        if not self.quiet:
            print "populating public_html/"
        target = os.path.join(webdir, "index.html")
        f = open(target, "wt")
        f.write(open(index_html, "rt").read())
        f.close()

        target = os.path.join(webdir, "buildbot.css")
        f = open(target, "wt")
        f.write(open(buildbot_css, "rt").read())
        f.close()

        target = os.path.join(webdir, "robots.txt")
        f = open(target, "wt")
        f.write(open(robots_txt, "rt").read())
        f.close()

    def populate_if_missing(self, target, source, overwrite=False):
        # install 'source' at 'target'. If target exists with different
        # contents, either overwrite it (overwrite=True) or write the new
        # contents to target.new so the user can merge by hand.
        new_contents = open(source, "rt").read()
        if os.path.exists(target):
            old_contents = open(target, "rt").read()
            if old_contents != new_contents:
                if overwrite:
                    if not self.quiet:
                        print "%s has old/modified contents" % target
                        print " overwriting it with new contents"
                    open(target, "wt").write(new_contents)
                else:
                    if not self.quiet:
                        print "%s has old/modified contents" % target
                        print " writing new contents to %s.new" % target
                    open(target + ".new", "wt").write(new_contents)
            # otherwise, it's up to date
        else:
            if not self.quiet:
                print "populating %s" % target
            open(target, "wt").write(new_contents)

    def upgrade_public_html(self, index_html, buildbot_css, robots_txt):
        # like public_html(), but safe to run on an existing directory:
        # each file is refreshed via populate_if_missing()
        webdir = os.path.join(self.basedir, "public_html")
        if not os.path.exists(webdir):
            if not self.quiet:
                print "populating public_html/"
            os.mkdir(webdir)
        self.populate_if_missing(os.path.join(webdir, "index.html"),
                                 index_html)
        self.populate_if_missing(os.path.join(webdir, "buildbot.css"),
                                 buildbot_css)
        self.populate_if_missing(os.path.join(webdir, "robots.txt"),
                                 robots_txt)

    def check_master_cfg(self):
        # Try loading basedir/master.cfg through a throwaway BuildMaster.
        # Returns 0 on success, 1 on any problem (missing file or config
        # error), printing the gathered log messages on failure.
        from buildbot.master import BuildMaster
        from twisted.python import log, failure

        master_cfg = os.path.join(self.basedir, "master.cfg")
        if not os.path.exists(master_cfg):
            if not self.quiet:
                print "No master.cfg found"
            return 1

        # side-effects of loading the config file:

        # for each Builder defined in c['builders'], if the status directory
        # didn't already exist, it will be created, and the
        # $BUILDERNAME/builder pickle might be created (with a single
        # "builder created" event).

        # we put basedir in front of sys.path, because that's how the
        # buildmaster itself will run, and it is quite common to have the
        # buildmaster import helper classes from other .py files in its
        # basedir.

        if sys.path[0] != self.basedir:
            sys.path.insert(0, self.basedir)

        m = BuildMaster(self.basedir)
        # we need to route log.msg to stdout, so any problems can be seen
        # there. But if everything goes well, I'd rather not clutter stdout
        # with log messages. So instead we add a logObserver which gathers
        # messages and only displays them if something goes wrong.
        messages = []
        log.addObserver(messages.append)
        try:
            # this will raise an exception if there's something wrong with
            # the config file. Note that this BuildMaster instance is never
            # started, so it won't actually do anything with the
            # configuration.
            m.loadConfig(open(master_cfg, "r"))
        except:
            f = failure.Failure()
            if not self.quiet:
                print
                # NOTE(review): this loop rebinds 'm' (the BuildMaster) to
                # each log event dict; harmless since 'm' is not used again
                for m in messages:
                    print "".join(m['message'])
                print f
                print
                print "An error was detected in the master.cfg file."
                print "Please correct the problem and run 'buildbot upgrade-master' again."
                print
            return 1
        return 0
+
class UpgradeMasterOptions(MakerBase):
    # option parser for the 'buildbot upgrade-master' subcommand
    optFlags = [
        ["replace", "r", "Replace any modified files without confirmation."],
        ]

    def getSynopsis(self):
        return "Usage: buildbot upgrade-master [options] <basedir>"

    longdesc = """
    This command takes an existing buildmaster working directory and
    adds/modifies the files there to work with the current version of
    buildbot. When this command is finished, the buildmaster directory should
    look much like a brand-new one created by the 'create-master' command.

    Use this after you've upgraded your buildbot installation and before you
    restart the buildmaster to use the new version.

    If you have modified the files in your working directory, this command
    will leave them untouched, but will put the new recommended contents in a
    .new file (for example, if index.html has been modified, this command
    will create index.html.new). You can then look at the new version and
    decide how to merge its contents into your modified file.
    """
+
+def upgradeMaster(config):
+ basedir = config['basedir']
+ m = Maker(config)
+ # TODO: check Makefile
+ # TODO: check TAC file
+ # check web files: index.html, classic.css, robots.txt
+ webdir = os.path.join(basedir, "public_html")
+ m.upgrade_public_html(util.sibpath(__file__, "../status/web/index.html"),
+ util.sibpath(__file__, "../status/web/classic.css"),
+ util.sibpath(__file__, "../status/web/robots.txt"),
+ )
+ m.populate_if_missing(os.path.join(basedir, "master.cfg.sample"),
+ util.sibpath(__file__, "sample.cfg"),
+ overwrite=True)
+ rc = m.check_master_cfg()
+ if rc:
+ return rc
+ if not config['quiet']:
+ print "upgrade complete"
+
+
+class MasterOptions(MakerBase):
+ optFlags = [
+ ["force", "f",
+ "Re-use an existing directory (will not overwrite master.cfg file)"],
+ ]
+ optParameters = [
+ ["config", "c", "master.cfg", "name of the buildmaster config file"],
+ ["log-size", "s", "1000000",
+ "size at which to rotate twisted log files"],
+ ["log-count", "l", "None",
+ "limit the number of kept old twisted log files"],
+ ]
+ def getSynopsis(self):
+ return "Usage: buildbot create-master [options] <basedir>"
+
+ longdesc = """
+ This command creates a buildmaster working directory and buildbot.tac
+ file. The master will live in <dir> and create various files there.
+
+ At runtime, the master will read a configuration file (named
+ 'master.cfg' by default) in its basedir. This file should contain python
+ code which eventually defines a dictionary named 'BuildmasterConfig'.
+ The elements of this dictionary are used to configure the Buildmaster.
+ See doc/config.xhtml for details about what can be controlled through
+ this interface."""
+
+ def postOptions(self):
+ MakerBase.postOptions(self)
+ if not re.match('^\d+$', self['log-size']):
+ raise usage.UsageError("log-size parameter needs to be an int")
+ if not re.match('^\d+$', self['log-count']) and \
+ self['log-count'] != 'None':
+ raise usage.UsageError("log-count parameter needs to be an int "+
+ " or None")
+
+
+masterTAC = """
+from twisted.application import service
+from buildbot.master import BuildMaster
+
+basedir = r'%(basedir)s'
+configfile = r'%(config)s'
+rotateLength = %(log-size)s
+maxRotatedFiles = %(log-count)s
+
+application = service.Application('buildmaster')
+try:
+ from twisted.python.logfile import LogFile
+ from twisted.python.log import ILogObserver, FileLogObserver
+ logfile = LogFile.fromFullPath("twistd.log", rotateLength=rotateLength,
+ maxRotatedFiles=maxRotatedFiles)
+ application.setComponent(ILogObserver, FileLogObserver(logfile).emit)
+except ImportError:
+ # probably not yet twisted 8.2.0 and beyond, can't set log yet
+ pass
+BuildMaster(basedir, configfile).setServiceParent(application)
+
+"""
+
+def createMaster(config):
+ m = Maker(config)
+ m.mkdir()
+ m.chdir()
+ contents = masterTAC % config
+ m.makeTAC(contents)
+ m.sampleconfig(util.sibpath(__file__, "sample.cfg"))
+ m.public_html(util.sibpath(__file__, "../status/web/index.html"),
+ util.sibpath(__file__, "../status/web/classic.css"),
+ util.sibpath(__file__, "../status/web/robots.txt"),
+ )
+ m.makefile()
+
+ if not m.quiet: print "buildmaster configured in %s" % m.basedir
+
+class SlaveOptions(MakerBase):
+ optFlags = [
+ ["force", "f", "Re-use an existing directory"],
+ ]
+ optParameters = [
+# ["name", "n", None, "Name for this build slave"],
+# ["passwd", "p", None, "Password for this build slave"],
+# ["basedir", "d", ".", "Base directory to use"],
+# ["master", "m", "localhost:8007",
+# "Location of the buildmaster (host:port)"],
+
+ ["keepalive", "k", 600,
+ "Interval at which keepalives should be sent (in seconds)"],
+ ["usepty", None, 0,
+ "(1 or 0) child processes should be run in a pty (default 0)"],
+ ["umask", None, "None",
+ "controls permissions of generated files. Use --umask=022 to be world-readable"],
+ ["maxdelay", None, 300,
+ "Maximum time between connection attempts"],
+ ["log-size", "s", "1000000",
+ "size at which to rotate twisted log files"],
+ ["log-count", "l", "None",
+ "limit the number of kept old twisted log files"],
+ ]
+
+ longdesc = """
+ This command creates a buildslave working directory and buildbot.tac
+ file. The bot will use the <name> and <passwd> arguments to authenticate
+ itself when connecting to the master. All commands are run in a
+ build-specific subdirectory of <basedir>. <master> is a string of the
+ form 'hostname:port', and specifies where the buildmaster can be reached.
+
+ <name>, <passwd>, and <master> will be provided by the buildmaster
+ administrator for your bot. You must choose <basedir> yourself.
+ """
+
+ def getSynopsis(self):
+ return "Usage: buildbot create-slave [options] <basedir> <master> <name> <passwd>"
+
+ def parseArgs(self, *args):
+ if len(args) < 4:
+ raise usage.UsageError("command needs more arguments")
+ basedir, master, name, passwd = args
+ self['basedir'] = basedir
+ self['master'] = master
+ self['name'] = name
+ self['passwd'] = passwd
+
+ def postOptions(self):
+ MakerBase.postOptions(self)
+ self['usepty'] = int(self['usepty'])
+ self['keepalive'] = int(self['keepalive'])
+ self['maxdelay'] = int(self['maxdelay'])
+ if self['master'].find(":") == -1:
+ raise usage.UsageError("--master must be in the form host:portnum")
+ if not re.match('^\d+$', self['log-size']):
+ raise usage.UsageError("log-size parameter needs to be an int")
+ if not re.match('^\d+$', self['log-count']) and \
+ self['log-count'] != 'None':
+ raise usage.UsageError("log-count parameter needs to be an int "+
+ " or None")
+
+slaveTAC = """
+from twisted.application import service
+from buildbot.slave.bot import BuildSlave
+
+basedir = r'%(basedir)s'
+buildmaster_host = '%(host)s'
+port = %(port)d
+slavename = '%(name)s'
+passwd = '%(passwd)s'
+keepalive = %(keepalive)d
+usepty = %(usepty)d
+umask = %(umask)s
+maxdelay = %(maxdelay)d
+rotateLength = %(log-size)s
+maxRotatedFiles = %(log-count)s
+
+application = service.Application('buildslave')
+try:
+ from twisted.python.logfile import LogFile
+ from twisted.python.log import ILogObserver, FileLogObserver
+ logfile = LogFile.fromFullPath("twistd.log", rotateLength=rotateLength,
+ maxRotatedFiles=maxRotatedFiles)
+ application.setComponent(ILogObserver, FileLogObserver(logfile).emit)
+except ImportError:
+ # probably not yet twisted 8.2.0 and beyond, can't set log yet
+ pass
+s = BuildSlave(buildmaster_host, port, slavename, passwd, basedir,
+ keepalive, usepty, umask=umask, maxdelay=maxdelay)
+s.setServiceParent(application)
+
+"""
+
+def createSlave(config):
+ m = Maker(config)
+ m.mkdir()
+ m.chdir()
+ try:
+ master = config['master']
+ host, port = re.search(r'(.+):(\d+)', master).groups()
+ config['host'] = host
+ config['port'] = int(port)
+ except:
+ print "unparseable master location '%s'" % master
+ print " expecting something more like localhost:8007"
+ raise
+ contents = slaveTAC % config
+
+ m.makeTAC(contents, secret=True)
+
+ m.makefile()
+ m.mkinfo()
+
+ if not m.quiet: print "buildslave configured in %s" % m.basedir
+
+
+
+def stop(config, signame="TERM", wait=False):
+ import signal
+ basedir = config['basedir']
+ quiet = config['quiet']
+ os.chdir(basedir)
+ try:
+ f = open("twistd.pid", "rt")
+ except:
+ raise BuildbotNotRunningError
+ pid = int(f.read().strip())
+ signum = getattr(signal, "SIG"+signame)
+ timer = 0
+ os.kill(pid, signum)
+ if not wait:
+ if not quiet:
+ print "sent SIG%s to process" % signame
+ return
+ time.sleep(0.1)
+ while timer < 10:
+ # poll once per second until twistd.pid goes away, up to 10 seconds
+ try:
+ os.kill(pid, 0)
+ except OSError:
+ if not quiet:
+ print "buildbot process %d is dead" % pid
+ return
+ timer += 1
+ time.sleep(1)
+ if not quiet:
+ print "never saw process go away"
+
+def restart(config):
+ quiet = config['quiet']
+ from buildbot.scripts.startup import start
+ try:
+ stop(config, wait=True)
+ except BuildbotNotRunningError:
+ pass
+ if not quiet:
+ print "now restarting buildbot process.."
+ start(config)
+
+
+def loadOptions(filename="options", here=None, home=None):
+ """Find the .buildbot/FILENAME file. Crawl from the current directory up
+ towards the root, and also look in ~/.buildbot . The first directory
+ that's owned by the user and has the file we're looking for wins. Windows
+ skips the owned-by-user test.
+
+ @rtype: dict
+ @return: a dictionary of names defined in the options file. If no options
+ file was found, return an empty dict.
+ """
+
+ if here is None:
+ here = os.getcwd()
+ here = os.path.abspath(here)
+
+ if home is None:
+ if runtime.platformType == 'win32':
+ home = os.path.join(os.environ['APPDATA'], "buildbot")
+ else:
+ home = os.path.expanduser("~/.buildbot")
+
+ searchpath = []
+ toomany = 20
+ while True:
+ searchpath.append(os.path.join(here, ".buildbot"))
+ next = os.path.dirname(here)
+ if next == here:
+ break # we've hit the root
+ here = next
+ toomany -= 1 # just in case
+ if toomany == 0:
+ raise ValueError("Hey, I seem to have wandered up into the "
+ "infinite glories of the heavens. Oops.")
+ searchpath.append(home)
+
+ localDict = {}
+
+ for d in searchpath:
+ if os.path.isdir(d):
+ if runtime.platformType != 'win32':
+ if os.stat(d)[stat.ST_UID] != os.getuid():
+ print "skipping %s because you don't own it" % d
+ continue # security, skip other people's directories
+ optfile = os.path.join(d, filename)
+ if os.path.exists(optfile):
+ try:
+ f = open(optfile, "r")
+ options = f.read()
+ exec options in localDict
+ except:
+ print "error while reading %s" % optfile
+ raise
+ break
+
+ for k in localDict.keys():
+ if k.startswith("__"):
+ del localDict[k]
+ return localDict
+
+class StartOptions(MakerBase):
+ optFlags = [
+ ['quiet', 'q', "Don't display startup log messages"],
+ ]
+ def getSynopsis(self):
+ return "Usage: buildbot start <basedir>"
+
+class StopOptions(MakerBase):
+ def getSynopsis(self):
+ return "Usage: buildbot stop <basedir>"
+
+class ReconfigOptions(MakerBase):
+ optFlags = [
+ ['quiet', 'q', "Don't display log messages about reconfiguration"],
+ ]
+ def getSynopsis(self):
+ return "Usage: buildbot reconfig <basedir>"
+
+
+
+class RestartOptions(MakerBase):
+ optFlags = [
+ ['quiet', 'q', "Don't display startup log messages"],
+ ]
+ def getSynopsis(self):
+ return "Usage: buildbot restart <basedir>"
+
+class DebugClientOptions(usage.Options):
+ optFlags = [
+ ['help', 'h', "Display this message"],
+ ]
+ optParameters = [
+ ["master", "m", None,
+ "Location of the buildmaster's slaveport (host:port)"],
+ ["passwd", "p", None, "Debug password to use"],
+ ]
+
+ def parseArgs(self, *args):
+ if len(args) > 0:
+ self['master'] = args[0]
+ if len(args) > 1:
+ self['passwd'] = args[1]
+ if len(args) > 2:
+ raise usage.UsageError("I wasn't expecting so many arguments")
+
+def debugclient(config):
+ from buildbot.clients import debug
+ opts = loadOptions()
+
+ master = config.get('master')
+ if not master:
+ master = opts.get('master')
+ if master is None:
+ raise usage.UsageError("master must be specified: on the command "
+ "line or in ~/.buildbot/options")
+
+ passwd = config.get('passwd')
+ if not passwd:
+ passwd = opts.get('debugPassword')
+ if passwd is None:
+ raise usage.UsageError("passwd must be specified: on the command "
+ "line or in ~/.buildbot/options")
+
+ d = debug.DebugWidget(master, passwd)
+ d.run()
+
+class StatusClientOptions(usage.Options):
+ optFlags = [
+ ['help', 'h', "Display this message"],
+ ]
+ optParameters = [
+ ["master", "m", None,
+ "Location of the buildmaster's status port (host:port)"],
+ ]
+
+ def parseArgs(self, *args):
+ if len(args) > 0:
+ self['master'] = args[0]
+ if len(args) > 1:
+ raise usage.UsageError("I wasn't expecting so many arguments")
+
+def statuslog(config):
+ from buildbot.clients import base
+ opts = loadOptions()
+ master = config.get('master')
+ if not master:
+ master = opts.get('masterstatus')
+ if master is None:
+ raise usage.UsageError("master must be specified: on the command "
+ "line or in ~/.buildbot/options")
+ c = base.TextClient(master)
+ c.run()
+
+def statusgui(config):
+ from buildbot.clients import gtkPanes
+ opts = loadOptions()
+ master = config.get('master')
+ if not master:
+ master = opts.get('masterstatus')
+ if master is None:
+ raise usage.UsageError("master must be specified: on the command "
+ "line or in ~/.buildbot/options")
+ c = gtkPanes.GtkClient(master)
+ c.run()
+
+class SendChangeOptions(usage.Options):
+ optParameters = [
+ ("master", "m", None,
+ "Location of the buildmaster's PBListener (host:port)"),
+ ("username", "u", None, "Username performing the commit"),
+ ("branch", "b", None, "Branch specifier"),
+ ("category", "c", None, "Category of repository"),
+ ("revision", "r", None, "Revision specifier (string)"),
+ ("revision_number", "n", None, "Revision specifier (integer)"),
+ ("revision_file", None, None, "Filename containing revision spec"),
+ ("comments", "m", None, "log message"),
+ ("logfile", "F", None,
+ "Read the log messages from this file (- for stdin)"),
+ ]
+ def getSynopsis(self):
+ return "Usage: buildbot sendchange [options] filenames.."
+ def parseArgs(self, *args):
+ self['files'] = args
+
+
+def sendchange(config, runReactor=False):
+ """Send a single change to the buildmaster's PBChangeSource. The
+    connection will be dropped as soon as the Change has been sent."""
+ from buildbot.clients.sendchange import Sender
+
+ opts = loadOptions()
+ user = config.get('username', opts.get('username'))
+ master = config.get('master', opts.get('master'))
+ branch = config.get('branch', opts.get('branch'))
+ category = config.get('category', opts.get('category'))
+ revision = config.get('revision')
+ # SVN and P4 use numeric revisions
+ if config.get("revision_number"):
+ revision = int(config['revision_number'])
+ if config.get("revision_file"):
+ revision = open(config["revision_file"],"r").read()
+
+ comments = config.get('comments')
+ if not comments and config.get('logfile'):
+ if config['logfile'] == "-":
+ f = sys.stdin
+ else:
+ f = open(config['logfile'], "rt")
+ comments = f.read()
+ if comments is None:
+ comments = ""
+
+ files = config.get('files', [])
+
+ assert user, "you must provide a username"
+ assert master, "you must provide the master location"
+
+ s = Sender(master, user)
+ d = s.send(branch, revision, comments, files, category=category)
+ if runReactor:
+ d.addCallbacks(s.printSuccess, s.printFailure)
+ d.addBoth(s.stop)
+ s.run()
+ return d
+
+
+class ForceOptions(usage.Options):
+ optParameters = [
+ ["builder", None, None, "which Builder to start"],
+ ["branch", None, None, "which branch to build"],
+ ["revision", None, None, "which revision to build"],
+ ["reason", None, None, "the reason for starting the build"],
+ ]
+
+ def parseArgs(self, *args):
+ args = list(args)
+ if len(args) > 0:
+ if self['builder'] is not None:
+ raise usage.UsageError("--builder provided in two ways")
+ self['builder'] = args.pop(0)
+ if len(args) > 0:
+ if self['reason'] is not None:
+ raise usage.UsageError("--reason provided in two ways")
+ self['reason'] = " ".join(args)
+
+
+class TryOptions(usage.Options):
+ optParameters = [
+ ["connect", "c", None,
+ "how to reach the buildmaster, either 'ssh' or 'pb'"],
+ # for ssh, use --tryhost, --username, and --trydir
+ ["tryhost", None, None,
+ "the hostname (used by ssh) for the buildmaster"],
+ ["trydir", None, None,
+ "the directory (on the tryhost) where tryjobs are deposited"],
+ ["username", "u", None, "Username performing the trial build"],
+ # for PB, use --master, --username, and --passwd
+ ["master", "m", None,
+ "Location of the buildmaster's PBListener (host:port)"],
+ ["passwd", None, None, "password for PB authentication"],
+
+ ["diff", None, None,
+ "Filename of a patch to use instead of scanning a local tree. Use '-' for stdin."],
+ ["patchlevel", "p", 0,
+ "Number of slashes to remove from patch pathnames, like the -p option to 'patch'"],
+
+ ["baserev", None, None,
+ "Base revision to use instead of scanning a local tree."],
+
+ ["vc", None, None,
+ "The VC system in use, one of: cvs,svn,tla,baz,darcs"],
+ ["branch", None, None,
+ "The branch in use, for VC systems that can't figure it out"
+ " themselves"],
+
+ ["builder", "b", None,
+ "Run the trial build on this Builder. Can be used multiple times."],
+ ["properties", None, None,
+ "A set of properties made available in the build environment, format:prop=value,propb=valueb..."],
+ ]
+
+ optFlags = [
+ ["wait", None, "wait until the builds have finished"],
+ ["dryrun", 'n', "Gather info, but don't actually submit."],
+ ]
+
+ def __init__(self):
+ super(TryOptions, self).__init__()
+ self['builders'] = []
+ self['properties'] = {}
+
+ def opt_builder(self, option):
+ self['builders'].append(option)
+
+ def opt_properties(self, option):
+ # We need to split the value of this option into a dictionary of properties
+ properties = {}
+ propertylist = option.split(",")
+ for i in range(0,len(propertylist)):
+ print propertylist[i]
+ splitproperty = propertylist[i].split("=")
+ properties[splitproperty[0]] = splitproperty[1]
+ self['properties'] = properties
+
+ def opt_patchlevel(self, option):
+ self['patchlevel'] = int(option)
+
+ def getSynopsis(self):
+ return "Usage: buildbot try [options]"
+
+def doTry(config):
+ from buildbot.scripts import tryclient
+ t = tryclient.Try(config)
+ t.run()
+
+class TryServerOptions(usage.Options):
+ optParameters = [
+ ["jobdir", None, None, "the jobdir (maildir) for submitting jobs"],
+ ]
+
+def doTryServer(config):
+ import md5
+ jobdir = os.path.expanduser(config["jobdir"])
+ job = sys.stdin.read()
+ # now do a 'safecat'-style write to jobdir/tmp, then move atomically to
+ # jobdir/new . Rather than come up with a unique name randomly, I'm just
+ # going to MD5 the contents and prepend a timestamp.
+ timestring = "%d" % time.time()
+ jobhash = md5.new(job).hexdigest()
+ fn = "%s-%s" % (timestring, jobhash)
+ tmpfile = os.path.join(jobdir, "tmp", fn)
+ newfile = os.path.join(jobdir, "new", fn)
+ f = open(tmpfile, "w")
+ f.write(job)
+ f.close()
+ os.rename(tmpfile, newfile)
+
+
+class CheckConfigOptions(usage.Options):
+ optFlags = [
+ ['quiet', 'q', "Don't display error messages or tracebacks"],
+ ]
+
+ def getSynopsis(self):
+ return "Usage :buildbot checkconfig [configFile]\n" + \
+ " If not specified, 'master.cfg' will be used as 'configFile'"
+
+ def parseArgs(self, *args):
+ if len(args) >= 1:
+ self['configFile'] = args[0]
+ else:
+ self['configFile'] = 'master.cfg'
+
+
+def doCheckConfig(config):
+ quiet = config.get('quiet')
+ configFile = config.get('configFile')
+ try:
+ from buildbot.scripts.checkconfig import ConfigLoader
+ ConfigLoader(configFile)
+ except:
+ if not quiet:
+ # Print out the traceback in a nice format
+ t, v, tb = sys.exc_info()
+ traceback.print_exception(t, v, tb)
+ sys.exit(1)
+
+ if not quiet:
+ print "Config file is good!"
+
+
+class Options(usage.Options):
+ synopsis = "Usage: buildbot <command> [command options]"
+
+ subCommands = [
+ # the following are all admin commands
+ ['create-master', None, MasterOptions,
+ "Create and populate a directory for a new buildmaster"],
+ ['upgrade-master', None, UpgradeMasterOptions,
+ "Upgrade an existing buildmaster directory for the current version"],
+ ['create-slave', None, SlaveOptions,
+ "Create and populate a directory for a new buildslave"],
+ ['start', None, StartOptions, "Start a buildmaster or buildslave"],
+ ['stop', None, StopOptions, "Stop a buildmaster or buildslave"],
+ ['restart', None, RestartOptions,
+ "Restart a buildmaster or buildslave"],
+
+ ['reconfig', None, ReconfigOptions,
+ "SIGHUP a buildmaster to make it re-read the config file"],
+ ['sighup', None, ReconfigOptions,
+ "SIGHUP a buildmaster to make it re-read the config file"],
+
+ ['sendchange', None, SendChangeOptions,
+ "Send a change to the buildmaster"],
+
+ ['debugclient', None, DebugClientOptions,
+ "Launch a small debug panel GUI"],
+
+ ['statuslog', None, StatusClientOptions,
+ "Emit current builder status to stdout"],
+ ['statusgui', None, StatusClientOptions,
+ "Display a small window showing current builder status"],
+
+ #['force', None, ForceOptions, "Run a build"],
+ ['try', None, TryOptions, "Run a build with your local changes"],
+
+ ['tryserver', None, TryServerOptions,
+ "buildmaster-side 'try' support function, not for users"],
+
+ ['checkconfig', None, CheckConfigOptions,
+ "test the validity of a master.cfg config file"],
+
+ # TODO: 'watch'
+ ]
+
+ def opt_version(self):
+ import buildbot
+ print "Buildbot version: %s" % buildbot.version
+ usage.Options.opt_version(self)
+
+ def opt_verbose(self):
+ from twisted.python import log
+ log.startLogging(sys.stderr)
+
+ def postOptions(self):
+ if not hasattr(self, 'subOptions'):
+ raise usage.UsageError("must specify a command")
+
+
+def run():
+ config = Options()
+ try:
+ config.parseOptions()
+ except usage.error, e:
+ print "%s: %s" % (sys.argv[0], e)
+ print
+ c = getattr(config, 'subOptions', config)
+ print str(c)
+ sys.exit(1)
+
+ command = config.subCommand
+ so = config.subOptions
+
+ if command == "create-master":
+ createMaster(so)
+ elif command == "upgrade-master":
+ upgradeMaster(so)
+ elif command == "create-slave":
+ createSlave(so)
+ elif command == "start":
+ from buildbot.scripts.startup import start
+ start(so)
+ elif command == "stop":
+ stop(so, wait=True)
+ elif command == "restart":
+ restart(so)
+ elif command == "reconfig" or command == "sighup":
+ from buildbot.scripts.reconfig import Reconfigurator
+ Reconfigurator().run(so)
+ elif command == "sendchange":
+ sendchange(so, True)
+ elif command == "debugclient":
+ debugclient(so)
+ elif command == "statuslog":
+ statuslog(so)
+ elif command == "statusgui":
+ statusgui(so)
+ elif command == "try":
+ doTry(so)
+ elif command == "tryserver":
+ doTryServer(so)
+ elif command == "checkconfig":
+ doCheckConfig(so)
+
+
diff --git a/buildbot/buildbot/scripts/sample.cfg b/buildbot/buildbot/scripts/sample.cfg
new file mode 100644
index 0000000..b405673
--- /dev/null
+++ b/buildbot/buildbot/scripts/sample.cfg
@@ -0,0 +1,175 @@
+# -*- python -*-
+# ex: set syntax=python:
+
+# This is a sample buildmaster config file. It must be installed as
+# 'master.cfg' in your buildmaster's base directory (although the filename
+# can be changed with the --config option to 'buildbot create-master').
+
+# It has one job: define a dictionary named BuildmasterConfig. This
+# dictionary has a variety of keys to control different aspects of the
+# buildmaster. They are documented in docs/config.xhtml .
+
+
+# This is the dictionary that the buildmaster pays attention to. We also use
+# a shorter alias to save typing.
+c = BuildmasterConfig = {}
+
+####### BUILDSLAVES
+
+# the 'slaves' list defines the set of allowable buildslaves. Each element is
+# a BuildSlave object, which is created with bot-name, bot-password. These
+# correspond to values given to the buildslave's mktap invocation.
+from buildbot.buildslave import BuildSlave
+c['slaves'] = [BuildSlave("bot1name", "bot1passwd")]
+
+# to limit to two concurrent builds on a slave, use
+# c['slaves'] = [BuildSlave("bot1name", "bot1passwd", max_builds=2)]
+
+
+# 'slavePortnum' defines the TCP port to listen on. This must match the value
+# configured into the buildslaves (with their --master option)
+
+c['slavePortnum'] = 9989
+
+####### CHANGESOURCES
+
+# the 'change_source' setting tells the buildmaster how it should find out
+# about source code changes. Any class which implements IChangeSource can be
+# put here: there are several in buildbot/changes/*.py to choose from.
+
+from buildbot.changes.pb import PBChangeSource
+c['change_source'] = PBChangeSource()
+
+# For example, if you had CVSToys installed on your repository, and your
+# CVSROOT/freshcfg file had an entry like this:
+#pb = ConfigurationSet([
+# (None, None, None, PBService(userpass=('foo', 'bar'), port=4519)),
+# ])
+
+# then you could use the following buildmaster Change Source to subscribe to
+# the FreshCVS daemon and be notified on every commit:
+#
+#from buildbot.changes.freshcvs import FreshCVSSource
+#fc_source = FreshCVSSource("cvs.example.com", 4519, "foo", "bar")
+#c['change_source'] = fc_source
+
+# or, use a PBChangeSource, and then have your repository's commit script run
+# 'buildbot sendchange', or use contrib/svn_buildbot.py, or
+# contrib/arch_buildbot.py :
+#
+#from buildbot.changes.pb import PBChangeSource
+#c['change_source'] = PBChangeSource()
+
+
+####### SCHEDULERS
+
+## configure the Schedulers
+
+from buildbot.scheduler import Scheduler
+c['schedulers'] = []
+c['schedulers'].append(Scheduler(name="all", branch=None,
+ treeStableTimer=2*60,
+ builderNames=["buildbot-full"]))
+
+
+####### BUILDERS
+
+# the 'builders' list defines the Builders. Each one is configured with a
+# dictionary, using the following keys:
+# name (required): the name used to describe this builder
+#  slavename (required): which slave to use (must appear in c['slaves'])
+# builddir (required): which subdirectory to run the builder in
+# factory (required): a BuildFactory to define how the build is run
+# periodicBuildTime (optional): if set, force a build every N seconds
+
+# buildbot/process/factory.py provides several BuildFactory classes you can
+# start with, which implement build processes for common targets (GNU
+# autoconf projects, CPAN perl modules, etc). The factory.BuildFactory is the
+# base class, and is configured with a series of BuildSteps. When the build
+# is run, the appropriate buildslave is told to execute each Step in turn.
+
+# the first BuildStep is typically responsible for obtaining a copy of the
+# sources. There are source-obtaining Steps in buildbot/steps/source.py for
+# CVS, SVN, and others.
+
+cvsroot = ":pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot"
+cvsmodule = "buildbot"
+
+from buildbot.process import factory
+from buildbot.steps.source import CVS
+from buildbot.steps.shell import Compile
+from buildbot.steps.python_twisted import Trial
+f1 = factory.BuildFactory()
+f1.addStep(CVS(cvsroot=cvsroot, cvsmodule=cvsmodule, login="", mode="copy"))
+f1.addStep(Compile(command=["python", "./setup.py", "build"]))
+f1.addStep(Trial(testpath="."))
+
+b1 = {'name': "buildbot-full",
+ 'slavename': "bot1name",
+ 'builddir': "full",
+ 'factory': f1,
+ }
+c['builders'] = [b1]
+
+
+####### STATUS TARGETS
+
+# 'status' is a list of Status Targets. The results of each build will be
+# pushed to these targets. buildbot/status/*.py has a variety to choose from,
+# including web pages, email senders, and IRC bots.
+
+c['status'] = []
+
+from buildbot.status import html
+c['status'].append(html.WebStatus(http_port=8010))
+
+# from buildbot.status import mail
+# c['status'].append(mail.MailNotifier(fromaddr="buildbot@localhost",
+# extraRecipients=["builds@example.com"],
+# sendToInterestedUsers=False))
+#
+# from buildbot.status import words
+# c['status'].append(words.IRC(host="irc.example.com", nick="bb",
+# channels=["#example"]))
+#
+# from buildbot.status import client
+# c['status'].append(client.PBListener(9988))
+
+
+####### DEBUGGING OPTIONS
+
+# if you set 'debugPassword', then you can connect to the buildmaster with
+# the diagnostic tool in contrib/debugclient.py . From this tool, you can
+# manually force builds and inject changes, which may be useful for testing
+# your buildmaster without actually committing changes to your repository (or
+# before you have a functioning 'sources' set up). The debug tool uses the
+# same port number as the slaves do: 'slavePortnum'.
+
+#c['debugPassword'] = "debugpassword"
+
+# if you set 'manhole', you can ssh into the buildmaster and get an
+# interactive python shell, which may be useful for debugging buildbot
+# internals. It is probably only useful for buildbot developers. You can also
+# use an authorized_keys file, or plain telnet.
+#from buildbot import manhole
+#c['manhole'] = manhole.PasswordManhole("tcp:9999:interface=127.0.0.1",
+# "admin", "password")
+
+
+####### PROJECT IDENTITY
+
+# the 'projectName' string will be used to describe the project that this
+# buildbot is working on. For example, it is used as the title of the
+# waterfall HTML page. The 'projectURL' string will be used to provide a link
+# from buildbot HTML pages to your project's home page.
+
+c['projectName'] = "Buildbot"
+c['projectURL'] = "http://buildbot.sourceforge.net/"
+
+# the 'buildbotURL' string should point to the location where the buildbot's
+# internal web server (usually the html.Waterfall page) is visible. This
+# typically uses the port number set in the Waterfall 'status' entry, but
+# with an externally-visible host name which the buildbot cannot figure out
+# without some help.
+
+c['buildbotURL'] = "http://localhost:8010/"
diff --git a/buildbot/buildbot/scripts/startup.py b/buildbot/buildbot/scripts/startup.py
new file mode 100644
index 0000000..9472af2
--- /dev/null
+++ b/buildbot/buildbot/scripts/startup.py
@@ -0,0 +1,128 @@
+
+import os, sys, time
+
+class Follower:
+ def follow(self):
+ from twisted.internet import reactor
+ from buildbot.scripts.reconfig import LogWatcher
+ self.rc = 0
+ print "Following twistd.log until startup finished.."
+ lw = LogWatcher("twistd.log")
+ d = lw.start()
+ d.addCallbacks(self._success, self._failure)
+ reactor.run()
+ return self.rc
+
+ def _success(self, processtype):
+ from twisted.internet import reactor
+ print "The %s appears to have (re)started correctly." % processtype
+ self.rc = 0
+ reactor.stop()
+
+ def _failure(self, why):
+ from twisted.internet import reactor
+ from buildbot.scripts.logwatcher import BuildmasterTimeoutError, \
+ ReconfigError, BuildslaveTimeoutError, BuildSlaveDetectedError
+ if why.check(BuildmasterTimeoutError):
+ print """
+The buildmaster took more than 10 seconds to start, so we were unable to
+confirm that it started correctly. Please 'tail twistd.log' and look for a
+line that says 'configuration update complete' to verify correct startup.
+"""
+ elif why.check(BuildslaveTimeoutError):
+ print """
+The buildslave took more than 10 seconds to start and/or connect to the
+buildmaster, so we were unable to confirm that it started and connected
+correctly. Please 'tail twistd.log' and look for a line that says 'message
+from master: attached' to verify correct startup. If you see a bunch of
+messages like 'will retry in 6 seconds', your buildslave might not have the
+correct hostname or portnumber for the buildmaster, or the buildmaster might
+not be running. If you see messages like
+ 'Failure: twisted.cred.error.UnauthorizedLogin'
+then your buildslave might be using the wrong botname or password. Please
+correct these problems and then restart the buildslave.
+"""
+ elif why.check(ReconfigError):
+ print """
+The buildmaster appears to have encountered an error in the master.cfg config
+file during startup. It is probably running with an empty configuration right
+now. Please inspect and fix master.cfg, then restart the buildmaster.
+"""
+ elif why.check(BuildSlaveDetectedError):
+ print """
+Buildslave is starting up, not following logfile.
+"""
+ else:
+ print """
+Unable to confirm that the buildmaster started correctly. You may need to
+stop it, fix the config file, and restart.
+"""
+ print why
+ self.rc = 1
+ reactor.stop()
+
+
+def start(config):
+ os.chdir(config['basedir'])
+ if (not os.path.exists("buildbot.tac") and
+ not os.path.exists("Makefile.buildbot")):
+ print "This doesn't look like a buildbot base directory:"
+ print "No buildbot.tac or Makefile.buildbot file."
+ print "Giving up!"
+ sys.exit(1)
+ if config['quiet']:
+ return launch(config)
+
+ # we probably can't do this os.fork under windows
+ from twisted.python.runtime import platformType
+ if platformType == "win32":
+ return launch(config)
+
+ # fork a child to launch the daemon, while the parent process tails the
+ # logfile
+ if os.fork():
+ # this is the parent
+ rc = Follower().follow()
+ sys.exit(rc)
+ # this is the child: give the logfile-watching parent a chance to start
+ # watching it before we start the daemon
+ time.sleep(0.2)
+ launch(config)
+
+def launch(config):
+ sys.path.insert(0, os.path.abspath(os.getcwd()))
+ if os.path.exists("/usr/bin/make") and os.path.exists("Makefile.buildbot"):
+ # Preferring the Makefile lets slave admins do useful things like set
+ # up environment variables for the buildslave.
+ cmd = "make -f Makefile.buildbot start"
+ if not config['quiet']:
+ print cmd
+ os.system(cmd)
+ else:
+ # see if we can launch the application without actually having to
+ # spawn twistd, since spawning processes correctly is a real hassle
+ # on windows.
+ from twisted.python.runtime import platformType
+ argv = ["twistd",
+ "--no_save",
+ "--logfile=twistd.log", # windows doesn't use the same default
+ "--python=buildbot.tac"]
+ if platformType == "win32":
+ argv.append("--reactor=win32")
+ sys.argv = argv
+
+ # this is copied from bin/twistd. twisted-2.0.0 through 2.4.0 use
+ # _twistw.run . Twisted-2.5.0 and later use twistd.run, even for
+ # windows.
+ from twisted import __version__
+ major, minor, ignored = __version__.split(".", 2)
+ major = int(major)
+ minor = int(minor)
+ if (platformType == "win32" and (major == 2 and minor < 5)):
+ from twisted.scripts import _twistw
+ run = _twistw.run
+ else:
+ from twisted.scripts import twistd
+ run = twistd.run
+ run()
+
diff --git a/buildbot/buildbot/scripts/tryclient.py b/buildbot/buildbot/scripts/tryclient.py
new file mode 100644
index 0000000..b1b7658
--- /dev/null
+++ b/buildbot/buildbot/scripts/tryclient.py
@@ -0,0 +1,707 @@
+# -*- test-case-name: buildbot.test.test_scheduler,buildbot.test.test_vc -*-
+
+import sys, os, re, time, random
+from twisted.internet import utils, protocol, defer, reactor, task
+from twisted.spread import pb
+from twisted.cred import credentials
+from twisted.python import log
+from twisted.python.procutils import which
+
+from buildbot.sourcestamp import SourceStamp
+from buildbot.scripts import runner
+from buildbot.util import now
+from buildbot.status import builder
+
class SourceStampExtractor:
    """Base class for extracting a SourceStamp from a local source tree.

    Subclasses define .vcexe (the VC command's name) and .patchlevel, and
    implement getBaseRevision()/getPatch() to fill in .baserev and .patch.
    """

    def __init__(self, treetop, branch):
        self.treetop = treetop
        self.branch = branch
        # locate the version-control executable on the PATH
        self.exe = which(self.vcexe)[0]

    def dovc(self, cmd):
        """Run the VC tool with the given argument list (the executable
        itself is implied) and return a Deferred firing with its stdout."""
        environment = os.environ.copy()
        # force untranslated output so our parsers see stable text
        environment['LC_ALL'] = "C"
        d = utils.getProcessOutputAndValue(self.exe, cmd, env=environment,
                                           path=self.treetop)
        d.addCallback(self._didvc, cmd)
        return d

    def _didvc(self, res, cmd):
        # res is the (stdout, stderr, exitcode) tuple from
        # getProcessOutputAndValue; we only care about stdout.
        # 'bzr diff' sets rc=1 if there were any differences. tla, baz, and
        # cvs do something similar, so don't bother requiring rc=0.
        stdout = res[0]
        return stdout

    def get(self):
        """Return a Deferred that fires with a SourceStamp instance."""
        d = self.getBaseRevision()
        d.addCallback(self.getPatch)
        d.addCallback(self.done)
        return d

    def readPatch(self, res, patchlevel):
        # stash the diff text together with its -p level
        self.patch = (patchlevel, res)

    def done(self, res):
        # TODO: figure out the branch too
        return SourceStamp(self.branch, self.baserev, self.patch)
+
class CVSExtractor(SourceStampExtractor):
    """Extract a SourceStamp from a CVS checkout (trunk only)."""
    patchlevel = 0
    vcexe = "cvs"

    def getBaseRevision(self):
        # this depends upon our local clock and the repository's clock being
        # reasonably synchronized with each other. Everything is expressed
        # in UTC because the '%z' format specifier for strftime doesn't
        # always work.
        stamp = time.strftime("%Y-%m-%d %H:%M:%S +0000", time.gmtime(now()))
        self.baserev = stamp
        return defer.succeed(None)

    def getPatch(self, res):
        if self.branch is not None:
            # 'cvs diff' won't take both -r and -D at the same time (it
            # ignores the -r). As best I can tell, there is no way to make
            # cvs give you a diff relative to a timestamp on the non-trunk
            # branch. A bare 'cvs diff' will tell you about the changes
            # relative to your checked-out versions, but I know of no way to
            # find out what those checked-out versions are.
            raise RuntimeError("Sorry, CVS 'try' builds don't work with "
                               "branches")
        # the -q tells CVS to not announce each directory as it works
        d = self.dovc(['-q', 'diff', '-u', '-D', self.baserev])
        d.addCallback(self.readPatch, self.patchlevel)
        return d
+
class SVNExtractor(SourceStampExtractor):
    """Extract a SourceStamp from a Subversion working copy."""
    patchlevel = 0
    vcexe = "svn"

    def getBaseRevision(self):
        d = self.dovc(["status", "-u"])
        d.addCallback(self.parseStatus)
        return d

    def parseStatus(self, res):
        # svn shows the base revision for each file that has been modified or
        # which needs an update. You can update each file to a different
        # version, so each file is displayed with its individual base
        # revision. It also shows the repository-wide latest revision number
        # on the last line ("Status against revision: \d+").
        #
        # for our purposes, we use the latest revision number as the "base"
        # revision, and get a diff against that. This means we will get
        # reverse-diffs for local files that need updating, but the resulting
        # tree will still be correct. The only weirdness is that the baserev
        # that we emit may be different than the version of the tree that we
        # first checked out.
        #
        # to do this differently would probably involve scanning the revision
        # numbers to find the max (or perhaps the min) revision, and then
        # using that as a base.
        pattern = re.compile(r'^Status against revision:\s+(\d+)')
        for line in res.split("\n"):
            match = pattern.search(line)
            if match:
                self.baserev = int(match.group(1))
                return
        raise IndexError("Could not find 'Status against revision' in "
                         "SVN output: %s" % res)

    def getPatch(self, res):
        d = self.dovc(["diff", "-r%d" % self.baserev])
        d.addCallback(self.readPatch, self.patchlevel)
        return d
+
class BazExtractor(SourceStampExtractor):
    """Extract a SourceStamp from a 'baz' (GNU arch fork) tree."""
    patchlevel = 1
    vcexe = "baz"

    def getBaseRevision(self):
        d = self.dovc(["tree-id"])
        d.addCallback(self.parseStatus)
        return d

    def parseStatus(self, res):
        # the tree-id looks like ARCHIVE/BRANCH--REVISION: the branch is
        # everything between the first '/' and the last '--', the revision
        # is whatever follows that '--'.
        tree_id = res.strip()
        cut = tree_id.index("/")
        sep = tree_id.rindex("--")
        self.branch = tree_id[cut + 1:sep]
        self.baserev = tree_id[sep + 2:]

    def getPatch(self, res):
        d = self.dovc(["diff"])
        d.addCallback(self.readPatch, self.patchlevel)
        return d
+
class TlaExtractor(SourceStampExtractor):
    """Extract a SourceStamp from a 'tla' (GNU arch) tree."""
    patchlevel = 1
    vcexe = "tla"

    def getBaseRevision(self):
        # 'tla logs --full' gives us ARCHIVE/BRANCH--REVISION
        # 'tla logs' gives us REVISION
        d = self.dovc(["logs", "--full", "--reverse"])
        d.addCallback(self.parseStatus)
        return d

    def parseStatus(self, res):
        # only the first output line matters; it has the same
        # ARCHIVE/BRANCH--REVISION shape as baz's tree-id
        tree_id = res.split("\n")[0].strip()
        cut = tree_id.index("/")
        sep = tree_id.rindex("--")
        self.branch = tree_id[cut + 1:sep]
        self.baserev = tree_id[sep + 2:]

    def getPatch(self, res):
        d = self.dovc(["changes", "--diffs"])
        d.addCallback(self.readPatch, self.patchlevel)
        return d
+
class BzrExtractor(SourceStampExtractor):
    """Extract a SourceStamp from a bzr branch."""
    patchlevel = 0
    vcexe = "bzr"

    def getBaseRevision(self):
        d = self.dovc(["version-info"])
        d.addCallback(self.get_revision_number)
        return d

    def get_revision_number(self, out):
        # 'bzr version-info' emits 'key: value' lines; we want 'revno'
        for line in out.split("\n"):
            colon = line.find(":")
            if colon == -1:
                continue
            key = line[:colon]
            value = line[colon + 2:]
            if key == "revno":
                self.baserev = int(value)
                return
        raise ValueError("unable to find revno: in bzr output: '%s'" % out)

    def getPatch(self, res):
        d = self.dovc(["diff"])
        d.addCallback(self.readPatch, self.patchlevel)
        return d
+
class MercurialExtractor(SourceStampExtractor):
    """Extract a SourceStamp from a Mercurial working copy."""
    patchlevel = 1
    vcexe = "hg"

    def getBaseRevision(self):
        d = self.dovc(["identify"])
        d.addCallback(self.parseStatus)
        return d

    def parseStatus(self, output):
        # the leading word of 'hg identify' is the changeset id
        self.baserev = re.search(r'^(\w+)', output).group(0)

    def getPatch(self, res):
        d = self.dovc(["diff"])
        d.addCallback(self.readPatch, self.patchlevel)
        return d
+
class DarcsExtractor(SourceStampExtractor):
    """Extract a SourceStamp from a darcs tree."""
    patchlevel = 1
    vcexe = "darcs"

    def getBaseRevision(self):
        d = self.dovc(["changes", "--context"])
        d.addCallback(self.parseStatus)
        return d

    def parseStatus(self, res):
        # darcs has no linear revision numbers: the whole context file *is*
        # the base-revision identifier
        self.baserev = res

    def getPatch(self, res):
        d = self.dovc(["diff", "-u"])
        d.addCallback(self.readPatch, self.patchlevel)
        return d
+
class GitExtractor(SourceStampExtractor):
    """Extract a SourceStamp from a git working tree.

    The base revision is the SHA1 of the current branch head, or of the
    tracked remote branch (or an explicitly requested branch) when one is
    available.
    """
    patchlevel = 1
    vcexe = "git"

    def getBaseRevision(self):
        d = self.dovc(["branch", "--no-color", "-v", "--no-abbrev"])
        d.addCallback(self.parseStatus)
        return d

    def readConfig(self):
        d = self.dovc(["config", "-l"])
        d.addCallback(self.parseConfig)
        return d

    def parseConfig(self, res):
        git_config = {}
        for l in res.split("\n"):
            if l.strip():
                # BUGFIX: split on the first '=' only. The old maxsplit of 2
                # silently truncated any value that itself contains an '='
                # (e.g. URLs with query parameters).
                parts = l.strip().split("=", 1)
                git_config[parts[0]] = parts[1]

        # If we're tracking a remote, consider that the base.
        remote = git_config.get("branch." + self.branch + ".remote")
        ref = git_config.get("branch." + self.branch + ".merge")
        if remote and ref:
            remote_branch = ref.split("/", 3)[-1]
            d = self.dovc(["rev-parse", remote + "/" + remote_branch])
            d.addCallback(self.override_baserev)
            return d

    def override_baserev(self, res):
        # replace the locally-derived baserev with the resolved SHA1
        self.baserev = res.strip()

    def parseStatus(self, res):
        # The current branch is marked by '*' at the start of the
        # line, followed by the branch name and the SHA1.
        #
        # Branch names may contain pretty much anything but whitespace.
        m = re.search(r'^\* (\S+)\s+([0-9a-f]{40})', res, re.MULTILINE)
        if m:
            self.baserev = m.group(2)
            # If a branch is specified, parse out the rev it points to
            # and extract the local name (assuming it has a slash).
            # This may break if someone specifies the name of a local
            # branch that has a slash in it and has no corresponding
            # remote branch (or something similarly contrived).
            if self.branch:
                d = self.dovc(["rev-parse", self.branch])
                if '/' in self.branch:
                    self.branch = self.branch.split('/', 1)[1]
                d.addCallback(self.override_baserev)
                return d
            else:
                self.branch = m.group(1)
                return self.readConfig()
        raise IndexError("Could not find current GIT branch: %s" % res)

    def getPatch(self, res):
        d = self.dovc(["diff", self.baserev])
        d.addCallback(self.readPatch, self.patchlevel)
        return d
+
def getSourceStamp(vctype, treetop, branch=None):
    """Build the extractor matching 'vctype' and return a Deferred that
    fires with a SourceStamp for the tree rooted at 'treetop'.

    Raises KeyError for an unrecognized vctype.
    """
    known = ("cvs", "svn", "baz", "bzr", "tla", "hg", "darcs", "git")
    if vctype not in known:
        raise KeyError("unknown vctype '%s'" % vctype)
    if vctype == "cvs":
        extractor = CVSExtractor(treetop, branch)
    elif vctype == "svn":
        extractor = SVNExtractor(treetop, branch)
    elif vctype == "baz":
        extractor = BazExtractor(treetop, branch)
    elif vctype == "bzr":
        extractor = BzrExtractor(treetop, branch)
    elif vctype == "tla":
        extractor = TlaExtractor(treetop, branch)
    elif vctype == "hg":
        extractor = MercurialExtractor(treetop, branch)
    elif vctype == "darcs":
        extractor = DarcsExtractor(treetop, branch)
    else:
        extractor = GitExtractor(treetop, branch)
    return extractor.get()
+
+
def ns(s):
    """Netstring-encode 's' as '<length>:<payload>,'."""
    return str(len(s)) + ":" + s + ","

def createJobfile(bsid, branch, baserev, patchlevel, diff, builderNames):
    """Serialize a 'try' job as a sequence of netstrings.

    Layout: format version ("1"), buildset id, branch, base revision,
    patch level, patch body, then one entry per builder name.
    """
    pieces = [ns("1"),
              ns(bsid),
              ns(branch),
              ns(str(baserev)),
              ns("%d" % patchlevel),
              ns(diff)]
    for name in builderNames:
        pieces.append(ns(name))
    return "".join(pieces)
+
def getTopdir(topfile, start=None):
    """Walk upwards from 'start' (default: the cwd) until a directory
    containing 'topfile' is found; return that directory.

    Raises ValueError if the filesystem root is reached -- or more than 20
    levels are climbed -- without finding it.
    """
    if not start:
        start = os.getcwd()
    here = start
    # bound the walk so a pathological tree cannot loop forever
    toomany = 20
    while toomany > 0:
        if os.path.exists(os.path.join(here, topfile)):
            return here
        # renamed from 'next' to avoid shadowing the builtin
        parent = os.path.dirname(here)
        if parent == here:
            break # we've hit the root
        here = parent
        toomany -= 1
    raise ValueError("Unable to find topfile '%s' anywhere from %s upwards"
                     % (topfile, start))
+
class RemoteTryPP(protocol.ProcessProtocol):
    """ProcessProtocol that feeds a serialized job to a remote
    'buildbot tryserver' (run over ssh) and mirrors its output.

    .d is a Deferred that fires with (signal, rc) on success, or errbacks
    if the remote process was killed by a signal or exited non-zero.
    """
    def __init__(self, job):
        self.job = job
        self.d = defer.Deferred()

    def connectionMade(self):
        # push the jobfile into the remote command's stdin, then close it
        # so the server knows the job is complete
        self.transport.write(self.job)
        self.transport.closeStdin()

    def outReceived(self, data):
        sys.stdout.write(data)

    def errReceived(self, data):
        sys.stderr.write(data)

    def processEnded(self, status_object):
        sig = status_object.value.signal
        rc = status_object.value.exitCode
        # idiom fix: identity comparison with None instead of '!='
        if sig is not None or rc != 0:
            self.d.errback(RuntimeError("remote 'buildbot tryserver' failed"
                                        ": sig=%s, rc=%s" % (sig, rc)))
            return
        self.d.callback((sig, rc))
+
class BuildSetStatusGrabber:
    """Poll a remote Status object until the BuildSetStatus matching our
    buildset id shows up, or retries are exhausted."""

    retryCount = 5 # how many times do we try to grab the BuildSetStatus?
    retryDelay = 3 # seconds to wait between attempts

    def __init__(self, status, bsid):
        self.status = status
        self.bsid = bsid

    def grab(self):
        # return a Deferred that either fires with the BuildSetStatus
        # reference or errbacks because we were unable to grab it
        self.d = defer.Deferred()
        # wait a second before querying to give the master's maildir watcher
        # a chance to see the job
        reactor.callLater(1, self.go)
        return self.d

    def go(self, dummy=None):
        if self.retryCount == 0:
            raise RuntimeError("couldn't find matching buildset")
        self.retryCount -= 1
        d = self.status.callRemote("getBuildSets")
        d.addCallback(self._gotSets)

    def _gotSets(self, buildsets):
        for bs, bsid in buildsets:
            if bsid == self.bsid:
                # found it
                self.d.callback(bs)
                return
        # not there yet: schedule another attempt after retryDelay
        retry = defer.Deferred()
        retry.addCallback(self.go)
        reactor.callLater(self.retryDelay, retry.callback, None)
+
+
+class Try(pb.Referenceable):
+ buildsetStatus = None
+ quiet = False
+
+ def __init__(self, config):
+ self.config = config
+ self.opts = runner.loadOptions()
+ self.connect = self.getopt('connect', 'try_connect')
+ assert self.connect, "you must specify a connect style: ssh or pb"
+ self.builderNames = self.getopt('builders', 'try_builders')
+
+ def getopt(self, config_name, options_name, default=None):
+ value = self.config.get(config_name)
+ if value is None or value == []:
+ value = self.opts.get(options_name)
+ if value is None or value == []:
+ value = default
+ return value
+
+ def createJob(self):
+ # returns a Deferred which fires when the job parameters have been
+ # created
+ opts = self.opts
+ # generate a random (unique) string. It would make sense to add a
+ # hostname and process ID here, but a) I suspect that would cause
+ # windows portability problems, and b) really this is good enough
+ self.bsid = "%d-%s" % (time.time(), random.randint(0, 1000000))
+
+ # common options
+ branch = self.getopt("branch", "try_branch")
+
+ difffile = self.config.get("diff")
+ if difffile:
+ baserev = self.config.get("baserev")
+ if difffile == "-":
+ diff = sys.stdin.read()
+ else:
+ diff = open(difffile,"r").read()
+ patch = (self.config['patchlevel'], diff)
+ ss = SourceStamp(branch, baserev, patch)
+ d = defer.succeed(ss)
+ else:
+ vc = self.getopt("vc", "try_vc")
+ if vc in ("cvs", "svn"):
+ # we need to find the tree-top
+ topdir = self.getopt("try_topdir", "try_topdir")
+ if topdir:
+ treedir = os.path.expanduser(topdir)
+ else:
+ topfile = self.getopt("try-topfile", "try_topfile")
+ treedir = getTopdir(topfile)
+ else:
+ treedir = os.getcwd()
+ d = getSourceStamp(vc, treedir, branch)
+ d.addCallback(self._createJob_1)
+ return d
+
+ def _createJob_1(self, ss):
+ self.sourcestamp = ss
+ if self.connect == "ssh":
+ patchlevel, diff = ss.patch
+ revspec = ss.revision
+ if revspec is None:
+ revspec = ""
+ self.jobfile = createJobfile(self.bsid,
+ ss.branch or "", revspec,
+ patchlevel, diff,
+ self.builderNames)
+
+ def fakeDeliverJob(self):
+ # Display the job to be delivered, but don't perform delivery.
+ ss = self.sourcestamp
+ print ("Job:\n\tBranch: %s\n\tRevision: %s\n\tBuilders: %s\n%s"
+ % (ss.branch,
+ ss.revision,
+ self.builderNames,
+ ss.patch[1]))
+ d = defer.Deferred()
+ d.callback(True)
+ return d
+
+ def deliverJob(self):
+ # returns a Deferred that fires when the job has been delivered
+ opts = self.opts
+
+ if self.connect == "ssh":
+ tryhost = self.getopt("tryhost", "try_host")
+ tryuser = self.getopt("username", "try_username")
+ trydir = self.getopt("trydir", "try_dir")
+
+ argv = ["ssh", "-l", tryuser, tryhost,
+ "buildbot", "tryserver", "--jobdir", trydir]
+ # now run this command and feed the contents of 'job' into stdin
+
+ pp = RemoteTryPP(self.jobfile)
+ p = reactor.spawnProcess(pp, argv[0], argv, os.environ)
+ d = pp.d
+ return d
+ if self.connect == "pb":
+ user = self.getopt("username", "try_username")
+ passwd = self.getopt("passwd", "try_password")
+ master = self.getopt("master", "try_master")
+ tryhost, tryport = master.split(":")
+ tryport = int(tryport)
+ f = pb.PBClientFactory()
+ d = f.login(credentials.UsernamePassword(user, passwd))
+ reactor.connectTCP(tryhost, tryport, f)
+ d.addCallback(self._deliverJob_pb)
+ return d
+ raise RuntimeError("unknown connecttype '%s', should be 'ssh' or 'pb'"
+ % self.connect)
+
+ def _deliverJob_pb(self, remote):
+ ss = self.sourcestamp
+
+ d = remote.callRemote("try",
+ ss.branch,
+ ss.revision,
+ ss.patch,
+ self.builderNames,
+ self.config.get('properties', {}))
+ d.addCallback(self._deliverJob_pb2)
+ return d
+ def _deliverJob_pb2(self, status):
+ self.buildsetStatus = status
+ return status
+
+ def getStatus(self):
+ # returns a Deferred that fires when the builds have finished, and
+ # may emit status messages while we wait
+ wait = bool(self.getopt("wait", "try_wait", False))
+ if not wait:
+ # TODO: emit the URL where they can follow the builds. This
+ # requires contacting the Status server over PB and doing
+ # getURLForThing() on the BuildSetStatus. To get URLs for
+ # individual builds would require we wait for the builds to
+ # start.
+ print "not waiting for builds to finish"
+ return
+ d = self.running = defer.Deferred()
+ if self.buildsetStatus:
+ self._getStatus_1()
+ # contact the status port
+ # we're probably using the ssh style
+ master = self.getopt("master", "masterstatus")
+ host, port = master.split(":")
+ port = int(port)
+ self.announce("contacting the status port at %s:%d" % (host, port))
+ f = pb.PBClientFactory()
+ creds = credentials.UsernamePassword("statusClient", "clientpw")
+ d = f.login(creds)
+ reactor.connectTCP(host, port, f)
+ d.addCallback(self._getStatus_ssh_1)
+ return self.running
+
+ def _getStatus_ssh_1(self, remote):
+ # find a remotereference to the corresponding BuildSetStatus object
+ self.announce("waiting for job to be accepted")
+ g = BuildSetStatusGrabber(remote, self.bsid)
+ d = g.grab()
+ d.addCallback(self._getStatus_1)
+ return d
+
+ def _getStatus_1(self, res=None):
+ if res:
+ self.buildsetStatus = res
+ # gather the set of BuildRequests
+ d = self.buildsetStatus.callRemote("getBuildRequests")
+ d.addCallback(self._getStatus_2)
+
+ def _getStatus_2(self, brs):
+ self.builderNames = []
+ self.buildRequests = {}
+
+ # self.builds holds the current BuildStatus object for each one
+ self.builds = {}
+
+ # self.outstanding holds the list of builderNames which haven't
+ # finished yet
+ self.outstanding = []
+
+ # self.results holds the list of build results. It holds a tuple of
+ # (result, text)
+ self.results = {}
+
+ # self.currentStep holds the name of the Step that each build is
+ # currently running
+ self.currentStep = {}
+
+ # self.ETA holds the expected finishing time (absolute time since
+ # epoch)
+ self.ETA = {}
+
+ for n,br in brs:
+ self.builderNames.append(n)
+ self.buildRequests[n] = br
+ self.builds[n] = None
+ self.outstanding.append(n)
+ self.results[n] = [None,None]
+ self.currentStep[n] = None
+ self.ETA[n] = None
+ # get new Builds for this buildrequest. We follow each one until
+ # it finishes or is interrupted.
+ br.callRemote("subscribe", self)
+
+ # now that those queries are in transit, we can start the
+ # display-status-every-30-seconds loop
+ self.printloop = task.LoopingCall(self.printStatus)
+ self.printloop.start(3, now=False)
+
+
+ # these methods are invoked by the status objects we've subscribed to
+
+ def remote_newbuild(self, bs, builderName):
+ if self.builds[builderName]:
+ self.builds[builderName].callRemote("unsubscribe", self)
+ self.builds[builderName] = bs
+ bs.callRemote("subscribe", self, 20)
+ d = bs.callRemote("waitUntilFinished")
+ d.addCallback(self._build_finished, builderName)
+
+ def remote_stepStarted(self, buildername, build, stepname, step):
+ self.currentStep[buildername] = stepname
+
+ def remote_stepFinished(self, buildername, build, stepname, step, results):
+ pass
+
+ def remote_buildETAUpdate(self, buildername, build, eta):
+ self.ETA[buildername] = now() + eta
+
+ def _build_finished(self, bs, builderName):
+ # we need to collect status from the newly-finished build. We don't
+ # remove the build from self.outstanding until we've collected
+ # everything we want.
+ self.builds[builderName] = None
+ self.ETA[builderName] = None
+ self.currentStep[builderName] = "finished"
+ d = bs.callRemote("getResults")
+ d.addCallback(self._build_finished_2, bs, builderName)
+ return d
+ def _build_finished_2(self, results, bs, builderName):
+ self.results[builderName][0] = results
+ d = bs.callRemote("getText")
+ d.addCallback(self._build_finished_3, builderName)
+ return d
+ def _build_finished_3(self, text, builderName):
+ self.results[builderName][1] = text
+
+ self.outstanding.remove(builderName)
+ if not self.outstanding:
+ # all done
+ return self.statusDone()
+
+ def printStatus(self):
+ names = self.buildRequests.keys()
+ names.sort()
+ for n in names:
+ if n not in self.outstanding:
+ # the build is finished, and we have results
+ code,text = self.results[n]
+ t = builder.Results[code]
+ if text:
+ t += " (%s)" % " ".join(text)
+ elif self.builds[n]:
+ t = self.currentStep[n] or "building"
+ if self.ETA[n]:
+ t += " [ETA %ds]" % (self.ETA[n] - now())
+ else:
+ t = "no build"
+ self.announce("%s: %s" % (n, t))
+ self.announce("")
+
+ def statusDone(self):
+ self.printloop.stop()
+ print "All Builds Complete"
+ # TODO: include a URL for all failing builds
+ names = self.buildRequests.keys()
+ names.sort()
+ happy = True
+ for n in names:
+ code,text = self.results[n]
+ t = "%s: %s" % (n, builder.Results[code])
+ if text:
+ t += " (%s)" % " ".join(text)
+ print t
+ if self.results[n] != builder.SUCCESS:
+ happy = False
+
+ if happy:
+ self.exitcode = 0
+ else:
+ self.exitcode = 1
+ self.running.callback(self.exitcode)
+
+ def announce(self, message):
+ if not self.quiet:
+ print message
+
+ def run(self):
+ # we can't do spawnProcess until we're inside reactor.run(), so get
+ # funky
+ print "using '%s' connect method" % self.connect
+ self.exitcode = 0
+ d = defer.Deferred()
+ d.addCallback(lambda res: self.createJob())
+ d.addCallback(lambda res: self.announce("job created"))
+ deliver = self.deliverJob
+ if bool(self.config.get("dryrun")):
+ deliver = self.fakeDeliverJob
+ d.addCallback(lambda res: deliver())
+ d.addCallback(lambda res: self.announce("job has been delivered"))
+ d.addCallback(lambda res: self.getStatus())
+ d.addErrback(log.err)
+ d.addCallback(self.cleanup)
+ d.addCallback(lambda res: reactor.stop())
+
+ reactor.callLater(0, d.callback, None)
+ reactor.run()
+ sys.exit(self.exitcode)
+
+ def logErr(self, why):
+ log.err(why)
+ print "error during 'try' processing"
+ print why
+
+ def cleanup(self, res=None):
+ if self.buildsetStatus:
+ self.buildsetStatus.broker.transport.loseConnection()
+
+
+
diff --git a/buildbot/buildbot/slave/__init__.py b/buildbot/buildbot/slave/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/buildbot/buildbot/slave/__init__.py
diff --git a/buildbot/buildbot/slave/bot.py b/buildbot/buildbot/slave/bot.py
new file mode 100644
index 0000000..4184d3d
--- /dev/null
+++ b/buildbot/buildbot/slave/bot.py
@@ -0,0 +1,510 @@
+
+import os.path
+
+import buildbot
+
+from twisted.spread import pb
+from twisted.python import log
+from twisted.internet import reactor, defer
+from twisted.application import service, internet
+from twisted.cred import credentials
+
+from buildbot.util import now
+from buildbot.pbutil import ReconnectingPBClientFactory
+from buildbot.slave import registry
+# make sure the standard commands get registered. This import is performed
+# for its side-effects.
+from buildbot.slave import commands
+# and make pyflakes think we aren't being stupid
+commands = commands
+
# pb-serializable errors shared between slave and master.
class NoCommandRunning(pb.Error):
    # NOTE(review): not raised in this file; presumably signals that an
    # operation needed a running command but none exists -- confirm usage
    pass
class WrongCommandRunning(pb.Error):
    # NOTE(review): not raised in this file; presumably signals that a
    # different command than expected is running -- confirm usage
    pass
class UnknownCommand(pb.Error):
    # raised by SlaveBuilder.remote_startCommand when the master asks for a
    # command that is not in the slave's registry
    pass
+
class Master:
    """Value object describing a buildmaster this slave should contact."""

    def __init__(self, host, port, username, password):
        # where to connect
        self.host = host
        self.port = port
        # how to authenticate once connected
        self.username = username
        self.password = password
+
class SlaveBuild:
    """Per-build scratchpad that can hold state from one step to another
    in the same build. All SlaveCommands have access to it."""

    def __init__(self, builder):
        # the SlaveBuilder running this build
        self.builder = builder
+
class SlaveBuilder(pb.Referenceable, service.Service):

    """This is the local representation of a single Builder: it handles a
    single kind of build (like an all-warnings build). It has a name and a
    home directory. The rest of its behavior is determined by the master.
    """

    stopCommandOnShutdown = True

    # remote is a ref to the Builder object on the master side, and is set
    # when they attach. We use it to detect when the connection to the master
    # is severed.
    remote = None

    # .build points to a SlaveBuild object, a new one for each build
    build = None

    # .command points to a SlaveCommand instance, and is set while the step
    # is running. We use it to implement the stopBuild method.
    command = None

    # .remoteStep is a ref to the master-side BuildStep object, and is set
    # when the step is started
    remoteStep = None

    def __init__(self, name, not_really):
        #service.Service.__init__(self) # Service has no __init__ method
        self.setName(name)
        # not_really is handed to each SlaveCommand factory (dry-run flag)
        self.not_really = not_really

    def __repr__(self):
        return "<SlaveBuilder '%s' at %d>" % (self.name, id(self))

    def setServiceParent(self, parent):
        # our parent service is the Bot
        service.Service.setServiceParent(self, parent)
        self.bot = self.parent
        # note that self.parent will go away when the buildmaster's config
        # file changes and this Builder is removed (possibly because it has
        # been changed, so the Builder will be re-added again in a moment).
        # This may occur during a build, while a step is running.

    def setBuilddir(self, builddir):
        """Point at (and create, if missing) our working directory, which
        lives under the Bot's basedir."""
        assert self.parent
        self.builddir = builddir
        self.basedir = os.path.join(self.bot.basedir, self.builddir)
        if not os.path.isdir(self.basedir):
            os.mkdir(self.basedir)

    def stopService(self):
        service.Service.stopService(self)
        if self.stopCommandOnShutdown:
            self.stopCommand()

    def activity(self):
        # tell the BotFactory's keepalive machinery that traffic happened
        bot = self.parent
        if bot:
            buildslave = bot.parent
            if buildslave:
                bf = buildslave.bf
                bf.activity()

    def remote_setMaster(self, remote):
        # 'remote' is the master-side Builder; watch it for disconnects
        self.remote = remote
        self.remote.notifyOnDisconnect(self.lostRemote)
    def remote_print(self, message):
        log.msg("SlaveBuilder.remote_print(%s): message from master: %s" %
                (self.name, message))
        if message == "ping":
            return self.remote_ping()

    def remote_ping(self):
        """Answer a ping from the master; debugOpts can stall or fail the
        response for testing purposes."""
        log.msg("SlaveBuilder.remote_ping(%s)" % self)
        if self.bot and self.bot.parent:
            debugOpts = self.bot.parent.debugOpts
            if debugOpts.get("stallPings"):
                log.msg(" debug_stallPings")
                timeout, timers = debugOpts["stallPings"]
                d = defer.Deferred()
                t = reactor.callLater(timeout, d.callback, None)
                timers.append(t)
                return d
            if debugOpts.get("failPingOnce"):
                log.msg(" debug_failPingOnce")
                class FailPingError(pb.Error): pass
                del debugOpts['failPingOnce']
                raise FailPingError("debug_failPingOnce means we should fail")

    def lostRemote(self, remote):
        log.msg("lost remote")
        self.remote = None

    def lostRemoteStep(self, remotestep):
        log.msg("lost remote step")
        self.remoteStep = None
        if self.stopCommandOnShutdown:
            self.stopCommand()

    # the following are Commands that can be invoked by the master-side
    # Builder
    def remote_startBuild(self):
        """This is invoked before the first step of any new build is run. It
        creates a new SlaveBuild object, which holds slave-side state from
        one step to the next."""
        self.build = SlaveBuild(self)
        log.msg("%s.startBuild" % self)

    def remote_startCommand(self, stepref, stepId, command, args):
        """
        This gets invoked by L{buildbot.process.step.RemoteCommand.start}, as
        part of various master-side BuildSteps, to start various commands
        that actually do the build. I return nothing. Eventually I will call
        .commandComplete() to notify the master-side RemoteCommand that I'm
        done.
        """

        self.activity()

        if self.command:
            log.msg("leftover command, dropping it")
            self.stopCommand()

        try:
            factory, version = registry.commandRegistry[command]
        except KeyError:
            raise UnknownCommand, "unrecognized SlaveCommand '%s'" % command
        self.command = factory(self, stepId, args)

        log.msg(" startCommand:%s [id %s]" % (command,stepId))
        # keep a ref to the master-side step so we can stream updates to it
        self.remoteStep = stepref
        self.remoteStep.notifyOnDisconnect(self.lostRemoteStep)
        d = self.command.doStart()
        d.addCallback(lambda res: None)
        d.addBoth(self.commandComplete)
        return None

    def remote_interruptCommand(self, stepId, why):
        """Halt the current step."""
        log.msg("asked to interrupt current command: %s" % why)
        self.activity()
        if not self.command:
            # TODO: just log it, a race could result in their interrupting a
            # command that wasn't actually running
            log.msg(" .. but none was running")
            return
        self.command.doInterrupt()


    def stopCommand(self):
        """Make any currently-running command die, with no further status
        output. This is used when the buildslave is shutting down or the
        connection to the master has been lost. Interrupt the command,
        silence it, and then forget about it."""
        if not self.command:
            return
        log.msg("stopCommand: halting current command %s" % self.command)
        self.command.doInterrupt() # shut up! and die!
        self.command = None # forget you!

    # sendUpdate is invoked by the Commands we spawn
    def sendUpdate(self, data):
        """This sends the status update to the master-side
        L{buildbot.process.step.RemoteCommand} object, giving it a sequence
        number in the process. It adds the update to a queue, and asks the
        master to acknowledge the update so it can be removed from that
        queue."""

        if not self.running:
            # .running comes from service.Service, and says whether the
            # service is running or not. If we aren't running, don't send any
            # status messages.
            return
        # the update[1]=0 comes from the leftover 'updateNum', which the
        # master still expects to receive. Provide it to avoid significant
        # interoperability issues between new slaves and old masters.
        if self.remoteStep:
            update = [data, 0]
            updates = [update]
            d = self.remoteStep.callRemote("update", updates)
            d.addCallback(self.ackUpdate)
            d.addErrback(self._ackFailed, "SlaveBuilder.sendUpdate")

    def ackUpdate(self, acknum):
        self.activity() # update the "last activity" timer

    def ackComplete(self, dummy):
        self.activity() # update the "last activity" timer

    def _ackFailed(self, why, where):
        log.msg("SlaveBuilder._ackFailed:", where)
        #log.err(why) # we don't really care


    # this is fired by the Deferred attached to each Command
    def commandComplete(self, failure):
        if failure:
            log.msg("SlaveBuilder.commandFailed", self.command)
            log.err(failure)
            # failure, if present, is a failure.Failure. To send it across
            # the wire, we must turn it into a pb.CopyableFailure.
            failure = pb.CopyableFailure(failure)
            failure.unsafeTracebacks = True
        else:
            # failure is None
            log.msg("SlaveBuilder.commandComplete", self.command)
        self.command = None
        if not self.running:
            log.msg(" but we weren't running, quitting silently")
            return
        if self.remoteStep:
            self.remoteStep.dontNotifyOnDisconnect(self.lostRemoteStep)
            d = self.remoteStep.callRemote("complete", failure)
            d.addCallback(self.ackComplete)
            d.addErrback(self._ackFailed, "sendComplete")
            self.remoteStep = None


    def remote_shutdown(self):
        print "slave shutting down on command from master"
        reactor.stop()
+
+
class Bot(pb.Referenceable, service.MultiService):
    """I represent the slave-side bot."""
    usePTY = None
    name = "bot"

    def __init__(self, basedir, usePTY, not_really=0):
        service.MultiService.__init__(self)
        self.basedir = basedir
        self.usePTY = usePTY
        self.not_really = not_really
        # maps builder name -> SlaveBuilder
        self.builders = {}

    def startService(self):
        assert os.path.isdir(self.basedir)
        service.MultiService.startService(self)

    def remote_getDirs(self):
        """Return the names of all subdirectories of our basedir.

        BUGFIX: test each entry's full path. The old code passed the bare
        name to os.path.isdir(), which was only correct when the process
        working directory happened to be the basedir.
        """
        return [dirname for dirname in os.listdir(self.basedir)
                if os.path.isdir(os.path.join(self.basedir, dirname))]

    def remote_getCommands(self):
        """Return a dict mapping registered SlaveCommand names to their
        version strings."""
        commands = {}
        for name, (factory, version) in registry.commandRegistry.items():
            commands[name] = version
        return commands

    def remote_setBuilderList(self, wanted):
        """Synchronize our SlaveBuilders with the master's list.

        'wanted' is a list of (name, builddir) pairs: existing builders are
        re-pointed at their (possibly changed) builddir, missing ones are
        created, and builders no longer wanted are detached. Returns a dict
        mapping builder name to SlaveBuilder.
        """
        retval = {}
        wanted_dirs = ["info"]
        for (name, builddir) in wanted:
            wanted_dirs.append(builddir)
            b = self.builders.get(name, None)
            if b:
                if b.builddir != builddir:
                    log.msg("changing builddir for builder %s from %s to %s" \
                            % (name, b.builddir, builddir))
                    b.setBuilddir(builddir)
            else:
                b = SlaveBuilder(name, self.not_really)
                b.usePTY = self.usePTY
                b.setServiceParent(self)
                b.setBuilddir(builddir)
                self.builders[name] = b
            retval[name] = b

        # drop builders the master no longer wants; iterate over a copy of
        # the keys so we can delete from the dict as we go
        wanted_names = [pair[0] for pair in wanted]
        for name in list(self.builders.keys()):
            if not name in wanted_names:
                log.msg("removing old builder %s" % name)
                self.builders[name].disownServiceParent()
                del(self.builders[name])

        for d in os.listdir(self.basedir):
            # BUGFIX: check the full path, not a cwd-relative name
            if os.path.isdir(os.path.join(self.basedir, d)):
                if d not in wanted_dirs:
                    log.msg("I have a leftover directory '%s' that is not "
                            "being used by the buildmaster: you can delete "
                            "it now" % d)
        return retval

    def remote_print(self, message):
        log.msg("message from master:", message)

    def remote_getSlaveInfo(self):
        """This command retrieves data from the files in SLAVEDIR/info/* and
        sends the contents to the buildmaster. These are used to describe
        the slave and its configuration, and should be created and
        maintained by the slave administrator. They will be retrieved each
        time the master-slave connection is established.
        """

        files = {}
        basedir = os.path.join(self.basedir, "info")
        if not os.path.isdir(basedir):
            return files
        for f in os.listdir(basedir):
            filename = os.path.join(basedir, f)
            if os.path.isfile(filename):
                files[f] = open(filename, "r").read()
        return files
+
class BotFactory(ReconnectingPBClientFactory):
    # 'keepaliveInterval' serves two purposes. The first is to keep the
    # connection alive: it guarantees that there will be at least some
    # traffic once every 'keepaliveInterval' seconds, which may help keep an
    # interposed NAT gateway from dropping the address mapping because it
    # thinks the connection has been abandoned. The second is to put an upper
    # limit on how long the buildmaster might have gone away before we notice
    # it. For this second purpose, we insist upon seeing *some* evidence of
    # the buildmaster at least once every 'keepaliveInterval' seconds.
    keepaliveInterval = None # None = do not use keepalives

    # 'keepaliveTimeout' seconds before the interval expires, we will send a
    # keepalive request, both to add some traffic to the connection, and to
    # prompt a response from the master in case all our builders are idle. We
    # don't insist upon receiving a timely response from this message: a slow
    # link might put the request at the wrong end of a large build message.
    keepaliveTimeout = 30 # how long we will go without a response

    # 'maxDelay' determines the maximum amount of time the slave will wait
    # between connection retries
    maxDelay = 300

    keepaliveTimer = None   # DelayedCall for doKeepalive, or None
    activityTimer = None    # DelayedCall for checkActivity, or None
    lastActivity = 0        # timestamp of last evidence of the master
    unsafeTracebacks = 1
    perspective = None      # master-side avatar while connected, else None

    def __init__(self, keepaliveInterval, keepaliveTimeout, maxDelay):
        ReconnectingPBClientFactory.__init__(self)
        self.maxDelay = maxDelay
        self.keepaliveInterval = keepaliveInterval
        self.keepaliveTimeout = keepaliveTimeout

    def startedConnecting(self, connector):
        ReconnectingPBClientFactory.startedConnecting(self, connector)
        self.connector = connector

    def gotPerspective(self, perspective):
        """Connection (re-)established: remember the master's perspective,
        try to enable TCP-level keepalives, and start our timers."""
        ReconnectingPBClientFactory.gotPerspective(self, perspective)
        self.perspective = perspective
        try:
            perspective.broker.transport.setTcpKeepAlive(1)
        except:
            # NOTE(review): bare except -- presumably covers transports
            # without setTcpKeepAlive; we fall back to application-level
            # keepalives (default 10 minutes) instead.
            log.msg("unable to set SO_KEEPALIVE")
            if not self.keepaliveInterval:
                self.keepaliveInterval = 10*60
        self.activity()
        if self.keepaliveInterval:
            log.msg("sending application-level keepalives every %d seconds" \
                    % self.keepaliveInterval)
            self.startTimers()

    def clientConnectionFailed(self, connector, reason):
        self.connector = None
        ReconnectingPBClientFactory.clientConnectionFailed(self,
                                                           connector, reason)

    def clientConnectionLost(self, connector, reason):
        self.connector = None
        self.stopTimers()
        self.perspective = None
        ReconnectingPBClientFactory.clientConnectionLost(self,
                                                         connector, reason)

    def startTimers(self):
        assert self.keepaliveInterval
        assert not self.keepaliveTimer
        assert not self.activityTimer
        # Insist that doKeepalive fires before checkActivity. Really, it
        # needs to happen at least one RTT beforehand.
        assert self.keepaliveInterval > self.keepaliveTimeout

        # arrange to send a keepalive a little while before our deadline
        when = self.keepaliveInterval - self.keepaliveTimeout
        self.keepaliveTimer = reactor.callLater(when, self.doKeepalive)
        # and check for activity too
        self.activityTimer = reactor.callLater(self.keepaliveInterval,
                                               self.checkActivity)

    def stopTimers(self):
        if self.keepaliveTimer:
            self.keepaliveTimer.cancel()
            self.keepaliveTimer = None
        if self.activityTimer:
            self.activityTimer.cancel()
            self.activityTimer = None

    def activity(self, res=None):
        # record evidence of the master; compared in checkActivity.
        # accepts (and ignores) a result so it can be used as a callback.
        self.lastActivity = now()

    def doKeepalive(self):
        # send the keepalive request. If it fails outright, the connection
        # was already dropped, so just log and ignore.
        self.keepaliveTimer = None
        log.msg("sending app-level keepalive")
        d = self.perspective.callRemote("keepalive")
        d.addCallback(self.activity)
        d.addErrback(self.keepaliveLost)

    def keepaliveLost(self, f):
        log.msg("BotFactory.keepaliveLost")

    def checkActivity(self):
        """If nothing has been heard from the master for a full interval,
        assume it is gone and drop the connection; the reconnecting
        factory will then retry. Otherwise re-arm the timers."""
        self.activityTimer = None
        if self.lastActivity + self.keepaliveInterval < now():
            log.msg("BotFactory.checkActivity: nothing from master for "
                    "%d secs" % (now() - self.lastActivity))
            self.perspective.broker.transport.loseConnection()
            return
        self.startTimers()

    def stopFactory(self):
        ReconnectingPBClientFactory.stopFactory(self)
        self.stopTimers()
+
+
class BuildSlave(service.MultiService):
    """Top-level service of a slave process: owns the Bot and the
    auto-reconnecting TCP connection to the buildmaster."""
    botClass = Bot

    # debugOpts is a dictionary used during unit tests.

    # debugOpts['stallPings'] can be set to a tuple of (timeout, []). Any
    # calls to remote_print will stall for 'timeout' seconds before
    # returning. The DelayedCalls used to implement this are stashed in the
    # list so they can be cancelled later.

    # debugOpts['failPingOnce'] can be set to True to make the slaveping fail
    # exactly once.

    def __init__(self, buildmaster_host, port, name, passwd, basedir,
                 keepalive, usePTY, keepaliveTimeout=30, umask=None,
                 maxdelay=300, debugOpts=None):
        # keepalive: app-level keepalive interval in seconds; 0 disables it
        # umask: if not None, applied via os.umask() in startService
        log.msg("Creating BuildSlave -- buildbot.version: %s" % buildbot.version)
        service.MultiService.__init__(self)
        # avoid the mutable-default-argument pitfall: the old 'debugOpts={}'
        # default shared a single dict across every instance. None is the
        # sentinel; passing a dict explicitly behaves exactly as before.
        if debugOpts is None:
            debugOpts = {}
        self.debugOpts = debugOpts.copy()
        bot = self.botClass(basedir, usePTY)
        bot.setServiceParent(self)
        self.bot = bot
        if keepalive == 0:
            keepalive = None    # BotFactory treats None as "no keepalives"
        self.umask = umask
        bf = self.bf = BotFactory(keepalive, keepaliveTimeout, maxdelay)
        bf.startLogin(credentials.UsernamePassword(name, passwd), client=bot)
        self.connection = c = internet.TCPClient(buildmaster_host, port, bf)
        c.setServiceParent(self)

    def waitUntilDisconnected(self):
        # utility method for testing. Returns a Deferred that will fire when
        # we lose the connection to the master.
        if not self.bf.perspective:
            return defer.succeed(None)
        d = defer.Deferred()
        self.bf.perspective.notifyOnDisconnect(lambda res: d.callback(None))
        return d

    def startService(self):
        if self.umask is not None:
            os.umask(self.umask)
        service.MultiService.startService(self)

    def stopService(self):
        self.bf.continueTrying = 0
        self.bf.stopTrying()
        service.MultiService.stopService(self)
        # now kill the TCP connection
        # twisted >2.0.1 does this for us, and leaves _connection=None
        # NOTE(review): relies on TCPClient's private _connection attribute
        if self.connection._connection:
            self.connection._connection.disconnect()
diff --git a/buildbot/buildbot/slave/commands.py b/buildbot/buildbot/slave/commands.py
new file mode 100644
index 0000000..45b9e99
--- /dev/null
+++ b/buildbot/buildbot/slave/commands.py
@@ -0,0 +1,2788 @@
+# -*- test-case-name: buildbot.test.test_slavecommand -*-
+
+import os, re, signal, shutil, types, time
+from stat import ST_CTIME, ST_MTIME, ST_SIZE
+
+from zope.interface import implements
+from twisted.internet.protocol import ProcessProtocol
+from twisted.internet import reactor, defer, task
+from twisted.python import log, failure, runtime
+from twisted.python.procutils import which
+
+from buildbot.slave.interfaces import ISlaveCommand
+from buildbot.slave.registry import registerSlaveCommand
+
+# this used to be a CVS $-style "Revision" auto-updated keyword, but since I
+# moved to Darcs as the primary repository, this is updated manually each
+# time this file is changed. The last cvs_ver that was here was 1.51 .
+command_version = "2.8"
+
+# version history:
+# >=1.17: commands are interruptable
+# >=1.28: Arch understands 'revision', added Bazaar
+# >=1.33: Source classes understand 'retry'
+# >=1.39: Source classes correctly handle changes in branch (except Git)
+# Darcs accepts 'revision' (now all do but Git) (well, and P4Sync)
+# Arch/Baz should accept 'build-config'
+# >=1.51: (release 0.7.3)
+# >= 2.1: SlaveShellCommand now accepts 'initial_stdin', 'keep_stdin_open',
+# and 'logfiles'. It now sends 'log' messages in addition to
+# stdout/stdin/header/rc. It acquired writeStdin/closeStdin methods,
+# but these are not remotely callable yet.
+# (not externally visible: ShellCommandPP has writeStdin/closeStdin.
+# ShellCommand accepts new arguments (logfiles=, initialStdin=,
+# keepStdinOpen=) and no longer accepts stdin=)
+# (release 0.7.4)
+# >= 2.2: added monotone, uploadFile, and downloadFile (release 0.7.5)
+# >= 2.3: added bzr (release 0.7.6)
+# >= 2.4: Git understands 'revision' and branches
+# >= 2.5: workaround added for remote 'hg clone --rev REV' when hg<0.9.2
+# >= 2.6: added uploadDirectory
+# >= 2.7: added usePTY option to SlaveShellCommand
+# >= 2.8: added username and password args to SVN class
+
class CommandInterrupted(Exception):
    """Marker exception for a slave command that was interrupted."""
    pass
class TimeoutError(Exception):
    """Raised when a command could not be terminated (see doBackupTimeout)."""
    pass
+
class Obfuscated:
    """An obfuscated string in a command.

    str()/repr() yield the fake (loggable) form; the real value is only
    recovered via get_real() when the command is actually executed.
    """
    def __init__(self, real, fake):
        self.real = real    # the actual value handed to the child process
        self.fake = fake    # the placeholder shown in logs and status

    def __str__(self):
        return self.fake

    def __repr__(self):
        # repr() call instead of the deprecated backtick syntax;
        # behavior is identical
        return repr(self.fake)

    def get_real(command):
        """Return 'command' with Obfuscated elements replaced by .real.

        Non-list commands (e.g. shell strings) are returned unchanged.
        """
        rv = command
        # isinstance() is the idiomatic spelling of the old
        # 'type(command) == types.ListType' check (same semantics)
        if isinstance(command, list):
            rv = []
            for elt in command:
                if isinstance(elt, Obfuscated):
                    rv.append(elt.real)
                else:
                    rv.append(elt)
        return rv
    get_real = staticmethod(get_real)

    def get_fake(command):
        """Return 'command' with Obfuscated elements replaced by .fake."""
        rv = command
        if isinstance(command, list):
            rv = []
            for elt in command:
                if isinstance(elt, Obfuscated):
                    rv.append(elt.fake)
                else:
                    rv.append(elt)
        return rv
    get_fake = staticmethod(get_fake)
+
class AbandonChain(Exception):
    """A series of chained steps can raise this exception to indicate that
    one of the intermediate ShellCommands has failed, such that there is no
    point in running the remainder. 'rc' should be the non-zero exit code of
    the failing ShellCommand."""

    def __repr__(self):
        # args[0] is the rc passed to the constructor
        return "<AbandonChain rc=%s>" % self.args[0]
+
def getCommand(name):
    """Return the full path of the first executable named 'name' on PATH.

    Raises RuntimeError if no matching executable can be found.
    """
    possibles = which(name)
    if not possibles:
        raise RuntimeError("Couldn't find executable for '%s'" % name)
    return possibles[0]
+
def rmdirRecursive(dir):
    """This is a replacement for shutil.rmtree that works better under
    windows. Thanks to Bear at the OSAF for the code."""
    if not os.path.exists(dir):
        return

    if os.path.islink(dir):
        # remove the link itself; never follow it into its target
        os.remove(dir)
        return

    # Verify the directory is read/write/execute for the current user
    os.chmod(dir, 0700)

    for name in os.listdir(dir):
        full_name = os.path.join(dir, name)
        # on Windows, if we don't have write permission we can't remove
        # the file/directory either, so turn that on
        if os.name == 'nt':
            if not os.access(full_name, os.W_OK):
                # I think this is now redundant, but I don't have an NT
                # machine to test on, so I'm going to leave it in place
                # -warner
                os.chmod(full_name, 0600)

        if os.path.isdir(full_name):
            rmdirRecursive(full_name)
        else:
            # make the file removable before deleting it
            os.chmod(full_name, 0700)
            os.remove(full_name)
    os.rmdir(dir)
+
class ShellCommandPP(ProcessProtocol):
    """ProcessProtocol glue between the spawned child process and a
    ShellCommand: forwards stdout/stderr/exit status to the command, and
    buffers stdin written before the process connection exists."""
    debug = False

    def __init__(self, command):
        self.command = command
        self.pending_stdin = ""      # stdin queued until connectionMade
        self.stdin_finished = False  # set once closeStdin() is requested

    def writeStdin(self, data):
        assert not self.stdin_finished
        if self.connected:
            self.transport.write(data)
        else:
            # not connected yet: buffer, flushed in connectionMade
            self.pending_stdin += data

    def closeStdin(self):
        if self.connected:
            if self.debug: log.msg(" closing stdin")
            self.transport.closeStdin()
        # remember the request even if not connected yet; connectionMade
        # will perform the actual close
        self.stdin_finished = True

    def connectionMade(self):
        if self.debug:
            log.msg("ShellCommandPP.connectionMade")
        if not self.command.process:
            if self.debug:
                log.msg(" assigning self.command.process: %s" %
                        (self.transport,))
            self.command.process = self.transport

        # TODO: maybe we shouldn't close stdin when using a PTY. I can't test
        # this yet, recent debian glibc has a bug which causes thread-using
        # test cases to SIGHUP trial, and the workaround is to either run
        # the whole test with /bin/sh -c " ".join(argv) (way gross) or to
        # not use a PTY. Once the bug is fixed, I'll be able to test what
        # happens when you close stdin on a pty. My concern is that it will
        # SIGHUP the child (since we are, in a sense, hanging up on them).
        # But it may well be that keeping stdout open prevents the SIGHUP
        # from being sent.
        #if not self.command.usePTY:

        # flush anything buffered before the process existed
        if self.pending_stdin:
            if self.debug: log.msg(" writing to stdin")
            self.transport.write(self.pending_stdin)
        if self.stdin_finished:
            if self.debug: log.msg(" closing stdin")
            self.transport.closeStdin()

    def outReceived(self, data):
        if self.debug:
            log.msg("ShellCommandPP.outReceived")
        self.command.addStdout(data)

    def errReceived(self, data):
        if self.debug:
            log.msg("ShellCommandPP.errReceived")
        self.command.addStderr(data)

    def processEnded(self, status_object):
        if self.debug:
            log.msg("ShellCommandPP.processEnded", status_object)
        # status_object is a Failure wrapped around an
        # error.ProcessTerminated or and error.ProcessDone.
        # requires twisted >= 1.0.4 to overcome a bug in process.py
        sig = status_object.value.signal
        rc = status_object.value.exitCode
        self.command.finished(sig, rc)
+
class LogFileWatcher:
    """Polls a logfile written by a running command and streams any new
    data to the master via command.addLogfile()."""
    POLL_INTERVAL = 2  # seconds between polls

    def __init__(self, command, name, logfile):
        self.command = command
        self.name = name
        self.logfile = logfile
        log.msg("LogFileWatcher created to watch %s" % logfile)
        # we are created before the ShellCommand starts. If the logfile we're
        # supposed to be watching already exists, record its size and
        # ctime/mtime so we can tell when it starts to change.
        self.old_logfile_stats = self.statFile()
        self.started = False

        # every 2 seconds we check on the file again
        self.poller = task.LoopingCall(self.poll)

    def start(self):
        self.poller.start(self.POLL_INTERVAL).addErrback(self._cleanupPoll)

    def _cleanupPoll(self, err):
        log.err(err, msg="Polling error")
        self.poller = None

    def stop(self):
        # one final poll so trailing data is not lost
        self.poll()
        if self.poller is not None:
            self.poller.stop()
        if self.started:
            self.f.close()

    def statFile(self):
        # (ctime, mtime, size) tuple, or None if the file doesn't exist
        if os.path.exists(self.logfile):
            s = os.stat(self.logfile)
            return (s[ST_CTIME], s[ST_MTIME], s[ST_SIZE])
        return None

    def poll(self):
        if not self.started:
            s = self.statFile()
            if s == self.old_logfile_stats:
                return # not started yet
            if not s:
                # the file was there, but now it's deleted. Forget about the
                # initial state, clearly the process has deleted the logfile
                # in preparation for creating a new one.
                self.old_logfile_stats = None
                return # no file to work with
            self.f = open(self.logfile, "rb")
            self.started = True
        self.f.seek(self.f.tell(), 0)
        while True:
            data = self.f.read(10000)
            if not data:
                return
            self.command.addLogfile(self.name, data)
+
+
class ShellCommand:
    # This is a helper class, used by SlaveCommands to run programs in a
    # child shell.

    notreally = False        # if True, only log what would have been run
    BACKUP_TIMEOUT = 5       # secs to wait for 'finished' after a kill
    KILL = "KILL"            # signal name used for killing hung commands
    CHUNK_LIMIT = 128*1024   # max size of one status chunk sent over PB

    # For sending elapsed time:
    startTime = None
    elapsedTime = None
    # I wish we had easy access to CLOCK_MONOTONIC in Python:
    # http://www.opengroup.org/onlinepubs/000095399/functions/clock_getres.html
    # Then changes to the system clock during a run wouldn't effect the "elapsed
    # time" results.

    def __init__(self, builder, command,
                 workdir, environ=None,
                 sendStdout=True, sendStderr=True, sendRC=True,
                 timeout=None, initialStdin=None, keepStdinOpen=False,
                 keepStdout=False, keepStderr=False, logEnviron=True,
                 logfiles={}, usePTY="slave-config"):
        """

        @param keepStdout: if True, we keep a copy of all the stdout text
                           that we've seen. This copy is available in
                           self.stdout, which can be read after the command
                           has finished.
        @param keepStderr: same, for stderr

        @param usePTY: "slave-config" -> use the SlaveBuilder's usePTY;
                       otherwise, true to use a PTY, false to not use a PTY.
        """

        self.builder = builder
        # keep the real command for execution and the fake one for logging
        self.command = Obfuscated.get_real(command)
        self.fake_command = Obfuscated.get_fake(command)
        self.sendStdout = sendStdout
        self.sendStderr = sendStderr
        self.sendRC = sendRC
        self.logfiles = logfiles
        self.workdir = workdir
        # child environment = slave's environment overridden by 'environ'
        self.environ = os.environ.copy()
        if environ:
            if environ.has_key('PYTHONPATH'):
                ppath = environ['PYTHONPATH']
                # Need to do os.pathsep translation. We could either do that
                # by replacing all incoming ':'s with os.pathsep, or by
                # accepting lists. I like lists better.
                if not isinstance(ppath, str):
                    # If it's not a string, treat it as a sequence to be
                    # turned in to a string.
                    ppath = os.pathsep.join(ppath)

                if self.environ.has_key('PYTHONPATH'):
                    # special case, prepend the builder's items to the
                    # existing ones. This will break if you send over empty
                    # strings, so don't do that.
                    ppath = ppath + os.pathsep + self.environ['PYTHONPATH']

                environ['PYTHONPATH'] = ppath

            self.environ.update(environ)
        self.initialStdin = initialStdin
        self.keepStdinOpen = keepStdinOpen
        self.logEnviron = logEnviron
        self.timeout = timeout
        self.timer = None
        self.keepStdout = keepStdout
        self.keepStderr = keepStderr


        if usePTY == "slave-config":
            self.usePTY = self.builder.usePTY
        else:
            self.usePTY = usePTY

        # usePTY=True is a convenience for cleaning up all children and
        # grandchildren of a hung command. Fall back to usePTY=False on systems
        # and in situations where ptys cause problems. PTYs are posix-only,
        # and for .closeStdin to matter, we must use a pipe, not a PTY
        if runtime.platformType != "posix" or initialStdin is not None:
            if self.usePTY and usePTY != "slave-config":
                self.sendStatus({'header': "WARNING: disabling usePTY for this command"})
            self.usePTY = False

        self.logFileWatchers = []
        for name,filename in self.logfiles.items():
            w = LogFileWatcher(self, name,
                               os.path.join(self.workdir, filename))
            self.logFileWatchers.append(w)

    def __repr__(self):
        return "<slavecommand.ShellCommand '%s'>" % self.fake_command

    def sendStatus(self, status):
        # forward a status dict to the master via our SlaveBuilder
        self.builder.sendUpdate(status)

    def start(self):
        # return a Deferred which fires (with the exit code) when the command
        # completes
        if self.keepStdout:
            self.stdout = ""
        if self.keepStderr:
            self.stderr = ""
        self.deferred = defer.Deferred()
        try:
            self._startCommand()
        except:
            log.msg("error in ShellCommand._startCommand")
            log.err()
            # pretend it was a shell error
            self.deferred.errback(AbandonChain(-1))
        return self.deferred

    def _startCommand(self):
        # ensure workdir exists
        if not os.path.isdir(self.workdir):
            os.makedirs(self.workdir)
        log.msg("ShellCommand._startCommand")
        if self.notreally:
            self.sendStatus({'header': "command '%s' in dir %s" % \
                             (self.fake_command, self.workdir)})
            self.sendStatus({'header': "(not really)\n"})
            self.finished(None, 0)
            return

        self.pp = ShellCommandPP(self)

        # build argv: a string command goes through a shell, a list is
        # executed directly
        if type(self.command) in types.StringTypes:
            if runtime.platformType == 'win32':
                argv = os.environ['COMSPEC'].split() # allow %COMSPEC% to have args
                if '/c' not in argv: argv += ['/c']
                argv += [self.command]
            else:
                # for posix, use /bin/sh. for other non-posix, well, doesn't
                # hurt to try
                argv = ['/bin/sh', '-c', self.command]
            display = self.fake_command
        else:
            if runtime.platformType == 'win32':
                argv = os.environ['COMSPEC'].split() # allow %COMSPEC% to have args
                if '/c' not in argv: argv += ['/c']
                argv += list(self.command)
            else:
                argv = self.command
            display = " ".join(self.fake_command)

        # $PWD usually indicates the current directory; spawnProcess may not
        # update this value, though, so we set it explicitly here.
        self.environ['PWD'] = os.path.abspath(self.workdir)

        # self.stdin is handled in ShellCommandPP.connectionMade

        # first header line is the command in plain text, argv joined with
        # spaces. You should be able to cut-and-paste this into a shell to
        # obtain the same results. If there are spaces in the arguments, too
        # bad.
        log.msg(" " + display)
        self.sendStatus({'header': display+"\n"})

        # then comes the secondary information
        msg = " in dir %s" % (self.workdir,)
        if self.timeout:
            msg += " (timeout %d secs)" % (self.timeout,)
        log.msg(" " + msg)
        self.sendStatus({'header': msg+"\n"})

        msg = " watching logfiles %s" % (self.logfiles,)
        log.msg(" " + msg)
        self.sendStatus({'header': msg+"\n"})

        # then the obfuscated command array for resolving unambiguity
        msg = " argv: %s" % (self.fake_command,)
        log.msg(" " + msg)
        self.sendStatus({'header': msg+"\n"})

        # then the environment, since it sometimes causes problems
        if self.logEnviron:
            msg = " environment:\n"
            env_names = self.environ.keys()
            env_names.sort()
            for name in env_names:
                msg += " %s=%s\n" % (name, self.environ[name])
            log.msg(" environment: %s" % (self.environ,))
            self.sendStatus({'header': msg})

        if self.initialStdin:
            msg = " writing %d bytes to stdin" % len(self.initialStdin)
            log.msg(" " + msg)
            self.sendStatus({'header': msg+"\n"})

        if self.keepStdinOpen:
            msg = " leaving stdin open"
        else:
            msg = " closing stdin"
        log.msg(" " + msg)
        self.sendStatus({'header': msg+"\n"})

        msg = " using PTY: %s" % bool(self.usePTY)
        log.msg(" " + msg)
        self.sendStatus({'header': msg+"\n"})

        # this will be buffered until connectionMade is called
        if self.initialStdin:
            self.pp.writeStdin(self.initialStdin)
        if not self.keepStdinOpen:
            self.pp.closeStdin()

        # win32eventreactor's spawnProcess (under twisted <= 2.0.1) returns
        # None, as opposed to all the posixbase-derived reactors (which
        # return the new Process object). This is a nuisance. We can make up
        # for it by having the ProcessProtocol give us their .transport
        # attribute after they get one. I'd prefer to get it from
        # spawnProcess because I'm concerned about returning from this method
        # without having a valid self.process to work with. (if kill() were
        # called right after we return, but somehow before connectionMade
        # were called, then kill() would blow up).
        self.process = None
        self.startTime = time.time()
        p = reactor.spawnProcess(self.pp, argv[0], argv,
                                 self.environ,
                                 self.workdir,
                                 usePTY=self.usePTY)
        # connectionMade might have been called during spawnProcess
        if not self.process:
            self.process = p

        # connectionMade also closes stdin as long as we're not using a PTY.
        # This is intended to kill off inappropriately interactive commands
        # better than the (long) hung-command timeout. ProcessPTY should be
        # enhanced to allow the same childFDs argument that Process takes,
        # which would let us connect stdin to /dev/null .

        if self.timeout:
            self.timer = reactor.callLater(self.timeout, self.doTimeout)

        for w in self.logFileWatchers:
            w.start()


    def _chunkForSend(self, data):
        # limit the chunks that we send over PB to 128k, since it has a
        # hardwired string-size limit of 640k.
        LIMIT = self.CHUNK_LIMIT
        for i in range(0, len(data), LIMIT):
            yield data[i:i+LIMIT]

    def addStdout(self, data):
        if self.sendStdout:
            for chunk in self._chunkForSend(data):
                self.sendStatus({'stdout': chunk})
        if self.keepStdout:
            self.stdout += data
        if self.timer:
            # any output counts as activity: push the timeout back
            self.timer.reset(self.timeout)

    def addStderr(self, data):
        if self.sendStderr:
            for chunk in self._chunkForSend(data):
                self.sendStatus({'stderr': chunk})
        if self.keepStderr:
            self.stderr += data
        if self.timer:
            self.timer.reset(self.timeout)

    def addLogfile(self, name, data):
        for chunk in self._chunkForSend(data):
            self.sendStatus({'log': (name, chunk)})
        if self.timer:
            self.timer.reset(self.timeout)

    def finished(self, sig, rc):
        # called by ShellCommandPP when the child exits. 'sig' is the signal
        # that killed it (or None), 'rc' its exit code.
        self.elapsedTime = time.time() - self.startTime
        log.msg("command finished with signal %s, exit code %s, elapsedTime: %0.6f" % (sig,rc,self.elapsedTime))
        for w in self.logFileWatchers:
            # this will send the final updates
            w.stop()
        if sig is not None:
            rc = -1
        if self.sendRC:
            if sig is not None:
                self.sendStatus(
                    {'header': "process killed by signal %d\n" % sig})
            self.sendStatus({'rc': rc})
        self.sendStatus({'header': "elapsedTime=%0.6f\n" % self.elapsedTime})
        if self.timer:
            self.timer.cancel()
            self.timer = None
        d = self.deferred
        self.deferred = None
        if d:
            d.callback(rc)
        else:
            log.msg("Hey, command %s finished twice" % self)

    def failed(self, why):
        # errback path: something went wrong other than a normal exit
        log.msg("ShellCommand.failed: command failed: %s" % (why,))
        if self.timer:
            self.timer.cancel()
            self.timer = None
        d = self.deferred
        self.deferred = None
        if d:
            d.errback(why)
        else:
            log.msg("Hey, command %s finished twice" % self)

    def doTimeout(self):
        self.timer = None
        msg = "command timed out: %d seconds without output" % self.timeout
        self.kill(msg)

    def kill(self, msg):
        # This may be called by the timeout, or when the user has decided to
        # abort this build.
        if self.timer:
            self.timer.cancel()
            self.timer = None
        if hasattr(self.process, "pid"):
            msg += ", killing pid %d" % self.process.pid
        log.msg(msg)
        self.sendStatus({'header': "\n" + msg + "\n"})

        hit = 0
        if runtime.platformType == "posix":
            try:
                # really want to kill off all child processes too. Process
                # Groups are ideal for this, but that requires
                # spawnProcess(usePTY=1). Try both ways in case process was
                # not started that way.

                # the test suite sets self.KILL=None to tell us we should
                # only pretend to kill the child. This lets us test the
                # backup timer.

                sig = None
                if self.KILL is not None:
                    sig = getattr(signal, "SIG"+ self.KILL, None)

                if self.KILL == None:
                    log.msg("self.KILL==None, only pretending to kill child")
                elif sig is None:
                    log.msg("signal module is missing SIG%s" % self.KILL)
                elif not hasattr(os, "kill"):
                    log.msg("os module is missing the 'kill' function")
                else:
                    log.msg("trying os.kill(-pid, %d)" % (sig,))
                    # TODO: maybe use os.killpg instead of a negative pid?
                    os.kill(-self.process.pid, sig)
                    log.msg(" signal %s sent successfully" % sig)
                    hit = 1
            except OSError:
                # probably no-such-process, maybe because there is no process
                # group
                pass
        if not hit:
            try:
                if self.KILL is None:
                    log.msg("self.KILL==None, only pretending to kill child")
                else:
                    log.msg("trying process.signalProcess('KILL')")
                    self.process.signalProcess(self.KILL)
                    log.msg(" signal %s sent successfully" % (self.KILL,))
                    hit = 1
            except OSError:
                # could be no-such-process, because they finished very recently
                pass
        if not hit:
            log.msg("signalProcess/os.kill failed both times")

        if runtime.platformType == "posix":
            # we only do this under posix because the win32eventreactor
            # blocks here until the process has terminated, while closing
            # stderr. This is weird.
            self.pp.transport.loseConnection()

        # finished ought to be called momentarily. Just in case it doesn't,
        # set a timer which will abandon the command.
        self.timer = reactor.callLater(self.BACKUP_TIMEOUT,
                                       self.doBackupTimeout)

    def doBackupTimeout(self):
        log.msg("we tried to kill the process, and it wouldn't die.."
                " finish anyway")
        self.timer = None
        self.sendStatus({'header': "SIGKILL failed to kill process\n"})
        if self.sendRC:
            self.sendStatus({'header': "using fake rc=-1\n"})
            self.sendStatus({'rc': -1})
        self.failed(TimeoutError("SIGKILL failed to kill process"))


    def writeStdin(self, data):
        self.pp.writeStdin(data)

    def closeStdin(self):
        self.pp.closeStdin()
+
+
class Command:
    implements(ISlaveCommand)

    # NOTE(review): the string below follows the implements() call, so it is
    # an ordinary expression statement, not the class docstring (a docstring
    # must be the first statement in the class body).
    """This class defines one command that can be invoked by the build master.
    The command is executed on the slave side, and always sends back a
    completion message when it finishes. It may also send intermediate status
    as it runs (by calling builder.sendStatus). Some commands can be
    interrupted (either by the build master or a local timeout), in which
    case the step is expected to complete normally with a status message that
    indicates an error occurred.

    These commands are used by BuildSteps on the master side. Each kind of
    BuildStep uses a single Command. The slave must implement all the
    Commands required by the set of BuildSteps used for any given build:
    this is checked at startup time.

    All Commands are constructed with the same signature:
     c = CommandClass(builder, args)
    where 'builder' is the parent SlaveBuilder object, and 'args' is a
    dict that is interpreted per-command.

    The setup(args) method is available for setup, and is run from __init__.

    The Command is started with start(). This method must be implemented in a
    subclass, and it should return a Deferred. When your step is done, you
    should fire the Deferred (the results are not used). If the command is
    interrupted, it should fire the Deferred anyway.

    While the command runs. it may send status messages back to the
    buildmaster by calling self.sendStatus(statusdict). The statusdict is
    interpreted by the master-side BuildStep however it likes.

    A separate completion message is sent when the deferred fires, which
    indicates that the Command has finished, but does not carry any status
    data. If the Command needs to return an exit code of some sort, that
    should be sent as a regular status message before the deferred is fired .
    Once builder.commandComplete has been run, no more status messages may be
    sent.

    If interrupt() is called, the Command should attempt to shut down as
    quickly as possible. Child processes should be killed, new ones should
    not be started. The Command should send some kind of error status update,
    then complete as usual by firing the Deferred.

    .interrupted should be set by interrupt(), and can be tested to avoid
    sending multiple error status messages.

    If .running is False, the bot is shutting down (or has otherwise lost the
    connection to the master), and should not send any status messages. This
    is checked in Command.sendStatus .

    """

    # builder methods:
    #  sendStatus(dict) (zero or more)
    #  commandComplete() or commandInterrupted() (one, at end)

    debug = False
    interrupted = False
    running = False # set by Builder, cleared on shutdown or when the
                    # Deferred fires

    def __init__(self, builder, stepId, args):
        self.builder = builder
        self.stepId = stepId # just for logging
        self.args = args
        self.setup(args)

    def setup(self, args):
        """Override this in a subclass to extract items from the args dict."""
        pass

    def doStart(self):
        # wraps start() so that .running is cleared whichever way it ends
        self.running = True
        d = defer.maybeDeferred(self.start)
        d.addBoth(self.commandComplete)
        return d

    def start(self):
        """Start the command. This method should return a Deferred that will
        fire when the command has completed. The Deferred's argument will be
        ignored.

        This method should be overridden by subclasses."""
        raise NotImplementedError, "You must implement this in a subclass"

    def sendStatus(self, status):
        """Send a status update to the master."""
        if self.debug:
            log.msg("sendStatus", status)
        if not self.running:
            # connection lost or shutting down: drop the update
            log.msg("would sendStatus but not .running")
            return
        self.builder.sendUpdate(status)

    def doInterrupt(self):
        self.running = False
        self.interrupt()

    def interrupt(self):
        """Override this in a subclass to allow commands to be interrupted.
        May be called multiple times, test and set self.interrupted=True if
        this matters."""
        pass

    def commandComplete(self, res):
        self.running = False
        return res

    # utility methods, mostly used by SlaveShellCommand and the like

    def _abandonOnFailure(self, rc):
        # chainable callback: raise AbandonChain on a non-zero rc so the
        # remaining steps in the chain are skipped
        if type(rc) is not int:
            log.msg("weird, _abandonOnFailure was given rc=%s (%s)" % \
                    (rc, type(rc)))
        assert isinstance(rc, int)
        if rc != 0:
            raise AbandonChain(rc)
        return rc

    def _sendRC(self, res):
        self.sendStatus({'rc': 0})

    def _checkAbandoned(self, why):
        # errback: swallow AbandonChain, reporting its rc to the master
        log.msg("_checkAbandoned", why)
        why.trap(AbandonChain)
        log.msg(" abandoning chain", why.value)
        self.sendStatus({'rc': why.value.args[0]})
        return None
+
+
+
class SlaveFileUploadCommand(Command):
    """
    Upload a file from slave to build master
    Arguments:

    - ['workdir']: base directory to use
    - ['slavesrc']: name of the slave-side file to read from
    - ['writer']: RemoteReference to a transfer._FileWriter object
    - ['maxsize']: max size (in bytes) of file to write
    - ['blocksize']: max size for each data block
    """
    debug = False

    def setup(self, args):
        # stderr/rc accumulate the final status reported by finished()
        self.workdir = args['workdir']
        self.filename = args['slavesrc']
        self.writer = args['writer']
        self.remaining = args['maxsize']
        self.blocksize = args['blocksize']
        self.stderr = None
        self.rc = 0

    def start(self):
        """Open the source file and push it to the master one block per
        reactor turn. Returns a Deferred that fires after the remote
        writer has been closed."""
        if self.debug:
            log.msg('SlaveFileUploadCommand started')

        # Open file
        self.path = os.path.join(self.builder.basedir,
                                 self.workdir,
                                 os.path.expanduser(self.filename))
        try:
            self.fp = open(self.path, 'rb')
            if self.debug:
                log.msg('Opened %r for upload' % self.path)
        except (IOError, OSError):
            # was a bare 'except:', which would also have swallowed
            # KeyboardInterrupt/SystemExit; only file-access errors are
            # expected here (mirrors SlaveFileDownloadCommand).
            # TODO: this needs cleanup
            self.fp = None
            self.stderr = 'Cannot open file %r for upload' % self.path
            self.rc = 1
            if self.debug:
                log.msg('Cannot open file %r for upload' % self.path)

        self.sendStatus({'header': "sending %s" % self.path})

        d = defer.Deferred()
        reactor.callLater(0, self._loop, d)
        def _close(res):
            # close the file, but pass through any errors from _loop
            d1 = self.writer.callRemote("close")
            d1.addErrback(log.err)
            d1.addCallback(lambda ignored: res)
            return d1
        d.addBoth(_close)
        d.addBoth(self.finished)
        return d

    def _loop(self, fire_when_done):
        # send one block, then reschedule ourselves until _writeBlock
        # reports completion (True) or fails
        d = defer.maybeDeferred(self._writeBlock)
        def _done(finished):
            if finished:
                fire_when_done.callback(None)
            else:
                self._loop(fire_when_done)
        def _err(why):
            fire_when_done.errback(why)
        d.addCallbacks(_done, _err)
        return None

    def _writeBlock(self):
        """Write a block of data to the remote writer.

        Returns True when the transfer is finished (EOF, interruption,
        open failure, or maxsize exceeded), otherwise a Deferred that
        fires with False once the block has been written."""
        if self.interrupted or self.fp is None:
            if self.debug:
                log.msg('SlaveFileUploadCommand._writeBlock(): end')
            return True

        length = self.blocksize
        if self.remaining is not None and length > self.remaining:
            length = self.remaining

        if length <= 0:
            # maxsize budget is exhausted: truncate and flag an error
            if self.stderr is None:
                self.stderr = 'Maximum filesize reached, truncating file %r' \
                    % self.path
                self.rc = 1
            data = ''
        else:
            data = self.fp.read(length)

        if self.debug:
            log.msg('SlaveFileUploadCommand._writeBlock(): '+
                    'allowed=%d readlen=%d' % (length, len(data)))
        if len(data) == 0:
            log.msg("EOF: callRemote(close)")
            return True

        if self.remaining is not None:
            self.remaining = self.remaining - len(data)
            assert self.remaining >= 0
        d = self.writer.callRemote('write', data)
        d.addCallback(lambda res: False)
        return d

    def interrupt(self):
        if self.debug:
            log.msg('interrupted')
        if self.interrupted:
            return
        if self.stderr is None:
            self.stderr = 'Upload of %r interrupted' % self.path
            self.rc = 1
        self.interrupted = True
        # the next _writeBlock call will notice the .interrupted flag

    def finished(self, res):
        """Report the accumulated stderr/rc to the master and pass the
        chain result through."""
        if self.debug:
            log.msg('finished: stderr=%r, rc=%r' % (self.stderr, self.rc))
        if self.stderr is None:
            self.sendStatus({'rc': self.rc})
        else:
            self.sendStatus({'stderr': self.stderr, 'rc': self.rc})
        return res
+
+registerSlaveCommand("uploadFile", SlaveFileUploadCommand, command_version)
+
+
class SlaveDirectoryUploadCommand(Command):
    """
    Upload a directory from slave to build master
    Arguments:

    - ['workdir']: base directory to use
    - ['slavesrc']: name of the slave-side directory to read from
    - ['writer']: RemoteReference to a transfer._DirectoryWriter object
    - ['maxsize']: max size (in bytes) of file to write
    - ['blocksize']: max size for each data block
    """
    # was True (left over from development), which spams the twistd log;
    # the other transfer commands default to False.
    debug = False

    def setup(self, args):
        self.workdir = args['workdir']
        self.dirname = args['slavesrc']
        self.writer = args['writer']
        self.remaining = args['maxsize']
        self.blocksize = args['blocksize']
        self.stderr = None
        self.rc = 0

    def start(self):
        """Walk the source directory, recreate its directory structure on
        the master (including empty directories), then send each file.

        NOTE(review): the callRemote() calls are fire-and-forget -- their
        Deferreds are not collected, so remote errors go unreported and
        maxsize/blocksize are not enforced here."""
        if self.debug:
            log.msg('SlaveDirectoryUploadCommand started')

        # create some lists with all files and directories
        foundFiles = []
        foundDirs = []

        self.baseRoot = os.path.join(self.builder.basedir,
                                     self.workdir,
                                     os.path.expanduser(self.dirname))
        if self.debug:
            log.msg("baseRoot: %r" % self.baseRoot)

        for root, dirs, files in os.walk(self.baseRoot):
            # rebuild root's path relative to baseRoot
            tempRoot = root
            relRoot = ''
            while (tempRoot != self.baseRoot):
                tempRoot, tempRelRoot = os.path.split(tempRoot)
                relRoot = os.path.join(tempRelRoot, relRoot)
            for name in files:
                foundFiles.append(os.path.join(relRoot, name))
            for directory in dirs:
                foundDirs.append(os.path.join(relRoot, directory))

        if self.debug:
            log.msg("foundDirs: %s" % (str(foundDirs)))
            log.msg("foundFiles: %s" % (str(foundFiles)))

        # create all directories on the master, to catch also empty ones
        for dirname in foundDirs:
            self.writer.callRemote("createdir", dirname)

        for filename in foundFiles:
            self._writeFile(filename)

        return None

    def _writeFile(self, filename):
        """Send one file's complete contents to the remote writer."""
        log.msg("_writeFile: %r" % (filename))
        self.writer.callRemote('open', filename)
        f = open(os.path.join(self.baseRoot, filename), "r")
        try:
            data = f.read()
        finally:
            # close explicitly instead of leaving the handle to the GC
            f.close()
        self.writer.callRemote('write', data)
        self.writer.callRemote('close')
        return None

    def interrupt(self):
        if self.debug:
            log.msg('interrupted')
        if self.interrupted:
            return
        if self.stderr is None:
            # was "% self.path", but self.path is never set on this class
            # (copy/paste from SlaveFileUploadCommand) and raised
            # AttributeError; report the source directory name instead.
            self.stderr = 'Upload of %r interrupted' % self.dirname
            self.rc = 1
        self.interrupted = True
        # the next _writeFile call will notice the .interrupted flag

    def finished(self, res):
        """Report the accumulated stderr/rc to the master and pass the
        chain result through."""
        if self.debug:
            log.msg('finished: stderr=%r, rc=%r' % (self.stderr, self.rc))
        if self.stderr is None:
            self.sendStatus({'rc': self.rc})
        else:
            self.sendStatus({'stderr': self.stderr, 'rc': self.rc})
        return res
+
+registerSlaveCommand("uploadDirectory", SlaveDirectoryUploadCommand, command_version)
+
+
class SlaveFileDownloadCommand(Command):
    """
    Download a file from master to slave
    Arguments:

    - ['workdir']: base directory to use
    - ['slavedest']: name of the slave-side file to be created
    - ['reader']: RemoteReference to a transfer._FileReader object
    - ['maxsize']: max size (in bytes) of file to write
    - ['blocksize']: max size for each data block
    - ['mode']: access mode for the new file
    """
    debug = False

    def setup(self, args):
        # stderr/rc accumulate the final status reported by finished()
        self.workdir = args['workdir']
        self.filename = args['slavedest']
        self.reader = args['reader']
        self.bytes_remaining = args['maxsize']
        self.blocksize = args['blocksize']
        self.mode = args['mode']
        self.stderr = None
        self.rc = 0

    def start(self):
        """Open the destination file (creating parent directories as
        needed), then pull blocks from the master one per reactor turn.
        Returns a Deferred that fires after the remote reader has been
        closed."""
        if self.debug:
            log.msg('SlaveFileDownloadCommand starting')

        # Open file
        self.path = os.path.join(self.builder.basedir,
                                 self.workdir,
                                 os.path.expanduser(self.filename))

        dirname = os.path.dirname(self.path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)

        try:
            self.fp = open(self.path, 'wb')
            if self.debug:
                log.msg('Opened %r for download' % self.path)
            if self.mode is not None:
                # note: there is a brief window during which the new file
                # will have the buildslave's default (umask) mode before we
                # set the new one. Don't use this mode= feature to keep files
                # private: use the buildslave's umask for that instead. (it
                # is possible to call os.umask() before and after the open()
                # call, but cleaning up from exceptions properly is more of a
                # nuisance that way).
                os.chmod(self.path, self.mode)
        except IOError:
            # TODO: this still needs cleanup
            self.fp = None
            self.stderr = 'Cannot open file %r for download' % self.path
            self.rc = 1
            if self.debug:
                log.msg('Cannot open file %r for download' % self.path)

        d = defer.Deferred()
        reactor.callLater(0, self._loop, d)
        def _close(res):
            # close the file, but pass through any errors from _loop
            d1 = self.reader.callRemote('close')
            d1.addErrback(log.err)
            d1.addCallback(lambda ignored: res)
            return d1
        d.addBoth(_close)
        d.addBoth(self.finished)
        return d

    def _loop(self, fire_when_done):
        # transfer one block, then reschedule ourselves until _readBlock
        # reports completion (True) or fails
        d = defer.maybeDeferred(self._readBlock)
        def _done(finished):
            if finished:
                fire_when_done.callback(None)
            else:
                self._loop(fire_when_done)
        def _err(why):
            fire_when_done.errback(why)
        d.addCallbacks(_done, _err)
        return None

    def _readBlock(self):
        """Read a block of data from the remote reader.

        Returns True when the transfer is finished (interruption, open
        failure, or maxsize exceeded), otherwise a Deferred that fires
        once the block has been written locally."""
        if self.interrupted or self.fp is None:
            if self.debug:
                log.msg('SlaveFileDownloadCommand._readBlock(): end')
            return True

        length = self.blocksize
        if self.bytes_remaining is not None and length > self.bytes_remaining:
            length = self.bytes_remaining

        if length <= 0:
            # maxsize budget is exhausted: truncate and flag an error
            if self.stderr is None:
                self.stderr = 'Maximum filesize reached, truncating file %r' \
                    % self.path
                self.rc = 1
            return True
        else:
            d = self.reader.callRemote('read', length)
            d.addCallback(self._writeData)
            return d

    def _writeData(self, data):
        # callback for the remote read: write locally, update the budget,
        # and return the "finished" flag expected by _loop (True at EOF)
        if self.debug:
            log.msg('SlaveFileDownloadCommand._readBlock(): readlen=%d' %
                    len(data))
        if len(data) == 0:
            return True

        if self.bytes_remaining is not None:
            self.bytes_remaining = self.bytes_remaining - len(data)
            assert self.bytes_remaining >= 0
        self.fp.write(data)
        return False

    def interrupt(self):
        if self.debug:
            log.msg('interrupted')
        if self.interrupted:
            return
        if self.stderr is None:
            self.stderr = 'Download of %r interrupted' % self.path
            self.rc = 1
        self.interrupted = True
        # now we wait for the next read request to return. _readBlock will
        # abandon the file when it sees self.interrupted set.

    def finished(self, res):
        """Close the local file, report the accumulated stderr/rc to the
        master, and pass the chain result through."""
        if self.fp is not None:
            self.fp.close()

        if self.debug:
            log.msg('finished: stderr=%r, rc=%r' % (self.stderr, self.rc))
        if self.stderr is None:
            self.sendStatus({'rc': self.rc})
        else:
            self.sendStatus({'stderr': self.stderr, 'rc': self.rc})
        return res
+
+registerSlaveCommand("downloadFile", SlaveFileDownloadCommand, command_version)
+
+
+
class SlaveShellCommand(Command):
    """Run a shell command on the slave.

    The args dict may contain the following keys:

    - ['command'] (required): the command to run. A string is executed
      via /bin/sh (['/bin/sh', '-c', command]); a list (preferred) is
      executed directly.
    - ['workdir'] (required): directory to run in, relative to the
      builder dir.
    - ['env']: dict of environment variables to augment/replace
      os.environ . PYTHONPATH is treated specially, and should be a list
      of path components to be prepended to any existing PYTHONPATH.
    - ['initial_stdin']: string written to the command's stdin as soon
      as it starts.
    - ['keep_stdin_open']: unless True, stdin is closed as soon as
      initial_stdin has been written. Set True to write to stdin after
      the command has started.
    - ['want_stdout']: 0 if stdout should be thrown away.
    - ['want_stderr']: 0 if stderr should be thrown away.
    - ['usePTY']: True or False to force PTY usage (defaults to the
      slave's configuration).
    - ['not_really']: 1 to skip execution and return rc=0.
    - ['timeout']: seconds of silence tolerated before killing command.
    - ['logfiles']: dict mapping LogFile name to the workdir-relative
      filename of a local log file; the file is watched like 'tail -f'
      and all changes are sent as 'log' status updates.

    Status messages produced:
    - {'stdout': data} : when stdout data is available
    - {'stderr': data} : when stderr data is available
    - {'header': data} : when headers (command start/stop) are available
    - {'log': (logfile_name, data)} : when log files have new contents
    - {'rc': rc} : when the process has terminated
    """

    def start(self):
        """Build a ShellCommand from self.args and start it; returns the
        Deferred from ShellCommand.start()."""
        args = self.args
        # args['workdir'] is relative to the Builder directory and required
        assert args['workdir'] is not None
        workdir = os.path.join(self.builder.basedir, args['workdir'])
        self.command = ShellCommand(self.builder, args['command'],
                                    workdir,
                                    environ=args.get('env'),
                                    timeout=args.get('timeout', None),
                                    sendStdout=args.get('want_stdout', True),
                                    sendStderr=args.get('want_stderr', True),
                                    sendRC=True,
                                    initialStdin=args.get('initial_stdin'),
                                    keepStdinOpen=args.get('keep_stdin_open'),
                                    logfiles=args.get('logfiles', {}),
                                    usePTY=args.get('usePTY', "slave-config"),
                                    )
        return self.command.start()

    def interrupt(self):
        # flag the interruption and kill the running process
        self.interrupted = True
        self.command.kill("command interrupted")

    def writeStdin(self, data):
        # forward master-supplied stdin data to the running process
        self.command.writeStdin(data)

    def closeStdin(self):
        # close the running process's stdin on the master's behalf
        self.command.closeStdin()
+
+registerSlaveCommand("shell", SlaveShellCommand, command_version)
+
+
class DummyCommand(Command):
    """A no-op command that, by default, takes 5 seconds to complete.

    Emits one intermediate {'stdout': 'data'} update after one second,
    then finishes with rc=0 (rc=1 when interrupted) once
    args['timeout'] (default 5) seconds have elapsed.
    See L{buildbot.steps.dummy.RemoteDummy}
    """

    def start(self):
        """Schedule the intermediate status update and return a Deferred
        that fires when the command finishes."""
        self.d = defer.Deferred()
        log.msg(" starting dummy command [%s]" % self.stepId)
        self.timer = reactor.callLater(1, self.doStatus)
        return self.d

    def interrupt(self):
        # cancel the pending timer and finish immediately with rc=1
        if self.interrupted:
            return
        self.timer.cancel()
        self.timer = None
        self.interrupted = True
        self.finished()

    def doStatus(self):
        """Send the intermediate update, then schedule completion."""
        log.msg(" sending intermediate status")
        self.sendStatus({'stdout': 'data'})
        timeout = self.args.get('timeout', 5) + 1
        self.timer = reactor.callLater(timeout - 1, self.finished)

    def finished(self):
        """Report the final rc and fire the start() Deferred."""
        log.msg(" dummy command finished [%s]" % self.stepId)
        if self.interrupted:
            rc = 1
        else:
            rc = 0
        self.sendStatus({'rc': rc})
        self.d.callback(0)
+
+registerSlaveCommand("dummy", DummyCommand, command_version)
+
+
# this maps handle names to a callable. When the WaitCommand starts, this
# callable is invoked with no arguments. It should return a Deferred. When
# that Deferred fires, our WaitCommand will finish. (Entries are added by
# the unit test suite before it triggers a 'dummy.wait' command.)
waitCommandRegistry = {}
+
class WaitCommand(Command):
    """
    I am a dummy command used by the buildbot unit test suite. I wait for the
    unit test to tell us to finish. See L{buildbot.steps.dummy.Wait}
    """

    def start(self):
        """Look up (and consume) the registered callable for our handle,
        invoke it on the next reactor turn, and finish when the Deferred
        it returns fires."""
        self.d = defer.Deferred()
        log.msg(" starting wait command [%s]" % self.stepId)
        handle = self.args['handle']
        cb = waitCommandRegistry[handle]
        # each handle is single-use: remove it before invoking
        del waitCommandRegistry[handle]
        def _called():
            log.msg(" wait-%s starting" % (handle,))
            d = cb()
            def _done(res):
                log.msg(" wait-%s finishing: %s" % (handle, res))
                return res
            d.addBoth(_done)
            d.addCallbacks(self.finished, self.failed)
        reactor.callLater(0, _called)
        return self.d

    def interrupt(self):
        # finish immediately with rc=2 (via the interrupted flag)
        log.msg(" wait command interrupted")
        if self.interrupted:
            return
        self.interrupted = True
        self.finished("interrupted")

    def finished(self, res):
        """Report rc (0, or 2 when interrupted) and fire the start()
        Deferred. The chain result is ignored."""
        log.msg(" wait command finished [%s]" % self.stepId)
        if self.interrupted:
            self.sendStatus({'rc': 2})
        else:
            self.sendStatus({'rc': 0})
        self.d.callback(0)

    def failed(self, why):
        """Errback: report rc=1 and fire the start() Deferred anyway."""
        log.msg(" wait command failed [%s]" % self.stepId)
        self.sendStatus({'rc': 1})
        self.d.callback(0)
+
+registerSlaveCommand("dummy.wait", WaitCommand, command_version)
+
+
class SourceBase(Command):
    """Abstract base class for Version Control System operations (checkout
    and update). This class extracts the following arguments from the
    dictionary received from the master:

        - ['workdir']: (required) the subdirectory where the buildable sources
                       should be placed

        - ['mode']: one of update/copy/clobber/export, defaults to 'update'

        - ['revision']: If not None, this is an int or string which indicates
                        which sources (along a time-like axis) should be used.
                        It is the thing you provide as the CVS -r or -D
                        argument.

        - ['patch']: If not None, this is a tuple of (striplevel, patch)
                     which contains a patch that should be applied after the
                     checkout has occurred. Once applied, the tree is no
                     longer eligible for use with mode='update', and it only
                     makes sense to use this in conjunction with a
                     ['revision'] argument. striplevel is an int, and patch
                     is a string in standard unified diff format. The patch
                     will be applied with 'patch -p%d <PATCH', with
                     STRIPLEVEL substituted as %d. The command will fail if
                     the patch process fails (rejected hunks).

        - ['timeout']: seconds of silence tolerated before we kill off the
                       command

        - ['retry']: If not None, this is a tuple of (delay, repeats)
                     which means that any failed VC updates should be
                     reattempted, up to REPEATS times, after a delay of
                     DELAY seconds. This is intended to deal with slaves
                     that experience transient network failures.
    """

    # repository-identity string; subclasses set this in setup() so a later
    # build can tell whether an existing tree came from the same place
    sourcedata = ""

    def setup(self, args):
        # if we need to parse the output, use this environment. Otherwise
        # command output will be in whatever the buildslave's native language
        # has been set to.
        self.env = os.environ.copy()
        self.env['LC_MESSAGES'] = "C"

        self.workdir = args['workdir']
        self.mode = args.get('mode', "update")
        self.revision = args.get('revision')
        self.patch = args.get('patch')
        self.timeout = args.get('timeout', 120)
        self.retry = args.get('retry')
        # VC-specific subclasses should override this to extract more args.
        # Make sure to upcall!

    def start(self):
        """Run the whole VC sequence: clobber if needed, do the VC
        operation, optionally copy the tree and apply a patch, then report
        rc (or an abandoned chain) to the master."""
        self.sendStatus({'header': "starting " + self.header + "\n"})
        self.command = None

        # self.srcdir is where the VC system should put the sources
        if self.mode == "copy":
            self.srcdir = "source" # hardwired directory name, sorry
        else:
            self.srcdir = self.workdir
        self.sourcedatafile = os.path.join(self.builder.basedir,
                                           self.srcdir,
                                           ".buildbot-sourcedata")

        d = defer.succeed(None)
        self.maybeClobber(d)
        if not (self.sourcedirIsUpdateable() and self.sourcedataMatches()):
            # the directory cannot be updated, so we have to clobber it.
            # Perhaps the master just changed modes from 'export' to
            # 'update'.
            d.addCallback(self.doClobber, self.srcdir)

        d.addCallback(self.doVC)

        if self.mode == "copy":
            d.addCallback(self.doCopy)
        if self.patch:
            d.addCallback(self.doPatch)
        d.addCallbacks(self._sendRC, self._checkAbandoned)
        return d

    def maybeClobber(self, d):
        # do we need to clobber anything? copy/clobber/export modes always
        # start from a deleted workdir
        if self.mode in ("copy", "clobber", "export"):
            d.addCallback(self.doClobber, self.workdir)

    def interrupt(self):
        # kill the currently-running VC subprocess, if any; doVC will then
        # abandon the chain when it sees the interrupted flag
        self.interrupted = True
        if self.command:
            self.command.kill("command interrupted")

    def doVC(self, res):
        """Update the tree in place when it is updateable and matches our
        sourcedata, otherwise do a full checkout; afterwards record the
        obtained revision and write the sourcedata file."""
        if self.interrupted:
            raise AbandonChain(1)
        if self.sourcedirIsUpdateable() and self.sourcedataMatches():
            d = self.doVCUpdate()
            d.addCallback(self.maybeDoVCFallback)
        else:
            d = self.doVCFull()
            d.addBoth(self.maybeDoVCRetry)
        d.addCallback(self._abandonOnFailure)
        d.addCallback(self._handleGotRevision)
        d.addCallback(self.writeSourcedata)
        return d

    def sourcedataMatches(self):
        """Return True if the stored sourcedata file matches the current
        repository settings; a missing/unreadable file counts as a
        mismatch."""
        try:
            olddata = open(self.sourcedatafile, "r").read()
            if olddata != self.sourcedata:
                return False
        except IOError:
            return False
        return True

    def _handleGotRevision(self, res):
        # report which revision we actually wound up with
        d = defer.maybeDeferred(self.parseGotRevision)
        d.addCallback(lambda got_revision:
                      self.sendStatus({'got_revision': got_revision}))
        return d

    def parseGotRevision(self):
        """Override this in a subclass. It should return a string that
        represents which revision was actually checked out, or a Deferred
        that will fire with such a string. If, in a future build, you were to
        pass this 'got_revision' string in as the 'revision' component of a
        SourceStamp, you should wind up with the same source code as this
        checkout just obtained.

        It is probably most useful to scan self.command.stdout for a string
        of some sort. Be sure to set keepStdout=True on the VC command that
        you run, so that you'll have something available to look at.

        If this information is unavailable, just return None."""

        return None

    def writeSourcedata(self, res):
        # record the repository settings used for this checkout, so the
        # next build can tell whether the tree is updateable
        open(self.sourcedatafile, "w").write(self.sourcedata)
        return res

    def sourcedirIsUpdateable(self):
        # subclass hook: can the existing tree be updated in place?
        raise NotImplementedError("this must be implemented in a subclass")

    def doVCUpdate(self):
        # subclass hook: update the existing tree in place
        raise NotImplementedError("this must be implemented in a subclass")

    def doVCFull(self):
        # subclass hook: perform a full checkout/export
        raise NotImplementedError("this must be implemented in a subclass")

    def maybeDoVCFallback(self, rc):
        """If an in-place update failed, clobber the tree and retry with a
        full checkout (unless we were interrupted)."""
        if type(rc) is int and rc == 0:
            return rc
        if self.interrupted:
            raise AbandonChain(1)
        msg = "update failed, clobbering and trying again"
        self.sendStatus({'header': msg + "\n"})
        log.msg(msg)
        d = self.doClobber(None, self.srcdir)
        d.addCallback(self.doVCFallback2)
        return d

    def doVCFallback2(self, res):
        # second half of the fallback: the tree is gone, do a full checkout
        msg = "now retrying VC operation"
        self.sendStatus({'header': msg + "\n"})
        log.msg(msg)
        d = self.doVCFull()
        d.addBoth(self.maybeDoVCRetry)
        d.addCallback(self._abandonOnFailure)
        return d

    def maybeDoVCRetry(self, res):
        """We get here somewhere after a VC chain has finished. res could
        be::

         - 0: the operation was successful
         - nonzero: the operation failed. retry if possible
         - AbandonChain: the operation failed, someone else noticed. retry.
         - Failure: some other exception, re-raise
        """

        if isinstance(res, failure.Failure):
            if self.interrupted:
                return res # don't re-try interrupted builds
            res.trap(AbandonChain)
        else:
            if type(res) is int and res == 0:
                return res
            if self.interrupted:
                raise AbandonChain(1)
        # if we get here, we should retry, if possible
        if self.retry:
            delay, repeats = self.retry
            if repeats >= 0:
                # decrement the budget and schedule a delayed clobber+retry
                self.retry = (delay, repeats-1)
                msg = ("update failed, trying %d more times after %d seconds"
                       % (repeats, delay))
                self.sendStatus({'header': msg + "\n"})
                log.msg(msg)
                d = defer.Deferred()
                self.maybeClobber(d)
                d.addCallback(lambda res: self.doVCFull())
                d.addBoth(self.maybeDoVCRetry)
                reactor.callLater(delay, d.callback, None)
                return d
        return res

    def doClobber(self, dummy, dirname):
        """Delete BASEDIR/dirname: 'rm -rf' in a subprocess on POSIX (so
        output streams to the master), blocking rmtree elsewhere. Returns
        a Deferred firing with the removal's return code."""
        # TODO: remove the old tree in the background
##         workdir = os.path.join(self.builder.basedir, self.workdir)
##         deaddir = self.workdir + ".deleting"
##         if os.path.isdir(workdir):
##             try:
##                 os.rename(workdir, deaddir)
##                 # might fail if deaddir already exists: previous deletion
##                 # hasn't finished yet
##                 # start the deletion in the background
##                 # TODO: there was a solaris/NetApp/NFS problem where a
##                 # process that was still running out of the directory we're
##                 # trying to delete could prevent the rm-rf from working. I
##                 # think it stalled the rm, but maybe it just died with
##                 # permission issues. Try to detect this.
##                 os.commands("rm -rf %s &" % deaddir)
##             except:
##                 # fall back to sequential delete-then-checkout
##                 pass
        d = os.path.join(self.builder.basedir, dirname)
        if runtime.platformType != "posix":
            # if we're running on w32, use rmtree instead. It will block,
            # but hopefully it won't take too long.
            rmdirRecursive(d)
            return defer.succeed(0)
        command = ["rm", "-rf", d]
        c = ShellCommand(self.builder, command, self.builder.basedir,
                         sendRC=0, timeout=self.timeout, usePTY=False)

        self.command = c
        # sendRC=0 means the rm command will send stdout/stderr to the
        # master, but not the rc=0 when it finishes. That job is left to
        # _sendRC
        d = c.start()
        d.addCallback(self._abandonOnFailure)
        return d

    def doCopy(self, res):
        """Copy the checked-out 'source' tree into the workdir ('cp -R' in
        a subprocess on POSIX, blocking shutil.copytree elsewhere)."""
        # now copy tree to workdir
        fromdir = os.path.join(self.builder.basedir, self.srcdir)
        todir = os.path.join(self.builder.basedir, self.workdir)
        if runtime.platformType != "posix":
            self.sendStatus({'header': "Since we're on a non-POSIX platform, "
            "we're not going to try to execute cp in a subprocess, but instead "
            "use shutil.copytree(), which will block until it is complete.  "
            "fromdir: %s, todir: %s\n" % (fromdir, todir)})
            shutil.copytree(fromdir, todir)
            return defer.succeed(0)

        if not os.path.exists(os.path.dirname(todir)):
            os.makedirs(os.path.dirname(todir))
        if os.path.exists(todir):
            # I don't think this happens, but just in case..
            log.msg("cp target '%s' already exists -- cp will not do what you think!" % todir)

        command = ['cp', '-R', '-P', '-p', fromdir, todir]
        c = ShellCommand(self.builder, command, self.builder.basedir,
                         sendRC=False, timeout=self.timeout, usePTY=False)
        self.command = c
        d = c.start()
        d.addCallback(self._abandonOnFailure)
        return d

    def doPatch(self, res):
        """Apply self.patch (striplevel, diff) to the workdir by feeding
        the diff to 'patch -pN' on stdin, and mark the tree as patched so
        it will not be reused for mode='update'."""
        patchlevel, diff = self.patch
        command = [getCommand("patch"), '-p%d' % patchlevel]
        dir = os.path.join(self.builder.basedir, self.workdir)
        # mark the directory so we don't try to update it later
        open(os.path.join(dir, ".buildbot-patched"), "w").write("patched\n")
        # now apply the patch
        c = ShellCommand(self.builder, command, dir,
                         sendRC=False, timeout=self.timeout,
                         initialStdin=diff, usePTY=False)
        self.command = c
        d = c.start()
        d.addCallback(self._abandonOnFailure)
        return d
+
+
class CVS(SourceBase):
    """CVS-specific VC operation. In addition to the arguments handled by
    SourceBase, this command reads the following keys:

    ['cvsroot'] (required): the CVSROOT repository string
    ['cvsmodule'] (required): the module to be retrieved
    ['branch']: a '-r' tag or branch name to use for the checkout/update
    ['login']: a string for use as a password to 'cvs login'
    ['global_options']: a list of strings to use before the CVS verb
    """

    header = "cvs operation"

    def setup(self, args):
        SourceBase.setup(self, args)
        self.vcexe = getCommand("cvs")
        self.cvsroot = args['cvsroot']
        self.cvsmodule = args['cvsmodule']
        self.global_options = args.get('global_options', [])
        self.branch = args.get('branch')
        self.login = args.get('login')
        # identity string: tree is only updateable if root/module/branch
        # are all unchanged
        self.sourcedata = "%s\n%s\n%s\n" % (self.cvsroot, self.cvsmodule,
                                            self.branch)

    def sourcedirIsUpdateable(self):
        # updateable iff the tree has a CVS/ admin dir and has not been
        # patched
        if os.path.exists(os.path.join(self.builder.basedir,
                                       self.srcdir, ".buildbot-patched")):
            return False
        return os.path.isdir(os.path.join(self.builder.basedir,
                                          self.srcdir, "CVS"))

    def start(self):
        """Run 'cvs login' first when a password was supplied, then hand
        off to the usual SourceBase sequence."""
        if self.login is not None:
            # need to do a 'cvs login' command first
            d = self.builder.basedir
            command = ([self.vcexe, '-d', self.cvsroot] + self.global_options
                       + ['login'])
            c = ShellCommand(self.builder, command, d,
                             sendRC=False, timeout=self.timeout,
                             initialStdin=self.login+"\n", usePTY=False)
            self.command = c
            d = c.start()
            d.addCallback(self._abandonOnFailure)
            d.addCallback(self._didLogin)
            return d
        else:
            return self._didLogin(None)

    def _didLogin(self, res):
        # now we really start
        return SourceBase.start(self)

    def doVCUpdate(self):
        """In-place 'cvs update -dP' in the source directory, pinned to
        the branch and/or date-based revision when given."""
        d = os.path.join(self.builder.basedir, self.srcdir)
        command = [self.vcexe, '-z3'] + self.global_options + ['update', '-dP']
        if self.branch:
            command += ['-r', self.branch]
        if self.revision:
            command += ['-D', self.revision]
        c = ShellCommand(self.builder, command, d,
                         sendRC=False, timeout=self.timeout, usePTY=False)
        self.command = c
        return c.start()

    def doVCFull(self):
        """Fresh 'cvs checkout' (or 'export' for mode='export') of the
        module into self.srcdir."""
        d = self.builder.basedir
        if self.mode == "export":
            verb = "export"
        else:
            verb = "checkout"
        command = ([self.vcexe, '-d', self.cvsroot, '-z3'] +
                   self.global_options +
                   [verb, '-d', self.srcdir])
        if self.branch:
            command += ['-r', self.branch]
        if self.revision:
            command += ['-D', self.revision]
        command += [self.cvsmodule]
        c = ShellCommand(self.builder, command, d,
                         sendRC=False, timeout=self.timeout, usePTY=False)
        self.command = c
        return c.start()

    def parseGotRevision(self):
        # CVS does not have any kind of revision stamp to speak of. We return
        # the current timestamp as a best-effort guess, but this depends upon
        # the local system having a clock that is
        # reasonably-well-synchronized with the repository.
        return time.strftime("%Y-%m-%d %H:%M:%S +0000", time.gmtime())
+
+registerSlaveCommand("cvs", CVS, command_version)
+
class SVN(SourceBase):
    """Subversion-specific VC operation. In addition to the arguments
    handled by SourceBase, this command reads the following keys:

    ['svnurl'] (required): the SVN repository string
    ['username'] Username passed to the svn command
    ['password'] Password passed to the svn command
    """

    header = "svn operation"

    def setup(self, args):
        SourceBase.setup(self, args)
        self.vcexe = getCommand("svn")
        self.svnurl = args['svnurl']
        # identity string: tree is only updateable if the URL is unchanged
        self.sourcedata = "%s\n" % self.svnurl

        self.extra_args = []
        # 'in' instead of the deprecated dict.has_key() (removed in Py3)
        if 'username' in args:
            self.extra_args.extend(["--username", args['username']])
        if 'password' in args:
            # wrap the password so it is masked in logs/status
            self.extra_args.extend(["--password",
                                    Obfuscated(args['password'], "XXXX")])

    def sourcedirIsUpdateable(self):
        # updateable iff the tree has a .svn admin dir and has not been
        # patched
        if os.path.exists(os.path.join(self.builder.basedir,
                                       self.srcdir, ".buildbot-patched")):
            return False
        return os.path.isdir(os.path.join(self.builder.basedir,
                                          self.srcdir, ".svn"))

    def doVCUpdate(self):
        """In-place 'svn update' to the requested revision (HEAD when no
        revision was given)."""
        revision = self.args['revision'] or 'HEAD'
        # update: possible for mode in ('copy', 'update')
        d = os.path.join(self.builder.basedir, self.srcdir)
        command = [self.vcexe, 'update'] + \
                  self.extra_args + \
                  ['--revision', str(revision),
                   '--non-interactive', '--no-auth-cache']
        c = ShellCommand(self.builder, command, d,
                         sendRC=False, timeout=self.timeout,
                         keepStdout=True, usePTY=False)
        self.command = c
        return c.start()

    def doVCFull(self):
        """Fresh 'svn checkout' (or 'export' for mode='export') of the
        repository URL into self.srcdir."""
        revision = self.args['revision'] or 'HEAD'
        d = self.builder.basedir
        if self.mode == "export":
            command = [self.vcexe, 'export'] + \
                      self.extra_args + \
                      ['--revision', str(revision),
                       '--non-interactive', '--no-auth-cache',
                       self.svnurl, self.srcdir]
        else:
            # mode=='clobber', or copy/update on a broken workspace
            command = [self.vcexe, 'checkout'] + \
                      self.extra_args + \
                      ['--revision', str(revision),
                       '--non-interactive', '--no-auth-cache',
                       self.svnurl, self.srcdir]
        c = ShellCommand(self.builder, command, d,
                         sendRC=False, timeout=self.timeout,
                         keepStdout=True, usePTY=False)
        self.command = c
        return c.start()

    def getSvnVersionCommand(self):
        """
        Get the (shell) command used to determine SVN revision number
        of checked-out code

        return: list of strings, passable as the command argument to ShellCommand
        """
        # svn checkout operations finish with 'Checked out revision 16657.'
        # svn update operations finish the line 'At revision 16654.'
        # But we don't use those. Instead, run 'svnversion'.
        svnversion_command = getCommand("svnversion")
        # older versions of 'svnversion' (1.1.4) require the WC_PATH
        # argument, newer ones (1.3.1) do not.
        return [svnversion_command, "."]

    def parseGotRevision(self):
        """Run svnversion in the source dir and return the revision as an
        int (or None when the output cannot be parsed)."""
        c = ShellCommand(self.builder,
                         self.getSvnVersionCommand(),
                         os.path.join(self.builder.basedir, self.srcdir),
                         environ=self.env,
                         sendStdout=False, sendStderr=False, sendRC=False,
                         keepStdout=True, usePTY=False)
        d = c.start()
        def _parse(res):
            r_raw = c.stdout.strip()
            # Extract revision from the version "number" string: strip the
            # modified/switched markers and take the upper bound of a
            # mixed-revision range like '1234:1256'
            r = r_raw.rstrip('MS')
            r = r.split(':')[-1]
            got_version = None
            try:
                got_version = int(r)
            except ValueError:
                msg =("SVN.parseGotRevision unable to parse output "
                      "of svnversion: '%s'" % r_raw)
                log.msg(msg)
                self.sendStatus({'header': msg + "\n"})
            return got_version
        d.addCallback(_parse)
        return d
+
+
+registerSlaveCommand("svn", SVN, command_version)
+
class Darcs(SourceBase):
    """Darcs-specific VC operation. In addition to the arguments
    handled by SourceBase, this command reads the following keys:

    ['repourl'] (required): the Darcs repository string
    """

    header = "darcs operation"

    def setup(self, args):
        SourceBase.setup(self, args)
        self.vcexe = getCommand("darcs")
        self.repourl = args['repourl']
        # identity string: tree is only updateable if the URL is unchanged
        self.sourcedata = "%s\n" % self.repourl
        # NOTE(review): SourceBase.setup already assigns self.revision from
        # args; this repeats that assignment
        self.revision = self.args.get('revision')

    def sourcedirIsUpdateable(self):
        # updateable iff the tree has a _darcs dir, has not been patched,
        # and no specific revision (context) was requested
        if os.path.exists(os.path.join(self.builder.basedir,
                                       self.srcdir, ".buildbot-patched")):
            return False
        if self.revision:
            # checking out a specific revision requires a full 'darcs get'
            return False
        return os.path.isdir(os.path.join(self.builder.basedir,
                                          self.srcdir, "_darcs"))

    def doVCUpdate(self):
        """In-place 'darcs pull --all' in the source directory; only valid
        when no specific revision was requested."""
        assert not self.revision
        # update: possible for mode in ('copy', 'update')
        d = os.path.join(self.builder.basedir, self.srcdir)
        command = [self.vcexe, 'pull', '--all', '--verbose']
        c = ShellCommand(self.builder, command, d,
                         sendRC=False, timeout=self.timeout, usePTY=False)
        self.command = c
        return c.start()

    def doVCFull(self):
        """Fresh 'darcs get' of the repository; when a revision (context
        string) was requested, write it to a temp file and pass it via
        --context, removing the file afterwards."""
        # checkout or export
        d = self.builder.basedir
        command = [self.vcexe, 'get', '--verbose', '--partial',
                   '--repo-name', self.srcdir]
        if self.revision:
            # write the context to a file
            n = os.path.join(self.builder.basedir, ".darcs-context")
            f = open(n, "wb")
            f.write(self.revision)
            f.close()
            # tell Darcs to use that context
            command.append('--context')
            command.append(n)
        command.append(self.repourl)

        c = ShellCommand(self.builder, command, d,
                         sendRC=False, timeout=self.timeout, usePTY=False)
        self.command = c
        d = c.start()
        if self.revision:
            d.addCallback(self.removeContextFile, n)
        return d

    def removeContextFile(self, res, n):
        # cleanup callback for the temporary context file
        os.unlink(n)
        return res

    def parseGotRevision(self):
        # we use 'darcs context' to find out what we wound up with
        command = [self.vcexe, "changes", "--context"]
        c = ShellCommand(self.builder, command,
                         os.path.join(self.builder.basedir, self.srcdir),
                         environ=self.env,
                         sendStdout=False, sendStderr=False, sendRC=False,
                         keepStdout=True, usePTY=False)
        d = c.start()
        d.addCallback(lambda res: c.stdout)
        return d
+
+registerSlaveCommand("darcs", Darcs, command_version)
+
class Monotone(SourceBase):
    """Monotone-specific VC operation. In addition to the arguments handled
    by SourceBase, this command reads the following keys:

    ['server_addr'] (required): the address of the server to pull from
    ['branch'] (required): the branch the revision is on
    ['db_path'] (required): the local database path to use
    ['revision'] (required): the revision to check out
    ['monotone'] (required): path to monotone executable
    """

    header = "monotone operation"

    def setup(self, args):
        SourceBase.setup(self, args)
        self.server_addr = args["server_addr"]
        self.branch = args["branch"]
        self.db_path = args["db_path"]
        self.revision = args["revision"]
        self.monotone = args["monotone"]
        # _makefulls() resolves absolute paths lazily, exactly once
        self._made_fulls = False
        self._pull_timeout = args["timeout"]

    def _makefulls(self):
        # Resolve db/src paths relative to the builder basedir (idempotent).
        if not self._made_fulls:
            basedir = self.builder.basedir
            self.full_db_path = os.path.join(basedir, self.db_path)
            self.full_srcdir = os.path.join(basedir, self.srcdir)
            self._made_fulls = True

    def sourcedirIsUpdateable(self):
        # Updateable only if both the database and a checked-out working
        # copy ('MT' dir) exist and the tree was never patched.
        self._makefulls()
        if os.path.exists(os.path.join(self.full_srcdir,
                                       ".buildbot_patched")):
            return False
        return (os.path.isfile(self.full_db_path)
                and os.path.isdir(os.path.join(self.full_srcdir, "MT")))

    def doVCUpdate(self):
        return self._withFreshDb(self._doUpdate)

    def _doUpdate(self):
        # update: possible for mode in ('copy', 'update')
        command = [self.monotone, "update",
                   "-r", self.revision,
                   "-b", self.branch]
        c = ShellCommand(self.builder, command, self.full_srcdir,
                         sendRC=False, timeout=self.timeout, usePTY=False)
        self.command = c
        return c.start()

    def doVCFull(self):
        return self._withFreshDb(self._doFull)

    def _doFull(self):
        command = [self.monotone, "--db=" + self.full_db_path,
                   "checkout",
                   "-r", self.revision,
                   "-b", self.branch,
                   self.full_srcdir]
        c = ShellCommand(self.builder, command, self.builder.basedir,
                         sendRC=False, timeout=self.timeout, usePTY=False)
        self.command = c
        return c.start()

    def _withFreshDb(self, callback):
        """Ensure the local database exists and is current (migrate or
        init, then pull from the server), then invoke 'callback' to do
        the real checkout/update work."""
        self._makefulls()
        # first ensure the db exists and is usable
        if os.path.isfile(self.full_db_path):
            # already exists, so run 'db migrate' in case monotone has been
            # upgraded under us
            command = [self.monotone, "db", "migrate",
                       "--db=" + self.full_db_path]
        else:
            # We'll be doing an initial pull, so up the timeout to 3 hours to
            # make sure it will have time to complete.
            self._pull_timeout = max(self._pull_timeout, 3 * 60 * 60)
            self.sendStatus({"header": "creating database %s\n"
                                       % (self.full_db_path,)})
            command = [self.monotone, "db", "init",
                       "--db=" + self.full_db_path]
        c = ShellCommand(self.builder, command, self.builder.basedir,
                         sendRC=False, timeout=self.timeout, usePTY=False)
        self.command = c
        d = c.start()
        d.addCallback(self._abandonOnFailure)
        d.addCallback(self._didDbInit)
        d.addCallback(self._didPull, callback)
        return d

    def _didDbInit(self, res):
        # Pull the branch; uses the (possibly raised) _pull_timeout since
        # initial pulls can be very slow.
        command = [self.monotone, "--db=" + self.full_db_path,
                   "pull", "--ticker=dot", self.server_addr, self.branch]
        c = ShellCommand(self.builder, command, self.builder.basedir,
                         sendRC=False, timeout=self._pull_timeout, usePTY=False)
        self.sendStatus({"header": "pulling %s from %s\n"
                                   % (self.branch, self.server_addr)})
        self.command = c
        return c.start()

    def _didPull(self, res, callback):
        # hand control to the update/checkout step supplied by the caller
        return callback()

registerSlaveCommand("monotone", Monotone, command_version)
+
+
class Git(SourceBase):
    """Git specific VC operation. In addition to the arguments
    handled by SourceBase, this command reads the following keys:

    ['repourl'] (required): the upstream GIT repository string
    ['branch'] (optional): which version (i.e. branch or tag) to
                           retrieve. Default: "master".
    """

    header = "git operation"

    def setup(self, args):
        SourceBase.setup(self, args)
        self.repourl = args['repourl']
        self.branch = args.get('branch')
        if not self.branch:
            self.branch = "master"
        # sourcedata records repourl+branch; see sourcedataMatches below
        self.sourcedata = "%s %s\n" % (self.repourl, self.branch)

    def _fullSrcdir(self):
        return os.path.join(self.builder.basedir, self.srcdir)

    def _commitSpec(self):
        # prefer an explicit revision; otherwise build the branch tip
        if self.revision:
            return self.revision
        return self.branch

    def sourcedirIsUpdateable(self):
        if os.path.exists(os.path.join(self._fullSrcdir(),
                                       ".buildbot-patched")):
            return False
        return os.path.isdir(os.path.join(self._fullSrcdir(), ".git"))

    def readSourcedata(self):
        # may raise IOError if the sourcedata file does not exist yet
        return open(self.sourcedatafile, "r").read()

    # If the repourl matches the sourcedata file, then
    # we can say that the sourcedata matches. We can
    # ignore branch changes, since Git can work with
    # many branches fetched, and we deal with it properly
    # in doVCUpdate.
    def sourcedataMatches(self):
        try:
            olddata = self.readSourcedata()
            if not olddata.startswith(self.repourl+' '):
                return False
        except IOError:
            return False
        return True

    def _didFetch(self, res):
        if self.revision:
            head = self.revision
        else:
            head = 'FETCH_HEAD'

        command = ['git', 'reset', '--hard', head]
        c = ShellCommand(self.builder, command, self._fullSrcdir(),
                         sendRC=False, timeout=self.timeout, usePTY=False)
        self.command = c
        return c.start()

    # Update first runs "git clean", removing local changes,
    # if the branch to be checked out has changed. This, combined
    # with the later "git reset" equates clobbering the repo,
    # but it's much more efficient.
    def doVCUpdate(self):
        try:
            # Check to see if our branch has changed
            diffbranch = self.sourcedata != self.readSourcedata()
        except IOError:
            diffbranch = False
        if diffbranch:
            command = ['git', 'clean', '-f', '-d']
            c = ShellCommand(self.builder, command, self._fullSrcdir(),
                             sendRC=False, timeout=self.timeout, usePTY=False)
            self.command = c
            d = c.start()
            d.addCallback(self._abandonOnFailure)
            d.addCallback(self._didClean)
            return d
        return self._didClean(None)

    def _didClean(self, dummy):
        command = ['git', 'fetch', '-t', self.repourl, self.branch]
        self.sendStatus({"header": "fetching branch %s from %s\n"
                                   % (self.branch, self.repourl)})
        c = ShellCommand(self.builder, command, self._fullSrcdir(),
                         sendRC=False, timeout=self.timeout, usePTY=False)
        self.command = c
        d = c.start()
        d.addCallback(self._abandonOnFailure)
        d.addCallback(self._didFetch)
        return d

    def _didInit(self, res):
        return self.doVCUpdate()

    def doVCFull(self):
        # fresh checkout: 'git init' an empty repo, then fetch into it
        os.mkdir(self._fullSrcdir())
        c = ShellCommand(self.builder, ['git', 'init'], self._fullSrcdir(),
                         sendRC=False, timeout=self.timeout, usePTY=False)
        self.command = c
        d = c.start()
        d.addCallback(self._abandonOnFailure)
        d.addCallback(self._didInit)
        return d

    def parseGotRevision(self):
        """Return a Deferred firing with the 40-char commit id of HEAD,
        or None when 'git rev-parse' produced something unexpected."""
        command = ['git', 'rev-parse', 'HEAD']
        c = ShellCommand(self.builder, command, self._fullSrcdir(),
                         sendRC=False, keepStdout=True, usePTY=False)
        d = c.start()
        def _parse(res):
            # renamed local from 'hash' to avoid shadowing the builtin
            rev = c.stdout.strip()
            if len(rev) != 40:
                return None
            return rev
        d.addCallback(_parse)
        return d

registerSlaveCommand("git", Git, command_version)
+
class Arch(SourceBase):
    """Arch-specific (tla-specific) VC operation. In addition to the
    arguments handled by SourceBase, this command reads the following keys:

    ['url'] (required): the repository string
    ['version'] (required): which version (i.e. branch) to retrieve
    ['revision'] (optional): the 'patch-NN' argument to check out
    ['archive']: the archive name to use. If None, use the archive's default
    ['build-config']: if present, give to 'tla build-config' after checkout
    """

    header = "arch operation"
    buildconfig = None

    def setup(self, args):
        SourceBase.setup(self, args)
        self.vcexe = getCommand("tla")
        self.archive = args.get('archive')
        self.url = args['url']
        self.version = args['version']
        self.revision = args.get('revision')
        self.buildconfig = args.get('build-config')
        # url/version/buildconfig identify the checkout; a change clobbers
        self.sourcedata = "%s\n%s\n%s\n" % (self.url, self.version,
                                            self.buildconfig)

    def sourcedirIsUpdateable(self):
        if self.revision:
            # Arch cannot roll a directory backwards, so if they ask for a
            # specific revision, clobber the directory. Technically this
            # could be limited to the cases where the requested revision is
            # later than our current one, but it's too hard to extract the
            # current revision from the tree.
            return False
        if os.path.exists(os.path.join(self.builder.basedir,
                                       self.srcdir, ".buildbot-patched")):
            return False
        return os.path.isdir(os.path.join(self.builder.basedir,
                                          self.srcdir, "{arch}"))

    def doVCUpdate(self):
        # update: possible for mode in ('copy', 'update')
        d = os.path.join(self.builder.basedir, self.srcdir)
        command = [self.vcexe, 'replay']
        if self.revision:
            command.append(self.revision)
        c = ShellCommand(self.builder, command, d,
                         sendRC=False, timeout=self.timeout, usePTY=False)
        self.command = c
        return c.start()

    def doVCFull(self):
        # to do a checkout, we must first "register" the archive by giving
        # the URL to tla, which will go to the repository at that URL and
        # figure out the archive name. tla will tell you the archive name
        # when it is done, and all further actions must refer to this name.

        command = [self.vcexe, 'register-archive', '--force', self.url]
        c = ShellCommand(self.builder, command, self.builder.basedir,
                         sendRC=False, keepStdout=True,
                         timeout=self.timeout, usePTY=False)
        self.command = c
        d = c.start()
        d.addCallback(self._abandonOnFailure)
        d.addCallback(self._didRegister, c)
        return d

    def _didRegister(self, res, c):
        # find out what tla thinks the archive name is. If the user told us
        # to use something specific, make sure it matches.
        r = re.search(r'Registering archive: (\S+)\s*$', c.stdout)
        if r:
            msg = "tla reports archive name is '%s'" % r.group(1)
            log.msg(msg)
            self.builder.sendUpdate({'header': msg+"\n"})
            if self.archive and r.group(1) != self.archive:
                msg = (" mismatch, we wanted an archive named '%s'"
                       % self.archive)
                log.msg(msg)
                self.builder.sendUpdate({'header': msg+"\n"})
                raise AbandonChain(-1)
            self.archive = r.group(1)
        assert self.archive, "need archive name to continue"
        return self._doGet()

    def _doGet(self):
        ver = self.version
        if self.revision:
            ver += "--%s" % self.revision
        command = [self.vcexe, 'get', '--archive', self.archive,
                   '--no-pristine',
                   ver, self.srcdir]
        c = ShellCommand(self.builder, command, self.builder.basedir,
                         sendRC=False, timeout=self.timeout, usePTY=False)
        self.command = c
        d = c.start()
        d.addCallback(self._abandonOnFailure)
        if self.buildconfig:
            d.addCallback(self._didGet)
        return d

    def _didGet(self, res):
        # apply the configured build-config inside the fresh checkout
        d = os.path.join(self.builder.basedir, self.srcdir)
        command = [self.vcexe, 'build-config', self.buildconfig]
        c = ShellCommand(self.builder, command, d,
                         sendRC=False, timeout=self.timeout, usePTY=False)
        self.command = c
        d = c.start()
        d.addCallback(self._abandonOnFailure)
        return d

    def parseGotRevision(self):
        # using code from tryclient.TlaExtractor
        # 'tla logs --full' gives us ARCHIVE/BRANCH--REVISION
        # 'tla logs' gives us REVISION
        command = [self.vcexe, "logs", "--full", "--reverse"]
        c = ShellCommand(self.builder, command,
                         os.path.join(self.builder.basedir, self.srcdir),
                         environ=self.env,
                         sendStdout=False, sendStderr=False, sendRC=False,
                         keepStdout=True, usePTY=False)
        d = c.start()
        def _parse(res):
            # first line is the oldest log entry: ARCHIVE/BRANCH--REVISION
            tid = c.stdout.split("\n")[0].strip()
            slash = tid.index("/")
            dd = tid.rindex("--")
            #branch = tid[slash+1:dd]
            baserev = tid[dd+2:]
            return baserev
        d.addCallback(_parse)
        return d

registerSlaveCommand("arch", Arch, command_version)
+
class Bazaar(Arch):
    """Bazaar (/usr/bin/baz) is an alternative client for Arch repositories.
    It is mostly option-compatible, but archive registration is different
    enough to warrant a separate Command.

    ['archive'] (required): the name of the archive being used
    """

    def setup(self, args):
        Arch.setup(self, args)
        self.vcexe = getCommand("baz")
        # baz doesn't emit the repository name after registration (and
        # grepping through the output of 'baz archives' is too hard), so we
        # require that the buildmaster configuration to provide both the
        # archive name and the URL.
        self.archive = args['archive'] # required for Baz
        self.sourcedata = "%s\n%s\n%s\n" % (self.url, self.version,
                                            self.buildconfig)

    # in _didRegister, the regexp won't match, so we'll stick with the name
    # in self.archive

    def _doGet(self):
        # baz prefers ARCHIVE/VERSION. This will work even if
        # my-default-archive is not set.
        ver = self.archive + "/" + self.version
        if self.revision:
            ver += "--%s" % self.revision
        command = [self.vcexe, 'get', '--no-pristine',
                   ver, self.srcdir]
        c = ShellCommand(self.builder, command, self.builder.basedir,
                         sendRC=False, timeout=self.timeout, usePTY=False)
        self.command = c
        d = c.start()
        d.addCallback(self._abandonOnFailure)
        if self.buildconfig:
            # inherited from Arch: runs 'build-config' after the get
            d.addCallback(self._didGet)
        return d

    def parseGotRevision(self):
        # using code from tryclient.BazExtractor
        # 'baz tree-id' emits ARCHIVE/BRANCH--REVISION; keep the revision
        command = [self.vcexe, "tree-id"]
        c = ShellCommand(self.builder, command,
                         os.path.join(self.builder.basedir, self.srcdir),
                         environ=self.env,
                         sendStdout=False, sendStderr=False, sendRC=False,
                         keepStdout=True, usePTY=False)
        d = c.start()
        def _parse(res):
            tid = c.stdout.strip()
            slash = tid.index("/")
            dd = tid.rindex("--")
            #branch = tid[slash+1:dd]
            baserev = tid[dd+2:]
            return baserev
        d.addCallback(_parse)
        return d

registerSlaveCommand("bazaar", Bazaar, command_version)
+
+
class Bzr(SourceBase):
    """bzr-specific VC operation. In addition to the arguments
    handled by SourceBase, this command reads the following keys:

    ['repourl'] (required): the Bzr repository string
    """

    header = "bzr operation"

    def setup(self, args):
        SourceBase.setup(self, args)
        self.vcexe = getCommand("bzr")
        self.repourl = args['repourl']
        self.sourcedata = "%s\n" % self.repourl
        self.revision = self.args.get('revision')

    def sourcedirIsUpdateable(self):
        if os.path.exists(os.path.join(self.builder.basedir,
                                       self.srcdir, ".buildbot-patched")):
            return False
        if self.revision:
            # checking out a specific revision requires a full 'bzr checkout'
            return False
        return os.path.isdir(os.path.join(self.builder.basedir,
                                          self.srcdir, ".bzr"))

    def doVCUpdate(self):
        assert not self.revision
        # update: possible for mode in ('copy', 'update')
        srcdir = os.path.join(self.builder.basedir, self.srcdir)
        command = [self.vcexe, 'update']
        c = ShellCommand(self.builder, command, srcdir,
                         sendRC=False, timeout=self.timeout, usePTY=False)
        self.command = c
        return c.start()

    def doVCFull(self):
        # checkout or export
        d = self.builder.basedir
        if self.mode == "export":
            # exporting in bzr requires a separate directory
            return self.doVCExport()
        # originally I added --lightweight here, but then 'bzr revno' is
        # wrong. The revno reported in 'bzr version-info' is correct,
        # however. Maybe this is a bzr bug?
        #
        # In addition, you cannot perform a 'bzr update' on a repo pulled
        # from an HTTP repository that used 'bzr checkout --lightweight'. You
        # get a "ERROR: Cannot lock: transport is read only" when you try.
        #
        # So I won't bother using --lightweight for now.

        command = [self.vcexe, 'checkout']
        if self.revision:
            command.append('--revision')
            command.append(str(self.revision))
        command.append(self.repourl)
        command.append(self.srcdir)

        c = ShellCommand(self.builder, command, d,
                         sendRC=False, timeout=self.timeout, usePTY=False)
        self.command = c
        # NOTE: 'd' is rebound here from the base directory to a Deferred
        d = c.start()
        return d

    def doVCExport(self):
        # checkout into a temp dir first (lightweight is fine here, we
        # discard the tree), then export the working tree into srcdir
        tmpdir = os.path.join(self.builder.basedir, "export-temp")
        srcdir = os.path.join(self.builder.basedir, self.srcdir)
        command = [self.vcexe, 'checkout', '--lightweight']
        if self.revision:
            command.append('--revision')
            command.append(str(self.revision))
        command.append(self.repourl)
        command.append(tmpdir)
        c = ShellCommand(self.builder, command, self.builder.basedir,
                         sendRC=False, timeout=self.timeout, usePTY=False)
        self.command = c
        d = c.start()
        def _export(res):
            command = [self.vcexe, 'export', srcdir]
            c = ShellCommand(self.builder, command, tmpdir,
                             sendRC=False, timeout=self.timeout, usePTY=False)
            self.command = c
            return c.start()
        d.addCallback(_export)
        return d

    def get_revision_number(self, out):
        """Extract the integer 'revno:' value from 'bzr version-info'
        output; raises ValueError when no such line is present."""
        # it feels like 'bzr revno' sometimes gives different results than
        # the 'revno:' line from 'bzr version-info', and the one from
        # version-info is more likely to be correct.
        for line in out.split("\n"):
            colon = line.find(":")
            if colon != -1:
                # line[colon+2:] skips the ": " separator
                key, value = line[:colon], line[colon+2:]
                if key == "revno":
                    return int(value)
        raise ValueError("unable to find revno: in bzr output: '%s'" % out)

    def parseGotRevision(self):
        command = [self.vcexe, "version-info"]
        c = ShellCommand(self.builder, command,
                         os.path.join(self.builder.basedir, self.srcdir),
                         environ=self.env,
                         sendStdout=False, sendStderr=False, sendRC=False,
                         keepStdout=True, usePTY=False)
        d = c.start()
        def _parse(res):
            try:
                return self.get_revision_number(c.stdout)
            except ValueError:
                msg =("Bzr.parseGotRevision unable to parse output "
                      "of bzr version-info: '%s'" % c.stdout.strip())
                log.msg(msg)
                self.sendStatus({'header': msg + "\n"})
                return None
        d.addCallback(_parse)
        return d

registerSlaveCommand("bzr", Bzr, command_version)
+
class Mercurial(SourceBase):
    """Mercurial specific VC operation. In addition to the arguments
    handled by SourceBase, this command reads the following keys:

    ['repourl'] (required): the Mercurial repository string
    """

    header = "mercurial operation"

    def setup(self, args):
        SourceBase.setup(self, args)
        self.vcexe = getCommand("hg")
        self.repourl = args['repourl']
        self.sourcedata = "%s\n" % self.repourl
        self.stdout = ""
        self.stderr = ""

    def sourcedirIsUpdateable(self):
        if os.path.exists(os.path.join(self.builder.basedir,
                                       self.srcdir, ".buildbot-patched")):
            return False
        # like Darcs, to check out a specific (old) revision, we have to do a
        # full checkout. TODO: I think 'hg pull' plus 'hg update' might work
        if self.revision:
            return False
        return os.path.isdir(os.path.join(self.builder.basedir,
                                          self.srcdir, ".hg"))

    def doVCUpdate(self):
        # pull new changesets, then let _update bring the working dir current
        d = os.path.join(self.builder.basedir, self.srcdir)
        command = [self.vcexe, 'pull', '--verbose', self.repourl]
        c = ShellCommand(self.builder, command, d,
                         sendRC=False, timeout=self.timeout,
                         keepStdout=True, usePTY=False)
        self.command = c
        d = c.start()
        d.addCallback(self._handleEmptyUpdate)
        d.addCallback(self._update)
        return d

    def _handleEmptyUpdate(self, res):
        if type(res) is int and res == 1:
            if self.command.stdout.find("no changes found") != -1:
                # 'hg pull', when it doesn't have anything to do, exits with
                # rc=1, and there appears to be no way to shut this off. It
                # emits a distinctive message to stdout, though. So catch
                # this and pretend that it completed successfully.
                return 0
        return res

    def doVCFull(self):
        # create an empty repository, then pull into it via doVCUpdate
        d = os.path.join(self.builder.basedir, self.srcdir)
        command = [self.vcexe, 'init', d]
        c = ShellCommand(self.builder, command, self.builder.basedir,
                         sendRC=False, timeout=self.timeout, usePTY=False)
        self.command = c
        cmd1 = c.start()

        def _vcupdate(res):
            return self.doVCUpdate()

        cmd1.addCallback(_vcupdate)
        return cmd1

    def _update(self, res):
        """After a pull: if the working dir sits on a different branch than
        the build needs, clobber and re-clone; otherwise update in place."""
        if res != 0:
            return res

        # compare current branch to update
        self.update_branch = self.args.get('branch', 'default')

        d = os.path.join(self.builder.basedir, self.srcdir)
        parentscmd = [self.vcexe, 'identify', '--num', '--branch']
        cmd = ShellCommand(self.builder, parentscmd, d,
                           sendStdout=False, sendStderr=False,
                           keepStdout=True, keepStderr=True, usePTY=False)

        def _parse(res):
            if res != 0:
                msg = "'hg identify' failed: %s\n%s" % (cmd.stdout, cmd.stderr)
                self.sendStatus({'header': msg + "\n"})
                log.msg(msg)
                return res

            log.msg('Output: %s' % cmd.stdout)

            # 'hg identify --num --branch' emits "REV BRANCH" on one line
            match = re.search(r'^(.+) (.+)$', cmd.stdout)
            assert match

            rev = match.group(1)
            current_branch = match.group(2)

            if rev == '-1':
                # revision -1 means nothing is checked out yet
                msg = "Fresh hg repo, don't worry about branch"
                log.msg(msg)

            elif self.update_branch != current_branch:
                msg = "Working dir is on branch '%s' and build needs '%s'. Clobbering." % (current_branch, self.update_branch)
                self.sendStatus({'header': msg + "\n"})
                log.msg(msg)

                def _vcfull(res):
                    return self.doVCFull()

                d = self.doClobber(None, self.srcdir)
                d.addCallback(_vcfull)
                return d

            else:
                msg = "Working dir on same branch as build (%s)." % (current_branch)
                log.msg(msg)

            return 0

        c = cmd.start()
        c.addCallback(_parse)
        c.addCallback(self._update2)
        return c

    def _update2(self, res):
        # check out the requested revision (or branch tip) with --clean
        d = os.path.join(self.builder.basedir, self.srcdir)

        updatecmd=[self.vcexe, 'update', '--clean', '--repository', d]
        if self.args.get('revision'):
            updatecmd.extend(['--rev', self.args['revision']])
        else:
            updatecmd.extend(['--rev', self.args.get('branch', 'default')])
        self.command = ShellCommand(self.builder, updatecmd,
                                    self.builder.basedir, sendRC=False,
                                    timeout=self.timeout, usePTY=False)
        return self.command.start()

    def parseGotRevision(self):
        # we use 'hg identify' to find out what we wound up with
        command = [self.vcexe, "identify"]
        c = ShellCommand(self.builder, command,
                         os.path.join(self.builder.basedir, self.srcdir),
                         environ=self.env,
                         sendStdout=False, sendStderr=False, sendRC=False,
                         keepStdout=True, usePTY=False)
        d = c.start()
        def _parse(res):
            # first word of 'hg identify' output is the changeset id
            m = re.search(r'^(\w+)', c.stdout)
            return m.group(1)
        d.addCallback(_parse)
        return d

registerSlaveCommand("hg", Mercurial, command_version)
+
+
class P4Base(SourceBase):
    """Base class for P4 source-updaters

    ['p4port'] (required): host:port for server to access
    ['p4user'] (optional): user to use for access
    ['p4passwd'] (optional): passwd to try for the user
    ['p4client'] (optional): client spec to use
    """
    def setup(self, args):
        # NOTE(review): despite the docstring's "(optional)", all four keys
        # are fetched with [] and must therefore be present in args (their
        # values may be None) -- confirm against the master-side step.
        SourceBase.setup(self, args)
        self.p4port = args['p4port']
        self.p4client = args['p4client']
        self.p4user = args['p4user']
        self.p4passwd = args['p4passwd']

    def parseGotRevision(self):
        """Return a Deferred firing with the newest synced changelist number
        (as a string), or None if the p4 output could not be parsed."""
        # Executes a p4 command that will give us the latest changelist number
        # of any file under the current (or default) client:
        command = ['p4']
        if self.p4port:
            command.extend(['-p', self.p4port])
        if self.p4user:
            command.extend(['-u', self.p4user])
        if self.p4passwd:
            command.extend(['-P', self.p4passwd])
        if self.p4client:
            command.extend(['-c', self.p4client])
        command.extend(['changes', '-m', '1', '#have'])
        c = ShellCommand(self.builder, command, self.builder.basedir,
                         environ=self.env, timeout=self.timeout,
                         sendStdout=True, sendStderr=False, sendRC=False,
                         keepStdout=True, usePTY=False)
        self.command = c
        d = c.start()

        def _parse(res):
            # 'p4 -c client-name changes -m 1 "#have"' produces output like:
            # "Change 28147 on 2008/04/07 by p4user@hostname..."
            # The number after "Change" is the one we want.
            # Use a raw string: \s and \d are regex escapes, not string
            # escapes (the original non-raw literal only worked by accident).
            m = re.match(r'Change\s+(\d+)\s+', c.stdout)
            if m:
                return m.group(1)
            return None
        d.addCallback(_parse)
        return d
+
+
class P4(P4Base):
    """A P4 source-updater.

    ['p4port'] (required): host:port for server to access
    ['p4user'] (optional): user to use for access
    ['p4passwd'] (optional): passwd to try for the user
    ['p4client'] (optional): client spec to use
    ['p4extra_views'] (optional): additional client views to use
    """

    header = "p4"

    def setup(self, args):
        P4Base.setup(self, args)
        self.p4base = args['p4base']
        self.p4extra_views = args['p4extra_views']
        self.p4mode = args['mode']
        self.p4branch = args['branch']

        # Everything that would require a fresh client spec or full sync
        # goes into sourcedata; any change forces a clobber.
        self.sourcedata = str([
            # Perforce server.
            self.p4port,

            # Client spec.
            self.p4client,

            # Depot side of view spec.
            self.p4base,
            self.p4branch,
            self.p4extra_views,

            # Local side of view spec (srcdir is made from these).
            self.builder.basedir,
            self.mode,
            self.workdir
        ])


    def sourcedirIsUpdateable(self):
        if os.path.exists(os.path.join(self.builder.basedir,
                                       self.srcdir, ".buildbot-patched")):
            return False
        # We assume our client spec is still around.
        # We just say we aren't updateable if the dir doesn't exist so we
        # don't get ENOENT checking the sourcedata.
        return os.path.isdir(os.path.join(self.builder.basedir,
                                          self.srcdir))

    def doVCUpdate(self):
        return self._doP4Sync(force=False)

    def _doP4Sync(self, force):
        """Run 'p4 sync' (optionally forced, optionally pinned to
        self.revision) and abandon the callback chain on failure."""
        command = ['p4']

        if self.p4port:
            command.extend(['-p', self.p4port])
        if self.p4user:
            command.extend(['-u', self.p4user])
        if self.p4passwd:
            command.extend(['-P', self.p4passwd])
        if self.p4client:
            command.extend(['-c', self.p4client])
        command.extend(['sync'])
        if force:
            command.extend(['-f'])
        if self.revision:
            command.extend(['@' + str(self.revision)])
        env = {}
        c = ShellCommand(self.builder, command, self.builder.basedir,
                         environ=env, sendRC=False, timeout=self.timeout,
                         keepStdout=True, usePTY=False)
        self.command = c
        d = c.start()
        d.addCallback(self._abandonOnFailure)
        return d


    def doVCFull(self):
        # Create (or replace) the client spec via 'p4 client -i' fed on
        # stdin, then do a forced sync to populate the workspace.
        env = {}
        command = ['p4']
        client_spec = ''
        client_spec += "Client: %s\n\n" % self.p4client
        client_spec += "Owner: %s\n\n" % self.p4user
        client_spec += "Description:\n\tCreated by %s\n\n" % self.p4user
        client_spec += "Root:\t%s\n\n" % self.builder.basedir
        client_spec += "Options:\tallwrite rmdir\n\n"
        client_spec += "LineEnd:\tlocal\n\n"

        # Setup a view
        client_spec += "View:\n\t%s" % (self.p4base)
        if self.p4branch:
            client_spec += "%s/" % (self.p4branch)
        client_spec += "... //%s/%s/...\n" % (self.p4client, self.srcdir)
        if self.p4extra_views:
            for k, v in self.p4extra_views:
                client_spec += "\t%s/... //%s/%s%s/...\n" % (k, self.p4client,
                                                             self.srcdir, v)
        if self.p4port:
            command.extend(['-p', self.p4port])
        if self.p4user:
            command.extend(['-u', self.p4user])
        if self.p4passwd:
            command.extend(['-P', self.p4passwd])
        command.extend(['client', '-i'])
        log.msg(client_spec)
        c = ShellCommand(self.builder, command, self.builder.basedir,
                         environ=env, sendRC=False, timeout=self.timeout,
                         initialStdin=client_spec, usePTY=False)
        self.command = c
        d = c.start()
        d.addCallback(self._abandonOnFailure)
        d.addCallback(lambda _: self._doP4Sync(force=True))
        return d

registerSlaveCommand("p4", P4, command_version)
+
+
class P4Sync(P4Base):
    """A partial P4 source-updater. Requires manual setup of a per-slave P4
    environment. The only thing which comes from the master is P4PORT.
    'mode' is required to be 'copy'.

    ['p4port'] (required): host:port for server to access
    ['p4user'] (optional): user to use for access
    ['p4passwd'] (optional): passwd to try for the user
    ['p4client'] (optional): client spec to use
    """

    header = "p4 sync"

    def setup(self, args):
        P4Base.setup(self, args)
        self.vcexe = getCommand("p4")

    def sourcedirIsUpdateable(self):
        # the manually-configured client spec is assumed to remain valid
        return True

    def _doVC(self, force):
        # 'p4 sync', optionally forced and/or pinned to self.revision
        d = os.path.join(self.builder.basedir, self.srcdir)
        command = [self.vcexe]
        if self.p4port:
            command.extend(['-p', self.p4port])
        if self.p4user:
            command.extend(['-u', self.p4user])
        if self.p4passwd:
            command.extend(['-P', self.p4passwd])
        if self.p4client:
            command.extend(['-c', self.p4client])
        command.extend(['sync'])
        if force:
            command.extend(['-f'])
        if self.revision:
            command.extend(['@' + self.revision])
        env = {}
        c = ShellCommand(self.builder, command, d, environ=env,
                         sendRC=False, timeout=self.timeout, usePTY=False)
        self.command = c
        return c.start()

    def doVCUpdate(self):
        return self._doVC(force=False)

    def doVCFull(self):
        return self._doVC(force=True)

registerSlaveCommand("p4sync", P4Sync, command_version)
diff --git a/buildbot/buildbot/slave/interfaces.py b/buildbot/buildbot/slave/interfaces.py
new file mode 100644
index 0000000..fb143a7
--- /dev/null
+++ b/buildbot/buildbot/slave/interfaces.py
@@ -0,0 +1,56 @@
+
+from zope.interface import Interface
+
class ISlaveCommand(Interface):
    """This interface is implemented by all of the buildslave's Command
    subclasses. It specifies how the buildslave can start, interrupt, and
    query the various Commands running on behalf of the buildmaster."""

    def __init__(builder, stepId, args):
        """Create the Command. 'builder' is a reference to the parent
        buildbot.bot.SlaveBuilder instance, which will be used to send status
        updates (by calling builder.sendStatus). 'stepId' is a random string
        which helps correlate slave logs with the master. 'args' is a dict of
        arguments that comes from the master-side BuildStep, with contents
        that are specific to the individual Command subclass.

        This method is not intended to be subclassed."""

    def setup(args):
        """This method is provided for subclasses to override, to extract
        parameters from the 'args' dictionary. The default implementation
        does nothing. It will be called from __init__"""

    def start():
        """Begin the command, and return a Deferred.

        While the command runs, it should send status updates to the
        master-side BuildStep by calling self.sendStatus(status). The
        'status' argument is typically a dict with keys like 'stdout',
        'stderr', and 'rc'.

        When the step completes, it should fire the Deferred (the results are
        not used). If an exception occurs during execution, it may also
        errback the deferred, however any reasonable errors should be trapped
        and indicated with a non-zero 'rc' status rather than raising an
        exception. Exceptions should indicate problems within the buildbot
        itself, not problems in the project being tested.

        """

    def interrupt():
        """This is called to tell the Command that the build is being stopped
        and therefore the command should be terminated as quickly as
        possible. The command may continue to send status updates, up to and
        including an 'rc' end-of-command update (which should indicate an
        error condition). The Command's deferred should still be fired when
        the command has finally completed.

        If the build is being stopped because the slave is shutting down or
        because the connection to the buildmaster has been lost, the status
        updates will simply be discarded. The Command does not need to be
        aware of this.

        Child shell processes should be killed. Simple ShellCommand classes
        can just insert a header line indicating that the process will be
        killed, then os.kill() the child."""
diff --git a/buildbot/buildbot/slave/registry.py b/buildbot/buildbot/slave/registry.py
new file mode 100644
index 0000000..772aad3
--- /dev/null
+++ b/buildbot/buildbot/slave/registry.py
@@ -0,0 +1,17 @@
+
+commandRegistry = {}
+
+def registerSlaveCommand(name, factory, version):
+ """
+ Register a slave command with the registry, making it available in slaves.
+
+ @type name: string
+ @param name: name under which the slave command will be registered; used
+ for L{buildbot.slave.bot.SlaveBuilder.remote_startCommand}
+
+ @type factory: L{buildbot.slave.commands.Command}
+ @type version: string
+ @param version: version string of the factory code
+ """
+ assert not commandRegistry.has_key(name)
+ commandRegistry[name] = (factory, version)
diff --git a/buildbot/buildbot/sourcestamp.py b/buildbot/buildbot/sourcestamp.py
new file mode 100644
index 0000000..e2162ca
--- /dev/null
+++ b/buildbot/buildbot/sourcestamp.py
@@ -0,0 +1,95 @@
+
+from zope.interface import implements
+from buildbot import util, interfaces
+
+class SourceStamp(util.ComparableMixin):
+ """This is a tuple of (branch, revision, patchspec, changes).
+
+ C{branch} is always valid, although it may be None to let the Source
+ step use its default branch. There are three possibilities for the
+ remaining elements:
+ - (revision=REV, patchspec=None, changes=None): build REV. If REV is
+ None, build the HEAD revision from the given branch.
+ - (revision=REV, patchspec=(LEVEL, DIFF), changes=None): checkout REV,
+ then apply a patch to the source, with C{patch -pPATCHLEVEL <DIFF}.
+ If REV is None, checkout HEAD and patch it.
+ - (revision=None, patchspec=None, changes=[CHANGES]): let the Source
+ step check out the latest revision indicated by the given Changes.
+ CHANGES is a tuple of L{buildbot.changes.changes.Change} instances,
+ and all must be on the same branch.
+ """
+
+ # all four of these are publicly visible attributes
+ branch = None
+ revision = None
+ patch = None
+ changes = ()
+
+ compare_attrs = ('branch', 'revision', 'patch', 'changes')
+
+ implements(interfaces.ISourceStamp)
+
+ def __init__(self, branch=None, revision=None, patch=None,
+ changes=None):
+ self.branch = branch
+ self.revision = revision
+ self.patch = patch
+ if changes:
+ self.changes = tuple(changes)
+ self.branch = changes[0].branch
+
+ def canBeMergedWith(self, other):
+ if other.branch != self.branch:
+ return False # the builds are completely unrelated
+
+ if self.changes and other.changes:
+ # TODO: consider not merging these. It's a tradeoff between
+ # minimizing the number of builds and obtaining finer-grained
+ # results.
+ return True
+ elif self.changes and not other.changes:
+ return False # we're using changes, they aren't
+ elif not self.changes and other.changes:
+ return False # they're using changes, we aren't
+
+ if self.patch or other.patch:
+ return False # you can't merge patched builds with anything
+ if self.revision == other.revision:
+ # both builds are using the same specific revision, so they can
+ # be merged. It might be the case that revision==None, so they're
+ # both building HEAD.
+ return True
+
+ return False
+
+ def mergeWith(self, others):
+ """Generate a SourceStamp for the merger of me and all the other
+ BuildRequests. This is called by a Build when it starts, to figure
+ out what its sourceStamp should be."""
+
+ # either we're all building the same thing (changes==None), or we're
+ # all building changes (which can be merged)
+ changes = []
+ changes.extend(self.changes)
+ for req in others:
+ assert self.canBeMergedWith(req) # should have been checked already
+ changes.extend(req.changes)
+ newsource = SourceStamp(branch=self.branch,
+ revision=self.revision,
+ patch=self.patch,
+ changes=changes)
+ return newsource
+
+ def getAbsoluteSourceStamp(self, got_revision):
+ return SourceStamp(branch=self.branch, revision=got_revision, patch=self.patch)
+
+ def getText(self):
+ # TODO: this won't work for VC's with huge 'revision' strings
+ if self.revision is None:
+ return [ "latest" ]
+ text = [ str(self.revision) ]
+ if self.branch:
+ text.append("in '%s'" % self.branch)
+ if self.patch:
+ text.append("[patch]")
+ return text
diff --git a/buildbot/buildbot/status/__init__.py b/buildbot/buildbot/status/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/buildbot/buildbot/status/__init__.py
diff --git a/buildbot/buildbot/status/base.py b/buildbot/buildbot/status/base.py
new file mode 100644
index 0000000..7588198
--- /dev/null
+++ b/buildbot/buildbot/status/base.py
@@ -0,0 +1,69 @@
+
+from zope.interface import implements
+from twisted.application import service
+
+from buildbot.interfaces import IStatusReceiver
+from buildbot import util, pbutil
+
+class StatusReceiver:
+ implements(IStatusReceiver)
+
+ def requestSubmitted(self, request):
+ pass
+
+ def buildsetSubmitted(self, buildset):
+ pass
+
+ def builderAdded(self, builderName, builder):
+ pass
+
+ def builderChangedState(self, builderName, state):
+ pass
+
+ def buildStarted(self, builderName, build):
+ pass
+
+ def buildETAUpdate(self, build, ETA):
+ pass
+
+ def stepStarted(self, build, step):
+ pass
+
+ def stepTextChanged(self, build, step, text):
+ pass
+
+ def stepText2Changed(self, build, step, text2):
+ pass
+
+ def stepETAUpdate(self, build, step, ETA, expectations):
+ pass
+
+ def logStarted(self, build, step, log):
+ pass
+
+ def logChunk(self, build, step, log, channel, text):
+ pass
+
+ def logFinished(self, build, step, log):
+ pass
+
+ def stepFinished(self, build, step, results):
+ pass
+
+ def buildFinished(self, builderName, build, results):
+ pass
+
+ def builderRemoved(self, builderName):
+ pass
+
+class StatusReceiverMultiService(StatusReceiver, service.MultiService,
+ util.ComparableMixin):
+ implements(IStatusReceiver)
+
+ def __init__(self):
+ service.MultiService.__init__(self)
+
+
+class StatusReceiverPerspective(StatusReceiver, pbutil.NewCredPerspective):
+ implements(IStatusReceiver)
+
diff --git a/buildbot/buildbot/status/builder.py b/buildbot/buildbot/status/builder.py
new file mode 100644
index 0000000..97f356f
--- /dev/null
+++ b/buildbot/buildbot/status/builder.py
@@ -0,0 +1,2182 @@
+# -*- test-case-name: buildbot.test.test_status -*-
+
+from zope.interface import implements
+from twisted.python import log
+from twisted.persisted import styles
+from twisted.internet import reactor, defer, threads
+from twisted.protocols import basic
+from buildbot.process.properties import Properties
+
+import os, shutil, sys, re, urllib, itertools
+from cPickle import load, dump
+from cStringIO import StringIO
+from bz2 import BZ2File
+
+# sibling imports
+from buildbot import interfaces, util, sourcestamp
+
+SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION = range(5)
+Results = ["success", "warnings", "failure", "skipped", "exception"]
+
+
+# build processes call the following methods:
+#
+# setDefaults
+#
+# currentlyBuilding
+# currentlyIdle
+# currentlyInterlocked
+# currentlyOffline
+# currentlyWaiting
+#
+# setCurrentActivity
+# updateCurrentActivity
+# addFileToCurrentActivity
+# finishCurrentActivity
+#
+# startBuild
+# finishBuild
+
+STDOUT = interfaces.LOG_CHANNEL_STDOUT
+STDERR = interfaces.LOG_CHANNEL_STDERR
+HEADER = interfaces.LOG_CHANNEL_HEADER
+ChunkTypes = ["stdout", "stderr", "header"]
+
+class LogFileScanner(basic.NetstringReceiver):
+ def __init__(self, chunk_cb, channels=[]):
+ self.chunk_cb = chunk_cb
+ self.channels = channels
+
+ def stringReceived(self, line):
+ channel = int(line[0])
+ if not self.channels or (channel in self.channels):
+ self.chunk_cb((channel, line[1:]))
+
+class LogFileProducer:
+ """What's the plan?
+
+ the LogFile has just one FD, used for both reading and writing.
+ Each time you add an entry, fd.seek to the end and then write.
+
+ Each reader (i.e. Producer) keeps track of their own offset. The reader
+ starts by seeking to the start of the logfile, and reading forwards.
+ Between each hunk of file they yield chunks, so they must remember their
+ offset before yielding and re-seek back to that offset before reading
+ more data. When their read() returns EOF, they're finished with the first
+ phase of the reading (everything that's already been written to disk).
+
+ After EOF, the remaining data is entirely in the current entries list.
+ These entries are all of the same channel, so we can do one "".join and
+ obtain a single chunk to be sent to the listener. But since that involves
+ a yield, and more data might arrive after we give up control, we have to
+ subscribe them before yielding. We can't subscribe them any earlier,
+ otherwise they'd get data out of order.
+
+ We're using a generator in the first place so that the listener can
+ throttle us, which means they're pulling. But the subscription means
+ we're pushing. Really we're a Producer. In the first phase we can be
+ either a PullProducer or a PushProducer. In the second phase we're only a
+ PushProducer.
+
+ So the client gives a LogFileConsumer to File.subscribeConsumer . This
+ Consumer must have registerProducer(), unregisterProducer(), and
+ writeChunk(), and is just like a regular twisted.interfaces.IConsumer,
+ except that writeChunk() takes chunks (tuples of (channel,text)) instead
+ of the normal write() which takes just text. The LogFileConsumer is
+ allowed to call stopProducing, pauseProducing, and resumeProducing on the
+ producer instance it is given. """
+
+ paused = False
+ subscribed = False
+ BUFFERSIZE = 2048
+
+ def __init__(self, logfile, consumer):
+ self.logfile = logfile
+ self.consumer = consumer
+ self.chunkGenerator = self.getChunks()
+ consumer.registerProducer(self, True)
+
+ def getChunks(self):
+ f = self.logfile.getFile()
+ offset = 0
+ chunks = []
+ p = LogFileScanner(chunks.append)
+ f.seek(offset)
+ data = f.read(self.BUFFERSIZE)
+ offset = f.tell()
+ while data:
+ p.dataReceived(data)
+ while chunks:
+ c = chunks.pop(0)
+ yield c
+ f.seek(offset)
+ data = f.read(self.BUFFERSIZE)
+ offset = f.tell()
+ del f
+
+ # now subscribe them to receive new entries
+ self.subscribed = True
+ self.logfile.watchers.append(self)
+ d = self.logfile.waitUntilFinished()
+
+ # then give them the not-yet-merged data
+ if self.logfile.runEntries:
+ channel = self.logfile.runEntries[0][0]
+ text = "".join([c[1] for c in self.logfile.runEntries])
+ yield (channel, text)
+
+ # now we've caught up to the present. Anything further will come from
+ # the logfile subscription. We add the callback *after* yielding the
+ # data from runEntries, because the logfile might have finished
+ # during the yield.
+ d.addCallback(self.logfileFinished)
+
+ def stopProducing(self):
+ # TODO: should we still call consumer.finish? probably not.
+ self.paused = True
+ self.consumer = None
+ self.done()
+
+ def done(self):
+ if self.chunkGenerator:
+ self.chunkGenerator = None # stop making chunks
+ if self.subscribed:
+ self.logfile.watchers.remove(self)
+ self.subscribed = False
+
+ def pauseProducing(self):
+ self.paused = True
+
+ def resumeProducing(self):
+ # Twisted-1.3.0 has a bug which causes hangs when resumeProducing
+ # calls transport.write (there is a recursive loop, fixed in 2.0 in
+ # t.i.abstract.FileDescriptor.doWrite by setting the producerPaused
+ # flag *before* calling resumeProducing). To work around this, we
+ # just put off the real resumeProducing for a moment. This probably
+ # has a performance hit, but I'm going to assume that the log files
+ # are not retrieved frequently enough for it to be an issue.
+
+ reactor.callLater(0, self._resumeProducing)
+
+ def _resumeProducing(self):
+ self.paused = False
+ if not self.chunkGenerator:
+ return
+ try:
+ while not self.paused:
+ chunk = self.chunkGenerator.next()
+ self.consumer.writeChunk(chunk)
+ # we exit this when the consumer says to stop, or we run out
+ # of chunks
+ except StopIteration:
+ # if the generator finished, it will have done releaseFile
+ self.chunkGenerator = None
+ # now everything goes through the subscription, and they don't get to
+ # pause anymore
+
+ def logChunk(self, build, step, logfile, channel, chunk):
+ if self.consumer:
+ self.consumer.writeChunk((channel, chunk))
+
+ def logfileFinished(self, logfile):
+ self.done()
+ if self.consumer:
+ self.consumer.unregisterProducer()
+ self.consumer.finish()
+ self.consumer = None
+
+def _tryremove(filename, timeout, retries):
+ """Try to remove a file, and if failed, try again in timeout.
+ Increases the timeout by a factor of 4, and only keeps trying for
+ another retries-amount of times.
+
+ """
+ try:
+ os.unlink(filename)
+ except OSError:
+ if retries > 0:
+ reactor.callLater(timeout, _tryremove, filename, timeout * 4,
+ retries - 1)
+ else:
+ log.msg("giving up on removing %s after over %d seconds" %
+ (filename, timeout))
+
+class LogFile:
+ """A LogFile keeps all of its contents on disk, in a non-pickle format to
+ which new entries can easily be appended. The file on disk has a name
+ like 12-log-compile-output, under the Builder's directory. The actual
+ filename is generated (before the LogFile is created) by
+ L{BuildStatus.generateLogfileName}.
+
+ Old LogFile pickles (which kept their contents in .entries) must be
+ upgraded. The L{BuilderStatus} is responsible for doing this, when it
+ loads the L{BuildStatus} into memory. The Build pickle is not modified,
+ so users who go from 0.6.5 back to 0.6.4 don't have to lose their
+ logs."""
+
+ implements(interfaces.IStatusLog, interfaces.ILogFile)
+
+ finished = False
+ length = 0
+ chunkSize = 10*1000
+ runLength = 0
+ runEntries = [] # provided so old pickled builds will getChunks() ok
+ entries = None
+ BUFFERSIZE = 2048
+ filename = None # relative to the Builder's basedir
+ openfile = None
+
+ def __init__(self, parent, name, logfilename):
+ """
+ @type parent: L{BuildStepStatus}
+ @param parent: the Step that this log is a part of
+ @type name: string
+ @param name: the name of this log, typically 'output'
+ @type logfilename: string
+ @param logfilename: the Builder-relative pathname for the saved entries
+ """
+ self.step = parent
+ self.name = name
+ self.filename = logfilename
+ fn = self.getFilename()
+ if os.path.exists(fn):
+ # the buildmaster was probably stopped abruptly, before the
+ # BuilderStatus could be saved, so BuilderStatus.nextBuildNumber
+ # is out of date, and we're overlapping with earlier builds now.
+ # Warn about it, but then overwrite the old pickle file
+ log.msg("Warning: Overwriting old serialized Build at %s" % fn)
+ self.openfile = open(fn, "w+")
+ self.runEntries = []
+ self.watchers = []
+ self.finishedWatchers = []
+
+ def getFilename(self):
+ return os.path.join(self.step.build.builder.basedir, self.filename)
+
+ def hasContents(self):
+ return os.path.exists(self.getFilename() + '.bz2') or \
+ os.path.exists(self.getFilename())
+
+ def getName(self):
+ return self.name
+
+ def getStep(self):
+ return self.step
+
+ def isFinished(self):
+ return self.finished
+ def waitUntilFinished(self):
+ if self.finished:
+ d = defer.succeed(self)
+ else:
+ d = defer.Deferred()
+ self.finishedWatchers.append(d)
+ return d
+
+ def getFile(self):
+ if self.openfile:
+ # this is the filehandle we're using to write to the log, so
+ # don't close it!
+ return self.openfile
+ # otherwise they get their own read-only handle
+ # try a compressed log first
+ try:
+ return BZ2File(self.getFilename() + ".bz2", "r")
+ except IOError:
+ pass
+ return open(self.getFilename(), "r")
+
+ def getText(self):
+ # this produces one ginormous string
+ return "".join(self.getChunks([STDOUT, STDERR], onlyText=True))
+
+ def getTextWithHeaders(self):
+ return "".join(self.getChunks(onlyText=True))
+
+ def getChunks(self, channels=[], onlyText=False):
+ # generate chunks for everything that was logged at the time we were
+ # first called, so remember how long the file was when we started.
+ # Don't read beyond that point. The current contents of
+ # self.runEntries will follow.
+
+ # this returns an iterator, which means arbitrary things could happen
+ # while we're yielding. This will faithfully deliver the log as it
+ # existed when it was started, and not return anything after that
+ # point. To use this in subscribe(catchup=True) without missing any
+ # data, you must ensure that nothing will be added to the log during
+ # yield() calls.
+
+ f = self.getFile()
+ offset = 0
+ f.seek(0, 2)
+ remaining = f.tell()
+
+ leftover = None
+ if self.runEntries and (not channels or
+ (self.runEntries[0][0] in channels)):
+ leftover = (self.runEntries[0][0],
+ "".join([c[1] for c in self.runEntries]))
+
+ # freeze the state of the LogFile by passing a lot of parameters into
+ # a generator
+ return self._generateChunks(f, offset, remaining, leftover,
+ channels, onlyText)
+
+ def _generateChunks(self, f, offset, remaining, leftover,
+ channels, onlyText):
+ chunks = []
+ p = LogFileScanner(chunks.append, channels)
+ f.seek(offset)
+ data = f.read(min(remaining, self.BUFFERSIZE))
+ remaining -= len(data)
+ offset = f.tell()
+ while data:
+ p.dataReceived(data)
+ while chunks:
+ channel, text = chunks.pop(0)
+ if onlyText:
+ yield text
+ else:
+ yield (channel, text)
+ f.seek(offset)
+ data = f.read(min(remaining, self.BUFFERSIZE))
+ remaining -= len(data)
+ offset = f.tell()
+ del f
+
+ if leftover:
+ if onlyText:
+ yield leftover[1]
+ else:
+ yield leftover
+
+ def readlines(self, channel=STDOUT):
+ """Return an iterator that produces newline-terminated lines,
+ excluding header chunks."""
+ # TODO: make this memory-efficient, by turning it into a generator
+ # that retrieves chunks as necessary, like a pull-driven version of
+ # twisted.protocols.basic.LineReceiver
+ alltext = "".join(self.getChunks([channel], onlyText=True))
+ io = StringIO(alltext)
+ return io.readlines()
+
+ def subscribe(self, receiver, catchup):
+ if self.finished:
+ return
+ self.watchers.append(receiver)
+ if catchup:
+ for channel, text in self.getChunks():
+ # TODO: add logChunks(), to send over everything at once?
+ receiver.logChunk(self.step.build, self.step, self,
+ channel, text)
+
+ def unsubscribe(self, receiver):
+ if receiver in self.watchers:
+ self.watchers.remove(receiver)
+
+ def subscribeConsumer(self, consumer):
+ p = LogFileProducer(self, consumer)
+ p.resumeProducing()
+
+ # interface used by the build steps to add things to the log
+
+ def merge(self):
+ # merge all .runEntries (which are all of the same type) into a
+ # single chunk for .entries
+ if not self.runEntries:
+ return
+ channel = self.runEntries[0][0]
+ text = "".join([c[1] for c in self.runEntries])
+ assert channel < 10
+ f = self.openfile
+ f.seek(0, 2)
+ offset = 0
+ while offset < len(text):
+ size = min(len(text)-offset, self.chunkSize)
+ f.write("%d:%d" % (1 + size, channel))
+ f.write(text[offset:offset+size])
+ f.write(",")
+ offset += size
+ self.runEntries = []
+ self.runLength = 0
+
+ def addEntry(self, channel, text):
+ assert not self.finished
+ # we only add to .runEntries here. merge() is responsible for adding
+ # merged chunks to .entries
+ if self.runEntries and channel != self.runEntries[0][0]:
+ self.merge()
+ self.runEntries.append((channel, text))
+ self.runLength += len(text)
+ if self.runLength >= self.chunkSize:
+ self.merge()
+
+ for w in self.watchers:
+ w.logChunk(self.step.build, self.step, self, channel, text)
+ self.length += len(text)
+
+ def addStdout(self, text):
+ self.addEntry(STDOUT, text)
+ def addStderr(self, text):
+ self.addEntry(STDERR, text)
+ def addHeader(self, text):
+ self.addEntry(HEADER, text)
+
+ def finish(self):
+ self.merge()
+ if self.openfile:
+ # we don't do an explicit close, because there might be readers
+ # sharing the filehandle. As soon as they stop reading, the
+ # filehandle will be released and automatically closed. We will
+ # do a sync, however, to make sure the log gets saved in case of
+ # a crash.
+ self.openfile.flush()
+ os.fsync(self.openfile.fileno())
+ del self.openfile
+ self.finished = True
+ watchers = self.finishedWatchers
+ self.finishedWatchers = []
+ for w in watchers:
+ w.callback(self)
+ self.watchers = []
+
+
+ def compressLog(self):
+ compressed = self.getFilename() + ".bz2.tmp"
+ d = threads.deferToThread(self._compressLog, compressed)
+ d.addCallback(self._renameCompressedLog, compressed)
+ d.addErrback(self._cleanupFailedCompress, compressed)
+ return d
+
+ def _compressLog(self, compressed):
+ infile = self.getFile()
+ cf = BZ2File(compressed, 'w')
+ bufsize = 1024*1024
+ while True:
+ buf = infile.read(bufsize)
+ cf.write(buf)
+ if len(buf) < bufsize:
+ break
+ cf.close()
+ def _renameCompressedLog(self, rv, compressed):
+ filename = self.getFilename() + '.bz2'
+ if sys.platform == 'win32':
+ # windows cannot rename a file on top of an existing one, so
+ # fall back to delete-first. There are ways this can fail and
+ # lose the builder's history, so we avoid using it in the
+ # general (non-windows) case
+ if os.path.exists(filename):
+ os.unlink(filename)
+ os.rename(compressed, filename)
+ _tryremove(self.getFilename(), 1, 5)
+ def _cleanupFailedCompress(self, failure, compressed):
+ log.msg("failed to compress %s" % self.getFilename())
+ if os.path.exists(compressed):
+ _tryremove(compressed, 1, 5)
+ failure.trap() # reraise the failure
+
+ # persistence stuff
+ def __getstate__(self):
+ d = self.__dict__.copy()
+ del d['step'] # filled in upon unpickling
+ del d['watchers']
+ del d['finishedWatchers']
+ d['entries'] = [] # let 0.6.4 tolerate the saved log. TODO: really?
+ if d.has_key('finished'):
+ del d['finished']
+ if d.has_key('openfile'):
+ del d['openfile']
+ return d
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+ self.watchers = [] # probably not necessary
+ self.finishedWatchers = [] # same
+ # self.step must be filled in by our parent
+ self.finished = True
+
+ def upgrade(self, logfilename):
+ """Save our .entries to a new-style offline log file (if necessary),
+ and modify our in-memory representation to use it. The original
+ pickled LogFile (inside the pickled Build) won't be modified."""
+ self.filename = logfilename
+ if not os.path.exists(self.getFilename()):
+ self.openfile = open(self.getFilename(), "w")
+ self.finished = False
+ for channel,text in self.entries:
+ self.addEntry(channel, text)
+ self.finish() # releases self.openfile, which will be closed
+ del self.entries
+
+class HTMLLogFile:
+ implements(interfaces.IStatusLog)
+
+ filename = None
+
+ def __init__(self, parent, name, logfilename, html):
+ self.step = parent
+ self.name = name
+ self.filename = logfilename
+ self.html = html
+
+ def getName(self):
+ return self.name # set in BuildStepStatus.addLog
+ def getStep(self):
+ return self.step
+
+ def isFinished(self):
+ return True
+ def waitUntilFinished(self):
+ return defer.succeed(self)
+
+ def hasContents(self):
+ return True
+ def getText(self):
+ return self.html # looks kinda like text
+ def getTextWithHeaders(self):
+ return self.html
+ def getChunks(self):
+ return [(STDERR, self.html)]
+
+ def subscribe(self, receiver, catchup):
+ pass
+ def unsubscribe(self, receiver):
+ pass
+
+ def finish(self):
+ pass
+
+ def __getstate__(self):
+ d = self.__dict__.copy()
+ del d['step']
+ return d
+
+ def upgrade(self, logfilename):
+ pass
+
+
+class Event:
+ implements(interfaces.IStatusEvent)
+
+ started = None
+ finished = None
+ text = []
+
+ # IStatusEvent methods
+ def getTimes(self):
+ return (self.started, self.finished)
+ def getText(self):
+ return self.text
+ def getLogs(self):
+ return []
+
+ def finish(self):
+ self.finished = util.now()
+
+class TestResult:
+ implements(interfaces.ITestResult)
+
+ def __init__(self, name, results, text, logs):
+ assert isinstance(name, tuple)
+ self.name = name
+ self.results = results
+ self.text = text
+ self.logs = logs
+
+ def getName(self):
+ return self.name
+
+ def getResults(self):
+ return self.results
+
+ def getText(self):
+ return self.text
+
+ def getLogs(self):
+ return self.logs
+
+
+class BuildSetStatus:
+ implements(interfaces.IBuildSetStatus)
+
+ def __init__(self, source, reason, builderNames, bsid=None):
+ self.source = source
+ self.reason = reason
+ self.builderNames = builderNames
+ self.id = bsid
+ self.successWatchers = []
+ self.finishedWatchers = []
+ self.stillHopeful = True
+ self.finished = False
+
+ def setBuildRequestStatuses(self, buildRequestStatuses):
+ self.buildRequests = buildRequestStatuses
+ def setResults(self, results):
+ # the build set succeeds only if all its component builds succeed
+ self.results = results
+ def giveUpHope(self):
+ self.stillHopeful = False
+
+
+ def notifySuccessWatchers(self):
+ for d in self.successWatchers:
+ d.callback(self)
+ self.successWatchers = []
+
+ def notifyFinishedWatchers(self):
+ self.finished = True
+ for d in self.finishedWatchers:
+ d.callback(self)
+ self.finishedWatchers = []
+
+ # methods for our clients
+
+ def getSourceStamp(self):
+ return self.source
+ def getReason(self):
+ return self.reason
+ def getResults(self):
+ return self.results
+ def getID(self):
+ return self.id
+
+ def getBuilderNames(self):
+ return self.builderNames
+ def getBuildRequests(self):
+ return self.buildRequests
+ def isFinished(self):
+ return self.finished
+
+ def waitUntilSuccess(self):
+ if self.finished or not self.stillHopeful:
+ # the deferreds have already fired
+ return defer.succeed(self)
+ d = defer.Deferred()
+ self.successWatchers.append(d)
+ return d
+
+ def waitUntilFinished(self):
+ if self.finished:
+ return defer.succeed(self)
+ d = defer.Deferred()
+ self.finishedWatchers.append(d)
+ return d
+
+class BuildRequestStatus:
+ implements(interfaces.IBuildRequestStatus)
+
+ def __init__(self, source, builderName):
+ self.source = source
+ self.builderName = builderName
+ self.builds = [] # list of BuildStatus objects
+ self.observers = []
+ self.submittedAt = None
+
+ def buildStarted(self, build):
+ self.builds.append(build)
+ for o in self.observers[:]:
+ o(build)
+
+ # methods called by our clients
+ def getSourceStamp(self):
+ return self.source
+ def getBuilderName(self):
+ return self.builderName
+ def getBuilds(self):
+ return self.builds
+
+ def subscribe(self, observer):
+ self.observers.append(observer)
+ for b in self.builds:
+ observer(b)
+ def unsubscribe(self, observer):
+ self.observers.remove(observer)
+
+ def getSubmitTime(self):
+ return self.submittedAt
+ def setSubmitTime(self, t):
+ self.submittedAt = t
+
+
+class BuildStepStatus(styles.Versioned):
+ """
+ I represent a collection of output status for a
+ L{buildbot.process.step.BuildStep}.
+
+ Statistics contain any information gleaned from a step that is
+ not in the form of a logfile. As an example, steps that run
+ tests might gather statistics about the number of passed, failed,
+ or skipped tests.
+
+ @type progress: L{buildbot.status.progress.StepProgress}
+ @cvar progress: tracks ETA for the step
+ @type text: list of strings
+ @cvar text: list of short texts that describe the command and its status
+ @type text2: list of strings
+ @cvar text2: list of short texts added to the overall build description
+ @type logs: dict of string -> L{buildbot.status.builder.LogFile}
+ @ivar logs: logs of steps
+ @type statistics: dict
+ @ivar statistics: results from running this step
+ """
+ # note that these are created when the Build is set up, before each
+ # corresponding BuildStep has started.
+ implements(interfaces.IBuildStepStatus, interfaces.IStatusEvent)
+ persistenceVersion = 2
+
+ started = None
+ finished = None
+ progress = None
+ text = []
+ results = (None, [])
+ text2 = []
+ watchers = []
+ updates = {}
+ finishedWatchers = []
+ statistics = {}
+
+ def __init__(self, parent):
+ assert interfaces.IBuildStatus(parent)
+ self.build = parent
+ self.logs = []
+ self.urls = {}
+ self.watchers = []
+ self.updates = {}
+ self.finishedWatchers = []
+ self.statistics = {}
+
+ def getName(self):
+ """Returns a short string with the name of this step. This string
+ may have spaces in it."""
+ return self.name
+
+ def getBuild(self):
+ return self.build
+
+ def getTimes(self):
+ return (self.started, self.finished)
+
+ def getExpectations(self):
+ """Returns a list of tuples (name, current, target)."""
+ if not self.progress:
+ return []
+ ret = []
+ metrics = self.progress.progress.keys()
+ metrics.sort()
+ for m in metrics:
+ t = (m, self.progress.progress[m], self.progress.expectations[m])
+ ret.append(t)
+ return ret
+
+ def getLogs(self):
+ return self.logs
+
+ def getURLs(self):
+ return self.urls.copy()
+
+ def isFinished(self):
+ return (self.finished is not None)
+
+ def waitUntilFinished(self):
+ if self.finished:
+ d = defer.succeed(self)
+ else:
+ d = defer.Deferred()
+ self.finishedWatchers.append(d)
+ return d
+
+ # while the step is running, the following methods make sense.
+ # Afterwards they return None
+
+ def getETA(self):
+ if self.started is None:
+ return None # not started yet
+ if self.finished is not None:
+ return None # already finished
+ if not self.progress:
+ return None # no way to predict
+ return self.progress.remaining()
+
+ # Once you know the step has finished, the following methods are legal.
+ # Before this step has finished, they all return None.
+
+ def getText(self):
+ """Returns a list of strings which describe the step. These are
+ intended to be displayed in a narrow column. If more space is
+ available, the caller should join them together with spaces before
+ presenting them to the user."""
+ return self.text
+
+ def getResults(self):
+ """Return a tuple describing the results of the step.
+ 'result' is one of the constants in L{buildbot.status.builder}:
+ SUCCESS, WARNINGS, FAILURE, or SKIPPED.
+ 'strings' is an optional list of strings that the step wants to
+ append to the overall build's results. These strings are usually
+ more terse than the ones returned by getText(): in particular,
+ successful Steps do not usually contribute any text to the
+ overall build.
+
+ @rtype: tuple of int, list of strings
+ @returns: (result, strings)
+ """
+ return (self.results, self.text2)
+
+ def hasStatistic(self, name):
+ """Return true if this step has a value for the given statistic.
+ """
+ return self.statistics.has_key(name)
+
+ def getStatistic(self, name, default=None):
+ """Return the given statistic, if present
+ """
+ return self.statistics.get(name, default)
+
+ # subscription interface
+
+ def subscribe(self, receiver, updateInterval=10):
+ # will get logStarted, logFinished, stepETAUpdate
+ assert receiver not in self.watchers
+ self.watchers.append(receiver)
+ self.sendETAUpdate(receiver, updateInterval)
+
+ def sendETAUpdate(self, receiver, updateInterval):
+ self.updates[receiver] = None
+ # they might unsubscribe during stepETAUpdate
+ receiver.stepETAUpdate(self.build, self,
+ self.getETA(), self.getExpectations())
+ if receiver in self.watchers:
+ self.updates[receiver] = reactor.callLater(updateInterval,
+ self.sendETAUpdate,
+ receiver,
+ updateInterval)
+
+ def unsubscribe(self, receiver):
+ if receiver in self.watchers:
+ self.watchers.remove(receiver)
+ if receiver in self.updates:
+ if self.updates[receiver] is not None:
+ self.updates[receiver].cancel()
+ del self.updates[receiver]
+
+
+ # methods to be invoked by the BuildStep
+
+ def setName(self, stepname):
+ self.name = stepname
+
+ def setColor(self, color):
+ log.msg("BuildStepStatus.setColor is no longer supported -- ignoring color %s" % (color,))
+
+ def setProgress(self, stepprogress):
+ self.progress = stepprogress
+
+ def stepStarted(self):
+ self.started = util.now()
+ if self.build:
+ self.build.stepStarted(self)
+
+ def addLog(self, name):
+ assert self.started # addLog before stepStarted won't notify watchers
+ logfilename = self.build.generateLogfileName(self.name, name)
+ log = LogFile(self, name, logfilename)
+ self.logs.append(log)
+ for w in self.watchers:
+ receiver = w.logStarted(self.build, self, log)
+ if receiver:
+ log.subscribe(receiver, True)
+ d = log.waitUntilFinished()
+ d.addCallback(lambda log: log.unsubscribe(receiver))
+ d = log.waitUntilFinished()
+ d.addCallback(self.logFinished)
+ return log
+
    def addHTMLLog(self, name, html):
        # Create an HTMLLogFile (complete at creation time), tell the
        # watchers about it, and immediately report it finished. Unlike
        # addLog() there is no streaming phase and nothing is returned.
        assert self.started # addLog before stepStarted won't notify watchers
        logfilename = self.build.generateLogfileName(self.name, name)
        log = HTMLLogFile(self, name, logfilename, html)
        self.logs.append(log)
        for w in self.watchers:
            receiver = w.logStarted(self.build, self, log)
            # TODO: think about this: there isn't much point in letting
            # them subscribe
            #if receiver:
            #    log.subscribe(receiver, True)
            w.logFinished(self.build, self, log)
+
+ def logFinished(self, log):
+ for w in self.watchers:
+ w.logFinished(self.build, self, log)
+
+ def addURL(self, name, url):
+ self.urls[name] = url
+
+ def setText(self, text):
+ self.text = text
+ for w in self.watchers:
+ w.stepTextChanged(self.build, self, text)
+ def setText2(self, text):
+ self.text2 = text
+ for w in self.watchers:
+ w.stepText2Changed(self.build, self, text)
+
+ def setStatistic(self, name, value):
+ """Set the given statistic. Usually called by subclasses.
+ """
+ self.statistics[name] = value
+
    def stepFinished(self, results):
        # Record completion: stamp the finish time, close any still-open
        # logs, optionally compress on-disk LogFiles, cancel pending
        # ETA-update timers and fire the waitUntilFinished() deferreds.
        self.finished = util.now()
        self.results = results
        cld = [] # deferreds for log compression
        logCompressionLimit = self.build.builder.logCompressionLimit
        for loog in self.logs:
            if not loog.isFinished():
                loog.finish()
            # if log compression is on, and it's a real LogFile,
            # HTMLLogFiles aren't files
            if logCompressionLimit is not False and \
                    isinstance(loog, LogFile):
                if os.path.getsize(loog.getFilename()) > logCompressionLimit:
                    cld.append(loog.compressLog())

        # NOTE: iterating .keys() (a list copy under Python 2) is what
        # makes the del inside the loop safe
        for r in self.updates.keys():
            if self.updates[r] is not None:
                self.updates[r].cancel()
            del self.updates[r]

        watchers = self.finishedWatchers
        self.finishedWatchers = []
        for w in watchers:
            w.callback(self)
        # returning the DeferredList lets the caller wait for compression
        if cld:
            return defer.DeferredList(cld)
+
+ # persistence
+
+ def __getstate__(self):
+ d = styles.Versioned.__getstate__(self)
+ del d['build'] # filled in when loading
+ if d.has_key('progress'):
+ del d['progress']
+ del d['watchers']
+ del d['finishedWatchers']
+ del d['updates']
+ return d
+
+ def __setstate__(self, d):
+ styles.Versioned.__setstate__(self, d)
+ # self.build must be filled in by our parent
+ for loog in self.logs:
+ loog.step = self
+
+ def upgradeToVersion1(self):
+ if not hasattr(self, "urls"):
+ self.urls = {}
+
+ def upgradeToVersion2(self):
+ if not hasattr(self, "statistics"):
+ self.statistics = {}
+
+
class BuildStatus(styles.Versioned):
    """Status of a single Build: its source stamp, steps, text/results,
    timing, and the watchers subscribed to it. Pickled into the Builder's
    basedir when the build finishes."""
    implements(interfaces.IBuildStatus, interfaces.IStatusEvent)
    persistenceVersion = 3

    # class-level defaults, so instances unpickled from older versions
    # still have every attribute
    source = None
    reason = None
    changes = []
    blamelist = []
    requests = []
    progress = None
    started = None
    finished = None
    currentStep = None
    text = []
    results = None
    slavename = "???"

    # these lists/dicts are defined here so that unserialized instances have
    # (empty) values. They are set in __init__ to new objects to make sure
    # each instance gets its own copy.
    watchers = []
    updates = {}
    finishedWatchers = []
    testResults = {}

    def __init__(self, parent, number):
        """
        @type  parent: L{BuilderStatus}
        @type  number: int
        """
        assert interfaces.IBuilderStatus(parent)
        self.builder = parent
        self.number = number
        self.watchers = []
        self.updates = {}
        self.finishedWatchers = []
        self.steps = []
        self.testResults = {}
        self.properties = Properties()
        self.requests = []

    # IBuildStatus

    def getBuilder(self):
        """
        @rtype: L{BuilderStatus}
        """
        return self.builder

    def getProperty(self, propname):
        """Return the value of a single build property."""
        return self.properties[propname]

    def getProperties(self):
        """Return the full Properties object for this build."""
        return self.properties

    def getNumber(self):
        """Return this build's number (unique within its Builder)."""
        return self.number

    def getPreviousBuild(self):
        """Return the previous BuildStatus on this builder, or None for
        build #0 (or if the previous build cannot be loaded)."""
        if self.number == 0:
            return None
        return self.builder.getBuild(self.number-1)

    def getSourceStamp(self, absolute=False):
        """Return the SourceStamp this build was run against; when
        *absolute* is true and a 'got_revision' property exists, pin the
        stamp to the actual revision that was checked out."""
        if not absolute or not self.properties.has_key('got_revision'):
            return self.source
        return self.source.getAbsoluteSourceStamp(self.properties['got_revision'])

    def getReason(self):
        """Return the human-readable reason the build was started."""
        return self.reason

    def getChanges(self):
        """Return the list of Changes that went into this build."""
        return self.changes

    def getRequests(self):
        """Return the BuildRequestStatus objects that caused this build."""
        return self.requests

    def getResponsibleUsers(self):
        """Return the blamelist: users whose changes went into the build."""
        return self.blamelist

    def getInterestedUsers(self):
        # TODO: the Builder should add others: sheriffs, domain-owners
        return self.blamelist + self.properties.getProperty('owners', [])

    def getSteps(self):
        """Return a list of IBuildStepStatus objects. For invariant builds
        (those which always use the same set of Steps), this should be the
        complete list, however some of the steps may not have started yet
        (step.getTimes()[0] will be None). For variant builds, this may not
        be complete (asking again later may give you more of them)."""
        return self.steps

    def getTimes(self):
        """Return (start, finish) timestamps; finish is None while running."""
        return (self.started, self.finished)

    _sentinel = [] # used as a sentinel to indicate unspecified initial_value
    def getSummaryStatistic(self, name, summary_fn, initial_value=_sentinel):
        """Summarize the named statistic over all steps in which it
        exists, using combination_fn and initial_value to combine multiple
        results into a single result.  This translates to a call to Python's
        X{reduce}::
            return reduce(summary_fn, step_stats_list, initial_value)
        """
        step_stats_list = [
                st.getStatistic(name)
                for st in self.steps
                if st.hasStatistic(name) ]
        if initial_value is self._sentinel:
            return reduce(summary_fn, step_stats_list)
        else:
            return reduce(summary_fn, step_stats_list, initial_value)

    def isFinished(self):
        """True once buildFinished() has stamped a finish time."""
        return (self.finished is not None)

    def waitUntilFinished(self):
        """Return a Deferred that fires (with this BuildStatus) when the
        build completes; fires immediately if it is already finished."""
        if self.finished:
            d = defer.succeed(self)
        else:
            d = defer.Deferred()
            self.finishedWatchers.append(d)
        return d

    # while the build is running, the following methods make sense.
    # Afterwards they return None

    def getETA(self):
        """Seconds until the build is expected to finish, or None if the
        build is done or no progress estimate is available."""
        if self.finished is not None:
            return None
        if not self.progress:
            return None
        eta = self.progress.eta()
        if eta is None:
            return None
        return eta - util.now()

    def getCurrentStep(self):
        """Return the BuildStepStatus currently running, if any."""
        return self.currentStep

    # Once you know the build has finished, the following methods are legal.
    # Before ths build has finished, they all return None.

    def getText(self):
        """Return the build's summary text plus each step's text2."""
        text = []
        text.extend(self.text)
        for s in self.steps:
            text.extend(s.text2)
        return text

    def getResults(self):
        """Return the overall result code of the build."""
        return self.results

    def getSlavename(self):
        """Return the name of the slave this build ran on."""
        return self.slavename

    def getTestResults(self):
        """Return the dict of itemized test results, keyed by test name."""
        return self.testResults

    def getLogs(self):
        # TODO: steps should contribute significant logs instead of this
        # hack, which returns every log from every step. The logs should get
        # names like "compile" and "test" instead of "compile.output"
        logs = []
        for s in self.steps:
            for log in s.getLogs():
                logs.append(log)
        return logs

    # subscription interface

    def subscribe(self, receiver, updateInterval=None):
        # will receive stepStarted and stepFinished messages
        # and maybe buildETAUpdate
        self.watchers.append(receiver)
        if updateInterval is not None:
            self.sendETAUpdate(receiver, updateInterval)

    def sendETAUpdate(self, receiver, updateInterval):
        # deliver one buildETAUpdate, then reschedule ourselves while the
        # receiver stays subscribed
        self.updates[receiver] = None
        ETA = self.getETA()
        if ETA is not None:
            # reuse the ETA computed above instead of calling getETA() twice
            receiver.buildETAUpdate(self, ETA)
        # they might have unsubscribed during buildETAUpdate
        if receiver in self.watchers:
            self.updates[receiver] = reactor.callLater(updateInterval,
                                                       self.sendETAUpdate,
                                                       receiver,
                                                       updateInterval)

    def unsubscribe(self, receiver):
        """Stop notifying *receiver* and cancel any pending ETA timer."""
        if receiver in self.watchers:
            self.watchers.remove(receiver)
        if receiver in self.updates:
            if self.updates[receiver] is not None:
                self.updates[receiver].cancel()
            del self.updates[receiver]

    # methods for the base.Build to invoke

    def addStepWithName(self, name):
        """The Build is setting up, and has added a new BuildStep to its
        list. Create a BuildStepStatus object to which it can send status
        updates."""

        s = BuildStepStatus(self)
        s.setName(name)
        self.steps.append(s)
        return s

    def setProperty(self, propname, value, source):
        """Set a build property, remembering which source supplied it."""
        self.properties.setProperty(propname, value, source)

    def addTestResult(self, result):
        """Store an itemized test result, keyed by its name."""
        self.testResults[result.getName()] = result

    def setSourceStamp(self, sourceStamp):
        """Record the SourceStamp (and its Changes) for this build."""
        self.source = sourceStamp
        self.changes = self.source.changes

    def setRequests(self, requests):
        """Record the BuildRequestStatus objects that triggered us."""
        self.requests = requests

    def setReason(self, reason):
        self.reason = reason
    def setBlamelist(self, blamelist):
        self.blamelist = blamelist
    def setProgress(self, progress):
        self.progress = progress

    def buildStarted(self, build):
        """The Build has been set up and is about to be started. It can now
        be safely queried, so it is time to announce the new build."""

        self.started = util.now()
        # now that we're ready to report status, let the BuilderStatus tell
        # the world about us
        self.builder.buildStarted(self)

    def setSlavename(self, slavename):
        self.slavename = slavename

    def setText(self, text):
        assert isinstance(text, (list, tuple))
        self.text = text
    def setResults(self, results):
        self.results = results

    def buildFinished(self):
        """Stamp the finish time, cancel pending ETA timers and fire the
        waitUntilFinished() deferreds."""
        self.currentStep = None
        self.finished = util.now()

        # iterating .keys() (a list copy under Python 2) makes the del safe
        for r in self.updates.keys():
            if self.updates[r] is not None:
                self.updates[r].cancel()
            del self.updates[r]

        watchers = self.finishedWatchers
        self.finishedWatchers = []
        for w in watchers:
            w.callback(self)

    # methods called by our BuildStepStatus children

    def stepStarted(self, step):
        """A child step has started: remember it as current and notify
        watchers, subscribing any receiver a watcher returns."""
        self.currentStep = step
        for w in self.watchers:
            receiver = w.stepStarted(self, step)
            if receiver:
                if type(receiver) == type(()):
                    step.subscribe(receiver[0], receiver[1])
                else:
                    step.subscribe(receiver)
                d = step.waitUntilFinished()
                # bind `receiver` as a default argument: a plain closure
                # would make every callback unsubscribe only the receiver
                # from the final loop iteration
                d.addCallback(lambda step, receiver=receiver:
                              step.unsubscribe(receiver))

        step.waitUntilFinished().addCallback(self._stepFinished)

    def _stepFinished(self, step):
        # relay a child step's completion (with its results) to watchers
        results = step.getResults()
        for w in self.watchers:
            w.stepFinished(self, step, results)

    # methods called by our BuilderStatus parent

    def pruneLogs(self):
        # this build is somewhat old: remove the build logs to save space
        # TODO: delete logs visible through IBuildStatus.getLogs
        for s in self.steps:
            s.pruneLogs()

    def pruneSteps(self):
        # this build is very old: remove the build steps too
        self.steps = []

    # persistence stuff

    def generateLogfileName(self, stepname, logname):
        """Return a filename (relative to the Builder's base directory) where
        the logfile's contents can be stored uniquely.

        The base filename is made by combining our build number, the Step's
        name, and the log's name, then removing unsuitable characters. The
        filename is then made unique by appending _0, _1, etc, until it does
        not collide with any other logfile.

        These files are kept in the Builder's basedir (rather than a
        per-Build subdirectory) because that makes cleanup easier: cron and
        find will help get rid of the old logs, but the empty directories are
        more of a hassle to remove."""

        starting_filename = "%d-log-%s-%s" % (self.number, stepname, logname)
        starting_filename = re.sub(r'[^\w\.\-]', '_', starting_filename)
        # now make it unique
        unique_counter = 0
        filename = starting_filename
        while filename in [l.filename
                           for step in self.steps
                           for l in step.getLogs()
                           if l.filename]:
            filename = "%s_%d" % (starting_filename, unique_counter)
            unique_counter += 1
        return filename

    def __getstate__(self):
        d = styles.Versioned.__getstate__(self)
        # for now, a serialized Build is always "finished". We will never
        # save unfinished builds.
        if not self.finished:
            d['finished'] = True
            # TODO: push an "interrupted" step so it is clear that the build
            # was interrupted. The builder will have a 'shutdown' event, but
            # someone looking at just this build will be confused as to why
            # the last log is truncated.
        del d['builder'] # filled in by our parent when loading
        del d['watchers']
        del d['updates']
        del d['requests']
        del d['finishedWatchers']
        return d

    def __setstate__(self, d):
        styles.Versioned.__setstate__(self, d)
        # self.builder must be filled in by our parent when loading
        for step in self.steps:
            step.build = self
        self.watchers = []
        self.updates = {}
        self.finishedWatchers = []

    def upgradeToVersion1(self):
        if hasattr(self, "sourceStamp"):
            # the old .sourceStamp attribute wasn't actually very useful
            maxChangeNumber, patch = self.sourceStamp
            changes = getattr(self, 'changes', [])
            source = sourcestamp.SourceStamp(branch=None,
                                             revision=None,
                                             patch=patch,
                                             changes=changes)
            self.source = source
            self.changes = source.changes
            del self.sourceStamp

    def upgradeToVersion2(self):
        self.properties = {}

    def upgradeToVersion3(self):
        # in version 3, self.properties became a Properties object
        propdict = self.properties
        self.properties = Properties()
        self.properties.update(propdict, "Upgrade from previous version")

    def upgradeLogfiles(self):
        # upgrade any LogFiles that need it. This must occur after we've been
        # attached to our Builder, and after we know about all LogFiles of
        # all Steps (to get the filenames right).
        assert self.builder
        for s in self.steps:
            for l in s.getLogs():
                if l.filename:
                    pass # new-style, log contents are on disk
                else:
                    logfilename = self.generateLogfileName(s.name, l.name)
                    # let the logfile update its .filename pointer,
                    # transferring its contents onto disk if necessary
                    l.upgrade(logfilename)

    def saveYourself(self):
        """Pickle this (finished) build into the builder's basedir,
        writing to a .tmp file and renaming for atomicity."""
        filename = os.path.join(self.builder.basedir, "%d" % self.number)
        if os.path.isdir(filename):
            # leftover from 0.5.0, which stored builds in directories
            shutil.rmtree(filename, ignore_errors=True)
        tmpfilename = filename + ".tmp"
        try:
            dump(self, open(tmpfilename, "wb"), -1)
            if sys.platform == 'win32':
                # windows cannot rename a file on top of an existing one, so
                # fall back to delete-first. There are ways this can fail and
                # lose the builder's history, so we avoid using it in the
                # general (non-windows) case
                if os.path.exists(filename):
                    os.unlink(filename)
            os.rename(tmpfilename, filename)
        except:
            log.msg("unable to save build %s-#%d" % (self.builder.name,
                                                     self.number))
            log.err()
+
+
+
class BuilderStatus(styles.Versioned):
    """I handle status information for a single process.base.Builder object.
    That object sends status changes to me (frequently as Events), and I
    provide them on demand to the various status recipients, like the HTML
    waterfall display and the live status clients. It also sends build
    summaries to me, which I log and provide to status clients who aren't
    interested in seeing details of the individual build steps.

    I am responsible for maintaining the list of historic Events and Builds,
    pruning old ones, and loading them from / saving them to disk.

    I live in the buildbot.process.base.Builder object, in the
    .builder_status attribute.

    @type  category: string
    @ivar  category: user-defined category this builder belongs to; can be
                     used to filter on in status clients
    """

    implements(interfaces.IBuilderStatus, interfaces.IEventSource)
    persistenceVersion = 1

    # these limit the amount of memory we consume, as well as the size of the
    # main Builder pickle. The Build and LogFile pickles on disk must be
    # handled separately.
    buildCacheSize = 30
    buildHorizon = 100 # forget builds beyond this
    stepHorizon = 50 # forget steps in builds beyond this

    category = None
    currentBigState = "offline" # or idle/waiting/interlocked/building
    basedir = None # filled in by our parent

    def __init__(self, buildername, category=None):
        self.name = buildername
        self.category = category

        self.slavenames = []
        self.events = []
        # these three hold Events, and are used to retrieve the current
        # state of the boxes.
        self.lastBuildStatus = None
        #self.currentBig = None
        #self.currentSmall = None
        self.currentBuilds = []
        self.pendingBuilds = []
        self.nextBuild = None
        self.watchers = []
        self.buildCache = [] # TODO: age builds out of the cache
        self.logCompressionLimit = False # default to no compression for tests

    # persistence

    def __getstate__(self):
        # when saving, don't record transient stuff like what builds are
        # currently running, because they won't be there when we start back
        # up. Nor do we save self.watchers, nor anything that gets set by our
        # parent like .basedir and .status
        d = styles.Versioned.__getstate__(self)
        d['watchers'] = []
        del d['buildCache']
        for b in self.currentBuilds:
            b.saveYourself()
            # TODO: push a 'hey, build was interrupted' event
        del d['currentBuilds']
        del d['pendingBuilds']
        del d['currentBigState']
        del d['basedir']
        del d['status']
        del d['nextBuildNumber']
        return d

    def __setstate__(self, d):
        # when loading, re-initialize the transient stuff. Remember that
        # upgradeToVersion1 and such will be called after this finishes.
        styles.Versioned.__setstate__(self, d)
        self.buildCache = []
        self.currentBuilds = []
        self.pendingBuilds = []
        self.watchers = []
        self.slavenames = []
        # self.basedir must be filled in by our parent
        # self.status must be filled in by our parent

    def upgradeToVersion1(self):
        # version-0 pickles had a single .slavename and a persisted
        # .nextBuildNumber
        if hasattr(self, 'slavename'):
            self.slavenames = [self.slavename]
            del self.slavename
        if hasattr(self, 'nextBuildNumber'):
            del self.nextBuildNumber # determineNextBuildNumber chooses this

    def determineNextBuildNumber(self):
        """Scan our directory of saved BuildStatus instances to determine
        what our self.nextBuildNumber should be. Set it one larger than the
        highest-numbered build we discover. This is called by the top-level
        Status object shortly after we are created or loaded from disk.
        """
        existing_builds = [int(f)
                           for f in os.listdir(self.basedir)
                           if re.match(r"^\d+$", f)]
        if existing_builds:
            self.nextBuildNumber = max(existing_builds) + 1
        else:
            self.nextBuildNumber = 0

    def setLogCompressionLimit(self, lowerLimit):
        """Set the size (bytes) above which step logs get compressed."""
        self.logCompressionLimit = lowerLimit

    def saveYourself(self):
        """Pickle this builder (and any interrupted cached builds) to
        'builder' in our basedir, via a .tmp file and rename."""
        for b in self.buildCache:
            # NOTE: isFinished is a method; the original tested the bound
            # method object itself (`not b.isFinished`), which is always
            # false, so interrupted builds were never saved here
            if not b.isFinished():
                # interrupted build, need to save it anyway.
                # BuildStatus.saveYourself will mark it as interrupted.
                b.saveYourself()
        filename = os.path.join(self.basedir, "builder")
        tmpfilename = filename + ".tmp"
        try:
            dump(self, open(tmpfilename, "wb"), -1)
            if sys.platform == 'win32':
                # windows cannot rename a file on top of an existing one
                if os.path.exists(filename):
                    os.unlink(filename)
            os.rename(tmpfilename, filename)
        except:
            log.msg("unable to save builder %s" % self.name)
            log.err()


    # build cache management

    def addBuildToCache(self, build):
        """Add a build to the LRU-ish cache, evicting the oldest entries
        beyond buildCacheSize."""
        if build in self.buildCache:
            return
        self.buildCache.append(build)
        while len(self.buildCache) > self.buildCacheSize:
            self.buildCache.pop(0)

    def getBuildByNumber(self, number):
        """Return build *number*, checking currently-running builds, then
        the cache, then the on-disk pickle. Raises IndexError when the
        pickle is missing or corrupt."""
        for b in self.currentBuilds:
            if b.number == number:
                return b
        for build in self.buildCache:
            if build.number == number:
                return build
        filename = os.path.join(self.basedir, "%d" % number)
        try:
            build = load(open(filename, "rb"))
            styles.doUpgrade()
            build.builder = self
            # handle LogFiles from after 0.5.0 and before 0.6.5
            build.upgradeLogfiles()
            self.addBuildToCache(build)
            return build
        except IOError:
            raise IndexError("no such build %d" % number)
        except EOFError:
            raise IndexError("corrupted build pickle %d" % number)

    def prune(self):
        # deliberately disabled until it walks the on-disk pickles; the
        # code below only ever pruned the in-memory list
        return # TODO: change this to walk through the filesystem
        # first, blow away all builds beyond our build horizon
        self.builds = self.builds[-self.buildHorizon:]
        # then prune steps in builds past the step horizon
        for b in self.builds[0:-self.stepHorizon]:
            b.pruneSteps()

    # IBuilderStatus methods
    def getName(self):
        return self.name

    def getState(self):
        return (self.currentBigState, self.currentBuilds)

    def getSlaves(self):
        return [self.status.getSlave(name) for name in self.slavenames]

    def getPendingBuilds(self):
        return self.pendingBuilds

    def getCurrentBuilds(self):
        return self.currentBuilds

    def getLastFinishedBuild(self):
        # the most recent build may still be running; fall back one more
        b = self.getBuild(-1)
        if not (b and b.isFinished()):
            b = self.getBuild(-2)
        return b

    def getBuild(self, number):
        """Return the BuildStatus for build *number* (negative numbers
        count back from the most recent), or None if unavailable."""
        if number < 0:
            number = self.nextBuildNumber + number
        if number < 0 or number >= self.nextBuildNumber:
            return None

        try:
            return self.getBuildByNumber(number)
        except IndexError:
            return None

    def getEvent(self, number):
        """Return event *number* (negative indexes allowed), or None."""
        try:
            return self.events[number]
        except IndexError:
            return None

    def generateFinishedBuilds(self, branches=[],
                               num_builds=None,
                               max_buildnum=None,
                               finished_before=None,
                               max_search=200):
        """Generate finished builds, newest first, applying the various
        filters; scan at most *max_search* build numbers back."""
        got = 0
        for Nb in itertools.count(1):
            if Nb > self.nextBuildNumber:
                break
            if Nb > max_search:
                break
            build = self.getBuild(-Nb)
            if build is None:
                continue
            if max_buildnum is not None:
                if build.getNumber() > max_buildnum:
                    continue
            if not build.isFinished():
                continue
            if finished_before is not None:
                start, end = build.getTimes()
                if end >= finished_before:
                    continue
            if branches:
                if build.getSourceStamp().branch not in branches:
                    continue
            got += 1
            yield build
            if num_builds is not None:
                if got >= num_builds:
                    return

    def eventGenerator(self, branches=[]):
        """This function creates a generator which will provide all of this
        Builder's status events, starting with the most recent and
        progressing backwards in time. """

        # remember the oldest-to-earliest flow here. "next" means earlier.

        # TODO: interleave build steps and self.events by timestamp.
        # TODO: um, I think we're already doing that.

        # TODO: there's probably something clever we could do here to
        # interleave two event streams (one from self.getBuild and the other
        # from self.getEvent), which would be simpler than this control flow

        eventIndex = -1
        e = self.getEvent(eventIndex)
        for Nb in range(1, self.nextBuildNumber+1):
            b = self.getBuild(-Nb)
            if not b:
                break
            if branches and b.getSourceStamp().branch not in branches:
                continue
            steps = b.getSteps()
            for Ns in range(1, len(steps)+1):
                if steps[-Ns].started:
                    step_start = steps[-Ns].getTimes()[0]
                    # drain any standalone events newer than this step
                    while e is not None and e.getTimes()[0] > step_start:
                        yield e
                        eventIndex -= 1
                        e = self.getEvent(eventIndex)
                    yield steps[-Ns]
            yield b
        while e is not None:
            yield e
            eventIndex -= 1
            e = self.getEvent(eventIndex)

    def subscribe(self, receiver):
        # will get builderChangedState, buildStarted, and buildFinished
        self.watchers.append(receiver)
        self.publishState(receiver)

    def unsubscribe(self, receiver):
        self.watchers.remove(receiver)

    ## Builder interface (methods called by the Builder which feeds us)

    def setSlavenames(self, names):
        self.slavenames = names

    def addEvent(self, text=None):
        # this adds a duration event. When it is done, the user should call
        # e.finish(). They can also mangle it by modifying .text --
        # `text` therefore defaults to None rather than a shared mutable
        # [] that every no-arg call would alias (and could corrupt)
        if text is None:
            text = []
        e = Event()
        e.started = util.now()
        e.text = text
        self.events.append(e)
        return e # they are free to mangle it further

    def addPointEvent(self, text=None):
        # this adds a point event, one which occurs as a single atomic
        # instant of time. `text` defaults to None for the same
        # mutable-default reason as addEvent()
        if text is None:
            text = []
        e = Event()
        e.started = util.now()
        e.finished = 0
        e.text = text
        self.events.append(e)
        return e # for consistency, but they really shouldn't touch it

    def setBigState(self, state):
        needToUpdate = state != self.currentBigState
        self.currentBigState = state
        if needToUpdate:
            self.publishState()

    def publishState(self, target=None):
        """Send the current big state to one target (unicast) or to every
        watcher; watcher exceptions are logged, not propagated."""
        state = self.currentBigState

        if target is not None:
            # unicast
            target.builderChangedState(self.name, state)
            return
        for w in self.watchers:
            try:
                w.builderChangedState(self.name, state)
            except:
                log.msg("Exception caught publishing state to %r" % w)
                log.err()

    def newBuild(self):
        """The Builder has decided to start a build, but the Build object is
        not yet ready to report status (it has not finished creating the
        Steps). Create a BuildStatus object that it can use."""
        number = self.nextBuildNumber
        self.nextBuildNumber += 1
        # TODO: self.saveYourself(), to make sure we don't forget about the
        # build number we've just allocated. This is not quite as important
        # as it was before we switch to determineNextBuildNumber, but I think
        # it may still be useful to have the new build save itself.
        s = BuildStatus(self, number)
        s.waitUntilFinished().addCallback(self._buildFinished)
        return s

    def addBuildRequest(self, brstatus):
        self.pendingBuilds.append(brstatus)
        for w in self.watchers:
            w.requestSubmitted(brstatus)

    def removeBuildRequest(self, brstatus):
        self.pendingBuilds.remove(brstatus)

    # buildStarted is called by our child BuildStatus instances
    def buildStarted(self, s):
        """Now the BuildStatus object is ready to go (it knows all of its
        Steps, its ETA, etc), so it is safe to notify our watchers."""

        assert s.builder is self # paranoia
        assert s.number == self.nextBuildNumber - 1
        assert s not in self.currentBuilds
        self.currentBuilds.append(s)
        self.addBuildToCache(s)

        # now that the BuildStatus is prepared to answer queries, we can
        # announce the new build to all our watchers

        for w in self.watchers: # TODO: maybe do this later? callLater(0)?
            try:
                receiver = w.buildStarted(self.getName(), s)
                if receiver:
                    if type(receiver) == type(()):
                        s.subscribe(receiver[0], receiver[1])
                    else:
                        s.subscribe(receiver)
                    d = s.waitUntilFinished()
                    # bind `receiver` as a default argument: a plain
                    # closure would unsubscribe only the last watcher's
                    # receiver when these callbacks eventually fire
                    d.addCallback(lambda s, receiver=receiver:
                                  s.unsubscribe(receiver))
            except:
                log.msg("Exception caught notifying %r of buildStarted event" % w)
                log.err()

    def _buildFinished(self, s):
        # fired by the build's waitUntilFinished() deferred: persist it,
        # drop it from currentBuilds, and notify watchers
        assert s in self.currentBuilds
        s.saveYourself()
        self.currentBuilds.remove(s)

        name = self.getName()
        results = s.getResults()
        for w in self.watchers:
            try:
                w.buildFinished(name, s, results)
            except:
                log.msg("Exception caught notifying %r of buildFinished event" % w)
                log.err()

        self.prune() # conserve disk


    # waterfall display (history)

    # I want some kind of build event that holds everything about the build:
    # why, what changes went into it, the results of the build, itemized
    # test results, etc. But, I do kind of need something to be inserted in
    # the event log first, because intermixing step events and the larger
    # build event is fraught with peril. Maybe an Event-like-thing that
    # doesn't have a file in it but does have links. Hmm, that's exactly
    # what it does now. The only difference would be that this event isn't
    # pushed to the clients.

    # publish to clients
    def sendLastBuildStatus(self, client):
        #client.newLastBuildStatus(self.lastBuildStatus)
        pass
    def sendCurrentActivityBigToEveryone(self):
        # NOTE(review): self.subscribers is never initialized in __init__
        # or __setstate__ -- these legacy client methods look vestigial;
        # confirm before relying on them
        for s in self.subscribers:
            self.sendCurrentActivityBig(s)
    def sendCurrentActivityBig(self, client):
        state = self.currentBigState
        if state == "offline":
            client.currentlyOffline()
        elif state == "idle":
            client.currentlyIdle()
        elif state == "building":
            client.currentlyBuilding()
        else:
            log.msg("Hey, self.currentBigState is weird:", state)


    ## HTML display interface

    def getEventNumbered(self, num):
        """Return the Event with .number == num, or None if it has been
        pruned or lost."""
        # deal with dropped events, pruned events
        first = self.events[0].number
        if first + len(self.events)-1 != self.events[-1].number:
            log.msg(self,
                    "lost an event somewhere: [0] is %d, [%d] is %d" % \
                    (self.events[0].number,
                     len(self.events) - 1,
                     self.events[-1].number))
            for e in self.events:
                log.msg("e[%d]: " % e.number, e)
            return None
        offset = num - first
        log.msg(self, "offset", offset)
        try:
            return self.events[offset]
        except IndexError:
            return None

    ## Persistence of Status
    def loadYourOldEvents(self):
        if hasattr(self, "allEvents"):
            # first time, nothing to get from file. Note that this is only if
            # the Application gets .run() . If it gets .save()'ed, then the
            # .allEvents attribute goes away in the initial __getstate__ and
            # we try to load a non-existent file.
            return
        self.allEvents = self.loadFile("events", [])
        if self.allEvents:
            self.nextEventNumber = self.allEvents[-1].number + 1
        else:
            self.nextEventNumber = 0
    def saveYourOldEvents(self):
        self.saveFile("events", self.allEvents)

    ## clients

    def addClient(self, client):
        # NOTE(review): self.subscribers and self.currentSmall are never
        # initialized on this class -- legacy client path, confirm before use
        if client not in self.subscribers:
            self.subscribers.append(client)
            self.sendLastBuildStatus(client)
            self.sendCurrentActivityBig(client)
            client.newEvent(self.currentSmall)
    def removeClient(self, client):
        if client in self.subscribers:
            self.subscribers.remove(client)
+
class SlaveStatus:
    """Status tracker for one buildslave: identity/admin details,
    connection state, the builds currently running on it, and the
    'graceful shutdown' flag with its change-notification callbacks."""
    implements(interfaces.ISlaveStatus)

    # class-level defaults so attributes exist before the setters run
    admin = None
    host = None
    connected = False
    graceful_shutdown = False

    def __init__(self, name):
        self.name = name
        self._lastMessageReceived = 0
        self.runningBuilds = []
        self.graceful_callbacks = []

    # read accessors

    def getName(self):
        return self.name

    def getAdmin(self):
        return self.admin

    def getHost(self):
        return self.host

    def isConnected(self):
        return self.connected

    def lastMessageReceived(self):
        return self._lastMessageReceived

    def getRunningBuilds(self):
        return self.runningBuilds

    # write accessors

    def setAdmin(self, admin):
        self.admin = admin

    def setHost(self, host):
        self.host = host

    def setConnected(self, isConnected):
        self.connected = isConnected

    def setLastMessageReceived(self, when):
        self._lastMessageReceived = when

    # build bookkeeping

    def buildStarted(self, build):
        self.runningBuilds.append(build)

    def buildFinished(self, build):
        self.runningBuilds.remove(build)

    # graceful-shutdown flag

    def getGraceful(self):
        """Return the graceful shutdown flag"""
        return self.graceful_shutdown

    def setGraceful(self, graceful):
        """Set the graceful shutdown flag, and notify all the watchers"""
        self.graceful_shutdown = graceful
        for callback in self.graceful_callbacks:
            # deliver asynchronously so a watcher can't block us
            reactor.callLater(0, callback, graceful)

    def addGracefulWatcher(self, watcher):
        """Add watcher to the list of watchers to be notified when the
        graceful shutdown flag is changed."""
        if watcher not in self.graceful_callbacks:
            self.graceful_callbacks.append(watcher)

    def removeGracefulWatcher(self, watcher):
        """Remove watcher from the list of watchers to be notified when the
        graceful shutdown flag is changed."""
        if watcher in self.graceful_callbacks:
            self.graceful_callbacks.remove(watcher)
+
+class Status:
+ """
+ I represent the status of the buildmaster.
+ """
+ implements(interfaces.IStatus)
+
+ def __init__(self, botmaster, basedir):
+ """
+ @type botmaster: L{buildbot.master.BotMaster}
+ @param botmaster: the Status object uses C{.botmaster} to get at
+ both the L{buildbot.master.BuildMaster} (for
+ various buildbot-wide parameters) and the
+ actual Builders (to get at their L{BuilderStatus}
+ objects). It is not allowed to change or influence
+ anything through this reference.
+ @type basedir: string
+ @param basedir: this provides a base directory in which saved status
+ information (changes.pck, saved Build status
+ pickles) can be stored
+ """
+ self.botmaster = botmaster
+ self.basedir = basedir
+ self.watchers = []
+ self.activeBuildSets = []
+ assert os.path.isdir(basedir)
+ # compress logs bigger than 4k, a good default on linux
+ self.logCompressionLimit = 4*1024
+
+
+ # methods called by our clients
+
    def getProjectName(self):
        # project name configured on the BuildMaster (botmaster.parent)
        return self.botmaster.parent.projectName
    def getProjectURL(self):
        # the project's home-page URL, as configured on the BuildMaster
        return self.botmaster.parent.projectURL
    def getBuildbotURL(self):
        # externally-visible base URL of this buildbot's web status; used
        # by getURLForThing() to build absolute URLs
        return self.botmaster.parent.buildbotURL
+
    def getURLForThing(self, thing):
        # Map a status object to its URL beneath the buildbot's base URL,
        # dispatching on which status interface `thing` provides. Returns
        # None when no buildbotURL is configured or the thing has no page.
        prefix = self.getBuildbotURL()
        if not prefix:
            return None
        if interfaces.IStatus.providedBy(thing):
            # the top-level status is the root page itself
            return prefix
        if interfaces.ISchedulerStatus.providedBy(thing):
            pass # schedulers have no page of their own
        if interfaces.IBuilderStatus.providedBy(thing):
            builder = thing
            return prefix + "builders/%s" % (
                urllib.quote(builder.getName(), safe=''),
                )
        if interfaces.IBuildStatus.providedBy(thing):
            build = thing
            builder = build.getBuilder()
            return prefix + "builders/%s/builds/%d" % (
                urllib.quote(builder.getName(), safe=''),
                build.getNumber())
        if interfaces.IBuildStepStatus.providedBy(thing):
            step = thing
            build = step.getBuild()
            builder = build.getBuilder()
            return prefix + "builders/%s/builds/%d/steps/%s" % (
                urllib.quote(builder.getName(), safe=''),
                build.getNumber(),
                urllib.quote(step.getName(), safe=''))
        # IBuildSetStatus
        # IBuildRequestStatus
        # ISlaveStatus

        # IStatusEvent
        if interfaces.IStatusEvent.providedBy(thing):
            from buildbot.changes import changes
            # TODO: this is goofy, create IChange or something
            if isinstance(thing, changes.Change):
                change = thing
                return "%schanges/%d" % (prefix, change.number)

        if interfaces.IStatusLog.providedBy(thing):
            log = thing
            step = log.getStep()
            build = step.getBuild()
            builder = build.getBuilder()

            # logs are addressed by their position within the step
            logs = step.getLogs()
            for i in range(len(logs)):
                if log is logs[i]:
                    lognum = i
                    break
            else:
                # log is not (or no longer) attached to its step
                return None
            return prefix + "builders/%s/builds/%d/steps/%s/logs/%d" % (
                urllib.quote(builder.getName(), safe=''),
                build.getNumber(),
                urllib.quote(step.getName(), safe=''),
                lognum)
+
+ def getChangeSources(self):
+ return list(self.botmaster.parent.change_svc)
+
+ def getChange(self, number):
+ return self.botmaster.parent.change_svc.getChangeNumbered(number)
+
+ def getSchedulers(self):
+ return self.botmaster.parent.allSchedulers()
+
+ def getBuilderNames(self, categories=None):
+ if categories == None:
+ return self.botmaster.builderNames[:] # don't let them break it
+
+ l = []
+ # respect addition order
+ for name in self.botmaster.builderNames:
+ builder = self.botmaster.builders[name]
+ if builder.builder_status.category in categories:
+ l.append(name)
+ return l
+
+ def getBuilder(self, name):
+ """
+ @rtype: L{BuilderStatus}
+ """
+ return self.botmaster.builders[name].builder_status
+
+ def getSlaveNames(self):
+ return self.botmaster.slaves.keys()
+
+ def getSlave(self, slavename):
+ return self.botmaster.slaves[slavename].slave_status
+
+ def getBuildSets(self):
+ return self.activeBuildSets[:]
+
+ def generateFinishedBuilds(self, builders=[], branches=[],
+ num_builds=None, finished_before=None,
+ max_search=200):
+
+ def want_builder(bn):
+ if builders:
+ return bn in builders
+ return True
+ builder_names = [bn
+ for bn in self.getBuilderNames()
+ if want_builder(bn)]
+
+ # 'sources' is a list of generators, one for each Builder we're
+ # using. When the generator is exhausted, it is replaced in this list
+ # with None.
+ sources = []
+ for bn in builder_names:
+ b = self.getBuilder(bn)
+ g = b.generateFinishedBuilds(branches,
+ finished_before=finished_before,
+ max_search=max_search)
+ sources.append(g)
+
+ # next_build the next build from each source
+ next_build = [None] * len(sources)
+
+ def refill():
+ for i,g in enumerate(sources):
+ if next_build[i]:
+ # already filled
+ continue
+ if not g:
+ # already exhausted
+ continue
+ try:
+ next_build[i] = g.next()
+ except StopIteration:
+ next_build[i] = None
+ sources[i] = None
+
+ got = 0
+ while True:
+ refill()
+ # find the latest build among all the candidates
+ candidates = [(i, b, b.getTimes()[1])
+ for i,b in enumerate(next_build)
+ if b is not None]
+ candidates.sort(lambda x,y: cmp(x[2], y[2]))
+ if not candidates:
+ return
+
+ # and remove it from the list
+ i, build, finshed_time = candidates[-1]
+ next_build[i] = None
+ got += 1
+ yield build
+ if num_builds is not None:
+ if got >= num_builds:
+ return
+
+ def subscribe(self, target):
+ self.watchers.append(target)
+ for name in self.botmaster.builderNames:
+ self.announceNewBuilder(target, name, self.getBuilder(name))
+ def unsubscribe(self, target):
+ self.watchers.remove(target)
+
+
+ # methods called by upstream objects
+
+ def announceNewBuilder(self, target, name, builder_status):
+ t = target.builderAdded(name, builder_status)
+ if t:
+ builder_status.subscribe(t)
+
+ def builderAdded(self, name, basedir, category=None):
+ """
+ @rtype: L{BuilderStatus}
+ """
+ filename = os.path.join(self.basedir, basedir, "builder")
+ log.msg("trying to load status pickle from %s" % filename)
+ builder_status = None
+ try:
+ builder_status = load(open(filename, "rb"))
+ styles.doUpgrade()
+ except IOError:
+ log.msg("no saved status pickle, creating a new one")
+ except:
+ log.msg("error while loading status pickle, creating a new one")
+ log.msg("error follows:")
+ log.err()
+ if not builder_status:
+ builder_status = BuilderStatus(name, category)
+ builder_status.addPointEvent(["builder", "created"])
+ log.msg("added builder %s in category %s" % (name, category))
+ # an unpickled object might not have category set from before,
+ # so set it here to make sure
+ builder_status.category = category
+ builder_status.basedir = os.path.join(self.basedir, basedir)
+ builder_status.name = name # it might have been updated
+ builder_status.status = self
+
+ if not os.path.isdir(builder_status.basedir):
+ os.makedirs(builder_status.basedir)
+ builder_status.determineNextBuildNumber()
+
+ builder_status.setBigState("offline")
+ builder_status.setLogCompressionLimit(self.logCompressionLimit)
+
+ for t in self.watchers:
+ self.announceNewBuilder(t, name, builder_status)
+
+ return builder_status
+
+ def builderRemoved(self, name):
+ for t in self.watchers:
+ t.builderRemoved(name)
+
+ def prune(self):
+ for b in self.botmaster.builders.values():
+ b.builder_status.prune()
+
+ def buildsetSubmitted(self, bss):
+ self.activeBuildSets.append(bss)
+ bss.waitUntilFinished().addCallback(self.activeBuildSets.remove)
+ for t in self.watchers:
+ t.buildsetSubmitted(bss)
diff --git a/buildbot/buildbot/status/client.py b/buildbot/buildbot/status/client.py
new file mode 100644
index 0000000..0d4611d
--- /dev/null
+++ b/buildbot/buildbot/status/client.py
@@ -0,0 +1,564 @@
+# -*- test-case-name: buildbot.test.test_status -*-
+
+from twisted.spread import pb
+from twisted.python import components, log as twlog
+from twisted.internet import reactor
+from twisted.application import strports
+from twisted.cred import portal, checkers
+
+from buildbot import interfaces
+from zope.interface import Interface, implements
+from buildbot.status import builder, base
+from buildbot.changes import changes
+
class IRemote(Interface):
    """Marker interface: adapting a status object to IRemote yields the
    pb-remoteable wrapper that can be sent to status clients."""
    pass
+
def makeRemote(obj):
    """Adapt `obj` to IRemote, mapping None to None.

    We want IRemote(None) to be None, but adapters cannot be registered
    for NoneType, so the passthrough is done by hand here.
    """
    return None if obj is None else IRemote(obj)
+
+
class RemoteBuildSet(pb.Referenceable):
    """PB-remoteable wrapper around an IBuildSetStatus; every method
    simply delegates to the wrapped buildset."""
    def __init__(self, buildset):
        # the wrapped IBuildSetStatus provider
        self.b = buildset

    def remote_getSourceStamp(self):
        return self.b.getSourceStamp()

    def remote_getReason(self):
        return self.b.getReason()

    def remote_getID(self):
        return self.b.getID()

    def remote_getBuilderNames(self):
        return self.b.getBuilderNames()

    def remote_getBuildRequests(self):
        """Returns a list of (builderName, BuildRequest) tuples."""
        return [(br.getBuilderName(), IRemote(br))
                for br in self.b.getBuildRequests()]

    def remote_isFinished(self):
        return self.b.isFinished()

    def remote_waitUntilSuccess(self):
        # fires with this same remote wrapper once the buildset succeeds
        d = self.b.waitUntilSuccess()
        d.addCallback(lambda res: self)
        return d

    def remote_waitUntilFinished(self):
        # fires with this same remote wrapper once the buildset completes
        d = self.b.waitUntilFinished()
        d.addCallback(lambda res: self)
        return d

    def remote_getResults(self):
        return self.b.getResults()

components.registerAdapter(RemoteBuildSet,
                           interfaces.IBuildSetStatus, IRemote)
+
+
class RemoteBuilder(pb.Referenceable):
    """PB-remoteable wrapper around an IBuilderStatus."""
    def __init__(self, builder):
        # the wrapped IBuilderStatus provider
        self.b = builder

    def remote_getName(self):
        return self.b.getName()

    def remote_getState(self):
        state, builds = self.b.getState()
        return (state,
                None, # TODO: remove leftover ETA
                [makeRemote(b) for b in builds])

    def remote_getSlaves(self):
        return [IRemote(s) for s in self.b.getSlaves()]

    def remote_getLastFinishedBuild(self):
        # makeRemote maps the "no finished build yet" None to None
        return makeRemote(self.b.getLastFinishedBuild())

    def remote_getCurrentBuilds(self):
        return [IRemote(b) for b in self.b.getCurrentBuilds()]

    def remote_getBuild(self, number):
        return makeRemote(self.b.getBuild(number))

    def remote_getEvent(self, number):
        return IRemote(self.b.getEvent(number))

components.registerAdapter(RemoteBuilder,
                           interfaces.IBuilderStatus, IRemote)
+
+
class RemoteBuildRequest(pb.Referenceable):
    """PB-remoteable wrapper around an IBuildRequestStatus."""
    def __init__(self, buildreq):
        self.b = buildreq
        # remote observers registered through remote_subscribe
        self.observers = []

    def remote_getSourceStamp(self):
        return self.b.getSourceStamp()

    def remote_getBuilderName(self):
        return self.b.getBuilderName()

    def remote_subscribe(self, observer):
        """The observer's remote_newbuild method will be called (with two
        arguments: the RemoteBuild object, and our builderName) for each new
        Build that is created to handle this BuildRequest."""
        self.observers.append(observer)
        def send(bs):
            d = observer.callRemote("newbuild",
                                    IRemote(bs), self.b.getBuilderName())
            # a vanished observer is not our problem; swallow the error
            d.addErrback(lambda err: None)
        # delay so the subscription starts after this remote call returns
        reactor.callLater(0, self.b.subscribe, send)

    def remote_unsubscribe(self, observer):
        # PB (well, at least oldpb) doesn't re-use RemoteReference instances,
        # so sending the same object across the wire twice will result in two
        # separate objects that compare as equal ('a is not b' and 'a == b').
        # That means we can't use a simple 'self.observers.remove(observer)'
        # here.
        # Iterate over a copy: removing from the list while iterating over
        # it skips elements, so the original loop could miss duplicates.
        for o in self.observers[:]:
            if o == observer:
                self.observers.remove(o)

components.registerAdapter(RemoteBuildRequest,
                           interfaces.IBuildRequestStatus, IRemote)
+
class RemoteBuild(pb.Referenceable):
    """PB-remoteable wrapper around an IBuildStatus."""
    def __init__(self, build):
        self.b = build
        # remote observers registered through remote_subscribe
        self.observers = []

    def remote_getBuilderName(self):
        return self.b.getBuilder().getName()

    def remote_getNumber(self):
        return self.b.getNumber()

    def remote_getReason(self):
        return self.b.getReason()

    def remote_getChanges(self):
        return [IRemote(c) for c in self.b.getChanges()]

    def remote_getResponsibleUsers(self):
        return self.b.getResponsibleUsers()

    def remote_getSteps(self):
        return [IRemote(s) for s in self.b.getSteps()]

    def remote_getTimes(self):
        return self.b.getTimes()

    def remote_isFinished(self):
        return self.b.isFinished()

    def remote_waitUntilFinished(self):
        # the Deferred returned by callRemote() will fire when this build is
        # finished
        d = self.b.waitUntilFinished()
        d.addCallback(lambda res: self)
        return d

    def remote_getETA(self):
        return self.b.getETA()

    def remote_getCurrentStep(self):
        return makeRemote(self.b.getCurrentStep())

    def remote_getText(self):
        return self.b.getText()

    def remote_getResults(self):
        return self.b.getResults()

    def remote_getLogs(self):
        # map log name -> remote log wrapper
        logs = {}
        for name,log in self.b.getLogs().items():
            logs[name] = IRemote(log)
        return logs

    def remote_subscribe(self, observer, updateInterval=None):
        """The observer will have remote_stepStarted(buildername, build,
        stepname, step), remote_stepFinished(buildername, build, stepname,
        step, results), and maybe remote_buildETAUpdate(buildername, build,
        eta)) messages sent to it."""
        self.observers.append(observer)
        s = BuildSubscriber(observer)
        self.b.subscribe(s, updateInterval)

    def remote_unsubscribe(self, observer):
        # TODO: is the observer automatically unsubscribed when the build
        # finishes? Or are they responsible for unsubscribing themselves
        # anyway? How do we avoid a race condition here?
        # Iterate over a copy: removing from the list while iterating over
        # it skips elements, so the original loop could miss duplicates.
        for o in self.observers[:]:
            if o == observer:
                self.observers.remove(o)


components.registerAdapter(RemoteBuild,
                           interfaces.IBuildStatus, IRemote)
+
class BuildSubscriber:
    """Relays build-level status events to one remote observer.

    Created by RemoteBuild.remote_subscribe; each callback forwards the
    event over the wire with observer.callRemote.
    """
    def __init__(self, observer):
        self.observer = observer

    def buildETAUpdate(self, build, eta):
        self.observer.callRemote("buildETAUpdate",
                                 build.getBuilder().getName(),
                                 IRemote(build),
                                 eta)

    def stepStarted(self, build, step):
        self.observer.callRemote("stepStarted",
                                 build.getBuilder().getName(),
                                 IRemote(build),
                                 step.getName(), IRemote(step))
        # returning None means we do not subscribe to step-level events
        return None

    def stepFinished(self, build, step, results):
        self.observer.callRemote("stepFinished",
                                 build.getBuilder().getName(),
                                 IRemote(build),
                                 step.getName(), IRemote(step),
                                 results)
+
+
class RemoteBuildStep(pb.Referenceable):
    """PB-remoteable wrapper around an IBuildStepStatus."""
    def __init__(self, step):
        # the wrapped IBuildStepStatus provider
        self.s = step

    def remote_getName(self):
        return self.s.getName()

    def remote_getBuild(self):
        return IRemote(self.s.getBuild())

    def remote_getTimes(self):
        return self.s.getTimes()

    def remote_getExpectations(self):
        return self.s.getExpectations()

    def remote_getLogs(self):
        # map log name -> remote log wrapper
        logs = {}
        for log in self.s.getLogs():
            logs[log.getName()] = IRemote(log)
        return logs

    def remote_isFinished(self):
        return self.s.isFinished()

    def remote_waitUntilFinished(self):
        return self.s.waitUntilFinished() # returns a Deferred

    def remote_getETA(self):
        return self.s.getETA()

    def remote_getText(self):
        return self.s.getText()

    def remote_getResults(self):
        return self.s.getResults()

components.registerAdapter(RemoteBuildStep,
                           interfaces.IBuildStepStatus, IRemote)
+
class RemoteSlave:
    """PB-side wrapper around an ISlaveStatus.

    NOTE(review): unlike most other wrappers here this does not subclass
    pb.Referenceable -- confirm whether it is ever actually sent over
    the wire.
    """
    def __init__(self, slave):
        # the wrapped ISlaveStatus provider
        self.s = slave

    def remote_getName(self):
        return self.s.getName()
    def remote_getAdmin(self):
        return self.s.getAdmin()
    def remote_getHost(self):
        return self.s.getHost()
    def remote_isConnected(self):
        return self.s.isConnected()

components.registerAdapter(RemoteSlave,
                           interfaces.ISlaveStatus, IRemote)
+
class RemoteEvent:
    """PB-side wrapper around an IStatusEvent.

    Bug fix: the accessors previously read ``self.s`` while __init__ only
    set ``self.e``, so every call raised AttributeError; they now use the
    attribute that is actually assigned.
    """
    def __init__(self, event):
        # the wrapped IStatusEvent provider
        self.e = event

    def remote_getTimes(self):
        return self.e.getTimes()

    def remote_getText(self):
        return self.e.getText()
+
+components.registerAdapter(RemoteEvent,
+ interfaces.IStatusEvent, IRemote)
+
class RemoteLog(pb.Referenceable):
    """PB-remoteable wrapper around a builder.LogFile."""
    def __init__(self, log):
        # the wrapped LogFile
        self.l = log

    def remote_getName(self):
        return self.l.getName()

    def remote_isFinished(self):
        return self.l.isFinished()
    def remote_waitUntilFinished(self):
        # fires with this same remote wrapper once the log is closed
        d = self.l.waitUntilFinished()
        d.addCallback(lambda res: self)
        return d

    def remote_getText(self):
        return self.l.getText()
    def remote_getTextWithHeaders(self):
        return self.l.getTextWithHeaders()
    def remote_getChunks(self):
        return self.l.getChunks()
    # TODO: subscription interface

components.registerAdapter(RemoteLog, builder.LogFile, IRemote)
# TODO: something similar for builder.HTMLLogfile ?
+
class RemoteChange:
    """PB-side wrapper around a changes.Change.

    NOTE(review): these accessors lack the remote_ prefix and the class
    is not a pb.Referenceable, so they are not callable over the wire --
    verify whether that is intentional.
    """
    def __init__(self, change):
        # the wrapped Change object
        self.c = change

    def getWho(self):
        return self.c.who
    def getFiles(self):
        return self.c.files
    def getComments(self):
        return self.c.comments

components.registerAdapter(RemoteChange, changes.Change, IRemote)
+
+
+class StatusClientPerspective(base.StatusReceiverPerspective):
+
+ subscribed = None
+ client = None
+
+ def __init__(self, status):
+ self.status = status # the IStatus
+ self.subscribed_to_builders = [] # Builders to which we're subscribed
+ self.subscribed_to = [] # everything else we're subscribed to
+
+ def __getstate__(self):
+ d = self.__dict__.copy()
+ d['client'] = None
+ return d
+
+ def attached(self, mind):
+ #twlog.msg("StatusClientPerspective.attached")
+ return self
+
+ def detached(self, mind):
+ twlog.msg("PB client detached")
+ self.client = None
+ for name in self.subscribed_to_builders:
+ twlog.msg(" unsubscribing from Builder(%s)" % name)
+ self.status.getBuilder(name).unsubscribe(self)
+ for s in self.subscribed_to:
+ twlog.msg(" unsubscribe from %s" % s)
+ s.unsubscribe(self)
+ self.subscribed = None
+
+ def perspective_subscribe(self, mode, interval, target):
+ """The remote client wishes to subscribe to some set of events.
+ 'target' will be sent remote messages when these events happen.
+ 'mode' indicates which events are desired: it is a string with one
+ of the following values:
+
+ 'builders': builderAdded, builderRemoved
+ 'builds': those plus builderChangedState, buildStarted, buildFinished
+ 'steps': all those plus buildETAUpdate, stepStarted, stepFinished
+ 'logs': all those plus stepETAUpdate, logStarted, logFinished
+ 'full': all those plus logChunk (with the log contents)
+
+
+ Messages are defined by buildbot.interfaces.IStatusReceiver .
+ 'interval' is used to specify how frequently ETAUpdate messages
+ should be sent.
+
+ Raising or lowering the subscription level will take effect starting
+ with the next build or step."""
+
+ assert mode in ("builders", "builds", "steps", "logs", "full")
+ assert target
+ twlog.msg("PB subscribe(%s)" % mode)
+
+ self.client = target
+ self.subscribed = mode
+ self.interval = interval
+ self.subscribed_to.append(self.status)
+ # wait a moment before subscribing, so the new-builder messages
+ # won't appear before this remote method finishes
+ reactor.callLater(0, self.status.subscribe, self)
+ return None
+
+ def perspective_unsubscribe(self):
+ twlog.msg("PB unsubscribe")
+ self.status.unsubscribe(self)
+ self.subscribed_to.remove(self.status)
+ self.client = None
+
+ def perspective_getBuildSets(self):
+ """This returns tuples of (buildset, bsid), because that is much more
+ convenient for tryclient."""
+ return [(IRemote(s), s.getID()) for s in self.status.getBuildSets()]
+
+ def perspective_getBuilderNames(self):
+ return self.status.getBuilderNames()
+
+ def perspective_getBuilder(self, name):
+ b = self.status.getBuilder(name)
+ return IRemote(b)
+
+ def perspective_getSlave(self, name):
+ s = self.status.getSlave(name)
+ return IRemote(s)
+
+ def perspective_ping(self):
+ """Ping method to allow pb clients to validate their connections."""
+ return "pong"
+
+ # IStatusReceiver methods, invoked if we've subscribed
+
+ # mode >= builder
+ def builderAdded(self, name, builder):
+ self.client.callRemote("builderAdded", name, IRemote(builder))
+ if self.subscribed in ("builds", "steps", "logs", "full"):
+ self.subscribed_to_builders.append(name)
+ return self
+ return None
+
+ def builderChangedState(self, name, state):
+ self.client.callRemote("builderChangedState", name, state, None)
+ # TODO: remove leftover ETA argument
+
+ def builderRemoved(self, name):
+ if name in self.subscribed_to_builders:
+ self.subscribed_to_builders.remove(name)
+ self.client.callRemote("builderRemoved", name)
+
+ def buildsetSubmitted(self, buildset):
+ # TODO: deliver to client, somehow
+ pass
+
+ # mode >= builds
+ def buildStarted(self, name, build):
+ self.client.callRemote("buildStarted", name, IRemote(build))
+ if self.subscribed in ("steps", "logs", "full"):
+ self.subscribed_to.append(build)
+ return (self, self.interval)
+ return None
+
+ def buildFinished(self, name, build, results):
+ if build in self.subscribed_to:
+ # we might have joined during the build
+ self.subscribed_to.remove(build)
+ self.client.callRemote("buildFinished",
+ name, IRemote(build), results)
+
+ # mode >= steps
+ def buildETAUpdate(self, build, eta):
+ self.client.callRemote("buildETAUpdate",
+ build.getBuilder().getName(), IRemote(build),
+ eta)
+
+ def stepStarted(self, build, step):
+ # we add some information here so the client doesn't have to do an
+ # extra round-trip
+ self.client.callRemote("stepStarted",
+ build.getBuilder().getName(), IRemote(build),
+ step.getName(), IRemote(step))
+ if self.subscribed in ("logs", "full"):
+ self.subscribed_to.append(step)
+ return (self, self.interval)
+ return None
+
+ def stepFinished(self, build, step, results):
+ self.client.callRemote("stepFinished",
+ build.getBuilder().getName(), IRemote(build),
+ step.getName(), IRemote(step),
+ results)
+ if step in self.subscribed_to:
+ # eventually (through some new subscription method) we could
+ # join in the middle of the step
+ self.subscribed_to.remove(step)
+
+ # mode >= logs
+ def stepETAUpdate(self, build, step, ETA, expectations):
+ self.client.callRemote("stepETAUpdate",
+ build.getBuilder().getName(), IRemote(build),
+ step.getName(), IRemote(step),
+ ETA, expectations)
+
+ def logStarted(self, build, step, log):
+ # TODO: make the HTMLLog adapter
+ rlog = IRemote(log, None)
+ if not rlog:
+ print "hey, couldn't adapt %s to IRemote" % log
+ self.client.callRemote("logStarted",
+ build.getBuilder().getName(), IRemote(build),
+ step.getName(), IRemote(step),
+ log.getName(), IRemote(log, None))
+ if self.subscribed in ("full",):
+ self.subscribed_to.append(log)
+ return self
+ return None
+
+ def logFinished(self, build, step, log):
+ self.client.callRemote("logFinished",
+ build.getBuilder().getName(), IRemote(build),
+ step.getName(), IRemote(step),
+ log.getName(), IRemote(log, None))
+ if log in self.subscribed_to:
+ self.subscribed_to.remove(log)
+
+ # mode >= full
+ def logChunk(self, build, step, log, channel, text):
+ self.client.callRemote("logChunk",
+ build.getBuilder().getName(), IRemote(build),
+ step.getName(), IRemote(step),
+ log.getName(), IRemote(log),
+ channel, text)
+
+
class PBListener(base.StatusReceiverMultiService):
    """I am a listener for PB-based status clients."""

    compare_attrs = ["port", "cred"]
    implements(portal.IRealm)

    def __init__(self, port, user="statusClient", passwd="clientpw"):
        """
        @param port: a strports description (a bare int is shorthand for
                     'tcp:PORT') on which to listen
        @param user: the username status clients must present
        @param passwd: the password status clients must present
        """
        base.StatusReceiverMultiService.__init__(self)
        # isinstance instead of 'type(port) is int' (idiomatic type check)
        if isinstance(port, int):
            port = "tcp:%d" % port
        self.port = port
        self.cred = (user, passwd)
        # a single-user in-memory checker guards the PB portal
        p = portal.Portal(self)
        c = checkers.InMemoryUsernamePasswordDatabaseDontUse()
        c.addUser(user, passwd)
        p.registerChecker(c)
        f = pb.PBServerFactory(p)
        s = strports.service(port, f)
        s.setServiceParent(self)

    def setServiceParent(self, parent):
        base.StatusReceiverMultiService.setServiceParent(self, parent)
        self.setup()

    def setup(self):
        # grab the Status object once we are attached to the BuildMaster
        self.status = self.parent.getStatus()

    def requestAvatar(self, avatarID, mind, interface):
        # IRealm method: each authenticated client gets its own perspective
        assert interface == pb.IPerspective
        p = StatusClientPerspective(self.status)
        p.attached(mind) # perhaps .callLater(0) ?
        return (pb.IPerspective, p,
                lambda p=p,mind=mind: p.detached(mind))
diff --git a/buildbot/buildbot/status/html.py b/buildbot/buildbot/status/html.py
new file mode 100644
index 0000000..cc36a4a
--- /dev/null
+++ b/buildbot/buildbot/status/html.py
@@ -0,0 +1,6 @@
+
+# compatibility wrapper. This is currently the preferred place for master.cfg
+# to import from.
+
+from buildbot.status.web.baseweb import Waterfall, WebStatus
+_hush_pyflakes = [Waterfall, WebStatus]
diff --git a/buildbot/buildbot/status/mail.py b/buildbot/buildbot/status/mail.py
new file mode 100644
index 0000000..e32cfa9
--- /dev/null
+++ b/buildbot/buildbot/status/mail.py
@@ -0,0 +1,524 @@
+# -*- test-case-name: buildbot.test.test_status -*-
+
+# the email.MIMEMultipart module is only available in python-2.2.2 and later
+import re
+
+from email.Message import Message
+from email.Utils import formatdate
+from email.MIMEText import MIMEText
+try:
+ from email.MIMEMultipart import MIMEMultipart
+ canDoAttachments = True
+except ImportError:
+ canDoAttachments = False
+import urllib
+
+from zope.interface import implements
+from twisted.internet import defer
+from twisted.mail.smtp import sendmail
+from twisted.python import log as twlog
+
+from buildbot import interfaces, util
+from buildbot.status import base
+from buildbot.status.builder import FAILURE, SUCCESS, WARNINGS, Results
+
+VALID_EMAIL = re.compile("[a-zA-Z0-9\.\_\%\-\+]+@[a-zA-Z0-9\.\_\%\-]+.[a-zA-Z]{2,6}")
+
def message(attrs):
    """Generate a buildbot mail message and return a tuple of message text
    and type.

    This function can be replaced using the customMesg variable in MailNotifier.
    A message function will *always* get a dictionary of attributes with
    the following values:

      builderName - (str) Name of the builder that generated this event.

      projectName - (str) Name of the project.

      mode - (str) Mode set in MailNotifier. (failing, passing, problem).

      result - (str) Builder result as a string. 'success', 'warnings',
               'failure', 'skipped', or 'exception'

      buildURL - (str) URL to build page.

      buildbotURL - (str) URL to buildbot main page.

      buildText - (str) Build text from build.getText().

      slavename - (str) Slavename.

      reason - (str) Build reason from build.getReason().

      responsibleUsers - (List of str) List of responsible users.

      branch - (str) Name of branch used. If no SourceStamp exists branch
               is an empty string.

      revision - (str) Name of revision used. If no SourceStamp exists revision
                 is an empty string.

      patch - (str) Name of patch used. If no SourceStamp exists patch
              is an empty string.

      changes - (list of objs) List of change objects from SourceStamp. A change
                object has the following useful information:

                who - who made this change
                revision - what VC revision is this change
                branch - on what branch did this change occur
                when - when did this change occur
                files - what files were affected in this change
                comments - comments regarding the change.

                The functions asText and asHTML return a list of strings with
                the above information formatted.

      logs - (List of Tuples) List of tuples that contain the log name, log url
             and log contents as a list of strings.
    """
    text = ""
    if attrs['mode'] == "all":
        text += "The Buildbot has finished a build"
    elif attrs['mode'] == "failing":
        text += "The Buildbot has detected a failed build"
    elif attrs['mode'] == "passing":
        text += "The Buildbot has detected a passing build"
    else:
        text += "The Buildbot has detected a new failure"
    text += " of %s on %s.\n" % (attrs['builderName'], attrs['projectName'])
    if attrs['buildURL']:
        text += "Full details are available at:\n %s\n" % attrs['buildURL']
    text += "\n"

    if attrs['buildbotURL']:
        text += "Buildbot URL: %s\n\n" % urllib.quote(attrs['buildbotURL'], '/:')

    text += "Buildslave for this Build: %s\n\n" % attrs['slavename']
    text += "Build Reason: %s\n" % attrs['reason']

    # Source stamp description.  The original code set source to
    # "unavailable" when attrs['branch'] was TRUE and then appended the
    # branch/revision anyway, producing "unavailable[branch x] rev"; the
    # intent (per the attrs docs above: empty branch/revision/patch means
    # no SourceStamp) is the other way around.
    if attrs['branch'] or attrs['revision'] or attrs['patch']:
        source = ""
        if attrs['branch']:
            source += "[branch %s] " % attrs['branch']
        if attrs['revision']:
            source += attrs['revision']
        else:
            source += "HEAD"
        if attrs['patch']:
            source += " (plus patch)"
    else:
        # no SourceStamp information at all
        source = "unavailable"
    text += "Build Source Stamp: %s\n" % source

    text += "Blamelist: %s\n" % ",".join(attrs['responsibleUsers'])

    text += "\n"

    t = attrs['buildText']
    if t:
        t = ": " + " ".join(t)
    else:
        t = ""

    if attrs['result'] == 'success':
        text += "Build succeeded!\n"
    elif attrs['result'] == 'warnings':
        text += "Build Had Warnings%s\n" % t
    else:
        text += "BUILD FAILED%s\n" % t

    text += "\n"
    text += "sincerely,\n"
    text += " -The Buildbot\n"
    text += "\n"
    return (text, 'plain')
+
class Domain(util.ComparableMixin):
    """IEmailLookup that maps a bare user name to user@<domain>.

    Names that already contain an '@' are assumed to be complete email
    addresses and are passed through untouched.
    """
    implements(interfaces.IEmailLookup)
    compare_attrs = ["domain"]

    def __init__(self, domain):
        # a full address here is almost certainly a configuration mistake
        assert "@" not in domain
        self.domain = domain

    def getAddress(self, name):
        """If name is already an email address, pass it through."""
        if "@" in name:
            return name
        return "%s@%s" % (name, self.domain)
+
+
+class MailNotifier(base.StatusReceiverMultiService):
+ """This is a status notifier which sends email to a list of recipients
+ upon the completion of each build. It can be configured to only send out
+ mail for certain builds, and only send messages when the build fails, or
+ when it transitions from success to failure. It can also be configured to
+ include various build logs in each message.
+
+ By default, the message will be sent to the Interested Users list, which
+ includes all developers who made changes in the build. You can add
+ additional recipients with the extraRecipients argument.
+
+ To get a simple one-message-per-build (say, for a mailing list), use
+ sendToInterestedUsers=False, extraRecipients=['listaddr@example.org']
+
+ Each MailNotifier sends mail to a single set of recipients. To send
+ different kinds of mail to different recipients, use multiple
+ MailNotifiers.
+ """
+
+ implements(interfaces.IEmailSender)
+
+ compare_attrs = ["extraRecipients", "lookup", "fromaddr", "mode",
+ "categories", "builders", "addLogs", "relayhost",
+ "subject", "sendToInterestedUsers", "customMesg"]
+
+ def __init__(self, fromaddr, mode="all", categories=None, builders=None,
+ addLogs=False, relayhost="localhost",
+ subject="buildbot %(result)s in %(projectName)s on %(builder)s",
+ lookup=None, extraRecipients=[],
+ sendToInterestedUsers=True, customMesg=message):
+ """
+ @type fromaddr: string
+ @param fromaddr: the email address to be used in the 'From' header.
+ @type sendToInterestedUsers: boolean
+ @param sendToInterestedUsers: if True (the default), send mail to all
+ of the Interested Users. If False, only
+ send mail to the extraRecipients list.
+
+ @type extraRecipients: tuple of string
+ @param extraRecipients: a list of email addresses to which messages
+ should be sent (in addition to the
+ InterestedUsers list, which includes any
+ developers who made Changes that went into this
+ build). It is a good idea to create a small
+ mailing list and deliver to that, then let
+ subscribers come and go as they please.
+
+ @type subject: string
+ @param subject: a string to be used as the subject line of the message.
+ %(builder)s will be replaced with the name of the
+ builder which provoked the message.
+
+ @type mode: string (defaults to all)
+ @param mode: one of:
+ - 'all': send mail about all builds, passing and failing
+ - 'failing': only send mail about builds which fail
+ - 'passing': only send mail about builds which succeed
+ - 'problem': only send mail about a build which failed
+ when the previous build passed
+
+ @type builders: list of strings
+ @param builders: a list of builder names for which mail should be
+ sent. Defaults to None (send mail for all builds).
+ Use either builders or categories, but not both.
+
+ @type categories: list of strings
+ @param categories: a list of category names to serve status
+ information for. Defaults to None (all
+ categories). Use either builders or categories,
+ but not both.
+
+ @type addLogs: boolean.
+ @param addLogs: if True, include all build logs as attachments to the
+ messages. These can be quite large. This can also be
+ set to a list of log names, to send a subset of the
+ logs. Defaults to False.
+
+ @type relayhost: string
+ @param relayhost: the host to which the outbound SMTP connection
+ should be made. Defaults to 'localhost'
+
+ @type lookup: implementor of {IEmailLookup}
+ @param lookup: object which provides IEmailLookup, which is
+ responsible for mapping User names (which come from
+ the VC system) into valid email addresses. If not
+ provided, the notifier will only be able to send mail
+ to the addresses in the extraRecipients list. Most of
+ the time you can use a simple Domain instance. As a
+ shortcut, you can pass as string: this will be
+ treated as if you had provided Domain(str). For
+ example, lookup='twistedmatrix.com' will allow mail
+ to be sent to all developers whose SVN usernames
+ match their twistedmatrix.com account names.
+
+ @type customMesg: func
+ @param customMesg: A function that returns a tuple containing the text of
+ a custom message and its type. This function takes
+ the dict attrs which has the following values:
+
+ builderName - (str) Name of the builder that generated this event.
+
+ projectName - (str) Name of the project.
+
+ mode - (str) Mode set in MailNotifier. (failing, passing, problem).
+
+ result - (str) Builder result as a string. 'success', 'warnings',
+ 'failure', 'skipped', or 'exception'
+
+ buildURL - (str) URL to build page.
+
+ buildbotURL - (str) URL to buildbot main page.
+
+ buildText - (str) Build text from build.getText().
+
+ slavename - (str) Slavename.
+
+ reason - (str) Build reason from build.getReason().
+
+ responsibleUsers - (List of str) List of responsible users.
+
+ branch - (str) Name of branch used. If no SourceStamp exists branch
+ is an empty string.
+
+ revision - (str) Name of revision used. If no SourceStamp exists revision
+ is an empty string.
+
+ patch - (str) Name of patch used. If no SourceStamp exists patch
+ is an empty string.
+
+ changes - (list of objs) List of change objects from SourceStamp. A change
+ object has the following useful information:
+
+ who - who made this change
+ revision - what VC revision is this change
+ branch - on what branch did this change occur
+ when - when did this change occur
+ files - what files were affected in this change
+ comments - comments regarding the change.
+
+ The functions asText and asHTML return a list of strings with
+ the above information formatted.
+
+ logs - (List of Tuples) List of tuples that contain the log name, log url,
+ and log contents as a list of strings.
+
+ """
+
+ base.StatusReceiverMultiService.__init__(self)
+ assert isinstance(extraRecipients, (list, tuple))
+ for r in extraRecipients:
+ assert isinstance(r, str)
+ assert VALID_EMAIL.search(r) # require full email addresses, not User names
+ self.extraRecipients = extraRecipients
+ self.sendToInterestedUsers = sendToInterestedUsers
+ self.fromaddr = fromaddr
+ assert mode in ('all', 'failing', 'problem')
+ self.mode = mode
+ self.categories = categories
+ self.builders = builders
+ self.addLogs = addLogs
+ self.relayhost = relayhost
+ self.subject = subject
+ if lookup is not None:
+ if type(lookup) is str:
+ lookup = Domain(lookup)
+ assert interfaces.IEmailLookup.providedBy(lookup)
+ self.lookup = lookup
+ self.customMesg = customMesg
+ self.watched = []
+ self.status = None
+
+ # you should either limit on builders or categories, not both
+ if self.builders != None and self.categories != None:
+ twlog.err("Please specify only builders to ignore or categories to include")
+ raise # FIXME: the asserts above do not raise some Exception either
+
+ def setServiceParent(self, parent):
+ """
+ @type parent: L{buildbot.master.BuildMaster}
+ """
+ base.StatusReceiverMultiService.setServiceParent(self, parent)
+ self.setup()
+
+ def setup(self):
+ self.status = self.parent.getStatus()
+ self.status.subscribe(self)
+
+ def disownServiceParent(self):
+ self.status.unsubscribe(self)
+ for w in self.watched:
+ w.unsubscribe(self)
+ return base.StatusReceiverMultiService.disownServiceParent(self)
+
+ def builderAdded(self, name, builder):
+ # only subscribe to builders we are interested in
+ if self.categories != None and builder.category not in self.categories:
+ return None
+
+ self.watched.append(builder)
+ return self # subscribe to this builder
+
+ def builderRemoved(self, name):
+ pass
+
+ def builderChangedState(self, name, state):
+ pass
+ def buildStarted(self, name, build):
+ pass
+ def buildFinished(self, name, build, results):
+ # here is where we actually do something.
+ builder = build.getBuilder()
+ if self.builders is not None and name not in self.builders:
+ return # ignore this build
+ if self.categories is not None and \
+ builder.category not in self.categories:
+ return # ignore this build
+
+ if self.mode == "failing" and results != FAILURE:
+ return
+ if self.mode == "passing" and results != SUCCESS:
+ return
+ if self.mode == "problem":
+ if results != FAILURE:
+ return
+ prev = build.getPreviousBuild()
+ if prev and prev.getResults() == FAILURE:
+ return
+ # for testing purposes, buildMessage returns a Deferred that fires
+ # when the mail has been sent. To help unit tests, we return that
+ # Deferred here even though the normal IStatusReceiver.buildFinished
+ # signature doesn't do anything with it. If that changes (if
+ # .buildFinished's return value becomes significant), we need to
+ # rearrange this.
+ return self.buildMessage(name, build, results)
+
+ def buildMessage(self, name, build, results):
+ #
+ # logs is a list of tuples that contain the log
+ # name, log url, and the log contents as a list of strings.
+ #
+ logs = list()
+ for log in build.getLogs():
+ stepName = log.getStep().getName()
+ logName = log.getName()
+ logs.append(('%s.%s' % (stepName, logName),
+ '%s/steps/%s/logs/%s' % (self.status.getURLForThing(build), stepName, logName),
+ log.getText().splitlines()))
+
+ attrs = {'builderName': name,
+ 'projectName': self.status.getProjectName(),
+ 'mode': self.mode,
+ 'result': Results[results],
+ 'buildURL': self.status.getURLForThing(build),
+ 'buildbotURL': self.status.getBuildbotURL(),
+ 'buildText': build.getText(),
+ 'slavename': build.getSlavename(),
+ 'reason': build.getReason(),
+ 'responsibleUsers': build.getResponsibleUsers(),
+ 'branch': "",
+ 'revision': "",
+ 'patch': "",
+ 'changes': [],
+ 'logs': logs}
+
+ ss = build.getSourceStamp()
+ if ss:
+ attrs['branch'] = ss.branch
+ attrs['revision'] = ss.revision
+ attrs['patch'] = ss.patch
+ attrs['changes'] = ss.changes[:]
+
+ text, type = self.customMesg(attrs)
+ assert type in ('plain', 'html'), "'%s' message type must be 'plain' or 'html'." % type
+
+ haveAttachments = False
+ if attrs['patch'] or self.addLogs:
+ haveAttachments = True
+ if not canDoAttachments:
+ twlog.msg("warning: I want to send mail with attachments, "
+ "but this python is too old to have "
+ "email.MIMEMultipart . Please upgrade to python-2.3 "
+ "or newer to enable addLogs=True")
+
+ if haveAttachments and canDoAttachments:
+ m = MIMEMultipart()
+ m.attach(MIMEText(text, type))
+ else:
+ m = Message()
+ m.set_payload(text)
+ m.set_type("text/%s" % type)
+
+ m['Date'] = formatdate(localtime=True)
+ m['Subject'] = self.subject % { 'result': attrs['result'],
+ 'projectName': attrs['projectName'],
+ 'builder': attrs['builderName'],
+ }
+ m['From'] = self.fromaddr
+ # m['To'] is added later
+
+ if attrs['patch']:
+ a = MIMEText(attrs['patch'][1])
+ a.add_header('Content-Disposition', "attachment",
+ filename="source patch")
+ m.attach(a)
+ if self.addLogs:
+ for log in build.getLogs():
+ name = "%s.%s" % (log.getStep().getName(),
+ log.getName())
+ if self._shouldAttachLog(log.getName()) or self._shouldAttachLog(name):
+ a = MIMEText(log.getText())
+ a.add_header('Content-Disposition', "attachment",
+ filename=name)
+ m.attach(a)
+
+ # now, who is this message going to?
+ dl = []
+ recipients = []
+ if self.sendToInterestedUsers and self.lookup:
+ for u in build.getInterestedUsers():
+ d = defer.maybeDeferred(self.lookup.getAddress, u)
+ d.addCallback(recipients.append)
+ dl.append(d)
+ d = defer.DeferredList(dl)
+ d.addCallback(self._gotRecipients, recipients, m)
+ return d
+
+ def _shouldAttachLog(self, logname):
+ if type(self.addLogs) is bool:
+ return self.addLogs
+ return logname in self.addLogs
+
+ def _gotRecipients(self, res, rlist, m):
+ recipients = set()
+
+ for r in rlist:
+ if r is None: # getAddress didn't like this address
+ continue
+
+ # Git can give emails like 'User' <user@foo.com>@foo.com so check
+ # for two @ and chop the last
+ if r.count('@') > 1:
+ r = r[:r.rindex('@')]
+
+ if VALID_EMAIL.search(r):
+ recipients.add(r)
+ else:
+ twlog.msg("INVALID EMAIL: %r" + r)
+
+ # if we're sending to interested users move the extra's to the CC
+ # list so they can tell if they are also interested in the change
+ # unless there are no interested users
+ if self.sendToInterestedUsers and len(recipients):
+ m['CC'] = ", ".join(sorted(self.extraRecipients[:]))
+ else:
+ [recipients.add(r) for r in self.extraRecipients[:]]
+
+ m['To'] = ", ".join(sorted(recipients))
+
+ # The extras weren't part of the TO list so add them now
+ if self.sendToInterestedUsers:
+ for r in self.extraRecipients:
+ recipients.add(r)
+
+ return self.sendMessage(m, list(recipients))
+
+ def sendMessage(self, m, recipients):
+ s = m.as_string()
+ twlog.msg("sending mail (%d bytes) to" % len(s), recipients)
+ return sendmail(self.relayhost, self.fromaddr, recipients, s)
diff --git a/buildbot/buildbot/status/progress.py b/buildbot/buildbot/status/progress.py
new file mode 100644
index 0000000..dc4d3d5
--- /dev/null
+++ b/buildbot/buildbot/status/progress.py
@@ -0,0 +1,308 @@
+# -*- test-case-name: buildbot.test.test_status -*-
+
+from twisted.internet import reactor
+from twisted.spread import pb
+from twisted.python import log
+from buildbot import util
+
+class StepProgress:
+ """I keep track of how much progress a single BuildStep has made.
+
+ Progress is measured along various axes. Time consumed is one that is
+ available for all steps. Amount of command output is another, and may be
+ better quantified by scanning the output for markers to derive number of
+ files compiled, directories walked, tests run, etc.
+
+ I am created when the build begins, and given to a BuildProgress object
+ so it can track the overall progress of the whole build.
+
+ """
+
+ startTime = None
+ stopTime = None
+ expectedTime = None
+ buildProgress = None
+ debug = False
+
+ def __init__(self, name, metricNames):
+ self.name = name
+ self.progress = {}
+ self.expectations = {}
+ for m in metricNames:
+ self.progress[m] = None
+ self.expectations[m] = None
+
+ def setBuildProgress(self, bp):
+ self.buildProgress = bp
+
+ def setExpectations(self, metrics):
+ """The step can call this to explicitly set a target value for one
+ of its metrics. E.g., ShellCommands knows how many commands it will
+ execute, so it could set the 'commands' expectation."""
+ for metric, value in metrics.items():
+ self.expectations[metric] = value
+ self.buildProgress.newExpectations()
+
+ def setExpectedTime(self, seconds):
+ self.expectedTime = seconds
+ self.buildProgress.newExpectations()
+
+ def start(self):
+ if self.debug: print "StepProgress.start[%s]" % self.name
+ self.startTime = util.now()
+
+ def setProgress(self, metric, value):
+ """The step calls this as progress is made along various axes."""
+ if self.debug:
+ print "setProgress[%s][%s] = %s" % (self.name, metric, value)
+ self.progress[metric] = value
+ if self.debug:
+ r = self.remaining()
+ print " step remaining:", r
+ self.buildProgress.newProgress()
+
+ def finish(self):
+ """This stops the 'time' metric and marks the step as finished
+ overall. It should be called after the last .setProgress has been
+ done for each axis."""
+ if self.debug: print "StepProgress.finish[%s]" % self.name
+ self.stopTime = util.now()
+ self.buildProgress.stepFinished(self.name)
+
+ def totalTime(self):
+ if self.startTime != None and self.stopTime != None:
+ return self.stopTime - self.startTime
+
+ def remaining(self):
+ if self.startTime == None:
+ return self.expectedTime
+ if self.stopTime != None:
+ return 0 # already finished
+ # TODO: replace this with cleverness that graphs each metric vs.
+ # time, then finds the inverse function. Will probably need to save
+ # a timestamp with each setProgress update, when finished, go back
+ # and find the 2% transition points, then save those 50 values in a
+ # list. On the next build, do linear interpolation between the two
+ # closest samples to come up with a percentage represented by that
+ # metric.
+
+ # TODO: If no other metrics are available, just go with elapsed
+ # time. Given the non-time-uniformity of text output from most
+ # steps, this would probably be better than the text-percentage
+ # scheme currently implemented.
+
+ percentages = []
+ for metric, value in self.progress.items():
+ expectation = self.expectations[metric]
+ if value != None and expectation != None:
+ p = 1.0 * value / expectation
+ percentages.append(p)
+ if percentages:
+ avg = reduce(lambda x,y: x+y, percentages) / len(percentages)
+ if avg > 1.0:
+ # overdue
+ avg = 1.0
+ if avg < 0.0:
+ avg = 0.0
+ if percentages and self.expectedTime != None:
+ return self.expectedTime - (avg * self.expectedTime)
+ if self.expectedTime is not None:
+ # fall back to pure time
+ return self.expectedTime - (util.now() - self.startTime)
+ return None # no idea
+
+
+class WatcherState:
+ def __init__(self, interval):
+ self.interval = interval
+ self.timer = None
+ self.needUpdate = 0
+
+class BuildProgress(pb.Referenceable):
+ """I keep track of overall build progress. I hold a list of StepProgress
+ objects.
+ """
+
+ def __init__(self, stepProgresses):
+ self.steps = {}
+ for s in stepProgresses:
+ self.steps[s.name] = s
+ s.setBuildProgress(self)
+ self.finishedSteps = []
+ self.watchers = {}
+ self.debug = 0
+
+ def setExpectationsFrom(self, exp):
+ """Set our expectations from the builder's Expectations object."""
+ for name, metrics in exp.steps.items():
+ s = self.steps[name]
+ s.setExpectedTime(exp.times[name])
+ s.setExpectations(exp.steps[name])
+
+ def newExpectations(self):
+ """Call this when one of the steps has changed its expectations.
+ This should trigger us to update our ETA value and notify any
+ subscribers."""
+ pass # subscribers are not implemented: they just poll
+
+ def stepFinished(self, stepname):
+ assert(stepname not in self.finishedSteps)
+ self.finishedSteps.append(stepname)
+ if len(self.finishedSteps) == len(self.steps.keys()):
+ self.sendLastUpdates()
+
+ def newProgress(self):
+ r = self.remaining()
+ if self.debug:
+ print " remaining:", r
+ if r != None:
+ self.sendAllUpdates()
+
+ def remaining(self):
+ # sum eta of all steps
+ sum = 0
+ for name, step in self.steps.items():
+ rem = step.remaining()
+ if rem == None:
+ return None # not sure
+ sum += rem
+ return sum
+ def eta(self):
+ left = self.remaining()
+ if left == None:
+ return None # not sure
+ done = util.now() + left
+ return done
+
+
+ def remote_subscribe(self, remote, interval=5):
+ # [interval, timer, needUpdate]
+ # don't send an update more than once per interval
+ self.watchers[remote] = WatcherState(interval)
+ remote.notifyOnDisconnect(self.removeWatcher)
+ self.updateWatcher(remote)
+ self.startTimer(remote)
+ log.msg("BuildProgress.remote_subscribe(%s)" % remote)
+ def remote_unsubscribe(self, remote):
+ # TODO: this doesn't work. I think 'remote' will always be different
+ # than the object that appeared in _subscribe.
+ log.msg("BuildProgress.remote_unsubscribe(%s)" % remote)
+ self.removeWatcher(remote)
+ #remote.dontNotifyOnDisconnect(self.removeWatcher)
+ def removeWatcher(self, remote):
+ #log.msg("removeWatcher(%s)" % remote)
+ try:
+ timer = self.watchers[remote].timer
+ if timer:
+ timer.cancel()
+ del self.watchers[remote]
+ except KeyError:
+ log.msg("Weird, removeWatcher on non-existent subscriber:",
+ remote)
+ def sendAllUpdates(self):
+ for r in self.watchers.keys():
+ self.updateWatcher(r)
+ def updateWatcher(self, remote):
+ # an update wants to go to this watcher. Send it if we can, otherwise
+ # queue it for later
+ w = self.watchers[remote]
+ if not w.timer:
+ # no timer, so send update now and start the timer
+ self.sendUpdate(remote)
+ self.startTimer(remote)
+ else:
+ # timer is running, just mark as needing an update
+ w.needUpdate = 1
+ def startTimer(self, remote):
+ w = self.watchers[remote]
+ timer = reactor.callLater(w.interval, self.watcherTimeout, remote)
+ w.timer = timer
+ def sendUpdate(self, remote, last=0):
+ self.watchers[remote].needUpdate = 0
+ #text = self.asText() # TODO: not text, duh
+ try:
+ remote.callRemote("progress", self.remaining())
+ if last:
+ remote.callRemote("finished", self)
+ except:
+ log.deferr()
+ self.removeWatcher(remote)
+
+ def watcherTimeout(self, remote):
+ w = self.watchers.get(remote, None)
+ if not w:
+ return # went away
+ w.timer = None
+ if w.needUpdate:
+ self.sendUpdate(remote)
+ self.startTimer(remote)
+ def sendLastUpdates(self):
+ for remote in self.watchers.keys():
+ self.sendUpdate(remote, 1)
+ self.removeWatcher(remote)
+
+
+class Expectations:
+ debug = False
+ # decay=1.0 ignores all but the last build
+ # 0.9 is short time constant. 0.1 is very long time constant
+ # TODO: let decay be specified per-metric
+ decay = 0.5
+
+ def __init__(self, buildprogress):
+ """Create us from a successful build. We will expect each step to
+ take as long as it did in that build."""
+
+ # .steps maps stepname to dict2
+ # dict2 maps metricname to final end-of-step value
+ self.steps = {}
+
+ # .times maps stepname to per-step elapsed time
+ self.times = {}
+
+ for name, step in buildprogress.steps.items():
+ self.steps[name] = {}
+ for metric, value in step.progress.items():
+ self.steps[name][metric] = value
+ self.times[name] = None
+ if step.startTime is not None and step.stopTime is not None:
+ self.times[name] = step.stopTime - step.startTime
+
+ def wavg(self, old, current):
+ if old is None:
+ return current
+ if current is None:
+ return old
+ else:
+ return (current * self.decay) + (old * (1 - self.decay))
+
+ def update(self, buildprogress):
+ for name, stepprogress in buildprogress.steps.items():
+ old = self.times[name]
+ current = stepprogress.totalTime()
+ if current == None:
+ log.msg("Expectations.update: current[%s] was None!" % name)
+ continue
+ new = self.wavg(old, current)
+ self.times[name] = new
+ if self.debug:
+ print "new expected time[%s] = %s, old %s, cur %s" % \
+ (name, new, old, current)
+
+ for metric, current in stepprogress.progress.items():
+ old = self.steps[name][metric]
+ new = self.wavg(old, current)
+ if self.debug:
+ print "new expectation[%s][%s] = %s, old %s, cur %s" % \
+ (name, metric, new, old, current)
+ self.steps[name][metric] = new
+
+ def expectedBuildTime(self):
+ if None in self.times.values():
+ return None
+ #return sum(self.times.values())
+ # python-2.2 doesn't have 'sum'. TODO: drop python-2.2 support
+ s = 0
+ for v in self.times.values():
+ s += v
+ return s
diff --git a/buildbot/buildbot/status/tests.py b/buildbot/buildbot/status/tests.py
new file mode 100644
index 0000000..4c4c894
--- /dev/null
+++ b/buildbot/buildbot/status/tests.py
@@ -0,0 +1,73 @@
+
+from twisted.web import resource
+from twisted.web.error import NoResource
+
+# these are our test result types. Steps are responsible for mapping results
+# into these values.
+SKIP, EXPECTED_FAILURE, FAILURE, ERROR, UNEXPECTED_SUCCESS, SUCCESS = \
+ "skip", "expected failure", "failure", "error", "unexpected success", \
+ "success"
+UNKNOWN = "unknown" # catch-all
+
+
+class OneTest(resource.Resource):
+ isLeaf = 1
+ def __init__(self, parent, testName, results):
+ self.parent = parent
+ self.testName = testName
+ self.resultType, self.results = results
+
+ def render(self, request):
+ request.setHeader("content-type", "text/html")
+ if request.method == "HEAD":
+ request.setHeader("content-length", len(self.html(request)))
+ return ''
+ return self.html(request)
+
+ def html(self, request):
+ # turn ourselves into HTML
+ raise NotImplementedError
+
+class TestResults(resource.Resource):
+ oneTestClass = OneTest
+ def __init__(self):
+ resource.Resource.__init__(self)
+ self.tests = {}
+ def addTest(self, testName, resultType, results=None):
+ self.tests[testName] = (resultType, results)
+ # TODO: .setName and .delete should be used on our Swappable
+ def countTests(self):
+ return len(self.tests)
+ def countFailures(self):
+ failures = 0
+ for t in self.tests.values():
+ if t[0] in (FAILURE, ERROR):
+ failures += 1
+ return failures
+ def summary(self):
+ """Return a short list of text strings as a summary, suitable for
+ inclusion in an Event"""
+ return ["some", "tests"]
+ def describeOneTest(self, testname):
+ return "%s: %s\n" % (testname, self.tests[testname][0])
+ def html(self):
+ data = "<html>\n<head><title>Test Results</title></head>\n"
+ data += "<body>\n"
+ data += "<pre>\n"
+ tests = self.tests.keys()
+ tests.sort()
+ for testname in tests:
+ data += self.describeOneTest(testname)
+ data += "</pre>\n"
+ data += "</body></html>\n"
+ return data
+ def render(self, request):
+ request.setHeader("content-type", "text/html")
+ if request.method == "HEAD":
+ request.setHeader("content-length", len(self.html()))
+ return ''
+ return self.html()
+ def getChild(self, path, request):
+ if self.tests.has_key(path):
+ return self.oneTestClass(self, path, self.tests[path])
+ return NoResource("No such test '%s'" % path)
diff --git a/buildbot/buildbot/status/tinderbox.py b/buildbot/buildbot/status/tinderbox.py
new file mode 100644
index 0000000..51d404b
--- /dev/null
+++ b/buildbot/buildbot/status/tinderbox.py
@@ -0,0 +1,223 @@
+
+from email.Message import Message
+from email.Utils import formatdate
+
+from zope.interface import implements
+from twisted.internet import defer
+
+from buildbot import interfaces
+from buildbot.status import mail
+from buildbot.status.builder import SUCCESS, WARNINGS
+from buildbot.steps.shell import WithProperties
+
+import zlib, bz2, base64
+
+# TODO: docs, maybe a test of some sort just to make sure it actually imports
+# and can format email without raising an exception.
+
+class TinderboxMailNotifier(mail.MailNotifier):
+ """This is a Tinderbox status notifier. It can send e-mail to a number of
+ different tinderboxes or people. E-mails are sent at the beginning and
+ upon completion of each build. It can be configured to send out e-mails
+ for only certain builds.
+
+ The most basic usage is as follows::
+ TinderboxMailNotifier(fromaddr="buildbot@localhost",
+ tree="MyTinderboxTree",
+ extraRecipients=["tinderboxdaemon@host.org"])
+
+ The builder name (as specified in master.cfg) is used as the "build"
+ tinderbox option.
+
+ """
+ implements(interfaces.IEmailSender)
+
+ compare_attrs = ["extraRecipients", "fromaddr", "categories", "builders",
+ "addLogs", "relayhost", "subject", "binaryURL", "tree",
+ "logCompression", "errorparser", "columnName",
+ "useChangeTime"]
+
+ def __init__(self, fromaddr, tree, extraRecipients,
+ categories=None, builders=None, relayhost="localhost",
+ subject="buildbot %(result)s in %(builder)s", binaryURL="",
+ logCompression="", errorparser="unix", columnName=None,
+ useChangeTime=False):
+ """
+ @type fromaddr: string
+ @param fromaddr: the email address to be used in the 'From' header.
+
+ @type tree: string
+ @param tree: The Tinderbox tree to post to.
+
+ @type extraRecipients: tuple of string
+ @param extraRecipients: E-mail addresses of recipients. This should at
+ least include the tinderbox daemon.
+
+ @type categories: list of strings
+ @param categories: a list of category names to serve status
+ information for. Defaults to None (all
+ categories). Use either builders or categories,
+ but not both.
+
+ @type builders: list of strings
+ @param builders: a list of builder names for which mail should be
+ sent. Defaults to None (send mail for all builds).
+ Use either builders or categories, but not both.
+
+ @type relayhost: string
+ @param relayhost: the host to which the outbound SMTP connection
+ should be made. Defaults to 'localhost'
+
+ @type subject: string
+ @param subject: a string to be used as the subject line of the message.
+ %(builder)s will be replaced with the name of the
+ builder which provoked the message.
+ This parameter is not significant for the tinderbox
+ daemon.
+
+ @type binaryURL: string
+ @param binaryURL: If specified, this should be the location where final
+ binary for a build is located.
+ (ie. http://www.myproject.org/nightly/08-08-2006.tgz)
+ It will be posted to the Tinderbox.
+
+ @type logCompression: string
+ @param logCompression: The type of compression to use on the log.
+ Valid options are "bzip2" and "gzip". gzip is
+ only known to work on Python 2.4 and above.
+
+ @type errorparser: string
+ @param errorparser: The error parser that the Tinderbox server
+ should use when scanning the log file.
+ Default is "unix".
+
+ @type columnName: string
+ @param columnName: When columnName is None, use the buildername as
+ the Tinderbox column name. When columnName is a
+ string this exact string will be used for all
+ builders that this TinderboxMailNotifier cares
+ about (not recommended). When columnName is a
+ WithProperties instance it will be interpolated
+ as such. See WithProperties for more detail.
+ @type useChangeTime: bool
+ @param useChangeTime: When True, the time of the first Change for a
+ build is used as the builddate. When False,
+ the current time is used as the builddate.
+ """
+
+ mail.MailNotifier.__init__(self, fromaddr, categories=categories,
+ builders=builders, relayhost=relayhost,
+ subject=subject,
+ extraRecipients=extraRecipients,
+ sendToInterestedUsers=False)
+ self.tree = tree
+ self.binaryURL = binaryURL
+ self.logCompression = logCompression
+ self.errorparser = errorparser
+ self.useChangeTime = useChangeTime
+ assert columnName is None or type(columnName) is str \
+ or isinstance(columnName, WithProperties), \
+ "columnName must be None, a string, or a WithProperties instance"
+ self.columnName = columnName
+
+ def buildStarted(self, name, build):
+ builder = build.getBuilder()
+ if self.builders is not None and name not in self.builders:
+ return # ignore this Build
+ if self.categories is not None and \
+ builder.category not in self.categories:
+ return # ignore this build
+ self.buildMessage(name, build, "building")
+
+ def buildMessage(self, name, build, results):
+ text = ""
+ res = ""
+ # shortform
+ t = "tinderbox:"
+
+ text += "%s tree: %s\n" % (t, self.tree)
+ # the start time
+ # getTimes() returns a fractioned time that tinderbox doesn't understand
+ builddate = int(build.getTimes()[0])
+ # attempt to pull a Change time from this Build's Changes.
+ # if that doesn't work, fall back on the current time
+ if self.useChangeTime:
+ try:
+ builddate = build.getChanges()[-1].when
+ except:
+ pass
+ text += "%s builddate: %s\n" % (t, builddate)
+ text += "%s status: " % t
+
+ if results == "building":
+ res = "building"
+ text += res
+ elif results == SUCCESS:
+ res = "success"
+ text += res
+ elif results == WARNINGS:
+ res = "testfailed"
+ text += res
+ else:
+ res += "busted"
+ text += res
+
+ text += "\n";
+
+ if self.columnName is None:
+ # use the builder name
+ text += "%s build: %s\n" % (t, name)
+ elif type(self.columnName) is str:
+ # use the exact string given
+ text += "%s build: %s\n" % (t, self.columnName)
+ elif isinstance(self.columnName, WithProperties):
+ # interpolate the WithProperties instance, use that
+ text += "%s build: %s\n" % (t, build.getProperties().render(self.columnName))
+ else:
+ raise Exception("columnName is an unhandled value")
+ text += "%s errorparser: %s\n" % (t, self.errorparser)
+
+ # if the build just started...
+ if results == "building":
+ text += "%s END\n" % t
+ # if the build finished...
+ else:
+ text += "%s binaryurl: %s\n" % (t, self.binaryURL)
+ text += "%s logcompression: %s\n" % (t, self.logCompression)
+
+ # logs will always be appended
+ logEncoding = ""
+ tinderboxLogs = ""
+ for log in build.getLogs():
+ l = ""
+ if self.logCompression == "bzip2":
+ compressedLog = bz2.compress(log.getText())
+ l = base64.encodestring(compressedLog)
+ logEncoding = "base64";
+ elif self.logCompression == "gzip":
+ compressedLog = zlib.compress(log.getText())
+ l = base64.encodestring(compressedLog)
+ logEncoding = "base64";
+ else:
+ l = log.getText()
+ tinderboxLogs += l
+
+ text += "%s logencoding: %s\n" % (t, logEncoding)
+ text += "%s END\n\n" % t
+ text += tinderboxLogs
+ text += "\n"
+
+ m = Message()
+ m.set_payload(text)
+
+ m['Date'] = formatdate(localtime=True)
+ m['Subject'] = self.subject % { 'result': res,
+ 'builder': name,
+ }
+ m['From'] = self.fromaddr
+ # m['To'] is added later
+
+ d = defer.DeferredList([])
+ d.addCallback(self._gotRecipients, self.extraRecipients, m)
+ return d
+
diff --git a/buildbot/buildbot/status/web/__init__.py b/buildbot/buildbot/status/web/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/buildbot/buildbot/status/web/__init__.py
diff --git a/buildbot/buildbot/status/web/about.py b/buildbot/buildbot/status/web/about.py
new file mode 100644
index 0000000..09748e6
--- /dev/null
+++ b/buildbot/buildbot/status/web/about.py
@@ -0,0 +1,33 @@
+
+from twisted.web import html
+from buildbot.status.web.base import HtmlResource
+import buildbot
+import twisted
+import sys
+
+class AboutBuildbot(HtmlResource):
+ title = "About this Buildbot"
+
+ def body(self, request):
+ data = ''
+ data += '<h1>Welcome to the Buildbot</h1>\n'
+ data += '<h2>Version Information</h2>\n'
+ data += '<ul>\n'
+ data += ' <li>Buildbot: %s</li>\n' % html.escape(buildbot.version)
+ data += ' <li>Twisted: %s</li>\n' % html.escape(twisted.__version__)
+ data += ' <li>Python: %s</li>\n' % html.escape(sys.version)
+ data += ' <li>Buildmaster platform: %s</li>\n' % html.escape(sys.platform)
+ data += '</ul>\n'
+
+ data += '''
+<h2>Source code</h2>
+
+<p>Buildbot is a free software project, released under the terms of the
+<a href="http://www.gnu.org/licenses/gpl.html">GNU GPL</a>.</p>
+
+<p>Please visit the <a href="http://buildbot.net/">Buildbot Home Page</a> for
+more information, including documentation, bug reports, and source
+downloads.</p>
+'''
+ return data
+
diff --git a/buildbot/buildbot/status/web/base.py b/buildbot/buildbot/status/web/base.py
new file mode 100644
index 0000000..e515a25
--- /dev/null
+++ b/buildbot/buildbot/status/web/base.py
@@ -0,0 +1,421 @@
+
+import urlparse, urllib, time
+from zope.interface import Interface
+from twisted.web import html, resource
+from buildbot.status import builder
+from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION
+from buildbot import version, util
+
class ITopBox(Interface):
    """I represent a box in the top row of the waterfall display: the one
    which shows the status of the last build for each builder."""
    def getBox(self, request):
        """Return a Box instance, which can produce a <td> cell.
        """

class ICurrentBox(Interface):
    """I represent the 'current activity' box, just above the builder name."""
    def getBox(self, status):
        """Return a Box instance, which can produce a <td> cell.
        """

class IBox(Interface):
    """I represent a box in the waterfall display."""
    def getBox(self, request):
        """Return a Box instance, which wraps an Event and can produce a <td>
        cell.
        """

class IHTMLLog(Interface):
    # marker interface: declares no methods of its own
    pass
+
# Map build-result constants (from buildbot.status.builder) to the CSS
# class names used when rendering result text.
css_classes = {SUCCESS: "success",
               WARNINGS: "warnings",
               FAILURE: "failure",
               SKIPPED: "skipped",
               EXCEPTION: "exception",
               }
+
# %-template for one label/value row of an HTML form
ROW_TEMPLATE = '''
<div class="row">
  <span class="label">%(label)s</span>
  <span class="field">%(field)s</span>
</div>
'''

def make_row(label, field):
    """Render one name/value row of HTML.

    `label` is plain text and is HTML-escaped before insertion.
    `field` is a bit of HTML structure and is inserted verbatim,
    without any encoding.
    """
    substitutions = {"label": html.escape(label), "field": field}
    return ROW_TEMPLATE % substitutions
+
def make_stop_form(stopURL, on_all=False, label="Build"):
    """Render the HTML form used to stop a build.

    `on_all` selects the 'stop all builds' wording; `label` names the
    thing being stopped in the submit button.
    """
    if on_all:
        intro = """<form action="%s" class='command stopbuild'>
          <p>To stop all builds, fill out the following fields and
          push the 'Stop' button</p>\n""" % stopURL
    else:
        intro = """<form action="%s" class='command stopbuild'>
          <p>To stop this build, fill out the following fields and
          push the 'Stop' button</p>\n""" % stopURL
    rows = [
        make_row("Your name:",
                 "<input type='text' name='username' />"),
        make_row("Reason for stopping build:",
                 "<input type='text' name='comments' />"),
    ]
    button = '<input type="submit" value="Stop %s" /></form>\n' % label
    return intro + "".join(rows) + button
+
def make_force_build_form(forceURL, on_all=False):
    """Render the HTML form used to force a build (optionally phrased for
    forcing a build on every Builder at once)."""
    if on_all:
        intro = """<form action="%s" class="command forcebuild">
          <p>To force a build on all Builders, fill out the following fields
          and push the 'Force Build' button</p>""" % forceURL
    else:
        intro = """<form action="%s" class="command forcebuild">
          <p>To force a build, fill out the following fields and
          push the 'Force Build' button</p>""" % forceURL
    rows = [
        make_row("Your name:",
                 "<input type='text' name='username' />"),
        make_row("Reason for build:",
                 "<input type='text' name='comments' />"),
        make_row("Branch to build:",
                 "<input type='text' name='branch' />"),
        make_row("Revision to build:",
                 "<input type='text' name='revision' />"),
    ]
    return (intro + "".join(rows)
            + '<input type="submit" value="Force Build" /></form>\n')
+
def td(text="", parms=None, **props):
    """Render a single HTML <td> cell.

    `text` may be a string or a list of strings (joined with <br />); an
    empty value is replaced with '&nbsp;' so the cell keeps its height.
    `parms` is an optional dict whose entries override matching keyword
    arguments.  Only a fixed whitelist of HTML attributes is emitted; a
    'comment' entry becomes an HTML comment before the cell, and 'class_'
    supplies the (Python-reserved-word) 'class' attribute.
    """
    data = ""
    data += " "
    # 'parms' used to default to a shared mutable {}; use None instead
    if parms:
        props.update(parms)
    comment = props.get("comment", None)
    if comment:
        data += "<!-- %s -->" % comment
    data += "<td"
    class_ = props.get('class_', None)
    if class_:
        props["class"] = class_
    for prop in ("align", "colspan", "rowspan", "border",
                 "valign", "halign", "class"):
        p = props.get(prop, None)
        if p is not None:  # identity test; was the non-idiomatic '!= None'
            data += " %s=\"%s\"" % (prop, p)
    data += ">"
    if not text:
        text = "&nbsp;"
    if isinstance(text, list):
        data += "<br />".join(text)
    else:
        data += text
    data += "</td>\n"
    return data
+
def build_get_class(b):
    """
    Return the CSS class to use for a finished build or buildstep,
    based on the result.

    Raises TypeError for anything that is not a BuildStatus or
    BuildStepStatus.
    """
    # The old code called b.getResults() unconditionally before the type
    # checks, so a foreign object raised AttributeError instead of the
    # intended TypeError, and the value was immediately recomputed anyway
    # (the "getResults duplicity" FIXME). Compute it once, per type.
    if isinstance(b, builder.BuildStatus):
        result = b.getResults()
    elif isinstance(b, builder.BuildStepStatus):
        result = b.getResults()[0]
        # after forcing a build, b.getResults() returns ((None, []), []), ugh
        if isinstance(result, tuple):
            result = result[0]
    else:
        # parenthesized raise works on both py2 and py3 (was 'raise T, msg')
        raise TypeError("%r is not a BuildStatus or BuildStepStatus" % b)

    if result is None:
        # FIXME: this happens when a buildstep is running ?
        return "running"
    return builder.Results[result]
+
def path_to_root(request):
    """Return the relative prefix ('', '../', '../../', ...) that leads
    from the requested page back to the site root.

    Examples:
      /waterfall         : ['waterfall']               -> ''
      /somewhere/lower   : ['somewhere', 'lower']      -> '../'
      /somewhere/indexy/ : ['somewhere', 'indexy', ''] -> '../../'
      /                  : []                          -> ''
    """
    if request.prepath:
        depth = len(request.prepath) - 1
    else:
        depth = 0
    return "../" * depth
+
def path_to_builder(request, builderstatus):
    """Relative URL of the page for one builder."""
    quoted_name = urllib.quote(builderstatus.getName(), safe='')
    return path_to_root(request) + "builders/" + quoted_name

def path_to_build(request, buildstatus):
    """Relative URL of the page for one build of a builder."""
    builder_url = path_to_builder(request, buildstatus.getBuilder())
    return builder_url + "/builds/%d" % buildstatus.getNumber()

def path_to_step(request, stepstatus):
    """Relative URL of the page for one step of a build."""
    build_url = path_to_build(request, stepstatus.getBuild())
    return build_url + "/steps/%s" % urllib.quote(stepstatus.getName(), safe='')

def path_to_slave(request, slave):
    """Relative URL of the page for one buildslave."""
    quoted_name = urllib.quote(slave.getName(), safe='')
    return path_to_root(request) + "buildslaves/" + quoted_name
+
class Box:
    # a Box wraps an Event. The Box has HTML <td> parameters that Events
    # lack, and it has a base URL to which each File's name is relative.
    # Events don't know about HTML.
    spacer = False

    def __init__(self, text=None, class_=None, urlbase=None,
                 **parms):
        """`text` is a string or list of strings for the cell body;
        `class_` becomes the <td> 'class' attribute; remaining keyword
        arguments are stashed as HTML parameters for the <td>. A
        'show_idle' keyword is consumed here and makes an empty box
        render as '[idle]'."""
        # 'text' used to default to a shared mutable list ([]), so every
        # default-constructed Box aliased the same object; use a None
        # sentinel and give each instance its own fresh list.
        if text is None:
            text = []
        self.text = text
        self.class_ = class_
        self.urlbase = urlbase
        self.show_idle = 0
        if 'show_idle' in parms:  # 'in' works on py2 and py3; has_key did not
            del parms['show_idle']
            self.show_idle = 1

        self.parms = parms
        # parms is a dict of HTML parameters for the <td> element that will
        # represent this Event in the waterfall display.

    def td(self, **props):
        """Render this box as a <td> cell via the module-level td()
        helper, substituting '[idle]' when empty and show_idle is set."""
        props.update(self.parms)
        text = self.text
        if not text and self.show_idle:
            text = ["[idle]"]
        return td(text, props, class_=self.class_)
+
+
class HtmlResource(resource.Resource):
    # this is a cheap sort of template thingy
    contentType = "text/html; charset=UTF-8"
    title = "Buildbot"
    addSlash = False # adapted from Nevow

    def getChild(self, path, request):
        # serve ourselves for the trailing-slash form of our own URL
        if self.addSlash and path == "" and len(request.postpath) == 0:
            return self
        return resource.Resource.getChild(self, path, request)

    def render(self, request):
        """Render the complete page: register the HTTP channel with the
        WebStatus, then emit self.content() as UTF-8 (headers only for
        HEAD requests)."""
        # tell the WebStatus about the HTTPChannel that got opened, so they
        # can close it if we get reconfigured and the WebStatus goes away.
        # They keep a weakref to this, since chances are good that it will be
        # closed by the browser or by us before we get reconfigured. See
        # ticket #102 for details.
        if hasattr(request, "channel"):
            # web.distrib.Request has no .channel
            request.site.buildbot_service.registerChannel(request.channel)

        # Our pages no longer require that their URL end in a slash. Instead,
        # they all use request.childLink() or some equivalent which takes the
        # last path component into account. This clause is left here for
        # historical and educational purposes.
        # NOTE: the 'if False and ...' guard makes this branch deliberately
        # unreachable.
        if False and self.addSlash and request.prepath[-1] != '':
            # this is intended to behave like request.URLPath().child('')
            # but we need a relative URL, since we might be living behind a
            # reverse proxy
            #
            # note that the Location: header (as used in redirects) are
            # required to have absolute URIs, and my attempt to handle
            # reverse-proxies gracefully violates rfc2616. This frequently
            # works, but single-component paths sometimes break. The best
            # strategy is to avoid these redirects whenever possible by using
            # HREFs with trailing slashes, and only use the redirects for
            # manually entered URLs.
            url = request.prePathURL()
            scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
            new_url = request.prepath[-1] + "/"
            if query:
                new_url += "?" + query
            request.redirect(new_url)
            return ''

        data = self.content(request)
        if isinstance(data, unicode):
            data = data.encode("utf-8")
        request.setHeader("content-type", self.contentType)
        if request.method == "HEAD":
            request.setHeader("content-length", len(data))
            return ''
        return data

    # convenience accessors for the services hanging off the web Site
    def getStatus(self, request):
        return request.site.buildbot_service.getStatus()
    def getControl(self, request):
        return request.site.buildbot_service.getControl()

    def getChangemaster(self, request):
        return request.site.buildbot_service.getChangeSvc()

    def path_to_root(self, request):
        # delegates to the module-level helper of the same name
        return path_to_root(request)

    def footer(self, s, req):
        """Render the common page footer: welcome link, buildbot version,
        project link, and the page-built timestamp."""
        # TODO: this stuff should be generated by a template of some sort
        projectURL = s.getProjectURL()
        projectName = s.getProjectName()
        data = '<hr /><div class="footer">\n'

        welcomeurl = self.path_to_root(req) + "index.html"
        data += '[<a href="%s">welcome</a>]\n' % welcomeurl
        data += "<br />\n"

        data += '<a href="http://buildbot.sourceforge.net/">Buildbot</a>'
        data += "-%s " % version
        if projectName:
            data += "working for the "
            if projectURL:
                data += "<a href=\"%s\">%s</a> project." % (projectURL,
                                                            projectName)
            else:
                data += "%s project." % projectName
        data += "<br />\n"
        data += ("Page built: " +
                 time.strftime("%a %d %b %Y %H:%M:%S",
                               time.localtime(util.now()))
                 + "\n")
        data += '</div>\n'

        return data

    def getTitle(self, request):
        # hook: subclasses may compute a per-request title
        return self.title

    def fillTemplate(self, template, request):
        """Expand a %(name)s template with per-request values ('root',
        'title') plus whatever the WebStatus put in template_values."""
        s = request.site.buildbot_service
        values = s.template_values.copy()
        values['root'] = self.path_to_root(request)
        # e.g. to reference the top-level 'buildbot.css' page, use
        # "%(root)sbuildbot.css"
        values['title'] = self.getTitle(request)
        return template % values

    def content(self, request):
        """Assemble the full page: header template, <head> elements,
        <body> (from self.body()), then the footer template."""
        s = request.site.buildbot_service
        data = ""
        data += self.fillTemplate(s.header, request)
        data += "<head>\n"
        for he in s.head_elements:
            data += " " + self.fillTemplate(he, request) + "\n"
        data += self.head(request)
        data += "</head>\n\n"

        data += '<body %s>\n' % " ".join(['%s="%s"' % (k,v)
                                          for (k,v) in s.body_attrs.items()])
        data += self.body(request)
        data += "</body>\n"
        data += self.fillTemplate(s.footer, request)
        return data

    def head(self, request):
        # hook: extra markup for <head>; subclasses may override
        return ""

    def body(self, request):
        # hook: the page payload; subclasses are expected to override
        return "Dummy\n"
+
class StaticHTML(HtmlResource):
    """An HtmlResource whose body is a fixed, pre-rendered chunk of HTML."""

    def __init__(self, body, title):
        HtmlResource.__init__(self)
        self.title = title
        self.bodyHTML = body

    def body(self, request):
        """Return the canned HTML; the request is ignored."""
        return self.bodyHTML
+
# time-unit constants, in seconds (MONTH is the 30-day approximation)
MINUTE = 60
HOUR = 60*MINUTE
DAY = 24*HOUR
WEEK = 7*DAY
MONTH = 30*DAY

def plural(word, words, num):
    """Format `num` with its unit, picking singular `word` or plural
    `words` based on int(num)."""
    if int(num) == 1:
        unit = word
    else:
        unit = words
    return "%d %s" % (num, unit)

def abbreviate_age(age):
    """Turn an age in seconds into a rough human-readable '... ago'
    phrase; anything two months or older is 'a long time ago'."""
    if age <= 90:
        return "%s ago" % plural("second", "seconds", age)
    # (upper bound, divisor, singular, plural) from finest to coarsest
    for limit, unit_seconds, one, many in ((90*MINUTE, MINUTE, "minute", "minutes"),
                                           (DAY, HOUR, "hour", "hours"),
                                           (2*WEEK, DAY, "day", "days"),
                                           (2*MONTH, WEEK, "week", "weeks")):
        if age < limit:
            return "about %s ago" % plural(one, many, age / unit_seconds)
    return "a long time ago"
+
+
class OneLineMixin:
    """Mixin for resources that render a build as a single summary line."""

    # strftime format for the line's timestamp
    LINE_TIME_FORMAT = "%b %d %H:%M"

    def get_line_values(self, req, build):
        '''
        Collect the data needed for each line display
        '''
        # (removed two locals that were assigned but never read:
        #  'text = build.getText()' and 'root = self.path_to_root(req)')
        builder_name = build.getBuilder().getName()
        results = build.getResults()
        # 'got_revision' may be absent (KeyError) or None; show '??' then
        try:
            rev = build.getProperty("got_revision")
            if rev is None:
                rev = "??"
        except KeyError:
            rev = "??"
        rev = str(rev)
        if len(rev) > 40:
            rev = "version is too-long"
        css_class = css_classes.get(results, "")
        values = {'class': css_class,
                  'builder_name': builder_name,
                  'buildnum': build.getNumber(),
                  'results': css_class,
                  'text': " ".join(build.getText()),
                  'buildurl': path_to_build(req, build),
                  'builderurl': path_to_builder(req, build.getBuilder()),
                  'rev': rev,
                  'time': time.strftime(self.LINE_TIME_FORMAT,
                                        time.localtime(build.getTimes()[0])),
                  }
        return values

    def make_line(self, req, build, include_builder=True):
        '''
        Format and render a single line into HTML
        '''
        values = self.get_line_values(req, build)
        fmt_pieces = ['<font size="-1">(%(time)s)</font>',
                      'rev=[%(rev)s]',
                      '<span class="%(class)s">%(results)s</span>',
                      ]
        if include_builder:
            fmt_pieces.append('<a href="%(builderurl)s">%(builder_name)s</a>')
        fmt_pieces.append('<a href="%(buildurl)s">#%(buildnum)d</a>:')
        fmt_pieces.append('%(text)s')
        data = " ".join(fmt_pieces) % values
        return data
+
def map_branches(branches):
    """Translate query-arg branch names for the status API.

    When the query args say "trunk", present that to things like
    IBuilderStatus.generateFinishedBuilds as None, since that's the
    convention in use. But also include 'trunk', because some VC systems
    refer to it that way. In the long run we should clean this up better,
    maybe with Branch objects or something.
    """
    if "trunk" not in branches:
        return branches
    return branches + [None]
diff --git a/buildbot/buildbot/status/web/baseweb.py b/buildbot/buildbot/status/web/baseweb.py
new file mode 100644
index 0000000..a963a9a
--- /dev/null
+++ b/buildbot/buildbot/status/web/baseweb.py
@@ -0,0 +1,614 @@
+
+import os, sys, urllib, weakref
+from itertools import count
+
+from zope.interface import implements
+from twisted.python import log
+from twisted.application import strports, service
+from twisted.web import server, distrib, static, html
+from twisted.spread import pb
+
+from buildbot.interfaces import IControl, IStatusReceiver
+
+from buildbot.status.web.base import HtmlResource, Box, \
+ build_get_class, ICurrentBox, OneLineMixin, map_branches, \
+ make_stop_form, make_force_build_form
+from buildbot.status.web.feeds import Rss20StatusResource, \
+ Atom10StatusResource
+from buildbot.status.web.waterfall import WaterfallStatusResource
+from buildbot.status.web.grid import GridStatusResource
+from buildbot.status.web.changes import ChangesResource
+from buildbot.status.web.builder import BuildersResource
+from buildbot.status.web.slaves import BuildSlavesResource
+from buildbot.status.web.xmlrpc import XMLRPCServer
+from buildbot.status.web.about import AboutBuildbot
+
+# this class contains the status services (WebStatus and the older Waterfall)
+# which can be put in c['status']. It also contains some of the resources
+# that are attached to the WebStatus at various well-known URLs, which the
+# admin might wish to attach (using WebStatus.putChild) at other URLs.
+
+
# placeholder resource: always renders "missing" (not yet implemented)
class LastBuild(HtmlResource):
    def body(self, request):
        return "missing\n"
+
def getLastNBuilds(status, numbuilds, builders=None, branches=None):
    """Return a list with the last few Builds, sorted by start time.
    builder_names=None means all builders

    `builders` optionally restricts the result to that set of builder
    names.  `branches` is accepted for interface symmetry but is
    currently unused: builds are not filtered by branch here.
    """
    # builders/branches used to default to shared mutable lists; use None
    # sentinels so the defaults cannot be accidentally aliased.
    builders = builders or []
    branches = branches or []

    # TODO: this unsorts the list of builder names, ick
    builder_names = set(status.getBuilderNames())
    if builders:
        builder_names = builder_names.intersection(set(builders))

    # to make sure that we get everything, we must get 'numbuilds' builds
    # from *each* source, then sort by ending time, then trim to the last
    # 20. We could be more efficient, but it would require the same
    # gnarly code that the Waterfall uses to generate one event at a
    # time. TODO: factor that code out into some useful class.
    events = []
    for builder_name in builder_names:
        builder = status.getBuilder(builder_name)
        for build_number in count(1):
            if build_number > numbuilds:
                break # enough from this builder, move on to another
            build = builder.getBuild(-build_number)
            if not build:
                break # no more builds here, move on to the next builder
            (build_start, build_end) = build.getTimes()
            events.append((build_start, builder_name, build))
    # sort on (start_time, builder_name); the old implementation used a
    # cmp-style sorter, which Python 3 removed and which sorts more slowly
    events.sort(key=lambda event: event[:2])
    # now only return the actual build, and only return some of them
    return [e[2] for e in events[-numbuilds:]]
+
+
# /one_line_per_build
# accepts builder=, branch=, numbuilds=
class OneLinePerBuild(HtmlResource, OneLineMixin):
    """This shows one line per build, combining all builders together. Useful
    query arguments:

    numbuilds=: how many lines to display
    builder=: show only builds for this builder. Multiple builder= arguments
              can be used to see builds from any builder in the set.
    """

    title = "Recent Builds"

    def __init__(self, numbuilds=20):
        HtmlResource.__init__(self)
        # default line count when no numbuilds= query argument is given
        self.numbuilds = numbuilds

    def getChild(self, path, req):
        # /one_line_per_build/BUILDERNAME narrows the view to one builder
        status = self.getStatus(req)
        builder = status.getBuilder(path)
        return OneLinePerBuildOneBuilder(builder)

    def body(self, req):
        """Render the list of recent finished builds, plus stop/force
        forms when a control interface is available."""
        status = self.getStatus(req)
        control = self.getControl(req)
        numbuilds = int(req.args.get("numbuilds", [self.numbuilds])[0])
        builders = req.args.get("builder", [])
        branches = [b for b in req.args.get("branch", []) if b]

        g = status.generateFinishedBuilds(builders, map_branches(branches),
                                          numbuilds)

        data = ""

        # really this is "up to %d builds"
        data += "<h1>Last %d finished builds: %s</h1>\n" % \
                (numbuilds, ", ".join(branches))
        if builders:
            data += ("<p>of builders: %s</p>\n" % (", ".join(builders)))
        data += "<ul>\n"
        got = 0
        building = False
        online = 0
        for build in g:
            got += 1
            data += " <li>" + self.make_line(req, build) + "</li>\n"
            # track builder states so we know which control forms to offer
            builder_status = build.getBuilder().getState()[0]
            if builder_status == "building":
                building = True
                online += 1
            elif builder_status != "offline":
                online += 1
        if not got:
            data += " <li>No matching builds found</li>\n"
        data += "</ul>\n"

        # control is None unless allowForce was enabled on the WebStatus
        if control is not None:
            if building:
                stopURL = "builders/_all/stop"
                data += make_stop_form(stopURL, True, "Builds")
            if online:
                forceURL = "builders/_all/force"
                data += make_force_build_form(forceURL, True)

        return data
+
+
+
# /one_line_per_build/$BUILDERNAME
# accepts branch=, numbuilds=

class OneLinePerBuildOneBuilder(HtmlResource, OneLineMixin):
    """Shows the recent builds of a single builder, one line each.
    Query arguments: numbuilds= (line count) and branch= (repeatable
    branch filter)."""

    def __init__(self, builder, numbuilds=20):
        HtmlResource.__init__(self)
        self.builder = builder
        self.builder_name = builder.getName()
        self.numbuilds = numbuilds
        self.title = "Recent Builds of %s" % self.builder_name

    def body(self, req):
        status = self.getStatus(req)
        numbuilds = int(req.args.get("numbuilds", [self.numbuilds])[0])
        branches = [b for b in req.args.get("branch", []) if b]

        # walk backwards through all builds of a single builder
        recent_builds = self.builder.generateFinishedBuilds(
            map_branches(branches), numbuilds)

        pieces = ["<h1>Last %d builds of builder %s: %s</h1>\n"
                  % (numbuilds, self.builder_name, ", ".join(branches)),
                  "<ul>\n"]
        matched = 0
        for build in recent_builds:
            matched += 1
            pieces.append(" <li>" + self.make_line(req, build) + "</li>\n")
        if not matched:
            pieces.append(" <li>No matching builds found</li>\n")
        pieces.append("</ul>\n")

        return "".join(pieces)
+
# /one_box_per_builder
# accepts builder=, branch=
class OneBoxPerBuilder(HtmlResource):
    """This shows a narrow table with one row per builder. The leftmost column
    contains the builder name. The next column contains the results of the
    most recent build. The right-hand column shows the builder's current
    activity.

    builder=: show only builds for this builder. Multiple builder= arguments
              can be used to see builds from any builder in the set.
    """

    title = "Latest Build"

    def body(self, req):
        """Render the builder table, plus stop/force forms when a control
        interface is available."""
        status = self.getStatus(req)
        control = self.getControl(req)

        # builders/branches to show come from the query arguments;
        # default is every builder the status knows about
        builders = req.args.get("builder", status.getBuilderNames())
        branches = [b for b in req.args.get("branch", []) if b]

        data = ""

        data += "<h2>Latest builds: %s</h2>\n" % ", ".join(branches)
        data += "<table>\n"

        building = False
        online = 0
        base_builders_url = self.path_to_root(req) + "builders/"
        for bn in builders:
            base_builder_url = base_builders_url + urllib.quote(bn, safe='')
            builder = status.getBuilder(bn)
            data += "<tr>\n"
            data += '<td class="box"><a href="%s">%s</a></td>\n' \
                  % (base_builder_url, html.escape(bn))
            # only the single most recent finished build is shown
            builds = list(builder.generateFinishedBuilds(map_branches(branches),
                                                         num_builds=1))
            if builds:
                b = builds[0]
                url = (base_builder_url + "/builds/%d" % b.getNumber())
                try:
                    label = b.getProperty("got_revision")
                except KeyError:
                    label = None
                # fall back to the build number when the revision is
                # missing or too long to display nicely
                if not label or len(str(label)) > 20:
                    label = "#%d" % b.getNumber()
                text = ['<a href="%s">%s</a>' % (url, label)]
                text.extend(b.getText())
                box = Box(text,
                          class_="LastBuild box %s" % build_get_class(b))
                data += box.td(align="center")
            else:
                data += '<td class="LastBuild box" >no build</td>\n'
            current_box = ICurrentBox(builder).getBox(status)
            data += current_box.td(align="center")

            # track builder states so we know which control forms to offer
            builder_status = builder.getState()[0]
            if builder_status == "building":
                building = True
                online += 1
            elif builder_status != "offline":
                online += 1

        data += "</table>\n"

        # control is None unless allowForce was enabled on the WebStatus
        if control is not None:
            if building:
                stopURL = "builders/_all/stop"
                data += make_stop_form(stopURL, True, "Builds")
            if online:
                forceURL = "builders/_all/force"
                data += make_force_build_form(forceURL, True)

        return data
+
+
+
# Default page chrome. HtmlResource.content() runs HEADER, each entry of
# HEAD_ELEMENTS, and FOOTER through fillTemplate(), so %(title)s and
# %(root)s are substituted per request. WebStatus.__init__ copies these
# into instance attributes so they can be customized per status target.
HEADER = '''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
 "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">

<html
 xmlns="http://www.w3.org/1999/xhtml"
 lang="en"
 xml:lang="en">
'''

HEAD_ELEMENTS = [
    '<title>%(title)s</title>',
    '<link href="%(root)sbuildbot.css" rel="stylesheet" type="text/css" />',
    ]
BODY_ATTRS = {
    'vlink': "#800080",
    }

FOOTER = '''
</html>
'''
+
+
class WebStatus(service.MultiService):
    implements(IStatusReceiver)
    # TODO: IStatusReceiver is really about things which subscribe to hear
    # about buildbot events. We need a different interface (perhaps a parent
    # of IStatusReceiver) for status targets that don't subscribe, like the
    # WebStatus class. buildbot.master.BuildMaster.loadConfig:737 asserts
    # that everything in c['status'] provides IStatusReceiver, but really it
    # should check that they provide IStatusTarget instead.

    # NOTE: because this string is not the first statement in the class
    # body, it is NOT the class __doc__; it is kept as documentation only.
    """
    The webserver provided by this class has the following resources:

    /waterfall : the big time-oriented 'waterfall' display, with links
                 to individual changes, builders, builds, steps, and logs.
                 A number of query-arguments can be added to influence
                 the display.
    /rss : a rss feed summarizing all failed builds. The same
           query-arguments used by 'waterfall' can be added to
           influence the feed output.
    /atom : an atom feed summarizing all failed builds. The same
            query-arguments used by 'waterfall' can be added to
            influence the feed output.
    /grid : another summary display that shows a grid of builds, with
            sourcestamps on the x axis, and builders on the y.  Query
            arguments similar to those for the waterfall can be added.
    /builders/BUILDERNAME: a page summarizing the builder. This includes
                           references to the Schedulers that feed it,
                           any builds currently in the queue, which
                           buildslaves are designated or attached, and a
                           summary of the build process it uses.
    /builders/BUILDERNAME/builds/NUM: a page describing a single Build
    /builders/BUILDERNAME/builds/NUM/steps/STEPNAME: describes a single step
    /builders/BUILDERNAME/builds/NUM/steps/STEPNAME/logs/LOGNAME: a StatusLog
    /builders/BUILDERNAME/builds/NUM/tests : summarize test results
    /builders/BUILDERNAME/builds/NUM/tests/TEST.NAME: results of one test
    /builders/_all/{force,stop}: force a build/stop building on all builders.
    /changes : summarize all ChangeSources
    /changes/CHANGENUM: a page describing a single Change
    /schedulers/SCHEDULERNAME: a page describing a Scheduler, including
                               a description of its behavior, a list of the
                               Builders it triggers, and list of the Changes
                               that are queued awaiting the tree-stable
                               timer, and controls to accelerate the timer.
    /buildslaves : list all BuildSlaves
    /buildslaves/SLAVENAME : describe a single BuildSlave
    /one_line_per_build : summarize the last few builds, one line each
    /one_line_per_build/BUILDERNAME : same, but only for a single builder
    /one_box_per_builder : show the latest build and current activity
    /about : describe this buildmaster (Buildbot and support library versions)
    /xmlrpc : (not yet implemented) an XMLRPC server with build status


    All URLs for pages which are not defined here are used to look
    for files in PUBLIC_HTML, which defaults to BASEDIR/public_html.
    This means that /robots.txt or /buildbot.css or /favicon.ico can
    be placed in that directory.

    If an index file (index.html, index.htm, or index, in that order) is
    present in PUBLIC_HTML, it will be used for the root resource. If not,
    the default behavior is to put a redirection to the /waterfall page.

    All of the resources provided by this service use relative URLs to reach
    each other. The only absolute links are the c['projectURL'] links at the
    top and bottom of the page, and the buildbot home-page link at the
    bottom.

    This webserver defines class attributes on elements so they can be styled
    with CSS stylesheets. All pages pull in PUBLIC_HTML/buildbot.css, and you
    can cause additional stylesheets to be loaded by adding a suitable <link>
    to the WebStatus instance's .head_elements attribute.

    Buildbot uses some generic classes to identify the type of object, and
    some more specific classes for the various kinds of those types. It does
    this by specifying both in the class attributes where applicable,
    separated by a space. It is important that in your CSS you declare the
    more generic class styles above the more specific ones. For example,
    first define a style for .Event, and below that for .SUCCESS

    The following CSS class names are used:
        - Activity, Event, BuildStep, LastBuild: general classes
        - waiting, interlocked, building, offline, idle: Activity states
        - start, running, success, failure, warnings, skipped, exception:
          LastBuild and BuildStep states
        - Change: box with change
        - Builder: box for builder name (at top)
        - Project
        - Time

    """

    # we are not a ComparableMixin, and therefore the webserver will be
    # rebuilt every time we reconfig. This is because WebStatus.putChild()
    # makes it too difficult to tell whether two instances are the same or
    # not (we'd have to do a recursive traversal of all children to discover
    # all the changes).

    def __init__(self, http_port=None, distrib_port=None, allowForce=False,
                 public_html="public_html", site=None):
        """Run a web server that provides Buildbot status.

        @type  http_port: int or L{twisted.application.strports} string
        @param http_port: a strports specification describing which port the
                          buildbot should use for its web server, with the
                          Waterfall display as the root page. For backwards
                          compatibility this can also be an int. Use
                          'tcp:8000' to listen on that port, or
                          'tcp:12345:interface=127.0.0.1' if you only want
                          local processes to connect to it (perhaps because
                          you are using an HTTP reverse proxy to make the
                          buildbot available to the outside world, and do not
                          want to make the raw port visible).

        @type  distrib_port: int or L{twisted.application.strports} string
        @param distrib_port: Use this if you want to publish the Waterfall
                             page using web.distrib instead. The most common
                             case is to provide a string that is an absolute
                             pathname to the unix socket on which the
                             publisher should listen
                             (C{os.path.expanduser(~/.twistd-web-pb)} will
                             match the default settings of a standard
                             twisted.web 'personal web server'). Another
                             possibility is to pass an integer, which means
                             the publisher should listen on a TCP socket,
                             allowing the web server to be on a different
                             machine entirely. Both forms are provided for
                             backwards compatibility; the preferred form is a
                             strports specification like
                             'unix:/home/buildbot/.twistd-web-pb'. Providing
                             a non-absolute pathname will probably confuse
                             the strports parser.

        @param allowForce: boolean, if True then the webserver will allow
                           visitors to trigger and cancel builds

        @param public_html: the path to the public_html directory for this display,
                            either absolute or relative to the basedir.  The default
                            is 'public_html', which selects BASEDIR/public_html.

        @type  site: None or L{twisted.web.server.Site}
        @param site: Use this if you want to define your own object instead of
                     using the default.
        """

        service.MultiService.__init__(self)
        # normalize backwards-compatible int ports into strports strings
        if type(http_port) is int:
            http_port = "tcp:%d" % http_port
        self.http_port = http_port
        if distrib_port is not None:
            if type(distrib_port) is int:
                distrib_port = "tcp:%d" % distrib_port
            if distrib_port[0] in "/~.": # pathnames
                distrib_port = "unix:%s" % distrib_port
        self.distrib_port = distrib_port
        self.allowForce = allowForce
        self.public_html = public_html

        # If we were given a site object, go ahead and use it.
        if site:
            self.site = site
        else:
            # this will be replaced once we've been attached to a parent (and
            # thus have a basedir and can reference BASEDIR)
            root = static.Data("placeholder", "text/plain")
            self.site = server.Site(root)
        self.childrenToBeAdded = {}

        self.setupUsualPages()

        # the following items are accessed by HtmlResource when it renders
        # each page.
        self.site.buildbot_service = self
        self.header = HEADER
        self.head_elements = HEAD_ELEMENTS[:]
        self.body_attrs = BODY_ATTRS.copy()
        self.footer = FOOTER
        self.template_values = {}

        # keep track of cached connections so we can break them when we shut
        # down. See ticket #102 for more details.
        self.channels = weakref.WeakKeyDictionary()

        if self.http_port is not None:
            s = strports.service(self.http_port, self.site)
            s.setServiceParent(self)
        if self.distrib_port is not None:
            f = pb.PBServerFactory(distrib.ResourcePublisher(self.site))
            s = strports.service(self.distrib_port, f)
            s.setServiceParent(self)

    def setupUsualPages(self):
        """Attach the standard well-known child resources."""
        #self.putChild("", IndexOrWaterfallRedirection())
        self.putChild("waterfall", WaterfallStatusResource())
        self.putChild("grid", GridStatusResource())
        self.putChild("builders", BuildersResource()) # has builds/steps/logs
        self.putChild("changes", ChangesResource())
        self.putChild("buildslaves", BuildSlavesResource())
        #self.putChild("schedulers", SchedulersResource())
        self.putChild("one_line_per_build", OneLinePerBuild())
        self.putChild("one_box_per_builder", OneBoxPerBuilder())
        self.putChild("xmlrpc", XMLRPCServer())
        self.putChild("about", AboutBuildbot())

    def __repr__(self):
        if self.http_port is None:
            return "<WebStatus on path %s at %s>" % (self.distrib_port,
                                                     hex(id(self)))
        if self.distrib_port is None:
            return "<WebStatus on port %s at %s>" % (self.http_port,
                                                     hex(id(self)))
        return ("<WebStatus on port %s and path %s at %s>" %
                (self.http_port, self.distrib_port, hex(id(self))))

    def setServiceParent(self, parent):
        service.MultiService.setServiceParent(self, parent)

        # this class keeps a *separate* link to the buildmaster, rather than
        # just using self.parent, so that when we are "disowned" (and thus
        # parent=None), any remaining HTTP clients of this WebStatus will still
        # be able to get reasonable results.
        self.master = parent

        self.setupSite()

    def setupSite(self):
        # this is responsible for creating the root resource. It isn't done
        # at __init__ time because we need to reference the parent's basedir.
        htmldir = os.path.abspath(os.path.join(self.master.basedir, self.public_html))
        if os.path.isdir(htmldir):
            log.msg("WebStatus using (%s)" % htmldir)
        else:
            log.msg("WebStatus: warning: %s is missing. Do you need to run"
                    " 'buildbot upgrade-master' on this buildmaster?" % htmldir)
            # all static pages will get a 404 until upgrade-master is used to
            # populate this directory. Create the directory, though, since
            # otherwise we get internal server errors instead of 404s.
            os.mkdir(htmldir)
        root = static.File(htmldir)

        # attach everything registered via putChild() before we had a basedir
        for name, child_resource in self.childrenToBeAdded.iteritems():
            root.putChild(name, child_resource)

        status = self.getStatus()
        root.putChild("rss", Rss20StatusResource(status))
        root.putChild("atom", Atom10StatusResource(status))

        self.site.resource = root

    def putChild(self, name, child_resource):
        """This behaves a lot like root.putChild() . """
        # children are deferred until setupSite() builds the real root
        self.childrenToBeAdded[name] = child_resource

    def registerChannel(self, channel):
        # remember the HTTP channel so stopService() can close it; the
        # WeakKeyDictionary drops entries once the channel goes away
        self.channels[channel] = 1 # weakrefs

    def stopService(self):
        """Disconnect any remaining HTTP clients, then stop child services."""
        for channel in self.channels:
            try:
                channel.transport.loseConnection()
            except:
                # best-effort cleanup: log and keep disconnecting the rest
                log.msg("WebStatus.stopService: error while disconnecting"
                        " leftover clients")
                log.err()
        return service.MultiService.stopService(self)

    def getStatus(self):
        return self.master.getStatus()

    def getControl(self):
        # returns None unless allowForce was enabled
        if self.allowForce:
            return IControl(self.master)
        return None

    def getChangeSvc(self):
        return self.master.change_svc
    def getPortnum(self):
        # this is for the benefit of unit tests
        s = list(self)[0]
        return s._port.getHost().port
+
+# resources can get access to the IStatus by calling
+# request.site.buildbot_service.getStatus()
+
# this is the compatibility class for the old waterfall. It is exactly like a
# regular WebStatus except that the root resource (e.g. http://buildbot.net/)
# always redirects to a WaterfallStatusResource, and the old arguments are
# mapped into the new resource-tree approach. In the normal WebStatus, the
# root resource either redirects the browser to /waterfall or serves
# PUBLIC_HTML/index.html, and favicon/robots.txt are provided by
# having the admin write actual files into PUBLIC_HTML/ .

# note: we don't use a util.Redirect here because HTTP requires that the
# Location: header provide an absolute URI, and it's non-trivial to figure
# out our absolute URI from here.

class Waterfall(WebStatus):

    # locate the bundled css/icon data files; this runs at class-definition
    # time and branches on whether we are a frozen (e.g. py2exe) executable
    if hasattr(sys, "frozen"):
        # all 'data' files are in the directory of our executable
        here = os.path.dirname(sys.executable)
        buildbot_icon = os.path.abspath(os.path.join(here, "buildbot.png"))
        buildbot_css = os.path.abspath(os.path.join(here, "classic.css"))
    else:
        # running from source
        # the icon is sibpath(__file__, "../buildbot.png") . This is for
        # portability.
        up = os.path.dirname
        buildbot_icon = os.path.abspath(os.path.join(up(up(up(__file__))),
                                                     "buildbot.png"))
        buildbot_css = os.path.abspath(os.path.join(up(__file__),
                                                    "classic.css"))

    compare_attrs = ["http_port", "distrib_port", "allowForce",
                     "categories", "css", "favicon", "robots_txt"]

    def __init__(self, http_port=None, distrib_port=None, allowForce=True,
                 categories=None, css=buildbot_css, favicon=buildbot_icon,
                 robots_txt=None):
        """Deprecated entry point: warns, delegates to WebStatus, and
        registers static children for the old css/favicon/robots_txt
        arguments (files are read once, here, at construction time)."""
        import warnings
        m = ("buildbot.status.html.Waterfall is deprecated as of 0.7.6 "
             "and will be removed from a future release. "
             "Please use html.WebStatus instead.")
        warnings.warn(m, DeprecationWarning)

        WebStatus.__init__(self, http_port, distrib_port, allowForce)
        self.css = css
        if css:
            if os.path.exists(os.path.join("public_html", "buildbot.css")):
                # they've upgraded, so defer to that copy instead
                pass
            else:
                data = open(css, "rb").read()
                self.putChild("buildbot.css", static.Data(data, "text/plain"))
        self.favicon = favicon
        self.robots_txt = robots_txt
        if favicon:
            data = open(favicon, "rb").read()
            self.putChild("favicon.ico", static.Data(data, "image/x-icon"))
        if robots_txt:
            data = open(robots_txt, "rb").read()
            self.putChild("robots.txt", static.Data(data, "text/plain"))
        # the root resource always redirects/renders the classic waterfall
        self.putChild("", WaterfallStatusResource(categories))
diff --git a/buildbot/buildbot/status/web/build.py b/buildbot/buildbot/status/web/build.py
new file mode 100644
index 0000000..5d01358
--- /dev/null
+++ b/buildbot/buildbot/status/web/build.py
@@ -0,0 +1,302 @@
+
+from twisted.web import html
+from twisted.web.util import Redirect, DeferredResource
+from twisted.internet import defer, reactor
+
+import urllib, time
+from twisted.python import log
+from buildbot.status.web.base import HtmlResource, make_row, make_stop_form, \
+ css_classes, path_to_builder, path_to_slave
+
+from buildbot.status.web.tests import TestsResource
+from buildbot.status.web.step import StepsResource
+from buildbot import version, util
+
+# /builders/$builder/builds/$buildnum
class StatusResourceBuild(HtmlResource):
    """One-page summary of a single build.

    Renders progress/results, the source stamp, the buildslave, the steps
    with their logfiles, build properties, the blamelist and timing, and
    offers a 'stop' form while the build runs and a 'rebuild' form once it
    has finished.
    """
    addSlash = True

    def __init__(self, build_status, build_control, builder_control):
        HtmlResource.__init__(self)
        self.build_status = build_status
        # build_control / builder_control may be None when the master does
        # not allow web-driven control; the stop/rebuild actions then have
        # nothing to act on.
        self.build_control = build_control
        self.builder_control = builder_control

    def getTitle(self, request):
        # page <title>: "Buildbot: <builder> Build #<n>"
        return ("Buildbot: %s Build #%d" %
                (html.escape(self.build_status.getBuilder().getName()),
                 self.build_status.getNumber()))

    def body(self, req):
        """Return the HTML body of the build page as a string."""
        b = self.build_status
        status = self.getStatus(req)
        projectURL = status.getProjectURL()
        projectName = status.getProjectName()
        data = ('<div class="title"><a href="%s">%s</a></div>\n'
                % (self.path_to_root(req), projectName))
        builder_name = b.getBuilder().getName()
        data += ("<h1><a href=\"%s\">Builder %s</a>: Build #%d</h1>\n"
                 % (path_to_builder(req, b.getBuilder()),
                    builder_name, b.getNumber()))

        if not b.isFinished():
            # in-progress build: show the ETA and, when controllable,
            # a stop-build form
            data += "<h2>Build In Progress</h2>"
            when = b.getETA()
            if when is not None:
                when_time = time.strftime("%H:%M:%S",
                                          time.localtime(time.time() + when))
                data += "<div>ETA %ds (%s)</div>\n" % (when, when_time)

            if self.build_control is not None:
                stopURL = urllib.quote(req.childLink("stop"))
                data += make_stop_form(stopURL)

        if b.isFinished():
            results = b.getResults()
            data += "<h2>Results:</h2>\n"
            text = " ".join(b.getText())
            data += '<span class="%s">%s</span>\n' % (css_classes[results],
                                                      text)
            if b.getTestResults():
                url = req.childLink("tests")
                data += "<h3><a href=\"%s\">test results</a></h3>\n" % url

        ss = b.getSourceStamp()
        data += "<h2>SourceStamp:</h2>\n"
        data += " <ul>\n"
        if ss.branch:
            data += " <li>Branch: %s</li>\n" % html.escape(ss.branch)
        if ss.revision:
            data += " <li>Revision: %s</li>\n" % html.escape(str(ss.revision))
        if ss.patch:
            data += " <li>Patch: YES</li>\n" # TODO: provide link to .diff
        if ss.changes:
            data += " <li>Changes: see below</li>\n"
        if (ss.branch is None and ss.revision is None and ss.patch is None
            and not ss.changes):
            data += " <li>build of most recent revision</li>\n"
        # 'got_revision' is a build property that may be absent (e.g. the
        # build failed before checkout), hence the KeyError guard.
        got_revision = None
        try:
            got_revision = b.getProperty("got_revision")
        except KeyError:
            pass
        if got_revision:
            got_revision = str(got_revision)
            if len(got_revision) > 40:
                # very long revision ids are unreadable; show a placeholder
                got_revision = "[revision string too long]"
            data += " <li>Got Revision: %s</li>\n" % got_revision
        data += " </ul>\n"

        # TODO: turn this into a table, or some other sort of definition-list
        # that doesn't take up quite so much vertical space
        # getSlave() raises KeyError for a slave the master no longer knows;
        # fall back to a plain (unlinked) name in that case.
        try:
            slaveurl = path_to_slave(req, status.getSlave(b.getSlavename()))
            data += "<h2>Buildslave:</h2>\n <a href=\"%s\">%s</a>\n" % (html.escape(slaveurl), html.escape(b.getSlavename()))
        except KeyError:
            data += "<h2>Buildslave:</h2>\n %s\n" % html.escape(b.getSlavename())
        data += "<h2>Reason:</h2>\n%s\n" % html.escape(b.getReason())

        data += "<h2>Steps and Logfiles:</h2>\n"
        # TODO:
#        urls = self.original.getURLs()
#        ex_url_class = "BuildStep external"
#        for name, target in urls.items():
#            text.append('[<a href="%s" class="%s">%s</a>]' %
#                        (target, ex_url_class, html.escape(name)))
        if b.getLogs():
            data += "<ol>\n"
            for s in b.getSteps():
                name = s.getName()
                data += (" <li><a href=\"%s\">%s</a> [%s]\n"
                         % (req.childLink("steps/%s" % urllib.quote(name)),
                            name,
                            " ".join(s.getText())))
                if s.getLogs():
                    data += " <ol>\n"
                    for logfile in s.getLogs():
                        logname = logfile.getName()
                        logurl = req.childLink("steps/%s/logs/%s" %
                                               (urllib.quote(name),
                                                urllib.quote(logname)))
                        data += (" <li><a href=\"%s\">%s</a></li>\n" %
                                 (logurl, logfile.getName()))
                    data += " </ol>\n"
                data += " </li>\n"
            data += "</ol>\n"

        data += "<h2>Build Properties:</h2>\n"
        data += "<table><tr><th valign=\"left\">Name</th><th valign=\"left\">Value</th><th valign=\"left\">Source</th></tr>\n"
        for name, value, source in b.getProperties().asList():
            value = str(value)
            if len(value) > 500:
                # cap huge property values so they don't swamp the page
                value = value[:500] + " .. [property value too long]"
            data += "<tr>"
            data += "<td>%s</td>" % html.escape(name)
            data += "<td>%s</td>" % html.escape(value)
            data += "<td>%s</td>" % html.escape(source)
            data += "</tr>\n"
        data += "</table>"

        data += "<h2>Blamelist:</h2>\n"
        if list(b.getResponsibleUsers()):
            data += " <ol>\n"
            for who in b.getResponsibleUsers():
                data += " <li>%s</li>\n" % html.escape(who)
            data += " </ol>\n"
        else:
            data += "<div>no responsible users</div>\n"


        (start, end) = b.getTimes()
        data += "<h2>Timing</h2>\n"
        data += "<table>\n"
        data += "<tr><td>Start</td><td>%s</td></tr>\n" % time.ctime(start)
        if end:
            # end is falsy while the build is still running
            data += "<tr><td>End</td><td>%s</td></tr>\n" % time.ctime(end)
            data += "<tr><td>Elapsed</td><td>%s</td></tr>\n" % util.formatInterval(end - start)
        data += "</table>\n"

        if ss.changes:
            data += "<h2>All Changes</h2>\n"
            data += "<ol>\n"
            for c in ss.changes:
                data += "<li>" + c.asHTML() + "</li>\n"
            data += "</ol>\n"
            #data += html.PRE(b.changesText()) # TODO

        if b.isFinished() and self.builder_control is not None:
            data += "<h3>Resubmit Build:</h3>\n"
            # can we rebuild it exactly?
            exactly = (ss.revision is not None) or b.getChanges()
            if exactly:
                data += ("<p>This tree was built from a specific set of \n"
                         "source files, and can be rebuilt exactly</p>\n")
            else:
                data += ("<p>This tree was built from the most recent "
                         "revision")
                if ss.branch:
                    data += " (along some branch)"
                data += (" and thus it might not be possible to rebuild it \n"
                         "exactly. Any changes that have been committed \n"
                         "after this build was started <b>will</b> be \n"
                         "included in a rebuild.</p>\n")
            rebuildURL = urllib.quote(req.childLink("rebuild"))
            data += ('<form action="%s" class="command rebuild">\n'
                     % rebuildURL)
            data += make_row("Your name:",
                             "<input type='text' name='username' />")
            data += make_row("Reason for re-running build:",
                             "<input type='text' name='comments' />")
            data += '<input type="submit" value="Rebuild" />\n'
            data += '</form>\n'

        # TODO: this stuff should be generated by a template of some sort
        data += '<hr /><div class="footer">\n'

        welcomeurl = self.path_to_root(req) + "index.html"
        data += '[<a href="%s">welcome</a>]\n' % welcomeurl
        data += "<br />\n"

        data += '<a href="http://buildbot.sourceforge.net/">Buildbot</a>'
        data += "-%s " % version
        if projectName:
            data += "working for the "
            if projectURL:
                data += "<a href=\"%s\">%s</a> project." % (projectURL,
                                                            projectName)
            else:
                data += "%s project." % projectName
        data += "<br />\n"
        data += ("Page built: " +
                 time.strftime("%a %d %b %Y %H:%M:%S",
                               time.localtime(util.now()))
                 + "\n")
        data += '</div>\n'

        return data

    def stop(self, req):
        """Handle the 'stop build' form: cancel the running build, then
        redirect back to the builder page after a short delay."""
        b = self.build_status
        c = self.build_control
        # NOTE(review): assumes build_control is not None here; getChild
        # routes "stop" unconditionally, so confirm callers cannot reach
        # this on an uncontrollable build.
        log.msg("web stopBuild of build %s:%s" % \
                (b.getBuilder().getName(), b.getNumber()))
        name = req.args.get("username", ["<unknown>"])[0]
        comments = req.args.get("comments", ["<no reason specified>"])[0]
        reason = ("The web-page 'stop build' button was pressed by "
                  "'%s': %s\n" % (name, comments))
        c.stopBuild(reason)
        # we're at http://localhost:8080/svn-hello/builds/5/stop?[args] and
        # we want to go to: http://localhost:8080/svn-hello
        r = Redirect("../..")
        d = defer.Deferred()
        # delay the redirect one second so the stop gets processed first
        reactor.callLater(1, d.callback, r)
        return DeferredResource(d)

    def rebuild(self, req):
        """Handle the 'rebuild' form: resubmit this finished build and
        redirect to the builder page."""
        b = self.build_status
        bc = self.builder_control
        builder_name = b.getBuilder().getName()
        log.msg("web rebuild of build %s:%s" % (builder_name, b.getNumber()))
        name = req.args.get("username", ["<unknown>"])[0]
        comments = req.args.get("comments", ["<no reason specified>"])[0]
        reason = ("The web-page 'rebuild' button was pressed by "
                  "'%s': %s\n" % (name, comments))
        if not bc or not b.isFinished():
            log.msg("could not rebuild: bc=%s, isFinished=%s"
                    % (bc, b.isFinished()))
            # TODO: indicate an error
        else:
            bc.resubmitBuild(b, reason)
        # we're at
        # http://localhost:8080/builders/NAME/builds/5/rebuild?[args]
        # Where should we send them?
        #
        # Ideally it would be to the per-build page that they just started,
        # but we don't know the build number for it yet (besides, it might
        # have to wait for a current build to finish). The next-most
        # preferred place is somewhere that the user can see tangible
        # evidence of their build starting (or to see the reason that it
        # didn't start). This should be the Builder page.
        r = Redirect("../..") # the Builder's page
        d = defer.Deferred()
        reactor.callLater(1, d.callback, r)
        return DeferredResource(d)

    def getChild(self, path, req):
        # "stop"/"rebuild" are handled inline; "steps"/"tests" get
        # dedicated sub-resources
        if path == "stop":
            return self.stop(req)
        if path == "rebuild":
            return self.rebuild(req)
        if path == "steps":
            return StepsResource(self.build_status)
        if path == "tests":
            return TestsResource(self.build_status)

        return HtmlResource.getChild(self, path, req)
+
+# /builders/$builder/builds
class BuildsResource(HtmlResource):
    """Parent resource for the numbered per-build pages of one builder."""

    addSlash = True

    def __init__(self, builder_status, builder_control):
        HtmlResource.__init__(self)
        self.builder_status = builder_status
        self.builder_control = builder_control

    def getChild(self, path, req):
        """Resolve a URL segment: a known build number yields that build's
        status page; anything else falls through to the default handler."""
        try:
            build_number = int(path)
        except ValueError:
            # not a build number at all
            return HtmlResource.getChild(self, path, req)

        build_status = self.builder_status.getBuild(build_number)
        if build_status:
            build_control = None
            if self.builder_control:
                build_control = self.builder_control.getBuild(build_number)
            return StatusResourceBuild(build_status, build_control,
                                       self.builder_control)

        return HtmlResource.getChild(self, path, req)
+
diff --git a/buildbot/buildbot/status/web/builder.py b/buildbot/buildbot/status/web/builder.py
new file mode 100644
index 0000000..35f65e9
--- /dev/null
+++ b/buildbot/buildbot/status/web/builder.py
@@ -0,0 +1,312 @@
+
+from twisted.web.error import NoResource
+from twisted.web import html, static
+from twisted.web.util import Redirect
+
+import re, urllib, time
+from twisted.python import log
+from buildbot import interfaces
+from buildbot.status.web.base import HtmlResource, make_row, \
+ make_force_build_form, OneLineMixin, path_to_build, path_to_slave, path_to_builder
+from buildbot.process.base import BuildRequest
+from buildbot.sourcestamp import SourceStamp
+
+from buildbot.status.web.build import BuildsResource, StatusResourceBuild
+
+# /builders/$builder
class StatusResourceBuilder(HtmlResource, OneLineMixin):
    """Status page for one builder: current and recent builds, the
    attached buildslaves, and force/ping controls when available."""
    addSlash = True

    def __init__(self, builder_status, builder_control):
        HtmlResource.__init__(self)
        self.builder_status = builder_status
        # None when web control over this builder is disabled
        self.builder_control = builder_control

    def getTitle(self, request):
        return "Buildbot: %s" % html.escape(self.builder_status.getName())

    def build_line(self, build, req):
        """Return an HTML fragment describing one in-progress build:
        link, ETA, current step, and (if controllable) a stop form."""
        buildnum = build.getNumber()
        buildurl = path_to_build(req, build)
        data = '<a href="%s">#%d</a> ' % (buildurl, buildnum)

        when = build.getETA()
        if when is not None:
            when_time = time.strftime("%H:%M:%S",
                                      time.localtime(time.time() + when))
            data += "ETA %ds (%s) " % (when, when_time)
        step = build.getCurrentStep()
        if step:
            data += "[%s]" % step.getName()
        else:
            # no current step but the build is alive: presumably blocked
            data += "[waiting for Lock]"
            # TODO: is this necessarily the case?

        if self.builder_control is not None:
            stopURL = path_to_build(req, build) + '/stop'
            data += '''
<form action="%s" class="command stopbuild" style="display:inline">
 <input type="submit" value="Stop Build" />
</form>''' % stopURL
        return data

    def body(self, req):
        """Return the HTML body of the builder page as a string."""
        b = self.builder_status
        control = self.builder_control
        status = self.getStatus(req)

        slaves = b.getSlaves()
        connected_slaves = [s for s in slaves if s.isConnected()]

        projectName = status.getProjectName()

        data = '<a href="%s">%s</a>\n' % (self.path_to_root(req), projectName)

        data += "<h1>Builder: %s</h1>\n" % html.escape(b.getName())

        # the first section shows builds which are currently running, if any.

        current = b.getCurrentBuilds()
        if current:
            data += "<h2>Currently Building:</h2>\n"
            data += "<ul>\n"
            for build in current:
                data += " <li>" + self.build_line(build, req) + "</li>\n"
            data += "</ul>\n"
        else:
            data += "<h2>no current builds</h2>\n"

        # Then a section with the last 5 builds, with the most recent build
        # distinguished from the rest.

        data += "<h2>Recent Builds:</h2>\n"
        data += "<ul>\n"
        for i,build in enumerate(b.generateFinishedBuilds(num_builds=5)):
            data += " <li>" + self.make_line(req, build, False) + "</li>\n"
            if i == 0:
                data += "<br />\n" # separator
                # TODO: or empty list?
        data += "</ul>\n"


        data += "<h2>Buildslaves:</h2>\n"
        data += "<ol>\n"
        for slave in slaves:
            slaveurl = path_to_slave(req, slave)
            data += "<li><b><a href=\"%s\">%s</a></b>: " % (html.escape(slaveurl), html.escape(slave.getName()))
            if slave.isConnected():
                data += "CONNECTED\n"
                if slave.getAdmin():
                    data += make_row("Admin:", html.escape(slave.getAdmin()))
                if slave.getHost():
                    data += "<span class='label'>Host info:</span>\n"
                    data += html.PRE(slave.getHost())
            else:
                data += ("NOT CONNECTED\n")
            data += "</li>\n"
        data += "</ol>\n"

        # force form only makes sense when a slave could actually run it
        if control is not None and connected_slaves:
            forceURL = path_to_builder(req, b) + '/force'
            data += make_force_build_form(forceURL)
        elif control is not None:
            data += """
 <p>All buildslaves appear to be offline, so it's not possible
 to force this build to execute at this time.</p>
 """

        if control is not None:
            pingURL = path_to_builder(req, b) + '/ping'
            data += """
 <form action="%s" class='command pingbuilder'>
 <p>To ping the buildslave(s), push the 'Ping' button</p>

 <input type="submit" value="Ping Builder" />
 </form>
 """ % pingURL

        data += self.footer(status, req)

        return data

    def force(self, req):
        """

        Custom properties can be passed from the web form. To do
        this, subclass this class, overriding the force() method. You
        can then determine the properties (usually from form values,
        by inspecting req.args), then pass them to this superclass
        force method.

        """
        name = req.args.get("username", ["<unknown>"])[0]
        reason = req.args.get("comments", ["<no reason specified>"])[0]
        branch = req.args.get("branch", [""])[0]
        revision = req.args.get("revision", [""])[0]

        r = "The web-page 'force build' button was pressed by '%s': %s\n" \
            % (name, reason)
        log.msg("web forcebuild of builder '%s', branch='%s', revision='%s'"
                % (self.builder_status.getName(), branch, revision))

        if not self.builder_control:
            # TODO: tell the web user that their request was denied
            log.msg("but builder control is disabled")
            return Redirect("..")

        # keep weird stuff out of the branch and revision strings. TODO:
        # centralize this somewhere.
        if not re.match(r'^[\w\.\-\/]*$', branch):
            log.msg("bad branch '%s'" % branch)
            return Redirect("..")
        if not re.match(r'^[\w\.\-\/]*$', revision):
            log.msg("bad revision '%s'" % revision)
            return Redirect("..")
        # empty form fields mean "no preference", i.e. default branch /
        # latest revision
        if not branch:
            branch = None
        if not revision:
            revision = None

        # TODO: if we can authenticate that a particular User pushed the
        # button, use their name instead of None, so they'll be informed of
        # the results.
        s = SourceStamp(branch=branch, revision=revision)
        # NOTE(review): 'req' is rebound here, shadowing the web request
        # parameter; harmless today since the request is not used again,
        # but fragile.
        req = BuildRequest(r, s, builderName=self.builder_status.getName())
        try:
            self.builder_control.requestBuildSoon(req)
        except interfaces.NoSlaveError:
            # TODO: tell the web user that their request could not be
            # honored
            pass
        # send the user back to the builder page
        return Redirect(".")

    def ping(self, req):
        """Ping this builder's buildslave(s), then return to the page."""
        # NOTE(review): assumes builder_control is not None -- unlike
        # force(), there is no guard here; confirm routing.
        log.msg("web ping of builder '%s'" % self.builder_status.getName())
        self.builder_control.ping() # TODO: there ought to be an ISlaveControl
        # send the user back to the builder page
        return Redirect(".")

    def getChild(self, path, req):
        if path == "force":
            return self.force(req)
        if path == "ping":
            return self.ping(req)
        if path == "events":
            num = req.postpath.pop(0)
            req.prepath.append(num)
            num = int(num)
            # TODO: is this dead code? .statusbag doesn't exist,right?
            log.msg("getChild['path']: %s" % req.uri)
            return NoResource("events are unavailable until code gets fixed")
            # NOTE(review): everything below this return is unreachable;
            # kept only as a reference for reviving the events code.
            filename = req.postpath.pop(0)
            req.prepath.append(filename)
            e = self.builder_status.getEventNumbered(num)
            if not e:
                return NoResource("No such event '%d'" % num)
            file = e.files.get(filename, None)
            if file == None:
                return NoResource("No such file '%s'" % filename)
            if type(file) == type(""):
                if file[:6] in ("<HTML>", "<html>"):
                    return static.Data(file, "text/html")
                return static.Data(file, "text/plain")
            return file
        if path == "builds":
            return BuildsResource(self.builder_status, self.builder_control)

        return HtmlResource.getChild(self, path, req)
+
+
+# /builders/_all
class StatusResourceAllBuilders(HtmlResource, OneLineMixin):
    """Aggregate resource that applies 'force' or 'stop' to every builder
    known to the master, then redirects to the welcome page."""

    def __init__(self, status, control):
        HtmlResource.__init__(self)
        self.status = status
        self.control = control

    def getChild(self, path, req):
        if path == "force":
            return self.force(req)
        if path == "stop":
            return self.stop(req)
        return HtmlResource.getChild(self, path, req)

    def force(self, req):
        """Trigger a forced build on each builder in turn."""
        for builder_name in self.status.getBuilderNames():
            builder_status = self.status.getBuilder(builder_name)
            master_control = self.getControl(req)
            builder_control = None
            if master_control:
                builder_control = master_control.getBuilder(builder_name)
            # delegate the actual form handling to the per-builder resource
            per_builder = StatusResourceBuilder(builder_status,
                                                builder_control)
            per_builder.force(req)
        # back to the welcome page
        return Redirect("../..")

    def stop(self, req):
        """Stop every build currently running on any builder."""
        for builder_name in self.status.getBuilderNames():
            builder_status = self.status.getBuilder(builder_name)
            master_control = self.getControl(req)
            builder_control = None
            if master_control:
                builder_control = master_control.getBuilder(builder_name)
            (state, current_builds) = builder_status.getState()
            if state != "building":
                # nothing running on this builder
                continue
            for running in current_builds:
                build_status = builder_status.getBuild(running.number)
                if not build_status:
                    continue
                build_control = None
                if builder_control:
                    build_control = builder_control.getBuild(running.number)
                # reuse the per-build resource's stop handling
                per_build = StatusResourceBuild(build_status, build_control,
                                                builder_control)
                per_build.stop(req)
        # go back to the welcome page
        return Redirect("../..")
+
+
+# /builders
class BuildersResource(HtmlResource):
    """Top-level /builders page: a list of all builders, with per-builder
    child resources and the special '_all' aggregate."""

    title = "Builders"
    addSlash = True

    def body(self, req):
        status = self.getStatus(req)
        fragments = ["", "<h1>Builders</h1>\n"]

        # TODO: this is really basic. It should be expanded to include a
        # brief one-line summary of the builder (perhaps with whatever the
        # builder is currently doing)
        fragments.append("<ol>\n")
        for builder_name in status.getBuilderNames():
            fragments.append(' <li><a href="%s">%s</a></li>\n' %
                             (req.childLink(urllib.quote(builder_name,
                                                         safe='')),
                              builder_name))
        fragments.append("</ol>\n")

        fragments.append(self.footer(status, req))

        return "".join(fragments)

    def getChild(self, path, req):
        status = self.getStatus(req)
        if path in status.getBuilderNames():
            builder_control = None
            master_control = self.getControl(req)
            if master_control:
                builder_control = master_control.getBuilder(path)
            return StatusResourceBuilder(status.getBuilder(path),
                                         builder_control)
        if path == "_all":
            return StatusResourceAllBuilders(self.getStatus(req),
                                             self.getControl(req))

        return HtmlResource.getChild(self, path, req)
+
diff --git a/buildbot/buildbot/status/web/changes.py b/buildbot/buildbot/status/web/changes.py
new file mode 100644
index 0000000..ff562c6
--- /dev/null
+++ b/buildbot/buildbot/status/web/changes.py
@@ -0,0 +1,41 @@
+
+from zope.interface import implements
+from twisted.python import components
+from twisted.web.error import NoResource
+
+from buildbot.changes.changes import Change
+from buildbot.status.web.base import HtmlResource, StaticHTML, IBox, Box
+
+# /changes/NN
class ChangesResource(HtmlResource):
    """Lists the configured change sources and serves /changes/NN pages
    for individual Change objects."""

    def body(self, req):
        """Return an HTML summary of the master's change sources."""
        data = ""
        data += "Change sources:\n"
        sources = self.getStatus(req).getChangeSources()
        if sources:
            data += "<ol>\n"
            for s in sources:
                data += "<li>%s</li>\n" % s.describe()
            data += "</ol>\n"
        else:
            data += "none (push only)\n"
        return data

    def getChild(self, path, req):
        """Serve the detail page for change number 'path'."""
        # robustness fix: a non-numeric segment used to raise an uncaught
        # ValueError (HTTP 500); answer with 'no such resource' instead
        try:
            num = int(path)
        except ValueError:
            return NoResource("Expected a change number, got '%s'" % path)
        c = self.getStatus(req).getChange(num)
        if not c:
            return NoResource("No change number '%d'" % num)
        return StaticHTML(c.asHTML(), "Change #%d" % num)
+
+
class ChangeBox(components.Adapter):
    # adapts a Change object into the Box displayed in the waterfall grid
    implements(IBox)

    def getBox(self, req):
        """Return a Box whose text links to this change's detail page."""
        url = req.childLink("../changes/%d" % self.original.number)
        text = self.original.get_HTML_box(url)
        return Box([text], class_="Change")
# register the adapter so IBox(change) resolves to ChangeBox automatically
components.registerAdapter(ChangeBox, Change, IBox)
+
diff --git a/buildbot/buildbot/status/web/classic.css b/buildbot/buildbot/status/web/classic.css
new file mode 100644
index 0000000..5a5b0ea
--- /dev/null
+++ b/buildbot/buildbot/status/web/classic.css
@@ -0,0 +1,78 @@
/* Default ("classic") stylesheet for the buildbot web status pages. */

a:visited {
 color: #800080;
}

/* cell borders for the waterfall table */
td.Event, td.BuildStep, td.Activity, td.Change, td.Time, td.Builder {
 border-top: 1px solid;
 border-right: 1px solid;
}

td.box {
 border: 1px solid;
}

/* Activity states */
.offline {
 background-color: gray;
}
.idle {
 background-color: white;
}
.waiting {
 background-color: yellow;
}
.building {
 background-color: yellow;
}

/* LastBuild, BuildStep states */
.success {
 background-color: #72ff75;
}
.failure {
 background-color: red;
}
.warnings {
 background-color: #ff8000;
}
.exception {
 background-color: #c000c0;
}
.start,.running {
 background-color: yellow;
}

/* grid styles */

table.Grid {
 border-collapse: collapse;
}

table.Grid tr td {
 padding: 0.2em;
 margin: 0px;
 text-align: center;
}

table.Grid tr td.title {
 font-size: 90%;
 border-right: 1px gray solid;
 border-bottom: 1px gray solid;
}

table.Grid tr td.sourcestamp {
 font-size: 90%;
}

table.Grid tr td.builder {
 text-align: right;
 font-size: 90%;
}

table.Grid tr td.build {
 border: 1px gray solid;
}

/* "Page built: ..." footer */
div.footer {
 font-size: 80%;
}
diff --git a/buildbot/buildbot/status/web/feeds.py b/buildbot/buildbot/status/web/feeds.py
new file mode 100644
index 0000000..c86ca3b
--- /dev/null
+++ b/buildbot/buildbot/status/web/feeds.py
@@ -0,0 +1,359 @@
+# This module enables ATOM and RSS feeds from webstatus.
+#
+# It is based on "feeder.py" which was part of the Buildbot
+# configuration for the Subversion project. The original file was
+# created by Lieven Gobaerts and later adjusted by API
+# (apinheiro@igalia.coma) and also here
+# http://code.google.com/p/pybots/source/browse/trunk/master/Feeder.py
+#
+# All subsequent changes to feeder.py where made by Chandan-Dutta
+# Chowdhury <chandan-dutta.chowdhury @ hp.com> and Gareth Armstrong
+# <gareth.armstrong @ hp.com>.
+#
+# Those modifications are as follows:
+# 1) the feeds are usable from baseweb.WebStatus
+# 2) feeds are fully validated ATOM 1.0 and RSS 2.0 feeds, verified
+# with code from http://feedvalidator.org
+# 3) nicer xml output
+# 4) feeds can be filtered as per the /waterfall display with the
+# builder and category filters
+# 5) cleaned up white space and imports
+#
+# Finally, the code was directly integrated into these two files,
+# buildbot/status/web/feeds.py (you're reading it, ;-)) and
+# buildbot/status/web/baseweb.py.
+
+import os
+import re
+import sys
+import time
+from twisted.web import resource
+from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE, EXCEPTION
+
class XmlResource(resource.Resource):
    """Base class for XML responses.

    Subclasses override header()/body()/footer() (and optionally docType
    and contentType); render() concatenates the pieces and honors HEAD
    requests by sending only the headers.
    """

    contentType = "text/xml; charset=UTF-8"
    docType = ''

    def render(self, request):
        data = self.content(request)
        request.setHeader("content-type", self.contentType)
        if request.method == "HEAD":
            # HEAD: advertise the length but send no body
            request.setHeader("content-length", len(data))
            return ''
        return data

    def header(self, request):
        return ('<?xml version="1.0"?>\n')

    def body(self, request):
        return ''

    def footer(self, request):
        return ''

    def content(self, request):
        # document = doctype + header + body + footer
        return (self.docType + self.header(request)
                + self.body(request) + self.footer(request))
+
class FeedResource(XmlResource):
    """Shared machinery for the 'failed builds' syndication feeds.

    Collects recent FAILED builds and renders one feed entry per build;
    subclasses (Rss20StatusResource, Atom10StatusResource) supply the
    concrete RSS/Atom syntax via header()/item()/footer().
    """
    title = None
    link = 'http://dummylink'
    language = 'en-us'
    description = 'Dummy rss'
    status = None

    def __init__(self, status, categories=None, title=None):
        self.status = status
        # optional builder-category filter, as configured on the master
        self.categories = categories
        self.title = title
        self.link = self.status.getBuildbotURL()
        self.description = 'List of FAILED builds'
        # feed publication time: moment of resource creation, in UTC
        self.pubdate = time.gmtime(int(time.time()))

    def getBuilds(self, request):
        """Return up to 25 recent FAILED builds, newest first, honoring
        the same builder=/category= URL filters as the waterfall."""
        builds = []
        # THIS is lifted straight from the WaterfallStatusResource Class in
        # status/web/waterfall.py
        #
        # we start with all Builders available to this Waterfall: this is
        # limited by the config-file -time categories= argument, and defaults
        # to all defined Builders.
        allBuilderNames = self.status.getBuilderNames(categories=self.categories)
        builders = [self.status.getBuilder(name) for name in allBuilderNames]

        # but if the URL has one or more builder= arguments (or the old show=
        # argument, which is still accepted for backwards compatibility), we
        # use that set of builders instead. We still don't show anything
        # outside the config-file time set limited by categories=.
        showBuilders = request.args.get("show", [])
        showBuilders.extend(request.args.get("builder", []))
        if showBuilders:
            builders = [b for b in builders if b.name in showBuilders]

        # now, if the URL has one or category= arguments, use them as a
        # filter: only show those builders which belong to one of the given
        # categories.
        showCategories = request.args.get("category", [])
        if showCategories:
            builders = [b for b in builders if b.category in showCategories]

        maxFeeds = 25

        # Copy all failed builds in a new list.
        # This could clearly be implemented much better if we had
        # access to a global list of builds.
        for b in builders:
            lastbuild = b.getLastFinishedBuild()
            if lastbuild is None:
                continue

            lastnr = lastbuild.getNumber()

            # walk backwards from the most recent finished build
            totalbuilds = 0
            i = lastnr
            while i >= 0:
                build = b.getBuild(i)
                i -= 1
                if not build:
                    continue

                results = build.getResults()

                # only add entries for failed builds!
                if results == FAILURE:
                    totalbuilds += 1
                    builds.append(build)

                # stop for this builder when our total nr. of feeds is reached
                if totalbuilds >= maxFeeds:
                    break

        # Sort build list by date, youngest first.
        if sys.version_info[:3] >= (2,4,0):
            builds.sort(key=lambda build: build.getTimes(), reverse=True)
        else:
            # If you need compatibility with python < 2.4, use this for
            # sorting instead:
            # We apply Decorate-Sort-Undecorate
            deco = [(build.getTimes(), build) for build in builds]
            deco.sort()
            deco.reverse()
            builds = [build for (b1, build) in deco]

        if builds:
            builds = builds[:min(len(builds), maxFeeds)]
        return builds

    def body (self, request):
        """Emit one feed item per failed build via self.item()."""
        data = ''
        builds = self.getBuilds(request)

        for build in builds:
            start, finished = build.getTimes()
            finishedTime = time.gmtime(int(finished))
            projectName = self.status.getProjectName()
            link = re.sub(r'index.html', "", self.status.getURLForThing(build))

            # title: trunk r22191 (plus patch) failed on 'i686-debian-sarge1 shared gcc-3.3.5'
            ss = build.getSourceStamp()
            source = ""
            if ss.branch:
                source += "Branch %s " % ss.branch
            if ss.revision:
                source += "Revision %s " % str(ss.revision)
            if ss.patch:
                source += " (plus patch)"
            if ss.changes:
                pass
            if (ss.branch is None and ss.revision is None and ss.patch is None
                and not ss.changes):
                source += "Latest revision "
            # 'got_revision' may be absent if the build failed before
            # checkout, hence the KeyError guard
            got_revision = None
            try:
                got_revision = build.getProperty("got_revision")
            except KeyError:
                pass
            if got_revision:
                got_revision = str(got_revision)
                if len(got_revision) > 40:
                    got_revision = "[revision string too long]"
                source += "(Got Revision: %s)" % got_revision
            title = ('%s failed on "%s"' %
                     (source, build.getBuilder().getName()))

            # get name of the failed step and the last 30 lines of its log.
            if build.getLogs():
                log = build.getLogs()[-1]
                laststep = log.getStep().getName()
                try:
                    lastlog = log.getText()
                except IOError:
                    # Probably the log file has been removed
                    lastlog='<b>log file not available</b>'

                lines = re.split('\n', lastlog)
                lastlog = ''
                for logline in lines[max(0, len(lines)-30):]:
                    lastlog = lastlog + logline + '<br/>'
                lastlog = lastlog.replace('\n', '<br/>')
            # NOTE(review): if a failed build has no logs at all, 'laststep'
            # and 'lastlog' are never bound and the code below raises
            # NameError -- confirm whether that case can occur in practice.

            description = ''
            description += ('Date: %s<br/><br/>' %
                            time.strftime("%a, %d %b %Y %H:%M:%S GMT",
                                          finishedTime))
            description += ('Full details available here: <a href="%s">%s</a><br/>' %
                            (self.link, projectName))
            builder_summary_link = ('%s/builders/%s' %
                                    (re.sub(r'/index.html', '', self.link),
                                     build.getBuilder().getName()))
            description += ('Build summary: <a href="%s">%s</a><br/><br/>' %
                            (builder_summary_link,
                             build.getBuilder().getName()))
            description += ('Build details: <a href="%s">%s</a><br/><br/>' %
                            (link, self.link + link[1:]))
            description += ('Author list: <b>%s</b><br/><br/>' %
                            ",".join(build.getResponsibleUsers()))
            description += ('Failed step: <b>%s</b><br/><br/>' % laststep)
            description += 'Last lines of the build log:<br/>'

            data += self.item(title, description=description, lastlog=lastlog,
                              link=link, pubDate=finishedTime)

        return data

    def item(self, title='', link='', description='', pubDate=''):
        """Generates xml for one item in the feed."""
        # NOTE(review): abstract hook. Both subclasses accept an extra
        # 'lastlog' keyword that body() passes above; this base signature
        # would reject that call, so FeedResource is effectively abstract
        # and must not be served directly.
+
class Rss20StatusResource(FeedResource):
    """Render the failed-build feed as an RSS 2.0 document."""

    # Fix: the original assigned a throwaway *local* 'contentType' inside
    # __init__, so the feed was served with the generic XmlResource type.
    # As a class attribute it actually overrides XmlResource.contentType.
    contentType = 'application/rss+xml'

    def __init__(self, status, categories=None, title=None):
        FeedResource.__init__(self, status, categories, title)

    def header(self, request):
        """Return the RSS preamble: <rss>, <channel> and its metadata."""
        data = FeedResource.header(self, request)
        data += ('<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">\n')
        data += (' <channel>\n')
        if self.title is None:
            # Fix: was 'status.getProjectName()' -- 'status' is undefined
            # in this scope (NameError); the status object lives on self.
            title = 'Build status of ' + self.status.getProjectName()
        else:
            title = self.title
        data += (' <title>%s</title>\n' % title)
        if self.link is not None:
            data += (' <link>%s</link>\n' % self.link)
            link = re.sub(r'/index.html', '', self.link)
            data += (' <atom:link href="%s/rss" rel="self" type="application/rss+xml"/>\n' % link)
        if self.language is not None:
            data += (' <language>%s</language>\n' % self.language)
        if self.description is not None:
            data += (' <description>%s</description>\n' % self.description)
        if self.pubdate is not None:
            rfc822_pubdate = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
                                           self.pubdate)
            data += (' <pubDate>%s</pubDate>\n' % rfc822_pubdate)
        return data

    def item(self, title='', link='', description='', lastlog='', pubDate=''):
        """Return one <item> element; description+log go in a CDATA block."""
        data = (' <item>\n')
        data += (' <title>%s</title>\n' % title)
        if link is not None:
            data += (' <link>%s</link>\n' % link)
        if (description is not None and lastlog is not None):
            # re-escape the log text for embedding inside the CDATA payload
            lastlog = re.sub(r'<br/>', "\n", lastlog)
            lastlog = re.sub(r'&', "&amp;", lastlog)
            lastlog = re.sub(r"'", "&apos;", lastlog)
            lastlog = re.sub(r'"', "&quot;", lastlog)
            lastlog = re.sub(r'<', '&lt;', lastlog)
            lastlog = re.sub(r'>', '&gt;', lastlog)
            lastlog = lastlog.replace('\n', '<br/>')
            content = '<![CDATA['
            content += description
            content += lastlog
            content += ']]>'
            data += (' <description>%s</description>\n' % content)
        if pubDate is not None:
            rfc822pubDate = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
                                          pubDate)
            data += (' <pubDate>%s</pubDate>\n' % rfc822pubDate)
            # Every RSS item must have a globally unique ID
            # Fix: USER/HOSTNAME are not set on every platform (e.g.
            # Windows); fall back instead of raising KeyError and breaking
            # the whole feed.
            guid = ('tag:%s@%s,%s:%s' % (os.environ.get('USER', 'unknown'),
                                         os.environ.get('HOSTNAME',
                                                        'localhost'),
                                         time.strftime("%Y-%m-%d", pubDate),
                                         time.strftime("%Y%m%d%H%M%S",
                                                       pubDate)))
            data += (' <guid isPermaLink="false">%s</guid>\n' % guid)
        data += (' </item>\n')
        return data

    def footer(self, request):
        """Close the channel and rss elements."""
        data = (' </channel>\n'
                '</rss>')
        return data
+
class Atom10StatusResource(FeedResource):
    """Render the failed-build feed as an Atom 1.0 document."""

    # Fix: the original assigned a throwaway *local* 'contentType' inside
    # __init__; as a class attribute it actually overrides
    # XmlResource.contentType.
    contentType = 'application/atom+xml'

    def __init__(self, status, categories=None, title=None):
        FeedResource.__init__(self, status, categories, title)

    def header(self, request):
        """Return the Atom preamble: the <feed> element and its metadata."""
        data = FeedResource.header(self, request)
        data += '<feed xmlns="http://www.w3.org/2005/Atom">\n'
        data += (' <id>%s</id>\n' % self.status.getBuildbotURL())
        if self.title is None:
            # Fix: was 'status.getProjectName()' -- 'status' is undefined
            # in this scope (NameError); the status object lives on self.
            title = 'Build status of ' + self.status.getProjectName()
        else:
            title = self.title
        data += (' <title>%s</title>\n' % title)
        if self.link is not None:
            link = re.sub(r'/index.html', '', self.link)
            data += (' <link rel="self" href="%s/atom"/>\n' % link)
            data += (' <link rel="alternate" href="%s/"/>\n' % link)
        if self.description is not None:
            data += (' <subtitle>%s</subtitle>\n' % self.description)
        if self.pubdate is not None:
            rfc3339_pubdate = time.strftime("%Y-%m-%dT%H:%M:%SZ",
                                            self.pubdate)
            data += (' <updated>%s</updated>\n' % rfc3339_pubdate)
        data += (' <author>\n')
        data += (' <name>Build Bot</name>\n')
        data += (' </author>\n')
        return data

    def item(self, title='', link='', description='', lastlog='', pubDate=''):
        """Return one <entry> element with xhtml-typed content."""
        data = (' <entry>\n')
        data += (' <title>%s</title>\n' % title)
        if link is not None:
            data += (' <link href="%s"/>\n' % link)
        if (description is not None and lastlog is not None):
            # escape the log text for embedding in the xhtml <pre> block
            lastlog = re.sub(r'<br/>', "\n", lastlog)
            lastlog = re.sub(r'&', "&amp;", lastlog)
            lastlog = re.sub(r"'", "&apos;", lastlog)
            lastlog = re.sub(r'"', "&quot;", lastlog)
            lastlog = re.sub(r'<', '&lt;', lastlog)
            lastlog = re.sub(r'>', '&gt;', lastlog)
            data += (' <content type="xhtml">\n')
            data += (' <div xmlns="http://www.w3.org/1999/xhtml">\n')
            data += (' %s\n' % description)
            data += (' <pre xml:space="preserve">%s</pre>\n' % lastlog)
            data += (' </div>\n')
            data += (' </content>\n')
        if pubDate is not None:
            rfc3339pubDate = time.strftime("%Y-%m-%dT%H:%M:%SZ",
                                           pubDate)
            data += (' <updated>%s</updated>\n' % rfc3339pubDate)
            # Every Atom entry must have a globally unique ID
            # http://diveintomark.org/archives/2004/05/28/howto-atom-id
            # Fix: USER/HOSTNAME may be missing from the environment; fall
            # back instead of raising KeyError and breaking the feed.
            guid = ('tag:%s@%s,%s:%s' % (os.environ.get('USER', 'unknown'),
                                         os.environ.get('HOSTNAME',
                                                        'localhost'),
                                         time.strftime("%Y-%m-%d", pubDate),
                                         time.strftime("%Y%m%d%H%M%S",
                                                       pubDate)))
            data += (' <id>%s</id>\n' % guid)
        data += (' <author>\n')
        data += (' <name>Build Bot</name>\n')
        data += (' </author>\n')
        data += (' </entry>\n')
        return data

    def footer(self, request):
        """Close the feed element."""
        data = ('</feed>')
        return data
diff --git a/buildbot/buildbot/status/web/grid.py b/buildbot/buildbot/status/web/grid.py
new file mode 100644
index 0000000..79527d8
--- /dev/null
+++ b/buildbot/buildbot/status/web/grid.py
@@ -0,0 +1,252 @@
+from __future__ import generators
+
+import sys, time, os.path
+import urllib
+
+from buildbot import util
+from buildbot import version
+from buildbot.status.web.base import HtmlResource
+#from buildbot.status.web.base import Box, HtmlResource, IBox, ICurrentBox, \
+# ITopBox, td, build_get_class, path_to_build, path_to_step, map_branches
+from buildbot.status.web.base import build_get_class
+
# set grid_css to the full pathname of the css file
if hasattr(sys, "frozen"):
    # all 'data' files are in the directory of our executable
    # (sys.frozen is set by py2exe-style packagers -- TODO confirm which)
    here = os.path.dirname(sys.executable)
    grid_css = os.path.abspath(os.path.join(here, "grid.css"))
else:
    # running from source; look for a sibling to __file__
    up = os.path.dirname
    grid_css = os.path.abspath(os.path.join(up(__file__), "grid.css"))
+
+class ANYBRANCH: pass # a flag value, used below
+
class GridStatusResource(HtmlResource):
    """Renders the "grid" view: an HTML table with one row per builder
    and one column per recent SourceStamp, each cell holding that
    builder's build for that source stamp."""
    # TODO: docs
    status = None
    control = None
    changemaster = None

    def __init__(self, allowForce=True, css=None):
        HtmlResource.__init__(self)

        self.allowForce = allowForce
        # stylesheet path; defaults to the grid.css located above
        self.css = css or grid_css

    def getTitle(self, request):
        """Return the page title, including the project name when set."""
        status = self.getStatus(request)
        p = status.getProjectName()
        if p:
            return "BuildBot: %s" % p
        else:
            return "BuildBot"

    def getChangemaster(self, request):
        # TODO: this wants to go away, access it through IStatus
        return request.site.buildbot_service.getChangeSvc()

    # handle reloads through an http header
    # TODO: send this as a real header, rather than a tag
    def get_reload_time(self, request):
        """Return the 'reload' query argument as an int, clamped to a
        minimum of 15 seconds, or None when absent or unparseable."""
        if "reload" in request.args:
            try:
                reload_time = int(request.args["reload"][0])
                return max(reload_time, 15)
            except ValueError:
                pass
        return None

    def head(self, request):
        """Return extra <head> markup: a meta-refresh tag when an
        auto-reload interval was requested."""
        head = ''
        reload_time = self.get_reload_time(request)
        if reload_time is not None:
            head += '<meta http-equiv="refresh" content="%d">\n' % reload_time
        return head

#    def setBuildmaster(self, buildmaster):
#        self.status = buildmaster.getStatus()
#        if self.allowForce:
#            self.control = interfaces.IControl(buildmaster)
#        else:
#            self.control = None
#        self.changemaster = buildmaster.change_svc
#
#        # try to set the page title
#        p = self.status.getProjectName()
#        if p:
#            self.title = "BuildBot: %s" % p
#
    def build_td(self, request, build):
        """Return one grid cell (<td>) for 'build'; a non-breaking space
        when build is None, otherwise the build's text with the first
        line linked to the build page."""
        if not build:
            return '<td class="build">&nbsp;</td>\n'

        if build.isFinished():
            # get the text and annotate the first line with a link
            text = build.getText()
            if not text: text = [ "(no information)" ]
            if text == [ "build", "successful" ]: text = [ "OK" ]
        else:
            text = [ 'building' ]

        name = build.getBuilder().getName()
        number = build.getNumber()
        url = "builders/%s/builds/%d" % (name, number)
        text[0] = '<a href="%s">%s</a>' % (url, text[0])
        text = '<br />\n'.join(text)
        class_ = build_get_class(build)

        return '<td class="build %s">%s</td>\n' % (class_, text)

    def builder_td(self, request, builder):
        """Return the left-hand <td> of a builder row: the builder name
        linked to its page, plus its state and pending-build count."""
        state, builds = builder.getState()

        # look for upcoming builds. We say the state is "waiting" if the
        # builder is otherwise idle and there is a scheduler which tells us a
        # build will be performed some time in the near future. TODO: this
        # functionality used to be in BuilderStatus.. maybe this code should
        # be merged back into it.
        upcoming = []
        builderName = builder.getName()
        for s in self.getStatus(request).getSchedulers():
            if builderName in s.listBuilderNames():
                upcoming.extend(s.getPendingBuildTimes())
        if state == "idle" and upcoming:
            state = "waiting"

        # TODO: for now, this pending/upcoming stuff is in the "current
        # activity" box, but really it should go into a "next activity" row
        # instead. The only times it should show up in "current activity" is
        # when the builder is otherwise idle.

        # are any builds pending? (waiting for a slave to be free)
        url = 'builders/%s/' % urllib.quote(builder.getName(), safe='')
        text = '<a href="%s">%s</a>' % (url, builder.getName())
        pbs = builder.getPendingBuilds()
        if state != 'idle' or pbs:
            if pbs:
                text += "<br />(%s with %d pending)" % (state, len(pbs))
            else:
                text += "<br />(%s)" % state

        return '<td valign="center" class="builder %s">%s</td>\n' % \
            (state, text)

    def stamp_td(self, stamp):
        """Return the column-header <td> describing one SourceStamp."""
        text = stamp.getText()
        return '<td valign="bottom" class="sourcestamp">%s</td>\n' % \
            "<br />".join(text)

    def body(self, request):
        "This method builds the main grid display."

        # get url parameters
        numBuilds = int(request.args.get("width", [5])[0])
        categories = request.args.get("category", [])
        branch = request.args.get("branch", [ANYBRANCH])[0]
        if branch == 'trunk': branch = None

        # and the data we want to render
        status = self.getStatus(request)
        stamps = self.getRecentSourcestamps(status, numBuilds, categories, branch)

        projectURL = status.getProjectURL()
        projectName = status.getProjectName()

        # header row: project title cell followed by one cell per stamp
        data = '<table class="Grid" border="0" cellspacing="0">\n'
        data += '<tr>\n'
        data += '<td class="title"><a href="%s">%s</a>' % (projectURL, projectName)
        if categories:
            if len(categories) > 1:
                data += '\n<br /><b>Categories:</b><br/>%s' % ('<br/>'.join(categories))
            else:
                data += '\n<br /><b>Category:</b> %s' % categories[0]
        if branch != ANYBRANCH:
            data += '\n<br /><b>Branch:</b> %s' % (branch or 'trunk')
        data += '</td>\n'
        for stamp in stamps:
            data += self.stamp_td(stamp)
        data += '</tr>\n'

        sortedBuilderNames = status.getBuilderNames()[:]
        sortedBuilderNames.sort()
        for bn in sortedBuilderNames:
            builds = [None] * len(stamps)

            builder = status.getBuilder(bn)
            if categories and builder.category not in categories:
                continue

            # walk backwards through this builder's history, matching each
            # build to the stamp column it belongs in (first match wins)
            build = builder.getBuild(-1)
            while build and None in builds:
                ss = build.getSourceStamp(absolute=True)
                for i in range(len(stamps)):
                    if ss == stamps[i] and builds[i] is None:
                        builds[i] = build
                build = build.getPreviousBuild()

            data += '<tr>\n'
            data += self.builder_td(request, builder)
            for build in builds:
                data += self.build_td(request, build)
            data += '</tr>\n'

        data += '</table>\n'

        # TODO: this stuff should be generated by a template of some sort
        data += '<hr /><div class="footer">\n'

        welcomeurl = self.path_to_root(request) + "index.html"
        data += '[<a href="%s">welcome</a>]\n' % welcomeurl
        data += "<br />\n"

        data += '<a href="http://buildbot.sourceforge.net/">Buildbot</a>'
        data += "-%s " % version
        if projectName:
            data += "working for the "
            if projectURL:
                data += "<a href=\"%s\">%s</a> project." % (projectURL,
                                                            projectName)
            else:
                data += "%s project." % projectName
        data += "<br />\n"
        data += ("Page built: " +
                 time.strftime("%a %d %b %Y %H:%M:%S",
                               time.localtime(util.now()))
                 + "\n")
        data += '</div>\n'
        return data

    def getRecentSourcestamps(self, status, numBuilds, categories, branch):
        """
        get a list of the most recent NUMBUILDS SourceStamp tuples, sorted
        by the earliest start we've seen for them
        """
        # TODO: use baseweb's getLastNBuilds?
        sourcestamps = { } # { ss-tuple : earliest time }
        for bn in status.getBuilderNames():
            builder = status.getBuilder(bn)
            if categories and builder.category not in categories:
                continue
            build = builder.getBuild(-1)
            while build:
                ss = build.getSourceStamp(absolute=True)
                start = build.getTimes()[0]
                # advance to the previous build *before* any 'continue'
                # below, so the loop always terminates
                build = build.getPreviousBuild()

                # skip un-started builds
                if not start: continue

                # skip non-matching branches
                if branch != ANYBRANCH and ss.branch != branch: continue

                sourcestamps[ss] = min(sourcestamps.get(ss, sys.maxint), start)

        # now sort those and take the NUMBUILDS most recent
        sourcestamps = sourcestamps.items()
        sourcestamps.sort(lambda x, y: cmp(x[1], y[1]))
        sourcestamps = map(lambda tup : tup[0], sourcestamps)
        sourcestamps = sourcestamps[-numBuilds:]

        return sourcestamps
+
diff --git a/buildbot/buildbot/status/web/index.html b/buildbot/buildbot/status/web/index.html
new file mode 100644
index 0000000..23e6650
--- /dev/null
+++ b/buildbot/buildbot/status/web/index.html
@@ -0,0 +1,32 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html>
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-15">
+<title>Welcome to the Buildbot</title>
+</head>
+
+<body>
+<h1>Welcome to the Buildbot!</h1>
+
+<ul>
+ <li>the <a href="waterfall">Waterfall Display</a> will give you a
+ time-oriented summary of recent buildbot activity.</li>
+
+ <li>the <a href="grid">Grid Display</a> will give you a
+ developer-oriented summary of recent buildbot activity.</li>
+
+ <li>The <a href="one_box_per_builder">Latest Build</a> for each builder is
+ here.</li>
+
+ <li><a href="one_line_per_build">Recent Builds</a> are summarized here, one
+ per line.</li>
+
+ <li><a href="buildslaves">Buildslave</a> information</li>
+ <li><a href="changes">ChangeSource</a> information.</li>
+
+ <br />
+ <li><a href="about">About this Buildbot</a></li>
+</ul>
+
+
+</body> </html>
diff --git a/buildbot/buildbot/status/web/logs.py b/buildbot/buildbot/status/web/logs.py
new file mode 100644
index 0000000..dfcf7f0
--- /dev/null
+++ b/buildbot/buildbot/status/web/logs.py
@@ -0,0 +1,171 @@
+
+from zope.interface import implements
+from twisted.python import components
+from twisted.spread import pb
+from twisted.web import html, server
+from twisted.web.resource import Resource
+from twisted.web.error import NoResource
+
+from buildbot import interfaces
+from buildbot.status import builder
+from buildbot.status.web.base import IHTMLLog, HtmlResource
+
+
# inline <style> block prepended to the HTML view of a text log:
# all chunks render in monospace; stderr chunks red, header chunks blue
textlog_stylesheet = """
<style type="text/css">
 div.data {
  font-family: "Courier New", courier, monotype;
 }
 span.stdout {
  font-family: "Courier New", courier, monotype;
 }
 span.stderr {
  font-family: "Courier New", courier, monotype;
  color: red;
 }
 span.header {
  font-family: "Courier New", courier, monotype;
  color: blue;
 }
</style>
"""
+
class ChunkConsumer:
    """Streams an in-progress status log to a twisted.web request: each
    chunk is formatted by the owning TextLog resource and written
    straight through to the original request."""
    implements(interfaces.IStatusLogConsumer)

    def __init__(self, original, textlog):
        self.original = original  # the twisted.web request being written to
        self.textlog = textlog    # the TextLog resource that formats chunks

    def registerProducer(self, producer, streaming):
        self.producer = producer
        self.original.registerProducer(producer, streaming)

    def unregisterProducer(self):
        self.original.unregisterProducer()

    def writeChunk(self, chunk):
        formatted = self.textlog.content([chunk])
        try:
            self.original.write(formatted)
        except pb.DeadReferenceError:
            # the HTTP client went away: stop the producer cleanly.
            # (fix: this used to read the nonexistent 'self.producing'
            # attribute, raising AttributeError instead of stopping)
            self.producer.stopProducing()

    def finish(self):
        self.textlog.finished()
+
+
# /builders/$builder/builds/$buildnum/steps/$stepname/logs/$logname
class TextLog(Resource):
    # a new instance of this Resource is created for each client who views
    # it, so we can afford to track the request in the Resource.
    implements(IHTMLLog)

    asText = False      # True after the client asks for the "text" child
    subscribed = False

    def __init__(self, original):
        Resource.__init__(self)
        self.original = original

    def getChild(self, path, req):
        # "<logurl>/text" serves the same log as plain text
        if path == "text":
            self.asText = True
            return self
        # NOTE(review): this calls HtmlResource.getChild with a TextLog
        # 'self', although TextLog is not an HtmlResource subclass --
        # confirm it only relies on base Resource behaviour
        return HtmlResource.getChild(self, path, req)

    def htmlHeader(self, request):
        """Return the opening HTML (title, stylesheet, link to the plain
        text version, and the opening <pre>) sent before any chunks."""
        title = "Log File contents"
        data = "<html>\n<head><title>" + title + "</title>\n"
        data += textlog_stylesheet
        data += "</head>\n"
        data += "<body vlink=\"#800080\">\n"
        texturl = request.childLink("text")
        data += '<a href="%s">(view as text)</a><br />\n' % texturl
        data += "<pre>\n"
        return data

    def content(self, entries):
        """Format a sequence of (channel, text) chunks: raw text in text
        mode (header chunks dropped), escaped <span>-wrapped HTML
        otherwise, with the span class named after the channel."""
        spanfmt = '<span class="%s">%s</span>'
        data = ""
        for type, entry in entries:
            if type >= len(builder.ChunkTypes) or type < 0:
                # non-std channel, don't display
                continue
            if self.asText:
                if type != builder.HEADER:
                    data += entry
            else:
                data += spanfmt % (builder.ChunkTypes[type],
                                   html.escape(entry))
        return data

    def htmlFooter(self):
        """Return the closing markup matching htmlHeader()."""
        data = "</pre>\n"
        data += "</body></html>\n"
        return data

    def render_HEAD(self, request):
        if self.asText:
            request.setHeader("content-type", "text/plain")
        else:
            request.setHeader("content-type", "text/html")

        # vague approximation, ignores markup
        request.setHeader("content-length", self.original.length)
        return ''

    def render_GET(self, req):
        self.req = req

        if self.asText:
            req.setHeader("content-type", "text/plain")
        else:
            req.setHeader("content-type", "text/html")

        if not self.asText:
            req.write(self.htmlHeader(req))

        # stream the log: chunks are formatted and written as they arrive,
        # and finished() is invoked when the log completes
        self.original.subscribeConsumer(ChunkConsumer(req, self))
        return server.NOT_DONE_YET

    def finished(self):
        """Called by the ChunkConsumer when the log is complete: write
        the footer (HTML mode) and close the request."""
        if not self.req:
            return
        try:
            if not self.asText:
                self.req.write(self.htmlFooter())
            self.req.finish()
        except pb.DeadReferenceError:
            pass
        # break the cycle, the Request's .notifications list includes the
        # Deferred (from req.notifyFinish) that's pointing at us.
        self.req = None

components.registerAdapter(TextLog, interfaces.IStatusLog, IHTMLLog)
+
+
class HTMLLog(Resource):
    """Serves a log whose contents are already fully-rendered HTML."""
    implements(IHTMLLog)

    def __init__(self, original):
        Resource.__init__(self)
        self.original = original

    def render(self, request):
        request.setHeader("content-type", "text/html")
        # the adapted HTMLLogFile carries the complete page in .html
        return self.original.html

components.registerAdapter(HTMLLog, builder.HTMLLogFile, IHTMLLog)
+
+
# /builders/$builder/builds/$buildnum/steps/$stepname/logs
class LogsResource(HtmlResource):
    """Dispatches child paths to the individual logs of one build step."""
    addSlash = True

    def __init__(self, step_status):
        HtmlResource.__init__(self)
        self.step_status = step_status

    def getChild(self, path, req):
        for log in self.step_status.getLogs():
            if path == log.getName():
                if log.hasContents():
                    # adapt to an IHTMLLog resource (TextLog or HTMLLog)
                    return IHTMLLog(interfaces.IStatusLog(log))
                return NoResource("Empty Log '%s'" % path)
        return HtmlResource.getChild(self, path, req)
diff --git a/buildbot/buildbot/status/web/robots.txt b/buildbot/buildbot/status/web/robots.txt
new file mode 100644
index 0000000..47a9d27
--- /dev/null
+++ b/buildbot/buildbot/status/web/robots.txt
@@ -0,0 +1,9 @@
+User-agent: *
+Disallow: /waterfall
+Disallow: /builders
+Disallow: /changes
+Disallow: /buildslaves
+Disallow: /schedulers
+Disallow: /one_line_per_build
+Disallow: /one_box_per_builder
+Disallow: /xmlrpc
diff --git a/buildbot/buildbot/status/web/slaves.py b/buildbot/buildbot/status/web/slaves.py
new file mode 100644
index 0000000..5782873
--- /dev/null
+++ b/buildbot/buildbot/status/web/slaves.py
@@ -0,0 +1,181 @@
+
+import time, urllib
+from twisted.python import log
+from twisted.web import html
+from twisted.web.util import Redirect
+
+from buildbot.status.web.base import HtmlResource, abbreviate_age, OneLineMixin, path_to_slave
+from buildbot import version, util
+
# /buildslaves/$slavename
class OneBuildSlaveResource(HtmlResource, OneLineMixin):
    """Status page for one buildslave: graceful-shutdown control, the
    builds currently running on it, and its most recent builds."""
    addSlash = False

    def __init__(self, slavename):
        HtmlResource.__init__(self)
        self.slavename = slavename

    def getTitle(self, req):
        return "Buildbot: %s" % html.escape(self.slavename)

    def getChild(self, path, req):
        if path == "shutdown":
            # ask the slave to shut down gracefully, then bounce the
            # browser back to this slave's status page
            s = self.getStatus(req)
            slave = s.getSlave(self.slavename)
            slave.setGraceful(True)
            return Redirect(path_to_slave(req, slave))
        # fix: fall through to the default lookup -- previously every
        # other path implicitly returned None, which twisted.web cannot
        # render
        return HtmlResource.getChild(self, path, req)

    def body(self, req):
        s = self.getStatus(req)
        slave = s.getSlave(self.slavename)

        # collect every builder this slave is attached to
        my_builders = []
        for bname in s.getBuilderNames():
            b = s.getBuilder(bname)
            for bs in b.getSlaves():
                if bs.getName() == self.slavename:
                    my_builders.append(b)

        # Current builds
        current_builds = []
        for b in my_builders:
            for cb in b.getCurrentBuilds():
                if cb.getSlavename() == self.slavename:
                    current_builds.append(cb)

        data = []

        projectName = s.getProjectName()

        data.append("<a href=\"%s\">%s</a>\n" % (self.path_to_root(req), projectName))

        data.append("<h1>Build Slave: %s</h1>\n" % self.slavename)

        shutdown_url = req.childLink("shutdown")

        if not slave.isConnected():
            data.append("<h2>NOT CONNECTED</h2>\n")
        elif not slave.getGraceful():
            data.append('''<form method="POST" action="%s">
<input type="submit" value="Gracefully Shutdown">
</form>''' % shutdown_url)
        else:
            data.append("Gracefully shutting down...\n")

        if current_builds:
            data.append("<h2>Currently building:</h2>\n")
            data.append("<ul>\n")
            for build in current_builds:
                data.append("<li>%s</li>\n" % self.make_line(req, build, True))
            data.append("</ul>\n")

        else:
            data.append("<h2>no current builds</h2>\n")

        # Recent builds
        data.append("<h2>Recent builds:</h2>\n")
        data.append("<ul>\n")
        n = 0
        try:
            max_builds = int(req.args.get('builds')[0])
        except (TypeError, ValueError, IndexError):
            # 'builds' argument missing (get() gave None), empty, or not
            # an integer -- was a bare except:, narrowed to what the
            # expression can actually raise
            max_builds = 10
        for build in s.generateFinishedBuilds(builders=[b.getName() for b in my_builders]):
            if build.getSlavename() == self.slavename:
                n += 1
                data.append("<li>%s</li>\n" % self.make_line(req, build, True))
                if n > max_builds:
                    break
        data.append("</ul>\n")

        projectURL = s.getProjectURL()
        projectName = s.getProjectName()
        data.append('<hr /><div class="footer">\n')

        welcomeurl = self.path_to_root(req) + "index.html"
        data.append("[<a href=\"%s\">welcome</a>]\n" % welcomeurl)
        data.append("<br />\n")

        data.append('<a href="http://buildbot.sourceforge.net/">Buildbot</a>')
        data.append("-%s " % version)
        if projectName:
            data.append("working for the ")
            if projectURL:
                data.append("<a href=\"%s\">%s</a> project." % (projectURL,
                                                                projectName))
            else:
                data.append("%s project." % projectName)
        data.append("<br />\n")
        data.append("Page built: " +
                    time.strftime("%a %d %b %Y %H:%M:%S",
                                  time.localtime(util.now()))
                    + "\n")
        data.append("</div>\n")

        return "".join(data)
+
# /buildslaves
class BuildSlavesResource(HtmlResource):
    """Overview page listing every buildslave: which builders use it,
    admin contact, last-heard-from time, and busy/idle state."""
    title = "BuildSlaves"
    addSlash = True

    def body(self, req):
        s = self.getStatus(req)
        data = ""
        data += "<h1>Build Slaves</h1>\n"

        # map slavename -> [names of builders that use it]
        used_by_builder = {}
        for bname in s.getBuilderNames():
            b = s.getBuilder(bname)
            for bs in b.getSlaves():
                slavename = bs.getName()
                if slavename not in used_by_builder:
                    used_by_builder[slavename] = []
                used_by_builder[slavename].append(bname)

        data += "<ol>\n"
        for name in util.naturalSort(s.getSlaveNames()):
            slave = s.getSlave(name)
            slave_status = s.botmaster.slaves[name].slave_status
            # truthy count of running builds; used as a busy flag below
            isBusy = len(slave_status.getRunningBuilds())
            data += " <li><a href=\"%s\">%s</a>:\n" % (req.childLink(urllib.quote(name,'')), name)
            data += " <ul>\n"
            builder_links = ['<a href="%s">%s</a>'
                             % (req.childLink("../builders/%s" % bname),bname)
                             for bname in used_by_builder.get(name, [])]
            if builder_links:
                data += (" <li>Used by Builders: %s</li>\n" %
                         ", ".join(builder_links))
            else:
                data += " <li>Not used by any Builders</li>\n"
            if slave.isConnected():
                data += " <li>Slave is currently connected</li>\n"
                admin = slave.getAdmin()
                if admin:
                    # munge it to avoid feeding the spambot harvesters
                    admin = admin.replace("@", " -at- ")
                    data += " <li>Admin: %s</li>\n" % admin
                last = slave.lastMessageReceived()
                if last:
                    lt = time.strftime("%Y-%b-%d %H:%M:%S",
                                       time.localtime(last))
                    age = abbreviate_age(time.time() - last)
                    data += " <li>Last heard from: %s " % age
                    data += '<font size="-1">(%s)</font>' % lt
                    data += "</li>\n"
                if isBusy:
                    data += "<li>Slave is currently building.</li>"
                else:
                    data += "<li>Slave is idle.</li>"
            else:
                data += " <li><b>Slave is NOT currently connected</b></li>\n"

            data += " </ul>\n"
            data += " </li>\n"
            data += "\n"

        data += "</ol>\n"

        return data

    def getChild(self, path, req):
        # every child path names one individual slave
        return OneBuildSlaveResource(path)
diff --git a/buildbot/buildbot/status/web/step.py b/buildbot/buildbot/status/web/step.py
new file mode 100644
index 0000000..b65626f
--- /dev/null
+++ b/buildbot/buildbot/status/web/step.py
@@ -0,0 +1,97 @@
+
+from twisted.web import html
+
+import urllib
+from buildbot.status.web.base import HtmlResource, path_to_builder, \
+ path_to_build
+from buildbot.status.web.logs import LogsResource
+from buildbot import util
+from time import ctime
+
# /builders/$builder/builds/$buildnum/steps/$stepname
class StatusResourceBuildStep(HtmlResource):
    """Status page for a single BuildStep: result text, expectations,
    timing, and links to the step's logs."""
    title = "Build Step"
    addSlash = True

    def __init__(self, build_status, step_status):
        HtmlResource.__init__(self)
        self.status = build_status
        self.step_status = step_status

    def body(self, req):
        s = self.step_status
        b = s.getBuild()
        builder_name = b.getBuilder().getName()
        build_num = b.getNumber()
        data = ""
        data += ('<h1>BuildStep <a href="%s">%s</a>:' %
                 (path_to_builder(req, b.getBuilder()), builder_name))
        data += '<a href="%s">#%d</a>' % (path_to_build(req, b), build_num)
        data += ":%s</h1>\n" % s.getName()

        if s.isFinished():
            data += ("<h2>Finished</h2>\n"
                     "<p>%s</p>\n" % html.escape("%s" % s.getText()))
        else:
            data += ("<h2>Not Finished</h2>\n"
                     "<p>ETA %s seconds</p>\n" % s.getETA())

        exp = s.getExpectations()
        if exp:
            data += ("<h2>Expectations</h2>\n"
                     "<ul>\n")
            for e in exp:
                data += "<li>%s: current=%s, target=%s</li>\n" % \
                        (html.escape(e[0]), e[1], e[2])
            data += "</ul>\n"

        (start, end) = s.getTimes()
        data += "<h2>Timing</h2>\n"
        data += "<table>\n"
        data += "<tr><td>Start</td><td>%s</td></tr>\n" % ctime(start)
        if end:
            data += "<tr><td>End</td><td>%s</td></tr>\n" % ctime(end)
            data += "<tr><td>Elapsed</td><td>%s</td></tr>\n" % util.formatInterval(end - start)
        data += "</table>\n"

        logs = s.getLogs()
        if logs:
            data += ("<h2>Logs</h2>\n"
                     "<ul>\n")
            for logfile in logs:
                # fix: fetch the name unconditionally -- it used to be
                # assigned only inside the hasContents() branch, so the
                # else branch raised NameError for a contentless first
                # log (or reused a stale name from a prior iteration)
                logname = logfile.getName()
                if logfile.hasContents():
                    # FIXME: If the step name has a / in it, this is broken
                    # either way. If we quote it but say '/'s are safe,
                    # it chops up the step name. If we quote it and '/'s
                    # are not safe, it escapes the / that separates the
                    # step name from the log number.
                    logurl = req.childLink("logs/%s" % urllib.quote(logname))
                    data += ('<li><a href="%s">%s</a></li>\n' %
                             (logurl, html.escape(logname)))
                else:
                    data += '<li>%s</li>\n' % html.escape(logname)
            data += "</ul>\n"

        return data

    def getChild(self, path, req):
        if path == "logs":
            return LogsResource(self.step_status)
        return HtmlResource.getChild(self, path, req)
+
+
+
# /builders/$builder/builds/$buildnum/steps
class StepsResource(HtmlResource):
    """Dispatches child paths to the named step of one build."""
    addSlash = True

    def __init__(self, build_status):
        HtmlResource.__init__(self)
        self.build_status = build_status

    def getChild(self, path, req):
        # find the first step whose name matches the requested path
        match = None
        for step in self.build_status.getSteps():
            if step.getName() == path:
                match = step
                break
        if match is not None:
            return StatusResourceBuildStep(self.build_status, match)
        return HtmlResource.getChild(self, path, req)
diff --git a/buildbot/buildbot/status/web/tests.py b/buildbot/buildbot/status/web/tests.py
new file mode 100644
index 0000000..b96bba2
--- /dev/null
+++ b/buildbot/buildbot/status/web/tests.py
@@ -0,0 +1,64 @@
+
+from twisted.web.error import NoResource
+from twisted.web import html
+
+from buildbot.status.web.base import HtmlResource
+
# /builders/$builder/builds/$buildnum/tests/$testname
class TestResult(HtmlResource):
    """Renders the logs attached to a single test result."""
    title = "Test Logs"

    def __init__(self, name, test_result):
        HtmlResource.__init__(self)
        self.name = name                # tuple of test-name components
        self.test_result = test_result  # provides getLogs()

    def body(self, request):
        """Return HTML: a heading with the dotted test name, then one
        <h2>/<pre> section per log, in sorted log-name order."""
        logs = self.test_result.getLogs()
        pieces = ["<h1>%s</h1>\n" % html.escape(".".join(self.name))]
        for logname in sorted(logs.keys()):
            pieces.append("<h2>%s</h2>\n" % html.escape(logname))
            pieces.append("<pre>" + logs[logname] + "</pre>\n\n")
        return "".join(pieces)
+
+
# /builders/$builder/builds/$buildnum/tests
class TestsResource(HtmlResource):
    """Index page listing every test result attached to one build, each
    linked to its own TestResult page."""
    title = "Test Results"

    def __init__(self, build_status):
        HtmlResource.__init__(self)
        self.build_status = build_status
        # mapping of name-tuple -> test result object
        self.test_results = build_status.getTestResults()

    def body(self, request):
        r = self.test_results
        data = "<h1>Test Results</h1>\n"
        data += "<ul>\n"
        testnames = r.keys()
        testnames.sort()
        for name in testnames:
            res = r[name]
            # names are tuples of components; join into dotted form
            dotname = ".".join(name)
            data += " <li>%s: " % dotname
            # TODO: this could break on weird test names. At the moment,
            # test names only come from Trial tests, where the name
            # components must be legal python names, but that won't always
            # be a restriction.
            url = request.childLink(dotname)
            data += "<a href=\"%s\">%s</a>" % (url, " ".join(res.getText()))
            data += "</li>\n"
        data += "</ul>\n"
        return data

    def getChild(self, path, request):
        try:
            # reverse the dotted-name join performed in body()
            name = tuple(path.split("."))
            result = self.test_results[name]
            return TestResult(name, result)
        except KeyError:
            return NoResource("No such test name '%s'" % path)
diff --git a/buildbot/buildbot/status/web/waterfall.py b/buildbot/buildbot/status/web/waterfall.py
new file mode 100644
index 0000000..1d3ab60
--- /dev/null
+++ b/buildbot/buildbot/status/web/waterfall.py
@@ -0,0 +1,962 @@
+# -*- test-case-name: buildbot.test.test_web -*-
+
+from zope.interface import implements
+from twisted.python import log, components
+from twisted.web import html
+import urllib
+
+import time
+import operator
+
+from buildbot import interfaces, util
+from buildbot import version
+from buildbot.status import builder
+
+from buildbot.status.web.base import Box, HtmlResource, IBox, ICurrentBox, \
+ ITopBox, td, build_get_class, path_to_build, path_to_step, map_branches
+
+
+
class CurrentBox(components.Adapter):
    # this provides the "current activity" box, just above the builder name
    implements(ICurrentBox)

    def formatETA(self, prefix, eta):
        """Return a list of display strings for an ETA of 'eta' seconds
        (e.g. [prefix, "~ 2 mins", "at 14:05"]); [] when eta is None."""
        if eta is None:
            return []
        if eta < 60:
            return ["< 1 min"]
        eta_parts = ["~"]
        eta_secs = eta
        if eta_secs > 3600:
            eta_parts.append("%d hrs" % (eta_secs / 3600))
            eta_secs %= 3600
        if eta_secs > 60:
            eta_parts.append("%d mins" % (eta_secs / 60))
            eta_secs %= 60
        abstime = time.strftime("%H:%M", time.localtime(util.now()+eta))
        return [prefix, " ".join(eta_parts), "at %s" % abstime]

    def getBox(self, status):
        """Build the Box describing this builder's current activity
        (building/offline/idle/waiting, ETAs, pending-build count)."""
        # getState() returns offline, idle, or building
        state, builds = self.original.getState()

        # look for upcoming builds. We say the state is "waiting" if the
        # builder is otherwise idle and there is a scheduler which tells us a
        # build will be performed some time in the near future. TODO: this
        # functionality used to be in BuilderStatus.. maybe this code should
        # be merged back into it.
        upcoming = []
        builderName = self.original.getName()
        for s in status.getSchedulers():
            if builderName in s.listBuilderNames():
                upcoming.extend(s.getPendingBuildTimes())
        if state == "idle" and upcoming:
            state = "waiting"

        if state == "building":
            text = ["building"]
            if builds:
                for b in builds:
                    eta = b.getETA()
                    text.extend(self.formatETA("ETA in", eta))
        elif state == "offline":
            text = ["offline"]
        elif state == "idle":
            text = ["idle"]
        elif state == "waiting":
            text = ["waiting"]
        else:
            # just in case I add a state and forget to update this
            text = [state]

        # TODO: for now, this pending/upcoming stuff is in the "current
        # activity" box, but really it should go into a "next activity" row
        # instead. The only times it should show up in "current activity" is
        # when the builder is otherwise idle.

        # are any builds pending? (waiting for a slave to be free)
        pbs = self.original.getPendingBuilds()
        if pbs:
            text.append("%d pending" % len(pbs))
        for t in upcoming:
            eta = t - util.now()
            text.extend(self.formatETA("next in", eta))
        return Box(text, class_="Activity " + state)

components.registerAdapter(CurrentBox, builder.BuilderStatus, ICurrentBox)
+
+
class BuildTopBox(components.Adapter):
    # this provides a per-builder box at the very top of the display,
    # showing the results of the most recent build
    implements(IBox)

    def getBox(self, req):
        assert interfaces.IBuilderStatus(self.original)
        # only branches explicitly requested via ?branch= (empty strings
        # are dropped)
        branches = [b for b in req.args.get("branch", []) if b]
        builder = self.original
        builds = list(builder.generateFinishedBuilds(map_branches(branches),
                                                     num_builds=1))
        if not builds:
            return Box(["none"], class_="LastBuild")
        b = builds[0]
        name = b.getBuilder().getName()   # NOTE(review): unused
        number = b.getNumber()            # NOTE(review): unused
        url = path_to_build(req, b)       # NOTE(review): unused, see TODO below
        text = b.getText()
        tests_failed = b.getSummaryStatistic('tests-failed', operator.add, 0)
        if tests_failed: text.extend(["Failed tests: %d" % tests_failed])
        # TODO: maybe add logs?
        # TODO: add link to the per-build page at 'url'
        class_ = build_get_class(b)
        return Box(text, class_="LastBuild %s" % class_)
components.registerAdapter(BuildTopBox, builder.BuilderStatus, ITopBox)
+
class BuildBox(components.Adapter):
    # this provides the yellow "starting line" box for each build
    implements(IBox)

    def getBox(self, req):
        """Return the box linking to the build's page, annotated with
        the build reason as a tooltip."""
        b = self.original
        number = b.getNumber()
        url = path_to_build(req, b)
        reason = b.getReason()
        text = ('<a title="Reason: %s" href="%s">Build %d</a>'
                % (html.escape(reason), url, number))
        class_ = "start"
        if b.isFinished() and not b.getSteps():
            # the steps have been pruned, so there won't be any indication
            # of whether it succeeded or failed.
            class_ = build_get_class(b)
        return Box([text], class_="BuildStep " + class_)
components.registerAdapter(BuildBox, builder.BuildStatus, IBox)
+
class StepBox(components.Adapter):
    """Adapts a BuildStepStatus to its waterfall box: the step's text
    plus links to each of its logs and any extra URLs it published."""
    implements(IBox)

    def getBox(self, req):
        urlbase = path_to_step(req, self.original)
        text = self.original.getText()
        if text is None:
            log.msg("getText() gave None", urlbase)
            text = []
        text = text[:]  # copy before appending link markup below
        logs = self.original.getLogs()
        for num in range(len(logs)):
            name = logs[num].getName()
            if logs[num].hasContents():
                url = urlbase + "/logs/%s" % urllib.quote(name)
                text.append("<a href=\"%s\">%s</a>" % (url, html.escape(name)))
            else:
                # contentless logs get a plain (unlinked) label
                text.append(html.escape(name))
        urls = self.original.getURLs()
        ex_url_class = "BuildStep external"
        for name, target in urls.items():
            text.append('[<a href="%s" class="%s">%s</a>]' %
                        (target, ex_url_class, html.escape(name)))
        class_ = "BuildStep " + build_get_class(self.original)
        return Box(text, class_=class_)
components.registerAdapter(StepBox, builder.BuildStepStatus, IBox)
+
+
class EventBox(components.Adapter):
    """Adapts a builder Event to its waterfall display box."""
    implements(IBox)

    def getBox(self, req):
        # a plain box holding the event's own text
        return Box(self.original.getText(), class_="Event")
components.registerAdapter(EventBox, builder.Event, IBox)
+
+
class Spacer:
    """A synthetic idle-gap event inserted between real status events by
    insertGaps(), so the waterfall can render the idle time."""
    implements(interfaces.IStatusEvent)

    def __init__(self, start, finish):
        # the endpoints of the idle gap this spacer represents
        self.started = start
        self.finished = finish

    def getTimes(self):
        return (self.started, self.finished)

    def getText(self):
        # spacers carry no text of their own
        return []
+
class SpacerBox(components.Adapter):
    """Adapts a Spacer to an empty box marked as filler."""
    implements(IBox)

    def getBox(self, req):
        #b = Box(["spacer"], "white")
        b = Box([])
        b.spacer = True  # flags this box as blank filler for the renderer
        return b
components.registerAdapter(SpacerBox, Spacer, IBox)
+
def insertGaps(g, lastEventTime, idleGap=2):
    """Generator filter for a waterfall column: re-yield the events of
    generator 'g' (expected newest-first), inserting a Spacer wherever
    the end of one event and the start of the next-newer one are more
    than 'idleGap' seconds apart. 'lastEventTime' bounds the gap check
    for the very first event."""
    debug = False

    # prime the pump: compare the first event against lastEventTime
    e = g.next()
    starts, finishes = e.getTimes()
    if debug: log.msg("E0", starts, finishes)
    if finishes == 0:
        # a finish time of 0 is treated as "same as start" -- presumably
        # a not-yet-finished event; confirm against IStatusEvent
        finishes = starts
    if debug: log.msg("E1 finishes=%s, gap=%s, lET=%s" % \
                      (finishes, idleGap, lastEventTime))
    if finishes is not None and finishes + idleGap < lastEventTime:
        if debug: log.msg(" spacer0")
        yield Spacer(finishes, lastEventTime)

    followingEventStarts = starts
    if debug: log.msg(" fES0", starts)
    yield e

    # loop ends when g raises StopIteration, which propagates to our caller
    while 1:
        e = g.next()
        starts, finishes = e.getTimes()
        if debug: log.msg("E2", starts, finishes)
        if finishes == 0:
            finishes = starts
        if finishes is not None and finishes + idleGap < followingEventStarts:
            # there is a gap between the end of this event and the beginning
            # of the next one. Insert an idle event so the waterfall display
            # shows a gap here.
            if debug:
                log.msg(" finishes=%s, gap=%s, fES=%s" % \
                        (finishes, idleGap, followingEventStarts))
            yield Spacer(finishes, followingEventStarts)
        yield e
        followingEventStarts = starts
        if debug: log.msg(" fES1", starts)
+
# Template for the waterfall help page, rendered by WaterfallHelp.body();
# the %(...)s slots are filled in with generated HTML form fragments.
# (Fixed the duplicated word: "override this this" -> "override this".)
HELP = '''
<form action="../waterfall" method="GET">

<h1>The Waterfall Display</h1>

<p>The Waterfall display can be controlled by adding query arguments to the
URL. For example, if your Waterfall is accessed via the URL
<tt>http://buildbot.example.org:8080</tt>, then you could add a
<tt>branch=</tt> argument (described below) by going to
<tt>http://buildbot.example.org:8080?branch=beta4</tt> instead. Remember that
query arguments are separated from each other with ampersands, but they are
separated from the main URL with a question mark, so to add a
<tt>branch=</tt> and two <tt>builder=</tt> arguments, you would use
<tt>http://buildbot.example.org:8080?branch=beta4&amp;builder=unix&amp;builder=macos</tt>.</p>

<h2>Limiting the Displayed Interval</h2>

<p>The <tt>last_time=</tt> argument is a unix timestamp (seconds since the
start of 1970) that will be used as an upper bound on the interval of events
displayed: nothing will be shown that is more recent than the given time.
When no argument is provided, all events up to and including the most recent
steps are included.</p>

<p>The <tt>first_time=</tt> argument provides the lower bound. No events will
be displayed that occurred <b>before</b> this timestamp. Instead of providing
<tt>first_time=</tt>, you can provide <tt>show_time=</tt>: in this case,
<tt>first_time</tt> will be set equal to <tt>last_time</tt> minus
<tt>show_time</tt>. <tt>show_time</tt> overrides <tt>first_time</tt>.</p>

<p>The display normally shows the latest 200 events that occurred in the
given interval, where each timestamp on the left hand edge counts as a single
event. You can add a <tt>num_events=</tt> argument to override this.</p>

<h2>Hiding non-Build events</h2>

<p>By passing <tt>show_events=false</tt>, you can remove the "buildslave
attached", "buildslave detached", and "builder reconfigured" events that
appear in-between the actual builds.</p>

%(show_events_input)s

<h2>Showing only Certain Branches</h2>

<p>If you provide one or more <tt>branch=</tt> arguments, the display will be
limited to builds that used one of the given branches. If no <tt>branch=</tt>
arguments are given, builds from all branches will be displayed.</p>

Erase the text from these "Show Branch:" boxes to remove that branch filter.

%(show_branches_input)s

<h2>Limiting the Builders that are Displayed</h2>

<p>By adding one or more <tt>builder=</tt> arguments, the display will be
limited to showing builds that ran on the given builders. This serves to
limit the display to the specific named columns. If no <tt>builder=</tt>
arguments are provided, all Builders will be displayed.</p>

<p>To view a Waterfall page with only a subset of Builders displayed, select
the Builders you are interested in here.</p>

%(show_builders_input)s


<h2>Auto-reloading the Page</h2>

<p>Adding a <tt>reload=</tt> argument will cause the page to automatically
reload itself after that many seconds.</p>

%(show_reload_input)s

<h2>Reload Waterfall Page</h2>

<input type="submit" value="View Waterfall" />
</form>
'''
+
class WaterfallHelp(HtmlResource):
    """Help page for the waterfall display.

    Renders the static HELP template, filling in form controls that are
    pre-populated from the current request's query arguments.
    """

    title = "Waterfall Help"

    def __init__(self, categories=None):
        HtmlResource.__init__(self)
        self.categories = categories

    def body(self, request):
        status = self.getStatus(request)

        # "Hide non-Build events" checkbox: pre-checked unless the current
        # request already asked for show_events=true (which is the default).
        if request.args.get("show_events", ["true"])[0].lower() == "true":
            showEvents_checked = ''
        else:
            showEvents_checked = 'checked="checked"'
        show_events_input = ('<p>'
                             '<input type="checkbox" name="show_events" '
                             'value="false" %s>'
                             'Hide non-Build events'
                             '</p>\n'
                             ) % showEvents_checked

        # one text box per branch filter currently in effect, plus a
        # trailing empty box so a new branch can be added
        branches = [b for b in request.args.get("branch", []) if b]
        branches.append('')
        branch_rows = ['<table>\n']
        for b in branches:
            branch_rows.append(('<tr>'
                                '<td>Show Branch: '
                                '<input type="text" name="branch" '
                                'value="%s">'
                                '</td></tr>\n'
                                ) % (b,))
        branch_rows.append('</table>\n')
        show_branches_input = ''.join(branch_rows)

        # a set of toggle-buttons letting the user choose the builders;
        # the old show= argument is honoured alongside builder=
        showBuilders = request.args.get("show", [])
        showBuilders.extend(request.args.get("builder", []))
        allBuilders = status.getBuilderNames(categories=self.categories)

        builder_rows = ['<table>\n']
        for bn in allBuilders:
            if bn in showBuilders:
                checked = 'checked="checked"'
            else:
                checked = ""
            builder_rows.append(('<tr>'
                                 '<td><input type="checkbox"'
                                 ' name="builder" '
                                 'value="%s" %s></td> '
                                 '<td>%s</td></tr>\n'
                                 ) % (bn, checked, bn))
        builder_rows.append('</table>\n')
        show_builders_input = ''.join(builder_rows)

        # radio-button selectors for the auto-reload interval; a
        # non-standard current value is prepended to the choices
        times = [("none", "None"),
                 ("60", "60 seconds"),
                 ("300", "5 minutes"),
                 ("600", "10 minutes"),
                 ]
        current_reload_time = request.args.get("reload", ["none"])
        if current_reload_time:
            current_reload_time = current_reload_time[0]
        if current_reload_time not in [t[0] for t in times]:
            times.insert(0, (current_reload_time, current_reload_time) )
        reload_rows = ['<table>\n']
        for value, name in times:
            if value == current_reload_time:
                checked = 'checked="checked"'
            else:
                checked = ""
            reload_rows.append(('<tr>'
                                '<td><input type="radio" name="reload" '
                                'value="%s" %s></td> '
                                '<td>%s</td></tr>\n'
                                ) % (value, checked, name))
        reload_rows.append('</table>\n')
        show_reload_input = ''.join(reload_rows)

        fields = {"show_events_input": show_events_input,
                  "show_branches_input": show_branches_input,
                  "show_builders_input": show_builders_input,
                  "show_reload_input": show_reload_input,
                  }
        return HELP % fields
+
class WaterfallStatusResource(HtmlResource):
    """This builds the main status page, with the waterfall display, and
    all child pages."""

    def __init__(self, categories=None):
        # categories: optional list restricting which Builders are shown;
        # None means every Builder the master knows about.
        HtmlResource.__init__(self)
        self.categories = categories
        # the help page lives at <waterfall-url>/help
        self.putChild("help", WaterfallHelp(categories))

    def getTitle(self, request):
        """Return the page title, including the project name when set."""
        status = self.getStatus(request)
        p = status.getProjectName()
        if p:
            return "BuildBot: %s" % p
        else:
            return "BuildBot"

    def getChangemaster(self, request):
        # TODO: this wants to go away, access it through IStatus
        return request.site.buildbot_service.getChangeSvc()

    def get_reload_time(self, request):
        """Return the requested auto-reload interval in seconds (clamped
        to a minimum of 15), or None if no valid reload= argument was
        supplied."""
        if "reload" in request.args:
            try:
                reload_time = int(request.args["reload"][0])
                # clamp to at least 15s so browsers can't hammer the master
                return max(reload_time, 15)
            except ValueError:
                # non-numeric reload= values are silently ignored
                pass
        return None

    def head(self, request):
        """Return extra <head> content: a meta-refresh tag when the page
        was requested with a reload= interval."""
        head = ''
        reload_time = self.get_reload_time(request)
        if reload_time is not None:
            head += '<meta http-equiv="refresh" content="%d">\n' % reload_time
        return head

    def body(self, request):
        "This method builds the main waterfall display."

        status = self.getStatus(request)
        data = ''

        projectName = status.getProjectName()
        projectURL = status.getProjectURL()

        # phase= selects one of several rendering strategies; phase 2
        # (merged/bubbled boxes) is the default.
        phase = request.args.get("phase",["2"])
        phase = int(phase[0])

        # we start with all Builders available to this Waterfall: this is
        # limited by the config-file-time categories= argument, and defaults
        # to all defined Builders.
        allBuilderNames = status.getBuilderNames(categories=self.categories)
        builders = [status.getBuilder(name) for name in allBuilderNames]

        # but if the URL has one or more builder= arguments (or the old show=
        # argument, which is still accepted for backwards compatibility), we
        # use that set of builders instead. We still don't show anything
        # outside the config-file time set limited by categories=.
        showBuilders = request.args.get("show", [])
        showBuilders.extend(request.args.get("builder", []))
        if showBuilders:
            builders = [b for b in builders if b.name in showBuilders]

        # now, if the URL has one or more category= arguments, use them as a
        # filter: only show those builders which belong to one of the given
        # categories.
        showCategories = request.args.get("category", [])
        if showCategories:
            builders = [b for b in builders if b.category in showCategories]

        builderNames = [b.name for b in builders]

        if phase == -1:
            return self.body0(request, builders)
        (changeNames, builderNames, timestamps, eventGrid, sourceEvents) = \
                      self.buildGrid(request, builders)
        if phase == 0:
            return self.phase0(request, (changeNames + builderNames),
                               timestamps, eventGrid)
        # start the table: top-header material
        data += '<table border="0" cellspacing="0">\n'

        if projectName and projectURL:
            # TODO: this is going to look really ugly
            topleft = '<a href="%s">%s</a><br />last build' % \
                      (projectURL, projectName)
        else:
            topleft = "last build"
        data += ' <tr class="LastBuild">\n'
        data += td(topleft, align="right", colspan=2, class_="Project")
        # one "last build" summary box per builder
        for b in builders:
            box = ITopBox(b).getBox(request)
            data += box.td(align="center")
        data += " </tr>\n"

        # second header row: what each builder is doing right now
        data += ' <tr class="Activity">\n'
        data += td('current activity', align='right', colspan=2)
        for b in builders:
            box = ICurrentBox(b).getBox(status)
            data += box.td(align="center")
        data += " </tr>\n"

        # third header row: column labels (local timezone, changes link,
        # one link per builder page)
        data += " <tr>\n"
        TZ = time.tzname[time.localtime()[-1]]
        data += td("time (%s)" % TZ, align="center", class_="Time")
        data += td('<a href="%s">changes</a>' % request.childLink("../changes"),
                   align="center", class_="Change")
        for name in builderNames:
            safename = urllib.quote(name, safe='')
            data += td('<a href="%s">%s</a>' %
                       (request.childLink("../builders/%s" % safename), name),
                       align="center", class_="Builder")
        data += " </tr>\n"

        # body of the table: delegate to the selected rendering phase
        if phase == 1:
            f = self.phase1
        else:
            f = self.phase2
        data += f(request, changeNames + builderNames, timestamps, eventGrid,
                  sourceEvents)

        data += "</table>\n"

        data += '<hr /><div class="footer">\n'

        def with_args(req, remove_args=[], new_args=[], new_path=None):
            """Rebuild the current request's URL with some query arguments
            removed (remove_args), some added (new_args, a list of (k,v)
            pairs), and optionally a different path (new_path).  The
            mutable default arguments are never modified, only iterated.
            """
            # sigh, nevow makes this sort of manipulation easier
            newargs = req.args.copy()
            for argname in remove_args:
                newargs[argname] = []
            # drop empty branch= filters so they don't clutter the URL
            if "branch" in newargs:
                newargs["branch"] = [b for b in newargs["branch"] if b]
            for k,v in new_args:
                if k in newargs:
                    newargs[k].append(v)
                else:
                    newargs[k] = [v]
            newquery = "&".join(["%s=%s" % (k, v)
                                 for k in newargs
                                 for v in newargs[k]
                                 ])
            if new_path:
                new_url = new_path
            elif req.prepath:
                new_url = req.prepath[-1]
            else:
                new_url = ''
            if newquery:
                new_url += "?" + newquery
            return new_url

        if timestamps:
            # "next page" scrolls further into the past, bounded by the
            # oldest timestamp currently displayed
            bottom = timestamps[-1]
            nextpage = with_args(request, ["last_time"],
                                 [("last_time", str(int(bottom)))])
            data += '[<a href="%s">next page</a>]\n' % nextpage

        helpurl = self.path_to_root(request) + "waterfall/help"
        helppage = with_args(request, new_path=helpurl)
        data += '[<a href="%s">help</a>]\n' % helppage

        welcomeurl = self.path_to_root(request) + "index.html"
        data += '[<a href="%s">welcome</a>]\n' % welcomeurl

        if self.get_reload_time(request) is not None:
            # offer a link to the same page without the reload= argument
            no_reload_page = with_args(request, remove_args=["reload"])
            data += '[<a href="%s">Stop Reloading</a>]\n' % no_reload_page

        data += "<br />\n"


        bburl = "http://buildbot.net/?bb-ver=%s" % urllib.quote(version)
        data += '<a href="%s">Buildbot-%s</a> ' % (bburl, version)
        if projectName:
            data += "working for the "
            if projectURL:
                data += '<a href="%s">%s</a> project.' % (projectURL,
                                                          projectName)
            else:
                data += "%s project." % projectName
        data += "<br />\n"
        # TODO: push this to the right edge, if possible
        data += ("Page built: " +
                 time.strftime("%a %d %b %Y %H:%M:%S",
                               time.localtime(util.now()))
                 + "\n")
        data += '</div>\n'
        return data

    def body0(self, request, builders):
        """Render the minimal phase=-1 display: a static table with one
        column per builder and placeholder rows (no event history)."""
        # build the waterfall display
        data = ""
        data += "<h2>Basic display</h2>\n"
        data += '<p>See <a href="%s">here</a>' % request.childLink("../waterfall")
        data += " for the waterfall display</p>\n"

        data += '<table border="0" cellspacing="0">\n'
        names = map(lambda builder: builder.name, builders)

        # the top row is two blank spaces, then the top-level status boxes
        data += " <tr>\n"
        data += td("", colspan=2)
        for b in builders:
            text = ""
            state, builds = b.getState()
            if state != "offline":
                text += "%s<br />\n" % state #b.getCurrentBig().text[0]
            else:
                text += "OFFLINE<br />\n"
            data += td(text, align="center")

        # the next row has the column headers: time, changes, builder names
        data += " <tr>\n"
        data += td("Time", align="center")
        data += td("Changes", align="center")
        for name in names:
            data += td('<a href="%s">%s</a>' %
                       (request.childLink("../" + urllib.quote(name)), name),
                       align="center")
        data += " </tr>\n"

        # all further rows involve timestamps, commit events, and build events
        # NOTE(review): these are placeholder values ("04:00", "fred",
        # "stuff"), not real data -- this phase appears to be a stub.
        data += " <tr>\n"
        data += td("04:00", align="bottom")
        data += td("fred", align="center")
        for name in names:
            data += td("stuff", align="center")
        data += " </tr>\n"

        data += "</table>\n"
        return data

    def buildGrid(self, request, builders):
        """Walk backwards through each source's event history and collect
        events into time-window "spans".

        Returns (changeNames, builderNames, timestamps, eventGrid,
        sourceEvents): eventGrid is a list of rows (one per span), each
        row a list of cells (one per source), each cell a list of events;
        timestamps[r] is the earliest event time in row r; sourceEvents
        holds each source's next (not yet consumed) event.
        """
        debug = False
        # TODO: see if we can use a cached copy

        # show_events=true (the default) includes builder.Event items
        # (slave attach/detach etc.) in addition to builds
        showEvents = False
        if request.args.get("show_events", ["true"])[0].lower() == "true":
            showEvents = True
        filterBranches = [b for b in request.args.get("branch", []) if b]
        filterBranches = map_branches(filterBranches)
        # display window: [minTime, maxTime]; show_time overrides first_time
        maxTime = int(request.args.get("last_time", [util.now()])[0])
        if "show_time" in request.args:
            minTime = maxTime - int(request.args["show_time"][0])
        elif "first_time" in request.args:
            minTime = int(request.args["first_time"][0])
        else:
            minTime = None
        spanLength = 10 # ten-second chunks
        maxPageLen = int(request.args.get("num_events", [200])[0])

        # first step is to walk backwards in time, asking each column
        # (commit, all builders) if they have any events there. Build up the
        # array of events, and stop when we have a reasonable number.

        commit_source = self.getChangemaster(request)

        lastEventTime = util.now()
        sources = [commit_source] + builders
        changeNames = ["changes"]
        builderNames = map(lambda builder: builder.getName(), builders)
        sourceNames = changeNames + builderNames
        sourceEvents = []
        sourceGenerators = []

        def get_event_from(g):
            """Pull the next displayable event from generator g, skipping
            builder.Event items when showEvents is off; None when g is
            exhausted."""
            try:
                while True:
                    e = g.next()
                    # e might be builder.BuildStepStatus,
                    # builder.BuildStatus, builder.Event,
                    # waterfall.Spacer(builder.Event), or changes.Change .
                    # The showEvents=False flag means we should hide
                    # builder.Event .
                    if not showEvents and isinstance(e, builder.Event):
                        continue
                    break
                event = interfaces.IStatusEvent(e)
                if debug:
                    log.msg("gen %s gave1 %s" % (g, event.getText()))
            except StopIteration:
                event = None
            return event

        # one gap-inserting generator per source, primed with its first event
        for s in sources:
            gen = insertGaps(s.eventGenerator(filterBranches), lastEventTime)
            sourceGenerators.append(gen)
            # get the first event
            sourceEvents.append(get_event_from(gen))
        eventGrid = []
        timestamps = []

        # the first span ends at the newest start time across all sources
        lastEventTime = 0
        for e in sourceEvents:
            if e and e.getTimes()[0] > lastEventTime:
                lastEventTime = e.getTimes()[0]
        if lastEventTime == 0:
            lastEventTime = util.now()

        spanStart = lastEventTime - spanLength
        debugGather = 0

        while 1:
            if debugGather: log.msg("checking (%s,]" % spanStart)
            # the tableau of potential events is in sourceEvents[]. The
            # window crawls backwards, and we examine one source at a time.
            # If the source's top-most event is in the window, is it pushed
            # onto the events[] array and the tableau is refilled. This
            # continues until the tableau event is not in the window (or is
            # missing).

            spanEvents = [] # for all sources, in this span. row of eventGrid
            firstTimestamp = None # timestamp of first event in the span
            lastTimestamp = None # last pre-span event, for next span

            for c in range(len(sourceGenerators)):
                events = [] # for this source, in this span. cell of eventGrid
                event = sourceEvents[c]
                while event and spanStart < event.getTimes()[0]:
                    # to look at windows that don't end with the present,
                    # condition the .append on event.time <= spanFinish
                    if not IBox(event, None):
                        log.msg("BAD EVENT", event, event.getText())
                        assert 0
                    if debug:
                        log.msg("pushing", event.getText(), event)
                    events.append(event)
                    starts, finishes = event.getTimes()
                    firstTimestamp = util.earlier(firstTimestamp, starts)
                    event = get_event_from(sourceGenerators[c])
                if debug:
                    log.msg("finished span")

                if event:
                    # this is the last pre-span event for this source
                    lastTimestamp = util.later(lastTimestamp,
                                               event.getTimes()[0])
                if debugGather:
                    log.msg(" got %s from %s" % (events, sourceNames[c]))
                sourceEvents[c] = event # refill the tableau
                spanEvents.append(events)

            # only show events older than maxTime. This makes it possible to
            # visit a page that shows what it would be like to scroll off the
            # bottom of this one.
            if firstTimestamp is not None and firstTimestamp <= maxTime:
                eventGrid.append(spanEvents)
                timestamps.append(firstTimestamp)

            if lastTimestamp:
                spanStart = lastTimestamp - spanLength
            else:
                # no more events
                break
            # stop once we've walked past the lower bound of the window
            if minTime is not None and lastTimestamp < minTime:
                break

            # stop once the page is long enough
            if len(timestamps) > maxPageLen:
                break


            # now loop

        # loop is finished. now we have eventGrid[] and timestamps[]
        if debugGather: log.msg("finished loop")
        assert(len(timestamps) == len(eventGrid))
        return (changeNames, builderNames, timestamps, eventGrid, sourceEvents)

    def phase0(self, request, sourceNames, timestamps, eventGrid):
        """Render the debug (phase=0) view: a plain text dump of every
        event in the grid, one paragraph per span."""
        # phase0 rendering
        if not timestamps:
            return "no events"
        data = ""
        for r in range(0, len(timestamps)):
            data += "<p>\n"
            data += "[%s]<br />" % timestamps[r]
            row = eventGrid[r]
            assert(len(row) == len(sourceNames))
            for c in range(0, len(row)):
                if row[c]:
                    data += "<b>%s</b><br />\n" % sourceNames[c]
                    for e in row[c]:
                        log.msg("Event", r, c, sourceNames[c], e.getText())
                        lognames = [loog.getName() for loog in e.getLogs()]
                        data += "%s: %s: %s<br />" % (e.getText(),
                                                      e.getTimes()[0],
                                                      lognames)
                else:
                    data += "<b>%s</b> [none]<br />\n" % sourceNames[c]
        return data

    def phase1(self, request, sourceNames, timestamps, eventGrid,
               sourceEvents):
        """Render the phase=1 view: one table row per event, with the
        timestamp cell rowspan-ed over each span's rows and events
        bottom-justified within their span (no box merging)."""
        # phase1 rendering: table, but boxes do not overlap
        data = ""
        if not timestamps:
            return data
        lastDate = None
        for r in range(0, len(timestamps)):
            chunkstrip = eventGrid[r]
            # chunkstrip is a horizontal strip of event blocks. Each block
            # is a vertical list of events, all for the same source.
            assert(len(chunkstrip) == len(sourceNames))
            # height of this span = tallest cell in the strip
            maxRows = reduce(lambda x,y: max(x,y),
                             map(lambda x: len(x), chunkstrip))
            for i in range(maxRows):
                data += " <tr>\n";
                if i == 0:
                    stuff = []
                    # add the date at the beginning, and each time it changes
                    today = time.strftime("<b>%d %b %Y</b>",
                                          time.localtime(timestamps[r]))
                    todayday = time.strftime("<b>%a</b>",
                                             time.localtime(timestamps[r]))
                    if today != lastDate:
                        stuff.append(todayday)
                        stuff.append(today)
                        lastDate = today
                    stuff.append(
                        time.strftime("%H:%M:%S",
                                      time.localtime(timestamps[r])))
                    data += td(stuff, valign="bottom", align="center",
                               rowspan=maxRows, class_="Time")
                for c in range(0, len(chunkstrip)):
                    block = chunkstrip[c]
                    assert(block != None) # should be [] instead
                    # bottom-justify
                    offset = maxRows - len(block)
                    if i < offset:
                        data += td("")
                    else:
                        e = block[i-offset]
                        box = IBox(e).getBox(request)
                        box.parms["show_idle"] = 1
                        data += box.td(valign="top", align="center")
                data += " </tr>\n"

        return data

    def phase2(self, request, sourceNames, timestamps, eventGrid,
               sourceEvents):
        """Render the default (phase=2) view in three passes: (1) populate
        a column-major grid of Boxes, bottom-justified per span; (2) unless
        nobubble=1, "bubble" boxes upwards into empty (None) cells by
        growing their rowspan; (3) emit the HTML table."""
        data = ""
        if not timestamps:
            return data
        # first pass: figure out the height of the chunks, populate grid
        grid = []
        for i in range(1+len(sourceNames)):
            grid.append([])
        # grid is a list of columns, one for the timestamps, and one per
        # event source. Each column is exactly the same height. Each element
        # of the list is a single <td> box.
        lastDate = time.strftime("<b>%d %b %Y</b>",
                                 time.localtime(util.now()))
        for r in range(0, len(timestamps)):
            chunkstrip = eventGrid[r]
            # chunkstrip is a horizontal strip of event blocks. Each block
            # is a vertical list of events, all for the same source.
            assert(len(chunkstrip) == len(sourceNames))
            maxRows = reduce(lambda x,y: max(x,y),
                             map(lambda x: len(x), chunkstrip))
            for i in range(maxRows):
                if i != maxRows-1:
                    grid[0].append(None)
                else:
                    # timestamp goes at the bottom of the chunk
                    stuff = []
                    # add the date at the beginning (if it is not the same as
                    # today's date), and each time it changes
                    todayday = time.strftime("<b>%a</b>",
                                             time.localtime(timestamps[r]))
                    today = time.strftime("<b>%d %b %Y</b>",
                                          time.localtime(timestamps[r]))
                    if today != lastDate:
                        stuff.append(todayday)
                        stuff.append(today)
                        lastDate = today
                    stuff.append(
                        time.strftime("%H:%M:%S",
                                      time.localtime(timestamps[r])))
                    grid[0].append(Box(text=stuff, class_="Time",
                                       valign="bottom", align="center"))

            # at this point the timestamp column has been populated with
            # maxRows boxes, most None but the last one has the time string
            for c in range(0, len(chunkstrip)):
                block = chunkstrip[c]
                assert(block != None) # should be [] instead
                for i in range(maxRows - len(block)):
                    # fill top of chunk with blank space
                    grid[c+1].append(None)
                for i in range(len(block)):
                    # so the events are bottom-justified
                    b = IBox(block[i]).getBox(request)
                    b.parms['valign'] = "top"
                    b.parms['align'] = "center"
                    grid[c+1].append(b)
            # now all the other columns have maxRows new boxes too
        # populate the last row, if empty
        gridlen = len(grid[0])
        for i in range(len(grid)):
            strip = grid[i]
            assert(len(strip) == gridlen)
            if strip[-1] == None:
                # use the source's next (pre-window) event as a filler,
                # so the bottom row always has content
                if sourceEvents[i-1]:
                    filler = IBox(sourceEvents[i-1]).getBox(request)
                else:
                    # this can happen if you delete part of the build history
                    filler = Box(text=["?"], align="center")
                strip[-1] = filler
            strip[-1].parms['rowspan'] = 1
        # second pass: bubble the events upwards to un-occupied locations
        # Every square of the grid that has a None in it needs to have
        # something else take its place.
        noBubble = request.args.get("nobubble",['0'])
        noBubble = int(noBubble[0])
        if not noBubble:
            for col in range(len(grid)):
                strip = grid[col]
                if col == 1: # changes are handled differently
                    for i in range(2, len(strip)+1):
                        # only merge empty boxes. Don't bubble commit boxes.
                        if strip[-i] == None:
                            next = strip[-i+1]
                            assert(next)
                            if next:
                                #if not next.event:
                                if next.spacer:
                                    # bubble the empty box up
                                    strip[-i] = next
                                    strip[-i].parms['rowspan'] += 1
                                    strip[-i+1] = None
                                else:
                                    # we are above a commit box. Leave it
                                    # be, and turn the current box into an
                                    # empty one
                                    strip[-i] = Box([], rowspan=1,
                                                    comment="commit bubble")
                                    strip[-i].spacer = True
                            else:
                                # we are above another empty box, which
                                # somehow wasn't already converted.
                                # Shouldn't happen
                                pass
                else:
                    for i in range(2, len(strip)+1):
                        # strip[-i] will go from next-to-last back to first
                        if strip[-i] == None:
                            # bubble previous item up
                            assert(strip[-i+1] != None)
                            strip[-i] = strip[-i+1]
                            strip[-i].parms['rowspan'] += 1
                            strip[-i+1] = None
                        else:
                            strip[-i].parms['rowspan'] = 1
        # third pass: render the HTML table
        for i in range(gridlen):
            data += " <tr>\n";
            for strip in grid:
                b = strip[i]
                if b:
                    data += b.td()
                else:
                    if noBubble:
                        data += td([])
                    # Nones are left empty, rowspan should make it all fit
            data += " </tr>\n"
        return data
+
diff --git a/buildbot/buildbot/status/web/xmlrpc.py b/buildbot/buildbot/status/web/xmlrpc.py
new file mode 100644
index 0000000..234e7ff
--- /dev/null
+++ b/buildbot/buildbot/status/web/xmlrpc.py
@@ -0,0 +1,203 @@
+
+from twisted.python import log
+from twisted.web import xmlrpc
+from buildbot.status.builder import Results
+from itertools import count
+
class XMLRPCServer(xmlrpc.XMLRPC):
    """XML-RPC interface onto the buildmaster's status.

    The IStatus/IControl objects are captured lazily in render(), since
    they hang off the request's site and are not available at
    construction time.
    """

    def __init__(self):
        xmlrpc.XMLRPC.__init__(self)

    def render(self, req):
        # extract the IStatus and IControl objects for later use, since they
        # come from the request object. They'll be the same each time, but
        # they aren't available until the first request arrives.
        self.status = req.site.buildbot_service.getStatus()
        self.control = req.site.buildbot_service.getControl()
        return xmlrpc.XMLRPC.render(self, req)

    def _build_answer(self, builder_name, build, build_end):
        """Serialize one finished build into the wire-format tuple
        (buildername, buildnumber, build_end, branchname, revision,
        results, text).

        Shared by xmlrpc_getLastBuilds and xmlrpc_getAllBuildsInInterval,
        which previously duplicated this logic.
        """
        ss = build.getSourceStamp()
        branch = ss.branch
        if branch is None:
            # an empty string stands in for None (the default branch)
            branch = ""
        try:
            revision = build.getProperty("got_revision")
        except KeyError:
            revision = ""
        revision = str(revision)

        return (builder_name,
                build.getNumber(),
                build_end,
                branch,
                revision,
                Results[build.getResults()],
                build.getText(),
                )

    def _sort_and_strip(self, all_builds):
        """Sort a list of (build_end, answer) pairs by end time and return
        just the answers."""
        # key= instead of the Python-2-only cmp= comparator form: the sort
        # is stable in both cases, so ordering is identical, and key= also
        # works on Python 3.
        all_builds.sort(key=lambda t: t[0])
        return [t[1] for t in all_builds]

    def xmlrpc_getAllBuilders(self):
        """Return a list of all builder names
        """
        log.msg("getAllBuilders")
        return self.status.getBuilderNames()

    def xmlrpc_getLastBuildResults(self, builder_name):
        """Return the result of the last build for the given builder
        """
        builder = self.status.getBuilder(builder_name)
        lastbuild = builder.getBuild(-1)
        return Results[lastbuild.getResults()]

    def xmlrpc_getLastBuilds(self, builder_name, num_builds):
        """Return the last N completed builds for the given builder.
        'builder_name' is the name of the builder to query
        'num_builds' is the number of builds to return

        Each build is returned in the same form as xmlrpc_getAllBuildsInInterval
        """
        log.msg("getLastBuilds: %s - %d" % (builder_name, num_builds))
        builder = self.status.getBuilder(builder_name)
        all_builds = []
        for build_number in range(1, num_builds+1):
            build = builder.getBuild(-build_number)
            if not build:
                break
            if not build.isFinished():
                # unfinished builds still consume one of the N slots,
                # matching the historical behavior of this method
                continue
            (build_start, build_end) = build.getTimes()
            all_builds.append((build_end,
                               self._build_answer(builder_name, build,
                                                  build_end)))

        # now we've gotten all the builds we're interested in. Sort them by
        # end time.
        all_builds = self._sort_and_strip(all_builds)

        log.msg("ready to go: %s" % (all_builds,))

        return all_builds


    def xmlrpc_getAllBuildsInInterval(self, start, stop):
        """Return a list of builds that have completed after the 'start'
        timestamp and before the 'stop' timestamp. This looks at all
        Builders.

        The timestamps are integers, interpreted as standard unix timestamps
        (seconds since epoch).

        Each Build is returned as a tuple in the form::
         (buildername, buildnumber, build_end, branchname, revision,
          results, text)

        The buildnumber is an integer. 'build_end' is an integer (seconds
        since epoch) specifying when the build finished.

        The branchname is a string, which may be an empty string to indicate
        None (i.e. the default branch). The revision is a string whose
        meaning is specific to the VC system in use, and comes from the
        'got_revision' build property. The results are expressed as a string,
        one of ('success', 'warnings', 'failure', 'exception'). The text is a
        list of short strings that ought to be joined by spaces and include
        slightly more data about the results of the build.
        """
        log.msg("getAllBuildsInInterval: %d - %d" % (start, stop))
        all_builds = []

        for builder_name in self.status.getBuilderNames():
            builder = self.status.getBuilder(builder_name)
            for build_number in count(1):
                build = builder.getBuild(-build_number)
                if not build:
                    break
                if not build.isFinished():
                    continue
                (build_start, build_end) = build.getTimes()
                # in reality, builds are mostly ordered by start time. For
                # the purposes of this method, we pretend that they are
                # strictly ordered by end time, so that we can stop searching
                # when we start seeing builds that are outside the window.
                if build_end > stop:
                    continue # keep looking
                if build_end < start:
                    break # stop looking
                all_builds.append((build_end,
                                   self._build_answer(builder_name, build,
                                                      build_end)))
            # we've gotten all the builds that we care about from this
            # particular builder, so now we can continue on the next builder

        # now we've gotten all the builds we're interested in. Sort them by
        # end time.
        all_builds = self._sort_and_strip(all_builds)

        log.msg("ready to go: %s" % (all_builds,))

        return all_builds

    def xmlrpc_getBuild(self, builder_name, build_number):
        """Return a dict describing a specific build: name, url, reason,
        slavename, results, text, number, branch, revision, start/end
        times, per-step info, and log names (log text is not included).
        """
        builder = self.status.getBuilder(builder_name)
        build = builder.getBuild(build_number)
        info = {}
        info['builder_name'] = builder.getName()
        info['url'] = self.status.getURLForThing(build) or ''
        info['reason'] = build.getReason()
        info['slavename'] = build.getSlavename()
        info['results'] = build.getResults()
        info['text'] = build.getText()
        # Added to help out requests for build -N
        info['number'] = build.number
        ss = build.getSourceStamp()
        branch = ss.branch
        if branch is None:
            branch = ""
        info['branch'] = str(branch)
        try:
            revision = str(build.getProperty("got_revision"))
        except KeyError:
            revision = ""
        info['revision'] = str(revision)
        info['start'], info['end'] = build.getTimes()

        info_steps = []
        for s in build.getSteps():
            stepinfo = {}
            stepinfo['name'] = s.getName()
            stepinfo['start'], stepinfo['end'] = s.getTimes()
            stepinfo['results'] = s.getResults()
            info_steps.append(stepinfo)
        info['steps'] = info_steps

        info_logs = []
        for l in build.getLogs():
            loginfo = {}
            loginfo['name'] = l.getStep().getName() + "/" + l.getName()
            # log bodies can be enormous; deliberately not serialized here
            #loginfo['text'] = l.getText()
            loginfo['text'] = "HUGE"
            info_logs.append(loginfo)
        info['logs'] = info_logs
        return info
+
diff --git a/buildbot/buildbot/status/words.py b/buildbot/buildbot/status/words.py
new file mode 100644
index 0000000..0e98651
--- /dev/null
+++ b/buildbot/buildbot/status/words.py
@@ -0,0 +1,875 @@
+
+# code to deliver build status through twisted.words (instant messaging
+# protocols: irc, etc)
+
+import re, shlex
+
+from zope.interface import Interface, implements
+from twisted.internet import protocol, reactor
+from twisted.words.protocols import irc
+from twisted.python import log, failure
+from twisted.application import internet
+
+from buildbot import interfaces, util
+from buildbot import version
+from buildbot.sourcestamp import SourceStamp
+from buildbot.process.base import BuildRequest
+from buildbot.status import base
+from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE, EXCEPTION
+from buildbot.scripts.runner import ForceOptions
+
+from string import join, capitalize, lower
+
+class UsageError(ValueError):
+ def __init__(self, string = "Invalid usage", *more):
+ ValueError.__init__(self, string, *more)
+
+class IrcBuildRequest:
+ hasStarted = False
+ timer = None
+
+ def __init__(self, parent):
+ self.parent = parent
+ self.timer = reactor.callLater(5, self.soon)
+
+ def soon(self):
+ del self.timer
+ if not self.hasStarted:
+ self.parent.send("The build has been queued, I'll give a shout"
+ " when it starts")
+
+ def started(self, c):
+ self.hasStarted = True
+ if self.timer:
+ self.timer.cancel()
+ del self.timer
+ s = c.getStatus()
+ eta = s.getETA()
+ response = "build #%d forced" % s.getNumber()
+ if eta is not None:
+ response = "build forced [ETA %s]" % self.parent.convertTime(eta)
+ self.parent.send(response)
+ self.parent.send("I'll give a shout when the build finishes")
+ d = s.waitUntilFinished()
+ d.addCallback(self.parent.watchedBuildFinished)
+
+
+class Contact:
+ """I hold the state for a single user's interaction with the buildbot.
+
+ This base class provides all the basic behavior (the queries and
+ responses). Subclasses for each channel type (IRC, different IM
+ protocols) are expected to provide the lower-level send/receive methods.
+
+ There will be one instance of me for each user who interacts personally
+ with the buildbot. There will be an additional instance for each
+ 'broadcast contact' (chat rooms, IRC channels as a whole).
+ """
+
+ def __init__(self, channel):
+ self.channel = channel
+ self.notify_events = {}
+ self.subscribed = 0
+ self.add_notification_events(channel.notify_events)
+
+ silly = {
+ "What happen ?": "Somebody set up us the bomb.",
+ "It's You !!": ["How are you gentlemen !!",
+ "All your base are belong to us.",
+ "You are on the way to destruction."],
+ "What you say !!": ["You have no chance to survive make your time.",
+ "HA HA HA HA ...."],
+ }
+
+ def getCommandMethod(self, command):
+ meth = getattr(self, 'command_' + command.upper(), None)
+ return meth
+
+ def getBuilder(self, which):
+ try:
+ b = self.channel.status.getBuilder(which)
+ except KeyError:
+ raise UsageError, "no such builder '%s'" % which
+ return b
+
+ def getControl(self, which):
+ if not self.channel.control:
+ raise UsageError("builder control is not enabled")
+ try:
+ bc = self.channel.control.getBuilder(which)
+ except KeyError:
+ raise UsageError("no such builder '%s'" % which)
+ return bc
+
+ def getAllBuilders(self):
+ """
+ @rtype: list of L{buildbot.process.builder.Builder}
+ """
+ names = self.channel.status.getBuilderNames(categories=self.channel.categories)
+ names.sort()
+ builders = [self.channel.status.getBuilder(n) for n in names]
+ return builders
+
+ def convertTime(self, seconds):
+ if seconds < 60:
+ return "%d seconds" % seconds
+ minutes = int(seconds / 60)
+ seconds = seconds - 60*minutes
+ if minutes < 60:
+ return "%dm%02ds" % (minutes, seconds)
+ hours = int(minutes / 60)
+ minutes = minutes - 60*hours
+ return "%dh%02dm%02ds" % (hours, minutes, seconds)
+
+ def doSilly(self, message):
+ response = self.silly[message]
+ if type(response) != type([]):
+ response = [response]
+ when = 0.5
+ for r in response:
+ reactor.callLater(when, self.send, r)
+ when += 2.5
+
+ def command_HELLO(self, args, who):
+ self.send("yes?")
+
+ def command_VERSION(self, args, who):
+ self.send("buildbot-%s at your service" % version)
+
+ def command_LIST(self, args, who):
+ args = args.split()
+ if len(args) == 0:
+ raise UsageError, "try 'list builders'"
+ if args[0] == 'builders':
+ builders = self.getAllBuilders()
+ str = "Configured builders: "
+ for b in builders:
+ str += b.name
+ state = b.getState()[0]
+ if state == 'offline':
+ str += "[offline]"
+ str += " "
+ str.rstrip()
+ self.send(str)
+ return
+ command_LIST.usage = "list builders - List configured builders"
+
+ def command_STATUS(self, args, who):
+ args = args.split()
+ if len(args) == 0:
+ which = "all"
+ elif len(args) == 1:
+ which = args[0]
+ else:
+ raise UsageError, "try 'status <builder>'"
+ if which == "all":
+ builders = self.getAllBuilders()
+ for b in builders:
+ self.emit_status(b.name)
+ return
+ self.emit_status(which)
+ command_STATUS.usage = "status [<which>] - List status of a builder (or all builders)"
+
+ def validate_notification_event(self, event):
+ if not re.compile("^(started|finished|success|failure|exception|warnings|(success|warnings|exception|failure)To(Failure|Success|Warnings|Exception))$").match(event):
+ raise UsageError("try 'notify on|off <EVENT>'")
+
+ def list_notified_events(self):
+ self.send( "The following events are being notified: %r" % self.notify_events.keys() )
+
+ def notify_for(self, *events):
+ for event in events:
+ if self.notify_events.has_key(event):
+ return 1
+ return 0
+
+ def subscribe_to_build_events(self):
+ self.channel.status.subscribe(self)
+ self.subscribed = 1
+
+ def unsubscribe_from_build_events(self):
+ self.channel.status.unsubscribe(self)
+ self.subscribed = 0
+
+ def add_notification_events(self, events):
+ for event in events:
+ self.validate_notification_event(event)
+ self.notify_events[event] = 1
+
+ if not self.subscribed:
+ self.subscribe_to_build_events()
+
+ def remove_notification_events(self, events):
+ for event in events:
+ self.validate_notification_event(event)
+ del self.notify_events[event]
+
+ if len(self.notify_events) == 0 and self.subscribed:
+ self.unsubscribe_from_build_events()
+
+ def remove_all_notification_events(self):
+ self.notify_events = {}
+
+ if self.subscribed:
+ self.unsubscribe_from_build_events()
+
+ def command_NOTIFY(self, args, who):
+ args = args.split()
+
+ if not args:
+ raise UsageError("try 'notify on|off|list <EVENT>'")
+ action = args.pop(0)
+ events = args
+
+ if action == "on":
+ if not events: events = ('started','finished')
+ self.add_notification_events(events)
+
+ self.list_notified_events()
+
+ elif action == "off":
+ if events:
+ self.remove_notification_events(events)
+ else:
+ self.remove_all_notification_events()
+
+ self.list_notified_events()
+
+ elif action == "list":
+ self.list_notified_events()
+ return
+
+ else:
+ raise UsageError("try 'notify on|off <EVENT>'")
+
+ command_NOTIFY.usage = "notify on|off|list [<EVENT>] ... - Notify me about build events. event should be one or more of: 'started', 'finished', 'failure', 'success', 'exception' or 'xToY' (where x and Y are one of success, warnings, failure, exception, but Y is capitalized)"
+
+ def command_WATCH(self, args, who):
+ args = args.split()
+ if len(args) != 1:
+ raise UsageError("try 'watch <builder>'")
+ which = args[0]
+ b = self.getBuilder(which)
+ builds = b.getCurrentBuilds()
+ if not builds:
+ self.send("there are no builds currently running")
+ return
+ for build in builds:
+ assert not build.isFinished()
+ d = build.waitUntilFinished()
+ d.addCallback(self.watchedBuildFinished)
+ r = "watching build %s #%d until it finishes" \
+ % (which, build.getNumber())
+ eta = build.getETA()
+ if eta is not None:
+ r += " [%s]" % self.convertTime(eta)
+ r += ".."
+ self.send(r)
+ command_WATCH.usage = "watch <which> - announce the completion of an active build"
+
+ def buildsetSubmitted(self, buildset):
+ log.msg('[Contact] Buildset %s added' % (buildset))
+
+ def builderAdded(self, builderName, builder):
+ log.msg('[Contact] Builder %s added' % (builder))
+ builder.subscribe(self)
+
+ def builderChangedState(self, builderName, state):
+ log.msg('[Contact] Builder %s changed state to %s' % (builderName, state))
+
+ def requestSubmitted(self, brstatus):
+ log.msg('[Contact] BuildRequest for %s submiitted to Builder %s' %
+ (brstatus.getSourceStamp(), brstatus.builderName))
+
+ def builderRemoved(self, builderName):
+ log.msg('[Contact] Builder %s removed' % (builderName))
+
+ def buildStarted(self, builderName, build):
+ builder = build.getBuilder()
+ log.msg('[Contact] Builder %r in category %s started' % (builder, builder.category))
+
+ # only notify about builders we are interested in
+
+ if (self.channel.categories != None and
+ builder.category not in self.channel.categories):
+ log.msg('Not notifying for a build in the wrong category')
+ return
+
+ if not self.notify_for('started'):
+ log.msg('Not notifying for a build when started-notification disabled')
+ return
+
+ r = "build #%d of %s started" % \
+ (build.getNumber(),
+ builder.getName())
+
+ r += " including [" + ", ".join(map(lambda c: repr(c.revision), build.getChanges())) + "]"
+
+ self.send(r)
+
+ def buildFinished(self, builderName, build, results):
+ builder = build.getBuilder()
+
+ results_descriptions = {
+ SUCCESS: "Success",
+ WARNINGS: "Warnings",
+ FAILURE: "Failure",
+ EXCEPTION: "Exception",
+ }
+
+ # only notify about builders we are interested in
+ log.msg('[Contact] builder %r in category %s finished' % (builder, builder.category))
+
+ if self.notify_for('started'):
+ return
+
+ if (self.channel.categories != None and
+ builder.category not in self.channel.categories):
+ return
+
+ results = build.getResults()
+
+ r = "build #%d of %s is complete: %s" % \
+ (build.getNumber(),
+ builder.getName(),
+ results_descriptions.get(results, "??"))
+ r += " [%s]" % " ".join(build.getText())
+ buildurl = self.channel.status.getURLForThing(build)
+ if buildurl:
+ r += " Build details are at %s" % buildurl
+
+ if self.notify_for('finished') or self.notify_for(lower(results_descriptions.get(results))):
+ self.send(r)
+ return
+
+ prevBuild = build.getPreviousBuild()
+ if prevBuild:
+ prevResult = prevBuild.getResults()
+
+ required_notification_control_string = join((lower(results_descriptions.get(prevResult)), \
+ 'To', \
+ capitalize(results_descriptions.get(results))), \
+ '')
+
+ if (self.notify_for(required_notification_control_string)):
+ self.send(r)
+
+ def watchedBuildFinished(self, b):
+ results = {SUCCESS: "Success",
+ WARNINGS: "Warnings",
+ FAILURE: "Failure",
+ EXCEPTION: "Exception",
+ }
+
+ # only notify about builders we are interested in
+ builder = b.getBuilder()
+ log.msg('builder %r in category %s finished' % (builder,
+ builder.category))
+ if (self.channel.categories != None and
+ builder.category not in self.channel.categories):
+ return
+
+ r = "Hey! build %s #%d is complete: %s" % \
+ (b.getBuilder().getName(),
+ b.getNumber(),
+ results.get(b.getResults(), "??"))
+ r += " [%s]" % " ".join(b.getText())
+ self.send(r)
+ buildurl = self.channel.status.getURLForThing(b)
+ if buildurl:
+ self.send("Build details are at %s" % buildurl)
+
+ def command_FORCE(self, args, who):
+ args = shlex.split(args) # TODO: this requires python2.3 or newer
+ if not args:
+ raise UsageError("try 'force build WHICH <REASON>'")
+ what = args.pop(0)
+ if what != "build":
+ raise UsageError("try 'force build WHICH <REASON>'")
+ opts = ForceOptions()
+ opts.parseOptions(args)
+
+ which = opts['builder']
+ branch = opts['branch']
+ revision = opts['revision']
+ reason = opts['reason']
+
+ if which is None:
+ raise UsageError("you must provide a Builder, "
+ "try 'force build WHICH <REASON>'")
+
+ # keep weird stuff out of the branch and revision strings. TODO:
+ # centralize this somewhere.
+ if branch and not re.match(r'^[\w\.\-\/]*$', branch):
+ log.msg("bad branch '%s'" % branch)
+ self.send("sorry, bad branch '%s'" % branch)
+ return
+ if revision and not re.match(r'^[\w\.\-\/]*$', revision):
+ log.msg("bad revision '%s'" % revision)
+ self.send("sorry, bad revision '%s'" % revision)
+ return
+
+ bc = self.getControl(which)
+
+ r = "forced: by %s: %s" % (self.describeUser(who), reason)
+ # TODO: maybe give certain users the ability to request builds of
+ # certain branches
+ s = SourceStamp(branch=branch, revision=revision)
+ req = BuildRequest(r, s, which)
+ try:
+ bc.requestBuildSoon(req)
+ except interfaces.NoSlaveError:
+ self.send("sorry, I can't force a build: all slaves are offline")
+ return
+ ireq = IrcBuildRequest(self)
+ req.subscribe(ireq.started)
+
+
+ command_FORCE.usage = "force build <which> <reason> - Force a build"
+
+ def command_STOP(self, args, who):
+ args = args.split(None, 2)
+ if len(args) < 3 or args[0] != 'build':
+ raise UsageError, "try 'stop build WHICH <REASON>'"
+ which = args[1]
+ reason = args[2]
+
+ buildercontrol = self.getControl(which)
+
+ r = "stopped: by %s: %s" % (self.describeUser(who), reason)
+
+ # find an in-progress build
+ builderstatus = self.getBuilder(which)
+ builds = builderstatus.getCurrentBuilds()
+ if not builds:
+ self.send("sorry, no build is currently running")
+ return
+ for build in builds:
+ num = build.getNumber()
+
+ # obtain the BuildControl object
+ buildcontrol = buildercontrol.getBuild(num)
+
+ # make it stop
+ buildcontrol.stopBuild(r)
+
+ self.send("build %d interrupted" % num)
+
+ command_STOP.usage = "stop build <which> <reason> - Stop a running build"
+
+ def emit_status(self, which):
+ b = self.getBuilder(which)
+ str = "%s: " % which
+ state, builds = b.getState()
+ str += state
+ if state == "idle":
+ last = b.getLastFinishedBuild()
+ if last:
+ start,finished = last.getTimes()
+ str += ", last build %s ago: %s" % \
+ (self.convertTime(int(util.now() - finished)), " ".join(last.getText()))
+ if state == "building":
+ t = []
+ for build in builds:
+ step = build.getCurrentStep()
+ if step:
+ s = "(%s)" % " ".join(step.getText())
+ else:
+ s = "(no current step)"
+ ETA = build.getETA()
+ if ETA is not None:
+ s += " [ETA %s]" % self.convertTime(ETA)
+ t.append(s)
+ str += ", ".join(t)
+ self.send(str)
+
+ def emit_last(self, which):
+ last = self.getBuilder(which).getLastFinishedBuild()
+ if not last:
+ str = "(no builds run since last restart)"
+ else:
+ start,finish = last.getTimes()
+ str = "%s ago: " % (self.convertTime(int(util.now() - finish)))
+ str += " ".join(last.getText())
+ self.send("last build [%s]: %s" % (which, str))
+
+ def command_LAST(self, args, who):
+ args = args.split()
+ if len(args) == 0:
+ which = "all"
+ elif len(args) == 1:
+ which = args[0]
+ else:
+ raise UsageError, "try 'last <builder>'"
+ if which == "all":
+ builders = self.getAllBuilders()
+ for b in builders:
+ self.emit_last(b.name)
+ return
+ self.emit_last(which)
+ command_LAST.usage = "last <which> - list last build status for builder <which>"
+
+ def build_commands(self):
+ commands = []
+ for k in dir(self):
+ if k.startswith('command_'):
+ commands.append(k[8:].lower())
+ commands.sort()
+ return commands
+
+ def command_HELP(self, args, who):
+ args = args.split()
+ if len(args) == 0:
+ self.send("Get help on what? (try 'help <foo>', or 'commands' for a command list)")
+ return
+ command = args[0]
+ meth = self.getCommandMethod(command)
+ if not meth:
+ raise UsageError, "no such command '%s'" % command
+ usage = getattr(meth, 'usage', None)
+ if usage:
+ self.send("Usage: %s" % usage)
+ else:
+ self.send("No usage info for '%s'" % command)
+ command_HELP.usage = "help <command> - Give help for <command>"
+
+ def command_SOURCE(self, args, who):
+ banner = "My source can be found at http://buildbot.net/"
+ self.send(banner)
+
+ def command_COMMANDS(self, args, who):
+ commands = self.build_commands()
+ str = "buildbot commands: " + ", ".join(commands)
+ self.send(str)
+ command_COMMANDS.usage = "commands - List available commands"
+
+ def command_DESTROY(self, args, who):
+ self.act("readies phasers")
+
+ def command_DANCE(self, args, who):
+ reactor.callLater(1.0, self.send, "0-<")
+ reactor.callLater(3.0, self.send, "0-/")
+ reactor.callLater(3.5, self.send, "0-\\")
+
+ def command_EXCITED(self, args, who):
+ # like 'buildbot: destroy the sun!'
+ self.send("What you say!")
+
+ def handleAction(self, data, user):
+ # this is sent when somebody performs an action that mentions the
+ # buildbot (like '/me kicks buildbot'). 'user' is the name/nick/id of
+ # the person who performed the action, so if their action provokes a
+ # response, they can be named.
+ if not data.endswith("s buildbot"):
+ return
+ words = data.split()
+ verb = words[-2]
+ timeout = 4
+ if verb == "kicks":
+ response = "%s back" % verb
+ timeout = 1
+ else:
+ response = "%s %s too" % (verb, user)
+ reactor.callLater(timeout, self.act, response)
+
+class IRCContact(Contact):
+ # this is the IRC-specific subclass of Contact
+
+ def __init__(self, channel, dest):
+ Contact.__init__(self, channel)
+ # when people send us public messages ("buildbot: command"),
+ # self.dest is the name of the channel ("#twisted"). When they send
+ # us private messages (/msg buildbot command), self.dest is their
+ # username.
+ self.dest = dest
+
+ def describeUser(self, user):
+ if self.dest[0] == "#":
+ return "IRC user <%s> on channel %s" % (user, self.dest)
+ return "IRC user <%s> (privmsg)" % user
+
+ # userJoined(self, user, channel)
+
+ def send(self, message):
+ self.channel.msg(self.dest, message.encode("ascii", "replace"))
+ def act(self, action):
+ self.channel.me(self.dest, action.encode("ascii", "replace"))
+
+ def command_JOIN(self, args, who):
+ args = args.split()
+ to_join = args[0]
+ self.channel.join(to_join)
+ self.send("Joined %s" % to_join)
+ command_JOIN.usage = "join channel - Join another channel"
+
+ def command_LEAVE(self, args, who):
+ args = args.split()
+ to_leave = args[0]
+ self.send("Buildbot has been told to leave %s" % to_leave)
+ self.channel.part(to_leave)
+ command_LEAVE.usage = "leave channel - Leave a channel"
+
+
+ def handleMessage(self, message, who):
+ # a message has arrived from 'who'. For broadcast contacts (i.e. when
+ # people do an irc 'buildbot: command'), this will be a string
+ # describing the sender of the message in some useful-to-log way, and
+ # a single Contact may see messages from a variety of users. For
+ # unicast contacts (i.e. when people do an irc '/msg buildbot
+ # command'), a single Contact will only ever see messages from a
+ # single user.
+ message = message.lstrip()
+ if self.silly.has_key(message):
+ return self.doSilly(message)
+
+ parts = message.split(' ', 1)
+ if len(parts) == 1:
+ parts = parts + ['']
+ cmd, args = parts
+ log.msg("irc command", cmd)
+
+ meth = self.getCommandMethod(cmd)
+ if not meth and message[-1] == '!':
+ meth = self.command_EXCITED
+
+ error = None
+ try:
+ if meth:
+ meth(args.strip(), who)
+ except UsageError, e:
+ self.send(str(e))
+ except:
+ f = failure.Failure()
+ log.err(f)
+ error = "Something bad happened (see logs): %s" % f.type
+
+ if error:
+ try:
+ self.send(error)
+ except:
+ log.err()
+
+ #self.say(channel, "count %d" % self.counter)
+ self.channel.counter += 1
+
+class IChannel(Interface):
+ """I represent the buildbot's presence in a particular IM scheme.
+
+ This provides the connection to the IRC server, or represents the
+ buildbot's account with an IM service. Each Channel will have zero or
+ more Contacts associated with it.
+ """
+
+class IrcStatusBot(irc.IRCClient):
+ """I represent the buildbot to an IRC server.
+ """
+ implements(IChannel)
+
+ def __init__(self, nickname, password, channels, status, categories, notify_events):
+ """
+ @type nickname: string
+ @param nickname: the nickname by which this bot should be known
+ @type password: string
+ @param password: the password to use for identifying with Nickserv
+ @type channels: list of strings
+ @param channels: the bot will maintain a presence in these channels
+ @type status: L{buildbot.status.builder.Status}
+ @param status: the build master's Status object, through which the
+ bot retrieves all status information
+ """
+ self.nickname = nickname
+ self.channels = channels
+ self.password = password
+ self.status = status
+ self.categories = categories
+ self.notify_events = notify_events
+ self.counter = 0
+ self.hasQuit = 0
+ self.contacts = {}
+
+ def addContact(self, name, contact):
+ self.contacts[name] = contact
+
+ def getContact(self, name):
+ if name in self.contacts:
+ return self.contacts[name]
+ new_contact = IRCContact(self, name)
+ self.contacts[name] = new_contact
+ return new_contact
+
+ def deleteContact(self, contact):
+ name = contact.getName()
+ if name in self.contacts:
+ assert self.contacts[name] == contact
+ del self.contacts[name]
+
+ def log(self, msg):
+ log.msg("%s: %s" % (self, msg))
+
+
+ # the following irc.IRCClient methods are called when we have input
+
+ def privmsg(self, user, channel, message):
+ user = user.split('!', 1)[0] # rest is ~user@hostname
+ # channel is '#twisted' or 'buildbot' (for private messages)
+ channel = channel.lower()
+ #print "privmsg:", user, channel, message
+ if channel == self.nickname:
+ # private message
+ contact = self.getContact(user)
+ contact.handleMessage(message, user)
+ return
+ # else it's a broadcast message, maybe for us, maybe not. 'channel'
+ # is '#twisted' or the like.
+ contact = self.getContact(channel)
+ if message.startswith("%s:" % self.nickname) or message.startswith("%s," % self.nickname):
+ message = message[len("%s:" % self.nickname):]
+ contact.handleMessage(message, user)
+ # to track users comings and goings, add code here
+
+ def action(self, user, channel, data):
+ #log.msg("action: %s,%s,%s" % (user, channel, data))
+ user = user.split('!', 1)[0] # rest is ~user@hostname
+ # somebody did an action (/me actions) in the broadcast channel
+ contact = self.getContact(channel)
+ if "buildbot" in data:
+ contact.handleAction(data, user)
+
+
+
+ def signedOn(self):
+ if self.password:
+ self.msg("Nickserv", "IDENTIFY " + self.password)
+ for c in self.channels:
+ self.join(c)
+
+ def joined(self, channel):
+ self.log("I have joined %s" % (channel,))
+ def left(self, channel):
+ self.log("I have left %s" % (channel,))
+ def kickedFrom(self, channel, kicker, message):
+ self.log("I have been kicked from %s by %s: %s" % (channel,
+ kicker,
+ message))
+
+    # we can use the following irc.IRCClient methods to send output. Most
+ # of these are used by the IRCContact class.
+ #
+ # self.say(channel, message) # broadcast
+ # self.msg(user, message) # unicast
+ # self.me(channel, action) # send action
+ # self.away(message='')
+ # self.quit(message='')
+
+class ThrottledClientFactory(protocol.ClientFactory):
+ lostDelay = 2
+ failedDelay = 60
+ def clientConnectionLost(self, connector, reason):
+ reactor.callLater(self.lostDelay, connector.connect)
+ def clientConnectionFailed(self, connector, reason):
+ reactor.callLater(self.failedDelay, connector.connect)
+
+class IrcStatusFactory(ThrottledClientFactory):
+ protocol = IrcStatusBot
+
+ status = None
+ control = None
+ shuttingDown = False
+ p = None
+
+ def __init__(self, nickname, password, channels, categories, notify_events):
+ #ThrottledClientFactory.__init__(self) # doesn't exist
+ self.status = None
+ self.nickname = nickname
+ self.password = password
+ self.channels = channels
+ self.categories = categories
+ self.notify_events = notify_events
+
+ def __getstate__(self):
+ d = self.__dict__.copy()
+ del d['p']
+ return d
+
+ def shutdown(self):
+ self.shuttingDown = True
+ if self.p:
+ self.p.quit("buildmaster reconfigured: bot disconnecting")
+
+ def buildProtocol(self, address):
+ p = self.protocol(self.nickname, self.password,
+ self.channels, self.status,
+ self.categories, self.notify_events)
+ p.factory = self
+ p.status = self.status
+ p.control = self.control
+ self.p = p
+ return p
+
+ # TODO: I think a shutdown that occurs while the connection is being
+ # established will make this explode
+
+ def clientConnectionLost(self, connector, reason):
+ if self.shuttingDown:
+ log.msg("not scheduling reconnection attempt")
+ return
+ ThrottledClientFactory.clientConnectionLost(self, connector, reason)
+
+ def clientConnectionFailed(self, connector, reason):
+ if self.shuttingDown:
+ log.msg("not scheduling reconnection attempt")
+ return
+ ThrottledClientFactory.clientConnectionFailed(self, connector, reason)
+
+
+class IRC(base.StatusReceiverMultiService):
+ """I am an IRC bot which can be queried for status information. I
+ connect to a single IRC server and am known by a single nickname on that
+ server, however I can join multiple channels."""
+
+ compare_attrs = ["host", "port", "nick", "password",
+ "channels", "allowForce",
+ "categories"]
+
+ def __init__(self, host, nick, channels, port=6667, allowForce=True,
+ categories=None, password=None, notify_events={}):
+ base.StatusReceiverMultiService.__init__(self)
+
+ assert allowForce in (True, False) # TODO: implement others
+
+ # need to stash these so we can detect changes later
+ self.host = host
+ self.port = port
+ self.nick = nick
+ self.channels = channels
+ self.password = password
+ self.allowForce = allowForce
+ self.categories = categories
+ self.notify_events = notify_events
+
+ # need to stash the factory so we can give it the status object
+ self.f = IrcStatusFactory(self.nick, self.password,
+ self.channels, self.categories, self.notify_events)
+
+ c = internet.TCPClient(host, port, self.f)
+ c.setServiceParent(self)
+
+ def setServiceParent(self, parent):
+ base.StatusReceiverMultiService.setServiceParent(self, parent)
+ self.f.status = parent.getStatus()
+ if self.allowForce:
+ self.f.control = interfaces.IControl(parent)
+
+ def stopService(self):
+ # make sure the factory will stop reconnecting
+ self.f.shutdown()
+ return base.StatusReceiverMultiService.stopService(self)
+
+
+## buildbot: list builders
+# buildbot: watch quick
+# print notification when current build in 'quick' finishes
+## buildbot: status
+## buildbot: status full-2.3
+## building, not, % complete, ETA
+## buildbot: force build full-2.3 "reason"
diff --git a/buildbot/buildbot/steps/__init__.py b/buildbot/buildbot/steps/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/buildbot/buildbot/steps/__init__.py
diff --git a/buildbot/buildbot/steps/dummy.py b/buildbot/buildbot/steps/dummy.py
new file mode 100644
index 0000000..9ddfdce
--- /dev/null
+++ b/buildbot/buildbot/steps/dummy.py
@@ -0,0 +1,100 @@
+
+from twisted.internet import reactor
+from buildbot.process.buildstep import BuildStep, LoggingBuildStep
+from buildbot.process.buildstep import LoggedRemoteCommand
+from buildbot.status.builder import SUCCESS, FAILURE
+
+# these classes are used internally by buildbot unit tests
+
+class Dummy(BuildStep):
+ """I am a dummy no-op step, which runs entirely on the master, and simply
+ waits 5 seconds before finishing with SUCCESS
+ """
+
+ haltOnFailure = True
+ flunkOnFailure = True
+ name = "dummy"
+
+ def __init__(self, timeout=5, **kwargs):
+ """
+ @type timeout: int
+ @param timeout: the number of seconds to delay before completing
+ """
+ BuildStep.__init__(self, **kwargs)
+ self.addFactoryArguments(timeout=timeout)
+ self.timeout = timeout
+ self.timer = None
+
+ def start(self):
+ self.step_status.setText(["delay", "%s secs" % self.timeout])
+ self.timer = reactor.callLater(self.timeout, self.done)
+
+ def interrupt(self, reason):
+ if self.timer:
+ self.timer.cancel()
+ self.timer = None
+ self.step_status.setText(["delay", "interrupted"])
+ self.finished(FAILURE)
+
+ def done(self):
+ self.finished(SUCCESS)
+
+class FailingDummy(Dummy):
+ """I am a dummy no-op step that 'runs' master-side and finishes (with a
+ FAILURE status) after 5 seconds."""
+
+ name = "failing dummy"
+
+ def start(self):
+ self.step_status.setText(["boom", "%s secs" % self.timeout])
+ self.timer = reactor.callLater(self.timeout, self.done)
+
+ def done(self):
+ self.finished(FAILURE)
+
+class RemoteDummy(LoggingBuildStep):
+ """I am a dummy no-op step that runs on the remote side and
+ simply waits 5 seconds before completing with success.
+ See L{buildbot.slave.commands.DummyCommand}
+ """
+
+ haltOnFailure = True
+ flunkOnFailure = True
+ name = "remote dummy"
+
+ def __init__(self, timeout=5, **kwargs):
+ """
+ @type timeout: int
+ @param timeout: the number of seconds to delay
+ """
+ LoggingBuildStep.__init__(self, **kwargs)
+ self.addFactoryArguments(timeout=timeout)
+ self.timeout = timeout
+ self.description = ["remote", "delay", "%s secs" % timeout]
+
+ def describe(self, done=False):
+ return self.description
+
+ def start(self):
+ args = {'timeout': self.timeout}
+ cmd = LoggedRemoteCommand("dummy", args)
+ self.startCommand(cmd)
+
+class Wait(LoggingBuildStep):
+ """I start a command on the slave that waits for the unit test to
+ tell it when to finish.
+ """
+
+ name = "wait"
+ def __init__(self, handle, **kwargs):
+ LoggingBuildStep.__init__(self, **kwargs)
+ self.addFactoryArguments(handle=handle)
+ self.handle = handle
+
+ def describe(self, done=False):
+ return ["wait: %s" % self.handle]
+
+ def start(self):
+ args = {'handle': (self.handle, self.build.reason)}
+ cmd = LoggedRemoteCommand("dummy.wait", args)
+ self.startCommand(cmd)
diff --git a/buildbot/buildbot/steps/master.py b/buildbot/buildbot/steps/master.py
new file mode 100644
index 0000000..da8a664
--- /dev/null
+++ b/buildbot/buildbot/steps/master.py
@@ -0,0 +1,76 @@
+import os, types
+from twisted.python import log, failure, runtime
+from twisted.internet import reactor, defer, task
+from buildbot.process.buildstep import RemoteCommand, BuildStep
+from buildbot.process.buildstep import SUCCESS, FAILURE
+from twisted.internet.protocol import ProcessProtocol
+
+class MasterShellCommand(BuildStep):
+ """
+ Run a shell command locally - on the buildmaster. The shell command
+ COMMAND is specified just as for a RemoteShellCommand. Note that extra
+ logfiles are not sopported.
+ """
+ name='MasterShellCommand'
+ description='Running'
+ descriptionDone='Ran'
+
+ def __init__(self, command, **kwargs):
+ BuildStep.__init__(self, **kwargs)
+ self.addFactoryArguments(command=command)
+ self.command=command
+
+ class LocalPP(ProcessProtocol):
+ def __init__(self, step):
+ self.step = step
+
+ def outReceived(self, data):
+ self.step.stdio_log.addStdout(data)
+
+ def errReceived(self, data):
+ self.step.stdio_log.addStderr(data)
+
+ def processEnded(self, status_object):
+ self.step.stdio_log.addHeader("exit status %d\n" % status_object.value.exitCode)
+ self.step.processEnded(status_object)
+
+ def start(self):
+ # set up argv
+ if type(self.command) in types.StringTypes:
+ if runtime.platformType == 'win32':
+ argv = os.environ['COMSPEC'].split() # allow %COMSPEC% to have args
+ if '/c' not in argv: argv += ['/c']
+ argv += [self.command]
+ else:
+ # for posix, use /bin/sh. for other non-posix, well, doesn't
+ # hurt to try
+ argv = ['/bin/sh', '-c', self.command]
+ else:
+ if runtime.platformType == 'win32':
+ argv = os.environ['COMSPEC'].split() # allow %COMSPEC% to have args
+ if '/c' not in argv: argv += ['/c']
+ argv += list(self.command)
+ else:
+ argv = self.command
+
+ self.stdio_log = stdio_log = self.addLog("stdio")
+
+ if type(self.command) in types.StringTypes:
+ stdio_log.addHeader(self.command.strip() + "\n\n")
+ else:
+ stdio_log.addHeader(" ".join(self.command) + "\n\n")
+ stdio_log.addHeader("** RUNNING ON BUILDMASTER **\n")
+ stdio_log.addHeader(" in dir %s\n" % os.getcwd())
+ stdio_log.addHeader(" argv: %s\n" % (argv,))
+
+ # TODO add a timeout?
+ proc = reactor.spawnProcess(self.LocalPP(self), argv[0], argv)
+ # (the LocalPP object will call processEnded for us)
+
+ def processEnded(self, status_object):
+ if status_object.value.exitCode != 0:
+ self.step_status.setText(["failed (%d)" % status_object.value.exitCode])
+ self.finished(FAILURE)
+ else:
+ self.step_status.setText(["succeeded"])
+ self.finished(SUCCESS)
diff --git a/buildbot/buildbot/steps/maxq.py b/buildbot/buildbot/steps/maxq.py
new file mode 100644
index 0000000..23538a5
--- /dev/null
+++ b/buildbot/buildbot/steps/maxq.py
@@ -0,0 +1,44 @@
+from buildbot.steps.shell import ShellCommand
+from buildbot.status.builder import Event, SUCCESS, FAILURE
+
+class MaxQ(ShellCommand):
+ flunkOnFailure = True
+ name = "maxq"
+
+ def __init__(self, testdir=None, **kwargs):
+ if not testdir:
+ raise TypeError("please pass testdir")
+ kwargs['command'] = 'run_maxq.py %s' % (testdir,)
+ ShellCommand.__init__(self, **kwargs)
+ self.addFactoryArguments(testdir=testdir)
+
+ def startStatus(self):
+ evt = Event("yellow", ['running', 'maxq', 'tests'],
+ files={'log': self.log})
+ self.setCurrentActivity(evt)
+
+
+ def finished(self, rc):
+ self.failures = 0
+ if rc:
+ self.failures = 1
+ output = self.log.getAll()
+ self.failures += output.count('\nTEST FAILURE:')
+
+ result = (SUCCESS, ['maxq'])
+
+ if self.failures:
+ result = (FAILURE, [str(self.failures), 'maxq', 'failures'])
+
+ return self.stepComplete(result)
+
+ def finishStatus(self, result):
+ if self.failures:
+ text = ["maxq", "failed"]
+ else:
+ text = ['maxq', 'tests']
+ self.updateCurrentActivity(text=text)
+ self.finishStatusSummary()
+ self.finishCurrentActivity()
+
+
diff --git a/buildbot/buildbot/steps/package/__init__.py b/buildbot/buildbot/steps/package/__init__.py
new file mode 100644
index 0000000..d81f066
--- /dev/null
+++ b/buildbot/buildbot/steps/package/__init__.py
@@ -0,0 +1,11 @@
+# Steve 'Ashcrow' Milner <smilner+buildbot@redhat.com>
+#
+# This software may be freely redistributed under the terms of the GNU
+# general public license.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+"""
+Steps specific to package formats.
+"""
diff --git a/buildbot/buildbot/steps/package/rpm/__init__.py b/buildbot/buildbot/steps/package/rpm/__init__.py
new file mode 100644
index 0000000..0d7be6d
--- /dev/null
+++ b/buildbot/buildbot/steps/package/rpm/__init__.py
@@ -0,0 +1,15 @@
+# Steve 'Ashcrow' Milner <smilner+buildbot@redhat.com>
+#
+# This software may be freely redistributed under the terms of the GNU
+# general public license.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+"""
+Steps specific to the rpm format.
+"""
+
+from rpmbuild import RpmBuild
+from rpmspec import RpmSpec
+from rpmlint import RpmLint
diff --git a/buildbot/buildbot/steps/package/rpm/rpmbuild.py b/buildbot/buildbot/steps/package/rpm/rpmbuild.py
new file mode 100644
index 0000000..38bce85
--- /dev/null
+++ b/buildbot/buildbot/steps/package/rpm/rpmbuild.py
@@ -0,0 +1,144 @@
+# Dan Radez <dradez+buildbot@redhat.com>
+# Steve 'Ashcrow' Milner <smilner+buildbot@redhat.com>
+#
+# This software may be freely redistributed under the terms of the GNU
+# general public license.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+"""
+RPM Building steps.
+"""
+
+from buildbot.steps.shell import ShellCommand
+from buildbot.process.buildstep import RemoteShellCommand
+
+
+class RpmBuild(ShellCommand):
+ """
+ Build an RPM based on passed spec filename
+ """
+
+ import os.path
+
+ name = "rpmbuilder"
+ haltOnFailure = 1
+ flunkOnFailure = 1
+ description = ["RPMBUILD"]
+ descriptionDone = ["RPMBUILD"]
+
+ def __init__(self,
+ specfile=None,
+ topdir='`pwd`',
+ builddir='`pwd`',
+ rpmdir='`pwd`',
+ sourcedir='`pwd`',
+ specdir='`pwd`',
+ srcrpmdir='`pwd`',
+ dist='.el5',
+ autoRelease=False,
+ vcsRevision=False,
+ **kwargs):
+ """
+ Creates the RpmBuild object.
+
+ @type specfile: str
+ @param specfile: the name of the spec file for the rpmbuild
+ @type topdir: str
+ @param topdir: the top directory for rpm building.
+ @type builddir: str
+ @param builddir: the directory to use for building
+ @type rpmdir: str
+ @param rpmdir: the directory to dump the rpms into
+ @type sourcedir: str
+ @param sourcedir: the directory that houses source code
+ @type srcrpmdir: str
+ @param srcrpmdir: the directory to dump source rpms into
+ @type dist: str
+ @param dist: the distribution to build for
+ @type autoRelease: boolean
+ @param autoRelease: if the auto release mechanics should be used
+ @type vcsRevision: boolean
+ @param vcsRevision: if the vcs revision mechanics should be used
+ @type kwargs: dict
+ @param kwargs: All further keyword arguments.
+ """
+ ShellCommand.__init__(self, **kwargs)
+ self.addFactoryArguments(topdir=topdir,
+ builddir=builddir,
+ rpmdir=rpmdir,
+ sourcedir=sourcedir,
+ specdir=specdir,
+ srcrpmdir=srcrpmdir,
+ specfile=specfile,
+ dist=dist,
+ autoRelease=autoRelease,
+ vcsRevision=vcsRevision)
+ self.rpmbuild = (
+ 'rpmbuild --define "_topdir %s" --define "_builddir %s"'
+ ' --define "_rpmdir %s" --define "_sourcedir %s"'
+ ' --define "_specdir %s" --define "_srcrpmdir %s"'
+ ' --define "dist %s"' % (topdir, builddir, rpmdir, sourcedir,
+ specdir, srcrpmdir, dist))
+ self.specfile = specfile
+ self.autoRelease = autoRelease
+ self.vcsRevision = vcsRevision
+
+ def start(self):
+ """
+ Buildbot Calls Me when it's time to start
+ """
+ if self.autoRelease:
+ relfile = '%s.release' % (
+ self.os.path.basename(self.specfile).split('.')[0])
+ try:
+ rfile = open(relfile, 'r')
+ rel = int(rfile.readline().strip())
+ rfile.close()
+ except:
+ rel = 0
+ self.rpmbuild = self.rpmbuild + ' --define "_release %s"' % rel
+ rfile = open(relfile, 'w')
+ rfile.write(str(rel+1))
+ rfile.close()
+
+ if self.vcsRevision:
+ self.rpmbuild = self.rpmbuild + ' --define "_revision %s"' % \
+ self.getProperty('got_revision')
+
+ self.rpmbuild = self.rpmbuild + ' -ba %s' % self.specfile
+
+ self.command = ['bash', '-c', self.rpmbuild]
+
+ # create the actual RemoteShellCommand instance now
+ kwargs = self.remote_kwargs
+ kwargs['command'] = self.command
+ cmd = RemoteShellCommand(**kwargs)
+ self.setupEnvironment(cmd)
+ self.checkForOldSlaveAndLogfiles()
+ self.startCommand(cmd)
+
+ def createSummary(self, log):
+ """
+ Create nice summary logs.
+
+ @param log: The log to create summary off of.
+ """
+ rpm_prefixes = ['Provides:', 'Requires(rpmlib):', 'Requires:',
+ 'Checking for unpackaged', 'Wrote:',
+ 'Executing(%', '+ ']
+ rpm_err_pfx = [' ', 'RPM build errors:', 'error: ']
+
+ rpmcmdlog = []
+ rpmerrors = []
+
+ for line in log.readlines():
+ for pfx in rpm_prefixes:
+ if pfx in line:
+ rpmcmdlog.append(line)
+ for err in rpm_err_pfx:
+ if err in line:
+ rpmerrors.append(line)
+ self.addCompleteLog('RPM Command Log', "".join(rpmcmdlog))
+ self.addCompleteLog('RPM Errors', "".join(rpmerrors))
diff --git a/buildbot/buildbot/steps/package/rpm/rpmlint.py b/buildbot/buildbot/steps/package/rpm/rpmlint.py
new file mode 100644
index 0000000..444a44a
--- /dev/null
+++ b/buildbot/buildbot/steps/package/rpm/rpmlint.py
@@ -0,0 +1,51 @@
+# Steve 'Ashcrow' Milner <smilner+buildbot@redhat.com>
+#
+# This software may be freely redistributed under the terms of the GNU
+# general public license.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+"""
+Steps and objects related to rpmlint.
+"""
+
+from buildbot.steps.shell import Test
+
+
+class RpmLint(Test):
+ """
+ Rpmlint build step.
+ """
+
+ description = ["Checking for RPM/SPEC issues"]
+ descriptionDone = ["Finished checking RPM/SPEC issues"]
+
+ def __init__(self, fileloc="*rpm", **kwargs):
+ """
+ Create the Rpmlint object.
+
+ @type fileloc: str
+ @param fileloc: Location glob of the specs or rpms.
+ @type kwargs: dict
+ @param kwargs: all other keyword arguments.
+ """
+ Test.__init__(self, **kwargs)
+ self.command = ["/usr/bin/rpmlint", "-i"]
+ self.command.append(fileloc)
+
+ def createSummary(self, log):
+ """
+ Create nice summary logs.
+
+ @param log: log to create summary off of.
+ """
+ warnings = []
+ errors = []
+ for line in log.readlines():
+ if ' W: ' in line:
+ warnings.append(line)
+ elif ' E: ' in line:
+ errors.append(line)
+ self.addCompleteLog('Rpmlint Warnings', "".join(warnings))
+ self.addCompleteLog('Rpmlint Errors', "".join(errors))
diff --git a/buildbot/buildbot/steps/package/rpm/rpmspec.py b/buildbot/buildbot/steps/package/rpm/rpmspec.py
new file mode 100644
index 0000000..6aa5254
--- /dev/null
+++ b/buildbot/buildbot/steps/package/rpm/rpmspec.py
@@ -0,0 +1,67 @@
+# Dan Radez <dradez+buildbot@redhat.com>
+# Steve 'Ashcrow' Milner <smilner+buildbot@redhat.com>
+#
+# This software may be freely redistributed under the terms of the GNU
+# general public license.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+"""
+library to populate parameters from an rpmspec file into a memory structure
+"""
+
+
+from buildbot.steps.shell import ShellCommand
+
+
+class RpmSpec(ShellCommand):
+ """
+ read parameters out of an rpm spec file
+ """
+
+ import re
+ import types
+
+ #initialize spec info vars and get them from the spec file
+ n_regex = re.compile('^Name:[ ]*([^\s]*)')
+ v_regex = re.compile('^Version:[ ]*([0-9\.]*)')
+
+ def __init__(self, specfile=None, **kwargs):
+ """
+ Creates the RpmSpec object.
+
+ @type specfile: str
+ @param specfile: the name of the specfile to get the package
+ name and version from
+ @type kwargs: dict
+ @param kwargs: All further keyword arguments.
+ """
+ self.specfile = specfile
+ self._pkg_name = None
+ self._pkg_version = None
+ self._loaded = False
+
+ def load(self):
+ """
+ call this function after the file exists to populate properties
+ """
+ # If we are given a string, open it up else assume it's something we
+ # can call read on.
+ if type(self.specfile) == self.types.StringType:
+ f = open(self.specfile, 'r')
+ else:
+ f = self.specfile
+
+ for line in f:
+ if self.v_regex.match(line):
+ self._pkg_version = self.v_regex.match(line).group(1)
+ if self.n_regex.match(line):
+ self._pkg_name = self.n_regex.match(line).group(1)
+ f.close()
+ self._loaded = True
+
+ # Read-only properties
+ loaded = property(lambda self: self._loaded)
+ pkg_name = property(lambda self: self._pkg_name)
+ pkg_version = property(lambda self: self._pkg_version)
diff --git a/buildbot/buildbot/steps/python.py b/buildbot/buildbot/steps/python.py
new file mode 100644
index 0000000..7f87aa7
--- /dev/null
+++ b/buildbot/buildbot/steps/python.py
@@ -0,0 +1,187 @@
+
+from buildbot.status.builder import SUCCESS, FAILURE, WARNINGS
+from buildbot.steps.shell import ShellCommand
+import re
+
+try:
+ import cStringIO
+ StringIO = cStringIO.StringIO
+except ImportError:
+ from StringIO import StringIO
+
+
+class BuildEPYDoc(ShellCommand):
+ name = "epydoc"
+ command = ["make", "epydocs"]
+ description = ["building", "epydocs"]
+ descriptionDone = ["epydoc"]
+
+ def createSummary(self, log):
+ import_errors = 0
+ warnings = 0
+ errors = 0
+
+ for line in StringIO(log.getText()):
+ if line.startswith("Error importing "):
+ import_errors += 1
+ if line.find("Warning: ") != -1:
+ warnings += 1
+ if line.find("Error: ") != -1:
+ errors += 1
+
+ self.descriptionDone = self.descriptionDone[:]
+ if import_errors:
+ self.descriptionDone.append("ierr=%d" % import_errors)
+ if warnings:
+ self.descriptionDone.append("warn=%d" % warnings)
+ if errors:
+ self.descriptionDone.append("err=%d" % errors)
+
+ self.import_errors = import_errors
+ self.warnings = warnings
+ self.errors = errors
+
+ def evaluateCommand(self, cmd):
+ if cmd.rc != 0:
+ return FAILURE
+ if self.warnings or self.errors:
+ return WARNINGS
+ return SUCCESS
+
+
+class PyFlakes(ShellCommand):
+ name = "pyflakes"
+ command = ["make", "pyflakes"]
+ description = ["running", "pyflakes"]
+ descriptionDone = ["pyflakes"]
+ flunkOnFailure = False
+ flunkingIssues = ["undefined"] # any pyflakes lines like this cause FAILURE
+
+ MESSAGES = ("unused", "undefined", "redefs", "import*", "misc")
+
+ def createSummary(self, log):
+ counts = {}
+ summaries = {}
+ for m in self.MESSAGES:
+ counts[m] = 0
+ summaries[m] = []
+
+ first = True
+ for line in StringIO(log.getText()).readlines():
+ # the first few lines might contain echoed commands from a 'make
+ # pyflakes' step, so don't count these as warnings. Stop ignoring
+ # the initial lines as soon as we see one with a colon.
+ if first:
+ if line.find(":") != -1:
+ # there's the colon, this is the first real line
+ first = False
+ # fall through and parse the line
+ else:
+ # skip this line, keep skipping non-colon lines
+ continue
+ if line.find("imported but unused") != -1:
+ m = "unused"
+ elif line.find("*' used; unable to detect undefined names") != -1:
+ m = "import*"
+ elif line.find("undefined name") != -1:
+ m = "undefined"
+ elif line.find("redefinition of unused") != -1:
+ m = "redefs"
+ else:
+ m = "misc"
+ summaries[m].append(line)
+ counts[m] += 1
+
+ self.descriptionDone = self.descriptionDone[:]
+ for m in self.MESSAGES:
+ if counts[m]:
+ self.descriptionDone.append("%s=%d" % (m, counts[m]))
+ self.addCompleteLog(m, "".join(summaries[m]))
+ self.setProperty("pyflakes-%s" % m, counts[m], "pyflakes")
+ self.setProperty("pyflakes-total", sum(counts.values()), "pyflakes")
+
+
+ def evaluateCommand(self, cmd):
+ if cmd.rc != 0:
+ return FAILURE
+ for m in self.flunkingIssues:
+ if self.getProperty("pyflakes-%s" % m):
+ return FAILURE
+ if self.getProperty("pyflakes-total"):
+ return WARNINGS
+ return SUCCESS
+
+class PyLint(ShellCommand):
+ '''A command that knows about pylint output.
+ It's a good idea to add --output-format=parseable to your
+ command, since it includes the filename in the message.
+ '''
+ name = "pylint"
+ description = ["running", "pylint"]
+ descriptionDone = ["pylint"]
+
+ # Using the default text output, the message format is :
+ # MESSAGE_TYPE: LINE_NUM:[OBJECT:] MESSAGE
+ # with --output-format=parseable it is: (the outer brackets are literal)
+ # FILE_NAME:LINE_NUM: [MESSAGE_TYPE[, OBJECT]] MESSAGE
+ # message type consists of the type char and 4 digits
+ # The message types:
+
+ MESSAGES = {
+ 'C': "convention", # for programming standard violation
+ 'R': "refactor", # for bad code smell
+ 'W': "warning", # for python specific problems
+ 'E': "error", # for much probably bugs in the code
+ 'F': "fatal", # error prevented pylint from further processing.
+ 'I': "info",
+ }
+
+ flunkingIssues = ["F", "E"] # msg categories that cause FAILURE
+
+ _re_groupname = 'errtype'
+ _msgtypes_re_str = '(?P<%s>[%s])' % (_re_groupname, ''.join(MESSAGES.keys()))
+ _default_line_re = re.compile(r'%s\d{4}: *\d+:.+' % _msgtypes_re_str)
+ _parseable_line_re = re.compile(r'[^:]+:\d+: \[%s\d{4}[,\]] .+' % _msgtypes_re_str)
+
+ def createSummary(self, log):
+ counts = {}
+ summaries = {}
+ for m in self.MESSAGES:
+ counts[m] = 0
+ summaries[m] = []
+
+ line_re = None # decide after first match
+ for line in StringIO(log.getText()).readlines():
+ if not line_re:
+ # need to test both and then decide on one
+ if self._parseable_line_re.match(line):
+ line_re = self._parseable_line_re
+ elif self._default_line_re.match(line):
+ line_re = self._default_line_re
+ else: # no match yet
+ continue
+ mo = line_re.match(line)
+ if mo:
+ msgtype = mo.group(self._re_groupname)
+ assert msgtype in self.MESSAGES
+ summaries[msgtype].append(line)
+ counts[msgtype] += 1
+
+ self.descriptionDone = self.descriptionDone[:]
+ for msg, fullmsg in self.MESSAGES.items():
+ if counts[msg]:
+ self.descriptionDone.append("%s=%d" % (fullmsg, counts[msg]))
+ self.addCompleteLog(fullmsg, "".join(summaries[msg]))
+ self.setProperty("pylint-%s" % fullmsg, counts[msg])
+ self.setProperty("pylint-total", sum(counts.values()))
+
+ def evaluateCommand(self, cmd):
+ if cmd.rc != 0:
+ return FAILURE
+ for msg in self.flunkingIssues:
+ if self.getProperty("pylint-%s" % self.MESSAGES[msg]):
+ return FAILURE
+ if self.getProperty("pylint-total"):
+ return WARNINGS
+ return SUCCESS
+
diff --git a/buildbot/buildbot/steps/python_twisted.py b/buildbot/buildbot/steps/python_twisted.py
new file mode 100644
index 0000000..d0ed5b0
--- /dev/null
+++ b/buildbot/buildbot/steps/python_twisted.py
@@ -0,0 +1,804 @@
+# -*- test-case-name: buildbot.test.test_twisted -*-
+
+from twisted.python import log
+
+from buildbot.status import builder
+from buildbot.status.builder import SUCCESS, FAILURE, WARNINGS, SKIPPED
+from buildbot.process.buildstep import LogLineObserver, OutputProgressObserver
+from buildbot.process.buildstep import RemoteShellCommand
+from buildbot.steps.shell import ShellCommand
+
+try:
+ import cStringIO
+ StringIO = cStringIO
+except ImportError:
+ import StringIO
+import re
+
+# BuildSteps that are specific to the Twisted source tree
+
+class HLint(ShellCommand):
+ """I run a 'lint' checker over a set of .xhtml files. Any deviations
+ from recommended style is flagged and put in the output log.
+
+ This step looks at .changes in the parent Build to extract a list of
+ Lore XHTML files to check."""
+
+ name = "hlint"
+ description = ["running", "hlint"]
+ descriptionDone = ["hlint"]
+ warnOnWarnings = True
+ warnOnFailure = True
+ # TODO: track time, but not output
+ warnings = 0
+
+ def __init__(self, python=None, **kwargs):
+ ShellCommand.__init__(self, **kwargs)
+ self.addFactoryArguments(python=python)
+ self.python = python
+
+ def start(self):
+ # create the command
+ htmlFiles = {}
+ for f in self.build.allFiles():
+ if f.endswith(".xhtml") and not f.startswith("sandbox/"):
+ htmlFiles[f] = 1
+ # remove duplicates
+ hlintTargets = htmlFiles.keys()
+ hlintTargets.sort()
+ if not hlintTargets:
+ return SKIPPED
+ self.hlintFiles = hlintTargets
+ c = []
+ if self.python:
+ c.append(self.python)
+ c += ["bin/lore", "-p", "--output", "lint"] + self.hlintFiles
+ self.setCommand(c)
+
+ # add an extra log file to show the .html files we're checking
+ self.addCompleteLog("files", "\n".join(self.hlintFiles)+"\n")
+
+ ShellCommand.start(self)
+
+ def commandComplete(self, cmd):
+ # TODO: remove the 'files' file (a list of .xhtml files that were
+ # submitted to hlint) because it is available in the logfile and
+ # mostly exists to give the user an idea of how long the step will
+ # take anyway.
+ lines = cmd.logs['stdio'].getText().split("\n")
+ warningLines = filter(lambda line:':' in line, lines)
+ if warningLines:
+ self.addCompleteLog("warnings", "".join(warningLines))
+ warnings = len(warningLines)
+ self.warnings = warnings
+
+ def evaluateCommand(self, cmd):
+ # warnings are in stdout, rc is always 0, unless the tools break
+ if cmd.rc != 0:
+ return FAILURE
+ if self.warnings:
+ return WARNINGS
+ return SUCCESS
+
+ def getText2(self, cmd, results):
+ if cmd.rc != 0:
+ return ["hlint"]
+ return ["%d hlin%s" % (self.warnings,
+ self.warnings == 1 and 't' or 'ts')]
+
+def countFailedTests(output):
+ # start scanning 10kb from the end, because there might be a few kb of
+ # import exception tracebacks between the total/time line and the errors
+ # line
+ chunk = output[-10000:]
+ lines = chunk.split("\n")
+ lines.pop() # blank line at end
+ # lines[-3] is "Ran NN tests in 0.242s"
+ # lines[-2] is blank
+ # lines[-1] is 'OK' or 'FAILED (failures=1, errors=12)'
+ # or 'FAILED (failures=1)'
+ # or "PASSED (skips=N, successes=N)" (for Twisted-2.0)
+ # there might be other lines dumped here. Scan all the lines.
+ res = {'total': None,
+ 'failures': 0,
+ 'errors': 0,
+ 'skips': 0,
+ 'expectedFailures': 0,
+ 'unexpectedSuccesses': 0,
+ }
+ for l in lines:
+ out = re.search(r'Ran (\d+) tests', l)
+ if out:
+ res['total'] = int(out.group(1))
+ if (l.startswith("OK") or
+ l.startswith("FAILED ") or
+ l.startswith("PASSED")):
+ # the extra space on FAILED_ is to distinguish the overall
+ # status from an individual test which failed. The lack of a
+ # space on the OK is because it may be printed without any
+ # additional text (if there are no skips,etc)
+ out = re.search(r'failures=(\d+)', l)
+ if out: res['failures'] = int(out.group(1))
+ out = re.search(r'errors=(\d+)', l)
+ if out: res['errors'] = int(out.group(1))
+ out = re.search(r'skips=(\d+)', l)
+ if out: res['skips'] = int(out.group(1))
+ out = re.search(r'expectedFailures=(\d+)', l)
+ if out: res['expectedFailures'] = int(out.group(1))
+ out = re.search(r'unexpectedSuccesses=(\d+)', l)
+ if out: res['unexpectedSuccesses'] = int(out.group(1))
+ # successes= is a Twisted-2.0 addition, and is not currently used
+ out = re.search(r'successes=(\d+)', l)
+ if out: res['successes'] = int(out.group(1))
+
+ return res
+
+
+class TrialTestCaseCounter(LogLineObserver):
+ _line_re = re.compile(r'^(?:Doctest: )?([\w\.]+) \.\.\. \[([^\]]+)\]$')
+ numTests = 0
+ finished = False
+
+ def outLineReceived(self, line):
+ # different versions of Twisted emit different per-test lines with
+ # the bwverbose reporter.
+ # 2.0.0: testSlave (buildbot.test.test_runner.Create) ... [OK]
+ # 2.1.0: buildbot.test.test_runner.Create.testSlave ... [OK]
+ # 2.4.0: buildbot.test.test_runner.Create.testSlave ... [OK]
+ # Let's just handle the most recent version, since it's the easiest.
+ # Note that doctests create lines line this:
+ # Doctest: viff.field.GF ... [OK]
+
+ if self.finished:
+ return
+ if line.startswith("=" * 40):
+ self.finished = True
+ return
+
+ m = self._line_re.search(line.strip())
+ if m:
+ testname, result = m.groups()
+ self.numTests += 1
+ self.step.setProgress('tests', self.numTests)
+
+
+UNSPECIFIED=() # since None is a valid choice
+
+class Trial(ShellCommand):
+ """I run a unit test suite using 'trial', a unittest-like testing
+ framework that comes with Twisted. Trial is used to implement Twisted's
+ own unit tests, and is the unittest-framework of choice for many projects
+ that use Twisted internally.
+
+ Projects that use trial typically have all their test cases in a 'test'
+ subdirectory of their top-level library directory. I.e. for my package
+ 'petmail', the tests are in 'petmail/test/test_*.py'. More complicated
+ packages (like Twisted itself) may have multiple test directories, like
+ 'twisted/test/test_*.py' for the core functionality and
+ 'twisted/mail/test/test_*.py' for the email-specific tests.
+
+ To run trial tests, you run the 'trial' executable and tell it where the
+ test cases are located. The most common way of doing this is with a
+ module name. For petmail, I would run 'trial petmail.test' and it would
+ locate all the test_*.py files under petmail/test/, running every test
+ case it could find in them. Unlike the unittest.py that comes with
+ Python, you do not run the test_foo.py as a script; you always let trial
+ do the importing and running. The 'tests' parameter controls which tests
+ trial will run: it can be a string or a list of strings.
+
+ To find these test cases, you must set a PYTHONPATH that allows something
+ like 'import petmail.test' to work. For packages that don't use a
+ separate top-level 'lib' directory, PYTHONPATH=. will work, and will use
+ the test cases (and the code they are testing) in-place.
+ PYTHONPATH=build/lib or PYTHONPATH=build/lib.$ARCH are also useful when
+ you do a 'setup.py build' step first. The 'testpath' attribute of this
+ class controls what PYTHONPATH= is set to.
+
+ Trial has the ability (through the --testmodule flag) to run only the set
+ of test cases named by special 'test-case-name' tags in source files. We
+ can get the list of changed source files from our parent Build and
+ provide them to trial, thus running the minimal set of test cases needed
+ to cover the Changes. This is useful for quick builds, especially in
+ trees with a lot of test cases. The 'testChanges' parameter controls this
+ feature: if set, it will override 'tests'.
+
+ The trial executable itself is typically just 'trial' (which is usually
+ found on your $PATH as /usr/bin/trial), but it can be overridden with the
+ 'trial' parameter. This is useful for Twisted's own unittests, which want
+ to use the copy of bin/trial that comes with the sources. (when bin/trial
+ discovers that it is living in a subdirectory named 'Twisted', it assumes
+ it is being run from the source tree and adds that parent directory to
+ PYTHONPATH. Therefore the canonical way to run Twisted's own unittest
+ suite is './bin/trial twisted.test' rather than 'PYTHONPATH=.
+ /usr/bin/trial twisted.test', especially handy when /usr/bin/trial has
+ not yet been installed).
+
+ To influence the version of python being used for the tests, or to add
+ flags to the command, set the 'python' parameter. This can be a string
+ (like 'python2.2') or a list (like ['python2.3', '-Wall']).
+
+ Trial creates and switches into a directory named _trial_temp/ before
+ running the tests, and sends the twisted log (which includes all
+ exceptions) to a file named test.log . This file will be pulled up to
+ the master where it can be seen as part of the status output.
+
+ There are some class attributes which may be usefully overridden
+ by subclasses. 'trialMode' and 'trialArgs' can influence the trial
+ command line.
+ """
+
+ name = "trial"
+ progressMetrics = ('output', 'tests', 'test.log')
+ # note: the slash only works on unix buildslaves, of course, but we have
+ # no way to know what the buildslave uses as a separator. TODO: figure
+ # out something clever.
+ logfiles = {"test.log": "_trial_temp/test.log"}
+ # we use test.log to track Progress at the end of __init__()
+
+ flunkOnFailure = True
+ python = None
+ trial = "trial"
+ trialMode = ["--reporter=bwverbose"] # requires Twisted-2.1.0 or newer
+ # for Twisted-2.0.0 or 1.3.0, use ["-o"] instead
+ trialArgs = []
+ testpath = UNSPECIFIED # required (but can be None)
+ testChanges = False # TODO: needs better name
+ recurse = False
+ reactor = None
+ randomly = False
+ tests = None # required
+
+ def __init__(self, reactor=UNSPECIFIED, python=None, trial=None,
+ testpath=UNSPECIFIED,
+ tests=None, testChanges=None,
+ recurse=None, randomly=None,
+ trialMode=None, trialArgs=None,
+ **kwargs):
+ """
+ @type testpath: string
+ @param testpath: use in PYTHONPATH when running the tests. If
+ None, do not set PYTHONPATH. Setting this to '.' will
+ cause the source files to be used in-place.
+
+ @type python: string (without spaces) or list
+ @param python: which python executable to use. Will form the start of
+ the argv array that will launch trial. If you use this,
+ you should set 'trial' to an explicit path (like
+ /usr/bin/trial or ./bin/trial). Defaults to None, which
+ leaves it out entirely (running 'trial args' instead of
+ 'python ./bin/trial args'). Likely values are 'python',
+ ['python2.2'], ['python', '-Wall'], etc.
+
+ @type trial: string
+ @param trial: which 'trial' executable to run.
+ Defaults to 'trial', which will cause $PATH to be
+ searched and probably find /usr/bin/trial . If you set
+ 'python', this should be set to an explicit path (because
+ 'python2.3 trial' will not work).
+
+ @type trialMode: list of strings
+ @param trialMode: a list of arguments to pass to trial, specifically
+ to set the reporting mode. This defaults to ['-to']
+ which means 'verbose colorless output' to the trial
+ that comes with Twisted-2.0.x and at least -2.1.0 .
+ Newer versions of Twisted may come with a trial
+ that prefers ['--reporter=bwverbose'].
+
+ @type trialArgs: list of strings
+ @param trialArgs: a list of arguments to pass to trial, available to
+ turn on any extra flags you like. Defaults to [].
+
+ @type tests: list of strings
+ @param tests: a list of test modules to run, like
+ ['twisted.test.test_defer', 'twisted.test.test_process'].
+ If this is a string, it will be converted into a one-item
+ list.
+
+ @type testChanges: boolean
+ @param testChanges: if True, ignore the 'tests' parameter and instead
+ ask the Build for all the files that make up the
+ Changes going into this build. Pass these filenames
+ to trial and ask it to look for test-case-name
+ tags, running just the tests necessary to cover the
+ changes.
+
+ @type recurse: boolean
+ @param recurse: If True, pass the --recurse option to trial, allowing
+ test cases to be found in deeper subdirectories of the
+ modules listed in 'tests'. This does not appear to be
+ necessary when using testChanges.
+
+ @type reactor: string
+ @param reactor: which reactor to use, like 'gtk' or 'java'. If not
+ provided, the Twisted's usual platform-dependent
+ default is used.
+
+ @type randomly: boolean
+ @param randomly: if True, add the --random=0 argument, which instructs
+ trial to run the unit tests in a random order each
+ time. This occasionally catches problems that might be
+ masked when one module always runs before another
+ (like failing to make registerAdapter calls before
+ lookups are done).
+
+ @type kwargs: dict
+ @param kwargs: parameters. The following parameters are inherited from
+ L{ShellCommand} and may be useful to set: workdir,
+ haltOnFailure, flunkOnWarnings, flunkOnFailure,
+ warnOnWarnings, warnOnFailure, want_stdout, want_stderr,
+ timeout.
+ """
+ ShellCommand.__init__(self, **kwargs)
+ self.addFactoryArguments(reactor=reactor,
+ python=python,
+ trial=trial,
+ testpath=testpath,
+ tests=tests,
+ testChanges=testChanges,
+ recurse=recurse,
+ randomly=randomly,
+ trialMode=trialMode,
+ trialArgs=trialArgs,
+ )
+
+ if python:
+ self.python = python
+ if self.python is not None:
+ if type(self.python) is str:
+ self.python = [self.python]
+ for s in self.python:
+ if " " in s:
+ # this is not strictly an error, but I suspect more
+ # people will accidentally try to use python="python2.3
+ # -Wall" than will use embedded spaces in a python flag
+ log.msg("python= component '%s' has spaces")
+ log.msg("To add -Wall, use python=['python', '-Wall']")
+ why = "python= value has spaces, probably an error"
+ raise ValueError(why)
+
+ if trial:
+ self.trial = trial
+ if " " in self.trial:
+ raise ValueError("trial= value has spaces")
+ if trialMode is not None:
+ self.trialMode = trialMode
+ if trialArgs is not None:
+ self.trialArgs = trialArgs
+
+ if testpath is not UNSPECIFIED:
+ self.testpath = testpath
+ if self.testpath is UNSPECIFIED:
+ raise ValueError("You must specify testpath= (it can be None)")
+ assert isinstance(self.testpath, str) or self.testpath is None
+
+ if reactor is not UNSPECIFIED:
+ self.reactor = reactor
+
+ if tests is not None:
+ self.tests = tests
+ if type(self.tests) is str:
+ self.tests = [self.tests]
+ if testChanges is not None:
+ self.testChanges = testChanges
+ #self.recurse = True # not sure this is necessary
+
+ if not self.testChanges and self.tests is None:
+ raise ValueError("Must either set testChanges= or provide tests=")
+
+ if recurse is not None:
+ self.recurse = recurse
+ if randomly is not None:
+ self.randomly = randomly
+
+ # build up most of the command, then stash it until start()
+ command = []
+ if self.python:
+ command.extend(self.python)
+ command.append(self.trial)
+ command.extend(self.trialMode)
+ if self.recurse:
+ command.append("--recurse")
+ if self.reactor:
+ command.append("--reactor=%s" % reactor)
+ if self.randomly:
+ command.append("--random=0")
+ command.extend(self.trialArgs)
+ self.command = command
+
+ if self.reactor:
+ self.description = ["testing", "(%s)" % self.reactor]
+ self.descriptionDone = ["tests"]
+ # commandComplete adds (reactorname) to self.text
+ else:
+ self.description = ["testing"]
+ self.descriptionDone = ["tests"]
+
+ # this counter will feed Progress along the 'test cases' metric
+ self.addLogObserver('stdio', TrialTestCaseCounter())
+ # this one just measures bytes of output in _trial_temp/test.log
+ self.addLogObserver('test.log', OutputProgressObserver('test.log'))
+
+ def setupEnvironment(self, cmd):
+ ShellCommand.setupEnvironment(self, cmd)
+ if self.testpath != None:
+ e = cmd.args['env']
+ if e is None:
+ cmd.args['env'] = {'PYTHONPATH': self.testpath}
+ else:
+ # TODO: somehow, each build causes another copy of
+ # self.testpath to get prepended
+ if e.get('PYTHONPATH', "") == "":
+ e['PYTHONPATH'] = self.testpath
+ else:
+ e['PYTHONPATH'] = self.testpath + ":" + e['PYTHONPATH']
+ try:
+ p = cmd.args['env']['PYTHONPATH']
+ if type(p) is not str:
+ log.msg("hey, not a string:", p)
+ assert False
+ except (KeyError, TypeError):
+ # KeyError if args doesn't have ['env']
+ # KeyError if args['env'] doesn't have ['PYTHONPATH']
+ # TypeError if args is None
+ pass
+
+ def start(self):
+ # now that self.build.allFiles() is nailed down, finish building the
+ # command
+ if self.testChanges:
+ for f in self.build.allFiles():
+ if f.endswith(".py"):
+ self.command.append("--testmodule=%s" % f)
+ else:
+ self.command.extend(self.tests)
+ log.msg("Trial.start: command is", self.command)
+
+ # if our slave is too old to understand logfiles=, fetch them
+ # manually. This is a fallback for the Twisted buildbot and some old
+ # buildslaves.
+ self._needToPullTestDotLog = False
+ if self.slaveVersionIsOlderThan("shell", "2.1"):
+ log.msg("Trial: buildslave %s is too old to accept logfiles=" %
+ self.getSlaveName())
+ log.msg(" falling back to 'cat _trial_temp/test.log' instead")
+ self.logfiles = {}
+ self._needToPullTestDotLog = True
+
+ ShellCommand.start(self)
+
+
    def commandComplete(self, cmd):
        """Compute status once trial has finished.

        On slaves that understand logfiles=, test.log was already streamed,
        so status can be computed immediately. On slaves detected as too
        old in start(), first run a second remote command to 'cat'
        _trial_temp/test.log; in that case a Deferred is returned.
        """
        if not self._needToPullTestDotLog:
            return self._gotTestDotLog(cmd)

        # if the buildslave was too old, pull test.log now
        catcmd = ["cat", "_trial_temp/test.log"]
        c2 = RemoteShellCommand(command=catcmd, workdir=self.workdir)
        loog = self.addLog("test.log")
        c2.useLog(loog, True, logfileName="stdio")
        self.cmd = c2 # to allow interrupts
        d = c2.run(self, self.remote)
        # note: status is parsed from the original trial command's output
        # ('cmd'), not from the 'cat' command
        d.addCallback(lambda res: self._gotTestDotLog(cmd))
        return d
+
+ def rtext(self, fmt='%s'):
+ if self.reactor:
+ rtext = fmt % self.reactor
+ return rtext.replace("reactor", "")
+ return ""
+
    def _gotTestDotLog(self, cmd):
        """Parse trial's stdio output and compute this step's overall
        status, storing it in self.results / self.text / self.text2 for the
        evaluateCommand/getText/getText2 hooks to return later."""
        # figure out all status, then let the various hook functions return
        # different pieces of it

        # 'cmd' is the original trial command, so cmd.logs['stdio'] is the
        # trial output. We don't have access to test.log from here.
        output = cmd.logs['stdio'].getText()
        counts = countFailedTests(output)

        total = counts['total']
        failures, errors = counts['failures'], counts['errors']
        # countFailedTests returns total=None when it could not parse the log
        parsed = (total != None)
        text = []
        text2 = ""

        if cmd.rc == 0:
            # trial exited cleanly
            if parsed:
                results = SUCCESS
                if total:
                    text += ["%d %s" % \
                             (total,
                              total == 1 and "test" or "tests"),
                             "passed"]
                else:
                    text += ["no tests", "run"]
            else:
                # a zero exit but an unparseable log is treated as failure
                results = FAILURE
                text += ["testlog", "unparseable"]
                text2 = "tests"
        else:
            # something failed
            results = FAILURE
            if parsed:
                text.append("tests")
                if failures:
                    text.append("%d %s" % \
                                (failures,
                                 failures == 1 and "failure" or "failures"))
                if errors:
                    text.append("%d %s" % \
                                (errors,
                                 errors == 1 and "error" or "errors"))
                count = failures + errors
                text2 = "%d tes%s" % (count, (count == 1 and 't' or 'ts'))
            else:
                text += ["tests", "failed"]
                text2 = "tests"

        # skips and todos are reported regardless of pass/fail
        if counts['skips']:
            text.append("%d %s" % \
                        (counts['skips'],
                         counts['skips'] == 1 and "skip" or "skips"))
        if counts['expectedFailures']:
            text.append("%d %s" % \
                        (counts['expectedFailures'],
                         counts['expectedFailures'] == 1 and "todo"
                         or "todos"))
            if 0: # TODO
                results = WARNINGS
                if not text2:
                    text2 = "todo"

        if 0:
            # ignore unexpectedSuccesses for now, but it should really mark
            # the build WARNING
            if counts['unexpectedSuccesses']:
                text.append("%d surprises" % counts['unexpectedSuccesses'])
                results = WARNINGS
                if not text2:
                    text2 = "tests"

        # tag the status text with the reactor name, e.g. "(poll)"
        if self.reactor:
            text.append(self.rtext('(%s)'))
            if text2:
                text2 = "%s %s" % (text2, self.rtext('(%s)'))

        self.results = results
        self.text = text
        self.text2 = [text2]
+
    def addTestResult(self, testname, results, text, tlog):
        """Record a single test's outcome on the build status.

        @param testname: tuple of name components identifying the test
        @param results: a status constant (SUCCESS, FAILURE, SKIPPED, ...)
        @param text: list of short description words
        @param tlog: the log text captured for this test
        """
        if self.reactor is not None:
            # prefix the test name with the reactor it ran under
            testname = (self.reactor,) + testname
        tr = builder.TestResult(testname, results, text, logs={'log': tlog})
        #self.step_status.build.addTestResult(tr)
        self.build.build_status.addTestResult(tr)
+
    def createSummary(self, loog):
        """Scan the trial log for warnings and for the '===='-delimited
        problems section; publish 'problems' and 'warnings' logs and emit a
        per-test result for each entry in the problems section."""
        output = loog.getText()
        problems = ""
        sio = StringIO.StringIO(output)
        warnings = {}
        # first pass: collect (and count) warning lines until the problems
        # separator is reached; everything after it is the problems report
        while 1:
            line = sio.readline()
            if line == "":
                break
            if line.find(" exceptions.DeprecationWarning: ") != -1:
                # no source
                warning = line # TODO: consider stripping basedir prefix here
                warnings[warning] = warnings.get(warning, 0) + 1
            elif (line.find(" DeprecationWarning: ") != -1 or
                  line.find(" UserWarning: ") != -1):
                # next line is the source
                warning = line + sio.readline()
                warnings[warning] = warnings.get(warning, 0) + 1
            elif line.find("Warning: ") != -1:
                warning = line
                warnings[warning] = warnings.get(warning, 0) + 1

            # a full-width ==== or ---- rule starts the problems section;
            # grab it and the rest of the log wholesale
            if line.find("=" * 60) == 0 or line.find("-" * 60) == 0:
                problems += line
                problems += sio.read()
                break

        if problems:
            self.addCompleteLog("problems", problems)
            # now parse the problems for per-test results
            pio = StringIO.StringIO(problems)
            pio.readline() # eat the first separator line
            testname = None
            done = False
            # outer loop: one iteration per test entry; inner loop: consume
            # lines until the next ==== / ---- separator (or EOF)
            while not done:
                while 1:
                    line = pio.readline()
                    if line == "":
                        done = True
                        break
                    if line.find("=" * 60) == 0:
                        break
                    if line.find("-" * 60) == 0:
                        # the last case has --- as a separator before the
                        # summary counts are printed
                        done = True
                        break
                    if testname is None:
                        # the first line after the === is like:
# EXPECTED FAILURE: testLackOfTB (twisted.test.test_failure.FailureTestCase)
# SKIPPED: testRETR (twisted.test.test_ftp.TestFTPServer)
# FAILURE: testBatchFile (twisted.conch.test.test_sftp.TestOurServerBatchFile)
                        r = re.search(r'^([^:]+): (\w+) \(([\w\.]+)\)', line)
                        if not r:
                            # TODO: cleanup, if there are no problems,
                            # we hit here
                            continue
                        result, name, case = r.groups()
                        testname = tuple(case.split(".") + [name])
                        # map trial's result keyword to a status constant
                        results = {'SKIPPED': SKIPPED,
                                   'EXPECTED FAILURE': SUCCESS,
                                   'UNEXPECTED SUCCESS': WARNINGS,
                                   'FAILURE': FAILURE,
                                   'ERROR': FAILURE,
                                   'SUCCESS': SUCCESS, # not reported
                                   }.get(result, WARNINGS)
                        text = result.lower().split()
                        loog = line
                        # the next line is all dashes
                        loog += pio.readline()
                    else:
                        # the rest goes into the log
                        loog += line
                if testname:
                    self.addTestResult(testname, results, text, loog)
                    testname = None

        if warnings:
            # publish the de-duplicated warnings, sorted for stable output
            lines = warnings.keys()
            lines.sort()
            self.addCompleteLog("warnings", "".join(lines))
+
+ def evaluateCommand(self, cmd):
+ return self.results
+
+ def getText(self, cmd, results):
+ return self.text
+ def getText2(self, cmd, results):
+ return self.text2
+
+
class ProcessDocs(ShellCommand):
    """I build all docs. This requires some LaTeX packages to be installed.
    It will result in the full documentation book (dvi, pdf, etc).

    """

    name = "process-docs"
    warnOnWarnings = 1
    command = ["admin/process-docs"]
    description = ["processing", "docs"]
    descriptionDone = ["docs"]
    # TODO: track output and time

    def __init__(self, **kwargs):
        """
        @type workdir: string
        @keyword workdir: the workdir to start from: must be the base of the
                          Twisted tree
        """
        ShellCommand.__init__(self, **kwargs)

    def createSummary(self, log):
        # hlint warnings look like 'WARNING: file:line:col: stuff'. LaTeX
        # warnings start with 'WARNING: LaTeX Warning: stuff' but sometimes
        # wrap onto a second line, so a line is kept either because it
        # starts with the marker itself or because the previous line did.
        collected = []
        keep_continuation = False
        for text_line in log.getText().split("\n"):
            starts_warning = text_line.startswith("WARNING: ")
            if starts_warning or keep_continuation:
                collected.append(text_line)
            keep_continuation = starts_warning

        if collected:
            self.addCompleteLog("warnings", "\n".join(collected) + "\n")
        self.warnings = len(collected)

    def evaluateCommand(self, cmd):
        if cmd.rc == 0:
            if self.warnings:
                return WARNINGS
            return SUCCESS
        return FAILURE

    def getText(self, cmd, results):
        if results == SUCCESS:
            return ["docs", "successful"]
        if results == WARNINGS:
            count = self.warnings
            return ["docs",
                    "%d warnin%s" % (count, count == 1 and 'g' or 'gs')]
        if results == FAILURE:
            return ["docs", "failed"]

    def getText2(self, cmd, results):
        if results != WARNINGS:
            return ["docs"]
        count = self.warnings
        return ["%d do%s" % (count, count == 1 and 'c' or 'cs')]
+
+
+
class BuildDebs(ShellCommand):
    """I build the .deb packages."""

    name = "debuild"
    flunkOnFailure = 1
    command = ["debuild", "-uc", "-us"]
    description = ["building", "debs"]
    descriptionDone = ["debs"]

    def __init__(self, **kwargs):
        """
        @type workdir: string
        @keyword workdir: the workdir to start from (must be the base of the
                          Twisted tree)
        """
        ShellCommand.__init__(self, **kwargs)

    def commandComplete(self, cmd):
        """Scan the debuild output for lintian 'E: ' (error) and 'W: '
        (warning) lines, record the counts on the step, and publish the
        offending lines as a 'problems' log."""
        errors, warnings = 0, 0
        output = cmd.logs['stdio'].getText()
        summary = ""
        sio = StringIO.StringIO(output)
        for line in sio.readlines():
            # str.startswith() instead of .find(...) == 0: same test,
            # clearer, and no full-line scan on a miss. A line cannot start
            # with both prefixes, so elif is equivalent to the two ifs.
            if line.startswith("E: "):
                summary += line
                errors += 1
            elif line.startswith("W: "):
                summary += line
                warnings += 1
        if summary:
            self.addCompleteLog("problems", summary)
        self.errors = errors
        self.warnings = warnings

    def evaluateCommand(self, cmd):
        """Lintian errors flunk the step; warnings only degrade it."""
        if cmd.rc != 0:
            return FAILURE
        if self.errors:
            return FAILURE
        if self.warnings:
            return WARNINGS
        return SUCCESS

    def getText(self, cmd, results):
        text = ["debuild"]
        if cmd.rc != 0:
            text.append("failed")
        errors, warnings = self.errors, self.warnings
        if warnings or errors:
            text.append("lintian:")
            if warnings:
                text.append("%d warnin%s" % (warnings,
                                             warnings == 1 and 'g' or 'gs'))
            if errors:
                text.append("%d erro%s" % (errors,
                                           errors == 1 and 'r' or 'rs'))
        return text

    def getText2(self, cmd, results):
        # one-phrase summary appended to the build's overall status text
        if cmd.rc != 0:
            return ["debuild"]
        if self.errors or self.warnings:
            return ["%d lintian" % (self.errors + self.warnings)]
        return []
+
class RemovePYCs(ShellCommand):
    """I delete stale compiled bytecode (*.pyc) files from the work tree."""

    name = "remove-.pyc"
    # NOTE(review): when no .pyc files exist, xargs runs 'rm' with no
    # arguments, which exits non-zero on many platforms and could fail the
    # step — confirm; 'find ... -exec rm {} ;' would avoid that.
    command = 'find . -name "*.pyc" | xargs rm'
    description = ["removing", ".pyc", "files"]
    descriptionDone = ["remove", ".pycs"]
diff --git a/buildbot/buildbot/steps/shell.py b/buildbot/buildbot/steps/shell.py
new file mode 100644
index 0000000..e979f04
--- /dev/null
+++ b/buildbot/buildbot/steps/shell.py
@@ -0,0 +1,487 @@
+# -*- test-case-name: buildbot.test.test_steps,buildbot.test.test_properties -*-
+
+import re
+from twisted.python import log
+from buildbot.process.buildstep import LoggingBuildStep, RemoteShellCommand
+from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE, STDOUT, STDERR
+
+# for existing configurations that import WithProperties from here. We like
+# to move this class around just to keep our readers guessing.
+from buildbot.process.properties import WithProperties
+_hush_pyflakes = [WithProperties]
+del _hush_pyflakes
+
class ShellCommand(LoggingBuildStep):
    """I run a single shell command on the buildslave. I return FAILURE if
    the exit code of that command is non-zero, SUCCESS otherwise. To change
    this behavior, override my .evaluateCommand method.

    By default, a failure of this step will mark the whole build as FAILURE.
    To override this, give me an argument of flunkOnFailure=False .

    I create a single Log named 'log' which contains the output of the
    command. To create additional summary Logs, override my .createSummary
    method.

    The shell command I run (a list of argv strings) can be provided in
    several ways:
      - a class-level .command attribute
      - a command= parameter to my constructor (overrides .command)
      - set explicitly with my .setCommand() method (overrides both)

    @ivar command: a list of renderable objects (typically strings or
                   WithProperties instances). This will be used by start()
                   to create a RemoteShellCommand instance.

    @ivar logfiles: a dict mapping log NAMEs to workdir-relative FILENAMEs
                    of their corresponding logfiles. The contents of the file
                    named FILENAME will be put into a LogFile named NAME, in
                    something approximating real-time. (note that logfiles=
                    is actually handled by our parent class LoggingBuildStep)

    """

    name = "shell"
    description = None # set this to a list of short strings to override
    descriptionDone = None # alternate description when the step is complete
    command = None # set this to a command, or set in kwargs
    # logfiles={} # you can also set 'logfiles' to a dictionary, and it
    #             will be merged with any logfiles= argument passed in
    #             to __init__

    # override this on a specific ShellCommand if you want to let it fail
    # without dooming the entire build to a status of FAILURE
    flunkOnFailure = True

    def __init__(self, workdir=None,
                 description=None, descriptionDone=None,
                 command=None,
                 usePTY="slave-config",
                 **kwargs):
        """
        @param workdir: slave-relative directory to run in (may be filled
                        in later by the factory via setDefaultWorkdir)
        @param description: short strings shown while the step runs
        @param descriptionDone: short strings shown once it completes
        @param command: argv list (or whitespace-split string) to execute
        @param usePTY: True/False to force PTY use, or the default
                       'slave-config' to defer to the slave's own setting
        @param kwargs: split between BuildStep arguments (names listed in
                       self.parms) and RemoteShellCommand arguments
        """
        # most of our arguments get passed through to the RemoteShellCommand
        # that we create, but first strip out the ones that we pass to
        # BuildStep (like haltOnFailure and friends), and a couple that we
        # consume ourselves.

        if description:
            self.description = description
        if isinstance(self.description, str):
            self.description = [self.description]
        if descriptionDone:
            self.descriptionDone = descriptionDone
        if isinstance(self.descriptionDone, str):
            self.descriptionDone = [self.descriptionDone]
        if command:
            self.setCommand(command)

        # pull out the ones that LoggingBuildStep wants, then upcall
        buildstep_kwargs = {}
        # iterate over a copy of the keys, since matching entries are
        # deleted from kwargs as we go
        for k in kwargs.keys()[:]:
            if k in self.__class__.parms:
                buildstep_kwargs[k] = kwargs[k]
                del kwargs[k]
        LoggingBuildStep.__init__(self, **buildstep_kwargs)
        self.addFactoryArguments(workdir=workdir,
                                 description=description,
                                 descriptionDone=descriptionDone,
                                 command=command)

        # everything left over goes to the RemoteShellCommand
        kwargs['workdir'] = workdir # including a copy of 'workdir'
        kwargs['usePTY'] = usePTY
        self.remote_kwargs = kwargs
        # we need to stash the RemoteShellCommand's args too
        self.addFactoryArguments(**kwargs)

    def setDefaultWorkdir(self, workdir):
        # supply a workdir if __init__ didn't get one; an explicit workdir
        # passed to __init__ always wins
        rkw = self.remote_kwargs
        rkw['workdir'] = rkw['workdir'] or workdir

    def setCommand(self, command):
        # highest-priority way to set the command (overrides class attribute
        # and the command= constructor argument)
        self.command = command

    def describe(self, done=False):
        """Return a list of short strings to describe this step, for the
        status display. This uses the first few words of the shell command.
        You can replace this by setting .description in your subclass, or by
        overriding this method to describe the step better.

        @type done: boolean
        @param done: whether the command is complete or not, to improve the
                     way the command is described. C{done=False} is used
                     while the command is still running, so a single
                     imperfect-tense verb is appropriate ('compiling',
                     'testing', ...) C{done=True} is used when the command
                     has finished, and the default getText() method adds some
                     text, so a simple noun is appropriate ('compile',
                     'tests' ...)
        """

        if done and self.descriptionDone is not None:
            return list(self.descriptionDone)
        if self.description is not None:
            return list(self.description)

        # no explicit description: synthesize one from the command words
        properties = self.build.getProperties()
        words = self.command
        if isinstance(words, (str, unicode)):
            words = words.split()
        # render() each word to handle WithProperties objects
        words = properties.render(words)
        if len(words) < 1:
            return ["???"]
        if len(words) == 1:
            return ["'%s'" % words[0]]
        if len(words) == 2:
            return ["'%s" % words[0], "%s'" % words[1]]
        return ["'%s" % words[0], "%s" % words[1], "...'"]

    def setupEnvironment(self, cmd):
        # merge in anything from Build.slaveEnvironment
        # This can be set from a Builder-level environment, or from earlier
        # BuildSteps. The latter method is deprecated and superceded by
        # BuildProperties.
        # Environment variables passed in by a BuildStep override
        # those passed in at the Builder level.
        properties = self.build.getProperties()
        slaveEnv = self.build.slaveEnvironment
        if slaveEnv:
            if cmd.args['env'] is None:
                cmd.args['env'] = {}
            fullSlaveEnv = slaveEnv.copy()
            fullSlaveEnv.update(cmd.args['env'])
            cmd.args['env'] = properties.render(fullSlaveEnv)
            # note that each RemoteShellCommand gets its own copy of the
            # dictionary, so we shouldn't be affecting anyone but ourselves.

    def checkForOldSlaveAndLogfiles(self):
        # warn (via per-log headers) when logfiles= was requested but the
        # buildslave is too old to honor it
        if not self.logfiles:
            return # doesn't matter
        if not self.slaveVersionIsOlderThan("shell", "2.1"):
            return # slave is new enough
        # this buildslave is too old and will ignore the 'logfiles'
        # argument. You'll either have to pull the logfiles manually
        # (say, by using 'cat' in a separate RemoteShellCommand) or
        # upgrade the buildslave.
        msg1 = ("Warning: buildslave %s is too old "
                "to understand logfiles=, ignoring it."
                % self.getSlaveName())
        msg2 = "You will have to pull this logfile (%s) manually."
        log.msg(msg1)
        for logname,remotefilename in self.logfiles.items():
            newlog = self.addLog(logname)
            newlog.addHeader(msg1 + "\n")
            newlog.addHeader(msg2 % remotefilename + "\n")
            newlog.finish()
        # now prevent setupLogfiles() from adding them
        self.logfiles = {}

    def start(self):
        # this block is specific to ShellCommands. subclasses that don't need
        # to set up an argv array, an environment, or extra logfiles= (like
        # the Source subclasses) can just skip straight to startCommand()
        properties = self.build.getProperties()

        warnings = []

        # create the actual RemoteShellCommand instance now
        kwargs = properties.render(self.remote_kwargs)
        kwargs['command'] = properties.render(self.command)
        kwargs['logfiles'] = self.logfiles

        # check for the usePTY flag
        if kwargs.has_key('usePTY') and kwargs['usePTY'] != 'slave-config':
            # NOTE(review): 'slavever' is assigned but never used, and the
            # version check below asks about the "svn" command rather than
            # "shell" — looks suspicious; confirm whether "shell" was meant.
            slavever = self.slaveVersion("shell", "old")
            if self.slaveVersionIsOlderThan("svn", "2.7"):
                warnings.append("NOTE: slave does not allow master to override usePTY\n")

        cmd = RemoteShellCommand(**kwargs)
        self.setupEnvironment(cmd)
        self.checkForOldSlaveAndLogfiles()

        self.startCommand(cmd, warnings)
+
+
+
class TreeSize(ShellCommand):
    """Measure the on-disk size of the source tree with 'du' and publish
    it as the 'tree-size-KiB' build property."""

    name = "treesize"
    command = ["du", "-s", "-k", "."]
    # parsed size in KiB; stays None until du output has been read
    kib = None

    def commandComplete(self, cmd):
        # 'du -s -k .' prints '<size-in-KiB>\t.'; grab the leading digits
        match = re.search(r'^(\d+)', cmd.logs['stdio'].getText())
        if match:
            self.kib = int(match.group(1))
            self.setProperty("tree-size-KiB", self.kib, "treesize")

    def evaluateCommand(self, cmd):
        if cmd.rc != 0:
            return FAILURE
        if self.kib is None:
            # not sure how 'du' could fail, but whatever
            return WARNINGS
        return SUCCESS

    def getText(self, cmd, results):
        if self.kib is None:
            return ["treesize", "unknown"]
        return ["treesize", "%d KiB" % self.kib]
+
class SetProperty(ShellCommand):
    """Run a command and set one or more build properties from its output.

    Pass either property= (the property name; its value becomes the
    command's stdout, stripped by default) or extract_fn= (a callable
    taking (rc, stdout, stderr) and returning a dict of property names to
    values).
    """

    name = "setproperty"

    def __init__(self, **kwargs):
        # Consume our own keyword arguments before handing the rest to
        # ShellCommand. dict.pop(key, default) replaces the old
        # has_key()/del dance with identical behavior.
        self.property = kwargs.pop('property', None)
        self.extract_fn = kwargs.pop('extract_fn', None)
        self.strip = kwargs.pop('strip', True)

        ShellCommand.__init__(self, **kwargs)

        self.addFactoryArguments(property=self.property)
        self.addFactoryArguments(extract_fn=self.extract_fn)
        self.addFactoryArguments(strip=self.strip)

        assert self.property or self.extract_fn, \
               "SetProperty step needs either property= or extract_fn="

        self.property_changes = {}

    def commandComplete(self, cmd):
        """Harvest properties from the finished command's output."""
        if self.property:
            # single-property mode: the (optionally stripped) stdout is the
            # value; the property name itself may contain WithProperties
            result = cmd.logs['stdio'].getText()
            if self.strip:
                result = result.strip()
            propname = self.build.getProperties().render(self.property)
            self.setProperty(propname, result, "SetProperty Step")
            self.property_changes[propname] = result
        else:
            # extract_fn mode: hand rc/stdout/stderr to the user callable
            log = cmd.logs['stdio']
            new_props = self.extract_fn(cmd.rc,
                    ''.join(log.getChunks([STDOUT], onlyText=True)),
                    ''.join(log.getChunks([STDERR], onlyText=True)))
            for k,v in new_props.items():
                self.setProperty(k, v, "SetProperty Step")
            self.property_changes = new_props

    def createSummary(self, log):
        props_set = [ "%s: %r" % (k,v) for k,v in self.property_changes.items() ]
        self.addCompleteLog('property changes', "\n".join(props_set))

    def getText(self, cmd, results):
        if self.property_changes:
            # list() keeps this working whether keys() returns a list
            # (Python 2) or a view (Python 3)
            return [ "set props:" ] + list(self.property_changes.keys())
        else:
            return [ "no change" ]
+
class Configure(ShellCommand):
    """I run './configure'; a failure halts this build and flunks it,
    since nothing downstream can succeed without a configured tree."""

    name = "configure"
    haltOnFailure = 1
    flunkOnFailure = 1
    description = ["configuring"]
    descriptionDone = ["configure"]
    command = ["./configure"]
+
class WarningCountingShellCommand(ShellCommand):
    """A ShellCommand that greps its output for warning lines, counts
    them, and rolls the count into the step's 'warnings' statistic and the
    build-wide 'warnings-count' property."""

    warnCount = 0
    # default pattern: any line containing 'warning:' or 'warning '.
    # This may lead to false positives; pass warningPattern= to narrow it.
    warningPattern = '.*warning[: ].*'

    def __init__(self, warningPattern=None, **kwargs):
        # a caller-supplied pattern overrides the class-level default
        if warningPattern:
            self.warningPattern = warningPattern

        # And upcall to let the base class do its work
        ShellCommand.__init__(self, **kwargs)

        self.addFactoryArguments(warningPattern=warningPattern)

    def createSummary(self, log):
        """Match each output line against the warning pattern, publish the
        matches as a 'warnings' log, and accumulate the counters."""
        self.warnCount = 0

        if not self.warningPattern:
            # explicitly disabled: nothing to scan for
            return

        pattern = self.warningPattern
        if isinstance(pattern, str):
            pattern = re.compile(pattern)

        # TODO: use log.readlines(), except we need to decide about stdout
        # vs stderr
        matched = [line for line in log.getText().split("\n")
                   if pattern.match(line)]
        self.warnCount = len(matched)

        # make the offending lines available as their own log
        if self.warnCount:
            self.addCompleteLog("warnings", "\n".join(matched) + "\n")

        # roll this step's count into the per-step statistic ...
        previous = self.step_status.getStatistic('warnings', 0)
        self.step_status.setStatistic('warnings', previous + self.warnCount)

        # ... and into the build-wide property
        try:
            old_count = self.getProperty("warnings-count")
        except KeyError:
            old_count = 0
        self.setProperty("warnings-count", old_count + self.warnCount, "WarningCountingShellCommand")


    def evaluateCommand(self, cmd):
        if cmd.rc != 0:
            return FAILURE
        if self.warnCount:
            return WARNINGS
        return SUCCESS
+
+
class Compile(WarningCountingShellCommand):
    """I run 'make all' (or whatever command= is supplied) and count
    compiler warnings via WarningCountingShellCommand."""

    name = "compile"
    haltOnFailure = 1
    flunkOnFailure = 1
    description = ["compiling"]
    descriptionDone = ["compile"]
    command = ["make", "all"]

    # NOTE(review): the OFF prefix presumably disables this progressMetrics
    # entry — confirm before renaming it back.
    OFFprogressMetrics = ('output',)
    # things to track: number of files compiled, number of directories
    # traversed (assuming 'make' is being used)

    def createSummary(self, log):
        # TODO: grep for the characteristic GCC error lines and
        # assemble them into a pair of buffers
        WarningCountingShellCommand.createSummary(self, log)
+
class Test(WarningCountingShellCommand):
    """Run the project's test suite (by default 'make test') and expose
    pass/fail/warning counts through the step statistics."""

    name = "test"
    warnOnFailure = 1
    description = ["testing"]
    descriptionDone = ["test"]
    command = ["make", "test"]

    def setTestResults(self, total=0, failed=0, passed=0, warnings=0):
        """
        Called by subclasses to set the relevant statistics; this actually
        adds to any statistics already present
        """
        status = self.step_status
        for key, increment in (('tests-total', total),
                               ('tests-failed', failed),
                               ('tests-warnings', warnings),
                               ('tests-passed', passed)):
            status.setStatistic(key, status.getStatistic(key, 0) + increment)

    def describe(self, done=False):
        description = WarningCountingShellCommand.describe(self, done)
        if done and self.step_status.hasStatistic('tests-total'):
            stats = self.step_status
            total = stats.getStatistic("tests-total", 0)
            failed = stats.getStatistic("tests-failed", 0)
            passed = stats.getStatistic("tests-passed", 0)
            warnings = stats.getStatistic("tests-warnings", 0)
            if not total:
                # some steps only record the individual outcomes
                total = failed + passed + warnings

            if total:
                description.append('%d tests' % total)
            if passed:
                description.append('%d passed' % passed)
            if warnings:
                description.append('%d warnings' % warnings)
            if failed:
                description.append('%d failed' % failed)
        return description
+
class PerlModuleTest(Test):
    """Run a Perl test suite via 'prove' and parse its summary output.

    Handles both the newer Test::Harness report format (which prints a
    'Test Summary Report' section) and the older one ('All tests
    successful' / 'x/y subtests failed').
    """

    command=["prove", "--lib", "lib", "-r", "t"]
    total = 0

    def evaluateCommand(self, cmd):
        # Get stdio, stripping pesky newlines etc.
        lines = [line.replace('\r\n', '').replace('\r', '').replace('\n', '')
                 for line in self.getLog('stdio').readlines()]

        total = 0
        passed = 0
        failed = 0
        rc = cmd.rc

        # New version of Test::Harness? Its summary starts after a
        # 'Test Summary Report' header; lines.index() raising ValueError
        # means the old format is in use.
        try:
            test_summary_report_index = lines.index("Test Summary Report")

            del lines[0:test_summary_report_index + 2]

            # raw string: regex metacharacters like \d must not be subject
            # to string-escape interpretation
            re_test_result = re.compile(r"^Result: (PASS|FAIL)$|Tests: \d+ Failed: (\d+)\)|Files=\d+, Tests=(\d+)")

            matches = [re_test_result.search(line) for line in lines]
            test_result_lines = [mo.groups() for mo in matches if mo]

            for line in test_result_lines:
                if line[0] == 'PASS':
                    rc = SUCCESS
                elif line[0] == 'FAIL':
                    rc = FAILURE
                elif line[1]:
                    failed += int(line[1])
                elif line[2]:
                    total = int(line[2])

        except ValueError: # Nope, it's the old version
            re_test_result = re.compile(r"^(All tests successful)|(\d+)/(\d+) subtests failed|Files=\d+, Tests=(\d+),")

            matches = [re_test_result.search(line) for line in lines]
            test_result_lines = [mo.groups() for mo in matches if mo]

            if test_result_lines:
                test_result_line = test_result_lines[0]

                success = test_result_line[0]

                if success:
                    failed = 0

                    # NOTE(review): assumes a second matched line carries
                    # the totals; an output with only one match would raise
                    # IndexError here, as it always has — confirm.
                    test_totals_line = test_result_lines[1]
                    total_str = test_totals_line[3]

                    rc = SUCCESS
                else:
                    failed_str = test_result_line[1]
                    failed = int(failed_str)

                    total_str = test_result_line[2]

                    rc = FAILURE

                # BUGFIX: this conversion used to sit outside the
                # 'if test_result_lines:' block, so an output with no
                # recognizable summary line raised NameError (total_str
                # undefined) instead of falling through with total == 0.
                total = int(total_str)

        if total:
            passed = total - failed

        self.setTestResults(total=total, failed=failed, passed=passed)

        return rc
diff --git a/buildbot/buildbot/steps/source.py b/buildbot/buildbot/steps/source.py
new file mode 100644
index 0000000..4571ad5
--- /dev/null
+++ b/buildbot/buildbot/steps/source.py
@@ -0,0 +1,1107 @@
+# -*- test-case-name: buildbot.test.test_vc -*-
+
+from warnings import warn
+from email.Utils import formatdate
+from twisted.python import log
+from buildbot.process.buildstep import LoggingBuildStep, LoggedRemoteCommand
+from buildbot.interfaces import BuildSlaveTooOldError
+from buildbot.status.builder import SKIPPED
+
+
class Source(LoggingBuildStep):
    """This is a base class to generate a source tree in the buildslave.
    Each version control system has a specialized subclass, and is expected
    to override __init__ and implement computeSourceRevision() and
    startVC(). The class as a whole builds up the self.args dictionary, then
    starts a LoggedRemoteCommand with those arguments.
    """

    # if the checkout fails, there's no point in doing anything else
    haltOnFailure = True
    flunkOnFailure = True
    # when True, start() only pretends to perform the checkout (test hook)
    notReally = False

    branch = None # the default branch, should be set in __init__

    def __init__(self, workdir=None, mode='update', alwaysUseLatest=False,
                 timeout=20*60, retry=None, **kwargs):
        """
        @type workdir: string
        @param workdir: local directory (relative to the Builder's root)
                        where the tree should be placed

        @type mode: string
        @param mode: the kind of VC operation that is desired:
         - 'update': specifies that the checkout/update should be
           performed directly into the workdir. Each build is performed
           in the same directory, allowing for incremental builds. This
           minimizes disk space, bandwidth, and CPU time. However, it
           may encounter problems if the build process does not handle
           dependencies properly (if you must sometimes do a 'clean
           build' to make sure everything gets compiled), or if source
           files are deleted but generated files can influence test
           behavior (e.g. python's .pyc files), or when source
           directories are deleted but generated files prevent CVS from
           removing them.

         - 'copy': specifies that the source-controlled workspace
           should be maintained in a separate directory (called the
           'copydir'), using checkout or update as necessary. For each
           build, a new workdir is created with a copy of the source
           tree (rm -rf workdir; cp -R -P -p copydir workdir). This
           doubles the disk space required, but keeps the bandwidth low
           (update instead of a full checkout). A full 'clean' build
           is performed each time. This avoids any generated-file
           build problems, but is still occasionally vulnerable to
           problems such as a CVS repository being manually rearranged
           (causing CVS errors on update) which are not an issue with
           a full checkout.

         - 'clobber': specifies that the working directory should be
           deleted each time, necessitating a full checkout for each
           build. This insures a clean build off a complete checkout,
           avoiding any of the problems described above, but is
           bandwidth intensive, as the whole source tree must be
           pulled down for each build.

         - 'export': is like 'clobber', except that e.g. the 'cvs
           export' command is used to create the working directory.
           This command removes all VC metadata files (the
           CVS/.svn/{arch} directories) from the tree, which is
           sometimes useful for creating source tarballs (to avoid
           including the metadata in the tar file). Not all VC systems
           support export.

        @type alwaysUseLatest: boolean
        @param alwaysUseLatest: whether to always update to the most
        recent available sources for this build.

        Normally the Source step asks its Build for a list of all
        Changes that are supposed to go into the build, then computes a
        'source stamp' (revision number or timestamp) that will cause
        exactly that set of changes to be present in the checked out
        tree. This is turned into, e.g., 'cvs update -D timestamp', or
        'svn update -r revnum'. If alwaysUseLatest=True, bypass this
        computation and always update to the latest available sources
        for each build.

        The source stamp helps avoid a race condition in which someone
        commits a change after the master has decided to start a build
        but before the slave finishes checking out the sources. At best
        this results in a build which contains more changes than the
        buildmaster thinks it has (possibly resulting in the wrong
        person taking the blame for any problems that result), at worst
        it can result in an incoherent set of sources (splitting a
        non-atomic commit) which may not build at all.

        @type retry: tuple of ints (delay, repeats) (or None)
        @param retry: if provided, VC update failures are re-attempted up
                      to REPEATS times, with DELAY seconds between each
                      attempt. Some users have slaves with poor connectivity
                      to their VC repository, and they say that up to 80% of
                      their build failures are due to transient network
                      failures that could be handled by simply retrying a
                      couple times.

        """

        LoggingBuildStep.__init__(self, **kwargs)
        self.addFactoryArguments(workdir=workdir,
                                 mode=mode,
                                 alwaysUseLatest=alwaysUseLatest,
                                 timeout=timeout,
                                 retry=retry,
                                 )

        assert mode in ("update", "copy", "clobber", "export")
        if retry:
            delay, repeats = retry
            assert isinstance(repeats, int)
            assert repeats > 0
        # self.args is the dictionary eventually handed to the remote
        # command; subclasses add their VC-specific keys in startVC()
        self.args = {'mode': mode,
                     'workdir': workdir,
                     'timeout': timeout,
                     'retry': retry,
                     'patch': None, # set during .start
                     }
        self.alwaysUseLatest = alwaysUseLatest

        # Compute defaults for descriptions:
        description = ["updating"]
        descriptionDone = ["update"]
        if mode == "clobber":
            description = ["checkout"]
            # in-progress and done descriptions are intentionally the same
            # here, to keep the status text short
            descriptionDone = ["checkout"]
        elif mode == "export":
            description = ["exporting"]
            descriptionDone = ["export"]
        self.description = description
        self.descriptionDone = descriptionDone

    def setDefaultWorkdir(self, workdir):
        # the Build supplies a default workdir; keep an explicitly
        # configured one if present
        self.args['workdir'] = self.args['workdir'] or workdir

    def describe(self, done=False):
        if done:
            return self.descriptionDone
        return self.description

    def computeSourceRevision(self, changes):
        """Each subclass must implement this method to do something more
        precise than -rHEAD every time. For version control systems that use
        repository-wide change numbers (SVN, P4), this can simply take the
        maximum such number from all the changes involved in this build. For
        systems that do not (CVS), it needs to create a timestamp based upon
        the latest Change, the Build's treeStableTimer, and an optional
        self.checkoutDelay value."""
        return None

    def start(self):
        if self.notReally:
            log.msg("faking %s checkout/update" % self.name)
            self.step_status.setText(["fake", self.name, "successful"])
            self.addCompleteLog("log",
                                "Faked %s checkout/update 'successful'\n" \
                                % self.name)
            return SKIPPED

        # what source stamp would this build like to use?
        s = self.build.getSourceStamp()
        # if branch is None, then use the Step's "default" branch
        branch = s.branch or self.branch
        # if revision is None, use the latest sources (-rHEAD)
        revision = s.revision
        if not revision and not self.alwaysUseLatest:
            revision = self.computeSourceRevision(s.changes)
        # if patch is None, then do not patch the tree after checkout

        # 'patch' is None or a tuple of (patchlevel, diff)
        patch = s.patch
        if patch:
            self.addCompleteLog("patch", patch[1])

        self.startVC(branch, revision, patch)

    def commandComplete(self, cmd):
        # publish the revision the slave actually checked out (if any) as
        # the 'got_revision' build property
        if cmd.updates.has_key("got_revision"):
            got_revision = cmd.updates["got_revision"][-1]
            if got_revision is not None:
                self.setProperty("got_revision", str(got_revision), "Source")
+
+
+
class CVS(Source):
    """I do CVS checkout/update operations.

    Note: if you are doing anonymous/pserver CVS operations, you will need
    to manually do a 'cvs login' on each buildslave before the slave has any
    hope of success. XXX: fix then, take a cvs password as an argument and
    figure out how to do a 'cvs login' on each build
    """

    name = "cvs"

    #progressMetrics = ('output',)
    #
    # additional things to track: update gives one stderr line per directory
    # (starting with 'cvs server: Updating ') (and is fairly stable if files
    # is empty), export gives one line per directory (starting with 'cvs
    # export: Updating ') and another line per file (starting with U). Would
    # be nice to track these, requires grepping LogFile data for lines,
    # parsing each line. Might be handy to have a hook in LogFile that gets
    # called with each complete line.

    def __init__(self, cvsroot, cvsmodule,
                 global_options=None, branch=None, checkoutDelay=None,
                 login=None,
                 **kwargs):

        """
        @type cvsroot: string
        @param cvsroot: CVS Repository from which the source tree should
                        be obtained. '/home/warner/Repository' for local
                        or NFS-reachable repositories,
                        ':pserver:anon@foo.com:/cvs' for anonymous CVS,
                        'user@host.com:/cvs' for non-anonymous CVS or
                        CVS over ssh. Lots of possibilities, check the
                        CVS documentation for more.

        @type cvsmodule: string
        @param cvsmodule: subdirectory of CVS repository that should be
                          retrieved

        @type login: string or None
        @param login: if not None, a string which will be provided as a
                      password to the 'cvs login' command, used when a
                      :pserver: method is used to access the repository.
                      This login is only needed once, but must be run
                      each time (just before the CVS operation) because
                      there is no way for the buildslave to tell whether
                      it was previously performed or not.

        @type branch: string
        @param branch: the default branch name, will be used in a '-r'
                       argument to specify which branch of the source tree
                       should be used for this checkout. Defaults to None,
                       which means to use 'HEAD'.

        @type checkoutDelay: int or None
        @param checkoutDelay: if not None, the number of seconds to put
                              between the last known Change and the
                              timestamp given to the -D argument. This
                              defaults to exactly half of the parent
                              Build's .treeStableTimer, but it could be
                              set to something else if your CVS change
                              notification has particularly weird
                              latency characteristics.

        @type global_options: list of strings
        @param global_options: these arguments are inserted in the cvs
                               command line, before the
                               'checkout'/'update' command word. See
                               'cvs --help-options' for a list of what
                               may be accepted here. ['-r'] will make
                               the checked out files read only. ['-r',
                               '-R'] will also assume the repository is
                               read-only (I assume this means it won't
                               use locks to insure atomic access to the
                               ,v files). Defaults to an empty list."""

        # use a None sentinel instead of a mutable [] default: a list
        # default would be a single object shared by every CVS instance
        if global_options is None:
            global_options = []

        self.checkoutDelay = checkoutDelay
        self.branch = branch

        Source.__init__(self, **kwargs)
        self.addFactoryArguments(cvsroot=cvsroot,
                                 cvsmodule=cvsmodule,
                                 global_options=global_options,
                                 branch=branch,
                                 checkoutDelay=checkoutDelay,
                                 login=login,
                                 )

        self.args.update({'cvsroot': cvsroot,
                          'cvsmodule': cvsmodule,
                          'global_options': global_options,
                          'login': login,
                          })

    def computeSourceRevision(self, changes):
        # CVS has no repository-wide revision numbers: compute a timestamp
        # (RFC 2822 format, for 'cvs update -D') that covers all Changes
        if not changes:
            return None
        lastChange = max([c.when for c in changes])
        if self.checkoutDelay is not None:
            when = lastChange + self.checkoutDelay
        else:
            # default: halfway between the last commit and the moment the
            # build request was submitted
            lastSubmit = max([r.submittedAt for r in self.build.requests])
            when = (lastChange + lastSubmit) / 2
        return formatdate(when)

    def startVC(self, branch, revision, patch):
        if self.slaveVersionIsOlderThan("cvs", "1.39"):
            # the slave doesn't know to avoid re-using the same sourcedir
            # when the branch changes. We have no way of knowing which branch
            # the last build used, so if we're using a non-default branch and
            # either 'update' or 'copy' modes, it is safer to refuse to
            # build, and tell the user they need to upgrade the buildslave.
            if (branch != self.branch
                and self.args['mode'] in ("update", "copy")):
                m = ("This buildslave (%s) does not know about multiple "
                     "branches, and using mode=%s would probably build the "
                     "wrong tree. "
                     "Refusing to build. Please upgrade the buildslave to "
                     "buildbot-0.7.0 or newer." % (self.build.slavename,
                                                   self.args['mode']))
                log.msg(m)
                raise BuildSlaveTooOldError(m)

        if branch is None:
            branch = "HEAD"
        self.args['branch'] = branch
        self.args['revision'] = revision
        self.args['patch'] = patch

        if self.args['branch'] == "HEAD" and self.args['revision']:
            # special case. 'cvs update -r HEAD -D today' gives no files
            # TODO: figure out why, see if it applies to -r BRANCH
            self.args['branch'] = None

        # deal with old slaves
        warnings = []
        slavever = self.slaveVersion("cvs", "old")

        if slavever == "old":
            # 0.5.0
            if self.args['mode'] == "export":
                self.args['export'] = 1
            elif self.args['mode'] == "clobber":
                self.args['clobber'] = 1
            elif self.args['mode'] == "copy":
                self.args['copydir'] = "source"
            self.args['tag'] = self.args['branch']
            assert not self.args['patch'] # 0.5.0 slave can't do patch

        cmd = LoggedRemoteCommand("cvs", self.args)
        self.startCommand(cmd, warnings)
+
+
class SVN(Source):
    """I perform Subversion checkout/update operations."""

    name = 'svn'

    def __init__(self, svnurl=None, baseURL=None, defaultBranch=None,
                 directory=None, username=None, password=None, **kwargs):
        """
        @type svnurl: string
        @param svnurl: the URL which points to the Subversion server,
                       combining the access method (HTTP, ssh, local file),
                       the repository host/port, the repository path, the
                       sub-tree within the repository, and the branch to
                       check out. Using C{svnurl} does not enable builds of
                       alternate branches: use C{baseURL} to enable this.
                       Use exactly one of C{svnurl} and C{baseURL}.

        @param baseURL: if branches are enabled, this is the base URL to
                        which a branch name will be appended. It should
                        probably end in a slash. Use exactly one of
                        C{svnurl} and C{baseURL}.

        @param defaultBranch: if branches are enabled, this is the branch
                              to use if the Build does not specify one
                              explicitly. It will simply be appended
                              to C{baseURL} and the result handed to
                              the SVN command.

        @param username: username to pass to svn's --username
        @param password: password to pass to svn's --password
        """

        if 'workdir' not in kwargs and directory is not None:
            # deal with old configs
            warn("Please use workdir=, not directory=", DeprecationWarning)
            kwargs['workdir'] = directory

        # validate before doing anything else (fail fast, before mutating
        # any state); the docstring -- and the other branch-aware steps
        # such as Darcs and Bzr -- require exactly one of svnurl/baseURL,
        # so giving both is rejected as well as giving neither
        if (not svnurl and not baseURL) or (svnurl and baseURL):
            raise ValueError("you must use exactly one of svnurl and baseURL")

        self.svnurl = svnurl
        self.baseURL = baseURL
        self.branch = defaultBranch
        self.username = username
        self.password = password

        Source.__init__(self, **kwargs)
        self.addFactoryArguments(svnurl=svnurl,
                                 baseURL=baseURL,
                                 defaultBranch=defaultBranch,
                                 directory=directory,
                                 username=username,
                                 password=password,
                                 )

    def computeSourceRevision(self, changes):
        # a revision of None means "unknown", in which case we cannot pin
        # the checkout and must fall back to -rHEAD
        if not changes or None in [c.revision for c in changes]:
            return None
        lastChange = max([int(c.revision) for c in changes])
        return lastChange

    def startVC(self, branch, revision, patch):

        # handle old slaves
        warnings = []
        slavever = self.slaveVersion("svn", "old")
        if not slavever:
            m = "slave does not have the 'svn' command"
            raise BuildSlaveTooOldError(m)

        if self.slaveVersionIsOlderThan("svn", "1.39"):
            # the slave doesn't know to avoid re-using the same sourcedir
            # when the branch changes. We have no way of knowing which branch
            # the last build used, so if we're using a non-default branch and
            # either 'update' or 'copy' modes, it is safer to refuse to
            # build, and tell the user they need to upgrade the buildslave.
            if (branch != self.branch
                and self.args['mode'] in ("update", "copy")):
                m = ("This buildslave (%s) does not know about multiple "
                     "branches, and using mode=%s would probably build the "
                     "wrong tree. "
                     "Refusing to build. Please upgrade the buildslave to "
                     "buildbot-0.7.0 or newer." % (self.build.slavename,
                                                   self.args['mode']))
                raise BuildSlaveTooOldError(m)

        if slavever == "old":
            # 0.5.0 compatibility
            if self.args['mode'] in ("clobber", "copy"):
                # TODO: use some shell commands to make up for the
                # deficiency, by blowing away the old directory first (thus
                # forcing a full checkout)
                warnings.append("WARNING: this slave can only do SVN updates"
                                ", not mode=%s\n" % self.args['mode'])
                log.msg("WARNING: this slave only does mode=update")
            if self.args['mode'] == "export":
                raise BuildSlaveTooOldError("old slave does not have "
                                            "mode=export")
            self.args['directory'] = self.args['workdir']
            if revision is not None:
                # 0.5.0 can only do HEAD. We have no way of knowing whether
                # the requested revision is HEAD or not, and for
                # slowly-changing trees this will probably do the right
                # thing, so let it pass with a warning
                m = ("WARNING: old slave can only update to HEAD, not "
                     "revision=%s" % revision)
                log.msg(m)
                warnings.append(m + "\n")
            revision = "HEAD" # interprets this key differently
            if patch:
                raise BuildSlaveTooOldError("old slave can't do patch")

        if self.svnurl:
            assert not branch # we need baseURL= to use branches
            self.args['svnurl'] = self.svnurl
        else:
            self.args['svnurl'] = self.baseURL + branch
        self.args['revision'] = revision
        self.args['patch'] = patch

        if self.username is not None or self.password is not None:
            if self.slaveVersionIsOlderThan("svn", "2.8"):
                m = ("This buildslave (%s) does not support svn usernames "
                     "and passwords. "
                     "Refusing to build. Please upgrade the buildslave to "
                     "buildbot-0.7.10 or newer." % (self.build.slavename,))
                raise BuildSlaveTooOldError(m)
            if self.username is not None: self.args['username'] = self.username
            if self.password is not None: self.args['password'] = self.password

        revstuff = []
        if branch is not None and branch != self.branch:
            revstuff.append("[branch]")
        if revision is not None:
            revstuff.append("r%s" % revision)
        if patch is not None:
            revstuff.append("[patch]")
        self.description.extend(revstuff)
        self.descriptionDone.extend(revstuff)

        cmd = LoggedRemoteCommand("svn", self.args)
        self.startCommand(cmd, warnings)
+
+
class Darcs(Source):
    """Check out a source tree from a Darcs repository at 'repourl'.

    Darcs has no concept of file modes. This means the eXecute-bit will be
    cleared on all source files. As a result, you may need to invoke
    configuration scripts with something like:

    C{s(step.Configure, command=['/bin/sh', './configure'])}
    """

    name = "darcs"

    def __init__(self, repourl=None, baseURL=None, defaultBranch=None,
                 **kwargs):
        """
        @type repourl: string
        @param repourl: the URL which points at the Darcs repository. This
                        is used as the default branch. Using C{repourl} does
                        not enable builds of alternate branches: use
                        C{baseURL} to enable this. Use either C{repourl} or
                        C{baseURL}, not both.

        @param baseURL: if branches are enabled, this is the base URL to
                        which a branch name will be appended. It should
                        probably end in a slash. Use exactly one of
                        C{repourl} and C{baseURL}.

        @param defaultBranch: if branches are enabled, this is the branch
                              to use if the Build does not specify one
                              explicitly. It will simply be appended to
                              C{baseURL} and the result handed to the
                              'darcs pull' command.
        """
        self.repourl = repourl
        self.baseURL = baseURL
        self.branch = defaultBranch
        Source.__init__(self, **kwargs)
        self.addFactoryArguments(repourl=repourl,
                                 baseURL=baseURL,
                                 defaultBranch=defaultBranch,
                                 )
        # self.args['mode'] was validated and stored by Source.__init__
        assert self.args['mode'] != "export", \
               "Darcs does not have an 'export' mode"
        if (not repourl and not baseURL) or (repourl and baseURL):
            raise ValueError("you must provide exactly one of repourl and"
                             " baseURL")

    def startVC(self, branch, revision, patch):
        slavever = self.slaveVersion("darcs")
        if not slavever:
            m = "slave is too old, does not know about darcs"
            raise BuildSlaveTooOldError(m)

        if self.slaveVersionIsOlderThan("darcs", "1.39"):
            if revision:
                # TODO: revisit this once we implement computeSourceRevision
                m = "0.6.6 slaves can't handle args['revision']"
                raise BuildSlaveTooOldError(m)

            # the slave doesn't know to avoid re-using the same sourcedir
            # when the branch changes. We have no way of knowing which branch
            # the last build used, so if we're using a non-default branch and
            # either 'update' or 'copy' modes, it is safer to refuse to
            # build, and tell the user they need to upgrade the buildslave.
            if (branch != self.branch
                and self.args['mode'] in ("update", "copy")):
                m = ("This buildslave (%s) does not know about multiple "
                     "branches, and using mode=%s would probably build the "
                     "wrong tree. "
                     "Refusing to build. Please upgrade the buildslave to "
                     "buildbot-0.7.0 or newer." % (self.build.slavename,
                                                   self.args['mode']))
                raise BuildSlaveTooOldError(m)

        if self.repourl:
            assert not branch # we need baseURL= to use branches
            self.args['repourl'] = self.repourl
        else:
            self.args['repourl'] = self.baseURL + branch
        self.args['revision'] = revision
        self.args['patch'] = patch

        # annotate the status text when building a non-default branch
        revstuff = []
        if branch is not None and branch != self.branch:
            revstuff.append("[branch]")
        self.description.extend(revstuff)
        self.descriptionDone.extend(revstuff)

        cmd = LoggedRemoteCommand("darcs", self.args)
        self.startCommand(cmd)
+
+
class Git(Source):
    """Check out a source tree from a git repository 'repourl'."""

    name = "git"

    def __init__(self, repourl, branch="master", **kwargs):
        """
        @type repourl: string
        @param repourl: the URL which points at the git repository

        @type branch: string
        @param branch: The branch or tag to check out by default. If
                       a build specifies a different branch, it will
                       be used instead of this.
        """
        Source.__init__(self, **kwargs)
        self.addFactoryArguments(repourl=repourl, branch=branch)
        # repository coordinates handed to the remote 'git' command
        self.args['repourl'] = repourl
        self.args['branch'] = branch

    def computeSourceRevision(self, changes):
        # without ancestry information, trust the most recent Change
        if changes:
            return changes[-1].revision
        return None

    def startVC(self, branch, revision, patch):
        if branch is not None:
            self.args['branch'] = branch
        self.args['revision'] = revision
        self.args['patch'] = patch

        if not self.slaveVersion("git"):
            raise BuildSlaveTooOldError("slave is too old, does not know "
                                        "about git")
        self.startCommand(LoggedRemoteCommand("git", self.args))
+
+
class Arch(Source):
    """Check out a source tree from an Arch repository named 'archive'
    available at 'url'. 'version' specifies which version number (development
    line) will be used for the checkout: this is mostly equivalent to a
    branch name. This version uses the 'tla' tool to do the checkout, to use
    'baz' see L{Bazaar} instead.
    """

    name = "arch"
    # TODO: slaves >0.6.6 will accept args['build-config'], so use it

    def __init__(self, url, version, archive=None, **kwargs):
        """
        @type url: string
        @param url: the Arch coordinates of the repository. This is
                    typically an http:// URL, but could also be the absolute
                    pathname of a local directory instead.

        @type version: string
        @param version: the category--branch--version to check out. This is
                        the default branch. If a build specifies a different
                        branch, it will be used instead of this.

        @type archive: string
        @param archive: The archive name. If provided, it must match the one
                        that comes from the repository. If not, the
                        repository's default will be used.
        """
        self.branch = version
        Source.__init__(self, **kwargs)
        self.addFactoryArguments(url=url,
                                 version=version,
                                 archive=archive,
                                 )
        self.args.update({'url': url,
                          'archive': archive,
                          })

    def computeSourceRevision(self, changes):
        # in Arch, fully-qualified revision numbers look like:
        # arch@buildbot.sourceforge.net--2004/buildbot--dev--0--patch-104
        # For any given builder, all of this is fixed except the patch-104.
        # The Change might have any part of the fully-qualified string, so we
        # just look for the last part. We return the "patch-NN" string.
        if not changes:
            return None
        lastChange = None
        for c in changes:
            if not c.revision:
                continue
            if c.revision.endswith("--base-0"):
                rev = 0
            else:
                i = c.revision.rindex("patch")
                rev = int(c.revision[i+len("patch-"):])
            # NOTE: on the first iteration lastChange is None; this relies
            # on Python 2 ordering None below any int (would raise
            # TypeError on Python 3)
            lastChange = max(lastChange, rev)
        if lastChange is None:
            return None
        if lastChange == 0:
            return "base-0"
        return "patch-%d" % lastChange

    def checkSlaveVersion(self, cmd, branch):
        """Check that the buildslave can run 'cmd' safely for 'branch'.

        Returns a list of warning strings to attach to the step; raises
        BuildSlaveTooOldError if the checkout cannot be performed safely."""
        warnings = []
        slavever = self.slaveVersion(cmd)
        if not slavever:
            m = "slave is too old, does not know about %s" % cmd
            raise BuildSlaveTooOldError(m)

        # slave 1.28 and later understand 'revision'
        if self.slaveVersionIsOlderThan(cmd, "1.28"):
            if not self.alwaysUseLatest:
                # we don't know whether our requested revision is the latest
                # or not. If the tree does not change very quickly, this will
                # probably build the right thing, so emit a warning rather
                # than refuse to build at all
                m = "WARNING, buildslave is too old to use a revision"
                log.msg(m)
                warnings.append(m + "\n")

        if self.slaveVersionIsOlderThan(cmd, "1.39"):
            # the slave doesn't know to avoid re-using the same sourcedir
            # when the branch changes. We have no way of knowing which branch
            # the last build used, so if we're using a non-default branch and
            # either 'update' or 'copy' modes, it is safer to refuse to
            # build, and tell the user they need to upgrade the buildslave.
            if (branch != self.branch
                and self.args['mode'] in ("update", "copy")):
                m = ("This buildslave (%s) does not know about multiple "
                     "branches, and using mode=%s would probably build the "
                     "wrong tree. "
                     "Refusing to build. Please upgrade the buildslave to "
                     "buildbot-0.7.0 or newer." % (self.build.slavename,
                                                   self.args['mode']))
                log.msg(m)
                raise BuildSlaveTooOldError(m)

        return warnings

    def startVC(self, branch, revision, patch):
        self.args['version'] = branch
        self.args['revision'] = revision
        self.args['patch'] = patch
        warnings = self.checkSlaveVersion("arch", branch)

        revstuff = []
        if branch is not None and branch != self.branch:
            revstuff.append("[branch]")
        if revision is not None:
            revstuff.append("patch%s" % revision)
        self.description.extend(revstuff)
        self.descriptionDone.extend(revstuff)

        cmd = LoggedRemoteCommand("arch", self.args)
        self.startCommand(cmd, warnings)
+
+
class Bazaar(Arch):
    """Bazaar is an alternative client for Arch repositories. baz is mostly
    compatible with tla, but archive registration is slightly different."""

    # TODO: slaves >0.6.6 will accept args['build-config'], so use it

    def __init__(self, url, version, archive, **kwargs):
        """
        @type url: string
        @param url: the Arch coordinates of the repository. This is
                    typically an http:// URL, but could also be the absolute
                    pathname of a local directory instead.

        @type version: string
        @param version: the category--branch--version to check out

        @type archive: string
        @param archive: The archive name (required). This must always match
                        the one that comes from the repository, otherwise the
                        buildslave will attempt to get sources from the wrong
                        archive.
        """
        # 'version' doubles as the default branch name
        self.branch = version
        Source.__init__(self, **kwargs)
        self.addFactoryArguments(url=url, version=version, archive=archive)
        self.args['url'] = url
        self.args['archive'] = archive

    def startVC(self, branch, revision, patch):
        self.args['version'] = branch
        self.args['revision'] = revision
        self.args['patch'] = patch
        # checkSlaveVersion is inherited from Arch; it raises if the slave
        # is too old and otherwise returns warning strings for the step
        warnings = self.checkSlaveVersion("bazaar", branch)

        extra = []
        if branch is not None and branch != self.branch:
            extra.append("[branch]")
        if revision is not None:
            extra.append("patch%s" % revision)
        self.description.extend(extra)
        self.descriptionDone.extend(extra)

        self.startCommand(LoggedRemoteCommand("bazaar", self.args), warnings)
+
class Bzr(Source):
    """Check out a source tree from a bzr (Bazaar) repository at 'repourl'.

    """

    name = "bzr"

    def __init__(self, repourl=None, baseURL=None, defaultBranch=None,
                 **kwargs):
        """
        @type repourl: string
        @param repourl: the URL which points at the bzr repository. This
                        is used as the default branch. Using C{repourl} does
                        not enable builds of alternate branches: use
                        C{baseURL} to enable this. Use either C{repourl} or
                        C{baseURL}, not both.

        @param baseURL: if branches are enabled, this is the base URL to
                        which a branch name will be appended. It should
                        probably end in a slash. Use exactly one of
                        C{repourl} and C{baseURL}.

        @param defaultBranch: if branches are enabled, this is the branch
                              to use if the Build does not specify one
                              explicitly. It will simply be appended to
                              C{baseURL} and the result handed to the
                              'bzr checkout pull' command.
        """
        self.repourl = repourl
        self.baseURL = baseURL
        self.branch = defaultBranch
        Source.__init__(self, **kwargs)
        self.addFactoryArguments(repourl=repourl,
                                 baseURL=baseURL,
                                 defaultBranch=defaultBranch,
                                 )
        if (not repourl and not baseURL) or (repourl and baseURL):
            raise ValueError("you must provide exactly one of repourl and"
                             " baseURL")

    def computeSourceRevision(self, changes):
        if not changes:
            return None
        # a Change with revision=None means "unknown"; we cannot compute a
        # meaningful revision number from it, so fall back to -rHEAD
        # (same guard SVN.computeSourceRevision uses), instead of crashing
        # on int(None)
        if None in [c.revision for c in changes]:
            return None
        lastChange = max([int(c.revision) for c in changes])
        return lastChange

    def startVC(self, branch, revision, patch):
        slavever = self.slaveVersion("bzr")
        if not slavever:
            m = "slave is too old, does not know about bzr"
            raise BuildSlaveTooOldError(m)

        if self.repourl:
            assert not branch # we need baseURL= to use branches
            self.args['repourl'] = self.repourl
        else:
            self.args['repourl'] = self.baseURL + branch
        self.args['revision'] = revision
        self.args['patch'] = patch

        # annotate the status text when building a non-default branch
        revstuff = []
        if branch is not None and branch != self.branch:
            revstuff.append("[branch]")
        self.description.extend(revstuff)
        self.descriptionDone.extend(revstuff)

        cmd = LoggedRemoteCommand("bzr", self.args)
        self.startCommand(cmd)
+
+
class Mercurial(Source):
    """Check out a source tree from a mercurial repository 'repourl'."""

    name = "hg"

    def __init__(self, repourl=None, baseURL=None, defaultBranch=None,
                 branchType='dirname', **kwargs):
        """
        @type repourl: string
        @param repourl: the URL which points at the Mercurial repository.
                        This uses the 'default' branch unless defaultBranch is
                        specified below and the C{branchType} is set to
                        'inrepo'. It is an error to specify a branch without
                        setting the C{branchType} to 'inrepo'.

        @param baseURL: if 'dirname' branches are enabled, this is the base URL
                        to which a branch name will be appended. It should
                        probably end in a slash. Use exactly one of C{repourl}
                        and C{baseURL}.

        @param defaultBranch: if branches are enabled, this is the branch
                              to use if the Build does not specify one
                              explicitly.
                              For 'dirname' branches, It will simply be
                              appended to C{baseURL} and the result handed to
                              the 'hg update' command.
                              For 'inrepo' branches, this specifies the named
                              revision to which the tree will update after a
                              clone.

        @param branchType: either 'dirname' or 'inrepo' depending on whether
                           the branch name should be appended to the C{baseURL}
                           or the branch is a mercurial named branch and can be
                           found within the C{repourl}
        """
        self.repourl = repourl
        self.baseURL = baseURL
        self.branch = defaultBranch
        self.branchType = branchType
        Source.__init__(self, **kwargs)
        self.addFactoryArguments(repourl=repourl,
                                 baseURL=baseURL,
                                 defaultBranch=defaultBranch,
                                 branchType=branchType,
                                 )
        if (not repourl and not baseURL) or (repourl and baseURL):
            raise ValueError("you must provide exactly one of repourl and"
                             " baseURL")

    def startVC(self, branch, revision, patch):
        slavever = self.slaveVersion("hg")
        if not slavever:
            raise BuildSlaveTooOldError("slave is too old, does not know "
                                        "about hg")

        if self.repourl:
            # we need baseURL= to use dirname branches
            assert self.branchType == 'inrepo' or not branch
            self.args['repourl'] = self.repourl
            if branch:
                # 'inrepo' branch: passed through to the remote command
                self.args['branch'] = branch
        else:
            # 'dirname' branch: becomes part of the repository URL
            self.args['repourl'] = self.baseURL + branch
        self.args['revision'] = revision
        self.args['patch'] = patch

        # annotate the status text when building a non-default branch
        revstuff = []
        if branch is not None and branch != self.branch:
            revstuff.append("[branch]")
        self.description.extend(revstuff)
        self.descriptionDone.extend(revstuff)

        cmd = LoggedRemoteCommand("hg", self.args)
        self.startCommand(cmd)

    def computeSourceRevision(self, changes):
        if not changes:
            return None
        # without knowing the revision ancestry graph, we can't sort the
        # changes at all. So for now, assume they were given to us in sorted
        # order, and just pay attention to the last one. See ticket #103 for
        # more details.
        if len(changes) > 1:
            log.msg("Mercurial.computeSourceRevision: warning: "
                    "there are %d changes here, assuming the last one is "
                    "the most recent" % len(changes))
        return changes[-1].revision
+
+
+class P4(Source):
+ """ P4 is a class for accessing perforce revision control"""
+ name = "p4"
+
+ def __init__(self, p4base, defaultBranch=None, p4port=None, p4user=None,
+ p4passwd=None, p4extra_views=[],
+ p4client='buildbot_%(slave)s_%(builder)s', **kwargs):
+ """
+ @type p4base: string
+ @param p4base: A view into a perforce depot, typically
+ "//depot/proj/"
+
+ @type defaultBranch: string
+ @param defaultBranch: Identify a branch to build by default. Perforce
+ is a view based branching system. So, the branch
+ is normally the name after the base. For example,
+ branch=1.0 is view=//depot/proj/1.0/...
+ branch=1.1 is view=//depot/proj/1.1/...
+
+ @type p4port: string
+ @param p4port: Specify the perforce server to connect to in the format
+ <host>:<port>. Example "perforce.example.com:1666"
+
+ @type p4user: string
+ @param p4user: The perforce user to run the command as.
+
+ @type p4passwd: string
+ @param p4passwd: The password for the perforce user.
+
+ @type p4extra_views: list of tuples
+ @param p4extra_views: Extra views to be added to
+ the client that is being used.
+
+ @type p4client: string
+ @param p4client: The perforce client to use for this buildslave.
+ """
+
+ self.branch = defaultBranch
+ Source.__init__(self, **kwargs)
+ self.addFactoryArguments(p4base=p4base,
+ defaultBranch=defaultBranch,
+ p4port=p4port,
+ p4user=p4user,
+ p4passwd=p4passwd,
+ p4extra_views=p4extra_views,
+ p4client=p4client,
+ )
+ self.args['p4port'] = p4port
+ self.args['p4user'] = p4user
+ self.args['p4passwd'] = p4passwd
+ self.args['p4base'] = p4base
+ self.args['p4extra_views'] = p4extra_views
+ self.p4client = p4client
+
+ def setBuild(self, build):
+ Source.setBuild(self, build)
+ self.args['p4client'] = self.p4client % {
+ 'slave': build.slavename,
+ 'builder': build.builder.name,
+ }
+
+ def computeSourceRevision(self, changes):
+ if not changes:
+ return None
+ lastChange = max([int(c.revision) for c in changes])
+ return lastChange
+
+ def startVC(self, branch, revision, patch):
+ slavever = self.slaveVersion("p4")
+ assert slavever, "slave is too old, does not know about p4"
+ args = dict(self.args)
+ args['branch'] = branch or self.branch
+ args['revision'] = revision
+ args['patch'] = patch
+ cmd = LoggedRemoteCommand("p4", args)
+ self.startCommand(cmd)
+
+class P4Sync(Source):
+ """This is a partial solution for using a P4 source repository. You are
+ required to manually set up each build slave with a useful P4
+ environment, which means setting various per-slave environment variables,
+ and creating a P4 client specification which maps the right files into
+ the slave's working directory. Once you have done that, this step merely
+ performs a 'p4 sync' to update that workspace with the newest files.
+
+ Each slave needs the following environment:
+
+ - PATH: the 'p4' binary must be on the slave's PATH
+ - P4USER: each slave needs a distinct user account
+ - P4CLIENT: each slave needs a distinct client specification
+
+ You should use 'p4 client' (?) to set up a client view spec which maps
+ the desired files into $SLAVEBASE/$BUILDERBASE/source .
+ """
+
+ name = "p4sync"
+
+ def __init__(self, p4port, p4user, p4passwd, p4client, **kwargs):
+ assert kwargs['mode'] == "copy", "P4Sync can only be used in mode=copy"
+ self.branch = None
+ Source.__init__(self, **kwargs)
+ self.addFactoryArguments(p4port=p4port,
+ p4user=p4user,
+ p4passwd=p4passwd,
+ p4client=p4client,
+ )
+ self.args['p4port'] = p4port
+ self.args['p4user'] = p4user
+ self.args['p4passwd'] = p4passwd
+ self.args['p4client'] = p4client
+
+ def computeSourceRevision(self, changes):
+ if not changes:
+ return None
+ lastChange = max([int(c.revision) for c in changes])
+ return lastChange
+
+ def startVC(self, branch, revision, patch):
+ slavever = self.slaveVersion("p4sync")
+ assert slavever, "slave is too old, does not know about p4"
+ cmd = LoggedRemoteCommand("p4sync", self.args)
+ self.startCommand(cmd)
+
+class Monotone(Source):
+ """Check out a revision from a monotone server at 'server_addr',
+ branch 'branch'. 'revision' specifies which revision id to check
+ out.
+
+ This step will first create a local database, if necessary, and then pull
+ the contents of the server into the database. Then it will do the
+ checkout/update from this database."""
+
+ name = "monotone"
+
+ def __init__(self, server_addr, branch, db_path="monotone.db",
+ monotone="monotone",
+ **kwargs):
+ Source.__init__(self, **kwargs)
+ self.addFactoryArguments(server_addr=server_addr,
+ branch=branch,
+ db_path=db_path,
+ monotone=monotone,
+ )
+ self.args.update({"server_addr": server_addr,
+ "branch": branch,
+ "db_path": db_path,
+ "monotone": monotone})
+
+ def computeSourceRevision(self, changes):
+ if not changes:
+ return None
+ return changes[-1].revision
+
+ def startVC(self):
+ slavever = self.slaveVersion("monotone")
+ assert slavever, "slave is too old, does not know about monotone"
+ cmd = LoggedRemoteCommand("monotone", self.args)
+ self.startCommand(cmd)
+
diff --git a/buildbot/buildbot/steps/transfer.py b/buildbot/buildbot/steps/transfer.py
new file mode 100644
index 0000000..3e23f88
--- /dev/null
+++ b/buildbot/buildbot/steps/transfer.py
@@ -0,0 +1,465 @@
+# -*- test-case-name: buildbot.test.test_transfer -*-
+
+import os.path
+from twisted.internet import reactor
+from twisted.spread import pb
+from twisted.python import log
+from buildbot.process.buildstep import RemoteCommand, BuildStep
+from buildbot.process.buildstep import SUCCESS, FAILURE
+from buildbot.interfaces import BuildSlaveTooOldError
+
+
+class _FileWriter(pb.Referenceable):
+ """
+ Helper class that acts as a file-object with write access
+ """
+
+ def __init__(self, destfile, maxsize, mode):
+ # Create missing directories.
+ destfile = os.path.abspath(destfile)
+ dirname = os.path.dirname(destfile)
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+
+ self.destfile = destfile
+ self.fp = open(destfile, "wb")
+ if mode is not None:
+ os.chmod(destfile, mode)
+ self.remaining = maxsize
+
+ def remote_write(self, data):
+ """
+ Called from remote slave to write L{data} to L{fp} within boundaries
+ of L{maxsize}
+
+ @type data: C{string}
+ @param data: String of data to write
+ """
+ if self.remaining is not None:
+ if len(data) > self.remaining:
+ data = data[:self.remaining]
+ self.fp.write(data)
+ self.remaining = self.remaining - len(data)
+ else:
+ self.fp.write(data)
+
+ def remote_close(self):
+ """
+ Called by remote slave to state that no more data will be transferred
+ """
+ self.fp.close()
+ self.fp = None
+
+ def __del__(self):
+ # unclean shutdown, the file is probably truncated, so delete it
+ # altogether rather than deliver a corrupted file
+ fp = getattr(self, "fp", None)
+ if fp:
+ fp.close()
+ os.unlink(self.destfile)
+
+
+class _DirectoryWriter(pb.Referenceable):
+ """
+ Helper class that acts as a directory-object with write access
+ """
+
+ def __init__(self, destroot, maxsize, mode):
+ self.destroot = destroot
+ # Create missing directories.
+ self.destroot = os.path.abspath(self.destroot)
+ if not os.path.exists(self.destroot):
+ os.makedirs(self.destroot)
+
+ self.fp = None
+ self.mode = mode
+ self.maxsize = maxsize
+
+ def remote_createdir(self, dirname):
+ # This function is needed to transfer empty directories.
+ dirname = os.path.join(self.destroot, dirname)
+ dirname = os.path.abspath(dirname)
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+
+ def remote_open(self, destfile):
+ # Create missing directories.
+ destfile = os.path.join(self.destroot, destfile)
+ destfile = os.path.abspath(destfile)
+ dirname = os.path.dirname(destfile)
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+
+ self.fp = open(destfile, "wb")
+ if self.mode is not None:
+ os.chmod(destfile, self.mode)
+ self.remaining = self.maxsize
+
+ def remote_write(self, data):
+ """
+ Called from remote slave to write L{data} to L{fp} within boundaries
+ of L{maxsize}
+
+ @type data: C{string}
+ @param data: String of data to write
+ """
+ if self.remaining is not None:
+ if len(data) > self.remaining:
+ data = data[:self.remaining]
+ self.fp.write(data)
+ self.remaining = self.remaining - len(data)
+ else:
+ self.fp.write(data)
+
+ def remote_close(self):
+ """
+ Called by remote slave to state that no more data will be transferred
+ """
+ if self.fp:
+ self.fp.close()
+ self.fp = None
+
+ def __del__(self):
+ # unclean shutdown, the file is probably truncated, so delete it
+ # altogether rather than deliver a corrupted file
+ fp = getattr(self, "fp", None)
+ if fp:
+ fp.close()
+
+
+class StatusRemoteCommand(RemoteCommand):
+ def __init__(self, remote_command, args):
+ RemoteCommand.__init__(self, remote_command, args)
+
+ self.rc = None
+ self.stderr = ''
+
+ def remoteUpdate(self, update):
+ #log.msg('StatusRemoteCommand: update=%r' % update)
+ if 'rc' in update:
+ self.rc = update['rc']
+ if 'stderr' in update:
+ self.stderr = self.stderr + update['stderr'] + '\n'
+
+class _TransferBuildStep(BuildStep):
+ """
+ Base class for FileUpload and FileDownload to factor out common
+ functionality.
+ """
+ DEFAULT_WORKDIR = "build" # is this redundant?
+
+ def setDefaultWorkdir(self, workdir):
+ if self.workdir is None:
+ self.workdir = workdir
+
+ def _getWorkdir(self):
+ properties = self.build.getProperties()
+ if self.workdir is None:
+ workdir = self.DEFAULT_WORKDIR
+ else:
+ workdir = self.workdir
+ return properties.render(workdir)
+
+
+class FileUpload(_TransferBuildStep):
+ """
+ Build step to transfer a file from the slave to the master.
+
+ arguments:
+
+ - ['slavesrc'] filename of source file at slave, relative to workdir
+ - ['masterdest'] filename of destination file at master
+ - ['workdir'] string with slave working directory relative to builder
+ base dir, default 'build'
+ - ['maxsize'] maximum size of the file, default None (=unlimited)
+ - ['blocksize'] maximum size of each block being transferred
+ - ['mode'] file access mode for the resulting master-side file.
+ The default (=None) is to leave it up to the umask of
+ the buildmaster process.
+
+ """
+
+ name = 'upload'
+
+ def __init__(self, slavesrc, masterdest,
+ workdir=None, maxsize=None, blocksize=16*1024, mode=None,
+ **buildstep_kwargs):
+ BuildStep.__init__(self, **buildstep_kwargs)
+ self.addFactoryArguments(slavesrc=slavesrc,
+ masterdest=masterdest,
+ workdir=workdir,
+ maxsize=maxsize,
+ blocksize=blocksize,
+ mode=mode,
+ )
+
+ self.slavesrc = slavesrc
+ self.masterdest = masterdest
+ self.workdir = workdir
+ self.maxsize = maxsize
+ self.blocksize = blocksize
+ assert isinstance(mode, (int, type(None)))
+ self.mode = mode
+
+ def start(self):
+ version = self.slaveVersion("uploadFile")
+ properties = self.build.getProperties()
+
+ if not version:
+ m = "slave is too old, does not know about uploadFile"
+ raise BuildSlaveTooOldError(m)
+
+ source = properties.render(self.slavesrc)
+ masterdest = properties.render(self.masterdest)
+ # we rely upon the fact that the buildmaster runs chdir'ed into its
+ # basedir to make sure that relative paths in masterdest are expanded
+ # properly. TODO: maybe pass the master's basedir all the way down
+ # into the BuildStep so we can do this better.
+ masterdest = os.path.expanduser(masterdest)
+ log.msg("FileUpload started, from slave %r to master %r"
+ % (source, masterdest))
+
+ self.step_status.setText(['uploading', os.path.basename(source)])
+
+ # we use maxsize to limit the amount of data on both sides
+ fileWriter = _FileWriter(masterdest, self.maxsize, self.mode)
+
+ # default arguments
+ args = {
+ 'slavesrc': source,
+ 'workdir': self._getWorkdir(),
+ 'writer': fileWriter,
+ 'maxsize': self.maxsize,
+ 'blocksize': self.blocksize,
+ }
+
+ self.cmd = StatusRemoteCommand('uploadFile', args)
+ d = self.runCommand(self.cmd)
+ d.addCallback(self.finished).addErrback(self.failed)
+
+ def finished(self, result):
+ if self.cmd.stderr != '':
+ self.addCompleteLog('stderr', self.cmd.stderr)
+
+ if self.cmd.rc is None or self.cmd.rc == 0:
+ return BuildStep.finished(self, SUCCESS)
+ return BuildStep.finished(self, FAILURE)
+
+
+class DirectoryUpload(BuildStep):
+ """
+ Build step to transfer a directory from the slave to the master.
+
+ arguments:
+
+ - ['slavesrc'] name of source directory at slave, relative to workdir
+ - ['masterdest'] name of destination directory at master
+ - ['workdir'] string with slave working directory relative to builder
+ base dir, default 'build'
+ - ['maxsize'] maximum size of each file, default None (=unlimited)
+ - ['blocksize'] maximum size of each block being transferred
+ - ['mode'] file access mode for the resulting master-side file.
+ The default (=None) is to leave it up to the umask of
+ the buildmaster process.
+
+ """
+
+ name = 'upload'
+
+ def __init__(self, slavesrc, masterdest,
+ workdir="build", maxsize=None, blocksize=16*1024, mode=None,
+ **buildstep_kwargs):
+ BuildStep.__init__(self, **buildstep_kwargs)
+ self.addFactoryArguments(slavesrc=slavesrc,
+ masterdest=masterdest,
+ workdir=workdir,
+ maxsize=maxsize,
+ blocksize=blocksize,
+ mode=mode,
+ )
+
+ self.slavesrc = slavesrc
+ self.masterdest = masterdest
+ self.workdir = workdir
+ self.maxsize = maxsize
+ self.blocksize = blocksize
+ assert isinstance(mode, (int, type(None)))
+ self.mode = mode
+
+ def start(self):
+ version = self.slaveVersion("uploadDirectory")
+ properties = self.build.getProperties()
+
+ if not version:
+ m = "slave is too old, does not know about uploadDirectory"
+ raise BuildSlaveTooOldError(m)
+
+ source = properties.render(self.slavesrc)
+ masterdest = properties.render(self.masterdest)
+ # we rely upon the fact that the buildmaster runs chdir'ed into its
+ # basedir to make sure that relative paths in masterdest are expanded
+ # properly. TODO: maybe pass the master's basedir all the way down
+ # into the BuildStep so we can do this better.
+ masterdest = os.path.expanduser(masterdest)
+ log.msg("DirectoryUpload started, from slave %r to master %r"
+ % (source, masterdest))
+
+ self.step_status.setText(['uploading', os.path.basename(source)])
+
+ # we use maxsize to limit the amount of data on both sides
+ dirWriter = _DirectoryWriter(masterdest, self.maxsize, self.mode)
+
+ # default arguments
+ args = {
+ 'slavesrc': source,
+ 'workdir': self.workdir,
+ 'writer': dirWriter,
+ 'maxsize': self.maxsize,
+ 'blocksize': self.blocksize,
+ }
+
+ self.cmd = StatusRemoteCommand('uploadDirectory', args)
+ d = self.runCommand(self.cmd)
+ d.addCallback(self.finished).addErrback(self.failed)
+
+ def finished(self, result):
+ if self.cmd.stderr != '':
+ self.addCompleteLog('stderr', self.cmd.stderr)
+
+ if self.cmd.rc is None or self.cmd.rc == 0:
+ return BuildStep.finished(self, SUCCESS)
+ return BuildStep.finished(self, FAILURE)
+
+
+
+
+class _FileReader(pb.Referenceable):
+ """
+ Helper class that acts as a file-object with read access
+ """
+
+ def __init__(self, fp):
+ self.fp = fp
+
+ def remote_read(self, maxlength):
+ """
+ Called from remote slave to read at most L{maxlength} bytes of data
+
+ @type maxlength: C{integer}
+ @param maxlength: Maximum number of data bytes that can be returned
+
+ @return: Data read from L{fp}
+ @rtype: C{string} of bytes read from file
+ """
+ if self.fp is None:
+ return ''
+
+ data = self.fp.read(maxlength)
+ return data
+
+ def remote_close(self):
+ """
+ Called by remote slave to state that no more data will be transferred
+ """
+ if self.fp is not None:
+ self.fp.close()
+ self.fp = None
+
+
+class FileDownload(_TransferBuildStep):
+ """
+ Download the first 'maxsize' bytes of a file, from the buildmaster to the
+ buildslave. Set the mode of the file
+
+ Arguments::
+
+ ['mastersrc'] filename of source file at master
+ ['slavedest'] filename of destination file at slave
+ ['workdir'] string with slave working directory relative to builder
+ base dir, default 'build'
+ ['maxsize'] maximum size of the file, default None (=unlimited)
+ ['blocksize'] maximum size of each block being transferred
+ ['mode'] use this to set the access permissions of the resulting
+ buildslave-side file. This is traditionally an octal
+ integer, like 0644 to be world-readable (but not
+ world-writable), or 0600 to only be readable by
+ the buildslave account, or 0755 to be world-executable.
+ The default (=None) is to leave it up to the umask of
+ the buildslave process.
+
+ """
+ name = 'download'
+
+ def __init__(self, mastersrc, slavedest,
+ workdir=None, maxsize=None, blocksize=16*1024, mode=None,
+ **buildstep_kwargs):
+ BuildStep.__init__(self, **buildstep_kwargs)
+ self.addFactoryArguments(mastersrc=mastersrc,
+ slavedest=slavedest,
+ workdir=workdir,
+ maxsize=maxsize,
+ blocksize=blocksize,
+ mode=mode,
+ )
+
+ self.mastersrc = mastersrc
+ self.slavedest = slavedest
+ self.workdir = workdir
+ self.maxsize = maxsize
+ self.blocksize = blocksize
+ assert isinstance(mode, (int, type(None)))
+ self.mode = mode
+
+ def start(self):
+ properties = self.build.getProperties()
+
+ version = self.slaveVersion("downloadFile")
+ if not version:
+ m = "slave is too old, does not know about downloadFile"
+ raise BuildSlaveTooOldError(m)
+
+ # we are currently in the buildmaster's basedir, so any non-absolute
+ # paths will be interpreted relative to that
+ source = os.path.expanduser(properties.render(self.mastersrc))
+ slavedest = properties.render(self.slavedest)
+ log.msg("FileDownload started, from master %r to slave %r" %
+ (source, slavedest))
+
+ self.step_status.setText(['downloading', "to",
+ os.path.basename(slavedest)])
+
+ # setup structures for reading the file
+ try:
+ fp = open(source, 'rb')
+ except IOError:
+ # if file does not exist, bail out with an error
+ self.addCompleteLog('stderr',
+ 'File %r not available at master' % source)
+ # TODO: once BuildStep.start() gets rewritten to use
+ # maybeDeferred, just re-raise the exception here.
+ reactor.callLater(0, BuildStep.finished, self, FAILURE)
+ return
+ fileReader = _FileReader(fp)
+
+ # default arguments
+ args = {
+ 'slavedest': slavedest,
+ 'maxsize': self.maxsize,
+ 'reader': fileReader,
+ 'blocksize': self.blocksize,
+ 'workdir': self._getWorkdir(),
+ 'mode': self.mode,
+ }
+
+ self.cmd = StatusRemoteCommand('downloadFile', args)
+ d = self.runCommand(self.cmd)
+ d.addCallback(self.finished).addErrback(self.failed)
+
+ def finished(self, result):
+ if self.cmd.stderr != '':
+ self.addCompleteLog('stderr', self.cmd.stderr)
+
+ if self.cmd.rc is None or self.cmd.rc == 0:
+ return BuildStep.finished(self, SUCCESS)
+ return BuildStep.finished(self, FAILURE)
+
diff --git a/buildbot/buildbot/steps/trigger.py b/buildbot/buildbot/steps/trigger.py
new file mode 100644
index 0000000..7903e70
--- /dev/null
+++ b/buildbot/buildbot/steps/trigger.py
@@ -0,0 +1,122 @@
+from buildbot.process.buildstep import LoggingBuildStep, SUCCESS, FAILURE, EXCEPTION
+from buildbot.process.properties import Properties
+from buildbot.scheduler import Triggerable
+from twisted.internet import defer
+
+class Trigger(LoggingBuildStep):
+ """I trigger a scheduler.Triggerable, to use one or more Builders as if
+ they were a single buildstep (like a subroutine call).
+ """
+ name = "trigger"
+
+ flunkOnFailure = True
+
+ def __init__(self, schedulerNames=[], updateSourceStamp=True,
+ waitForFinish=False, set_properties={}, **kwargs):
+ """
+ Trigger the given schedulers when this step is executed.
+
+ @param schedulerNames: A list of scheduler names that should be
+ triggered. Schedulers can be specified using
+ WithProperties, if desired.
+
+ @param updateSourceStamp: If True (the default), I will try to give
+ the schedulers an absolute SourceStamp for
+ their builds, so that a HEAD build will use
+ the same revision even if more changes have
+ occurred since my build's update step was
+ run. If False, I will use the original
+ SourceStamp unmodified.
+
+ @param waitForFinish: If False (the default), this step will finish
+ as soon as I've started the triggered
+ schedulers. If True, I will wait until all of
+ the triggered schedulers have finished their
+ builds.
+
+ @param set_properties: A dictionary of properties to set for any
+ builds resulting from this trigger. To copy
+ existing properties, use WithProperties. These
+ properties will override properties set in the
+ Triggered scheduler's constructor.
+
+ """
+ assert schedulerNames, "You must specify a scheduler to trigger"
+ self.schedulerNames = schedulerNames
+ self.updateSourceStamp = updateSourceStamp
+ self.waitForFinish = waitForFinish
+ self.set_properties = set_properties
+ self.running = False
+ LoggingBuildStep.__init__(self, **kwargs)
+ self.addFactoryArguments(schedulerNames=schedulerNames,
+ updateSourceStamp=updateSourceStamp,
+ waitForFinish=waitForFinish,
+ set_properties=set_properties)
+
+ def interrupt(self, reason):
+ # TODO: this doesn't actually do anything.
+ if self.running:
+ self.step_status.setText(["interrupted"])
+
+ def start(self):
+ properties = self.build.getProperties()
+
+ # make a new properties object from a dict rendered by the old
+ # properties object
+ props_to_set = Properties()
+ props_to_set.update(properties.render(self.set_properties), "Trigger")
+
+ self.running = True
+ ss = self.build.getSourceStamp()
+ if self.updateSourceStamp:
+ got = properties.getProperty('got_revision')
+ if got:
+ ss = ss.getAbsoluteSourceStamp(got)
+
+ # (is there an easier way to find the BuildMaster?)
+ all_schedulers = self.build.builder.botmaster.parent.allSchedulers()
+ all_schedulers = dict([(sch.name, sch) for sch in all_schedulers])
+ unknown_schedulers = []
+ triggered_schedulers = []
+
+ # TODO: don't fire any schedulers if we discover an unknown one
+ dl = []
+ for scheduler in self.schedulerNames:
+ scheduler = properties.render(scheduler)
+ if all_schedulers.has_key(scheduler):
+ sch = all_schedulers[scheduler]
+ if isinstance(sch, Triggerable):
+ dl.append(sch.trigger(ss, set_props=props_to_set))
+ triggered_schedulers.append(scheduler)
+ else:
+ unknown_schedulers.append(scheduler)
+ else:
+ unknown_schedulers.append(scheduler)
+
+ if unknown_schedulers:
+ self.step_status.setText(['no scheduler:'] + unknown_schedulers)
+ rc = FAILURE
+ else:
+ rc = SUCCESS
+ self.step_status.setText(['triggered'] + triggered_schedulers)
+
+ if self.waitForFinish:
+ d = defer.DeferredList(dl, consumeErrors=1)
+ else:
+ d = defer.succeed([])
+
+ def cb(rclist):
+ rc = SUCCESS # (this rc is not the same variable as that above)
+ for was_cb, buildsetstatus in rclist:
+ # TODO: make this algo more configurable
+ if not was_cb:
+ rc = EXCEPTION
+ break
+ if buildsetstatus.getResults() == FAILURE:
+ rc = FAILURE
+ return self.finished(rc)
+
+ def eb(why):
+ return self.finished(FAILURE)
+
+ d.addCallbacks(cb, eb)
diff --git a/buildbot/buildbot/test/__init__.py b/buildbot/buildbot/test/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/buildbot/buildbot/test/__init__.py
diff --git a/buildbot/buildbot/test/emit.py b/buildbot/buildbot/test/emit.py
new file mode 100644
index 0000000..1e23e92
--- /dev/null
+++ b/buildbot/buildbot/test/emit.py
@@ -0,0 +1,11 @@
+
+import os, sys
+
+sys.stdout.write("this is stdout\n")
+sys.stderr.write("this is stderr\n")
+if os.environ.has_key("EMIT_TEST"):
+ sys.stdout.write("EMIT_TEST: %s\n" % os.environ["EMIT_TEST"])
+open("log1.out","wt").write("this is log1\n")
+
+rc = int(sys.argv[1])
+sys.exit(rc)
diff --git a/buildbot/buildbot/test/emitlogs.py b/buildbot/buildbot/test/emitlogs.py
new file mode 100644
index 0000000..1430235
--- /dev/null
+++ b/buildbot/buildbot/test/emitlogs.py
@@ -0,0 +1,42 @@
+import sys, time, os.path, StringIO
+
+mode = 0
+if len(sys.argv) > 1:
+ mode = int(sys.argv[1])
+
+if mode == 0:
+ log2 = open("log2.out", "wt")
+ log3 = open("log3.out", "wt")
+elif mode == 1:
+ # delete the logfiles first, and wait a moment to exercise a failure path
+ if os.path.exists("log2.out"):
+ os.unlink("log2.out")
+ if os.path.exists("log3.out"):
+ os.unlink("log3.out")
+ time.sleep(2)
+ log2 = open("log2.out", "wt")
+ log3 = open("log3.out", "wt")
+elif mode == 2:
+ # don't create the logfiles at all
+ log2 = StringIO.StringIO()
+ log3 = StringIO.StringIO()
+
+def write(i):
+ log2.write("this is log2 %d\n" % i)
+ log2.flush()
+ log3.write("this is log3 %d\n" % i)
+ log3.flush()
+ sys.stdout.write("this is stdout %d\n" % i)
+ sys.stdout.flush()
+
+write(0)
+time.sleep(1)
+write(1)
+sys.stdin.read(1)
+write(2)
+
+log2.close()
+log3.close()
+
+sys.exit(0)
+
diff --git a/buildbot/buildbot/test/mail/freshcvs.1 b/buildbot/buildbot/test/mail/freshcvs.1
new file mode 100644
index 0000000..cc8442e
--- /dev/null
+++ b/buildbot/buildbot/test/mail/freshcvs.1
@@ -0,0 +1,68 @@
+Return-Path: <twisted-commits-admin@twistedmatrix.com>
+Delivered-To: warner-twistedcvs@luther.lothar.com
+Received: (qmail 11151 invoked by uid 1000); 11 Jan 2003 17:10:04 -0000
+Delivered-To: warner-twistedcvs@lothar.com
+Received: (qmail 1548 invoked by uid 13574); 11 Jan 2003 17:06:39 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-twistedcvs@lothar.com>; 11 Jan 2003 17:06:39 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+ by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+ id 18XP0U-0002Mq-00; Sat, 11 Jan 2003 11:01:14 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+ id 18XP02-0002MN-00
+ for <twisted-commits@twistedmatrix.com>; Sat, 11 Jan 2003 11:00:46 -0600
+To: twisted-commits@twistedmatrix.com
+From: moshez CVS <moshez@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+X-Mailer: CVSToys
+From: moshez CVS <moshez@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+Message-Id: <E18XP02-0002MN-00@pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] Instance massenger, apparently
+Sender: twisted-commits-admin@twistedmatrix.com
+Errors-To: twisted-commits-admin@twistedmatrix.com
+X-BeenThere: twisted-commits@twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits@twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Sat, 11 Jan 2003 11:00:46 -0600
+Status:
+
+Modified files:
+Twisted/debian/python-twisted.menu.in 1.3 1.4
+
+Log message:
+Instance massenger, apparently
+
+
+ViewCVS links:
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/debian/python-twisted.menu.in.diff?r1=text&tr1=1.3&r2=text&tr2=1.4&cvsroot=Twisted
+
+Index: Twisted/debian/python-twisted.menu.in
+diff -u Twisted/debian/python-twisted.menu.in:1.3 Twisted/debian/python-twisted.menu.in:1.4
+--- Twisted/debian/python-twisted.menu.in:1.3 Sat Dec 28 10:02:12 2002
++++ Twisted/debian/python-twisted.menu.in Sat Jan 11 09:00:44 2003
+@@ -1,7 +1,7 @@
+ ?package(python@VERSION@-twisted):\
+ needs=x11\
+ section="Apps/Net"\
+-title="Twisted Instant Messenger (@VERSION@)"\
++title="Twisted Instance Messenger (@VERSION@)"\
+ command="/usr/bin/t-im@VERSION@"
+
+ ?package(python@VERSION@-twisted):\
+
+.
+
+_______________________________________________
+Twisted-commits mailing list
+Twisted-commits@twistedmatrix.com
+http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
diff --git a/buildbot/buildbot/test/mail/freshcvs.2 b/buildbot/buildbot/test/mail/freshcvs.2
new file mode 100644
index 0000000..ada1311
--- /dev/null
+++ b/buildbot/buildbot/test/mail/freshcvs.2
@@ -0,0 +1,101 @@
+Return-Path: <twisted-commits-admin@twistedmatrix.com>
+Delivered-To: warner-twistedcvs@luther.lothar.com
+Received: (qmail 32220 invoked by uid 1000); 14 Jan 2003 21:50:04 -0000
+Delivered-To: warner-twistedcvs@lothar.com
+Received: (qmail 7923 invoked by uid 13574); 14 Jan 2003 21:49:48 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-twistedcvs@lothar.com>; 14 Jan 2003 21:49:48 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+ by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+ id 18YYr0-0005en-00; Tue, 14 Jan 2003 15:44:14 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+ id 18YYq7-0005eQ-00
+ for <twisted-commits@twistedmatrix.com>; Tue, 14 Jan 2003 15:43:19 -0600
+To: twisted-commits@twistedmatrix.com
+From: itamarst CVS <itamarst@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+X-Mailer: CVSToys
+From: itamarst CVS <itamarst@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+Message-Id: <E18YYq7-0005eQ-00@pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] submit formmethod now subclass of Choice
+Sender: twisted-commits-admin@twistedmatrix.com
+Errors-To: twisted-commits-admin@twistedmatrix.com
+X-BeenThere: twisted-commits@twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits@twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Tue, 14 Jan 2003 15:43:19 -0600
+Status:
+
+Modified files:
+Twisted/twisted/web/woven/form.py 1.20 1.21
+Twisted/twisted/python/formmethod.py 1.12 1.13
+
+Log message:
+submit formmethod now subclass of Choice
+
+
+ViewCVS links:
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/twisted/web/woven/form.py.diff?r1=text&tr1=1.20&r2=text&tr2=1.21&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/twisted/python/formmethod.py.diff?r1=text&tr1=1.12&r2=text&tr2=1.13&cvsroot=Twisted
+
+Index: Twisted/twisted/web/woven/form.py
+diff -u Twisted/twisted/web/woven/form.py:1.20 Twisted/twisted/web/woven/form.py:1.21
+--- Twisted/twisted/web/woven/form.py:1.20 Tue Jan 14 12:07:29 2003
++++ Twisted/twisted/web/woven/form.py Tue Jan 14 13:43:16 2003
+@@ -140,8 +140,8 @@
+
+ def input_submit(self, request, content, arg):
+ div = content.div()
+- for value in arg.buttons:
+- div.input(type="submit", name=arg.name, value=value)
++ for tag, value, desc in arg.choices:
++ div.input(type="submit", name=arg.name, value=tag)
+ div.text(" ")
+ if arg.reset:
+ div.input(type="reset")
+
+Index: Twisted/twisted/python/formmethod.py
+diff -u Twisted/twisted/python/formmethod.py:1.12 Twisted/twisted/python/formmethod.py:1.13
+--- Twisted/twisted/python/formmethod.py:1.12 Tue Jan 14 12:07:30 2003
++++ Twisted/twisted/python/formmethod.py Tue Jan 14 13:43:17 2003
+@@ -180,19 +180,13 @@
+ return 1
+
+
+-class Submit(Argument):
++class Submit(Choice):
+ """Submit button or a reasonable facsimile thereof."""
+
+- def __init__(self, name, buttons=["Submit"], reset=0, shortDesc=None, longDesc=None):
+- Argument.__init__(self, name, shortDesc=shortDesc, longDesc=longDesc)
+- self.buttons = buttons
++ def __init__(self, name, choices=[("Submit", "submit", "Submit form")],
++ reset=0, shortDesc=None, longDesc=None):
++ Choice.__init__(self, name, choices=choices, shortDesc=shortDesc, longDesc=longDesc)
+ self.reset = reset
+-
+- def coerce(self, val):
+- if val in self.buttons:
+- return val
+- else:
+- raise InputError, "no such action"
+
+
+ class PresentationHint:
+
+.
+
+_______________________________________________
+Twisted-commits mailing list
+Twisted-commits@twistedmatrix.com
+http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
diff --git a/buildbot/buildbot/test/mail/freshcvs.3 b/buildbot/buildbot/test/mail/freshcvs.3
new file mode 100644
index 0000000..f9ff199
--- /dev/null
+++ b/buildbot/buildbot/test/mail/freshcvs.3
@@ -0,0 +1,97 @@
+Return-Path: <twisted-commits-admin@twistedmatrix.com>
+Delivered-To: warner-twistedcvs@luther.lothar.com
+Received: (qmail 32220 invoked by uid 1000); 14 Jan 2003 21:50:04 -0000
+Delivered-To: warner-twistedcvs@lothar.com
+Received: (qmail 7923 invoked by uid 13574); 14 Jan 2003 21:49:48 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-twistedcvs@lothar.com>; 14 Jan 2003 21:49:48 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+ by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+ id 18YYr0-0005en-00; Tue, 14 Jan 2003 15:44:14 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+ id 18YYq7-0005eQ-00
+ for <twisted-commits@twistedmatrix.com>; Tue, 14 Jan 2003 15:43:19 -0600
+To: twisted-commits@twistedmatrix.com
+From: itamarst CVS <itamarst@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+X-Mailer: CVSToys
+From: itamarst CVS <itamarst@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+Message-Id: <E18YYq7-0005eQ-00@pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] submit formmethod now subclass of Choice
+Sender: twisted-commits-admin@twistedmatrix.com
+Errors-To: twisted-commits-admin@twistedmatrix.com
+X-BeenThere: twisted-commits@twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits@twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Tue, 14 Jan 2003 15:43:19 -0600
+Status:
+
+Modified files:
+Twisted/twisted/web/woven/form.py 1.20 1.21
+Twisted/twisted/python/formmethod.py 1.12 1.13
+
+Log message:
+submit formmethod now subclass of Choice
+
+
+Index: Twisted/twisted/web/woven/form.py
+diff -u Twisted/twisted/web/woven/form.py:1.20 Twisted/twisted/web/woven/form.py:1.21
+--- Twisted/twisted/web/woven/form.py:1.20 Tue Jan 14 12:07:29 2003
++++ Twisted/twisted/web/woven/form.py Tue Jan 14 13:43:16 2003
+@@ -140,8 +140,8 @@
+
+ def input_submit(self, request, content, arg):
+ div = content.div()
+- for value in arg.buttons:
+- div.input(type="submit", name=arg.name, value=value)
++ for tag, value, desc in arg.choices:
++ div.input(type="submit", name=arg.name, value=tag)
+ div.text(" ")
+ if arg.reset:
+ div.input(type="reset")
+
+Index: Twisted/twisted/python/formmethod.py
+diff -u Twisted/twisted/python/formmethod.py:1.12 Twisted/twisted/python/formmethod.py:1.13
+--- Twisted/twisted/python/formmethod.py:1.12 Tue Jan 14 12:07:30 2003
++++ Twisted/twisted/python/formmethod.py Tue Jan 14 13:43:17 2003
+@@ -180,19 +180,13 @@
+ return 1
+
+
+-class Submit(Argument):
++class Submit(Choice):
+ """Submit button or a reasonable facsimile thereof."""
+
+- def __init__(self, name, buttons=["Submit"], reset=0, shortDesc=None, longDesc=None):
+- Argument.__init__(self, name, shortDesc=shortDesc, longDesc=longDesc)
+- self.buttons = buttons
++ def __init__(self, name, choices=[("Submit", "submit", "Submit form")],
++ reset=0, shortDesc=None, longDesc=None):
++ Choice.__init__(self, name, choices=choices, shortDesc=shortDesc, longDesc=longDesc)
+ self.reset = reset
+-
+- def coerce(self, val):
+- if val in self.buttons:
+- return val
+- else:
+- raise InputError, "no such action"
+
+
+ class PresentationHint:
+
+.
+
+_______________________________________________
+Twisted-commits mailing list
+Twisted-commits@twistedmatrix.com
+http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
diff --git a/buildbot/buildbot/test/mail/freshcvs.4 b/buildbot/buildbot/test/mail/freshcvs.4
new file mode 100644
index 0000000..9e674dc
--- /dev/null
+++ b/buildbot/buildbot/test/mail/freshcvs.4
@@ -0,0 +1,45 @@
+Return-Path: <twisted-commits-admin@twistedmatrix.com>
+Delivered-To: warner-twistedcvs@luther.lothar.com
+Received: (qmail 32220 invoked by uid 1000); 14 Jan 2003 21:50:04 -0000
+Delivered-To: warner-twistedcvs@lothar.com
+Received: (qmail 7923 invoked by uid 13574); 14 Jan 2003 21:49:48 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-twistedcvs@lothar.com>; 14 Jan 2003 21:49:48 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+ by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+ id 18YYr0-0005en-00; Tue, 14 Jan 2003 15:44:14 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+ id 18YYq7-0005eQ-00
+ for <twisted-commits@twistedmatrix.com>; Tue, 14 Jan 2003 15:43:19 -0600
+To: twisted-commits@twistedmatrix.com
+From: itamarst CVS <itamarst@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+X-Mailer: CVSToys
+From: itamarst CVS <itamarst@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+Message-Id: <E18YYq7-0005eQ-00@pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] submit formmethod now subclass of Choice
+Sender: twisted-commits-admin@twistedmatrix.com
+Errors-To: twisted-commits-admin@twistedmatrix.com
+X-BeenThere: twisted-commits@twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits@twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Tue, 14 Jan 2003 15:43:19 -0600
+Status:
+
+Modified files:
+Twisted/twisted/web/woven/form.py 1.20 1.21
+Twisted/twisted/python/formmethod.py 1.12 1.13
+
+Log message:
+submit formmethod now subclass of Choice
+
diff --git a/buildbot/buildbot/test/mail/freshcvs.5 b/buildbot/buildbot/test/mail/freshcvs.5
new file mode 100644
index 0000000..f20a958
--- /dev/null
+++ b/buildbot/buildbot/test/mail/freshcvs.5
@@ -0,0 +1,54 @@
+Return-Path: <twisted-commits-admin@twistedmatrix.com>
+Delivered-To: warner-twistedcvs@luther.lothar.com
+Received: (qmail 5865 invoked by uid 1000); 17 Jan 2003 07:00:04 -0000
+Delivered-To: warner-twistedcvs@lothar.com
+Received: (qmail 40460 invoked by uid 13574); 17 Jan 2003 06:51:55 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-twistedcvs@lothar.com>; 17 Jan 2003 06:51:55 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+ by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+ id 18ZQGk-0003WL-00; Fri, 17 Jan 2003 00:46:22 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+ id 18ZQFy-0003VP-00
+ for <twisted-commits@twistedmatrix.com>; Fri, 17 Jan 2003 00:45:34 -0600
+To: twisted-commits@twistedmatrix.com
+From: etrepum CVS <etrepum@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+X-Mailer: CVSToys
+From: etrepum CVS <etrepum@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+Message-Id: <E18ZQFy-0003VP-00@pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] Directory /cvs/Twisted/doc/examples/cocoaDemo added to the repository
+Sender: twisted-commits-admin@twistedmatrix.com
+Errors-To: twisted-commits-admin@twistedmatrix.com
+X-BeenThere: twisted-commits@twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits@twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Fri, 17 Jan 2003 00:45:34 -0600
+Status:
+
+Modified files:
+Twisted/doc/examples/cocoaDemo 0 0
+
+Log message:
+Directory /cvs/Twisted/doc/examples/cocoaDemo added to the repository
+
+
+ViewCVS links:
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo.diff?r1=text&tr1=NONE&r2=text&tr2=NONE&cvsroot=Twisted
+
+.
+
+_______________________________________________
+Twisted-commits mailing list
+Twisted-commits@twistedmatrix.com
+http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
diff --git a/buildbot/buildbot/test/mail/freshcvs.6 b/buildbot/buildbot/test/mail/freshcvs.6
new file mode 100644
index 0000000..20719f4
--- /dev/null
+++ b/buildbot/buildbot/test/mail/freshcvs.6
@@ -0,0 +1,70 @@
+Return-Path: <twisted-commits-admin@twistedmatrix.com>
+Delivered-To: warner-twistedcvs@luther.lothar.com
+Received: (qmail 7252 invoked by uid 1000); 17 Jan 2003 07:10:04 -0000
+Delivered-To: warner-twistedcvs@lothar.com
+Received: (qmail 43115 invoked by uid 13574); 17 Jan 2003 07:07:57 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-twistedcvs@lothar.com>; 17 Jan 2003 07:07:57 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+ by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+ id 18ZQW6-0003dA-00; Fri, 17 Jan 2003 01:02:14 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+ id 18ZQV7-0003cm-00
+ for <twisted-commits@twistedmatrix.com>; Fri, 17 Jan 2003 01:01:13 -0600
+To: twisted-commits@twistedmatrix.com
+From: etrepum CVS <etrepum@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+X-Mailer: CVSToys
+From: etrepum CVS <etrepum@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+Message-Id: <E18ZQV7-0003cm-00@pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] Cocoa (OS X) clone of the QT demo, using polling reactor
+Sender: twisted-commits-admin@twistedmatrix.com
+Errors-To: twisted-commits-admin@twistedmatrix.com
+X-BeenThere: twisted-commits@twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits@twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Fri, 17 Jan 2003 01:01:13 -0600
+Status:
+
+Modified files:
+Twisted/doc/examples/cocoaDemo/MyAppDelegate.py None 1.1
+Twisted/doc/examples/cocoaDemo/__main__.py None 1.1
+Twisted/doc/examples/cocoaDemo/bin-python-main.m None 1.1
+Twisted/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings None 1.1
+Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib None 1.1
+Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib None 1.1
+Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib None 1.1
+Twisted/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj None 1.1
+
+Log message:
+Cocoa (OS X) clone of the QT demo, using polling reactor
+
+Requires pyobjc ( http://pyobjc.sourceforge.net ), it's not much different than the template project. The reactor is iterated periodically by a repeating NSTimer.
+
+
+ViewCVS links:
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/MyAppDelegate.py.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/__main__.py.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/bin-python-main.m.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+
+.
+
+_______________________________________________
+Twisted-commits mailing list
+Twisted-commits@twistedmatrix.com
+http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
diff --git a/buildbot/buildbot/test/mail/freshcvs.7 b/buildbot/buildbot/test/mail/freshcvs.7
new file mode 100644
index 0000000..515be1d
--- /dev/null
+++ b/buildbot/buildbot/test/mail/freshcvs.7
@@ -0,0 +1,68 @@
+Return-Path: <twisted-commits-admin@twistedmatrix.com>
+Delivered-To: warner-twistedcvs@luther.lothar.com
+Received: (qmail 8665 invoked by uid 1000); 17 Jan 2003 08:00:03 -0000
+Delivered-To: warner-twistedcvs@lothar.com
+Received: (qmail 50728 invoked by uid 13574); 17 Jan 2003 07:51:14 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-twistedcvs@lothar.com>; 17 Jan 2003 07:51:14 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+ by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+ id 18ZRBm-0003pN-00; Fri, 17 Jan 2003 01:45:18 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+ id 18ZRBQ-0003ou-00
+ for <twisted-commits@twistedmatrix.com>; Fri, 17 Jan 2003 01:44:56 -0600
+To: twisted-commits@twistedmatrix.com
+From: etrepum CVS <etrepum@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+X-Mailer: CVSToys
+From: etrepum CVS <etrepum@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+Message-Id: <E18ZRBQ-0003ou-00@pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] Directories break debian build script, waiting for reasonable fix
+Sender: twisted-commits-admin@twistedmatrix.com
+Errors-To: twisted-commits-admin@twistedmatrix.com
+X-BeenThere: twisted-commits@twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits@twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Fri, 17 Jan 2003 01:44:56 -0600
+Status:
+
+Modified files:
+Twisted/doc/examples/cocoaDemo/MyAppDelegate.py 1.1 None
+Twisted/doc/examples/cocoaDemo/__main__.py 1.1 None
+Twisted/doc/examples/cocoaDemo/bin-python-main.m 1.1 None
+Twisted/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings 1.1 None
+Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib 1.1 None
+Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib 1.1 None
+Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib 1.1 None
+Twisted/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj 1.1 None
+
+Log message:
+Directories break debian build script, waiting for reasonable fix
+
+
+ViewCVS links:
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/MyAppDelegate.py.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/__main__.py.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/bin-python-main.m.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+
+.
+
+_______________________________________________
+Twisted-commits mailing list
+Twisted-commits@twistedmatrix.com
+http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
diff --git a/buildbot/buildbot/test/mail/freshcvs.8 b/buildbot/buildbot/test/mail/freshcvs.8
new file mode 100644
index 0000000..9b1e4fd
--- /dev/null
+++ b/buildbot/buildbot/test/mail/freshcvs.8
@@ -0,0 +1,61 @@
+Return-Path: <twisted-commits-admin@twistedmatrix.com>
+Delivered-To: warner-twistedcvs@luther.lothar.com
+Received: (qmail 10804 invoked by uid 1000); 19 Jan 2003 14:10:03 -0000
+Delivered-To: warner-twistedcvs@lothar.com
+Received: (qmail 6704 invoked by uid 13574); 19 Jan 2003 14:00:20 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-twistedcvs@lothar.com>; 19 Jan 2003 14:00:20 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+ by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+ id 18aFtx-0002WS-00; Sun, 19 Jan 2003 07:54:17 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+ id 18aFtH-0002W3-00
+ for <twisted-commits@twistedmatrix.com>; Sun, 19 Jan 2003 07:53:35 -0600
+To: twisted-commits@twistedmatrix.com
+From: acapnotic CVS <acapnotic@twistedmatrix.com>
+X-Mailer: CVSToys
+Message-Id: <E18aFtH-0002W3-00@pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] it doesn't work with invalid syntax
+Sender: twisted-commits-admin@twistedmatrix.com
+Errors-To: twisted-commits-admin@twistedmatrix.com
+X-BeenThere: twisted-commits@twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits@twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Sun, 19 Jan 2003 07:53:35 -0600
+Status:
+
+Modified files:
+CVSROOT/freshCfg 1.16 1.17
+
+Log message:
+it doesn't work with invalid syntax
+
+
+Index: CVSROOT/freshCfg
+diff -u CVSROOT/freshCfg:1.16 CVSROOT/freshCfg:1.17
+--- CVSROOT/freshCfg:1.16 Sun Jan 19 05:52:34 2003
++++ CVSROOT/freshCfg Sun Jan 19 05:53:34 2003
+@@ -27,7 +27,7 @@
+ ('/cvs', '^Reality', None, MailNotification(['reality-commits'])),
+ ('/cvs', '^Twistby', None, MailNotification(['acapnotic'])),
+ ('/cvs', '^CVSToys', None,
+- MailNotification(['CVSToys-list']
++ MailNotification(['CVSToys-list'],
+ "http://twistedmatrix.com/users/jh.twistd/"
+ "viewcvs/cgi/viewcvs.cgi/",
+ replyTo="cvstoys-list@twistedmatrix.com"),)
+
+
+_______________________________________________
+Twisted-commits mailing list
+Twisted-commits@twistedmatrix.com
+http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
diff --git a/buildbot/buildbot/test/mail/freshcvs.9 b/buildbot/buildbot/test/mail/freshcvs.9
new file mode 100644
index 0000000..fd4f785
--- /dev/null
+++ b/buildbot/buildbot/test/mail/freshcvs.9
@@ -0,0 +1,18 @@
+From twisted-python@twistedmatrix.com Fri Dec 26 07:25:13 2003
+From: twisted-python@twistedmatrix.com (exarkun CVS)
+Date: Fri, 26 Dec 2003 00:25:13 -0700
+Subject: [Twisted-commits] Directory /cvs/Twisted/sandbox/exarkun/persist-plugin added to the repository
+Message-ID: <E1AZmLR-0000Tl-00@wolfwood>
+
+Modified files:
+Twisted/sandbox/exarkun/persist-plugin
+
+Log message:
+Directory /cvs/Twisted/sandbox/exarkun/persist-plugin added to the repository
+
+
+ViewCVS links:
+http://cvs.twistedmatrix.com/cvs/sandbox/exarkun/persist-plugin?cvsroot=Twisted
+
+
+
diff --git a/buildbot/buildbot/test/mail/svn-commit.1 b/buildbot/buildbot/test/mail/svn-commit.1
new file mode 100644
index 0000000..591dfee
--- /dev/null
+++ b/buildbot/buildbot/test/mail/svn-commit.1
@@ -0,0 +1,67 @@
+X-Original-To: jm@jmason.org
+Delivered-To: jm@dogma.boxhost.net
+Received: from localhost [127.0.0.1]
+ by localhost with IMAP (fetchmail-6.2.5)
+ for jm@localhost (single-drop); Wed, 12 Apr 2006 01:52:04 +0100 (IST)
+Received: from mail.apache.org (hermes.apache.org [209.237.227.199])
+ by dogma.boxhost.net (Postfix) with SMTP id 34F07310051
+ for <jm@jmason.org>; Wed, 12 Apr 2006 01:44:17 +0100 (IST)
+Received: (qmail 71414 invoked by uid 500); 12 Apr 2006 00:44:16 -0000
+Mailing-List: contact commits-help@spamassassin.apache.org; run by ezmlm
+Precedence: bulk
+list-help: <mailto:commits-help@spamassassin.apache.org>
+list-unsubscribe: <mailto:commits-unsubscribe@spamassassin.apache.org>
+List-Post: <mailto:commits@spamassassin.apache.org>
+Reply-To: "SpamAssassin Dev" <dev@spamassassin.apache.org>
+List-Id: <commits.spamassassin.apache.org>
+Delivered-To: mailing list commits@spamassassin.apache.org
+Received: (qmail 71403 invoked by uid 99); 12 Apr 2006 00:44:16 -0000
+Received: from asf.osuosl.org (HELO asf.osuosl.org) (140.211.166.49)
+ by apache.org (qpsmtpd/0.29) with ESMTP; Tue, 11 Apr 2006 17:44:16 -0700
+X-ASF-Spam-Status: No, hits=-9.4 required=10.0
+ tests=ALL_TRUSTED,NO_REAL_NAME
+Received: from [209.237.227.194] (HELO minotaur.apache.org) (209.237.227.194)
+ by apache.org (qpsmtpd/0.29) with SMTP; Tue, 11 Apr 2006 17:44:15 -0700
+Received: (qmail 51950 invoked by uid 65534); 12 Apr 2006 00:43:55 -0000
+Message-ID: <20060412004355.51949.qmail@minotaur.apache.org>
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: svn commit: r393348 - /spamassassin/trunk/sa-update.raw
+Date: Wed, 12 Apr 2006 00:43:54 -0000
+To: commits@spamassassin.apache.org
+From: felicity@apache.org
+X-Mailer: svnmailer-1.0.7
+X-Virus-Checked: Checked by ClamAV on apache.org
+Status: O
+X-UID: 62932
+X-Keywords:
+
+Author: felicity
+Date: Tue Apr 11 17:43:54 2006
+New Revision: 393348
+
+URL: http://svn.apache.org/viewcvs?rev=393348&view=rev
+Log:
+bug 4864: remove extraneous front-slash from gpghomedir path
+
+Modified:
+ spamassassin/trunk/sa-update.raw
+
+Modified: spamassassin/trunk/sa-update.raw
+URL: http://svn.apache.org/viewcvs/spamassassin/trunk/sa-update.raw?rev=393348&r1=393347&r2=393348&view=diff
+==============================================================================
+--- spamassassin/trunk/sa-update.raw (original)
++++ spamassassin/trunk/sa-update.raw Tue Apr 11 17:43:54 2006
+@@ -120,7 +120,7 @@
+ @{$opt{'channel'}} = ();
+ my $GPG_ENABLED = 1;
+
+-$opt{'gpghomedir'} = File::Spec->catfile($LOCAL_RULES_DIR, '/sa-update-keys');
++$opt{'gpghomedir'} = File::Spec->catfile($LOCAL_RULES_DIR, 'sa-update-keys');
+
+ Getopt::Long::Configure(
+ qw(bundling no_getopt_compat no_auto_abbrev no_ignore_case));
+
+
+
diff --git a/buildbot/buildbot/test/mail/svn-commit.2 b/buildbot/buildbot/test/mail/svn-commit.2
new file mode 100644
index 0000000..eeef001
--- /dev/null
+++ b/buildbot/buildbot/test/mail/svn-commit.2
@@ -0,0 +1,1218 @@
+X-Original-To: jm@jmason.org
+Delivered-To: jm@dogma.boxhost.net
+Received: from localhost [127.0.0.1]
+ by localhost with IMAP (fetchmail-6.2.5)
+ for jm@localhost (single-drop); Thu, 09 Mar 2006 21:44:57 +0000 (GMT)
+Received: from minotaur.apache.org (minotaur.apache.org [209.237.227.194])
+ by dogma.boxhost.net (Postfix) with SMTP id 0D3463105BF
+ for <jm@jmason.org>; Thu, 9 Mar 2006 19:52:50 +0000 (GMT)
+Received: (qmail 30661 invoked by uid 1833); 9 Mar 2006 19:52:44 -0000
+Delivered-To: jm@locus.apache.org
+Received: (qmail 30451 invoked from network); 9 Mar 2006 19:52:38 -0000
+Received: from hermes.apache.org (HELO mail.apache.org) (209.237.227.199)
+ by minotaur.apache.org with SMTP; 9 Mar 2006 19:52:38 -0000
+Received: (qmail 97860 invoked by uid 500); 9 Mar 2006 19:52:29 -0000
+Delivered-To: apmail-jm@apache.org
+Received: (qmail 97837 invoked by uid 500); 9 Mar 2006 19:52:28 -0000
+Mailing-List: contact commits-help@spamassassin.apache.org; run by ezmlm
+Precedence: bulk
+list-help: <mailto:commits-help@spamassassin.apache.org>
+list-unsubscribe: <mailto:commits-unsubscribe@spamassassin.apache.org>
+List-Post: <mailto:commits@spamassassin.apache.org>
+Reply-To: "SpamAssassin Dev" <dev@spamassassin.apache.org>
+List-Id: <commits.spamassassin.apache.org>
+Delivered-To: mailing list commits@spamassassin.apache.org
+Received: (qmail 97826 invoked by uid 99); 9 Mar 2006 19:52:28 -0000
+Received: from asf.osuosl.org (HELO asf.osuosl.org) (140.211.166.49)
+ by apache.org (qpsmtpd/0.29) with ESMTP; Thu, 09 Mar 2006 11:52:28 -0800
+X-ASF-Spam-Status: No, hits=-9.4 required=10.0
+ tests=ALL_TRUSTED,NO_REAL_NAME
+Received: from [209.237.227.194] (HELO minotaur.apache.org) (209.237.227.194)
+ by apache.org (qpsmtpd/0.29) with SMTP; Thu, 09 Mar 2006 11:52:26 -0800
+Received: (qmail 29644 invoked by uid 65534); 9 Mar 2006 19:52:06 -0000
+Message-ID: <20060309195206.29643.qmail@minotaur.apache.org>
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: svn commit: r384590 - in /spamassassin/branches/3.1: ./
+ lib/Mail/SpamAssassin/ lib/Mail/SpamAssassin/Plugin/ spamd/
+Date: Thu, 09 Mar 2006 19:52:02 -0000
+To: commits@spamassassin.apache.org
+From: sidney@apache.org
+X-Mailer: svnmailer-1.0.7
+X-Virus-Checked: Checked by ClamAV on apache.org
+Status: O
+X-UID: 60795
+X-Keywords:
+
+Author: sidney
+Date: Thu Mar 9 11:51:59 2006
+New Revision: 384590
+
+URL: http://svn.apache.org/viewcvs?rev=384590&view=rev
+Log:
+Bug 4696: consolidated fixes for timeout bugs
+
+Added:
+ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Timeout.pm
+Modified:
+ spamassassin/branches/3.1/MANIFEST
+ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Logger.pm
+ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DCC.pm
+ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DomainKeys.pm
+ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Pyzor.pm
+ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Razor2.pm
+ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/SPF.pm
+ spamassassin/branches/3.1/lib/Mail/SpamAssassin/SpamdForkScaling.pm
+ spamassassin/branches/3.1/spamd/spamd.raw
+
+Modified: spamassassin/branches/3.1/MANIFEST
+URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/MANIFEST?rev=384590&r1=384589&r2=384590&view=diff
+==============================================================================
+--- spamassassin/branches/3.1/MANIFEST (original)
++++ spamassassin/branches/3.1/MANIFEST Thu Mar 9 11:51:59 2006
+@@ -89,6 +89,7 @@
+ lib/Mail/SpamAssassin/SQLBasedAddrList.pm
+ lib/Mail/SpamAssassin/SpamdForkScaling.pm
+ lib/Mail/SpamAssassin/SubProcBackChannel.pm
++lib/Mail/SpamAssassin/Timeout.pm
+ lib/Mail/SpamAssassin/Util.pm
+ lib/Mail/SpamAssassin/Util/DependencyInfo.pm
+ lib/Mail/SpamAssassin/Util/Progress.pm
+
+Modified: spamassassin/branches/3.1/lib/Mail/SpamAssassin/Logger.pm
+URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/Logger.pm?rev=384590&r1=384589&r2=384590&view=diff
+==============================================================================
+--- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Logger.pm (original)
++++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Logger.pm Thu Mar 9 11:51:59 2006
+@@ -142,7 +142,7 @@
+
+ if ($level eq "error") {
+ # don't log alarm timeouts or broken pipes of various plugins' network checks
+- return if ($message[0] =~ /__(?:alarm|brokenpipe)__ignore__/);
++ return if ($message[0] =~ /__ignore__/);
+
+ # dos: we can safely ignore any die's that we eval'd in our own modules so
+ # don't log them -- this is caller 0, the use'ing package is 1, the eval is 2
+
+Modified: spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DCC.pm
+URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DCC.pm?rev=384590&r1=384589&r2=384590&view=diff
+==============================================================================
+--- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DCC.pm (original)
++++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DCC.pm Thu Mar 9 11:51:59 2006
+@@ -44,6 +44,7 @@
+
+ use Mail::SpamAssassin::Plugin;
+ use Mail::SpamAssassin::Logger;
++use Mail::SpamAssassin::Timeout;
+ use IO::Socket;
+ use strict;
+ use warnings;
+@@ -375,15 +376,10 @@
+
+ $permsgstatus->enter_helper_run_mode();
+
+- my $oldalarm = 0;
++ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
++ my $err = $timer->run_and_catch(sub {
+
+- eval {
+- # safe to use $SIG{ALRM} here instead of Util::trap_sigalrm_fully(),
+- # since there are no killer regexp hang dangers here
+- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
+- local $SIG{__DIE__}; # bug 4631
+-
+- $oldalarm = alarm $timeout;
++ local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
+
+ my $sock = IO::Socket::UNIX->new(Type => SOCK_STREAM,
+ Peer => $sockpath) || dbg("dcc: failed to open socket") && die;
+@@ -419,28 +415,20 @@
+ }
+
+ dbg("dcc: dccifd got response: $response");
++
++ });
+
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
+- };
++ $permsgstatus->leave_helper_run_mode();
+
+- my $err = $@;
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
++ if ($timer->timed_out()) {
++ dbg("dcc: dccifd check timed out after $timeout secs.");
++ return 0;
+ }
+- $permsgstatus->leave_helper_run_mode();
+
+ if ($err) {
+ chomp $err;
+- $response = undef;
+- if ($err eq "__alarm__ignore__") {
+- dbg("dcc: dccifd check timed out after $timeout secs.");
+- return 0;
+- } else {
+- warn("dcc: dccifd -> check skipped: $! $err");
+- return 0;
+- }
++ warn("dcc: dccifd -> check skipped: $! $err");
++ return 0;
+ }
+
+ if (!defined $response || $response !~ /^X-DCC/) {
+@@ -494,17 +482,12 @@
+
+ # use a temp file here -- open2() is unreliable, buffering-wise, under spamd
+ my $tmpf = $permsgstatus->create_fulltext_tmpfile($fulltext);
+- my $oldalarm = 0;
+-
+ my $pid;
+- eval {
+- # safe to use $SIG{ALRM} here instead of Util::trap_sigalrm_fully(),
+- # since there are no killer regexp hang dangers here
+- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
+- local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
+- local $SIG{__DIE__}; # bug 4631
+
+- $oldalarm = alarm $timeout;
++ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
++ my $err = $timer->run_and_catch(sub {
++
++ local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
+
+ # note: not really tainted, this came from system configuration file
+ my $path = Mail::SpamAssassin::Util::untaint_file_path($self->{main}->{conf}->{dcc_path});
+@@ -542,17 +525,7 @@
+
+ dbg("dcc: got response: $response");
+
+- # note: this must be called BEFORE leave_helper_run_mode()
+- # $self->cleanup_kids($pid);
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
+- };
+-
+- my $err = $@;
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
++ });
+
+ if (defined(fileno(*DCC))) { # still open
+ if ($pid) {
+@@ -564,11 +537,14 @@
+ }
+ $permsgstatus->leave_helper_run_mode();
+
++ if ($timer->timed_out()) {
++ dbg("dcc: check timed out after $timeout seconds");
++ return 0;
++ }
++
+ if ($err) {
+ chomp $err;
+- if ($err eq "__alarm__ignore__") {
+- dbg("dcc: check timed out after $timeout seconds");
+- } elsif ($err eq "__brokenpipe__ignore__") {
++ if ($err eq "__brokenpipe__ignore__") {
+ dbg("dcc: check failed: broken pipe");
+ } elsif ($err eq "no response") {
+ dbg("dcc: check failed: no response");
+@@ -645,47 +621,37 @@
+ my ($self, $options, $tmpf) = @_;
+ my $timeout = $options->{report}->{conf}->{dcc_timeout};
+
+- $options->{report}->enter_helper_run_mode();
++ # note: not really tainted, this came from system configuration file
++ my $path = Mail::SpamAssassin::Util::untaint_file_path($options->{report}->{conf}->{dcc_path});
+
+- my $oldalarm = 0;
++ my $opts = $options->{report}->{conf}->{dcc_options} || '';
+
+- eval {
+- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
+- local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
+- local $SIG{__DIE__}; # bug 4631
++ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
+
+- $oldalarm = alarm $timeout;
+-
+- # note: not really tainted, this came from system configuration file
+- my $path = Mail::SpamAssassin::Util::untaint_file_path($options->{report}->{conf}->{dcc_path});
++ $options->{report}->enter_helper_run_mode();
++ my $err = $timer->run_and_catch(sub {
+
+- my $opts = $options->{report}->{conf}->{dcc_options} || '';
++ local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
+
+ my $pid = Mail::SpamAssassin::Util::helper_app_pipe_open(*DCC,
+- $tmpf, 1, $path, "-t", "many", split(' ', $opts));
++ $tmpf, 1, $path, "-t", "many", split(' ', $opts));
+ $pid or die "$!\n";
+
+ my @ignored = <DCC>;
+ $options->{report}->close_pipe_fh(\*DCC);
+-
+ waitpid ($pid, 0);
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
+- };
++
++ });
++ $options->{report}->leave_helper_run_mode();
+
+- my $err = $@;
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
++ if ($timer->timed_out()) {
++ dbg("reporter: DCC report timed out after $timeout seconds");
++ return 0;
+ }
+
+- $options->{report}->leave_helper_run_mode();
+-
+ if ($err) {
+ chomp $err;
+- if ($err eq "__alarm__ignore__") {
+- dbg("reporter: DCC report timed out after $timeout seconds");
+- } elsif ($err eq "__brokenpipe__ignore__") {
++ if ($err eq "__brokenpipe__ignore__") {
+ dbg("reporter: DCC report failed: broken pipe");
+ } else {
+ warn("reporter: DCC report failed: $err\n");
+
+Modified: spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DomainKeys.pm
+URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DomainKeys.pm?rev=384590&r1=384589&r2=384590&view=diff
+==============================================================================
+--- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DomainKeys.pm (original)
++++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DomainKeys.pm Thu Mar 9 11:51:59 2006
+@@ -34,6 +34,8 @@
+
+ use Mail::SpamAssassin::Plugin;
+ use Mail::SpamAssassin::Logger;
++use Mail::SpamAssassin::Timeout;
++
+ use strict;
+ use warnings;
+ use bytes;
+@@ -165,30 +167,22 @@
+ }
+
+ my $timeout = $scan->{conf}->{domainkeys_timeout};
+- my $oldalarm = 0;
+
+- eval {
+- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
+- local $SIG{__DIE__}; # bug 4631
+- $oldalarm = alarm($timeout);
++ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
++ my $err = $timer->run_and_catch(sub {
++
+ $self->_dk_lookup_trapped($scan, $message, $domain);
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
+- };
+-
+- my $err = $@;
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
++
++ });
++
++ if ($timer->timed_out()) {
++ dbg("dk: lookup timed out after $timeout seconds");
++ return 0;
+ }
+
+ if ($err) {
+ chomp $err;
+- if ($err eq "__alarm__ignore__") {
+- dbg("dk: lookup timed out after $timeout seconds");
+- } else {
+- warn("dk: lookup failed: $err\n");
+- }
++ warn("dk: lookup failed: $err\n");
+ return 0;
+ }
+
+
+Modified: spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Pyzor.pm
+URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Pyzor.pm?rev=384590&r1=384589&r2=384590&view=diff
+==============================================================================
+--- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Pyzor.pm (original)
++++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Pyzor.pm Thu Mar 9 11:51:59 2006
+@@ -35,6 +35,7 @@
+
+ use Mail::SpamAssassin::Plugin;
+ use Mail::SpamAssassin::Logger;
++use Mail::SpamAssassin::Timeout;
+ use strict;
+ use warnings;
+ use bytes;
+@@ -229,27 +230,22 @@
+
+ $pyzor_count = 0;
+ $pyzor_whitelisted = 0;
+-
+- $permsgstatus->enter_helper_run_mode();
++ my $pid;
+
+ # use a temp file here -- open2() is unreliable, buffering-wise, under spamd
+ my $tmpf = $permsgstatus->create_fulltext_tmpfile($fulltext);
+- my $oldalarm = 0;
+
+- my $pid;
+- eval {
+- # safe to use $SIG{ALRM} here instead of Util::trap_sigalrm_fully(),
+- # since there are no killer regexp hang dangers here
+- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
+- local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
+- local $SIG{__DIE__}; # bug 4631
++ # note: not really tainted, this came from system configuration file
++ my $path = Mail::SpamAssassin::Util::untaint_file_path($self->{main}->{conf}->{pyzor_path});
++
++ my $opts = $self->{main}->{conf}->{pyzor_options} || '';
+
+- $oldalarm = alarm $timeout;
++ $permsgstatus->enter_helper_run_mode();
+
+- # note: not really tainted, this came from system configuration file
+- my $path = Mail::SpamAssassin::Util::untaint_file_path($self->{main}->{conf}->{pyzor_path});
++ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
++ my $err = $timer->run_and_catch(sub {
+
+- my $opts = $self->{main}->{conf}->{pyzor_options} || '';
++ local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
+
+ dbg("pyzor: opening pipe: " . join(' ', $path, $opts, "check", "< $tmpf"));
+
+@@ -273,21 +269,7 @@
+ die("internal error\n");
+ }
+
+- # note: this must be called BEFORE leave_helper_run_mode()
+- # $self->cleanup_kids($pid);
+-
+- # attempt to call this inside the eval, as leaving this scope is
+- # a slow operation and timing *that* out is pointless
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
+- };
+-
+- # clear the alarm before doing lots of time-consuming hard work
+- my $err = $@;
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
++ });
+
+ if (defined(fileno(*PYZOR))) { # still open
+ if ($pid) {
+@@ -299,11 +281,14 @@
+ }
+ $permsgstatus->leave_helper_run_mode();
+
++ if ($timer->timed_out()) {
++ dbg("pyzor: check timed out after $timeout seconds");
++ return 0;
++ }
++
+ if ($err) {
+ chomp $err;
+- if ($err eq "__alarm__ignore__") {
+- dbg("pyzor: check timed out after $timeout seconds");
+- } elsif ($err eq "__brokenpipe__ignore__") {
++ if ($err eq "__brokenpipe__ignore__") {
+ dbg("pyzor: check failed: broken pipe");
+ } elsif ($err eq "no response") {
+ dbg("pyzor: check failed: no response");
+@@ -364,23 +349,19 @@
+
+ sub pyzor_report {
+ my ($self, $options, $tmpf) = @_;
++
++ # note: not really tainted, this came from system configuration file
++ my $path = Mail::SpamAssassin::Util::untaint_file_path($options->{report}->{conf}->{pyzor_path});
++
++ my $opts = $options->{report}->{conf}->{pyzor_options} || '';
+ my $timeout = $self->{main}->{conf}->{pyzor_timeout};
+
+ $options->{report}->enter_helper_run_mode();
+
+- my $oldalarm = 0;
++ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
++ my $err = $timer->run_and_catch(sub {
+
+- eval {
+- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
+ local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
+- local $SIG{__DIE__}; # bug 4631
+-
+- $oldalarm = alarm $timeout;
+-
+- # note: not really tainted, this came from system configuration file
+- my $path = Mail::SpamAssassin::Util::untaint_file_path($options->{report}->{conf}->{pyzor_path});
+-
+- my $opts = $options->{report}->{conf}->{pyzor_options} || '';
+
+ dbg("pyzor: opening pipe: " . join(' ', $path, $opts, "report", "< $tmpf"));
+
+@@ -391,23 +372,19 @@
+ my @ignored = <PYZOR>;
+ $options->{report}->close_pipe_fh(\*PYZOR);
+
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
+ waitpid ($pid, 0);
+- };
++ });
+
+- my $err = $@;
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
+ $options->{report}->leave_helper_run_mode();
+
++ if ($timer->timed_out()) {
++ dbg("reporter: pyzor report timed out after $timeout seconds");
++ return 0;
++ }
++
+ if ($err) {
+ chomp $err;
+- if ($err eq '__alarm__ignore__') {
+- dbg("reporter: pyzor report timed out after $timeout seconds");
+- } elsif ($err eq '__brokenpipe__ignore__') {
++ if ($err eq '__brokenpipe__ignore__') {
+ dbg("reporter: pyzor report failed: broken pipe");
+ } else {
+ warn("reporter: pyzor report failed: $err\n");
+
+Modified: spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Razor2.pm
+URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Razor2.pm?rev=384590&r1=384589&r2=384590&view=diff
+==============================================================================
+--- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Razor2.pm (original)
++++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Razor2.pm Thu Mar 9 11:51:59 2006
+@@ -143,14 +143,11 @@
+ }
+
+ Mail::SpamAssassin::PerMsgStatus::enter_helper_run_mode($self);
+- my $oldalarm = 0;
+
+- eval {
+- local ($^W) = 0; # argh, warnings in Razor
++ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
++ my $err = $timer->run_and_catch(sub {
+
+- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
+- local $SIG{__DIE__}; # bug 4631
+- $oldalarm = alarm $timeout;
++ local ($^W) = 0; # argh, warnings in Razor
+
+ # everything's in the module!
+ my $rc = Razor2::Client::Agent->new("razor-$type");
+@@ -184,7 +181,7 @@
+ # let's reset the alarm since get_server_info() calls
+ # nextserver() which calls discover() which very likely will
+ # reset the alarm for us ... how polite. :(
+- alarm $timeout;
++ $timer->reset();
+
+ # no facility prefix on this die
+ my $sigs = $rc->compute_sigs($objects)
+@@ -219,100 +216,96 @@
+ my $error = $rc->errprefix("$debug: spamassassin") || "$debug: razor2 had unknown error during disconnect";
+ die $error;
+ }
++ }
+
+- # if we got here, we're done doing remote stuff, abort the alert
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
+-
+- # Razor 2.14 says that if we get here, we did ok.
+- $return = 1;
++ # Razor 2.14 says that if we get here, we did ok.
++ $return = 1;
+
+- # figure out if we have a log file we need to close...
+- if (ref($rc->{logref}) && exists $rc->{logref}->{fd}) {
+- # the fd can be stdout or stderr, so we need to find out if it is
+- # so we don't close them by accident. Note: we can't just
+- # undef the fd here (like the IO::Handle manpage says we can)
+- # because it won't actually close, unfortunately. :(
+- my $untie = 1;
+- foreach my $log (*STDOUT{IO}, *STDERR{IO}) {
+- if ($log == $rc->{logref}->{fd}) {
+- $untie = 0;
+- last;
+- }
+- }
+- close $rc->{logref}->{fd} if ($untie);
+- }
+-
+- if ($type eq 'check') {
+- # so $objects->[0] is the first (only) message, and ->{spam} is a general yes/no
+- push(@results, { result => $objects->[0]->{spam} });
++ # figure out if we have a log file we need to close...
++ if (ref($rc->{logref}) && exists $rc->{logref}->{fd}) {
++ # the fd can be stdout or stderr, so we need to find out if it is
++ # so we don't close them by accident. Note: we can't just
++ # undef the fd here (like the IO::Handle manpage says we can)
++ # because it won't actually close, unfortunately. :(
++ my $untie = 1;
++ foreach my $log (*STDOUT{IO}, *STDERR{IO}) {
++ if ($log == $rc->{logref}->{fd}) {
++ $untie = 0;
++ last;
++ }
++ }
++ close $rc->{logref}->{fd} if ($untie);
++ }
+
+- # great for debugging, but leave this off!
+- #use Data::Dumper;
+- #print Dumper($objects),"\n";
+-
+- # ->{p} is for each part of the message
+- # so go through each part, taking the highest cf we find
+- # of any part that isn't contested (ct). This helps avoid false
+- # positives. equals logic_method 4.
+- #
+- # razor-agents < 2.14 have a different object format, so we now support both.
+- # $objects->[0]->{resp} vs $objects->[0]->{p}->[part #]->{resp}
+- my $part = 0;
+- my $arrayref = $objects->[0]->{p} || $objects;
+- if (defined $arrayref) {
+- foreach my $cf (@{$arrayref}) {
+- if (exists $cf->{resp}) {
+- for (my $response=0; $response<@{$cf->{resp}}; $response++) {
+- my $tmp = $cf->{resp}->[$response];
+- my $tmpcf = $tmp->{cf}; # Part confidence
+- my $tmpct = $tmp->{ct}; # Part contested?
+- my $engine = $cf->{sent}->[$response]->{e};
+-
+- # These should always be set, but just in case ...
+- $tmpcf = 0 unless defined $tmpcf;
+- $tmpct = 0 unless defined $tmpct;
+- $engine = 0 unless defined $engine;
+-
+- push(@results,
+- { part => $part, engine => $engine, contested => $tmpct, confidence => $tmpcf });
+- }
+- }
+- else {
+- push(@results, { part => $part, noresponse => 1 });
+- }
+- $part++;
+- }
+- }
+- else {
+- # If we have some new $objects format that isn't close to
+- # the current razor-agents 2.x version, we won't FP but we
+- # should alert in debug.
+- dbg("$debug: it looks like the internal Razor object has changed format!");
+- }
+- }
++ if ($type eq 'check') {
++ # so $objects->[0] is the first (only) message, and ->{spam} is a general yes/no
++ push(@results, { result => $objects->[0]->{spam} });
++
++ # great for debugging, but leave this off!
++ #use Data::Dumper;
++ #print Dumper($objects),"\n";
++
++ # ->{p} is for each part of the message
++ # so go through each part, taking the highest cf we find
++ # of any part that isn't contested (ct). This helps avoid false
++ # positives. equals logic_method 4.
++ #
++ # razor-agents < 2.14 have a different object format, so we now support both.
++ # $objects->[0]->{resp} vs $objects->[0]->{p}->[part #]->{resp}
++ my $part = 0;
++ my $arrayref = $objects->[0]->{p} || $objects;
++ if (defined $arrayref) {
++ foreach my $cf (@{$arrayref}) {
++ if (exists $cf->{resp}) {
++ for (my $response=0; $response<@{$cf->{resp}}; $response++) {
++ my $tmp = $cf->{resp}->[$response];
++ my $tmpcf = $tmp->{cf}; # Part confidence
++ my $tmpct = $tmp->{ct}; # Part contested?
++ my $engine = $cf->{sent}->[$response]->{e};
++
++ # These should always be set, but just in case ...
++ $tmpcf = 0 unless defined $tmpcf;
++ $tmpct = 0 unless defined $tmpct;
++ $engine = 0 unless defined $engine;
++
++ push(@results,
++ { part => $part, engine => $engine, contested => $tmpct, confidence => $tmpcf });
++ }
++ }
++ else {
++ push(@results, { part => $part, noresponse => 1 });
++ }
++ $part++;
++ }
++ }
++ else {
++ # If we have some new $objects format that isn't close to
++ # the current razor-agents 2.x version, we won't FP but we
++ # should alert in debug.
++ dbg("$debug: it looks like the internal Razor object has changed format!");
++ }
+ }
+ }
+ else {
+ warn "$debug: undefined Razor2::Client::Agent\n";
+ }
+
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
+- };
++ });
++
++ # OK, that's enough Razor stuff. now, reset all that global
++ # state it futzes with :(
++ # work around serious brain damage in Razor2 (constant seed)
++ srand;
+
+- my $err = $@;
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
++ Mail::SpamAssassin::PerMsgStatus::leave_helper_run_mode($self);
++
++ if ($timer->timed_out()) {
++ dbg("$debug: razor2 $type timed out after $timeout seconds");
+ }
+
+ if ($err) {
+ chomp $err;
+- if ($err eq "__alarm__ignore__") {
+- dbg("$debug: razor2 $type timed out after $timeout seconds");
+- } elsif ($err =~ /(?:could not connect|network is unreachable)/) {
++ if ($err =~ /(?:could not connect|network is unreachable)/) {
+ # make this a dbg(); SpamAssassin will still continue,
+ # but without Razor checking. otherwise there may be
+ # DSNs and errors in syslog etc., yuck
+@@ -323,11 +316,6 @@
+ warn("$debug: razor2 $type failed: $! $err");
+ }
+ }
+-
+- # work around serious brain damage in Razor2 (constant seed)
+- srand;
+-
+- Mail::SpamAssassin::PerMsgStatus::leave_helper_run_mode($self);
+
+ # razor also debugs to stdout. argh. fix it to stderr...
+ if (would_log('dbg', $debug)) {
+
+Modified: spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/SPF.pm
+URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/SPF.pm?rev=384590&r1=384589&r2=384590&view=diff
+==============================================================================
+--- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/SPF.pm (original)
++++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/SPF.pm Thu Mar 9 11:51:59 2006
+@@ -34,6 +34,7 @@
+
+ use Mail::SpamAssassin::Plugin;
+ use Mail::SpamAssassin::Logger;
++use Mail::SpamAssassin::Timeout;
+ use strict;
+ use warnings;
+ use bytes;
+@@ -300,30 +301,17 @@
+
+ my ($result, $comment);
+ my $timeout = $scanner->{conf}->{spf_timeout};
+- my $oldalarm = 0;
+
+- eval {
+- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
+- local $SIG{__DIE__}; # bug 4631
+- $oldalarm = alarm($timeout);
++ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
++ my $err = $timer->run_and_catch(sub {
++
+ ($result, $comment) = $query->result();
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
+- };
+
+- my $err = $@;
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
++ });
+
+ if ($err) {
+ chomp $err;
+- if ($err eq "__alarm__ignore__") {
+- dbg("spf: lookup timed out after $timeout seconds");
+- } else {
+- warn("spf: lookup failed: $err\n");
+- }
++ warn("spf: lookup failed: $err\n");
+ return 0;
+ }
+
+
+Modified: spamassassin/branches/3.1/lib/Mail/SpamAssassin/SpamdForkScaling.pm
+URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/SpamdForkScaling.pm?rev=384590&r1=384589&r2=384590&view=diff
+==============================================================================
+--- spamassassin/branches/3.1/lib/Mail/SpamAssassin/SpamdForkScaling.pm (original)
++++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/SpamdForkScaling.pm Thu Mar 9 11:51:59 2006
+@@ -25,6 +25,7 @@
+
+ use Mail::SpamAssassin::Util;
+ use Mail::SpamAssassin::Logger;
++use Mail::SpamAssassin::Timeout;
+
+ use vars qw {
+ @PFSTATE_VARS %EXPORT_TAGS @EXPORT_OK
+@@ -109,6 +110,9 @@
+
+ delete $self->{kids}->{$pid};
+
++ # note this for the select()-caller's benefit
++ $self->{child_just_exited} = 1;
++
+ # remove the child from the backchannel list, too
+ $self->{backchannel}->delete_socket_for_child($pid);
+
+@@ -188,24 +192,63 @@
+ vec($rin, $self->{server_fileno}, 1) = 0;
+ }
+
+- my ($rout, $eout, $nfound, $timeleft);
++ my ($rout, $eout, $nfound, $timeleft, $selerr);
++
++ # use alarm to back up select()'s built-in alarm, to debug Theo's bug.
++ # not that I can remember what Theo's bug was, but hey ;) A good
++ # 60 seconds extra on the alarm() should make that quite rare...
++
++ my $timer = Mail::SpamAssassin::Timeout->new({ secs => ($tout*2) + 60 });
+
+- # use alarm to back up select()'s built-in alarm, to debug theo's bug
+- eval {
+- Mail::SpamAssassin::Util::trap_sigalrm_fully(sub { die "tcp timeout"; });
+- alarm ($tout*2) if ($tout);
++ $timer->run(sub {
++
++ $self->{child_just_exited} = 0;
+ ($nfound, $timeleft) = select($rout=$rin, undef, $eout=$rin, $tout);
+- };
+- alarm 0;
++ $selerr = $!;
+
+- if ($@) {
+- warn "prefork: select timeout failed! recovering\n";
+- sleep 1; # avoid overload
+- return;
+- }
++ });
++
++ # bug 4696: under load, the process can go for such a long time without
++ # being context-switched in, that when it does return the alarm() fires
++ # before the select() timeout does. Treat this as a select() timeout
++ if ($timer->timed_out) {
++ dbg("prefork: select timed out (via alarm)");
++ $nfound = 0;
++ $timeleft = 0;
++ }
++
++ # errors; handle undef *or* -1 returned. do this before "errors on
++ # the handle" below, since an error condition is signalled both via
++ # a -1 return and a $eout bit.
++ if (!defined $nfound || $nfound < 0)
++ {
++ if (exists &Errno::EINTR && $selerr == &Errno::EINTR)
++ {
++ # this happens if the process is signalled during the select(),
++ # for example if someone sends SIGHUP to reload the configuration.
++ # just return inmmediately
++ dbg("prefork: select returned err $selerr, probably signalled");
++ return;
++ }
++
++ # if a child exits during that select() call, it generates a spurious
++ # error, like this:
++ #
++ # Jan 29 12:53:17 dogma spamd[18518]: prefork: child states: BI
++ # Jan 29 12:53:17 dogma spamd[18518]: spamd: handled cleanup of child pid 13101 due to SIGCHLD
++ # Jan 29 12:53:17 dogma spamd[18518]: prefork: select returned -1! recovering:
++ #
++ # avoid by setting a boolean in the child_exited() callback and checking
++ # it here. log $! just in case, though.
++ if ($self->{child_just_exited} && $nfound == -1) {
++ dbg("prefork: select returned -1 due to child exiting, ignored ($selerr)");
++ return;
++ }
++
++ warn "prefork: select returned ".
++ (defined $nfound ? $nfound : "undef").
++ "! recovering: $selerr\n";
+
+- if (!defined $nfound) {
+- warn "prefork: select returned undef! recovering\n";
+ sleep 1; # avoid overload
+ return;
+ }
+@@ -213,7 +256,7 @@
+ # errors on the handle?
+ # return them immediately, they may be from a SIGHUP restart signal
+ if (vec ($eout, $self->{server_fileno}, 1)) {
+- warn "prefork: select returned error on server filehandle: $!\n";
++ warn "prefork: select returned error on server filehandle: $selerr $!\n";
+ return;
+ }
+
+@@ -282,7 +325,7 @@
+
+ my ($sock, $kid);
+ while (($kid, $sock) = each %{$self->{backchannel}->{kids}}) {
+- $self->syswrite_with_retry($sock, PF_PING_ORDER) and next;
++ $self->syswrite_with_retry($sock, PF_PING_ORDER, $kid, 3) and next;
+
+ warn "prefork: write of ping failed to $kid fd=".$sock->fileno.": ".$!;
+
+@@ -353,7 +396,7 @@
+ return $self->order_idle_child_to_accept();
+ }
+
+- if (!$self->syswrite_with_retry($sock, PF_ACCEPT_ORDER))
++ if (!$self->syswrite_with_retry($sock, PF_ACCEPT_ORDER, $kid))
+ {
+ # failure to write to the child; bad news. call it dead
+ warn "prefork: killing rogue child $kid, failed to write on fd ".$sock->fileno.": $!\n";
+@@ -396,7 +439,7 @@
+ my ($self, $kid) = @_;
+ if ($self->{waiting_for_idle_child}) {
+ my $sock = $self->{backchannel}->get_socket_for_child($kid);
+- $self->syswrite_with_retry($sock, PF_ACCEPT_ORDER)
++ $self->syswrite_with_retry($sock, PF_ACCEPT_ORDER, $kid)
+ or die "prefork: $kid claimed it was ready, but write failed on fd ".
+ $sock->fileno.": ".$!;
+ $self->{waiting_for_idle_child} = 0;
+@@ -426,7 +469,7 @@
+ sub report_backchannel_socket {
+ my ($self, $str) = @_;
+ my $sock = $self->{backchannel}->get_parent_socket();
+- $self->syswrite_with_retry($sock, $str)
++ $self->syswrite_with_retry($sock, $str, 'parent')
+ or write "syswrite() to parent failed: $!";
+ }
+
+@@ -537,12 +580,31 @@
+ }
+
+ sub syswrite_with_retry {
+- my ($self, $sock, $buf) = @_;
++ my ($self, $sock, $buf, $targetname, $numretries) = @_;
++ $numretries ||= 10; # default 10 retries
+
+ my $written = 0;
++ my $try = 0;
+
+ retry_write:
++
++ $try++;
++ if ($try > 1) {
++ warn "prefork: syswrite(".$sock->fileno.") to $targetname failed on try $try";
++ if ($try > $numretries) {
++ warn "prefork: giving up";
++ return undef;
++ }
++ else {
++ # give it 1 second to recover. we retry indefinitely.
++ my $rout = '';
++ vec($rout, $sock->fileno, 1) = 1;
++ select(undef, $rout, undef, 1);
++ }
++ }
++
+ my $nbytes = $sock->syswrite($buf);
++
+ if (!defined $nbytes) {
+ unless ((exists &Errno::EAGAIN && $! == &Errno::EAGAIN)
+ || (exists &Errno::EWOULDBLOCK && $! == &Errno::EWOULDBLOCK))
+@@ -551,13 +613,7 @@
+ return undef;
+ }
+
+- warn "prefork: syswrite(".$sock->fileno.") failed, retrying...";
+-
+- # give it 5 seconds to recover. we retry indefinitely.
+- my $rout = '';
+- vec($rout, $sock->fileno, 1) = 1;
+- select(undef, $rout, undef, 5);
+-
++ warn "prefork: retrying syswrite(): $!";
+ goto retry_write;
+ }
+ else {
+@@ -568,7 +624,8 @@
+ return $written; # it's complete, we can return
+ }
+ else {
+- warn "prefork: partial write of $nbytes, towrite=".length($buf).
++ warn "prefork: partial write of $nbytes to ".
++ $targetname.", towrite=".length($buf).
+ " sofar=".$written." fd=".$sock->fileno.", recovering";
+ goto retry_write;
+ }
+
+Added: spamassassin/branches/3.1/lib/Mail/SpamAssassin/Timeout.pm
+URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/Timeout.pm?rev=384590&view=auto
+==============================================================================
+--- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Timeout.pm (added)
++++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Timeout.pm Thu Mar 9 11:51:59 2006
+@@ -0,0 +1,215 @@
++# <@LICENSE>
++# Copyright 2004 Apache Software Foundation
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++# </@LICENSE>
++
++=head1 NAME
++
++Mail::SpamAssassin::Timeout - safe, reliable timeouts in perl
++
++=head1 SYNOPSIS
++
++ # non-timeout code...
++
++ my $t = Mail::SpamAssassin::Timeout->new({ secs => 5 });
++
++ $t->run(sub {
++ # code to run with a 5-second timeout...
++ });
++
++ if ($t->timed_out()) {
++ # do something...
++ }
++
++ # more non-timeout code...
++
++=head1 DESCRIPTION
++
++This module provides a safe, reliable and clean API to provide
++C<alarm(2)>-based timeouts for perl code.
++
++Note that C<$SIG{ALRM}> is used to provide the timeout, so this will not
++interrupt out-of-control regular expression matches.
++
++Nested timeouts are supported.
++
++=head1 PUBLIC METHODS
++
++=over 4
++
++=cut
++
++package Mail::SpamAssassin::Timeout;
++
++use strict;
++use warnings;
++use bytes;
++
++use vars qw{
++ @ISA
++};
++
++@ISA = qw();
++
++###########################################################################
++
++=item my $t = Mail::SpamAssassin::Timeout->new({ ... options ... });
++
++Constructor. Options include:
++
++=over 4
++
++=item secs => $seconds
++
++timeout, in seconds. Optional; if not specified, no timeouts will be applied.
++
++=back
++
++=cut
++
++sub new {
++ my ($class, $opts) = @_;
++ $class = ref($class) || $class;
++ my %selfval = $opts ? %{$opts} : ();
++ my $self = \%selfval;
++
++ bless ($self, $class);
++ $self;
++}
++
++###########################################################################
++
++=item $t->run($coderef)
++
++Run a code reference within the currently-defined timeout.
++
++The timeout is as defined by the B<secs> parameter to the constructor.
++
++Returns whatever the subroutine returns, or C<undef> on timeout.
++If the timer times out, C<$t-<gt>timed_out()> will return C<1>.
++
++Time elapsed is not cumulative; multiple runs of C<run> will restart the
++timeout from scratch.
++
++=item $t->run_and_catch($coderef)
++
++Run a code reference, as per C<$t-<gt>run()>, but also catching any
++C<die()> calls within the code reference.
++
++Returns C<undef> if no C<die()> call was executed and C<$@> was unset, or the
++value of C<$@> if it was set. (The timeout event doesn't count as a C<die()>.)
++
++=cut
++
++sub run { $_[0]->_run($_[1], 0); }
++
++sub run_and_catch { $_[0]->_run($_[1], 1); }
++
++sub _run { # private
++ my ($self, $sub, $and_catch) = @_;
++
++ delete $self->{timed_out};
++
++ if (!$self->{secs}) { # no timeout! just call the sub and return.
++ return &$sub;
++ }
++
++ # assertion
++ if ($self->{secs} < 0) {
++ die "Mail::SpamAssassin::Timeout: oops? neg value for 'secs': $self->{secs}";
++ }
++
++ my $oldalarm = 0;
++ my $ret;
++
++ eval {
++ # note use of local to ensure closed scope here
++ local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
++ local $SIG{__DIE__}; # bug 4631
++
++ $oldalarm = alarm($self->{secs});
++
++ $ret = &$sub;
++
++ # Unset the alarm() before we leave eval{ } scope, as that stack-pop
++ # operation can take a second or two under load. Note: previous versions
++ # restored $oldalarm here; however, that is NOT what we want to do, since
++ # it creates a new race condition, namely that an old alarm could then fire
++ # while the stack-pop was underway, thereby appearing to be *this* timeout
++ # timing out. In terms of how we might possibly have nested timeouts in
++ # SpamAssassin, this is an academic issue with little impact, but it's
++ # still worth avoiding anyway.
++
++ alarm 0;
++ };
++
++ my $err = $@;
++
++ if (defined $oldalarm) {
++ # now, we could have died from a SIGALRM == timed out. if so,
++ # restore the previously-active one, or zero all timeouts if none
++ # were previously active.
++ alarm $oldalarm;
++ }
++
++ if ($err) {
++ if ($err =~ /__alarm__ignore__/) {
++ $self->{timed_out} = 1;
++ } else {
++ if ($and_catch) {
++ return $@;
++ } else {
++ die $@; # propagate any "real" errors
++ }
++ }
++ }
++
++ if ($and_catch) {
++ return; # undef
++ } else {
++ return $ret;
++ }
++}
++
++###########################################################################
++
++=item $t->timed_out()
++
++Returns C<1> if the most recent code executed in C<run()> timed out, or
++C<undef> if it did not.
++
++=cut
++
++sub timed_out {
++ my ($self) = @_;
++ return $self->{timed_out};
++}
++
++###########################################################################
++
++=item $t->reset()
++
++If called within a C<run()> code reference, causes the current alarm timer to
++be reset to its starting value.
++
++=cut
++
++sub reset {
++ my ($self) = @_;
++ alarm($self->{secs});
++}
++
++###########################################################################
++
++1;
+
+Modified: spamassassin/branches/3.1/spamd/spamd.raw
+URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/spamd/spamd.raw?rev=384590&r1=384589&r2=384590&view=diff
+==============================================================================
+--- spamassassin/branches/3.1/spamd/spamd.raw (original)
++++ spamassassin/branches/3.1/spamd/spamd.raw Thu Mar 9 11:51:59 2006
+@@ -2049,6 +2049,9 @@
+ foreach (keys %children) {
+ kill 'INT' => $_;
+ my $pid = waitpid($_, 0);
++ if ($scaling) {
++ $scaling->child_exited($pid);
++ }
+ info("spamd: child $pid killed successfully");
+ }
+ %children = ();
+
+
+
+
+ \ No newline at end of file
diff --git a/buildbot/buildbot/test/mail/syncmail.1 b/buildbot/buildbot/test/mail/syncmail.1
new file mode 100644
index 0000000..eb35e25
--- /dev/null
+++ b/buildbot/buildbot/test/mail/syncmail.1
@@ -0,0 +1,152 @@
+Return-Path: <warner@users.sourceforge.net>
+Delivered-To: warner-sourceforge@luther.lothar.com
+Received: (qmail 23758 invoked by uid 1000); 28 Jul 2003 07:22:14 -0000
+Delivered-To: warner-sourceforge@lothar.com
+Received: (qmail 62715 invoked by uid 13574); 28 Jul 2003 07:22:03 -0000
+Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-sourceforge@lothar.com>; 28 Jul 2003 07:22:03 -0000
+Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
+ by sc8-sf-list1.sourceforge.net with esmtp
+ (Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
+ id 19h2KY-0004Nr-00
+ for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
+Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
+ id 19h2KY-0001rv-00
+ for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
+Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
+ id 19h2KY-0003r4-00
+ for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
+From: warner@users.sourceforge.net
+To: warner@users.sourceforge.net
+Subject: buildbot/buildbot/changes freshcvsmail.py,1.2,1.3
+Message-Id: <E19h2KY-0003r4-00@sc8-pr-cvs1.sourceforge.net>
+Date: Mon, 28 Jul 2003 00:22:02 -0700
+Status:
+
+Update of /cvsroot/buildbot/buildbot/buildbot/changes
+In directory sc8-pr-cvs1:/tmp/cvs-serv14795/buildbot/changes
+
+Modified Files:
+ freshcvsmail.py
+Log Message:
+remove leftover code, leave a temporary compatibility import. Note! Start
+importing FCMaildirSource from changes.mail instead of changes.freshcvsmail
+
+
+Index: freshcvsmail.py
+===================================================================
+RCS file: /cvsroot/buildbot/buildbot/buildbot/changes/freshcvsmail.py,v
+retrieving revision 1.2
+retrieving revision 1.3
+diff -C2 -d -r1.2 -r1.3
+*** freshcvsmail.py 27 Jul 2003 18:54:08 -0000 1.2
+--- freshcvsmail.py 28 Jul 2003 07:22:00 -0000 1.3
+***************
+*** 1,96 ****
+ #! /usr/bin/python
+
+! from buildbot.interfaces import IChangeSource
+! from buildbot.changes.maildirtwisted import MaildirTwisted
+! from buildbot.changes.changes import Change
+! from rfc822 import Message
+! import os, os.path
+!
+! def parseFreshCVSMail(fd, prefix=None):
+! """Parse mail sent by FreshCVS"""
+! # this uses rfc822.Message so it can run under python2.1 . In the future
+! # it will be updated to use python2.2's "email" module.
+!
+! m = Message(fd)
+! # FreshCVS sets From: to "user CVS <user>", but the <> part may be
+! # modified by the MTA (to include a local domain)
+! name, addr = m.getaddr("from")
+! if not name:
+! return None # no From means this message isn't from FreshCVS
+! cvs = name.find(" CVS")
+! if cvs == -1:
+! return None # this message isn't from FreshCVS
+! who = name[:cvs]
+!
+! # we take the time of receipt as the time of checkin. Not correct,
+! # but it avoids the out-of-order-changes issue
+! #when = m.getdate() # and convert from 9-tuple, and handle timezone
+!
+! files = []
+! comments = ""
+! isdir = 0
+! lines = m.fp.readlines()
+! while lines:
+! line = lines.pop(0)
+! if line == "Modified files:\n":
+! break
+! while lines:
+! line = lines.pop(0)
+! if line == "\n":
+! break
+! line = line.rstrip("\n")
+! file, junk = line.split(None, 1)
+! if prefix:
+! # insist that the file start with the prefix: FreshCVS sends
+! # changes we don't care about too
+! bits = file.split(os.sep)
+! if bits[0] == prefix:
+! file = apply(os.path.join, bits[1:])
+! else:
+! break
+! if junk == "0 0":
+! isdir = 1
+! files.append(file)
+! while lines:
+! line = lines.pop(0)
+! if line == "Log message:\n":
+! break
+! # message is terminated by "ViewCVS links:" or "Index:..." (patch)
+! while lines:
+! line = lines.pop(0)
+! if line == "ViewCVS links:\n":
+! break
+! if line.find("Index: ") == 0:
+! break
+! comments += line
+! comments = comments.rstrip() + "\n"
+!
+! if not files:
+! return None
+!
+! change = Change(who, files, comments, isdir)
+!
+! return change
+!
+!
+!
+! class FCMaildirSource(MaildirTwisted):
+! """This source will watch a maildir that is subscribed to a FreshCVS
+! change-announcement mailing list.
+! """
+!
+! __implements__ = IChangeSource,
+
+! def __init__(self, maildir, prefix=None):
+! MaildirTwisted.__init__(self, maildir)
+! self.changemaster = None # filled in when added
+! self.prefix = prefix
+! def describe(self):
+! return "FreshCVS mailing list in maildir %s" % self.maildir.where
+! def messageReceived(self, filename):
+! path = os.path.join(self.basedir, "new", filename)
+! change = parseFreshCVSMail(open(path, "r"), self.prefix)
+! if change:
+! self.changemaster.addChange(change)
+! os.rename(os.path.join(self.basedir, "new", filename),
+! os.path.join(self.basedir, "cur", filename))
+--- 1,5 ----
+ #! /usr/bin/python
+
+! # leftover import for compatibility
+
+! from buildbot.changes.mail import FCMaildirSource
+
+
diff --git a/buildbot/buildbot/test/mail/syncmail.2 b/buildbot/buildbot/test/mail/syncmail.2
new file mode 100644
index 0000000..5296cbe
--- /dev/null
+++ b/buildbot/buildbot/test/mail/syncmail.2
@@ -0,0 +1,56 @@
+Return-Path: <warner@users.sourceforge.net>
+Delivered-To: warner-sourceforge@luther.lothar.com
+Received: (qmail 23221 invoked by uid 1000); 28 Jul 2003 06:53:15 -0000
+Delivered-To: warner-sourceforge@lothar.com
+Received: (qmail 58537 invoked by uid 13574); 28 Jul 2003 06:53:09 -0000
+Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-sourceforge@lothar.com>; 28 Jul 2003 06:53:09 -0000
+Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
+ by sc8-sf-list1.sourceforge.net with esmtp
+ (Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
+ id 19h1sb-0003nw-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:09 -0700
+Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
+ id 19h1sa-00018t-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:08 -0700
+Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
+ id 19h1sa-0002mX-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:08 -0700
+From: warner@users.sourceforge.net
+To: warner@users.sourceforge.net
+Subject: buildbot ChangeLog,1.93,1.94
+Message-Id: <E19h1sa-0002mX-00@sc8-pr-cvs1.sourceforge.net>
+Date: Sun, 27 Jul 2003 23:53:08 -0700
+Status:
+
+Update of /cvsroot/buildbot/buildbot
+In directory sc8-pr-cvs1:/tmp/cvs-serv10689
+
+Modified Files:
+ ChangeLog
+Log Message:
+ * NEWS: started adding new features
+
+
+Index: ChangeLog
+===================================================================
+RCS file: /cvsroot/buildbot/buildbot/ChangeLog,v
+retrieving revision 1.93
+retrieving revision 1.94
+diff -C2 -d -r1.93 -r1.94
+*** ChangeLog 27 Jul 2003 22:53:27 -0000 1.93
+--- ChangeLog 28 Jul 2003 06:53:06 -0000 1.94
+***************
+*** 1,4 ****
+--- 1,6 ----
+ 2003-07-27 Brian Warner <warner@lothar.com>
+
++ * NEWS: started adding new features
++
+ * buildbot/changes/mail.py: start work on Syncmail parser, move
+ mail sources into their own file
+
+
diff --git a/buildbot/buildbot/test/mail/syncmail.3 b/buildbot/buildbot/test/mail/syncmail.3
new file mode 100644
index 0000000..eee19b1
--- /dev/null
+++ b/buildbot/buildbot/test/mail/syncmail.3
@@ -0,0 +1,39 @@
+Return-Path: <warner@users.sourceforge.net>
+Delivered-To: warner-sourceforge@luther.lothar.com
+Received: (qmail 23196 invoked by uid 1000); 28 Jul 2003 06:51:53 -0000
+Delivered-To: warner-sourceforge@lothar.com
+Received: (qmail 58269 invoked by uid 13574); 28 Jul 2003 06:51:46 -0000
+Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-sourceforge@lothar.com>; 28 Jul 2003 06:51:46 -0000
+Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
+ by sc8-sf-list1.sourceforge.net with esmtp
+ (Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
+ id 19h1rF-00027s-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:46 -0700
+Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
+ id 19h1rF-00017O-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:45 -0700
+Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
+ id 19h1rF-0002jg-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:45 -0700
+From: warner@users.sourceforge.net
+To: warner@users.sourceforge.net
+Subject: CVSROOT syncmail,1.1,NONE
+Message-Id: <E19h1rF-0002jg-00@sc8-pr-cvs1.sourceforge.net>
+Date: Sun, 27 Jul 2003 23:51:45 -0700
+Status:
+
+Update of /cvsroot/buildbot/CVSROOT
+In directory sc8-pr-cvs1:/tmp/cvs-serv10515
+
+Removed Files:
+ syncmail
+Log Message:
+nevermind
+
+--- syncmail DELETED ---
+
+
diff --git a/buildbot/buildbot/test/mail/syncmail.4 b/buildbot/buildbot/test/mail/syncmail.4
new file mode 100644
index 0000000..44bda5d
--- /dev/null
+++ b/buildbot/buildbot/test/mail/syncmail.4
@@ -0,0 +1,290 @@
+Return-Path: <warner@users.sourceforge.net>
+Delivered-To: warner-sourceforge@luther.lothar.com
+Received: (qmail 24111 invoked by uid 1000); 28 Jul 2003 08:01:54 -0000
+Delivered-To: warner-sourceforge@lothar.com
+Received: (qmail 68756 invoked by uid 13574); 28 Jul 2003 08:01:46 -0000
+Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-sourceforge@lothar.com>; 28 Jul 2003 08:01:46 -0000
+Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
+ by sc8-sf-list1.sourceforge.net with esmtp
+ (Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
+ id 19h2wz-00029d-00
+ for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 01:01:45 -0700
+Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
+ id 19h2wz-0002XB-00
+ for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 01:01:45 -0700
+Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
+ id 19h2wz-0005a9-00
+ for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 01:01:45 -0700
+From: warner@users.sourceforge.net
+To: warner@users.sourceforge.net
+Subject: buildbot/test/mail syncmail.1,NONE,1.1 syncmail.2,NONE,1.1 syncmail.3,NONE,1.1
+Message-Id: <E19h2wz-0005a9-00@sc8-pr-cvs1.sourceforge.net>
+Date: Mon, 28 Jul 2003 01:01:45 -0700
+Status:
+
+Update of /cvsroot/buildbot/buildbot/test/mail
+In directory sc8-pr-cvs1:/tmp/cvs-serv21445
+
+Added Files:
+ syncmail.1 syncmail.2 syncmail.3
+Log Message:
+test cases for syncmail parser
+
+--- NEW FILE: syncmail.1 ---
+Return-Path: <warner@users.sourceforge.net>
+Delivered-To: warner-sourceforge@luther.lothar.com
+Received: (qmail 23758 invoked by uid 1000); 28 Jul 2003 07:22:14 -0000
+Delivered-To: warner-sourceforge@lothar.com
+Received: (qmail 62715 invoked by uid 13574); 28 Jul 2003 07:22:03 -0000
+Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-sourceforge@lothar.com>; 28 Jul 2003 07:22:03 -0000
+Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
+ by sc8-sf-list1.sourceforge.net with esmtp
+ (Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
+ id 19h2KY-0004Nr-00
+ for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
+Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
+ id 19h2KY-0001rv-00
+ for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
+Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
+ id 19h2KY-0003r4-00
+ for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
+From: warner@users.sourceforge.net
+To: warner@users.sourceforge.net
+Subject: buildbot/buildbot/changes freshcvsmail.py,1.2,1.3
+Message-Id: <E19h2KY-0003r4-00@sc8-pr-cvs1.sourceforge.net>
+Date: Mon, 28 Jul 2003 00:22:02 -0700
+Status:
+
+Update of /cvsroot/buildbot/buildbot/buildbot/changes
+In directory sc8-pr-cvs1:/tmp/cvs-serv14795/buildbot/changes
+
+Modified Files:
+ freshcvsmail.py
+Log Message:
+remove leftover code, leave a temporary compatibility import. Note! Start
+importing FCMaildirSource from changes.mail instead of changes.freshcvsmail
+
+
+Index: freshcvsmail.py
+===================================================================
+RCS file: /cvsroot/buildbot/buildbot/buildbot/changes/freshcvsmail.py,v
+retrieving revision 1.2
+retrieving revision 1.3
+diff -C2 -d -r1.2 -r1.3
+*** freshcvsmail.py 27 Jul 2003 18:54:08 -0000 1.2
+--- freshcvsmail.py 28 Jul 2003 07:22:00 -0000 1.3
+***************
+*** 1,96 ****
+ #! /usr/bin/python
+
+! from buildbot.interfaces import IChangeSource
+! from buildbot.changes.maildirtwisted import MaildirTwisted
+! from buildbot.changes.changes import Change
+! from rfc822 import Message
+! import os, os.path
+!
+! def parseFreshCVSMail(fd, prefix=None):
+! """Parse mail sent by FreshCVS"""
+! # this uses rfc822.Message so it can run under python2.1 . In the future
+! # it will be updated to use python2.2's "email" module.
+!
+! m = Message(fd)
+! # FreshCVS sets From: to "user CVS <user>", but the <> part may be
+! # modified by the MTA (to include a local domain)
+! name, addr = m.getaddr("from")
+! if not name:
+! return None # no From means this message isn't from FreshCVS
+! cvs = name.find(" CVS")
+! if cvs == -1:
+! return None # this message isn't from FreshCVS
+! who = name[:cvs]
+!
+! # we take the time of receipt as the time of checkin. Not correct,
+! # but it avoids the out-of-order-changes issue
+! #when = m.getdate() # and convert from 9-tuple, and handle timezone
+!
+! files = []
+! comments = ""
+! isdir = 0
+! lines = m.fp.readlines()
+! while lines:
+! line = lines.pop(0)
+! if line == "Modified files:\n":
+! break
+! while lines:
+! line = lines.pop(0)
+! if line == "\n":
+! break
+! line = line.rstrip("\n")
+! file, junk = line.split(None, 1)
+! if prefix:
+! # insist that the file start with the prefix: FreshCVS sends
+! # changes we don't care about too
+! bits = file.split(os.sep)
+! if bits[0] == prefix:
+! file = apply(os.path.join, bits[1:])
+! else:
+! break
+! if junk == "0 0":
+! isdir = 1
+! files.append(file)
+! while lines:
+! line = lines.pop(0)
+! if line == "Log message:\n":
+! break
+! # message is terminated by "ViewCVS links:" or "Index:..." (patch)
+! while lines:
+! line = lines.pop(0)
+! if line == "ViewCVS links:\n":
+! break
+! if line.find("Index: ") == 0:
+! break
+! comments += line
+! comments = comments.rstrip() + "\n"
+!
+! if not files:
+! return None
+!
+! change = Change(who, files, comments, isdir)
+!
+! return change
+!
+!
+!
+! class FCMaildirSource(MaildirTwisted):
+! """This source will watch a maildir that is subscribed to a FreshCVS
+! change-announcement mailing list.
+! """
+!
+! __implements__ = IChangeSource,
+
+! def __init__(self, maildir, prefix=None):
+! MaildirTwisted.__init__(self, maildir)
+! self.changemaster = None # filled in when added
+! self.prefix = prefix
+! def describe(self):
+! return "FreshCVS mailing list in maildir %s" % self.maildir.where
+! def messageReceived(self, filename):
+! path = os.path.join(self.basedir, "new", filename)
+! change = parseFreshCVSMail(open(path, "r"), self.prefix)
+! if change:
+! self.changemaster.addChange(change)
+! os.rename(os.path.join(self.basedir, "new", filename),
+! os.path.join(self.basedir, "cur", filename))
+--- 1,5 ----
+ #! /usr/bin/python
+
+! # leftover import for compatibility
+
+! from buildbot.changes.mail import FCMaildirSource
+
+
+
+--- NEW FILE: syncmail.2 ---
+Return-Path: <warner@users.sourceforge.net>
+Delivered-To: warner-sourceforge@luther.lothar.com
+Received: (qmail 23221 invoked by uid 1000); 28 Jul 2003 06:53:15 -0000
+Delivered-To: warner-sourceforge@lothar.com
+Received: (qmail 58537 invoked by uid 13574); 28 Jul 2003 06:53:09 -0000
+Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-sourceforge@lothar.com>; 28 Jul 2003 06:53:09 -0000
+Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
+ by sc8-sf-list1.sourceforge.net with esmtp
+ (Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
+ id 19h1sb-0003nw-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:09 -0700
+Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
+ id 19h1sa-00018t-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:08 -0700
+Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
+ id 19h1sa-0002mX-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:08 -0700
+From: warner@users.sourceforge.net
+To: warner@users.sourceforge.net
+Subject: buildbot ChangeLog,1.93,1.94
+Message-Id: <E19h1sa-0002mX-00@sc8-pr-cvs1.sourceforge.net>
+Date: Sun, 27 Jul 2003 23:53:08 -0700
+Status:
+
+Update of /cvsroot/buildbot/buildbot
+In directory sc8-pr-cvs1:/tmp/cvs-serv10689
+
+Modified Files:
+ ChangeLog
+Log Message:
+ * NEWS: started adding new features
+
+
+Index: ChangeLog
+===================================================================
+RCS file: /cvsroot/buildbot/buildbot/ChangeLog,v
+retrieving revision 1.93
+retrieving revision 1.94
+diff -C2 -d -r1.93 -r1.94
+*** ChangeLog 27 Jul 2003 22:53:27 -0000 1.93
+--- ChangeLog 28 Jul 2003 06:53:06 -0000 1.94
+***************
+*** 1,4 ****
+--- 1,6 ----
+ 2003-07-27 Brian Warner <warner@lothar.com>
+
++ * NEWS: started adding new features
++
+ * buildbot/changes/mail.py: start work on Syncmail parser, move
+ mail sources into their own file
+
+
+
+--- NEW FILE: syncmail.3 ---
+Return-Path: <warner@users.sourceforge.net>
+Delivered-To: warner-sourceforge@luther.lothar.com
+Received: (qmail 23196 invoked by uid 1000); 28 Jul 2003 06:51:53 -0000
+Delivered-To: warner-sourceforge@lothar.com
+Received: (qmail 58269 invoked by uid 13574); 28 Jul 2003 06:51:46 -0000
+Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-sourceforge@lothar.com>; 28 Jul 2003 06:51:46 -0000
+Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
+ by sc8-sf-list1.sourceforge.net with esmtp
+ (Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
+ id 19h1rF-00027s-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:46 -0700
+Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
+ id 19h1rF-00017O-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:45 -0700
+Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
+ id 19h1rF-0002jg-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:45 -0700
+From: warner@users.sourceforge.net
+To: warner@users.sourceforge.net
+Subject: CVSROOT syncmail,1.1,NONE
+Message-Id: <E19h1rF-0002jg-00@sc8-pr-cvs1.sourceforge.net>
+Date: Sun, 27 Jul 2003 23:51:45 -0700
+Status:
+
+Update of /cvsroot/buildbot/CVSROOT
+In directory sc8-pr-cvs1:/tmp/cvs-serv10515
+
+Removed Files:
+ syncmail
+Log Message:
+nevermind
+
+--- syncmail DELETED ---
+
+
+
+
diff --git a/buildbot/buildbot/test/mail/syncmail.5 b/buildbot/buildbot/test/mail/syncmail.5
new file mode 100644
index 0000000..82ba451
--- /dev/null
+++ b/buildbot/buildbot/test/mail/syncmail.5
@@ -0,0 +1,70 @@
+From thomas@otto.amantes Mon Feb 21 17:46:45 2005
+Return-Path: <thomas@otto.amantes>
+Received: from otto.amantes (otto.amantes [127.0.0.1]) by otto.amantes
+ (8.13.1/8.13.1) with ESMTP id j1LGkjr3011986 for <thomas@localhost>; Mon,
+ 21 Feb 2005 17:46:45 +0100
+Message-Id: <200502211646.j1LGkjr3011986@otto.amantes>
+From: Thomas Vander Stichele <thomas@otto.amantes>
+To: thomas@otto.amantes
+Subject: test1 s
+Date: Mon, 21 Feb 2005 16:46:45 +0000
+X-Mailer: Python syncmail $Revision: 1.1 $
+ <http://sf.net/projects/cvs-syncmail>
+Content-Transfer-Encoding: 8bit
+Mime-Version: 1.0
+
+Update of /home/cvs/test/test1
+In directory otto.amantes:/home/thomas/dev/tests/cvs/test1
+
+Added Files:
+ Tag: BRANCH-DEVEL
+ MANIFEST Makefile.am autogen.sh configure.in
+Log Message:
+stuff on the branch
+
+--- NEW FILE: Makefile.am ---
+SUBDIRS = src
+
+# normally I wouldn't distribute autogen.sh and friends with a tarball
+# but this one is specifically distributed for demonstration purposes
+
+EXTRA_DIST = autogen.sh
+
+# target for making the "import this into svn" tarball
+test:
+ mkdir test
+ for a in `cat MANIFEST`; do \
+ cp -pr $$a test/$$a; done
+ tar czf test.tar.gz test
+ rm -rf test
+
+--- NEW FILE: MANIFEST ---
+MANIFEST
+autogen.sh
+configure.in
+Makefile.am
+src
+src/Makefile.am
+src/test.c
+
+--- NEW FILE: autogen.sh ---
+#!/bin/sh
+
+set -x
+
+aclocal && \
+autoheader && \
+autoconf && \
+automake -a --foreign && \
+./configure $@
+
+--- NEW FILE: configure.in ---
+dnl configure.ac for version macro
+AC_INIT
+
+AM_CONFIG_HEADER(config.h)
+
+AM_INIT_AUTOMAKE(test, 0.0.0)
+AC_PROG_CC
+
+AC_OUTPUT(Makefile src/Makefile)
diff --git a/buildbot/buildbot/test/runutils.py b/buildbot/buildbot/test/runutils.py
new file mode 100644
index 0000000..2be85d6
--- /dev/null
+++ b/buildbot/buildbot/test/runutils.py
@@ -0,0 +1,516 @@
+
+import signal
+import shutil, os, errno
+from cStringIO import StringIO
+from twisted.internet import defer, reactor, protocol
+from twisted.python import log, util
+
+from buildbot import master, interfaces
+from buildbot.slave import bot
+from buildbot.buildslave import BuildSlave
+from buildbot.process.builder import Builder
+from buildbot.process.base import BuildRequest, Build
+from buildbot.process.buildstep import BuildStep
+from buildbot.sourcestamp import SourceStamp
+from buildbot.status import builder
+from buildbot.process.properties import Properties
+
+
+
+class _PutEverythingGetter(protocol.ProcessProtocol):
+ def __init__(self, deferred, stdin):
+ self.deferred = deferred
+ self.outBuf = StringIO()
+ self.errBuf = StringIO()
+ self.outReceived = self.outBuf.write
+ self.errReceived = self.errBuf.write
+ self.stdin = stdin
+
+ def connectionMade(self):
+ if self.stdin is not None:
+ self.transport.write(self.stdin)
+ self.transport.closeStdin()
+
+ def processEnded(self, reason):
+ out = self.outBuf.getvalue()
+ err = self.errBuf.getvalue()
+ e = reason.value
+ code = e.exitCode
+ if e.signal:
+ self.deferred.errback((out, err, e.signal))
+ else:
+ self.deferred.callback((out, err, code))
+
+def myGetProcessOutputAndValue(executable, args=(), env={}, path='.',
+ _reactor_ignored=None, stdin=None):
+ """Like twisted.internet.utils.getProcessOutputAndValue but takes
+ stdin, too."""
+ d = defer.Deferred()
+ p = _PutEverythingGetter(d, stdin)
+ reactor.spawnProcess(p, executable, (executable,)+tuple(args), env, path)
+ return d
+
+
+class MyBot(bot.Bot):
+ def remote_getSlaveInfo(self):
+ return self.parent.info
+
+class MyBuildSlave(bot.BuildSlave):
+ botClass = MyBot
+
+def rmtree(d):
+ try:
+ shutil.rmtree(d, ignore_errors=1)
+ except OSError, e:
+ # Python 2.2's shutil.rmtree appears to ignore ignore_errors
+ if e.errno != errno.ENOENT:
+ raise
+
+class RunMixin:
+ master = None
+
+ def rmtree(self, d):
+ rmtree(d)
+
+ def setUp(self):
+ self.slaves = {}
+ self.rmtree("basedir")
+ os.mkdir("basedir")
+ self.master = master.BuildMaster("basedir")
+ self.status = self.master.getStatus()
+ self.control = interfaces.IControl(self.master)
+
+ def connectOneSlave(self, slavename, opts={}):
+ port = self.master.slavePort._port.getHost().port
+ self.rmtree("slavebase-%s" % slavename)
+ os.mkdir("slavebase-%s" % slavename)
+ slave = MyBuildSlave("localhost", port, slavename, "sekrit",
+ "slavebase-%s" % slavename,
+ keepalive=0, usePTY=False, debugOpts=opts)
+ slave.info = {"admin": "one"}
+ self.slaves[slavename] = slave
+ slave.startService()
+
+ def connectSlave(self, builders=["dummy"], slavename="bot1",
+ opts={}):
+ # connect buildslave 'slavename' and wait for it to connect to all of
+ # the given builders
+ dl = []
+ # initiate call for all of them, before waiting on result,
+ # otherwise we might miss some
+ for b in builders:
+ dl.append(self.master.botmaster.waitUntilBuilderAttached(b))
+ d = defer.DeferredList(dl)
+ self.connectOneSlave(slavename, opts)
+ return d
+
+ def connectSlaves(self, slavenames, builders):
+ dl = []
+ # initiate call for all of them, before waiting on result,
+ # otherwise we might miss some
+ for b in builders:
+ dl.append(self.master.botmaster.waitUntilBuilderAttached(b))
+ d = defer.DeferredList(dl)
+ for name in slavenames:
+ self.connectOneSlave(name)
+ return d
+
+ def connectSlave2(self):
+ # this takes over for bot1, so it has to share the slavename
+ port = self.master.slavePort._port.getHost().port
+ self.rmtree("slavebase-bot2")
+ os.mkdir("slavebase-bot2")
+ # this uses bot1, really
+ slave = MyBuildSlave("localhost", port, "bot1", "sekrit",
+ "slavebase-bot2", keepalive=0, usePTY=False)
+ slave.info = {"admin": "two"}
+ self.slaves['bot2'] = slave
+ slave.startService()
+
+ def connectSlaveFastTimeout(self):
+ # this slave has a very fast keepalive timeout
+ port = self.master.slavePort._port.getHost().port
+ self.rmtree("slavebase-bot1")
+ os.mkdir("slavebase-bot1")
+ slave = MyBuildSlave("localhost", port, "bot1", "sekrit",
+ "slavebase-bot1", keepalive=2, usePTY=False,
+ keepaliveTimeout=1)
+ slave.info = {"admin": "one"}
+ self.slaves['bot1'] = slave
+ slave.startService()
+ d = self.master.botmaster.waitUntilBuilderAttached("dummy")
+ return d
+
+ # things to start builds
+ def requestBuild(self, builder):
+ # returns a Deferred that fires with an IBuildStatus object when the
+ # build is finished
+ req = BuildRequest("forced build", SourceStamp(), 'test_builder')
+ self.control.getBuilder(builder).requestBuild(req)
+ return req.waitUntilFinished()
+
+ def failUnlessBuildSucceeded(self, bs):
+ if bs.getResults() != builder.SUCCESS:
+ log.msg("failUnlessBuildSucceeded noticed that the build failed")
+ self.logBuildResults(bs)
+ self.failUnlessEqual(bs.getResults(), builder.SUCCESS)
+ return bs # useful for chaining
+
+ def logBuildResults(self, bs):
+ # emit the build status and the contents of all logs to test.log
+ log.msg("logBuildResults starting")
+ log.msg(" bs.getResults() == %s" % builder.Results[bs.getResults()])
+ log.msg(" bs.isFinished() == %s" % bs.isFinished())
+ for s in bs.getSteps():
+ for l in s.getLogs():
+ log.msg("--- START step %s / log %s ---" % (s.getName(),
+ l.getName()))
+ if not l.getName().endswith(".html"):
+ log.msg(l.getTextWithHeaders())
+ log.msg("--- STOP ---")
+ log.msg("logBuildResults finished")
+
+ def tearDown(self):
+ log.msg("doing tearDown")
+ d = self.shutdownAllSlaves()
+ d.addCallback(self._tearDown_1)
+ d.addCallback(self._tearDown_2)
+ return d
+ def _tearDown_1(self, res):
+ if self.master:
+ return defer.maybeDeferred(self.master.stopService)
+ def _tearDown_2(self, res):
+ self.master = None
+ log.msg("tearDown done")
+
+
+ # various forms of slave death
+
+ def shutdownAllSlaves(self):
+ # the slave has disconnected normally: they SIGINT'ed it, or it shut
+ # down willingly. This will kill child processes and give them a
+ # chance to finish up. We return a Deferred that will fire when
+ # everything is finished shutting down.
+
+ log.msg("doing shutdownAllSlaves")
+ dl = []
+ for slave in self.slaves.values():
+ dl.append(slave.waitUntilDisconnected())
+ dl.append(defer.maybeDeferred(slave.stopService))
+ d = defer.DeferredList(dl)
+ d.addCallback(self._shutdownAllSlavesDone)
+ return d
+ def _shutdownAllSlavesDone(self, res):
+ for name in self.slaves.keys():
+ del self.slaves[name]
+ return self.master.botmaster.waitUntilBuilderFullyDetached("dummy")
+
+ def shutdownSlave(self, slavename, buildername):
+ # this slave has disconnected normally: they SIGINT'ed it, or it shut
+ # down willingly. This will kill child processes and give them a
+ # chance to finish up. We return a Deferred that will fire when
+ # everything is finished shutting down, and the given Builder knows
+ # that the slave has gone away.
+
+ s = self.slaves[slavename]
+ dl = [self.master.botmaster.waitUntilBuilderDetached(buildername),
+ s.waitUntilDisconnected()]
+ d = defer.DeferredList(dl)
+ d.addCallback(self._shutdownSlave_done, slavename)
+ s.stopService()
+ return d
+ def _shutdownSlave_done(self, res, slavename):
+ del self.slaves[slavename]
+
+ def killSlave(self):
+ # the slave has died, its host sent a FIN. The .notifyOnDisconnect
+ # callbacks will terminate the current step, so the build should be
+ # flunked (no further steps should be started).
+ self.slaves['bot1'].bf.continueTrying = 0
+ bot = self.slaves['bot1'].getServiceNamed("bot")
+ broker = bot.builders["dummy"].remote.broker
+ broker.transport.loseConnection()
+ del self.slaves['bot1']
+
+ def disappearSlave(self, slavename="bot1", buildername="dummy",
+ allowReconnect=False):
+ # the slave's host has vanished off the net, leaving the connection
+ # dangling. This will be detected quickly by app-level keepalives or
+ # a ping, or slowly by TCP timeouts.
+
+ # simulate this by replacing the slave Broker's .dataReceived method
+ # with one that just throws away all data.
+ def discard(data):
+ pass
+ bot = self.slaves[slavename].getServiceNamed("bot")
+ broker = bot.builders[buildername].remote.broker
+ broker.dataReceived = discard # seal its ears
+ broker.transport.write = discard # and take away its voice
+ if not allowReconnect:
+ # also discourage it from reconnecting once the connection goes away
+ assert self.slaves[slavename].bf.continueTrying
+ self.slaves[slavename].bf.continueTrying = False
+
+ def ghostSlave(self):
+ # the slave thinks it has lost the connection, and initiated a
+ # reconnect. The master doesn't yet realize it has lost the previous
+ # connection, and sees two connections at once.
+ raise NotImplementedError
+
+
+def setupBuildStepStatus(basedir):
+ """Return a BuildStep with a suitable BuildStepStatus object, ready to
+ use."""
+ os.mkdir(basedir)
+ botmaster = None
+ s0 = builder.Status(botmaster, basedir)
+ s1 = s0.builderAdded("buildername", "buildername")
+ s2 = builder.BuildStatus(s1, 1)
+ s3 = builder.BuildStepStatus(s2)
+ s3.setName("foostep")
+ s3.started = True
+ s3.stepStarted()
+ return s3
+
+def fake_slaveVersion(command, oldversion=None):
+ from buildbot.slave.registry import commandRegistry
+ return commandRegistry[command]
+
+class FakeBuildMaster:
+ properties = Properties(masterprop="master")
+
+class FakeBotMaster:
+ parent = FakeBuildMaster()
+
+def makeBuildStep(basedir, step_class=BuildStep, **kwargs):
+ bss = setupBuildStepStatus(basedir)
+
+ ss = SourceStamp()
+ setup = {'name': "builder1", "slavename": "bot1",
+ 'builddir': "builddir", 'factory': None}
+ b0 = Builder(setup, bss.getBuild().getBuilder())
+ b0.botmaster = FakeBotMaster()
+ br = BuildRequest("reason", ss, 'test_builder')
+ b = Build([br])
+ b.setBuilder(b0)
+ s = step_class(**kwargs)
+ s.setBuild(b)
+ s.setStepStatus(bss)
+ b.build_status = bss.getBuild()
+ b.setupProperties()
+ s.slaveVersion = fake_slaveVersion
+ return s
+
+
+def findDir():
+ # the same directory that holds this script
+ return util.sibpath(__file__, ".")
+
+class SignalMixin:
+ sigchldHandler = None
+
+ def setUpClass(self):
+ # make sure SIGCHLD handler is installed, as it should be on
+ # reactor.run(). problem is reactor may not have been run when this
+ # test runs.
+ if hasattr(reactor, "_handleSigchld") and hasattr(signal, "SIGCHLD"):
+ self.sigchldHandler = signal.signal(signal.SIGCHLD,
+ reactor._handleSigchld)
+
+ def tearDownClass(self):
+ if self.sigchldHandler:
+ signal.signal(signal.SIGCHLD, self.sigchldHandler)
+
+# these classes are used to test SlaveCommands in isolation
+
+class FakeSlaveBuilder:
+ debug = False
+ def __init__(self, usePTY, basedir):
+ self.updates = []
+ self.basedir = basedir
+ self.usePTY = usePTY
+
+ def sendUpdate(self, data):
+ if self.debug:
+ print "FakeSlaveBuilder.sendUpdate", data
+ self.updates.append(data)
+
+
+class SlaveCommandTestBase(SignalMixin):
+ usePTY = False
+
+ def setUpBuilder(self, basedir):
+ if not os.path.exists(basedir):
+ os.mkdir(basedir)
+ self.builder = FakeSlaveBuilder(self.usePTY, basedir)
+
+ def startCommand(self, cmdclass, args):
+ stepId = 0
+ self.cmd = c = cmdclass(self.builder, stepId, args)
+ c.running = True
+ d = c.doStart()
+ return d
+
+ def collectUpdates(self, res=None):
+ logs = {}
+ for u in self.builder.updates:
+ for k in u.keys():
+ if k == "log":
+ logname,data = u[k]
+ oldlog = logs.get(("log",logname), "")
+ logs[("log",logname)] = oldlog + data
+ elif k == "rc":
+ pass
+ else:
+ logs[k] = logs.get(k, "") + u[k]
+ return logs
+
+ def findRC(self):
+ for u in self.builder.updates:
+ if "rc" in u:
+ return u["rc"]
+ return None
+
+ def printStderr(self):
+ for u in self.builder.updates:
+ if "stderr" in u:
+ print u["stderr"]
+
+# ----------------------------------------
+
+class LocalWrapper:
+ # r = pb.Referenceable()
+ # w = LocalWrapper(r)
+ # now you can do things like w.callRemote()
+ def __init__(self, target):
+ self.target = target
+
+ def callRemote(self, name, *args, **kwargs):
+ # callRemote is not allowed to fire its Deferred in the same turn
+ d = defer.Deferred()
+ d.addCallback(self._callRemote, *args, **kwargs)
+ reactor.callLater(0, d.callback, name)
+ return d
+
+ def _callRemote(self, name, *args, **kwargs):
+ method = getattr(self.target, "remote_"+name)
+ return method(*args, **kwargs)
+
+ def notifyOnDisconnect(self, observer):
+ pass
+ def dontNotifyOnDisconnect(self, observer):
+ pass
+
+
+class LocalSlaveBuilder(bot.SlaveBuilder):
+ """I am an object that behaves like a pb.RemoteReference, but in fact I
+ invoke methods locally."""
+ _arg_filter = None
+
+ def setArgFilter(self, filter):
+ self._arg_filter = filter
+
+ def remote_startCommand(self, stepref, stepId, command, args):
+ if self._arg_filter:
+ args = self._arg_filter(args)
+ # stepref should be a RemoteReference to the RemoteCommand
+ return bot.SlaveBuilder.remote_startCommand(self,
+ LocalWrapper(stepref),
+ stepId, command, args)
+
+class StepTester:
+ """Utility class to exercise BuildSteps and RemoteCommands, without
+ really using a Build or a Bot. No networks are used.
+
+ Use this as follows::
+
+ class MyTest(StepTester, unittest.TestCase):
+ def testOne(self):
+ self.slavebase = 'testOne.slave'
+ self.masterbase = 'testOne.master'
+ sb = self.makeSlaveBuilder()
+ step = self.makeStep(stepclass, **kwargs)
+ d = self.runStep(step)
+ d.addCallback(_checkResults)
+ return d
+ """
+
+ #slavebase = "slavebase"
+ slavebuilderbase = "slavebuilderbase"
+ #masterbase = "masterbase"
+
+ def makeSlaveBuilder(self):
+ os.mkdir(self.slavebase)
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase))
+ b = bot.Bot(self.slavebase, False)
+ b.startService()
+ sb = LocalSlaveBuilder("slavebuildername", False)
+ sb.setArgFilter(self.filterArgs)
+ sb.usePTY = False
+ sb.setServiceParent(b)
+ sb.setBuilddir(self.slavebuilderbase)
+ self.remote = LocalWrapper(sb)
+ return sb
+
+ workdir = "build"
+ def makeStep(self, factory, **kwargs):
+ step = makeBuildStep(self.masterbase, factory, **kwargs)
+ step.setBuildSlave(BuildSlave("name", "password"))
+ step.setDefaultWorkdir(self.workdir)
+ return step
+
+ def runStep(self, step):
+ d = defer.maybeDeferred(step.startStep, self.remote)
+ return d
+
+ def wrap(self, target):
+ return LocalWrapper(target)
+
+ def filterArgs(self, args):
+ # this can be overridden
+ return args
+
+# ----------------------------------------
+
+_flags = {}
+
+def setTestFlag(flagname, value):
+ _flags[flagname] = value
+
+class SetTestFlagStep(BuildStep):
+ """
+ A special BuildStep to set a named flag; this can be used with the
+ TestFlagMixin to monitor what has and has not run in a particular
+ configuration.
+ """
+ def __init__(self, flagname='flag', value=1, **kwargs):
+ BuildStep.__init__(self, **kwargs)
+ self.addFactoryArguments(flagname=flagname, value=value)
+
+ self.flagname = flagname
+ self.value = value
+
+ def start(self):
+ properties = self.build.getProperties()
+ _flags[self.flagname] = properties.render(self.value)
+ self.finished(builder.SUCCESS)
+
+class TestFlagMixin:
+ def clearFlags(self):
+ """
+ Set up for a test by clearing all flags; call this from your test
+ function.
+ """
+ _flags.clear()
+
+ def failIfFlagSet(self, flagname, msg=None):
+ if not msg: msg = "flag '%s' is set" % flagname
+ self.failIf(_flags.has_key(flagname), msg=msg)
+
+ def failIfFlagNotSet(self, flagname, msg=None):
+ if not msg: msg = "flag '%s' is not set" % flagname
+ self.failUnless(_flags.has_key(flagname), msg=msg)
+
+ def getFlag(self, flagname):
+ self.failIfFlagNotSet(flagname, "flag '%s' not set" % flagname)
+ return _flags.get(flagname)
diff --git a/buildbot/buildbot/test/sleep.py b/buildbot/buildbot/test/sleep.py
new file mode 100644
index 0000000..4662852
--- /dev/null
+++ b/buildbot/buildbot/test/sleep.py
@@ -0,0 +1,8 @@
+
+import sys, time
+delay = int(sys.argv[1])
+
+sys.stdout.write("sleeping for %d seconds\n" % delay)
+time.sleep(delay)
+sys.stdout.write("woke up\n")
+sys.exit(0)
diff --git a/buildbot/buildbot/test/subdir/emit.py b/buildbot/buildbot/test/subdir/emit.py
new file mode 100644
index 0000000..42d2ca9
--- /dev/null
+++ b/buildbot/buildbot/test/subdir/emit.py
@@ -0,0 +1,11 @@
+#! /usr/bin/python
+
+import os, sys
+
+sys.stdout.write("this is stdout in subdir\n")
+sys.stderr.write("this is stderr\n")
+if os.environ.has_key("EMIT_TEST"):
+ sys.stdout.write("EMIT_TEST: %s\n" % os.environ["EMIT_TEST"])
+open("log1.out","wt").write("this is log1\n")
+rc = int(sys.argv[1])
+sys.exit(rc)
diff --git a/buildbot/buildbot/test/test__versions.py b/buildbot/buildbot/test/test__versions.py
new file mode 100644
index 0000000..a69fcc4
--- /dev/null
+++ b/buildbot/buildbot/test/test__versions.py
@@ -0,0 +1,16 @@
+
+# This is a fake test which just logs the Python, Twisted, and Buildbot
+# versions, to make it easier to track down failures in other tests.
+
+from twisted.trial import unittest
+from twisted.python import log
+from twisted import copyright
+import sys
+import buildbot
+
+class Versions(unittest.TestCase):
+ def test_versions(self):
+ log.msg("Python Version: %s" % sys.version)
+ log.msg("Twisted Version: %s" % copyright.version)
+ log.msg("Buildbot Version: %s" % buildbot.version)
+
diff --git a/buildbot/buildbot/test/test_bonsaipoller.py b/buildbot/buildbot/test/test_bonsaipoller.py
new file mode 100644
index 0000000..f4ca233
--- /dev/null
+++ b/buildbot/buildbot/test/test_bonsaipoller.py
@@ -0,0 +1,244 @@
+# -*- test-case-name: buildbot.test.test_bonsaipoller -*-
+
+from twisted.trial import unittest
+from buildbot.changes.bonsaipoller import FileNode, CiNode, BonsaiResult, \
+ BonsaiParser, BonsaiPoller, InvalidResultError, EmptyResult
+from buildbot.changes.changes import ChangeMaster
+
+from copy import deepcopy
+import re
+
+log1 = "Add Bug 338541a"
+who1 = "sar@gmail.com"
+date1 = 1161908700
+log2 = "bug 357427 add static ctor/dtor methods"
+who2 = "aarrg@ooacm.org"
+date2 = 1161910620
+log3 = "Testing log #3 lbah blah"
+who3 = "huoents@hueont.net"
+date3 = 1889822728
+rev1 = "1.8"
+file1 = "mozilla/testing/mochitest/tests/index.html"
+rev2 = "1.1"
+file2 = "mozilla/testing/mochitest/tests/test_bug338541.xhtml"
+rev3 = "1.1812"
+file3 = "mozilla/xpcom/threads/nsAutoLock.cpp"
+rev4 = "1.3"
+file4 = "mozilla/xpcom/threads/nsAutoLock.h"
+rev5 = "2.4"
+file5 = "mozilla/xpcom/threads/test.cpp"
+
+nodes = []
+files = []
+files.append(FileNode(rev1,file1))
+nodes.append(CiNode(log1, who1, date1, files))
+
+files = []
+files.append(FileNode(rev2, file2))
+files.append(FileNode(rev3, file3))
+nodes.append(CiNode(log2, who2, date2, files))
+
+nodes.append(CiNode(log3, who3, date3, []))
+
+goodParsedResult = BonsaiResult(nodes)
+
+goodUnparsedResult = """\
+<?xml version="1.0"?>
+<queryResults>
+<ci who="%s" date="%d">
+ <log>%s</log>
+ <files>
+ <f rev="%s">%s</f>
+ </files>
+</ci>
+<ci who="%s" date="%d">
+ <log>%s</log>
+ <files>
+ <f rev="%s">%s</f>
+ <f rev="%s">%s</f>
+ </files>
+</ci>
+<ci who="%s" date="%d">
+ <log>%s</log>
+ <files>
+ </files>
+</ci>
+</queryResults>
+""" % (who1, date1, log1, rev1, file1,
+ who2, date2, log2, rev2, file2, rev3, file3,
+ who3, date3, log3)
+
+badUnparsedResult = deepcopy(goodUnparsedResult)
+badUnparsedResult = badUnparsedResult.replace("</queryResults>", "")
+
+invalidDateResult = deepcopy(goodUnparsedResult)
+invalidDateResult = invalidDateResult.replace(str(date1), "foobar")
+
+missingFilenameResult = deepcopy(goodUnparsedResult)
+missingFilenameResult = missingFilenameResult.replace(file2, "")
+
+duplicateLogResult = deepcopy(goodUnparsedResult)
+duplicateLogResult = re.sub("<log>"+log1+"</log>",
+ "<log>blah</log><log>blah</log>",
+ duplicateLogResult)
+
+duplicateFilesResult = deepcopy(goodUnparsedResult)
+duplicateFilesResult = re.sub("<files>\s*</files>",
+ "<files></files><files></files>",
+ duplicateFilesResult)
+
+missingCiResult = deepcopy(goodUnparsedResult)
+r = re.compile("<ci.*</ci>", re.DOTALL | re.MULTILINE)
+missingCiResult = re.sub(r, "", missingCiResult)
+
+badResultMsgs = { 'badUnparsedResult':
+ "BonsaiParser did not raise an exception when given a bad query",
+ 'invalidDateResult':
+ "BonsaiParser did not raise an exception when given an invalid date",
+ 'missingRevisionResult':
+ "BonsaiParser did not raise an exception when a revision was missing",
+ 'missingFilenameResult':
+ "BonsaiParser did not raise an exception when a filename was missing",
+ 'duplicateLogResult':
+ "BonsaiParser did not raise an exception when there was two <log> tags",
+ 'duplicateFilesResult':
+ "BonsaiParser did not raise an exception when there was two <files> tags",
+ 'missingCiResult':
+ "BonsaiParser did not raise an exception when there was no <ci> tags"
+}
+
+noCheckinMsgResult = """\
+<?xml version="1.0"?>
+<queryResults>
+<ci who="johndoe@domain.tld" date="12345678">
+ <log></log>
+ <files>
+ <f rev="1.1">first/file.ext</f>
+ </files>
+</ci>
+<ci who="johndoe@domain.tld" date="12345678">
+ <log></log>
+ <files>
+ <f rev="1.2">second/file.ext</f>
+ </files>
+</ci>
+<ci who="johndoe@domain.tld" date="12345678">
+ <log></log>
+ <files>
+ <f rev="1.3">third/file.ext</f>
+ </files>
+</ci>
+</queryResults>
+"""
+
+noCheckinMsgRef = [dict(filename="first/file.ext",
+ revision="1.1"),
+ dict(filename="second/file.ext",
+ revision="1.2"),
+ dict(filename="third/file.ext",
+ revision="1.3")]
+
+class FakeChangeMaster(ChangeMaster):
+ def __init__(self):
+ ChangeMaster.__init__(self)
+
+ def addChange(self, change):
+ pass
+
+class FakeBonsaiPoller(BonsaiPoller):
+ def __init__(self):
+ BonsaiPoller.__init__(self, "fake url", "fake module", "fake branch")
+ self.parent = FakeChangeMaster()
+
+class TestBonsaiPoller(unittest.TestCase):
+ def testFullyFormedResult(self):
+ br = BonsaiParser(goodUnparsedResult)
+ result = br.getData()
+ # make sure the result is a BonsaiResult
+ self.failUnless(isinstance(result, BonsaiResult))
+ # test for successful parsing
+ self.failUnlessEqual(goodParsedResult, result,
+ "BonsaiParser did not return the expected BonsaiResult")
+
+ def testBadUnparsedResult(self):
+ try:
+ BonsaiParser(badUnparsedResult)
+ self.fail(badResultMsgs["badUnparsedResult"])
+ except InvalidResultError:
+ pass
+
+ def testInvalidDateResult(self):
+ try:
+ BonsaiParser(invalidDateResult)
+ self.fail(badResultMsgs["invalidDateResult"])
+ except InvalidResultError:
+ pass
+
+ def testMissingFilenameResult(self):
+ try:
+ BonsaiParser(missingFilenameResult)
+ self.fail(badResultMsgs["missingFilenameResult"])
+ except InvalidResultError:
+ pass
+
+ def testDuplicateLogResult(self):
+ try:
+ BonsaiParser(duplicateLogResult)
+ self.fail(badResultMsgs["duplicateLogResult"])
+ except InvalidResultError:
+ pass
+
+ def testDuplicateFilesResult(self):
+ try:
+ BonsaiParser(duplicateFilesResult)
+ self.fail(badResultMsgs["duplicateFilesResult"])
+ except InvalidResultError:
+ pass
+
+ def testMissingCiResult(self):
+ try:
+ BonsaiParser(missingCiResult)
+ self.fail(badResultMsgs["missingCiResult"])
+ except EmptyResult:
+ pass
+
+ def testChangeNotSubmitted(self):
+ "Make sure a change is not submitted if the BonsaiParser fails"
+ poller = FakeBonsaiPoller()
+ lastChangeBefore = poller.lastChange
+ poller._process_changes(badUnparsedResult)
+ # self.lastChange will not be updated if the change was not submitted
+ self.failUnlessEqual(lastChangeBefore, poller.lastChange)
+
+ def testParserWorksAfterInvalidResult(self):
+ """Make sure the BonsaiPoller still works after catching an
+ InvalidResultError"""
+
+ poller = FakeBonsaiPoller()
+
+ lastChangeBefore = poller.lastChange
+ # generate an exception first. pretend that we're doing a poll and
+ # increment the timestamp, otherwise the failIfEqual test at the
+ # bottom will depend upon there being a noticeable difference between
+ # two successive calls to time.time().
+ poller.lastPoll += 1.0
+ poller._process_changes(badUnparsedResult)
+ # now give it a valid one...
+ poller.lastPoll += 1.0
+ poller._process_changes(goodUnparsedResult)
+ # if poller.lastChange has not been updated then the good result
+ # was not parsed
+ self.failIfEqual(lastChangeBefore, poller.lastChange)
+
+ def testMergeEmptyLogMsg(self):
+ """Ensure that BonsaiPoller works around the bonsai xml output
+ issue when the check-in comment is empty"""
+ bp = BonsaiParser(noCheckinMsgResult)
+ result = bp.getData()
+ self.failUnlessEqual(len(result.nodes), 1)
+ self.failUnlessEqual(result.nodes[0].who, "johndoe@domain.tld")
+ self.failUnlessEqual(result.nodes[0].date, 12345678)
+ self.failUnlessEqual(result.nodes[0].log, "")
+ for file, ref in zip(result.nodes[0].files, noCheckinMsgRef):
+ self.failUnlessEqual(file.filename, ref['filename'])
+ self.failUnlessEqual(file.revision, ref['revision'])
diff --git a/buildbot/buildbot/test/test_buildreq.py b/buildbot/buildbot/test/test_buildreq.py
new file mode 100644
index 0000000..6f7f3a9
--- /dev/null
+++ b/buildbot/buildbot/test/test_buildreq.py
@@ -0,0 +1,182 @@
+# -*- test-case-name: buildbot.test.test_buildreq -*-
+
+from twisted.trial import unittest
+
+from buildbot import buildset, interfaces, sourcestamp
+from buildbot.process import base
+from buildbot.status import builder
+from buildbot.changes.changes import Change
+
+class Request(unittest.TestCase):
+ def testMerge(self):
+ R = base.BuildRequest
+ S = sourcestamp.SourceStamp
+ N = 'test_builder'
+ b1 = R("why", S("branch1", None, None, None), N)
+ b1r1 = R("why2", S("branch1", "rev1", None, None), N)
+ b1r1a = R("why not", S("branch1", "rev1", None, None), N)
+ b1r2 = R("why3", S("branch1", "rev2", None, None), N)
+ b2r2 = R("why4", S("branch2", "rev2", None, None), N)
+ b1r1p1 = R("why5", S("branch1", "rev1", (3, "diff"), None), N)
+ c1 = Change("alice", [], "changed stuff", branch="branch1")
+ c2 = Change("alice", [], "changed stuff", branch="branch1")
+ c3 = Change("alice", [], "changed stuff", branch="branch1")
+ c4 = Change("alice", [], "changed stuff", branch="branch1")
+ c5 = Change("alice", [], "changed stuff", branch="branch1")
+ c6 = Change("alice", [], "changed stuff", branch="branch1")
+ b1c1 = R("changes", S("branch1", None, None, [c1,c2,c3]), N)
+ b1c2 = R("changes", S("branch1", None, None, [c4,c5,c6]), N)
+
+ self.failUnless(b1.canBeMergedWith(b1))
+ self.failIf(b1.canBeMergedWith(b1r1))
+ self.failIf(b1.canBeMergedWith(b2r2))
+ self.failIf(b1.canBeMergedWith(b1r1p1))
+ self.failIf(b1.canBeMergedWith(b1c1))
+
+ self.failIf(b1r1.canBeMergedWith(b1))
+ self.failUnless(b1r1.canBeMergedWith(b1r1))
+ self.failIf(b1r1.canBeMergedWith(b2r2))
+ self.failIf(b1r1.canBeMergedWith(b1r1p1))
+ self.failIf(b1r1.canBeMergedWith(b1c1))
+
+ self.failIf(b1r2.canBeMergedWith(b1))
+ self.failIf(b1r2.canBeMergedWith(b1r1))
+ self.failUnless(b1r2.canBeMergedWith(b1r2))
+ self.failIf(b1r2.canBeMergedWith(b2r2))
+ self.failIf(b1r2.canBeMergedWith(b1r1p1))
+
+ self.failIf(b1r1p1.canBeMergedWith(b1))
+ self.failIf(b1r1p1.canBeMergedWith(b1r1))
+ self.failIf(b1r1p1.canBeMergedWith(b1r2))
+ self.failIf(b1r1p1.canBeMergedWith(b2r2))
+ self.failIf(b1r1p1.canBeMergedWith(b1c1))
+
+ self.failIf(b1c1.canBeMergedWith(b1))
+ self.failIf(b1c1.canBeMergedWith(b1r1))
+ self.failIf(b1c1.canBeMergedWith(b1r2))
+ self.failIf(b1c1.canBeMergedWith(b2r2))
+ self.failIf(b1c1.canBeMergedWith(b1r1p1))
+ self.failUnless(b1c1.canBeMergedWith(b1c1))
+ self.failUnless(b1c1.canBeMergedWith(b1c2))
+
+ sm = b1.mergeWith([])
+ self.failUnlessEqual(sm.branch, "branch1")
+ self.failUnlessEqual(sm.revision, None)
+ self.failUnlessEqual(sm.patch, None)
+ self.failUnlessEqual(sm.changes, ())
+
+ ss = b1r1.mergeWith([b1r1])
+ self.failUnlessEqual(ss, S("branch1", "rev1", None, None))
+ why = b1r1.mergeReasons([b1r1])
+ self.failUnlessEqual(why, "why2")
+ why = b1r1.mergeReasons([b1r1a])
+ self.failUnlessEqual(why, "why2, why not")
+
+ ss = b1c1.mergeWith([b1c2])
+ self.failUnlessEqual(ss, S("branch1", None, None, [c1,c2,c3,c4,c5,c6]))
+ why = b1c1.mergeReasons([b1c2])
+ self.failUnlessEqual(why, "changes")
+
+
+class FakeBuilder:
+ name = "fake"
+ def __init__(self):
+ self.requests = []
+ def submitBuildRequest(self, req):
+ self.requests.append(req)
+
+
+class Set(unittest.TestCase):
+ def testBuildSet(self):
+ S = buildset.BuildSet
+ a,b = FakeBuilder(), FakeBuilder()
+
+ # two builds, the first one fails, the second one succeeds. The
+ # waitUntilSuccess watcher fires as soon as the first one fails,
+ # while the waitUntilFinished watcher doesn't fire until all builds
+ # are complete.
+
+ source = sourcestamp.SourceStamp()
+ s = S(["a","b"], source, "forced build")
+ s.start([a,b])
+ self.failUnlessEqual(len(a.requests), 1)
+ self.failUnlessEqual(len(b.requests), 1)
+ r1 = a.requests[0]
+ self.failUnlessEqual(r1.reason, s.reason)
+ self.failUnlessEqual(r1.source, s.source)
+
+ st = s.status
+ self.failUnlessEqual(st.getSourceStamp(), source)
+ self.failUnlessEqual(st.getReason(), "forced build")
+ self.failUnlessEqual(st.getBuilderNames(), ["a","b"])
+ self.failIf(st.isFinished())
+ brs = st.getBuildRequests()
+ self.failUnlessEqual(len(brs), 2)
+
+ res = []
+ d1 = s.waitUntilSuccess()
+ d1.addCallback(lambda r: res.append(("success", r)))
+ d2 = s.waitUntilFinished()
+ d2.addCallback(lambda r: res.append(("finished", r)))
+
+ self.failUnlessEqual(res, [])
+
+ # the first build finishes here, with FAILURE
+ builderstatus_a = builder.BuilderStatus("a")
+ bsa = builder.BuildStatus(builderstatus_a, 1)
+ bsa.setResults(builder.FAILURE)
+ a.requests[0].finished(bsa)
+
+ # any FAILURE flunks the BuildSet immediately, so the
+ # waitUntilSuccess deferred fires right away. However, the
+ # waitUntilFinished deferred must wait until all builds have
+ # completed.
+ self.failUnlessEqual(len(res), 1)
+ self.failUnlessEqual(res[0][0], "success")
+ bss = res[0][1]
+ self.failUnless(interfaces.IBuildSetStatus(bss, None))
+ self.failUnlessEqual(bss.getResults(), builder.FAILURE)
+
+ # here we finish the second build
+ builderstatus_b = builder.BuilderStatus("b")
+ bsb = builder.BuildStatus(builderstatus_b, 1)
+ bsb.setResults(builder.SUCCESS)
+ b.requests[0].finished(bsb)
+
+ # .. which ought to fire the waitUntilFinished deferred
+ self.failUnlessEqual(len(res), 2)
+ self.failUnlessEqual(res[1][0], "finished")
+ self.failUnlessEqual(res[1][1], bss)
+
+ # and finish the BuildSet overall
+ self.failUnless(st.isFinished())
+ self.failUnlessEqual(st.getResults(), builder.FAILURE)
+
+ def testSuccess(self):
+ S = buildset.BuildSet
+ a,b = FakeBuilder(), FakeBuilder()
+ # this time, both builds succeed
+
+ source = sourcestamp.SourceStamp()
+ s = S(["a","b"], source, "forced build")
+ s.start([a,b])
+
+ st = s.status
+ self.failUnlessEqual(st.getSourceStamp(), source)
+ self.failUnlessEqual(st.getReason(), "forced build")
+ self.failUnlessEqual(st.getBuilderNames(), ["a","b"])
+ self.failIf(st.isFinished())
+
+ builderstatus_a = builder.BuilderStatus("a")
+ bsa = builder.BuildStatus(builderstatus_a, 1)
+ bsa.setResults(builder.SUCCESS)
+ a.requests[0].finished(bsa)
+
+ builderstatus_b = builder.BuilderStatus("b")
+ bsb = builder.BuildStatus(builderstatus_b, 1)
+ bsb.setResults(builder.SUCCESS)
+ b.requests[0].finished(bsb)
+
+ self.failUnless(st.isFinished())
+ self.failUnlessEqual(st.getResults(), builder.SUCCESS)
+
diff --git a/buildbot/buildbot/test/test_buildstep.py b/buildbot/buildbot/test/test_buildstep.py
new file mode 100644
index 0000000..0e9c620
--- /dev/null
+++ b/buildbot/buildbot/test/test_buildstep.py
@@ -0,0 +1,144 @@
+# -*- test-case-name: buildbot.test.test_buildstep -*-
+
+# test cases for buildbot.process.buildstep
+
+from twisted.trial import unittest
+
+from buildbot import interfaces
+from buildbot.process import buildstep
+
+# have to subclass LogObserver in order to test it, since the default
+# implementations of outReceived() and errReceived() do nothing
+class MyLogObserver(buildstep.LogObserver):
+ def __init__(self):
+ self._out = [] # list of chunks
+ self._err = []
+
+ def outReceived(self, data):
+ self._out.append(data)
+
+ def errReceived(self, data):
+ self._err.append(data)
+
+class ObserverTestCase(unittest.TestCase):
+ observer_cls = None # must be set by subclass
+
+ def setUp(self):
+ self.observer = self.observer_cls()
+
+ def _logStdout(self, chunk):
+ # why does LogObserver.logChunk() take 'build', 'step', and
+ # 'log' arguments when it clearly doesn't use them for anything?
+ self.observer.logChunk(None, None, None, interfaces.LOG_CHANNEL_STDOUT, chunk)
+
+ def _logStderr(self, chunk):
+ self.observer.logChunk(None, None, None, interfaces.LOG_CHANNEL_STDERR, chunk)
+
+ def _assertStdout(self, expect_lines):
+ self.assertEqual(self.observer._out, expect_lines)
+
+ def _assertStderr(self, expect_lines):
+ self.assertEqual(self.observer._err, expect_lines)
+
+class LogObserver(ObserverTestCase):
+
+ observer_cls = MyLogObserver
+
+ def testLogChunk(self):
+ self._logStdout("foo")
+ self._logStderr("argh")
+ self._logStdout(" wubba\n")
+ self._logStderr("!!!\n")
+
+ self._assertStdout(["foo", " wubba\n"])
+ self._assertStderr(["argh", "!!!\n"])
+
+# again, have to subclass LogLineObserver in order to test it, because the
+# default implementations of data-receiving methods are empty
+class MyLogLineObserver(buildstep.LogLineObserver):
+ def __init__(self):
+ #super(MyLogLineObserver, self).__init__()
+ buildstep.LogLineObserver.__init__(self)
+
+ self._out = [] # list of lines
+ self._err = []
+
+ def outLineReceived(self, line):
+ self._out.append(line)
+
+ def errLineReceived(self, line):
+ self._err.append(line)
+
+class LogLineObserver(ObserverTestCase):
+ observer_cls = MyLogLineObserver
+
+ def testLineBuffered(self):
+ # no challenge here: we feed it chunks that are already lines
+ # (like a program writing to stdout in line-buffered mode)
+ self._logStdout("stdout line 1\n")
+ self._logStdout("stdout line 2\n")
+ self._logStderr("stderr line 1\n")
+ self._logStdout("stdout line 3\n")
+
+ self._assertStdout(["stdout line 1",
+ "stdout line 2",
+ "stdout line 3"])
+ self._assertStderr(["stderr line 1"])
+
+ def testShortBrokenLines(self):
+ self._logStdout("stdout line 1 starts ")
+ self._logStderr("an intervening line of error\n")
+ self._logStdout("and continues ")
+ self._logStdout("but finishes here\n")
+ self._logStderr("more error\n")
+ self._logStdout("and another line of stdout\n")
+
+ self._assertStdout(["stdout line 1 starts and continues but finishes here",
+ "and another line of stdout"])
+ self._assertStderr(["an intervening line of error",
+ "more error"])
+
+ def testLongLine(self):
+ chunk = "." * 1024
+ self._logStdout(chunk)
+ self._logStdout(chunk)
+ self._logStdout(chunk)
+ self._logStdout(chunk)
+ self._logStdout(chunk)
+ self._logStdout("\n")
+
+ self._assertStdout([chunk * 5])
+ self._assertStderr([])
+
+ def testBigChunk(self):
+ chunk = "." * 5000
+ self._logStdout(chunk)
+ self._logStdout("\n")
+
+ self._assertStdout([chunk])
+ self._assertStderr([])
+
+ def testReallyLongLine(self):
+ # A single line of > 16384 bytes is dropped on the floor (bug #201).
+ # In real life, I observed such a line being broken into chunks of
+ # 4095 bytes, so that's how I'm breaking it here.
+ self.observer.setMaxLineLength(65536)
+ chunk = "." * 4095
+ self._logStdout(chunk)
+ self._logStdout(chunk)
+ self._logStdout(chunk)
+ self._logStdout(chunk) # now we're up to 16380 bytes
+ self._logStdout("12345\n")
+
+ self._assertStdout([chunk*4 + "12345"])
+ self._assertStderr([])
+
+class RemoteShellTest(unittest.TestCase):
+ def testRepr(self):
+ # Test for #352
+ rsc = buildstep.RemoteShellCommand('.', ('sh', 'make'))
+ testval = repr(rsc)
+ rsc = buildstep.RemoteShellCommand('.', ['sh', 'make'])
+ testval = repr(rsc)
+ rsc = buildstep.RemoteShellCommand('.', 'make')
+ testval = repr(rsc)
diff --git a/buildbot/buildbot/test/test_changes.py b/buildbot/buildbot/test/test_changes.py
new file mode 100644
index 0000000..faebe7b
--- /dev/null
+++ b/buildbot/buildbot/test/test_changes.py
@@ -0,0 +1,243 @@
+# -*- test-case-name: buildbot.test.test_changes -*-
+
+from twisted.trial import unittest
+from twisted.internet import defer, reactor
+
+from buildbot import master
+from buildbot.changes import pb
+from buildbot.scripts import runner
+
+d1 = {'files': ["Project/foo.c", "Project/bar/boo.c"],
+ 'who': "marvin",
+ 'comments': "Some changes in Project"}
+d2 = {'files': ["OtherProject/bar.c"],
+ 'who': "zaphod",
+ 'comments': "other changes"}
+d3 = {'files': ["Project/baz.c", "OtherProject/bloo.c"],
+ 'who': "alice",
+ 'comments': "mixed changes"}
+d4 = {'files': ["trunk/baz.c", "branches/foobranch/foo.c", "trunk/bar.c"],
+ 'who': "alice",
+ 'comments': "mixed changes"}
+d5 = {'files': ["Project/foo.c"],
+ 'who': "trillian",
+ 'comments': "Some changes in Project",
+ 'category': "categoryA"}
+
+class TestChangePerspective(unittest.TestCase):
+
+ def setUp(self):
+ self.changes = []
+
+ def addChange(self, c):
+ self.changes.append(c)
+
+ def testNoPrefix(self):
+ p = pb.ChangePerspective(self, None)
+ p.perspective_addChange(d1)
+ self.failUnlessEqual(len(self.changes), 1)
+ c1 = self.changes[0]
+ self.failUnlessEqual(set(c1.files),
+ set(["Project/foo.c", "Project/bar/boo.c"]))
+ self.failUnlessEqual(c1.comments, "Some changes in Project")
+ self.failUnlessEqual(c1.who, "marvin")
+
+ def testPrefix(self):
+ p = pb.ChangePerspective(self, "Project/")
+
+ p.perspective_addChange(d1)
+ self.failUnlessEqual(len(self.changes), 1)
+ c1 = self.changes[-1]
+ self.failUnlessEqual(set(c1.files), set(["foo.c", "bar/boo.c"]))
+ self.failUnlessEqual(c1.comments, "Some changes in Project")
+ self.failUnlessEqual(c1.who, "marvin")
+
+ p.perspective_addChange(d2) # should be ignored
+ self.failUnlessEqual(len(self.changes), 1)
+
+ p.perspective_addChange(d3) # should ignore the OtherProject file
+ self.failUnlessEqual(len(self.changes), 2)
+
+ c3 = self.changes[-1]
+ self.failUnlessEqual(set(c3.files), set(["baz.c"]))
+ self.failUnlessEqual(c3.comments, "mixed changes")
+ self.failUnlessEqual(c3.who, "alice")
+
+ def testPrefix2(self):
+ p = pb.ChangePerspective(self, "Project/bar/")
+
+ p.perspective_addChange(d1)
+ self.failUnlessEqual(len(self.changes), 1)
+ c1 = self.changes[-1]
+ self.failUnlessEqual(set(c1.files), set(["boo.c"]))
+ self.failUnlessEqual(c1.comments, "Some changes in Project")
+ self.failUnlessEqual(c1.who, "marvin")
+
+ p.perspective_addChange(d2) # should be ignored
+ self.failUnlessEqual(len(self.changes), 1)
+
+ p.perspective_addChange(d3) # should ignore this too
+ self.failUnlessEqual(len(self.changes), 1)
+
+ def testPrefix3(self):
+ p = pb.ChangePerspective(self, "trunk/")
+
+ p.perspective_addChange(d4)
+ self.failUnlessEqual(len(self.changes), 1)
+ c1 = self.changes[-1]
+ self.failUnlessEqual(set(c1.files), set(["baz.c", "bar.c"]))
+ self.failUnlessEqual(c1.comments, "mixed changes")
+
+ def testPrefix4(self):
+ p = pb.ChangePerspective(self, "branches/foobranch/")
+
+ p.perspective_addChange(d4)
+ self.failUnlessEqual(len(self.changes), 1)
+ c1 = self.changes[-1]
+ self.failUnlessEqual(set(c1.files), set(["foo.c"]))
+ self.failUnlessEqual(c1.comments, "mixed changes")
+
+ def testCategory(self):
+ p = pb.ChangePerspective(self, None)
+ p.perspective_addChange(d5)
+ self.failUnlessEqual(len(self.changes), 1)
+ c1 = self.changes[0]
+ self.failUnlessEqual(c1.category, "categoryA")
+
+config_empty = """
+BuildmasterConfig = c = {}
+c['slaves'] = []
+c['builders'] = []
+c['schedulers'] = []
+c['slavePortnum'] = 0
+"""
+
+config_sender = config_empty + \
+"""
+from buildbot.changes import pb
+c['change_source'] = pb.PBChangeSource(port=None)
+"""
+
+class Sender(unittest.TestCase):
+ def setUp(self):
+ self.master = master.BuildMaster(".")
+ def tearDown(self):
+ d = defer.maybeDeferred(self.master.stopService)
+ # TODO: something in Twisted-2.0.0 (and probably 2.0.1) doesn't shut
+ # down the Broker listening socket when it's supposed to.
+ # Twisted-1.3.0, and current SVN (which will be post-2.0.1) are ok.
+ # This iterate() is a quick hack to deal with the problem. I need to
+ # investigate more thoroughly and find a better solution.
+ d.addCallback(self.stall, 0.1)
+ return d
+
+ def stall(self, res, timeout):
+ d = defer.Deferred()
+ reactor.callLater(timeout, d.callback, res)
+ return d
+
+ def testSender(self):
+ self.master.loadConfig(config_empty)
+ self.master.startService()
+ # TODO: BuildMaster.loadChanges replaces the change_svc object, so we
+ # have to load it twice. Clean this up.
+ d = self.master.loadConfig(config_sender)
+ d.addCallback(self._testSender_1)
+ return d
+
+ def _testSender_1(self, res):
+ self.cm = cm = self.master.change_svc
+ s1 = list(self.cm)[0]
+ port = self.master.slavePort._port.getHost().port
+
+ self.options = {'username': "alice",
+ 'master': "localhost:%d" % port,
+ 'files': ["foo.c"],
+ 'category': "categoryA",
+ }
+
+ d = runner.sendchange(self.options)
+ d.addCallback(self._testSender_2)
+ return d
+
+ def _testSender_2(self, res):
+ # now check that the change was received
+ self.failUnlessEqual(len(self.cm.changes), 1)
+ c = self.cm.changes.pop()
+ self.failUnlessEqual(c.who, "alice")
+ self.failUnlessEqual(c.files, ["foo.c"])
+ self.failUnlessEqual(c.comments, "")
+ self.failUnlessEqual(c.revision, None)
+ self.failUnlessEqual(c.category, "categoryA")
+
+ self.options['revision'] = "r123"
+ self.options['comments'] = "test change"
+
+ d = runner.sendchange(self.options)
+ d.addCallback(self._testSender_3)
+ return d
+
+ def _testSender_3(self, res):
+ self.failUnlessEqual(len(self.cm.changes), 1)
+ c = self.cm.changes.pop()
+ self.failUnlessEqual(c.who, "alice")
+ self.failUnlessEqual(c.files, ["foo.c"])
+ self.failUnlessEqual(c.comments, "test change")
+ self.failUnlessEqual(c.revision, "r123")
+ self.failUnlessEqual(c.category, "categoryA")
+
+ # test options['logfile'] by creating a temporary file
+ logfile = self.mktemp()
+ f = open(logfile, "wt")
+ f.write("longer test change")
+ f.close()
+ self.options['comments'] = None
+ self.options['logfile'] = logfile
+
+ d = runner.sendchange(self.options)
+ d.addCallback(self._testSender_4)
+ return d
+
+ def _testSender_4(self, res):
+ self.failUnlessEqual(len(self.cm.changes), 1)
+ c = self.cm.changes.pop()
+ self.failUnlessEqual(c.who, "alice")
+ self.failUnlessEqual(c.files, ["foo.c"])
+ self.failUnlessEqual(c.comments, "longer test change")
+ self.failUnlessEqual(c.revision, "r123")
+ self.failUnlessEqual(c.category, "categoryA")
+
+ # make sure that numeric revisions work too
+ self.options['logfile'] = None
+ del self.options['revision']
+ self.options['revision_number'] = 42
+
+ d = runner.sendchange(self.options)
+ d.addCallback(self._testSender_5)
+ return d
+
+ def _testSender_5(self, res):
+ self.failUnlessEqual(len(self.cm.changes), 1)
+ c = self.cm.changes.pop()
+ self.failUnlessEqual(c.who, "alice")
+ self.failUnlessEqual(c.files, ["foo.c"])
+ self.failUnlessEqual(c.comments, "")
+ self.failUnlessEqual(c.revision, 42)
+ self.failUnlessEqual(c.category, "categoryA")
+
+ # verify --branch too
+ self.options['branch'] = "branches/test"
+
+ d = runner.sendchange(self.options)
+ d.addCallback(self._testSender_6)
+ return d
+
+ def _testSender_6(self, res):
+ self.failUnlessEqual(len(self.cm.changes), 1)
+ c = self.cm.changes.pop()
+ self.failUnlessEqual(c.who, "alice")
+ self.failUnlessEqual(c.files, ["foo.c"])
+ self.failUnlessEqual(c.comments, "")
+ self.failUnlessEqual(c.revision, 42)
+ self.failUnlessEqual(c.branch, "branches/test")
+ self.failUnlessEqual(c.category, "categoryA")
diff --git a/buildbot/buildbot/test/test_config.py b/buildbot/buildbot/test/test_config.py
new file mode 100644
index 0000000..900dcad
--- /dev/null
+++ b/buildbot/buildbot/test/test_config.py
@@ -0,0 +1,1277 @@
+# -*- test-case-name: buildbot.test.test_config -*-
+
+import os, warnings, exceptions
+
+from twisted.trial import unittest
+from twisted.python import failure
+from twisted.internet import defer
+
+from buildbot.master import BuildMaster
+from buildbot import scheduler
+from twisted.application import service, internet
+from twisted.spread import pb
+from twisted.web.server import Site
+from twisted.web.distrib import ResourcePublisher
+from buildbot.process.builder import Builder
+from buildbot.process.factory import BasicBuildFactory
+from buildbot.changes.pb import PBChangeSource
+from buildbot.changes.mail import SyncmailMaildirSource
+from buildbot.steps.source import CVS, Darcs
+from buildbot.steps.shell import Compile, Test, ShellCommand
+from buildbot.status import base
+from buildbot.steps import dummy, maxq, python, python_twisted, shell, \
+ source, transfer
+words = None
+try:
+ from buildbot.status import words
+except ImportError:
+ pass
+
+emptyCfg = \
+"""
+from buildbot.buildslave import BuildSlave
+BuildmasterConfig = c = {}
+c['slaves'] = []
+c['schedulers'] = []
+c['builders'] = []
+c['slavePortnum'] = 9999
+c['projectName'] = 'dummy project'
+c['projectURL'] = 'http://dummy.example.com'
+c['buildbotURL'] = 'http://dummy.example.com/buildbot'
+"""
+
+buildersCfg = \
+"""
+from buildbot.process.factory import BasicBuildFactory
+from buildbot.buildslave import BuildSlave
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+c['schedulers'] = []
+c['slavePortnum'] = 9999
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+c['builders'] = [{'name':'builder1', 'slavename':'bot1',
+ 'builddir':'workdir', 'factory':f1}]
+"""
+
+buildersCfg2 = buildersCfg + \
+"""
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule2')
+c['builders'] = [{'name':'builder1', 'slavename':'bot1',
+ 'builddir':'workdir', 'factory':f1}]
+"""
+
+buildersCfg3 = buildersCfg2 + \
+"""
+c['builders'].append({'name': 'builder2', 'slavename': 'bot1',
+ 'builddir': 'workdir2', 'factory': f1 })
+"""
+
+buildersCfg4 = buildersCfg2 + \
+"""
+c['builders'] = [{ 'name': 'builder1', 'slavename': 'bot1',
+ 'builddir': 'newworkdir', 'factory': f1 },
+ { 'name': 'builder2', 'slavename': 'bot1',
+ 'builddir': 'workdir2', 'factory': f1 }]
+"""
+
+wpCfg1 = buildersCfg + \
+"""
+from buildbot.steps import shell
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+f1.addStep(shell.ShellCommand, command=[shell.WithProperties('echo')])
+c['builders'] = [{'name':'builder1', 'slavename':'bot1',
+ 'builddir':'workdir1', 'factory': f1}]
+"""
+
+wpCfg2 = buildersCfg + \
+"""
+from buildbot.steps import shell
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+f1.addStep(shell.ShellCommand,
+ command=[shell.WithProperties('echo %s', 'revision')])
+c['builders'] = [{'name':'builder1', 'slavename':'bot1',
+ 'builddir':'workdir1', 'factory': f1}]
+"""
+
+
+
+ircCfg1 = emptyCfg + \
+"""
+from buildbot.status import words
+c['status'] = [words.IRC('irc.us.freenode.net', 'buildbot', ['twisted'])]
+"""
+
+ircCfg2 = emptyCfg + \
+"""
+from buildbot.status import words
+c['status'] = [words.IRC('irc.us.freenode.net', 'buildbot', ['twisted']),
+ words.IRC('irc.example.com', 'otherbot', ['chan1', 'chan2'])]
+"""
+
+ircCfg3 = emptyCfg + \
+"""
+from buildbot.status import words
+c['status'] = [words.IRC('irc.us.freenode.net', 'buildbot', ['knotted'])]
+"""
+
+webCfg1 = emptyCfg + \
+"""
+from buildbot.status import html
+c['status'] = [html.Waterfall(http_port=9980)]
+"""
+
+webCfg2 = emptyCfg + \
+"""
+from buildbot.status import html
+c['status'] = [html.Waterfall(http_port=9981)]
+"""
+
+webCfg3 = emptyCfg + \
+"""
+from buildbot.status import html
+c['status'] = [html.Waterfall(http_port='tcp:9981:interface=127.0.0.1')]
+"""
+
+webNameCfg1 = emptyCfg + \
+"""
+from buildbot.status import html
+c['status'] = [html.Waterfall(distrib_port='~/.twistd-web-pb')]
+"""
+
+webNameCfg2 = emptyCfg + \
+"""
+from buildbot.status import html
+c['status'] = [html.Waterfall(distrib_port='./bar.socket')]
+"""
+
+debugPasswordCfg = emptyCfg + \
+"""
+c['debugPassword'] = 'sekrit'
+"""
+
+interlockCfgBad = \
+"""
+from buildbot.process.factory import BasicBuildFactory
+from buildbot.buildslave import BuildSlave
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+c['schedulers'] = []
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+c['builders'] = [
+ { 'name': 'builder1', 'slavename': 'bot1',
+ 'builddir': 'workdir', 'factory': f1 },
+ { 'name': 'builder2', 'slavename': 'bot1',
+ 'builddir': 'workdir2', 'factory': f1 },
+ ]
+# interlocks have been removed
+c['interlocks'] = [('lock1', ['builder1'], ['builder2', 'builder3']),
+ ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+lockCfgBad1 = \
+"""
+from buildbot.steps.dummy import Dummy
+from buildbot.process.factory import BuildFactory, s
+from buildbot.locks import MasterLock
+from buildbot.buildslave import BuildSlave
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+c['schedulers'] = []
+l1 = MasterLock('lock1')
+l2 = MasterLock('lock1') # duplicate lock name
+f1 = BuildFactory([s(Dummy, locks=[])])
+c['builders'] = [
+ { 'name': 'builder1', 'slavename': 'bot1',
+ 'builddir': 'workdir', 'factory': f1, 'locks': [l1, l2] },
+ { 'name': 'builder2', 'slavename': 'bot1',
+ 'builddir': 'workdir2', 'factory': f1 },
+ ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+lockCfgBad2 = \
+"""
+from buildbot.steps.dummy import Dummy
+from buildbot.process.factory import BuildFactory, s
+from buildbot.locks import MasterLock, SlaveLock
+from buildbot.buildslave import BuildSlave
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+c['schedulers'] = []
+l1 = MasterLock('lock1')
+l2 = SlaveLock('lock1') # duplicate lock name
+f1 = BuildFactory([s(Dummy, locks=[])])
+c['builders'] = [
+ { 'name': 'builder1', 'slavename': 'bot1',
+ 'builddir': 'workdir', 'factory': f1, 'locks': [l1, l2] },
+ { 'name': 'builder2', 'slavename': 'bot1',
+ 'builddir': 'workdir2', 'factory': f1 },
+ ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+lockCfgBad3 = \
+"""
+from buildbot.steps.dummy import Dummy
+from buildbot.process.factory import BuildFactory, s
+from buildbot.locks import MasterLock
+from buildbot.buildslave import BuildSlave
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+c['schedulers'] = []
+l1 = MasterLock('lock1')
+l2 = MasterLock('lock1') # duplicate lock name
+f1 = BuildFactory([s(Dummy, locks=[l2])])
+f2 = BuildFactory([s(Dummy)])
+c['builders'] = [
+ { 'name': 'builder1', 'slavename': 'bot1',
+ 'builddir': 'workdir', 'factory': f2, 'locks': [l1] },
+ { 'name': 'builder2', 'slavename': 'bot1',
+ 'builddir': 'workdir2', 'factory': f1 },
+ ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+lockCfg1a = \
+"""
+from buildbot.process.factory import BasicBuildFactory
+from buildbot.locks import MasterLock
+from buildbot.buildslave import BuildSlave
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+c['schedulers'] = []
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+l1 = MasterLock('lock1')
+l2 = MasterLock('lock2')
+c['builders'] = [
+ { 'name': 'builder1', 'slavename': 'bot1',
+ 'builddir': 'workdir', 'factory': f1, 'locks': [l1, l2] },
+ { 'name': 'builder2', 'slavename': 'bot1',
+ 'builddir': 'workdir2', 'factory': f1 },
+ ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+lockCfg1b = \
+"""
+from buildbot.process.factory import BasicBuildFactory
+from buildbot.locks import MasterLock
+from buildbot.buildslave import BuildSlave
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+c['schedulers'] = []
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+l1 = MasterLock('lock1')
+l2 = MasterLock('lock2')
+c['builders'] = [
+ { 'name': 'builder1', 'slavename': 'bot1',
+ 'builddir': 'workdir', 'factory': f1, 'locks': [l1] },
+ { 'name': 'builder2', 'slavename': 'bot1',
+ 'builddir': 'workdir2', 'factory': f1 },
+ ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+# test out step Locks
+lockCfg2a = \
+"""
+from buildbot.steps.dummy import Dummy
+from buildbot.process.factory import BuildFactory, s
+from buildbot.locks import MasterLock
+from buildbot.buildslave import BuildSlave
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+c['schedulers'] = []
+l1 = MasterLock('lock1')
+l2 = MasterLock('lock2')
+f1 = BuildFactory([s(Dummy, locks=[l1,l2])])
+f2 = BuildFactory([s(Dummy)])
+
+c['builders'] = [
+ { 'name': 'builder1', 'slavename': 'bot1',
+ 'builddir': 'workdir', 'factory': f1 },
+ { 'name': 'builder2', 'slavename': 'bot1',
+ 'builddir': 'workdir2', 'factory': f2 },
+ ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+lockCfg2b = \
+"""
+from buildbot.steps.dummy import Dummy
+from buildbot.process.factory import BuildFactory, s
+from buildbot.locks import MasterLock
+from buildbot.buildslave import BuildSlave
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+c['schedulers'] = []
+l1 = MasterLock('lock1')
+l2 = MasterLock('lock2')
+f1 = BuildFactory([s(Dummy, locks=[l1])])
+f2 = BuildFactory([s(Dummy)])
+
+c['builders'] = [
+ { 'name': 'builder1', 'slavename': 'bot1',
+ 'builddir': 'workdir', 'factory': f1 },
+ { 'name': 'builder2', 'slavename': 'bot1',
+ 'builddir': 'workdir2', 'factory': f2 },
+ ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+lockCfg2c = \
+"""
+from buildbot.steps.dummy import Dummy
+from buildbot.process.factory import BuildFactory, s
+from buildbot.locks import MasterLock
+from buildbot.buildslave import BuildSlave
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+c['schedulers'] = []
+l1 = MasterLock('lock1')
+l2 = MasterLock('lock2')
+f1 = BuildFactory([s(Dummy)])
+f2 = BuildFactory([s(Dummy)])
+
+c['builders'] = [
+ { 'name': 'builder1', 'slavename': 'bot1',
+ 'builddir': 'workdir', 'factory': f1 },
+ { 'name': 'builder2', 'slavename': 'bot1',
+ 'builddir': 'workdir2', 'factory': f2 },
+ ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+schedulersCfg = \
+"""
+from buildbot.scheduler import Scheduler, Dependent
+from buildbot.process.factory import BasicBuildFactory
+from buildbot.buildslave import BuildSlave
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+b1 = {'name':'builder1', 'slavename':'bot1',
+ 'builddir':'workdir', 'factory':f1}
+c['builders'] = [b1]
+c['schedulers'] = [Scheduler('full', None, 60, ['builder1'])]
+c['slavePortnum'] = 9999
+c['projectName'] = 'dummy project'
+c['projectURL'] = 'http://dummy.example.com'
+c['buildbotURL'] = 'http://dummy.example.com/buildbot'
+BuildmasterConfig = c
+"""
+
+class ConfigTest(unittest.TestCase):
+ def setUp(self):
+ # this class generates several deprecation warnings, which the user
+ # doesn't need to see.
+ warnings.simplefilter('ignore', exceptions.DeprecationWarning)
+ self.buildmaster = BuildMaster(".")
+
+ def failUnlessListsEquivalent(self, list1, list2):
+ l1 = list1[:]
+ l1.sort()
+ l2 = list2[:]
+ l2.sort()
+ self.failUnlessEqual(l1, l2)
+
+ def servers(self, s, types):
+ # perform a recursive search of s.services, looking for instances of
+ # twisted.application.internet.TCPServer, then extract their .args
+ # values to find the TCP ports they want to listen on
+ for child in s:
+ if service.IServiceCollection.providedBy(child):
+ for gc in self.servers(child, types):
+ yield gc
+ if isinstance(child, types):
+ yield child
+
+ def TCPports(self, s):
+ return list(self.servers(s, internet.TCPServer))
+ def UNIXports(self, s):
+ return list(self.servers(s, internet.UNIXServer))
+ def TCPclients(self, s):
+ return list(self.servers(s, internet.TCPClient))
+
+ def checkPorts(self, svc, expected):
+ """Verify that the TCPServer and UNIXServer children of the given
+ service have the expected portnum/pathname and factory classes. As a
+ side-effect, return a list of servers in the same order as the
+ 'expected' list. This can be used to verify properties of the
+ factories contained therein."""
+
+ expTCP = [e for e in expected if type(e[0]) == int]
+ expUNIX = [e for e in expected if type(e[0]) == str]
+ haveTCP = [(p.args[0], p.args[1].__class__)
+ for p in self.TCPports(svc)]
+ haveUNIX = [(p.args[0], p.args[1].__class__)
+ for p in self.UNIXports(svc)]
+ self.failUnlessListsEquivalent(expTCP, haveTCP)
+ self.failUnlessListsEquivalent(expUNIX, haveUNIX)
+ ret = []
+ for e in expected:
+ for have in self.TCPports(svc) + self.UNIXports(svc):
+ if have.args[0] == e[0]:
+ ret.append(have)
+ continue
+ assert(len(ret) == len(expected))
+ return ret
+
+ def testEmpty(self):
+ self.failUnlessRaises(KeyError, self.buildmaster.loadConfig, "")
+
+ def testSimple(self):
+ # covers slavePortnum, base checker passwords
+ master = self.buildmaster
+ master.loadChanges()
+
+ master.loadConfig(emptyCfg)
+ # note: this doesn't actually start listening, because the app
+ # hasn't been started running
+ self.failUnlessEqual(master.slavePortnum, "tcp:9999")
+ self.checkPorts(master, [(9999, pb.PBServerFactory)])
+ self.failUnlessEqual(list(master.change_svc), [])
+ self.failUnlessEqual(master.botmaster.builders, {})
+ self.failUnlessEqual(master.checker.users,
+ {"change": "changepw"})
+ self.failUnlessEqual(master.projectName, "dummy project")
+ self.failUnlessEqual(master.projectURL, "http://dummy.example.com")
+ self.failUnlessEqual(master.buildbotURL,
+ "http://dummy.example.com/buildbot")
+
+ def testSlavePortnum(self):
+ master = self.buildmaster
+ master.loadChanges()
+
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(master.slavePortnum, "tcp:9999")
+ ports = self.checkPorts(master, [(9999, pb.PBServerFactory)])
+ p = ports[0]
+
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(master.slavePortnum, "tcp:9999")
+ ports = self.checkPorts(master, [(9999, pb.PBServerFactory)])
+ self.failUnlessIdentical(p, ports[0],
+ "the slave port was changed even " + \
+ "though the configuration was not")
+
+ master.loadConfig(emptyCfg + "c['slavePortnum'] = 9000\n")
+ self.failUnlessEqual(master.slavePortnum, "tcp:9000")
+ ports = self.checkPorts(master, [(9000, pb.PBServerFactory)])
+ self.failIf(p is ports[0],
+ "slave port was unchanged but configuration was changed")
+
+ def testSlaves(self):
+ master = self.buildmaster
+ master.loadChanges()
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(master.botmaster.builders, {})
+ self.failUnlessEqual(master.checker.users,
+ {"change": "changepw"})
+ # 'botsCfg' is testing backwards compatibility, for 0.7.5 config
+ # files that have not yet been updated to 0.7.6 . This compatibility
+ # (and this test) is scheduled for removal in 0.8.0 .
+ botsCfg = (emptyCfg +
+ "c['bots'] = [('bot1', 'pw1'), ('bot2', 'pw2')]\n")
+ master.loadConfig(botsCfg)
+ self.failUnlessEqual(master.checker.users,
+ {"change": "changepw",
+ "bot1": "pw1",
+ "bot2": "pw2"})
+ master.loadConfig(botsCfg)
+ self.failUnlessEqual(master.checker.users,
+ {"change": "changepw",
+ "bot1": "pw1",
+ "bot2": "pw2"})
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(master.checker.users,
+ {"change": "changepw"})
+ slavesCfg = (emptyCfg +
+ "from buildbot.buildslave import BuildSlave\n"
+ "c['slaves'] = [BuildSlave('bot1','pw1'), "
+ "BuildSlave('bot2','pw2')]\n")
+ master.loadConfig(slavesCfg)
+ self.failUnlessEqual(master.checker.users,
+ {"change": "changepw",
+ "bot1": "pw1",
+ "bot2": "pw2"})
+
+
+ def testChangeSource(self):
+ master = self.buildmaster
+ master.loadChanges()
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(list(master.change_svc), [])
+
+ sourcesCfg = emptyCfg + \
+"""
+from buildbot.changes.pb import PBChangeSource
+c['change_source'] = PBChangeSource()
+"""
+
+ d = master.loadConfig(sourcesCfg)
+ def _check1(res):
+ self.failUnlessEqual(len(list(self.buildmaster.change_svc)), 1)
+ s1 = list(self.buildmaster.change_svc)[0]
+ self.failUnless(isinstance(s1, PBChangeSource))
+ self.failUnlessEqual(s1, list(self.buildmaster.change_svc)[0])
+ self.failUnless(s1.parent)
+
+ # verify that unchanged sources are not interrupted
+ d1 = self.buildmaster.loadConfig(sourcesCfg)
+
+ def _check2(res):
+ self.failUnlessEqual(len(list(self.buildmaster.change_svc)), 1)
+ s2 = list(self.buildmaster.change_svc)[0]
+ self.failUnlessIdentical(s1, s2)
+ self.failUnless(s1.parent)
+ d1.addCallback(_check2)
+ return d1
+ d.addCallback(_check1)
+
+ # make sure we can get rid of the sources too
+ d.addCallback(lambda res: self.buildmaster.loadConfig(emptyCfg))
+
+ def _check3(res):
+ self.failUnlessEqual(list(self.buildmaster.change_svc), [])
+ d.addCallback(_check3)
+
+ return d
+
+ def testChangeSources(self):
+ # make sure we can accept a list
+ master = self.buildmaster
+ master.loadChanges()
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(list(master.change_svc), [])
+
+ sourcesCfg = emptyCfg + \
+"""
+from buildbot.changes.pb import PBChangeSource
+from buildbot.changes.mail import SyncmailMaildirSource
+c['change_source'] = [PBChangeSource(),
+ SyncmailMaildirSource('.'),
+ ]
+"""
+
+ d = master.loadConfig(sourcesCfg)
+ def _check1(res):
+ self.failUnlessEqual(len(list(self.buildmaster.change_svc)), 2)
+ s1,s2 = list(self.buildmaster.change_svc)
+ if isinstance(s2, PBChangeSource):
+ s1,s2 = s2,s1
+ self.failUnless(isinstance(s1, PBChangeSource))
+ self.failUnless(s1.parent)
+ self.failUnless(isinstance(s2, SyncmailMaildirSource))
+ self.failUnless(s2.parent)
+ d.addCallback(_check1)
+ return d
+
+ def testSources(self):
+ # test backwards compatibility. c['sources'] is deprecated.
+ master = self.buildmaster
+ master.loadChanges()
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(list(master.change_svc), [])
+
+ sourcesCfg = emptyCfg + \
+"""
+from buildbot.changes.pb import PBChangeSource
+c['sources'] = [PBChangeSource()]
+"""
+
+ d = master.loadConfig(sourcesCfg)
+ def _check1(res):
+ self.failUnlessEqual(len(list(self.buildmaster.change_svc)), 1)
+ s1 = list(self.buildmaster.change_svc)[0]
+ self.failUnless(isinstance(s1, PBChangeSource))
+ self.failUnless(s1.parent)
+ d.addCallback(_check1)
+ return d
+
+ def shouldBeFailure(self, res, *expected):
+ self.failUnless(isinstance(res, failure.Failure),
+ "we expected this to fail, not produce %s" % (res,))
+ res.trap(*expected)
+ return None # all is good
+
+ def testSchedulerErrors(self):
+ master = self.buildmaster
+ master.loadChanges()
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(master.allSchedulers(), [])
+
+ def _shouldBeFailure(res, hint=None):
+ self.shouldBeFailure(res, AssertionError, ValueError)
+ if hint:
+ self.failUnless(str(res).find(hint) != -1)
+
+ def _loadConfig(res, newcfg):
+ return self.buildmaster.loadConfig(newcfg)
+ d = defer.succeed(None)
+
+ # c['schedulers'] must be a list
+ badcfg = schedulersCfg + \
+"""
+c['schedulers'] = Scheduler('full', None, 60, ['builder1'])
+"""
+ d.addCallback(_loadConfig, badcfg)
+ d.addBoth(_shouldBeFailure,
+ "c['schedulers'] must be a list of Scheduler instances")
+
+ # c['schedulers'] must be a list of IScheduler objects
+ badcfg = schedulersCfg + \
+"""
+c['schedulers'] = ['oops', 'problem']
+"""
+ d.addCallback(_loadConfig, badcfg)
+ d.addBoth(_shouldBeFailure,
+ "c['schedulers'] must be a list of Scheduler instances")
+
+ # c['schedulers'] must point at real builders
+ badcfg = schedulersCfg + \
+"""
+c['schedulers'] = [Scheduler('full', None, 60, ['builder-bogus'])]
+"""
+ d.addCallback(_loadConfig, badcfg)
+ d.addBoth(_shouldBeFailure, "uses unknown builder")
+
+ # builderNames= must be a list
+ badcfg = schedulersCfg + \
+"""
+c['schedulers'] = [Scheduler('full', None, 60, 'builder1')]
+"""
+ d.addCallback(_loadConfig, badcfg)
+ d.addBoth(_shouldBeFailure,
+ "must be a list of Builder description names")
+
+ # builderNames= must be a list of strings, not dicts
+ badcfg = schedulersCfg + \
+"""
+c['schedulers'] = [Scheduler('full', None, 60, [b1])]
+"""
+ d.addCallback(_loadConfig, badcfg)
+ d.addBoth(_shouldBeFailure,
+ "must be a list of Builder description names")
+
+ # builderNames= must be a list of strings, not a dict
+ badcfg = schedulersCfg + \
+"""
+c['schedulers'] = [Scheduler('full', None, 60, b1)]
+"""
+ d.addCallback(_loadConfig, badcfg)
+ d.addBoth(_shouldBeFailure,
+ "must be a list of Builder description names")
+
+ # each Scheduler must have a unique name
+ badcfg = schedulersCfg + \
+"""
+c['schedulers'] = [Scheduler('dup', None, 60, []),
+ Scheduler('dup', None, 60, [])]
+"""
+ d.addCallback(_loadConfig, badcfg)
+ d.addBoth(_shouldBeFailure, "Schedulers must have unique names")
+
+ return d
+
+ def testSchedulers(self):
+ master = self.buildmaster
+ master.loadChanges()
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(master.allSchedulers(), [])
+
+ d = self.buildmaster.loadConfig(schedulersCfg)
+ d.addCallback(self._testSchedulers_1)
+ return d
+
+ def _testSchedulers_1(self, res):
+ sch = self.buildmaster.allSchedulers()
+ self.failUnlessEqual(len(sch), 1)
+ s = sch[0]
+ self.failUnless(isinstance(s, scheduler.Scheduler))
+ self.failUnlessEqual(s.name, "full")
+ self.failUnlessEqual(s.branch, None)
+ self.failUnlessEqual(s.treeStableTimer, 60)
+ self.failUnlessEqual(s.builderNames, ['builder1'])
+
+ newcfg = schedulersCfg + \
+"""
+s1 = Scheduler('full', None, 60, ['builder1'])
+c['schedulers'] = [s1, Dependent('downstream', s1, ['builder1'])]
+"""
+ d = self.buildmaster.loadConfig(newcfg)
+ d.addCallback(self._testSchedulers_2, newcfg)
+ return d
+ def _testSchedulers_2(self, res, newcfg):
+ sch = self.buildmaster.allSchedulers()
+ self.failUnlessEqual(len(sch), 2)
+ s = sch[0]
+ self.failUnless(isinstance(s, scheduler.Scheduler))
+ s = sch[1]
+ self.failUnless(isinstance(s, scheduler.Dependent))
+ self.failUnlessEqual(s.name, "downstream")
+ self.failUnlessEqual(s.builderNames, ['builder1'])
+
+ # reloading the same config file should leave the schedulers in place
+ d = self.buildmaster.loadConfig(newcfg)
+ d.addCallback(self._testSchedulers_3, sch)
+ return d
+ def _testSchedulers_3(self, res, sch1):
+ sch2 = self.buildmaster.allSchedulers()
+ self.failUnlessEqual(len(sch2), 2)
+ sch1.sort()
+ sch2.sort()
+ self.failUnlessEqual(sch1, sch2)
+ self.failUnlessIdentical(sch1[0], sch2[0])
+ self.failUnlessIdentical(sch1[1], sch2[1])
+ self.failUnlessIdentical(sch1[0].parent, self.buildmaster)
+ self.failUnlessIdentical(sch1[1].parent, self.buildmaster)
+
+
+
+ def testBuilders(self):
+ master = self.buildmaster
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(master.botmaster.builders, {})
+
+ master.loadConfig(buildersCfg)
+ self.failUnlessEqual(master.botmaster.builderNames, ["builder1"])
+ self.failUnlessEqual(master.botmaster.builders.keys(), ["builder1"])
+ b = master.botmaster.builders["builder1"]
+ self.failUnless(isinstance(b, Builder))
+ self.failUnlessEqual(b.name, "builder1")
+ self.failUnlessEqual(b.slavenames, ["bot1"])
+ self.failUnlessEqual(b.builddir, "workdir")
+ f1 = b.buildFactory
+ self.failUnless(isinstance(f1, BasicBuildFactory))
+ steps = f1.steps
+ self.failUnlessEqual(len(steps), 3)
+ self.failUnlessEqual(steps[0], (CVS,
+ {'cvsroot': 'cvsroot',
+ 'cvsmodule': 'cvsmodule',
+ 'mode': 'clobber'}))
+ self.failUnlessEqual(steps[1], (Compile,
+ {'command': 'make all'}))
+ self.failUnlessEqual(steps[2], (Test,
+ {'command': 'make check'}))
+
+
+ # make sure a reload of the same data doesn't interrupt the Builder
+ master.loadConfig(buildersCfg)
+ self.failUnlessEqual(master.botmaster.builderNames, ["builder1"])
+ self.failUnlessEqual(master.botmaster.builders.keys(), ["builder1"])
+ b2 = master.botmaster.builders["builder1"]
+ self.failUnlessIdentical(b, b2)
+ # TODO: test that the BuilderStatus object doesn't change
+ #statusbag2 = master.client_svc.statusbags["builder1"]
+ #self.failUnlessIdentical(statusbag, statusbag2)
+
+ # but changing something should result in a new Builder
+ master.loadConfig(buildersCfg2)
+ self.failUnlessEqual(master.botmaster.builderNames, ["builder1"])
+ self.failUnlessEqual(master.botmaster.builders.keys(), ["builder1"])
+ b3 = master.botmaster.builders["builder1"]
+ self.failIf(b is b3)
+ # the statusbag remains the same TODO
+ #statusbag3 = master.client_svc.statusbags["builder1"]
+ #self.failUnlessIdentical(statusbag, statusbag3)
+
+ # adding new builder
+ master.loadConfig(buildersCfg3)
+ self.failUnlessEqual(master.botmaster.builderNames, ["builder1",
+ "builder2"])
+ self.failUnlessListsEquivalent(master.botmaster.builders.keys(),
+ ["builder1", "builder2"])
+ b4 = master.botmaster.builders["builder1"]
+ self.failUnlessIdentical(b3, b4)
+
+ # changing first builder should leave it at the same place in the list
+ master.loadConfig(buildersCfg4)
+ self.failUnlessEqual(master.botmaster.builderNames, ["builder1",
+ "builder2"])
+ self.failUnlessListsEquivalent(master.botmaster.builders.keys(),
+ ["builder1", "builder2"])
+ b5 = master.botmaster.builders["builder1"]
+ self.failIf(b4 is b5)
+
+ # and removing it should make the Builder go away
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(master.botmaster.builderNames, [])
+ self.failUnlessEqual(master.botmaster.builders, {})
+ #self.failUnlessEqual(master.client_svc.statusbags, {}) # TODO
+
+ def testWithProperties(self):
+ master = self.buildmaster
+ master.loadConfig(wpCfg1)
+ self.failUnlessEqual(master.botmaster.builderNames, ["builder1"])
+ self.failUnlessEqual(master.botmaster.builders.keys(), ["builder1"])
+ b1 = master.botmaster.builders["builder1"]
+
+ # reloading the same config should leave the builder unchanged
+ master.loadConfig(wpCfg1)
+ b2 = master.botmaster.builders["builder1"]
+ self.failUnlessIdentical(b1, b2)
+
+ # but changing the parameters of the WithProperties should change it
+ master.loadConfig(wpCfg2)
+ b3 = master.botmaster.builders["builder1"]
+ self.failIf(b1 is b3)
+
+ # again, reloading same config should leave the builder unchanged
+ master.loadConfig(wpCfg2)
+ b4 = master.botmaster.builders["builder1"]
+ self.failUnlessIdentical(b3, b4)
+
+ def checkIRC(self, m, expected):
+ ircs = {}
+ for irc in self.servers(m, words.IRC):
+ ircs[irc.host] = (irc.nick, irc.channels)
+ self.failUnlessEqual(ircs, expected)
+
+ def testIRC(self):
+ if not words:
+ raise unittest.SkipTest("Twisted Words package is not installed")
+ master = self.buildmaster
+ master.loadChanges()
+ d = master.loadConfig(emptyCfg)
+ e1 = {}
+ d.addCallback(lambda res: self.checkIRC(master, e1))
+ d.addCallback(lambda res: master.loadConfig(ircCfg1))
+ e2 = {'irc.us.freenode.net': ('buildbot', ['twisted'])}
+ d.addCallback(lambda res: self.checkIRC(master, e2))
+ d.addCallback(lambda res: master.loadConfig(ircCfg2))
+ e3 = {'irc.us.freenode.net': ('buildbot', ['twisted']),
+ 'irc.example.com': ('otherbot', ['chan1', 'chan2'])}
+ d.addCallback(lambda res: self.checkIRC(master, e3))
+ d.addCallback(lambda res: master.loadConfig(ircCfg3))
+ e4 = {'irc.us.freenode.net': ('buildbot', ['knotted'])}
+ d.addCallback(lambda res: self.checkIRC(master, e4))
+ d.addCallback(lambda res: master.loadConfig(ircCfg1))
+ e5 = {'irc.us.freenode.net': ('buildbot', ['twisted'])}
+ d.addCallback(lambda res: self.checkIRC(master, e5))
+ return d
+
+ def testWebPortnum(self):
+ master = self.buildmaster
+ master.loadChanges()
+
+ d = master.loadConfig(webCfg1)
+ def _check1(res):
+ ports = self.checkPorts(self.buildmaster,
+ [(9999, pb.PBServerFactory), (9980, Site)])
+ p = ports[1]
+ self.p = p
+ # nothing should be changed
+ d.addCallback(_check1)
+
+ d.addCallback(lambda res: self.buildmaster.loadConfig(webCfg1))
+ def _check2(res):
+ ports = self.checkPorts(self.buildmaster,
+ [(9999, pb.PBServerFactory), (9980, Site)])
+ self.failUnlessIdentical(self.p, ports[1],
+ "web port was changed even though "
+ "configuration was not")
+ # WebStatus is no longer a ComparableMixin, so it will be
+ # rebuilt on each reconfig
+ #d.addCallback(_check2)
+
+ d.addCallback(lambda res: self.buildmaster.loadConfig(webCfg2))
+ # changes port to 9981
+ def _check3(p):
+ ports = self.checkPorts(self.buildmaster,
+ [(9999, pb.PBServerFactory), (9981, Site)])
+ self.failIf(self.p is ports[1],
+ "configuration was changed but web port was unchanged")
+ d.addCallback(_check3)
+
+ d.addCallback(lambda res: self.buildmaster.loadConfig(webCfg3))
+ # make 9981 on only localhost
+ def _check4(p):
+ ports = self.checkPorts(self.buildmaster,
+ [(9999, pb.PBServerFactory), (9981, Site)])
+ self.failUnlessEqual(ports[1].kwargs['interface'], "127.0.0.1")
+ d.addCallback(_check4)
+
+ d.addCallback(lambda res: self.buildmaster.loadConfig(emptyCfg))
+ d.addCallback(lambda res:
+ self.checkPorts(self.buildmaster,
+ [(9999, pb.PBServerFactory)]))
+ return d
+
+    def testWebPathname(self):
+        master = self.buildmaster
+        master.loadChanges()
+
+        d = master.loadConfig(webNameCfg1)
+        def _check1(res):
+            self.checkPorts(self.buildmaster,
+                            [(9999, pb.PBServerFactory),
+                             ('~/.twistd-web-pb', pb.PBServerFactory)])
+            unixports = self.UNIXports(self.buildmaster)
+            self.f = f = unixports[0].args[1]
+            self.failUnless(isinstance(f.root, ResourcePublisher))
+        d.addCallback(_check1)
+
+        d.addCallback(lambda res: self.buildmaster.loadConfig(webNameCfg1))
+        # nothing should be changed
+        def _check2(res):
+            self.checkPorts(self.buildmaster,
+                            [(9999, pb.PBServerFactory),
+                             ('~/.twistd-web-pb', pb.PBServerFactory)])
+            newf = self.UNIXports(self.buildmaster)[0].args[1]
+            self.failUnlessIdentical(self.f, newf,
+                                     "web factory was changed even though "
+                                     "configuration was not")
+        # WebStatus is no longer a ComparableMixin, so it will be
+        # rebuilt on each reconfig
+        #d.addCallback(_check2)
+
+        d.addCallback(lambda res: self.buildmaster.loadConfig(webNameCfg2))
+        def _check3(res):
+            self.checkPorts(self.buildmaster,
+                            [(9999, pb.PBServerFactory),
+                             ('./bar.socket', pb.PBServerFactory)])
+            newf = self.UNIXports(self.buildmaster)[0].args[1]  # no trailing comma: a 1-tuple is never identical to self.f
+            self.failIf(self.f is newf,
+                        "web factory was unchanged but "
+                        "configuration was changed")
+        d.addCallback(_check3)
+
+        d.addCallback(lambda res: self.buildmaster.loadConfig(emptyCfg))
+        d.addCallback(lambda res:
+                      self.checkPorts(self.buildmaster,
+                                      [(9999, pb.PBServerFactory)]))
+        return d
+
+ def testDebugPassword(self):
+ master = self.buildmaster
+
+ master.loadConfig(debugPasswordCfg)
+ self.failUnlessEqual(master.checker.users,
+ {"change": "changepw",
+ "debug": "sekrit"})
+
+ master.loadConfig(debugPasswordCfg)
+ self.failUnlessEqual(master.checker.users,
+ {"change": "changepw",
+ "debug": "sekrit"})
+
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(master.checker.users,
+ {"change": "changepw"})
+
+ def testLocks(self):
+ master = self.buildmaster
+ botmaster = master.botmaster
+
+ # make sure that c['interlocks'] is rejected properly
+ self.failUnlessRaises(KeyError, master.loadConfig, interlockCfgBad)
+ # and that duplicate-named Locks are caught
+ self.failUnlessRaises(ValueError, master.loadConfig, lockCfgBad1)
+ self.failUnlessRaises(ValueError, master.loadConfig, lockCfgBad2)
+ self.failUnlessRaises(ValueError, master.loadConfig, lockCfgBad3)
+
+ # create a Builder that uses Locks
+ master.loadConfig(lockCfg1a)
+ b1 = master.botmaster.builders["builder1"]
+ self.failUnlessEqual(len(b1.locks), 2)
+
+ # reloading the same config should not change the Builder
+ master.loadConfig(lockCfg1a)
+ self.failUnlessIdentical(b1, master.botmaster.builders["builder1"])
+ # but changing the set of locks used should change it
+ master.loadConfig(lockCfg1b)
+ self.failIfIdentical(b1, master.botmaster.builders["builder1"])
+ b1 = master.botmaster.builders["builder1"]
+ self.failUnlessEqual(len(b1.locks), 1)
+
+ # similar test with step-scoped locks
+ master.loadConfig(lockCfg2a)
+ b1 = master.botmaster.builders["builder1"]
+ # reloading the same config should not change the Builder
+ master.loadConfig(lockCfg2a)
+ self.failUnlessIdentical(b1, master.botmaster.builders["builder1"])
+ # but changing the set of locks used should change it
+ master.loadConfig(lockCfg2b)
+ self.failIfIdentical(b1, master.botmaster.builders["builder1"])
+ b1 = master.botmaster.builders["builder1"]
+ # remove the locks entirely
+ master.loadConfig(lockCfg2c)
+ self.failIfIdentical(b1, master.botmaster.builders["builder1"])
+
+class ConfigElements(unittest.TestCase):
+ # verify that ComparableMixin is working
+ def testSchedulers(self):
+ s1 = scheduler.Scheduler(name='quick', branch=None,
+ treeStableTimer=30,
+ builderNames=['quick'])
+ s2 = scheduler.Scheduler(name="all", branch=None,
+ treeStableTimer=5*60,
+ builderNames=["a", "b"])
+ s3 = scheduler.Try_Userpass("try", ["a","b"], port=9989,
+ userpass=[("foo","bar")])
+ s1a = scheduler.Scheduler(name='quick', branch=None,
+ treeStableTimer=30,
+ builderNames=['quick'])
+ s2a = scheduler.Scheduler(name="all", branch=None,
+ treeStableTimer=5*60,
+ builderNames=["a", "b"])
+ s3a = scheduler.Try_Userpass("try", ["a","b"], port=9989,
+ userpass=[("foo","bar")])
+ self.failUnless(s1 == s1)
+ self.failUnless(s1 == s1a)
+ self.failUnless(s1a in [s1, s2, s3])
+ self.failUnless(s2a in [s1, s2, s3])
+ self.failUnless(s3a in [s1, s2, s3])
+
+
+
+class ConfigFileTest(unittest.TestCase):
+
+ def testFindConfigFile(self):
+ os.mkdir("test_cf")
+ open(os.path.join("test_cf", "master.cfg"), "w").write(emptyCfg)
+ slaveportCfg = emptyCfg + "c['slavePortnum'] = 9000\n"
+ open(os.path.join("test_cf", "alternate.cfg"), "w").write(slaveportCfg)
+
+ m = BuildMaster("test_cf")
+ m.loadTheConfigFile()
+ self.failUnlessEqual(m.slavePortnum, "tcp:9999")
+
+ m = BuildMaster("test_cf", "alternate.cfg")
+ m.loadTheConfigFile()
+ self.failUnlessEqual(m.slavePortnum, "tcp:9000")
+
+
+class MyTarget(base.StatusReceiverMultiService):
+ def __init__(self, name):
+ self.name = name
+ base.StatusReceiverMultiService.__init__(self)
+ def startService(self):
+ # make a note in a list stashed in the BuildMaster
+ self.parent.targetevents.append(("start", self.name))
+ return base.StatusReceiverMultiService.startService(self)
+ def stopService(self):
+ self.parent.targetevents.append(("stop", self.name))
+ return base.StatusReceiverMultiService.stopService(self)
+
+class MySlowTarget(MyTarget):
+ def stopService(self):
+ from twisted.internet import reactor
+ d = base.StatusReceiverMultiService.stopService(self)
+ def stall(res):
+ d2 = defer.Deferred()
+ reactor.callLater(0.1, d2.callback, res)
+ return d2
+ d.addCallback(stall)
+ m = self.parent
+ def finishedStalling(res):
+ m.targetevents.append(("stop", self.name))
+ return res
+ d.addCallback(finishedStalling)
+ return d
+
+# we can't actually startService a buildmaster with a config that uses a
+# fixed slavePortnum like 9999, so instead this makes it possible to pass '0'
+# for the first time, and then substitute back in the allocated port number
+# on subsequent passes.
+startableEmptyCfg = emptyCfg + \
+"""
+c['slavePortnum'] = %d
+"""
+
+targetCfg1 = startableEmptyCfg + \
+"""
+from buildbot.test.test_config import MyTarget
+c['status'] = [MyTarget('a')]
+"""
+
+targetCfg2 = startableEmptyCfg + \
+"""
+from buildbot.test.test_config import MySlowTarget
+c['status'] = [MySlowTarget('b')]
+"""
+
+class StartService(unittest.TestCase):
+ def tearDown(self):
+ return self.master.stopService()
+
+ def testStartService(self):
+ os.mkdir("test_ss")
+ self.master = m = BuildMaster("test_ss")
+ # inhibit the usual read-config-on-startup behavior
+ m.readConfig = True
+ m.startService()
+ d = m.loadConfig(startableEmptyCfg % 0)
+ d.addCallback(self._testStartService_0)
+ return d
+
+ def _testStartService_0(self, res):
+ m = self.master
+ m.targetevents = []
+ # figure out what port got allocated
+ self.portnum = m.slavePort._port.getHost().port
+ d = m.loadConfig(targetCfg1 % self.portnum)
+ d.addCallback(self._testStartService_1)
+ return d
+
+ def _testStartService_1(self, res):
+ self.failUnlessEqual(len(self.master.statusTargets), 1)
+ self.failUnless(isinstance(self.master.statusTargets[0], MyTarget))
+ self.failUnlessEqual(self.master.targetevents,
+ [('start', 'a')])
+ self.master.targetevents = []
+ # reloading the same config should not start or stop the target
+ d = self.master.loadConfig(targetCfg1 % self.portnum)
+ d.addCallback(self._testStartService_2)
+ return d
+
+ def _testStartService_2(self, res):
+ self.failUnlessEqual(self.master.targetevents, [])
+ # but loading a new config file should stop the old one, then
+ # start the new one
+ d = self.master.loadConfig(targetCfg2 % self.portnum)
+ d.addCallback(self._testStartService_3)
+ return d
+
+ def _testStartService_3(self, res):
+ self.failUnlessEqual(self.master.targetevents,
+ [('stop', 'a'), ('start', 'b')])
+ self.master.targetevents = []
+ # and going back to the old one should do the same, in the same
+ # order, even though the current MySlowTarget takes a moment to shut
+ # down
+ d = self.master.loadConfig(targetCfg1 % self.portnum)
+ d.addCallback(self._testStartService_4)
+ return d
+
+ def _testStartService_4(self, res):
+ self.failUnlessEqual(self.master.targetevents,
+ [('stop', 'b'), ('start', 'a')])
+
+cfg1 = \
+"""
+from buildbot.process.factory import BuildFactory, s
+from buildbot.steps.shell import ShellCommand
+from buildbot.steps.source import Darcs
+from buildbot.buildslave import BuildSlave
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+c['schedulers'] = []
+c['slavePortnum'] = 9999
+f1 = BuildFactory([ShellCommand(command='echo yes'),
+ s(ShellCommand, command='old-style'),
+ ])
+f1.addStep(Darcs(repourl='http://buildbot.net/repos/trunk'))
+f1.addStep(ShellCommand, command='echo old-style')
+c['builders'] = [{'name':'builder1', 'slavename':'bot1',
+ 'builddir':'workdir', 'factory':f1}]
+"""
+
+class Factories(unittest.TestCase):
+
+ def failUnlessExpectedShell(self, factory, defaults=True, **kwargs):
+ shell_args = {}
+ if defaults:
+ shell_args.update({'descriptionDone': None,
+ 'description': None,
+ 'workdir': None,
+ 'logfiles': {},
+ 'usePTY': "slave-config",
+ })
+ shell_args.update(kwargs)
+ self.failUnlessIdentical(factory[0], ShellCommand)
+ if factory[1] != shell_args:
+ print
+ print "factory had:"
+ for k in sorted(factory[1].keys()):
+ print k
+ print "but we were expecting:"
+ for k in sorted(shell_args.keys()):
+ print k
+ self.failUnlessEqual(factory[1], shell_args)
+
+ def failUnlessExpectedDarcs(self, factory, **kwargs):
+ darcs_args = {'workdir': None,
+ 'alwaysUseLatest': False,
+ 'mode': 'update',
+ 'timeout': 1200,
+ 'retry': None,
+ 'baseURL': None,
+ 'defaultBranch': None,
+ 'logfiles': {},
+ }
+ darcs_args.update(kwargs)
+ self.failUnlessIdentical(factory[0], Darcs)
+ if factory[1] != darcs_args:
+ print
+ print "factory had:"
+ for k in sorted(factory[1].keys()):
+ print k
+ print "but we were expecting:"
+ for k in sorted(darcs_args.keys()):
+ print k
+ self.failUnlessEqual(factory[1], darcs_args)
+
+ def testSteps(self):
+ m = BuildMaster(".")
+ m.loadConfig(cfg1)
+ b = m.botmaster.builders["builder1"]
+ steps = b.buildFactory.steps
+ self.failUnlessEqual(len(steps), 4)
+
+ self.failUnlessExpectedShell(steps[0], command="echo yes")
+ self.failUnlessExpectedShell(steps[1], defaults=False,
+ command="old-style")
+ self.failUnlessExpectedDarcs(steps[2],
+ repourl="http://buildbot.net/repos/trunk")
+ self.failUnlessExpectedShell(steps[3], defaults=False,
+ command="echo old-style")
+
+ def _loop(self, orig):
+ step_class, kwargs = orig.getStepFactory()
+ newstep = step_class(**kwargs)
+ return newstep
+
+ def testAllSteps(self):
+ # make sure that steps can be created from the factories that they
+ # return
+ for s in ( dummy.Dummy(), dummy.FailingDummy(), dummy.RemoteDummy(),
+ maxq.MaxQ("testdir"),
+ python.BuildEPYDoc(), python.PyFlakes(),
+ python_twisted.HLint(),
+ python_twisted.Trial(testpath=None, tests="tests"),
+ python_twisted.ProcessDocs(), python_twisted.BuildDebs(),
+ python_twisted.RemovePYCs(),
+ shell.ShellCommand(), shell.TreeSize(),
+ shell.Configure(), shell.Compile(), shell.Test(),
+ source.CVS("cvsroot", "module"),
+ source.SVN("svnurl"), source.Darcs("repourl"),
+ source.Git("repourl"),
+ source.Arch("url", "version"),
+ source.Bazaar("url", "version", "archive"),
+ source.Bzr("repourl"),
+ source.Mercurial("repourl"),
+ source.P4("p4base"),
+ source.P4Sync(1234, "p4user", "passwd", "client",
+ mode="copy"),
+ source.Monotone("server", "branch"),
+ transfer.FileUpload("src", "dest"),
+ transfer.FileDownload("src", "dest"),
+ ):
+ try:
+ self._loop(s)
+ except:
+ print "error checking %s" % s
+ raise
+
diff --git a/buildbot/buildbot/test/test_control.py b/buildbot/buildbot/test/test_control.py
new file mode 100644
index 0000000..298d48a
--- /dev/null
+++ b/buildbot/buildbot/test/test_control.py
@@ -0,0 +1,104 @@
+# -*- test-case-name: buildbot.test.test_control -*-
+
+import os
+
+from twisted.trial import unittest
+from twisted.internet import defer
+
+from buildbot import master, interfaces
+from buildbot.sourcestamp import SourceStamp
+from buildbot.slave import bot
+from buildbot.status.builder import SUCCESS
+from buildbot.process import base
+from buildbot.test.runutils import rmtree
+
+config = """
+from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.buildslave import BuildSlave
+
+def s(klass, **kwargs):
+ return (klass, kwargs)
+
+f1 = factory.BuildFactory([
+ s(dummy.Dummy, timeout=1),
+ ])
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit')]
+c['schedulers'] = []
+c['builders'] = [{'name': 'force', 'slavename': 'bot1',
+ 'builddir': 'force-dir', 'factory': f1}]
+c['slavePortnum'] = 0
+BuildmasterConfig = c
+"""
+
+class FakeBuilder:
+ name = "fake"
+ def getSlaveCommandVersion(self, command, oldversion=None):
+ return "1.10"
+
+
+class Force(unittest.TestCase):
+
+ def rmtree(self, d):
+ rmtree(d)
+
+ def setUp(self):
+ self.master = None
+ self.slave = None
+ self.rmtree("control_basedir")
+ os.mkdir("control_basedir")
+ self.master = master.BuildMaster("control_basedir")
+ self.slavebase = os.path.abspath("control_slavebase")
+ self.rmtree(self.slavebase)
+ os.mkdir("control_slavebase")
+
+ def connectSlave(self):
+ port = self.master.slavePort._port.getHost().port
+ slave = bot.BuildSlave("localhost", port, "bot1", "sekrit",
+ self.slavebase, keepalive=0, usePTY=1)
+ self.slave = slave
+ slave.startService()
+ d = self.master.botmaster.waitUntilBuilderAttached("force")
+ return d
+
+ def tearDown(self):
+ dl = []
+ if self.slave:
+ dl.append(self.master.botmaster.waitUntilBuilderDetached("force"))
+ dl.append(defer.maybeDeferred(self.slave.stopService))
+ if self.master:
+ dl.append(defer.maybeDeferred(self.master.stopService))
+ return defer.DeferredList(dl)
+
+ def testRequest(self):
+ m = self.master
+ m.loadConfig(config)
+ m.startService()
+ d = self.connectSlave()
+ d.addCallback(self._testRequest_1)
+ return d
+ def _testRequest_1(self, res):
+ c = interfaces.IControl(self.master)
+ req = base.BuildRequest("I was bored", SourceStamp(), 'test_builder')
+ builder_control = c.getBuilder("force")
+ d = defer.Deferred()
+ req.subscribe(d.callback)
+ builder_control.requestBuild(req)
+ d.addCallback(self._testRequest_2)
+ # we use the same check-the-results code as testForce
+ return d
+
+ def _testRequest_2(self, build_control):
+ self.failUnless(interfaces.IBuildControl.providedBy(build_control))
+ d = build_control.getStatus().waitUntilFinished()
+ d.addCallback(self._testRequest_3)
+ return d
+
+ def _testRequest_3(self, bs):
+ self.failUnless(interfaces.IBuildStatus.providedBy(bs))
+ self.failUnless(bs.isFinished())
+ self.failUnlessEqual(bs.getResults(), SUCCESS)
+ #self.failUnlessEqual(bs.getResponsibleUsers(), ["bob"]) # TODO
+ self.failUnlessEqual(bs.getChanges(), ())
+ #self.failUnlessEqual(bs.getReason(), "forced") # TODO
diff --git a/buildbot/buildbot/test/test_dependencies.py b/buildbot/buildbot/test/test_dependencies.py
new file mode 100644
index 0000000..624efc4
--- /dev/null
+++ b/buildbot/buildbot/test/test_dependencies.py
@@ -0,0 +1,166 @@
+# -*- test-case-name: buildbot.test.test_dependencies -*-
+
+from twisted.trial import unittest
+
+from twisted.internet import reactor, defer
+
+from buildbot.test.runutils import RunMixin
+from buildbot.status import base
+
+config_1 = """
+from buildbot import scheduler
+from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.buildslave import BuildSlave
+s = factory.s
+from buildbot.test.test_locks import LockStep
+
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit'), BuildSlave('bot2', 'sekrit')]
+c['schedulers'] = []
+c['slavePortnum'] = 0
+
+# upstream1 (fastfail, slowpass)
+# -> downstream2 (b3, b4)
+# upstream3 (slowfail, slowpass)
+# -> downstream4 (b3, b4)
+# -> downstream5 (b5)
+
+s1 = scheduler.Scheduler('upstream1', None, 10, ['slowpass', 'fastfail'])
+s2 = scheduler.Dependent('downstream2', s1, ['b3', 'b4'])
+s3 = scheduler.Scheduler('upstream3', None, 10, ['fastpass', 'slowpass'])
+s4 = scheduler.Dependent('downstream4', s3, ['b3', 'b4'])
+s5 = scheduler.Dependent('downstream5', s4, ['b5'])
+c['schedulers'] = [s1, s2, s3, s4, s5]
+
+f_fastpass = factory.BuildFactory([s(dummy.Dummy, timeout=1)])
+f_slowpass = factory.BuildFactory([s(dummy.Dummy, timeout=2)])
+f_fastfail = factory.BuildFactory([s(dummy.FailingDummy, timeout=1)])
+
+def builder(name, f):
+ d = {'name': name, 'slavename': 'bot1', 'builddir': name, 'factory': f}
+ return d
+
+c['builders'] = [builder('slowpass', f_slowpass),
+ builder('fastfail', f_fastfail),
+ builder('fastpass', f_fastpass),
+ builder('b3', f_fastpass),
+ builder('b4', f_fastpass),
+ builder('b5', f_fastpass),
+ ]
+"""
+
+class Logger(base.StatusReceiverMultiService):
+ def __init__(self, master):
+ base.StatusReceiverMultiService.__init__(self)
+ self.builds = []
+ for bn in master.status.getBuilderNames():
+ master.status.getBuilder(bn).subscribe(self)
+
+ def buildStarted(self, builderName, build):
+ self.builds.append(builderName)
+
+class Dependencies(RunMixin, unittest.TestCase):
+ def setUp(self):
+ RunMixin.setUp(self)
+ self.master.loadConfig(config_1)
+ self.master.startService()
+ d = self.connectSlave(["slowpass", "fastfail", "fastpass",
+ "b3", "b4", "b5"])
+ return d
+
+ def findScheduler(self, name):
+ for s in self.master.allSchedulers():
+ if s.name == name:
+ return s
+ raise KeyError("No Scheduler named '%s'" % name)
+
+ def testParse(self):
+ self.master.loadConfig(config_1)
+ # that's it, just make sure this config file is loaded successfully
+
+ def testRun_Fail(self):
+        # add an extra status target so we can pay attention to which builds
+        # start and which don't.
+ self.logger = Logger(self.master)
+
+ # kick off upstream1, which has a failing Builder and thus will not
+        # trigger downstream2
+ s = self.findScheduler("upstream1")
+ # this is an internal function of the Scheduler class
+ s.fireTimer() # fires a build
+ # t=0: two builders start: 'slowpass' and 'fastfail'
+ # t=1: builder 'fastfail' finishes
+ # t=2: builder 'slowpass' finishes
+ d = defer.Deferred()
+ d.addCallback(self._testRun_Fail_1)
+ reactor.callLater(5, d.callback, None)
+ return d
+
+ def _testRun_Fail_1(self, res):
+ # 'slowpass' and 'fastfail' should have run one build each
+ b = self.status.getBuilder('slowpass').getLastFinishedBuild()
+ self.failUnless(b)
+ self.failUnlessEqual(b.getNumber(), 0)
+ b = self.status.getBuilder('fastfail').getLastFinishedBuild()
+ self.failUnless(b)
+ self.failUnlessEqual(b.getNumber(), 0)
+
+ # none of the other builders should have run
+ self.failIf(self.status.getBuilder('b3').getLastFinishedBuild())
+ self.failIf(self.status.getBuilder('b4').getLastFinishedBuild())
+ self.failIf(self.status.getBuilder('b5').getLastFinishedBuild())
+
+ # in fact, none of them should have even started
+ self.failUnlessEqual(len(self.logger.builds), 2)
+ self.failUnless("slowpass" in self.logger.builds)
+ self.failUnless("fastfail" in self.logger.builds)
+ self.failIf("b3" in self.logger.builds)
+ self.failIf("b4" in self.logger.builds)
+ self.failIf("b5" in self.logger.builds)
+
+ def testRun_Pass(self):
+ # kick off upstream3, which will fire downstream4 and then
+ # downstream5
+ s = self.findScheduler("upstream3")
+ # this is an internal function of the Scheduler class
+ s.fireTimer() # fires a build
+ # t=0: slowpass and fastpass start
+ # t=1: builder 'fastpass' finishes
+ # t=2: builder 'slowpass' finishes
+ # scheduler 'downstream4' fires
+ # builds b3 and b4 are started
+ # t=3: builds b3 and b4 finish
+ # scheduler 'downstream5' fires
+ # build b5 is started
+ # t=4: build b5 is finished
+ d = defer.Deferred()
+ d.addCallback(self._testRun_Pass_1)
+ reactor.callLater(5, d.callback, None)
+ return d
+
+    def _testRun_Pass_1(self, res):
+        # 'fastpass' and 'slowpass' should have run one build each
+        b = self.status.getBuilder('fastpass').getLastFinishedBuild()
+        self.failUnless(b)
+        self.failUnlessEqual(b.getNumber(), 0)
+
+        b = self.status.getBuilder('slowpass').getLastFinishedBuild()
+        self.failUnless(b)
+        self.failUnlessEqual(b.getNumber(), 0)
+
+        self.failIf(self.status.getBuilder('fastfail').getLastFinishedBuild())
+
+        b = self.status.getBuilder('b3').getLastFinishedBuild()
+        self.failUnless(b)
+        self.failUnlessEqual(b.getNumber(), 0)
+
+        b = self.status.getBuilder('b4').getLastFinishedBuild()
+        self.failUnless(b)
+        self.failUnlessEqual(b.getNumber(), 0)
+
+        b = self.status.getBuilder('b5').getLastFinishedBuild()  # was 'b4' checked twice; downstream5 fires b5 at t=4
+        self.failUnless(b)
+        self.failUnlessEqual(b.getNumber(), 0)
+
+
diff --git a/buildbot/buildbot/test/test_ec2buildslave.py b/buildbot/buildbot/test/test_ec2buildslave.py
new file mode 100644
index 0000000..d0f1644
--- /dev/null
+++ b/buildbot/buildbot/test/test_ec2buildslave.py
@@ -0,0 +1,552 @@
+# Portions copyright Canonical Ltd. 2009
+
+import os
+import sys
+import StringIO
+import textwrap
+
+from twisted.trial import unittest
+from twisted.internet import defer, reactor
+
+from buildbot.process.base import BuildRequest
+from buildbot.sourcestamp import SourceStamp
+from buildbot.status.builder import SUCCESS
+from buildbot.test.runutils import RunMixin
+
+
+PENDING = 'pending'
+RUNNING = 'running'
+SHUTTINGDOWN = 'shutting-down'
+TERMINATED = 'terminated'
+
+
+class EC2ResponseError(Exception):
+ def __init__(self, code):
+ self.code = code
+
+
+class Stub:
+ def __init__(self, **kwargs):
+ self.__dict__.update(kwargs)
+
+
+class Instance:
+
+ def __init__(self, data, ami, **kwargs):
+ self.data = data
+ self.state = PENDING
+ self.id = ami
+ self.public_dns_name = 'ec2-012-345-678-901.compute-1.amazonaws.com'
+ self.__dict__.update(kwargs)
+ self.output = Stub(name='output', output='example_output')
+
+ def update(self):
+ if self.state == PENDING:
+ self.data.testcase.connectOneSlave(self.data.slave.slavename)
+ self.state = RUNNING
+ elif self.state == SHUTTINGDOWN:
+ slavename = self.data.slave.slavename
+ slaves = self.data.testcase.slaves
+ if slavename in slaves:
+ def discard(data):
+ pass
+ s = slaves.pop(slavename)
+ bot = s.getServiceNamed("bot")
+ for buildername in self.data.slave.slavebuilders:
+ remote = bot.builders[buildername].remote
+ if remote is None:
+ continue
+ broker = remote.broker
+ broker.dataReceived = discard # seal its ears
+ # and take away its voice
+ broker.transport.write = discard
+ # also discourage it from reconnecting once the connection
+ # goes away
+ s.bf.continueTrying = False
+ # stop the service for cleanliness
+ s.stopService()
+ self.state = TERMINATED
+
+ def get_console_output(self):
+ return self.output
+
+ def use_ip(self, elastic_ip):
+ if isinstance(elastic_ip, Stub):
+ elastic_ip = elastic_ip.public_ip
+ if self.data.addresses[elastic_ip] is not None:
+ raise ValueError('elastic ip already used')
+ self.data.addresses[elastic_ip] = self
+
+ def stop(self):
+ self.state = SHUTTINGDOWN
+
+class Image:
+
+ def __init__(self, data, ami, owner, location):
+ self.data = data
+ self.id = ami
+ self.owner = owner
+ self.location = location
+
+ def run(self, **kwargs):
+ return Stub(name='reservation',
+ instances=[Instance(self.data, self.id, **kwargs)])
+
+ @classmethod
+ def create(klass, data, ami, owner, location):
+ assert ami not in data.images
+ self = klass(data, ami, owner, location)
+ data.images[ami] = self
+ return self
+
+
+class Connection:
+
+ def __init__(self, data):
+ self.data = data
+
+ def get_all_key_pairs(self, keypair_name):
+ try:
+ return [self.data.keys[keypair_name]]
+ except KeyError:
+ raise EC2ResponseError('InvalidKeyPair.NotFound')
+
+ def create_key_pair(self, keypair_name):
+ return Key.create(keypair_name, self.data.keys)
+
+ def get_all_security_groups(self, security_name):
+ try:
+ return [self.data.security_groups[security_name]]
+ except KeyError:
+ raise EC2ResponseError('InvalidGroup.NotFound')
+
+ def create_security_group(self, security_name, description):
+ assert security_name not in self.data.security_groups
+ res = Stub(name='security_group', value=security_name,
+ description=description)
+ self.data.security_groups[security_name] = res
+ return res
+
+ def get_all_images(self, owners=None):
+ # return a list of images. images have .location and .id.
+ res = self.data.images.values()
+ if owners:
+ res = [image for image in res if image.owner in owners]
+ return res
+
+ def get_image(self, machine_id):
+ # return image or raise an error
+ return self.data.images[machine_id]
+
+ def get_all_addresses(self, elastic_ips):
+ res = []
+ for ip in elastic_ips:
+ if ip in self.data.addresses:
+ res.append(Stub(public_ip=ip))
+ else:
+ raise EC2ResponseError('...bad address...')
+ return res
+
+ def disassociate_address(self, address):
+ if address not in self.data.addresses:
+ raise EC2ResponseError('...unknown address...')
+ self.data.addresses[address] = None
+
+
+class Key:
+
+ # this is what we would need to do if we actually needed a real key.
+ # We don't right now.
+ #def __init__(self):
+ # self.raw = paramiko.RSAKey.generate(256)
+ # f = StringIO.StringIO()
+ # self.raw.write_private_key(f)
+ # self.material = f.getvalue()
+
+ @classmethod
+ def create(klass, name, keys):
+ self = klass()
+ self.name = name
+ self.keys = keys
+ assert name not in keys
+ keys[name] = self
+ return self
+
+ def delete(self):
+ del self.keys[self.name]
+
+
+class Boto:
+
+ slave = None # must be set in setUp
+
+ def __init__(self, testcase):
+ self.testcase = testcase
+ self.keys = {}
+ Key.create('latent_buildbot_slave', self.keys)
+ Key.create('buildbot_slave', self.keys)
+ assert sorted(self.keys.keys()) == ['buildbot_slave',
+ 'latent_buildbot_slave']
+ self.original_keys = dict(self.keys)
+ self.security_groups = {
+ 'latent_buildbot_slave': Stub(name='security_group',
+ value='latent_buildbot_slave')}
+ self.addresses = {'127.0.0.1': None}
+ self.images = {}
+ Image.create(self, 'ami-12345', 12345667890,
+ 'test-xx/image.manifest.xml')
+ Image.create(self, 'ami-AF000', 11111111111,
+ 'test-f0a/image.manifest.xml')
+ Image.create(self, 'ami-CE111', 22222222222,
+ 'test-e1b/image.manifest.xml')
+ Image.create(self, 'ami-ED222', 22222222222,
+ 'test-d2c/image.manifest.xml')
+ Image.create(self, 'ami-FC333', 22222222222,
+ 'test-c30d/image.manifest.xml')
+ Image.create(self, 'ami-DB444', 11111111111,
+ 'test-b4e/image.manifest.xml')
+ Image.create(self, 'ami-BA555', 11111111111,
+ 'test-a5f/image.manifest.xml')
+
+ def connect_ec2(self, identifier, secret_identifier):
+ assert identifier == 'publickey', identifier
+ assert secret_identifier == 'privatekey', secret_identifier
+ return Connection(self)
+
+ exception = Stub(EC2ResponseError=EC2ResponseError)
+
+
+class Mixin(RunMixin):
+
+ def doBuild(self):
+ br = BuildRequest("forced", SourceStamp(), 'test_builder')
+ d = br.waitUntilFinished()
+ self.control.getBuilder('b1').requestBuild(br)
+ return d
+
+ def setUp(self):
+ self.boto_setUp1()
+ self.master.loadConfig(self.config)
+ self.boto_setUp2()
+ self.boto_setUp3()
+
+ def boto_setUp1(self):
+ # debugging
+ #import twisted.internet.base
+ #twisted.internet.base.DelayedCall.debug = True
+ # debugging
+ RunMixin.setUp(self)
+ self.boto = boto = Boto(self)
+ if 'boto' not in sys.modules:
+ sys.modules['boto'] = boto
+ sys.modules['boto.exception'] = boto.exception
+ if 'buildbot.ec2buildslave' in sys.modules:
+ sys.modules['buildbot.ec2buildslave'].boto = boto
+
+ def boto_setUp2(self):
+ if sys.modules['boto'] is self.boto:
+ del sys.modules['boto']
+ del sys.modules['boto.exception']
+
+ def boto_setUp3(self):
+ self.master.startService()
+ self.boto.slave = self.bot1 = self.master.botmaster.slaves['bot1']
+ self.bot1._poll_resolution = 0.1
+ self.b1 = self.master.botmaster.builders['b1']
+
+ def tearDown(self):
+ try:
+ import boto
+ import boto.exception
+ except ImportError:
+ pass
+ else:
+ sys.modules['buildbot.ec2buildslave'].boto = boto
+ return RunMixin.tearDown(self)
+
+
+class BasicConfig(Mixin, unittest.TestCase):
+ config = textwrap.dedent("""\
+ from buildbot.process import factory
+ from buildbot.steps import dummy
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ s = factory.s
+
+ BuildmasterConfig = c = {}
+ c['slaves'] = [EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ 'ami-12345',
+ identifier='publickey',
+ secret_identifier='privatekey'
+ )]
+ c['schedulers'] = []
+ c['slavePortnum'] = 0
+ c['schedulers'] = []
+
+ f1 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=1)])
+
+ c['builders'] = [
+ {'name': 'b1', 'slavenames': ['bot1'],
+ 'builddir': 'b1', 'factory': f1},
+ ]
+ """)
+
+ def testSequence(self):
+ # test with secrets in config, a single AMI, and defaults.
+ self.assertEqual(self.bot1.ami, 'ami-12345')
+ self.assertEqual(self.bot1.instance_type, 'm1.large')
+ self.assertEqual(self.bot1.keypair_name, 'latent_buildbot_slave')
+ self.assertEqual(self.bot1.security_name, 'latent_buildbot_slave')
+ # this would be appropriate if we were recreating keys.
+ #self.assertNotEqual(self.boto.keys['latent_buildbot_slave'],
+ # self.boto.original_keys['latent_buildbot_slave'])
+ self.failUnless(isinstance(self.bot1.get_image(), Image))
+ self.assertEqual(self.bot1.get_image().id, 'ami-12345')
+ self.assertIdentical(self.bot1.elastic_ip, None)
+ self.assertIdentical(self.bot1.instance, None)
+ # let's start a build...
+ self.build_deferred = self.doBuild()
+ # ...and wait for the ec2 slave to show up
+ d = self.bot1.substantiation_deferred
+ d.addCallback(self._testSequence_1)
+ return d
+ def _testSequence_1(self, res):
+ # bot 1 is substantiated.
+ self.assertNotIdentical(self.bot1.slave, None)
+ self.failUnless(self.bot1.substantiated)
+ self.failUnless(isinstance(self.bot1.instance, Instance))
+ self.assertEqual(self.bot1.instance.id, 'ami-12345')
+ self.assertEqual(self.bot1.instance.state, RUNNING)
+ self.assertEqual(self.bot1.instance.key_name, 'latent_buildbot_slave')
+ self.assertEqual(self.bot1.instance.security_groups,
+ ['latent_buildbot_slave'])
+ self.assertEqual(self.bot1.instance.instance_type, 'm1.large')
+ self.assertEqual(self.bot1.output.output, 'example_output')
+ # now we'll wait for the build to complete
+ d = self.build_deferred
+ del self.build_deferred
+ d.addCallback(self._testSequence_2)
+ return d
+ def _testSequence_2(self, res):
+ # build was a success!
+ self.failUnlessEqual(res.getResults(), SUCCESS)
+ self.failUnlessEqual(res.getSlavename(), "bot1")
+ # Let's let it shut down. We'll set the build_wait_timer to fire
+ # sooner, and wait for it to fire.
+ self.bot1.build_wait_timer.reset(0)
+ # we'll stash the instance around to look at it
+ self.instance = self.bot1.instance
+ # now we wait.
+ d = defer.Deferred()
+ reactor.callLater(0.5, d.callback, None)
+ d.addCallback(self._testSequence_3)
+ return d
+ def _testSequence_3(self, res):
+ # slave is insubstantiated
+ self.assertIdentical(self.bot1.slave, None)
+ self.failIf(self.bot1.substantiated)
+ self.assertIdentical(self.bot1.instance, None)
+ self.assertEqual(self.instance.state, TERMINATED)
+ del self.instance
+
+class ElasticIP(Mixin, unittest.TestCase):
+ config = textwrap.dedent("""\
+ from buildbot.process import factory
+ from buildbot.steps import dummy
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ s = factory.s
+
+ BuildmasterConfig = c = {}
+ c['slaves'] = [EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ 'ami-12345',
+ identifier='publickey',
+ secret_identifier='privatekey',
+ elastic_ip='127.0.0.1'
+ )]
+ c['schedulers'] = []
+ c['slavePortnum'] = 0
+ c['schedulers'] = []
+
+ f1 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=1)])
+
+ c['builders'] = [
+ {'name': 'b1', 'slavenames': ['bot1'],
+ 'builddir': 'b1', 'factory': f1},
+ ]
+ """)
+
+ def testSequence(self):
+ self.assertEqual(self.bot1.elastic_ip.public_ip, '127.0.0.1')
+ self.assertIdentical(self.boto.addresses['127.0.0.1'], None)
+ # let's start a build...
+ d = self.doBuild()
+ d.addCallback(self._testSequence_1)
+ return d
+ def _testSequence_1(self, res):
+ # build was a success!
+ self.failUnlessEqual(res.getResults(), SUCCESS)
+ self.failUnlessEqual(res.getSlavename(), "bot1")
+ # we have our address
+ self.assertIdentical(self.boto.addresses['127.0.0.1'],
+ self.bot1.instance)
+ # Let's let it shut down. We'll set the build_wait_timer to fire
+ # sooner, and wait for it to fire.
+ self.bot1.build_wait_timer.reset(0)
+ d = defer.Deferred()
+ reactor.callLater(0.5, d.callback, None)
+ d.addCallback(self._testSequence_2)
+ return d
+ def _testSequence_2(self, res):
+ # slave is insubstantiated
+ self.assertIdentical(self.bot1.slave, None)
+ self.failIf(self.bot1.substantiated)
+ self.assertIdentical(self.bot1.instance, None)
+ # the address is free again
+ self.assertIdentical(self.boto.addresses['127.0.0.1'], None)
+
+
+class Initialization(Mixin, unittest.TestCase):
+
+ def setUp(self):
+ self.boto_setUp1()
+
+ def tearDown(self):
+ self.boto_setUp2()
+ return Mixin.tearDown(self)
+
+ def testDefaultSeparateFile(self):
+ # set up .ec2/aws_id
+ home = os.environ['HOME']
+ fake_home = os.path.join(os.getcwd(), 'basedir') # see RunMixin.setUp
+ os.environ['HOME'] = fake_home
+ dir = os.path.join(fake_home, '.ec2')
+ os.mkdir(dir)
+ f = open(os.path.join(dir, 'aws_id'), 'w')
+ f.write('publickey\nprivatekey')
+ f.close()
+ # The Connection checks the file, so if the secret file is not parsed
+ # correctly, *this* is where it would fail. This is the real test.
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ 'ami-12345')
+ # for completeness, we'll show that the connection actually exists.
+ self.failUnless(isinstance(bot1.conn, Connection))
+ # clean up.
+ os.environ['HOME'] = home
+ self.rmtree(dir)
+
+ def testCustomSeparateFile(self):
+ # set up .ec2/aws_id
+ file_path = os.path.join(os.getcwd(), 'basedir', 'custom_aws_id')
+ f = open(file_path, 'w')
+ f.write('publickey\nprivatekey')
+ f.close()
+ # The Connection checks the file, so if the secret file is not parsed
+ # correctly, *this* is where it would fail. This is the real test.
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ 'ami-12345', aws_id_file_path=file_path)
+ # for completeness, we'll show that the connection actually exists.
+ self.failUnless(isinstance(bot1.conn, Connection))
+
+ def testNoAMIBroken(self):
+ # you must specify an AMI, or at least one of valid_ami_owners or
+ # valid_ami_location_regex
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ self.assertRaises(ValueError, EC2LatentBuildSlave, 'bot1', 'sekrit',
+ 'm1.large', identifier='publickey',
+ secret_identifier='privatekey')
+
+ def testAMIOwnerFilter(self):
+ # if you only specify an owner, you get the image owned by any of the
+ # owners that sorts last by the AMI's location.
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ valid_ami_owners=[11111111111],
+ identifier='publickey',
+ secret_identifier='privatekey'
+ )
+ self.assertEqual(bot1.get_image().location,
+ 'test-f0a/image.manifest.xml')
+ bot1 = EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ valid_ami_owners=[11111111111,
+ 22222222222],
+ identifier='publickey',
+ secret_identifier='privatekey'
+ )
+ self.assertEqual(bot1.get_image().location,
+ 'test-f0a/image.manifest.xml')
+ bot1 = EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ valid_ami_owners=[22222222222],
+ identifier='publickey',
+ secret_identifier='privatekey'
+ )
+ self.assertEqual(bot1.get_image().location,
+ 'test-e1b/image.manifest.xml')
+ bot1 = EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ valid_ami_owners=12345667890,
+ identifier='publickey',
+ secret_identifier='privatekey'
+ )
+ self.assertEqual(bot1.get_image().location,
+ 'test-xx/image.manifest.xml')
+
+ def testAMISimpleRegexFilter(self):
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large',
+ valid_ami_location_regex=r'test\-[a-z]\w+/image.manifest.xml',
+ identifier='publickey', secret_identifier='privatekey')
+ self.assertEqual(bot1.get_image().location,
+ 'test-xx/image.manifest.xml')
+ bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large',
+ valid_ami_location_regex=r'test\-[a-z]\d+\w/image.manifest.xml',
+ identifier='publickey', secret_identifier='privatekey')
+ self.assertEqual(bot1.get_image().location,
+ 'test-f0a/image.manifest.xml')
+ bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large', valid_ami_owners=[22222222222],
+ valid_ami_location_regex=r'test\-[a-z]\d+\w/image.manifest.xml',
+ identifier='publickey', secret_identifier='privatekey')
+ self.assertEqual(bot1.get_image().location,
+ 'test-e1b/image.manifest.xml')
+
+ def testAMIRegexAlphaSortFilter(self):
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large',
+ valid_ami_owners=[11111111111, 22222222222],
+ valid_ami_location_regex=r'test\-[a-z]\d+([a-z])/image.manifest.xml',
+ identifier='publickey', secret_identifier='privatekey')
+ self.assertEqual(bot1.get_image().location,
+ 'test-a5f/image.manifest.xml')
+
+ def testAMIRegexIntSortFilter(self):
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large',
+ valid_ami_owners=[11111111111, 22222222222],
+ valid_ami_location_regex=r'test\-[a-z](\d+)[a-z]/image.manifest.xml',
+ identifier='publickey', secret_identifier='privatekey')
+ self.assertEqual(bot1.get_image().location,
+ 'test-c30d/image.manifest.xml')
+
+ def testNewSecurityGroup(self):
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large', 'ami-12345',
+ identifier='publickey', secret_identifier='privatekey',
+ security_name='custom_security_name')
+ self.assertEqual(
+ self.boto.security_groups['custom_security_name'].value,
+ 'custom_security_name')
+ self.assertEqual(bot1.security_name, 'custom_security_name')
+
+ def testNewKeypairName(self):
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large', 'ami-12345',
+ identifier='publickey', secret_identifier='privatekey',
+ keypair_name='custom_keypair_name')
+ self.assertIn('custom_keypair_name', self.boto.keys)
+ self.assertEqual(bot1.keypair_name, 'custom_keypair_name')
diff --git a/buildbot/buildbot/test/test_limitlogs.py b/buildbot/buildbot/test/test_limitlogs.py
new file mode 100644
index 0000000..9fd5bea
--- /dev/null
+++ b/buildbot/buildbot/test/test_limitlogs.py
@@ -0,0 +1,94 @@
+# -*- test-case-name: buildbot.test.test_limitlogs -*-
+
+from twisted.trial import unittest
+from twisted.internet import reactor, defer
+from twisted.internet.utils import getProcessValue, getProcessOutput
+import twisted
+from twisted.python.versions import Version
+from twisted.python.procutils import which
+from twisted.python import log, logfile
+import os
+
+'''Testcases to verify that the --log-size and --log-count options to
+create-master and create-slave actually work.
+
+These features require Twisted 8.2.0 to work.
+
+Currently only testing the master side of it.
+'''
+
+
+master_cfg = """from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.buildslave import BuildSlave
+s = factory.s
+
+f1 = factory.QuickBuildFactory('fakerep', 'cvsmodule', configure=None)
+
+f2 = factory.BuildFactory([
+ dummy.Dummy(timeout=1),
+ dummy.RemoteDummy(timeout=2),
+ ])
+
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit')]
+c['schedulers'] = []
+c['builders'] = []
+c['builders'].append({'name':'quick', 'slavename':'bot1',
+ 'builddir': 'quickdir', 'factory': f1})
+c['slavePortnum'] = 0
+
+from twisted.python import log
+for i in xrange(100):
+ log.msg("this is a mighty long string and I'm going to write it into the log often")
+"""
+
+class MasterLogs(unittest.TestCase):
+ '''Limit master log size and count.'''
+
+ def setUp(self):
+ if twisted.version < Version("twisted", 8, 2, 0):
+ self.skip = True
+ raise unittest.SkipTest("Twisted 8.2.0 or higher required")
+
+ def testLog(self):
+ exes = which('buildbot')
+ if not exes:
+ raise unittest.SkipTest("Buildbot needs to be installed")
+ self.buildbotexe = exes[0]
+ d = getProcessValue(self.buildbotexe,
+ ['create-master', '--log-size=1000', '--log-count=2',
+ 'master'])
+ d.addCallback(self._master_created)
+ return d
+
+ def _master_created(self, res):
+ open('master/master.cfg', 'w').write(master_cfg)
+ d = getProcessOutput(self.buildbotexe,
+ ['start', 'master'])
+ d.addBoth(self._master_running)
+ return d
+
+ def _master_running(self, res):
+ self.addCleanup(self._stop_master)
+ d = defer.Deferred()
+ reactor.callLater(2, d.callback, None)
+ d.addCallback(self._do_tests)
+ return d
+
+ def _do_tests(self, rv):
+ '''The actual method doing the tests on the master twistd.log'''
+ lf = logfile.LogFile.fromFullPath(os.path.join('master', 'twistd.log'))
+ self.failUnlessEqual(lf.listLogs(), [1,2])
+ lr = lf.getLog(1)
+ firstline = lr.readLines()[0]
+ self.failUnless(firstline.endswith("this is a mighty long string and I'm going to write it into the log often\n"))
+
+ def _stop_master(self):
+ d = getProcessOutput(self.buildbotexe,
+ ['stop', 'master'])
+ d.addBoth(self._master_stopped)
+ return d
+
+ def _master_stopped(self, res):
+ print "master stopped"
diff --git a/buildbot/buildbot/test/test_locks.py b/buildbot/buildbot/test/test_locks.py
new file mode 100644
index 0000000..0c1e0b5
--- /dev/null
+++ b/buildbot/buildbot/test/test_locks.py
@@ -0,0 +1,495 @@
+# -*- test-case-name: buildbot.test.test_locks -*-
+
+import random
+
+from twisted.trial import unittest
+from twisted.internet import defer, reactor
+
+from buildbot import master
+from buildbot.steps import dummy
+from buildbot.sourcestamp import SourceStamp
+from buildbot.process.base import BuildRequest
+from buildbot.test.runutils import RunMixin
+from buildbot import locks
+
+def claimHarder(lock, owner, la):
+ """Return a Deferred that will fire when the lock is claimed. Keep trying
+ until we succeed."""
+ if lock.isAvailable(la):
+ #print "claimHarder(%s): claiming" % owner
+ lock.claim(owner, la)
+ return defer.succeed(lock)
+ #print "claimHarder(%s): waiting" % owner
+ d = lock.waitUntilMaybeAvailable(owner, la)
+ d.addCallback(claimHarder, owner, la)
+ return d
+
+def hold(lock, owner, la, mode="now"):
+ if mode == "now":
+ lock.release(owner, la)
+ elif mode == "very soon":
+ reactor.callLater(0, lock.release, owner, la)
+ elif mode == "soon":
+ reactor.callLater(0.1, lock.release, owner, la)
+
+class Unit(unittest.TestCase):
+ def testNowCounting(self):
+ lid = locks.MasterLock('dummy')
+ la = locks.LockAccess(lid, 'counting')
+ return self._testNow(la)
+
+ def testNowExclusive(self):
+ lid = locks.MasterLock('dummy')
+ la = locks.LockAccess(lid, 'exclusive')
+ return self._testNow(la)
+
+ def _testNow(self, la):
+ l = locks.BaseLock("name")
+ self.failUnless(l.isAvailable(la))
+ l.claim("owner1", la)
+ self.failIf(l.isAvailable(la))
+ l.release("owner1", la)
+ self.failUnless(l.isAvailable(la))
+
+ def testNowMixed1(self):
+ """ Test exclusive is not possible when a counting has the lock """
+ lid = locks.MasterLock('dummy')
+ lac = locks.LockAccess(lid, 'counting')
+ lae = locks.LockAccess(lid, 'exclusive')
+ l = locks.BaseLock("name", maxCount=2)
+ self.failUnless(l.isAvailable(lac))
+ l.claim("count-owner", lac)
+ self.failIf(l.isAvailable(lae))
+ l.release("count-owner", lac)
+ self.failUnless(l.isAvailable(lac))
+
+ def testNowMixed2(self):
+ """ Test counting is not possible when an exclusive has the lock """
+ lid = locks.MasterLock('dummy')
+ lac = locks.LockAccess(lid, 'counting')
+ lae = locks.LockAccess(lid, 'exclusive')
+ l = locks.BaseLock("name", maxCount=2)
+ self.failUnless(l.isAvailable(lae))
+ l.claim("count-owner", lae)
+ self.failIf(l.isAvailable(lac))
+ l.release("count-owner", lae)
+ self.failUnless(l.isAvailable(lae))
+
+ def testLaterCounting(self):
+ lid = locks.MasterLock('dummy')
+ la = locks.LockAccess(lid, 'counting')
+ return self._testLater(la)
+
+ def testLaterExclusive(self):
+ lid = locks.MasterLock('dummy')
+ la = locks.LockAccess(lid, 'exclusive')
+ return self._testLater(la)
+
+ def _testLater(self, la):
+ lock = locks.BaseLock("name")
+ d = claimHarder(lock, "owner1", la)
+ d.addCallback(lambda lock: lock.release("owner1", la))
+ return d
+
+ def testCompetitionCounting(self):
+ lid = locks.MasterLock('dummy')
+ la = locks.LockAccess(lid, 'counting')
+ return self._testCompetition(la)
+
+ def testCompetitionExclusive(self):
+ lid = locks.MasterLock('dummy')
+ la = locks.LockAccess(lid, 'exclusive')
+ return self._testCompetition(la)
+
+ def _testCompetition(self, la):
+ lock = locks.BaseLock("name")
+ d = claimHarder(lock, "owner1", la)
+ d.addCallback(self._claim1, la)
+ return d
+ def _claim1(self, lock, la):
+ # we should have claimed it by now
+ self.failIf(lock.isAvailable(la))
+ # now set up two competing owners. We don't know which will get the
+ # lock first.
+ d2 = claimHarder(lock, "owner2", la)
+ d2.addCallback(hold, "owner2", la, "now")
+ d3 = claimHarder(lock, "owner3", la)
+ d3.addCallback(hold, "owner3", la, "soon")
+ dl = defer.DeferredList([d2,d3])
+ dl.addCallback(self._cleanup, lock, la)
+ # and release the lock in a moment
+ reactor.callLater(0.1, lock.release, "owner1", la)
+ return dl
+
+ def _cleanup(self, res, lock, la):
+ d = claimHarder(lock, "cleanup", la)
+ d.addCallback(lambda lock: lock.release("cleanup", la))
+ return d
+
+ def testRandomCounting(self):
+ lid = locks.MasterLock('dummy')
+ la = locks.LockAccess(lid, 'counting')
+ return self._testRandom(la)
+
+ def testRandomExclusive(self):
+ lid = locks.MasterLock('dummy')
+ la = locks.LockAccess(lid, 'exclusive')
+ return self._testRandom(la)
+
+ def _testRandom(self, la):
+ lock = locks.BaseLock("name")
+ dl = []
+ for i in range(100):
+ owner = "owner%d" % i
+ mode = random.choice(["now", "very soon", "soon"])
+ d = claimHarder(lock, owner, la)
+ d.addCallback(hold, owner, la, mode)
+ dl.append(d)
+ d = defer.DeferredList(dl)
+ d.addCallback(self._cleanup, lock, la)
+ return d
+
+class Multi(unittest.TestCase):
+ def testNowCounting(self):
+ lid = locks.MasterLock('dummy')
+ la = locks.LockAccess(lid, 'counting')
+ lock = locks.BaseLock("name", 2)
+ self.failUnless(lock.isAvailable(la))
+ lock.claim("owner1", la)
+ self.failUnless(lock.isAvailable(la))
+ lock.claim("owner2", la)
+ self.failIf(lock.isAvailable(la))
+ lock.release("owner1", la)
+ self.failUnless(lock.isAvailable(la))
+ lock.release("owner2", la)
+ self.failUnless(lock.isAvailable(la))
+
+ def testLaterCounting(self):
+ lid = locks.MasterLock('dummy')
+ la = locks.LockAccess(lid, 'counting')
+ lock = locks.BaseLock("name", 2)
+ lock.claim("owner1", la)
+ lock.claim("owner2", la)
+ d = claimHarder(lock, "owner3", la)
+ d.addCallback(lambda lock: lock.release("owner3", la))
+ lock.release("owner2", la)
+ lock.release("owner1", la)
+ return d
+
+ def _cleanup(self, res, lock, count, la):
+ dl = []
+ for i in range(count):
+ d = claimHarder(lock, "cleanup%d" % i, la)
+ dl.append(d)
+ d2 = defer.DeferredList(dl)
+ # once all locks are claimed, we know that any previous owners have
+ # been flushed out
+ def _release(res):
+ for i in range(count):
+ lock.release("cleanup%d" % i, la)
+ d2.addCallback(_release)
+ return d2
+
+ def testRandomCounting(self):
+ lid = locks.MasterLock('dummy')
+ la = locks.LockAccess(lid, 'counting')
+ COUNT = 5
+ lock = locks.BaseLock("name", COUNT)
+ dl = []
+ for i in range(100):
+ owner = "owner%d" % i
+ mode = random.choice(["now", "very soon", "soon"])
+ d = claimHarder(lock, owner, la)
+ def _check(lock):
+ self.failIf(len(lock.owners) > COUNT)
+ return lock
+ d.addCallback(_check)
+ d.addCallback(hold, owner, la, mode)
+ dl.append(d)
+ d = defer.DeferredList(dl)
+ d.addCallback(self._cleanup, lock, COUNT, la)
+ return d
+
+class Dummy:
+ pass
+
+def slave(slavename):
+ slavebuilder = Dummy()
+ slavebuilder.slave = Dummy()
+ slavebuilder.slave.slavename = slavename
+ return slavebuilder
+
+class MakeRealLock(unittest.TestCase):
+
+ def make(self, lockid):
+ return lockid.lockClass(lockid)
+
+ def testMaster(self):
+ mid1 = locks.MasterLock("name1")
+ mid2 = locks.MasterLock("name1")
+ mid3 = locks.MasterLock("name3")
+ mid4 = locks.MasterLock("name1", 3)
+ self.failUnlessEqual(mid1, mid2)
+ self.failIfEqual(mid1, mid3)
+ # they should all be hashable
+ d = {mid1: 1, mid2: 2, mid3: 3, mid4: 4}
+
+ l1 = self.make(mid1)
+ self.failUnlessEqual(l1.name, "name1")
+ self.failUnlessEqual(l1.maxCount, 1)
+ self.failUnlessIdentical(l1.getLock(slave("slave1")), l1)
+ l4 = self.make(mid4)
+ self.failUnlessEqual(l4.name, "name1")
+ self.failUnlessEqual(l4.maxCount, 3)
+ self.failUnlessIdentical(l4.getLock(slave("slave1")), l4)
+
+ def testSlave(self):
+ sid1 = locks.SlaveLock("name1")
+ sid2 = locks.SlaveLock("name1")
+ sid3 = locks.SlaveLock("name3")
+ sid4 = locks.SlaveLock("name1", maxCount=3)
+ mcfs = {"bigslave": 4, "smallslave": 1}
+ sid5 = locks.SlaveLock("name1", maxCount=3, maxCountForSlave=mcfs)
+ mcfs2 = {"bigslave": 4, "smallslave": 1}
+ sid5a = locks.SlaveLock("name1", maxCount=3, maxCountForSlave=mcfs2)
+ mcfs3 = {"bigslave": 1, "smallslave": 99}
+ sid5b = locks.SlaveLock("name1", maxCount=3, maxCountForSlave=mcfs3)
+ self.failUnlessEqual(sid1, sid2)
+ self.failIfEqual(sid1, sid3)
+ self.failIfEqual(sid1, sid4)
+ self.failIfEqual(sid1, sid5)
+ self.failUnlessEqual(sid5, sid5a)
+ self.failIfEqual(sid5a, sid5b)
+ # they should all be hashable
+ d = {sid1: 1, sid2: 2, sid3: 3, sid4: 4, sid5: 5, sid5a: 6, sid5b: 7}
+
+ l1 = self.make(sid1)
+ self.failUnlessEqual(l1.name, "name1")
+ self.failUnlessEqual(l1.maxCount, 1)
+ l1s1 = l1.getLock(slave("slave1"))
+ self.failIfIdentical(l1s1, l1)
+
+ l4 = self.make(sid4)
+ self.failUnlessEqual(l4.maxCount, 3)
+ l4s1 = l4.getLock(slave("slave1"))
+ self.failUnlessEqual(l4s1.maxCount, 3)
+
+ l5 = self.make(sid5)
+ l5s1 = l5.getLock(slave("bigslave"))
+ l5s2 = l5.getLock(slave("smallslave"))
+ l5s3 = l5.getLock(slave("unnamedslave"))
+ self.failUnlessEqual(l5s1.maxCount, 4)
+ self.failUnlessEqual(l5s2.maxCount, 1)
+ self.failUnlessEqual(l5s3.maxCount, 3)
+
+class GetLock(unittest.TestCase):
+ def testGet(self):
+ # the master.cfg file contains "lock ids", which are instances of
+ # MasterLock and SlaveLock but which are not actually Locks per se.
+ # When the build starts, these markers are turned into RealMasterLock
+ # and RealSlaveLock instances. This ensures that any builds running
+ # on slaves that were unaffected by the config change are still
+ # referring to the same Lock instance as new builds by builders that
+ # *were* affected by the change. There have been bugs in the past in
+ # which this didn't happen, and the Locks were bypassed because half
+ # the builders were using one incarnation of the lock while the other
+ # half were using a separate (but equal) incarnation.
+ #
+ # Changing the lock id in any way should cause it to be replaced in
+ # the BotMaster. This will result in a couple of funky artifacts:
+ # builds in progress might pay attention to a different lock, so we
+ # might bypass the locking for the duration of a couple builds.
+ # There's also the problem of old Locks lingering around in
+ # BotMaster.locks, but they're small and shouldn't really cause a
+ # problem.
+
+ b = master.BotMaster()
+ l1 = locks.MasterLock("one")
+ l1a = locks.MasterLock("one")
+ l2 = locks.MasterLock("one", maxCount=4)
+
+ rl1 = b.getLockByID(l1)
+ rl2 = b.getLockByID(l1a)
+ self.failUnlessIdentical(rl1, rl2)
+ rl3 = b.getLockByID(l2)
+ self.failIfIdentical(rl1, rl3)
+
+ s1 = locks.SlaveLock("one")
+ s1a = locks.SlaveLock("one")
+ s2 = locks.SlaveLock("one", maxCount=4)
+ s3 = locks.SlaveLock("one", maxCount=4,
+ maxCountForSlave={"a":1, "b":2})
+ s3a = locks.SlaveLock("one", maxCount=4,
+ maxCountForSlave={"a":1, "b":2})
+ s4 = locks.SlaveLock("one", maxCount=4,
+ maxCountForSlave={"a":4, "b":4})
+
+ rl1 = b.getLockByID(s1)
+ rl2 = b.getLockByID(s1a)
+ self.failUnlessIdentical(rl1, rl2)
+ rl3 = b.getLockByID(s2)
+ self.failIfIdentical(rl1, rl3)
+ rl4 = b.getLockByID(s3)
+ self.failIfIdentical(rl1, rl4)
+ self.failIfIdentical(rl3, rl4)
+ rl5 = b.getLockByID(s3a)
+ self.failUnlessIdentical(rl4, rl5)
+ rl6 = b.getLockByID(s4)
+ self.failIfIdentical(rl5, rl6)
+
+
+
class LockStep(dummy.Dummy):
    """A Dummy step that records ("start", n) / ("done", n) events.

    Events are appended to the first BuildRequest's shared `events` list so
    the Locks tests can assert on the order in which locked builds ran.
    """

    def start(self):
        request = self.build.requests[0]
        request.events.append(("start", request.number))
        dummy.Dummy.start(self)

    def done(self):
        request = self.build.requests[0]
        request.events.append(("done", request.number))
        dummy.Dummy.done(self)
+
# Master config used by the Locks tests: two slaves, six builders that share
# a SlaveLock ('first') and/or a MasterLock ('second'), exercised via
# LockStep steps. The string is exec'd by master.loadConfig().
config_1 = """
from buildbot import locks
from buildbot.process import factory
from buildbot.buildslave import BuildSlave
s = factory.s
from buildbot.test.test_locks import LockStep

BuildmasterConfig = c = {}
c['slaves'] = [BuildSlave('bot1', 'sekrit'), BuildSlave('bot2', 'sekrit')]
c['schedulers'] = []
c['slavePortnum'] = 0

first_lock = locks.SlaveLock('first')
second_lock = locks.MasterLock('second')
f1 = factory.BuildFactory([s(LockStep, timeout=2, locks=[first_lock])])
f2 = factory.BuildFactory([s(LockStep, timeout=3, locks=[second_lock])])
f3 = factory.BuildFactory([s(LockStep, timeout=2, locks=[])])

b1a = {'name': 'full1a', 'slavename': 'bot1', 'builddir': '1a', 'factory': f1}
b1b = {'name': 'full1b', 'slavename': 'bot1', 'builddir': '1b', 'factory': f1}
b1c = {'name': 'full1c', 'slavename': 'bot1', 'builddir': '1c', 'factory': f3,
       'locks': [first_lock, second_lock]}
b1d = {'name': 'full1d', 'slavename': 'bot1', 'builddir': '1d', 'factory': f2}
b2a = {'name': 'full2a', 'slavename': 'bot2', 'builddir': '2a', 'factory': f1}
b2b = {'name': 'full2b', 'slavename': 'bot2', 'builddir': '2b', 'factory': f3,
       'locks': [second_lock]}
c['builders'] = [b1a, b1b, b1c, b1d, b2a, b2b]
"""

# Same config, but full1b's builddir changes ('1b' -> '1B'), which forces
# that builder to be recreated on reload; used by testLock1a to check that
# the recreated builder still shares the same Lock instance.
config_1a = config_1 + \
"""
b1b = {'name': 'full1b', 'slavename': 'bot1', 'builddir': '1B', 'factory': f1}
c['builders'] = [b1a, b1b, b1c, b1d, b2a, b2b]
"""
+
+
class Locks(RunMixin, unittest.TestCase):
    """End-to-end checks that SlaveLocks and MasterLocks serialize builds.

    Every LockStep appends ("start", n) / ("done", n) tuples to the shared
    self.events list, so each test simply asserts on the event ordering.
    """

    def setUp(self):
        N = 'test_builder'
        RunMixin.setUp(self)
        self.req1 = req1 = BuildRequest("forced build", SourceStamp(), N)
        req1.number = 1
        self.req2 = req2 = BuildRequest("forced build", SourceStamp(), N)
        req2.number = 2
        self.req3 = req3 = BuildRequest("forced build", SourceStamp(), N)
        req3.number = 3
        # one shared list so ordering across builders can be asserted
        req1.events = req2.events = req3.events = self.events = []
        d = self.master.loadConfig(config_1)
        d.addCallback(lambda res: self.master.startService())
        d.addCallback(lambda res: self.connectSlaves(["bot1", "bot2"],
                                                     ["full1a", "full1b",
                                                      "full1c", "full1d",
                                                      "full2a", "full2b"]))
        return d

    def testLock1(self):
        # two builds on the same slave, both builders using the same
        # SlaveLock via their LockStep
        self.control.getBuilder("full1a").requestBuild(self.req1)
        self.control.getBuilder("full1b").requestBuild(self.req2)
        d = defer.DeferredList([self.req1.waitUntilFinished(),
                                self.req2.waitUntilFinished()])
        d.addCallback(self._testLock1_1)
        return d

    def _testLock1_1(self, res):
        # full1a should complete its step before full1b starts it
        self.failUnlessEqual(self.events,
                             [("start", 1), ("done", 1),
                              ("start", 2), ("done", 2)])

    def testLock1a(self):
        # just like testLock1, but we reload the config file first, with a
        # change that causes full1b to be changed. This tickles a design bug
        # in which full1a and full1b wind up with distinct Lock instances.
        d = self.master.loadConfig(config_1a)
        d.addCallback(self._testLock1a_1)
        return d

    def _testLock1a_1(self, res):
        self.control.getBuilder("full1a").requestBuild(self.req1)
        self.control.getBuilder("full1b").requestBuild(self.req2)
        d = defer.DeferredList([self.req1.waitUntilFinished(),
                                self.req2.waitUntilFinished()])
        d.addCallback(self._testLock1a_2)
        return d

    def _testLock1a_2(self, res):
        # full1a should complete its step before full1b starts it
        self.failUnlessEqual(self.events,
                             [("start", 1), ("done", 1),
                              ("start", 2), ("done", 2)])

    def testLock2(self):
        # two builds run on separate slaves with slave-scoped locks should
        # not interfere
        self.control.getBuilder("full1a").requestBuild(self.req1)
        self.control.getBuilder("full2a").requestBuild(self.req2)
        d = defer.DeferredList([self.req1.waitUntilFinished(),
                                self.req2.waitUntilFinished()])
        d.addCallback(self._testLock2_1)
        return d

    def _testLock2_1(self, res):
        # full2a should start its step before full1a finishes it. They run on
        # different slaves, however, so they might start in either order.
        self.failUnless(self.events[:2] == [("start", 1), ("start", 2)] or
                        self.events[:2] == [("start", 2), ("start", 1)])

    def testLock3(self):
        # two builds run on separate slaves with master-scoped locks should
        # not overlap
        self.control.getBuilder("full1c").requestBuild(self.req1)
        self.control.getBuilder("full2b").requestBuild(self.req2)
        d = defer.DeferredList([self.req1.waitUntilFinished(),
                                self.req2.waitUntilFinished()])
        d.addCallback(self._testLock3_1)
        return d

    def _testLock3_1(self, res):
        # full2b should not start until after full1c finishes. The builds run
        # on different slaves, so we can't really predict which will start
        # first. The important thing is that they don't overlap.
        self.failUnless(self.events == [("start", 1), ("done", 1),
                                        ("start", 2), ("done", 2)]
                        or self.events == [("start", 2), ("done", 2),
                                           ("start", 1), ("done", 1)]
                        )

    def testLock4(self):
        # full1a uses the SlaveLock, full1d uses the MasterLock, and full1c
        # claims both at the builder level
        self.control.getBuilder("full1a").requestBuild(self.req1)
        self.control.getBuilder("full1c").requestBuild(self.req2)
        self.control.getBuilder("full1d").requestBuild(self.req3)
        d = defer.DeferredList([self.req1.waitUntilFinished(),
                                self.req2.waitUntilFinished(),
                                self.req3.waitUntilFinished()])
        d.addCallback(self._testLock4_1)
        return d

    def _testLock4_1(self, res):
        # full1a starts, then full1d starts (because they do not interfere).
        # Once both are done, full1c can run.
        self.failUnlessEqual(self.events,
                             [("start", 1), ("start", 3),
                              ("done", 1), ("done", 3),
                              ("start", 2), ("done", 2)])
+
diff --git a/buildbot/buildbot/test/test_maildir.py b/buildbot/buildbot/test/test_maildir.py
new file mode 100644
index 0000000..b79cbd3
--- /dev/null
+++ b/buildbot/buildbot/test/test_maildir.py
@@ -0,0 +1,92 @@
+# -*- test-case-name: buildbot.test.test_maildir -*-
+
+from twisted.trial import unittest
+import os, shutil
+from buildbot.changes.mail import FCMaildirSource
+from twisted.internet import defer, reactor, task
+from twisted.python import util, log
+
class TimeOutError(Exception):
    """The messages were not received in a timely fashion."""
+
class MaildirTest(unittest.TestCase):
    """Deliver stored freshcvs mails into a maildir one at a time and wait
    until the FCMaildirSource has produced a Change for each of them."""

    # delay between delivering successive messages into the maildir
    SECONDS_PER_MESSAGE = 1.0

    def setUp(self):
        log.msg("creating empty maildir")
        self.maildir = "test-maildir"
        if os.path.isdir(self.maildir):
            # a previous run must have died before tearDown could clean up;
            # log before removing so the message matches the action
            log.msg("removing stale maildir")
            shutil.rmtree(self.maildir)
        os.mkdir(self.maildir)
        os.mkdir(os.path.join(self.maildir, "cur"))
        os.mkdir(os.path.join(self.maildir, "new"))
        os.mkdir(os.path.join(self.maildir, "tmp"))
        self.source = None

    def tearDown(self):
        log.msg("removing old maildir")
        shutil.rmtree(self.maildir)
        if self.source:
            return self.source.stopService()

    def addChange(self, c):
        # the FCMaildirSource calls parent.addChange for each parsed mail.
        # NOTE: this assumes every message results in a Change, which isn't
        # true for msg8-prefix
        log.msg("got change")
        self.changes.append(c)

    def deliverMail(self, msg):
        """Copy one message file into the maildir's new/ directory."""
        log.msg("delivering", msg)
        newdir = os.path.join(self.maildir, "new")
        # to do this right, use safecat
        shutil.copy(msg, newdir)

    def poll(self, changes, count, d):
        # LoopingCall target: fire the deferred once every message has
        # resulted in a Change
        if len(changes) == count:
            d.callback("passed")

    def testMaildir(self):
        self.changes = []
        s = self.source = FCMaildirSource(self.maildir)
        s.parent = self
        s.startService()
        testfiles_dir = util.sibpath(__file__, "mail")
        testfiles = [msg for msg in os.listdir(testfiles_dir)
                     if msg.startswith("freshcvs")]
        assert testfiles
        testfiles.sort()
        count = len(testfiles)
        d = defer.Deferred()

        # schedule one delivery per message, SECONDS_PER_MESSAGE apart
        for i in range(count):
            msg = testfiles[i]
            reactor.callLater(self.SECONDS_PER_MESSAGE*i, self.deliverMail,
                              os.path.join(testfiles_dir, msg))
        self.loop = task.LoopingCall(self.poll, self.changes, count, d)
        self.loop.start(0.1)
        t = reactor.callLater(self.SECONDS_PER_MESSAGE*count + 15,
                              d.errback, TimeOutError)
        # TODO: verify the messages, should use code from test_mailparse but
        # I'm not sure how to factor the verification routines out in a
        # useful fashion

        #for i in range(count):
        #    msg, check = test_messages[i]
        #    check(self, self.changes[i])

        def _shutdown(res):
            # cancel the watchdog and stop polling whether we passed or not
            if t.active():
                t.cancel()
            self.loop.stop()
            return res
        d.addBoth(_shutdown)

        return d

    # TODO: it would be nice to set this timeout after counting the number of
    # messages in buildbot/test/mail/msg*, but I suspect trial wants to have
    # this number before the method starts, and maybe even before setUp()
    testMaildir.timeout = SECONDS_PER_MESSAGE*9 + 15
+
diff --git a/buildbot/buildbot/test/test_mailparse.py b/buildbot/buildbot/test/test_mailparse.py
new file mode 100644
index 0000000..dc60269
--- /dev/null
+++ b/buildbot/buildbot/test/test_mailparse.py
@@ -0,0 +1,293 @@
+# -*- test-case-name: buildbot.test.test_mailparse -*-
+
+from twisted.trial import unittest
+from twisted.python import util
+from buildbot.changes import mail
+
class TestFreshCVS(unittest.TestCase):
    """Parse stored FreshCVS notification mails (no path prefix)."""

    def get(self, msg):
        """Parse one stored mail file and return the resulting Change."""
        msg = util.sibpath(__file__, msg)
        s = mail.FCMaildirSource(None)
        # close the file explicitly instead of leaking the descriptor
        f = open(msg, "r")
        try:
            return s.parse_file(f)
        finally:
            f.close()

    def testMsg1(self):
        c = self.get("mail/freshcvs.1")
        self.assertEqual(c.who, "moshez")
        self.assertEqual(set(c.files), set(["Twisted/debian/python-twisted.menu.in"]))
        self.assertEqual(c.comments, "Instance massenger, apparently\n")
        self.assertEqual(c.isdir, 0)

    def testMsg2(self):
        c = self.get("mail/freshcvs.2")
        self.assertEqual(c.who, "itamarst")
        self.assertEqual(set(c.files), set(["Twisted/twisted/web/woven/form.py",
                                            "Twisted/twisted/python/formmethod.py"]))
        self.assertEqual(c.comments,
                         "submit formmethod now subclass of Choice\n")
        self.assertEqual(c.isdir, 0)

    def testMsg3(self):
        # same as msg2 but missing the ViewCVS section
        c = self.get("mail/freshcvs.3")
        self.assertEqual(c.who, "itamarst")
        self.assertEqual(set(c.files), set(["Twisted/twisted/web/woven/form.py",
                                            "Twisted/twisted/python/formmethod.py"]))
        self.assertEqual(c.comments,
                         "submit formmethod now subclass of Choice\n")
        self.assertEqual(c.isdir, 0)

    def testMsg4(self):
        # same as msg3 but also missing CVS patch section
        c = self.get("mail/freshcvs.4")
        self.assertEqual(c.who, "itamarst")
        self.assertEqual(set(c.files), set(["Twisted/twisted/web/woven/form.py",
                                            "Twisted/twisted/python/formmethod.py"]))
        self.assertEqual(c.comments,
                         "submit formmethod now subclass of Choice\n")
        self.assertEqual(c.isdir, 0)

    def testMsg5(self):
        # creates a directory
        c = self.get("mail/freshcvs.5")
        self.assertEqual(c.who, "etrepum")
        self.assertEqual(set(c.files), set(["Twisted/doc/examples/cocoaDemo"]))
        self.assertEqual(c.comments,
                         "Directory /cvs/Twisted/doc/examples/cocoaDemo added to the repository\n")
        self.assertEqual(c.isdir, 1)

    def testMsg6(self):
        # adds files
        c = self.get("mail/freshcvs.6")
        self.assertEqual(c.who, "etrepum")
        self.assertEqual(set(c.files), set([
            "Twisted/doc/examples/cocoaDemo/MyAppDelegate.py",
            "Twisted/doc/examples/cocoaDemo/__main__.py",
            "Twisted/doc/examples/cocoaDemo/bin-python-main.m",
            "Twisted/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings",
            "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib",
            "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib",
            "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib",
            "Twisted/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj"]))
        self.assertEqual(c.comments,
                         "Cocoa (OS X) clone of the QT demo, using polling reactor\n\nRequires pyobjc ( http://pyobjc.sourceforge.net ), it's not much different than the template project. The reactor is iterated periodically by a repeating NSTimer.\n")
        self.assertEqual(c.isdir, 0)

    def testMsg7(self):
        # deletes files
        c = self.get("mail/freshcvs.7")
        self.assertEqual(c.who, "etrepum")
        self.assertEqual(set(c.files), set([
            "Twisted/doc/examples/cocoaDemo/MyAppDelegate.py",
            "Twisted/doc/examples/cocoaDemo/__main__.py",
            "Twisted/doc/examples/cocoaDemo/bin-python-main.m",
            "Twisted/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings",
            "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib",
            "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib",
            "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib",
            "Twisted/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj"]))
        self.assertEqual(c.comments,
                         "Directories break debian build script, waiting for reasonable fix\n")
        self.assertEqual(c.isdir, 0)

    def testMsg8(self):
        # files outside Twisted/
        c = self.get("mail/freshcvs.8")
        self.assertEqual(c.who, "acapnotic")
        self.assertEqual(set(c.files), set([ "CVSROOT/freshCfg" ]))
        self.assertEqual(c.comments, "it doesn't work with invalid syntax\n")
        self.assertEqual(c.isdir, 0)

    def testMsg9(self):
        # also creates a directory
        c = self.get("mail/freshcvs.9")
        self.assertEqual(c.who, "exarkun")
        self.assertEqual(set(c.files), set(["Twisted/sandbox/exarkun/persist-plugin"]))
        self.assertEqual(c.comments,
                         "Directory /cvs/Twisted/sandbox/exarkun/persist-plugin added to the repository\n")
        self.assertEqual(c.isdir, 1)
+
+
class TestFreshCVS_Prefix(unittest.TestCase):
    """Parse the same FreshCVS mails with prefix="Twisted/", which strips the
    prefix from file names and drops changes entirely outside it."""

    def get(self, msg):
        """Parse one stored mail file and return the resulting Change."""
        msg = util.sibpath(__file__, msg)
        s = mail.FCMaildirSource(None)
        # close the file explicitly instead of leaking the descriptor
        f = open(msg, "r")
        try:
            return s.parse_file(f, prefix="Twisted/")
        finally:
            f.close()

    def testMsg1p(self):
        c = self.get("mail/freshcvs.1")
        self.assertEqual(c.who, "moshez")
        self.assertEqual(set(c.files), set(["debian/python-twisted.menu.in"]))
        self.assertEqual(c.comments, "Instance massenger, apparently\n")

    def testMsg2p(self):
        c = self.get("mail/freshcvs.2")
        self.assertEqual(c.who, "itamarst")
        self.assertEqual(set(c.files), set(["twisted/web/woven/form.py",
                                            "twisted/python/formmethod.py"]))
        self.assertEqual(c.comments,
                         "submit formmethod now subclass of Choice\n")

    def testMsg3p(self):
        # same as msg2 but missing the ViewCVS section
        c = self.get("mail/freshcvs.3")
        self.assertEqual(c.who, "itamarst")
        self.assertEqual(set(c.files), set(["twisted/web/woven/form.py",
                                            "twisted/python/formmethod.py"]))
        self.assertEqual(c.comments,
                         "submit formmethod now subclass of Choice\n")

    def testMsg4p(self):
        # same as msg3 but also missing CVS patch section
        c = self.get("mail/freshcvs.4")
        self.assertEqual(c.who, "itamarst")
        self.assertEqual(set(c.files), set(["twisted/web/woven/form.py",
                                            "twisted/python/formmethod.py"]))
        self.assertEqual(c.comments,
                         "submit formmethod now subclass of Choice\n")

    def testMsg5p(self):
        # creates a directory
        c = self.get("mail/freshcvs.5")
        self.assertEqual(c.who, "etrepum")
        self.assertEqual(set(c.files), set(["doc/examples/cocoaDemo"]))
        self.assertEqual(c.comments,
                         "Directory /cvs/Twisted/doc/examples/cocoaDemo added to the repository\n")
        self.assertEqual(c.isdir, 1)

    def testMsg6p(self):
        # adds files
        c = self.get("mail/freshcvs.6")
        self.assertEqual(c.who, "etrepum")
        self.assertEqual(set(c.files), set([
            "doc/examples/cocoaDemo/MyAppDelegate.py",
            "doc/examples/cocoaDemo/__main__.py",
            "doc/examples/cocoaDemo/bin-python-main.m",
            "doc/examples/cocoaDemo/English.lproj/InfoPlist.strings",
            "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib",
            "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib",
            "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib",
            "doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj"]))
        self.assertEqual(c.comments,
                         "Cocoa (OS X) clone of the QT demo, using polling reactor\n\nRequires pyobjc ( http://pyobjc.sourceforge.net ), it's not much different than the template project. The reactor is iterated periodically by a repeating NSTimer.\n")
        self.assertEqual(c.isdir, 0)

    def testMsg7p(self):
        # deletes files
        c = self.get("mail/freshcvs.7")
        self.assertEqual(c.who, "etrepum")
        self.assertEqual(set(c.files), set([
            "doc/examples/cocoaDemo/MyAppDelegate.py",
            "doc/examples/cocoaDemo/__main__.py",
            "doc/examples/cocoaDemo/bin-python-main.m",
            "doc/examples/cocoaDemo/English.lproj/InfoPlist.strings",
            "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib",
            "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib",
            "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib",
            "doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj"]))
        self.assertEqual(c.comments,
                         "Directories break debian build script, waiting for reasonable fix\n")
        self.assertEqual(c.isdir, 0)

    def testMsg8p(self):
        # files outside Twisted/ produce no Change at all
        c = self.get("mail/freshcvs.8")
        self.assertEqual(c, None)
+
+
class TestSyncmail(unittest.TestCase):
    """Parse stored syncmail notification mails."""

    def get(self, msg):
        """Parse one stored mail with prefix="buildbot/"; return the Change."""
        msg = util.sibpath(__file__, msg)
        s = mail.SyncmailMaildirSource(None)
        # close the file explicitly instead of leaking the descriptor
        f = open(msg, "r")
        try:
            return s.parse_file(f, prefix="buildbot/")
        finally:
            f.close()

    def getNoPrefix(self, msg):
        """Parse one stored mail without a prefix; return the Change."""
        msg = util.sibpath(__file__, msg)
        s = mail.SyncmailMaildirSource(None)
        f = open(msg, "r")
        try:
            return s.parse_file(f)
        finally:
            f.close()

    def testMsgS1(self):
        c = self.get("mail/syncmail.1")
        self.failUnless(c is not None)
        self.assertEqual(c.who, "warner")
        self.assertEqual(set(c.files), set(["buildbot/changes/freshcvsmail.py"]))
        self.assertEqual(c.comments,
                         "remove leftover code, leave a temporary compatibility import. Note! Start\nimporting FCMaildirSource from changes.mail instead of changes.freshcvsmail\n")
        self.assertEqual(c.isdir, 0)

    def testMsgS2(self):
        c = self.get("mail/syncmail.2")
        self.assertEqual(c.who, "warner")
        self.assertEqual(set(c.files), set(["ChangeLog"]))
        self.assertEqual(c.comments, "\t* NEWS: started adding new features\n")
        self.assertEqual(c.isdir, 0)

    def testMsgS3(self):
        # no files under the prefix, so no Change
        c = self.get("mail/syncmail.3")
        self.failUnless(c == None)

    def testMsgS4(self):
        c = self.get("mail/syncmail.4")
        self.assertEqual(c.who, "warner")
        self.assertEqual(set(c.files),
                         set(["test/mail/syncmail.1",
                              "test/mail/syncmail.2",
                              "test/mail/syncmail.3"]))
        self.assertEqual(c.comments, "test cases for syncmail parser\n")
        self.assertEqual(c.isdir, 0)
        self.assertEqual(c.branch, None)

    # tests a tag
    def testMsgS5(self):
        c = self.getNoPrefix("mail/syncmail.5")
        self.failUnless(c)
        self.assertEqual(c.who, "thomas")
        self.assertEqual(set(c.files),
                         set(['test1/MANIFEST',
                              'test1/Makefile.am',
                              'test1/autogen.sh',
                              'test1/configure.in']))
        self.assertEqual(c.branch, "BRANCH-DEVEL")
        self.assertEqual(c.isdir, 0)
+
+
class TestSVNCommitEmail(unittest.TestCase):
    """Parse stored svn commit-email.pl notification mails."""

    def get(self, msg, prefix):
        """Parse one stored mail with the given prefix; return the Change."""
        msg = util.sibpath(__file__, msg)
        s = mail.SVNCommitEmailMaildirSource(None)
        # close the file explicitly instead of leaking the descriptor
        f = open(msg, "r")
        try:
            return s.parse_file(f, prefix)
        finally:
            f.close()

    def test1(self):
        c = self.get("mail/svn-commit.1", "spamassassin/trunk/")
        self.failUnless(c)
        self.failUnlessEqual(c.who, "felicity")
        self.failUnlessEqual(set(c.files), set(["sa-update.raw"]))
        self.failUnlessEqual(c.branch, None)
        self.failUnlessEqual(c.comments,
                             "bug 4864: remove extraneous front-slash "
                             "from gpghomedir path\n")

    def test2a(self):
        # commit elsewhere in the tree: no Change under this prefix
        c = self.get("mail/svn-commit.2", "spamassassin/trunk/")
        self.failIf(c)

    def test2b(self):
        c = self.get("mail/svn-commit.2", "spamassassin/branches/3.1/")
        self.failUnless(c)
        self.failUnlessEqual(c.who, "sidney")
        self.failUnlessEqual(set(c.files),
                             set(["lib/Mail/SpamAssassin/Timeout.pm",
                                  "MANIFEST",
                                  "lib/Mail/SpamAssassin/Logger.pm",
                                  "lib/Mail/SpamAssassin/Plugin/DCC.pm",
                                  "lib/Mail/SpamAssassin/Plugin/DomainKeys.pm",
                                  "lib/Mail/SpamAssassin/Plugin/Pyzor.pm",
                                  "lib/Mail/SpamAssassin/Plugin/Razor2.pm",
                                  "lib/Mail/SpamAssassin/Plugin/SPF.pm",
                                  "lib/Mail/SpamAssassin/SpamdForkScaling.pm",
                                  "spamd/spamd.raw",
                                  ]))
        self.failUnlessEqual(c.comments,
                             "Bug 4696: consolidated fixes for timeout bugs\n")
+
+
diff --git a/buildbot/buildbot/test/test_mergerequests.py b/buildbot/buildbot/test/test_mergerequests.py
new file mode 100644
index 0000000..e176cf1
--- /dev/null
+++ b/buildbot/buildbot/test/test_mergerequests.py
@@ -0,0 +1,196 @@
+from twisted.internet import defer, reactor
+from twisted.trial import unittest
+
+from buildbot.sourcestamp import SourceStamp
+from buildbot.process.base import BuildRequest
+from buildbot.process.properties import Properties
+from buildbot.status import builder, base, words
+from buildbot.changes.changes import Change
+
+from buildbot.test.runutils import RunMixin
+
+"""Testcases for master.botmaster.shouldMergeRequests.
+
+"""
+
+master_cfg = """from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.buildslave import BuildSlave
+
+f = factory.BuildFactory([
+ dummy.Dummy(timeout=0),
+ ])
+
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit')]
+c['schedulers'] = []
+c['builders'] = []
+c['builders'].append({'name':'dummy', 'slavename':'bot1',
+ 'builddir': 'dummy', 'factory': f})
+c['slavePortnum'] = 0
+
+%s
+c['mergeRequests'] = mergeRequests
+"""
+
class MergeRequestsTest(RunMixin, unittest.TestCase):
    """Exercise c['mergeRequests'] policies end-to-end.

    Each test queues a fixed set of BuildRequests against the 'dummy'
    builder, lets the slave drain them, and then compares the finished
    builds (reason, branch, change count) against the expected tuples.
    """

    def do_test(self, mergefun, results, reqs = None):
        """Load master_cfg with `mergefun` spliced in, queue `reqs` (or the
        default request set), and check the finished builds against
        `results`."""
        R = BuildRequest
        S = SourceStamp
        c1 = Change("alice", [], "changed stuff", branch="branch1")
        c2 = Change("alice", [], "changed stuff", branch="branch1")
        c3 = Change("alice", [], "changed stuff", branch="branch1")
        c4 = Change("alice", [], "changed stuff", branch="branch1")
        c5 = Change("alice", [], "changed stuff", branch="branch1")
        c6 = Change("alice", [], "changed stuff", branch="branch1")
        if reqs is None:
            reqs = (R("why", S("branch1", None, None, None), 'test_builder'),
                    R("why2", S("branch1", "rev1", None, None), 'test_builder'),
                    R("why not", S("branch1", "rev1", None, None), 'test_builder'),
                    R("why3", S("branch1", "rev2", None, None), 'test_builder'),
                    R("why4", S("branch2", "rev2", None, None), 'test_builder'),
                    R("why5", S("branch1", "rev1", (3, "diff"), None), 'test_builder'),
                    R("changes", S("branch1", None, None, [c1,c2,c3]), 'test_builder'),
                    R("changes", S("branch1", None, None, [c4,c5,c6]), 'test_builder'),
                    )

        m = self.master
        # NOTE(review): loadConfig returns a Deferred that is not chained
        # here; the other tests in this tree wait for it -- confirm this
        # synchronous usage is safe before copying the pattern.
        m.loadConfig(master_cfg % mergefun)
        m.readConfig = True
        m.startService()
        # renamed from 'builder' so it does not shadow the status.builder
        # module imported at the top of this file
        builder_control = self.control.getBuilder('dummy')
        for req in reqs:
            builder_control.requestBuild(req)

        d = self.connectSlave()
        d.addCallback(self.waitForBuilds, results)

        return d

    def waitForBuilds(self, r, results):
        d = self.master.botmaster.waitUntilBuilderIdle('dummy')
        d.addCallback(self.checkresults, results)
        return d

    def checkresults(self, bldr, results):
        """Compare the builder's finished builds against the expected
        (reason, branch, changecount) dicts, oldest first."""
        s = bldr.builder_status
        builds = list(s.generateFinishedBuilds())
        builds.reverse()
        self.assertEqual(len(builds), len(results))
        for i in xrange(len(builds)):
            b = builds[i]
            r = results[i]
            ss = b.getSourceStamp()
            self.assertEquals(b.getReason(), r['reason'])
            self.assertEquals(ss.branch, r['branch'])
            self.assertEquals(len(ss.changes), r['changecount'])
            # print b.getReason(), ss.branch, len(ss.changes), ss.revision

    def testDefault(self):
        # mergeRequests=None falls back to the default merging policy
        return self.do_test('mergeRequests = None',
                            ({'reason': 'why',
                              'branch': 'branch1',
                              'changecount': 0},
                             {'reason': 'why2, why not',
                              'branch': 'branch1',
                              'changecount': 0},
                             {'reason': 'why3',
                              'branch': 'branch1',
                              'changecount': 0},
                             {'reason': 'why4',
                              'branch': 'branch2',
                              'changecount': 0},
                             {'reason': 'why5',
                              'branch': 'branch1',
                              'changecount': 0},
                             {'reason': 'changes',
                              'branch': 'branch1',
                              'changecount': 6},
                             ))

    def testNoMerges(self):
        # a mergeRequests that always refuses leaves every request separate
        mergefun = """def mergeRequests(builder, req1, req2):
    return False
"""
        return self.do_test(mergefun,
                            ({'reason': 'why',
                              'branch': 'branch1',
                              'changecount': 0},
                             {'reason': 'why2',
                              'branch': 'branch1',
                              'changecount': 0},
                             {'reason': 'why not',
                              'branch': 'branch1',
                              'changecount': 0},
                             {'reason': 'why3',
                              'branch': 'branch1',
                              'changecount': 0},
                             {'reason': 'why4',
                              'branch': 'branch2',
                              'changecount': 0},
                             {'reason': 'why5',
                              'branch': 'branch1',
                              'changecount': 0},
                             {'reason': 'changes',
                              'branch': 'branch1',
                              'changecount': 3},
                             {'reason': 'changes',
                              'branch': 'branch1',
                              'changecount': 3},
                             ))

    def testReasons(self):
        # merge only requests that share the same reason string
        mergefun = """def mergeRequests(builder, req1, req2):
    return req1.reason == req2.reason
"""
        return self.do_test(mergefun,
                            ({'reason': 'why',
                              'branch': 'branch1',
                              'changecount': 0},
                             {'reason': 'why2',
                              'branch': 'branch1',
                              'changecount': 0},
                             {'reason': 'why not',
                              'branch': 'branch1',
                              'changecount': 0},
                             {'reason': 'why3',
                              'branch': 'branch1',
                              'changecount': 0},
                             {'reason': 'why4',
                              'branch': 'branch2',
                              'changecount': 0},
                             {'reason': 'why5',
                              'branch': 'branch1',
                              'changecount': 0},
                             {'reason': 'changes',
                              'branch': 'branch1',
                              'changecount': 6},
                             ))


    def testProperties(self):
        # merge only requests whose Properties compare equal
        mergefun = """def mergeRequests(builder, req1, req2):
    return req1.properties == req2.properties
"""
        R = BuildRequest
        S = SourceStamp
        p1 = Properties(first="value")
        p2 = Properties(first="other value")
        reqs = (R("why", S("branch1", None, None, None), 'test_builder',
                  properties = p1),
                R("why", S("branch1", None, None, None), 'test_builder',
                  properties = p1),
                R("why", S("branch1", None, None, None), 'test_builder',
                  properties = p2),
                R("why", S("branch1", None, None, None), 'test_builder',
                  properties = p2),
                )
        return self.do_test(mergefun,
                            ({'reason': 'why',
                              'branch': 'branch1',
                              'changecount': 0},
                             {'reason': 'why',
                              'branch': 'branch1',
                              'changecount': 0},
                             ),
                            reqs=reqs)
diff --git a/buildbot/buildbot/test/test_p4poller.py b/buildbot/buildbot/test/test_p4poller.py
new file mode 100644
index 0000000..54c6325
--- /dev/null
+++ b/buildbot/buildbot/test/test_p4poller.py
@@ -0,0 +1,213 @@
+import time
+
+from twisted.internet import defer
+from twisted.trial import unittest
+
+from buildbot.changes.changes import Change
+from buildbot.changes.p4poller import P4Source, get_simple_split
+
# Canned 'p4 changes' output fed to MockP4Source, one string per poll.
first_p4changes = \
"""Change 1 on 2006/04/13 by slamb@testclient 'first rev'
"""

second_p4changes = \
"""Change 3 on 2006/04/13 by bob@testclient 'short desc truncated'
Change 2 on 2006/04/13 by slamb@testclient 'bar'
"""

third_p4changes = \
"""Change 5 on 2006/04/13 by mpatel@testclient 'first rev'
"""

# Canned 'p4 describe' headers; the tests compare Change.comments against
# these strings verbatim.
change_4_log = \
"""Change 4 by mpatel@testclient on 2006/04/13 21:55:39

	short desc truncated because this is a long description.
"""
change_3_log = \
"""Change 3 by bob@testclient on 2006/04/13 21:51:39

	short desc truncated because this is a long description.
"""

change_2_log = \
"""Change 2 by slamb@testclient on 2006/04/13 21:46:23

	creation
"""

# Canned 'p4 describe' output keyed by change number.
# NOTE(review): key 5 maps to change_4_log ("Change 4 by mpatel...") even
# though third_p4changes reports change 5 -- apparently deliberate for
# testSplitFile, but verify the fixture numbering before extending it.
p4change = {
    3: change_3_log +
"""Affected files ...

... //depot/myproject/branch_b/branch_b_file#1 add
... //depot/myproject/branch_b/whatbranch#1 branch
... //depot/myproject/branch_c/whatbranch#1 branch
""",
    2: change_2_log +
"""Affected files ...

... //depot/myproject/trunk/whatbranch#1 add
... //depot/otherproject/trunk/something#1 add
""",
    5: change_4_log +
"""Affected files ...

... //depot/myproject/branch_b/branch_b_file#1 add
... //depot/myproject/branch_b#75 edit
... //depot/myproject/branch_c/branch_c_file#1 add
""",
}
+
+
class MockP4Source(P4Source):
    """Test P4Source which doesn't actually invoke p4."""
    # class-level counter; the += in _get_changes creates a per-instance
    # attribute, so separate instances do not share state
    invocation = 0

    def __init__(self, p4changes, p4change, *args, **kwargs):
        # p4changes: list of canned 'p4 changes' outputs, one per poll
        # p4change: dict of canned 'p4 describe' outputs keyed by change number
        P4Source.__init__(self, *args, **kwargs)
        self.p4changes = p4changes
        self.p4change = p4change

    def _get_changes(self):
        assert self.working
        result = self.p4changes[self.invocation]
        self.invocation += 1
        return defer.succeed(result)

    def _get_describe(self, dummy, num):
        # 'dummy' is the previous callback's result, unused here
        assert self.working
        return defer.succeed(self.p4change[num])
+
class TestP4Poller(unittest.TestCase):
    """Drive MockP4Source through its polling cycle with canned p4 output."""

    def setUp(self):
        self.changes = []
        # MockP4Source delivers changes via self.parent.addChange
        self.addChange = self.changes.append

    def failUnlessIn(self, substr, string):
        # this is for compatibility with python2.2
        if isinstance(string, str):
            self.failUnless(string.find(substr) != -1)
        else:
            self.assertIn(substr, string)

    def testCheck(self):
        """successful checks"""
        self.t = MockP4Source(p4changes=[first_p4changes, second_p4changes],
                              p4change=p4change,
                              p4port=None, p4user=None,
                              p4base='//depot/myproject/',
                              split_file=lambda x: x.split('/', 1))
        self.t.parent = self

        # The first time, it just learns the change to start at.
        self.assert_(self.t.last_change is None)
        self.assert_(not self.t.working)
        return self.t.checkp4().addCallback(self._testCheck2)

    def _testCheck2(self, res):
        self.assertEquals(self.changes, [])
        self.assertEquals(self.t.last_change, 1)

        # Subsequent times, it returns Change objects for new changes.
        return self.t.checkp4().addCallback(self._testCheck3)

    def _testCheck3(self, res):
        self.assertEquals(len(self.changes), 3)
        self.assertEquals(self.t.last_change, 3)
        self.assert_(not self.t.working)

        # They're supposed to go oldest to newest, so this one must be first.
        self.assertEquals(self.changes[0].asText(),
                          Change(who='slamb',
                                 files=['whatbranch'],
                                 comments=change_2_log,
                                 revision='2',
                                 when=self.makeTime("2006/04/13 21:46:23"),
                                 branch='trunk').asText())

        # These two can happen in either order, since they're from the same
        # Perforce change.
        self.failUnlessIn(
            Change(who='bob',
                   files=['branch_b_file',
                          'whatbranch'],
                   comments=change_3_log,
                   revision='3',
                   when=self.makeTime("2006/04/13 21:51:39"),
                   branch='branch_b').asText(),
            [c.asText() for c in self.changes])
        self.failUnlessIn(
            Change(who='bob',
                   files=['whatbranch'],
                   comments=change_3_log,
                   revision='3',
                   when=self.makeTime("2006/04/13 21:51:39"),
                   branch='branch_c').asText(),
            [c.asText() for c in self.changes])

    def makeTime(self, timestring):
        """Convert 'YYYY/MM/DD HH:MM:SS' into a local epoch timestamp."""
        datefmt = '%Y/%m/%d %H:%M:%S'
        when = time.mktime(time.strptime(timestring, datefmt))
        return when

    def testFailedChanges(self):
        """'p4 changes' failure is properly ignored"""
        self.t = MockP4Source(p4changes=['Perforce client error:\n...'],
                              p4change={},
                              p4port=None, p4user=None)
        self.t.parent = self
        d = self.t.checkp4()
        d.addCallback(self._testFailedChanges2)
        return d

    def _testFailedChanges2(self, f):
        self.failUnlessEqual(f, None)
        self.assert_(not self.t.working)

    def testFailedDescribe(self):
        """'p4 describe' failure is properly ignored"""
        c = dict(p4change)
        c[3] = 'Perforce client error:\n...'
        self.t = MockP4Source(p4changes=[first_p4changes, second_p4changes],
                              p4change=c, p4port=None, p4user=None)
        self.t.parent = self
        d = self.t.checkp4()
        d.addCallback(self._testFailedDescribe2)
        return d

    def _testFailedDescribe2(self, res):
        # first time finds nothing; check again.
        return self.t.checkp4().addCallback(self._testFailedDescribe3)

    def _testFailedDescribe3(self, f):
        self.failUnlessEqual(f, None)
        self.assert_(not self.t.working)
        self.assertEquals(self.t.last_change, 2)

    def testAlreadyWorking(self):
        """don't launch a new poll while old is still going"""
        self.t = P4Source()
        self.t.working = True
        self.assert_(self.t.last_change is None)
        d = self.t.checkp4()
        d.addCallback(self._testAlreadyWorking2)
        # return the Deferred so trial waits for the callback to run;
        # without this the assertion below might never be checked
        return d

    def _testAlreadyWorking2(self, res):
        self.assert_(self.t.last_change is None)

    def testSplitFile(self):
        """Make sure split file works on branch only changes"""
        self.t = MockP4Source(p4changes=[third_p4changes],
                              p4change=p4change,
                              p4port=None, p4user=None,
                              p4base='//depot/myproject/',
                              split_file=get_simple_split)
        self.t.parent = self
        self.t.last_change = 50
        d = self.t.checkp4()
        d.addCallback(self._testSplitFile)
        # return the Deferred so trial waits for _testSplitFile to run
        return d

    def _testSplitFile(self, res):
        self.assertEquals(len(self.changes), 2)
        self.assertEquals(self.t.last_change, 5)
diff --git a/buildbot/buildbot/test/test_package_rpm.py b/buildbot/buildbot/test/test_package_rpm.py
new file mode 100644
index 0000000..05d2841
--- /dev/null
+++ b/buildbot/buildbot/test/test_package_rpm.py
@@ -0,0 +1,132 @@
+# test step.package.rpm.*
+
+from twisted.trial import unittest
+
+from buildbot.test.runutils import SlaveCommandTestBase
+from buildbot.steps.package.rpm import RpmBuild, RpmLint, RpmSpec
+
+
class TestRpmBuild(unittest.TestCase):
    """
    Tests the package.rpm.RpmBuild class.
    """

    def test_creation(self):
        """
        Test that instances are created with proper data.
        """
        # Defaults: no specfile, no automatic release bumping, no VCS
        # revision tagging.
        step = RpmBuild()
        self.assertEquals(step.specfile, None)
        self.assertFalse(step.autoRelease)
        self.assertFalse(step.vcsRevision)

        # Explicit values must be stored verbatim.
        step = RpmBuild('aspec.spec', autoRelease=True, vcsRevision=True)
        self.assertEquals(step.specfile, 'aspec.spec')
        self.assertTrue(step.autoRelease)
        self.assertTrue(step.vcsRevision)

    def test_rpmbuild(self):
        """
        Verifies the rpmbuild string is what we would expect.
        """
        # NOTE(review): the expected string maps each name one slot later
        # than its label suggests ('buildir' becomes _topdir, etc.) and
        # 'topdir' never appears -- apparently the first positional
        # argument is the specfile; confirm against RpmBuild.__init__.
        step = RpmBuild('topdir', 'buildir', 'rpmdir', 'sourcedir',
                        'specdir', 'dist')
        expected = ('rpmbuild --define "_topdir buildir"'
                    ' --define "_builddir rpmdir" --define "_rpmdir sourcedir"'
                    ' --define "_sourcedir specdir" --define "_specdir dist"'
                    ' --define "_srcrpmdir `pwd`" --define "dist .el5"')
        self.assertEquals(step.rpmbuild, expected)
+
+
class TestRpmLint(unittest.TestCase):
    """
    Tests the package.rpm.RpmLint class.
    """

    def test_command(self):
        """
        Test that instance command variable is created with proper data.
        """
        # A default RpmLint step lints every rpm in the working dir with
        # explanations (-i) enabled.
        expected = ["/usr/bin/rpmlint", "-i", '*rpm']
        self.assertEquals(RpmLint().command, expected)
+
+
class TestRpmSpec(unittest.TestCase):
    """
    Tests the package.rpm.RpmSpec class.
    """

    def test_creation(self):
        """
        Test that instances are created with proper data.
        """
        rs = RpmSpec()
        self.assertEquals(rs.specfile, None)
        self.assertEquals(rs.pkg_name, None)
        self.assertEquals(rs.pkg_version, None)
        self.assertFalse(rs.loaded)

    def test_load(self):
        """
        Loading an in-memory spec file extracts the package name/version.
        """
        # Prefer the C implementation, fall back to the pure-Python one.
        # BUGFIX: the caught exception was bound to an unused name using
        # the py2-only 'except E, name:' syntax; the plain form is
        # equivalent, drops the dead variable, and parses everywhere.
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO

        # Build the spec entirely in memory so the test needs no fixture
        # files on disk.
        specfile = StringIO()
        specfile.write("""\
Name: example
Version: 1.0.0
Release: 1%{?dist}
Summary: An example spec

Group: Development/Libraries
License: GPLv2+
URL: http://www.example.dom
Source0: %{name}-%{version}.tar.gz
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)

BuildArch: noarch
Requires: python >= 2.4
BuildRequires: python-setuptools


%description
An example spec for an rpm.


%prep
%setup -q


%build
%{__python} setup.py build


%install
rm -rf $RPM_BUILD_ROOT
%{__python} setup.py install -O1 --skip-build --root $RPM_BUILD_ROOT/


%clean
rm -rf $RPM_BUILD_ROOT


%files
%defattr(-,root,root,-)
%doc INSTALL LICENSE AUTHORS COPYING
# For noarch packages: sitelib
%{python_sitelib}/*


%changelog
* Wed Jan 7 2009 Steve 'Ashcrow' Milner <smilner+buildbot@redhat.com> - \
1.0.0-1
- example""")
        specfile.flush()
        specfile.seek(0)
        rs = RpmSpec(specfile)
        rs.load()
        self.assertTrue(rs.loaded)
        self.assertEquals(rs.pkg_name, 'example')
        self.assertEquals(rs.pkg_version, '1.0.0')
diff --git a/buildbot/buildbot/test/test_properties.py b/buildbot/buildbot/test/test_properties.py
new file mode 100644
index 0000000..a8973dd
--- /dev/null
+++ b/buildbot/buildbot/test/test_properties.py
@@ -0,0 +1,274 @@
+# -*- test-case-name: buildbot.test.test_properties -*-
+
+import os
+
+from twisted.trial import unittest
+
+from buildbot.sourcestamp import SourceStamp
+from buildbot.process import base
+from buildbot.process.properties import WithProperties, Properties
+from buildbot.status import builder
+from buildbot.slave.commands import rmdirRecursive
+from buildbot.test.runutils import RunMixin
+
+
class FakeBuild:
    # Minimal stand-in for a build object; no behaviour needed here.
    pass
class FakeBuildMaster:
    # Master-level properties; BuildProperties.testProperties asserts
    # that 'masterprop' shows up on the build.
    properties = Properties(masterprop="master")
class FakeBotMaster:
    # Chains up to the fake master so the build can reach its properties.
    parent = FakeBuildMaster()
class FakeBuilder:
    # Just enough of a Builder for Build.setBuilder()/setupProperties().
    statusbag = None
    name = "fakebuilder"
    botmaster = FakeBotMaster()
class FakeSlave:
    # Supplies the 'slavename' value asserted in testProperties.
    slavename = "bot12"
    properties = Properties(slavename="bot12")
class FakeSlaveBuilder:
    # Stand-in for the master-side slave-builder attached to a build.
    slave = FakeSlave()
    def getSlaveCommandVersion(self, command, oldversion=None):
        # Pretend every slave command is at version 1.10.
        return "1.10"
class FakeScheduler:
    # Only the scheduler's name is used by these tests.
    name = "fakescheduler"
+
class TestProperties(unittest.TestCase):
    """Unit tests for the Properties container itself."""

    def setUp(self):
        self.props = Properties()

    def testDictBehavior(self):
        """Properties supports dict-style lookup and containment."""
        props = self.props
        props.setProperty("do-tests", 1, "scheduler")
        props.setProperty("do-install", 2, "scheduler")

        self.assert_(props.has_key('do-tests'))
        self.failUnlessEqual(props['do-tests'], 1)
        self.failUnlessEqual(props['do-install'], 2)
        self.assertRaises(KeyError, lambda : props['do-nothing'])
        self.failUnlessEqual(props.getProperty('do-install'), 2)

    def testUpdate(self):
        """update() merges a plain dict, tagging new values' source."""
        props = self.props
        props.setProperty("x", 24, "old")
        props.update({ 'a' : 1, 'b' : 2 }, "new")

        # pre-existing values and their provenance survive the merge
        self.failUnlessEqual(props.getProperty('x'), 24)
        self.failUnlessEqual(props.getPropertySource('x'), 'old')
        # merged values carry the source passed to update()
        self.failUnlessEqual(props.getProperty('a'), 1)
        self.failUnlessEqual(props.getPropertySource('a'), 'new')

    def testUpdateFromProperties(self):
        """updateFromProperties() merges another Properties object."""
        props = self.props
        props.setProperty("x", 24, "old")
        incoming = Properties()
        incoming.setProperty('a', 1, "new")
        incoming.setProperty('b', 2, "new")
        props.updateFromProperties(incoming)

        self.failUnlessEqual(props.getProperty('x'), 24)
        self.failUnlessEqual(props.getPropertySource('x'), 'old')
        self.failUnlessEqual(props.getProperty('a'), 1)
        self.failUnlessEqual(props.getPropertySource('a'), 'new')

    # render() is pretty well tested by TestWithProperties
+
class TestWithProperties(unittest.TestCase):
    """Tests rendering WithProperties placeholders via Properties.render()."""

    def setUp(self):
        self.props = Properties()

    def testBasic(self):
        # test basic substitution with WithProperties
        self.props.setProperty("revision", "47", "test")
        command = WithProperties("build-%s.tar.gz", "revision")
        self.failUnlessEqual(self.props.render(command),
                             "build-47.tar.gz")

    def testDict(self):
        # test dict-style substitution with WithProperties
        self.props.setProperty("other", "foo", "test")
        command = WithProperties("build-%(other)s.tar.gz")
        self.failUnlessEqual(self.props.render(command),
                             "build-foo.tar.gz")

    def testDictColonMinus(self):
        # test dict-style substitution with WithProperties
        # %(prop:-repl)s renders 'repl' when prop is unset
        self.props.setProperty("prop1", "foo", "test")
        command = WithProperties("build-%(prop1:-empty)s-%(prop2:-empty)s.tar.gz")
        self.failUnlessEqual(self.props.render(command),
                             "build-foo-empty.tar.gz")

    def testDictColonPlus(self):
        # test dict-style substitution with WithProperties
        # %(prop:+repl)s renders 'repl' when prop IS set, '' otherwise
        self.props.setProperty("prop1", "foo", "test")
        command = WithProperties("build-%(prop1:+exists)s-%(prop2:+exists)s.tar.gz")
        self.failUnlessEqual(self.props.render(command),
                             "build-exists-.tar.gz")

    def testEmpty(self):
        # None should render as ''
        self.props.setProperty("empty", None, "test")
        command = WithProperties("build-%(empty)s.tar.gz")
        self.failUnlessEqual(self.props.render(command),
                             "build-.tar.gz")

    def testRecursiveList(self):
        # render() recurses into list elements
        self.props.setProperty("x", 10, "test")
        self.props.setProperty("y", 20, "test")
        command = [ WithProperties("%(x)s %(y)s"), "and",
                    WithProperties("%(y)s %(x)s") ]
        self.failUnlessEqual(self.props.render(command),
                             ["10 20", "and", "20 10"])

    def testRecursiveTuple(self):
        # ... and into tuples ...
        self.props.setProperty("x", 10, "test")
        self.props.setProperty("y", 20, "test")
        command = ( WithProperties("%(x)s %(y)s"), "and",
                    WithProperties("%(y)s %(x)s") )
        self.failUnlessEqual(self.props.render(command),
                             ("10 20", "and", "20 10"))

    def testRecursiveDict(self):
        # ... and into dict keys and values
        self.props.setProperty("x", 10, "test")
        self.props.setProperty("y", 20, "test")
        command = { WithProperties("%(x)s %(y)s") :
                    WithProperties("%(y)s %(x)s") }
        self.failUnlessEqual(self.props.render(command),
                             {"10 20" : "20 10"})
+
class BuildProperties(unittest.TestCase):
    """Test the properties that a build should have."""
    def setUp(self):
        # Assemble a Build wired to the Fake* objects above so that every
        # property source (scheduler, sourcestamp, slave, builder, master)
        # contributes exactly one known value.
        self.builder = FakeBuilder()
        self.builder_status = builder.BuilderStatus("fakebuilder")
        self.builder_status.basedir = "test_properties"
        self.builder_status.nextBuildNumber = 5
        # start from a clean status directory each run
        rmdirRecursive(self.builder_status.basedir)
        os.mkdir(self.builder_status.basedir)
        self.build_status = self.builder_status.newBuild()
        req = base.BuildRequest("reason",
                                SourceStamp(branch="branch2", revision="1234"),
                                'test_builder',
                                properties=Properties(scheduler="fakescheduler"))
        self.build = base.Build([req])
        self.build.build_status = self.build_status
        self.build.setBuilder(self.builder)
        self.build.setupProperties()
        self.build.setupSlaveBuilder(FakeSlaveBuilder())

    def testProperties(self):
        # one assertion per property source
        self.failUnlessEqual(self.build.getProperty("scheduler"), "fakescheduler")
        self.failUnlessEqual(self.build.getProperty("branch"), "branch2")
        self.failUnlessEqual(self.build.getProperty("revision"), "1234")
        self.failUnlessEqual(self.build.getProperty("slavename"), "bot12")
        self.failUnlessEqual(self.build.getProperty("buildnumber"), 5)
        self.failUnlessEqual(self.build.getProperty("buildername"), "fakebuilder")
        self.failUnlessEqual(self.build.getProperty("masterprop"), "master")
+
+run_config = """
+from buildbot.process import factory
+from buildbot.steps.shell import ShellCommand, WithProperties
+from buildbot.buildslave import BuildSlave
+s = factory.s
+
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit', properties={'slprop':'slprop'})]
+c['schedulers'] = []
+c['slavePortnum'] = 0
+c['properties'] = { 'global' : 'global' }
+
+# Note: when run against twisted-1.3.0, this locks up about 5% of the time. I
+# suspect that a command with no output that finishes quickly triggers a race
+# condition in 1.3.0's process-reaping code. The 'touch' process becomes a
+# zombie and the step never completes. To keep this from messing up the unit
+# tests too badly, this step runs with a reduced timeout.
+
+f1 = factory.BuildFactory([s(ShellCommand,
+ flunkOnFailure=True,
+ command=['touch',
+ WithProperties('%s-%s-%s',
+ 'slavename', 'global', 'slprop'),
+ ],
+ workdir='.',
+ timeout=10,
+ )])
+
+b1 = {'name': 'full1', 'slavename': 'bot1', 'builddir': 'bd1', 'factory': f1}
+c['builders'] = [b1]
+
+"""
+
class Run(RunMixin, unittest.TestCase):
    """Integration tests: property interpolation and SetProperty steps
    running against a real master and slave."""

    def testInterpolate(self):
        # run an actual build with a step that interpolates a build property
        d = self.master.loadConfig(run_config)
        d.addCallback(lambda res: self.master.startService())
        d.addCallback(lambda res: self.connectOneSlave("bot1"))
        d.addCallback(lambda res: self.requestBuild("full1"))
        d.addCallback(self.failUnlessBuildSucceeded)
        def _check_touch(res):
            # the touched filename proves that slave-name, global and
            # slave-property interpolations all resolved
            f = os.path.join("slavebase-bot1", "bd1", "bot1-global-slprop")
            self.failUnless(os.path.exists(f))
            return res
        d.addCallback(_check_touch)
        return d

    # Skeleton config for the SetProperty tests below; ##STEPS## is
    # replaced with the step list under test.
    SetProperty_base_config = """
from buildbot.process import factory
from buildbot.steps.shell import ShellCommand, SetProperty, WithProperties
from buildbot.buildslave import BuildSlave
s = factory.s

BuildmasterConfig = c = {}
c['slaves'] = [BuildSlave('bot1', 'sekrit')]
c['schedulers'] = []
c['slavePortnum'] = 0

f1 = factory.BuildFactory([
##STEPS##
])

b1 = {'name': 'full1', 'slavename': 'bot1', 'builddir': 'bd1', 'factory': f1}
c['builders'] = [b1]
"""

    SetPropertySimple_config = SetProperty_base_config.replace("##STEPS##", """
    SetProperty(property='foo', command="echo foo"),
    SetProperty(property=WithProperties('wp'), command="echo wp"),
    SetProperty(property='bar', command="echo bar", strip=False),
    """)

    def testSetPropertySimple(self):
        d = self.master.loadConfig(self.SetPropertySimple_config)
        d.addCallback(lambda res: self.master.startService())
        d.addCallback(lambda res: self.connectOneSlave("bot1"))
        d.addCallback(lambda res: self.requestBuild("full1"))
        d.addCallback(self.failUnlessBuildSucceeded)
        def _check_props(bs):
            # stripped by default; 'bar' used strip=False so keeps its \n
            self.failUnlessEqual(bs.getProperty("foo"), "foo")
            self.failUnlessEqual(bs.getProperty("wp"), "wp")
            # (will this fail on some platforms, due to newline differences?)
            self.failUnlessEqual(bs.getProperty("bar"), "bar\n")
            return bs
        d.addCallback(_check_props)
        return d

    SetPropertyExtractFn_config = SetProperty_base_config.replace("##STEPS##", """
    SetProperty(
        extract_fn=lambda rc,stdout,stderr : {
            'foo' : stdout.strip(),
            'bar' : stderr.strip() },
        command="echo foo; echo bar >&2"),
    """)

    def testSetPropertyExtractFn(self):
        d = self.master.loadConfig(self.SetPropertyExtractFn_config)
        d.addCallback(lambda res: self.master.startService())
        d.addCallback(lambda res: self.connectOneSlave("bot1"))
        d.addCallback(lambda res: self.requestBuild("full1"))
        d.addCallback(self.failUnlessBuildSucceeded)
        def _check_props(bs):
            # extract_fn routed stdout to 'foo' and stderr to 'bar'
            self.failUnlessEqual(bs.getProperty("foo"), "foo")
            self.failUnlessEqual(bs.getProperty("bar"), "bar")
            return bs
        d.addCallback(_check_props)
        return d
+
+# we test got_revision in test_vc
diff --git a/buildbot/buildbot/test/test_reconfig.py b/buildbot/buildbot/test/test_reconfig.py
new file mode 100644
index 0000000..c4c3922
--- /dev/null
+++ b/buildbot/buildbot/test/test_reconfig.py
@@ -0,0 +1,91 @@
+from twisted.trial import unittest
+from twisted.internet import reactor, defer
+from twisted.python import log
+
+from buildbot.test.runutils import RunMixin
+from buildbot.sourcestamp import SourceStamp
+
+config_base = """
+from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.buildslave import BuildSlave
+from buildbot.scheduler import Triggerable, Dependent
+
+BuildmasterConfig = c = {}
+
+f = factory.BuildFactory()
+f.addStep(dummy.Dummy, timeout=%d)
+
+c['slaves'] = [BuildSlave('bot1', 'sekrit')]
+
+upstream = Triggerable('s_upstream', ['upstream'], {'prop': '%s'})
+dep = Dependent('s_dep', upstream, ['depend'], {'dep prop': '%s'})
+c['schedulers'] = [upstream, dep]
+c['builders'] = [{'name':'upstream', 'slavename':'bot1',
+ 'builddir': 'upstream', 'factory': f},
+ {'name':'depend', 'slavename':'bot1',
+ 'builddir': 'depend', 'factory': f}]
+c['slavePortnum'] = 0
+"""
+
class DependingScheduler(RunMixin, unittest.TestCase):
    '''Test an upstream and a dependent scheduler while reconfiguring.'''

    def testReconfig(self):
        # self.reconfigured counts completed reconfig cycles; _doChecks
        # uses it both to choose the next config and as the expected
        # build number.
        self.reconfigured = 0
        self.master.loadConfig(config_base % (1, 'prop value', 'dep prop value'))
        self.prop_value = 'prop value'
        self.dep_prop_value = 'dep prop value'
        self.master.readConfig = True
        self.master.startService()
        d = self.connectSlave(builders=['upstream', 'depend'])
        d.addCallback(self._triggerUpstream)
        return d
    def _triggerUpstream(self, res):
        log.msg("trigger upstream")
        ss = SourceStamp()
        upstream = [s for s in self.master.allSchedulers()
                    if s.name == 's_upstream'][0]
        d = upstream.trigger(ss)
        d.addCallback(self._gotBuild)
        return d

    def _gotBuild(self, res):
        log.msg("done")
        # give the dependent build two seconds to run before checking
        d = defer.Deferred()
        d.addCallback(self._doChecks)
        reactor.callLater(2, d.callback, None)
        return d

    def _doChecks(self, res):
        log.msg("starting tests")
        # both builds must carry the property values from the *current*
        # config, and their build numbers track the reconfig count
        ub = self.status.getBuilder('upstream').getLastFinishedBuild()
        tb = self.status.getBuilder('depend').getLastFinishedBuild()
        self.assertEqual(ub.getProperty('prop'), self.prop_value)
        self.assertEqual(ub.getNumber(), self.reconfigured)
        self.assertEqual(tb.getProperty('dep prop'), self.dep_prop_value)
        self.assertEqual(tb.getNumber(), self.reconfigured)

        # now further on to the reconfig
        if self.reconfigured > 2:
            # actually, we're done,
            return
        if self.reconfigured == 0:
            # reconfig without changes now
            d = self.master.loadConfig(config_base% (1, 'prop value',
                                                     'dep prop value'))
        elif self.reconfigured == 1:
            # reconfig with changes to upstream now
            d = self.master.loadConfig(config_base% (1, 'other prop value',
                                                     'dep prop value'))
            self.prop_value = 'other prop value'
            self.dep_prop_value = 'dep prop value'
        else:
            # reconfig with changes to dep now
            d = self.master.loadConfig(config_base% (1, 'other prop value',
                                                     'other dep prop value'))
            self.prop_value = 'other prop value'
            self.dep_prop_value = 'other dep prop value'
        self.reconfigured += 1
        # loop: trigger another upstream build against the new config
        d.addCallback(self._triggerUpstream)
        return d
diff --git a/buildbot/buildbot/test/test_run.py b/buildbot/buildbot/test/test_run.py
new file mode 100644
index 0000000..a04ea5b
--- /dev/null
+++ b/buildbot/buildbot/test/test_run.py
@@ -0,0 +1,1199 @@
+# -*- test-case-name: buildbot.test.test_run -*-
+
+from twisted.trial import unittest
+from twisted.internet import reactor, defer
+import os
+
+from buildbot import master, interfaces
+from buildbot.sourcestamp import SourceStamp
+from buildbot.changes import changes
+from buildbot.status import builder
+from buildbot.process.base import BuildRequest
+
+from buildbot.test.runutils import RunMixin, TestFlagMixin, rmtree
+
+config_base = """
+from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.buildslave import BuildSlave
+s = factory.s
+
+f1 = factory.QuickBuildFactory('fakerep', 'cvsmodule', configure=None)
+
+f2 = factory.BuildFactory([
+ dummy.Dummy(timeout=1),
+ dummy.RemoteDummy(timeout=2),
+ ])
+
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit')]
+c['schedulers'] = []
+c['builders'] = []
+c['builders'].append({'name':'quick', 'slavename':'bot1',
+ 'builddir': 'quickdir', 'factory': f1})
+c['slavePortnum'] = 0
+"""
+
+config_run = config_base + """
+from buildbot.scheduler import Scheduler
+c['schedulers'] = [Scheduler('quick', None, 120, ['quick'])]
+"""
+
+config_can_build = config_base + """
+from buildbot.buildslave import BuildSlave
+c['slaves'] = [ BuildSlave('bot1', 'sekrit') ]
+
+from buildbot.scheduler import Scheduler
+c['schedulers'] = [Scheduler('dummy', None, 0.1, ['dummy'])]
+
+c['builders'] = [{'name': 'dummy', 'slavename': 'bot1',
+ 'builddir': 'dummy1', 'factory': f2}]
+"""
+
+config_cant_build = config_can_build + """
+class MyBuildSlave(BuildSlave):
+ def canStartBuild(self): return False
+c['slaves'] = [ MyBuildSlave('bot1', 'sekrit') ]
+"""
+
+config_concurrency = config_base + """
+from buildbot.buildslave import BuildSlave
+c['slaves'] = [ BuildSlave('bot1', 'sekrit', max_builds=1) ]
+
+from buildbot.scheduler import Scheduler
+c['schedulers'] = [Scheduler('dummy', None, 0.1, ['dummy', 'dummy2'])]
+
+c['builders'].append({'name': 'dummy', 'slavename': 'bot1',
+ 'builddir': 'dummy', 'factory': f2})
+c['builders'].append({'name': 'dummy2', 'slavename': 'bot1',
+ 'builddir': 'dummy2', 'factory': f2})
+"""
+
+config_2 = config_base + """
+c['builders'] = [{'name': 'dummy', 'slavename': 'bot1',
+ 'builddir': 'dummy1', 'factory': f2},
+ {'name': 'testdummy', 'slavename': 'bot1',
+ 'builddir': 'dummy2', 'factory': f2, 'category': 'test'}]
+"""
+
+config_3 = config_2 + """
+c['builders'].append({'name': 'adummy', 'slavename': 'bot1',
+ 'builddir': 'adummy3', 'factory': f2})
+c['builders'].append({'name': 'bdummy', 'slavename': 'bot1',
+ 'builddir': 'adummy4', 'factory': f2,
+ 'category': 'test'})
+"""
+
+config_4 = config_base + """
+c['builders'] = [{'name': 'dummy', 'slavename': 'bot1',
+ 'builddir': 'dummy', 'factory': f2}]
+"""
+
+config_4_newbasedir = config_4 + """
+c['builders'] = [{'name': 'dummy', 'slavename': 'bot1',
+ 'builddir': 'dummy2', 'factory': f2}]
+"""
+
+config_4_newbuilder = config_4_newbasedir + """
+c['builders'].append({'name': 'dummy2', 'slavename': 'bot1',
+ 'builddir': 'dummy23', 'factory': f2})
+"""
+
class Run(unittest.TestCase):
    """Smoke test: start a master, feed it a change, shut it down."""

    def rmtree(self, d):
        rmtree(d)

    def testMaster(self):
        # start from a clean basedir so repeated runs don't collide
        self.rmtree("basedir")
        os.mkdir("basedir")
        m = master.BuildMaster("basedir")
        m.loadConfig(config_run)
        m.readConfig = True
        m.startService()
        cm = m.change_svc
        c = changes.Change("bob", ["Makefile", "foo/bar.c"], "changed stuff")
        cm.addChange(c)
        # verify that the Scheduler is now waiting
        s = m.allSchedulers()[0]
        self.failUnless(s.timer)
        # halting the service will also stop the timer
        d = defer.maybeDeferred(m.stopService)
        return d
+
class CanStartBuild(RunMixin, unittest.TestCase):
    """A slave whose canStartBuild() returns False must stay idle even
    when a change triggers its builder."""

    def rmtree(self, d):
        rmtree(d)

    def testCanStartBuild(self):
        return self.do_test(config_can_build, True)

    def testCantStartBuild(self):
        return self.do_test(config_cant_build, False)

    def do_test(self, config, builder_should_run):
        # bring up the master and connect a slave, then feed one change
        self.master.loadConfig(config)
        self.master.readConfig = True
        self.master.startService()
        d = self.connectSlave()

        # send a change
        cm = self.master.change_svc
        c = changes.Change("bob", ["Makefile", "foo/bar.c"], "changed stuff")
        cm.addChange(c)

        d.addCallback(self._do_test1, builder_should_run)

        return d

    def _do_test1(self, res, builder_should_run):
        # delay a little bit. Note that relying upon timers is a bit fragile,
        # in this case we're hoping that our 0.5 second timer will land us
        # somewhere in the middle of the [0.1s, 3.1s] window (after the 0.1
        # second Scheduler fires, then during the 3-second build), so that
        # when we sample BuildSlave.state, we'll see BUILDING (or IDLE if the
        # slave was told to be unavailable). On a heavily loaded system, our
        # 0.5 second timer might not actually fire until after the build has
        # completed. In the long run, it would be good to change this test to
        # pass under those circumstances too.
        d = defer.Deferred()
        reactor.callLater(.5, d.callback, builder_should_run)
        d.addCallback(self._do_test2)
        return d

    def _do_test2(self, builder_should_run):
        # sample the slave's state mid-window and compare to expectation
        b = self.master.botmaster.builders['dummy']
        self.failUnless(len(b.slaves) == 1)

        bs = b.slaves[0]
        from buildbot.process.builder import IDLE, BUILDING
        if builder_should_run:
            self.failUnlessEqual(bs.state, BUILDING)
        else:
            self.failUnlessEqual(bs.state, IDLE)
+
+
class ConcurrencyLimit(RunMixin, unittest.TestCase):
    """A slave configured with max_builds=1 must never run two builds
    concurrently, even when two builders want it at once."""

    def testConcurrencyLimit(self):
        d = self.master.loadConfig(config_concurrency)
        d.addCallback(lambda res: self.master.startService())
        d.addCallback(lambda res: self.connectSlave())

        def _send(res):
            # send a change. This will trigger both builders at the same
            # time, but since they share a slave, the max_builds=1 setting
            # will insure that only one of the two builds gets to run.
            cm = self.master.change_svc
            c = changes.Change("bob", ["Makefile", "foo/bar.c"],
                               "changed stuff")
            cm.addChange(c)
        d.addCallback(_send)

        def _delay(res):
            d1 = defer.Deferred()
            reactor.callLater(1, d1.callback, None)
            # this test depends upon this 1s delay landing us in the middle
            # of one of the builds.
            return d1
        d.addCallback(_delay)

        def _check(res):
            builders = [ self.master.botmaster.builders[bn]
                         for bn in ('dummy', 'dummy2') ]
            for builder in builders:
                self.failUnless(len(builder.slaves) == 1)

            from buildbot.process.builder import BUILDING
            building_bs = [ builder
                            for builder in builders
                            if builder.slaves[0].state == BUILDING ]
            # assert that only one build is running right now. If the
            # max_builds= weren't in effect, this would be 2.
            self.failUnlessEqual(len(building_bs), 1)
        d.addCallback(_check)

        return d
+
+
class Ping(RunMixin, unittest.TestCase):
    """Check that a freshly connected slave answers a builder ping."""

    def testPing(self):
        master = self.master
        master.loadConfig(config_2)
        master.readConfig = True
        master.startService()

        d = self.connectSlave()
        def _ping(res):
            # ping the 'dummy' builder; the ping's result value is not
            # interesting, only that the Deferred fires without error
            return interfaces.IControl(master).getBuilder("dummy").ping(1)
        d.addCallback(_ping)
        return d
+
class BuilderNames(unittest.TestCase):
    """Checks BuildMaster status reporting of configured builder names."""

    def testGetBuilderNames(self):
        # BUGFIX: clear any leftover basedir from a previous (possibly
        # interrupted) run before creating it, mirroring Run.testMaster;
        # a stale 'bnames' directory made os.mkdir raise and the test
        # error out spuriously.
        rmtree("bnames")
        os.mkdir("bnames")
        m = master.BuildMaster("bnames")
        s = m.getStatus()

        m.loadConfig(config_3)
        m.readConfig = True

        # builders are reported in config order; category filtering
        # selects only those tagged 'test'
        self.failUnlessEqual(s.getBuilderNames(),
                             ["dummy", "testdummy", "adummy", "bdummy"])
        self.failUnlessEqual(s.getBuilderNames(categories=['test']),
                             ["testdummy", "bdummy"])
+
class Disconnect(RunMixin, unittest.TestCase):
    """Exercises slave-disconnect handling: a build must terminate with
    FAILURE when its slave vanishes before, during, or between steps."""

    def setUp(self):
        RunMixin.setUp(self)

        # verify that disconnecting the slave during a build properly
        # terminates the build
        m = self.master
        s = self.status
        c = self.control

        m.loadConfig(config_2)
        m.readConfig = True
        m.startService()

        # before any slave attaches, the builder must look offline/empty
        self.failUnlessEqual(s.getBuilderNames(), ["dummy", "testdummy"])
        self.s1 = s1 = s.getBuilder("dummy")
        self.failUnlessEqual(s1.getName(), "dummy")
        self.failUnlessEqual(s1.getState(), ("offline", []))
        self.failUnlessEqual(s1.getCurrentBuilds(), [])
        self.failUnlessEqual(s1.getLastFinishedBuild(), None)
        self.failUnlessEqual(s1.getBuild(-1), None)

        d = self.connectSlave()
        d.addCallback(self._disconnectSetup_1)
        return d

    def _disconnectSetup_1(self, res):
        # once the slave is attached, the builder goes idle
        self.failUnlessEqual(self.s1.getState(), ("idle", []))


    def verifyDisconnect(self, bs):
        # the build finished, its *first* (local 'delay') step was marked
        # interrupted, and the overall result is FAILURE
        self.failUnless(bs.isFinished())

        step1 = bs.getSteps()[0]
        self.failUnlessEqual(step1.getText(), ["delay", "interrupted"])
        self.failUnlessEqual(step1.getResults()[0], builder.FAILURE)

        self.failUnlessEqual(bs.getResults(), builder.FAILURE)

    def verifyDisconnect2(self, bs):
        # variant for a disconnect during the *second* (remote) step
        self.failUnless(bs.isFinished())

        step1 = bs.getSteps()[1]
        self.failUnlessEqual(step1.getText(), ["remote", "delay", "2 secs",
                                               "failed", "slave", "lost"])
        self.failUnlessEqual(step1.getResults()[0], builder.FAILURE)

        self.failUnlessEqual(bs.getResults(), builder.FAILURE)

    def submitBuild(self):
        # force a build on 'dummy'; the returned Deferred fires with the
        # BuildControl once the build actually starts
        ss = SourceStamp()
        br = BuildRequest("forced build", ss, "dummy")
        self.control.getBuilder("dummy").requestBuild(br)
        d = defer.Deferred()
        def _started(bc):
            br.unsubscribe(_started)
            d.callback(bc)
        br.subscribe(_started)
        return d

    def testIdle2(self):
        # now suppose the slave goes missing
        self.disappearSlave(allowReconnect=False)

        # forcing a build will work: the build detect that the slave is no
        # longer available and will be re-queued. Wait 5 seconds, then check
        # to make sure the build is still in the 'waiting for a slave' queue.
        self.control.getBuilder("dummy").original.START_BUILD_TIMEOUT = 1
        req = BuildRequest("forced build", SourceStamp(), "test_builder")
        self.failUnlessEqual(req.startCount, 0)
        self.control.getBuilder("dummy").requestBuild(req)
        # this should ping the slave, which doesn't respond, and then give up
        # after a second. The BuildRequest will be re-queued, and its
        # .startCount will be incremented.
        d = defer.Deferred()
        d.addCallback(self._testIdle2_1, req)
        reactor.callLater(3, d.callback, None)
        return d
    testIdle2.timeout = 5

    def _testIdle2_1(self, res, req):
        # one failed start attempt was recorded; the queued request can
        # still be cancelled
        self.failUnlessEqual(req.startCount, 1)
        cancelled = req.cancel()
        self.failUnless(cancelled)


    def testBuild1(self):
        # this next sequence is timing-dependent. The dummy build takes at
        # least 3 seconds to complete, and this batch of commands must
        # complete within that time.
        #
        d = self.submitBuild()
        d.addCallback(self._testBuild1_1)
        return d

    def _testBuild1_1(self, bc):
        bs = bc.getStatus()
        # now kill the slave before it gets to start the first step
        d = self.shutdownAllSlaves() # dies before it gets started
        d.addCallback(self._testBuild1_2, bs)
        return d # TODO: this used to have a 5-second timeout

    def _testBuild1_2(self, res, bs):
        # now examine the just-stopped build and make sure it is really
        # stopped. This is checking for bugs in which the slave-detach gets
        # missed or causes an exception which prevents the build from being
        # marked as "finished due to an error".
        d = bs.waitUntilFinished()
        d2 = self.master.botmaster.waitUntilBuilderDetached("dummy")
        dl = defer.DeferredList([d, d2])
        dl.addCallback(self._testBuild1_3, bs)
        return dl # TODO: this had a 5-second timeout too

    def _testBuild1_3(self, res, bs):
        self.failUnlessEqual(self.s1.getState()[0], "offline")
        self.verifyDisconnect(bs)


    def testBuild2(self):
        # this next sequence is timing-dependent
        d = self.submitBuild()
        d.addCallback(self._testBuild2_1)
        return d
    testBuild2.timeout = 30

    def _testBuild2_1(self, bc):
        bs = bc.getStatus()
        # shutdown the slave while it's running the first step
        reactor.callLater(0.5, self.shutdownAllSlaves)

        d = bs.waitUntilFinished()
        d.addCallback(self._testBuild2_2, bs)
        return d

    def _testBuild2_2(self, res, bs):
        # we hit here when the build has finished. The builder is still being
        # torn down, however, so spin for another second to allow the
        # callLater(0) in Builder.detached to fire.
        d = defer.Deferred()
        reactor.callLater(1, d.callback, None)
        d.addCallback(self._testBuild2_3, bs)
        return d

    def _testBuild2_3(self, res, bs):
        self.failUnlessEqual(self.s1.getState()[0], "offline")
        self.verifyDisconnect(bs)


    def testBuild3(self):
        # this next sequence is timing-dependent
        d = self.submitBuild()
        d.addCallback(self._testBuild3_1)
        return d
    testBuild3.timeout = 30

    def _testBuild3_1(self, bc):
        bs = bc.getStatus()
        # kill the slave while it's running the first step
        reactor.callLater(0.5, self.killSlave)
        d = bs.waitUntilFinished()
        d.addCallback(self._testBuild3_2, bs)
        return d

    def _testBuild3_2(self, res, bs):
        # the builder is still being torn down, so give it another second
        d = defer.Deferred()
        reactor.callLater(1, d.callback, None)
        d.addCallback(self._testBuild3_3, bs)
        return d

    def _testBuild3_3(self, res, bs):
        self.failUnlessEqual(self.s1.getState()[0], "offline")
        self.verifyDisconnect(bs)


    def testBuild4(self):
        # this next sequence is timing-dependent
        d = self.submitBuild()
        d.addCallback(self._testBuild4_1)
        return d
    testBuild4.timeout = 30

    def _testBuild4_1(self, bc):
        bs = bc.getStatus()
        # kill the slave while it's running the second (remote) step
        reactor.callLater(1.5, self.killSlave)
        d = bs.waitUntilFinished()
        d.addCallback(self._testBuild4_2, bs)
        return d

    def _testBuild4_2(self, res, bs):
        # at this point, the slave is in the process of being removed, so it
        # could either be 'idle' or 'offline'. I think there is a
        # reactor.callLater(0) standing between here and the offline state.
        #reactor.iterate() # TODO: remove the need for this

        self.failUnlessEqual(self.s1.getState()[0], "offline")
        self.verifyDisconnect2(bs)


    def testInterrupt(self):
        # this next sequence is timing-dependent
        d = self.submitBuild()
        d.addCallback(self._testInterrupt_1)
        return d
    testInterrupt.timeout = 30

    def _testInterrupt_1(self, bc):
        bs = bc.getStatus()
        # halt the build while it's running the first step
        reactor.callLater(0.5, bc.stopBuild, "bang go splat")
        d = bs.waitUntilFinished()
        d.addCallback(self._testInterrupt_2, bs)
        return d

    def _testInterrupt_2(self, res, bs):
        # a deliberate stop looks like an interruption of the first step
        self.verifyDisconnect(bs)


    def testDisappear(self):
        bc = self.control.getBuilder("dummy")

        # ping should succeed
        d = bc.ping(1)
        d.addCallback(self._testDisappear_1, bc)
        return d

    def _testDisappear_1(self, res, bc):
        self.failUnlessEqual(res, True)

        # now, before any build is run, make the slave disappear
        self.disappearSlave(allowReconnect=False)

        # at this point, a ping to the slave should timeout
        d = bc.ping(1)
        d.addCallback(self._testDisappear_2)
        return d
    def _testDisappear_2(self, res):
        self.failUnlessEqual(res, False)

    def testDuplicate(self):
        # a second slave with the same name replaces the vanished first one
        bc = self.control.getBuilder("dummy")
        bs = self.status.getBuilder("dummy")
        ss = bs.getSlaves()[0]

        self.failUnless(ss.isConnected())
        self.failUnlessEqual(ss.getAdmin(), "one")

        # now, before any build is run, make the first slave disappear
        self.disappearSlave(allowReconnect=False)

        d = self.master.botmaster.waitUntilBuilderDetached("dummy")
        # now let the new slave take over
        self.connectSlave2()
        d.addCallback(self._testDuplicate_1, ss)
        return d
    testDuplicate.timeout = 5

    def _testDuplicate_1(self, res, ss):
        d = self.master.botmaster.waitUntilBuilderAttached("dummy")
        d.addCallback(self._testDuplicate_2, ss)
        return d

    def _testDuplicate_2(self, res, ss):
        # the replacement slave (admin "two") is now the connected one
        self.failUnless(ss.isConnected())
        self.failUnlessEqual(ss.getAdmin(), "two")
+
+
+class Disconnect2(RunMixin, unittest.TestCase):
+
+ def setUp(self):
+ RunMixin.setUp(self)
+ # verify that disconnecting the slave during a build properly
+ # terminates the build
+ m = self.master
+ s = self.status
+ c = self.control
+
+ m.loadConfig(config_2)
+ m.readConfig = True
+ m.startService()
+
+ self.failUnlessEqual(s.getBuilderNames(), ["dummy", "testdummy"])
+ self.s1 = s1 = s.getBuilder("dummy")
+ self.failUnlessEqual(s1.getName(), "dummy")
+ self.failUnlessEqual(s1.getState(), ("offline", []))
+ self.failUnlessEqual(s1.getCurrentBuilds(), [])
+ self.failUnlessEqual(s1.getLastFinishedBuild(), None)
+ self.failUnlessEqual(s1.getBuild(-1), None)
+
+ d = self.connectSlaveFastTimeout()
+ d.addCallback(self._setup_disconnect2_1)
+ return d
+
+ def _setup_disconnect2_1(self, res):
+ self.failUnlessEqual(self.s1.getState(), ("idle", []))
+
+
+ def testSlaveTimeout(self):
+ # now suppose the slave goes missing. We want to find out when it
+ # creates a new Broker, so we reach inside and mark it with the
+ # well-known sigil of impending messy death.
+ bd = self.slaves['bot1'].getServiceNamed("bot").builders["dummy"]
+ broker = bd.remote.broker
+ broker.redshirt = 1
+
+ # make sure the keepalives will keep the connection up
+ d = defer.Deferred()
+ reactor.callLater(5, d.callback, None)
+ d.addCallback(self._testSlaveTimeout_1)
+ return d
+ testSlaveTimeout.timeout = 20
+
+ def _testSlaveTimeout_1(self, res):
+ bd = self.slaves['bot1'].getServiceNamed("bot").builders["dummy"]
+ if not bd.remote or not hasattr(bd.remote.broker, "redshirt"):
+ self.fail("slave disconnected when it shouldn't have")
+
+ d = self.master.botmaster.waitUntilBuilderDetached("dummy")
+ # whoops! how careless of me.
+ self.disappearSlave(allowReconnect=True)
+ # the slave will realize the connection is lost within 2 seconds, and
+ # reconnect.
+ d.addCallback(self._testSlaveTimeout_2)
+ return d
+
+ def _testSlaveTimeout_2(self, res):
+ # the ReconnectingPBClientFactory will attempt a reconnect in two
+ # seconds.
+ d = self.master.botmaster.waitUntilBuilderAttached("dummy")
+ d.addCallback(self._testSlaveTimeout_3)
+ return d
+
+ def _testSlaveTimeout_3(self, res):
+ # make sure it is a new connection (i.e. a new Broker)
+ bd = self.slaves['bot1'].getServiceNamed("bot").builders["dummy"]
+ self.failUnless(bd.remote, "hey, slave isn't really connected")
+ self.failIf(hasattr(bd.remote.broker, "redshirt"),
+ "hey, slave's Broker is still marked for death")
+
+
+class Basedir(RunMixin, unittest.TestCase):
+ def testChangeBuilddir(self):
+ m = self.master
+ m.loadConfig(config_4)
+ m.readConfig = True
+ m.startService()
+
+ d = self.connectSlave()
+ d.addCallback(self._testChangeBuilddir_1)
+ return d
+
+ def _testChangeBuilddir_1(self, res):
+ self.bot = bot = self.slaves['bot1'].bot
+ self.builder = builder = bot.builders.get("dummy")
+ self.failUnless(builder)
+ self.failUnlessEqual(builder.builddir, "dummy")
+ self.failUnlessEqual(builder.basedir,
+ os.path.join("slavebase-bot1", "dummy"))
+
+ d = self.master.loadConfig(config_4_newbasedir)
+ d.addCallback(self._testChangeBuilddir_2)
+ return d
+
+ def _testChangeBuilddir_2(self, res):
+ bot = self.bot
+ # this does NOT cause the builder to be replaced
+ builder = bot.builders.get("dummy")
+ self.failUnless(builder)
+ self.failUnlessIdentical(self.builder, builder)
+ # the basedir should be updated
+ self.failUnlessEqual(builder.builddir, "dummy2")
+ self.failUnlessEqual(builder.basedir,
+ os.path.join("slavebase-bot1", "dummy2"))
+
+ # add a new builder, which causes the basedir list to be reloaded
+ d = self.master.loadConfig(config_4_newbuilder)
+ return d
+
+class Triggers(RunMixin, TestFlagMixin, unittest.TestCase):
+ config_trigger = config_base + """
+from buildbot.scheduler import Triggerable, Scheduler
+from buildbot.steps.trigger import Trigger
+from buildbot.steps.dummy import Dummy
+from buildbot.test.runutils import SetTestFlagStep
+c['schedulers'] = [
+ Scheduler('triggerer', None, 0.1, ['triggerer']),
+ Triggerable('triggeree', ['triggeree'])
+]
+triggerer = factory.BuildFactory()
+triggerer.addSteps([
+ SetTestFlagStep(flagname='triggerer_started'),
+ Trigger(flunkOnFailure=True, @ARGS@),
+ SetTestFlagStep(flagname='triggerer_finished'),
+ ])
+triggeree = factory.BuildFactory([
+ s(SetTestFlagStep, flagname='triggeree_started'),
+ s(@DUMMYCLASS@),
+ s(SetTestFlagStep, flagname='triggeree_finished'),
+ ])
+c['builders'] = [{'name': 'triggerer', 'slavename': 'bot1',
+ 'builddir': 'triggerer', 'factory': triggerer},
+ {'name': 'triggeree', 'slavename': 'bot1',
+ 'builddir': 'triggeree', 'factory': triggeree}]
+"""
+
+ def mkConfig(self, args, dummyclass="Dummy"):
+ return self.config_trigger.replace("@ARGS@", args).replace("@DUMMYCLASS@", dummyclass)
+
+ def setupTest(self, args, dummyclass, checkFn):
+ self.clearFlags()
+ m = self.master
+ m.loadConfig(self.mkConfig(args, dummyclass))
+ m.readConfig = True
+ m.startService()
+
+ c = changes.Change("bob", ["Makefile", "foo/bar.c"], "changed stuff")
+ m.change_svc.addChange(c)
+
+ d = self.connectSlave(builders=['triggerer', 'triggeree'])
+ d.addCallback(self.startTimer, 0.5, checkFn)
+ return d
+
+ def startTimer(self, res, time, next_fn):
+ d = defer.Deferred()
+ reactor.callLater(time, d.callback, None)
+ d.addCallback(next_fn)
+ return d
+
+ def testTriggerBuild(self):
+ return self.setupTest("schedulerNames=['triggeree']",
+ "Dummy",
+ self._checkTriggerBuild)
+
+ def _checkTriggerBuild(self, res):
+ self.failIfFlagNotSet('triggerer_started')
+ self.failIfFlagNotSet('triggeree_started')
+ self.failIfFlagSet('triggeree_finished')
+ self.failIfFlagNotSet('triggerer_finished')
+
+ def testTriggerBuildWait(self):
+ return self.setupTest("schedulerNames=['triggeree'], waitForFinish=1",
+ "Dummy",
+ self._checkTriggerBuildWait)
+
+ def _checkTriggerBuildWait(self, res):
+ self.failIfFlagNotSet('triggerer_started')
+ self.failIfFlagNotSet('triggeree_started')
+ self.failIfFlagSet('triggeree_finished')
+ self.failIfFlagSet('triggerer_finished')
+
+class PropertyPropagation(RunMixin, TestFlagMixin, unittest.TestCase):
+ def setupTest(self, config, builders, checkFn):
+ self.clearFlags()
+ m = self.master
+ m.loadConfig(config)
+ m.readConfig = True
+ m.startService()
+
+ c = changes.Change("bob", ["Makefile", "foo/bar.c"], "changed stuff")
+ m.change_svc.addChange(c)
+
+ d = self.connectSlave(builders=builders)
+ d.addCallback(self.startTimer, 0.5, checkFn)
+ return d
+
+ def startTimer(self, res, time, next_fn):
+ d = defer.Deferred()
+ reactor.callLater(time, d.callback, None)
+ d.addCallback(next_fn)
+ return d
+
+ config_schprop = config_base + """
+from buildbot.scheduler import Scheduler
+from buildbot.steps.dummy import Dummy
+from buildbot.test.runutils import SetTestFlagStep
+from buildbot.process.properties import WithProperties
+c['schedulers'] = [
+ Scheduler('mysched', None, 0.1, ['flagcolor'], properties={'color':'red'}),
+]
+factory = factory.BuildFactory([
+ s(SetTestFlagStep, flagname='testresult',
+ value=WithProperties('color=%(color)s sched=%(scheduler)s')),
+ ])
+c['builders'] = [{'name': 'flagcolor', 'slavename': 'bot1',
+ 'builddir': 'test', 'factory': factory},
+ ]
+"""
+
+ def testScheduler(self):
+ def _check(res):
+ self.failUnlessEqual(self.getFlag('testresult'),
+ 'color=red sched=mysched')
+ return self.setupTest(self.config_schprop, ['flagcolor'], _check)
+
+ config_slaveprop = config_base + """
+from buildbot.scheduler import Scheduler
+from buildbot.steps.dummy import Dummy
+from buildbot.test.runutils import SetTestFlagStep
+from buildbot.process.properties import WithProperties
+c['schedulers'] = [
+ Scheduler('mysched', None, 0.1, ['flagcolor'])
+]
+c['slaves'] = [BuildSlave('bot1', 'sekrit', properties={'color':'orange'})]
+factory = factory.BuildFactory([
+ s(SetTestFlagStep, flagname='testresult',
+ value=WithProperties('color=%(color)s slavename=%(slavename)s')),
+ ])
+c['builders'] = [{'name': 'flagcolor', 'slavename': 'bot1',
+ 'builddir': 'test', 'factory': factory},
+ ]
+"""
+ def testSlave(self):
+ def _check(res):
+ self.failUnlessEqual(self.getFlag('testresult'),
+ 'color=orange slavename=bot1')
+ return self.setupTest(self.config_slaveprop, ['flagcolor'], _check)
+
+ config_trigger = config_base + """
+from buildbot.scheduler import Triggerable, Scheduler
+from buildbot.steps.trigger import Trigger
+from buildbot.steps.dummy import Dummy
+from buildbot.test.runutils import SetTestFlagStep
+from buildbot.process.properties import WithProperties
+c['schedulers'] = [
+ Scheduler('triggerer', None, 0.1, ['triggerer'],
+ properties={'color':'mauve', 'pls_trigger':'triggeree'}),
+ Triggerable('triggeree', ['triggeree'], properties={'color':'invisible'})
+]
+triggerer = factory.BuildFactory([
+ s(SetTestFlagStep, flagname='testresult', value='wrongone'),
+ s(Trigger, flunkOnFailure=True,
+ schedulerNames=[WithProperties('%(pls_trigger)s')],
+ set_properties={'color' : WithProperties('%(color)s')}),
+ s(SetTestFlagStep, flagname='testresult', value='triggered'),
+ ])
+triggeree = factory.BuildFactory([
+ s(SetTestFlagStep, flagname='testresult',
+ value=WithProperties('sched=%(scheduler)s color=%(color)s')),
+ ])
+c['builders'] = [{'name': 'triggerer', 'slavename': 'bot1',
+ 'builddir': 'triggerer', 'factory': triggerer},
+ {'name': 'triggeree', 'slavename': 'bot1',
+ 'builddir': 'triggeree', 'factory': triggeree}]
+"""
+ def testTrigger(self):
+ def _check(res):
+ self.failUnlessEqual(self.getFlag('testresult'),
+ 'sched=triggeree color=mauve')
+ return self.setupTest(self.config_trigger,
+ ['triggerer', 'triggeree'], _check)
+
+
+config_test_flag = config_base + """
+from buildbot.scheduler import Scheduler
+c['schedulers'] = [Scheduler('quick', None, 0.1, ['dummy'])]
+
+from buildbot.test.runutils import SetTestFlagStep
+f3 = factory.BuildFactory([
+ s(SetTestFlagStep, flagname='foo', value='bar'),
+ ])
+
+c['builders'] = [{'name': 'dummy', 'slavename': 'bot1',
+ 'builddir': 'dummy', 'factory': f3}]
+"""
+
+class TestFlag(RunMixin, TestFlagMixin, unittest.TestCase):
+ """Test for the TestFlag functionality in runutils"""
+ def testTestFlag(self):
+ m = self.master
+ m.loadConfig(config_test_flag)
+ m.readConfig = True
+ m.startService()
+
+ c = changes.Change("bob", ["Makefile", "foo/bar.c"], "changed stuff")
+ m.change_svc.addChange(c)
+
+ d = self.connectSlave()
+ d.addCallback(self._testTestFlag_1)
+ return d
+
+ def _testTestFlag_1(self, res):
+ d = defer.Deferred()
+ reactor.callLater(0.5, d.callback, None)
+ d.addCallback(self._testTestFlag_2)
+ return d
+
+ def _testTestFlag_2(self, res):
+ self.failUnlessEqual(self.getFlag('foo'), 'bar')
+
+# TODO: test everything, from Change submission to Scheduler to Build to
+# Status. Use all the status types. Specifically I want to catch recurrences
+# of the bug where I forgot to make Waterfall inherit from StatusReceiver
+# such that buildSetSubmitted failed.
+
+config_test_builder = config_base + """
+from buildbot.scheduler import Scheduler
+c['schedulers'] = [Scheduler('quick', 'dummy', 0.1, ['dummy']),
+ Scheduler('quick2', 'dummy2', 0.1, ['dummy2']),
+ Scheduler('quick3', 'dummy3', 0.1, ['dummy3'])]
+
+from buildbot.steps.shell import ShellCommand
+f3 = factory.BuildFactory([
+ s(ShellCommand, command="sleep 3", env={'blah':'blah'})
+ ])
+
+c['builders'] = [{'name': 'dummy', 'slavename': 'bot1', 'env': {'foo':'bar'},
+ 'builddir': 'dummy', 'factory': f3}]
+
+c['builders'].append({'name': 'dummy2', 'slavename': 'bot1',
+ 'env': {'blah':'bar'}, 'builddir': 'dummy2',
+ 'factory': f3})
+
+f4 = factory.BuildFactory([
+ s(ShellCommand, command="sleep 3")
+ ])
+
+c['builders'].append({'name': 'dummy3', 'slavename': 'bot1',
+ 'env': {'blah':'bar'}, 'builddir': 'dummy3',
+ 'factory': f4})
+"""
+
+class TestBuilder(RunMixin, unittest.TestCase):
+ def setUp(self):
+ RunMixin.setUp(self)
+ self.master.loadConfig(config_test_builder)
+ self.master.readConfig = True
+ self.master.startService()
+ self.connectSlave(builders=["dummy", "dummy2", "dummy3"])
+
+ def doBuilderEnvTest(self, branch, cb):
+ c = changes.Change("bob", ["Makefile", "foo/bar.c"], "changed",
+ branch=branch)
+ self.master.change_svc.addChange(c)
+
+ d = defer.Deferred()
+ reactor.callLater(0.5, d.callback, None)
+ d.addCallback(cb)
+
+ return d
+
+ def testBuilderEnv(self):
+ return self.doBuilderEnvTest("dummy", self._testBuilderEnv1)
+
+ def _testBuilderEnv1(self, res):
+ b = self.master.botmaster.builders['dummy']
+ build = b.building[0]
+ s = build.currentStep
+ self.failUnless('foo' in s.cmd.args['env'])
+ self.failUnlessEqual('bar', s.cmd.args['env']['foo'])
+ self.failUnless('blah' in s.cmd.args['env'])
+ self.failUnlessEqual('blah', s.cmd.args['env']['blah'])
+
+ def testBuilderEnvOverride(self):
+ return self.doBuilderEnvTest("dummy2", self._testBuilderEnvOverride1)
+
+ def _testBuilderEnvOverride1(self, res):
+ b = self.master.botmaster.builders['dummy2']
+ build = b.building[0]
+ s = build.currentStep
+ self.failUnless('blah' in s.cmd.args['env'])
+ self.failUnlessEqual('blah', s.cmd.args['env']['blah'])
+
+ def testBuilderNoStepEnv(self):
+ return self.doBuilderEnvTest("dummy3", self._testBuilderNoStepEnv1)
+
+ def _testBuilderNoStepEnv1(self, res):
+ b = self.master.botmaster.builders['dummy3']
+ build = b.building[0]
+ s = build.currentStep
+ self.failUnless('blah' in s.cmd.args['env'])
+ self.failUnlessEqual('bar', s.cmd.args['env']['blah'])
+
+class SchedulerWatchers(RunMixin, TestFlagMixin, unittest.TestCase):
+ config_watchable = config_base + """
+from buildbot.scheduler import AnyBranchScheduler
+from buildbot.steps.dummy import Dummy
+from buildbot.test.runutils import setTestFlag, SetTestFlagStep
+s = AnyBranchScheduler(
+ name='abs',
+ branches=None,
+ treeStableTimer=0,
+ builderNames=['a', 'b'])
+c['schedulers'] = [ s ]
+
+# count the number of times a success watcher is called
+numCalls = [ 0 ]
+def watcher(ss):
+ numCalls[0] += 1
+ setTestFlag("numCalls", numCalls[0])
+s.subscribeToSuccessfulBuilds(watcher)
+
+f = factory.BuildFactory()
+f.addStep(Dummy(timeout=0))
+c['builders'] = [{'name': 'a', 'slavename': 'bot1',
+ 'builddir': 'a', 'factory': f},
+ {'name': 'b', 'slavename': 'bot1',
+ 'builddir': 'b', 'factory': f}]
+"""
+
+ def testWatchers(self):
+ self.clearFlags()
+ m = self.master
+ m.loadConfig(self.config_watchable)
+ m.readConfig = True
+ m.startService()
+
+ c = changes.Change("bob", ["Makefile", "foo/bar.c"], "changed stuff")
+ m.change_svc.addChange(c)
+
+ d = self.connectSlave(builders=['a', 'b'])
+
+ def pause(res):
+ d = defer.Deferred()
+ reactor.callLater(1, d.callback, res)
+ return d
+ d.addCallback(pause)
+
+ def checkFn(res):
+ self.failUnlessEqual(self.getFlag('numCalls'), 1)
+ d.addCallback(checkFn)
+ return d
+
+config_priority = """
+from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.buildslave import BuildSlave
+s = factory.s
+
+from buildbot.steps.shell import ShellCommand
+f1 = factory.BuildFactory([
+ s(ShellCommand, command="sleep 3", env={'blah':'blah'})
+ ])
+
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit', max_builds=1)]
+c['schedulers'] = []
+c['builders'] = []
+c['builders'].append({'name':'quick1', 'slavename':'bot1', 'builddir': 'quickdir1', 'factory': f1})
+c['builders'].append({'name':'quick2', 'slavename':'bot1', 'builddir': 'quickdir2', 'factory': f1})
+c['slavePortnum'] = 0
+"""
+
+class BuildPrioritization(RunMixin, unittest.TestCase):
+ def rmtree(self, d):
+ rmtree(d)
+
+ def testPriority(self):
+ self.rmtree("basedir")
+ os.mkdir("basedir")
+ self.master.loadConfig(config_priority)
+ self.master.readConfig = True
+ self.master.startService()
+
+ d = self.connectSlave(builders=['quick1', 'quick2'])
+ d.addCallback(self._connected)
+
+ return d
+
+ def _connected(self, *args):
+ # Our fake source stamp
+ # we override canBeMergedWith so that our requests don't get merged together
+ ss = SourceStamp()
+ ss.canBeMergedWith = lambda x: False
+
+ # Send one request to tie up the slave before sending future requests
+ req0 = BuildRequest("reason", ss, "test_builder")
+ self.master.botmaster.builders['quick1'].submitBuildRequest(req0)
+
+ # Send 10 requests to alternating builders
+ # We fudge the submittedAt field after submitting since they're all
+ # getting submitted so close together according to time.time()
+ # and all we care about is what order they're run in.
+ reqs = []
+ self.finish_order = []
+ for i in range(10):
+ req = BuildRequest(str(i), ss, "test_builder")
+ j = i % 2 + 1
+ self.master.botmaster.builders['quick%i' % j].submitBuildRequest(req)
+ req.submittedAt = i
+ # Keep track of what order the builds finished in
+ def append(item, arg):
+ self.finish_order.append(item)
+ req.waitUntilFinished().addCallback(append, req)
+ reqs.append(req.waitUntilFinished())
+
+ dl = defer.DeferredList(reqs)
+ dl.addCallback(self._all_finished)
+
+ # After our first build finishes, we should wait for the rest to finish
+ d = req0.waitUntilFinished()
+ d.addCallback(lambda x: dl)
+ return d
+
+ def _all_finished(self, *args):
+ # The builds should have finished in proper order
+ self.failUnlessEqual([int(b.reason) for b in self.finish_order], range(10))
+
+# Test graceful shutdown when no builds are active, as well as
+# canStartBuild after graceful shutdown is initiated
+config_graceful_shutdown_idle = config_base
+class GracefulShutdownIdle(RunMixin, unittest.TestCase):
+ def testShutdown(self):
+ self.rmtree("basedir")
+ os.mkdir("basedir")
+ self.master.loadConfig(config_graceful_shutdown_idle)
+ self.master.readConfig = True
+ self.master.startService()
+ d = self.connectSlave(builders=['quick'])
+ d.addCallback(self._do_shutdown)
+ return d
+
+ def _do_shutdown(self, res):
+ bs = self.master.botmaster.builders['quick'].slaves[0]
+ # Check that the slave is accepting builds once it's connected
+ self.assertEquals(bs.slave.canStartBuild(), True)
+
+ # Monkeypatch the slave's shutdown routine since the real shutdown
+ # interrupts the test harness
+ self.did_shutdown = False
+ def _shutdown():
+ self.did_shutdown = True
+ bs.slave.shutdown = _shutdown
+
+ # Start a graceful shutdown
+ bs.slave.slave_status.setGraceful(True)
+ # Check that the slave isn't accepting builds any more
+ self.assertEquals(bs.slave.canStartBuild(), False)
+
+ # Wait a little bit and then check that we (pretended to) shut down
+ d = defer.Deferred()
+ d.addCallback(self._check_shutdown)
+ reactor.callLater(0.5, d.callback, None)
+ return d
+
+ def _check_shutdown(self, res):
+ self.assertEquals(self.did_shutdown, True)
+
+# Test graceful shutdown when two builds are active
+config_graceful_shutdown_busy = config_base + """
+from buildbot.buildslave import BuildSlave
+c['slaves'] = [ BuildSlave('bot1', 'sekrit', max_builds=2) ]
+
+from buildbot.scheduler import Scheduler
+c['schedulers'] = [Scheduler('dummy', None, 0.1, ['dummy', 'dummy2'])]
+
+c['builders'].append({'name': 'dummy', 'slavename': 'bot1',
+ 'builddir': 'dummy', 'factory': f2})
+c['builders'].append({'name': 'dummy2', 'slavename': 'bot1',
+ 'builddir': 'dummy2', 'factory': f2})
+"""
+class GracefulShutdownBusy(RunMixin, unittest.TestCase):
+ def testShutdown(self):
+ self.rmtree("basedir")
+ os.mkdir("basedir")
+ d = self.master.loadConfig(config_graceful_shutdown_busy)
+ d.addCallback(lambda res: self.master.startService())
+ d.addCallback(lambda res: self.connectSlave())
+
+ def _send(res):
+            # send a change. This will trigger both builders at the same
+            # time, and since the shared slave has max_builds=2, both
+            # builds are able to run concurrently.
+ cm = self.master.change_svc
+ c = changes.Change("bob", ["Makefile", "foo/bar.c"],
+ "changed stuff")
+ cm.addChange(c)
+ d.addCallback(_send)
+
+ def _delay(res):
+ d1 = defer.Deferred()
+ reactor.callLater(0.5, d1.callback, None)
+ # this test depends upon this 0.5s delay landing us in the middle
+ # of one of the builds.
+ return d1
+ d.addCallback(_delay)
+
+ # Start a graceful shutdown. We should be in the middle of two builds
+ def _shutdown(res):
+ bs = self.master.botmaster.builders['dummy'].slaves[0]
+ # Monkeypatch the slave's shutdown routine since the real shutdown
+ # interrupts the test harness
+ self.did_shutdown = False
+ def _shutdown():
+ self.did_shutdown = True
+ return defer.succeed(None)
+ bs.slave.shutdown = _shutdown
+ # Start a graceful shutdown
+ bs.slave.slave_status.setGraceful(True)
+
+ builders = [ self.master.botmaster.builders[bn]
+ for bn in ('dummy', 'dummy2') ]
+ for builder in builders:
+ self.failUnless(len(builder.slaves) == 1)
+ from buildbot.process.builder import BUILDING
+ building_bs = [ builder
+ for builder in builders
+ if builder.slaves[0].state == BUILDING ]
+ # assert that both builds are running right now.
+ self.failUnlessEqual(len(building_bs), 2)
+
+ d.addCallback(_shutdown)
+
+ # Wait a little bit again, and then make sure that we are still running
+ # the two builds, and haven't shutdown yet
+ d.addCallback(_delay)
+ def _check(res):
+ self.assertEquals(self.did_shutdown, False)
+ builders = [ self.master.botmaster.builders[bn]
+ for bn in ('dummy', 'dummy2') ]
+ for builder in builders:
+ self.failUnless(len(builder.slaves) == 1)
+ from buildbot.process.builder import BUILDING
+ building_bs = [ builder
+ for builder in builders
+ if builder.slaves[0].state == BUILDING ]
+ # assert that both builds are running right now.
+ self.failUnlessEqual(len(building_bs), 2)
+ d.addCallback(_check)
+
+ # Wait for all the builds to finish
+ def _wait_finish(res):
+ builders = [ self.master.botmaster.builders[bn]
+ for bn in ('dummy', 'dummy2') ]
+ builds = []
+ for builder in builders:
+ builds.append(builder.builder_status.currentBuilds[0].waitUntilFinished())
+ dl = defer.DeferredList(builds)
+ return dl
+ d.addCallback(_wait_finish)
+
+ # Wait a little bit after the builds finish, and then
+ # check that the slave has shutdown
+ d.addCallback(_delay)
+ def _check_shutdown(res):
+ # assert that we shutdown the slave
+ self.assertEquals(self.did_shutdown, True)
+ builders = [ self.master.botmaster.builders[bn]
+ for bn in ('dummy', 'dummy2') ]
+ from buildbot.process.builder import BUILDING
+ building_bs = [ builder
+ for builder in builders
+ if builder.slaves[0].state == BUILDING ]
+ # assert that no builds are running right now.
+ self.failUnlessEqual(len(building_bs), 0)
+ d.addCallback(_check_shutdown)
+
+ return d
diff --git a/buildbot/buildbot/test/test_runner.py b/buildbot/buildbot/test/test_runner.py
new file mode 100644
index 0000000..d94ef5f
--- /dev/null
+++ b/buildbot/buildbot/test/test_runner.py
@@ -0,0 +1,392 @@
+
+# this file tests the 'buildbot' command, with its various sub-commands
+
+from twisted.trial import unittest
+from twisted.python import usage
+import os, shutil, shlex
+import sets
+
+from buildbot.scripts import runner, tryclient
+
+class Options(unittest.TestCase):
+ optionsFile = "SDFsfsFSdfsfsFSD"
+
+ def make(self, d, key):
+ # we use a wacky filename here in case the test code discovers the
+ # user's real ~/.buildbot/ directory
+ os.makedirs(os.sep.join(d + [".buildbot"]))
+ f = open(os.sep.join(d + [".buildbot", self.optionsFile]), "w")
+ f.write("key = '%s'\n" % key)
+ f.close()
+
+ def check(self, d, key):
+ basedir = os.sep.join(d)
+ options = runner.loadOptions(self.optionsFile, here=basedir,
+ home=self.home)
+ if key is None:
+ self.failIf(options.has_key('key'))
+ else:
+ self.failUnlessEqual(options['key'], key)
+
+ def testFindOptions(self):
+ self.make(["home", "dir1", "dir2", "dir3"], "one")
+ self.make(["home", "dir1", "dir2"], "two")
+ self.make(["home"], "home")
+ self.home = os.path.abspath("home")
+
+ self.check(["home", "dir1", "dir2", "dir3"], "one")
+ self.check(["home", "dir1", "dir2"], "two")
+ self.check(["home", "dir1"], "home")
+
+ self.home = os.path.abspath("nothome")
+ os.makedirs(os.sep.join(["nothome", "dir1"]))
+ self.check(["nothome", "dir1"], None)
+
+ def doForce(self, args, expected):
+ o = runner.ForceOptions()
+ o.parseOptions(args)
+ self.failUnlessEqual(o.keys(), expected.keys())
+ for k in o.keys():
+ self.failUnlessEqual(o[k], expected[k],
+ "[%s] got %s instead of %s" % (k, o[k],
+ expected[k]))
+
+ def testForceOptions(self):
+ if not hasattr(shlex, "split"):
+ raise unittest.SkipTest("need python>=2.3 for shlex.split")
+
+ exp = {"builder": "b1", "reason": "reason",
+ "branch": None, "revision": None}
+ self.doForce(shlex.split("b1 reason"), exp)
+ self.doForce(shlex.split("b1 'reason'"), exp)
+ self.failUnlessRaises(usage.UsageError, self.doForce,
+ shlex.split("--builder b1 'reason'"), exp)
+ self.doForce(shlex.split("--builder b1 --reason reason"), exp)
+ self.doForce(shlex.split("--builder b1 --reason 'reason'"), exp)
+ self.doForce(shlex.split("--builder b1 --reason \"reason\""), exp)
+
+ exp['reason'] = "longer reason"
+ self.doForce(shlex.split("b1 'longer reason'"), exp)
+ self.doForce(shlex.split("b1 longer reason"), exp)
+ self.doForce(shlex.split("--reason 'longer reason' b1"), exp)
+
+
+class Create(unittest.TestCase):
+ def failUnlessIn(self, substring, string, msg=None):
+        # trial provides its own version of this assertion, but it requires
+        # python-2.3 to test strings.
+ self.failUnless(string.find(substring) != -1, msg)
+ def failUnlessExists(self, filename):
+ self.failUnless(os.path.exists(filename), "%s should exist" % filename)
+ def failIfExists(self, filename):
+ self.failIf(os.path.exists(filename), "%s should not exist" % filename)
+
+ def setUp(self):
+ self.cwd = os.getcwd()
+
+ def tearDown(self):
+ os.chdir(self.cwd)
+
+ def testMaster(self):
+ basedir = "test_runner.master"
+ options = runner.MasterOptions()
+ options.parseOptions(["-q", basedir])
+ cwd = os.getcwd()
+ runner.createMaster(options)
+ os.chdir(cwd)
+
+ tac = os.path.join(basedir, "buildbot.tac")
+ self.failUnless(os.path.exists(tac))
+ tacfile = open(tac,"rt").read()
+ self.failUnlessIn("basedir", tacfile)
+ self.failUnlessIn("configfile = r'master.cfg'", tacfile)
+ self.failUnlessIn("BuildMaster(basedir, configfile)", tacfile)
+
+ cfg = os.path.join(basedir, "master.cfg")
+ self.failIfExists(cfg)
+ samplecfg = os.path.join(basedir, "master.cfg.sample")
+ self.failUnlessExists(samplecfg)
+ cfgfile = open(samplecfg,"rt").read()
+ self.failUnlessIn("This is a sample buildmaster config file", cfgfile)
+
+ makefile = os.path.join(basedir, "Makefile.sample")
+ self.failUnlessExists(makefile)
+
+ # now verify that running it a second time (with the same options)
+ # does the right thing: nothing changes
+ runner.createMaster(options)
+ os.chdir(cwd)
+
+ self.failIfExists(os.path.join(basedir, "buildbot.tac.new"))
+ self.failUnlessExists(os.path.join(basedir, "master.cfg.sample"))
+
+ oldtac = open(os.path.join(basedir, "buildbot.tac"), "rt").read()
+
+ # mutate Makefile.sample, since it should be rewritten
+ f = open(os.path.join(basedir, "Makefile.sample"), "rt")
+ oldmake = f.read()
+ f = open(os.path.join(basedir, "Makefile.sample"), "wt")
+ f.write(oldmake)
+ f.write("# additional line added\n")
+ f.close()
+
+ # also mutate master.cfg.sample
+ f = open(os.path.join(basedir, "master.cfg.sample"), "rt")
+ oldsamplecfg = f.read()
+ f = open(os.path.join(basedir, "master.cfg.sample"), "wt")
+ f.write(oldsamplecfg)
+ f.write("# additional line added\n")
+ f.close()
+
+ # now run it again (with different options)
+ options = runner.MasterOptions()
+ options.parseOptions(["-q", "--config", "other.cfg", basedir])
+ runner.createMaster(options)
+ os.chdir(cwd)
+
+ tac = open(os.path.join(basedir, "buildbot.tac"), "rt").read()
+ self.failUnlessEqual(tac, oldtac, "shouldn't change existing .tac")
+ self.failUnlessExists(os.path.join(basedir, "buildbot.tac.new"))
+
+ make = open(os.path.join(basedir, "Makefile.sample"), "rt").read()
+ self.failUnlessEqual(make, oldmake, "*should* rewrite Makefile.sample")
+
+ samplecfg = open(os.path.join(basedir, "master.cfg.sample"),
+ "rt").read()
+ self.failUnlessEqual(samplecfg, oldsamplecfg,
+ "*should* rewrite master.cfg.sample")
+
+ def testUpgradeMaster(self):
+ # first, create a master, run it briefly, then upgrade it. Nothing
+ # should change.
+ basedir = "test_runner.master2"
+ options = runner.MasterOptions()
+ options.parseOptions(["-q", basedir])
+ cwd = os.getcwd()
+ runner.createMaster(options)
+ os.chdir(cwd)
+
+ f = open(os.path.join(basedir, "master.cfg"), "w")
+ f.write(open(os.path.join(basedir, "master.cfg.sample"), "r").read())
+ f.close()
+
+ # the upgrade process (specifically the verify-master.cfg step) will
+ # create any builder status directories that weren't already created.
+ # Create those ahead of time.
+ os.mkdir(os.path.join(basedir, "full"))
+
+ files1 = self.record_files(basedir)
+
+ # upgrade it
+ options = runner.UpgradeMasterOptions()
+ options.parseOptions(["--quiet", basedir])
+ cwd = os.getcwd()
+ runner.upgradeMaster(options)
+ os.chdir(cwd)
+
+ files2 = self.record_files(basedir)
+ self.failUnlessSameFiles(files1, files2)
+
+ # now make it look like the one that 0.7.5 creates: no public_html
+ for fn in os.listdir(os.path.join(basedir, "public_html")):
+ os.unlink(os.path.join(basedir, "public_html", fn))
+ os.rmdir(os.path.join(basedir, "public_html"))
+
+ # and make sure that upgrading it re-populates public_html
+ options = runner.UpgradeMasterOptions()
+ options.parseOptions(["-q", basedir])
+ cwd = os.getcwd()
+ runner.upgradeMaster(options)
+ os.chdir(cwd)
+
+ files3 = self.record_files(basedir)
+ self.failUnlessSameFiles(files1, files3)
+
+ # now induce an error in master.cfg and make sure that upgrade
+ # notices it.
+ f = open(os.path.join(basedir, "master.cfg"), "a")
+ f.write("raise RuntimeError('catch me please')\n")
+ f.close()
+
+ options = runner.UpgradeMasterOptions()
+ options.parseOptions(["-q", basedir])
+ cwd = os.getcwd()
+ rc = runner.upgradeMaster(options)
+ os.chdir(cwd)
+ self.failUnless(rc != 0, rc)
+ # TODO: change the way runner.py works to let us pass in a stderr
+ # filehandle, and use a StringIO to capture its output, and make sure
+ # the right error messages appear therein.
+
+
+ def failUnlessSameFiles(self, files1, files2):
+ f1 = sets.Set(files1.keys())
+ f2 = sets.Set(files2.keys())
+ msg = ""
+ if f2 - f1:
+ msg += "Missing from files1: %s\n" % (list(f2-f1),)
+ if f1 - f2:
+ msg += "Missing from files2: %s\n" % (list(f1-f2),)
+ if msg:
+ self.fail(msg)
+
+ def record_files(self, basedir):
+ allfiles = {}
+ for root, dirs, files in os.walk(basedir):
+ for f in files:
+ fn = os.path.join(root, f)
+ allfiles[fn] = ("FILE", open(fn,"rb").read())
+ for d in dirs:
+ allfiles[os.path.join(root, d)] = ("DIR",)
+ return allfiles
+
+
+ def testSlave(self):
+ basedir = "test_runner.slave"
+ options = runner.SlaveOptions()
+ options.parseOptions(["-q", basedir, "buildmaster:1234",
+ "botname", "passwd"])
+ cwd = os.getcwd()
+ runner.createSlave(options)
+ os.chdir(cwd)
+
+ tac = os.path.join(basedir, "buildbot.tac")
+ self.failUnless(os.path.exists(tac))
+ tacfile = open(tac,"rt").read()
+ self.failUnlessIn("basedir", tacfile)
+ self.failUnlessIn("buildmaster_host = 'buildmaster'", tacfile)
+ self.failUnlessIn("port = 1234", tacfile)
+ self.failUnlessIn("slavename = 'botname'", tacfile)
+ self.failUnlessIn("passwd = 'passwd'", tacfile)
+ self.failUnlessIn("keepalive = 600", tacfile)
+ self.failUnlessIn("BuildSlave(buildmaster_host, port, slavename",
+ tacfile)
+
+ makefile = os.path.join(basedir, "Makefile.sample")
+ self.failUnlessExists(makefile)
+
+ self.failUnlessExists(os.path.join(basedir, "info", "admin"))
+ self.failUnlessExists(os.path.join(basedir, "info", "host"))
+ # edit one to make sure the later install doesn't change it
+ f = open(os.path.join(basedir, "info", "admin"), "wt")
+ f.write("updated@buildbot.example.org\n")
+ f.close()
+
+ # now verify that running it a second time (with the same options)
+ # does the right thing: nothing changes
+ runner.createSlave(options)
+ os.chdir(cwd)
+
+ self.failIfExists(os.path.join(basedir, "buildbot.tac.new"))
+ admin = open(os.path.join(basedir, "info", "admin"), "rt").read()
+ self.failUnlessEqual(admin, "updated@buildbot.example.org\n")
+
+
+ # mutate Makefile.sample, since it should be rewritten
+ oldmake = open(os.path.join(basedir, "Makefile.sample"), "rt").read()
+ f = open(os.path.join(basedir, "Makefile.sample"), "wt")
+ f.write(oldmake)
+ f.write("# additional line added\n")
+ f.close()
+ oldtac = open(os.path.join(basedir, "buildbot.tac"), "rt").read()
+
+ # now run it again (with different options)
+ options = runner.SlaveOptions()
+ options.parseOptions(["-q", "--keepalive", "30",
+ basedir, "buildmaster:9999",
+ "newbotname", "passwd"])
+ runner.createSlave(options)
+ os.chdir(cwd)
+
+ tac = open(os.path.join(basedir, "buildbot.tac"), "rt").read()
+ self.failUnlessEqual(tac, oldtac, "shouldn't change existing .tac")
+ self.failUnlessExists(os.path.join(basedir, "buildbot.tac.new"))
+ tacfile = open(os.path.join(basedir, "buildbot.tac.new"),"rt").read()
+ self.failUnlessIn("basedir", tacfile)
+ self.failUnlessIn("buildmaster_host = 'buildmaster'", tacfile)
+ self.failUnlessIn("port = 9999", tacfile)
+ self.failUnlessIn("slavename = 'newbotname'", tacfile)
+ self.failUnlessIn("passwd = 'passwd'", tacfile)
+ self.failUnlessIn("keepalive = 30", tacfile)
+ self.failUnlessIn("BuildSlave(buildmaster_host, port, slavename",
+ tacfile)
+
+ make = open(os.path.join(basedir, "Makefile.sample"), "rt").read()
+ self.failUnlessEqual(make, oldmake, "*should* rewrite Makefile.sample")
+
+class Try(unittest.TestCase):
+ # test some aspects of the 'buildbot try' command
+ def makeOptions(self, contents):
+ if os.path.exists(".buildbot"):
+ shutil.rmtree(".buildbot")
+ os.mkdir(".buildbot")
+ open(os.path.join(".buildbot", "options"), "w").write(contents)
+
+ def testGetopt1(self):
+ opts = "try_connect = 'ssh'\n" + "try_builders = ['a']\n"
+ self.makeOptions(opts)
+ config = runner.TryOptions()
+ config.parseOptions([])
+ t = tryclient.Try(config)
+ self.failUnlessEqual(t.connect, "ssh")
+ self.failUnlessEqual(t.builderNames, ['a'])
+
+ def testGetopt2(self):
+ opts = ""
+ self.makeOptions(opts)
+ config = runner.TryOptions()
+ config.parseOptions(['--connect=ssh', '--builder', 'a'])
+ t = tryclient.Try(config)
+ self.failUnlessEqual(t.connect, "ssh")
+ self.failUnlessEqual(t.builderNames, ['a'])
+
+ def testGetopt3(self):
+ opts = ""
+ self.makeOptions(opts)
+ config = runner.TryOptions()
+ config.parseOptions(['--connect=ssh',
+ '--builder', 'a', '--builder=b'])
+ t = tryclient.Try(config)
+ self.failUnlessEqual(t.connect, "ssh")
+ self.failUnlessEqual(t.builderNames, ['a', 'b'])
+
+ def testGetopt4(self):
+ opts = "try_connect = 'ssh'\n" + "try_builders = ['a']\n"
+ self.makeOptions(opts)
+ config = runner.TryOptions()
+ config.parseOptions(['--builder=b'])
+ t = tryclient.Try(config)
+ self.failUnlessEqual(t.connect, "ssh")
+ self.failUnlessEqual(t.builderNames, ['b'])
+
+ def testGetTopdir(self):
+ os.mkdir("gettopdir")
+ os.mkdir(os.path.join("gettopdir", "foo"))
+ os.mkdir(os.path.join("gettopdir", "foo", "bar"))
+ open(os.path.join("gettopdir", "1"),"w").write("1")
+ open(os.path.join("gettopdir", "foo", "2"),"w").write("2")
+ open(os.path.join("gettopdir", "foo", "bar", "3"),"w").write("3")
+
+ target = os.path.abspath("gettopdir")
+ t = tryclient.getTopdir("1", "gettopdir")
+ self.failUnlessEqual(os.path.abspath(t), target)
+ t = tryclient.getTopdir("1", os.path.join("gettopdir", "foo"))
+ self.failUnlessEqual(os.path.abspath(t), target)
+ t = tryclient.getTopdir("1", os.path.join("gettopdir", "foo", "bar"))
+ self.failUnlessEqual(os.path.abspath(t), target)
+
+ target = os.path.abspath(os.path.join("gettopdir", "foo"))
+ t = tryclient.getTopdir("2", os.path.join("gettopdir", "foo"))
+ self.failUnlessEqual(os.path.abspath(t), target)
+ t = tryclient.getTopdir("2", os.path.join("gettopdir", "foo", "bar"))
+ self.failUnlessEqual(os.path.abspath(t), target)
+
+ target = os.path.abspath(os.path.join("gettopdir", "foo", "bar"))
+ t = tryclient.getTopdir("3", os.path.join("gettopdir", "foo", "bar"))
+ self.failUnlessEqual(os.path.abspath(t), target)
+
+ nonexistent = "nonexistent\n29fis3kq\tBAR"
+ # hopefully there won't be a real file with that name between here
+ # and the filesystem root.
+ self.failUnlessRaises(ValueError, tryclient.getTopdir, nonexistent)
+
diff --git a/buildbot/buildbot/test/test_scheduler.py b/buildbot/buildbot/test/test_scheduler.py
new file mode 100644
index 0000000..667e349
--- /dev/null
+++ b/buildbot/buildbot/test/test_scheduler.py
@@ -0,0 +1,348 @@
+# -*- test-case-name: buildbot.test.test_scheduler -*-
+
+import os, time
+
+from twisted.trial import unittest
+from twisted.internet import defer, reactor
+from twisted.application import service
+from twisted.spread import pb
+
+from buildbot import scheduler, sourcestamp, buildset, status
+from buildbot.changes.changes import Change
+from buildbot.scripts import tryclient
+
+
+class FakeMaster(service.MultiService):
+ d = None
+ def submitBuildSet(self, bs):
+ self.sets.append(bs)
+ if self.d:
+ reactor.callLater(0, self.d.callback, bs)
+ self.d = None
+ return pb.Referenceable() # makes the cleanup work correctly
+
+class Scheduling(unittest.TestCase):
+ def setUp(self):
+ self.master = master = FakeMaster()
+ master.sets = []
+ master.startService()
+
+ def tearDown(self):
+ d = self.master.stopService()
+ return d
+
+ def addScheduler(self, s):
+ s.setServiceParent(self.master)
+
+ def testPeriodic1(self):
+ self.addScheduler(scheduler.Periodic("quickly", ["a","b"], 2))
+ d = defer.Deferred()
+ reactor.callLater(5, d.callback, None)
+ d.addCallback(self._testPeriodic1_1)
+ return d
+ def _testPeriodic1_1(self, res):
+ self.failUnless(len(self.master.sets) > 1)
+ s1 = self.master.sets[0]
+ self.failUnlessEqual(s1.builderNames, ["a","b"])
+ self.failUnlessEqual(s1.reason, "The Periodic scheduler named 'quickly' triggered this build")
+
+ def testNightly(self):
+ # now == 15-Nov-2005, 00:05:36 AM . By using mktime, this is
+ # converted into the local timezone, which happens to match what
+ # Nightly is going to do anyway.
+ MIN=60; HOUR=60*MIN; DAY=24*3600
+ now = time.mktime((2005, 11, 15, 0, 5, 36, 1, 319, 0))
+
+ s = scheduler.Nightly('nightly', ["a"], hour=3)
+ t = s.calculateNextRunTimeFrom(now)
+ self.failUnlessEqual(int(t-now), 2*HOUR+54*MIN+24)
+
+ s = scheduler.Nightly('nightly', ["a"], minute=[3,8,54])
+ t = s.calculateNextRunTimeFrom(now)
+ self.failUnlessEqual(int(t-now), 2*MIN+24)
+
+ s = scheduler.Nightly('nightly', ["a"],
+ dayOfMonth=16, hour=1, minute=6)
+ t = s.calculateNextRunTimeFrom(now)
+ self.failUnlessEqual(int(t-now), DAY+HOUR+24)
+
+ s = scheduler.Nightly('nightly', ["a"],
+ dayOfMonth=16, hour=1, minute=3)
+ t = s.calculateNextRunTimeFrom(now)
+ self.failUnlessEqual(int(t-now), DAY+57*MIN+24)
+
+ s = scheduler.Nightly('nightly', ["a"],
+ dayOfMonth=15, hour=1, minute=3)
+ t = s.calculateNextRunTimeFrom(now)
+ self.failUnlessEqual(int(t-now), 57*MIN+24)
+
+ s = scheduler.Nightly('nightly', ["a"],
+ dayOfMonth=15, hour=0, minute=3)
+ t = s.calculateNextRunTimeFrom(now)
+ self.failUnlessEqual(int(t-now), 30*DAY-3*MIN+24)
+
+
+ def isImportant(self, change):
+ if "important" in change.files:
+ return True
+ return False
+
+ def testBranch(self):
+ s = scheduler.Scheduler("b1", "branch1", 2, ["a","b"],
+ fileIsImportant=self.isImportant)
+ self.addScheduler(s)
+
+ c0 = Change("carol", ["important"], "other branch", branch="other")
+ s.addChange(c0)
+ self.failIf(s.timer)
+ self.failIf(s.importantChanges)
+
+ c1 = Change("alice", ["important", "not important"], "some changes",
+ branch="branch1")
+ s.addChange(c1)
+ c2 = Change("bob", ["not important", "boring"], "some more changes",
+ branch="branch1")
+ s.addChange(c2)
+ c3 = Change("carol", ["important", "dull"], "even more changes",
+ branch="branch1")
+ s.addChange(c3)
+
+ self.failUnlessEqual(s.importantChanges, [c1,c3])
+ self.failUnlessEqual(s.unimportantChanges, [c2])
+ self.failUnless(s.timer)
+
+ d = defer.Deferred()
+ reactor.callLater(4, d.callback, None)
+ d.addCallback(self._testBranch_1)
+ return d
+ def _testBranch_1(self, res):
+ self.failUnlessEqual(len(self.master.sets), 1)
+ s = self.master.sets[0].source
+ self.failUnlessEqual(s.branch, "branch1")
+ self.failUnlessEqual(s.revision, None)
+ self.failUnlessEqual(len(s.changes), 3)
+ self.failUnlessEqual(s.patch, None)
+
+
+ def testAnyBranch(self):
+ s = scheduler.AnyBranchScheduler("b1", None, 1, ["a","b"],
+ fileIsImportant=self.isImportant)
+ self.addScheduler(s)
+
+ c1 = Change("alice", ["important", "not important"], "some changes",
+ branch="branch1")
+ s.addChange(c1)
+ c2 = Change("bob", ["not important", "boring"], "some more changes",
+ branch="branch1")
+ s.addChange(c2)
+ c3 = Change("carol", ["important", "dull"], "even more changes",
+ branch="branch1")
+ s.addChange(c3)
+
+ c4 = Change("carol", ["important"], "other branch", branch="branch2")
+ s.addChange(c4)
+
+ c5 = Change("carol", ["important"], "default branch", branch=None)
+ s.addChange(c5)
+
+ d = defer.Deferred()
+ reactor.callLater(2, d.callback, None)
+ d.addCallback(self._testAnyBranch_1)
+ return d
+ def _testAnyBranch_1(self, res):
+ self.failUnlessEqual(len(self.master.sets), 3)
+ self.master.sets.sort(lambda a,b: cmp(a.source.branch,
+ b.source.branch))
+
+ s1 = self.master.sets[0].source
+ self.failUnlessEqual(s1.branch, None)
+ self.failUnlessEqual(s1.revision, None)
+ self.failUnlessEqual(len(s1.changes), 1)
+ self.failUnlessEqual(s1.patch, None)
+
+ s2 = self.master.sets[1].source
+ self.failUnlessEqual(s2.branch, "branch1")
+ self.failUnlessEqual(s2.revision, None)
+ self.failUnlessEqual(len(s2.changes), 3)
+ self.failUnlessEqual(s2.patch, None)
+
+ s3 = self.master.sets[2].source
+ self.failUnlessEqual(s3.branch, "branch2")
+ self.failUnlessEqual(s3.revision, None)
+ self.failUnlessEqual(len(s3.changes), 1)
+ self.failUnlessEqual(s3.patch, None)
+
+ def testAnyBranch2(self):
+ # like testAnyBranch but without fileIsImportant
+ s = scheduler.AnyBranchScheduler("b1", None, 2, ["a","b"])
+ self.addScheduler(s)
+ c1 = Change("alice", ["important", "not important"], "some changes",
+ branch="branch1")
+ s.addChange(c1)
+ c2 = Change("bob", ["not important", "boring"], "some more changes",
+ branch="branch1")
+ s.addChange(c2)
+ c3 = Change("carol", ["important", "dull"], "even more changes",
+ branch="branch1")
+ s.addChange(c3)
+
+ c4 = Change("carol", ["important"], "other branch", branch="branch2")
+ s.addChange(c4)
+
+ d = defer.Deferred()
+ reactor.callLater(2, d.callback, None)
+ d.addCallback(self._testAnyBranch2_1)
+ return d
+ def _testAnyBranch2_1(self, res):
+ self.failUnlessEqual(len(self.master.sets), 2)
+ self.master.sets.sort(lambda a,b: cmp(a.source.branch,
+ b.source.branch))
+ s1 = self.master.sets[0].source
+ self.failUnlessEqual(s1.branch, "branch1")
+ self.failUnlessEqual(s1.revision, None)
+ self.failUnlessEqual(len(s1.changes), 3)
+ self.failUnlessEqual(s1.patch, None)
+
+ s2 = self.master.sets[1].source
+ self.failUnlessEqual(s2.branch, "branch2")
+ self.failUnlessEqual(s2.revision, None)
+ self.failUnlessEqual(len(s2.changes), 1)
+ self.failUnlessEqual(s2.patch, None)
+
+
+ def createMaildir(self, jobdir):
+ os.mkdir(jobdir)
+ os.mkdir(os.path.join(jobdir, "new"))
+ os.mkdir(os.path.join(jobdir, "cur"))
+ os.mkdir(os.path.join(jobdir, "tmp"))
+
+ jobcounter = 1
+ def pushJob(self, jobdir, job):
+ while 1:
+ filename = "job_%d" % self.jobcounter
+ self.jobcounter += 1
+ if os.path.exists(os.path.join(jobdir, "new", filename)):
+ continue
+ if os.path.exists(os.path.join(jobdir, "tmp", filename)):
+ continue
+ if os.path.exists(os.path.join(jobdir, "cur", filename)):
+ continue
+ break
+ f = open(os.path.join(jobdir, "tmp", filename), "w")
+ f.write(job)
+ f.close()
+ os.rename(os.path.join(jobdir, "tmp", filename),
+ os.path.join(jobdir, "new", filename))
+
+ def testTryJobdir(self):
+ self.master.basedir = "try_jobdir"
+ os.mkdir(self.master.basedir)
+ jobdir = "jobdir1"
+ jobdir_abs = os.path.join(self.master.basedir, jobdir)
+ self.createMaildir(jobdir_abs)
+ s = scheduler.Try_Jobdir("try1", ["a", "b"], jobdir)
+ self.addScheduler(s)
+ self.failIf(self.master.sets)
+ job1 = tryclient.createJobfile("buildsetID",
+ "branch1", "123", 1, "diff",
+ ["a", "b"])
+ self.master.d = d = defer.Deferred()
+ self.pushJob(jobdir_abs, job1)
+ d.addCallback(self._testTryJobdir_1)
+ # N.B.: if we don't have DNotify, we poll every 10 seconds, so don't
+ # set a .timeout here shorter than that. TODO: make it possible to
+ # set the polling interval, so we can make it shorter.
+ return d
+
+ def _testTryJobdir_1(self, bs):
+ self.failUnlessEqual(bs.builderNames, ["a", "b"])
+ self.failUnlessEqual(bs.source.branch, "branch1")
+ self.failUnlessEqual(bs.source.revision, "123")
+ self.failUnlessEqual(bs.source.patch, (1, "diff"))
+
+
+ def testTryUserpass(self):
+ up = [("alice","pw1"), ("bob","pw2")]
+ s = scheduler.Try_Userpass("try2", ["a", "b"], 0, userpass=up)
+ self.addScheduler(s)
+ port = s.getPort()
+ config = {'connect': 'pb',
+ 'username': 'alice',
+ 'passwd': 'pw1',
+ 'master': "localhost:%d" % port,
+ 'builders': ["a", "b"],
+ }
+ t = tryclient.Try(config)
+ ss = sourcestamp.SourceStamp("branch1", "123", (1, "diff"))
+ t.sourcestamp = ss
+ d2 = self.master.d = defer.Deferred()
+ d = t.deliverJob()
+ d.addCallback(self._testTryUserpass_1, t, d2)
+ return d
+ testTryUserpass.timeout = 5
+ def _testTryUserpass_1(self, res, t, d2):
+ # at this point, the Try object should have a RemoteReference to the
+ # status object. The FakeMaster returns a stub.
+ self.failUnless(t.buildsetStatus)
+ d2.addCallback(self._testTryUserpass_2, t)
+ return d2
+ def _testTryUserpass_2(self, bs, t):
+ # this should be the BuildSet submitted by the TryScheduler
+ self.failUnlessEqual(bs.builderNames, ["a", "b"])
+ self.failUnlessEqual(bs.source.branch, "branch1")
+ self.failUnlessEqual(bs.source.revision, "123")
+ self.failUnlessEqual(bs.source.patch, (1, "diff"))
+
+ t.cleanup()
+
+ # twisted-2.0.1 (but not later versions) seems to require a reactor
+ # iteration before stopListening actually works. TODO: investigate
+ # this.
+ d = defer.Deferred()
+ reactor.callLater(0, d.callback, None)
+ return d
+
+ def testGetBuildSets(self):
+ # validate IStatus.getBuildSets
+ s = status.builder.Status(None, ".")
+ bs1 = buildset.BuildSet(["a","b"], sourcestamp.SourceStamp(),
+ reason="one", bsid="1")
+ s.buildsetSubmitted(bs1.status)
+ self.failUnlessEqual(s.getBuildSets(), [bs1.status])
+ bs1.status.notifyFinishedWatchers()
+ self.failUnlessEqual(s.getBuildSets(), [])
+
+ def testCategory(self):
+ s1 = scheduler.Scheduler("b1", "branch1", 2, ["a","b"], categories=["categoryA", "both"])
+ self.addScheduler(s1)
+ s2 = scheduler.Scheduler("b2", "branch1", 2, ["a","b"], categories=["categoryB", "both"])
+ self.addScheduler(s2)
+
+ c0 = Change("carol", ["important"], "branch1", branch="branch1", category="categoryA")
+ s1.addChange(c0)
+ s2.addChange(c0)
+
+ c1 = Change("carol", ["important"], "branch1", branch="branch1", category="categoryB")
+ s1.addChange(c1)
+ s2.addChange(c1)
+
+ c2 = Change("carol", ["important"], "branch1", branch="branch1")
+ s1.addChange(c2)
+ s2.addChange(c2)
+
+ c3 = Change("carol", ["important"], "branch1", branch="branch1", category="both")
+ s1.addChange(c3)
+ s2.addChange(c3)
+
+ self.failUnlessEqual(s1.importantChanges, [c0, c3])
+ self.failUnlessEqual(s2.importantChanges, [c1, c3])
+
+ s = scheduler.Scheduler("b3", "branch1", 2, ["a","b"])
+ self.addScheduler(s)
+
+ c0 = Change("carol", ["important"], "branch1", branch="branch1", category="categoryA")
+ s.addChange(c0)
+ c1 = Change("carol", ["important"], "branch1", branch="branch1", category="categoryB")
+ s.addChange(c1)
+
+ self.failUnlessEqual(s.importantChanges, [c0, c1])
diff --git a/buildbot/buildbot/test/test_shell.py b/buildbot/buildbot/test/test_shell.py
new file mode 100644
index 0000000..52a17f4
--- /dev/null
+++ b/buildbot/buildbot/test/test_shell.py
@@ -0,0 +1,138 @@
+
+
+# test step.ShellCommand and the slave-side commands.ShellCommand
+
+import sys, time, os
+from twisted.trial import unittest
+from twisted.internet import reactor, defer
+from twisted.python import util
+from buildbot.slave.commands import SlaveShellCommand
+from buildbot.test.runutils import SlaveCommandTestBase
+
+class SlaveSide(SlaveCommandTestBase, unittest.TestCase):
+ def testOne(self):
+ self.setUpBuilder("test_shell.testOne")
+ emitcmd = util.sibpath(__file__, "emit.py")
+ args = {
+ 'command': [sys.executable, emitcmd, "0"],
+ 'workdir': ".",
+ }
+ d = self.startCommand(SlaveShellCommand, args)
+ d.addCallback(self.collectUpdates)
+ def _check(logs):
+ self.failUnlessEqual(logs['stdout'], "this is stdout\n")
+ self.failUnlessEqual(logs['stderr'], "this is stderr\n")
+ d.addCallback(_check)
+ return d
+
+ # TODO: move test_slavecommand.Shell and .ShellPTY over here
+
+ def _generateText(self, filename):
+ lines = []
+ for i in range(3):
+ lines.append("this is %s %d\n" % (filename, i))
+ return "".join(lines)
+
+ def testLogFiles_0(self):
+ return self._testLogFiles(0)
+
+ def testLogFiles_1(self):
+ return self._testLogFiles(1)
+
+ def testLogFiles_2(self):
+ return self._testLogFiles(2)
+
+ def testLogFiles_3(self):
+ return self._testLogFiles(3)
+
+ def _testLogFiles(self, mode):
+ basedir = "test_shell.testLogFiles"
+ self.setUpBuilder(basedir)
+ # emitlogs.py writes two lines to stdout and two logfiles, one second
+ # apart. Then it waits for us to write something to stdin, then it
+ # writes one more line.
+
+ if mode != 3:
+ # we write something to the log file first, to exercise the logic
+ # that distinguishes between the old file and the one as modified
+ # by the ShellCommand. We set the timestamp back 5 seconds so
+ # that timestamps can be used to distinguish old from new.
+ log2file = os.path.join(basedir, "log2.out")
+ f = open(log2file, "w")
+ f.write("dummy text\n")
+ f.close()
+ earlier = time.time() - 5
+ os.utime(log2file, (earlier, earlier))
+
+ if mode == 3:
+ # mode=3 doesn't create the old logfiles in the first place, but
+ # then behaves like mode=1 (where the command pauses before
+ # creating them).
+ mode = 1
+
+ # mode=1 will cause emitlogs.py to delete the old logfiles first, and
+ # then wait two seconds before creating the new files. mode=0 does
+ # not do this.
+ args = {
+ 'command': [sys.executable,
+ util.sibpath(__file__, "emitlogs.py"),
+ "%s" % mode],
+ 'workdir': ".",
+ 'logfiles': {"log2": "log2.out",
+ "log3": "log3.out"},
+ 'keep_stdin_open': True,
+ }
+ finishd = self.startCommand(SlaveShellCommand, args)
+ # The first batch of lines is written immediately. The second is
+ # written after a pause of one second. We poll once per second until
+ # we see both batches.
+
+ self._check_timeout = 10
+ d = self._check_and_wait()
+ def _wait_for_finish(res, finishd):
+ return finishd
+ d.addCallback(_wait_for_finish, finishd)
+ d.addCallback(self.collectUpdates)
+ def _check(logs):
+ self.failUnlessEqual(logs['stdout'], self._generateText("stdout"))
+ if mode == 2:
+ self.failIf(('log','log2') in logs)
+ self.failIf(('log','log3') in logs)
+ else:
+ self.failUnlessEqual(logs[('log','log2')],
+ self._generateText("log2"))
+ self.failUnlessEqual(logs[('log','log3')],
+ self._generateText("log3"))
+ d.addCallback(_check)
+ d.addBoth(self._maybePrintError)
+ return d
+
+ def _check_and_wait(self, res=None):
+ self._check_timeout -= 1
+ if self._check_timeout <= 0:
+ raise defer.TimeoutError("gave up on command")
+ logs = self.collectUpdates()
+ if logs.get('stdout') == "this is stdout 0\nthis is stdout 1\n":
+ # the emitlogs.py process is now waiting for something to arrive
+ # on stdin
+ self.cmd.command.pp.transport.write("poke\n")
+ return
+ if not self.cmd.running:
+ self.fail("command finished too early")
+ spin = defer.Deferred()
+ spin.addCallback(self._check_and_wait)
+ reactor.callLater(1, spin.callback, None)
+ return spin
+
+ def _maybePrintError(self, res):
+ rc = self.findRC()
+ if rc != 0:
+ print "Command ended with rc=%s" % rc
+ print "STDERR:"
+ self.printStderr()
+ return res
+
+ # MAYBE TODO: a command which appends to an existing logfile should
+ # result in only the new text being sent up to the master. I need to
+ # think about this more first.
+
diff --git a/buildbot/buildbot/test/test_slavecommand.py b/buildbot/buildbot/test/test_slavecommand.py
new file mode 100644
index 0000000..9809163
--- /dev/null
+++ b/buildbot/buildbot/test/test_slavecommand.py
@@ -0,0 +1,294 @@
+# -*- test-case-name: buildbot.test.test_slavecommand -*-
+
+from twisted.trial import unittest
+from twisted.internet import reactor, interfaces
+from twisted.python import runtime, failure, util
+
+import os, sys
+
+from buildbot.slave import commands
+SlaveShellCommand = commands.SlaveShellCommand
+
+from buildbot.test.runutils import SignalMixin, FakeSlaveBuilder
+
+# test slavecommand.py by running the various commands with a fake
+# SlaveBuilder object that logs the calls to sendUpdate()
+
+class Utilities(unittest.TestCase):
+ def mkdir(self, basedir, path, mode=None):
+ fn = os.path.join(basedir, path)
+ os.makedirs(fn)
+ if mode is not None:
+ os.chmod(fn, mode)
+
+ def touch(self, basedir, path, mode=None):
+ fn = os.path.join(basedir, path)
+ f = open(fn, "w")
+ f.write("touch\n")
+ f.close()
+ if mode is not None:
+ os.chmod(fn, mode)
+
+ def test_rmdirRecursive(self):
+ basedir = "slavecommand/Utilities/test_rmdirRecursive"
+ os.makedirs(basedir)
+ d = os.path.join(basedir, "doomed")
+ self.mkdir(d, "a/b")
+ self.touch(d, "a/b/1.txt")
+ self.touch(d, "a/b/2.txt", 0444)
+ self.touch(d, "a/b/3.txt", 0)
+ self.mkdir(d, "a/c")
+ self.touch(d, "a/c/1.txt")
+ self.touch(d, "a/c/2.txt", 0444)
+ self.touch(d, "a/c/3.txt", 0)
+ os.chmod(os.path.join(d, "a/c"), 0444)
+ self.mkdir(d, "a/d")
+ self.touch(d, "a/d/1.txt")
+ self.touch(d, "a/d/2.txt", 0444)
+ self.touch(d, "a/d/3.txt", 0)
+ os.chmod(os.path.join(d, "a/d"), 0)
+
+ commands.rmdirRecursive(d)
+ self.failIf(os.path.exists(d))
+
+
+class ShellBase(SignalMixin):
+
+ def setUp(self):
+ self.basedir = "test_slavecommand"
+ if not os.path.isdir(self.basedir):
+ os.mkdir(self.basedir)
+ self.subdir = os.path.join(self.basedir, "subdir")
+ if not os.path.isdir(self.subdir):
+ os.mkdir(self.subdir)
+ self.builder = FakeSlaveBuilder(self.usePTY, self.basedir)
+ self.emitcmd = util.sibpath(__file__, "emit.py")
+ self.subemitcmd = os.path.join(util.sibpath(__file__, "subdir"),
+ "emit.py")
+ self.sleepcmd = util.sibpath(__file__, "sleep.py")
+
+ def failUnlessIn(self, substring, string):
+ self.failUnless(string.find(substring) != -1,
+ "'%s' not in '%s'" % (substring, string))
+
+ def getfile(self, which):
+ got = ""
+ for r in self.builder.updates:
+ if r.has_key(which):
+ got += r[which]
+ return got
+
+ def checkOutput(self, expected):
+ """
+ @type expected: list of (streamname, contents) tuples
+ @param expected: the expected output
+ """
+ expected_linesep = os.linesep
+ if self.usePTY:
+ # PTYs change the line ending. I'm not sure why.
+ expected_linesep = "\r\n"
+ expected = [(stream, contents.replace("\n", expected_linesep, 1000))
+ for (stream, contents) in expected]
+ if self.usePTY:
+ # PTYs merge stdout+stderr into a single stream
+ expected = [('stdout', contents)
+ for (stream, contents) in expected]
+ # now merge everything into one string per stream
+ streams = {}
+ for (stream, contents) in expected:
+ streams[stream] = streams.get(stream, "") + contents
+ for (stream, contents) in streams.items():
+ got = self.getfile(stream)
+ self.assertEquals(got, contents)
+
+ def getrc(self):
+ # updates[-2] is the rc, unless the step was interrupted
+ # updates[-1] is the elapsed-time header
+ u = self.builder.updates[-1]
+ if "rc" not in u:
+ self.failUnless(len(self.builder.updates) >= 2)
+ u = self.builder.updates[-2]
+ self.failUnless("rc" in u)
+ return u['rc']
+ def checkrc(self, expected):
+ got = self.getrc()
+ self.assertEquals(got, expected)
+
+ def testShell1(self):
+ targetfile = os.path.join(self.basedir, "log1.out")
+ if os.path.exists(targetfile):
+ os.unlink(targetfile)
+ cmd = "%s %s 0" % (sys.executable, self.emitcmd)
+ args = {'command': cmd, 'workdir': '.', 'timeout': 60}
+ c = SlaveShellCommand(self.builder, None, args)
+ d = c.start()
+ expected = [('stdout', "this is stdout\n"),
+ ('stderr', "this is stderr\n")]
+ d.addCallback(self._checkPass, expected, 0)
+ def _check_targetfile(res):
+ self.failUnless(os.path.exists(targetfile))
+ d.addCallback(_check_targetfile)
+ return d
+
+ def _checkPass(self, res, expected, rc):
+ self.checkOutput(expected)
+ self.checkrc(rc)
+
+ def testShell2(self):
+ cmd = [sys.executable, self.emitcmd, "0"]
+ args = {'command': cmd, 'workdir': '.', 'timeout': 60}
+ c = SlaveShellCommand(self.builder, None, args)
+ d = c.start()
+ expected = [('stdout', "this is stdout\n"),
+ ('stderr', "this is stderr\n")]
+ d.addCallback(self._checkPass, expected, 0)
+ return d
+
+ def testShellRC(self):
+ cmd = [sys.executable, self.emitcmd, "1"]
+ args = {'command': cmd, 'workdir': '.', 'timeout': 60}
+ c = SlaveShellCommand(self.builder, None, args)
+ d = c.start()
+ expected = [('stdout', "this is stdout\n"),
+ ('stderr', "this is stderr\n")]
+ d.addCallback(self._checkPass, expected, 1)
+ return d
+
+ def testShellEnv(self):
+ cmd = "%s %s 0" % (sys.executable, self.emitcmd)
+ args = {'command': cmd, 'workdir': '.',
+ 'env': {'EMIT_TEST': "envtest"}, 'timeout': 60}
+ c = SlaveShellCommand(self.builder, None, args)
+ d = c.start()
+ expected = [('stdout', "this is stdout\n"),
+ ('stderr', "this is stderr\n"),
+ ('stdout', "EMIT_TEST: envtest\n"),
+ ]
+ d.addCallback(self._checkPass, expected, 0)
+ return d
+
+ def testShellSubdir(self):
+ targetfile = os.path.join(self.basedir, "subdir", "log1.out")
+ if os.path.exists(targetfile):
+ os.unlink(targetfile)
+ cmd = "%s %s 0" % (sys.executable, self.subemitcmd)
+ args = {'command': cmd, 'workdir': "subdir", 'timeout': 60}
+ c = SlaveShellCommand(self.builder, None, args)
+ d = c.start()
+ expected = [('stdout', "this is stdout in subdir\n"),
+ ('stderr', "this is stderr\n")]
+ d.addCallback(self._checkPass, expected, 0)
+ def _check_targetfile(res):
+ self.failUnless(os.path.exists(targetfile))
+ d.addCallback(_check_targetfile)
+ return d
+
+ def testShellMissingCommand(self):
+ args = {'command': "/bin/EndWorldHungerAndMakePigsFly",
+ 'workdir': '.', 'timeout': 10,
+ 'env': {"LC_ALL": "C"},
+ }
+ c = SlaveShellCommand(self.builder, None, args)
+ d = c.start()
+ d.addCallback(self._testShellMissingCommand_1)
+ return d
+ def _testShellMissingCommand_1(self, res):
+ self.failIfEqual(self.getrc(), 0)
+ # we used to check the error message to make sure it said something
+ # about a missing command, but there are a variety of shells out
+ # there, and they emit messages in a variety of languages, so we
+ # stopped trying.
+
+ def testTimeout(self):
+ args = {'command': [sys.executable, self.sleepcmd, "10"],
+ 'workdir': '.', 'timeout': 2}
+ c = SlaveShellCommand(self.builder, None, args)
+ d = c.start()
+ d.addCallback(self._testTimeout_1)
+ return d
+ def _testTimeout_1(self, res):
+ self.failIfEqual(self.getrc(), 0)
+ got = self.getfile('header')
+ self.failUnlessIn("command timed out: 2 seconds without output", got)
+ if runtime.platformType == "posix":
+ # the "killing pid" message is not present in windows
+ self.failUnlessIn("killing pid", got)
+ # but the process *ought* to be killed somehow
+ self.failUnlessIn("process killed by signal", got)
+ #print got
+ if runtime.platformType != 'posix':
+ testTimeout.todo = "timeout doesn't appear to work under windows"
+
+ def testInterrupt1(self):
+ args = {'command': [sys.executable, self.sleepcmd, "10"],
+ 'workdir': '.', 'timeout': 20}
+ c = SlaveShellCommand(self.builder, None, args)
+ d = c.start()
+ reactor.callLater(1, c.interrupt)
+ d.addCallback(self._testInterrupt1_1)
+ return d
+ def _testInterrupt1_1(self, res):
+ self.failIfEqual(self.getrc(), 0)
+ got = self.getfile('header')
+ self.failUnlessIn("command interrupted", got)
+ if runtime.platformType == "posix":
+ self.failUnlessIn("process killed by signal", got)
+ if runtime.platformType != 'posix':
+ testInterrupt1.todo = "interrupt doesn't appear to work under windows"
+
+
+ # todo: twisted-specific command tests
+
+class Shell(ShellBase, unittest.TestCase):
+ usePTY = False
+
+ def testInterrupt2(self):
+ # test the backup timeout. This doesn't work under a PTY, because the
+ # transport.loseConnection we do in the timeout handler actually
+ # *does* kill the process.
+ args = {'command': [sys.executable, self.sleepcmd, "5"],
+ 'workdir': '.', 'timeout': 20}
+ c = SlaveShellCommand(self.builder, None, args)
+ d = c.start()
+ c.command.BACKUP_TIMEOUT = 1
+ # make it unable to kill the child, by changing the signal it uses
+ # from SIGKILL to the do-nothing signal 0.
+ c.command.KILL = None
+ reactor.callLater(1, c.interrupt)
+ d.addBoth(self._testInterrupt2_1)
+ return d
+ def _testInterrupt2_1(self, res):
+ # the slave should raise a TimeoutError exception. In a normal build
+ # process (i.e. one that uses step.RemoteShellCommand), this
+ # exception will be handed to the Step, which will acquire an ERROR
+ # status. In our test environment, it isn't such a big deal.
+ self.failUnless(isinstance(res, failure.Failure),
+ "res is not a Failure: %s" % (res,))
+ self.failUnless(res.check(commands.TimeoutError))
+ self.checkrc(-1)
+ return
+ # the command is still actually running. Start another command, to
+ # make sure that a) the old command's output doesn't interfere with
+ # the new one, and b) the old command's actual termination doesn't
+ # break anything
+ args = {'command': [sys.executable, self.sleepcmd, "5"],
+ 'workdir': '.', 'timeout': 20}
+ c = SlaveShellCommand(self.builder, None, args)
+ d = c.start()
+ d.addCallback(self._testInterrupt2_2)
+ return d
+ def _testInterrupt2_2(self, res):
+ self.checkrc(0)
+ # N.B.: under windows, the trial process hangs out for another few
+ # seconds. I assume that the win32eventreactor is waiting for one of
+ # the lingering child processes to really finish.
+
+haveProcess = interfaces.IReactorProcess(reactor, None)
+if runtime.platformType == 'posix':
+ # test with PTYs also
+ class ShellPTY(ShellBase, unittest.TestCase):
+ usePTY = True
+ if not haveProcess:
+ ShellPTY.skip = "this reactor doesn't support IReactorProcess"
+if not haveProcess:
+ Shell.skip = "this reactor doesn't support IReactorProcess"
diff --git a/buildbot/buildbot/test/test_slaves.py b/buildbot/buildbot/test/test_slaves.py
new file mode 100644
index 0000000..4005fc6
--- /dev/null
+++ b/buildbot/buildbot/test/test_slaves.py
@@ -0,0 +1,991 @@
+# -*- test-case-name: buildbot.test.test_slaves -*-
+
+# Portions copyright Canonical Ltd. 2009
+
+from twisted.trial import unittest
+from twisted.internet import defer, reactor
+from twisted.python import log, runtime, failure
+
+from buildbot.buildslave import AbstractLatentBuildSlave
+from buildbot.test.runutils import RunMixin
+from buildbot.sourcestamp import SourceStamp
+from buildbot.process.base import BuildRequest
+from buildbot.status.builder import SUCCESS
+from buildbot.status import mail
+from buildbot.slave import bot
+
+config_1 = """
+from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.buildslave import BuildSlave
+s = factory.s
+
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit'), BuildSlave('bot2', 'sekrit'),
+ BuildSlave('bot3', 'sekrit')]
+c['schedulers'] = []
+c['slavePortnum'] = 0
+c['schedulers'] = []
+
+f1 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=1)])
+f2 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=2)])
+f3 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=3)])
+f4 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=5)])
+
+c['builders'] = [
+ {'name': 'b1', 'slavenames': ['bot1','bot2','bot3'],
+ 'builddir': 'b1', 'factory': f1},
+ ]
+"""
+
+config_2 = config_1 + """
+
+c['builders'] = [
+ {'name': 'b1', 'slavenames': ['bot1','bot2','bot3'],
+ 'builddir': 'b1', 'factory': f2},
+ ]
+
+"""
+
+config_busyness = config_1 + """
+c['builders'] = [
+ {'name': 'b1', 'slavenames': ['bot1'],
+ 'builddir': 'b1', 'factory': f3},
+ {'name': 'b2', 'slavenames': ['bot1'],
+ 'builddir': 'b2', 'factory': f4},
+ ]
+"""
+
+class Slave(RunMixin, unittest.TestCase):
+    """Scheduling tests against two ordinary (non-latent) slaves.
+
+    Covers sequential builds, simultaneous builds, fallback when a slave
+    detaches or fails its slaveping, and not claiming a slave that is
+    busy pinging.
+    """
+
+    def setUp(self):
+        RunMixin.setUp(self)
+        self.master.loadConfig(config_1)
+        self.master.startService()
+        # attach bot1 and bot2 to builder b1
+        d = self.connectSlave(["b1"])
+        d.addCallback(lambda res: self.connectSlave(["b1"], "bot2"))
+        return d
+
+    def doBuild(self, buildername):
+        """Force one build on *buildername*; returns a Deferred that
+        fires with the BuildStatus when the build finishes."""
+        br = BuildRequest("forced", SourceStamp(), 'test_builder')
+        d = br.waitUntilFinished()
+        self.control.getBuilder(buildername).requestBuild(br)
+        return d
+
+    def testSequence(self):
+        # make sure both slaves appear in the list.
+        attached_slaves = [c for c in self.master.botmaster.slaves.values()
+                           if c.slave]
+        self.failUnlessEqual(len(attached_slaves), 2)
+        b = self.master.botmaster.builders["b1"]
+        self.failUnlessEqual(len(b.slaves), 2)
+
+        # since the current scheduling algorithm is simple and does not
+        # rotate or attempt any sort of load-balancing, two builds in
+        # sequence should both use the first slave. This may change later if
+        # we move to a more sophisticated scheme.
+        b.CHOOSE_SLAVES_RANDOMLY = False
+
+        d = self.doBuild("b1")
+        d.addCallback(self._testSequence_1)
+        return d
+    def _testSequence_1(self, res):
+        self.failUnlessEqual(res.getResults(), SUCCESS)
+        self.failUnlessEqual(res.getSlavename(), "bot1")
+
+        # second build in sequence should reuse the same slave
+        d = self.doBuild("b1")
+        d.addCallback(self._testSequence_2)
+        return d
+    def _testSequence_2(self, res):
+        self.failUnlessEqual(res.getSlavename(), "bot1")
+
+
+    def testSimultaneous(self):
+        # make sure we can actually run two builds at the same time
+        d1 = self.doBuild("b1")
+        d2 = self.doBuild("b1")
+        d1.addCallback(self._testSimultaneous_1, d2)
+        return d1
+    def _testSimultaneous_1(self, res, d2):
+        self.failUnlessEqual(res.getResults(), SUCCESS)
+        b1_slavename = res.getSlavename()
+        d2.addCallback(self._testSimultaneous_2, b1_slavename)
+        return d2
+    def _testSimultaneous_2(self, res, b1_slavename):
+        self.failUnlessEqual(res.getResults(), SUCCESS)
+        b2_slavename = res.getSlavename()
+        # make sure the two builds were run by different slaves
+        slavenames = [b1_slavename, b2_slavename]
+        slavenames.sort()
+        self.failUnlessEqual(slavenames, ["bot1", "bot2"])
+
+    def testFallback1(self):
+        # detach the first slave, verify that a build is run using the second
+        # slave instead
+        d = self.shutdownSlave("bot1", "b1")
+        d.addCallback(self._testFallback1_1)
+        return d
+    def _testFallback1_1(self, res):
+        attached_slaves = [c for c in self.master.botmaster.slaves.values()
+                           if c.slave]
+        self.failUnlessEqual(len(attached_slaves), 1)
+        self.failUnlessEqual(len(self.master.botmaster.builders["b1"].slaves),
+                             1)
+        d = self.doBuild("b1")
+        d.addCallback(self._testFallback1_2)
+        return d
+    def _testFallback1_2(self, res):
+        self.failUnlessEqual(res.getResults(), SUCCESS)
+        self.failUnlessEqual(res.getSlavename(), "bot2")
+
+    def testFallback2(self):
+        # Disable the first slave, so that a slaveping will timeout. Then
+        # start a build, and verify that the non-failing (second) one is
+        # claimed for the build, and that the failing one is removed from the
+        # list.
+
+        b1 = self.master.botmaster.builders["b1"]
+        # reduce the ping time so we'll failover faster
+        b1.START_BUILD_TIMEOUT = 1
+        assert b1.CHOOSE_SLAVES_RANDOMLY
+        b1.CHOOSE_SLAVES_RANDOMLY = False
+        self.disappearSlave("bot1", "b1", allowReconnect=False)
+        d = self.doBuild("b1")
+        d.addCallback(self._testFallback2_1)
+        return d
+    def _testFallback2_1(self, res):
+        self.failUnlessEqual(res.getResults(), SUCCESS)
+        self.failUnlessEqual(res.getSlavename(), "bot2")
+        b1slaves = self.master.botmaster.builders["b1"].slaves
+        self.failUnlessEqual(len(b1slaves), 1, "whoops: %s" % (b1slaves,))
+        self.failUnlessEqual(b1slaves[0].slave.slavename, "bot2")
+
+
+    def notFinished(self, brs):
+        # utility method: assert the BuildRequestStatus has at most one
+        # build and that it has not finished yet
+        builds = brs.getBuilds()
+        self.failIf(len(builds) > 1)
+        if builds:
+            self.failIf(builds[0].isFinished())
+
+    def testDontClaimPingingSlave(self):
+        # have two slaves connect for the same builder. Do something to the
+        # first one so that slavepings are delayed (but do not fail
+        # outright).
+        timers = []
+        self.slaves['bot1'].debugOpts["stallPings"] = (10, timers)
+        br = BuildRequest("forced", SourceStamp(), 'test_builder')
+        d1 = br.waitUntilFinished()
+        self.master.botmaster.builders["b1"].CHOOSE_SLAVES_RANDOMLY = False
+        self.control.getBuilder("b1").requestBuild(br)
+        s1 = br.status # this is a BuildRequestStatus
+        # give it a chance to start pinging
+        d2 = defer.Deferred()
+        d2.addCallback(self._testDontClaimPingingSlave_1, d1, s1, timers)
+        reactor.callLater(1, d2.callback, None)
+        return d2
+    def _testDontClaimPingingSlave_1(self, res, d1, s1, timers):
+        # now the first build is running (waiting on the ping), so start the
+        # second build. This should claim the second slave, not the first,
+        # because the first is busy doing the ping.
+        self.notFinished(s1)
+        d3 = self.doBuild("b1")
+        d3.addCallback(self._testDontClaimPingingSlave_2, d1, s1, timers)
+        return d3
+    def _testDontClaimPingingSlave_2(self, res, d1, s1, timers):
+        self.failUnlessEqual(res.getSlavename(), "bot2")
+        self.notFinished(s1)
+        # now let the ping complete
+        self.failUnlessEqual(len(timers), 1)
+        timers[0].reset(0)
+        d1.addCallback(self._testDontClaimPingingSlave_3)
+        return d1
+    def _testDontClaimPingingSlave_3(self, res):
+        self.failUnlessEqual(res.getSlavename(), "bot1")
+
+class FakeLatentBuildSlave(AbstractLatentBuildSlave):
+    """Latent slave whose 'instance' is a local test slave.
+
+    start_instance() connects a slave via the owning testcase;
+    stop_instance() simulates the instance dropping off the network by
+    muting the PB broker. The class attributes below are knobs set by
+    the tests.
+    """
+
+    testcase = None       # owning RunMixin test case; set in setUp
+    stop_wait = None      # optional delay (seconds) before _stop_instance runs
+    start_message = None  # value passed to the substantiation callback
+    stopped = testing_substantiation_timeout = False
+
+    def start_instance(self):
+        # responsible for starting instance that will try to connect with
+        # this master
+        # simulate having to do some work.
+        d = defer.Deferred()
+        # when testing_substantiation_timeout is set, never fire d, so the
+        # master's missing_timeout is exercised
+        if not self.testing_substantiation_timeout:
+            reactor.callLater(0, self._start_instance, d)
+        return d
+
+    def _start_instance(self, d):
+        self.testcase.connectOneSlave(self.slavename)
+        d.callback(self.start_message)
+
+    def stop_instance(self, fast=False):
+        # responsible for shutting down instance
+        # we're going to emulate dropping off the net.
+
+        # simulate this by replacing the slave Broker's .dataReceived method
+        # with one that just throws away all data.
+        self.fast_stop_request = fast
+        if self.slavename not in self.testcase.slaves:
+            # only legal when the instance never actually started
+            assert self.testing_substantiation_timeout
+            self.stopped = True
+            return defer.succeed(None)
+        d = defer.Deferred()
+        if self.stop_wait is None:
+            self._stop_instance(d)
+        else:
+            reactor.callLater(self.stop_wait, self._stop_instance, d)
+        return d
+
+    def _stop_instance(self, d):
+        try:
+            s = self.testcase.slaves.pop(self.slavename)
+        except KeyError:
+            pass
+        else:
+            def discard(data):
+                pass
+            bot = s.getServiceNamed("bot")
+            for buildername in self.slavebuilders:
+                remote = bot.builders[buildername].remote
+                if remote is None:
+                    continue
+                broker = remote.broker
+                broker.dataReceived = discard # seal its ears
+                broker.transport.write = discard # and take away its voice
+            # also discourage it from reconnecting once the connection goes away
+            s.bf.continueTrying = False
+            # stop the service for cleanliness
+            s.stopService()
+        d.callback(None)
+
+latent_config = """
+from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.buildslave import BuildSlave
+from buildbot.test.test_slaves import FakeLatentBuildSlave
+s = factory.s
+
+BuildmasterConfig = c = {}
+c['slaves'] = [FakeLatentBuildSlave('bot1', 'sekrit',
+ ),
+ FakeLatentBuildSlave('bot2', 'sekrit',
+ ),
+ BuildSlave('bot3', 'sekrit')]
+c['schedulers'] = []
+c['slavePortnum'] = 0
+c['schedulers'] = []
+
+f1 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=1)])
+f2 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=2)])
+f3 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=3)])
+f4 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=5)])
+
+c['builders'] = [
+ {'name': 'b1', 'slavenames': ['bot1','bot2','bot3'],
+ 'builddir': 'b1', 'factory': f1},
+ ]
+"""
+
+
+class LatentSlave(RunMixin, unittest.TestCase):
+    """Tests for latent (on-demand) build slaves.
+
+    Exercises substantiation on first build, reuse while substantiated,
+    insubstantiation after the build_wait_timer fires, substantiation
+    timeout handling, service shutdown, and pinging while insubstantial.
+    """
+
+    def setUp(self):
+        # debugging
+        #import twisted.internet.base
+        #twisted.internet.base.DelayedCall.debug = True
+        # debugging
+        RunMixin.setUp(self)
+        self.master.loadConfig(latent_config)
+        self.master.startService()
+        self.bot1 = self.master.botmaster.slaves['bot1']
+        self.bot2 = self.master.botmaster.slaves['bot2']
+        self.bot3 = self.master.botmaster.slaves['bot3']
+        # the fake latent slaves need a handle back to this test case so
+        # start_instance/stop_instance can manipulate the test slaves
+        self.bot1.testcase = self
+        self.bot2.testcase = self
+        self.b1 = self.master.botmaster.builders['b1']
+
+    def doBuild(self, buildername):
+        """Force one build on *buildername*; fires when it finishes."""
+        br = BuildRequest("forced", SourceStamp(), 'test_builder')
+        d = br.waitUntilFinished()
+        self.control.getBuilder(buildername).requestBuild(br)
+        return d
+
+    def testSequence(self):
+        # make sure both slaves appear in the builder. This happens
+        # automatically, without any attaching.
+        self.assertEqual(len(self.b1.slaves), 2)
+        self.assertEqual(sorted(sb.slave.slavename for sb in self.b1.slaves),
+                         ['bot1', 'bot2'])
+        # These have not substantiated
+        self.assertEqual([sb.slave.substantiated for sb in self.b1.slaves],
+                         [False, False])
+        self.assertEqual([sb.slave.slave for sb in self.b1.slaves],
+                         [None, None])
+        # we can mix and match latent slaves and normal slaves. ATM, they
+        # are treated identically in terms of selecting slaves.
+        d = self.connectSlave(builders=['b1'], slavename='bot3')
+        d.addCallback(self._testSequence_1)
+        return d
+    def _testSequence_1(self, res):
+        # now we have all three slaves. Two are latent slaves, and one is a
+        # standard slave.
+        self.assertEqual(sorted(sb.slave.slavename for sb in self.b1.slaves),
+                         ['bot1', 'bot2', 'bot3'])
+        # Now it's time to try a build on one of the latent slaves,
+        # substantiating it.
+        # since the current scheduling algorithm is simple and does not
+        # rotate or attempt any sort of load-balancing, two builds in
+        # sequence should both use the first slave. This may change later if
+        # we move to a more sophisticated scheme.
+        self.b1.CHOOSE_SLAVES_RANDOMLY = False
+
+        self.build_deferred = self.doBuild("b1")
+        # now there's an event waiting for the slave to substantiate.
+        e = self.b1.builder_status.getEvent(-1)
+        self.assertEqual(e.text, ['substantiating'])
+        # the substantiation_deferred is an internal stash of a deferred
+        # that we'll grab so we can find the point at which the slave is
+        # substantiated but the build has not yet started.
+        d = self.bot1.substantiation_deferred
+        self.assertNotIdentical(d, None)
+        d.addCallback(self._testSequence_2)
+        return d
+    def _testSequence_2(self, res):
+        # bot 1 is substantiated.
+        self.assertNotIdentical(self.bot1.slave, None)
+        self.failUnless(self.bot1.substantiated)
+        # the event has announced its success
+        e = self.b1.builder_status.getEvent(-1)
+        self.assertEqual(e.text, ['substantiate', 'success'])
+        self.assertNotIdentical(e.finished, None)
+        # now we'll wait for the build to complete
+        d = self.build_deferred
+        del self.build_deferred
+        d.addCallback(self._testSequence_3)
+        return d
+    def _testSequence_3(self, res):
+        # build was a success!
+        self.failUnlessEqual(res.getResults(), SUCCESS)
+        self.failUnlessEqual(res.getSlavename(), "bot1")
+        # bot1 is substantiated now. bot2 has not.
+        self.failUnless(self.bot1.substantiated)
+        self.failIf(self.bot2.substantiated)
+        # bot1 is waiting a bit to see if there will be another build before
+        # it shuts down the instance ("insubstantiates")
+        self.build_wait_timer = self.bot1.build_wait_timer
+        self.assertNotIdentical(self.build_wait_timer, None)
+        self.failUnless(self.build_wait_timer.active())
+        self.assertApproximates(
+            self.bot1.build_wait_timeout,
+            self.build_wait_timer.time - runtime.seconds(),
+            2)
+        # now we'll do another build
+        d = self.doBuild("b1")
+        # the slave is already substantiated, so no event is created
+        e = self.b1.builder_status.getEvent(-1)
+        self.assertNotEqual(e.text, ['substantiating'])
+        # wait for the next build
+        d.addCallback(self._testSequence_4)
+        return d
+    def _testSequence_4(self, res):
+        # build was a success!
+        self.failUnlessEqual(res.getResults(), SUCCESS)
+        self.failUnlessEqual(res.getSlavename(), "bot1")
+        # bot1 is still waiting, but with a new timer
+        self.assertNotIdentical(self.bot1.build_wait_timer, None)
+        self.assertNotIdentical(self.build_wait_timer,
+                                self.bot1.build_wait_timer)
+        self.assertApproximates(
+            self.bot1.build_wait_timeout,
+            self.bot1.build_wait_timer.time - runtime.seconds(),
+            2)
+        del self.build_wait_timer
+        # We'll set the timer to fire sooner, and wait for it to fire.
+        self.bot1.build_wait_timer.reset(0)
+        d = defer.Deferred()
+        reactor.callLater(1, d.callback, None)
+        d.addCallback(self._testSequence_5)
+        return d
+    def _testSequence_5(self, res):
+        # slave is insubstantiated
+        self.assertIdentical(self.bot1.slave, None)
+        self.failIf(self.bot1.substantiated)
+        # Now we'll start up another build, to show that the shutdown left
+        # things in such a state that we can restart.
+        d = self.doBuild("b1")
+        # the bot can return an informative message on success that the event
+        # will render. Let's use a mechanism of our test latent bot to
+        # demonstrate that.
+        self.bot1.start_message = ['[instance id]', '[start-up time]']
+        # here's our event again:
+        self.e = self.b1.builder_status.getEvent(-1)
+        self.assertEqual(self.e.text, ['substantiating'])
+        d.addCallback(self._testSequence_6)
+        return d
+    def _testSequence_6(self, res):
+        # build was a success!
+        self.failUnlessEqual(res.getResults(), SUCCESS)
+        self.failUnlessEqual(res.getSlavename(), "bot1")
+        # the event has announced its success. (Just imagine that
+        # [instance id] and [start-up time] were actually valuable
+        # information.)
+        e = self.e
+        del self.e
+        self.assertEqual(
+            e.text,
+            ['substantiate', 'success', '[instance id]', '[start-up time]'])
+        # Now we need to clean up the timer. We could just cancel it, but
+        # we'll go through the full dance one more time to show we can.
+        # We'll set the timer to fire sooner, and wait for it to fire.
+        # Also, we'll set the build_slave to take a little bit longer to shut
+        # down, to see that it doesn't affect anything.
+        self.bot1.stop_wait = 2
+        self.bot1.build_wait_timer.reset(0)
+        d = defer.Deferred()
+        reactor.callLater(1, d.callback, None)
+        d.addCallback(self._testSequence_7)
+        return d
+    def _testSequence_7(self, res):
+        # slave is insubstantiated
+        self.assertIdentical(self.bot1.slave, None)
+        self.assertNot(self.bot1.substantiated)
+        # the remote is still not cleaned out. We'll wait for it.
+        d = defer.Deferred()
+        reactor.callLater(1, d.callback, None)
+        return d
+
+    def testNeverSubstantiated(self):
+        # When a substantiation is requested, the slave may never appear.
+        # This is a serious problem, and recovering from it is not really
+        # handled well right now (in part because a way to handle it is not
+        # clear). However, at the least, the status event will show a
+        # failure, and the slave will be told to insubstantiate, and to be
+        # removed from the botmaster as an available slave.
+        # This tells our test bot to never start, and to not complain about
+        # being told to stop without ever starting
+        self.bot1.testing_substantiation_timeout = True
+        # normally (by default) we have 20 minutes to try and connect to the
+        # remote
+        self.assertEqual(self.bot1.missing_timeout, 20*60)
+        # for testing purposes, we'll put that down to a tenth of a second!
+        self.bot1.missing_timeout = 0.1
+        # since the current scheduling algorithm is simple and does not
+        # rotate or attempt any sort of load-balancing, two builds in
+        # sequence should both use the first slave. This may change later if
+        # we move to a more sophisticated scheme.
+        self.b1.CHOOSE_SLAVES_RANDOMLY = False
+        # start a build
+        self.build_deferred = self.doBuild('b1')
+        # the event tells us we are instantiating, as usual
+        e = self.b1.builder_status.getEvent(-1)
+        self.assertEqual(e.text, ['substantiating'])
+        # we'll see in a moment that the test flag we have to show that the
+        # bot was told to insubstantiate has been fired. Here, we just verify
+        # that it is ready to be fired.
+        self.failIf(self.bot1.stopped)
+        # That substantiation is going to fail. Let's wait for it.
+        d = self.bot1.substantiation_deferred
+        self.assertNotIdentical(d, None)
+        d.addCallbacks(self._testNeverSubstantiated_BadSuccess,
+                       self._testNeverSubstantiated_1)
+        return d
+    def _testNeverSubstantiated_BadSuccess(self, res):
+        self.fail('we should not have succeeded here.')
+    def _testNeverSubstantiated_1(self, res):
+        # ok, we failed.
+        self.assertIdentical(self.bot1.slave, None)
+        self.failIf(self.bot1.substantiated)
+        self.failUnless(isinstance(res, failure.Failure))
+        self.assertIdentical(self.bot1.substantiation_deferred, None)
+        # our event informs us of this
+        e1 = self.b1.builder_status.getEvent(-3)
+        self.assertEqual(e1.text, ['substantiate', 'failed'])
+        self.assertNotIdentical(e1.finished, None)
+        # the slave is no longer available to build. The events show it...
+        e2 = self.b1.builder_status.getEvent(-2)
+        self.assertEqual(e2.text, ['removing', 'latent', 'bot1'])
+        e3 = self.b1.builder_status.getEvent(-1)
+        self.assertEqual(e3.text, ['disconnect', 'bot1'])
+        # ...and the builder shows it.
+        self.assertEqual(['bot2'],
+                         [sb.slave.slavename for sb in self.b1.slaves])
+        # ideally, we would retry the build, but that infrastructure (which
+        # would be used for other situations in the builder as well) does not
+        # yet exist. Therefore the build never completes one way or the
+        # other, just as if a normal slave detached.
+
+    def testServiceStop(self):
+        # if the slave has an instance when it is stopped, the slave should
+        # be told to shut down.
+        self.b1.CHOOSE_SLAVES_RANDOMLY = False
+        d = self.doBuild("b1")
+        d.addCallback(self._testServiceStop_1)
+        return d
+    def _testServiceStop_1(self, res):
+        # build was a success!
+        self.failUnlessEqual(res.getResults(), SUCCESS)
+        self.failUnlessEqual(res.getSlavename(), "bot1")
+        # bot 1 is substantiated.
+        self.assertNotIdentical(self.bot1.slave, None)
+        self.failUnless(self.bot1.substantiated)
+        # now let's stop the bot.
+        d = self.bot1.stopService()
+        d.addCallback(self._testServiceStop_2)
+        return d
+    def _testServiceStop_2(self, res):
+        # bot 1 is NOT substantiated.
+        self.assertIdentical(self.bot1.slave, None)
+        self.failIf(self.bot1.substantiated)
+
+    def testPing(self):
+        # While a latent slave pings normally when it is substantiated, (as
+        # happens behind the scenes when a build is requested), when
+        # it is insubstantial, the ping is a no-op success.
+        self.assertIdentical(self.bot1.slave, None)
+        self.failIf(self.bot1.substantiated)
+        d = self.connectSlave(builders=['b1'], slavename='bot3')
+        d.addCallback(self._testPing_1)
+        return d
+    def _testPing_1(self, res):
+        self.assertEqual(sorted(sb.slave.slavename for sb in self.b1.slaves),
+                         ['bot1', 'bot2', 'bot3'])
+        d = self.control.getBuilder('b1').ping()
+        d.addCallback(self._testPing_2)
+        return d
+    def _testPing_2(self, res):
+        # all three pings were successful
+        self.assert_(res)
+        # but neither bot1 nor bot2 substantiated.
+        self.assertIdentical(self.bot1.slave, None)
+        self.failIf(self.bot1.substantiated)
+        self.assertIdentical(self.bot2.slave, None)
+        self.failIf(self.bot2.substantiated)
+
+
+class SlaveBusyness(RunMixin, unittest.TestCase):
+    """Verify SlaveStatus.getRunningBuilds() tracks builds on one slave
+    that serves two builders (config_busyness)."""
+
+    def setUp(self):
+        RunMixin.setUp(self)
+        self.master.loadConfig(config_busyness)
+        self.master.startService()
+        d = self.connectSlave(["b1", "b2"])
+        return d
+
+    def doBuild(self, buildername):
+        """Force one build on *buildername*; fires when it finishes."""
+        br = BuildRequest("forced", SourceStamp(), 'test_builder')
+        d = br.waitUntilFinished()
+        self.control.getBuilder(buildername).requestBuild(br)
+        return d
+
+    def getRunningBuilds(self):
+        # number of builds bot1 is currently running, per the status API
+        return len(self.status.getSlave("bot1").getRunningBuilds())
+
+    def testSlaveNotBusy(self):
+        self.failUnlessEqual(self.getRunningBuilds(), 0)
+        # now kick a build, wait for it to finish, then check again
+        d = self.doBuild("b1")
+        d.addCallback(self._testSlaveNotBusy_1)
+        return d
+
+    def _testSlaveNotBusy_1(self, res):
+        self.failUnlessEqual(self.getRunningBuilds(), 0)
+
+    def testSlaveBusyOneBuild(self):
+        # check the count mid-build (after .5s) and again after it finishes
+        d1 = self.doBuild("b1")
+        d2 = defer.Deferred()
+        reactor.callLater(.5, d2.callback, None)
+        d2.addCallback(self._testSlaveBusyOneBuild_1)
+        d1.addCallback(self._testSlaveBusyOneBuild_finished_1)
+        return defer.DeferredList([d1,d2])
+
+    def _testSlaveBusyOneBuild_1(self, res):
+        self.failUnlessEqual(self.getRunningBuilds(), 1)
+
+    def _testSlaveBusyOneBuild_finished_1(self, res):
+        self.failUnlessEqual(self.getRunningBuilds(), 0)
+
+    def testSlaveBusyTwoBuilds(self):
+        # two simultaneous builds on different builders, same slave
+        d1 = self.doBuild("b1")
+        d2 = self.doBuild("b2")
+        d3 = defer.Deferred()
+        reactor.callLater(.5, d3.callback, None)
+        d3.addCallback(self._testSlaveBusyTwoBuilds_1)
+        d1.addCallback(self._testSlaveBusyTwoBuilds_finished_1, d2)
+        return defer.DeferredList([d1,d3])
+
+    def _testSlaveBusyTwoBuilds_1(self, res):
+        self.failUnlessEqual(self.getRunningBuilds(), 2)
+
+    def _testSlaveBusyTwoBuilds_finished_1(self, res, d2):
+        # b1 (shorter factory) finished first; b2 should still be running
+        self.failUnlessEqual(self.getRunningBuilds(), 1)
+        d2.addCallback(self._testSlaveBusyTwoBuilds_finished_2)
+        return d2
+
+    def _testSlaveBusyTwoBuilds_finished_2(self, res):
+        self.failUnlessEqual(self.getRunningBuilds(), 0)
+
+    def testSlaveDisconnect(self):
+        # a disconnect mid-build should drop the running-build count to zero
+        d1 = self.doBuild("b1")
+        d2 = defer.Deferred()
+        reactor.callLater(.5, d2.callback, None)
+        d2.addCallback(self._testSlaveDisconnect_1)
+        d1.addCallback(self._testSlaveDisconnect_finished_1)
+        return defer.DeferredList([d1, d2])
+
+    def _testSlaveDisconnect_1(self, res):
+        self.failUnlessEqual(self.getRunningBuilds(), 1)
+        return self.shutdownAllSlaves()
+
+    def _testSlaveDisconnect_finished_1(self, res):
+        self.failUnlessEqual(self.getRunningBuilds(), 0)
+
+config_3 = """
+from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.buildslave import BuildSlave
+s = factory.s
+
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit')]
+c['schedulers'] = []
+c['slavePortnum'] = 0
+c['schedulers'] = []
+
+f1 = factory.BuildFactory([s(dummy.Wait, handle='one')])
+f2 = factory.BuildFactory([s(dummy.Wait, handle='two')])
+f3 = factory.BuildFactory([s(dummy.Wait, handle='three')])
+
+c['builders'] = [
+ {'name': 'b1', 'slavenames': ['bot1'],
+ 'builddir': 'b1', 'factory': f1},
+ ]
+"""
+
+config_4 = config_3 + """
+c['builders'] = [
+ {'name': 'b1', 'slavenames': ['bot1'],
+ 'builddir': 'b1', 'factory': f2},
+ ]
+"""
+
+config_5 = config_3 + """
+c['builders'] = [
+ {'name': 'b1', 'slavenames': ['bot1'],
+ 'builddir': 'b1', 'factory': f3},
+ ]
+"""
+
+from buildbot.slave.commands import waitCommandRegistry
+
+class Reconfig(RunMixin, unittest.TestCase):
+    """Reconfiguring a Builder must not interrupt running builds or lose
+    queued BuildRequests; the next build must use the new factory.
+
+    The dummy.Wait steps block until we fire d2/d4/d6, giving the test
+    precise control over when each build finishes.
+    """
+
+    def setUp(self):
+        RunMixin.setUp(self)
+        self.master.loadConfig(config_3)
+        self.master.startService()
+        d = self.connectSlave(["b1"])
+        return d
+
+    def _one_started(self):
+        # invoked (via waitCommandRegistry) when build1's Wait step starts
+        log.msg("testReconfig._one_started")
+        self.build1_started = True
+        self.d1.callback(None)
+        return self.d2
+
+    def _two_started(self):
+        log.msg("testReconfig._two_started")
+        self.build2_started = True
+        self.d3.callback(None)
+        return self.d4
+
+    def _three_started(self):
+        log.msg("testReconfig._three_started")
+        self.build3_started = True
+        self.d5.callback(None)
+        return self.d6
+
+    def testReconfig(self):
+        # reconfiguring a Builder should not interrupt any running Builds. No
+        # queued BuildRequests should be lost. The next Build started should
+        # use the new process.
+        slave1 = self.slaves['bot1']
+        bot1 = slave1.getServiceNamed('bot')
+        sb1 = bot1.builders['b1']
+        self.failUnless(isinstance(sb1, bot.SlaveBuilder))
+        self.failUnless(sb1.running)
+        b1 = self.master.botmaster.builders['b1']
+        self.orig_b1 = b1
+
+        self.d1 = d1 = defer.Deferred()
+        self.d2 = d2 = defer.Deferred()
+        self.d3, self.d4 = defer.Deferred(), defer.Deferred()
+        self.d5, self.d6 = defer.Deferred(), defer.Deferred()
+        self.build1_started = False
+        self.build2_started = False
+        self.build3_started = False
+        # hook the (handle, reason) pairs so each Wait step calls back here
+        waitCommandRegistry[("one","build1")] = self._one_started
+        waitCommandRegistry[("two","build2")] = self._two_started
+        waitCommandRegistry[("three","build3")] = self._three_started
+
+        # use different branches to make sure these cannot be merged
+        br1 = BuildRequest("build1", SourceStamp(branch="1"), 'test_builder')
+        b1.submitBuildRequest(br1)
+        br2 = BuildRequest("build2", SourceStamp(branch="2"), 'test_builder')
+        b1.submitBuildRequest(br2)
+        br3 = BuildRequest("build3", SourceStamp(branch="3"), 'test_builder')
+        b1.submitBuildRequest(br3)
+        self.requests = (br1, br2, br3)
+        # all three are now in the queue
+
+        # wait until the first one has started
+        d1.addCallback(self._testReconfig_2)
+        return d1
+
+    def _testReconfig_2(self, res):
+        log.msg("_testReconfig_2")
+        # confirm that it is building
+        brs = self.requests[0].status.getBuilds()
+        self.failUnlessEqual(len(brs), 1)
+        self.build1 = brs[0]
+        self.failUnlessEqual(self.build1.getCurrentStep().getName(), "wait")
+        # br1 is building, br2 and br3 are in the queue (in that order). Now
+        # we reconfigure the Builder.
+        self.failUnless(self.build1_started)
+        d = self.master.loadConfig(config_4)
+        d.addCallback(self._testReconfig_3)
+        return d
+
+    def _testReconfig_3(self, res):
+        log.msg("_testReconfig_3")
+        # now check to see that br1 is still building, and that br2 and br3
+        # are in the queue of the new builder
+        b1 = self.master.botmaster.builders['b1']
+        self.failIfIdentical(b1, self.orig_b1)
+        self.failIf(self.build1.isFinished())
+        self.failUnlessEqual(self.build1.getCurrentStep().getName(), "wait")
+        self.failUnlessEqual(len(b1.buildable), 2)
+        self.failUnless(self.requests[1] in b1.buildable)
+        self.failUnless(self.requests[2] in b1.buildable)
+
+        # allow br1 to finish, and make sure its status is delivered normally
+        d = self.requests[0].waitUntilFinished()
+        d.addCallback(self._testReconfig_4)
+        self.d2.callback(None)
+        return d
+
+    def _testReconfig_4(self, bs):
+        log.msg("_testReconfig_4")
+        self.failUnlessEqual(bs.getReason(), "build1")
+        self.failUnless(bs.isFinished())
+        self.failUnlessEqual(bs.getResults(), SUCCESS)
+
+        # at this point, the first build has finished, and there is a pending
+        # call to start the second build. Once that pending call fires, there
+        # is a network roundtrip before the 'wait' RemoteCommand is delivered
+        # to the slave. We need to wait for both events to happen before we
+        # can check to make sure it is using the correct process. Just wait a
+        # full second.
+        d = defer.Deferred()
+        d.addCallback(self._testReconfig_5)
+        reactor.callLater(1, d.callback, None)
+        return d
+
+    def _testReconfig_5(self, res):
+        log.msg("_testReconfig_5")
+        # at this point the next build ought to be running
+        b1 = self.master.botmaster.builders['b1']
+        self.failUnlessEqual(len(b1.buildable), 1)
+        self.failUnless(self.requests[2] in b1.buildable)
+        self.failUnlessEqual(len(b1.building), 1)
+        # and it ought to be using the new process
+        self.failUnless(self.build2_started)
+
+        # now, while the second build is running, change the config multiple
+        # times.
+
+        d = self.master.loadConfig(config_3)
+        d.addCallback(lambda res: self.master.loadConfig(config_4))
+        d.addCallback(lambda res: self.master.loadConfig(config_5))
+        def _done(res):
+            # then once that's done, allow the second build to finish and
+            # wait for it to complete
+            da = self.requests[1].waitUntilFinished()
+            self.d4.callback(None)
+            return da
+        d.addCallback(_done)
+        def _done2(res):
+            # and once *that*'s done, wait another second to let the third
+            # build start
+            db = defer.Deferred()
+            reactor.callLater(1, db.callback, None)
+            return db
+        d.addCallback(_done2)
+        d.addCallback(self._testReconfig_6)
+        return d
+
+    def _testReconfig_6(self, res):
+        log.msg("_testReconfig_6")
+        # now check to see that the third build is running
+        self.failUnless(self.build3_started)
+
+        # we're done
+
+
+
+class Slave2(RunMixin, unittest.TestCase):
+    """Queue-ordering tests: a build whose slave fails the slaveping must
+    be re-queued at the front of the line."""
+
+    # counter used to give every request a distinct revision (see doBuild)
+    revision = 0
+
+    def setUp(self):
+        RunMixin.setUp(self)
+        self.master.loadConfig(config_1)
+        self.master.startService()
+
+    def doBuild(self, buildername, reason="forced"):
+        # we need to prevent these builds from being merged, so we create
+        # each of them with a different revision specifier. The revision is
+        # ignored because our build process does not have a source checkout
+        # step.
+        self.revision += 1
+        br = BuildRequest(reason, SourceStamp(revision=self.revision),
+                          'test_builder')
+        d = br.waitUntilFinished()
+        self.control.getBuilder(buildername).requestBuild(br)
+        return d
+
+    def testFirstComeFirstServed(self):
+        # submit three builds, then connect a slave which fails the
+        # slaveping. The first build will claim the slave, do the slaveping,
+        # give up, and re-queue the build. Verify that the build gets
+        # re-queued in front of all other builds. This may be tricky, because
+        # the other builds may attempt to claim the just-failed slave.
+
+        d1 = self.doBuild("b1", "first")
+        d2 = self.doBuild("b1", "second")
+        #buildable = self.master.botmaster.builders["b1"].buildable
+        #print [b.reason for b in buildable]
+
+        # specifically, I want the poor build to get precedence over any
+        # others that were waiting. To test this, we need more builds than
+        # slaves.
+
+        # now connect a broken slave. The first build started as soon as it
+        # connects, so by the time we get to our _1 method, the ill-fated
+        # build has already started.
+        d = self.connectSlave(["b1"], opts={"failPingOnce": True})
+        d.addCallback(self._testFirstComeFirstServed_1, d1, d2)
+        return d
+    def _testFirstComeFirstServed_1(self, res, d1, d2):
+        # the master has sent the slaveping. When this is received, it will
+        # fail, causing the master to hang up on the slave. When it
+        # reconnects, it should find the first build at the front of the
+        # queue. If we simply wait for both builds to complete, then look at
+        # the status logs, we should see that the builds ran in the correct
+        # order.
+
+        d = defer.DeferredList([d1,d2])
+        d.addCallback(self._testFirstComeFirstServed_2)
+        return d
+    def _testFirstComeFirstServed_2(self, res):
+        b = self.status.getBuilder("b1")
+        builds = b.getBuild(0), b.getBuild(1)
+        reasons = [build.getReason() for build in builds]
+        self.failUnlessEqual(reasons, ["first", "second"])
+
+# Three builders all served by the same three slaves; used by
+# BuildSlave.test_track_builders.
+config_multi_builders = config_1 + """
+c['builders'] = [
+    {'name': 'dummy', 'slavenames': ['bot1','bot2','bot3'],
+     'builddir': 'b1', 'factory': f2},
+    {'name': 'dummy2', 'slavenames': ['bot1','bot2','bot3'],
+     'builddir': 'b2', 'factory': f2},
+    {'name': 'dummy3', 'slavenames': ['bot1','bot2','bot3'],
+     'builddir': 'b3', 'factory': f2},
+    ]
+
+"""
+
+# Single slave with notify_on_missing and a 1-second missing_timeout;
+# used by BuildSlave.test_mail_on_missing.
+config_mail_missing = config_1 + """
+c['slaves'] = [BuildSlave('bot1', 'sekrit', notify_on_missing='admin',
+                          missing_timeout=1)]
+c['builders'] = [
+    {'name': 'dummy', 'slavenames': ['bot1'],
+     'builddir': 'b1', 'factory': f1},
+    ]
+c['projectName'] = 'myproject'
+c['projectURL'] = 'myURL'
+"""
+
+class FakeMailer(mail.MailNotifier):
+    """MailNotifier that records messages instead of sending them.
+
+    Callers must set a .messages list on the instance before use.
+    """
+    def sendMessage(self, m, recipients):
+        self.messages.append((m,recipients))
+        return defer.succeed(None)
+
+class BuildSlave(RunMixin, unittest.TestCase):
+    """Tests for per-slave bookkeeping: builder tracking and the
+    notify_on_missing mail sent when a slave stays disconnected."""
+
+    def test_track_builders(self):
+        self.master.loadConfig(config_multi_builders)
+        self.master.readConfig = True
+        self.master.startService()
+        d = self.connectSlave()
+
+        def _check(res):
+            b = self.master.botmaster.builders['dummy']
+            self.failUnless(len(b.slaves) == 1) # just bot1
+
+            # the one attached slave should have a slavebuilder for each of
+            # the three configured builders
+            bs = b.slaves[0].slave
+            self.failUnless(len(bs.slavebuilders) == 3)
+            self.failUnless(b in [sb.builder for sb in
+                                  bs.slavebuilders.values()])
+
+        d.addCallback(_check)
+        return d
+
+    def test_mail_on_missing(self):
+        self.master.loadConfig(config_mail_missing)
+        self.master.readConfig = True
+        self.master.startService()
+        fm = FakeMailer("buildbot@example.org")
+        fm.messages = []
+        fm.setServiceParent(self.master)
+        self.master.statusTargets.append(fm)
+
+        d = self.connectSlave()
+        d.addCallback(self.stall, 1)
+        d.addCallback(lambda res: self.shutdownSlave("bot1", "dummy"))
+        def _not_yet(res):
+            self.failIf(fm.messages)
+        d.addCallback(_not_yet)
+        # we reconnect right away, so the timer shouldn't fire
+        d.addCallback(lambda res: self.connectSlave())
+        d.addCallback(self.stall, 3)
+        d.addCallback(_not_yet)
+        d.addCallback(lambda res: self.shutdownSlave("bot1", "dummy"))
+        d.addCallback(_not_yet)
+        # now we let it sit disconnected for long enough for the timer to
+        # fire
+        d.addCallback(self.stall, 3)
+        def _check(res):
+            self.failUnlessEqual(len(fm.messages), 1)
+            msg,recips = fm.messages[0]
+            self.failUnlessEqual(recips, ["admin"])
+            body = msg.as_string()
+            self.failUnlessIn("To: admin", body)
+            self.failUnlessIn("Subject: Buildbot: buildslave bot1 was lost",
+                              body)
+            self.failUnlessIn("From: buildbot@example.org", body)
+            self.failUnlessIn("working for 'myproject'", body)
+            self.failUnlessIn("has noticed that the buildslave named bot1 went away",
+                              body)
+            self.failUnlessIn("was 'one'", body)
+            self.failUnlessIn("myURL", body)
+        d.addCallback(_check)
+        return d
+
+    def stall(self, result, delay=1):
+        """Pass *result* through after *delay* seconds (callback helper)."""
+        d = defer.Deferred()
+        reactor.callLater(delay, d.callback, result)
+        return d
diff --git a/buildbot/buildbot/test/test_status.py b/buildbot/buildbot/test/test_status.py
new file mode 100644
index 0000000..b3c162a
--- /dev/null
+++ b/buildbot/buildbot/test/test_status.py
@@ -0,0 +1,1631 @@
+# -*- test-case-name: buildbot.test.test_status -*-
+
+import email, os
+import operator
+
+from zope.interface import implements
+from twisted.internet import defer, reactor
+from twisted.trial import unittest
+
+from buildbot import interfaces
+from buildbot.sourcestamp import SourceStamp
+from buildbot.process.base import BuildRequest, Build
+from buildbot.status import builder, base, words, progress
+from buildbot.changes.changes import Change
+from buildbot.process.builder import Builder
+from time import sleep
+
+mail = None
+try:
+ from buildbot.status import mail
+except ImportError:
+ pass
+from buildbot.status import progress, client # NEEDS COVERAGE
+from buildbot.test.runutils import RunMixin, setupBuildStepStatus
+
+class MyStep:
+ build = None
+ def getName(self):
+ return "step"
+
+class MyLogFileProducer(builder.LogFileProducer):
+ # The reactor.callLater(0) in LogFileProducer.resumeProducing is a bit of
+ # a nuisance from a testing point of view. This subclass adds a Deferred
+ # to that call so we can find out when it is complete.
+ def resumeProducing(self):
+ d = defer.Deferred()
+ reactor.callLater(0, self._resumeProducing, d)
+ return d
+ def _resumeProducing(self, d):
+ builder.LogFileProducer._resumeProducing(self)
+ reactor.callLater(0, d.callback, None)
+
+class MyLog(builder.LogFile):
+ def __init__(self, basedir, name, text=None, step=None):
+ self.fakeBuilderBasedir = basedir
+ if not step:
+ step = MyStep()
+ builder.LogFile.__init__(self, step, name, name)
+ if text:
+ self.addStdout(text)
+ self.finish()
+ def getFilename(self):
+ return os.path.join(self.fakeBuilderBasedir, self.name)
+
+ def subscribeConsumer(self, consumer):
+ p = MyLogFileProducer(self, consumer)
+ d = p.resumeProducing()
+ return d
+
+class MyHTMLLog(builder.HTMLLogFile):
+ def __init__(self, basedir, name, html):
+ step = MyStep()
+ builder.HTMLLogFile.__init__(self, step, name, name, html)
+
+class MyLogSubscriber:
+ def __init__(self):
+ self.chunks = []
+ def logChunk(self, build, step, log, channel, text):
+ self.chunks.append((channel, text))
+
+class MyLogConsumer:
+ def __init__(self, limit=None):
+ self.chunks = []
+ self.finished = False
+ self.limit = limit
+ def registerProducer(self, producer, streaming):
+ self.producer = producer
+ self.streaming = streaming
+ def unregisterProducer(self):
+ self.producer = None
+ def writeChunk(self, chunk):
+ self.chunks.append(chunk)
+ if self.limit:
+ self.limit -= 1
+ if self.limit == 0:
+ self.producer.pauseProducing()
+ def finish(self):
+ self.finished = True
+
+if mail:
+ class MyMailer(mail.MailNotifier):
+ def sendMessage(self, m, recipients):
+ self.parent.messages.append((m, recipients))
+
+class MyStatus:
+ def getBuildbotURL(self):
+ return self.url
+ def getURLForThing(self, thing):
+ return None
+ def getProjectName(self):
+ return "myproj"
+
+class MyBuilder(builder.BuilderStatus):
+ nextBuildNumber = 0
+
+class MyBuild(builder.BuildStatus):
+ testlogs = []
+ def __init__(self, parent, number, results):
+ builder.BuildStatus.__init__(self, parent, number)
+ self.results = results
+ self.source = SourceStamp(revision="1.14")
+ self.reason = "build triggered by changes"
+ self.finished = True
+ def getLogs(self):
+ return self.testlogs
+
+class MyLookup:
+ implements(interfaces.IEmailLookup)
+
+ def getAddress(self, user):
+ d = defer.Deferred()
+ # With me now is Mr Thomas Walters of West Hartlepool who is totally
+ # invisible.
+ if user == "Thomas_Walters":
+ d.callback(None)
+ else:
+ d.callback(user + "@" + "dev.com")
+ return d
+
+def customTextMailMessage(attrs):
+ logLines = 3
+ text = list()
+ text.append("STATUS: %s" % attrs['result'].title())
+ text.append("")
+ text.extend([c.asText() for c in attrs['changes']])
+ text.append("")
+ name, url, lines = attrs['logs'][-1]
+ text.append("Last %d lines of '%s':" % (logLines, name))
+ text.extend(["\t%s\n" % line for line in lines[len(lines)-logLines:]])
+ text.append("")
+ text.append("-buildbot")
+ return ("\n".join(text), 'plain')
+
+def customHTMLMailMessage(attrs):
+ logLines = 3
+ text = list()
+ text.append("<h3>STATUS <a href='%s'>%s</a>:</h3>" % (attrs['buildURL'],
+ attrs['result'].title()))
+ text.append("<h4>Recent Changes:</h4>")
+ text.extend([c.asHTML() for c in attrs['changes']])
+ name, url, lines = attrs['logs'][-1]
+ text.append("<h4>Last %d lines of '%s':</h4>" % (logLines, name))
+ text.append("<p>")
+ text.append("<br>".join([line for line in lines[len(lines)-logLines:]]))
+ text.append("</p>")
+ text.append("<br>")
+ text.append("<b>-<a href='%s'>buildbot</a></b>" % attrs['buildbotURL'])
+ return ("\n".join(text), 'html')
+
+class Mail(unittest.TestCase):
+
+ def setUp(self):
+ self.builder = MyBuilder("builder1")
+
+ def stall(self, res, timeout):
+ d = defer.Deferred()
+ reactor.callLater(timeout, d.callback, res)
+ return d
+
+ def makeBuild(self, number, results):
+ return MyBuild(self.builder, number, results)
+
+ def failUnlessIn(self, substring, string):
+ self.failUnless(string.find(substring) != -1,
+ "didn't see '%s' in '%s'" % (substring, string))
+
+ def getProjectName(self):
+ return "PROJECT"
+
+ def getBuildbotURL(self):
+ return "BUILDBOT_URL"
+
+ def getURLForThing(self, thing):
+ return None
+
+ def testBuild1(self):
+ mailer = MyMailer(fromaddr="buildbot@example.com",
+ extraRecipients=["recip@example.com",
+ "recip2@example.com"],
+ lookup=mail.Domain("dev.com"))
+ mailer.parent = self
+ mailer.status = self
+ self.messages = []
+
+ b1 = self.makeBuild(3, builder.SUCCESS)
+ b1.blamelist = ["bob"]
+
+ mailer.buildFinished("builder1", b1, b1.results)
+ self.failUnless(len(self.messages) == 1)
+ m,r = self.messages.pop()
+ t = m.as_string()
+ self.failUnlessIn("To: bob@dev.com\n", t)
+ self.failUnlessIn("CC: recip2@example.com, recip@example.com\n", t)
+ self.failUnlessIn("From: buildbot@example.com\n", t)
+ self.failUnlessIn("Subject: buildbot success in PROJECT on builder1\n", t)
+ self.failUnlessIn("Date: ", t)
+ self.failUnlessIn("Build succeeded!\n", t)
+ self.failUnlessIn("Buildbot URL: BUILDBOT_URL\n", t)
+
+ def testBuild2(self):
+ mailer = MyMailer(fromaddr="buildbot@example.com",
+ extraRecipients=["recip@example.com",
+ "recip2@example.com"],
+ lookup="dev.com",
+ sendToInterestedUsers=False)
+ mailer.parent = self
+ mailer.status = self
+ self.messages = []
+
+ b1 = self.makeBuild(3, builder.SUCCESS)
+ b1.blamelist = ["bob"]
+
+ mailer.buildFinished("builder1", b1, b1.results)
+ self.failUnless(len(self.messages) == 1)
+ m,r = self.messages.pop()
+ t = m.as_string()
+ self.failUnlessIn("To: recip2@example.com, "
+ "recip@example.com\n", t)
+ self.failUnlessIn("From: buildbot@example.com\n", t)
+ self.failUnlessIn("Subject: buildbot success in PROJECT on builder1\n", t)
+ self.failUnlessIn("Build succeeded!\n", t)
+ self.failUnlessIn("Buildbot URL: BUILDBOT_URL\n", t)
+
+ def testBuildStatusCategory(self):
+ # a status client only interested in a category should only receive
+ # from that category
+ mailer = MyMailer(fromaddr="buildbot@example.com",
+ extraRecipients=["recip@example.com",
+ "recip2@example.com"],
+ lookup="dev.com",
+ sendToInterestedUsers=False,
+ categories=["debug"])
+
+ mailer.parent = self
+ mailer.status = self
+ self.messages = []
+
+ b1 = self.makeBuild(3, builder.SUCCESS)
+ b1.blamelist = ["bob"]
+
+ mailer.buildFinished("builder1", b1, b1.results)
+ self.failIf(self.messages)
+
+ def testBuilderCategory(self):
+ # a builder in a certain category should notify status clients that
+ # did not list categories, or categories including this one
+ mailer1 = MyMailer(fromaddr="buildbot@example.com",
+ extraRecipients=["recip@example.com",
+ "recip2@example.com"],
+ lookup="dev.com",
+ sendToInterestedUsers=False)
+ mailer2 = MyMailer(fromaddr="buildbot@example.com",
+ extraRecipients=["recip@example.com",
+ "recip2@example.com"],
+ lookup="dev.com",
+ sendToInterestedUsers=False,
+ categories=["active"])
+ mailer3 = MyMailer(fromaddr="buildbot@example.com",
+ extraRecipients=["recip@example.com",
+ "recip2@example.com"],
+ lookup="dev.com",
+ sendToInterestedUsers=False,
+ categories=["active", "debug"])
+
+ builderd = MyBuilder("builder2", "debug")
+
+ mailer1.parent = self
+ mailer1.status = self
+ mailer2.parent = self
+ mailer2.status = self
+ mailer3.parent = self
+ mailer3.status = self
+ self.messages = []
+
+ t = mailer1.builderAdded("builder2", builderd)
+ self.assertEqual(len(mailer1.watched), 1)
+ self.assertEqual(t, mailer1)
+ t = mailer2.builderAdded("builder2", builderd)
+ self.assertEqual(len(mailer2.watched), 0)
+ self.assertEqual(t, None)
+ t = mailer3.builderAdded("builder2", builderd)
+ self.assertEqual(len(mailer3.watched), 1)
+ self.assertEqual(t, mailer3)
+
+ b2 = MyBuild(builderd, 3, builder.SUCCESS)
+ b2.blamelist = ["bob"]
+
+ mailer1.buildFinished("builder2", b2, b2.results)
+ self.failUnlessEqual(len(self.messages), 1)
+ self.messages = []
+ mailer2.buildFinished("builder2", b2, b2.results)
+ self.failUnlessEqual(len(self.messages), 0)
+ self.messages = []
+ mailer3.buildFinished("builder2", b2, b2.results)
+ self.failUnlessEqual(len(self.messages), 1)
+
+ def testCustomTextMessage(self):
+ basedir = "test_custom_text_mesg"
+ os.mkdir(basedir)
+ mailer = MyMailer(fromaddr="buildbot@example.com", mode="problem",
+ extraRecipients=["recip@example.com",
+ "recip2@example.com"],
+ lookup=MyLookup(),
+ customMesg=customTextMailMessage)
+ mailer.parent = self
+ mailer.status = self
+ self.messages = []
+
+ b1 = self.makeBuild(4, builder.FAILURE)
+ b1.setText(["snarkleack", "polarization", "failed"])
+ b1.blamelist = ["dev3", "dev3", "dev3", "dev4",
+ "Thomas_Walters"]
+ b1.source.changes = (Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 123),
+ Change(who = 'author2', files = ['file2'], comments = 'comment2', revision = 456))
+ b1.testlogs = [MyLog(basedir, 'compile', "Compile log here\n"),
+ MyLog(basedir, 'test', "Test log here\nTest 1 failed\nTest 2 failed\nTest 3 failed\nTest 4 failed\n")]
+
+ mailer.buildFinished("builder1", b1, b1.results)
+ m,r = self.messages.pop()
+ t = m.as_string()
+ #
+ # Uncomment to review custom message
+ #
+ #self.fail(t)
+ self.failUnlessIn("comment1", t)
+ self.failUnlessIn("comment2", t)
+ self.failUnlessIn("Test 4 failed", t)
+
+
+ def testCustomHTMLMessage(self):
+ basedir = "test_custom_HTML_mesg"
+ os.mkdir(basedir)
+ mailer = MyMailer(fromaddr="buildbot@example.com", mode="problem",
+ extraRecipients=["recip@example.com",
+ "recip2@example.com"],
+ lookup=MyLookup(),
+ customMesg=customHTMLMailMessage)
+ mailer.parent = self
+ mailer.status = self
+ self.messages = []
+
+ b1 = self.makeBuild(4, builder.FAILURE)
+ b1.setText(["snarkleack", "polarization", "failed"])
+ b1.blamelist = ["dev3", "dev3", "dev3", "dev4",
+ "Thomas_Walters"]
+ b1.source.changes = (Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 123),
+ Change(who = 'author2', files = ['file2'], comments = 'comment2', revision = 456))
+ b1.testlogs = [MyLog(basedir, 'compile', "Compile log here\n"),
+ MyLog(basedir, 'test', "Test log here\nTest 1 failed\nTest 2 failed\nTest 3 failed\nTest 4 failed\n")]
+
+ mailer.buildFinished("builder1", b1, b1.results)
+ m,r = self.messages.pop()
+ t = m.as_string()
+ #
+ # Uncomment to review custom message
+ #
+ #self.fail(t)
+ self.failUnlessIn("<h4>Last 3 lines of 'step.test':</h4>", t)
+ self.failUnlessIn("<p>Changed by: <b>author2</b><br />", t)
+ self.failUnlessIn("Test 3 failed", t)
+
+ def testShouldAttachLog(self):
+ mailer = mail.MailNotifier(fromaddr="buildbot@example.com", addLogs=True)
+ self.assertTrue(mailer._shouldAttachLog('anything'))
+ mailer = mail.MailNotifier(fromaddr="buildbot@example.com", addLogs=False)
+ self.assertFalse(mailer._shouldAttachLog('anything'))
+ mailer = mail.MailNotifier(fromaddr="buildbot@example.com", addLogs=['something'])
+ self.assertFalse(mailer._shouldAttachLog('anything'))
+ self.assertTrue(mailer._shouldAttachLog('something'))
+
+ def testFailure(self):
+ mailer = MyMailer(fromaddr="buildbot@example.com", mode="problem",
+ extraRecipients=["recip@example.com",
+ "recip2@example.com"],
+ lookup=MyLookup())
+ mailer.parent = self
+ mailer.status = self
+ self.messages = []
+
+ b1 = self.makeBuild(3, builder.SUCCESS)
+ b1.blamelist = ["dev1", "dev2"]
+ b2 = self.makeBuild(4, builder.FAILURE)
+ b2.setText(["snarkleack", "polarization", "failed"])
+ b2.blamelist = ["dev3", "dev3", "dev3", "dev4",
+ "Thomas_Walters"]
+ mailer.buildFinished("builder1", b1, b1.results)
+ self.failIf(self.messages)
+ mailer.buildFinished("builder1", b2, b2.results)
+ self.failUnless(len(self.messages) == 1)
+ m,r = self.messages.pop()
+ t = m.as_string()
+ self.failUnlessIn("To: dev3@dev.com, dev4@dev.com\n", t)
+ self.failUnlessIn("CC: recip2@example.com, recip@example.com\n", t)
+ self.failUnlessIn("From: buildbot@example.com\n", t)
+ self.failUnlessIn("Subject: buildbot failure in PROJECT on builder1\n", t)
+ self.failUnlessIn("The Buildbot has detected a new failure", t)
+ self.failUnlessIn("BUILD FAILED: snarkleack polarization failed\n", t)
+ self.failUnlessEqual(set(r), set(["dev3@dev.com", "dev4@dev.com",
+ "recip2@example.com", "recip@example.com"]))
+
+ def testLogs(self):
+ basedir = "test_status_logs"
+ os.mkdir(basedir)
+ mailer = MyMailer(fromaddr="buildbot@example.com", addLogs=True,
+ extraRecipients=["recip@example.com",
+ "recip2@example.com"])
+ mailer.parent = self
+ mailer.status = self
+ self.messages = []
+
+ b1 = self.makeBuild(3, builder.WARNINGS)
+ b1.testlogs = [MyLog(basedir, 'compile', "Compile log here\n"),
+ MyLog(basedir,
+ 'test', "Test log here\nTest 4 failed\n"),
+ ]
+ b1.text = ["unusual", "gnarzzler", "output"]
+ mailer.buildFinished("builder1", b1, b1.results)
+ self.failUnless(len(self.messages) == 1)
+ m,r = self.messages.pop()
+ t = m.as_string()
+ self.failUnlessIn("Subject: buildbot warnings in PROJECT on builder1\n", t)
+ m2 = email.message_from_string(t)
+ p = m2.get_payload()
+ self.failUnlessEqual(len(p), 3)
+
+ self.failUnlessIn("Build Had Warnings: unusual gnarzzler output\n",
+ p[0].get_payload())
+
+ self.failUnlessEqual(p[1].get_filename(), "step.compile")
+ self.failUnlessEqual(p[1].get_payload(), "Compile log here\n")
+
+ self.failUnlessEqual(p[2].get_filename(), "step.test")
+ self.failUnlessIn("Test log here\n", p[2].get_payload())
+
+ def testMail(self):
+ basedir = "test_status_mail"
+ os.mkdir(basedir)
+ dest = os.environ.get("BUILDBOT_TEST_MAIL")
+ if not dest:
+ raise unittest.SkipTest("define BUILDBOT_TEST_MAIL=dest to run this")
+ mailer = mail.MailNotifier(fromaddr="buildbot@example.com",
+ addLogs=True,
+ extraRecipients=[dest])
+ s = MyStatus()
+ s.url = "project URL"
+ mailer.status = s
+
+ b1 = self.makeBuild(3, builder.SUCCESS)
+ b1.testlogs = [MyLog(basedir, 'compile', "Compile log here\n"),
+ MyLog(basedir,
+ 'test', "Test log here\nTest 4 failed\n"),
+ ]
+
+ d = mailer.buildFinished("builder1", b1, b1.results)
+ # When this fires, the mail has been sent, but the SMTP connection is
+ # still up (because smtp.sendmail relies upon the server to hang up).
+ # Spin for a moment to avoid the "unclean reactor" warning that Trial
+ # gives us if we finish before the socket is disconnected. Really,
+ # sendmail() ought to hang up the connection once it is finished:
+ # otherwise a malicious SMTP server could make us consume lots of
+ # memory.
+ d.addCallback(self.stall, 0.1)
+ return d
+
+if not mail:
+ Mail.skip = "the Twisted Mail package is not installed"
+
+class Progress(unittest.TestCase):
+ def testWavg(self):
+ bp = progress.BuildProgress([])
+ e = progress.Expectations(bp)
+ # wavg(old, current)
+ self.failUnlessEqual(e.wavg(None, None), None)
+ self.failUnlessEqual(e.wavg(None, 3), 3)
+ self.failUnlessEqual(e.wavg(3, None), 3)
+ self.failUnlessEqual(e.wavg(3, 4), 3.5)
+ e.decay = 0.1
+ self.failUnlessEqual(e.wavg(3, 4), 3.1)
+
+
+class Results(unittest.TestCase):
+
+ def testAddResults(self):
+ b = builder.BuildStatus(builder.BuilderStatus("test"), 12)
+ testname = ("buildbot", "test", "test_status", "Results",
+ "testAddResults")
+ r1 = builder.TestResult(name=testname,
+ results=builder.SUCCESS,
+ text=["passed"],
+ logs={'output': ""},
+ )
+ b.addTestResult(r1)
+
+ res = b.getTestResults()
+ self.failUnlessEqual(res.keys(), [testname])
+ t = res[testname]
+ self.failUnless(interfaces.ITestResult.providedBy(t))
+ self.failUnlessEqual(t.getName(), testname)
+ self.failUnlessEqual(t.getResults(), builder.SUCCESS)
+ self.failUnlessEqual(t.getText(), ["passed"])
+ self.failUnlessEqual(t.getLogs(), {'output': ""})
+
+class Log(unittest.TestCase):
+ def setUpClass(self):
+ self.basedir = "status_log_add"
+ os.mkdir(self.basedir)
+
+ def testAdd(self):
+ l = MyLog(self.basedir, "compile", step=13)
+ self.failUnlessEqual(l.getName(), "compile")
+ self.failUnlessEqual(l.getStep(), 13)
+ l.addHeader("HEADER\n")
+ l.addStdout("Some text\n")
+ l.addStderr("Some error\n")
+ l.addStdout("Some more text\n")
+ self.failIf(l.isFinished())
+ l.finish()
+ self.failUnless(l.isFinished())
+ self.failUnlessEqual(l.getText(),
+ "Some text\nSome error\nSome more text\n")
+ self.failUnlessEqual(l.getTextWithHeaders(),
+ "HEADER\n" +
+ "Some text\nSome error\nSome more text\n")
+ self.failUnlessEqual(len(list(l.getChunks())), 4)
+
+ self.failUnless(l.hasContents())
+ try:
+ os.unlink(l.getFilename())
+ except OSError:
+ os.unlink(l.getFilename() + ".bz2")
+ self.failIf(l.hasContents())
+
+ def TODO_testDuplicate(self):
+ # create multiple logs for the same step with the same logname, make
+ # sure their on-disk filenames are suitably uniquified. This
+ # functionality actually lives in BuildStepStatus and BuildStatus, so
+ # this test must involve more than just the MyLog class.
+
+ # naieve approach, doesn't work
+ l1 = MyLog(self.basedir, "duplicate")
+ l1.addStdout("Some text\n")
+ l1.finish()
+ l2 = MyLog(self.basedir, "duplicate")
+ l2.addStdout("Some more text\n")
+ l2.finish()
+ self.failIfEqual(l1.getFilename(), l2.getFilename())
+
+ def testMerge1(self):
+ l = MyLog(self.basedir, "merge1")
+ l.addHeader("HEADER\n")
+ l.addStdout("Some text\n")
+ l.addStdout("Some more text\n")
+ l.addStdout("more\n")
+ l.finish()
+ self.failUnlessEqual(l.getText(),
+ "Some text\nSome more text\nmore\n")
+ self.failUnlessEqual(l.getTextWithHeaders(),
+ "HEADER\n" +
+ "Some text\nSome more text\nmore\n")
+ self.failUnlessEqual(len(list(l.getChunks())), 2)
+
+ def testMerge2(self):
+ l = MyLog(self.basedir, "merge2")
+ l.addHeader("HEADER\n")
+ for i in xrange(1000):
+ l.addStdout("aaaa")
+ for i in xrange(30):
+ l.addStderr("bbbb")
+ for i in xrange(10):
+ l.addStdout("cc")
+ target = 1000*"aaaa" + 30 * "bbbb" + 10 * "cc"
+ self.failUnlessEqual(len(l.getText()), len(target))
+ self.failUnlessEqual(l.getText(), target)
+ l.finish()
+ self.failUnlessEqual(len(l.getText()), len(target))
+ self.failUnlessEqual(l.getText(), target)
+ self.failUnlessEqual(len(list(l.getChunks())), 4)
+
+ def testMerge3(self):
+ l = MyLog(self.basedir, "merge3")
+ l.chunkSize = 100
+ l.addHeader("HEADER\n")
+ for i in xrange(8):
+ l.addStdout(10*"a")
+ for i in xrange(8):
+ l.addStdout(10*"a")
+ self.failUnlessEqual(list(l.getChunks()),
+ [(builder.HEADER, "HEADER\n"),
+ (builder.STDOUT, 100*"a"),
+ (builder.STDOUT, 60*"a")])
+ l.finish()
+ self.failUnlessEqual(l.getText(), 160*"a")
+
+ def testReadlines(self):
+ l = MyLog(self.basedir, "chunks1")
+ l.addHeader("HEADER\n") # should be ignored
+ l.addStdout("Some text\n")
+ l.addStdout("Some More Text\nAnd Some More\n")
+ l.addStderr("Some Stderr\n")
+ l.addStdout("Last line\n")
+ l.finish()
+ alllines = list(l.readlines())
+ self.failUnlessEqual(len(alllines), 4)
+ self.failUnlessEqual(alllines[0], "Some text\n")
+ self.failUnlessEqual(alllines[2], "And Some More\n")
+ self.failUnlessEqual(alllines[3], "Last line\n")
+ stderr = list(l.readlines(interfaces.LOG_CHANNEL_STDERR))
+ self.failUnlessEqual(len(stderr), 1)
+ self.failUnlessEqual(stderr[0], "Some Stderr\n")
+ lines = l.readlines()
+ if False: # TODO: l.readlines() is not yet an iterator
+ # verify that it really is an iterator
+ line0 = lines.next()
+ self.failUnlessEqual(line0, "Some text\n")
+ line1 = lines.next()
+ line2 = lines.next()
+ self.failUnlessEqual(line2, "And Some More\n")
+
+
+ def testChunks(self):
+ l = MyLog(self.basedir, "chunks2")
+ c1 = l.getChunks()
+ l.addHeader("HEADER\n")
+ l.addStdout("Some text\n")
+ self.failUnlessEqual("".join(l.getChunks(onlyText=True)),
+ "HEADER\nSome text\n")
+ c2 = l.getChunks()
+
+ l.addStdout("Some more text\n")
+ self.failUnlessEqual("".join(l.getChunks(onlyText=True)),
+ "HEADER\nSome text\nSome more text\n")
+ c3 = l.getChunks()
+
+ l.addStdout("more\n")
+ l.finish()
+
+ self.failUnlessEqual(list(c1), [])
+ self.failUnlessEqual(list(c2), [(builder.HEADER, "HEADER\n"),
+ (builder.STDOUT, "Some text\n")])
+ self.failUnlessEqual(list(c3), [(builder.HEADER, "HEADER\n"),
+ (builder.STDOUT,
+ "Some text\nSome more text\n")])
+
+ self.failUnlessEqual(l.getText(),
+ "Some text\nSome more text\nmore\n")
+ self.failUnlessEqual(l.getTextWithHeaders(),
+ "HEADER\n" +
+ "Some text\nSome more text\nmore\n")
+ self.failUnlessEqual(len(list(l.getChunks())), 2)
+
+ def testUpgrade(self):
+ l = MyLog(self.basedir, "upgrade")
+ l.addHeader("HEADER\n")
+ l.addStdout("Some text\n")
+ l.addStdout("Some more text\n")
+ l.addStdout("more\n")
+ l.finish()
+ self.failUnless(l.hasContents())
+ # now doctor it to look like a 0.6.4-era non-upgraded logfile
+ l.entries = list(l.getChunks())
+ del l.filename
+ try:
+ os.unlink(l.getFilename() + ".bz2")
+ except OSError:
+ os.unlink(l.getFilename())
+ # now make sure we can upgrade it
+ l.upgrade("upgrade")
+ self.failUnlessEqual(l.getText(),
+ "Some text\nSome more text\nmore\n")
+ self.failUnlessEqual(len(list(l.getChunks())), 2)
+ self.failIf(l.entries)
+
+ # now, do it again, but make it look like an upgraded 0.6.4 logfile
+ # (i.e. l.filename is missing, but the contents are there on disk)
+ l.entries = list(l.getChunks())
+ del l.filename
+ l.upgrade("upgrade")
+ self.failUnlessEqual(l.getText(),
+ "Some text\nSome more text\nmore\n")
+ self.failUnlessEqual(len(list(l.getChunks())), 2)
+ self.failIf(l.entries)
+ self.failUnless(l.hasContents())
+
+ def testHTMLUpgrade(self):
+ l = MyHTMLLog(self.basedir, "upgrade", "log contents")
+ l.upgrade("filename")
+
+ def testSubscribe(self):
+ l1 = MyLog(self.basedir, "subscribe1")
+ l1.finish()
+ self.failUnless(l1.isFinished())
+
+ s = MyLogSubscriber()
+ l1.subscribe(s, True)
+ l1.unsubscribe(s)
+ self.failIf(s.chunks)
+
+ s = MyLogSubscriber()
+ l1.subscribe(s, False)
+ l1.unsubscribe(s)
+ self.failIf(s.chunks)
+
+ finished = []
+ l2 = MyLog(self.basedir, "subscribe2")
+ l2.waitUntilFinished().addCallback(finished.append)
+ l2.addHeader("HEADER\n")
+ s1 = MyLogSubscriber()
+ l2.subscribe(s1, True)
+ s2 = MyLogSubscriber()
+ l2.subscribe(s2, False)
+ self.failUnlessEqual(s1.chunks, [(builder.HEADER, "HEADER\n")])
+ self.failUnlessEqual(s2.chunks, [])
+
+ l2.addStdout("Some text\n")
+ self.failUnlessEqual(s1.chunks, [(builder.HEADER, "HEADER\n"),
+ (builder.STDOUT, "Some text\n")])
+ self.failUnlessEqual(s2.chunks, [(builder.STDOUT, "Some text\n")])
+ l2.unsubscribe(s1)
+
+ l2.addStdout("Some more text\n")
+ self.failUnlessEqual(s1.chunks, [(builder.HEADER, "HEADER\n"),
+ (builder.STDOUT, "Some text\n")])
+ self.failUnlessEqual(s2.chunks, [(builder.STDOUT, "Some text\n"),
+ (builder.STDOUT, "Some more text\n"),
+ ])
+ self.failIf(finished)
+ l2.finish()
+ self.failUnlessEqual(finished, [l2])
+
+ def testConsumer(self):
+ l1 = MyLog(self.basedir, "consumer1")
+ l1.finish()
+ self.failUnless(l1.isFinished())
+
+ s = MyLogConsumer()
+ d = l1.subscribeConsumer(s)
+ d.addCallback(self._testConsumer_1, s)
+ return d
+ testConsumer.timeout = 5
+ def _testConsumer_1(self, res, s):
+ self.failIf(s.chunks)
+ self.failUnless(s.finished)
+ self.failIf(s.producer) # producer should be registered and removed
+
+ l2 = MyLog(self.basedir, "consumer2")
+ l2.addHeader("HEADER\n")
+ l2.finish()
+ self.failUnless(l2.isFinished())
+
+ s = MyLogConsumer()
+ d = l2.subscribeConsumer(s)
+ d.addCallback(self._testConsumer_2, s)
+ return d
+ def _testConsumer_2(self, res, s):
+ self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n")])
+ self.failUnless(s.finished)
+ self.failIf(s.producer) # producer should be registered and removed
+
+
+ l2 = MyLog(self.basedir, "consumer3")
+ l2.chunkSize = 1000
+ l2.addHeader("HEADER\n")
+ l2.addStdout(800*"a")
+ l2.addStdout(800*"a") # should now have two chunks on disk, 1000+600
+ l2.addStdout(800*"b") # HEADER,1000+600*a on disk, 800*a in memory
+ l2.addStdout(800*"b") # HEADER,1000+600*a,1000+600*b on disk
+ l2.addStdout(200*"c") # HEADER,1000+600*a,1000+600*b on disk,
+ # 200*c in memory
+
+ s = MyLogConsumer(limit=1)
+ d = l2.subscribeConsumer(s)
+ d.addCallback(self._testConsumer_3, l2, s)
+ return d
+ def _testConsumer_3(self, res, l2, s):
+ self.failUnless(s.streaming)
+ self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n")])
+ s.limit = 1
+ d = s.producer.resumeProducing()
+ d.addCallback(self._testConsumer_4, l2, s)
+ return d
+ def _testConsumer_4(self, res, l2, s):
+ self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n"),
+ (builder.STDOUT, 1000*"a"),
+ ])
+ s.limit = None
+ d = s.producer.resumeProducing()
+ d.addCallback(self._testConsumer_5, l2, s)
+ return d
+ def _testConsumer_5(self, res, l2, s):
+ self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n"),
+ (builder.STDOUT, 1000*"a"),
+ (builder.STDOUT, 600*"a"),
+ (builder.STDOUT, 1000*"b"),
+ (builder.STDOUT, 600*"b"),
+ (builder.STDOUT, 200*"c")])
+ l2.addStdout(1000*"c") # HEADER,1600*a,1600*b,1200*c on disk
+ self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n"),
+ (builder.STDOUT, 1000*"a"),
+ (builder.STDOUT, 600*"a"),
+ (builder.STDOUT, 1000*"b"),
+ (builder.STDOUT, 600*"b"),
+ (builder.STDOUT, 200*"c"),
+ (builder.STDOUT, 1000*"c")])
+ l2.finish()
+ self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n"),
+ (builder.STDOUT, 1000*"a"),
+ (builder.STDOUT, 600*"a"),
+ (builder.STDOUT, 1000*"b"),
+ (builder.STDOUT, 600*"b"),
+ (builder.STDOUT, 200*"c"),
+ (builder.STDOUT, 1000*"c")])
+ self.failIf(s.producer)
+ self.failUnless(s.finished)
+
+ def testLargeSummary(self):
+ bigtext = "a" * 200000 # exceed the NetstringReceiver 100KB limit
+ l = MyLog(self.basedir, "large", bigtext)
+ s = MyLogConsumer()
+ d = l.subscribeConsumer(s)
+ def _check(res):
+ for ctype,chunk in s.chunks:
+ self.failUnless(len(chunk) < 100000)
+ merged = "".join([c[1] for c in s.chunks])
+ self.failUnless(merged == bigtext)
+ d.addCallback(_check)
+ # when this fails, it fails with a timeout, and there is an exception
+ # sent to log.err(). This AttributeError exception is in
+ # NetstringReceiver.dataReceived where it does
+ # self.transport.loseConnection() because of the NetstringParseError,
+ # however self.transport is None
+ return d
+ testLargeSummary.timeout = 5
+
+
+class CompressLog(unittest.TestCase):
+ def testCompressLogs(self):
+ bss = setupBuildStepStatus("test-compress")
+ bss.build.builder.setLogCompressionLimit(1024)
+ l = bss.addLog('not-compress')
+ l.addStdout('a' * 512)
+ l.finish()
+ lc = bss.addLog('to-compress')
+ lc.addStdout('b' * 1024)
+ lc.finish()
+ d = bss.stepFinished(builder.SUCCESS)
+ self.failUnless(d is not None)
+ d.addCallback(self._verifyCompression, bss)
+ return d
+
+ def _verifyCompression(self, result, bss):
+ self.failUnless(len(bss.getLogs()), 2)
+ (ncl, cl) = bss.getLogs() # not compressed, compressed log
+ self.failUnless(os.path.isfile(ncl.getFilename()))
+ self.failIf(os.path.isfile(ncl.getFilename() + ".bz2"))
+ self.failIf(os.path.isfile(cl.getFilename()))
+ self.failUnless(os.path.isfile(cl.getFilename() + ".bz2"))
+ content = ncl.getText()
+ self.failUnless(len(content), 512)
+ content = cl.getText()
+ self.failUnless(len(content), 1024)
+ pass
+
+config_base = """
+from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.buildslave import BuildSlave
+s = factory.s
+
+f1 = factory.QuickBuildFactory('fakerep', 'cvsmodule', configure=None)
+
+f2 = factory.BuildFactory([
+ s(dummy.Dummy, timeout=1),
+ s(dummy.RemoteDummy, timeout=2),
+ ])
+
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit')]
+c['schedulers'] = []
+c['builders'] = []
+c['builders'].append({'name':'quick', 'slavename':'bot1',
+ 'builddir': 'quickdir', 'factory': f1})
+c['slavePortnum'] = 0
+"""
+
+config_2 = config_base + """
+c['builders'] = [{'name': 'dummy', 'slavename': 'bot1',
+ 'builddir': 'dummy1', 'factory': f2},
+ {'name': 'testdummy', 'slavename': 'bot1',
+ 'builddir': 'dummy2', 'factory': f2, 'category': 'test'}]
+"""
+
+class STarget(base.StatusReceiver):
+ debug = False
+
+ def __init__(self, mode):
+ self.mode = mode
+ self.events = []
+ def announce(self):
+ if self.debug:
+ print self.events[-1]
+
+ def builderAdded(self, name, builder):
+ self.events.append(("builderAdded", name, builder))
+ self.announce()
+ if "builder" in self.mode:
+ return self
+ def builderChangedState(self, name, state):
+ self.events.append(("builderChangedState", name, state))
+ self.announce()
+ def buildStarted(self, name, build):
+ self.events.append(("buildStarted", name, build))
+ self.announce()
+ if "eta" in self.mode:
+ self.eta_build = build.getETA()
+ if "build" in self.mode:
+ return self
+ def buildETAUpdate(self, build, ETA):
+ self.events.append(("buildETAUpdate", build, ETA))
+ self.announce()
+ def stepStarted(self, build, step):
+ self.events.append(("stepStarted", build, step))
+ self.announce()
+ if 0 and "eta" in self.mode:
+ print "TIMES", step.getTimes()
+ print "ETA", step.getETA()
+ print "EXP", step.getExpectations()
+ if "step" in self.mode:
+ return self
+ def stepTextChanged(self, build, step, text):
+ self.events.append(("stepTextChanged", step, text))
+ def stepText2Changed(self, build, step, text2):
+ self.events.append(("stepText2Changed", step, text2))
+ def stepETAUpdate(self, build, step, ETA, expectations):
+ self.events.append(("stepETAUpdate", build, step, ETA, expectations))
+ self.announce()
+ def logStarted(self, build, step, log):
+ self.events.append(("logStarted", build, step, log))
+ self.announce()
+ def logFinished(self, build, step, log):
+ self.events.append(("logFinished", build, step, log))
+ self.announce()
+ def stepFinished(self, build, step, results):
+ self.events.append(("stepFinished", build, step, results))
+ if 0 and "eta" in self.mode:
+ print "post-EXP", step.getExpectations()
+ self.announce()
+ def buildFinished(self, name, build, results):
+ self.events.append(("buildFinished", name, build, results))
+ self.announce()
+ def builderRemoved(self, name):
+ self.events.append(("builderRemoved", name))
+ self.announce()
+
+class Subscription(RunMixin, unittest.TestCase):
+ # verify that StatusTargets can subscribe/unsubscribe properly
+ #
+ # Drives a real master (config_2, from earlier in this file) with
+ # STarget observers at three subscription granularities and asserts
+ # the exact event sequences each one receives. The assertions are
+ # order-sensitive, so the event flow must not be reordered.
+
+ def testSlave(self):
+ m = self.master
+ s = m.getStatus()
+ # t1 only subscribes to builder-level events.
+ self.t1 = t1 = STarget(["builder"])
+ #t1.debug = True; print
+ s.subscribe(t1)
+ self.failUnlessEqual(len(t1.events), 0)
+
+ # t3 subscribes all the way down to step-level events.
+ self.t3 = t3 = STarget(["builder", "build", "step"])
+ s.subscribe(t3)
+
+ m.loadConfig(config_2)
+ m.readConfig = True
+ m.startService()
+
+ # loading the config adds two builders, each starting 'offline'
+ self.failUnlessEqual(len(t1.events), 4)
+ self.failUnlessEqual(t1.events[0][0:2], ("builderAdded", "dummy"))
+ self.failUnlessEqual(t1.events[1],
+ ("builderChangedState", "dummy", "offline"))
+ self.failUnlessEqual(t1.events[2][0:2], ("builderAdded", "testdummy"))
+ self.failUnlessEqual(t1.events[3],
+ ("builderChangedState", "testdummy", "offline"))
+ t1.events = []
+
+ self.failUnlessEqual(s.getBuilderNames(), ["dummy", "testdummy"])
+ self.failUnlessEqual(s.getBuilderNames(categories=['test']),
+ ["testdummy"])
+ self.s1 = s1 = s.getBuilder("dummy")
+ self.failUnlessEqual(s1.getName(), "dummy")
+ self.failUnlessEqual(s1.getState(), ("offline", []))
+ self.failUnlessEqual(s1.getCurrentBuilds(), [])
+ self.failUnlessEqual(s1.getLastFinishedBuild(), None)
+ self.failUnlessEqual(s1.getBuild(-1), None)
+ #self.failUnlessEqual(s1.getEvent(-1), foo("created"))
+
+ # status targets should, upon being subscribed, immediately get a
+ # list of all current builders matching their category
+ self.t2 = t2 = STarget([])
+ s.subscribe(t2)
+ self.failUnlessEqual(len(t2.events), 2)
+ self.failUnlessEqual(t2.events[0][0:2], ("builderAdded", "dummy"))
+ self.failUnlessEqual(t2.events[1][0:2], ("builderAdded", "testdummy"))
+
+ d = self.connectSlave(builders=["dummy", "testdummy"])
+ d.addCallback(self._testSlave_1, t1)
+ return d
+
+ def _testSlave_1(self, res, t1):
+ # once the slave attaches, both builders go offline -> idle
+ self.failUnlessEqual(len(t1.events), 2)
+ self.failUnlessEqual(t1.events[0],
+ ("builderChangedState", "dummy", "idle"))
+ self.failUnlessEqual(t1.events[1],
+ ("builderChangedState", "testdummy", "idle"))
+ t1.events = []
+
+ # force one build and wait until both the request and the builder
+ # report completion/idleness
+ c = interfaces.IControl(self.master)
+ req = BuildRequest("forced build for testing", SourceStamp(), 'test_builder')
+ c.getBuilder("dummy").requestBuild(req)
+ d = req.waitUntilFinished()
+ d2 = self.master.botmaster.waitUntilBuilderIdle("dummy")
+ dl = defer.DeferredList([d, d2])
+ dl.addCallback(self._testSlave_2)
+ return dl
+
+ def _testSlave_2(self, res):
+ # t1 subscribes to builds, but not anything lower-level
+ ev = self.t1.events
+ self.failUnlessEqual(len(ev), 4)
+ self.failUnlessEqual(ev[0][0:3],
+ ("builderChangedState", "dummy", "building"))
+ self.failUnlessEqual(ev[1][0], "buildStarted")
+ self.failUnlessEqual(ev[2][0:2]+ev[2][3:4],
+ ("buildFinished", "dummy", builder.SUCCESS))
+ self.failUnlessEqual(ev[3][0:3],
+ ("builderChangedState", "dummy", "idle"))
+
+ # t3 (step-level subscriber) must have seen the full event stream
+ self.failUnlessEqual([ev[0] for ev in self.t3.events],
+ ["builderAdded",
+ "builderChangedState", # offline
+ "builderAdded",
+ "builderChangedState", # idle
+ "builderChangedState", # offline
+ "builderChangedState", # idle
+ "builderChangedState", # building
+ "buildStarted",
+ "stepStarted", "stepETAUpdate",
+ "stepTextChanged", "stepFinished",
+ "stepStarted", "stepETAUpdate",
+ "stepTextChanged", "logStarted", "logFinished",
+ "stepTextChanged", "stepText2Changed",
+ "stepFinished",
+ "buildFinished",
+ "builderChangedState", # idle
+ ])
+
+ # inspect the finished BuildStatus and its two steps
+ b = self.s1.getLastFinishedBuild()
+ self.failUnless(b)
+ self.failUnlessEqual(b.getBuilder().getName(), "dummy")
+ self.failUnlessEqual(b.getNumber(), 0)
+ self.failUnlessEqual(b.getSourceStamp().branch, None)
+ self.failUnlessEqual(b.getSourceStamp().patch, None)
+ self.failUnlessEqual(b.getSourceStamp().revision, None)
+ self.failUnlessEqual(b.getReason(), "forced build for testing")
+ self.failUnlessEqual(b.getChanges(), ())
+ self.failUnlessEqual(b.getResponsibleUsers(), [])
+ self.failUnless(b.isFinished())
+ self.failUnlessEqual(b.getText(), ['build', 'successful'])
+ self.failUnlessEqual(b.getResults(), builder.SUCCESS)
+
+ steps = b.getSteps()
+ self.failUnlessEqual(len(steps), 2)
+
+ # accumulate the measured wall-clock time of both steps; used as
+ # the expected ETA for the second run
+ eta = 0
+ st1 = steps[0]
+ self.failUnlessEqual(st1.getName(), "dummy")
+ self.failUnless(st1.isFinished())
+ self.failUnlessEqual(st1.getText(), ["delay", "1 secs"])
+ start,finish = st1.getTimes()
+ self.failUnless(0.5 < (finish-start) < 10)
+ self.failUnlessEqual(st1.getExpectations(), [])
+ self.failUnlessEqual(st1.getLogs(), [])
+ eta += finish-start
+
+ st2 = steps[1]
+ self.failUnlessEqual(st2.getName(), "remote dummy")
+ self.failUnless(st2.isFinished())
+ self.failUnlessEqual(st2.getText(),
+ ["remote", "delay", "2 secs"])
+ start,finish = st2.getTimes()
+ self.failUnless(1.5 < (finish-start) < 10)
+ eta += finish-start
+ self.failUnlessEqual(st2.getExpectations(), [('output', 38, None)])
+ logs = st2.getLogs()
+ self.failUnlessEqual(len(logs), 1)
+ self.failUnlessEqual(logs[0].getName(), "stdio")
+ self.failUnlessEqual(logs[0].getText(), "data")
+
+ self.eta = eta
+ # now we run it a second time, and we should have an ETA
+
+ self.t4 = t4 = STarget(["builder", "build", "eta"])
+ self.master.getStatus().subscribe(t4)
+ c = interfaces.IControl(self.master)
+ req = BuildRequest("forced build for testing", SourceStamp(), 'test_builder')
+ c.getBuilder("dummy").requestBuild(req)
+ d = req.waitUntilFinished()
+ d2 = self.master.botmaster.waitUntilBuilderIdle("dummy")
+ dl = defer.DeferredList([d, d2])
+ dl.addCallback(self._testSlave_3)
+ return dl
+
+ def _testSlave_3(self, res):
+ # the ETA reported for the second build should be within one second
+ # of the measured duration of the first
+ t4 = self.t4
+ eta = self.eta
+ self.failUnless(eta-1 < t4.eta_build < eta+1, # should be 3 seconds
+ "t4.eta_build was %g, not in (%g,%g)"
+ % (t4.eta_build, eta-1, eta+1))
+
+
+class Client(unittest.TestCase):
+ # makeRemote() wraps a BuilderStatus in a RemoteBuilder adapter, and
+ # passes None through unchanged.
+ def testAdaptation(self):
+ b = builder.BuilderStatus("bname")
+ b2 = client.makeRemote(b)
+ self.failUnless(isinstance(b2, client.RemoteBuilder))
+ b3 = client.makeRemote(None)
+ self.failUnless(b3 is None)
+
+
+class ContactTester(unittest.TestCase):
+ def test_notify_invalid_syntax(self):
+ irc = MyContact()
+ self.assertRaises(words.UsageError, lambda args, who: irc.command_NOTIFY(args, who), "", "mynick")
+
+ def test_notify_list(self):
+ irc = MyContact()
+ irc.command_NOTIFY("list", "mynick")
+ self.failUnlessEqual(irc.message, "The following events are being notified: []", "empty notify list")
+
+ irc.message = ""
+ irc.command_NOTIFY("on started", "mynick")
+ self.failUnlessEqual(irc.message, "The following events are being notified: ['started']", "on started")
+
+ irc.message = ""
+ irc.command_NOTIFY("on finished", "mynick")
+ self.failUnlessEqual(irc.message, "The following events are being notified: ['started', 'finished']", "on finished")
+
+ irc.message = ""
+ irc.command_NOTIFY("off", "mynick")
+ self.failUnlessEqual(irc.message, "The following events are being notified: []", "off all")
+
+ irc.message = ""
+ irc.command_NOTIFY("on", "mynick")
+ self.failUnlessEqual(irc.message, "The following events are being notified: ['started', 'finished']", "on default set")
+
+ irc.message = ""
+ irc.command_NOTIFY("off started", "mynick")
+ self.failUnlessEqual(irc.message, "The following events are being notified: ['finished']", "off started")
+
+ irc.message = ""
+ irc.command_NOTIFY("on success failure exception", "mynick")
+ self.failUnlessEqual(irc.message, "The following events are being notified: ['failure', 'finished', 'exception', 'success']", "on multiple events")
+
+ def test_notification_default(self):
+ irc = MyContact()
+
+ my_builder = MyBuilder("builder78")
+ my_build = MyIrcBuild(my_builder, 23, builder.SUCCESS)
+
+ irc.buildStarted(my_builder.getName(), my_build)
+ self.failUnlessEqual(irc.message, "", "No notification with default settings")
+
+ irc.buildFinished(my_builder.getName(), my_build, None)
+ self.failUnlessEqual(irc.message, "", "No notification with default settings")
+
+ def test_notification_started(self):
+ irc = MyContact()
+
+ my_builder = MyBuilder("builder78")
+ my_build = MyIrcBuild(my_builder, 23, builder.SUCCESS)
+ my_build.changes = (
+ Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 123),
+ Change(who = 'author2', files = ['file2'], comments = 'comment2', revision = 456),
+ )
+
+ irc.command_NOTIFY("on started", "mynick")
+
+ irc.message = ""
+ irc.buildStarted(my_builder.getName(), my_build)
+ self.failUnlessEqual(irc.message, "build #23 of builder78 started including [123, 456]", "Start notification generated with notify_events=['started']")
+
+ irc.message = ""
+ irc.buildFinished(my_builder.getName(), my_build, None)
+ self.failUnlessEqual(irc.message, "", "No finished notification with notify_events=['started']")
+
+ def test_notification_finished(self):
+ irc = MyContact()
+
+ my_builder = MyBuilder("builder834")
+ my_build = MyIrcBuild(my_builder, 862, builder.SUCCESS)
+ my_build.changes = (
+ Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 943),
+ )
+
+ irc.command_NOTIFY("on finished", "mynick")
+
+ irc.message = ""
+ irc.buildStarted(my_builder.getName(), my_build)
+ self.failUnlessEqual(irc.message, "", "No started notification with notify_events=['finished']")
+
+ irc.message = ""
+ irc.buildFinished(my_builder.getName(), my_build, None)
+ self.failUnlessEqual(irc.message, "build #862 of builder834 is complete: Success [step1 step2] Build details are at http://myserver/mypath?build=765", "Finish notification generated with notify_events=['finished']")
+
+ def test_notification_success(self):
+ irc = MyContact()
+
+ my_builder = MyBuilder("builder834")
+ my_build = MyIrcBuild(my_builder, 862, builder.SUCCESS)
+ my_build.changes = (
+ Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 943),
+ )
+
+ irc.command_NOTIFY("on success", "mynick")
+
+ irc.message = ""
+ irc.buildStarted(my_builder.getName(), my_build)
+ self.failUnlessEqual(irc.message, "", "No started notification with notify_events=['success']")
+
+ irc.message = ""
+ irc.buildFinished(my_builder.getName(), my_build, None)
+ self.failUnlessEqual(irc.message, "build #862 of builder834 is complete: Success [step1 step2] Build details are at http://myserver/mypath?build=765", "Finish notification generated on success with notify_events=['success']")
+
+ irc.message = ""
+ my_build.results = builder.FAILURE
+ irc.buildFinished(my_builder.getName(), my_build, None)
+ self.failUnlessEqual(irc.message, "", "No finish notification generated on failure with notify_events=['success']")
+
+ irc.message = ""
+ my_build.results = builder.EXCEPTION
+ irc.buildFinished(my_builder.getName(), my_build, None)
+ self.failUnlessEqual(irc.message, "", "No finish notification generated on exception with notify_events=['success']")
+
+ def test_notification_failed(self):
+ irc = MyContact()
+
+ my_builder = MyBuilder("builder834")
+ my_build = MyIrcBuild(my_builder, 862, builder.FAILURE)
+ my_build.changes = (
+ Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 943),
+ )
+
+ irc.command_NOTIFY("on failure", "mynick")
+
+ irc.message = ""
+ irc.buildStarted(my_builder.getName(), my_build)
+ self.failUnlessEqual(irc.message, "", "No started notification with notify_events=['failed']")
+
+ irc.message = ""
+ irc.buildFinished(my_builder.getName(), my_build, None)
+ self.failUnlessEqual(irc.message, "build #862 of builder834 is complete: Failure [step1 step2] Build details are at http://myserver/mypath?build=765", "Finish notification generated on failure with notify_events=['failed']")
+
+ irc.message = ""
+ my_build.results = builder.SUCCESS
+ irc.buildFinished(my_builder.getName(), my_build, None)
+ self.failUnlessEqual(irc.message, "", "No finish notification generated on success with notify_events=['failed']")
+
+ irc.message = ""
+ my_build.results = builder.EXCEPTION
+ irc.buildFinished(my_builder.getName(), my_build, None)
+ self.failUnlessEqual(irc.message, "", "No finish notification generated on exception with notify_events=['failed']")
+
+ def test_notification_exception(self):
+ irc = MyContact()
+
+ my_builder = MyBuilder("builder834")
+ my_build = MyIrcBuild(my_builder, 862, builder.EXCEPTION)
+ my_build.changes = (
+ Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 943),
+ )
+
+ irc.command_NOTIFY("on exception", "mynick")
+
+ irc.message = ""
+ irc.buildStarted(my_builder.getName(), my_build)
+ self.failUnlessEqual(irc.message, "", "No started notification with notify_events=['exception']")
+
+ irc.message = ""
+ irc.buildFinished(my_builder.getName(), my_build, None)
+ self.failUnlessEqual(irc.message, "build #862 of builder834 is complete: Exception [step1 step2] Build details are at http://myserver/mypath?build=765", "Finish notification generated on failure with notify_events=['exception']")
+
+ irc.message = ""
+ my_build.results = builder.SUCCESS
+ irc.buildFinished(my_builder.getName(), my_build, None)
+ self.failUnlessEqual(irc.message, "", "No finish notification generated on success with notify_events=['exception']")
+
+ irc.message = ""
+ my_build.results = builder.FAILURE
+ irc.buildFinished(my_builder.getName(), my_build, None)
+ self.failUnlessEqual(irc.message, "", "No finish notification generated on exception with notify_events=['exception']")
+
+ def do_x_to_y_notification_test(self, notify, previous_result, new_result, expected_msg):
+ irc = MyContact()
+ irc.command_NOTIFY("on %s" % notify, "mynick")
+
+ my_builder = MyBuilder("builder834")
+ my_build = MyIrcBuild(my_builder, 862, builder.FAILURE)
+ my_build.changes = (
+ Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 943),
+ )
+
+ previous_build = MyIrcBuild(my_builder, 861, previous_result)
+ my_build.setPreviousBuild(previous_build)
+
+ irc.message = ""
+ my_build.results = new_result
+ irc.buildFinished(my_builder.getName(), my_build, None)
+ self.failUnlessEqual(irc.message, expected_msg, "Finish notification generated on failure with notify_events=['successToFailure']")
+
+ def test_notification_successToFailure(self):
+ self.do_x_to_y_notification_test(notify="successToFailure", previous_result=builder.SUCCESS, new_result=builder.FAILURE,
+ expected_msg="build #862 of builder834 is complete: Failure [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="successToFailure", previous_result=builder.SUCCESS, new_result=builder.SUCCESS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="successToFailure", previous_result=builder.SUCCESS, new_result=builder.WARNINGS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="successToFailure", previous_result=builder.SUCCESS, new_result=builder.EXCEPTION,
+ expected_msg = "" )
+
+ def test_notification_successToWarnings(self):
+ self.do_x_to_y_notification_test(notify="successToWarnings", previous_result=builder.SUCCESS, new_result=builder.WARNINGS,
+ expected_msg="build #862 of builder834 is complete: Warnings [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="successToWarnings", previous_result=builder.SUCCESS, new_result=builder.SUCCESS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="successToWarnings", previous_result=builder.SUCCESS, new_result=builder.FAILURE,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="successToWarnings", previous_result=builder.SUCCESS, new_result=builder.EXCEPTION,
+ expected_msg = "" )
+
+ def test_notification_successToException(self):
+ self.do_x_to_y_notification_test(notify="successToException", previous_result=builder.SUCCESS, new_result=builder.EXCEPTION,
+ expected_msg="build #862 of builder834 is complete: Exception [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="successToException", previous_result=builder.SUCCESS, new_result=builder.SUCCESS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="successToException", previous_result=builder.SUCCESS, new_result=builder.FAILURE,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="successToException", previous_result=builder.SUCCESS, new_result=builder.WARNINGS,
+ expected_msg = "" )
+
+
+
+
+
+ def test_notification_failureToSuccess(self):
+ self.do_x_to_y_notification_test(notify="failureToSuccess", previous_result=builder.FAILURE,new_result=builder.SUCCESS,
+ expected_msg="build #862 of builder834 is complete: Success [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="failureToSuccess", previous_result=builder.FAILURE,new_result=builder.FAILURE,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="failureToSuccess", previous_result=builder.FAILURE,new_result=builder.WARNINGS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="failureToSuccess", previous_result=builder.FAILURE,new_result=builder.EXCEPTION,
+ expected_msg = "" )
+
+ def test_notification_failureToWarnings(self):
+ self.do_x_to_y_notification_test(notify="failureToWarnings", previous_result=builder.FAILURE, new_result=builder.WARNINGS,
+ expected_msg="build #862 of builder834 is complete: Warnings [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="failureToWarnings", previous_result=builder.FAILURE, new_result=builder.SUCCESS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="failureToWarnings", previous_result=builder.FAILURE, new_result=builder.FAILURE,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="failureToWarnings", previous_result=builder.FAILURE, new_result=builder.EXCEPTION,
+ expected_msg = "" )
+
+ def test_notification_failureToException(self):
+ self.do_x_to_y_notification_test(notify="failureToException", previous_result=builder.FAILURE, new_result=builder.EXCEPTION,
+ expected_msg="build #862 of builder834 is complete: Exception [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="failureToException", previous_result=builder.FAILURE, new_result=builder.SUCCESS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="failureToException", previous_result=builder.FAILURE, new_result=builder.FAILURE,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="failureToException", previous_result=builder.FAILURE, new_result=builder.WARNINGS,
+ expected_msg = "" )
+
+
+
+
+
+ def test_notification_warningsToFailure(self):
+ self.do_x_to_y_notification_test(notify="warningsToFailure", previous_result=builder.WARNINGS, new_result=builder.FAILURE,
+ expected_msg="build #862 of builder834 is complete: Failure [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="warningsToFailure", previous_result=builder.WARNINGS, new_result=builder.SUCCESS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="warningsToFailure", previous_result=builder.WARNINGS, new_result=builder.WARNINGS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="warningsToFailure", previous_result=builder.WARNINGS, new_result=builder.EXCEPTION,
+ expected_msg = "" )
+
+ def test_notification_warningsToSuccess(self):
+ self.do_x_to_y_notification_test(notify="warningsToSuccess", previous_result=builder.WARNINGS, new_result=builder.SUCCESS,
+ expected_msg="build #862 of builder834 is complete: Success [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="warningsToSuccess", previous_result=builder.WARNINGS, new_result=builder.WARNINGS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="warningsToSuccess", previous_result=builder.WARNINGS, new_result=builder.FAILURE,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="warningsToSuccess", previous_result=builder.WARNINGS, new_result=builder.EXCEPTION,
+ expected_msg = "" )
+
+ def test_notification_warningsToException(self):
+ self.do_x_to_y_notification_test(notify="warningsToException", previous_result=builder.WARNINGS, new_result=builder.EXCEPTION,
+ expected_msg="build #862 of builder834 is complete: Exception [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="warningsToException", previous_result=builder.WARNINGS, new_result=builder.SUCCESS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="warningsToException", previous_result=builder.WARNINGS, new_result=builder.FAILURE,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="warningsToException", previous_result=builder.WARNINGS, new_result=builder.WARNINGS,
+ expected_msg = "" )
+
+
+
+
+ def test_notification_exceptionToFailure(self):
+ self.do_x_to_y_notification_test(notify="exceptionToFailure", previous_result=builder.EXCEPTION, new_result=builder.FAILURE,
+ expected_msg="build #862 of builder834 is complete: Failure [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="exceptionToFailure", previous_result=builder.EXCEPTION, new_result=builder.SUCCESS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="exceptionToFailure", previous_result=builder.EXCEPTION, new_result=builder.WARNINGS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="exceptionToFailure", previous_result=builder.EXCEPTION, new_result=builder.EXCEPTION,
+ expected_msg = "" )
+
+ def test_notification_exceptionToWarnings(self):
+ self.do_x_to_y_notification_test(notify="exceptionToWarnings", previous_result=builder.EXCEPTION, new_result=builder.WARNINGS,
+ expected_msg="build #862 of builder834 is complete: Warnings [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="exceptionToWarnings", previous_result=builder.EXCEPTION, new_result=builder.SUCCESS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="exceptionToWarnings", previous_result=builder.EXCEPTION, new_result=builder.FAILURE,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="exceptionToWarnings", previous_result=builder.EXCEPTION, new_result=builder.EXCEPTION,
+ expected_msg = "" )
+
+ def test_notification_exceptionToSuccess(self):
+ self.do_x_to_y_notification_test(notify="exceptionToSuccess", previous_result=builder.EXCEPTION, new_result=builder.SUCCESS,
+ expected_msg="build #862 of builder834 is complete: Success [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="exceptionToSuccess", previous_result=builder.EXCEPTION, new_result=builder.EXCEPTION,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="exceptionToSuccess", previous_result=builder.EXCEPTION, new_result=builder.FAILURE,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="exceptionToSuccess", previous_result=builder.EXCEPTION, new_result=builder.WARNINGS,
+ expected_msg = "" )
+
+ def test_notification_set_in_config(self):
+ irc = MyContact(channel = MyChannel(notify_events = {'success': 1}))
+
+ my_builder = MyBuilder("builder834")
+ my_build = MyIrcBuild(my_builder, 862, builder.SUCCESS)
+ my_build.changes = (
+ Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 943),
+ )
+
+ irc.message = ""
+ irc.buildFinished(my_builder.getName(), my_build, None)
+ self.failUnlessEqual(irc.message, "build #862 of builder834 is complete: Success [step1 step2] Build details are at http://myserver/mypath?build=765", "Finish notification generated on success with notify_events=['success']")
+
+class MyIrcBuild(builder.BuildStatus):
+ # BuildStatus stub with a settable result and an injectable previous
+ # build, so ContactTester can fake result transitions.
+ results = None
+
+ def __init__(self, parent, number, results):
+ builder.BuildStatus.__init__(self, parent, number)
+ self.results = results
+ self.previousBuild = None
+
+ def getResults(self):
+ return self.results
+
+ def getText(self):
+ # fixed step summary used in the expected notification strings
+ return ('step1', 'step2')
+
+ def setPreviousBuild(self, pb):
+ self.previousBuild = pb
+
+ def getPreviousBuild(self):
+ return self.previousBuild
+
+class URLProducer:
+ # Status stub: always returns the fixed URL that the expected
+ # notification messages embed.
+ def getURLForThing(self, build):
+ return 'http://myserver/mypath?build=765'
+
+class MyChannel:
+ categories = None
+ status = URLProducer()
+ notify_events = {}
+
+ def __init__(self, notify_events = {}):
+ self.notify_events = notify_events
+
+class MyContact(words.Contact):
+ # Contact whose outgoing IRC traffic is captured in self.message so
+ # tests can assert on it; build-event subscription is stubbed out.
+ message = ""
+
+ def __init__(self, channel = MyChannel()):
+ # NOTE(review): the default MyChannel() is evaluated once at class
+ # definition time, so every MyContact() created without an explicit
+ # channel shares the same channel instance -- confirm that this
+ # sharing is intended (or harmless) across the tests above.
+ words.Contact.__init__(self, channel)
+ self.message = ""
+
+ def subscribe_to_build_events(self):
+ pass
+
+ def unsubscribe_from_build_events(self):
+ pass
+
+ def send(self, msg):
+ # accumulate instead of sending over IRC
+ self.message += msg
+
+class StepStatistics(unittest.TestCase):
+ # per-step setStatistic/getStatistic and the build-wide
+ # getSummaryStatistic() aggregation
+ def testStepStatistics(self):
+ status = builder.BuildStatus(builder.BuilderStatus("test"), 123)
+ status.addStepWithName('step1')
+ status.addStepWithName('step2')
+ status.addStepWithName('step3')
+ status.addStepWithName('step4')
+
+ steps = status.getSteps()
+ (step1, step2, step3, step4) = steps
+
+ # step2 deliberately gets no 'test-prop', to exercise defaulting
+ step1.setStatistic('test-prop', 1)
+ step3.setStatistic('test-prop', 2)
+ step4.setStatistic('test-prop', 4)
+
+ step1.setStatistic('other-prop', 27)
+ # Just to have some other properties around
+
+ self.failUnlessEqual(step1.getStatistic('test-prop'), 1,
+ 'Retrieve an existing property')
+ self.failUnlessEqual(step1.getStatistic('test-prop', 99), 1,
+ "Don't default an existing property")
+ self.failUnlessEqual(step2.getStatistic('test-prop', 99), 99,
+ 'Default a non-existant property')
+
+ self.failUnlessEqual(
+ status.getSummaryStatistic('test-prop', operator.add), 7,
+ 'Sum property across the build')
+
+ self.failUnlessEqual(
+ status.getSummaryStatistic('test-prop', operator.add, 13), 20,
+ 'Sum property across the build with initial value')
+
+class BuildExpectation(unittest.TestCase):
+ class MyBuilderStatus:
+ implements(interfaces.IBuilderStatus)
+
+ def setSlavenames(self, slaveName):
+ pass
+
+ class MyBuilder(Builder):
+ def __init__(self, name):
+ Builder.__init__(self, {
+ 'name': name,
+ 'builddir': '/tmp/somewhere',
+ 'factory': 'aFactory'
+ }, BuildExpectation.MyBuilderStatus())
+
+ class MyBuild(Build):
+ def __init__(self, b):
+ self.builder = b
+ self.remote = None
+
+ step1_progress = progress.StepProgress('step1', ['elapsed'])
+ self.progress = progress.BuildProgress([step1_progress])
+ step1_progress.setBuildProgress(self.progress)
+
+ step1_progress.start()
+ sleep(1);
+ step1_progress.finish()
+
+ self.deferred = defer.Deferred()
+ self.locks = []
+ self.build_status = builder.BuildStatus(b.builder_status, 1)
+
+
+ def testBuildExpectation_BuildSuccess(self):
+ b = BuildExpectation.MyBuilder("builder1")
+ build = BuildExpectation.MyBuild(b)
+
+ build.buildFinished(['sometext'], builder.SUCCESS)
+ self.failIfEqual(b.expectations.expectedBuildTime(), 0, 'Non-Zero expectation for a failed build')
+
+ def testBuildExpectation_BuildFailure(self):
+ b = BuildExpectation.MyBuilder("builder1")
+ build = BuildExpectation.MyBuild(b)
+
+ build.buildFinished(['sometext'], builder.FAILURE)
+ self.failUnlessEqual(b.expectations, None, 'Zero expectation for a failed build')
diff --git a/buildbot/buildbot/test/test_steps.py b/buildbot/buildbot/test/test_steps.py
new file mode 100644
index 0000000..880658c
--- /dev/null
+++ b/buildbot/buildbot/test/test_steps.py
@@ -0,0 +1,788 @@
+# -*- test-case-name: buildbot.test.test_steps -*-
+
+# create the BuildStep with a fake .remote instance that logs the
+# .callRemote invocations and compares them against the expected calls. Then
+# the test harness should send statusUpdate() messages in with assorted
+# data, eventually calling remote_complete(). Then we can verify that the
+# Step's rc was correct, and that the status it was supposed to return
+# matches.
+
+# sometimes, .callRemote should raise an exception because of a stale
+# reference. Sometimes it should errBack with an UnknownCommand failure.
+# Or other failure.
+
+# todo: test batched updates, by invoking remote_update(updates) instead of
+# statusUpdate(update). Also involves interrupted builds.
+
+import os
+
+from twisted.trial import unittest
+from twisted.internet import reactor, defer
+
+from buildbot.sourcestamp import SourceStamp
+from buildbot.process import buildstep, base, factory
+from buildbot.buildslave import BuildSlave
+from buildbot.steps import shell, source, python, master
+from buildbot.status import builder
+from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE
+from buildbot.test.runutils import RunMixin, rmtree
+from buildbot.test.runutils import makeBuildStep, StepTester
+from buildbot.slave import commands, registry
+
+
+class MyShellCommand(shell.ShellCommand):
+ # ShellCommand that exposes the RemoteCommand it launches (self.rc)
+ # so the test can drive remoteUpdate()/remote_complete() directly.
+ started = False
+ def runCommand(self, c):
+ self.started = True
+ self.rc = c
+ return shell.ShellCommand.runCommand(self, c)
+
+class FakeBuild:
+ # empty placeholder standing in for a process.base.Build
+ pass
+class FakeBuilder:
+ # minimal Builder stand-in: just the attributes the steps touch
+ statusbag = None
+ name = "fakebuilder"
+class FakeSlaveBuilder:
+ # reports a fixed slave command version, bypassing a real slave
+ def getSlaveCommandVersion(self, command, oldversion=None):
+ return "1.10"
+
+class FakeRemote:
+ # Stand-in for the slave's remote reference: records every
+ # callRemote() invocation in self.events and hands back a fresh
+ # Deferred (kept in self.deferred) that the test can fire itself.
+ def __init__(self):
+ self.events = []
+ self.remoteCalls = 0
+ #self.callRemoteNotifier = None
+ def callRemote(self, methname, *args):
+ event = ["callRemote", methname, args]
+ self.events.append(event)
+## if self.callRemoteNotifier:
+## reactor.callLater(0, self.callRemoteNotifier, event)
+ self.remoteCalls += 1
+ self.deferred = defer.Deferred()
+ return self.deferred
+ def notifyOnDisconnect(self, callback):
+ pass
+ def dontNotifyOnDisconnect(self, callback):
+ pass
+
+
+class BuildStep(unittest.TestCase):
+ # Runs a MyShellCommand against a FakeRemote, then plays the slave's
+ # role by feeding statusUpdate-style messages into the RemoteCommand
+ # and checking the callRemote traffic and resulting log contents.
+
+ def setUp(self):
+ # fresh builder-status directory for each run
+ rmtree("test_steps")
+ self.builder = FakeBuilder()
+ self.builder_status = builder.BuilderStatus("fakebuilder")
+ self.builder_status.basedir = "test_steps"
+ self.builder_status.nextBuildNumber = 0
+ os.mkdir(self.builder_status.basedir)
+ self.build_status = self.builder_status.newBuild()
+ req = base.BuildRequest("reason", SourceStamp(), 'test_builder')
+ self.build = base.Build([req])
+ self.build.build_status = self.build_status # fake it
+ self.build.builder = self.builder
+ self.build.slavebuilder = FakeSlaveBuilder()
+ self.remote = FakeRemote()
+ self.finished = 0
+
+ # callback/errback latch the step's outcome for the poll2() loop
+ def callback(self, results):
+ self.failed = 0
+ self.failure = None
+ self.results = results
+ self.finished = 1
+ def errback(self, failure):
+ self.failed = 1
+ self.failure = failure
+ self.results = None
+ self.finished = 1
+
+ def testShellCommand1(self):
+ cmd = "argle bargle"
+ dir = "murkle"
+ self.expectedEvents = []
+ # pin the command counter so the command id below is exactly "3"
+ buildstep.RemoteCommand.commandCounter[0] = 3
+ c = MyShellCommand(workdir=dir, command=cmd, timeout=10)
+ c.setBuild(self.build)
+ c.setBuildSlave(BuildSlave("name", "password"))
+ self.assertEqual(self.remote.events, self.expectedEvents)
+ c.step_status = self.build_status.addStepWithName("myshellcommand")
+ d = c.startStep(self.remote)
+ self.failUnless(c.started)
+ d.addCallbacks(self.callback, self.errback)
+ d2 = self.poll()
+ d2.addCallback(self._testShellCommand1_2, c)
+ return d2
+ testShellCommand1.timeout = 10
+
+ def poll(self, ignored=None):
+ # TODO: This is gross, but at least it's no longer using
+ # reactor.iterate() . Still, get rid of this some day soon.
+ if self.remote.remoteCalls == 0:
+ d = defer.Deferred()
+ d.addCallback(self.poll)
+ reactor.callLater(0.1, d.callback, None)
+ return d
+ return defer.succeed(None)
+
+ def _testShellCommand1_2(self, res, c):
+ rc = c.rc
+ # the step must have issued exactly this startCommand call
+ self.expectedEvents.append(["callRemote", "startCommand",
+ (rc, "3",
+ "shell",
+ {'command': "argle bargle",
+ 'workdir': "murkle",
+ 'want_stdout': 1,
+ 'want_stderr': 1,
+ 'logfiles': {},
+ 'timeout': 10,
+ 'usePTY': 'slave-config',
+ 'env': None}) ] )
+ self.assertEqual(self.remote.events, self.expectedEvents)
+
+ # we could do self.remote.deferred.errback(UnknownCommand) here. We
+ # could also do .callback(), but generally the master end silently
+ # ignores the slave's ack
+
+ # NOTE(review): if no log is named "log", 'log' is left bound to the
+ # last log iterated over -- this appears to rely on that fall-through;
+ # confirm which log the assertions below are meant to target.
+ logs = c.step_status.getLogs()
+ for log in logs:
+ if log.getName() == "log":
+ break
+
+ # play the slave: push header/stdout/stderr updates and the rc
+ rc.remoteUpdate({'header':
+ "command 'argle bargle' in dir 'murkle'\n\n"})
+ rc.remoteUpdate({'stdout': "foo\n"})
+ self.assertEqual(log.getText(), "foo\n")
+ self.assertEqual(log.getTextWithHeaders(),
+ "command 'argle bargle' in dir 'murkle'\n\n"
+ "foo\n")
+ rc.remoteUpdate({'stderr': "bar\n"})
+ self.assertEqual(log.getText(), "foo\nbar\n")
+ self.assertEqual(log.getTextWithHeaders(),
+ "command 'argle bargle' in dir 'murkle'\n\n"
+ "foo\nbar\n")
+ rc.remoteUpdate({'rc': 0})
+ self.assertEqual(rc.rc, 0)
+
+ rc.remote_complete()
+ # that should fire the Deferred
+ d = self.poll2()
+ d.addCallback(self._testShellCommand1_3)
+ return d
+
+ def poll2(self, ignored=None):
+ # wait (in 0.1s hops) until callback/errback has latched a result
+ if not self.finished:
+ d = defer.Deferred()
+ d.addCallback(self.poll2)
+ reactor.callLater(0.1, d.callback, None)
+ return d
+ return defer.succeed(None)
+
+ def _testShellCommand1_3(self, res):
+ self.assertEqual(self.failed, 0)
+ self.assertEqual(self.results, 0)
+
+
+class MyObserver(buildstep.LogObserver):
+ # accumulates everything sent to stdout into self.out
+ out = ""
+ def outReceived(self, data):
+ self.out = self.out + data
+
class Steps(unittest.TestCase):
    """Unit tests for BuildStep basics: factory construction, cloning via
    getStepFactory(), properties, URLs, logs, log observers, and
    ShellCommand description normalization."""

    def testMultipleStepInstances(self):
        # a factory must accept several instances of the same step class
        # (three Compiles, two Tests here) without them colliding
        steps = [
            (source.CVS, {'cvsroot': "root", 'cvsmodule': "module"}),
            (shell.Configure, {'command': "./configure"}),
            (shell.Compile, {'command': "make"}),
            (shell.Compile, {'command': "make more"}),
            (shell.Compile, {'command': "make evenmore"}),
            (shell.Test, {'command': "make test"}),
            (shell.Test, {'command': "make testharder"}),
            ]
        f = factory.ConfigurableBuildFactory(steps)
        req = base.BuildRequest("reason", SourceStamp(), 'test_builder')
        b = f.newBuild([req])
        #for s in b.steps: print s.name

    def failUnlessClones(self, s1, attrnames):
        # re-create a step from its own step factory and verify that the
        # named attributes survive the round trip
        f1 = s1.getStepFactory()
        f,args = f1
        s2 = f(**args)
        for name in attrnames:
            self.failUnlessEqual(getattr(s1, name), getattr(s2, name))

    def clone(self, s1):
        # return a fresh step instance built from s1's step factory
        f1 = s1.getStepFactory()
        f,args = f1
        s2 = f(**args)
        return s2

    def testClone(self):
        s1 = shell.ShellCommand(command=["make", "test"],
                                timeout=1234,
                                workdir="here",
                                description="yo",
                                descriptionDone="yoyo",
                                env={'key': 'value'},
                                want_stdout=False,
                                want_stderr=False,
                                logfiles={"name": "filename"},
                                )
        # all BuildStep parameters plus the ShellCommand-specific ones
        # must be preserved by cloning
        shellparms = (buildstep.BuildStep.parms +
                      ("remote_kwargs description descriptionDone "
                       "command logfiles").split() )
        self.failUnlessClones(s1, shellparms)


    # test the various methods available to buildsteps

    def test_getProperty(self):
        s = makeBuildStep("test_steps.Steps.test_getProperty")
        bs = s.step_status.getBuild()

        # properties set on the step are visible on both the step and the
        # enclosing build status
        s.setProperty("prop1", "value1", "test")
        s.setProperty("prop2", "value2", "test")
        self.failUnlessEqual(s.getProperty("prop1"), "value1")
        self.failUnlessEqual(bs.getProperty("prop1"), "value1")
        self.failUnlessEqual(s.getProperty("prop2"), "value2")
        self.failUnlessEqual(bs.getProperty("prop2"), "value2")
        # re-setting an existing property overwrites the old value
        s.setProperty("prop1", "value1a", "test")
        self.failUnlessEqual(s.getProperty("prop1"), "value1a")
        self.failUnlessEqual(bs.getProperty("prop1"), "value1a")


    def test_addURL(self):
        s = makeBuildStep("test_steps.Steps.test_addURL")
        s.addURL("coverage", "http://coverage.example.org/target")
        s.addURL("icon", "http://coverage.example.org/icon.png")
        bs = s.step_status
        links = bs.getURLs()
        expected = {"coverage": "http://coverage.example.org/target",
                    "icon": "http://coverage.example.org/icon.png",
                    }
        self.failUnlessEqual(links, expected)

    def test_addLog(self):
        s = makeBuildStep("test_steps.Steps.test_addLog")
        l = s.addLog("newlog")
        l.addStdout("some stdout here")
        l.finish()
        bs = s.step_status
        logs = bs.getLogs()
        self.failUnlessEqual(len(logs), 1)
        l1 = logs[0]
        self.failUnlessEqual(l1.getText(), "some stdout here")
        # the log must also be retrievable by name from the step itself
        l1a = s.getLog("newlog")
        self.failUnlessEqual(l1a.getText(), "some stdout here")

    def test_addHTMLLog(self):
        s = makeBuildStep("test_steps.Steps.test_addHTMLLog")
        l = s.addHTMLLog("newlog", "some html here")
        bs = s.step_status
        logs = bs.getLogs()
        self.failUnlessEqual(len(logs), 1)
        l1 = logs[0]
        # HTML logs are stored as HTMLLogFile, not a plain log file
        self.failUnless(isinstance(l1, builder.HTMLLogFile))
        self.failUnlessEqual(l1.getText(), "some html here")

    def test_addCompleteLog(self):
        s = makeBuildStep("test_steps.Steps.test_addCompleteLog")
        l = s.addCompleteLog("newlog", "some stdout here")
        bs = s.step_status
        logs = bs.getLogs()
        self.failUnlessEqual(len(logs), 1)
        l1 = logs[0]
        self.failUnlessEqual(l1.getText(), "some stdout here")
        l1a = s.getLog("newlog")
        self.failUnlessEqual(l1a.getText(), "some stdout here")

    def test_addLogObserver(self):
        s = makeBuildStep("test_steps.Steps.test_addLogObserver")
        bss = s.step_status
        o1,o2,o3 = MyObserver(), MyObserver(), MyObserver()

        # add the log before the observer: the observer must be caught up
        # with data that was logged before it attached
        l1 = s.addLog("one")
        l1.addStdout("onestuff")
        s.addLogObserver("one", o1)
        self.failUnlessEqual(o1.out, "onestuff")
        l1.addStdout(" morestuff")
        self.failUnlessEqual(o1.out, "onestuff morestuff")

        # add the observer before the log: it must start receiving data
        # once the log appears
        s.addLogObserver("two", o2)
        l2 = s.addLog("two")
        l2.addStdout("twostuff")
        self.failUnlessEqual(o2.out, "twostuff")

    # test more stuff about ShellCommands

    def test_description(self):
        # list-valued descriptions are kept as-is ...
        s = makeBuildStep("test_steps.Steps.test_description.1",
                          step_class=shell.ShellCommand,
                          workdir="dummy",
                          description=["list", "of", "strings"],
                          descriptionDone=["another", "list"])
        self.failUnlessEqual(s.description, ["list", "of", "strings"])
        self.failUnlessEqual(s.descriptionDone, ["another", "list"])

        # ... while plain strings are wrapped in a one-element list
        s = makeBuildStep("test_steps.Steps.test_description.2",
                          step_class=shell.ShellCommand,
                          workdir="dummy",
                          description="single string",
                          descriptionDone="another string")
        self.failUnlessEqual(s.description, ["single string"])
        self.failUnlessEqual(s.descriptionDone, ["another string"])
+
class VersionCheckingStep(buildstep.BuildStep):
    """A step that hands control back to the running test case.

    The test installs a callable as master._checker; when this step starts
    it invokes that checker with itself, then finishes with SUCCESS.
    """
    def start(self):
        # give our test a chance to run. It is non-trivial for a buildstep to
        # claw its way back out to the test case which is currently running.
        master = self.build.builder.botmaster.parent
        checker = master._checker
        checker(self)
        # then complete
        self.finished(buildstep.SUCCESS)
+
+version_config = """
+from buildbot.process import factory
+from buildbot.test.test_steps import VersionCheckingStep
+from buildbot.buildslave import BuildSlave
+BuildmasterConfig = c = {}
+f1 = factory.BuildFactory([
+ factory.s(VersionCheckingStep),
+ ])
+c['slaves'] = [BuildSlave('bot1', 'sekrit')]
+c['schedulers'] = []
+c['builders'] = [{'name':'quick', 'slavename':'bot1',
+ 'builddir': 'quickdir', 'factory': f1}]
+c['slavePortnum'] = 0
+"""
+
class SlaveVersion(RunMixin, unittest.TestCase):
    """Exercise slaveVersion()/slaveVersionIsOlderThan() as seen from a
    running buildstep, using a live master with one connected slave."""

    def setUp(self):
        RunMixin.setUp(self)
        self.master.loadConfig(version_config)
        self.master.startService()
        d = self.connectSlave(["quick"])
        return d

    def doBuild(self, buildername):
        # force one build on the named builder; the returned Deferred
        # fires when the build finishes
        br = base.BuildRequest("forced", SourceStamp(), 'test_builder')
        d = br.waitUntilFinished()
        self.control.getBuilder(buildername).requestBuild(br)
        return d


    def checkCompare(self, s):
        # invoked (via master._checker) from inside
        # VersionCheckingStep.start, so 's' is the running step
        cver = commands.command_version
        v = s.slaveVersion("svn", None)
        # this insures that we are getting the version correctly
        self.failUnlessEqual(s.slaveVersion("svn", None), cver)
        # and that non-existent commands do not provide a version
        self.failUnlessEqual(s.slaveVersion("NOSUCHCOMMAND"), None)
        # TODO: verify that a <=0.5.0 buildslave (which does not implement
        # remote_getCommands) handles oldversion= properly. This requires a
        # mutant slave which does not offer that method.
        #self.failUnlessEqual(s.slaveVersion("NOSUCHCOMMAND", "old"), "old")

        # now check the comparison functions
        self.failIf(s.slaveVersionIsOlderThan("svn", cver))
        self.failIf(s.slaveVersionIsOlderThan("svn", "1.1"))
        self.failUnless(s.slaveVersionIsOlderThan("svn", cver + ".1"))

        self.failUnlessEqual(s.getSlaveName(), "bot1")

    def testCompare(self):
        self.master._checker = self.checkCompare
        d = self.doBuild("quick")
        return d
+
+
class _SimpleBuildStep(buildstep.BuildStep):
    """Master-side step that runs the slave-side 'simple' command with a
    fixed argument dictionary."""
    def start(self):
        args = {"arg1": "value"}
        cmd = buildstep.RemoteCommand("simple", args)
        d = self.runCommand(cmd)
        # NOTE(review): no errback — a failing command would leave the
        # step unfinished; acceptable for this controlled test
        d.addCallback(lambda res: self.finished(SUCCESS))
+
class _SimpleCommand(commands.Command):
    """Slave-side command that records (on its SlaveBuilder) that it ran
    and with which arguments, for the test to inspect afterwards."""
    def start(self):
        self.builder.flag = True
        self.builder.flag_args = self.args
        return defer.succeed(None)
+
class CheckStepTester(StepTester, unittest.TestCase):
    """Sanity-check the StepTester harness itself: run a trivial
    master-side step against a slave-side command and confirm the command
    executed with the expected arguments."""
    def testSimple(self):
        self.slavebase = "testSimple.slave"
        self.masterbase = "testSimple.master"
        sb = self.makeSlaveBuilder()
        sb.flag = False
        # make the 'simple' command available on the (local) slave
        registry.registerSlaveCommand("simple", _SimpleCommand, "1")
        step = self.makeStep(_SimpleBuildStep)
        d = self.runStep(step)
        def _checkSimple(results):
            self.failUnless(sb.flag)
            self.failUnlessEqual(sb.flag_args, {"arg1": "value"})
        d.addCallback(_checkSimple)
        return d
+
class Python(StepTester, unittest.TestCase):
    """Tests for the PyFlakes step: parsing pyflakes output into warning
    categories, per-category logs, properties, and result evaluation."""

    def testPyFlakes1(self):
        # clean pyflakes output: every line is a recognized warning
        self.masterbase = "Python.testPyFlakes1"
        step = self.makeStep(python.PyFlakes)
        output = \
"""pyflakes buildbot
buildbot/changes/freshcvsmail.py:5: 'FCMaildirSource' imported but unused
buildbot/clients/debug.py:9: redefinition of unused 'gtk' from line 9
buildbot/clients/debug.py:9: 'gnome' imported but unused
buildbot/scripts/runner.py:323: redefinition of unused 'run' from line 321
buildbot/scripts/runner.py:325: redefinition of unused 'run' from line 323
buildbot/scripts/imaginary.py:12: undefined name 'size'
buildbot/scripts/imaginary.py:18: 'from buildbot import *' used; unable to detect undefined names
"""
        log = step.addLog("stdio")
        log.addStdout(output)
        log.finish()
        step.createSummary(log)
        # each category with a non-zero count appears in the description
        desc = step.descriptionDone
        self.failUnless("unused=2" in desc)
        self.failUnless("undefined=1" in desc)
        self.failUnless("redefs=3" in desc)
        self.failUnless("import*=1" in desc)
        self.failIf("misc=" in desc)

        # counts are also exposed as step properties
        self.failUnlessEqual(step.getProperty("pyflakes-unused"), 2)
        self.failUnlessEqual(step.getProperty("pyflakes-undefined"), 1)
        self.failUnlessEqual(step.getProperty("pyflakes-redefs"), 3)
        self.failUnlessEqual(step.getProperty("pyflakes-import*"), 1)
        self.failUnlessEqual(step.getProperty("pyflakes-misc"), 0)
        self.failUnlessEqual(step.getProperty("pyflakes-total"), 7)

        # one extra log per non-empty category, none for empty ones
        logs = {}
        for log in step.step_status.getLogs():
            logs[log.getName()] = log

        for name in ["unused", "undefined", "redefs", "import*"]:
            self.failUnless(name in logs)
        self.failIf("misc" in logs)
        lines = logs["unused"].readlines()
        self.failUnlessEqual(len(lines), 2)
        self.failUnlessEqual(lines[0], "buildbot/changes/freshcvsmail.py:5: 'FCMaildirSource' imported but unused\n")

        cmd = buildstep.RemoteCommand(None, {})
        cmd.rc = 0
        results = step.evaluateCommand(cmd)
        self.failUnlessEqual(results, FAILURE) # because of the 'undefined'

    def testPyFlakes2(self):
        # output with unrecognized lines: they must be counted as 'misc'
        self.masterbase = "Python.testPyFlakes2"
        step = self.makeStep(python.PyFlakes)
        output = \
"""pyflakes buildbot
some more text here that should be ignored
buildbot/changes/freshcvsmail.py:5: 'FCMaildirSource' imported but unused
buildbot/clients/debug.py:9: redefinition of unused 'gtk' from line 9
buildbot/clients/debug.py:9: 'gnome' imported but unused
buildbot/scripts/runner.py:323: redefinition of unused 'run' from line 321
buildbot/scripts/runner.py:325: redefinition of unused 'run' from line 323
buildbot/scripts/imaginary.py:12: undefined name 'size'
could not compile 'blah/blah.py':3:
pretend there was an invalid line here
buildbot/scripts/imaginary.py:18: 'from buildbot import *' used; unable to detect undefined names
"""
        log = step.addLog("stdio")
        log.addStdout(output)
        log.finish()
        step.createSummary(log)
        desc = step.descriptionDone
        self.failUnless("unused=2" in desc)
        self.failUnless("undefined=1" in desc)
        self.failUnless("redefs=3" in desc)
        self.failUnless("import*=1" in desc)
        self.failUnless("misc=2" in desc)


    def testPyFlakes3(self):
        # output without the leading 'pyflakes buildbot' command line
        self.masterbase = "Python.testPyFlakes3"
        step = self.makeStep(python.PyFlakes)
        output = \
"""buildbot/changes/freshcvsmail.py:5: 'FCMaildirSource' imported but unused
buildbot/clients/debug.py:9: redefinition of unused 'gtk' from line 9
buildbot/clients/debug.py:9: 'gnome' imported but unused
buildbot/scripts/runner.py:323: redefinition of unused 'run' from line 321
buildbot/scripts/runner.py:325: redefinition of unused 'run' from line 323
buildbot/scripts/imaginary.py:12: undefined name 'size'
buildbot/scripts/imaginary.py:18: 'from buildbot import *' used; unable to detect undefined names
"""
        log = step.addLog("stdio")
        log.addStdout(output)
        log.finish()
        step.createSummary(log)
        desc = step.descriptionDone
        self.failUnless("unused=2" in desc)
        self.failUnless("undefined=1" in desc)
        self.failUnless("redefs=3" in desc)
        self.failUnless("import*=1" in desc)
        self.failIf("misc" in desc)
+
+
class OrdinaryCompile(shell.Compile):
    # Compile subclass whose warning regexp matches the fixture's
    # "ordinary line" instead of the default warning pattern.
    warningPattern = "ordinary line"
+
class Warnings(StepTester, unittest.TestCase):
    """Tests for the Compile step's warning scanning: default pattern,
    pattern overridden per-instance, and pattern overridden by subclass
    (including accumulation of a pre-existing warnings-count)."""

    def testCompile1(self):
        # default warningPattern: matches the two lines containing
        # 'warning:'
        self.masterbase = "Warnings.testCompile1"
        step = self.makeStep(shell.Compile)
        output = \
"""Compile started
normal line
warning: oh noes!
ordinary line
error (but we aren't looking for errors now, are we)
line 23: warning: we are now on line 23
ending line
"""
        log = step.addLog("stdio")
        log.addStdout(output)
        log.finish()
        step.createSummary(log)
        self.failUnlessEqual(step.getProperty("warnings-count"), 2)
        logs = {}
        for log in step.step_status.getLogs():
            logs[log.getName()] = log
        # matching lines are copied into a separate 'warnings' log
        self.failUnless("warnings" in logs)
        lines = logs["warnings"].readlines()
        self.failUnlessEqual(len(lines), 2)
        self.failUnlessEqual(lines[0], "warning: oh noes!\n")
        self.failUnlessEqual(lines[1],
                             "line 23: warning: we are now on line 23\n")

        cmd = buildstep.RemoteCommand(None, {})
        cmd.rc = 0
        results = step.evaluateCommand(cmd)
        self.failUnlessEqual(results, WARNINGS)

    def testCompile2(self):
        # warningPattern passed as a constructor argument
        self.masterbase = "Warnings.testCompile2"
        step = self.makeStep(shell.Compile, warningPattern="ordinary line")
        output = \
"""Compile started
normal line
warning: oh noes!
ordinary line
error (but we aren't looking for errors now, are we)
line 23: warning: we are now on line 23
ending line
"""
        log = step.addLog("stdio")
        log.addStdout(output)
        log.finish()
        step.createSummary(log)
        self.failUnlessEqual(step.getProperty("warnings-count"), 1)
        logs = {}
        for log in step.step_status.getLogs():
            logs[log.getName()] = log
        self.failUnless("warnings" in logs)
        lines = logs["warnings"].readlines()
        self.failUnlessEqual(len(lines), 1)
        self.failUnlessEqual(lines[0], "ordinary line\n")

        cmd = buildstep.RemoteCommand(None, {})
        cmd.rc = 0
        results = step.evaluateCommand(cmd)
        self.failUnlessEqual(results, WARNINGS)

    def testCompile3(self):
        # warningPattern overridden by a subclass; a pre-set
        # warnings-count property must be added to, not replaced
        self.masterbase = "Warnings.testCompile3"
        step = self.makeStep(OrdinaryCompile)
        output = \
"""Compile started
normal line
warning: oh noes!
ordinary line
error (but we aren't looking for errors now, are we)
line 23: warning: we are now on line 23
ending line
"""
        step.setProperty("warnings-count", 10, "test")
        log = step.addLog("stdio")
        log.addStdout(output)
        log.finish()
        step.createSummary(log)
        self.failUnlessEqual(step.getProperty("warnings-count"), 11)
        logs = {}
        for log in step.step_status.getLogs():
            logs[log.getName()] = log
        self.failUnless("warnings" in logs)
        lines = logs["warnings"].readlines()
        self.failUnlessEqual(len(lines), 1)
        self.failUnlessEqual(lines[0], "ordinary line\n")

        cmd = buildstep.RemoteCommand(None, {})
        cmd.rc = 0
        results = step.evaluateCommand(cmd)
        self.failUnlessEqual(results, WARNINGS)
+
+
class TreeSize(StepTester, unittest.TestCase):
    def testTreeSize(self):
        """Run the TreeSize step on a fresh (essentially empty) build dir
        and check the 'tree-size-KiB' property and the status text."""
        self.slavebase = "TreeSize.testTreeSize.slave"
        self.masterbase = "TreeSize.testTreeSize.master"

        sb = self.makeSlaveBuilder()
        step = self.makeStep(shell.TreeSize)
        d = self.runStep(step)
        def _check(results):
            self.failUnlessEqual(results, SUCCESS)
            kib = step.getProperty("tree-size-KiB")
            self.failUnless(isinstance(kib, int))
            self.failUnless(kib < 100) # should be empty, I get '4'
            s = step.step_status
            self.failUnlessEqual(" ".join(s.getText()),
                                 "treesize %d KiB" % kib)
        d.addCallback(_check)
        return d
+
class FakeCommand:
    """Minimal stand-in for a RemoteCommand.

    The evaluateCommand() implementations under test only inspect the
    command's .rc attribute (its exit code), so that is all this fake
    carries.
    """

    def __init__(self, rc):
        # rc: the exit code this fake command should report
        self.rc = rc

    def __repr__(self):
        # aid debugging of failed assertions in the tests that use this
        return "<FakeCommand rc=%r>" % (self.rc,)
+
class PerlModuleTest(StepTester, unittest.TestCase):
    """Tests for PerlModuleTest's parsing of Test::Harness output, both
    the old and new harness formats, plus unparseable output."""

    def testAllTestsPassed(self):
        # 'All tests successful': step succeeds regardless of the
        # command's own exit code
        self.masterbase = "PMT.testAllTestsPassed"
        step = self.makeStep(shell.PerlModuleTest)
        output = \
"""ok 1
ok 2
All tests successful
Files=1, Tests=123, other stuff
"""
        log = step.addLog("stdio")
        log.addStdout(output)
        log.finish()
        rc = step.evaluateCommand(FakeCommand(rc=241))
        self.failUnlessEqual(rc, SUCCESS)
        ss = step.step_status
        self.failUnlessEqual(ss.getStatistic('tests-failed'), 0)
        self.failUnlessEqual(ss.getStatistic('tests-total'), 123)
        self.failUnlessEqual(ss.getStatistic('tests-passed'), 123)

    def testFailures_OldTestHarness(self):
        # old-style 'M/N subtests failed' summary line
        self.masterbase = "PMT.testFailures_OldTestHarness"
        step = self.makeStep(shell.PerlModuleTest)
        output = \
"""
ok 1
ok 2
3/7 subtests failed
"""
        log = step.addLog("stdio")
        log.addStdout(output)
        log.finish()
        rc = step.evaluateCommand(FakeCommand(rc = 123))
        self.failUnlessEqual(rc, FAILURE)
        ss = step.step_status
        self.failUnlessEqual(ss.getStatistic('tests-failed'), 3)
        self.failUnlessEqual(ss.getStatistic('tests-total'), 7)
        self.failUnlessEqual(ss.getStatistic('tests-passed'), 4)

    def testFailures_UnparseableStdio(self):
        # output that matches no known harness format: fall back to the
        # command's exit code, with no statistics recorded
        self.masterbase = "PMT.testFailures_UnparseableStdio"
        step = self.makeStep(shell.PerlModuleTest)
        output = \
"""
just some random stuff, you know
"""
        log = step.addLog("stdio")
        log.addStdout(output)
        log.finish()
        rc = step.evaluateCommand(FakeCommand(rc = 243))
        self.failUnlessEqual(rc, 243)
        ss = step.step_status
        self.failUnlessEqual(ss.getStatistic('tests-failed'), None)
        self.failUnlessEqual(ss.getStatistic('tests-total'), None)
        self.failUnlessEqual(ss.getStatistic('tests-passed'), None)

    def testFailures_NewTestHarness(self):
        # new-style TAP::Harness summary report with per-file failures
        self.masterbase = "PMT.testFailures_NewTestHarness"
        step = self.makeStep(shell.PerlModuleTest)
        output = \
"""
# Looks like you failed 15 tests of 18.
tests/services.......................... Failed 265/30904 subtests
 (less 16 skipped subtests: 30623 okay)
tests/simple_query_backend..............ok
tests/simple_query_middleware...........ok
tests/soap_globalcollect................ok
tests/three_d_me........................ok
tests/three_d_me_callback...............ok
tests/transaction_create................ok
tests/unique_txid.......................ok

Test Summary Report
-------------------
tests/000policies (Wstat: 5632 Tests: 9078 Failed: 22)
  Failed tests: 2409, 2896-2897, 2900-2901, 2940-2941, 2944-2945
                2961-2962, 2965-2966, 2969-2970, 2997-2998
                3262, 3281-3282, 3288-3289
  Non-zero exit status: 22
tests/services (Wstat: 0 Tests: 30904 Failed: 265)
  Failed tests: 14, 16-21, 64-69, 71-96, 98, 30157, 30159
                30310, 30316, 30439-30543, 30564, 30566-30577
                30602, 30604-30607, 30609-30612, 30655
                30657-30668, 30675, 30697-30716, 30718-30720
                30722-30736, 30773-30774, 30776-30777, 30786
                30791, 30795, 30797, 30801, 30822-30827
                30830-30831, 30848-30855, 30858-30859, 30888-30899
                30901, 30903-30904
Files=68, Tests=264809, 1944 wallclock secs (17.59 usr  0.63 sys + 470.04 cusr 131.40 csys = 619.66 CPU)
Result: FAIL
"""
        log = step.addLog("stdio")
        log.addStdout(output)
        log.finish()
        rc = step.evaluateCommand(FakeCommand(rc=87))
        self.failUnlessEqual(rc, FAILURE)
        ss = step.step_status
        # 22 + 265 failures across the two failing files
        self.failUnlessEqual(ss.getStatistic('tests-failed'), 287)
        self.failUnlessEqual(ss.getStatistic('tests-total'), 264809)
        self.failUnlessEqual(ss.getStatistic('tests-passed'), 264522)
+
class MasterShellCommand(StepTester, unittest.TestCase):
    """Tests for the MasterShellCommand step, which runs a command on the
    buildmaster host rather than on the slave."""

    def testMasterShellCommand(self):
        self.slavebase = "testMasterShellCommand.slave"
        self.masterbase = "testMasterShellCommand.master"
        sb = self.makeSlaveBuilder()
        step = self.makeStep(master.MasterShellCommand, command=['echo', 'hi'])

        # we can't invoke runStep until the reactor is started .. hence this
        # little dance
        d = defer.Deferred()
        def _dotest(_):
            return self.runStep(step)
        d.addCallback(_dotest)

        def _check(results):
            self.failUnlessEqual(results, SUCCESS)
            logtxt = step.getLog("stdio").getText()
            self.failUnlessEqual(logtxt.strip(), "hi")
        d.addCallback(_check)
        reactor.callLater(0, d.callback, None)
        return d

    def testMasterShellCommand_badexit(self):
        # a command exiting non-zero must yield FAILURE
        self.slavebase = "testMasterShellCommand_badexit.slave"
        self.masterbase = "testMasterShellCommand_badexit.master"
        sb = self.makeSlaveBuilder()
        step = self.makeStep(master.MasterShellCommand, command="exit 1")

        # we can't invoke runStep until the reactor is started .. hence this
        # little dance
        d = defer.Deferred()
        def _dotest(_):
            return self.runStep(step)
        d.addCallback(_dotest)

        def _check(results):
            self.failUnlessEqual(results, FAILURE)
        d.addCallback(_check)
        reactor.callLater(0, d.callback, None)
        return d
diff --git a/buildbot/buildbot/test/test_svnpoller.py b/buildbot/buildbot/test/test_svnpoller.py
new file mode 100644
index 0000000..452a514
--- /dev/null
+++ b/buildbot/buildbot/test/test_svnpoller.py
@@ -0,0 +1,476 @@
+# -*- test-case-name: buildbot.test.test_svnpoller -*-
+
+import time
+from twisted.internet import defer
+from twisted.trial import unittest
+from buildbot.changes.svnpoller import SVNPoller
+
+# this is the output of "svn info --xml
+# svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk"
+prefix_output = """\
+<?xml version="1.0"?>
+<info>
+<entry
+ kind="dir"
+ path="trunk"
+ revision="18354">
+<url>svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk</url>
+<repository>
+<root>svn+ssh://svn.twistedmatrix.com/svn/Twisted</root>
+<uuid>bbbe8e31-12d6-0310-92fd-ac37d47ddeeb</uuid>
+</repository>
+<commit
+ revision="18352">
+<author>jml</author>
+<date>2006-10-01T02:37:34.063255Z</date>
+</commit>
+</entry>
+</info>
+"""
+
+# and this is "svn info --xml svn://svn.twistedmatrix.com/svn/Twisted". I
+# think this is kind of a degenerate case.. it might even be a form of error.
+prefix_output_2 = """\
+<?xml version="1.0"?>
+<info>
+</info>
+"""
+
+# this is the svn info output for a local repository, svn info --xml
+# file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository
+prefix_output_3 = """\
+<?xml version="1.0"?>
+<info>
+<entry
+ kind="dir"
+ path="SVN-Repository"
+ revision="3">
+<url>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository</url>
+<repository>
+<root>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository</root>
+<uuid>c0f47ff4-ba1e-0410-96b5-d44cc5c79e7f</uuid>
+</repository>
+<commit
+ revision="3">
+<author>warner</author>
+<date>2006-10-01T07:37:04.182499Z</date>
+</commit>
+</entry>
+</info>
+"""
+
+# % svn info --xml file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample/trunk
+
+prefix_output_4 = """\
+<?xml version="1.0"?>
+<info>
+<entry
+ kind="dir"
+ path="trunk"
+ revision="3">
+<url>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample/trunk</url>
+<repository>
+<root>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository</root>
+<uuid>c0f47ff4-ba1e-0410-96b5-d44cc5c79e7f</uuid>
+</repository>
+<commit
+ revision="1">
+<author>warner</author>
+<date>2006-10-01T07:37:02.286440Z</date>
+</commit>
+</entry>
+</info>
+"""
+
+
+
class ComputePrefix(unittest.TestCase):
    """Tests for SVNPoller.determine_prefix(), which derives the path
    prefix (svnurl relative to the repository root) from 'svn info --xml'
    output."""

    def test1(self):
        base = "svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk"
        s = SVNPoller(base + "/")
        self.failUnlessEqual(s.svnurl, base) # certify slash-stripping
        prefix = s.determine_prefix(prefix_output)
        self.failUnlessEqual(prefix, "trunk")
        self.failUnlessEqual(s._prefix, prefix)

    def test2(self):
        # degenerate case: svnurl equals the repository root, and the
        # <info> element is empty
        base = "svn+ssh://svn.twistedmatrix.com/svn/Twisted"
        s = SVNPoller(base)
        self.failUnlessEqual(s.svnurl, base)
        prefix = s.determine_prefix(prefix_output_2)
        self.failUnlessEqual(prefix, "")

    def test3(self):
        # local repository, svnurl == root: empty prefix
        base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository"
        s = SVNPoller(base)
        self.failUnlessEqual(s.svnurl, base)
        prefix = s.determine_prefix(prefix_output_3)
        self.failUnlessEqual(prefix, "")

    def test4(self):
        # local repository, svnurl two levels below the root
        base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample/trunk"
        s = SVNPoller(base)
        self.failUnlessEqual(s.svnurl, base)
        prefix = s.determine_prefix(prefix_output_4)
        self.failUnlessEqual(prefix, "sample/trunk")
+
+# output from svn log on .../SVN-Repository/sample
+# (so it includes trunk and branches)
+sample_base = "file:///usr/home/warner/stuff/Projects/BuildBot/trees/misc/_trial_temp/test_vc/repositories/SVN-Repository/sample"
+sample_logentries = [None] * 6
+
+sample_logentries[5] = """\
+<logentry
+ revision="6">
+<author>warner</author>
+<date>2006-10-01T19:35:16.165664Z</date>
+<paths>
+<path
+ action="D">/sample/branch/version.c</path>
+</paths>
+<msg>revised_to_2</msg>
+</logentry>
+"""
+
+sample_logentries[4] = """\
+<logentry
+ revision="5">
+<author>warner</author>
+<date>2006-10-01T19:35:16.165664Z</date>
+<paths>
+<path
+ action="D">/sample/branch</path>
+</paths>
+<msg>revised_to_2</msg>
+</logentry>
+"""
+
+sample_logentries[3] = """\
+<logentry
+ revision="4">
+<author>warner</author>
+<date>2006-10-01T19:35:16.165664Z</date>
+<paths>
+<path
+ action="M">/sample/trunk/version.c</path>
+</paths>
+<msg>revised_to_2</msg>
+</logentry>
+"""
+
+sample_logentries[2] = """\
+<logentry
+ revision="3">
+<author>warner</author>
+<date>2006-10-01T19:35:10.215692Z</date>
+<paths>
+<path
+ action="M">/sample/branch/main.c</path>
+</paths>
+<msg>commit_on_branch</msg>
+</logentry>
+"""
+
+sample_logentries[1] = """\
+<logentry
+ revision="2">
+<author>warner</author>
+<date>2006-10-01T19:35:09.154973Z</date>
+<paths>
+<path
+ copyfrom-path="/sample/trunk"
+ copyfrom-rev="1"
+ action="A">/sample/branch</path>
+</paths>
+<msg>make_branch</msg>
+</logentry>
+"""
+
+sample_logentries[0] = """\
+<logentry
+ revision="1">
+<author>warner</author>
+<date>2006-10-01T19:35:08.642045Z</date>
+<paths>
+<path
+ action="A">/sample</path>
+<path
+ action="A">/sample/trunk</path>
+<path
+ action="A">/sample/trunk/subdir/subdir.c</path>
+<path
+ action="A">/sample/trunk/main.c</path>
+<path
+ action="A">/sample/trunk/version.c</path>
+<path
+ action="A">/sample/trunk/subdir</path>
+</paths>
+<msg>sample_project_files</msg>
+</logentry>
+"""
+
+sample_info_output = """\
+<?xml version="1.0"?>
+<info>
+<entry
+ kind="dir"
+ path="sample"
+ revision="4">
+<url>file:///usr/home/warner/stuff/Projects/BuildBot/trees/misc/_trial_temp/test_vc/repositories/SVN-Repository/sample</url>
+<repository>
+<root>file:///usr/home/warner/stuff/Projects/BuildBot/trees/misc/_trial_temp/test_vc/repositories/SVN-Repository</root>
+<uuid>4f94adfc-c41e-0410-92d5-fbf86b7c7689</uuid>
+</repository>
+<commit
+ revision="4">
+<author>warner</author>
+<date>2006-10-01T19:35:16.165664Z</date>
+</commit>
+</entry>
+</info>
+"""
+
+
+changes_output_template = """\
+<?xml version="1.0"?>
+<log>
+%s</log>
+"""
+
def make_changes_output(maxrevision):
    """Return the XML that 'svn log' would emit just after the given
    revision was committed (entries newest-first, as svn prints them)."""
    entries = sample_logentries[:maxrevision]
    assert len(entries) == maxrevision
    body = "".join(reversed(entries))
    return changes_output_template % body
+
def split_file(path):
    """Map a repository-relative path to a (branch, filename) pair.

    Paths under 'branch/' belong to the branch named "branch"; paths
    under 'trunk/' belong to the default branch (None).  Anything else
    is unexpected in this test repository and raises RuntimeError.
    """
    top, _, rest = path.partition("/")
    if top == "branch":
        return "branch", rest
    if top == "trunk":
        return None, rest
    raise RuntimeError("there shouldn't be any files like %s" % path)
+
class MySVNPoller(SVNPoller):
    """SVNPoller variant for tests: instead of spawning 'svn', each
    requested command is queued (with its Deferred) on pending_commands so
    the test can feed canned output; resulting Changes are collected in
    finished_changes instead of being submitted to the master."""
    def __init__(self, *args, **kwargs):
        SVNPoller.__init__(self, *args, **kwargs)
        self.pending_commands = []
        self.finished_changes = []

    def getProcessOutput(self, args):
        # intercept: return a Deferred that the test fires by hand
        d = defer.Deferred()
        self.pending_commands.append((args, d))
        return d

    def submit_changes(self, changes):
        # intercept: record the changes instead of delivering them
        self.finished_changes.extend(changes)
+
class ComputeChanges(unittest.TestCase):
    """Tests for SVNPoller's log filtering (_filter_new_logentries) and
    Change creation (create_changes), using canned 'svn log' XML."""

    def test1(self):
        base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
        s = SVNPoller(base)
        s._prefix = "sample"
        output = make_changes_output(4)
        doc = s.parse_logs(output)

        # already caught up: nothing new
        newlast, logentries = s._filter_new_logentries(doc, 4)
        self.failUnlessEqual(newlast, 4)
        self.failUnlessEqual(len(logentries), 0)

        # one revision behind: exactly one new entry
        newlast, logentries = s._filter_new_logentries(doc, 3)
        self.failUnlessEqual(newlast, 4)
        self.failUnlessEqual(len(logentries), 1)

        newlast, logentries = s._filter_new_logentries(doc, 1)
        self.failUnlessEqual(newlast, 4)
        self.failUnlessEqual(len(logentries), 3)

        # last_change=None means 'first poll': catch up without reporting
        newlast, logentries = s._filter_new_logentries(doc, None)
        self.failUnlessEqual(newlast, 4)
        self.failUnlessEqual(len(logentries), 0)

    def testChanges(self):
        base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
        s = SVNPoller(base, split_file=split_file)
        s._prefix = "sample"
        doc = s.parse_logs(make_changes_output(3))
        newlast, logentries = s._filter_new_logentries(doc, 1)
        # so we see revisions 2 and 3 as being new
        self.failUnlessEqual(newlast, 3)
        changes = s.create_changes(logentries)
        self.failUnlessEqual(len(changes), 2)
        self.failUnlessEqual(changes[0].branch, "branch")
        self.failUnlessEqual(changes[0].revision, '2')
        self.failUnlessEqual(changes[1].branch, "branch")
        self.failUnlessEqual(changes[1].files, ["main.c"])
        self.failUnlessEqual(changes[1].revision, '3')

        # and now pull in r4
        doc = s.parse_logs(make_changes_output(4))
        newlast, logentries = s._filter_new_logentries(doc, newlast)
        self.failUnlessEqual(newlast, 4)
        # so we see revision 4 as being new
        changes = s.create_changes(logentries)
        self.failUnlessEqual(len(changes), 1)
        self.failUnlessEqual(changes[0].branch, None)
        self.failUnlessEqual(changes[0].revision, '4')
        self.failUnlessEqual(changes[0].files, ["version.c"])

        # and now pull in r5 (should *not* create a change as it's a
        # branch deletion
        doc = s.parse_logs(make_changes_output(5))
        newlast, logentries = s._filter_new_logentries(doc, newlast)
        self.failUnlessEqual(newlast, 5)
        # so we see revision 5 as being new
        changes = s.create_changes(logentries)
        self.failUnlessEqual(len(changes), 0)

        # and now pull in r6 (should create a change as it's not
        # deleting an entire branch
        doc = s.parse_logs(make_changes_output(6))
        newlast, logentries = s._filter_new_logentries(doc, newlast)
        self.failUnlessEqual(newlast, 6)
        # so we see revision 6 as being new
        changes = s.create_changes(logentries)
        self.failUnlessEqual(len(changes), 1)
        self.failUnlessEqual(changes[0].branch, 'branch')
        self.failUnlessEqual(changes[0].revision, '6')
        self.failUnlessEqual(changes[0].files, ["version.c"])

    def testFirstTime(self):
        base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
        s = SVNPoller(base, split_file=split_file)
        s._prefix = "sample"
        doc = s.parse_logs(make_changes_output(4))
        logentries = s.get_new_logentries(doc)
        # SVNPoller ignores all changes that happened before it was started
        self.failUnlessEqual(len(logentries), 0)
        self.failUnlessEqual(s.last_change, 4)
+
class Misc(unittest.TestCase):
    """Miscellaneous SVNPoller behavior: overlap protection and the
    initial 'svn info' invocation."""

    def testAlreadyWorking(self):
        # a second checksvn() while the first is still in flight must not
        # start another svn command; it just bumps overrun_counter
        base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
        s = MySVNPoller(base)
        d = s.checksvn()
        # the SVNPoller is now waiting for its getProcessOutput to finish
        self.failUnlessEqual(s.overrun_counter, 0)
        d2 = s.checksvn()
        self.failUnlessEqual(s.overrun_counter, 1)
        self.failUnlessEqual(len(s.pending_commands), 1)

    def testGetRoot(self):
        # the first poll must start with 'svn info --xml' on the svnurl
        base = "svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk"
        s = MySVNPoller(base)
        d = s.checksvn()
        # the SVNPoller is now waiting for its getProcessOutput to finish
        self.failUnlessEqual(len(s.pending_commands), 1)
        self.failUnlessEqual(s.pending_commands[0][0],
                             ["info", "--xml", "--non-interactive", base])
+
def makeTime(timestring):
    """Parse a 'YYYY/MM/DD HH:MM:SS' string (interpreted as local time)
    into a Unix timestamp."""
    parsed = time.strptime(timestring, '%Y/%m/%d %H:%M:%S')
    return time.mktime(parsed)
+
+
class Everything(unittest.TestCase):
    """Drive a MySVNPoller through a full sequence of polls, feeding it
    canned 'svn info' and 'svn log' output by hand and checking the
    Changes it produces at each step."""

    def test1(self):
        s = MySVNPoller(sample_base, split_file=split_file)
        d = s.checksvn()
        # the SVNPoller is now waiting for its getProcessOutput to finish:
        # the first command is 'svn info' to establish the prefix
        self.failUnlessEqual(len(s.pending_commands), 1)
        self.failUnlessEqual(s.pending_commands[0][0],
                             ["info", "--xml", "--non-interactive",
                              sample_base])
        d = s.pending_commands[0][1]
        s.pending_commands.pop(0)
        d.callback(sample_info_output)
        # now it should be waiting for the 'svn log' command
        self.failUnlessEqual(len(s.pending_commands), 1)
        self.failUnlessEqual(s.pending_commands[0][0],
                             ["log", "--xml", "--verbose", "--non-interactive",
                              "--limit=100", sample_base])
        d = s.pending_commands[0][1]
        s.pending_commands.pop(0)
        d.callback(make_changes_output(1))
        # the command ignores the first batch of changes
        self.failUnlessEqual(len(s.finished_changes), 0)
        self.failUnlessEqual(s.last_change, 1)

        # now fire it again, nothing changing
        d = s.checksvn()
        self.failUnlessEqual(s.pending_commands[0][0],
                             ["log", "--xml", "--verbose", "--non-interactive",
                              "--limit=100", sample_base])
        d = s.pending_commands[0][1]
        s.pending_commands.pop(0)
        d.callback(make_changes_output(1))
        # nothing has changed
        self.failUnlessEqual(len(s.finished_changes), 0)
        self.failUnlessEqual(s.last_change, 1)

        # and again, with r2 this time
        d = s.checksvn()
        self.failUnlessEqual(s.pending_commands[0][0],
                             ["log", "--xml", "--verbose", "--non-interactive",
                              "--limit=100", sample_base])
        d = s.pending_commands[0][1]
        s.pending_commands.pop(0)
        d.callback(make_changes_output(2))
        # r2 should appear
        self.failUnlessEqual(len(s.finished_changes), 1)
        self.failUnlessEqual(s.last_change, 2)

        c = s.finished_changes[0]
        self.failUnlessEqual(c.branch, "branch")
        self.failUnlessEqual(c.revision, '2')
        self.failUnlessEqual(c.files, [''])
        # TODO: this is what creating the branch looks like: a Change with a
        # zero-length file. We should decide if we want filenames like this
        # in the Change (and make sure nobody else gets confused by it) or if
        # we want to strip them out.
        self.failUnlessEqual(c.comments, "make_branch")

        # and again at r2, so nothing should change
        d = s.checksvn()
        self.failUnlessEqual(s.pending_commands[0][0],
                             ["log", "--xml", "--verbose", "--non-interactive",
                              "--limit=100", sample_base])
        d = s.pending_commands[0][1]
        s.pending_commands.pop(0)
        d.callback(make_changes_output(2))
        # nothing has changed
        self.failUnlessEqual(len(s.finished_changes), 1)
        self.failUnlessEqual(s.last_change, 2)

        # and again with both r3 and r4 appearing together
        d = s.checksvn()
        self.failUnlessEqual(s.pending_commands[0][0],
                             ["log", "--xml", "--verbose", "--non-interactive",
                              "--limit=100", sample_base])
        d = s.pending_commands[0][1]
        s.pending_commands.pop(0)
        d.callback(make_changes_output(4))
        self.failUnlessEqual(len(s.finished_changes), 3)
        self.failUnlessEqual(s.last_change, 4)

        c3 = s.finished_changes[1]
        self.failUnlessEqual(c3.branch, "branch")
        self.failUnlessEqual(c3.revision, '3')
        self.failUnlessEqual(c3.files, ["main.c"])
        self.failUnlessEqual(c3.comments, "commit_on_branch")

        c4 = s.finished_changes[2]
        self.failUnlessEqual(c4.branch, None)
        self.failUnlessEqual(c4.revision, '4')
        self.failUnlessEqual(c4.files, ["version.c"])
        self.failUnlessEqual(c4.comments, "revised_to_2")
        # the Change timestamp comes from the poll time, not the svn log
        # date, so it should be 'now'
        self.failUnless(abs(c4.when - time.time()) < 60)
+
+
+# TODO:
+# get coverage of split_file returning None
+# point at a live SVN server for a little while
diff --git a/buildbot/buildbot/test/test_transfer.py b/buildbot/buildbot/test/test_transfer.py
new file mode 100644
index 0000000..c85c630
--- /dev/null
+++ b/buildbot/buildbot/test/test_transfer.py
@@ -0,0 +1,721 @@
+# -*- test-case-name: buildbot.test.test_transfer -*-
+
+import os
+from stat import ST_MODE
+from twisted.trial import unittest
+from buildbot.process.buildstep import WithProperties
+from buildbot.steps.transfer import FileUpload, FileDownload, DirectoryUpload
+from buildbot.test.runutils import StepTester
+from buildbot.status.builder import SUCCESS, FAILURE
+
+# these steps pass a pb.Referenceable inside their arguments, so we have to
+# catch and wrap them. If the LocalAsRemote wrapper were a proper membrane,
+# we wouldn't have to do this.
+
+class UploadFile(StepTester, unittest.TestCase):
+
+ def filterArgs(self, args):
+ if "writer" in args:
+ args["writer"] = self.wrap(args["writer"])
+ return args
+
+ def testSuccess(self):
+ self.slavebase = "UploadFile.testSuccess.slave"
+ self.masterbase = "UploadFile.testSuccess.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ # the buildmaster normally runs chdir'ed into masterbase, so uploaded
+ # files will appear there. Under trial, we're chdir'ed into
+ # _trial_temp instead, so use a different masterdest= to keep the
+ # uploaded file in a test-local directory
+ masterdest = os.path.join(self.masterbase, "dest.text")
+ step = self.makeStep(FileUpload,
+ slavesrc="source.txt",
+ masterdest=masterdest)
+ slavesrc = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "source.txt")
+ contents = "this is the source file\n" * 1000
+ open(slavesrc, "w").write(contents)
+ f = open(masterdest, "w")
+ f.write("overwrite me\n")
+ f.close()
+
+ d = self.runStep(step)
+ def _checkUpload(results):
+ step_status = step.step_status
+ #l = step_status.getLogs()
+ #if l:
+ # logtext = l[0].getText()
+ # print logtext
+ self.failUnlessEqual(results, SUCCESS)
+ self.failUnless(os.path.exists(masterdest))
+ masterdest_contents = open(masterdest, "r").read()
+ self.failUnlessEqual(masterdest_contents, contents)
+ d.addCallback(_checkUpload)
+ return d
+
+ def testMaxsize(self):
+ self.slavebase = "UploadFile.testMaxsize.slave"
+ self.masterbase = "UploadFile.testMaxsize.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ masterdest = os.path.join(self.masterbase, "dest2.text")
+ step = self.makeStep(FileUpload,
+ slavesrc="source.txt",
+ masterdest=masterdest,
+ maxsize=12345)
+ slavesrc = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "source.txt")
+ contents = "this is the source file\n" * 1000
+ open(slavesrc, "w").write(contents)
+ f = open(masterdest, "w")
+ f.write("overwrite me\n")
+ f.close()
+
+ d = self.runStep(step)
+ def _checkUpload(results):
+ step_status = step.step_status
+ #l = step_status.getLogs()
+ #if l:
+ # logtext = l[0].getText()
+ # print logtext
+ self.failUnlessEqual(results, FAILURE)
+ self.failUnless(os.path.exists(masterdest))
+ masterdest_contents = open(masterdest, "r").read()
+ self.failUnlessEqual(len(masterdest_contents), 12345)
+ self.failUnlessEqual(masterdest_contents, contents[:12345])
+ d.addCallback(_checkUpload)
+ return d
+
+ def testMode(self):
+ self.slavebase = "UploadFile.testMode.slave"
+ self.masterbase = "UploadFile.testMode.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ masterdest = os.path.join(self.masterbase, "dest3.text")
+ step = self.makeStep(FileUpload,
+ slavesrc="source.txt",
+ masterdest=masterdest,
+ mode=0755)
+ slavesrc = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "source.txt")
+ contents = "this is the source file\n"
+ open(slavesrc, "w").write(contents)
+ f = open(masterdest, "w")
+ f.write("overwrite me\n")
+ f.close()
+
+ d = self.runStep(step)
+ def _checkUpload(results):
+ step_status = step.step_status
+ #l = step_status.getLogs()
+ #if l:
+ # logtext = l[0].getText()
+ # print logtext
+ self.failUnlessEqual(results, SUCCESS)
+ self.failUnless(os.path.exists(masterdest))
+ masterdest_contents = open(masterdest, "r").read()
+ self.failUnlessEqual(masterdest_contents, contents)
+ # and with 0777 to ignore sticky bits
+ dest_mode = os.stat(masterdest)[ST_MODE] & 0777
+ self.failUnlessEqual(dest_mode, 0755,
+ "target mode was %o, we wanted %o" %
+ (dest_mode, 0755))
+ d.addCallback(_checkUpload)
+ return d
+
+ def testMissingFile(self):
+ self.slavebase = "UploadFile.testMissingFile.slave"
+ self.masterbase = "UploadFile.testMissingFile.master"
+ sb = self.makeSlaveBuilder()
+ step = self.makeStep(FileUpload,
+ slavesrc="MISSING.txt",
+ masterdest="dest.txt")
+ masterdest = os.path.join(self.masterbase, "dest4.txt")
+
+ d = self.runStep(step)
+ def _checkUpload(results):
+ step_status = step.step_status
+ self.failUnlessEqual(results, FAILURE)
+ self.failIf(os.path.exists(masterdest))
+ l = step_status.getLogs()
+ logtext = l[0].getText().strip()
+ self.failUnless(logtext.startswith("Cannot open file"))
+ self.failUnless(logtext.endswith("for upload"))
+ d.addCallback(_checkUpload)
+ return d
+
+ def testLotsOfBlocks(self):
+ self.slavebase = "UploadFile.testLotsOfBlocks.slave"
+ self.masterbase = "UploadFile.testLotsOfBlocks.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ # the buildmaster normally runs chdir'ed into masterbase, so uploaded
+ # files will appear there. Under trial, we're chdir'ed into
+ # _trial_temp instead, so use a different masterdest= to keep the
+ # uploaded file in a test-local directory
+ masterdest = os.path.join(self.masterbase, "dest.text")
+ step = self.makeStep(FileUpload,
+ slavesrc="source.txt",
+ masterdest=masterdest,
+ blocksize=15)
+ slavesrc = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "source.txt")
+ contents = "".join(["this is the source file #%d\n" % i
+ for i in range(1000)])
+ open(slavesrc, "w").write(contents)
+ f = open(masterdest, "w")
+ f.write("overwrite me\n")
+ f.close()
+
+ d = self.runStep(step)
+ def _checkUpload(results):
+ step_status = step.step_status
+ #l = step_status.getLogs()
+ #if l:
+ # logtext = l[0].getText()
+ # print logtext
+ self.failUnlessEqual(results, SUCCESS)
+ self.failUnless(os.path.exists(masterdest))
+ masterdest_contents = open(masterdest, "r").read()
+ self.failUnlessEqual(masterdest_contents, contents)
+ d.addCallback(_checkUpload)
+ return d
+
+ def testWorkdir(self):
+ self.slavebase = "Upload.testWorkdir.slave"
+ self.masterbase = "Upload.testWorkdir.master"
+ sb = self.makeSlaveBuilder()
+
+ self.workdir = "mybuild" # override default in StepTest
+ full_workdir = os.path.join(
+ self.slavebase, self.slavebuilderbase, self.workdir)
+ os.mkdir(full_workdir)
+
+ masterdest = os.path.join(self.masterbase, "dest.txt")
+
+ step = self.makeStep(FileUpload,
+ slavesrc="source.txt",
+ masterdest=masterdest)
+
+ # Testing that the FileUpload's workdir is set when makeStep()
+ # calls setDefaultWorkdir() is actually enough; carrying on and
+ # making sure the upload actually succeeds is pure gravy.
+ self.failUnlessEqual(self.workdir, step.workdir)
+
+ slavesrc = os.path.join(full_workdir, "source.txt")
+ open(slavesrc, "w").write("upload me\n")
+
+ def _checkUpload(results):
+ self.failUnlessEqual(results, SUCCESS)
+ self.failUnless(os.path.isfile(masterdest))
+
+ d = self.runStep(step)
+ d.addCallback(_checkUpload)
+ return d
+
+ def testWithProperties(self):
+ # test that workdir can be a WithProperties object
+ self.slavebase = "Upload.testWithProperties.slave"
+ self.masterbase = "Upload.testWithProperties.master"
+ sb = self.makeSlaveBuilder()
+
+ step = self.makeStep(FileUpload,
+ slavesrc="src.txt",
+ masterdest="dest.txt")
+ step.workdir = WithProperties("build.%s", "buildnumber")
+
+ self.failUnlessEqual(step._getWorkdir(), "build.1")
+
+class DownloadFile(StepTester, unittest.TestCase):
+
+ def filterArgs(self, args):
+ if "reader" in args:
+ args["reader"] = self.wrap(args["reader"])
+ return args
+
+ def testSuccess(self):
+ self.slavebase = "DownloadFile.testSuccess.slave"
+ self.masterbase = "DownloadFile.testSuccess.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ mastersrc = os.path.join(self.masterbase, "source.text")
+ slavedest = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "dest.txt")
+ step = self.makeStep(FileDownload,
+ mastersrc=mastersrc,
+ slavedest="dest.txt")
+ contents = "this is the source file\n" * 1000 # 24kb, so two blocks
+ open(mastersrc, "w").write(contents)
+ f = open(slavedest, "w")
+ f.write("overwrite me\n")
+ f.close()
+
+ d = self.runStep(step)
+ def _checkDownload(results):
+ step_status = step.step_status
+ self.failUnlessEqual(results, SUCCESS)
+ self.failUnless(os.path.exists(slavedest))
+ slavedest_contents = open(slavedest, "r").read()
+ self.failUnlessEqual(slavedest_contents, contents)
+ d.addCallback(_checkDownload)
+ return d
+
+ def testMaxsize(self):
+ self.slavebase = "DownloadFile.testMaxsize.slave"
+ self.masterbase = "DownloadFile.testMaxsize.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ mastersrc = os.path.join(self.masterbase, "source.text")
+ slavedest = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "dest.txt")
+ step = self.makeStep(FileDownload,
+ mastersrc=mastersrc,
+ slavedest="dest.txt",
+ maxsize=12345)
+ contents = "this is the source file\n" * 1000 # 24kb, so two blocks
+ open(mastersrc, "w").write(contents)
+ f = open(slavedest, "w")
+ f.write("overwrite me\n")
+ f.close()
+
+ d = self.runStep(step)
+ def _checkDownload(results):
+ step_status = step.step_status
+ # the file should be truncated, and the step a FAILURE
+ self.failUnlessEqual(results, FAILURE)
+ self.failUnless(os.path.exists(slavedest))
+ slavedest_contents = open(slavedest, "r").read()
+ self.failUnlessEqual(len(slavedest_contents), 12345)
+ self.failUnlessEqual(slavedest_contents, contents[:12345])
+ d.addCallback(_checkDownload)
+ return d
+
+ def testMode(self):
+ self.slavebase = "DownloadFile.testMode.slave"
+ self.masterbase = "DownloadFile.testMode.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ mastersrc = os.path.join(self.masterbase, "source.text")
+ slavedest = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "dest.txt")
+ step = self.makeStep(FileDownload,
+ mastersrc=mastersrc,
+ slavedest="dest.txt",
+ mode=0755)
+ contents = "this is the source file\n"
+ open(mastersrc, "w").write(contents)
+ f = open(slavedest, "w")
+ f.write("overwrite me\n")
+ f.close()
+
+ d = self.runStep(step)
+ def _checkDownload(results):
+ step_status = step.step_status
+ self.failUnlessEqual(results, SUCCESS)
+ self.failUnless(os.path.exists(slavedest))
+ slavedest_contents = open(slavedest, "r").read()
+ self.failUnlessEqual(slavedest_contents, contents)
+ # and with 0777 to ignore sticky bits
+ dest_mode = os.stat(slavedest)[ST_MODE] & 0777
+ self.failUnlessEqual(dest_mode, 0755,
+ "target mode was %o, we wanted %o" %
+ (dest_mode, 0755))
+ d.addCallback(_checkDownload)
+ return d
+
+ def testMissingFile(self):
+ self.slavebase = "DownloadFile.testMissingFile.slave"
+ self.masterbase = "DownloadFile.testMissingFile.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ mastersrc = os.path.join(self.masterbase, "MISSING.text")
+ slavedest = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "dest.txt")
+ step = self.makeStep(FileDownload,
+ mastersrc=mastersrc,
+ slavedest="dest.txt")
+
+ d = self.runStep(step)
+ def _checkDownload(results):
+ step_status = step.step_status
+ self.failUnlessEqual(results, FAILURE)
+ self.failIf(os.path.exists(slavedest))
+ l = step_status.getLogs()
+ logtext = l[0].getText().strip()
+ self.failUnless(logtext.endswith(" not available at master"))
+ d.addCallbacks(_checkDownload)
+
+ return d
+
+ def testLotsOfBlocks(self):
+ self.slavebase = "DownloadFile.testLotsOfBlocks.slave"
+ self.masterbase = "DownloadFile.testLotsOfBlocks.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ mastersrc = os.path.join(self.masterbase, "source.text")
+ slavedest = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "dest.txt")
+ step = self.makeStep(FileDownload,
+ mastersrc=mastersrc,
+ slavedest="dest.txt",
+ blocksize=15)
+ contents = "".join(["this is the source file #%d\n" % i
+ for i in range(1000)])
+ open(mastersrc, "w").write(contents)
+ f = open(slavedest, "w")
+ f.write("overwrite me\n")
+ f.close()
+
+ d = self.runStep(step)
+ def _checkDownload(results):
+ step_status = step.step_status
+ self.failUnlessEqual(results, SUCCESS)
+ self.failUnless(os.path.exists(slavedest))
+ slavedest_contents = open(slavedest, "r").read()
+ self.failUnlessEqual(slavedest_contents, contents)
+ d.addCallback(_checkDownload)
+ return d
+
+ def testWorkdir(self):
+ self.slavebase = "Download.testWorkdir.slave"
+ self.masterbase = "Download.testWorkdir.master"
+ sb = self.makeSlaveBuilder()
+
+ # As in Upload.testWorkdir(), it's enough to test that makeStep()'s
+ # call of setDefaultWorkdir() actually sets step.workdir.
+ self.workdir = "mybuild"
+ step = self.makeStep(FileDownload,
+ mastersrc="foo",
+ slavedest="foo")
+ self.failUnlessEqual(step.workdir, self.workdir)
+
+ def testWithProperties(self):
+ # test that workdir can be a WithProperties object
+ self.slavebase = "Download.testWithProperties.slave"
+ self.masterbase = "Download.testWithProperties.master"
+ sb = self.makeSlaveBuilder()
+
+ step = self.makeStep(FileDownload,
+ mastersrc="src.txt",
+ slavedest="dest.txt")
+ step.workdir = WithProperties("build.%s", "buildnumber")
+
+ self.failUnlessEqual(step._getWorkdir(), "build.1")
+
+
+
class UploadDirectory(StepTester, unittest.TestCase):
    """Tests for the DirectoryUpload step: recursively copying a directory
    tree (including empty subdirectories) from the slave's build directory
    to the buildmaster."""

    def filterArgs(self, args):
        # the step passes a pb.Referenceable ("writer") to the slave, so it
        # must be wrapped to survive the LocalAsRemote shortcut (see the
        # comment at the top of this file)
        if "writer" in args:
            args["writer"] = self.wrap(args["writer"])
        return args

    def testSuccess(self):
        """A mixed tree (nested dirs, files, and empty dirs) is uploaded
        verbatim."""
        self.slavebase = "UploadDirectory.testSuccess.slave"
        self.masterbase = "UploadDirectory.testSuccess.master"
        sb = self.makeSlaveBuilder()
        os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
                              "build"))
        # the buildmaster normally runs chdir'ed into masterbase, so uploaded
        # files will appear there. Under trial, we're chdir'ed into
        # _trial_temp instead, so use a different masterdest= to keep the
        # uploaded file in a test-local directory
        masterdest = os.path.join(self.masterbase, "dest_dir")
        step = self.makeStep(DirectoryUpload,
                             slavesrc="source_dir",
                             masterdest=masterdest)
        slavesrc = os.path.join(self.slavebase,
                                self.slavebuilderbase,
                                "build",
                                "source_dir")
        dircount = 5
        content = []
        content.append("this is one source file\n" * 1000)
        content.append("this is a second source file\n" * 978)
        content.append("this is a third source file\n" * 473)
        os.mkdir(slavesrc)
        for i in range(dircount):
            os.mkdir(os.path.join(slavesrc, "d%i" % (i)))
            for j in range(dircount):
                curdir = os.path.join("d%i" % (i), "e%i" % (j))
                os.mkdir(os.path.join(slavesrc, curdir))
                for h in range(3):
                    open(os.path.join(slavesrc, curdir, "file%i" % (h)), "w").write(content[h])
            for j in range(dircount):
                #empty dirs, must be uploaded too
                curdir = os.path.join("d%i" % (i), "f%i" % (j))
                os.mkdir(os.path.join(slavesrc, curdir))

        d = self.runStep(step)
        def _checkUpload(results):
            step_status = step.step_status
            #l = step_status.getLogs()
            #if l:
            #    logtext = l[0].getText()
            #    print logtext
            self.failUnlessEqual(results, SUCCESS)
            self.failUnless(os.path.exists(masterdest))
            # verify every file arrived with identical contents, and that
            # the empty f* directories were recreated as well
            for i in range(dircount):
                for j in range(dircount):
                    curdir = os.path.join("d%i" % (i), "e%i" % (j))
                    self.failUnless(os.path.exists(os.path.join(masterdest, curdir)))
                    for h in range(3):
                        masterdest_contents = open(os.path.join(masterdest, curdir, "file%i" % (h)), "r").read()
                        self.failUnlessEqual(masterdest_contents, content[h])
                for j in range(dircount):
                    curdir = os.path.join("d%i" % (i), "f%i" % (j))
                    self.failUnless(os.path.exists(os.path.join(masterdest, curdir)))
        d.addCallback(_checkUpload)
        return d

    def testOneEmptyDir(self):
        """Uploading a single empty directory creates the (empty)
        destination directory on the master."""
        self.slavebase = "UploadDirectory.testOneEmptyDir.slave"
        self.masterbase = "UploadDirectory.testOneEmptyDir.master"
        sb = self.makeSlaveBuilder()
        os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
                              "build"))
        # the buildmaster normally runs chdir'ed into masterbase, so uploaded
        # files will appear there. Under trial, we're chdir'ed into
        # _trial_temp instead, so use a different masterdest= to keep the
        # uploaded file in a test-local directory
        masterdest = os.path.join(self.masterbase, "dest_dir")
        step = self.makeStep(DirectoryUpload,
                             slavesrc="source_dir",
                             masterdest=masterdest)
        slavesrc = os.path.join(self.slavebase,
                                self.slavebuilderbase,
                                "build",
                                "source_dir")
        os.mkdir(slavesrc)

        d = self.runStep(step)
        def _checkUpload(results):
            step_status = step.step_status
            #l = step_status.getLogs()
            #if l:
            #    logtext = l[0].getText()
            #    print logtext
            self.failUnlessEqual(results, SUCCESS)
            self.failUnless(os.path.exists(masterdest))
        d.addCallback(_checkUpload)
        return d

    def testManyEmptyDirs(self):
        """A deep tree consisting only of empty directories is recreated
        in full on the master."""
        self.slavebase = "UploadDirectory.testManyEmptyDirs.slave"
        self.masterbase = "UploadDirectory.testManyEmptyDirs.master"
        sb = self.makeSlaveBuilder()
        os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
                              "build"))
        # the buildmaster normally runs chdir'ed into masterbase, so uploaded
        # files will appear there. Under trial, we're chdir'ed into
        # _trial_temp instead, so use a different masterdest= to keep the
        # uploaded file in a test-local directory
        masterdest = os.path.join(self.masterbase, "dest_dir")
        step = self.makeStep(DirectoryUpload,
                             slavesrc="source_dir",
                             masterdest=masterdest)
        slavesrc = os.path.join(self.slavebase,
                                self.slavebuilderbase,
                                "build",
                                "source_dir")
        dircount = 25
        os.mkdir(slavesrc)
        for i in range(dircount):
            os.mkdir(os.path.join(slavesrc, "d%i" % (i)))
            for j in range(dircount):
                curdir = os.path.join("d%i" % (i), "e%i" % (j))
                os.mkdir(os.path.join(slavesrc, curdir))
                curdir = os.path.join("d%i" % (i), "f%i" % (j))
                os.mkdir(os.path.join(slavesrc, curdir))

        d = self.runStep(step)
        def _checkUpload(results):
            step_status = step.step_status
            #l = step_status.getLogs()
            #if l:
            #    logtext = l[0].getText()
            #    print logtext
            self.failUnlessEqual(results, SUCCESS)
            self.failUnless(os.path.exists(masterdest))
            for i in range(dircount):
                for j in range(dircount):
                    curdir = os.path.join("d%i" % (i), "e%i" % (j))
                    self.failUnless(os.path.exists(os.path.join(masterdest, curdir)))
                    curdir = os.path.join("d%i" % (i), "f%i" % (j))
                    self.failUnless(os.path.exists(os.path.join(masterdest, curdir)))
        d.addCallback(_checkUpload)
        return d

    def testOneDirOneFile(self):
        """A directory containing a single file is uploaded verbatim."""
        self.slavebase = "UploadDirectory.testOneDirOneFile.slave"
        self.masterbase = "UploadDirectory.testOneDirOneFile.master"
        sb = self.makeSlaveBuilder()
        os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
                              "build"))
        # the buildmaster normally runs chdir'ed into masterbase, so uploaded
        # files will appear there. Under trial, we're chdir'ed into
        # _trial_temp instead, so use a different masterdest= to keep the
        # uploaded file in a test-local directory
        masterdest = os.path.join(self.masterbase, "dest_dir")
        step = self.makeStep(DirectoryUpload,
                             slavesrc="source_dir",
                             masterdest=masterdest)
        slavesrc = os.path.join(self.slavebase,
                                self.slavebuilderbase,
                                "build",
                                "source_dir")
        os.mkdir(slavesrc)
        content = "this is one source file\n" * 1000
        open(os.path.join(slavesrc, "srcfile"), "w").write(content)

        d = self.runStep(step)
        def _checkUpload(results):
            step_status = step.step_status
            #l = step_status.getLogs()
            #if l:
            #    logtext = l[0].getText()
            #    print logtext
            self.failUnlessEqual(results, SUCCESS)
            self.failUnless(os.path.exists(masterdest))
            masterdest_contents = open(os.path.join(masterdest, "srcfile"), "r").read()
            self.failUnlessEqual(masterdest_contents, content)
        d.addCallback(_checkUpload)
        return d

    def testOneDirManyFiles(self):
        """A flat directory with many files of differing sizes is uploaded
        with every file's contents intact."""
        self.slavebase = "UploadDirectory.testOneDirManyFile.slave"
        self.masterbase = "UploadDirectory.testOneDirManyFile.master"
        sb = self.makeSlaveBuilder()
        os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
                              "build"))
        # the buildmaster normally runs chdir'ed into masterbase, so uploaded
        # files will appear there. Under trial, we're chdir'ed into
        # _trial_temp instead, so use a different masterdest= to keep the
        # uploaded file in a test-local directory
        masterdest = os.path.join(self.masterbase, "dest_dir")
        step = self.makeStep(DirectoryUpload,
                             slavesrc="source_dir",
                             masterdest=masterdest)
        slavesrc = os.path.join(self.slavebase,
                                self.slavebuilderbase,
                                "build",
                                "source_dir")
        filecount = 20
        os.mkdir(slavesrc)
        content = []
        content.append("this is one source file\n" * 1000)
        content.append("this is a second source file\n" * 978)
        content.append("this is a third source file\n" * 473)
        for i in range(3):
            for j in range(filecount):
                open(os.path.join(slavesrc, "srcfile%i_%i" % (i, j)), "w").write(content[i])

        d = self.runStep(step)
        def _checkUpload(results):
            step_status = step.step_status
            #l = step_status.getLogs()
            #if l:
            #    logtext = l[0].getText()
            #    print logtext
            self.failUnlessEqual(results, SUCCESS)
            self.failUnless(os.path.exists(masterdest))
            for i in range(3):
                for j in range(filecount):
                    masterdest_contents = open(os.path.join(masterdest, "srcfile%i_%i" % (i, j)), "r").read()
                    self.failUnlessEqual(masterdest_contents, content[i])
        d.addCallback(_checkUpload)
        return d

    def testManyDirsManyFiles(self):
        """A two-level tree where each leaf directory holds many files is
        uploaded with the full structure and all contents intact."""
        self.slavebase = "UploadDirectory.testManyDirsManyFile.slave"
        self.masterbase = "UploadDirectory.testManyDirsManyFile.master"
        sb = self.makeSlaveBuilder()
        os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
                              "build"))
        # the buildmaster normally runs chdir'ed into masterbase, so uploaded
        # files will appear there. Under trial, we're chdir'ed into
        # _trial_temp instead, so use a different masterdest= to keep the
        # uploaded file in a test-local directory
        masterdest = os.path.join(self.masterbase, "dest_dir")
        step = self.makeStep(DirectoryUpload,
                             slavesrc="source_dir",
                             masterdest=masterdest)
        slavesrc = os.path.join(self.slavebase,
                                self.slavebuilderbase,
                                "build",
                                "source_dir")
        dircount = 10
        os.mkdir(slavesrc)
        for i in range(dircount):
            os.mkdir(os.path.join(slavesrc, "d%i" % (i)))
            for j in range(dircount):
                curdir = os.path.join("d%i" % (i), "e%i" % (j))
                os.mkdir(os.path.join(slavesrc, curdir))
                curdir = os.path.join("d%i" % (i), "f%i" % (j))
                os.mkdir(os.path.join(slavesrc, curdir))

        filecount = 5
        content = []
        content.append("this is one source file\n" * 1000)
        content.append("this is a second source file\n" * 978)
        content.append("this is a third source file\n" * 473)
        for i in range(dircount):
            for j in range(dircount):
                for k in range(3):
                    for l in range(filecount):
                        open(os.path.join(slavesrc, "d%i" % (i), "e%i" % (j), "srcfile%i_%i" % (k, l)), "w").write(content[k])

        d = self.runStep(step)
        def _checkUpload(results):
            step_status = step.step_status
            #l = step_status.getLogs()
            #if l:
            #    logtext = l[0].getText()
            #    print logtext
            self.failUnlessEqual(results, SUCCESS)
            self.failUnless(os.path.exists(masterdest))
            for i in range(dircount):
                for j in range(dircount):
                    for k in range(3):
                        for l in range(filecount):
                            masterdest_contents = open(os.path.join(masterdest, "d%i" % (i), "e%i" % (j), "srcfile%i_%i" % (k, l)), "r").read()
                            self.failUnlessEqual(masterdest_contents, content[k])
        d.addCallback(_checkUpload)
        return d
+
+
+# TODO:
+# test relative paths, ~/paths
+# need to implement expanduser() for slave-side
+# test error message when master-side file is in a missing directory
+# remove workdir= default?
+
diff --git a/buildbot/buildbot/test/test_twisted.py b/buildbot/buildbot/test/test_twisted.py
new file mode 100644
index 0000000..7b4f9bf
--- /dev/null
+++ b/buildbot/buildbot/test/test_twisted.py
@@ -0,0 +1,219 @@
+# -*- test-case-name: buildbot.test.test_twisted -*-
+
+from twisted.trial import unittest
+
+from buildbot import interfaces
+from buildbot.steps.python_twisted import countFailedTests
+from buildbot.steps.python_twisted import Trial, TrialTestCaseCounter
+from buildbot.status import builder
+
# Debug switch: set to 1 to dump the twisted log to stdout while these
# tests run.
noisy = 0
if noisy:
    from twisted.python.log import startLogging
    import sys
    startLogging(sys.stdout)
+
+out1 = """
+-------------------------------------------------------------------------------
+Ran 13 tests in 1.047s
+
+OK
+"""
+
+out2 = """
+-------------------------------------------------------------------------------
+Ran 12 tests in 1.040s
+
+FAILED (failures=1)
+"""
+
+out3 = """
+ NotImplementedError
+-------------------------------------------------------------------------------
+Ran 13 tests in 1.042s
+
+FAILED (failures=1, errors=1)
+"""
+
+out4 = """
+unparseable
+"""
+
+out5 = """
+ File "/usr/home/warner/stuff/python/twisted/Twisted-CVS/twisted/test/test_defer.py", line 79, in testTwoCallbacks
+ self.fail("just because")
+ File "/usr/home/warner/stuff/python/twisted/Twisted-CVS/twisted/trial/unittest.py", line 21, in fail
+ raise AssertionError, message
+ AssertionError: just because
+unparseable
+"""
+
+out6 = """
+===============================================================================
+SKIPPED: testProtocolLocalhost (twisted.flow.test.test_flow.FlowTest)
+-------------------------------------------------------------------------------
+XXX freezes, fixme
+===============================================================================
+SKIPPED: testIPv6 (twisted.names.test.test_names.HostsTestCase)
+-------------------------------------------------------------------------------
+IPv6 support is not in our hosts resolver yet
+===============================================================================
+EXPECTED FAILURE: testSlots (twisted.test.test_rebuild.NewStyleTestCase)
+-------------------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/unittest.py", line 240, in _runPhase
+ stage(*args, **kwargs)
+ File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/unittest.py", line 262, in _main
+ self.runner(self.method)
+ File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/runner.py", line 95, in runTest
+ method()
+ File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/test/test_rebuild.py", line 130, in testSlots
+ rebuild.updateInstance(self.m.SlottedClass())
+ File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/python/rebuild.py", line 114, in updateInstance
+ self.__class__ = latestClass(self.__class__)
+TypeError: __class__ assignment: 'SlottedClass' object layout differs from 'SlottedClass'
+===============================================================================
+FAILURE: testBatchFile (twisted.conch.test.test_sftp.TestOurServerBatchFile)
+-------------------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/unittest.py", line 240, in _runPhase
+ stage(*args, **kwargs)
+ File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/unittest.py", line 262, in _main
+ self.runner(self.method)
+ File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/runner.py", line 95, in runTest
+ method()
+ File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/conch/test/test_sftp.py", line 450, in testBatchFile
+ self.failUnlessEqual(res[1:-2], ['testDirectory', 'testRemoveFile', 'testRenameFile', 'testfile1'])
+ File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/unittest.py", line 115, in failUnlessEqual
+ raise FailTest, (msg or '%r != %r' % (first, second))
+FailTest: [] != ['testDirectory', 'testRemoveFile', 'testRenameFile', 'testfile1']
+-------------------------------------------------------------------------------
+Ran 1454 tests in 911.579s
+
+FAILED (failures=2, skips=49, expectedFailures=9)
+Exception exceptions.AttributeError: "'NoneType' object has no attribute 'StringIO'" in <bound method RemoteReference.__del__ of <twisted.spread.pb.RemoteReference instance at 0x27036c0>> ignored
+"""
+
class MyTrial(Trial):
    # Test double for the Trial buildstep: records each parsed test result
    # into self.results (a list the test attaches before use) instead of
    # updating real build status, and discards complete logs.
    def addTestResult(self, testname, results, text, logs):
        self.results.append((testname, results, text, logs))
    def addCompleteLog(self, name, log):
        pass
+
class MyLogFile:
    """Minimal stand-in for a status log object: hands back a fixed text
    blob via getText()."""

    def __init__(self, text):
        # the canned log output that getText() will return
        self.text = text

    def getText(self):
        """Return the stored log text."""
        return self.text
+
+
+class Count(unittest.TestCase):
+
+ def count(self, total, failures=0, errors=0,
+ expectedFailures=0, unexpectedSuccesses=0, skips=0):
+ d = {
+ 'total': total,
+ 'failures': failures,
+ 'errors': errors,
+ 'expectedFailures': expectedFailures,
+ 'unexpectedSuccesses': unexpectedSuccesses,
+ 'skips': skips,
+ }
+ return d
+
+ def testCountFailedTests(self):
+ count = countFailedTests(out1)
+ self.assertEquals(count, self.count(total=13))
+ count = countFailedTests(out2)
+ self.assertEquals(count, self.count(total=12, failures=1))
+ count = countFailedTests(out3)
+ self.assertEquals(count, self.count(total=13, failures=1, errors=1))
+ count = countFailedTests(out4)
+ self.assertEquals(count, self.count(total=None))
+ count = countFailedTests(out5)
+ self.assertEquals(count, self.count(total=None))
+
class Counter(unittest.TestCase):
    """Feed log text to a TrialTestCaseCounter chunk by chunk and check the
    running test count it reports. The TestCase instance itself doubles as
    the 'step': the counter calls our setProgress()."""

    def setProgress(self, metric, value):
        # record the most recent progress report from the counter
        self.progress = (metric, value)

    def testCounter(self):
        self.progress = (None,None)
        c = TrialTestCaseCounter()
        c.setStep(self)
        STDOUT = interfaces.LOG_CHANNEL_STDOUT
        def add(text):
            c.logChunk(None, None, None, STDOUT, text)
        # blank lines and unrecognized text must not change the count
        add("\n\n")
        self.failUnlessEqual(self.progress, (None,None))
        add("bogus line\n")
        self.failUnlessEqual(self.progress, (None,None))
        add("buildbot.test.test_config.ConfigTest.testBots ... [OK]\n")
        self.failUnlessEqual(self.progress, ("tests", 1))
        # a test-result line split across two chunks only counts once the
        # trailing newline arrives
        add("buildbot.test.test_config.ConfigTest.tes")
        self.failUnlessEqual(self.progress, ("tests", 1))
        add("tBuilders ... [OK]\n")
        self.failUnlessEqual(self.progress, ("tests", 2))
        # confirm alternative delimiters work too.. ptys seem to emit
        # something different
        add("buildbot.test.test_config.ConfigTest.testIRC ... [OK]\r\n")
        self.failUnlessEqual(self.progress, ("tests", 3))
        # separator lines and non-test lines ending in [OK] must not count
        add("===============================================================================\n")
        self.failUnlessEqual(self.progress, ("tests", 3))
        add("buildbot.test.test_config.IOnlyLookLikeA.testLine ... [OK]\n")
        self.failUnlessEqual(self.progress, ("tests", 3))
+
+
+
class Parse(unittest.TestCase):
    """Run Trial.createSummary() over the canned out6 log and verify the
    four per-test results it extracts (via the MyTrial recorder above)."""

    def failUnlessIn(self, substr, string):
        # local substring-containment assertion helper
        self.failUnless(string.find(substr) != -1)

    def testParse(self):
        t = MyTrial(build=None, workdir=".", testpath=None, testChanges=True)
        t.results = []
        log = MyLogFile(out6)
        t.createSummary(log)

        self.failUnlessEqual(len(t.results), 4)
        r1, r2, r3, r4 = t.results

        # r1: a SKIPPED test, with its skip reason in the log excerpt
        testname, results, text, logs = r1
        self.failUnlessEqual(testname,
                             ("twisted", "flow", "test", "test_flow",
                              "FlowTest", "testProtocolLocalhost"))
        self.failUnlessEqual(results, builder.SKIPPED)
        self.failUnlessEqual(text, ['skipped'])
        self.failUnlessIn("XXX freezes, fixme", logs)
        self.failUnless(logs.startswith("SKIPPED:"))
        self.failUnless(logs.endswith("fixme\n"))

        # r2: another SKIPPED test
        testname, results, text, logs = r2
        self.failUnlessEqual(testname,
                             ("twisted", "names", "test", "test_names",
                              "HostsTestCase", "testIPv6"))
        self.failUnlessEqual(results, builder.SKIPPED)
        self.failUnlessEqual(text, ['skipped'])
        self.failUnless(logs.startswith("SKIPPED: testIPv6"))
        self.failUnless(logs.endswith("IPv6 support is not in our hosts resolver yet\n"))

        # r3: an EXPECTED FAILURE, which is classified as SUCCESS
        testname, results, text, logs = r3
        self.failUnlessEqual(testname,
                             ("twisted", "test", "test_rebuild",
                              "NewStyleTestCase", "testSlots"))
        self.failUnlessEqual(results, builder.SUCCESS)
        self.failUnlessEqual(text, ['expected', 'failure'])
        self.failUnless(logs.startswith("EXPECTED FAILURE: "))
        self.failUnlessIn("\nTraceback ", logs)
        self.failUnless(logs.endswith("layout differs from 'SlottedClass'\n"))

        # r4: a genuine FAILURE with its traceback captured
        testname, results, text, logs = r4
        self.failUnlessEqual(testname,
                             ("twisted", "conch", "test", "test_sftp",
                              "TestOurServerBatchFile", "testBatchFile"))
        self.failUnlessEqual(results, builder.FAILURE)
        self.failUnlessEqual(text, ['failure'])
        self.failUnless(logs.startswith("FAILURE: "))
        self.failUnlessIn("Traceback ", logs)
        self.failUnless(logs.endswith("'testRenameFile', 'testfile1']\n"))
+
diff --git a/buildbot/buildbot/test/test_util.py b/buildbot/buildbot/test/test_util.py
new file mode 100644
index 0000000..b375390
--- /dev/null
+++ b/buildbot/buildbot/test/test_util.py
@@ -0,0 +1,26 @@
+# -*- test-case-name: buildbot.test.test_util -*-
+
+from twisted.trial import unittest
+
+from buildbot import util
+
+
+class Foo(util.ComparableMixin):
+ compare_attrs = ["a", "b"]
+
+ def __init__(self, a, b, c):
+ self.a, self.b, self.c = a,b,c
+
+
+class Bar(Foo, util.ComparableMixin):
+ compare_attrs = ["b", "c"]
+
+class Compare(unittest.TestCase):
+ def testCompare(self):
+ f1 = Foo(1, 2, 3)
+ f2 = Foo(1, 2, 4)
+ f3 = Foo(1, 3, 4)
+ b1 = Bar(1, 2, 3)
+ self.failUnless(f1 == f2)
+ self.failIf(f1 == f3)
+ self.failIf(f1 == b1)
diff --git a/buildbot/buildbot/test/test_vc.py b/buildbot/buildbot/test/test_vc.py
new file mode 100644
index 0000000..4d0c18e
--- /dev/null
+++ b/buildbot/buildbot/test/test_vc.py
@@ -0,0 +1,3023 @@
+# -*- test-case-name: buildbot.test.test_vc -*-
+
+import sys, os, time, re
+from email.Utils import mktime_tz, parsedate_tz
+
+from twisted.trial import unittest
+from twisted.internet import defer, reactor, utils, protocol, task, error
+from twisted.python import failure
+from twisted.python.procutils import which
+from twisted.web import client, static, server
+
+#defer.Deferred.debug = True
+
+from twisted.python import log
+#log.startLogging(sys.stderr)
+
+from buildbot import master, interfaces
+from buildbot.slave import bot, commands
+from buildbot.slave.commands import rmdirRecursive
+from buildbot.status.builder import SUCCESS, FAILURE
+from buildbot.process import base
+from buildbot.steps import source
+from buildbot.changes import changes
+from buildbot.sourcestamp import SourceStamp
+from buildbot.scripts import tryclient
+from buildbot.test.runutils import SignalMixin, myGetProcessOutputAndValue
+
+#step.LoggedRemoteCommand.debug = True
+
+from twisted.internet.defer import waitForDeferred, deferredGenerator
+
+# Most of these tests (all but SourceStamp) depend upon having a set of
+# repositories from which we can perform checkouts. These repositories are
+# created by the setUp method at the start of each test class. In earlier
+# versions these repositories were created offline and distributed with a
+# separate tarball named 'buildbot-test-vc-1.tar.gz'. This is no longer
+# necessary.
+
+# CVS requires a local file repository. Providing remote access is beyond
+# the feasible abilities of this test program (needs pserver or ssh).
+
+# SVN requires a local file repository. To provide remote access over HTTP
+# requires an apache server with DAV support and mod_svn, way beyond what we
+# can test from here.
+
+# Arch and Darcs both allow remote (read-only) operation with any web
+# server. We test both local file access and HTTP access (by spawning a
+# small web server to provide access to the repository files while the test
+# is running).
+
+# Perforce starts the daemon running on localhost. Unfortunately, it must
+# use a predetermined Internet-domain port number, unless we want to go
+# all-out: bind the listen socket ourselves and pretend to be inetd.
+
+config_vc = """
+from buildbot.process import factory
+from buildbot.steps import source
+from buildbot.buildslave import BuildSlave
+s = factory.s
+
+f1 = factory.BuildFactory([
+ %s,
+ ])
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit')]
+c['schedulers'] = []
+c['builders'] = [{'name': 'vc', 'slavename': 'bot1',
+ 'builddir': 'vc-dir', 'factory': f1}]
+c['slavePortnum'] = 0
+# do not compress logs in tests
+c['logCompressionLimit'] = False
+BuildmasterConfig = c
+"""
+
+p0_diff = r"""
+Index: subdir/subdir.c
+===================================================================
+RCS file: /home/warner/stuff/Projects/BuildBot/code-arch/_trial_temp/test_vc/repositories/CVS-Repository/sample/subdir/subdir.c,v
+retrieving revision 1.1.1.1
+diff -u -r1.1.1.1 subdir.c
+--- subdir/subdir.c 14 Aug 2005 01:32:49 -0000 1.1.1.1
++++ subdir/subdir.c 14 Aug 2005 01:36:15 -0000
+@@ -4,6 +4,6 @@
+ int
+ main(int argc, const char *argv[])
+ {
+- printf("Hello subdir.\n");
++ printf("Hello patched subdir.\n");
+ return 0;
+ }
+"""
+
+# this patch does not include the filename headers, so it is
+# patchlevel-neutral
+TRY_PATCH = '''
+@@ -5,6 +5,6 @@
+ int
+ main(int argc, const char *argv[])
+ {
+- printf("Hello subdir.\\n");
++ printf("Hello try.\\n");
+ return 0;
+ }
+'''
+
+MAIN_C = '''
+// this is main.c
+#include <stdio.h>
+
+int
+main(int argc, const char *argv[])
+{
+ printf("Hello world.\\n");
+ return 0;
+}
+'''
+
+BRANCH_C = '''
+// this is main.c
+#include <stdio.h>
+
+int
+main(int argc, const char *argv[])
+{
+ printf("Hello branch.\\n");
+ return 0;
+}
+'''
+
+VERSION_C = '''
+// this is version.c
+#include <stdio.h>
+
+int
+main(int argc, const char *argv[])
+{
+ printf("Hello world, version=%d\\n");
+ return 0;
+}
+'''
+
+SUBDIR_C = '''
+// this is subdir/subdir.c
+#include <stdio.h>
+
+int
+main(int argc, const char *argv[])
+{
+ printf("Hello subdir.\\n");
+ return 0;
+}
+'''
+
+TRY_C = '''
+// this is subdir/subdir.c
+#include <stdio.h>
+
+int
+main(int argc, const char *argv[])
+{
+ printf("Hello try.\\n");
+ return 0;
+}
+'''
+
+def qw(s):
+ return s.split()
+
+class VCS_Helper:
+ # this is a helper class which keeps track of whether each VC system is
+ # available, and whether the repository for each has been created. There
+ # is one instance of this class, at module level, shared between all test
+ # cases.
+
+ def __init__(self):
+ self._helpers = {}
+ self._isCapable = {}
+ self._excuses = {}
+ self._repoReady = {}
+
+ def registerVC(self, name, helper):
+ self._helpers[name] = helper
+ self._repoReady[name] = False
+
+ def skipIfNotCapable(self, name):
+ """Either return None, or raise SkipTest"""
+ d = self.capable(name)
+ def _maybeSkip(res):
+ if not res[0]:
+ raise unittest.SkipTest(res[1])
+ d.addCallback(_maybeSkip)
+ return d
+
+ def capable(self, name):
+ """Return a Deferred that fires with (True,None) if this host offers
+ the given VC tool, or (False,excuse) if it does not (and therefore
+ the tests should be skipped)."""
+
+ if self._isCapable.has_key(name):
+ if self._isCapable[name]:
+ return defer.succeed((True,None))
+ else:
+ return defer.succeed((False, self._excuses[name]))
+ d = defer.maybeDeferred(self._helpers[name].capable)
+ def _capable(res):
+ if res[0]:
+ self._isCapable[name] = True
+ else:
+ self._excuses[name] = res[1]
+ return res
+ d.addCallback(_capable)
+ return d
+
+ def getHelper(self, name):
+ return self._helpers[name]
+
+ def createRepository(self, name):
+ """Return a Deferred that fires when the repository is set up."""
+ if self._repoReady[name]:
+ return defer.succeed(True)
+ d = self._helpers[name].createRepository()
+ def _ready(res):
+ self._repoReady[name] = True
+ d.addCallback(_ready)
+ return d
+
+VCS = VCS_Helper()
+
+
+# the overall plan here:
+#
+# Each VC system is tested separately, all using the same source tree defined
+# in the 'files' dictionary above. Each VC system gets its own TestCase
+# subclass. The first test case that is run will create the repository during
+# setUp(), making two branches: 'trunk' and 'branch'. The trunk gets a copy
+# of all the files in 'files'. The variant of good.c is committed on the
+# branch.
+#
+# then testCheckout is run, which does a number of checkout/clobber/update
+# builds. These all use trunk r1. It then runs self.fix(), which modifies
+# 'fixable.c', then performs another build and makes sure the tree has been
+# updated.
+#
+# testBranch uses trunk-r1 and branch-r1, making sure that we clobber the
+# tree properly when we switch between them
+#
+# testPatch does a trunk-r1 checkout and applies a patch.
+#
+# testTryGetPatch performs a trunk-r1 checkout, modifies some files, then
+# verifies that tryclient.getSourceStamp figures out the base revision and
+# what got changed.
+
+
+# vc_create makes a repository at r1 with three files: main.c, version.c, and
+# subdir/foo.c . It also creates a branch from r1 (called b1) in which main.c
+# says "hello branch" instead of "hello world". self.trunk[] contains
+# revision stamps for everything on the trunk, and self.branch[] does the
+# same for the branch.
+
+# vc_revise() checks out a tree at HEAD, changes version.c, then checks it
+# back in. The new version stamp is appended to self.trunk[]. The tree is
+# removed afterwards.
+
+# vc_try_checkout(workdir, rev) checks out a tree at REV, then changes
+# subdir/subdir.c to say 'Hello try'
+# vc_try_finish(workdir) removes the tree and cleans up any VC state
+# necessary (like deleting the Arch archive entry).
+
+
+class BaseHelper:
+ def __init__(self):
+ self.trunk = []
+ self.branch = []
+ self.allrevs = []
+
+ def capable(self):
+ # this is also responsible for setting self.vcexe
+ raise NotImplementedError
+
+ def createBasedir(self):
+ # you must call this from createRepository
+ self.repbase = os.path.abspath(os.path.join("test_vc",
+ "repositories"))
+ if not os.path.isdir(self.repbase):
+ os.makedirs(self.repbase)
+
+ def createRepository(self):
+ # this will only be called once per process
+ raise NotImplementedError
+
+ def populate(self, basedir):
+ if not os.path.exists(basedir):
+ os.makedirs(basedir)
+ os.makedirs(os.path.join(basedir, "subdir"))
+ open(os.path.join(basedir, "main.c"), "w").write(MAIN_C)
+ self.version = 1
+ version_c = VERSION_C % self.version
+ open(os.path.join(basedir, "version.c"), "w").write(version_c)
+ open(os.path.join(basedir, "main.c"), "w").write(MAIN_C)
+ open(os.path.join(basedir, "subdir", "subdir.c"), "w").write(SUBDIR_C)
+
+ def populate_branch(self, basedir):
+ open(os.path.join(basedir, "main.c"), "w").write(BRANCH_C)
+
+ def addTrunkRev(self, rev):
+ self.trunk.append(rev)
+ self.allrevs.append(rev)
+ def addBranchRev(self, rev):
+ self.branch.append(rev)
+ self.allrevs.append(rev)
+
+ def runCommand(self, basedir, command, failureIsOk=False,
+ stdin=None, env=None):
+ # all commands passed to do() should be strings or lists. If they are
+ # strings, none of the arguments may have spaces. This makes the
+ # commands less verbose at the expense of restricting what they can
+ # specify.
+ if type(command) not in (list, tuple):
+ command = command.split(" ")
+
+        # On Windows, run *.cmd scripts via "cmd.exe /c call" to avoid failures when the path contains spaces.
+ if sys.platform == 'win32' and command[0].lower().endswith('.cmd'):
+ command = [which('cmd.exe')[0], '/c', 'call'] + command
+
+ DEBUG = False
+ if DEBUG:
+ print "do %s" % command
+ print " in basedir %s" % basedir
+ if stdin:
+ print " STDIN:\n", stdin, "\n--STDIN DONE"
+
+ if not env:
+ env = os.environ.copy()
+ env['LC_ALL'] = "C"
+ d = myGetProcessOutputAndValue(command[0], command[1:],
+ env=env, path=basedir,
+ stdin=stdin)
+ def check((out, err, code)):
+ if DEBUG:
+ print
+ print "command was: %s" % command
+ if out: print "out: %s" % out
+ if err: print "err: %s" % err
+ print "code: %s" % code
+ if code != 0 and not failureIsOk:
+ log.msg("command %s finished with exit code %d" %
+ (command, code))
+ log.msg(" and stdout %s" % (out,))
+ log.msg(" and stderr %s" % (err,))
+ raise RuntimeError("command %s finished with exit code %d"
+ % (command, code)
+ + ": see logs for stdout")
+ return out
+ d.addCallback(check)
+ return d
+
+ def do(self, basedir, command, failureIsOk=False, stdin=None, env=None):
+ d = self.runCommand(basedir, command, failureIsOk=failureIsOk,
+ stdin=stdin, env=env)
+ return waitForDeferred(d)
+
+ def dovc(self, basedir, command, failureIsOk=False, stdin=None, env=None):
+ """Like do(), but the VC binary will be prepended to COMMAND."""
+ if isinstance(command, (str, unicode)):
+ command = [self.vcexe] + command.split(' ')
+ else:
+ # command is a list
+ command = [self.vcexe] + command
+ return self.do(basedir, command, failureIsOk, stdin, env)
+
+class VCBase(SignalMixin):
+ metadir = None
+ createdRepository = False
+ master = None
+ slave = None
+ helper = None
+ httpServer = None
+ httpPort = None
+ skip = None
+ has_got_revision = False
+ has_got_revision_branches_are_merged = False # for SVN
+
+ def failUnlessIn(self, substring, string, msg=None):
+ # trial provides a version of this that requires python-2.3 to test
+ # strings.
+ if msg is None:
+ msg = ("did not see the expected substring '%s' in string '%s'" %
+ (substring, string))
+ self.failUnless(string.find(substring) != -1, msg)
+
+ def setUp(self):
+ d = VCS.skipIfNotCapable(self.vc_name)
+ d.addCallback(self._setUp1)
+ return d
+
+ def _setUp1(self, res):
+ self.helper = VCS.getHelper(self.vc_name)
+
+ if os.path.exists("basedir"):
+ rmdirRecursive("basedir")
+ os.mkdir("basedir")
+ self.master = master.BuildMaster("basedir")
+ self.slavebase = os.path.abspath("slavebase")
+ if os.path.exists(self.slavebase):
+ rmdirRecursive(self.slavebase)
+ os.mkdir("slavebase")
+
+ d = VCS.createRepository(self.vc_name)
+ return d
+
+ def connectSlave(self):
+ port = self.master.slavePort._port.getHost().port
+ slave = bot.BuildSlave("localhost", port, "bot1", "sekrit",
+ self.slavebase, keepalive=0, usePTY=False)
+ self.slave = slave
+ slave.startService()
+ d = self.master.botmaster.waitUntilBuilderAttached("vc")
+ return d
+
+ def loadConfig(self, config):
+ # reloading the config file causes a new 'listDirs' command to be
+ # sent to the slave. To synchronize on this properly, it is easiest
+ # to stop and restart the slave.
+ d = defer.succeed(None)
+ if self.slave:
+ d = self.master.botmaster.waitUntilBuilderDetached("vc")
+ self.slave.stopService()
+ d.addCallback(lambda res: self.master.loadConfig(config))
+ d.addCallback(lambda res: self.connectSlave())
+ return d
+
+ def serveHTTP(self):
+ # launch an HTTP server to serve the repository files
+ self.root = static.File(self.helper.repbase)
+ self.site = server.Site(self.root)
+ self.httpServer = reactor.listenTCP(0, self.site)
+ self.httpPort = self.httpServer.getHost().port
+
+ def doBuild(self, shouldSucceed=True, ss=None):
+ c = interfaces.IControl(self.master)
+
+ if ss is None:
+ ss = SourceStamp()
+ #print "doBuild(ss: b=%s rev=%s)" % (ss.branch, ss.revision)
+ req = base.BuildRequest("test_vc forced build", ss, 'test_builder')
+ d = req.waitUntilFinished()
+ c.getBuilder("vc").requestBuild(req)
+ d.addCallback(self._doBuild_1, shouldSucceed)
+ return d
+ def _doBuild_1(self, bs, shouldSucceed):
+ r = bs.getResults()
+ if r != SUCCESS and shouldSucceed:
+ print
+ print
+ if not bs.isFinished():
+ print "Hey, build wasn't even finished!"
+ print "Build did not succeed:", r, bs.getText()
+ for s in bs.getSteps():
+ for l in s.getLogs():
+ print "--- START step %s / log %s ---" % (s.getName(),
+ l.getName())
+ print l.getTextWithHeaders()
+ print "--- STOP ---"
+ print
+ self.fail("build did not succeed")
+ return bs
+
+ def printLogs(self, bs):
+ for s in bs.getSteps():
+ for l in s.getLogs():
+ print "--- START step %s / log %s ---" % (s.getName(),
+ l.getName())
+ print l.getTextWithHeaders()
+ print "--- STOP ---"
+ print
+
+ def touch(self, d, f):
+ open(os.path.join(d,f),"w").close()
+ def shouldExist(self, *args):
+ target = os.path.join(*args)
+ self.failUnless(os.path.exists(target),
+ "expected to find %s but didn't" % target)
+ def shouldNotExist(self, *args):
+ target = os.path.join(*args)
+ self.failIf(os.path.exists(target),
+ "expected to NOT find %s, but did" % target)
+ def shouldContain(self, d, f, contents):
+ c = open(os.path.join(d, f), "r").read()
+ self.failUnlessIn(contents, c)
+
+ def checkGotRevision(self, bs, expected):
+ if self.has_got_revision:
+ self.failUnlessEqual(bs.getProperty("got_revision"), str(expected))
+
+ def checkGotRevisionIsLatest(self, bs):
+ expected = self.helper.trunk[-1]
+ if self.has_got_revision_branches_are_merged:
+ expected = self.helper.allrevs[-1]
+ self.checkGotRevision(bs, expected)
+
+ def do_vctest(self, testRetry=True):
+ vctype = self.vctype
+ args = self.helper.vcargs
+ m = self.master
+ self.vcdir = os.path.join(self.slavebase, "vc-dir", "source")
+ self.workdir = os.path.join(self.slavebase, "vc-dir", "build")
+ # woo double-substitution
+ s = "s(%s, timeout=200, workdir='build', mode='%%s'" % (vctype,)
+ for k,v in args.items():
+ s += ", %s=%s" % (k, repr(v))
+ s += ")"
+ config = config_vc % s
+
+ m.loadConfig(config % 'clobber')
+ m.readConfig = True
+ m.startService()
+
+ d = self.connectSlave()
+ d.addCallback(lambda res: log.msg("testing clobber"))
+ d.addCallback(self._do_vctest_clobber)
+ d.addCallback(lambda res: log.msg("doing update"))
+ d.addCallback(lambda res: self.loadConfig(config % 'update'))
+ d.addCallback(lambda res: log.msg("testing update"))
+ d.addCallback(self._do_vctest_update)
+ if testRetry:
+ d.addCallback(lambda res: log.msg("testing update retry"))
+ d.addCallback(self._do_vctest_update_retry)
+ d.addCallback(lambda res: log.msg("doing copy"))
+ d.addCallback(lambda res: self.loadConfig(config % 'copy'))
+ d.addCallback(lambda res: log.msg("testing copy"))
+ d.addCallback(self._do_vctest_copy)
+ d.addCallback(lambda res: log.msg("did copy test"))
+ if self.metadir:
+ d.addCallback(lambda res: log.msg("doing export"))
+ d.addCallback(lambda res: self.loadConfig(config % 'export'))
+ d.addCallback(lambda res: log.msg("testing export"))
+ d.addCallback(self._do_vctest_export)
+ d.addCallback(lambda res: log.msg("did export test"))
+ return d
+
+ def _do_vctest_clobber(self, res):
+ d = self.doBuild() # initial checkout
+ d.addCallback(self._do_vctest_clobber_1)
+ return d
+ def _do_vctest_clobber_1(self, bs):
+ self.shouldExist(self.workdir, "main.c")
+ self.shouldExist(self.workdir, "version.c")
+ self.shouldExist(self.workdir, "subdir", "subdir.c")
+ if self.metadir:
+ self.shouldExist(self.workdir, self.metadir)
+ self.failUnlessEqual(bs.getProperty("revision"), None)
+ self.failUnlessEqual(bs.getProperty("branch"), None)
+ self.checkGotRevisionIsLatest(bs)
+
+ self.touch(self.workdir, "newfile")
+ self.shouldExist(self.workdir, "newfile")
+ d = self.doBuild() # rebuild clobbers workdir
+ d.addCallback(self._do_vctest_clobber_2)
+ return d
+ def _do_vctest_clobber_2(self, res):
+ self.shouldNotExist(self.workdir, "newfile")
+ # do a checkout to a specific version. Mercurial-over-HTTP (when
+ # either client or server is older than hg-0.9.2) cannot do this
+ # directly, so it must checkout HEAD and then update back to the
+ # requested revision.
+ d = self.doBuild(ss=SourceStamp(revision=self.helper.trunk[0]))
+ d.addCallback(self._do_vctest_clobber_3)
+ return d
+ def _do_vctest_clobber_3(self, bs):
+ self.shouldExist(self.workdir, "main.c")
+ self.shouldExist(self.workdir, "version.c")
+ self.shouldExist(self.workdir, "subdir", "subdir.c")
+ if self.metadir:
+ self.shouldExist(self.workdir, self.metadir)
+ self.failUnlessEqual(bs.getProperty("revision"), self.helper.trunk[0] or None)
+ self.failUnlessEqual(bs.getProperty("branch"), None)
+ self.checkGotRevision(bs, self.helper.trunk[0])
+ # leave the tree at HEAD
+ return self.doBuild()
+
+
+ def _do_vctest_update(self, res):
+ log.msg("_do_vctest_update")
+ d = self.doBuild() # rebuild with update
+ d.addCallback(self._do_vctest_update_1)
+ return d
+ def _do_vctest_update_1(self, bs):
+ log.msg("_do_vctest_update_1")
+ self.shouldExist(self.workdir, "main.c")
+ self.shouldExist(self.workdir, "version.c")
+ self.shouldContain(self.workdir, "version.c",
+ "version=%d" % self.helper.version)
+ if self.metadir:
+ self.shouldExist(self.workdir, self.metadir)
+ self.failUnlessEqual(bs.getProperty("revision"), None)
+ self.checkGotRevisionIsLatest(bs)
+
+ self.touch(self.workdir, "newfile")
+ d = self.doBuild() # update rebuild leaves new files
+ d.addCallback(self._do_vctest_update_2)
+ return d
+ def _do_vctest_update_2(self, bs):
+ log.msg("_do_vctest_update_2")
+ self.shouldExist(self.workdir, "main.c")
+ self.shouldExist(self.workdir, "version.c")
+ self.touch(self.workdir, "newfile")
+ # now make a change to the repository and make sure we pick it up
+ d = self.helper.vc_revise()
+ d.addCallback(lambda res: self.doBuild())
+ d.addCallback(self._do_vctest_update_3)
+ return d
+ def _do_vctest_update_3(self, bs):
+ log.msg("_do_vctest_update_3")
+ self.shouldExist(self.workdir, "main.c")
+ self.shouldExist(self.workdir, "version.c")
+ self.shouldContain(self.workdir, "version.c",
+ "version=%d" % self.helper.version)
+ self.shouldExist(self.workdir, "newfile")
+ self.failUnlessEqual(bs.getProperty("revision"), None)
+ self.checkGotRevisionIsLatest(bs)
+
+ # now "update" to an older revision
+ d = self.doBuild(ss=SourceStamp(revision=self.helper.trunk[-2]))
+ d.addCallback(self._do_vctest_update_4)
+ return d
+ def _do_vctest_update_4(self, bs):
+ log.msg("_do_vctest_update_4")
+ self.shouldExist(self.workdir, "main.c")
+ self.shouldExist(self.workdir, "version.c")
+ self.shouldContain(self.workdir, "version.c",
+ "version=%d" % (self.helper.version-1))
+ self.failUnlessEqual(bs.getProperty("revision"),
+ self.helper.trunk[-2] or None)
+ self.checkGotRevision(bs, self.helper.trunk[-2])
+
+ # now update to the newer revision
+ d = self.doBuild(ss=SourceStamp(revision=self.helper.trunk[-1]))
+ d.addCallback(self._do_vctest_update_5)
+ return d
+ def _do_vctest_update_5(self, bs):
+ log.msg("_do_vctest_update_5")
+ self.shouldExist(self.workdir, "main.c")
+ self.shouldExist(self.workdir, "version.c")
+ self.shouldContain(self.workdir, "version.c",
+ "version=%d" % self.helper.version)
+ self.failUnlessEqual(bs.getProperty("revision"),
+ self.helper.trunk[-1] or None)
+ self.checkGotRevision(bs, self.helper.trunk[-1])
+
+
+ def _do_vctest_update_retry(self, res):
+ # certain local changes will prevent an update from working. The
+ # most common is to replace a file with a directory, or vice
+ # versa. The slave code should spot the failure and do a
+ # clobber/retry.
+ os.unlink(os.path.join(self.workdir, "main.c"))
+ os.mkdir(os.path.join(self.workdir, "main.c"))
+ self.touch(os.path.join(self.workdir, "main.c"), "foo")
+ self.touch(self.workdir, "newfile")
+
+ d = self.doBuild() # update, but must clobber to handle the error
+ d.addCallback(self._do_vctest_update_retry_1)
+ return d
+ def _do_vctest_update_retry_1(self, bs):
+ # SVN-1.4.0 doesn't seem to have any problem with the
+ # file-turned-directory issue (although older versions did). So don't
+ # actually check that the tree was clobbered.. as long as the update
+ # succeeded (checked by doBuild), that should be good enough.
+ #self.shouldNotExist(self.workdir, "newfile")
+ pass
+
+ def _do_vctest_copy(self, res):
+ log.msg("_do_vctest_copy 1")
+ d = self.doBuild() # copy rebuild clobbers new files
+ d.addCallback(self._do_vctest_copy_1)
+ return d
+ def _do_vctest_copy_1(self, bs):
+ log.msg("_do_vctest_copy 2")
+ if self.metadir:
+ self.shouldExist(self.workdir, self.metadir)
+ self.shouldNotExist(self.workdir, "newfile")
+ self.touch(self.workdir, "newfile")
+ self.touch(self.vcdir, "newvcfile")
+ self.failUnlessEqual(bs.getProperty("revision"), None)
+ self.checkGotRevisionIsLatest(bs)
+
+ d = self.doBuild() # copy rebuild clobbers new files
+ d.addCallback(self._do_vctest_copy_2)
+ return d
+ def _do_vctest_copy_2(self, bs):
+ log.msg("_do_vctest_copy 3")
+ if self.metadir:
+ self.shouldExist(self.workdir, self.metadir)
+ self.shouldNotExist(self.workdir, "newfile")
+ self.shouldExist(self.vcdir, "newvcfile")
+ self.shouldExist(self.workdir, "newvcfile")
+ self.failUnlessEqual(bs.getProperty("revision"), None)
+ self.checkGotRevisionIsLatest(bs)
+ self.touch(self.workdir, "newfile")
+
+ def _do_vctest_export(self, res):
+ d = self.doBuild() # export rebuild clobbers new files
+ d.addCallback(self._do_vctest_export_1)
+ return d
+ def _do_vctest_export_1(self, bs):
+ self.shouldNotExist(self.workdir, self.metadir)
+ self.shouldNotExist(self.workdir, "newfile")
+ self.failUnlessEqual(bs.getProperty("revision"), None)
+ #self.checkGotRevisionIsLatest(bs)
+ # VC 'export' is not required to have a got_revision
+ self.touch(self.workdir, "newfile")
+
+ d = self.doBuild() # export rebuild clobbers new files
+ d.addCallback(self._do_vctest_export_2)
+ return d
+ def _do_vctest_export_2(self, bs):
+ self.shouldNotExist(self.workdir, self.metadir)
+ self.shouldNotExist(self.workdir, "newfile")
+ self.failUnlessEqual(bs.getProperty("revision"), None)
+ #self.checkGotRevisionIsLatest(bs)
+ # VC 'export' is not required to have a got_revision
+
+ def do_patch(self):
+ vctype = self.vctype
+ args = self.helper.vcargs
+ m = self.master
+ self.vcdir = os.path.join(self.slavebase, "vc-dir", "source")
+ self.workdir = os.path.join(self.slavebase, "vc-dir", "build")
+ s = "s(%s, timeout=200, workdir='build', mode='%%s'" % (vctype,)
+ for k,v in args.items():
+ s += ", %s=%s" % (k, repr(v))
+ s += ")"
+ self.config = config_vc % s
+
+ m.loadConfig(self.config % "clobber")
+ m.readConfig = True
+ m.startService()
+
+ ss = SourceStamp(revision=self.helper.trunk[-1], patch=(0, p0_diff))
+
+ d = self.connectSlave()
+ d.addCallback(lambda res: self.doBuild(ss=ss))
+ d.addCallback(self._doPatch_1)
+ return d
+ def _doPatch_1(self, bs):
+ self.shouldContain(self.workdir, "version.c",
+ "version=%d" % self.helper.version)
+ # make sure the file actually got patched
+ subdir_c = os.path.join(self.slavebase, "vc-dir", "build",
+ "subdir", "subdir.c")
+ data = open(subdir_c, "r").read()
+ self.failUnlessIn("Hello patched subdir.\\n", data)
+ self.failUnlessEqual(bs.getProperty("revision"),
+ self.helper.trunk[-1] or None)
+ self.checkGotRevision(bs, self.helper.trunk[-1])
+
+ # make sure that a rebuild does not use the leftover patched workdir
+ d = self.master.loadConfig(self.config % "update")
+ d.addCallback(lambda res: self.doBuild(ss=None))
+ d.addCallback(self._doPatch_2)
+ return d
+ def _doPatch_2(self, bs):
+ # make sure the file is back to its original
+ subdir_c = os.path.join(self.slavebase, "vc-dir", "build",
+ "subdir", "subdir.c")
+ data = open(subdir_c, "r").read()
+ self.failUnlessIn("Hello subdir.\\n", data)
+ self.failUnlessEqual(bs.getProperty("revision"), None)
+ self.checkGotRevisionIsLatest(bs)
+
+ # now make sure we can patch an older revision. We need at least two
+ # revisions here, so we might have to create one first
+ if len(self.helper.trunk) < 2:
+ d = self.helper.vc_revise()
+ d.addCallback(self._doPatch_3)
+ return d
+ return self._doPatch_3()
+
+ def _doPatch_3(self, res=None):
+ ss = SourceStamp(revision=self.helper.trunk[-2], patch=(0, p0_diff))
+ d = self.doBuild(ss=ss)
+ d.addCallback(self._doPatch_4)
+ return d
+ def _doPatch_4(self, bs):
+ self.shouldContain(self.workdir, "version.c",
+ "version=%d" % (self.helper.version-1))
+ # and make sure the file actually got patched
+ subdir_c = os.path.join(self.slavebase, "vc-dir", "build",
+ "subdir", "subdir.c")
+ data = open(subdir_c, "r").read()
+ self.failUnlessIn("Hello patched subdir.\\n", data)
+ self.failUnlessEqual(bs.getProperty("revision"),
+ self.helper.trunk[-2] or None)
+ self.checkGotRevision(bs, self.helper.trunk[-2])
+
+ # now check that we can patch a branch
+ ss = SourceStamp(branch=self.helper.branchname,
+ revision=self.helper.branch[-1],
+ patch=(0, p0_diff))
+ d = self.doBuild(ss=ss)
+ d.addCallback(self._doPatch_5)
+ return d
+ def _doPatch_5(self, bs):
+ self.shouldContain(self.workdir, "version.c",
+ "version=%d" % 1)
+ self.shouldContain(self.workdir, "main.c", "Hello branch.")
+ subdir_c = os.path.join(self.slavebase, "vc-dir", "build",
+ "subdir", "subdir.c")
+ data = open(subdir_c, "r").read()
+ self.failUnlessIn("Hello patched subdir.\\n", data)
+ self.failUnlessEqual(bs.getProperty("revision"),
+ self.helper.branch[-1] or None)
+ self.failUnlessEqual(bs.getProperty("branch"), self.helper.branchname or None)
+ self.checkGotRevision(bs, self.helper.branch[-1])
+
+
+ def do_vctest_once(self, shouldSucceed):
+ m = self.master
+ vctype = self.vctype
+ args = self.helper.vcargs
+ vcdir = os.path.join(self.slavebase, "vc-dir", "source")
+ workdir = os.path.join(self.slavebase, "vc-dir", "build")
+ # woo double-substitution
+ s = "s(%s, timeout=200, workdir='build', mode='clobber'" % (vctype,)
+ for k,v in args.items():
+ s += ", %s=%s" % (k, repr(v))
+ s += ")"
+ config = config_vc % s
+
+ m.loadConfig(config)
+ m.readConfig = True
+ m.startService()
+
+ self.connectSlave()
+ d = self.doBuild(shouldSucceed) # initial checkout
+ return d
+
+ def do_branch(self):
+ log.msg("do_branch")
+ vctype = self.vctype
+ args = self.helper.vcargs
+ m = self.master
+ self.vcdir = os.path.join(self.slavebase, "vc-dir", "source")
+ self.workdir = os.path.join(self.slavebase, "vc-dir", "build")
+ s = "s(%s, timeout=200, workdir='build', mode='%%s'" % (vctype,)
+ for k,v in args.items():
+ s += ", %s=%s" % (k, repr(v))
+ s += ")"
+ self.config = config_vc % s
+
+ m.loadConfig(self.config % "update")
+ m.readConfig = True
+ m.startService()
+
+ # first we do a build of the trunk
+ d = self.connectSlave()
+ d.addCallback(lambda res: self.doBuild(ss=SourceStamp()))
+ d.addCallback(self._doBranch_1)
+ return d
+ def _doBranch_1(self, bs):
+ log.msg("_doBranch_1")
+ # make sure the checkout was of the trunk
+ main_c = os.path.join(self.slavebase, "vc-dir", "build", "main.c")
+ data = open(main_c, "r").read()
+ self.failUnlessIn("Hello world.", data)
+
+ # now do a checkout on the branch. The change in branch name should
+ # trigger a clobber.
+ self.touch(self.workdir, "newfile")
+ d = self.doBuild(ss=SourceStamp(branch=self.helper.branchname))
+ d.addCallback(self._doBranch_2)
+ return d
+ def _doBranch_2(self, bs):
+ log.msg("_doBranch_2")
+ # make sure it was on the branch
+ main_c = os.path.join(self.slavebase, "vc-dir", "build", "main.c")
+ data = open(main_c, "r").read()
+ self.failUnlessIn("Hello branch.", data)
+ # and make sure the tree was clobbered
+ self.shouldNotExist(self.workdir, "newfile")
+
+ # doing another build on the same branch should not clobber the tree
+ self.touch(self.workdir, "newbranchfile")
+ d = self.doBuild(ss=SourceStamp(branch=self.helper.branchname))
+ d.addCallback(self._doBranch_3)
+ return d
+ def _doBranch_3(self, bs):
+ log.msg("_doBranch_3")
+ # make sure it is still on the branch
+ main_c = os.path.join(self.slavebase, "vc-dir", "build", "main.c")
+ data = open(main_c, "r").read()
+ self.failUnlessIn("Hello branch.", data)
+ # and make sure the tree was not clobbered
+ self.shouldExist(self.workdir, "newbranchfile")
+
+ # now make sure that a non-branch checkout clobbers the tree
+ d = self.doBuild(ss=SourceStamp())
+ d.addCallback(self._doBranch_4)
+ return d
+ def _doBranch_4(self, bs):
+ log.msg("_doBranch_4")
+ # make sure it was on the trunk
+ main_c = os.path.join(self.slavebase, "vc-dir", "build", "main.c")
+ data = open(main_c, "r").read()
+ self.failUnlessIn("Hello world.", data)
+ self.shouldNotExist(self.workdir, "newbranchfile")
+
+ def do_getpatch(self, doBranch=True):
+ log.msg("do_getpatch")
+ # prepare a buildslave to do checkouts
+ vctype = self.vctype
+ args = self.helper.vcargs
+ m = self.master
+ self.vcdir = os.path.join(self.slavebase, "vc-dir", "source")
+ self.workdir = os.path.join(self.slavebase, "vc-dir", "build")
+ # woo double-substitution
+ s = "s(%s, timeout=200, workdir='build', mode='%%s'" % (vctype,)
+ for k,v in args.items():
+ s += ", %s=%s" % (k, repr(v))
+ s += ")"
+ config = config_vc % s
+
+ m.loadConfig(config % 'clobber')
+ m.readConfig = True
+ m.startService()
+
+ d = self.connectSlave()
+
+ # then set up the "developer's tree". first we modify a tree from the
+ # head of the trunk
+ tmpdir = "try_workdir"
+ self.trydir = os.path.join(self.helper.repbase, tmpdir)
+ rmdirRecursive(self.trydir)
+ d.addCallback(self.do_getpatch_trunkhead)
+ d.addCallback(self.do_getpatch_trunkold)
+ if doBranch:
+ d.addCallback(self.do_getpatch_branch)
+ d.addCallback(self.do_getpatch_finish)
+ return d
+
+ def do_getpatch_finish(self, res):
+ log.msg("do_getpatch_finish")
+ self.helper.vc_try_finish(self.trydir)
+ return res
+
+ def try_shouldMatch(self, filename):
+ devfilename = os.path.join(self.trydir, filename)
+ devfile = open(devfilename, "r").read()
+ slavefilename = os.path.join(self.workdir, filename)
+ slavefile = open(slavefilename, "r").read()
+ self.failUnlessEqual(devfile, slavefile,
+ ("slavefile (%s) contains '%s'. "
+ "developer's file (%s) contains '%s'. "
+ "These ought to match") %
+ (slavefilename, slavefile,
+ devfilename, devfile))
+
    def do_getpatch_trunkhead(self, res):
        """Start the try-from-trunk-HEAD sequence: make a developer tree."""
        log.msg("do_getpatch_trunkhead")
        d = self.helper.vc_try_checkout(self.trydir, self.helper.trunk[-1])
        d.addCallback(self._do_getpatch_trunkhead_1)
        return d
    def _do_getpatch_trunkhead_1(self, res):
        # extract a SourceStamp (base revision + patch) from the try tree
        log.msg("_do_getpatch_trunkhead_1")
        d = tryclient.getSourceStamp(self.vctype_try, self.trydir, None)
        d.addCallback(self._do_getpatch_trunkhead_2)
        return d
    def _do_getpatch_trunkhead_2(self, ss):
        # run a build from the extracted SourceStamp
        log.msg("_do_getpatch_trunkhead_2")
        d = self.doBuild(ss=ss)
        d.addCallback(self._do_getpatch_trunkhead_3)
        return d
    def _do_getpatch_trunkhead_3(self, res):
        log.msg("_do_getpatch_trunkhead_3")
        # verify that the resulting buildslave tree matches the developer's
        self.try_shouldMatch("main.c")
        self.try_shouldMatch("version.c")
        self.try_shouldMatch(os.path.join("subdir", "subdir.c"))
+
    def do_getpatch_trunkold(self, res):
        """Repeat the try test against an older (non-HEAD) trunk revision."""
        log.msg("do_getpatch_trunkold")
        # now try a tree from an older revision. We need at least two
        # revisions here, so we might have to create one first
        if len(self.helper.trunk) < 2:
            d = self.helper.vc_revise()
            d.addCallback(self._do_getpatch_trunkold_1)
            return d
        return self._do_getpatch_trunkold_1()
    def _do_getpatch_trunkold_1(self, res=None):
        # make a developer tree from the second-newest trunk revision
        log.msg("_do_getpatch_trunkold_1")
        d = self.helper.vc_try_checkout(self.trydir, self.helper.trunk[-2])
        d.addCallback(self._do_getpatch_trunkold_2)
        return d
    def _do_getpatch_trunkold_2(self, res):
        # extract a SourceStamp from the older-revision tree
        log.msg("_do_getpatch_trunkold_2")
        d = tryclient.getSourceStamp(self.vctype_try, self.trydir, None)
        d.addCallback(self._do_getpatch_trunkold_3)
        return d
    def _do_getpatch_trunkold_3(self, ss):
        # run a build from the extracted SourceStamp
        log.msg("_do_getpatch_trunkold_3")
        d = self.doBuild(ss=ss)
        d.addCallback(self._do_getpatch_trunkold_4)
        return d
    def _do_getpatch_trunkold_4(self, res):
        log.msg("_do_getpatch_trunkold_4")
        # verify that the resulting buildslave tree matches the developer's
        self.try_shouldMatch("main.c")
        self.try_shouldMatch("version.c")
        self.try_shouldMatch(os.path.join("subdir", "subdir.c"))
+
    def do_getpatch_branch(self, res):
        """Repeat the try test against a branch tree."""
        log.msg("do_getpatch_branch")
        # now try a tree from a branch
        d = self.helper.vc_try_checkout(self.trydir, self.helper.branch[-1],
                                        self.helper.branchname)
        d.addCallback(self._do_getpatch_branch_1)
        return d
    def _do_getpatch_branch_1(self, res):
        # extract a SourceStamp, telling the extractor which branch to use
        log.msg("_do_getpatch_branch_1")
        d = tryclient.getSourceStamp(self.vctype_try, self.trydir,
                                     self.helper.try_branchname)
        d.addCallback(self._do_getpatch_branch_2)
        return d
    def _do_getpatch_branch_2(self, ss):
        # run a build from the extracted SourceStamp
        log.msg("_do_getpatch_branch_2")
        d = self.doBuild(ss=ss)
        d.addCallback(self._do_getpatch_branch_3)
        return d
    def _do_getpatch_branch_3(self, res):
        log.msg("_do_getpatch_branch_3")
        # verify that the resulting buildslave tree matches the developer's
        self.try_shouldMatch("main.c")
        self.try_shouldMatch("version.c")
        self.try_shouldMatch(os.path.join("subdir", "subdir.c"))
+
+
+ def dumpPatch(self, patch):
+ # this exists to help me figure out the right 'patchlevel' value
+ # should be returned by tryclient.getSourceStamp
+ n = self.mktemp()
+ open(n,"w").write(patch)
+ d = self.runCommand(".", ["lsdiff", n])
+ def p(res): print "lsdiff:", res.strip().split("\n")
+ d.addCallback(p)
+ return d
+
+
    def tearDown(self):
        """Shut down slave, master, and HTTP server -- in that order -- by
        chaining everything onto one Deferred, then run tearDown2()."""
        d = defer.succeed(None)
        if self.slave:
            # wait until the master notices the slave detaching before the
            # master itself is stopped
            d2 = self.master.botmaster.waitUntilBuilderDetached("vc")
            d.addCallback(lambda res: self.slave.stopService())
            d.addCallback(lambda res: d2)
        if self.master:
            d.addCallback(lambda res: self.master.stopService())
        if self.httpServer:
            d.addCallback(lambda res: self.httpServer.stopListening())
            def stopHTTPTimer():
                from twisted.web import http
                http._logDateTimeStop() # shut down the internal timer. DUMB!
            d.addCallback(lambda res: stopHTTPTimer())
        d.addCallback(lambda res: self.tearDown2())
        return d
+
    def tearDown2(self):
        # extension point: does nothing here; a subclass may override it to
        # add extra per-test cleanup
        pass
+
class CVSHelper(BaseHelper):
    """Repository-management helper for the CVS tests: creates the sample
    repository, commits revisions, and builds 'try' developer trees.
    CVS has no global revision numbers, so revisions are represented as
    getdate() timestamps throughout."""
    branchname = "branch"
    try_branchname = "branch"

    def capable(self):
        """Return (ok, excuse) -- or a Deferred of it -- indicating whether
        a usable CVS binary is installed."""
        cvspaths = which('cvs')
        if not cvspaths:
            return (False, "CVS is not installed")
        # cvs-1.10 (as shipped with OS-X 10.3 "Panther") is too old for this
        # test. There is a situation where we check out a tree, make a
        # change, then commit it back, and CVS refuses to believe that we're
        # operating in a CVS tree. I tested cvs-1.12.9 and it works ok, OS-X
        # 10.4 "Tiger" comes with cvs-1.11, but I haven't tested that yet.
        # For now, skip the tests if we've got 1.10 .
        log.msg("running %s --version.." % (cvspaths[0],))
        d = utils.getProcessOutput(cvspaths[0], ["--version"],
                                   env=os.environ)
        d.addCallback(self._capable, cvspaths[0])
        return d

    def _capable(self, v, vcexe):
        # parse the version number out of the 'cvs --version' output
        m = re.search(r'\(CVS\) ([\d\.]+) ', v)
        if not m:
            log.msg("couldn't identify CVS version number in output:")
            log.msg("'''%s'''" % v)
            log.msg("skipping tests")
            return (False, "Found CVS but couldn't identify its version")
        ver = m.group(1)
        log.msg("found CVS version '%s'" % ver)
        if ver == "1.10":
            return (False, "Found CVS, but it is too old")
        self.vcexe = vcexe
        return (True, None)

    def getdate(self):
        # this timestamp is eventually passed to CVS in a -D argument, and
        # strftime's %z specifier doesn't seem to work reliably (I get +0000
        # where I should get +0700 under linux sometimes, and windows seems
        # to want to put a verbose 'Eastern Standard Time' in there), so
        # leave off the timezone specifier and treat this as localtime. A
        # valid alternative would be to use a hard-coded +0000 and
        # time.gmtime().
        return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

    def createRepository(self):
        """Create the CVS repository: import the sample project as trunk,
        then tag and commit a branch. Runs as a Twisted deferredGenerator
        (rebound below). The sleep(2) calls keep the timestamp-based
        'revisions' distinct from each other and from the commits."""
        self.createBasedir()
        self.cvsrep = cvsrep = os.path.join(self.repbase, "CVS-Repository")
        tmp = os.path.join(self.repbase, "cvstmp")

        w = self.dovc(self.repbase, ['-d', cvsrep, 'init'])
        yield w; w.getResult() # we must getResult() to raise any exceptions

        self.populate(tmp)
        cmd = ['-d', self.cvsrep, 'import',
               '-m', 'sample_project_files', 'sample', 'vendortag', 'start']
        w = self.dovc(tmp, cmd)
        yield w; w.getResult()
        rmdirRecursive(tmp)
        # take a timestamp as the first revision number
        time.sleep(2)
        self.addTrunkRev(self.getdate())
        time.sleep(2)

        # check the module back out (recreating the 'cvstmp' path) so the
        # branch can be tagged from a working copy
        w = self.dovc(self.repbase,
                      ['-d', self.cvsrep, 'checkout', '-d', 'cvstmp', 'sample'])
        yield w; w.getResult()

        w = self.dovc(tmp, ['tag', '-b', self.branchname])
        yield w; w.getResult()
        self.populate_branch(tmp)
        w = self.dovc(tmp,
                      ['commit', '-m', 'commit_on_branch', '-r', self.branchname])
        yield w; w.getResult()
        rmdirRecursive(tmp)
        time.sleep(2)
        self.addBranchRev(self.getdate())
        time.sleep(2)
        # vcargs consumed by the test cases when configuring source.CVS steps
        self.vcargs = { 'cvsroot': self.cvsrep, 'cvsmodule': "sample" }
    createRepository = deferredGenerator(createRepository)


    def vc_revise(self):
        """Commit a new revision of version.c on the trunk and record its
        timestamp (deferredGenerator)."""
        tmp = os.path.join(self.repbase, "cvstmp")

        w = self.dovc(self.repbase,
                      ['-d', self.cvsrep, 'checkout', '-d', 'cvstmp', 'sample'])
        yield w; w.getResult()
        self.version += 1
        version_c = VERSION_C % self.version
        open(os.path.join(tmp, "version.c"), "w").write(version_c)
        w = self.dovc(tmp,
                      ['commit', '-m', 'revised_to_%d' % self.version, 'version.c'])
        yield w; w.getResult()
        rmdirRecursive(tmp)
        time.sleep(2)
        self.addTrunkRev(self.getdate())
        time.sleep(2)
    vc_revise = deferredGenerator(vc_revise)

    def vc_try_checkout(self, workdir, rev, branch=None):
        """Check out revision `rev` (a getdate() timestamp) -- optionally on
        `branch` -- into `workdir`, then modify subdir/subdir.c to simulate
        a developer's edited tree (deferredGenerator)."""
        # 'workdir' is an absolute path
        assert os.path.abspath(workdir) == workdir
        cmd = [self.vcexe, "-d", self.cvsrep, "checkout",
               "-d", workdir,
               "-D", rev]
        if branch is not None:
            cmd.append("-r")
            cmd.append(branch)
        cmd.append("sample")
        w = self.do(self.repbase, cmd)
        yield w; w.getResult()
        open(os.path.join(workdir, "subdir", "subdir.c"), "w").write(TRY_C)
    vc_try_checkout = deferredGenerator(vc_try_checkout)

    def vc_try_finish(self, workdir):
        # discard the developer's try tree
        rmdirRecursive(workdir)
+
class CVS(VCBase, unittest.TestCase):
    """Run the generic VC test battery against a CVS repository."""
    vc_name = "cvs"

    metadir = "CVS"
    vctype = "source.CVS"
    vctype_try = "cvs"
    # CVS gives us got_revision, but it is based entirely upon the local
    # clock, which means it is unlikely to match the timestamp taken earlier.
    # This might be enough for common use, but won't be good enough for our
    # tests to accept, so pretend it doesn't have got_revision at all.
    has_got_revision = False

    def testCheckout(self):
        return self.do_vctest()

    def testPatch(self):
        return self.do_patch()

    def testCheckoutBranch(self):
        return self.do_branch()

    def testTry(self):
        return self.do_getpatch(doBranch=False)
+
+VCS.registerVC(CVS.vc_name, CVSHelper())
+
+
class SVNHelper(BaseHelper):
    """Repository-management helper for the Subversion tests. Trunk and
    branch live under the single project directory, hence the slashed
    branch names."""
    branchname = "sample/branch"
    try_branchname = "sample/branch"

    def capable(self):
        """Return (ok, excuse) -- or a Deferred of it -- indicating whether
        usable svn and svnadmin binaries are installed."""
        svnpaths = which('svn')
        svnadminpaths = which('svnadmin')
        if not svnpaths:
            return (False, "SVN is not installed")
        if not svnadminpaths:
            return (False, "svnadmin is not installed")
        # we need svn to be compiled with the ra_local access
        # module
        log.msg("running svn --version..")
        env = os.environ.copy()
        env['LC_ALL'] = "C"  # force untranslated output so we can parse it
        d = utils.getProcessOutput(svnpaths[0], ["--version"],
                                   env=env)
        d.addCallback(self._capable, svnpaths[0], svnadminpaths[0])
        return d

    def _capable(self, v, vcexe, svnadmin):
        # substring match deliberately stops at "schem":
        if v.find("handles 'file' schem") != -1:
            # older versions say 'schema', 1.2.0 and beyond say 'scheme'
            self.vcexe = vcexe
            self.svnadmin = svnadmin
            return (True, None)
        excuse = ("%s found but it does not support 'file:' " +
                  "schema, skipping svn tests") % vcexe
        log.msg(excuse)
        return (False, excuse)

    def createRepository(self):
        """Create the SVN repository: import sample/trunk, then 'svn cp' it
        to sample/branch and commit one change there (deferredGenerator).
        Revisions are the integers parsed from the commit output."""
        self.createBasedir()
        self.svnrep = os.path.join(self.repbase,
                                   "SVN-Repository").replace('\\','/')
        tmp = os.path.join(self.repbase, "svntmp")
        if sys.platform == 'win32':
            # On Windows Paths do not start with a /
            self.svnurl = "file:///%s" % self.svnrep
        else:
            self.svnurl = "file://%s" % self.svnrep
        self.svnurl_trunk = self.svnurl + "/sample/trunk"
        self.svnurl_branch = self.svnurl + "/sample/branch"

        w = self.do(self.repbase, [self.svnadmin, "create", self.svnrep])
        yield w; w.getResult()

        self.populate(tmp)
        w = self.dovc(tmp,
                      ['import', '-m', 'sample_project_files', self.svnurl_trunk])
        yield w; out = w.getResult()
        rmdirRecursive(tmp)
        m = re.search(r'Committed revision (\d+)\.', out)
        assert m.group(1) == "1" # first revision is always "1"
        self.addTrunkRev(int(m.group(1)))

        # check the trunk back out (recreating the 'svntmp' path) so it can
        # be switched to the branch
        w = self.dovc(self.repbase,
                      ['checkout', self.svnurl_trunk, 'svntmp'])
        yield w; w.getResult()

        w = self.dovc(tmp, ['cp', '-m' , 'make_branch', self.svnurl_trunk,
                            self.svnurl_branch])
        yield w; w.getResult()
        w = self.dovc(tmp, ['switch', self.svnurl_branch])
        yield w; w.getResult()
        self.populate_branch(tmp)
        w = self.dovc(tmp, ['commit', '-m', 'commit_on_branch'])
        yield w; out = w.getResult()
        rmdirRecursive(tmp)
        m = re.search(r'Committed revision (\d+)\.', out)
        self.addBranchRev(int(m.group(1)))
    createRepository = deferredGenerator(createRepository)

    def vc_revise(self):
        """Commit a new revision of version.c on the trunk (deferredGenerator)."""
        tmp = os.path.join(self.repbase, "svntmp")
        rmdirRecursive(tmp)
        log.msg("vc_revise" + self.svnurl_trunk)
        w = self.dovc(self.repbase,
                      ['checkout', self.svnurl_trunk, 'svntmp'])
        yield w; w.getResult()
        self.version += 1
        version_c = VERSION_C % self.version
        open(os.path.join(tmp, "version.c"), "w").write(version_c)
        w = self.dovc(tmp, ['commit', '-m', 'revised_to_%d' % self.version])
        yield w; out = w.getResult()
        m = re.search(r'Committed revision (\d+)\.', out)
        self.addTrunkRev(int(m.group(1)))
        rmdirRecursive(tmp)
    vc_revise = deferredGenerator(vc_revise)

    def vc_try_checkout(self, workdir, rev, branch=None):
        """Check out trunk (or `branch`) into a fresh `workdir` and modify
        subdir/subdir.c, simulating a developer's tree (deferredGenerator).
        NOTE(review): `rev` is accepted but not passed to svn -- the
        checkout is always of HEAD; confirm against the callers."""
        assert os.path.abspath(workdir) == workdir
        if os.path.exists(workdir):
            rmdirRecursive(workdir)
        if not branch:
            svnurl = self.svnurl_trunk
        else:
            # N.B.: this is *not* os.path.join: SVN URLs use slashes
            # regardless of the host operating system's filepath separator
            svnurl = self.svnurl + "/" + branch
        w = self.dovc(self.repbase,
                      ['checkout', svnurl, workdir])
        yield w; w.getResult()
        open(os.path.join(workdir, "subdir", "subdir.c"), "w").write(TRY_C)
    vc_try_checkout = deferredGenerator(vc_try_checkout)

    def vc_try_finish(self, workdir):
        # discard the developer's try tree
        rmdirRecursive(workdir)
+
+
class SVN(VCBase, unittest.TestCase):
    """Run the generic VC test battery against a Subversion repository."""
    vc_name = "svn"

    metadir = ".svn"
    vctype = "source.SVN"
    vctype_try = "svn"
    has_got_revision = True
    has_got_revision_branches_are_merged = True

    def _use_base_url(self):
        # configure the baseURL/defaultBranch style of vcargs, shared by
        # every test except testCheckout
        self.helper.vcargs = { 'baseURL': self.helper.svnurl + "/",
                               'defaultBranch': "sample/trunk",
                               }

    def testCheckout(self):
        # we verify this one with the svnurl style of vcargs. We test the
        # baseURL/defaultBranch style in testPatch and testCheckoutBranch.
        self.helper.vcargs = { 'svnurl': self.helper.svnurl_trunk }
        return self.do_vctest()

    def testPatch(self):
        self._use_base_url()
        return self.do_patch()

    def testCheckoutBranch(self):
        self._use_base_url()
        return self.do_branch()

    def testTry(self):
        # extract the base revision and patch from a modified tree, use it to
        # create the same contents on the buildslave
        self._use_base_url()
        return self.do_getpatch()

    ## can't test the username= and password= options, because we do not have an
    ## svn repository that requires authentication.
+
+VCS.registerVC(SVN.vc_name, SVNHelper())
+
+
class P4Helper(BaseHelper):
    """Repository-management helper for the Perforce tests: spawns a
    private p4d server on p4port and drives it with the 'p4' client."""
    branchname = "branch"
    p4port = 'localhost:1666'
    pid = None
    # skeleton changelist spec; per-file lines are appended before submit
    base_descr = 'Change: new\nDescription: asdf\nFiles:\n'

    def capable(self):
        """Return (ok, excuse): are both p4 and p4d installed?"""
        p4paths = which('p4')
        p4dpaths = which('p4d')
        if not p4paths:
            return (False, "p4 is not installed")
        if not p4dpaths:
            return (False, "p4d is not installed")
        self.vcexe = p4paths[0]
        self.p4dexe = p4dpaths[0]
        return (True, None)

    class _P4DProtocol(protocol.ProcessProtocol):
        """Watches the spawned p4d process: .started fires when the server
        announces startup, .ended fires when the process exits."""
        def __init__(self):
            self.started = defer.Deferred()
            self.ended = defer.Deferred()

        def outReceived(self, data):
            # When it says starting, it has bound to the socket.
            if self.started:
                #
                # Make sure p4d has started. Newer versions of p4d
                # have more verbose messaging when db files don't exist, so
                # we use re.search instead of startswith.
                #
                if re.search('Perforce Server starting...', data):
                    self.started.callback(None)
                else:
                    print "p4d said %r" % data
                    try:
                        # raise-and-catch so the errback carries a Failure
                        # with a traceback attached
                        raise Exception('p4d said %r' % data)
                    except:
                        self.started.errback(failure.Failure())
                # fire at most once; subsequent output is ignored
                self.started = None

        def errReceived(self, data):
            print "p4d stderr: %s" % data

        def processEnded(self, status_object):
            if status_object.check(error.ProcessDone):
                self.ended.callback(None)
            else:
                self.ended.errback(status_object)

    def _start_p4d(self):
        """Spawn p4d serving self.p4rep on self.p4port; return the
        protocol's (started, ended) Deferreds."""
        proto = self._P4DProtocol()
        reactor.spawnProcess(proto, self.p4dexe, ['p4d', '-p', self.p4port],
                             env=os.environ, path=self.p4rep)
        return proto.started, proto.ended

    def dop4(self, basedir, command, failureIsOk=False, stdin=None):
        """Run a p4 client command with the -p/-d boilerplate prepended."""
        # p4 looks at $PWD instead of getcwd(), which causes confusion when
        # we spawn commands without an intervening shell (sh -c). We can
        # override this with a -d argument.
        command = "-p %s -d %s %s" % (self.p4port, basedir, command)
        return self.dovc(basedir, command, failureIsOk, stdin)

    def createRepository(self):
        """Start p4d and submit two changes: the trunk files (change 1) and
        an integrated branch (change 2). Runs as a deferredGenerator."""
        # this is only called once per VC system, so start p4d here.

        self.createBasedir()
        tmp = os.path.join(self.repbase, "p4tmp")
        self.p4rep = os.path.join(self.repbase, 'P4-Repository')
        os.mkdir(self.p4rep)

        # Launch p4d.
        started, self.p4d_shutdown = self._start_p4d()
        w = waitForDeferred(started)
        yield w; w.getResult()

        # Create client spec.
        os.mkdir(tmp)
        clispec = 'Client: creator\n'
        clispec += 'Root: %s\n' % tmp
        clispec += 'View:\n'
        clispec += '\t//depot/... //creator/...\n'
        w = self.dop4(tmp, 'client -i', stdin=clispec)
        yield w; w.getResult()

        # Create first rev (trunk).
        self.populate(os.path.join(tmp, 'trunk'))
        files = ['main.c', 'version.c', 'subdir/subdir.c']
        w = self.dop4(tmp, "-c creator add "
                      + " ".join(['trunk/%s' % f for f in files]))
        yield w; w.getResult()
        descr = self.base_descr
        for file in files:
            descr += '\t//depot/trunk/%s\n' % file
        w = self.dop4(tmp, "-c creator submit -i", stdin=descr)
        yield w; out = w.getResult()
        m = re.search(r'Change (\d+) submitted.', out)
        assert m.group(1) == '1'
        # note: P4 revisions are kept as strings, not ints
        self.addTrunkRev(m.group(1))

        # Create second rev (branch).
        w = self.dop4(tmp, '-c creator integrate '
                      + '//depot/trunk/... //depot/branch/...')
        yield w; w.getResult()
        w = self.dop4(tmp, "-c creator edit branch/main.c")
        yield w; w.getResult()
        self.populate_branch(os.path.join(tmp, 'branch'))
        descr = self.base_descr
        for file in files:
            descr += '\t//depot/branch/%s\n' % file
        w = self.dop4(tmp, "-c creator submit -i", stdin=descr)
        yield w; out = w.getResult()
        m = re.search(r'Change (\d+) submitted.', out)
        self.addBranchRev(m.group(1))
    createRepository = deferredGenerator(createRepository)

    def vc_revise(self):
        """Submit a new revision of trunk/version.c (deferredGenerator)."""
        tmp = os.path.join(self.repbase, "p4tmp")
        self.version += 1
        version_c = VERSION_C % self.version
        w = self.dop4(tmp, '-c creator edit trunk/version.c')
        yield w; w.getResult()
        open(os.path.join(tmp, "trunk/version.c"), "w").write(version_c)
        descr = self.base_descr + '\t//depot/trunk/version.c\n'
        w = self.dop4(tmp, "-c creator submit -i", stdin=descr)
        yield w; out = w.getResult()
        m = re.search(r'Change (\d+) submitted.', out)
        self.addTrunkRev(m.group(1))
    vc_revise = deferredGenerator(vc_revise)

    def shutdown_p4d(self):
        """Ask the server to stop, then wait for the process to exit."""
        d = self.runCommand(self.repbase, '%s -p %s admin stop'
                            % (self.vcexe, self.p4port))
        return d.addCallback(lambda _: self.p4d_shutdown)
+
class P4(VCBase, unittest.TestCase):
    """Run the generic VC test battery against a private Perforce server."""
    metadir = None
    vctype = "source.P4"
    vc_name = "p4"
    has_got_revision = True

    def _use_depot(self):
        # every test here talks to the same p4d using //depot/ with the
        # 'trunk' default branch
        self.helper.vcargs = { 'p4port': self.helper.p4port,
                               'p4base': '//depot/',
                               'defaultBranch': 'trunk' }

    def tearDownClass(self):
        if self.helper:
            return self.helper.shutdown_p4d()

    def testCheckout(self):
        self._use_depot()
        # TODO: like arch and darcs, sync does nothing when server is not
        # changed.
        return self.do_vctest(testRetry=False)

    def testCheckoutBranch(self):
        self._use_depot()
        return self.do_branch()

    def testPatch(self):
        self._use_depot()
        return self.do_patch()
+
+VCS.registerVC(P4.vc_name, P4Helper())
+
+
class DarcsHelper(BaseHelper):
    """Repository-management helper for the Darcs tests. Trunk and branch
    are two separate darcs repositories under Darcs-Repository/. Revision
    identifiers are the output of 'darcs changes --context'."""
    branchname = "branch"
    try_branchname = "branch"

    def capable(self):
        """Return (ok, excuse): is darcs installed?"""
        darcspaths = which('darcs')
        if not darcspaths:
            return (False, "Darcs is not installed")
        self.vcexe = darcspaths[0]
        return (True, None)

    def createRepository(self):
        """Create trunk and branch repositories: record the initial import,
        push it to trunk, then record one more patch and push the pair to
        the branch repository (deferredGenerator)."""
        self.createBasedir()
        self.darcs_base = os.path.join(self.repbase, "Darcs-Repository")
        self.rep_trunk = os.path.join(self.darcs_base, "trunk")
        self.rep_branch = os.path.join(self.darcs_base, "branch")
        tmp = os.path.join(self.repbase, "darcstmp")

        os.makedirs(self.rep_trunk)
        w = self.dovc(self.rep_trunk, ["initialize"])
        yield w; w.getResult()
        os.makedirs(self.rep_branch)
        w = self.dovc(self.rep_branch, ["initialize"])
        yield w; w.getResult()

        self.populate(tmp)
        w = self.dovc(tmp, qw("initialize"))
        yield w; w.getResult()
        w = self.dovc(tmp, qw("add -r ."))
        yield w; w.getResult()
        w = self.dovc(tmp, qw("record -a -m initial_import --skip-long-comment -A test@buildbot.sf.net"))
        yield w; w.getResult()
        w = self.dovc(tmp, ["push", "-a", self.rep_trunk])
        yield w; w.getResult()
        # the context output serves as the trunk revision identifier
        w = self.dovc(tmp, qw("changes --context"))
        yield w; out = w.getResult()
        self.addTrunkRev(out)

        self.populate_branch(tmp)
        w = self.dovc(tmp, qw("record -a --ignore-times -m commit_on_branch --skip-long-comment -A test@buildbot.sf.net"))
        yield w; w.getResult()
        w = self.dovc(tmp, ["push", "-a", self.rep_branch])
        yield w; w.getResult()
        w = self.dovc(tmp, qw("changes --context"))
        yield w; out = w.getResult()
        self.addBranchRev(out)
        rmdirRecursive(tmp)
    createRepository = deferredGenerator(createRepository)

    def vc_revise(self):
        """Record a new version.c patch and push it to the trunk repository
        (deferredGenerator)."""
        tmp = os.path.join(self.repbase, "darcstmp")
        os.makedirs(tmp)
        w = self.dovc(tmp, qw("initialize"))
        yield w; w.getResult()
        w = self.dovc(tmp, ["pull", "-a", self.rep_trunk])
        yield w; w.getResult()

        self.version += 1
        version_c = VERSION_C % self.version
        open(os.path.join(tmp, "version.c"), "w").write(version_c)
        w = self.dovc(tmp, qw("record -a --ignore-times -m revised_to_%d --skip-long-comment -A test@buildbot.sf.net" % self.version))
        yield w; w.getResult()
        w = self.dovc(tmp, ["push", "-a", self.rep_trunk])
        yield w; w.getResult()
        w = self.dovc(tmp, qw("changes --context"))
        yield w; out = w.getResult()
        self.addTrunkRev(out)
        rmdirRecursive(tmp)
    vc_revise = deferredGenerator(vc_revise)

    def vc_try_checkout(self, workdir, rev, branch=None):
        """Pull trunk (or `branch`) into a fresh `workdir` and modify
        subdir/subdir.c, simulating a developer's tree (deferredGenerator).
        NOTE(review): `rev` is accepted but not used -- the pull always
        fetches everything; confirm against the callers."""
        assert os.path.abspath(workdir) == workdir
        if os.path.exists(workdir):
            rmdirRecursive(workdir)
        os.makedirs(workdir)
        w = self.dovc(workdir, qw("initialize"))
        yield w; w.getResult()
        if not branch:
            rep = self.rep_trunk
        else:
            rep = os.path.join(self.darcs_base, branch)
        w = self.dovc(workdir, ["pull", "-a", rep])
        yield w; w.getResult()
        open(os.path.join(workdir, "subdir", "subdir.c"), "w").write(TRY_C)
    vc_try_checkout = deferredGenerator(vc_try_checkout)

    def vc_try_finish(self, workdir):
        # discard the developer's try tree
        rmdirRecursive(workdir)
+
+
class Darcs(VCBase, unittest.TestCase):
    """Run the generic VC test battery against Darcs repositories."""
    vc_name = "darcs"

    # Darcs has a metadir="_darcs", but it does not have an 'export'
    # mode
    metadir = None
    vctype = "source.Darcs"
    vctype_try = "darcs"
    has_got_revision = True

    def _use_base_url(self):
        # configure the baseURL/defaultBranch style of vcargs
        self.helper.vcargs = { 'baseURL': self.helper.darcs_base + "/",
                               'defaultBranch': "trunk" }

    def testCheckout(self):
        self.helper.vcargs = { 'repourl': self.helper.rep_trunk }
        # TODO: testRetry has the same problem with Darcs as it does for
        # Arch
        return self.do_vctest(testRetry=False)

    def testPatch(self):
        self._use_base_url()
        return self.do_patch()

    def testCheckoutBranch(self):
        self._use_base_url()
        return self.do_branch()

    def testCheckoutHTTP(self):
        self.serveHTTP()
        repourl = "http://localhost:%d/Darcs-Repository/trunk" % self.httpPort
        self.helper.vcargs = { 'repourl': repourl }
        return self.do_vctest(testRetry=False)

    def testTry(self):
        self._use_base_url()
        return self.do_getpatch()
+
+VCS.registerVC(Darcs.vc_name, DarcsHelper())
+
+
class ArchCommon:
    """Mixin with archive (de)registration helpers shared by the tla/baz
    helpers. Both methods are rebound as Twisted deferredGenerators."""
    def registerRepository(self, coordinates):
        """(Re)register archive self.archname to point at `coordinates`,
        dropping any existing registration first."""
        a = self.archname
        w = self.dovc(self.repbase, "archives %s" % a)
        yield w; out = w.getResult()
        if out:
            # already registered (possibly with stale coordinates): drop it
            w = self.dovc(self.repbase, "register-archive -d %s" % a)
            yield w; w.getResult()
        w = self.dovc(self.repbase, "register-archive %s" % coordinates)
        yield w; w.getResult()
    registerRepository = deferredGenerator(registerRepository)

    def unregisterRepository(self):
        """Deregister archive self.archname if it is currently registered."""
        a = self.archname
        w = self.dovc(self.repbase, "archives %s" % a)
        yield w; out = w.getResult()
        if out:
            w = self.dovc(self.repbase, "register-archive -d %s" % a)
            yield w; out = w.getResult()
    unregisterRepository = deferredGenerator(unregisterRepository)
+
class TlaHelper(BaseHelper, ArchCommon):
    """Repository-management helper for GNU Arch ('tla') tests. Also the
    base class for the 'baz' helper, which differs mainly in command
    syntax (see archcmd and do_get)."""
    defaultbranch = "testvc--mainline--1"
    branchname = "testvc--branch--1"
    try_branchname = None # TlaExtractor can figure it out by itself
    archcmd = "tla"

    def capable(self):
        """Return (ok, excuse): is tla installed?"""
        tlapaths = which('tla')
        if not tlapaths:
            return (False, "Arch (tla) is not installed")
        self.vcexe = tlapaths[0]
        return (True, None)

    def do_get(self, basedir, archive, branch, newdir):
        """Issue the correct 'get' command for the current archcmd."""
        # the 'get' syntax is different between tla and baz. baz, while
        # claiming to honor an --archive argument, in fact ignores it. The
        # correct invocation is 'baz get archive/revision newdir'.
        if self.archcmd == "tla":
            w = self.dovc(basedir,
                          "get -A %s %s %s" % (archive, branch, newdir))
        else:
            w = self.dovc(basedir,
                          "get %s/%s %s" % (archive, branch, newdir))
        return w

    def createRepository(self):
        """Entry point: probe for bazaar first, then build the archive in
        _createRepository_1."""
        self.createBasedir()
        # first check to see if bazaar is around, since we'll need to know
        # later
        d = VCS.capable(Bazaar.vc_name)
        d.addCallback(self._createRepository_1)
        return d

    def _createRepository_1(self, res):
        """Build the Arch archive: import a trunk, create a branch from it,
        and commit one change on the branch (deferredGenerator)."""
        has_baz = res[0]

        # pick a hopefully unique string for the archive name, in the form
        # test-%d@buildbot.sf.net--testvc, since otherwise multiple copies of
        # the unit tests run in the same user account will collide (since the
        # archive names are kept in the per-user ~/.arch-params/ directory).
        pid = os.getpid()
        self.archname = "test-%s-%d@buildbot.sf.net--testvc" % (self.archcmd,
                                                                pid)
        trunk = self.defaultbranch
        branch = self.branchname

        repword = self.archcmd.capitalize()
        self.archrep = os.path.join(self.repbase, "%s-Repository" % repword)
        tmp = os.path.join(self.repbase, "archtmp")
        a = self.archname

        self.populate(tmp)

        w = self.dovc(tmp, "my-id", failureIsOk=True)
        yield w; res = w.getResult()
        if not res:
            # tla will fail a lot of operations if you have not set an ID
            w = self.do(tmp, [self.vcexe, "my-id",
                              "Buildbot Test Suite <test@buildbot.sf.net>"])
            yield w; w.getResult()

        if has_baz:
            # bazaar keeps a cache of revisions, but this test creates a new
            # archive each time it is run, so the cache causes errors.
            # Disable the cache to avoid these problems. This will be
            # slightly annoying for people who run the buildbot tests under
            # the same UID as one which uses baz on a regular basis, but
            # bazaar doesn't give us a way to disable the cache just for this
            # one archive.
            cmd = "%s cache-config --disable" % VCS.getHelper('bazaar').vcexe
            w = self.do(tmp, cmd)
            yield w; w.getResult()

        w = waitForDeferred(self.unregisterRepository())
        yield w; w.getResult()

        # these commands can be run in any directory
        w = self.dovc(tmp, "make-archive -l %s %s" % (a, self.archrep))
        yield w; w.getResult()
        if self.archcmd == "tla":
            w = self.dovc(tmp, "archive-setup -A %s %s" % (a, trunk))
            yield w; w.getResult()
            w = self.dovc(tmp, "archive-setup -A %s %s" % (a, branch))
            yield w; w.getResult()
        else:
            # baz does not require an 'archive-setup' step
            pass

        # these commands must be run in the directory that is to be imported
        w = self.dovc(tmp, "init-tree --nested %s/%s" % (a, trunk))
        yield w; w.getResult()
        files = " ".join(["main.c", "version.c", "subdir",
                          os.path.join("subdir", "subdir.c")])
        w = self.dovc(tmp, "add-id %s" % files)
        yield w; w.getResult()

        w = self.dovc(tmp, "import %s/%s" % (a, trunk))
        yield w; out = w.getResult()
        self.addTrunkRev("base-0")

        # create the branch
        if self.archcmd == "tla":
            branchstart = "%s--base-0" % trunk
            w = self.dovc(tmp, "tag -A %s %s %s" % (a, branchstart, branch))
            yield w; w.getResult()
        else:
            w = self.dovc(tmp, "branch %s" % branch)
            yield w; w.getResult()

        rmdirRecursive(tmp)

        # check out the branch (recreating the 'archtmp' path)
        w = self.do_get(self.repbase, a, branch, "archtmp")
        yield w; w.getResult()
        # and edit the file
        self.populate_branch(tmp)
        logfile = "++log.%s--%s" % (branch, a)
        logmsg = "Summary: commit on branch\nKeywords:\n\n"
        open(os.path.join(tmp, logfile), "w").write(logmsg)
        w = self.dovc(tmp, "commit")
        yield w; out = w.getResult()
        m = re.search(r'committed %s/%s--([\S]+)' % (a, branch),
                      out)
        assert (m.group(1) == "base-0" or m.group(1).startswith("patch-"))
        self.addBranchRev(m.group(1))

        w = waitForDeferred(self.unregisterRepository())
        yield w; w.getResult()
        rmdirRecursive(tmp)

        # we unregister the repository each time, because we might have
        # changed the coordinates (since we switch from a file: URL to an
        # http: URL for various tests). The buildslave code doesn't forcibly
        # unregister the archive, so we have to do it here.
        # NOTE(review): the archive was already unregistered a few lines
        # above, so this second call looks like a redundant no-op -- confirm.
        w = waitForDeferred(self.unregisterRepository())
        yield w; w.getResult()

    _createRepository_1 = deferredGenerator(_createRepository_1)

    def vc_revise(self):
        """Commit a new trunk revision of version.c (deferredGenerator),
        restoring whatever archive registration was in effect beforehand."""
        # the fix needs to be done in a workspace that is linked to a
        # read-write version of the archive (i.e., using file-based
        # coordinates instead of HTTP ones), so we re-register the repository
        # before we begin. We unregister it when we're done to make sure the
        # build will re-register the correct one for whichever test is
        # currently being run.

        # except, that source.Bazaar really doesn't like it when the archive
        # gets unregistered behind its back. The slave tries to do a 'baz
        # replay' in a tree with an archive that is no longer recognized, and
        # baz aborts with a botched invariant exception. This causes
        # mode=update to fall back to clobber+get, which flunks one of the
        # tests (the 'newfile' check in _do_vctest_update_3 fails)

        # to avoid this, we take heroic steps here to leave the archive
        # registration in the same state as we found it.

        tmp = os.path.join(self.repbase, "archtmp")
        a = self.archname

        # remember the currently-registered coordinates so they can be
        # restored at the end
        w = self.dovc(self.repbase, "archives %s" % a)
        yield w; out = w.getResult()
        assert out
        lines = out.split("\n")
        coordinates = lines[1].strip()

        # now register the read-write location
        w = waitForDeferred(self.registerRepository(self.archrep))
        yield w; w.getResult()

        trunk = self.defaultbranch

        w = self.do_get(self.repbase, a, trunk, "archtmp")
        yield w; w.getResult()

        # tla appears to use timestamps to determine which files have
        # changed, so wait long enough for the new file to have a different
        # timestamp
        time.sleep(2)
        self.version += 1
        version_c = VERSION_C % self.version
        open(os.path.join(tmp, "version.c"), "w").write(version_c)

        logfile = "++log.%s--%s" % (trunk, a)
        logmsg = "Summary: revised_to_%d\nKeywords:\n\n" % self.version
        open(os.path.join(tmp, logfile), "w").write(logmsg)
        w = self.dovc(tmp, "commit")
        yield w; out = w.getResult()
        m = re.search(r'committed %s/%s--([\S]+)' % (a, trunk),
                      out)
        assert (m.group(1) == "base-0" or m.group(1).startswith("patch-"))
        self.addTrunkRev(m.group(1))

        # now re-register the original coordinates
        w = waitForDeferred(self.registerRepository(coordinates))
        yield w; w.getResult()
        rmdirRecursive(tmp)
    vc_revise = deferredGenerator(vc_revise)

    def vc_try_checkout(self, workdir, rev, branch=None):
        """Make a developer tree in `workdir` and modify subdir/subdir.c
        (deferredGenerator). NOTE(review): `rev` and `branch` are accepted
        but the get is always of testvc--mainline--1 -- confirm against the
        callers."""
        assert os.path.abspath(workdir) == workdir
        if os.path.exists(workdir):
            rmdirRecursive(workdir)

        a = self.archname

        # register the read-write location, if it wasn't already registered
        w = waitForDeferred(self.registerRepository(self.archrep))
        yield w; w.getResult()

        w = self.do_get(self.repbase, a, "testvc--mainline--1", workdir)
        yield w; w.getResult()

        # timestamps. ick.
        time.sleep(2)
        open(os.path.join(workdir, "subdir", "subdir.c"), "w").write(TRY_C)
    vc_try_checkout = deferredGenerator(vc_try_checkout)

    def vc_try_finish(self, workdir):
        # discard the developer's try tree
        rmdirRecursive(workdir)
+
class Arch(VCBase, unittest.TestCase):
    """Run the generic VC test battery against a GNU Arch (tla) archive."""
    vc_name = "tla"

    metadir = None
    # Arch has a metadir="{arch}", but it does not have an 'export' mode.
    vctype = "source.Arch"
    vctype_try = "tla"
    has_got_revision = True

    def _use_archrep(self):
        # these are the coordinates of the read-write archive used by all the
        # non-HTTP tests. testCheckoutHTTP overrides these.
        self.helper.vcargs = {'url': self.helper.archrep,
                              'version': self.helper.defaultbranch }

    def testCheckout(self):
        self._use_archrep()
        # the current testRetry=True logic doesn't have the desired effect:
        # "update" is a no-op because arch knows that the repository hasn't
        # changed. Other VC systems will re-checkout missing files on
        # update, arch just leaves the tree untouched. TODO: come up with
        # some better test logic, probably involving a copy of the
        # repository that has a few changes checked in.
        return self.do_vctest(testRetry=False)

    def testCheckoutHTTP(self):
        self.serveHTTP()
        url = "http://localhost:%d/Tla-Repository" % self.httpPort
        self.helper.vcargs = { 'url': url,
                               'version': "testvc--mainline--1" }
        return self.do_vctest(testRetry=False)

    def testPatch(self):
        self._use_archrep()
        return self.do_patch()

    def testCheckoutBranch(self):
        self._use_archrep()
        return self.do_branch()

    def testTry(self):
        self._use_archrep()
        return self.do_getpatch()
+
+VCS.registerVC(Arch.vc_name, TlaHelper())
+
+
+class BazaarHelper(TlaHelper):
+ archcmd = "baz"
+
+ def capable(self):
+ bazpaths = which('baz')
+ if not bazpaths:
+ return (False, "Arch (baz) is not installed")
+ self.vcexe = bazpaths[0]
+ return (True, None)
+
+ def setUp2(self, res):
+ # we unregister the repository each time, because we might have
+ # changed the coordinates (since we switch from a file: URL to an
+ # http: URL for various tests). The buildslave code doesn't forcibly
+ # unregister the archive, so we have to do it here.
+ d = self.unregisterRepository()
+ return d
+
+
+class Bazaar(Arch):
+ vc_name = "bazaar"
+
+ vctype = "source.Bazaar"
+ vctype_try = "baz"
+ has_got_revision = True
+
+ fixtimer = None
+
+ def testCheckout(self):
+ self.helper.vcargs = {'url': self.helper.archrep,
+ # Baz adds the required 'archive' argument
+ 'archive': self.helper.archname,
+ 'version': self.helper.defaultbranch,
+ }
+ d = self.do_vctest(testRetry=False)
+ # the current testRetry=True logic doesn't have the desired effect:
+ # "update" is a no-op because arch knows that the repository hasn't
+ # changed. Other VC systems will re-checkout missing files on
+ # update, arch just leaves the tree untouched. TODO: come up with
+ # some better test logic, probably involving a copy of the
+ # repository that has a few changes checked in.
+
+ return d
+
+ def testCheckoutHTTP(self):
+ self.serveHTTP()
+ url = "http://localhost:%d/Baz-Repository" % self.httpPort
+ self.helper.vcargs = { 'url': url,
+ 'archive': self.helper.archname,
+ 'version': self.helper.defaultbranch,
+ }
+ d = self.do_vctest(testRetry=False)
+ return d
+
+ def testPatch(self):
+ self.helper.vcargs = {'url': self.helper.archrep,
+ # Baz adds the required 'archive' argument
+ 'archive': self.helper.archname,
+ 'version': self.helper.defaultbranch,
+ }
+ d = self.do_patch()
+ return d
+
+ def testCheckoutBranch(self):
+ self.helper.vcargs = {'url': self.helper.archrep,
+ # Baz adds the required 'archive' argument
+ 'archive': self.helper.archname,
+ 'version': self.helper.defaultbranch,
+ }
+ d = self.do_branch()
+ return d
+
+ def testTry(self):
+ self.helper.vcargs = {'url': self.helper.archrep,
+ # Baz adds the required 'archive' argument
+ 'archive': self.helper.archname,
+ 'version': self.helper.defaultbranch,
+ }
+ d = self.do_getpatch()
+ return d
+
+ def fixRepository(self):
+ self.fixtimer = None
+ self.site.resource = self.root
+
+ def testRetry(self):
+ # we want to verify that source.Source(retry=) works, and the easiest
+ # way to make VC updates break (temporarily) is to break the HTTP
+ # server that's providing the repository. Anything else pretty much
+ # requires mutating the (read-only) BUILDBOT_TEST_VC repository, or
+ # modifying the buildslave's checkout command while it's running.
+
+ # this test takes a while to run, so don't bother doing it with
+ # anything other than baz
+
+ self.serveHTTP()
+
+ # break the repository server
+ from twisted.web import static
+ self.site.resource = static.Data("Sorry, repository is offline",
+ "text/plain")
+ # and arrange to fix it again in 5 seconds, while the test is
+ # running.
+ self.fixtimer = reactor.callLater(5, self.fixRepository)
+
+ url = "http://localhost:%d/Baz-Repository" % self.httpPort
+ self.helper.vcargs = { 'url': url,
+ 'archive': self.helper.archname,
+ 'version': self.helper.defaultbranch,
+ 'retry': (5.0, 4),
+ }
+ d = self.do_vctest_once(True)
+ d.addCallback(self._testRetry_1)
+ return d
+ def _testRetry_1(self, bs):
+ # make sure there was mention of the retry attempt in the logs
+ l = bs.getLogs()[0]
+ self.failUnlessIn("unable to access URL", l.getText(),
+ "funny, VC operation didn't fail at least once")
+ self.failUnlessIn("update failed, trying 4 more times after 5 seconds",
+ l.getTextWithHeaders(),
+ "funny, VC operation wasn't reattempted")
+
+ def testRetryFails(self):
+ # make sure that the build eventually gives up on a repository which
+ # is completely unavailable
+
+ self.serveHTTP()
+
+ # break the repository server, and leave it broken
+ from twisted.web import static
+ self.site.resource = static.Data("Sorry, repository is offline",
+ "text/plain")
+
+ url = "http://localhost:%d/Baz-Repository" % self.httpPort
+ self.helper.vcargs = {'url': url,
+ 'archive': self.helper.archname,
+ 'version': self.helper.defaultbranch,
+ 'retry': (0.5, 3),
+ }
+ d = self.do_vctest_once(False)
+ d.addCallback(self._testRetryFails_1)
+ return d
+ def _testRetryFails_1(self, bs):
+ self.failUnlessEqual(bs.getResults(), FAILURE)
+
+ def tearDown2(self):
+ if self.fixtimer:
+ self.fixtimer.cancel()
+ # tell tla to get rid of the leftover archive this test leaves in the
+ # user's 'tla archives' listing. The name of this archive is provided
+ # by the repository tarball, so the following command must use the
+ # same name. We could use archive= to set it explicitly, but if you
+ # change it from the default, then 'tla update' won't work.
+ d = self.helper.unregisterRepository()
+ return d
+
+VCS.registerVC(Bazaar.vc_name, BazaarHelper())
+
+class BzrHelper(BaseHelper):
+ branchname = "branch"
+ try_branchname = "branch"
+
+ def capable(self):
+ bzrpaths = which('bzr')
+ if not bzrpaths:
+ return (False, "bzr is not installed")
+ self.vcexe = bzrpaths[0]
+ return (True, None)
+
+ def get_revision_number(self, out):
+ for line in out.split("\n"):
+ colon = line.index(":")
+ key, value = line[:colon], line[colon+2:]
+ if key == "revno":
+ return int(value)
+ raise RuntimeError("unable to find revno: in bzr output: '%s'" % out)
+
+ def createRepository(self):
+ self.createBasedir()
+ self.bzr_base = os.path.join(self.repbase, "Bzr-Repository")
+ self.rep_trunk = os.path.join(self.bzr_base, "trunk")
+ self.rep_branch = os.path.join(self.bzr_base, "branch")
+ tmp = os.path.join(self.repbase, "bzrtmp")
+ btmp = os.path.join(self.repbase, "bzrtmp-branch")
+
+ os.makedirs(self.rep_trunk)
+ w = self.dovc(self.rep_trunk, ["init"])
+ yield w; w.getResult()
+ w = self.dovc(self.bzr_base,
+ ["branch", self.rep_trunk, self.rep_branch])
+ yield w; w.getResult()
+
+ w = self.dovc(self.repbase, ["checkout", self.rep_trunk, tmp])
+ yield w; w.getResult()
+ self.populate(tmp)
+ w = self.dovc(tmp, qw("add"))
+ yield w; w.getResult()
+ w = self.dovc(tmp, qw("commit -m initial_import"))
+ yield w; w.getResult()
+ w = self.dovc(tmp, qw("version-info"))
+ yield w; out = w.getResult()
+ self.addTrunkRev(self.get_revision_number(out))
+ rmdirRecursive(tmp)
+
+ # pull all trunk revisions to the branch
+ w = self.dovc(self.rep_branch, qw("pull"))
+ yield w; w.getResult()
+ # obtain a branch tree
+ w = self.dovc(self.repbase, ["checkout", self.rep_branch, btmp])
+ yield w; w.getResult()
+ # modify it
+ self.populate_branch(btmp)
+ w = self.dovc(btmp, qw("add"))
+ yield w; w.getResult()
+ w = self.dovc(btmp, qw("commit -m commit_on_branch"))
+ yield w; w.getResult()
+ w = self.dovc(btmp, qw("version-info"))
+ yield w; out = w.getResult()
+ self.addBranchRev(self.get_revision_number(out))
+ rmdirRecursive(btmp)
+ createRepository = deferredGenerator(createRepository)
+
+ def vc_revise(self):
+ tmp = os.path.join(self.repbase, "bzrtmp")
+ w = self.dovc(self.repbase, ["checkout", self.rep_trunk, tmp])
+ yield w; w.getResult()
+
+ self.version += 1
+ version_c = VERSION_C % self.version
+ open(os.path.join(tmp, "version.c"), "w").write(version_c)
+ w = self.dovc(tmp, qw("commit -m revised_to_%d" % self.version))
+ yield w; w.getResult()
+ w = self.dovc(tmp, qw("version-info"))
+ yield w; out = w.getResult()
+ self.addTrunkRev(self.get_revision_number(out))
+ rmdirRecursive(tmp)
+ vc_revise = deferredGenerator(vc_revise)
+
+ def vc_try_checkout(self, workdir, rev, branch=None):
+ assert os.path.abspath(workdir) == workdir
+ if os.path.exists(workdir):
+ rmdirRecursive(workdir)
+ #os.makedirs(workdir)
+ if not branch:
+ rep = self.rep_trunk
+ else:
+ rep = os.path.join(self.bzr_base, branch)
+ w = self.dovc(self.bzr_base, ["checkout", rep, workdir])
+ yield w; w.getResult()
+ open(os.path.join(workdir, "subdir", "subdir.c"), "w").write(TRY_C)
+ vc_try_checkout = deferredGenerator(vc_try_checkout)
+
+ def vc_try_finish(self, workdir):
+ rmdirRecursive(workdir)
+
+class Bzr(VCBase, unittest.TestCase):
+ vc_name = "bzr"
+
+ metadir = ".bzr"
+ vctype = "source.Bzr"
+ vctype_try = "bzr"
+ has_got_revision = True
+
+ def testCheckout(self):
+ self.helper.vcargs = { 'repourl': self.helper.rep_trunk }
+ d = self.do_vctest(testRetry=False)
+
+ # TODO: testRetry has the same problem with Bzr as it does for
+ # Arch
+ return d
+
+ def testPatch(self):
+ self.helper.vcargs = { 'baseURL': self.helper.bzr_base + "/",
+ 'defaultBranch': "trunk" }
+ d = self.do_patch()
+ return d
+
+ def testCheckoutBranch(self):
+ self.helper.vcargs = { 'baseURL': self.helper.bzr_base + "/",
+ 'defaultBranch': "trunk" }
+ d = self.do_branch()
+ return d
+
+ def testCheckoutHTTP(self):
+ self.serveHTTP()
+ repourl = "http://localhost:%d/Bzr-Repository/trunk" % self.httpPort
+ self.helper.vcargs = { 'repourl': repourl }
+ d = self.do_vctest(testRetry=False)
+ return d
+
+
+ def fixRepository(self):
+ self.fixtimer = None
+ self.site.resource = self.root
+
+ def testRetry(self):
+ # this test takes a while to run
+ self.serveHTTP()
+
+ # break the repository server
+ from twisted.web import static
+ self.site.resource = static.Data("Sorry, repository is offline",
+ "text/plain")
+ # and arrange to fix it again in 5 seconds, while the test is
+ # running.
+ self.fixtimer = reactor.callLater(5, self.fixRepository)
+
+ repourl = "http://localhost:%d/Bzr-Repository/trunk" % self.httpPort
+ self.helper.vcargs = { 'repourl': repourl,
+ 'retry': (5.0, 4),
+ }
+ d = self.do_vctest_once(True)
+ d.addCallback(self._testRetry_1)
+ return d
+ def _testRetry_1(self, bs):
+ # make sure there was mention of the retry attempt in the logs
+ l = bs.getLogs()[0]
+ self.failUnlessIn("ERROR: Not a branch: ", l.getText(),
+ "funny, VC operation didn't fail at least once")
+ self.failUnlessIn("update failed, trying 4 more times after 5 seconds",
+ l.getTextWithHeaders(),
+ "funny, VC operation wasn't reattempted")
+
+ def testRetryFails(self):
+ # make sure that the build eventually gives up on a repository which
+ # is completely unavailable
+
+ self.serveHTTP()
+
+ # break the repository server, and leave it broken
+ from twisted.web import static
+ self.site.resource = static.Data("Sorry, repository is offline",
+ "text/plain")
+
+ repourl = "http://localhost:%d/Bzr-Repository/trunk" % self.httpPort
+ self.helper.vcargs = { 'repourl': repourl,
+ 'retry': (0.5, 3),
+ }
+ d = self.do_vctest_once(False)
+ d.addCallback(self._testRetryFails_1)
+ return d
+ def _testRetryFails_1(self, bs):
+ self.failUnlessEqual(bs.getResults(), FAILURE)
+
+
+ def testTry(self):
+ self.helper.vcargs = { 'baseURL': self.helper.bzr_base + "/",
+ 'defaultBranch': "trunk" }
+ d = self.do_getpatch()
+ return d
+
+VCS.registerVC(Bzr.vc_name, BzrHelper())
+
+
+class MercurialHelper(BaseHelper):
+ branchname = "branch"
+ try_branchname = "branch"
+
+ def capable(self):
+ hgpaths = which("hg")
+ if not hgpaths:
+ return (False, "Mercurial is not installed")
+ self.vcexe = hgpaths[0]
+ return (True, None)
+
+ def extract_id(self, output):
+ m = re.search(r'^(\w+)', output)
+ return m.group(0)
+
+ def createRepository(self):
+ self.createBasedir()
+ self.hg_base = os.path.join(self.repbase, "Mercurial-Repository")
+ self.rep_trunk = os.path.join(self.hg_base, "trunk")
+ self.rep_branch = os.path.join(self.hg_base, "branch")
+ tmp = os.path.join(self.hg_base, "hgtmp")
+
+ os.makedirs(self.rep_trunk)
+ w = self.dovc(self.rep_trunk, "init")
+ yield w; w.getResult()
+ os.makedirs(self.rep_branch)
+ w = self.dovc(self.rep_branch, "init")
+ yield w; w.getResult()
+
+ self.populate(tmp)
+ w = self.dovc(tmp, "init")
+ yield w; w.getResult()
+ w = self.dovc(tmp, "add")
+ yield w; w.getResult()
+ w = self.dovc(tmp, ['commit', '-m', 'initial_import'])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ['push', self.rep_trunk])
+ # note that hg-push does not actually update the working directory
+ yield w; w.getResult()
+ w = self.dovc(tmp, "identify")
+ yield w; out = w.getResult()
+ self.addTrunkRev(self.extract_id(out))
+
+ self.populate_branch(tmp)
+ w = self.dovc(tmp, ['commit', '-m', 'commit_on_branch'])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ['push', self.rep_branch])
+ yield w; w.getResult()
+ w = self.dovc(tmp, "identify")
+ yield w; out = w.getResult()
+ self.addBranchRev(self.extract_id(out))
+ rmdirRecursive(tmp)
+ createRepository = deferredGenerator(createRepository)
+
+ def vc_revise(self):
+ tmp = os.path.join(self.hg_base, "hgtmp2")
+ w = self.dovc(self.hg_base, ['clone', self.rep_trunk, tmp])
+ yield w; w.getResult()
+
+ self.version += 1
+ version_c = VERSION_C % self.version
+ version_c_filename = os.path.join(tmp, "version.c")
+ open(version_c_filename, "w").write(version_c)
+ # hg uses timestamps to distinguish files which have changed, so we
+ # force the mtime forward a little bit
+ future = time.time() + 2*self.version
+ os.utime(version_c_filename, (future, future))
+ w = self.dovc(tmp, ['commit', '-m', 'revised_to_%d' % self.version])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ['push', self.rep_trunk])
+ yield w; w.getResult()
+ w = self.dovc(tmp, "identify")
+ yield w; out = w.getResult()
+ self.addTrunkRev(self.extract_id(out))
+ rmdirRecursive(tmp)
+ vc_revise = deferredGenerator(vc_revise)
+
+ def vc_try_checkout(self, workdir, rev, branch=None):
+ assert os.path.abspath(workdir) == workdir
+ if os.path.exists(workdir):
+ rmdirRecursive(workdir)
+ if branch:
+ src = self.rep_branch
+ else:
+ src = self.rep_trunk
+ w = self.dovc(self.hg_base, ['clone', src, workdir])
+ yield w; w.getResult()
+ try_c_filename = os.path.join(workdir, "subdir", "subdir.c")
+ open(try_c_filename, "w").write(TRY_C)
+ future = time.time() + 2*self.version
+ os.utime(try_c_filename, (future, future))
+ vc_try_checkout = deferredGenerator(vc_try_checkout)
+
+ def vc_try_finish(self, workdir):
+ rmdirRecursive(workdir)
+
+class MercurialServerPP(protocol.ProcessProtocol):
+ def __init__(self):
+ self.wait = defer.Deferred()
+
+ def outReceived(self, data):
+ log.msg("hg-serve-stdout: %s" % (data,))
+ def errReceived(self, data):
+ print "HG-SERVE-STDERR:", data
+ log.msg("hg-serve-stderr: %s" % (data,))
+ def processEnded(self, reason):
+ log.msg("hg-serve ended: %s" % reason)
+ self.wait.callback(None)
+
+
+class Mercurial(VCBase, unittest.TestCase):
+ vc_name = "hg"
+
+ # Mercurial has a metadir=".hg", but it does not have an 'export' mode.
+ metadir = None
+ vctype = "source.Mercurial"
+ vctype_try = "hg"
+ has_got_revision = True
+ _hg_server = None
+ _wait_for_server_poller = None
+ _pp = None
+
+ def testCheckout(self):
+ self.helper.vcargs = { 'repourl': self.helper.rep_trunk }
+ d = self.do_vctest(testRetry=False)
+
+ # TODO: testRetry has the same problem with Mercurial as it does for
+ # Arch
+ return d
+
+ def testPatch(self):
+ self.helper.vcargs = { 'baseURL': self.helper.hg_base + "/",
+ 'defaultBranch': "trunk" }
+ d = self.do_patch()
+ return d
+
+ def testCheckoutBranch(self):
+ self.helper.vcargs = { 'baseURL': self.helper.hg_base + "/",
+ 'defaultBranch': "trunk" }
+ d = self.do_branch()
+ return d
+
+ def serveHTTP(self):
+ # the easiest way to publish hg over HTTP is by running 'hg serve' as
+ # a child process while the test is running. (you can also use a CGI
+ # script, which sounds difficult, or you can publish the files
+ # directly, which isn't well documented).
+
+ # grr.. 'hg serve' doesn't let you use --port=0 to mean "pick a free
+ # port", instead it uses it as a signal to use the default (port
+ # 8000). This means there is no way to make it choose a free port, so
+ # we are forced to make it use a statically-defined one, making it
+ # harder to avoid collisions.
+ self.httpPort = 8300 + (os.getpid() % 200)
+ args = [self.helper.vcexe,
+ "serve", "--port", str(self.httpPort), "--verbose"]
+
+ # in addition, hg doesn't flush its stdout, so we can't wait for the
+ # "listening at" message to know when it's safe to start the test.
+        # Instead, poll every half-second until a getPage works.
+
+ self._pp = MercurialServerPP() # logs+discards everything
+
+ # this serves one tree at a time, so we serve trunk. TODO: test hg's
+ # in-repo branches, for which a single tree will hold all branches.
+ self._hg_server = reactor.spawnProcess(self._pp, self.helper.vcexe, args,
+ os.environ,
+ self.helper.rep_trunk)
+ log.msg("waiting for hg serve to start")
+ done_d = defer.Deferred()
+ def poll():
+ d = client.getPage("http://localhost:%d/" % self.httpPort)
+ def success(res):
+ log.msg("hg serve appears to have started")
+ self._wait_for_server_poller.stop()
+ done_d.callback(None)
+ def ignore_connection_refused(f):
+ f.trap(error.ConnectionRefusedError)
+ d.addCallbacks(success, ignore_connection_refused)
+ d.addErrback(done_d.errback)
+ return d
+ self._wait_for_server_poller = task.LoopingCall(poll)
+ self._wait_for_server_poller.start(0.5, True)
+ return done_d
+
+ def tearDown(self):
+ if self._wait_for_server_poller:
+ if self._wait_for_server_poller.running:
+ self._wait_for_server_poller.stop()
+ if self._hg_server:
+ self._hg_server.loseConnection()
+ try:
+ self._hg_server.signalProcess("KILL")
+ except error.ProcessExitedAlready:
+ pass
+ self._hg_server = None
+ return VCBase.tearDown(self)
+
+ def tearDown2(self):
+ if self._pp:
+ return self._pp.wait
+
+ def testCheckoutHTTP(self):
+ d = self.serveHTTP()
+ def _started(res):
+ repourl = "http://localhost:%d/" % self.httpPort
+ self.helper.vcargs = { 'repourl': repourl }
+ return self.do_vctest(testRetry=False)
+ d.addCallback(_started)
+ return d
+
+ def testTry(self):
+ self.helper.vcargs = { 'baseURL': self.helper.hg_base + "/",
+ 'defaultBranch': "trunk" }
+ d = self.do_getpatch()
+ return d
+
+VCS.registerVC(Mercurial.vc_name, MercurialHelper())
+
+class MercurialInRepoHelper(MercurialHelper):
+ branchname = "the_branch"
+ try_branchname = "the_branch"
+
+
+ def createRepository(self):
+ self.createBasedir()
+ self.hg_base = os.path.join(self.repbase, "Mercurial-Repository")
+ self.repo = os.path.join(self.hg_base, "inrepobranch")
+ tmp = os.path.join(self.hg_base, "hgtmp")
+
+ os.makedirs(self.repo)
+ w = self.dovc(self.repo, "init")
+ yield w; w.getResult()
+
+ self.populate(tmp)
+ w = self.dovc(tmp, "init")
+ yield w; w.getResult()
+ w = self.dovc(tmp, "add")
+ yield w; w.getResult()
+ w = self.dovc(tmp, ['commit', '-m', 'initial_import'])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ['push', self.repo])
+ # note that hg-push does not actually update the working directory
+ yield w; w.getResult()
+ w = self.dovc(tmp, "identify")
+ yield w; out = w.getResult()
+ self.addTrunkRev(self.extract_id(out))
+
+ self.populate_branch(tmp)
+ w = self.dovc(tmp, ['branch', self.branchname])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ['commit', '-m', 'commit_on_branch'])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ['push', '-f', self.repo])
+ yield w; w.getResult()
+ w = self.dovc(tmp, "identify")
+ yield w; out = w.getResult()
+ self.addBranchRev(self.extract_id(out))
+ rmdirRecursive(tmp)
+ createRepository = deferredGenerator(createRepository)
+
+ def vc_revise(self):
+ tmp = os.path.join(self.hg_base, "hgtmp2")
+ w = self.dovc(self.hg_base, ['clone', self.repo, tmp])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ['update', '--clean', '--rev', 'default'])
+ yield w; w.getResult()
+
+ self.version += 1
+ version_c = VERSION_C % self.version
+ version_c_filename = os.path.join(tmp, "version.c")
+ open(version_c_filename, "w").write(version_c)
+ # hg uses timestamps to distinguish files which have changed, so we
+ # force the mtime forward a little bit
+ future = time.time() + 2*self.version
+ os.utime(version_c_filename, (future, future))
+ w = self.dovc(tmp, ['commit', '-m', 'revised_to_%d' % self.version])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ['push', '--force', self.repo])
+ yield w; w.getResult()
+ w = self.dovc(tmp, "identify")
+ yield w; out = w.getResult()
+ self.addTrunkRev(self.extract_id(out))
+ rmdirRecursive(tmp)
+ vc_revise = deferredGenerator(vc_revise)
+
+ def vc_try_checkout(self, workdir, rev, branch=None):
+ assert os.path.abspath(workdir) == workdir
+ if os.path.exists(workdir):
+ rmdirRecursive(workdir)
+ w = self.dovc(self.hg_base, ['clone', self.repo, workdir])
+ yield w; w.getResult()
+ if not branch: branch = "default"
+ w = self.dovc(workdir, ['update', '--clean', '--rev', branch ])
+ yield w; w.getResult()
+
+ try_c_filename = os.path.join(workdir, "subdir", "subdir.c")
+ open(try_c_filename, "w").write(TRY_C)
+ future = time.time() + 2*self.version
+ os.utime(try_c_filename, (future, future))
+ vc_try_checkout = deferredGenerator(vc_try_checkout)
+
+ def vc_try_finish(self, workdir):
+ rmdirRecursive(workdir)
+ pass
+
+
+class MercurialInRepo(Mercurial):
+ vc_name = 'MercurialInRepo'
+
+ def default_args(self):
+ return { 'repourl': self.helper.repo,
+ 'branchType': 'inrepo',
+ 'defaultBranch': 'default' }
+
+ def testCheckout(self):
+ self.helper.vcargs = self.default_args()
+ d = self.do_vctest(testRetry=False)
+
+ # TODO: testRetry has the same problem with Mercurial as it does for
+ # Arch
+ return d
+
+ def testPatch(self):
+ self.helper.vcargs = self.default_args()
+ d = self.do_patch()
+ return d
+
+ def testCheckoutBranch(self):
+ self.helper.vcargs = self.default_args()
+ d = self.do_branch()
+ return d
+
+ def serveHTTP(self):
+ # the easiest way to publish hg over HTTP is by running 'hg serve' as
+ # a child process while the test is running. (you can also use a CGI
+ # script, which sounds difficult, or you can publish the files
+ # directly, which isn't well documented).
+
+ # grr.. 'hg serve' doesn't let you use --port=0 to mean "pick a free
+ # port", instead it uses it as a signal to use the default (port
+ # 8000). This means there is no way to make it choose a free port, so
+ # we are forced to make it use a statically-defined one, making it
+ # harder to avoid collisions.
+ self.httpPort = 8300 + (os.getpid() % 200)
+ args = [self.helper.vcexe,
+ "serve", "--port", str(self.httpPort), "--verbose"]
+
+ # in addition, hg doesn't flush its stdout, so we can't wait for the
+ # "listening at" message to know when it's safe to start the test.
+        # Instead, poll every half-second until a getPage works.
+
+ self._pp = MercurialServerPP() # logs+discards everything
+        # unlike the parent class (which serves trunk), this serves the
+        # in-repo-branch repository, whose single tree holds all branches.
+ self._hg_server = reactor.spawnProcess(self._pp, self.helper.vcexe, args,
+ os.environ,
+ self.helper.repo)
+ log.msg("waiting for hg serve to start")
+ done_d = defer.Deferred()
+ def poll():
+ d = client.getPage("http://localhost:%d/" % self.httpPort)
+ def success(res):
+ log.msg("hg serve appears to have started")
+ self._wait_for_server_poller.stop()
+ done_d.callback(None)
+ def ignore_connection_refused(f):
+ f.trap(error.ConnectionRefusedError)
+ d.addCallbacks(success, ignore_connection_refused)
+ d.addErrback(done_d.errback)
+ return d
+ self._wait_for_server_poller = task.LoopingCall(poll)
+ self._wait_for_server_poller.start(0.5, True)
+ return done_d
+
+ def tearDown(self):
+ if self._wait_for_server_poller:
+ if self._wait_for_server_poller.running:
+ self._wait_for_server_poller.stop()
+ if self._hg_server:
+ self._hg_server.loseConnection()
+ try:
+ self._hg_server.signalProcess("KILL")
+ except error.ProcessExitedAlready:
+ pass
+ self._hg_server = None
+ return VCBase.tearDown(self)
+
+ def tearDown2(self):
+ if self._pp:
+ return self._pp.wait
+
+ def testCheckoutHTTP(self):
+ d = self.serveHTTP()
+ def _started(res):
+ repourl = "http://localhost:%d/" % self.httpPort
+ self.helper.vcargs = self.default_args()
+ self.helper.vcargs['repourl'] = repourl
+ return self.do_vctest(testRetry=False)
+ d.addCallback(_started)
+ return d
+
+ def testTry(self):
+ self.helper.vcargs = self.default_args()
+ d = self.do_getpatch()
+ return d
+
+VCS.registerVC(MercurialInRepo.vc_name, MercurialInRepoHelper())
+
+
+class GitHelper(BaseHelper):
+ branchname = "branch"
+ try_branchname = "branch"
+
+ def capable(self):
+ gitpaths = which('git')
+ if not gitpaths:
+ return (False, "GIT is not installed")
+ d = utils.getProcessOutput(gitpaths[0], ["--version"], env=os.environ)
+ d.addCallback(self._capable, gitpaths[0])
+ return d
+
+ def _capable(self, v, vcexe):
+ try:
+ m = re.search(r'\b(\d+)\.(\d+)', v)
+
+ if not m:
+ raise Exception, 'no regex match'
+
+ ver = tuple([int(num) for num in m.groups()])
+
+ # git-1.1.3 (as shipped with Dapper) doesn't understand 'git
+ # init' (it wants 'git init-db'), and fails unit tests that
+ # involve branches. git-1.5.3.6 (on my debian/unstable system)
+ # works. I don't know where the dividing point is: if someone can
+ # figure it out (or figure out how to make buildbot support more
+ # versions), please update this check.
+ if ver < (1, 2):
+ return (False, "Found git (%s) but it is older than 1.2.x" % vcexe)
+
+ except Exception, e:
+ log.msg("couldn't identify git version number in output:")
+ log.msg("'''%s'''" % v)
+ log.msg("because: %s" % e)
+ log.msg("skipping tests")
+ return (False,
+ "Found git (%s) but couldn't identify its version from '%s'" % (vcexe, v))
+
+ self.vcexe = vcexe
+ return (True, None)
+
+ def createRepository(self):
+ self.createBasedir()
+ self.gitrepo = os.path.join(self.repbase,
+ "GIT-Repository")
+ tmp = os.path.join(self.repbase, "gittmp")
+
+ env = os.environ.copy()
+ env['GIT_DIR'] = self.gitrepo
+ w = self.dovc(self.repbase, "init", env=env)
+ yield w; w.getResult()
+
+ self.populate(tmp)
+ w = self.dovc(tmp, "init")
+ yield w; w.getResult()
+ w = self.dovc(tmp, ["add", "."])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ["config", "user.email", "buildbot-trial@localhost"])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ["config", "user.name", "Buildbot Trial"])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ["commit", "-m", "initial_import"])
+ yield w; w.getResult()
+
+ w = self.dovc(tmp, ["checkout", "-b", self.branchname])
+ yield w; w.getResult()
+ self.populate_branch(tmp)
+ w = self.dovc(tmp, ["commit", "-a", "-m", "commit_on_branch"])
+ yield w; w.getResult()
+
+ w = self.dovc(tmp, ["rev-parse", "master", self.branchname])
+ yield w; out = w.getResult()
+ revs = out.splitlines()
+ self.addTrunkRev(revs[0])
+ self.addBranchRev(revs[1])
+
+ w = self.dovc(tmp, ["push", self.gitrepo, "master", self.branchname])
+ yield w; w.getResult()
+
+ rmdirRecursive(tmp)
+ createRepository = deferredGenerator(createRepository)
+
+ def vc_revise(self):
+ tmp = os.path.join(self.repbase, "gittmp")
+ rmdirRecursive(tmp)
+ log.msg("vc_revise" + self.gitrepo)
+ w = self.dovc(self.repbase, ["clone", self.gitrepo, "gittmp"])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ["config", "user.email", "buildbot-trial@localhost"])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ["config", "user.name", "Buildbot Trial"])
+ yield w; w.getResult()
+
+ self.version += 1
+ version_c = VERSION_C % self.version
+ open(os.path.join(tmp, "version.c"), "w").write(version_c)
+
+ w = self.dovc(tmp, ["commit", "-m", "revised_to_%d" % self.version,
+ "version.c"])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ["rev-parse", "master"])
+ yield w; out = w.getResult()
+ self.addTrunkRev(out.strip())
+
+ w = self.dovc(tmp, ["push", self.gitrepo, "master"])
+ yield w; out = w.getResult()
+ rmdirRecursive(tmp)
+ vc_revise = deferredGenerator(vc_revise)
+
+ def vc_try_checkout(self, workdir, rev, branch=None):
+ assert os.path.abspath(workdir) == workdir
+ if os.path.exists(workdir):
+ rmdirRecursive(workdir)
+
+ w = self.dovc(self.repbase, ["clone", self.gitrepo, workdir])
+ yield w; w.getResult()
+ w = self.dovc(workdir, ["config", "user.email", "buildbot-trial@localhost"])
+ yield w; w.getResult()
+ w = self.dovc(workdir, ["config", "user.name", "Buildbot Trial"])
+ yield w; w.getResult()
+
+ if branch is not None:
+ w = self.dovc(workdir, ["checkout", "-b", branch,
+ "origin/%s" % branch])
+ yield w; w.getResult()
+
+        # Hmm... why does nobody else bother to check out the correct
+        # revision?
+ w = self.dovc(workdir, ["reset", "--hard", rev])
+ yield w; w.getResult()
+
+ try_c_filename = os.path.join(workdir, "subdir", "subdir.c")
+ open(try_c_filename, "w").write(TRY_C)
+ vc_try_checkout = deferredGenerator(vc_try_checkout)
+
+ def vc_try_finish(self, workdir):
+ rmdirRecursive(workdir)
+
+class Git(VCBase, unittest.TestCase):
+ vc_name = "git"
+
+ # No 'export' mode yet...
+ # metadir = ".git"
+ vctype = "source.Git"
+ vctype_try = "git"
+ has_got_revision = True
+
+ def testCheckout(self):
+ self.helper.vcargs = { 'repourl': self.helper.gitrepo }
+ d = self.do_vctest()
+ return d
+
+ def testPatch(self):
+ self.helper.vcargs = { 'repourl': self.helper.gitrepo,
+ 'branch': "master" }
+ d = self.do_patch()
+ return d
+
+ def testCheckoutBranch(self):
+ self.helper.vcargs = { 'repourl': self.helper.gitrepo,
+ 'branch': "master" }
+ d = self.do_branch()
+ return d
+
+ def testTry(self):
+ self.helper.vcargs = { 'repourl': self.helper.gitrepo,
+ 'branch': "master" }
+ d = self.do_getpatch()
+ return d
+
+VCS.registerVC(Git.vc_name, GitHelper())
+
+
+class Sources(unittest.TestCase):
+ # TODO: this needs serious rethink
+ def makeChange(self, when=None, revision=None):
+ if when:
+ when = mktime_tz(parsedate_tz(when))
+ return changes.Change("fred", [], "", when=when, revision=revision)
+
+ def testCVS1(self):
+ r = base.BuildRequest("forced build", SourceStamp(), 'test_builder')
+ b = base.Build([r])
+ s = source.CVS(cvsroot=None, cvsmodule=None)
+ s.setBuild(b)
+ self.failUnlessEqual(s.computeSourceRevision(b.allChanges()), None)
+
+ def testCVS2(self):
+ c = []
+ c.append(self.makeChange("Wed, 08 Sep 2004 09:00:00 -0700"))
+ c.append(self.makeChange("Wed, 08 Sep 2004 09:01:00 -0700"))
+ c.append(self.makeChange("Wed, 08 Sep 2004 09:02:00 -0700"))
+ r = base.BuildRequest("forced", SourceStamp(changes=c), 'test_builder')
+ submitted = "Wed, 08 Sep 2004 09:04:00 -0700"
+ r.submittedAt = mktime_tz(parsedate_tz(submitted))
+ b = base.Build([r])
+ s = source.CVS(cvsroot=None, cvsmodule=None)
+ s.setBuild(b)
+ self.failUnlessEqual(s.computeSourceRevision(b.allChanges()),
+ "Wed, 08 Sep 2004 16:03:00 -0000")
+
+ def testCVS3(self):
+ c = []
+ c.append(self.makeChange("Wed, 08 Sep 2004 09:00:00 -0700"))
+ c.append(self.makeChange("Wed, 08 Sep 2004 09:01:00 -0700"))
+ c.append(self.makeChange("Wed, 08 Sep 2004 09:02:00 -0700"))
+ r = base.BuildRequest("forced", SourceStamp(changes=c), 'test_builder')
+ submitted = "Wed, 08 Sep 2004 09:04:00 -0700"
+ r.submittedAt = mktime_tz(parsedate_tz(submitted))
+ b = base.Build([r])
+ s = source.CVS(cvsroot=None, cvsmodule=None, checkoutDelay=10)
+ s.setBuild(b)
+ self.failUnlessEqual(s.computeSourceRevision(b.allChanges()),
+ "Wed, 08 Sep 2004 16:02:10 -0000")
+
+ def testCVS4(self):
+ c = []
+ c.append(self.makeChange("Wed, 08 Sep 2004 09:00:00 -0700"))
+ c.append(self.makeChange("Wed, 08 Sep 2004 09:01:00 -0700"))
+ c.append(self.makeChange("Wed, 08 Sep 2004 09:02:00 -0700"))
+ r1 = base.BuildRequest("forced", SourceStamp(changes=c), 'test_builder')
+ submitted = "Wed, 08 Sep 2004 09:04:00 -0700"
+ r1.submittedAt = mktime_tz(parsedate_tz(submitted))
+
+ c = []
+ c.append(self.makeChange("Wed, 08 Sep 2004 09:05:00 -0700"))
+ r2 = base.BuildRequest("forced", SourceStamp(changes=c), 'test_builder')
+ submitted = "Wed, 08 Sep 2004 09:07:00 -0700"
+ r2.submittedAt = mktime_tz(parsedate_tz(submitted))
+
+ b = base.Build([r1, r2])
+ s = source.CVS(cvsroot=None, cvsmodule=None)
+ s.setBuild(b)
+ self.failUnlessEqual(s.computeSourceRevision(b.allChanges()),
+ "Wed, 08 Sep 2004 16:06:00 -0000")
+
+ def testSVN1(self):
+ r = base.BuildRequest("forced", SourceStamp(), 'test_builder')
+ b = base.Build([r])
+ s = source.SVN(svnurl="dummy")
+ s.setBuild(b)
+ self.failUnlessEqual(s.computeSourceRevision(b.allChanges()), None)
+
+ def testSVN2(self):
+ c = []
+ c.append(self.makeChange(revision=4))
+ c.append(self.makeChange(revision=10))
+ c.append(self.makeChange(revision=67))
+ r = base.BuildRequest("forced", SourceStamp(changes=c), 'test_builder')
+ b = base.Build([r])
+ s = source.SVN(svnurl="dummy")
+ s.setBuild(b)
+ self.failUnlessEqual(s.computeSourceRevision(b.allChanges()), 67)
+
+class Patch(VCBase, unittest.TestCase):
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def testPatch(self):
+ # invoke 'patch' all by itself, to see if it works the way we think
+ # it should. This is intended to ferret out some windows test
+ # failures.
+ helper = BaseHelper()
+ self.workdir = os.path.join("test_vc", "testPatch")
+ helper.populate(self.workdir)
+ patch = which("patch")[0]
+
+ command = [patch, "-p0"]
+ class FakeBuilder:
+ usePTY = False
+ def sendUpdate(self, status):
+ pass
+ c = commands.ShellCommand(FakeBuilder(), command, self.workdir,
+ sendRC=False, initialStdin=p0_diff)
+ d = c.start()
+ d.addCallback(self._testPatch_1)
+ return d
+
+ def _testPatch_1(self, res):
+ # make sure the file actually got patched
+ subdir_c = os.path.join(self.workdir, "subdir", "subdir.c")
+ data = open(subdir_c, "r").read()
+ self.failUnlessIn("Hello patched subdir.\\n", data)
diff --git a/buildbot/buildbot/test/test_web.py b/buildbot/buildbot/test/test_web.py
new file mode 100644
index 0000000..0f353d8
--- /dev/null
+++ b/buildbot/buildbot/test/test_web.py
@@ -0,0 +1,594 @@
+# -*- test-case-name: buildbot.test.test_web -*-
+
+import os, time, shutil
+from HTMLParser import HTMLParser
+from twisted.python import components
+
+from twisted.trial import unittest
+from buildbot.test.runutils import RunMixin
+
+from twisted.internet import reactor, defer, protocol
+from twisted.internet.interfaces import IReactorUNIX
+from twisted.web import client
+
+from buildbot import master, interfaces, sourcestamp
+from buildbot.status import html, builder
+from buildbot.status.web import waterfall
+from buildbot.changes.changes import Change
+from buildbot.process import base
+from buildbot.process.buildstep import BuildStep
+from buildbot.test.runutils import setupBuildStepStatus
+
class ConfiguredMaster(master.BuildMaster):
    """This BuildMaster variant has a static config file, provided as a
    string when it is created."""

    def __init__(self, basedir, config):
        # config: the buildmaster configuration, as a string of Python code
        self.config = config
        master.BuildMaster.__init__(self, basedir)

    def loadTheConfigFile(self):
        # bypass reading master.cfg from disk; evaluate the in-memory string
        self.loadConfig(self.config)
+
+components.registerAdapter(master.Control, ConfiguredMaster,
+ interfaces.IControl)
+
+
# minimal buildmaster configuration shared by these tests; each test appends
# its own c['status'] line before handing the string to ConfiguredMaster
base_config = """
from buildbot.changes.pb import PBChangeSource
from buildbot.status import html
from buildbot.buildslave import BuildSlave
from buildbot.scheduler import Scheduler
from buildbot.process.factory import BuildFactory

BuildmasterConfig = c = {
    'change_source': PBChangeSource(),
    'slaves': [BuildSlave('bot1name', 'bot1passwd')],
    'schedulers': [Scheduler('name', None, 60, ['builder1'])],
    'builders': [{'name': 'builder1', 'slavename': 'bot1name',
                  'builddir': 'builder1', 'factory': BuildFactory()}],
    'slavePortnum': 0,
    }
"""
+
+
+
class DistribUNIX:
    """Front-end web server that re-publishes (via twisted.web.distrib) a
    publisher reachable over a UNIX socket, on a random TCP port exposed as
    self.portnum."""
    def __init__(self, unixpath):
        from twisted.web import server, resource, distrib
        root = resource.Resource()
        self.r = r = distrib.ResourceSubscription("unix", unixpath)
        root.putChild('remote', r)
        # port 0: let the kernel pick a free port
        self.p = p = reactor.listenTCP(0, server.Site(root))
        self.portnum = p.getHost().port
    def shutdown(self):
        d = defer.maybeDeferred(self.p.stopListening)
        return d
+
class DistribTCP:
    """Like DistribUNIX, but subscribes to the distrib publisher over a
    localhost TCP port instead of a UNIX socket."""
    def __init__(self, port):
        from twisted.web import server, resource, distrib
        root = resource.Resource()
        self.r = r = distrib.ResourceSubscription("localhost", port)
        root.putChild('remote', r)
        self.p = p = reactor.listenTCP(0, server.Site(root))
        self.portnum = p.getHost().port
    def shutdown(self):
        d = defer.maybeDeferred(self.p.stopListening)
        d.addCallback(self._shutdown_1)
        return d
    def _shutdown_1(self, res):
        # also drop the PB connection to the publisher, so the reactor is
        # left clean after the test
        return self.r.publisher.broker.transport.loseConnection()
+
class SlowReader(protocol.Protocol):
    """Client protocol that sends a raw HTTP request, then pauses the
    transport for two seconds once ~10kB have arrived — used to exercise
    the server's producer pause/resume handling on large responses."""
    didPause = False
    count = 0      # total bytes received
    data = ""      # accumulated response body
    def __init__(self, req):
        # req: the raw request bytes written as soon as we connect
        self.req = req
        self.d = defer.Deferred()  # fires (None) when the connection closes
    def connectionMade(self):
        self.transport.write(self.req)
    def dataReceived(self, data):
        self.data += data
        self.count += len(data)
        if not self.didPause and self.count > 10*1000:
            # stall exactly once, after ~10kB
            self.didPause = True
            self.transport.pauseProducing()
            reactor.callLater(2, self.resume)
    def resume(self):
        self.transport.resumeProducing()
    def connectionLost(self, why):
        self.d.callback(None)
+
class CFactory(protocol.ClientFactory):
    """Single-use client factory that always hands out the one protocol
    instance it was constructed with."""
    def __init__(self, p):
        self.p = p
    def buildProtocol(self, addr):
        self.p.factory = self
        return self.p
+
def stopHTTPLog():
    # grr. Reaches into twisted.web.http's private module state —
    # presumably to stop a global timestamp-logging loop that would
    # otherwise leave the reactor dirty after the test (TODO: confirm
    # against the twisted.web.http source for the version in use).
    from twisted.web import http
    http._logDateTimeStop()
+
class BaseWeb:
    """Shared helpers for the web-status test cases: substring assertion,
    master teardown, and locating status children on a master."""

    master = None

    def failUnlessIn(self, substr, string, note=None):
        # membership test is equivalent to string.find(substr) != -1
        self.failUnless(substr in string, note)

    def tearDown(self):
        stopHTTPLog()
        if self.master:
            return self.master.stopService()

    def find_webstatus(self, master):
        # return the first WebStatus child service, or None if absent
        for svc in list(master):
            if isinstance(svc, html.WebStatus):
                return svc

    def find_waterfall(self, master):
        # return the first old-style Waterfall child service, or None
        for svc in list(master):
            if isinstance(svc, html.Waterfall):
                return svc
+
class Ports(BaseWeb, unittest.TestCase):
    """Exercise the ways a status display can be attached to the network:
    a plain TCP web server, and twisted.web.distrib over UNIX or TCP."""

    def test_webPortnum(self):
        # run a regular web server on a TCP socket
        config = base_config + "c['status'] = [html.WebStatus(http_port=0)]\n"
        os.mkdir("test_web1")
        self.master = m = ConfiguredMaster("test_web1", config)
        m.startService()
        # hack to find out what randomly-assigned port it is listening on
        port = self.find_webstatus(m).getPortnum()

        d = client.getPage("http://localhost:%d/waterfall" % port)
        def _check(page):
            #print page
            self.failUnless(page)
        d.addCallback(_check)
        return d
    test_webPortnum.timeout = 10

    def test_webPathname(self):
        # running a t.web.distrib server over a UNIX socket
        if not IReactorUNIX.providedBy(reactor):
            raise unittest.SkipTest("UNIX sockets not supported here")
        config = (base_config +
                  "c['status'] = [html.WebStatus(distrib_port='.web-pb')]\n")
        os.mkdir("test_web2")
        self.master = m = ConfiguredMaster("test_web2", config)
        m.startService()

        p = DistribUNIX("test_web2/.web-pb")

        d = client.getPage("http://localhost:%d/remote/waterfall" % p.portnum)
        def _check(page):
            self.failUnless(page)
        d.addCallback(_check)
        def _done(res):
            # shut the subscriber down whether or not the fetch succeeded,
            # then pass the original result/failure through
            d1 = p.shutdown()
            d1.addCallback(lambda x: res)
            return d1
        d.addBoth(_done)
        return d
    test_webPathname.timeout = 10


    def test_webPathname_port(self):
        # running a t.web.distrib server over TCP
        config = (base_config +
                  "c['status'] = [html.WebStatus(distrib_port=0)]\n")
        os.mkdir("test_web3")
        self.master = m = ConfiguredMaster("test_web3", config)
        m.startService()
        dport = self.find_webstatus(m).getPortnum()

        p = DistribTCP(dport)

        d = client.getPage("http://localhost:%d/remote/waterfall" % p.portnum)
        def _check(page):
            self.failUnlessIn("BuildBot", page)
        d.addCallback(_check)
        def _done(res):
            d1 = p.shutdown()
            d1.addCallback(lambda x: res)
            return d1
        d.addBoth(_done)
        return d
    test_webPathname_port.timeout = 10
+
+
class Waterfall(BaseWeb, unittest.TestCase):
    """End-to-end fetches against the old-style html.Waterfall status:
    main page, phase=0 layout, /changes, and /robots.txt."""
    def test_waterfall(self):
        os.mkdir("test_web4")
        os.mkdir("my-maildir"); os.mkdir("my-maildir/new")
        self.robots_txt = os.path.abspath(os.path.join("test_web4",
                                                       "robots.txt"))
        self.robots_txt_contents = "User-agent: *\nDisallow: /\n"
        f = open(self.robots_txt, "w")
        f.write(self.robots_txt_contents)
        f.close()
        # this is the right way to configure the Waterfall status
        config1 = base_config + """
from buildbot.changes import mail
c['change_source'] = mail.SyncmailMaildirSource('my-maildir')
c['status'] = [html.Waterfall(http_port=0, robots_txt=%s)]
""" % repr(self.robots_txt)

        self.master = m = ConfiguredMaster("test_web4", config1)
        m.startService()
        port = self.find_waterfall(m).getPortnum()
        self.port = port
        # insert an event
        m.change_svc.addChange(Change("user", ["foo.c"], "comments"))

        d = client.getPage("http://localhost:%d/" % port)

        def _check1(page):
            self.failUnless(page)
            self.failUnlessIn("current activity", page)
            self.failUnlessIn("<html", page)
            TZ = time.tzname[time.localtime()[-1]]
            self.failUnlessIn("time (%s)" % TZ, page)

            # phase=0 is really for debugging the waterfall layout
            return client.getPage("http://localhost:%d/?phase=0" % self.port)
        d.addCallback(_check1)

        def _check2(page):
            self.failUnless(page)
            self.failUnlessIn("<html", page)

            return client.getPage("http://localhost:%d/changes" % self.port)
        d.addCallback(_check2)

        def _check3(changes):
            # the change source we configured should be described
            self.failUnlessIn("<li>Syncmail mailing list in maildir " +
                              "my-maildir</li>", changes)

            return client.getPage("http://localhost:%d/robots.txt" % self.port)
        d.addCallback(_check3)

        def _check4(robotstxt):
            self.failUnless(robotstxt == self.robots_txt_contents)
        d.addCallback(_check4)

        return d

    test_waterfall.timeout = 10
+
class WaterfallSteps(unittest.TestCase):
    """Check how step URLs added with addURL() are rendered in the
    waterfall box for that step."""

    # failUnlessSubstring copied from twisted-2.1.0, because this helps us
    # maintain compatibility with python2.2.
    def failUnlessSubstring(self, substring, astring, msg=None):
        """a python2.2 friendly test to assert that substring is found in
        astring. Parameters follow the semantics of failUnlessIn.
        """
        if astring.find(substring) == -1:
            raise self.failureException(msg or "%r not found in %r"
                                        % (substring, astring))
        return substring
    assertSubstring = failUnlessSubstring

    def test_urls(self):
        s = setupBuildStepStatus("test_web.test_urls")
        s.addURL("coverage", "http://coverage.example.org/target")
        s.addURL("icon", "http://coverage.example.org/icon.png")
        # just enough of a twisted.web request for IBox.getBox()
        class FakeRequest:
            prepath = []
            postpath = []
            def childLink(self, name):
                return name
        req = FakeRequest()
        box = waterfall.IBox(s).getBox(req)
        td = box.td()
        e1 = '[<a href="http://coverage.example.org/target" class="BuildStep external">coverage</a>]'
        self.failUnlessSubstring(e1, td)
        e2 = '[<a href="http://coverage.example.org/icon.png" class="BuildStep external">icon</a>]'
        self.failUnlessSubstring(e2, td)
+
+
+
# configuration for the GetURL tests: one builder fed by two slaves, a
# scheduler that discards all changes, and a buildbotURL against which
# getURLForThing() results are checked
geturl_config = """
from buildbot.status import html
from buildbot.changes import mail
from buildbot.process import factory
from buildbot.steps import dummy
from buildbot.scheduler import Scheduler
from buildbot.changes.base import ChangeSource
from buildbot.buildslave import BuildSlave
s = factory.s

class DiscardScheduler(Scheduler):
    def addChange(self, change):
        pass
class DummyChangeSource(ChangeSource):
    pass

BuildmasterConfig = c = {}
c['slaves'] = [BuildSlave('bot1', 'sekrit'), BuildSlave('bot2', 'sekrit')]
c['change_source'] = DummyChangeSource()
c['schedulers'] = [DiscardScheduler('discard', None, 60, ['b1'])]
c['slavePortnum'] = 0
c['status'] = [html.Waterfall(http_port=0)]

f = factory.BuildFactory([s(dummy.RemoteDummy, timeout=1)])

c['builders'] = [
    {'name': 'b1', 'slavenames': ['bot1','bot2'],
     'builddir': 'b1', 'factory': f},
    ]
c['buildbotURL'] = 'http://dummy.example.org:8010/'

"""
+
class GetURL(RunMixin, unittest.TestCase):
    """Check Status.getURLForThing() for the master, builders, changes,
    builds, steps and logs, with and without c['buildbotURL']."""

    def setUp(self):
        RunMixin.setUp(self)
        self.master.loadConfig(geturl_config)
        self.master.startService()
        d = self.connectSlave(["b1"])
        return d

    def tearDown(self):
        stopHTTPLog()
        return RunMixin.tearDown(self)

    def doBuild(self, buildername):
        # force one build; the Deferred fires when the build finishes
        br = base.BuildRequest("forced", sourcestamp.SourceStamp(), 'test_builder')
        d = br.waitUntilFinished()
        self.control.getBuilder(buildername).requestBuild(br)
        return d

    def assertNoURL(self, target):
        self.failUnlessIdentical(self.status.getURLForThing(target), None)

    def assertURLEqual(self, target, expected):
        # expected is relative to the configured buildbotURL
        got = self.status.getURLForThing(target)
        full_expected = "http://dummy.example.org:8010/" + expected
        self.failUnlessEqual(got, full_expected)

    def testMissingBase(self):
        # without c['buildbotURL'] no URLs can be computed at all
        noweb_config1 = geturl_config + "del c['buildbotURL']\n"
        d = self.master.loadConfig(noweb_config1)
        d.addCallback(self._testMissingBase_1)
        return d
    def _testMissingBase_1(self, res):
        s = self.status
        self.assertNoURL(s)
        builder_s = s.getBuilder("b1")
        self.assertNoURL(builder_s)

    def testBase(self):
        s = self.status
        self.assertURLEqual(s, "")
        builder_s = s.getBuilder("b1")
        self.assertURLEqual(builder_s, "builders/b1")

    def testChange(self):
        s = self.status
        c = Change("user", ["foo.c"], "comments")
        self.master.change_svc.addChange(c)
        # TODO: something more like s.getChanges(), requires IChange and
        # an accessor in IStatus. The HTML page exists already, though
        self.assertURLEqual(c, "changes/1")

    def testBuild(self):
        # first we do some stuff so we'll have things to look at.
        s = self.status
        d = self.doBuild("b1")
        # maybe check IBuildSetStatus here?
        d.addCallback(self._testBuild_1)
        return d

    def _testBuild_1(self, res):
        s = self.status
        builder_s = s.getBuilder("b1")
        build_s = builder_s.getLastFinishedBuild()
        self.assertURLEqual(build_s, "builders/b1/builds/0")
        # no page for builder.getEvent(-1)
        step = build_s.getSteps()[0]
        self.assertURLEqual(step, "builders/b1/builds/0/steps/remote%20dummy")
        # maybe page for build.getTestResults?
        self.assertURLEqual(step.getLogs()[0],
                            "builders/b1/builds/0/steps/remote%20dummy/logs/0")
+
+
+
+class Logfile(BaseWeb, RunMixin, unittest.TestCase):
+ def setUp(self):
+ config = """
+from buildbot.status import html
+from buildbot.process.factory import BasicBuildFactory
+from buildbot.buildslave import BuildSlave
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+BuildmasterConfig = {
+ 'slaves': [BuildSlave('bot1', 'passwd1')],
+ 'schedulers': [],
+ 'builders': [{'name': 'builder1', 'slavename': 'bot1',
+ 'builddir':'workdir', 'factory':f1}],
+ 'slavePortnum': 0,
+ 'status': [html.WebStatus(http_port=0)],
+ }
+"""
+ if os.path.exists("test_logfile"):
+ shutil.rmtree("test_logfile")
+ os.mkdir("test_logfile")
+ self.master = m = ConfiguredMaster("test_logfile", config)
+ m.startService()
+ # hack to find out what randomly-assigned port it is listening on
+ port = self.find_webstatus(m).getPortnum()
+ self.port = port
+ # insert an event
+
+ req = base.BuildRequest("reason", sourcestamp.SourceStamp(), 'test_builder')
+ build1 = base.Build([req])
+ bs = m.status.getBuilder("builder1").newBuild()
+ bs.setReason("reason")
+ bs.buildStarted(build1)
+
+ step1 = BuildStep(name="setup")
+ step1.setBuild(build1)
+ bss = bs.addStepWithName("setup")
+ step1.setStepStatus(bss)
+ bss.stepStarted()
+
+ log1 = step1.addLog("output")
+ log1.addStdout("some stdout\n")
+ log1.finish()
+
+ log2 = step1.addHTMLLog("error", "<html>ouch</html>")
+
+ log3 = step1.addLog("big")
+ log3.addStdout("big log\n")
+ for i in range(1000):
+ log3.addStdout("a" * 500)
+ log3.addStderr("b" * 500)
+ log3.finish()
+
+ log4 = step1.addCompleteLog("bigcomplete",
+ "big2 log\n" + "a" * 1*1000*1000)
+
+ log5 = step1.addLog("mixed")
+ log5.addHeader("header content")
+ log5.addStdout("this is stdout content")
+ log5.addStderr("errors go here")
+ log5.addEntry(5, "non-standard content on channel 5")
+ log5.addStderr(" and some trailing stderr")
+
+ d = defer.maybeDeferred(step1.step_status.stepFinished,
+ builder.SUCCESS)
+ bs.buildFinished()
+ return d
+
+ def getLogPath(self, stepname, logname):
+ return ("/builders/builder1/builds/0/steps/%s/logs/%s" %
+ (stepname, logname))
+
+ def getLogURL(self, stepname, logname):
+ return ("http://localhost:%d" % self.port
+ + self.getLogPath(stepname, logname))
+
+ def test_logfile1(self):
+ d = client.getPage("http://localhost:%d/" % self.port)
+ def _check(page):
+ self.failUnless(page)
+ d.addCallback(_check)
+ return d
+
+ def test_logfile2(self):
+ logurl = self.getLogURL("setup", "output")
+ d = client.getPage(logurl)
+ def _check(logbody):
+ self.failUnless(logbody)
+ d.addCallback(_check)
+ return d
+
+ def test_logfile3(self):
+ logurl = self.getLogURL("setup", "output")
+ d = client.getPage(logurl + "/text")
+ def _check(logtext):
+ self.failUnlessEqual(logtext, "some stdout\n")
+ d.addCallback(_check)
+ return d
+
+ def test_logfile4(self):
+ logurl = self.getLogURL("setup", "error")
+ d = client.getPage(logurl)
+ def _check(logbody):
+ self.failUnlessEqual(logbody, "<html>ouch</html>")
+ d.addCallback(_check)
+ return d
+
+ def test_logfile5(self):
+ # this is log3, which is about 1MB in size, made up of alternating
+ # stdout/stderr chunks. buildbot-0.6.6, when run against
+ # twisted-1.3.0, fails to resume sending chunks after the client
+ # stalls for a few seconds, because of a recursive doWrite() call
+ # that was fixed in twisted-2.0.0
+ p = SlowReader("GET %s HTTP/1.0\r\n\r\n"
+ % self.getLogPath("setup", "big"))
+ cf = CFactory(p)
+ c = reactor.connectTCP("localhost", self.port, cf)
+ d = p.d
+ def _check(res):
+ self.failUnlessIn("big log", p.data)
+ self.failUnlessIn("a"*100, p.data)
+ self.failUnless(p.count > 1*1000*1000)
+ d.addCallback(_check)
+ return d
+
+ def test_logfile6(self):
+ # this is log4, which is about 1MB in size, one big chunk.
+ # buildbot-0.6.6 dies as the NetstringReceiver barfs on the
+ # saved logfile, because it was using one big chunk and exceeding
+ # NetstringReceiver.MAX_LENGTH
+ p = SlowReader("GET %s HTTP/1.0\r\n\r\n"
+ % self.getLogPath("setup", "bigcomplete"))
+ cf = CFactory(p)
+ c = reactor.connectTCP("localhost", self.port, cf)
+ d = p.d
+ def _check(res):
+ self.failUnlessIn("big2 log", p.data)
+ self.failUnlessIn("a"*100, p.data)
+ self.failUnless(p.count > 1*1000*1000)
+ d.addCallback(_check)
+ return d
+
+ def test_logfile7(self):
+ # this is log5, with mixed content on the tree standard channels
+ # as well as on channel 5
+
+ class SpanParser(HTMLParser):
+ '''Parser subclass to gather all the log spans from the log page'''
+ def __init__(self, test):
+ self.spans = []
+ self.test = test
+ self.inSpan = False
+ HTMLParser.__init__(self)
+
+ def handle_starttag(self, tag, attrs):
+ if tag == 'span':
+ self.inSpan = True
+ cls = attrs[0]
+ self.test.failUnless(cls[0] == 'class')
+ self.spans.append([cls[1],''])
+
+ def handle_data(self, data):
+ if self.inSpan:
+ self.spans[-1][1] += data
+
+ def handle_endtag(self, tag):
+ if tag == 'span':
+ self.inSpan = False
+
+ logurl = self.getLogURL("setup", "mixed")
+ d = client.getPage(logurl, timeout=2)
+ def _check(logbody):
+ try:
+ p = SpanParser(self)
+ p.feed(logbody)
+ p.close
+ except Exception, e:
+ print e
+ self.failUnlessEqual(len(p.spans), 4)
+ self.failUnlessEqual(p.spans[0][0], 'header')
+ self.failUnlessEqual(p.spans[0][1], 'header content')
+ self.failUnlessEqual(p.spans[1][0], 'stdout')
+ self.failUnlessEqual(p.spans[1][1], 'this is stdout content')
+ self.failUnlessEqual(p.spans[2][0], 'stderr')
+ self.failUnlessEqual(p.spans[2][1], 'errors go here')
+ self.failUnlessEqual(p.spans[3][0], 'stderr')
+ self.failUnlessEqual(p.spans[3][1], ' and some trailing stderr')
+ def _fail(err):
+ pass
+ d.addCallback(_check)
+ d.addErrback(_fail)
+ return d
diff --git a/buildbot/buildbot/test/test_webparts.py b/buildbot/buildbot/test/test_webparts.py
new file mode 100644
index 0000000..71dd59e
--- /dev/null
+++ b/buildbot/buildbot/test/test_webparts.py
@@ -0,0 +1,141 @@
+
+import os
+from twisted.trial import unittest
+from twisted.internet import defer
+from twisted.web import client
+from twisted.web.error import Error as WebError
+from buildbot.slave.commands import rmdirRecursive
+from buildbot.status import html
+from test_web import BaseWeb, base_config, ConfiguredMaster
+from buildbot.scripts import runner
+
class Webparts(BaseWeb, unittest.TestCase):
    """Spot-check the individual pages served by WebStatus, including
    static children added with putChild() and files under public_html/."""

    def find_webstatus(self, master):
        # all WebStatus children (the BaseWeb version returns only the first)
        return filter(lambda child: isinstance(child, html.WebStatus),
                      list(master))

    def startMaster(self, extraconfig):
        # start a fresh master in test_webparts/ with the given status config
        config = base_config + extraconfig
        rmdirRecursive("test_webparts")
        os.mkdir("test_webparts")
        runner.upgradeMaster({'basedir': "test_webparts",
                              'quiet': True,
                              })
        self.master = m = ConfiguredMaster("test_webparts", config)
        m.startService()
        # hack to find out what randomly-assigned port it is listening on
        port = list(self.find_webstatus(m)[0])[0]._port.getHost().port
        self.baseurl = "http://localhost:%d/" % port

    def reconfigMaster(self, extraconfig):
        # reload config and re-derive the (possibly changed) port
        config = base_config + extraconfig
        d = self.master.loadConfig(config)
        def _done(res):
            m = self.master
            port = list(self.find_webstatus(m)[0])[0]._port.getHost().port
            self.baseurl = "http://localhost:%d/" % port
        d.addCallback(_done)
        return d

    def getAndCheck(self, url, substring, show=False):
        # GET url and assert that substring appears in the response body
        d = client.getPage(url)
        def _show_weberror(why):
            why.trap(WebError)
            self.fail("error for %s: %s" % (url, why))
        d.addErrback(_show_weberror)
        d.addCallback(self._getAndCheck, substring, show)
        return d
    def _getAndCheck(self, page, substring, show):
        if show:
            print page
        self.failUnlessIn(substring, page,
                          "Couldn't find substring '%s' in page:\n%s" %
                          (substring, page))

    def testInit(self):
        # a resource added with putChild() is served
        extraconfig = """
from twisted.web import static
ws = html.WebStatus(http_port=0)
c['status'] = [ws]
ws.putChild('child.html', static.Data('I am the child', 'text/plain'))
"""
        self.startMaster(extraconfig)
        d = self.getAndCheck(self.baseurl + "child.html",
                             "I am the child")
        return d
    testInit.timeout = 10

    def testStatic(self):
        # files dropped into public_html/ (and subdirectories) are served
        extraconfig = """
from twisted.web import static
ws = html.WebStatus(http_port=0)
c['status'] = [ws]
ws.putChild('child.html', static.Data('I am the child', 'text/plain'))
"""
        self.startMaster(extraconfig)
        os.mkdir(os.path.join("test_webparts", "public_html", "subdir"))
        f = open(os.path.join("test_webparts", "public_html", "foo.html"), "wt")
        f.write("see me foo\n")
        f.close()
        f = open(os.path.join("test_webparts", "public_html", "subdir",
                              "bar.html"), "wt")
        f.write("see me subdir/bar\n")
        f.close()
        d = self.getAndCheck(self.baseurl + "child.html", "I am the child")
        d.addCallback(lambda res:
                      self.getAndCheck(self.baseurl+"foo.html",
                                       "see me foo"))
        d.addCallback(lambda res:
                      self.getAndCheck(self.baseurl+"subdir/bar.html",
                                       "see me subdir/bar"))
        return d

    def _check(self, res, suburl, substring, show=False):
        d = self.getAndCheck(self.baseurl + suburl, substring, show)
        return d

    def testPages(self):
        # run the page checks twice: once plain, once with allowForce=True
        extraconfig = """
ws = html.WebStatus(http_port=0)
c['status'] = [ws]
"""
        self.startMaster(extraconfig)
        d = defer.succeed(None)
        d.addCallback(self._do_page_tests)
        extraconfig2 = """
ws = html.WebStatus(http_port=0, allowForce=True)
c['status'] = [ws]
"""
        d.addCallback(lambda res: self.reconfigMaster(extraconfig2))
        d.addCallback(self._do_page_tests)
        return d

    def _do_page_tests(self, res):
        # each standard WebStatus page is checked for a marker string
        d = defer.succeed(None)
        d.addCallback(self._check, "", "Welcome to the Buildbot")
        d.addCallback(self._check, "waterfall", "current activity")
        d.addCallback(self._check, "about", "Buildbot is a free software")
        d.addCallback(self._check, "changes", "PBChangeSource listener")
        d.addCallback(self._check, "buildslaves", "Build Slaves")
        d.addCallback(self._check, "one_line_per_build",
                      "Last 20 finished builds")
        d.addCallback(self._check, "one_box_per_builder", "Latest builds")
        d.addCallback(self._check, "builders", "Builders")
        d.addCallback(self._check, "builders/builder1", "Builder: builder1")
        d.addCallback(self._check, "builders/builder1/builds", "") # dummy
        # TODO: the pages beyond here would be great to test, but that would
        # require causing a build to complete.
        #d.addCallback(self._check, "builders/builder1/builds/1", "")
        # it'd be nice to assert that the Build page has a "Stop Build" button
        #d.addCallback(self._check, "builders/builder1/builds/1/steps", "")
        #d.addCallback(self._check,
        #              "builders/builder1/builds/1/steps/compile", "")
        #d.addCallback(self._check,
        #              "builders/builder1/builds/1/steps/compile/logs", "")
        #d.addCallback(self._check,
        #              "builders/builder1/builds/1/steps/compile/logs/stdio","")
        #d.addCallback(self._check,
        #              "builders/builder1/builds/1/steps/compile/logs/stdio/text", "")
        return d
diff --git a/buildbot/buildbot/util.py b/buildbot/buildbot/util.py
new file mode 100644
index 0000000..071cf5f
--- /dev/null
+++ b/buildbot/buildbot/util.py
@@ -0,0 +1,102 @@
+# -*- test-case-name: buildbot.test.test_util -*-
+
+from twisted.internet.defer import Deferred
+from twisted.spread import pb
+import time, re
+
def naturalSort(l):
    """Return a sorted copy of l, so that numbers in strings are sorted in
    the proper (numeric) order.

    e.g. ['foo10', 'foo1', 'foo2'] will be sorted as ['foo1', 'foo2', 'foo10']
    instead of the default ['foo1', 'foo10', 'foo2']. The input list is not
    modified."""
    l = l[:]
    def try_int(s):
        try:
            return int(s)
        except ValueError:
            # was a bare 'except:', which would also swallow
            # KeyboardInterrupt/SystemExit; only the failed conversion
            # should fall back to string comparison
            return s
    def key_func(item):
        # the capturing group makes re.split keep the digit runs, so
        # 'foo10' -> ['foo', 10, ''] and digit runs compare numerically
        return [try_int(s) for s in re.split(r'(\d+)', item)]
    l.sort(key=key_func)
    return l
+
def now():
    """Return the current time, as a float number of seconds since the
    epoch (thin wrapper so tests and callers share one clock source)."""
    return time.time()
+
def earlier(old, new):
    """Return the minimum of two values, where None counts as +infinity.

    BUGFIX: the original tested 'if old:' so a legitimate value of 0
    (e.g. the epoch as a timestamp) was treated like the None sentinel;
    the sentinel check is now an explicit 'is None'."""
    if old is None:
        return new
    if new is None:
        return old
    return min(old, new)
+
def later(old, new):
    """Return the maximum of two values, where None counts as -infinity.

    BUGFIX: the original tested 'if old:' so a legitimate value of 0 was
    treated like the None sentinel (later(0, -5) wrongly returned -5);
    the sentinel check is now an explicit 'is None'."""
    if old is None:
        return new
    if new is None:
        return old
    return max(old, new)
+
def formatInterval(eta):
    """Render a duration given in seconds as a human-readable string,
    e.g. formatInterval(3725) == '1 hrs, 2 mins, 5 secs'."""
    parts = []
    if eta > 3600:
        hours, eta = divmod(eta, 3600)
        parts.append("%d hrs" % hours)
    if eta > 60:
        minutes, eta = divmod(eta, 60)
        parts.append("%d mins" % minutes)
    parts.append("%d secs" % eta)
    return ", ".join(parts)
+
class CancelableDeferred(Deferred):
    """I am a version of Deferred that can be canceled by calling my
    .cancel() method. After being canceled, no callbacks or errbacks will be
    executed.
    """
    def __init__(self):
        Deferred.__init__(self)
        # checked by _runCallbacks before the chain runs
        self.canceled = 0
    def cancel(self):
        # mark canceled; any pending callbacks/errbacks are discarded the
        # next time the chain would run
        self.canceled = 1
    def _runCallbacks(self):
        # NOTE: overrides a private twisted.internet.defer.Deferred method;
        # drop the whole chain when canceled, otherwise defer to the base
        if self.canceled:
            self.callbacks = []
            return
        Deferred._runCallbacks(self)
+
def ignoreStaleRefs(failure):
    """d.addErrback(util.ignoreStaleRefs)

    Swallow PB stale-reference errors. trap() re-raises anything other
    than DeadReferenceError/PBConnectionLost, so all other failures still
    propagate down the errback chain."""
    r = failure.trap(pb.DeadReferenceError, pb.PBConnectionLost)
    return None
+
class _None:
    # unique placeholder used by ComparableMixin for "attribute not
    # present", so that a missing attribute is distinguishable from an
    # attribute whose value is None
    pass
+
class ComparableMixin:
    """Specify a list of attributes that are 'important'. These will be used
    for all comparison operations."""

    # subclasses override this with the names of their significant attributes
    compare_attrs = []

    def __hash__(self):
        # hash on the class plus the significant attribute values; a
        # missing attribute contributes the _None sentinel
        alist = [self.__class__] + \
                [getattr(self, name, _None) for name in self.compare_attrs]
        return hash(tuple(alist))

    def __cmp__(self, them):
        # NOTE: __cmp__ and the cmp() builtin are Python 2 only
        result = cmp(type(self), type(them))
        if result:
            return result

        result = cmp(self.__class__, them.__class__)
        if result:
            return result

        # classes matched, so both sides use the same attribute list
        assert self.compare_attrs == them.compare_attrs
        self_list= [getattr(self, name, _None) for name in self.compare_attrs]
        them_list= [getattr(them, name, _None) for name in self.compare_attrs]
        return cmp(self_list, them_list)
diff --git a/buildbot/contrib/CSS/sample1.css b/buildbot/contrib/CSS/sample1.css
new file mode 100644
index 0000000..08d7942
--- /dev/null
+++ b/buildbot/contrib/CSS/sample1.css
@@ -0,0 +1,53 @@
/* Buildbot status-page stylesheet, sample 1: grey chrome with muted
   pastel build-result colors. */
* {
	font-family: Verdana, Cursor;
	font-size: 10px;
	font-weight: bold;
}

a:link,a:visited,a:active {
	color: #666666;
}
a:hover {
	color: #FFFFFF;
}

.table {
	border-spacing: 2px;
}

td.Event, td.Activity, td.Change, td.Time, td.Builder {
	color: #333333;
	border: 1px solid #666666;
	background-color: #CCCCCC;
}

/* LastBuild, BuildStep states */
.success {
	color: #FFFFFF;
	border: 1px solid #2f8f0f;
	background-color: #8fdf5f;
}

.failure {
	color: #FFFFFF;
	border: 1px solid #f33636;
	background-color: #e98080;
}

.warnings {
	color: #FFFFFF;
	border: 1px solid #fc901f;
	background-color: #ffc343;
}

.exception, td.offline {
	color: #FFFFFF;
	border: 1px solid #8000c0;
	background-color: #e0b0ff;
}

.start,.running, td.building {
	color: #666666;
	border: 1px solid #ffff00;
	background-color: #fffc6c;
}
diff --git a/buildbot/contrib/CSS/sample2.css b/buildbot/contrib/CSS/sample2.css
new file mode 100644
index 0000000..9164ee4
--- /dev/null
+++ b/buildbot/contrib/CSS/sample2.css
@@ -0,0 +1,53 @@
/* Buildbot status-page stylesheet, sample 2: like sample 1, but larger
   text and saturated red/green/yellow build-result colors. */
* {
	font-family: Verdana, Cursor;
	font-size: 12px;
	font-weight: bold;
}

a:link,a:visited,a:active {
	color: #666666;
}
a:hover {
	color: #FFFFFF;
}

.table {
	border-spacing: 2px;
}

td.Event, td.Activity, td.Change, td.Time, td.Builder {
	color: #333333;
	border: 1px solid #666666;
	background-color: #CCCCCC;
}

/* LastBuild, BuildStep states */
.success {
	color: #FFFFFF;
	border: 1px solid #2f8f0f;
	background-color: #72ff75;
}

.failure {
	color: #FFFFFF;
	border: 1px solid #f33636;
	background-color: red;
}

.warnings {
	color: #FFFFFF;
	border: 1px solid #fc901f;
	background-color: #ffc343;
}

.exception, td.offline {
	color: #FFFFFF;
	border: 1px solid #8000c0;
	background-color: red;
}

.start,.running, td.building {
	color: #666666;
	border: 1px solid #ffff00;
	background-color: yellow;
}
diff --git a/buildbot/contrib/OS-X/README b/buildbot/contrib/OS-X/README
new file mode 100644
index 0000000..6cc1d64
--- /dev/null
+++ b/buildbot/contrib/OS-X/README
@@ -0,0 +1,23 @@
+Mark Pauley contributed the two launchd plist files for OS-X (10.4+) to start
+a buildmaster or buildslave automatically at startup:
+
+ contrib/OS-X/net.sourceforge.buildbot.master.plist
+ contrib/OS-X/net.sourceforge.buildbot.slave.plist
+
+His email message is as follows:
+
+ Message-Id: <C0E57556-0432-4EB6-9A6C-22CDC72208E9@apple.com>
+ From: Mark Pauley <mpauley@apple.com>
+ To: buildbot-devel <buildbot-devel@lists.sourceforge.net>
+ Date: Wed, 24 Jan 2007 11:05:44 -0800
+ Subject: [Buildbot-devel] Sample buildbot launchd plists for MacOS 10.4+
+
+ Hi guys,
+ I've had these kicking around for a while and thought that maybe
+ someone would like to see them. Installing either of these two to /
+ Library/LaunchDaemons will cause the buildbot slave or master to auto-
+ start as whatever user you like on launch. This is the "right way to
+ do this" going forward, startupitems are deprecated. Please note that
+ this means any tests that require a windowserver connection on os x
+ won't work.
+
diff --git a/buildbot/contrib/OS-X/net.sourceforge.buildbot.master.plist b/buildbot/contrib/OS-X/net.sourceforge.buildbot.master.plist
new file mode 100644
index 0000000..3ba2395
--- /dev/null
+++ b/buildbot/contrib/OS-X/net.sourceforge.buildbot.master.plist
@@ -0,0 +1,42 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd
+">
+<plist version="1.0">
+<dict>
+ <key>Label</key>
+ <string>net.sourceforge.buildbot.master</string>
+
+ <!-- Change this to the user you want to run buildbot as -->
+ <key>UserName</key>
+ <string>buildbot</string>
+
+ <!-- Change this to your buildbot working directory -->
+ <key>WorkingDirectory</key>
+ <string>/Users/buildbot/Buildbot_Master</string>
+
+ <key>ProgramArguments</key>
+ <array>
+ <string>/usr/bin/twistd</string>
+ <string>--nodaemon</string>
+ <string>--python=buildbot.tac</string>
+ <string>--logfile=buildbot.log</string>
+ <string>--prefix=master</string>
+ </array>
+
+ <!-- Hack to keep buildbot running even if it crashes -->
+ <key>QueueDirectories</key>
+ <array> <string>/</string> </array>
+
+ <key>KeepAlive</key>
+ <dict>
+ <key>SuccessfulExit</key>
+ <false/>
+ </dict>
+
+ <key>RunAtLoad</key>
+ <true/>
+
+ <key>StandardErrorPath</key>
+ <string>/var/log/build_master.log</string>
+</dict>
+</plist>
diff --git a/buildbot/contrib/OS-X/net.sourceforge.buildbot.slave.plist b/buildbot/contrib/OS-X/net.sourceforge.buildbot.slave.plist
new file mode 100644
index 0000000..a5656d8
--- /dev/null
+++ b/buildbot/contrib/OS-X/net.sourceforge.buildbot.slave.plist
@@ -0,0 +1,36 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd
+">
+<plist version="1.0">
+<dict>
+ <key>Label</key>
+ <string>net.sourceforge.buildbot.slave</string>
+
+ <!-- Change this to the user you want to run buildbot as -->
+ <key>UserName</key>
+ <string>buildbot</string>
+
+ <!-- Change this to your buildbot working directory -->
+ <key>WorkingDirectory</key>
+ <string>/Users/buildbot/Buildbot_Slave</string>
+
+ <key>ProgramArguments</key>
+ <array>
+ <string>/usr/bin/twistd</string>
+ <string>--nodaemon</string>
+ <string>--python=buildbot.tac</string>
+ <string>--logfile=buildbot.log</string>
+ <string>--prefix=slave</string>
+ </array>
+
+ <key>KeepAlive</key>
+ <dict>
+ <key>SuccessfulExit</key>
+ <false/>
+ </dict>
+
+ <key>RunAtLoad</key>
+ <true/>
+
+</dict>
+</plist>
diff --git a/buildbot/contrib/README.txt b/buildbot/contrib/README.txt
new file mode 100644
index 0000000..bed1a93
--- /dev/null
+++ b/buildbot/contrib/README.txt
@@ -0,0 +1,44 @@
+Utility scripts, things contributed by users but not strictly a part of
+buildbot:
+
+debugclient.py (and debug.*): debugging gui for buildbot
+
+fakechange.py: connect to a running bb and submit a fake change to trigger
+ builders
+
+generate_changelog.py: generated changelog entry using git. Requires git to
+ be installed.
+
+run_maxq.py: a builder-helper for running maxq under buildbot
+
+svn_buildbot.py: a script intended to be run from a subversion hook-script
+ which submits changes to svn (requires python 2.3)
+
+svnpoller.py: this script is intended to be run from a cronjob, and uses 'svn
+ log' to poll a (possibly remote) SVN repository for changes.
+ For each change it finds, it runs 'buildbot sendchange' to
+ deliver them to a waiting PBChangeSource on a (possibly remote)
+ buildmaster. Modify the svnurl to point at your own SVN
+ repository, and of course the user running the script must have
+ read permissions to that repository. It keeps track of the last
+ revision in a file, change 'fname' to set the location of this
+ state file. Modify the --master argument to the 'buildbot
+ sendchange' command to point at your buildmaster. Contributed
+ by John Pye. Note that if there are multiple changes within a
+ single polling interval, this will miss all but the last one.
+
+svn_watcher.py: adapted from svnpoller.py by Niklaus Giger to add options and
+ run under windows. Runs as a standalone script (it loops
+ internally rather than expecting to run from a cronjob),
+ polls an SVN repository every 10 minutes. It expects the
+ svnurl and buildmaster location as command-line arguments.
+
+viewcvspoll.py: a standalone script which loops every 60 seconds and polls a
+ (local?) MySQL database (presumably maintained by ViewCVS?)
+ for information about new CVS changes, then delivers them
+ over PB to a remote buildmaster's PBChangeSource. Contributed
+ by Stephen Kennedy.
+
+CSS/*.css: alternative HTML stylesheets to make the Waterfall display look
+ prettier. Copy them somewhere, then pass the filename to the
+ css= argument of the Waterfall() constructor.
diff --git a/buildbot/contrib/arch_buildbot.py b/buildbot/contrib/arch_buildbot.py
new file mode 100755
index 0000000..99b065c
--- /dev/null
+++ b/buildbot/contrib/arch_buildbot.py
@@ -0,0 +1,76 @@
+#! /usr/bin/python
+
+# this script is meant to run as an Arch post-commit hook (and also as a
+# pre-commit hook), using the "arch-meta-hook" framework. See
+# http://wiki.gnuarch.org/NdimMetaHook for details. The pre-commit hook
+# creates a list of files (and log comments), while the post-commit hook
+# actually notifies the buildmaster.
+
+# this script doesn't handle partial commits quite right: it will tell the
+# buildmaster that everything changed, not just the filenames you give to
+# 'tla commit'.
+
+import os
+import commands
+import cStringIO
+
+from buildbot.scripts import runner
+
+# Just modify the appropriate values below and then put this file in two
+# places: ~/.arch-params/hooks/ARCHIVE/=precommit/90buildbot.py and
+# ~/.arch-params/hooks/ARCHIVE/=commit/10buildbot.py
+
+master = "localhost:9989"
+username = "myloginname"
+
+# Remember that for this to work, your buildmaster's master.cfg needs to have
+# a c['sources'] list which includes a pb.PBChangeSource instance.
+
+os.chdir(os.getenv("ARCH_TREE_ROOT"))
+filelist = ",,bb-files"
+commentfile = ",,bb-comments"
+
+if os.getenv("ARCH_HOOK_ACTION") == "precommit":
+ files = []
+ out = commands.getoutput("tla changes")
+ for line in cStringIO.StringIO(out).readlines():
+ if line[0] in "AMD": # add, modify, delete
+ files.append(line[3:])
+ if files:
+ f = open(filelist, "w")
+ f.write("".join(files))
+ f.close()
+ # comments
+ logfiles = [f for f in os.listdir(".") if f.startswith("++log.")]
+ if len(logfiles) > 1:
+ print ("Warning, multiple ++log.* files found, getting comments "
+ "from the first one")
+ if logfiles:
+ open(commentfile, "w").write(open(logfiles[0], "r").read())
+
+elif os.getenv("ARCH_HOOK_ACTION") == "commit":
+ revision = os.getenv("ARCH_REVISION")
+
+ files = []
+ if os.path.exists(filelist):
+ f = open(filelist, "r")
+ for line in f.readlines():
+ files.append(line.rstrip())
+ if not files:
+ # buildbot insists upon having at least one modified file (otherwise
+ # the prefix-stripping mechanism will ignore the change)
+ files = ["dummy"]
+
+ if os.path.exists(commentfile):
+ comments = open(commentfile, "r").read()
+ else:
+ comments = "commit from arch"
+
+ c = {'master': master, 'username': username,
+ 'revision': revision, 'comments': comments, 'files': files}
+ runner.sendchange(c, True)
+
+ if os.path.exists(filelist):
+ os.unlink(filelist)
+ if os.path.exists(commentfile):
+ os.unlink(commentfile)
diff --git a/buildbot/contrib/bb_applet.py b/buildbot/contrib/bb_applet.py
new file mode 100755
index 0000000..8430a2f
--- /dev/null
+++ b/buildbot/contrib/bb_applet.py
@@ -0,0 +1,413 @@
+#! /usr/bin/python
+
+# This is a Gnome-2 panel applet that uses the
+# buildbot.status.client.PBListener interface to display a terse summary of
+# the buildmaster. It displays one column per builder, with a box on top for
+# the status of the most recent build (red, green, or orange), and a somewhat
+# smaller box on the bottom for the current state of the builder (white for
+# idle, yellow for building, red for offline). There are tooltips available
+# to tell you which box is which.
+
+# Edit the line at the beginning of the MyApplet class to fill in the host
+# and portnumber of your buildmaster's PBListener status port. Eventually
+# this will move into a preferences dialog, but first we must create a
+# preferences dialog.
+
+# See the notes at the end for installation hints and support files (you
+# cannot simply run this script from the shell). You must create a bonobo
+# .server file that points to this script, and put the .server file somewhere
+# that bonobo will look for it. Only then will this applet appear in the
+# panel's "Add Applet" menu.
+
+# Note: These applets are run in an environment that throws away stdout and
+# stderr. Any logging must be done with syslog or explicitly to a file.
+# Exceptions are particularly annoying in such an environment.
+
+# -Brian Warner, warner@lothar.com
+
+if 0:
+ import sys
+ dpipe = open("/tmp/applet.log", "a", 1)
+ sys.stdout = dpipe
+ sys.stderr = dpipe
+ print "starting"
+
+from twisted.internet import gtk2reactor
+gtk2reactor.install()
+
+import gtk
+import gnomeapplet
+
+# preferences are not yet implemented
+MENU = """
+<popup name="button3">
+ <menuitem name="Connect" verb="Connect" label="Connect"
+ pixtype="stock" pixname="gtk-refresh"/>
+ <menuitem name="Disconnect" verb="Disconnect" label="Disconnect"
+ pixtype="stock" pixname="gtk-stop"/>
+ <menuitem name="Prefs" verb="Props" label="_Preferences..."
+ pixtype="stock" pixname="gtk-properties"/>
+</popup>
+"""
+
+from twisted.spread import pb
+from twisted.cred import credentials
+
+# sigh, these constants should cross the wire as strings, not integers
+SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION = range(5)
+Results = ["success", "warnings", "failure", "skipped", "exception"]
+
+
+class Box:
+
+ def __init__(self, buildername, hbox, tips, size, hslice):
+ self.buildername = buildername
+ self.hbox = hbox
+ self.tips = tips
+ self.state = "idle"
+ self.eta = None
+ self.last_results = None
+ self.last_text = None
+ self.size = size
+ self.hslice = hslice
+
+ def create(self):
+ self.vbox = gtk.VBox(False)
+ l = gtk.Label(".")
+ self.current_box = box = gtk.EventBox()
+ # these size requests are somewhat non-deterministic. I think it
+ # depends upon how large label is, or how much space was already
+ # consumed when the box is added.
+ self.current_box.set_size_request(self.hslice, self.size * 0.75)
+ box.add(l)
+ self.vbox.pack_end(box)
+ self.current_box.modify_bg(gtk.STATE_NORMAL,
+ gtk.gdk.color_parse("gray50"))
+
+ l2 = gtk.Label(".")
+ self.last_box = gtk.EventBox()
+ self.current_box.set_size_request(self.hslice, self.size * 0.25)
+ self.last_box.add(l2)
+ self.vbox.pack_end(self.last_box, True, True)
+ self.vbox.show_all()
+ self.hbox.pack_start(self.vbox, True, True)
+
+ def remove(self):
+ self.hbox.remove(self.box)
+
+ def set_state(self, state):
+ self.state = state
+ self.update()
+
+ def set_eta(self, eta):
+ self.eta = eta
+ self.update()
+
+ def set_last_build_results(self, results):
+ self.last_results = results
+ self.update()
+
+ def set_last_build_text(self, text):
+ self.last_text = text
+ self.update()
+
+ def update(self):
+ currentmap = {"offline": "red",
+ "idle": "white",
+ "waiting": "yellow",
+ "interlocked": "yellow",
+ "building": "yellow",
+ }
+ color = currentmap[self.state]
+ self.current_box.modify_bg(gtk.STATE_NORMAL,
+ gtk.gdk.color_parse(color))
+ lastmap = {None: "gray50",
+ SUCCESS: "green",
+ WARNINGS: "orange",
+ FAILURE: "red",
+ EXCEPTION: "purple",
+ }
+ last_color = lastmap[self.last_results]
+ self.last_box.modify_bg(gtk.STATE_NORMAL,
+ gtk.gdk.color_parse(last_color))
+ current_tip = "%s:\n%s" % (self.buildername, self.state)
+ if self.eta is not None:
+ current_tip += " (ETA=%ds)" % self.eta
+ self.tips.set_tip(self.current_box, current_tip)
+ last_tip = "%s:\n" % self.buildername
+ if self.last_text:
+ last_tip += "\n".join(self.last_text)
+ else:
+ last_tip += "no builds"
+ self.tips.set_tip(self.last_box, last_tip)
+
+
+class MyApplet(pb.Referenceable):
+ # CHANGE THIS TO POINT TO YOUR BUILDMASTER
+ buildmaster = "buildmaster.example.org", 12345
+ filled = None
+
+ def __init__(self, container):
+ self.applet = container
+ self.size = container.get_size()
+ self.hslice = self.size / 4
+ container.set_size_request(self.size, self.size)
+ self.fill_nut()
+ verbs = [("Props", self.menu_preferences),
+ ("Connect", self.menu_connect),
+ ("Disconnect", self.menu_disconnect),
+ ]
+ container.setup_menu(MENU, verbs)
+ self.boxes = {}
+ self.connect()
+
+ def fill(self, what):
+ if self.filled:
+ self.applet.remove(self.filled)
+ self.filled = None
+ self.applet.add(what)
+ self.filled = what
+ self.applet.show_all()
+
+ def fill_nut(self):
+ i = gtk.Image()
+ i.set_from_file("/tmp/nut32.png")
+ self.fill(i)
+
+ def fill_hbox(self):
+ self.hbox = gtk.HBox(True)
+ self.fill(self.hbox)
+
+ def connect(self):
+ host, port = self.buildmaster
+ cf = pb.PBClientFactory()
+ creds = credentials.UsernamePassword("statusClient", "clientpw")
+ d = cf.login(creds)
+ reactor.connectTCP(host, port, cf)
+ d.addCallback(self.connected)
+ return d
+
+ def connected(self, ref):
+ print "connected"
+ ref.notifyOnDisconnect(self.disconnected)
+ self.remote = ref
+ self.remote.callRemote("subscribe", "steps", 5, self)
+ self.fill_hbox()
+ self.tips = gtk.Tooltips()
+ self.tips.enable()
+
+ def disconnect(self):
+ self.remote.broker.transport.loseConnection()
+
+ def disconnected(self, *args):
+ print "disconnected"
+ self.fill_nut()
+
+ def remote_builderAdded(self, buildername, builder):
+ print "builderAdded", buildername
+ box = Box(buildername, self.hbox, self.tips, self.size, self.hslice)
+ self.boxes[buildername] = box
+ box.create()
+ self.applet.set_size_request(self.hslice * len(self.boxes),
+ self.size)
+ d = builder.callRemote("getLastFinishedBuild")
+
+ def _got(build):
+ if build:
+ d1 = build.callRemote("getResults")
+ d1.addCallback(box.set_last_build_results)
+ d2 = build.callRemote("getText")
+ d2.addCallback(box.set_last_build_text)
+ d.addCallback(_got)
+
+ def remote_builderRemoved(self, buildername):
+ self.boxes[buildername].remove()
+ del self.boxes[buildername]
+ self.applet.set_size_request(self.hslice * len(self.boxes),
+ self.size)
+
+ def remote_builderChangedState(self, buildername, state, eta):
+ self.boxes[buildername].set_state(state)
+ self.boxes[buildername].set_eta(eta)
+ print "change", buildername, state, eta
+
+ def remote_buildStarted(self, buildername, build):
+ print "buildStarted", buildername
+
+ def remote_buildFinished(self, buildername, build, results):
+ print "buildFinished", results
+ box = self.boxes[buildername]
+ box.set_eta(None)
+ d1 = build.callRemote("getResults")
+ d1.addCallback(box.set_last_build_results)
+ d2 = build.callRemote("getText")
+ d2.addCallback(box.set_last_build_text)
+
+ def remote_buildETAUpdate(self, buildername, build, eta):
+ self.boxes[buildername].set_eta(eta)
+ print "ETA", buildername, eta
+
+ def remote_stepStarted(self, buildername, build, stepname, step):
+ print "stepStarted", buildername, stepname
+
+ def remote_stepFinished(self, buildername, build, stepname, step, results):
+ pass
+
+ def menu_preferences(self, event, data=None):
+ print "prefs!"
+ p = Prefs(self)
+ p.create()
+
+ def set_buildmaster(self, buildmaster):
+ host, port = buildmaster.split(":")
+ self.buildmaster = host, int(port)
+ self.disconnect()
+ reactor.callLater(0.5, self.connect)
+
+ def menu_connect(self, event, data=None):
+ self.connect()
+
+ def menu_disconnect(self, event, data=None):
+ self.disconnect()
+
+
+class Prefs:
+
+ def __init__(self, parent):
+ self.parent = parent
+
+ def create(self):
+ self.w = w = gtk.Window()
+ v = gtk.VBox()
+ h = gtk.HBox()
+ h.pack_start(gtk.Label("buildmaster (host:port) : "))
+ self.buildmaster_entry = b = gtk.Entry()
+ if self.parent.buildmaster:
+ host, port = self.parent.buildmaster
+ b.set_text("%s:%d" % (host, port))
+ h.pack_start(b)
+ v.add(h)
+
+ b = gtk.Button("Ok")
+ b.connect("clicked", self.done)
+ v.add(b)
+
+ w.add(v)
+ w.show_all()
+
+ def done(self, widget):
+ buildmaster = self.buildmaster_entry.get_text()
+ self.parent.set_buildmaster(buildmaster)
+ self.w.unmap()
+
+
+def factory(applet, iid):
+ MyApplet(applet)
+ applet.show_all()
+ return True
+
+
+from twisted.internet import reactor
+
+# instead of reactor.run(), we do the following:
+reactor.startRunning()
+reactor.simulate()
+gnomeapplet.bonobo_factory("OAFIID:GNOME_Buildbot_Factory",
+ gnomeapplet.Applet.__gtype__,
+ "buildbot", "0", factory)
+
+# code ends here: bonobo_factory runs gtk.mainloop() internally and
+# doesn't return until the program ends
+
+# SUPPORTING FILES:
+
+# save the following as ~/lib/bonobo/servers/bb_applet.server, and update all
+# the pathnames to match your system
+bb_applet_server = """
+<oaf_info>
+
+<oaf_server iid="OAFIID:GNOME_Buildbot_Factory"
+ type="exe"
+ location="/home/warner/stuff/buildbot-trunk/contrib/bb_applet.py">
+
+ <oaf_attribute name="repo_ids" type="stringv">
+ <item value="IDL:Bonobo/GenericFactory:1.0"/>
+ <item value="IDL:Bonobo/Unknown:1.0"/>
+ </oaf_attribute>
+ <oaf_attribute name="name" type="string" value="Buildbot Factory"/>
+ <oaf_attribute name="description" type="string" value="Test"/>
+</oaf_server>
+
+<oaf_server iid="OAFIID:GNOME_Buildbot"
+ type="factory"
+ location="OAFIID:GNOME_Buildbot_Factory">
+
+ <oaf_attribute name="repo_ids" type="stringv">
+ <item value="IDL:GNOME/Vertigo/PanelAppletShell:1.0"/>
+ <item value="IDL:Bonobo/Control:1.0"/>
+ <item value="IDL:Bonobo/Unknown:1.0"/>
+ </oaf_attribute>
+ <oaf_attribute name="name" type="string" value="Buildbot"/>
+ <oaf_attribute name="description" type="string"
+ value="Watch Buildbot status"
+ />
+ <oaf_attribute name="panel:category" type="string" value="Utility"/>
+ <oaf_attribute name="panel:icon" type="string"
+ value="/home/warner/stuff/buildbot-trunk/doc/hexnut32.png"
+ />
+
+</oaf_server>
+
+</oaf_info>
+"""
+
+# a quick rundown on the Gnome2 applet scheme (probably wrong: there are
+# better docs out there that you should be following instead)
+# http://www.pycage.de/howto_bonobo.html describes a lot of
+# the base Bonobo stuff.
+# http://www.daa.com.au/pipermail/pygtk/2002-September/003393.html
+
+# bb_applet.server must be in your $BONOBO_ACTIVATION_PATH . I use
+# ~/lib/bonobo/servers . This environment variable is read by
+# bonobo-activation-server, so it must be set before you start any Gnome
+# stuff. I set it in ~/.bash_profile . You can also put it in
+# /usr/lib/bonobo/servers/ , which is probably on the default
+# $BONOBO_ACTIVATION_PATH, so you won't have to update anything.
+
+# It is safest to put this in place before bonobo-activation-server is
+# started, which may mean before any Gnome program is running. It may or may
+# not detect bb_applet.server if it is installed afterwards.. there seem to
+# be hooks, some of which involve FAM, but I never managed to make them work.
+# The file must have a name that ends in .server or it will be ignored.
+
+# The .server file registers two OAF ids and tells the activation-server how
+# to create those objects. The first is the GNOME_Buildbot_Factory, and is
+# created by running the bb_applet.py script. The second is the
+# GNOME_Buildbot applet itself, and is created by asking the
+# GNOME_Buildbot_Factory to make it.
+
+# gnome-panel's "Add To Panel" menu will gather all the OAF ids that claim
+# to implement the "IDL:GNOME/Vertigo/PanelAppletShell:1.0" in its
+# "repo_ids" attribute. The sub-menu is determined by the "panel:category"
+# attribute. The icon comes from "panel:icon", the text displayed in the
+# menu comes from "name", the text in the tool-tip comes from "description".
+
+# The factory() function is called when a new applet is created. It receives
+# a container that should be populated with the actual applet contents (in
+# this case a Button).
+
+# If you're hacking on the code, just modify bb_applet.py and then kill -9
+# the running applet: the panel will ask you if you'd like to re-load the
+# applet, and when you say 'yes', bb_applet.py will be re-executed. Note that
+# 'kill PID' won't work because the program is sitting in C code, and SIGINT
+# isn't delivered until after it surfaces to python, which will be never.
+
+# Running bb_applet.py by itself will result in a factory instance being
+# created and then sitting around forever waiting for the activation-server
+# to ask it to make an applet. This isn't very useful.
+
+# The "location" filename in bb_applet.server must point to bb_applet.py, and
+# bb_applet.py must be executable.
+
+# Enjoy!
+# -Brian Warner
diff --git a/buildbot/contrib/bzr_buildbot.py b/buildbot/contrib/bzr_buildbot.py
new file mode 100755
index 0000000..cc32350
--- /dev/null
+++ b/buildbot/contrib/bzr_buildbot.py
@@ -0,0 +1,467 @@
+# Copyright (C) 2008-2009 Canonical
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""\
+bzr buildbot integration
+========================
+
+This file contains both bzr commit/change hooks and a bzr poller.
+
+------------
+Requirements
+------------
+
+This has been tested with buildbot 0.7.9, bzr 1.10, and Twisted 8.1.0. It
+should work in subsequent releases.
+
+For the hook to work, Twisted must be installed in the same Python that bzr
+uses.
+
+-----
+Hooks
+-----
+
+To install, put this file in a bzr plugins directory (e.g.,
+~/.bazaar/plugins). Then, in one of your bazaar conf files (e.g.,
+~/.bazaar/locations.conf), set the location you want to connect with buildbot
+with these keys:
+
+- buildbot_on: one of 'commit', 'push, or 'change'. Turns the plugin on to
+ report changes via commit, changes via push, or any changes to the trunk.
+ 'change' is recommended.
+
+- buildbot_server: (required to send to a buildbot master) the URL of the
+ buildbot master to which you will connect (as of this writing, the same
+ server and port to which slaves connect).
+
+- buildbot_port: (optional, defaults to 9989) the port of the buildbot master
+ to which you will connect (as of this writing, the same server and port to
+ which slaves connect)
+
+- buildbot_pqm: (optional, defaults to not pqm) Normally, the user that
+ commits the revision is the user that is responsible for the change. When
+ run in a pqm (Patch Queue Manager, see https://launchpad.net/pqm)
+ environment, the user that commits is the Patch Queue Manager, and the user
+ that committed the *parent* revision is responsible for the change. To turn
+ on the pqm mode, set this value to any of (case-insensitive) "Yes", "Y",
+ "True", or "T".
+
+- buildbot_dry_run: (optional, defaults to not a dry run) Normally, the
+ post-commit hook will attempt to communicate with the configured buildbot
+ server and port. If this parameter is included and any of (case-insensitive)
+ "Yes", "Y", "True", or "T", then the hook will simply print what it would
+ have sent, but not attempt to contact the buildbot master.
+
+- buildbot_send_branch_name: (optional, defaults to not sending the branch
+ name) If your buildbot's bzr source build step uses a repourl, do
+ *not* turn this on. If your buildbot's bzr build step uses a baseURL, then
+ you may set this value to any of (case-insensitive) "Yes", "Y", "True", or
+ "T" to have the buildbot master append the branch name to the baseURL.
+
+When buildbot no longer has a hardcoded password, it will be a configuration
+option here as well.
+
+------
+Poller
+------
+
+Put this file somewhere that your buildbot configuration can import it. Even
+in the same directory as the master.cfg should work. Install the poller in
+the buildbot configuration as with any other change source. Minimally,
+provide a URL that you want to poll (bzr://, bzr+ssh://, or lp:), though make
+sure the buildbot user has necessary privileges. You may also want to specify
+these optional values.
+
+poll_interval: the number of seconds to wait between polls. Defaults to 10
+ minutes.
+
+branch_name: any value to be used as the branch name. Defaults to None, or
+ specify a string, or specify the constants from this file SHORT
+ or FULL to get the short branch name or full branch address.
+
+blame_merge_author: normally, the user that commits the revision is the user
+ that is responsible for the change. When run in a pqm
+ (Patch Queue Manager, see https://launchpad.net/pqm)
+ environment, the user that commits is the Patch Queue
+ Manager, and the user that committed the merged, *parent*
+ revision is responsible for the change. set this value to
+ True if this is pointed against a PQM-managed branch.
+
+-------------------
+Contact Information
+-------------------
+
+Maintainer/author: gary.poster@canonical.com
+"""
+
+try:
+ import buildbot.util
+ import buildbot.changes.base
+ import buildbot.changes.changes
+except ImportError:
+ DEFINE_POLLER = False
+else:
+ DEFINE_POLLER = True
+import bzrlib.branch
+import bzrlib.errors
+import bzrlib.trace
+import twisted.cred.credentials
+import twisted.internet.base
+import twisted.internet.defer
+import twisted.internet.reactor
+import twisted.internet.selectreactor
+import twisted.internet.task
+import twisted.internet.threads
+import twisted.python.log
+import twisted.spread.pb
+
+
+#############################################################################
+# This is the code that the poller and the hooks share.
+
+def generate_change(branch,
+ old_revno=None, old_revid=None,
+ new_revno=None, new_revid=None,
+ blame_merge_author=False):
+ """Return a dict of information about a change to the branch.
+
+ Dict has keys of "files", "who", "comments", and "revision", as used by
+ the buildbot Change (and the PBChangeSource).
+
+ If only the branch is given, the most recent change is returned.
+
+ If only the new_revno is given, the comparison is expected to be between
+ it and the previous revno (new_revno -1) in the branch.
+
+ Passing old_revid and new_revid is only an optimization, included because
+ bzr hooks usually provide this information.
+
+ blame_merge_author means that the author of the merged branch is
+ identified as the "who", not the person who committed the branch itself.
+ This is typically used for PQM.
+ """
+ change = {} # files, who, comments, revision; NOT branch (= branch.nick)
+ if new_revno is None:
+ new_revno = branch.revno()
+ if new_revid is None:
+ new_revid = branch.get_rev_id(new_revno)
+ # TODO: This falls over if this is the very first revision
+ if old_revno is None:
+ old_revno = new_revno -1
+ if old_revid is None:
+ old_revid = branch.get_rev_id(old_revno)
+ repository = branch.repository
+ new_rev = repository.get_revision(new_revid)
+ if blame_merge_author:
+ # this is a pqm commit or something like it
+ change['who'] = repository.get_revision(
+ new_rev.parent_ids[-1]).get_apparent_author()
+ else:
+ change['who'] = new_rev.get_apparent_author()
+ # maybe useful to know:
+ # name, email = bzrtools.config.parse_username(change['who'])
+ change['comments'] = new_rev.message
+ change['revision'] = new_revno
+ files = change['files'] = []
+ changes = repository.revision_tree(new_revid).changes_from(
+ repository.revision_tree(old_revid))
+ for (collection, name) in ((changes.added, 'ADDED'),
+ (changes.removed, 'REMOVED'),
+ (changes.modified, 'MODIFIED')):
+ for info in collection:
+ path = info[0]
+ kind = info[2]
+ files.append(' '.join([path, kind, name]))
+ for info in changes.renamed:
+ oldpath, newpath, id, kind, text_modified, meta_modified = info
+ elements = [oldpath, kind,'RENAMED', newpath]
+ if text_modified or meta_modified:
+ elements.append('MODIFIED')
+ files.append(' '.join(elements))
+ return change
+
+#############################################################################
+# poller
+
+# We don't want to make the hooks unnecessarily depend on buildbot being
+# installed locally, so we conditionally create the BzrPoller class.
+if DEFINE_POLLER:
+
+ FULL = object()
+ SHORT = object()
+
+
+ class BzrPoller(buildbot.changes.base.ChangeSource,
+ buildbot.util.ComparableMixin):
+
+ compare_attrs = ['url']
+
+ def __init__(self, url, poll_interval=10*60, blame_merge_author=False,
+ branch_name=None):
+ # poll_interval is in seconds, so default poll_interval is 10
+ # minutes.
+ # bzr+ssh://bazaar.launchpad.net/~launchpad-pqm/launchpad/devel/
+ # works, lp:~launchpad-pqm/launchpad/devel/ doesn't without help.
+ if url.startswith('lp:'):
+ url = 'bzr+ssh://bazaar.launchpad.net/' + url[3:]
+ self.url = url
+ self.poll_interval = poll_interval
+ self.loop = twisted.internet.task.LoopingCall(self.poll)
+ self.blame_merge_author = blame_merge_author
+ self.branch_name = branch_name
+
+ def startService(self):
+ twisted.python.log.msg("BzrPoller(%s) starting" % self.url)
+ buildbot.changes.base.ChangeSource.startService(self)
+ twisted.internet.reactor.callWhenRunning(
+ self.loop.start, self.poll_interval)
+ for change in reversed(self.parent.changes):
+ if change.branch == self.url:
+ self.last_revision = change.revision
+ break
+ else:
+ self.last_revision = None
+ self.polling = False
+
+ def stopService(self):
+ twisted.python.log.msg("BzrPoller(%s) shutting down" % self.url)
+ self.loop.stop()
+ return buildbot.changes.base.ChangeSource.stopService(self)
+
+ def describe(self):
+ return "BzrPoller watching %s" % self.url
+
+ @twisted.internet.defer.inlineCallbacks
+ def poll(self):
+ if self.polling: # this is called in a loop, and the loop might
+ # conceivably overlap.
+ return
+ self.polling = True
+ try:
+ # On a big tree, even individual elements of the bzr commands
+ # can take awhile. So we just push the bzr work off to a
+ # thread.
+ try:
+ changes = yield twisted.internet.threads.deferToThread(
+ self.getRawChanges)
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except:
+ # we'll try again next poll. Meanwhile, let's report.
+ twisted.python.log.err()
+ else:
+ for change in changes:
+ yield self.addChange(
+ buildbot.changes.changes.Change(**change))
+ self.last_revision = change['revision']
+ finally:
+ self.polling = False
+
+ def getRawChanges(self):
+ branch = bzrlib.branch.Branch.open_containing(self.url)[0]
+ if self.branch_name is FULL:
+ branch_name = self.url
+ elif self.branch_name is SHORT:
+ branch_name = branch.nick
+ else: # presumably a string or maybe None
+ branch_name = self.branch_name
+ changes = []
+ change = generate_change(
+ branch, blame_merge_author=self.blame_merge_author)
+ if (self.last_revision is None or
+ change['revision'] > self.last_revision):
+ change['branch'] = branch_name
+ changes.append(change)
+ if self.last_revision is not None:
+ while self.last_revision + 1 < change['revision']:
+ change = generate_change(
+ branch, new_revno=change['revision']-1,
+ blame_merge_author=self.blame_merge_author)
+ change['branch'] = branch_name
+ changes.append(change)
+ changes.reverse()
+ return changes
+
+ def addChange(self, change):
+ d = twisted.internet.defer.Deferred()
+ def _add_change():
+ d.callback(
+ self.parent.addChange(change))
+ twisted.internet.reactor.callLater(0, _add_change)
+ return d
+
+#############################################################################
+# hooks
+
+HOOK_KEY = 'buildbot_on'
+SERVER_KEY = 'buildbot_server'
+PORT_KEY = 'buildbot_port'
+DRYRUN_KEY = 'buildbot_dry_run'
+PQM_KEY = 'buildbot_pqm'
+SEND_BRANCHNAME_KEY = 'buildbot_send_branch_name'
+
+PUSH_VALUE = 'push'
+COMMIT_VALUE = 'commit'
+CHANGE_VALUE = 'change'
+
+def _is_true(config, key):
+ val = config.get_user_option(key)
+ return val is not None and val.lower().strip() in (
+ 'y', 'yes', 't', 'true')
+
+def _installed_hook(branch):
+ value = branch.get_config().get_user_option(HOOK_KEY)
+ if value is not None:
+ value = value.strip().lower()
+ if value not in (PUSH_VALUE, COMMIT_VALUE, CHANGE_VALUE):
+ raise bzrlib.errors.BzrError(
+ '%s, if set, must be one of %s, %s, or %s' % (
+ HOOK_KEY, PUSH_VALUE, COMMIT_VALUE, CHANGE_VALUE))
+ return value
+
+##########################
+# Work around Twisted bug.
+# See http://twistedmatrix.com/trac/ticket/3591
+import operator
+import socket
+from twisted.internet import defer
+from twisted.python import failure
+
+# replaces twisted.internet.thread equivalent
+def _putResultInDeferred(reactor, deferred, f, args, kwargs):
+ """
+ Run a function and give results to a Deferred.
+ """
+ try:
+ result = f(*args, **kwargs)
+ except:
+ f = failure.Failure()
+ reactor.callFromThread(deferred.errback, f)
+ else:
+ reactor.callFromThread(deferred.callback, result)
+
+# would be a proposed addition. deferToThread could use it
+def deferToThreadInReactor(reactor, f, *args, **kwargs):
+ """
+ Run function in thread and return result as Deferred.
+ """
+ d = defer.Deferred()
+ reactor.callInThread(_putResultInDeferred, reactor, d, f, args, kwargs)
+ return d
+
+# uses its own reactor for the threaded calls, unlike Twisted's
+class ThreadedResolver(twisted.internet.base.ThreadedResolver):
+ def getHostByName(self, name, timeout = (1, 3, 11, 45)):
+ if timeout:
+ timeoutDelay = reduce(operator.add, timeout)
+ else:
+ timeoutDelay = 60
+ userDeferred = defer.Deferred()
+ lookupDeferred = deferToThreadInReactor(
+ self.reactor, socket.gethostbyname, name)
+ cancelCall = self.reactor.callLater(
+ timeoutDelay, self._cleanup, name, lookupDeferred)
+ self._runningQueries[lookupDeferred] = (userDeferred, cancelCall)
+ lookupDeferred.addBoth(self._checkTimeout, name, lookupDeferred)
+ return userDeferred
+##########################
+
+def send_change(branch, old_revno, old_revid, new_revno, new_revid, hook):
+ config = branch.get_config()
+ server = config.get_user_option(SERVER_KEY)
+ if not server:
+ bzrlib.trace.warning(
+ 'bzr_buildbot: ERROR. If %s is set, %s must be set',
+ HOOK_KEY, SERVER_KEY)
+ return
+ change = generate_change(
+ branch, old_revno, old_revid, new_revno, new_revid,
+ blame_merge_author=_is_true(config, PQM_KEY))
+ if _is_true(config, SEND_BRANCHNAME_KEY):
+ change['branch'] = branch.nick
+ # as of this writing (in Buildbot 0.7.9), 9989 is the default port when
+ # you make a buildbot master.
+ port = int(config.get_user_option(PORT_KEY) or 9989)
+ # if dry run, stop.
+ if _is_true(config, DRYRUN_KEY):
+ bzrlib.trace.note("bzr_buildbot DRY RUN "
+ "(*not* sending changes to %s:%d on %s)",
+ server, port, hook)
+ keys = change.keys()
+ keys.sort()
+ for k in keys:
+ bzrlib.trace.note("[%10s]: %s", k, change[k])
+ return
+ # We instantiate our own reactor so that this can run within a server.
+ reactor = twisted.internet.selectreactor.SelectReactor()
+ # See other reference to http://twistedmatrix.com/trac/ticket/3591
+ # above. This line can go away with a release of Twisted that addresses
+ # this issue.
+ reactor.resolver = ThreadedResolver(reactor)
+ pbcf = twisted.spread.pb.PBClientFactory()
+ reactor.connectTCP(server, port, pbcf)
+ deferred = pbcf.login(
+ twisted.cred.credentials.UsernamePassword('change', 'changepw'))
+
+ def sendChanges(remote):
+ """Send changes to buildbot."""
+ bzrlib.trace.mutter("bzrbuildout sending changes: %s", change)
+ return remote.callRemote('addChange', change)
+
+ deferred.addCallback(sendChanges)
+
+ def quit(ignore, msg):
+ bzrlib.trace.note("bzrbuildout: %s", msg)
+ reactor.stop()
+
+ def failed(failure):
+ bzrlib.trace.warning("bzrbuildout: FAILURE\n %s", failure)
+ reactor.stop()
+
+ deferred.addCallback(quit, "SUCCESS")
+ deferred.addErrback(failed)
+ reactor.callLater(60, quit, None, "TIMEOUT")
+ bzrlib.trace.note(
+ "bzr_buildbot: SENDING CHANGES to buildbot master %s:%d on %s",
+ server, port, hook)
+ reactor.run(installSignalHandlers=False) # run in a thread when in server
+
+def post_commit(local_branch, master_branch, # branch is the master_branch
+ old_revno, old_revid, new_revno, new_revid):
+ if _installed_hook(master_branch) == COMMIT_VALUE:
+ send_change(master_branch,
+ old_revid, old_revid, new_revno, new_revid, COMMIT_VALUE)
+
+def post_push(result):
+ if _installed_hook(result.target_branch) == PUSH_VALUE:
+ send_change(result.target_branch,
+ result.old_revid, result.old_revid,
+ result.new_revno, result.new_revid, PUSH_VALUE)
+
+def post_change_branch_tip(result):
+ if _installed_hook(result.branch) == CHANGE_VALUE:
+ send_change(result.branch,
+ result.old_revid, result.old_revid,
+ result.new_revno, result.new_revid, CHANGE_VALUE)
+
+bzrlib.branch.Branch.hooks.install_named_hook(
+ 'post_commit', post_commit,
+ 'send change to buildbot master')
+bzrlib.branch.Branch.hooks.install_named_hook(
+ 'post_push', post_push,
+ 'send change to buildbot master')
+bzrlib.branch.Branch.hooks.install_named_hook(
+ 'post_change_branch_tip', post_change_branch_tip,
+ 'send change to buildbot master')
diff --git a/buildbot/contrib/darcs_buildbot.py b/buildbot/contrib/darcs_buildbot.py
new file mode 100755
index 0000000..a8097d0
--- /dev/null
+++ b/buildbot/contrib/darcs_buildbot.py
@@ -0,0 +1,173 @@
+#! /usr/bin/python
+
+# This is a script which delivers Change events from Darcs to the buildmaster
+# each time a patch is pushed into a repository. Add it to the 'apply' hook
+# on your canonical "central" repository, by putting something like the
+# following in the _darcs/prefs/defaults file of that repository:
+#
+# apply posthook /PATH/TO/darcs_buildbot.py BUILDMASTER:PORT
+# apply run-posthook
+#
+# (the second command is necessary to avoid the usual "do you really want to
+# run this hook" prompt. Note that you cannot have multiple 'apply posthook'
+# lines: if you need this, you must create a shell script to run all your
+# desired commands, then point the posthook at that shell script.)
+#
+# Note that both Buildbot and Darcs must be installed on the repository
+# machine. You will also need the Python/XML distribution installed (the
+# "python2.3-xml" package under debian).
+
+import os
+import sys
+import commands
+import xml
+
+from buildbot.clients import sendchange
+from twisted.internet import defer, reactor
+from xml.dom import minidom
+
+
+def getText(node):
+    """Return the concatenated text of *node*'s direct text children."""
+    return "".join([cn.data
+                    for cn in node.childNodes
+                    if cn.nodeType == cn.TEXT_NODE])
+
+
+def getTextFromChild(parent, childtype):
+    """Return the text of the first *childtype* element under *parent*,
+    or "" if there is no such child."""
+    children = parent.getElementsByTagName(childtype)
+    if not children:
+        return ""
+    return getText(children[0])
+
+
+def makeChange(p):
+    """Convert one darcs <patch> DOM element into a buildbot Change dict
+    with 'username', 'revision', 'comments' and 'files' keys."""
+    author = p.getAttribute("author")
+    revision = p.getAttribute("hash")
+    comments = (getTextFromChild(p, "name") + "\n" +
+                getTextFromChild(p, "comment"))
+
+    summary = p.getElementsByTagName("summary")[0]
+    files = []
+    for filenode in summary.childNodes:
+        if filenode.nodeName in ("add_file", "modify_file", "remove_file"):
+            filename = getText(filenode).strip()
+            files.append(filename)
+        elif filenode.nodeName == "move":
+            # NOTE(review): only the destination of a move is recorded;
+            # from_name is unused. Confirm whether the source path should
+            # count as a changed file too.
+            from_name = filenode.getAttribute("from")
+            to_name = filenode.getAttribute("to")
+            files.append(to_name)
+
+    # note that these are all unicode. Because PB can't handle unicode, we
+    # encode them into ascii, which will blow up early if there's anything we
+    # can't get to the far side. When we move to something that *can* handle
+    # unicode (like newpb), remove this.
+    author = author.encode("ascii", "replace")
+    comments = comments.encode("ascii", "replace")
+    files = [f.encode("ascii", "replace") for f in files]
+    revision = revision.encode("ascii", "replace")
+
+    change = {
+        # note: this is more likely to be a full email address, which would
+        # make the left-hand "Changes" column kind of wide. The buildmaster
+        # should probably be improved to display an abbreviation of the
+        # username.
+        'username': author,
+        'revision': revision,
+        'comments': comments,
+        'files': files,
+    }
+    return change
+
+
+def getChangesFromCommand(cmd, count):
+    """Run *cmd* ('darcs changes --xml-output ...'), parse its XML output,
+    and return up to *count* Change dicts, newest first.
+
+    Exits the process (status 1) if the output is not parseable XML.
+    """
+    out = commands.getoutput(cmd)
+    try:
+        doc = minidom.parseString(out)
+    except xml.parsers.expat.ExpatError, e:
+        print "failed to parse XML"
+        print str(e)
+        print "purported XML is:"
+        print "--BEGIN--"
+        print out
+        print "--END--"
+        sys.exit(1)
+
+    c = doc.getElementsByTagName("changelog")[0]
+    changes = []
+    for i, p in enumerate(c.getElementsByTagName("patch")):
+        if i >= count:
+            break
+        changes.append(makeChange(p))
+    return changes
+
+
+def getSomeChanges(count):
+    """Return the *count* most recent darcs changes, newest first."""
+    cmd = "darcs changes --last=%d --xml-output --summary" % count
+    return getChangesFromCommand(cmd, count)
+
+
+LASTCHANGEFILE = ".darcs_buildbot-lastchange"
+
+
+def findNewChanges():
+    """Return the changes made since the last run, oldest first.
+
+    The hash of the last delivered change is persisted in LASTCHANGEFILE;
+    on the very first run only the single most recent change is returned.
+    Raises RuntimeError if the remembered change cannot be found within
+    the last ~100 changes.
+    """
+    if os.path.exists(LASTCHANGEFILE):
+        f = open(LASTCHANGEFILE, "r")
+        lastchange = f.read()
+        f.close()
+    else:
+        return getSomeChanges(1)
+    lookback = 10
+    while True:
+        changes = getSomeChanges(lookback)
+        # getSomeChanges returns newest-first, so changes[0] is the newest.
+        # we want to scan the newest first until we find the changes we sent
+        # last time, then deliver everything newer than that (and send them
+        # oldest-first).
+        for i, c in enumerate(changes):
+            if c['revision'] == lastchange:
+                newchanges = changes[:i]
+                newchanges.reverse()
+                return newchanges
+        # Double the window each pass; give up past 100 changes.
+        if 2*lookback > 100:
+            raise RuntimeError("unable to find our most recent change "
+                               "(%s) in the last %d changes" % (lastchange,
+                                                                lookback))
+        lookback = 2*lookback
+
+
+def sendChanges(master):
+    """Deliver all new changes to the buildmaster at *master* ("host:port")
+    and then record the newest delivered revision in LASTCHANGEFILE."""
+    changes = findNewChanges()
+    s = sendchange.Sender(master, None)
+
+    d = defer.Deferred()
+    # Fire the callback chain as soon as the reactor starts.
+    reactor.callLater(0, d.callback, None)
+
+    if not changes:
+        print "darcs_buildbot.py: weird, no changes to send"
+    elif len(changes) == 1:
+        print "sending 1 change to buildmaster:"
+    else:
+        print "sending %d changes to buildmaster:" % len(changes)
+
+    def _send(res, c):
+        branch = None
+        print " %s" % c['revision']
+        return s.send(branch, c['revision'], c['comments'], c['files'],
+                      c['username'])
+    # Chain the sends so they run sequentially, oldest first.
+    for c in changes:
+        d.addCallback(_send, c)
+
+    d.addCallbacks(s.printSuccess, s.printFailure)
+    d.addBoth(s.stop)
+    s.run()
+
+    # Remember the newest revision so the next run knows where to resume.
+    if changes:
+        lastchange = changes[-1]['revision']
+        f = open(LASTCHANGEFILE, "w")
+        f.write(lastchange)
+        f.close()
+
+
+if __name__ == '__main__':
+    # usage: darcs_buildbot.py BUILDMASTER:PORT
+    MASTER = sys.argv[1]
+    sendChanges(MASTER)
diff --git a/buildbot/contrib/fakechange.py b/buildbot/contrib/fakechange.py
new file mode 100755
index 0000000..52cc766
--- /dev/null
+++ b/buildbot/contrib/fakechange.py
@@ -0,0 +1,82 @@
+#! /usr/bin/python
+
+"""
+This is an example of how to use the remote ChangeMaster interface, which is
+a port that allows a remote program to inject Changes into the buildmaster.
+
+The buildmaster can either pull changes in from external sources (see
+buildbot.changes.changes.ChangeMaster.addSource for an example), or those
+changes can be pushed in from outside. This script shows how to do the
+pushing.
+
+Changes are just dictionaries with three keys:
+
+ 'who': a simple string with a username. Responsibility for this change will
+ be assigned to the named user (if something goes wrong with the build, they
+ will be blamed for it).
+
+ 'files': a list of strings, each with a filename relative to the top of the
+ source tree.
+
+ 'comments': a (multiline) string with checkin comments.
+
+Each call to .addChange injects a single Change object: each Change
+represents multiple files, all changed by the same person, and all with the
+same checkin comments.
+
+The port that this script connects to is the same 'slavePort' that the
+buildslaves and other debug tools use. The ChangeMaster service will only be
+available on that port if 'change' is in the list of services passed to
+buildbot.master.makeApp (this service is turned ON by default).
+"""
+
+import sys
+import commands
+import random
+import os.path
+
+from twisted.spread import pb
+from twisted.cred import credentials
+from twisted.internet import reactor
+from twisted.python import log
+
+
+def done(*args):
+    # Stop the reactor once the change has been delivered.
+    reactor.stop()
+
+
+users = ('zaphod', 'arthur', 'trillian', 'marvin', 'sbfast')
+dirs = ('src', 'doc', 'tests')
+sources = ('foo.c', 'bar.c', 'baz.c', 'Makefile')
+docs = ('Makefile', 'index.html', 'manual.texinfo')
+
+
+def makeFilename():
+    """Build a plausible random repository-relative filename."""
+    d = random.choice(dirs)
+    if d in ('src', 'tests'):
+        f = random.choice(sources)
+    else:
+        f = random.choice(docs)
+    return os.path.join(d, f)
+
+
+def send_change(remote):
+    """Inject one fake change over the PB *remote*: a random user, files
+    taken from argv (or one random filename), and 'fortune' output as the
+    commit comment. Stops the reactor when the change is accepted."""
+    who = random.choice(users)
+    if len(sys.argv) > 1:
+        files = sys.argv[1:]
+    else:
+        files = [makeFilename()]
+    comments = commands.getoutput("fortune")
+    change = {'who': who, 'files': files, 'comments': comments}
+    d = remote.callRemote('addChange', change)
+    d.addCallback(done)
+    print "%s: %s" % (who, " ".join(files))
+
+
+# Connect to the buildmaster's change port on localhost:8007, log in with
+# the standard change credentials, and inject a single fake change.
+f = pb.PBClientFactory()
+d = f.login(credentials.UsernamePassword("change", "changepw"))
+reactor.connectTCP("localhost", 8007, f)
+# On failure, log the error and shut the reactor down.
+err = lambda f: (log.err(), reactor.stop())
+d.addCallback(send_change).addErrback(err)
+
+reactor.run()
diff --git a/buildbot/contrib/generate_changelog.py b/buildbot/contrib/generate_changelog.py
new file mode 100755
index 0000000..bff7389
--- /dev/null
+++ b/buildbot/contrib/generate_changelog.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+#
+# Copyright 2008
+# Steve 'Ashcrow' Milner <smilner+buildbot@redhat.com>
+#
+# This software may be freely redistributed under the terms of the GNU
+# general public license.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+"""
+Generates changelog information using git.
+"""
+
+__docformat__ = 'restructuredtext'
+
+
+import os
+import sys
+
+
+def print_err(msg):
+    """
+    Wrapper to make printing to stderr nicer; appends a newline.
+
+    :Parameters:
+       - `msg`: the message to print.
+    """
+    sys.stderr.write(msg)
+    sys.stderr.write('\n')
+
+
+def usage():
+    """
+    Prints out usage information to stderr.
+    """
+    print_err('Usage: %s git-binary since' % sys.argv[0])
+    print_err(('Example: %s /usr/bin/git f5067523dfae9c7cdefc82'
+               '8721ec593ac7be62db' % sys.argv[0]))
+
+
+def main(args):
+    """
+    Main entry point: print a ChangeLog built from 'git log' since a commit.
+
+    :Parameters:
+       - `args`: same as sys.argv[1:] (expects git binary path and a commit)
+
+    Returns 0 on success, 1 on missing arguments or inaccessible binary.
+    """
+    # Make sure we have the arguments we need, else show usage
+    try:
+        git_bin = args[0]
+        since = args[1]
+    except IndexError, ie:
+        usage()
+        return 1
+
+    # The git binary must exist and be executable.
+    if not os.access(git_bin, os.X_OK):
+        print_err('Can not access %s' % git_bin)
+        return 1
+
+    # Open a pipe and force the format
+    pipe = os.popen((git_bin + ' log --pretty="format:%ad %ae%n'
+                     ' * %s" ' + since + '..'))
+    print pipe.read()
+    pipe.close()
+    return 0
+
+
+if __name__ == '__main__':
+    # Exit with main()'s return code.
+    raise SystemExit(main(sys.argv[1:]))
diff --git a/buildbot/contrib/git_buildbot.py b/buildbot/contrib/git_buildbot.py
new file mode 100755
index 0000000..0185035
--- /dev/null
+++ b/buildbot/contrib/git_buildbot.py
@@ -0,0 +1,285 @@
+#! /usr/bin/env python
+
+# This script expects one line for each new revision on the form
+# <oldrev> <newrev> <refname>
+#
+# For example:
+# aa453216d1b3e49e7f6f98441fa56946ddcd6a20
+# 68f7abf4e6f922807889f52bc043ecd31b79f814 refs/heads/master
+#
+# Each of these changes will be passed to the buildbot server along
+# with any other change information we manage to extract from the
+# repository.
+#
+# This script is meant to be run from hooks/post-receive in the git
+# repository. It can also be run at client side with hooks/post-merge
+# after using this wrapper:
+
+#!/bin/sh
+# PRE=$(git rev-parse 'HEAD@{1}')
+# POST=$(git rev-parse HEAD)
+# SYMNAME=$(git rev-parse --symbolic-full-name HEAD)
+# echo "$PRE $POST $SYMNAME" | git_buildbot.py
+#
+# Largely based on contrib/hooks/post-receive-email from git.
+
+import commands
+import logging
+import os
+import re
+import sys
+
+from twisted.spread import pb
+from twisted.cred import credentials
+from twisted.internet import reactor
+
+from buildbot.scripts import runner
+from optparse import OptionParser
+
+# Modify this to fit your setup
+
+master = "localhost:9989"
+
+# The GIT_DIR environment variable must have been set up so that any
+# git commands that are executed will operate on the repository we're
+# installed in.
+
+changes = []
+
+
+def connectFailed(error):
+    """Errback: log the failed PB login/connection and re-raise it."""
+    logging.error("Could not connect to %s: %s"
+                  % (master, error.getErrorMessage()))
+    return error
+
+
+def addChange(dummy, remote, changei):
+    """Send the next change from iterator *changei* to the buildmaster.
+
+    Chains itself as a callback so changes are delivered one at a time;
+    closes the PB connection once the iterator is exhausted.
+    """
+    logging.debug("addChange %s, %s" % (repr(remote), repr(changei)))
+    try:
+        c = changei.next()
+    except StopIteration:
+        # All changes sent; drop the connection so the reactor can stop.
+        remote.broker.transport.loseConnection()
+        return None
+
+    logging.info("New revision: %s" % c['revision'][:8])
+    for key, value in c.iteritems():
+        logging.debug(" %s: %s" % (key, value))
+
+    d = remote.callRemote('addChange', c)
+    d.addCallback(addChange, remote, changei)
+    return d
+
+
+def connected(remote):
+    """Callback: start feeding the collected global `changes` to the master."""
+    return addChange(None, remote, changes.__iter__())
+
+
+def grab_commit_info(c, rev):
+    """Fill in c['who'] and c['files'] for commit *rev* using 'git show'."""
+    # Extract information about committer and files using git show
+    f = os.popen("git show --raw --pretty=full %s" % rev, 'r')
+
+    files = []
+
+    while True:
+        line = f.readline()
+        if not line:
+            break
+
+        # Raw-diff lines (":<modes> ... M <path>") name the changed files.
+        m = re.match(r"^:.*[MAD]\s+(.+)$", line)
+        if m:
+            logging.debug("Got file: %s" % m.group(1))
+            files.append(m.group(1))
+            continue
+
+        m = re.match(r"^Author:\s+(.+)$", line)
+        if m:
+            logging.debug("Got author: %s" % m.group(1))
+            c['who'] = m.group(1)
+
+        # Merge commits carry no raw file list; mark them explicitly.
+        if re.match(r"^Merge: .*$", line):
+            files.append('merge')
+
+    c['files'] = files
+    status = f.close()
+    if status:
+        logging.warning("git show exited with status %d" % status)
+
+
+def gen_changes(input, branch):
+    """Parse 'git rev-list --pretty=oneline' output from *input* and append
+    one Change dict per commit to the global `changes` list."""
+    while True:
+        line = input.readline()
+        if not line:
+            break
+
+        logging.debug("Change: %s" % line)
+
+        # NOTE(review): assumes every line matches "<sha> <subject>"; a
+        # malformed line would leave m as None and raise AttributeError —
+        # confirm callers always feed rev-list oneline output.
+        m = re.match(r"^([0-9a-f]+) (.*)$", line.strip())
+        c = {'revision': m.group(1),
+             'comments': m.group(2),
+             'branch': branch,
+             }
+        grab_commit_info(c, m.group(1))
+        changes.append(c)
+
+
+def gen_create_branch_changes(newrev, refname, branch):
+    # A new branch has been created. Generate changes for everything
+    # up to `newrev' which does not exist in any branch but `refname'.
+    #
+    # Note that this may be inaccurate if two new branches are created
+    # at the same time, pointing to the same commit, or if there are
+    # commits that only exists in a common subset of the new branches.
+
+    logging.info("Branch `%s' created" % branch)
+
+    # Shell pipeline: list commits reachable from newrev but from no other
+    # branch, oldest first, one line per commit.
+    f = os.popen("git rev-parse --not --branches"
+                 + "| grep -v $(git rev-parse %s)" % refname
+                 + "| git rev-list --reverse --pretty=oneline --stdin %s" % newrev,
+                 'r')
+
+    gen_changes(f, branch)
+
+    status = f.close()
+    if status:
+        logging.warning("git rev-list exited with status %d" % status)
+
+
+def gen_update_branch_changes(oldrev, newrev, refname, branch):
+    # A branch has been updated. If it was a fast-forward update,
+    # generate Change events for everything between oldrev and newrev.
+    #
+    # In case of a forced update, first generate a "fake" Change event
+    # rewinding the branch to the common ancestor of oldrev and
+    # newrev. Then, generate Change events for each commit between the
+    # common ancestor and newrev.
+
+    logging.info("Branch `%s' updated %s .. %s"
+                 % (branch, oldrev[:8], newrev[:8]))
+
+    baserev = commands.getoutput("git merge-base %s %s" % (oldrev, newrev))
+    logging.debug("oldrev=%s newrev=%s baserev=%s" % (oldrev, newrev, baserev))
+    if baserev != oldrev:
+        # Forced update: synthesize a "rewind" change back to the ancestor.
+        c = {'revision': baserev,
+             'comments': "Rewind branch",
+             'branch': branch,
+             'who': "dummy",
+             }
+        logging.info("Branch %s was rewound to %s" % (branch, baserev[:8]))
+        files = []
+        f = os.popen("git diff --raw %s..%s" % (oldrev, baserev), 'r')
+        while True:
+            line = f.readline()
+            if not line:
+                break
+
+            # NOTE(review): `file` shadows the Python 2 builtin, and an
+            # unmatched line would make re.match() return None and raise
+            # AttributeError — confirm git's raw diff format is guaranteed.
+            file = re.match(r"^:.*[MAD]\s*(.+)$", line).group(1)
+            logging.debug(" Rewound file: %s" % file)
+            files.append(file)
+
+        status = f.close()
+        if status:
+            logging.warning("git diff exited with status %d" % status)
+
+        if files:
+            c['files'] = files
+            changes.append(c)
+
+    if newrev != baserev:
+        # Not a pure rewind
+        f = os.popen("git rev-list --reverse --pretty=oneline %s..%s"
+                     % (baserev, newrev), 'r')
+        gen_changes(f, branch)
+
+        status = f.close()
+        if status:
+            logging.warning("git rev-list exited with status %d" % status)
+
+
+def cleanup(res):
+    # Stop the reactor whether the delivery succeeded or failed.
+    reactor.stop()
+
+
+def process_changes():
+    """Read "<oldrev> <newrev> <refname>" lines from stdin, build Change
+    events for branch creations/updates, and deliver them to the
+    buildmaster configured in the module-level `master`."""
+    # Read branch updates from stdin and generate Change events
+    while True:
+        line = sys.stdin.readline()
+        if not line:
+            break
+
+        [oldrev, newrev, refname] = line.split(None, 2)
+
+        # We only care about regular heads, i.e. branches
+        m = re.match(r"^refs\/heads\/(.+)$", refname)
+        if not m:
+            logging.info("Ignoring refname `%s': Not a branch" % refname)
+            continue
+
+        branch = m.group(1)
+
+        # Find out if the branch was created, deleted or updated. Branches
+        # being deleted aren't really interesting.
+        if re.match(r"^0*$", newrev):
+            logging.info("Branch `%s' deleted, ignoring" % branch)
+            continue
+        elif re.match(r"^0*$", oldrev):
+            gen_create_branch_changes(newrev, refname, branch)
+        else:
+            gen_update_branch_changes(oldrev, newrev, refname, branch)
+
+    # Submit the changes, if any
+    if not changes:
+        logging.warning("No changes found")
+        return
+
+    host, port = master.split(':')
+    port = int(port)
+
+    f = pb.PBClientFactory()
+    d = f.login(credentials.UsernamePassword("change", "changepw"))
+    reactor.connectTCP(host, port, f)
+
+    d.addErrback(connectFailed)
+    d.addCallback(connected)
+    d.addBoth(cleanup)
+
+    reactor.run()
+
+
+def parse_options():
+    """Parse command-line flags: -l/--logfile and -v/--verbose (countable)."""
+    parser = OptionParser()
+    parser.add_option("-l", "--logfile", action="store", type="string",
+                      help="Log to the specified file")
+    parser.add_option("-v", "--verbose", action="count",
+                      help="Be more verbose. Ignored if -l is not specified.")
+    options, args = parser.parse_args()
+    return options
+
+
+# Log errors and critical messages to stderr. Optionally log
+# information to a file as well (we'll set that up later.)
+stderr = logging.StreamHandler(sys.stderr)
+fmt = logging.Formatter("git_buildbot: %(levelname)s: %(message)s")
+stderr.setLevel(logging.ERROR)
+stderr.setFormatter(fmt)
+logging.getLogger().addHandler(stderr)
+logging.getLogger().setLevel(logging.DEBUG)
+
+try:
+    options = parse_options()
+    # Each -v lowers the file-log threshold by one level (10 units),
+    # bottoming out at 0 (log everything).
+    level = logging.WARNING
+    if options.verbose:
+        level -= 10 * options.verbose
+    if level < 0:
+        level = 0
+
+    if options.logfile:
+        logfile = logging.FileHandler(options.logfile)
+        logfile.setLevel(level)
+        fmt = logging.Formatter("%(asctime)s %(levelname)s: %(message)s")
+        logfile.setFormatter(fmt)
+        logging.getLogger().addHandler(logfile)
+
+    process_changes()
+except:
+    # Last-resort handler: record the traceback and exit non-zero.
+    logging.exception("Unhandled exception")
+    sys.exit(1)
diff --git a/buildbot/contrib/hg_buildbot.py b/buildbot/contrib/hg_buildbot.py
new file mode 100755
index 0000000..3be49f6
--- /dev/null
+++ b/buildbot/contrib/hg_buildbot.py
@@ -0,0 +1,49 @@
+#! /usr/bin/python
+
+# This is a script which delivers Change events from Mercurial to the
+# buildmaster each time a changeset is pushed into a repository. Add it to
+# the 'incoming' commit hook on your canonical "central" repository, by
+# putting something like the following in the .hg/hgrc file of that
+# repository:
+#
+# [hooks]
+# incoming.buildbot = /PATH/TO/hg_buildbot.py BUILDMASTER:PORT
+#
+# Note that both Buildbot and Mercurial must be installed on the repository
+# machine.
+
+import os
+import sys
+import commands
+
+from StringIO import StringIO
+from buildbot.scripts import runner
+
+# usage: hg_buildbot.py BUILDMASTER:PORT (invoked as an hg 'incoming' hook)
+MASTER = sys.argv[1]
+
+# Mercurial exports the changeset being processed in HG_NODE.
+CHANGESET_ID = os.environ["HG_NODE"]
+
+# TODO: consider doing 'import mercurial.hg' and extract this information
+# using the native python
+out = commands.getoutput(
+    "hg log -r %s --template '{author}\n{files}\n{desc}'" % CHANGESET_ID)
+
+# The template puts author, files and description on successive lines.
+s = StringIO(out)
+user = s.readline().strip()
+# NOTE: this fail when filenames contain spaces. I cannot find a way to get
+# hg to use some other filename separator.
+files = s.readline().strip().split()
+comments = "".join(s.readlines())
+
+change = {
+    'master': MASTER,
+    # note: this is more likely to be a full email address, which would make
+    # the left-hand "Changes" column kind of wide. The buildmaster should
+    # probably be improved to display an abbreviation of the username.
+    'username': user,
+    'revision': CHANGESET_ID,
+    'comments': comments,
+    'files': files,
+}
+
+# NOTE(review): the second argument presumably makes sendchange run the
+# reactor itself — confirm against buildbot.scripts.runner.sendchange.
+runner.sendchange(change, True)
diff --git a/buildbot/contrib/run_maxq.py b/buildbot/contrib/run_maxq.py
new file mode 100755
index 0000000..8a7fc6b
--- /dev/null
+++ b/buildbot/contrib/run_maxq.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env jython
+
+import sys
+import glob
+
+# usage: run_maxq.py TESTDIR — executes every test listed in TESTDIR/*.tests
+testdir = sys.argv[1]
+
+orderfiles = glob.glob(testdir + '/*.tests')
+
+# wee. just be glad I didn't make this one gigantic nested listcomp.
+# anyway, this builds a once-nested list of files to test.
+
+#open!
+files = [open(fn) for fn in orderfiles]
+
+#create prelim list of lists of files!
+files = [f.readlines() for f in files]
+
+#shwack newlines and filter out empties!
+files = [filter(None, [fn.strip() for fn in fs]) for fs in files]
+
+#prefix with testdir
+files = [[testdir + '/' + fn.strip() for fn in fs] for fs in files]
+
+print "Will run these tests:", files
+
+i = 0
+
+for testlist in files:
+
+    print "==========================="
+    print "running tests from testlist", orderfiles[i]
+    print "---------------------------"
+    i = i + 1
+
+    for test in testlist:
+        print "running test", test
+
+        try:
+            # Run each test script in an isolated copy of current globals.
+            execfile(test, globals().copy())
+
+        except:
+            ei = sys.exc_info()
+            print "TEST FAILURE:", ei[1]
+
+        else:
+            print "SUCCESS"
diff --git a/buildbot/contrib/svn_buildbot.py b/buildbot/contrib/svn_buildbot.py
new file mode 100755
index 0000000..5a671dc
--- /dev/null
+++ b/buildbot/contrib/svn_buildbot.py
@@ -0,0 +1,260 @@
+#!/usr/bin/python
+
+# this requires python >=2.3 for the 'sets' module.
+
+# The sets.py from python-2.3 appears to work fine under python2.2 . To
+# install this script on a host with only python2.2, copy
+# /usr/lib/python2.3/sets.py from a newer python into somewhere on your
+# PYTHONPATH, then edit the #! line above to invoke python2.2
+
+# python2.1 is right out
+
+# If you run this program as part of your SVN post-commit hooks, it will
+# deliver Change notices to a buildmaster that is running a PBChangeSource
+# instance.
+
+# edit your svn-repository/hooks/post-commit file, and add lines that look
+# like this:
+
+'''
+# set up PYTHONPATH to contain Twisted/buildbot perhaps, if not already
+# installed site-wide
+. ~/.environment
+
+/path/to/svn_buildbot.py --repository "$REPOS" --revision "$REV" \
+--bbserver localhost --bbport 9989
+'''
+
+import commands
+import sys
+import os
+import re
+import sets
+
+# We have hackish "-d" handling here rather than in the Options
+# subclass below because a common error will be to not have twisted in
+# PYTHONPATH; we want to be able to print that error to the log if
+# debug mode is on, so we set it up before the imports.
+
+DEBUG = None
+
+# Hackish -d handling (see the note above): pull "-d LOGFILE" out of argv
+# before twisted's option parsing ever sees it.
+if '-d' in sys.argv:
+    i = sys.argv.index('-d')
+    DEBUG = sys.argv[i+1]
+    # Remove both the flag and its argument (second del shifts into place).
+    del sys.argv[i]
+    del sys.argv[i]
+
+if DEBUG:
+    # Redirect all output of this hook to the debug log file.
+    f = open(DEBUG, 'a')
+    sys.stderr = f
+    sys.stdout = f
+
+
+from twisted.internet import defer, reactor
+from twisted.python import usage
+from twisted.spread import pb
+from twisted.cred import credentials
+
+
+class Options(usage.Options):
+    """Command-line options for svn_buildbot.py (twisted.python.usage)."""
+    optParameters = [
+        ['repository', 'r', None,
+         "The repository that was changed."],
+        ['revision', 'v', None,
+         "The revision that we want to examine (default: latest)"],
+        ['bbserver', 's', 'localhost',
+         "The hostname of the server that buildbot is running on"],
+        ['bbport', 'p', 8007,
+         "The port that buildbot is listening on"],
+        ['include', 'f', None,
+         '''\
+Search the list of changed files for this regular expression, and if there is
+at least one match notify buildbot; otherwise buildbot will not do a build.
+You may provide more than one -f argument to try multiple
+patterns. If no filter is given, buildbot will always be notified.'''],
+        # 'filter' shares -f with 'include'; opt_filter aliases opt_include.
+        ['filter', 'f', None, "Same as --include. (Deprecated)"],
+        ['exclude', 'F', None,
+         '''\
+The inverse of --filter. Changed files matching this expression will never
+be considered for a build.
+You may provide more than one -F argument to try multiple
+patterns. Excludes override includes, that is, patterns that match both an
+include and an exclude will be excluded.'''],
+    ]
+    optFlags = [
+        ['dryrun', 'n', "Do not actually send changes"],
+    ]
+
+    def __init__(self):
+        usage.Options.__init__(self)
+        # Accumulators for repeated -f/-F arguments.
+        self._includes = []
+        self._excludes = []
+        self['includes'] = None
+        self['excludes'] = None
+
+    def opt_include(self, arg):
+        # Wrap in .* so the pattern matches anywhere in a filename.
+        self._includes.append('.*%s.*' % (arg, ))
+
+    opt_filter = opt_include
+
+    def opt_exclude(self, arg):
+        self._excludes.append('.*%s.*' % (arg, ))
+
+    def postOptions(self):
+        if self['repository'] is None:
+            raise usage.error("You must pass --repository")
+        # Combine accumulated patterns into single alternation regexes.
+        if self._includes:
+            self['includes'] = '(%s)' % ('|'.join(self._includes), )
+        if self._excludes:
+            self['excludes'] = '(%s)' % ('|'.join(self._excludes), )
+
+
+def split_file_dummy(changed_file):
+    """Split the repository-relative filename into a tuple of (branchname,
+    branch_relative_filename). If you have no branches, this should just
+    return (None, changed_file).
+    """
+    return (None, changed_file)
+
+
+# this version handles repository layouts that look like:
+# trunk/files.. -> trunk
+# branches/branch1/files.. -> branches/branch1
+# branches/branch2/files.. -> branches/branch2
+#
+
+
+def split_file_branches(changed_file):
+    """Map 'trunk/...' and 'branches/<name>/...' layouts to a tuple of
+    (branchname, branch-relative filename); raises RuntimeError for any
+    path outside trunk/ and branches/."""
+    pieces = changed_file.split(os.sep)
+    if pieces[0] == 'branches':
+        return (os.path.join(*pieces[:2]),
+                os.path.join(*pieces[2:]))
+    if pieces[0] == 'trunk':
+        return (pieces[0], os.path.join(*pieces[1:]))
+    ## there are other sibilings of 'trunk' and 'branches'. Pretend they are
+    ## all just funny-named branches, and let the Schedulers ignore them.
+    #return (pieces[0], os.path.join(*pieces[1:]))
+
+    raise RuntimeError("cannot determine branch for '%s'" % changed_file)
+
+
+split_file = split_file_dummy
+
+
+class ChangeSender:
+    """Collects SVN changes via 'svnlook' and delivers them over PB to a
+    buildmaster running a PBChangeSource."""
+
+    def getChanges(self, opts):
+        """Generate and stash a list of Change dictionaries, ready to be sent
+        to the buildmaster's PBChangeSource."""
+
+        # first we extract information about the files that were changed
+        repo = opts['repository']
+        print "Repo:", repo
+        rev_arg = ''
+        if opts['revision']:
+            rev_arg = '-r %s' % (opts['revision'], )
+        changed = commands.getoutput('svnlook changed %s "%s"' % (
+            rev_arg, repo)).split('\n')
+        # the first 4 columns can contain status information
+        changed = [x[4:] for x in changed]
+
+        message = commands.getoutput('svnlook log %s "%s"' % (rev_arg, repo))
+        who = commands.getoutput('svnlook author %s "%s"' % (rev_arg, repo))
+        revision = opts.get('revision')
+        if revision is not None:
+            revision = int(revision)
+
+        # see if we even need to notify buildbot by looking at filters first
+        changestring = '\n'.join(changed)
+        fltpat = opts['includes']
+        if fltpat:
+            included = sets.Set(re.findall(fltpat, changestring))
+        else:
+            included = sets.Set(changed)
+
+        expat = opts['excludes']
+        if expat:
+            excluded = sets.Set(re.findall(expat, changestring))
+        else:
+            excluded = sets.Set([])
+        # Excludes override includes; bail out if nothing interesting is left.
+        if len(included.difference(excluded)) == 0:
+            print changestring
+            print """\
+Buildbot was not interested, no changes matched any of these filters:\n %s
+or all the changes matched these exclusions:\n %s\
+""" % (fltpat, expat)
+            sys.exit(0)
+
+        # now see which branches are involved
+        files_per_branch = {}
+        for f in changed:
+            branch, filename = split_file(f)
+            if branch in files_per_branch.keys():
+                files_per_branch[branch].append(filename)
+            else:
+                files_per_branch[branch] = [filename]
+
+        # now create the Change dictionaries
+        changes = []
+        for branch in files_per_branch.keys():
+            d = {'who': who,
+                 'branch': branch,
+                 'files': files_per_branch[branch],
+                 'comments': message,
+                 'revision': revision}
+            changes.append(d)
+
+        return changes
+
+    def sendChanges(self, opts, changes):
+        """Log in to the buildmaster and send all changes; returns a Deferred."""
+        pbcf = pb.PBClientFactory()
+        reactor.connectTCP(opts['bbserver'], int(opts['bbport']), pbcf)
+        d = pbcf.login(credentials.UsernamePassword('change', 'changepw'))
+        d.addCallback(self.sendAllChanges, changes)
+        return d
+
+    def sendAllChanges(self, remote, changes):
+        # Fire one addChange call per Change and wait for all of them.
+        dl = [remote.callRemote('addChange', change)
+              for change in changes]
+        return defer.DeferredList(dl)
+
+    def run(self):
+        """Parse options, then either dry-run (print the changes) or send
+        them, stopping the reactor on success, failure, or 60s timeout."""
+        opts = Options()
+        try:
+            opts.parseOptions()
+        except usage.error, ue:
+            print opts
+            print "%s: %s" % (sys.argv[0], ue)
+            sys.exit()
+
+        changes = self.getChanges(opts)
+        if opts['dryrun']:
+            for i, c in enumerate(changes):
+                print "CHANGE #%d" % (i+1)
+                keys = c.keys()
+                keys.sort()
+                for k in keys:
+                    print "[%10s]: %s" % (k, c[k])
+            print "*NOT* sending any changes"
+            return
+
+        d = self.sendChanges(opts, changes)
+
+        def quit(*why):
+            print "quitting! because", why
+            reactor.stop()
+
+        def failed(f):
+            print "FAILURE"
+            print f
+            reactor.stop()
+
+        d.addCallback(quit, "SUCCESS")
+        d.addErrback(failed)
+        # Safety net: stop the reactor after 60 seconds no matter what.
+        reactor.callLater(60, quit, "TIMEOUT")
+        reactor.run()
+
+
+if __name__ == '__main__':
+    # Entry point when run as an SVN post-commit hook.
+    s = ChangeSender()
+    s.run()
diff --git a/buildbot/contrib/svn_watcher.py b/buildbot/contrib/svn_watcher.py
new file mode 100755
index 0000000..a7ac668
--- /dev/null
+++ b/buildbot/contrib/svn_watcher.py
@@ -0,0 +1,107 @@
+#!/usr/bin/python
+
+# This is a program which will poll a (remote) SVN repository, looking for
+# new revisions. It then uses the 'buildbot sendchange' command to deliver
+# information about the Change to a (remote) buildmaster. It can be run from
+# a cron job on a periodic basis, or can be told (with the 'watch' option) to
+# automatically repeat its check every 10 minutes.
+
+# This script does not store any state information, so to avoid spurious
+# changes you must use the 'watch' option and let it run forever.
+
+# You will need to provide it with the location of the buildmaster's
+# PBChangeSource port (in the form hostname:portnum), and the svnurl of the
+# repository to watch.
+
+
+# 15.03.06 by John Pye
+# 29.03.06 by Niklaus Giger, added support to run under windows,
+# added invocation option
+
+import subprocess
+import xml.dom.minidom
+import sys
+import time
+import os
+
+
+if sys.platform == 'win32':
+ import win32pipe
+
+
+def getoutput(cmd):
+    """Run *cmd* (an argv list) and return its stdout as one string.
+    The child's exit status is not checked."""
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+    return p.stdout.read()
+
+
+def checkChanges(repo, master, verbose=False, oldRevision=-1):
+    """Fetch the newest revision of *repo* via 'svn log --limit=1'; if it
+    differs from *oldRevision*, deliver it to *master* ("host:port") using
+    'buildbot sendchange'.
+
+    Returns the newest revision string so callers can poll in a loop.
+    """
+    cmd = ["svn", "log", "--non-interactive", "--xml", "--verbose",
+           "--limit=1", repo]
+    if verbose == True:
+        print "Getting last revision of repository: " + repo
+
+    # win32pipe is used on Windows; plain subprocess elsewhere.
+    if sys.platform == 'win32':
+        f = win32pipe.popen(cmd)
+        xml1 = ''.join(f.readlines())
+        f.close()
+    else:
+        xml1 = getoutput(cmd)
+
+    if verbose == True:
+        print "XML\n-----------\n"+xml1+"\n\n"
+
+    # Pull revision, author, comment, and changed paths out of the log XML.
+    doc = xml.dom.minidom.parseString(xml1)
+    el = doc.getElementsByTagName("logentry")[0]
+    revision = el.getAttribute("revision")
+    author = "".join([t.data for t in
+                      el.getElementsByTagName("author")[0].childNodes])
+    comments = "".join([t.data for t in
+                        el.getElementsByTagName("msg")[0].childNodes])
+
+    pathlist = el.getElementsByTagName("paths")[0]
+    paths = []
+    for p in pathlist.getElementsByTagName("path"):
+        paths.append("".join([t.data for t in p.childNodes]))
+
+    if verbose == True:
+        print "PATHS"
+        print paths
+
+    if revision != oldRevision:
+        # Forward the new revision to the buildmaster.
+        cmd = ["buildbot", "sendchange", "--master=%s"%master,
+               "--revision=%s"%revision, "--username=%s"%author,
+               "--comments=%s"%comments]
+        cmd += paths
+
+        if verbose == True:
+            print cmd
+
+        if sys.platform == 'win32':
+            f = win32pipe.popen(cmd)
+            print time.strftime("%H.%M.%S ") + "Revision "+revision+ ": "+ \
+''.join(f.readlines())
+            f.close()
+        else:
+            xml1 = getoutput(cmd)
+    else:
+        print time.strftime("%H.%M.%S ") + \
+"nothing has changed since revision "+revision
+
+    return revision
+
+
+if __name__ == '__main__':
+    # "repo master watch" polls forever; "repo master" checks once, verbosely.
+    if len(sys.argv) == 4 and sys.argv[3] == 'watch':
+        oldRevision = -1
+        print "Watching for changes in repo "+ sys.argv[1] +\
+" master " + sys.argv[2]
+        while 1:
+            oldRevision = checkChanges(
+                sys.argv[1], sys.argv[2], False, oldRevision)
+            time.sleep(10*60) # Check the repository every 10 minutes
+
+    elif len(sys.argv) == 3:
+        checkChanges(sys.argv[1], sys.argv[2], True)
+    else:
+        print os.path.basename(
+            sys.argv[0]) + ": http://host/path/to/repo master:port [watch]"
diff --git a/buildbot/contrib/svnpoller.py b/buildbot/contrib/svnpoller.py
new file mode 100755
index 0000000..ba0f174
--- /dev/null
+++ b/buildbot/contrib/svnpoller.py
@@ -0,0 +1,100 @@
+#!/usr/bin/python
+"""
+ svn.py
+ Script for BuildBot to monitor a remote Subversion repository.
+ Copyright (C) 2006 John Pye
+"""
+# This script is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+# USA
+
+import commands
+import xml.dom.minidom
+import ConfigParser
+import os.path
+import codecs
+
+# change these settings to match your project
+svnurl = "https://pse.cheme.cmu.edu/svn/ascend/code/trunk"
+statefilename = "~/changemonitor/config.ini"
+buildmaster = "buildbot.example.org:9989" # connects to a PBChangeSource
+
+xml1 = commands.getoutput(
+ "svn log --non-interactive --verbose --xml --limit=1 " + svnurl)
+#print "XML\n-----------\n"+xml1+"\n\n"
+
+try:
+ doc = xml.dom.minidom.parseString(xml1)
+ el = doc.getElementsByTagName("logentry")[0]
+ revision = el.getAttribute("revision")
+ author = "".join([t.data for t in el.getElementsByTagName(
+ "author")[0].childNodes])
+ comments = "".join([t.data for t in el.getElementsByTagName(
+ "msg")[0].childNodes])
+
+ pathlist = el.getElementsByTagName("paths")[0]
+ paths = []
+ for p in pathlist.getElementsByTagName("path"):
+ paths.append("".join([t.data for t in p.childNodes]))
+ #print "PATHS"
+ #print paths
+except xml.parsers.expat.ExpatError, e:
+ print "FAILED TO PARSE 'svn log' XML:"
+ print str(e)
+ print "----"
+ print "RECEIVED TEXT:"
+ print xml1
+ import sys
+ sys.exit(1)
+
+fname = statefilename
+fname = os.path.expanduser(fname)
+ini = ConfigParser.SafeConfigParser()
+
+try:
+ ini.read(fname)
+except:
+ print "Creating changemonitor config.ini:", fname
+ ini.add_section("CurrentRevision")
+ ini.set("CurrentRevision", -1)
+
+try:
+ lastrevision = ini.get("CurrentRevision", "changeset")
+except ConfigParser.NoOptionError:
+ print "NO OPTION FOUND"
+ lastrevision = -1
+except ConfigParser.NoSectionError:
+ print "NO SECTION FOUND"
+ lastrevision = -1
+
+if lastrevision != revision:
+
+ #comments = codecs.encodings.unicode_escape.encode(comments)
+ cmd = "buildbot sendchange --master="+buildmaster+" --branch=trunk \
+--revision=\""+revision+"\" --username=\""+author+"\" --comments=\""+\
+comments+"\" "+" ".join(paths)
+
+ #print cmd
+ res = commands.getoutput(cmd)
+
+ print "SUBMITTING NEW REVISION", revision
+ if not ini.has_section("CurrentRevision"):
+ ini.add_section("CurrentRevision")
+ try:
+ ini.set("CurrentRevision", "changeset", revision)
+ f = open(fname, "w")
+ ini.write(f)
+ #print "WROTE CHANGES TO",fname
+ except:
+ print "FAILED TO RECORD INI FILE"
diff --git a/buildbot/contrib/viewcvspoll.py b/buildbot/contrib/viewcvspoll.py
new file mode 100755
index 0000000..f7dfb16
--- /dev/null
+++ b/buildbot/contrib/viewcvspoll.py
@@ -0,0 +1,99 @@
+#! /usr/bin/python
+
+"""Based on the fakechanges.py contrib script"""
+
+import sys
+import commands
+import random
+import os.path
+import time
+import MySQLdb
+
+from twisted.spread import pb
+from twisted.cred import credentials
+from twisted.internet import reactor, task
+from twisted.python import log
+
+
+class ViewCvsPoller:
+
+ def __init__(self):
+
+ def _load_rc():
+ import user
+ ret = {}
+ for line in open(os.path.join(
+ user.home, ".cvsblamerc")).readlines():
+ if line.find("=") != -1:
+ key, val = line.split("=")
+ ret[key.strip()] = val.strip()
+ return ret
+ # maybe add your own keys here db=xxx, user=xxx, passwd=xxx
+ self.cvsdb = MySQLdb.connect("cvs", **_load_rc())
+ #self.last_checkin = "2005-05-11" # for testing
+ self.last_checkin = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
+
+ def get_changes(self):
+ changes = []
+
+ def empty_change():
+ return {'who': None, 'files': [], 'comments': None}
+ change = empty_change()
+
+ cursor = self.cvsdb.cursor()
+ cursor.execute("""SELECT whoid, descid, fileid, dirid, branchid, \
+ci_when FROM checkins WHERE ci_when>='%s'""" % self.last_checkin)
+ last_checkin = None
+ for whoid, descid, fileid, dirid, branchid, ci_when in \
+cursor.fetchall():
+ if branchid != 1: # only head
+ continue
+ cursor.execute("""SELECT who from people where id=%s""" % whoid)
+ who = cursor.fetchone()[0]
+ cursor.execute("""SELECT description from descs where id=%s""" % (
+ descid))
+ desc = cursor.fetchone()[0]
+ cursor.execute("""SELECT file from files where id=%s""" % fileid)
+ filename = cursor.fetchone()[0]
+ cursor.execute("""SELECT dir from dirs where id=%s""" % dirid)
+ dirname = cursor.fetchone()[0]
+ if who == change["who"] and desc == change["comments"]:
+ change["files"].append("%s/%s" % (dirname, filename))
+ elif change["who"]:
+ changes.append(change)
+ change = empty_change()
+ else:
+ change["who"] = who
+ change["files"].append("%s/%s" % (dirname, filename))
+ change["comments"] = desc
+ if last_checkin == None or ci_when > last_checkin:
+ last_checkin = ci_when
+ if last_checkin:
+ self.last_checkin = last_checkin
+ return changes
+
+
+poller = ViewCvsPoller()
+
+
+def error(*args):
+ log.err()
+ reactor.stop()
+
+
+def poll_changes(remote):
+ print "GET CHANGES SINCE", poller.last_checkin,
+ changes = poller.get_changes()
+ for change in changes:
+ print change["who"], "\n *", "\n * ".join(change["files"])
+ remote.callRemote('addChange', change).addErrback(error)
+ print
+ reactor.callLater(60, poll_changes, remote)
+
+
+factory = pb.PBClientFactory()
+reactor.connectTCP("localhost", 9999, factory)
+deferred = factory.login(credentials.UsernamePassword("change", "changepw"))
+deferred.addCallback(poll_changes).addErrback(error)
+
+reactor.run()
diff --git a/buildbot/contrib/windows/buildbot.bat b/buildbot/contrib/windows/buildbot.bat
new file mode 100644
index 0000000..a916b3a
--- /dev/null
+++ b/buildbot/contrib/windows/buildbot.bat
@@ -0,0 +1 @@
+@"%~dp0..\python" "%~dp0buildbot" %*
diff --git a/buildbot/contrib/windows/buildbot2.bat b/buildbot/contrib/windows/buildbot2.bat
new file mode 100644
index 0000000..e211adc
--- /dev/null
+++ b/buildbot/contrib/windows/buildbot2.bat
@@ -0,0 +1,98 @@
+@echo off
+rem This is Windows helper batch file for Buildbot
+rem NOTE: You will need Windows NT5/XP to use some of the syntax here.
+
+rem Please note you must have Twisted Matrix installed to use this build system
+rem Details: http://twistedmatrix.com/ (Version 1.3.0 or more, preferably 2.0+)
+
+rem NOTE: --reactor=win32 argument is needed because of Twisted
+rem The Twisted default reactor is select based (ie. posix) (why?!)
+
+rem Keep environmental settings local to this file
+setlocal
+
+rem Change the following settings to suit your environment
+
+rem This is where you want Buildbot installed
+set BB_DIR=z:\Tools\PythonLibs
+
+rem Assuming you have TortoiseCVS installed [for CVS.exe].
+set CVS_EXE="c:\Program Files\TortoiseCVS\cvs.exe"
+
+rem Trial: --spew will give LOADS of information. Use -o for verbose.
+set TRIAL=python C:\Python23\scripts\trial.py -o --reactor=win32
+set BUILDBOT_TEST_VC=c:\temp
+
+if "%1"=="helper" (
+ goto print_help
+)
+
+if "%1"=="bbinstall" (
+ rem You will only need to run this when you install Buildbot
+ echo BB: Install BuildBot at the location you set in the config:
+ echo BB: BB_DIR= %BB_DIR%
+ echo BB: You must be in the buildbot-x.y.z directory to run this:
+ python setup.py install --prefix %BB_DIR% --install-lib %BB_DIR%
+ goto end
+)
+
+if "%1"=="cvsco" (
+ echo BB: Getting Buildbot from Sourceforge CVS [if CVS in path].
+ if "%2"=="" (
+ echo BB ERROR: Please give a root path for the check out, eg. z:\temp
+ goto end
+ )
+
+ cd %2
+ echo BB: Hit return as there is no password
+ %CVS_EXE% -d:pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot login
+ %CVS_EXE% -z3 -d:pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot co -P buildbot
+ goto end
+)
+
+if "%1"=="cvsup" (
+ echo BB: Updating Buildbot from Sourceforge CVS [if CVS in path].
+ echo BB: Make sure you have the project checked out in local VCS.
+
+ rem we only want buildbot code, the rest is from the install
+ cd %BB_DIR%
+ echo BB: Hit return as there is no password
+ %CVS_EXE% -d:pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot login
+ %CVS_EXE% -z3 -d:pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot up -P -d buildbot buildbot/buildbot
+ goto end
+)
+
+if "%1"=="test" (
+ rem Trial is a testing framework supplied by the Twisted Matrix package.
+ rem It installs itself in the Python installation directory in a "scripts" folder,
+ rem e.g. c:\python23\scripts
+ rem This is just a convenience function because that directory is not in our path.
+
+ if "%2" NEQ "" (
+ echo BB: TEST: buildbot.test.%2
+ %TRIAL% -m buildbot.test.%2
+ ) else (
+ echo BB: Running ALL buildbot tests...
+ %TRIAL% buildbot.test
+ )
+ goto end
+)
+
+rem Okay, nothing that we recognised to pass to buildbot
+echo BB: Running buildbot...
+python -c "from buildbot.scripts import runner; runner.run()" %*
+goto end
+
+:print_help
+echo Buildbot helper script commands:
+echo helper This help message
+echo test Test buildbot is set up correctly
+echo Maintenance:
+echo bbinstall Install Buildbot from package
+echo cvsup Update from cvs
+echo cvsco [dir] Check buildbot out from cvs into [dir]
+
+:end
+rem End environment scope
+endlocal
+
diff --git a/buildbot/contrib/windows/buildbot_service.py b/buildbot/contrib/windows/buildbot_service.py
new file mode 100755
index 0000000..859f559
--- /dev/null
+++ b/buildbot/contrib/windows/buildbot_service.py
@@ -0,0 +1,536 @@
+# Runs the build-bot as a Windows service.
+# To use:
+# * Install and configure buildbot as per normal (ie, running
+# 'setup.py install' from the source directory).
+#
+# * Configure any number of build-bot directories (slaves or masters), as
+# per the buildbot instructions. Test these directories normally by
+# using the (possibly modified) "buildbot.bat" file and ensure everything
+# is working as expected.
+#
+# * Install the buildbot service. Execute the command:
+# % python buildbot_service.py
+# To see installation options. You probably want to specify:
+# + --username and --password options to specify the user to run the
+# + --startup auto to have the service start at boot time.
+#
+# For example:
+# % python buildbot_service.py --user mark --password secret \
+# --startup auto install
+# Alternatively, you could execute:
+# % python buildbot_service.py install
+# to install the service with default options, then use Control Panel
+# to configure it.
+#
+# * Start the service specifying the name of all buildbot directories as
+# service args. This can be done one of 2 ways:
+# - Execute the command:
+# % python buildbot_service.py start "dir_name1" "dir_name2"
+# or:
+# - Start Control Panel->Administrative Tools->Services
+# - Locate the previously installed buildbot service.
+# - Open the "properties" for the service.
+# - Enter the directory names into the "Start Parameters" textbox. The
+# directory names must be fully qualified, and surrounded in quotes if
+# they include spaces.
+# - Press the "Start" button.
+# Note that the service will automatically use the previously specified
+# directories if no arguments are specified. This means the directories
+# need only be specified when the directories to use have changed (and
+# therefore also the first time buildbot is configured)
+#
+# * The service should now be running. You should check the Windows
+# event log. If all goes well, you should see some information messages
+# telling you the buildbot has successfully started.
+#
+# * If you change the buildbot configuration, you must restart the service.
+# There is currently no way to ask a running buildbot to reload the
+# config. You can restart by executing:
+# % python buildbot_service.py restart
+#
+# Troubleshooting:
+# * Check the Windows event log for any errors.
+# * Check the "twistd.log" file in your buildbot directories - once each
+# bot has been started it just writes to this log as normal.
+# * Try executing:
+# % python buildbot_service.py debug
+# This will execute the buildbot service in "debug" mode, and allow you to
+# see all messages etc generated. If the service works in debug mode but
+# not as a real service, the error probably relates to the environment or
+# permissions of the user configured to run the service (debug mode runs as
+# the currently logged in user, not the service user)
+# * Ensure you have the latest pywin32 build available, at least version 206.
+
+# Written by Mark Hammond, 2006.
+
+import sys
+import os
+import threading
+
+import pywintypes
+import winerror
+import win32con
+import win32api
+import win32event
+import win32file
+import win32pipe
+import win32process
+import win32security
+import win32service
+import win32serviceutil
+import servicemanager
+
+# Are we running in a py2exe environment?
+is_frozen = hasattr(sys, "frozen")
+
+# Taken from the Zope service support - each "child" is run as a sub-process
+# (trying to run multiple twisted apps in the same process is likely to screw
+# stdout redirection etc).
+# Note that unlike the Zope service, we do *not* attempt to detect a failed
+# client and perform restarts - buildbot itself does a good job
+# at reconnecting, and Windows itself provides restart semantics should
+# everything go pear-shaped.
+
+# We execute a new thread that captures the tail of the output from our child
+# process. If the child fails, it is written to the event log.
+# This process is unconditional, and the output is never written to disk
+# (except obviously via the event log entry)
+# Size of the blocks we read from the child process's output.
+CHILDCAPTURE_BLOCK_SIZE = 80
+# The number of BLOCKSIZE blocks we keep as process output.
+CHILDCAPTURE_MAX_BLOCKS = 200
+
+
+class BBService(win32serviceutil.ServiceFramework):
+ _svc_name_ = 'BuildBot'
+ _svc_display_name_ = _svc_name_
+ _svc_description_ = 'Manages local buildbot slaves and masters - ' \
+ 'see http://buildbot.sourceforge.net'
+
+ def __init__(self, args):
+ win32serviceutil.ServiceFramework.__init__(self, args)
+
+ # Create an event which we will use to wait on. The "service stop"
+ # request will set this event.
+ # * We must make it inheritable so we can pass it to the child
+ # process via the cmd-line
+ # * Must be manual reset so each child process and our service
+ # all get woken from a single set of the event.
+ sa = win32security.SECURITY_ATTRIBUTES()
+ sa.bInheritHandle = True
+ self.hWaitStop = win32event.CreateEvent(sa, True, False, None)
+
+ self.args = args
+ self.dirs = None
+ self.runner_prefix = None
+
+ # Patch up the service messages file in a frozen exe.
+ # (We use the py2exe option that magically bundles the .pyd files
+ # into the .zip file - so servicemanager.pyd doesn't exist.)
+ if is_frozen and servicemanager.RunningAsService():
+ msg_file = os.path.join(os.path.dirname(sys.executable),
+ "buildbot.msg")
+ if os.path.isfile(msg_file):
+ servicemanager.Initialize("BuildBot", msg_file)
+ else:
+ self.warning("Strange - '%s' does not exist" % (msg_file, ))
+
+ def _checkConfig(self):
+ # Locate our child process runner (but only when run from source)
+ if not is_frozen:
+ # Running from source
+ python_exe = os.path.join(sys.prefix, "python.exe")
+ if not os.path.isfile(python_exe):
+ # for ppl who build Python itself from source.
+ python_exe = os.path.join(sys.prefix, "PCBuild", "python.exe")
+ if not os.path.isfile(python_exe):
+ self.error("Can not find python.exe to spawn subprocess")
+ return False
+
+ me = __file__
+ if me.endswith(".pyc") or me.endswith(".pyo"):
+ me = me[:-1]
+
+ self.runner_prefix = '"%s" "%s"' % (python_exe, me)
+ else:
+ # Running from a py2exe built executable - our child process is
+ # us (but with the funky cmdline args!)
+ self.runner_prefix = '"' + sys.executable + '"'
+
+ # Now our arg processing - this may be better handled by a
+ # twisted/buildbot style config file - but as of time of writing,
+ # MarkH is clueless about such things!
+
+ # Note that the "arguments" you type into Control Panel for the
+ # service do *not* persist - they apply only when you click "start"
+ # on the service. When started by Windows, args are never presented.
+ # Thus, it is the responsibility of the service to persist any args.
+
+ # so, when args are presented, we save them as a "custom option". If
+ # they are not presented, we load them from the option.
+ self.dirs = []
+ if len(self.args) > 1:
+ dir_string = os.pathsep.join(self.args[1:])
+ save_dirs = True
+ else:
+ dir_string = win32serviceutil.GetServiceCustomOption(self,
+ "directories")
+ save_dirs = False
+
+ if not dir_string:
+ self.error("You must specify the buildbot directories as "
+ "parameters to the service.\nStopping the service.")
+ return False
+
+ dirs = dir_string.split(os.pathsep)
+ for d in dirs:
+ d = os.path.abspath(d)
+ sentinal = os.path.join(d, "buildbot.tac")
+ if os.path.isfile(sentinal):
+ self.dirs.append(d)
+ else:
+ msg = "Directory '%s' is not a buildbot dir - ignoring" \
+ % (d, )
+ self.warning(msg)
+ if not self.dirs:
+ self.error("No valid buildbot directories were specified.\n"
+ "Stopping the service.")
+ return False
+ if save_dirs:
+ dir_string = os.pathsep.join(self.dirs).encode("mbcs")
+ win32serviceutil.SetServiceCustomOption(self, "directories",
+ dir_string)
+ return True
+
+ def SvcStop(self):
+ # Tell the SCM we are starting the stop process.
+ self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
+ # Set the stop event - the main loop takes care of termination.
+ win32event.SetEvent(self.hWaitStop)
+
+ # SvcStop only gets triggered when the user explicitly stops (or restarts)
+ # the service. To shut the service down cleanly when Windows is shutting
+ # down, we also need to hook SvcShutdown.
+ SvcShutdown = SvcStop
+
+ def SvcDoRun(self):
+ if not self._checkConfig():
+ # stopped status set by caller.
+ return
+
+ self.logmsg(servicemanager.PYS_SERVICE_STARTED)
+
+ child_infos = []
+
+ for bbdir in self.dirs:
+ self.info("Starting BuildBot in directory '%s'" % (bbdir, ))
+ hstop = self.hWaitStop
+
+ cmd = '%s --spawn %d start %s' % (self.runner_prefix, hstop, bbdir)
+ #print "cmd is", cmd
+ h, t, output = self.createProcess(cmd)
+ child_infos.append((bbdir, h, t, output))
+
+ while child_infos:
+ handles = [self.hWaitStop] + [i[1] for i in child_infos]
+
+ rc = win32event.WaitForMultipleObjects(handles,
+ 0, # bWaitAll
+ win32event.INFINITE)
+ if rc == win32event.WAIT_OBJECT_0:
+ # user sent a stop service request
+ break
+ else:
+ # A child process died. For now, just log the output
+ # and forget the process.
+ index = rc - win32event.WAIT_OBJECT_0 - 1
+ bbdir, dead_handle, dead_thread, output_blocks = \
+ child_infos[index]
+ status = win32process.GetExitCodeProcess(dead_handle)
+ output = "".join(output_blocks)
+ if not output:
+ output = "The child process generated no output. " \
+ "Please check the twistd.log file in the " \
+ "indicated directory."
+
+ self.warning("BuildBot for directory %r terminated with "
+ "exit code %d.\n%s" % (bbdir, status, output))
+
+ del child_infos[index]
+
+ if not child_infos:
+ self.warning("All BuildBot child processes have "
+ "terminated. Service stopping.")
+
+ # Either no child processes left, or stop event set.
+ self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
+
+ # The child processes should have also seen our stop signal
+ # so wait for them to terminate.
+ for bbdir, h, t, output in child_infos:
+ for i in range(10): # 30 seconds to shutdown...
+ self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
+ rc = win32event.WaitForSingleObject(h, 3000)
+ if rc == win32event.WAIT_OBJECT_0:
+ break
+ # Process terminated - no need to try harder.
+ if rc == win32event.WAIT_OBJECT_0:
+ break
+
+ self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
+ # If necessary, kill it
+ if win32process.GetExitCodeProcess(h)==win32con.STILL_ACTIVE:
+ self.warning("BuildBot process at %r failed to terminate - "
+ "killing it" % (bbdir, ))
+ win32api.TerminateProcess(h, 3)
+ self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
+
+ # Wait for the redirect thread - it should have died as the remote
+ # process terminated.
+ # As we are shutting down, we do the join with a little more care,
+ # reporting progress as we wait (even though we never will <wink>)
+ for i in range(5):
+ t.join(1)
+ self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
+ if not t.isAlive():
+ break
+ else:
+ self.warning("Redirect thread did not stop!")
+
+ # All done.
+ self.logmsg(servicemanager.PYS_SERVICE_STOPPED)
+
+ #
+ # Error reporting/logging functions.
+ #
+
+ def logmsg(self, event):
+ # log a service event using servicemanager.LogMsg
+ try:
+ servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,
+ event,
+ (self._svc_name_,
+ " (%s)" % self._svc_display_name_))
+ except win32api.error, details:
+ # Failed to write a log entry - most likely problem is
+ # that the event log is full. We don't want this to kill us
+ try:
+ print "FAILED to write INFO event", event, ":", details
+ except IOError:
+ # No valid stdout! Ignore it.
+ pass
+
+ def _dolog(self, func, msg):
+ try:
+ func(msg)
+ except win32api.error, details:
+ # Failed to write a log entry - most likely problem is
+ # that the event log is full. We don't want this to kill us
+ try:
+ print "FAILED to write event log entry:", details
+ print msg
+ except IOError:
+ pass
+
+ def info(self, s):
+ self._dolog(servicemanager.LogInfoMsg, s)
+
+ def warning(self, s):
+ self._dolog(servicemanager.LogWarningMsg, s)
+
+ def error(self, s):
+ self._dolog(servicemanager.LogErrorMsg, s)
+
+ # Functions that spawn a child process, redirecting any output.
+ # Although buildbot itself does this, it is very handy to debug issues
+ # such as ImportErrors that happen before buildbot has redirected.
+
+ def createProcess(self, cmd):
+ hInputRead, hInputWriteTemp = self.newPipe()
+ hOutReadTemp, hOutWrite = self.newPipe()
+ pid = win32api.GetCurrentProcess()
+ # This one is duplicated as inheritable.
+ hErrWrite = win32api.DuplicateHandle(pid, hOutWrite, pid, 0, 1,
+ win32con.DUPLICATE_SAME_ACCESS)
+
+ # These are non-inheritable duplicates.
+ hOutRead = self.dup(hOutReadTemp)
+ hInputWrite = self.dup(hInputWriteTemp)
+ # dup() closed hOutReadTemp, hInputWriteTemp
+
+ si = win32process.STARTUPINFO()
+ si.hStdInput = hInputRead
+ si.hStdOutput = hOutWrite
+ si.hStdError = hErrWrite
+ si.dwFlags = win32process.STARTF_USESTDHANDLES | \
+ win32process.STARTF_USESHOWWINDOW
+ si.wShowWindow = win32con.SW_HIDE
+
+ # pass True to allow handles to be inherited. Inheritance is
+ # problematic in general, but should work in the controlled
+ # circumstances of a service process.
+ create_flags = win32process.CREATE_NEW_CONSOLE
+ # info is (hProcess, hThread, pid, tid)
+ info = win32process.CreateProcess(None, cmd, None, None, True,
+ create_flags, None, None, si)
+ # (NOTE: these really aren't necessary for Python - they are closed
+ # as soon as they are collected)
+ hOutWrite.Close()
+ hErrWrite.Close()
+ hInputRead.Close()
+ # We don't use stdin
+ hInputWrite.Close()
+
+ # start a thread collecting output
+ blocks = []
+ t = threading.Thread(target=self.redirectCaptureThread,
+ args = (hOutRead, blocks))
+ t.start()
+ return info[0], t, blocks
+
+ def redirectCaptureThread(self, handle, captured_blocks):
+ # One of these running per child process we are watching. It
+ # handles both stdout and stderr on a single handle. The read data is
+ # never referenced until the thread dies - so no need for locks
+ # around self.captured_blocks.
+ #self.info("Redirect thread starting")
+ while 1:
+ try:
+ ec, data = win32file.ReadFile(handle, CHILDCAPTURE_BLOCK_SIZE)
+ except pywintypes.error, err:
+ # ERROR_BROKEN_PIPE means the child process closed the
+ # handle - ie, it terminated.
+ if err[0] != winerror.ERROR_BROKEN_PIPE:
+ self.warning("Error reading output from process: %s" % err)
+ break
+ captured_blocks.append(data)
+ del captured_blocks[CHILDCAPTURE_MAX_BLOCKS:]
+ handle.Close()
+ #self.info("Redirect capture thread terminating")
+
+ def newPipe(self):
+ sa = win32security.SECURITY_ATTRIBUTES()
+ sa.bInheritHandle = True
+ return win32pipe.CreatePipe(sa, 0)
+
+ def dup(self, pipe):
+ # create a duplicate handle that is not inherited, so that
+ # it can be closed in the parent. close the original pipe in
+ # the process.
+ pid = win32api.GetCurrentProcess()
+ dup = win32api.DuplicateHandle(pid, pipe, pid, 0, 0,
+ win32con.DUPLICATE_SAME_ACCESS)
+ pipe.Close()
+ return dup
+
+
+# Service registration and startup
+
+
+def RegisterWithFirewall(exe_name, description):
+ # Register our executable as an exception with Windows Firewall.
+ # taken from http://msdn.microsoft.com/library/default.asp?url=\
+ #/library/en-us/ics/ics/wf_adding_an_application.asp
+ from win32com.client import Dispatch
+ # Set constants
+ NET_FW_PROFILE_DOMAIN = 0
+ NET_FW_PROFILE_STANDARD = 1
+
+ # Scope
+ NET_FW_SCOPE_ALL = 0
+
+ # IP Version - ANY is the only allowable setting for now
+ NET_FW_IP_VERSION_ANY = 2
+
+ fwMgr = Dispatch("HNetCfg.FwMgr")
+
+ # Get the current profile for the local firewall policy.
+ profile = fwMgr.LocalPolicy.CurrentProfile
+
+ app = Dispatch("HNetCfg.FwAuthorizedApplication")
+
+ app.ProcessImageFileName = exe_name
+ app.Name = description
+ app.Scope = NET_FW_SCOPE_ALL
+ # Use either Scope or RemoteAddresses, but not both
+ #app.RemoteAddresses = "*"
+ app.IpVersion = NET_FW_IP_VERSION_ANY
+ app.Enabled = True
+
+ # Use this line if you want to add the app, but disabled.
+ #app.Enabled = False
+
+ profile.AuthorizedApplications.Add(app)
+
+
+# A custom install function.
+
+
+def CustomInstall(opts):
+ # Register this process with the Windows Firewall
+ import pythoncom
+ try:
+ RegisterWithFirewall(sys.executable, "BuildBot")
+ except pythoncom.com_error, why:
+ print "FAILED to register with the Windows firewall"
+ print why
+
+
+# Magic code to allow shutdown. Note that this code is executed in
+# the *child* process, by way of the service process executing us with
+# special cmdline args (which includes the service stop handle!)
+
+
+def _RunChild():
+ del sys.argv[1] # The --spawn arg.
+ # Create a new thread that just waits for the event to be signalled.
+ t = threading.Thread(target=_WaitForShutdown,
+ args = (int(sys.argv[1]), )
+ )
+ del sys.argv[1] # The stop handle
+ # This child process will be sent a console handler notification as
+ # users log off, or as the system shuts down. We want to ignore these
+ # signals as the service parent is responsible for our shutdown.
+
+ def ConsoleHandler(what):
+ # We can ignore *everything* - ctrl+c will never be sent as this
+ # process is never attached to a console the user can press the
+ # key in!
+ return True
+ win32api.SetConsoleCtrlHandler(ConsoleHandler, True)
+ t.setDaemon(True) # we don't want to wait for this to stop!
+ t.start()
+ if hasattr(sys, "frozen"):
+ # py2exe sets env vars that may interfere with our child process - reset
+ del os.environ["PYTHONPATH"]
+
+ # Start the buildbot app
+ from buildbot.scripts import runner
+ runner.run()
+ print "Service child process terminating normally."
+
+
+def _WaitForShutdown(h):
+ win32event.WaitForSingleObject(h, win32event.INFINITE)
+ print "Shutdown requested"
+
+ from twisted.internet import reactor
+ reactor.callLater(0, reactor.stop)
+
+
+# This function is also called by the py2exe startup code.
+
+
+def HandleCommandLine():
+ if len(sys.argv)>1 and sys.argv[1] == "--spawn":
+ # Special command-line created by the service to execute the
+ # child-process.
+ # First arg is the handle to wait on
+ _RunChild()
+ else:
+ win32serviceutil.HandleCommandLine(BBService,
+ customOptionHandler=CustomInstall)
+
+
+if __name__ == '__main__':
+ HandleCommandLine()
diff --git a/buildbot/contrib/windows/setup.py b/buildbot/contrib/windows/setup.py
new file mode 100755
index 0000000..ecb18bc
--- /dev/null
+++ b/buildbot/contrib/windows/setup.py
@@ -0,0 +1,83 @@
+# setup.py
+# A distutils setup script to create py2exe binaries for buildbot.
+# Both a service and standard executable are created.
+# Usage:
+# % setup.py py2exe
+
+import sys
+import os
+import tempfile
+import shutil
+import py2exe
+
+from os.path import dirname, join, abspath, exists, splitext
+
+this_dir = abspath(dirname(__file__))
+bb_root_dir = abspath(join(this_dir, "..", ".."))
+
+from distutils.core import setup
+
+includes = []
+
+# We try and bundle *all* modules in the following packages:
+for package in ["buildbot.changes", "buildbot.process", "buildbot.status"]:
+ __import__(package)
+ p = sys.modules[package]
+ for fname in os.listdir(p.__path__[0]):
+ base, ext = splitext(fname)
+ if not fname.startswith("_") and ext == ".py":
+ includes.append(p.__name__ + "." + base)
+
+# Other misc modules dynamically imported, so missed by py2exe
+includes.extend("""
+ buildbot.scheduler
+ buildbot.slave.bot
+ buildbot.master
+ twisted.internet.win32eventreactor
+ twisted.web.resource""".split())
+
+# Turn into "," sep py2exe requires
+includes = ",".join(includes)
+
+py2exe_options = {"bundle_files": 1,
+ "includes": includes,
+ }
+
+# Each "target" executable we create
+buildbot_target = {
+ "script": join(bb_root_dir, "bin", "buildbot"),
+}
+# Due to the way py2exe works, we need to rebuild the service code as a
+# normal console process - this will be executed by the service itself.
+
+service_target = {
+ "modules": ["buildbot_service"],
+ "cmdline_style": "custom",
+}
+
+# We use the py2exe "bundle" option, so servicemanager.pyd
+# (which has the message resources) does not exist. Take a copy
+# of it with a "friendlier" name. The service runtime arranges for this
+# to be used.
+import servicemanager
+
+msg_file = join(tempfile.gettempdir(), "buildbot.msg")
+shutil.copy(servicemanager.__file__, msg_file)
+
+data_files = [
+ ["", [msg_file]],
+ ["", [join(bb_root_dir, "buildbot", "status", "web", "classic.css")]],
+ ["", [join(bb_root_dir, "buildbot", "buildbot.png")]],
+]
+
+try:
+ setup(name="buildbot",
+ # The buildbot script as a normal executable
+ console=[buildbot_target],
+ service=[service_target],
+ options={'py2exe': py2exe_options},
+ data_files = data_files,
+ zipfile = "buildbot.library", # 'library.zip' invites trouble :)
+ )
+finally:
+ os.unlink(msg_file)
diff --git a/buildbot/docs/buildbot.html b/buildbot/docs/buildbot.html
new file mode 100644
index 0000000..e4d1409
--- /dev/null
+++ b/buildbot/docs/buildbot.html
@@ -0,0 +1,9606 @@
+<html lang="en">
+<head>
+<title>BuildBot Manual 0.7.10</title>
+<meta http-equiv="Content-Type" content="text/html">
+<meta name="description" content="BuildBot Manual 0.7.10">
+<meta name="generator" content="makeinfo 4.11">
+<link title="Top" rel="top" href="#Top">
+<link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
+<!--
+This is the BuildBot manual.
+
+Copyright (C) 2005,2006 Brian Warner
+
+Copying and distribution of this file, with or without
+modification, are permitted in any medium without royalty
+provided the copyright notice and this notice are preserved.-->
+<meta http-equiv="Content-Style-Type" content="text/css">
+<style type="text/css"><!--
+ pre.display { font-family:inherit }
+ pre.format { font-family:inherit }
+ pre.smalldisplay { font-family:inherit; font-size:smaller }
+ pre.smallformat { font-family:inherit; font-size:smaller }
+ pre.smallexample { font-size:smaller }
+ pre.smalllisp { font-size:smaller }
+ span.sc { font-variant:small-caps }
+ span.roman { font-family:serif; font-weight:normal; }
+ span.sansserif { font-family:sans-serif; font-weight:normal; }
+--></style>
+</head>
+<body>
+<h1 class="settitle">BuildBot Manual 0.7.10</h1>
+ <div class="contents">
+<h2>Table of Contents</h2>
+<ul>
+<li><a name="toc_Top" href="#Top">BuildBot</a>
+<li><a name="toc_Introduction" href="#Introduction">1 Introduction</a>
+<ul>
+<li><a href="#History-and-Philosophy">1.1 History and Philosophy</a>
+<li><a href="#System-Architecture">1.2 System Architecture</a>
+<ul>
+<li><a href="#BuildSlave-Connections">1.2.1 BuildSlave Connections</a>
+<li><a href="#Buildmaster-Architecture">1.2.2 Buildmaster Architecture</a>
+<li><a href="#Status-Delivery-Architecture">1.2.3 Status Delivery Architecture</a>
+</li></ul>
+<li><a href="#Control-Flow">1.3 Control Flow</a>
+</li></ul>
+<li><a name="toc_Installation" href="#Installation">2 Installation</a>
+<ul>
+<li><a href="#Requirements">2.1 Requirements</a>
+<li><a href="#Installing-the-code">2.2 Installing the code</a>
+<li><a href="#Creating-a-buildmaster">2.3 Creating a buildmaster</a>
+<li><a href="#Upgrading-an-Existing-Buildmaster">2.4 Upgrading an Existing Buildmaster</a>
+<li><a href="#Creating-a-buildslave">2.5 Creating a buildslave</a>
+<ul>
+<li><a href="#Buildslave-Options">2.5.1 Buildslave Options</a>
+</li></ul>
+<li><a href="#Launching-the-daemons">2.6 Launching the daemons</a>
+<li><a href="#Logfiles">2.7 Logfiles</a>
+<li><a href="#Shutdown">2.8 Shutdown</a>
+<li><a href="#Maintenance">2.9 Maintenance</a>
+<li><a href="#Troubleshooting">2.10 Troubleshooting</a>
+<ul>
+<li><a href="#Starting-the-buildslave">2.10.1 Starting the buildslave</a>
+<li><a href="#Connecting-to-the-buildmaster">2.10.2 Connecting to the buildmaster</a>
+<li><a href="#Forcing-Builds">2.10.3 Forcing Builds</a>
+</li></ul>
+</li></ul>
+<li><a name="toc_Concepts" href="#Concepts">3 Concepts</a>
+<ul>
+<li><a href="#Version-Control-Systems">3.1 Version Control Systems</a>
+<ul>
+<li><a href="#Generalizing-VC-Systems">3.1.1 Generalizing VC Systems</a>
+<li><a href="#Source-Tree-Specifications">3.1.2 Source Tree Specifications</a>
+<li><a href="#How-Different-VC-Systems-Specify-Sources">3.1.3 How Different VC Systems Specify Sources</a>
+<li><a href="#Attributes-of-Changes">3.1.4 Attributes of Changes</a>
+</li></ul>
+<li><a href="#Schedulers">3.2 Schedulers</a>
+<li><a href="#BuildSet">3.3 BuildSet</a>
+<li><a href="#BuildRequest">3.4 BuildRequest</a>
+<li><a href="#Builder">3.5 Builder</a>
+<li><a href="#Users">3.6 Users</a>
+<ul>
+<li><a href="#Doing-Things-With-Users">3.6.1 Doing Things With Users</a>
+<li><a href="#Email-Addresses">3.6.2 Email Addresses</a>
+<li><a href="#IRC-Nicknames">3.6.3 IRC Nicknames</a>
+<li><a href="#Live-Status-Clients">3.6.4 Live Status Clients</a>
+</li></ul>
+<li><a href="#Build-Properties">3.7 Build Properties</a>
+</li></ul>
+<li><a name="toc_Configuration" href="#Configuration">4 Configuration</a>
+<ul>
+<li><a href="#Config-File-Format">4.1 Config File Format</a>
+<li><a href="#Loading-the-Config-File">4.2 Loading the Config File</a>
+<li><a href="#Testing-the-Config-File">4.3 Testing the Config File</a>
+<li><a href="#Defining-the-Project">4.4 Defining the Project</a>
+<li><a href="#Change-Sources-and-Schedulers">4.5 Change Sources and Schedulers</a>
+<ul>
+<li><a href="#Scheduler-Scheduler">4.5.1 Scheduler Scheduler</a>
+<li><a href="#AnyBranchScheduler">4.5.2 AnyBranchScheduler</a>
+<li><a href="#Dependent-Scheduler">4.5.3 Dependent Scheduler</a>
+<li><a href="#Periodic-Scheduler">4.5.4 Periodic Scheduler</a>
+<li><a href="#Nightly-Scheduler">4.5.5 Nightly Scheduler</a>
+<li><a href="#Try-Schedulers">4.5.6 Try Schedulers</a>
+<li><a href="#Triggerable-Scheduler">4.5.7 Triggerable Scheduler</a>
+</li></ul>
+<li><a href="#Merging-BuildRequests">4.6 Merging BuildRequests</a>
+<li><a href="#Setting-the-slaveport">4.7 Setting the slaveport</a>
+<li><a href="#Buildslave-Specifiers">4.8 Buildslave Specifiers</a>
+<ul>
+<li><a href="#When-Buildslaves-Go-Missing">4.8.1 When Buildslaves Go Missing</a>
+</li></ul>
+<li><a href="#On_002dDemand-_0028_0022Latent_0022_0029-Buildslaves">4.9 On-Demand ("Latent") Buildslaves</a>
+<ul>
+<li><a href="#Amazon-Web-Services-Elastic-Compute-Cloud-_0028_0022AWS-EC2_0022_0029">4.9.1 Amazon Web Services Elastic Compute Cloud ("AWS EC2")</a>
+<ul>
+<li><a href="#Get-an-AWS-EC2-Account">4.9.1.1 Get an AWS EC2 Account</a>
+<li><a href="#Create-an-AMI">4.9.1.2 Create an AMI</a>
+<li><a href="#Configure-the-Master-with-an-EC2LatentBuildSlave">4.9.1.3 Configure the Master with an EC2LatentBuildSlave</a>
+</li></ul>
+<li><a href="#Dangers-with-Latent-Buildslaves">4.9.2 Dangers with Latent Buildslaves</a>
+<li><a href="#Writing-New-Latent-Buildslaves">4.9.3 Writing New Latent Buildslaves</a>
+</li></ul>
+<li><a href="#Defining-Global-Properties">4.10 Defining Global Properties</a>
+<li><a href="#Defining-Builders">4.11 Defining Builders</a>
+<li><a href="#Defining-Status-Targets">4.12 Defining Status Targets</a>
+<li><a href="#Debug-options">4.13 Debug options</a>
+</li></ul>
+<li><a name="toc_Getting-Source-Code-Changes" href="#Getting-Source-Code-Changes">5 Getting Source Code Changes</a>
+<ul>
+<li><a href="#Change-Sources">5.1 Change Sources</a>
+<li><a href="#Choosing-ChangeSources">5.2 Choosing ChangeSources</a>
+<li><a href="#CVSToys-_002d-PBService">5.3 CVSToys - PBService</a>
+<li><a href="#Mail_002dparsing-ChangeSources">5.4 Mail-parsing ChangeSources</a>
+<ul>
+<li><a href="#Subscribing-the-Buildmaster">5.4.1 Subscribing the Buildmaster</a>
+<li><a href="#Using-Maildirs">5.4.2 Using Maildirs</a>
+<li><a href="#Parsing-Email-Change-Messages">5.4.3 Parsing Email Change Messages</a>
+<ul>
+<li><a href="#FCMaildirSource">5.4.3.1 FCMaildirSource</a>
+<li><a href="#SyncmailMaildirSource">5.4.3.2 SyncmailMaildirSource</a>
+<li><a href="#BonsaiMaildirSource">5.4.3.3 BonsaiMaildirSource</a>
+<li><a href="#SVNCommitEmailMaildirSource">5.4.3.4 SVNCommitEmailMaildirSource</a>
+</li></ul>
+</li></ul>
+<li><a href="#PBChangeSource">5.5 PBChangeSource</a>
+<li><a href="#P4Source">5.6 P4Source</a>
+<li><a href="#BonsaiPoller">5.7 BonsaiPoller</a>
+<li><a href="#SVNPoller">5.8 SVNPoller</a>
+<li><a href="#MercurialHook">5.9 MercurialHook</a>
+<li><a href="#Bzr-Hook">5.10 Bzr Hook</a>
+<li><a href="#Bzr-Poller">5.11 Bzr Poller</a>
+</li></ul>
+<li><a name="toc_Build-Process" href="#Build-Process">6 Build Process</a>
+<ul>
+<li><a href="#Build-Steps">6.1 Build Steps</a>
+<ul>
+<li><a href="#Common-Parameters">6.1.1 Common Parameters</a>
+<li><a href="#Using-Build-Properties">6.1.2 Using Build Properties</a>
+<li><a href="#Source-Checkout">6.1.3 Source Checkout</a>
+<ul>
+<li><a href="#CVS">6.1.3.1 CVS</a>
+<li><a href="#SVN">6.1.3.2 SVN</a>
+<li><a href="#Darcs">6.1.3.3 Darcs</a>
+<li><a href="#Mercurial">6.1.3.4 Mercurial</a>
+<li><a href="#Arch">6.1.3.5 Arch</a>
+<li><a href="#Bazaar">6.1.3.6 Bazaar</a>
+<li><a href="#Bzr">6.1.3.7 Bzr</a>
+<li><a href="#P4">6.1.3.8 P4</a>
+<li><a href="#Git">6.1.3.9 Git</a>
+</li></ul>
+<li><a href="#ShellCommand">6.1.4 ShellCommand</a>
+<li><a href="#Simple-ShellCommand-Subclasses">6.1.5 Simple ShellCommand Subclasses</a>
+<ul>
+<li><a href="#Configure">6.1.5.1 Configure</a>
+<li><a href="#Compile">6.1.5.2 Compile</a>
+<li><a href="#Test">6.1.5.3 Test</a>
+<li><a href="#TreeSize">6.1.5.4 TreeSize</a>
+<li><a href="#PerlModuleTest">6.1.5.5 PerlModuleTest</a>
+<li><a href="#SetProperty">6.1.5.6 SetProperty</a>
+</li></ul>
+<li><a href="#Python-BuildSteps">6.1.6 Python BuildSteps</a>
+<ul>
+<li><a href="#BuildEPYDoc">6.1.6.1 BuildEPYDoc</a>
+<li><a href="#PyFlakes">6.1.6.2 PyFlakes</a>
+<li><a href="#PyLint">6.1.6.3 PyLint</a>
+</li></ul>
+<li><a href="#Transferring-Files">6.1.7 Transferring Files</a>
+<li><a href="#Steps-That-Run-on-the-Master">6.1.8 Steps That Run on the Master</a>
+<li><a href="#Triggering-Schedulers">6.1.9 Triggering Schedulers</a>
+<li><a href="#Writing-New-BuildSteps">6.1.10 Writing New BuildSteps</a>
+<ul>
+<li><a href="#Writing-BuildStep-Constructors">6.1.10.1 Writing BuildStep Constructors</a>
+<li><a href="#BuildStep-LogFiles">6.1.10.2 BuildStep LogFiles</a>
+<li><a href="#Reading-Logfiles">6.1.10.3 Reading Logfiles</a>
+<li><a href="#Adding-LogObservers">6.1.10.4 Adding LogObservers</a>
+<li><a href="#BuildStep-URLs">6.1.10.5 BuildStep URLs</a>
+</li></ul>
+</li></ul>
+<li><a href="#Interlocks">6.2 Interlocks</a>
+<li><a href="#Build-Factories">6.3 Build Factories</a>
+<ul>
+<li><a href="#BuildStep-Objects">6.3.1 BuildStep Objects</a>
+<li><a href="#BuildFactory">6.3.2 BuildFactory</a>
+<ul>
+<li><a href="#BuildFactory-Attributes">6.3.2.1 BuildFactory Attributes</a>
+<li><a href="#Quick-builds">6.3.2.2 Quick builds</a>
+</li></ul>
+<li><a href="#Process_002dSpecific-build-factories">6.3.3 Process-Specific build factories</a>
+<ul>
+<li><a href="#GNUAutoconf">6.3.3.1 GNUAutoconf</a>
+<li><a href="#CPAN">6.3.3.2 CPAN</a>
+<li><a href="#Python-distutils">6.3.3.3 Python distutils</a>
+<li><a href="#Python_002fTwisted_002ftrial-projects">6.3.3.4 Python/Twisted/trial projects</a>
+</li></ul>
+</li></ul>
+</li></ul>
+<li><a name="toc_Status-Delivery" href="#Status-Delivery">7 Status Delivery</a>
+<ul>
+<li><a href="#WebStatus">7.1 WebStatus</a>
+<ul>
+<li><a href="#WebStatus-Configuration-Parameters">7.1.1 WebStatus Configuration Parameters</a>
+<li><a href="#Buildbot-Web-Resources">7.1.2 Buildbot Web Resources</a>
+<li><a href="#XMLRPC-server">7.1.3 XMLRPC server</a>
+<li><a href="#HTML-Waterfall">7.1.4 HTML Waterfall</a>
+</li></ul>
+<li><a href="#MailNotifier">7.2 MailNotifier</a>
+<li><a href="#IRC-Bot">7.3 IRC Bot</a>
+<li><a href="#PBListener">7.4 PBListener</a>
+<li><a href="#Writing-New-Status-Plugins">7.5 Writing New Status Plugins</a>
+</li></ul>
+<li><a name="toc_Command_002dline-tool" href="#Command_002dline-tool">8 Command-line tool</a>
+<ul>
+<li><a href="#Administrator-Tools">8.1 Administrator Tools</a>
+<li><a href="#Developer-Tools">8.2 Developer Tools</a>
+<ul>
+<li><a href="#statuslog">8.2.1 statuslog</a>
+<li><a href="#statusgui">8.2.2 statusgui</a>
+<li><a href="#try">8.2.3 try</a>
+<ul>
+<li><a href="#try-_002d_002ddiff">8.2.3.1 try &ndash;diff</a>
+</li></ul>
+</li></ul>
+<li><a href="#Other-Tools">8.3 Other Tools</a>
+<ul>
+<li><a href="#sendchange">8.3.1 sendchange</a>
+<li><a href="#debugclient">8.3.2 debugclient</a>
+</li></ul>
+<li><a href="#_002ebuildbot-config-directory">8.4 .buildbot config directory</a>
+</li></ul>
+<li><a name="toc_Resources" href="#Resources">9 Resources</a>
+<li><a name="toc_Developer_0027s-Appendix" href="#Developer_0027s-Appendix">Developer's Appendix</a>
+<li><a name="toc_Index-of-Useful-Classes" href="#Index-of-Useful-Classes">Index of Useful Classes</a>
+<li><a name="toc_Index-of-master_002ecfg-keys" href="#Index-of-master_002ecfg-keys">Index of master.cfg keys</a>
+<li><a name="toc_Index" href="#Index">Index</a>
+</li></ul>
+</div>
+
+
+
+<div class="node">
+<p><hr>
+<a name="Top"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Introduction">Introduction</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#dir">(dir)</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#dir">(dir)</a>
+
+</div>
+
+<h2 class="unnumbered">BuildBot</h2>
+
+<p>This is the BuildBot manual.
+
+ <p>Copyright (C) 2005,2006 Brian Warner
+
+ <p>Copying and distribution of this file, with or without
+modification, are permitted in any medium without royalty
+provided the copyright notice and this notice are preserved.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Introduction">Introduction</a>: What the BuildBot does.
+<li><a accesskey="2" href="#Installation">Installation</a>: Creating a buildmaster and buildslaves,
+ running them.
+<li><a accesskey="3" href="#Concepts">Concepts</a>: What goes on in the buildbot's little mind.
+<li><a accesskey="4" href="#Configuration">Configuration</a>: Controlling the buildbot.
+<li><a accesskey="5" href="#Getting-Source-Code-Changes">Getting Source Code Changes</a>: Discovering when to run a build.
+<li><a accesskey="6" href="#Build-Process">Build Process</a>: Controlling how each build is run.
+<li><a accesskey="7" href="#Status-Delivery">Status Delivery</a>: Telling the world about the build's results.
+<li><a accesskey="8" href="#Command_002dline-tool">Command-line tool</a>
+<li><a accesskey="9" href="#Resources">Resources</a>: Getting help.
+<li><a href="#Developer_0027s-Appendix">Developer's Appendix</a>
+<li><a href="#Index-of-Useful-Classes">Index of Useful Classes</a>
+<li><a href="#Index-of-master_002ecfg-keys">Index of master.cfg keys</a>
+<li><a href="#Index">Index</a>: Complete index.
+
+</li></ul>
+<p>--- The Detailed Node Listing ---
+
+<p>Introduction
+
+</p>
+<ul class="menu">
+<li><a href="#History-and-Philosophy">History and Philosophy</a>
+<li><a href="#System-Architecture">System Architecture</a>
+<li><a href="#Control-Flow">Control Flow</a>
+
+</li></ul>
+<p>System Architecture
+
+</p>
+<ul class="menu">
+<li><a href="#BuildSlave-Connections">BuildSlave Connections</a>
+<li><a href="#Buildmaster-Architecture">Buildmaster Architecture</a>
+<li><a href="#Status-Delivery-Architecture">Status Delivery Architecture</a>
+
+</li></ul>
+<p>Installation
+
+</p>
+<ul class="menu">
+<li><a href="#Requirements">Requirements</a>
+<li><a href="#Installing-the-code">Installing the code</a>
+<li><a href="#Creating-a-buildmaster">Creating a buildmaster</a>
+<li><a href="#Upgrading-an-Existing-Buildmaster">Upgrading an Existing Buildmaster</a>
+<li><a href="#Creating-a-buildslave">Creating a buildslave</a>
+<li><a href="#Launching-the-daemons">Launching the daemons</a>
+<li><a href="#Logfiles">Logfiles</a>
+<li><a href="#Shutdown">Shutdown</a>
+<li><a href="#Maintenance">Maintenance</a>
+<li><a href="#Troubleshooting">Troubleshooting</a>
+
+</li></ul>
+<p>Creating a buildslave
+
+</p>
+<ul class="menu">
+<li><a href="#Buildslave-Options">Buildslave Options</a>
+
+</li></ul>
+<p>Troubleshooting
+
+</p>
+<ul class="menu">
+<li><a href="#Starting-the-buildslave">Starting the buildslave</a>
+<li><a href="#Connecting-to-the-buildmaster">Connecting to the buildmaster</a>
+<li><a href="#Forcing-Builds">Forcing Builds</a>
+
+</li></ul>
+<p>Concepts
+
+</p>
+<ul class="menu">
+<li><a href="#Version-Control-Systems">Version Control Systems</a>
+<li><a href="#Schedulers">Schedulers</a>
+<li><a href="#BuildSet">BuildSet</a>
+<li><a href="#BuildRequest">BuildRequest</a>
+<li><a href="#Builder">Builder</a>
+<li><a href="#Users">Users</a>
+<li><a href="#Build-Properties">Build Properties</a>
+
+</li></ul>
+<p>Version Control Systems
+
+</p>
+<ul class="menu">
+<li><a href="#Generalizing-VC-Systems">Generalizing VC Systems</a>
+<li><a href="#Source-Tree-Specifications">Source Tree Specifications</a>
+<li><a href="#How-Different-VC-Systems-Specify-Sources">How Different VC Systems Specify Sources</a>
+<li><a href="#Attributes-of-Changes">Attributes of Changes</a>
+
+</li></ul>
+<p>Users
+
+</p>
+<ul class="menu">
+<li><a href="#Doing-Things-With-Users">Doing Things With Users</a>
+<li><a href="#Email-Addresses">Email Addresses</a>
+<li><a href="#IRC-Nicknames">IRC Nicknames</a>
+<li><a href="#Live-Status-Clients">Live Status Clients</a>
+
+</li></ul>
+<p>Configuration
+
+</p>
+<ul class="menu">
+<li><a href="#Config-File-Format">Config File Format</a>
+<li><a href="#Loading-the-Config-File">Loading the Config File</a>
+<li><a href="#Testing-the-Config-File">Testing the Config File</a>
+<li><a href="#Defining-the-Project">Defining the Project</a>
+<li><a href="#Change-Sources-and-Schedulers">Change Sources and Schedulers</a>
+<li><a href="#Setting-the-slaveport">Setting the slaveport</a>
+<li><a href="#Buildslave-Specifiers">Buildslave Specifiers</a>
+<li><a href="#On_002dDemand-_0028_0022Latent_0022_0029-Buildslaves">On-Demand ("Latent") Buildslaves</a>
+<li><a href="#Defining-Global-Properties">Defining Global Properties</a>
+<li><a href="#Defining-Builders">Defining Builders</a>
+<li><a href="#Defining-Status-Targets">Defining Status Targets</a>
+<li><a href="#Debug-options">Debug options</a>
+
+</li></ul>
+<p>Change Sources and Schedulers
+
+</p>
+<ul class="menu">
+<li><a href="#Scheduler-Scheduler">Scheduler Scheduler</a>
+<li><a href="#AnyBranchScheduler">AnyBranchScheduler</a>
+<li><a href="#Dependent-Scheduler">Dependent Scheduler</a>
+<li><a href="#Periodic-Scheduler">Periodic Scheduler</a>
+<li><a href="#Nightly-Scheduler">Nightly Scheduler</a>
+<li><a href="#Try-Schedulers">Try Schedulers</a>
+<li><a href="#Triggerable-Scheduler">Triggerable Scheduler</a>
+
+</li></ul>
+<p>Buildslave Specifiers
+</p>
+<ul class="menu">
+<li><a href="#When-Buildslaves-Go-Missing">When Buildslaves Go Missing</a>
+
+</li></ul>
+<p>On-Demand ("Latent") Buildslaves
+</p>
+<ul class="menu">
+<li><a href="#Amazon-Web-Services-Elastic-Compute-Cloud-_0028_0022AWS-EC2_0022_0029">Amazon Web Services Elastic Compute Cloud ("AWS EC2")</a>
+<li><a href="#Dangers-with-Latent-Buildslaves">Dangers with Latent Buildslaves</a>
+<li><a href="#Writing-New-Latent-Buildslaves">Writing New Latent Buildslaves</a>
+
+</li></ul>
+<p>Getting Source Code Changes
+
+</p>
+<ul class="menu">
+<li><a href="#Change-Sources">Change Sources</a>
+<li><a href="#Choosing-ChangeSources">Choosing ChangeSources</a>
+<li><a href="#CVSToys-_002d-PBService">CVSToys - PBService</a>
+<li><a href="#Mail_002dparsing-ChangeSources">Mail-parsing ChangeSources</a>
+<li><a href="#PBChangeSource">PBChangeSource</a>
+<li><a href="#P4Source">P4Source</a>
+<li><a href="#BonsaiPoller">BonsaiPoller</a>
+<li><a href="#SVNPoller">SVNPoller</a>
+<li><a href="#MercurialHook">MercurialHook</a>
+<li><a href="#Bzr-Hook">Bzr Hook</a>
+<li><a href="#Bzr-Poller">Bzr Poller</a>
+
+</li></ul>
+<p>Mail-parsing ChangeSources
+
+</p>
+<ul class="menu">
+<li><a href="#Subscribing-the-Buildmaster">Subscribing the Buildmaster</a>
+<li><a href="#Using-Maildirs">Using Maildirs</a>
+<li><a href="#Parsing-Email-Change-Messages">Parsing Email Change Messages</a>
+
+</li></ul>
+<p>Parsing Email Change Messages
+
+</p>
+<ul class="menu">
+<li><a href="#FCMaildirSource">FCMaildirSource</a>
+<li><a href="#SyncmailMaildirSource">SyncmailMaildirSource</a>
+<li><a href="#BonsaiMaildirSource">BonsaiMaildirSource</a>
+<li><a href="#SVNCommitEmailMaildirSource">SVNCommitEmailMaildirSource</a>
+
+</li></ul>
+<p>Build Process
+
+</p>
+<ul class="menu">
+<li><a href="#Build-Steps">Build Steps</a>
+<li><a href="#Interlocks">Interlocks</a>
+<li><a href="#Build-Factories">Build Factories</a>
+
+</li></ul>
+<p>Build Steps
+
+</p>
+<ul class="menu">
+<li><a href="#Common-Parameters">Common Parameters</a>
+<li><a href="#Using-Build-Properties">Using Build Properties</a>
+<li><a href="#Source-Checkout">Source Checkout</a>
+<li><a href="#ShellCommand">ShellCommand</a>
+<li><a href="#Simple-ShellCommand-Subclasses">Simple ShellCommand Subclasses</a>
+<li><a href="#Python-BuildSteps">Python BuildSteps</a>
+<li><a href="#Transferring-Files">Transferring Files</a>
+<li><a href="#Steps-That-Run-on-the-Master">Steps That Run on the Master</a>
+<li><a href="#Triggering-Schedulers">Triggering Schedulers</a>
+<li><a href="#Writing-New-BuildSteps">Writing New BuildSteps</a>
+
+</li></ul>
+<p>Source Checkout
+
+</p>
+<ul class="menu">
+<li><a href="#CVS">CVS</a>
+<li><a href="#SVN">SVN</a>
+<li><a href="#Darcs">Darcs</a>
+<li><a href="#Mercurial">Mercurial</a>
+<li><a href="#Arch">Arch</a>
+<li><a href="#Bazaar">Bazaar</a>
+<li><a href="#Bzr">Bzr</a>
+<li><a href="#P4">P4</a>
+<li><a href="#Git">Git</a>
+
+</li></ul>
+<p>Simple ShellCommand Subclasses
+
+</p>
+<ul class="menu">
+<li><a href="#Configure">Configure</a>
+<li><a href="#Compile">Compile</a>
+<li><a href="#Test">Test</a>
+<li><a href="#TreeSize">TreeSize</a>
+<li><a href="#PerlModuleTest">PerlModuleTest</a>
+<li><a href="#SetProperty">SetProperty</a>
+
+</li></ul>
+<p>Python BuildSteps
+
+</p>
+<ul class="menu">
+<li><a href="#BuildEPYDoc">BuildEPYDoc</a>
+<li><a href="#PyFlakes">PyFlakes</a>
+<li><a href="#PyLint">PyLint</a>
+
+</li></ul>
+<p>Writing New BuildSteps
+
+</p>
+<ul class="menu">
+<li><a href="#BuildStep-LogFiles">BuildStep LogFiles</a>
+<li><a href="#Reading-Logfiles">Reading Logfiles</a>
+<li><a href="#Adding-LogObservers">Adding LogObservers</a>
+<li><a href="#BuildStep-URLs">BuildStep URLs</a>
+
+</li></ul>
+<p>Build Factories
+
+</p>
+<ul class="menu">
+<li><a href="#BuildStep-Objects">BuildStep Objects</a>
+<li><a href="#BuildFactory">BuildFactory</a>
+<li><a href="#Process_002dSpecific-build-factories">Process-Specific build factories</a>
+
+</li></ul>
+<p>BuildStep Objects
+
+</p>
+<ul class="menu">
+<li><a href="#BuildFactory-Attributes">BuildFactory Attributes</a>
+<li><a href="#Quick-builds">Quick builds</a>
+
+</li></ul>
+<p>BuildFactory
+
+</p>
+<ul class="menu">
+<li><a href="#BuildFactory-Attributes">BuildFactory Attributes</a>
+<li><a href="#Quick-builds">Quick builds</a>
+
+</li></ul>
+<p>Process-Specific build factories
+
+</p>
+<ul class="menu">
+<li><a href="#GNUAutoconf">GNUAutoconf</a>
+<li><a href="#CPAN">CPAN</a>
+<li><a href="#Python-distutils">Python distutils</a>
+<li><a href="#Python_002fTwisted_002ftrial-projects">Python/Twisted/trial projects</a>
+
+</li></ul>
+<p>Status Delivery
+
+</p>
+<ul class="menu">
+<li><a href="#WebStatus">WebStatus</a>
+<li><a href="#MailNotifier">MailNotifier</a>
+<li><a href="#IRC-Bot">IRC Bot</a>
+<li><a href="#PBListener">PBListener</a>
+<li><a href="#Writing-New-Status-Plugins">Writing New Status Plugins</a>
+
+</li></ul>
+<p>WebStatus
+
+</p>
+<ul class="menu">
+<li><a href="#WebStatus-Configuration-Parameters">WebStatus Configuration Parameters</a>
+<li><a href="#Buildbot-Web-Resources">Buildbot Web Resources</a>
+<li><a href="#XMLRPC-server">XMLRPC server</a>
+<li><a href="#HTML-Waterfall">HTML Waterfall</a>
+
+</li></ul>
+<p>Command-line tool
+
+</p>
+<ul class="menu">
+<li><a href="#Administrator-Tools">Administrator Tools</a>
+<li><a href="#Developer-Tools">Developer Tools</a>
+<li><a href="#Other-Tools">Other Tools</a>
+<li><a href="#g_t_002ebuildbot-config-directory">.buildbot config directory</a>
+
+</li></ul>
+<p>Developer Tools
+
+</p>
+<ul class="menu">
+<li><a href="#statuslog">statuslog</a>
+<li><a href="#statusgui">statusgui</a>
+<li><a href="#try">try</a>
+
+</li></ul>
+<p>waiting for results
+
+</p>
+<ul class="menu">
+<li><a href="#try-_002d_002ddiff">try --diff</a>
+
+</li></ul>
+<p>Other Tools
+
+</p>
+<ul class="menu">
+<li><a href="#sendchange">sendchange</a>
+<li><a href="#debugclient">debugclient</a>
+
+ </ul>
+
+<div class="node">
+<p><hr>
+<a name="Introduction"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Installation">Installation</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Top">Top</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="chapter">1 Introduction</h2>
+
+<p><a name="index-introduction-1"></a>
+The BuildBot is a system to automate the compile/test cycle required by most
+software projects to validate code changes. By automatically rebuilding and
+testing the tree each time something has changed, build problems are
+pinpointed quickly, before other developers are inconvenienced by the
+failure. The guilty developer can be identified and harassed without human
+intervention. By running the builds on a variety of platforms, developers
+who do not have the facilities to test their changes everywhere before
+checkin will at least know shortly afterwards whether they have broken the
+build or not. Warning counts, lint checks, image size, compile time, and
+other build parameters can be tracked over time, are more visible, and
+are therefore easier to improve.
+
+ <p>The overall goal is to reduce tree breakage and provide a platform to
+run tests or code-quality checks that are too annoying or pedantic for
+any human to waste their time with. Developers get immediate (and
+potentially public) feedback about their changes, encouraging them to
+be more careful about testing before checkin.
+
+ <p>Features:
+
+ <ul>
+<li>run builds on a variety of slave platforms
+<li>arbitrary build process: handles projects using C, Python, whatever
+<li>minimal host requirements: python and Twisted
+<li>slaves can be behind a firewall if they can still do checkout
+<li>status delivery through web page, email, IRC, other protocols
+<li>track builds in progress, provide estimated completion time
+<li>flexible configuration by subclassing generic build process classes
+<li>debug tools to force a new build, submit fake Changes, query slave status
+<li>released under the GPL
+</ul>
+
+<ul class="menu">
+<li><a accesskey="1" href="#History-and-Philosophy">History and Philosophy</a>
+<li><a accesskey="2" href="#System-Architecture">System Architecture</a>
+<li><a accesskey="3" href="#Control-Flow">Control Flow</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="History-and-Philosophy"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#System-Architecture">System Architecture</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Introduction">Introduction</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Introduction">Introduction</a>
+
+</div>
+
+<h3 class="section">1.1 History and Philosophy</h3>
+
+<p><a name="index-Philosophy-of-operation-2"></a>
+The Buildbot was inspired by a similar project built for a development
+team writing a cross-platform embedded system. The various components
+of the project were supposed to compile and run on several flavors of
+unix (linux, solaris, BSD), but individual developers had their own
+preferences and tended to stick to a single platform. From time to
+time, incompatibilities would sneak in (some unix platforms want to
+use <code>string.h</code>, some prefer <code>strings.h</code>), and then the tree
+would compile for some developers but not others. The buildbot was
+written to automate the human process of walking into the office,
+updating a tree, compiling (and discovering the breakage), finding the
+developer at fault, and complaining to them about the problem they had
+introduced. With multiple platforms it was difficult for developers to
+do the right thing (compile their potential change on all platforms);
+the buildbot offered a way to help.
+
+ <p>Another problem was when programmers would change the behavior of a
+library without warning its users, or change internal aspects that
+other code was (unfortunately) depending upon. Adding unit tests to
+the codebase helps here: if an application's unit tests pass despite
+changes in the libraries it uses, you can have more confidence that
+the library changes haven't broken anything. Many developers
+complained that the unit tests were inconvenient or took too long to
+run: having the buildbot run them reduces the developer's workload to
+a minimum.
+
+ <p>In general, having more visibility into the project is always good,
+and automation makes it easier for developers to do the right thing.
+When everyone can see the status of the project, developers are
+encouraged to keep the tree in good working order. Unit tests that
+aren't run on a regular basis tend to suffer from bitrot just like
+code does: exercising them on a regular basis helps to keep them
+functioning and useful.
+
+ <p>The current version of the Buildbot is additionally targeted at
+distributed free-software projects, where resources and platforms are
+only available when provided by interested volunteers. The buildslaves
+are designed to require an absolute minimum of configuration, reducing
+the effort a potential volunteer needs to expend to be able to
+contribute a new test environment to the project. The goal is for
+anyone who wishes that a given project would run on their favorite
+platform should be able to offer that project a buildslave, running on
+that platform, where they can verify that their portability code
+works, and keeps working.
+
+<div class="node">
+<p><hr>
+<a name="System-Architecture"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Control-Flow">Control Flow</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#History-and-Philosophy">History and Philosophy</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Introduction">Introduction</a>
+
+</div>
+
+<!-- node-name, next, previous, up -->
+<h3 class="section">1.2 System Architecture</h3>
+
+<p>The Buildbot consists of a single <code>buildmaster</code> and one or more
+<code>buildslaves</code>, connected in a star topology. The buildmaster
+makes all decisions about what, when, and how to build. It sends
+commands to be run on the build slaves, which simply execute the
+commands and return the results. (certain steps involve more local
+decision making, where the overhead of sending a lot of commands back
+and forth would be inappropriate, but in general the buildmaster is
+responsible for everything).
+
+ <p>The buildmaster is usually fed <code>Changes</code> by some sort of version
+control system (see <a href="#Change-Sources">Change Sources</a>), which may cause builds to be
+run. As the builds are performed, various status messages are
+produced, which are then sent to any registered Status Targets
+(see <a href="#Status-Delivery">Status Delivery</a>).
+
+<!-- @image{FILENAME, WIDTH, HEIGHT, ALTTEXT, EXTENSION} -->
+ <div class="block-image"><img src="images/overview.png" alt="Overview Diagram"></div>
+
+ <p>The buildmaster is configured and maintained by the &ldquo;buildmaster
+admin&rdquo;, who is generally the project team member responsible for
+build process issues. Each buildslave is maintained by a &ldquo;buildslave
+admin&rdquo;, who does not need to be quite as involved. Generally slaves are
+run by anyone who has an interest in seeing the project work well on
+their favorite platform.
+
+<ul class="menu">
+<li><a accesskey="1" href="#BuildSlave-Connections">BuildSlave Connections</a>
+<li><a accesskey="2" href="#Buildmaster-Architecture">Buildmaster Architecture</a>
+<li><a accesskey="3" href="#Status-Delivery-Architecture">Status Delivery Architecture</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="BuildSlave-Connections"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Buildmaster-Architecture">Buildmaster Architecture</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#System-Architecture">System Architecture</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#System-Architecture">System Architecture</a>
+
+</div>
+
+<h4 class="subsection">1.2.1 BuildSlave Connections</h4>
+
+<p>The buildslaves are typically run on a variety of separate machines,
+at least one per platform of interest. These machines connect to the
+buildmaster over a TCP connection to a publicly-visible port. As a
+result, the buildslaves can live behind a NAT box or similar
+firewalls, as long as they can get to the buildmaster. The TCP connections
+are initiated by the buildslave and accepted by the buildmaster, but
+commands and results travel both ways within this connection. The
+buildmaster is always in charge, so all commands travel exclusively
+from the buildmaster to the buildslave.
+
+ <p>To perform builds, the buildslaves must typically obtain source code
+from a CVS/SVN/etc repository. Therefore they must also be able to
+reach the repository. The buildmaster provides instructions for
+performing builds, but does not provide the source code itself.
+
+ <div class="block-image"><img src="images/slaves.png" alt="BuildSlave Connections"></div>
+
+<div class="node">
+<p><hr>
+<a name="Buildmaster-Architecture"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Status-Delivery-Architecture">Status Delivery Architecture</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#BuildSlave-Connections">BuildSlave Connections</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#System-Architecture">System Architecture</a>
+
+</div>
+
+<h4 class="subsection">1.2.2 Buildmaster Architecture</h4>
+
+<p>The Buildmaster consists of several pieces:
+
+ <div class="block-image"><img src="images/master.png" alt="BuildMaster Architecture"></div>
+
+ <ul>
+<li>Change Sources, which create a Change object each time something is
+modified in the VC repository. Most ChangeSources listen for messages
+from a hook script of some sort. Some sources actively poll the
+repository on a regular basis. All Changes are fed to the Schedulers.
+
+ <li>Schedulers, which decide when builds should be performed. They collect
+Changes into BuildRequests, which are then queued for delivery to
+Builders until a buildslave is available.
+
+ <li>Builders, which control exactly <em>how</em> each build is performed
+(with a series of BuildSteps, configured in a BuildFactory). Each
+Build is run on a single buildslave.
+
+ <li>Status plugins, which deliver information about the build results
+through protocols like HTTP, mail, and IRC.
+
+ </ul>
+
+ <div class="block-image"><img src="images/slavebuilder.png" alt="SlaveBuilders"></div>
+
+ <p>Each Builder is configured with a list of BuildSlaves that it will use
+for its builds. These buildslaves are expected to behave identically:
+the only reason to use multiple BuildSlaves for a single Builder is to
+provide a measure of load-balancing.
+
+ <p>Within a single BuildSlave, each Builder creates its own SlaveBuilder
+instance. These SlaveBuilders operate independently from each other.
+Each gets its own base directory to work in. It is quite common to
+have many Builders sharing the same buildslave. For example, there
+might be two buildslaves: one for i386, and a second for PowerPC.
+There may then be a pair of Builders that do a full compile/test run,
+one for each architecture, and a lone Builder that creates snapshot
+source tarballs if the full builders complete successfully. The full
+builders would each run on a single buildslave, whereas the tarball
+creation step might run on either buildslave (since the platform
+doesn't matter when creating source tarballs). In this case, the
+mapping would look like:
+
+<pre class="example"> Builder(full-i386) -&gt; BuildSlaves(slave-i386)
+ Builder(full-ppc) -&gt; BuildSlaves(slave-ppc)
+ Builder(source-tarball) -&gt; BuildSlaves(slave-i386, slave-ppc)
+</pre>
+ <p>and each BuildSlave would have two SlaveBuilders inside it, one for a
+full builder, and a second for the source-tarball builder.
+
+ <p>Once a SlaveBuilder is available, the Builder pulls one or more
+BuildRequests off its incoming queue. (It may pull more than one if it
+determines that it can merge the requests together; for example, there
+may be multiple requests to build the current HEAD revision). These
+requests are merged into a single Build instance, which includes the
+SourceStamp that describes what exact version of the source code
+should be used for the build. The Build is then randomly assigned to a
+free SlaveBuilder and the build begins.
+
+ <p>The behaviour when BuildRequests are merged can be customized, see <a href="#Merging-BuildRequests">Merging BuildRequests</a>.
+
+<div class="node">
+<p><hr>
+<a name="Status-Delivery-Architecture"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Buildmaster-Architecture">Buildmaster Architecture</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#System-Architecture">System Architecture</a>
+
+</div>
+
+<h4 class="subsection">1.2.3 Status Delivery Architecture</h4>
+
+<p>The buildmaster maintains a central Status object, to which various
+status plugins are connected. Through this Status object, a full
+hierarchy of build status objects can be obtained.
+
+ <div class="block-image"><img src="images/status.png" alt="Status Delivery"></div>
+
+ <p>The configuration file controls which status plugins are active. Each
+status plugin gets a reference to the top-level Status object. From
+there they can request information on each Builder, Build, Step, and
+LogFile. This query-on-demand interface is used by the html.Waterfall
+plugin to create the main status page each time a web browser hits the
+main URL.
+
+ <p>The status plugins can also subscribe to hear about new Builds as they
+occur: this is used by the MailNotifier to create new email messages
+for each recently-completed Build.
+
+ <p>The Status object records the status of old builds on disk in the
+buildmaster's base directory. This allows it to return information
+about historical builds.
+
+ <p>There are also status objects that correspond to Schedulers and
+BuildSlaves. These allow status plugins to report information about
+upcoming builds, and the online/offline status of each buildslave.
+
+<div class="node">
+<p><hr>
+<a name="Control-Flow"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#System-Architecture">System Architecture</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Introduction">Introduction</a>
+
+</div>
+
+<!-- node-name, next, previous, up -->
+<h3 class="section">1.3 Control Flow</h3>
+
+<p>A day in the life of the buildbot:
+
+ <ul>
+<li>A developer commits some source code changes to the repository. A hook
+script or commit trigger of some sort sends information about this
+change to the buildmaster through one of its configured Change
+Sources. This notification might arrive via email, or over a network
+connection (either initiated by the buildmaster as it &ldquo;subscribes&rdquo;
+to changes, or by the commit trigger as it pushes Changes towards the
+buildmaster). The Change contains information about who made the
+change, what files were modified, which revision contains the change,
+and any checkin comments.
+
+ <li>The buildmaster distributes this change to all of its configured
+Schedulers. Any &ldquo;important&rdquo; changes cause the &ldquo;tree-stable-timer&rdquo;
+to be started, and the Change is added to a list of those that will go
+into a new Build. When the timer expires, a Build is started on each
+of a set of configured Builders, all compiling/testing the same source
+code. Unless configured otherwise, all Builds run in parallel on the
+various buildslaves.
+
+ <li>The Build consists of a series of Steps. Each Step causes some number
+of commands to be invoked on the remote buildslave associated with
+that Builder. The first step is almost always to perform a checkout of
+the appropriate revision from the same VC system that produced the
+Change. The rest generally perform a compile and run unit tests. As
+each Step runs, the buildslave reports back command output and return
+status to the buildmaster.
+
+ <li>As the Build runs, status messages like &ldquo;Build Started&rdquo;, &ldquo;Step
+Started&rdquo;, &ldquo;Build Finished&rdquo;, etc, are published to a collection of
+Status Targets. One of these targets is usually the HTML &ldquo;Waterfall&rdquo;
+display, which shows a chronological list of events, and summarizes
+the results of the most recent build at the top of each column.
+Developers can periodically check this page to see how their changes
+have fared. If they see red, they know that they've made a mistake and
+need to fix it. If they see green, they know that they've done their
+duty and don't need to worry about their change breaking anything.
+
+ <li>If a MailNotifier status target is active, the completion of a build
+will cause email to be sent to any developers whose Changes were
+incorporated into this Build. The MailNotifier can be configured to
+only send mail upon failing builds, or for builds which have just
+transitioned from passing to failing. Other status targets can provide
+similar real-time notification via different communication channels,
+like IRC.
+
+ </ul>
+
+<div class="node">
+<p><hr>
+<a name="Installation"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Concepts">Concepts</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Introduction">Introduction</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="chapter">2 Installation</h2>
+
+<ul class="menu">
+<li><a accesskey="1" href="#Requirements">Requirements</a>
+<li><a accesskey="2" href="#Installing-the-code">Installing the code</a>
+<li><a accesskey="3" href="#Creating-a-buildmaster">Creating a buildmaster</a>
+<li><a accesskey="4" href="#Upgrading-an-Existing-Buildmaster">Upgrading an Existing Buildmaster</a>
+<li><a accesskey="5" href="#Creating-a-buildslave">Creating a buildslave</a>
+<li><a accesskey="6" href="#Launching-the-daemons">Launching the daemons</a>
+<li><a accesskey="7" href="#Logfiles">Logfiles</a>
+<li><a accesskey="8" href="#Shutdown">Shutdown</a>
+<li><a accesskey="9" href="#Maintenance">Maintenance</a>
+<li><a href="#Troubleshooting">Troubleshooting</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Requirements"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Installing-the-code">Installing the code</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Installation">Installation</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Installation">Installation</a>
+
+</div>
+
+<h3 class="section">2.1 Requirements</h3>
+
+<p>At a bare minimum, you'll need the following (for both the buildmaster
+and a buildslave):
+
+ <ul>
+<li>Python: http://www.python.org
+
+ <p>Buildbot requires python-2.3 or later, and is primarily developed
+against python-2.4. It is also tested against python-2.5 .
+
+ <li>Twisted: http://twistedmatrix.com
+
+ <p>Both the buildmaster and the buildslaves require Twisted-2.0.x or
+later. It has been tested against all releases of Twisted up to
+Twisted-2.5.0 (the most recent as of this writing). As always, the
+most recent version is recommended.
+
+ <p>Twisted is delivered as a collection of subpackages. You'll need at
+least "Twisted" (the core package), and you'll also want TwistedMail,
+TwistedWeb, and TwistedWords (for sending email, serving a web status
+page, and delivering build status via IRC, respectively). You might
+also want TwistedConch (for the encrypted Manhole debug port). Note
+that Twisted requires ZopeInterface to be installed as well.
+
+ </ul>
+
+ <p>Certain other packages may be useful on the system running the
+buildmaster:
+
+ <ul>
+<li>CVSToys: http://purl.net/net/CVSToys
+
+ <p>If your buildmaster uses FreshCVSSource to receive change notification
+from a cvstoys daemon, it will require CVSToys be installed (tested
+with CVSToys-1.0.10). If it doesn't use that source (i.e. if you
+only use a mail-parsing change source, or the SVN notification
+script), you will not need CVSToys.
+
+ </ul>
+
+ <p>And of course, your project's build process will impose additional
+requirements on the buildslaves. These hosts must have all the tools
+necessary to compile and test your project's source code.
+
+<div class="node">
+<p><hr>
+<a name="Installing-the-code"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Creating-a-buildmaster">Creating a buildmaster</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Requirements">Requirements</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Installation">Installation</a>
+
+</div>
+
+<h3 class="section">2.2 Installing the code</h3>
+
+<p><a name="index-installation-3"></a>
+The Buildbot is installed using the standard python <code>distutils</code>
+module. After unpacking the tarball, the process is:
+
+<pre class="example"> python setup.py build
+ python setup.py install
+</pre>
+ <p>where the install step may need to be done as root. This will put the
+bulk of the code in somewhere like
+/usr/lib/python2.3/site-packages/buildbot . It will also install the
+<code>buildbot</code> command-line tool in /usr/bin/buildbot.
+
+ <p>To test this, shift to a different directory (like /tmp), and run:
+
+<pre class="example"> buildbot --version
+</pre>
+ <p>If it shows you the versions of Buildbot and Twisted, the install went
+ok. If it says <code>no such command</code> or it gets an <code>ImportError</code>
+when it tries to load the libraries, then something went wrong.
+<code>pydoc buildbot</code> is another useful diagnostic tool.
+
+ <p>Windows users will find these files in other places. You will need to
+make sure that python can find the libraries, and will probably find
+it convenient to have <code>buildbot</code> on your PATH.
+
+ <p>If you wish, you can run the buildbot unit test suite like this:
+
+<pre class="example"> PYTHONPATH=. trial buildbot.test
+</pre>
+ <p>This should run up to 192 tests, depending upon what VC tools you have
+installed. On my desktop machine it takes about five minutes to
+complete. Nothing should fail, a few might be skipped. If any of the
+tests fail, you should stop and investigate the cause before
+continuing the installation process, as it will probably be easier to
+track down the bug early.
+
+ <p>If you cannot or do not wish to install the buildbot into a site-wide
+location like <samp><span class="file">/usr</span></samp> or <samp><span class="file">/usr/local</span></samp>, you can also install
+it into the account's home directory. Do the install command like
+this:
+
+<pre class="example"> python setup.py install --home=~
+</pre>
+ <p>That will populate <samp><span class="file">~/lib/python</span></samp> and create
+<samp><span class="file">~/bin/buildbot</span></samp>. Make sure this lib directory is on your
+<code>PYTHONPATH</code>.
+
+<div class="node">
+<p><hr>
+<a name="Creating-a-buildmaster"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Upgrading-an-Existing-Buildmaster">Upgrading an Existing Buildmaster</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Installing-the-code">Installing the code</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Installation">Installation</a>
+
+</div>
+
+<h3 class="section">2.3 Creating a buildmaster</h3>
+
+<p>As you learned earlier (see <a href="#System-Architecture">System Architecture</a>), the buildmaster
+runs on a central host (usually one that is publicly visible, so
+everybody can check on the status of the project), and controls all
+aspects of the buildbot system. Let us call this host
+<code>buildbot.example.org</code>.
+
+ <p>You may wish to create a separate user account for the buildmaster,
+perhaps named <code>buildmaster</code>. This can help keep your personal
+configuration distinct from that of the buildmaster and is useful if
+you have to use a mail-based notification system (see <a href="#Change-Sources">Change Sources</a>). However, the Buildbot will work just fine with your regular
+user account.
+
+ <p>You need to choose a directory for the buildmaster, called the
+<code>basedir</code>. This directory will be owned by the buildmaster, which
+will use configuration files therein, and create status files as it
+runs. <samp><span class="file">~/Buildbot</span></samp> is a likely value. If you run multiple
+buildmasters in the same account, or if you run both masters and
+slaves, you may want a more distinctive name like
+<samp><span class="file">~/Buildbot/master/gnomovision</span></samp> or
+<samp><span class="file">~/Buildmasters/fooproject</span></samp>. If you are using a separate user
+account, this might just be <samp><span class="file">~buildmaster/masters/fooproject</span></samp>.
+
+ <p>Once you've picked a directory, use the <samp><span class="command">buildbot
+create-master</span></samp> command to create the directory and populate it with
+startup files:
+
+<pre class="example"> buildbot create-master <var>basedir</var>
+</pre>
+ <p>You will need to create a configuration file (see <a href="#Configuration">Configuration</a>)
+before starting the buildmaster. Most of the rest of this manual is
+dedicated to explaining how to do this. A sample configuration file is
+placed in the working directory, named <samp><span class="file">master.cfg.sample</span></samp>, which
+can be copied to <samp><span class="file">master.cfg</span></samp> and edited to suit your purposes.
+
+ <p>(Internal details: This command creates a file named
+<samp><span class="file">buildbot.tac</span></samp> that contains all the state necessary to create
+the buildmaster. Twisted has a tool called <code>twistd</code> which can use
+this .tac file to create and launch a buildmaster instance. twistd
+takes care of logging and daemonization (running the program in the
+background). <samp><span class="file">/usr/bin/buildbot</span></samp> is a front end which runs twistd
+for you.)
+
+ <p>In addition to <samp><span class="file">buildbot.tac</span></samp>, a small <samp><span class="file">Makefile.sample</span></samp> is
+installed. This can be used as the basis for customized daemon startup,
+See <a href="#Launching-the-daemons">Launching the daemons</a>.
+
+<div class="node">
+<p><hr>
+<a name="Upgrading-an-Existing-Buildmaster"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Creating-a-buildslave">Creating a buildslave</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Creating-a-buildmaster">Creating a buildmaster</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Installation">Installation</a>
+
+</div>
+
+<h3 class="section">2.4 Upgrading an Existing Buildmaster</h3>
+
+<p>If you have just installed a new version of the Buildbot code, and you
+have buildmasters that were created using an older version, you'll
+need to upgrade these buildmasters before you can use them. The
+upgrade process adds and modifies files in the buildmaster's base
+directory to make it compatible with the new code.
+
+<pre class="example"> buildbot upgrade-master <var>basedir</var>
+</pre>
+ <p>This command will also scan your <samp><span class="file">master.cfg</span></samp> file for
+incompatibilities (by loading it and printing any errors or deprecation
+warnings that occur). Each buildbot release tries to be compatible
+with configurations that worked cleanly (i.e. without deprecation
+warnings) on the previous release: any functions or classes that are
+to be removed will first be deprecated in a release, to give users a
+chance to start using their replacement.
+
+ <p>The 0.7.6 release introduced the <samp><span class="file">public_html/</span></samp> directory, which
+contains <samp><span class="file">index.html</span></samp> and other files served by the
+<code>WebStatus</code> and <code>Waterfall</code> status displays. The
+<code>upgrade-master</code> command will create these files if they do not
+already exist. It will not modify existing copies, but it will write a
+new copy in e.g. <samp><span class="file">index.html.new</span></samp> if the new version differs from
+the version that already exists.
+
+ <p>The <code>upgrade-master</code> command is idempotent. It is safe to run it
+multiple times. After each upgrade of the buildbot code, you should
+use <code>upgrade-master</code> on all your buildmasters.
+
+<div class="node">
+<p><hr>
+<a name="Creating-a-buildslave"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Launching-the-daemons">Launching the daemons</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Upgrading-an-Existing-Buildmaster">Upgrading an Existing Buildmaster</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Installation">Installation</a>
+
+</div>
+
+<h3 class="section">2.5 Creating a buildslave</h3>
+
+<p>Typically, you will be adding a buildslave to an existing buildmaster,
+to provide additional architecture coverage. The buildbot
+administrator will give you several pieces of information necessary to
+connect to the buildmaster. You should also be somewhat familiar with
+the project being tested, so you can troubleshoot build problems
+locally.
+
+ <p>The buildbot exists to make sure that the project's stated &ldquo;how to
+build it&rdquo; process actually works. To this end, the buildslave should
+run in an environment just like that of your regular developers.
+Typically the project build process is documented somewhere
+(<samp><span class="file">README</span></samp>, <samp><span class="file">INSTALL</span></samp>, etc), in a document that should
+mention all library dependencies and contain a basic set of build
+instructions. This document will be useful as you configure the host
+and account in which the buildslave runs.
+
+ <p>Here's a good checklist for setting up a buildslave:
+
+ <ol type=1 start=1>
+<li>Set up the account
+
+ <p>It is recommended (although not mandatory) to set up a separate user
+account for the buildslave. This account is frequently named
+<code>buildbot</code> or <code>buildslave</code>. This serves to isolate your
+personal working environment from that of the slave's, and helps to
+minimize the security threat posed by letting possibly-unknown
+contributors run arbitrary code on your system. The account should
+have a minimum of fancy init scripts.
+
+ <li>Install the buildbot code
+
+ <p>Follow the instructions given earlier (see <a href="#Installing-the-code">Installing the code</a>).
+If you use a separate buildslave account, and you didn't install the
+buildbot code to a shared location, then you will need to install it
+with <code>--home=~</code> for each account that needs it.
+
+ <li>Set up the host
+
+ <p>Make sure the host can actually reach the buildmaster. Usually the
+buildmaster is running a status webserver on the same machine, so
+simply point your web browser at it and see if you can get there.
+Install whatever additional packages or libraries the project's
+INSTALL document advises. (or not: if your buildslave is supposed to
+make sure that building without optional libraries still works, then
+don't install those libraries).
+
+ <p>Again, these libraries don't necessarily have to be installed to a
+site-wide shared location, but they must be available to your build
+process. Accomplishing this is usually very specific to the build
+process, so installing them to <samp><span class="file">/usr</span></samp> or <samp><span class="file">/usr/local</span></samp> is
+usually the best approach.
+
+ <li>Test the build process
+
+ <p>Follow the instructions in the INSTALL document, in the buildslave's
+account. Perform a full CVS (or whatever) checkout, configure, make,
+run tests, etc. Confirm that the build works without manual fussing.
+If it doesn't work when you do it by hand, it will be unlikely to work
+when the buildbot attempts to do it in an automated fashion.
+
+ <li>Choose a base directory
+
+ <p>This should be somewhere in the buildslave's account, typically named
+after the project which is being tested. The buildslave will not touch
+any file outside of this directory. Something like <samp><span class="file">~/Buildbot</span></samp>
+or <samp><span class="file">~/Buildslaves/fooproject</span></samp> is appropriate.
+
+ <li>Get the buildmaster host/port, botname, and password
+
+ <p>When the buildbot admin configures the buildmaster to accept and use
+your buildslave, they will provide you with the following pieces of
+information:
+
+ <ul>
+<li>your buildslave's name
+<li>the password assigned to your buildslave
+<li>the hostname and port number of the buildmaster, i.e. buildbot.example.org:8007
+</ul>
+
+ <li>Create the buildslave
+
+ <p>Now run the 'buildbot' command as follows:
+
+ <pre class="example"> buildbot create-slave <var>BASEDIR</var> <var>MASTERHOST</var>:<var>PORT</var> <var>SLAVENAME</var> <var>PASSWORD</var>
+</pre>
+ <p>This will create the base directory and a collection of files inside,
+including the <samp><span class="file">buildbot.tac</span></samp> file that contains all the
+information you passed to the <code>buildbot</code> command.
+
+ <li>Fill in the hostinfo files
+
+ <p>When it first connects, the buildslave will send a few files up to the
+buildmaster which describe the host that it is running on. These files
+are presented on the web status display so that developers have more
+information to reproduce any test failures that are witnessed by the
+buildbot. There are sample files in the <samp><span class="file">info</span></samp> subdirectory of
+the buildbot's base directory. You should edit these to correctly
+describe you and your host.
+
+ <p><samp><span class="file">BASEDIR/info/admin</span></samp> should contain your name and email address.
+This is the &ldquo;buildslave admin address&rdquo;, and will be visible from the
+build status page (so you may wish to munge it a bit if
+address-harvesting spambots are a concern).
+
+ <p><samp><span class="file">BASEDIR/info/host</span></samp> should be filled with a brief description of
+the host: OS, version, memory size, CPU speed, versions of relevant
+libraries installed, and finally the version of the buildbot code
+which is running the buildslave.
+
+ <p>If you run many buildslaves, you may want to create a single
+<samp><span class="file">~buildslave/info</span></samp> file and share it among all the buildslaves
+with symlinks.
+
+ </ol>
+
+<ul class="menu">
+<li><a accesskey="1" href="#Buildslave-Options">Buildslave Options</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Buildslave-Options"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Creating-a-buildslave">Creating a buildslave</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Creating-a-buildslave">Creating a buildslave</a>
+
+</div>
+
+<h4 class="subsection">2.5.1 Buildslave Options</h4>
+
+<p>There are a handful of options you might want to use when creating the
+buildslave with the <samp><span class="command">buildbot create-slave &lt;options&gt; DIR &lt;params&gt;</span></samp>
+command. You can type <samp><span class="command">buildbot create-slave --help</span></samp> for a summary.
+To use these, just include them on the <samp><span class="command">buildbot create-slave</span></samp>
+command line, like this:
+
+<pre class="example"> buildbot create-slave --umask=022 ~/buildslave buildmaster.example.org:42012 myslavename mypasswd
+</pre>
+ <dl>
+<dt><code>--usepty</code><dd>This is a boolean flag that tells the buildslave whether to launch child
+processes in a PTY or with regular pipes (the default) when the master does not
+specify. This option is deprecated, as this particular parameter is better
+specified on the master.
+
+ <br><dt><code>--umask</code><dd>This is a string (generally an octal representation of an integer)
+which will cause the buildslave process' &ldquo;umask&rdquo; value to be set
+shortly after initialization. The &ldquo;twistd&rdquo; daemonization utility
+forces the umask to 077 at startup (which means that all files created
+by the buildslave or its child processes will be unreadable by any
+user other than the buildslave account). If you want build products to
+be readable by other accounts, you can add <code>--umask=022</code> to tell
+the buildslave to fix the umask after twistd clobbers it. If you want
+build products to be <em>writable</em> by other accounts too, use
+<code>--umask=000</code>, but this is likely to be a security problem.
+
+ <br><dt><code>--keepalive</code><dd>This is a number that indicates how frequently &ldquo;keepalive&rdquo; messages
+should be sent from the buildslave to the buildmaster, expressed in
+seconds. The default (600) causes a message to be sent to the
+buildmaster at least once every 10 minutes. To set this to a lower
+value, use e.g. <code>--keepalive=120</code>.
+
+ <p>If the buildslave is behind a NAT box or stateful firewall, these
+messages may help to keep the connection alive: some NAT boxes tend to
+forget about a connection if it has not been used in a while. When
+this happens, the buildmaster will think that the buildslave has
+disappeared, and builds will time out. Meanwhile the buildslave will
+not realize that anything is wrong.
+
+ <br><dt><code>--maxdelay</code><dd>This is a number that indicates the maximum amount of time the
+buildslave will wait between connection attempts, expressed in
+seconds. The default (300) causes the buildslave to wait at most 5
+minutes before trying to connect to the buildmaster again.
+
+ <br><dt><code>--log-size</code><dd>This is the size, in bytes, at which the Twisted log files are rotated.
+
+ <br><dt><code>--log-count</code><dd>This is the number of log rotations to keep around. You can either
+specify a number or <code>None</code> (the default) to keep all
+<samp><span class="file">twistd.log</span></samp> files around.
+
+ </dl>
+
+<div class="node">
+<p><hr>
+<a name="Launching-the-daemons"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Logfiles">Logfiles</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Creating-a-buildslave">Creating a buildslave</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Installation">Installation</a>
+
+</div>
+
+<h3 class="section">2.6 Launching the daemons</h3>
+
+<p>Both the buildmaster and the buildslave run as daemon programs. To
+launch them, pass the working directory to the <code>buildbot</code>
+command:
+
+<pre class="example"> buildbot start <var>BASEDIR</var>
+</pre>
+ <p>This command will start the daemon and then return, so normally it
+will not produce any output. To verify that the programs are indeed
+running, look for a pair of files named <samp><span class="file">twistd.log</span></samp> and
+<samp><span class="file">twistd.pid</span></samp> that should be created in the working directory.
+<samp><span class="file">twistd.pid</span></samp> contains the process ID of the newly-spawned daemon.
+
+ <p>When the buildslave connects to the buildmaster, new directories will
+start appearing in its base directory. The buildmaster tells the slave
+to create a directory for each Builder which will be using that slave.
+All build operations are performed within these directories: CVS
+checkouts, compiles, and tests.
+
+ <p>Once you get everything running, you will want to arrange for the
+buildbot daemons to be started at boot time. One way is to use
+<code>cron</code>, by putting them in a @reboot crontab entry<a rel="footnote" href="#fn-1" name="fnd-1"><sup>1</sup></a>:
+
+<pre class="example"> @reboot buildbot start <var>BASEDIR</var>
+</pre>
+ <p>When you run <samp><span class="command">crontab</span></samp> to set this up, remember to do it as
+the buildmaster or buildslave account! If you add this to your crontab
+when running as your regular account (or worse yet, root), then the
+daemon will run as the wrong user, quite possibly as one with more
+authority than you intended to provide.
+
+ <p>It is important to remember that the environment provided to cron jobs
+and init scripts can be quite different from your normal runtime.
+There may be fewer environment variables specified, and the PATH may
+be shorter than usual. It is a good idea to test out this method of
+launching the buildslave by using a cron job with a time in the near
+future, with the same command, and then check <samp><span class="file">twistd.log</span></samp> to
+make sure the slave actually started correctly. Common problems here
+are for <samp><span class="file">/usr/local</span></samp> or <samp><span class="file">~/bin</span></samp> to not be on your
+<code>PATH</code>, or for <code>PYTHONPATH</code> to not be set correctly.
+Sometimes <code>HOME</code> is messed up too.
+
+ <p>To modify the way the daemons are started (perhaps you want to set
+some environment variables first, or perform some cleanup each time),
+you can create a file named <samp><span class="file">Makefile.buildbot</span></samp> in the base
+directory. When the <samp><span class="file">buildbot</span></samp> front-end tool is told to
+<samp><span class="command">start</span></samp> the daemon, and it sees this file (and
+<samp><span class="file">/usr/bin/make</span></samp> exists), it will do <samp><span class="command">make -f
+Makefile.buildbot start</span></samp> instead of its usual action (which involves
+running <samp><span class="command">twistd</span></samp>). When the buildmaster or buildslave is
+installed, a <samp><span class="file">Makefile.sample</span></samp> is created which implements the
+same behavior as the <samp><span class="file">buildbot</span></samp> tool uses, so if you want to
+customize the process, just copy <samp><span class="file">Makefile.sample</span></samp> to
+<samp><span class="file">Makefile.buildbot</span></samp> and edit it as necessary.
+
+ <p>Some distributions may include conveniences to make starting buildbot
+at boot time easy. For instance, with the default buildbot package in
+Debian-based distributions, you may only need to modify
+<code>/etc/default/buildbot</code> (see also <code>/etc/init.d/buildbot</code>, which
+reads the configuration in <code>/etc/default/buildbot</code>).
+
+<div class="node">
+<p><hr>
+<a name="Logfiles"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Shutdown">Shutdown</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Launching-the-daemons">Launching the daemons</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Installation">Installation</a>
+
+</div>
+
+<h3 class="section">2.7 Logfiles</h3>
+
+<p><a name="index-logfiles-4"></a>
+While a buildbot daemon runs, it emits text to a logfile, named
+<samp><span class="file">twistd.log</span></samp>. A command like <code>tail -f twistd.log</code> is useful
+to watch the command output as it runs.
+
+ <p>The buildmaster will announce any errors with its configuration file
+in the logfile, so it is a good idea to look at the log at startup
+time to check for any problems. Most buildmaster activities will cause
+lines to be added to the log.
+
+<div class="node">
+<p><hr>
+<a name="Shutdown"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Maintenance">Maintenance</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Logfiles">Logfiles</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Installation">Installation</a>
+
+</div>
+
+<h3 class="section">2.8 Shutdown</h3>
+
+<p>To stop a buildmaster or buildslave manually, use:
+
+<pre class="example"> buildbot stop <var>BASEDIR</var>
+</pre>
+ <p>This simply looks for the <samp><span class="file">twistd.pid</span></samp> file and kills whatever
+process is identified within.
+
+ <p>At system shutdown, all processes are sent a <code>SIGTERM</code>. The
+buildmaster and buildslave will respond to this by shutting down
+normally.
+
+ <p>The buildmaster will respond to a <code>SIGHUP</code> by re-reading its
+config file. Of course, this only works on unix-like systems with
+signal support, and won't work on Windows. The following shortcut is
+available:
+
+<pre class="example"> buildbot reconfig <var>BASEDIR</var>
+</pre>
+ <p>When you update the Buildbot code to a new release, you will need to
+restart the buildmaster and/or buildslave before it can take advantage
+of the new code. You can do a <code>buildbot stop </code><var>BASEDIR</var> and
+<code>buildbot start </code><var>BASEDIR</var> in quick succession, or you can
+use the <code>restart</code> shortcut, which does both steps for you:
+
+<pre class="example"> buildbot restart <var>BASEDIR</var>
+</pre>
+ <p>There are certain configuration changes that are not handled cleanly
+by <code>buildbot reconfig</code>. If this occurs, <code>buildbot restart</code>
+is a more robust tool to fully switch over to the new configuration.
+
+ <p><code>buildbot restart</code> may also be used to start a stopped Buildbot
+instance. This behaviour is useful when writing scripts that stop, start
+and restart Buildbot.
+
+ <p>A buildslave may also be gracefully shut down from the
+<a href="#WebStatus">WebStatus</a> status plugin. This is useful to shut down a
+buildslave without interrupting any current builds. The buildmaster
+will wait until the buildslave has finished all its current builds, and
+will then tell the buildslave to shutdown.
+
+<div class="node">
+<p><hr>
+<a name="Maintenance"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Troubleshooting">Troubleshooting</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Shutdown">Shutdown</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Installation">Installation</a>
+
+</div>
+
+<h3 class="section">2.9 Maintenance</h3>
+
+<p>It is a good idea to check the buildmaster's status page every once in
+a while, to see if your buildslave is still online. Eventually the
+buildbot will probably be enhanced to send you email (via the
+<samp><span class="file">info/admin</span></samp> email address) when the slave has been offline for
+more than a few hours.
+
+ <p>If you find you can no longer provide a buildslave to the project, please
+let the project admins know, so they can put out a call for a
+replacement.
+
+ <p>The Buildbot records status and logs output continually, each time a
+build is performed. The status tends to be small, but the build logs
+can become quite large. Each build and log are recorded in a separate
+file, arranged hierarchically under the buildmaster's base directory.
+To prevent these files from growing without bound, you should
+periodically delete old build logs. A simple cron job to delete
+anything older than, say, two weeks should do the job. The only trick
+is to leave the <samp><span class="file">buildbot.tac</span></samp> and other support files alone, for
+which find's <code>-mindepth</code> argument helps skip everything in the
+top directory. You can use something like the following:
+
+<pre class="example"> @weekly cd BASEDIR &amp;&amp; find . -mindepth 2 -ipath './public_html/*' -prune -o -type f -mtime +14 -exec rm {} \;
+ @weekly cd BASEDIR &amp;&amp; find twistd.log* -mtime +14 -exec rm {} \;
+</pre>
+ <div class="node">
+<p><hr>
+<a name="Troubleshooting"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Maintenance">Maintenance</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Installation">Installation</a>
+
+</div>
+
+<h3 class="section">2.10 Troubleshooting</h3>
+
+<p>Here are a few hints on diagnosing common problems.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Starting-the-buildslave">Starting the buildslave</a>
+<li><a accesskey="2" href="#Connecting-to-the-buildmaster">Connecting to the buildmaster</a>
+<li><a accesskey="3" href="#Forcing-Builds">Forcing Builds</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Starting-the-buildslave"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Connecting-to-the-buildmaster">Connecting to the buildmaster</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Troubleshooting">Troubleshooting</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Troubleshooting">Troubleshooting</a>
+
+</div>
+
+<h4 class="subsection">2.10.1 Starting the buildslave</h4>
+
+<p>Cron jobs are typically run with a minimal shell (<samp><span class="file">/bin/sh</span></samp>, not
+<samp><span class="file">/bin/bash</span></samp>), and tilde expansion is not always performed in such
+commands. You may want to use explicit paths, because the <code>PATH</code>
+is usually quite short and doesn't include anything set by your
+shell's startup scripts (<samp><span class="file">.profile</span></samp>, <samp><span class="file">.bashrc</span></samp>, etc). If
+you've installed buildbot (or other python libraries) to an unusual
+location, you may need to add a <code>PYTHONPATH</code> specification (note
+that python will do tilde-expansion on <code>PYTHONPATH</code> elements by
+itself). Sometimes it is safer to fully-specify everything:
+
+<pre class="example"> @reboot PYTHONPATH=~/lib/python /usr/local/bin/buildbot start /usr/home/buildbot/basedir
+</pre>
+ <p>Take the time to get the @reboot job set up. Otherwise, things will work
+fine for a while, but the first power outage or system reboot you have will
+stop the buildslave with nothing but the cries of sorrowful developers to
+remind you that it has gone away.
+
+<div class="node">
+<p><hr>
+<a name="Connecting-to-the-buildmaster"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Forcing-Builds">Forcing Builds</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Starting-the-buildslave">Starting the buildslave</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Troubleshooting">Troubleshooting</a>
+
+</div>
+
+<h4 class="subsection">2.10.2 Connecting to the buildmaster</h4>
+
+<p>If the buildslave cannot connect to the buildmaster, the reason should
+be described in the <samp><span class="file">twistd.log</span></samp> logfile. Some common problems
+are an incorrect master hostname or port number, or a mistyped bot
+name or password. If the buildslave loses the connection to the
+master, it is supposed to attempt to reconnect with an
+exponentially-increasing backoff. Each attempt (and the time of the
+next attempt) will be logged. If you get impatient, just manually stop
+and re-start the buildslave.
+
+ <p>When the buildmaster is restarted, all slaves will be disconnected,
+and will attempt to reconnect as usual. The reconnect time will depend
+upon how long the buildmaster is offline (i.e. how far up the
+exponential backoff curve the slaves have travelled). Again,
+<code>buildbot stop </code><var>BASEDIR</var><code>; buildbot start </code><var>BASEDIR</var> will
+speed up the process.
+
+<div class="node">
+<p><hr>
+<a name="Forcing-Builds"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Connecting-to-the-buildmaster">Connecting to the buildmaster</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Troubleshooting">Troubleshooting</a>
+
+</div>
+
+<h4 class="subsection">2.10.3 Forcing Builds</h4>
+
+<p>From the buildmaster's main status web page, you can force a build to
+be run on your build slave. Figure out which column is for a builder
+that runs on your slave, click on that builder's name, and the page
+that comes up will have a &ldquo;Force Build&rdquo; button. Fill in the form,
+hit the button, and a moment later you should see your slave's
+<samp><span class="file">twistd.log</span></samp> filling with commands being run. Using <code>pstree</code>
+or <code>top</code> should also reveal the cvs/make/gcc/etc processes being
+run by the buildslave. Note that the same web page should also show
+the <samp><span class="file">admin</span></samp> and <samp><span class="file">host</span></samp> information files that you configured
+earlier.
+
+<div class="node">
+<p><hr>
+<a name="Concepts"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Configuration">Configuration</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Installation">Installation</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="chapter">3 Concepts</h2>
+
+<p>This chapter defines some of the basic concepts that the Buildbot
+uses. You'll need to understand how the Buildbot sees the world to
+configure it properly.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Version-Control-Systems">Version Control Systems</a>
+<li><a accesskey="2" href="#Schedulers">Schedulers</a>
+<li><a accesskey="3" href="#BuildSet">BuildSet</a>
+<li><a accesskey="4" href="#BuildRequest">BuildRequest</a>
+<li><a accesskey="5" href="#Builder">Builder</a>
+<li><a accesskey="6" href="#Users">Users</a>
+<li><a accesskey="7" href="#Build-Properties">Build Properties</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Version-Control-Systems"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Schedulers">Schedulers</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Concepts">Concepts</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Concepts">Concepts</a>
+
+</div>
+
+<h3 class="section">3.1 Version Control Systems</h3>
+
+<p><a name="index-Version-Control-5"></a>
+These source trees come from a Version Control System of some kind.
+CVS and Subversion are two popular ones, but the Buildbot supports
+others. All VC systems have some notion of an upstream
+<code>repository</code> which acts as a server<a rel="footnote" href="#fn-2" name="fnd-2"><sup>2</sup></a>, from which clients
+can obtain source trees according to various parameters. The VC
+repository provides source trees of various projects, for different
+branches, and from various points in time. The first thing we have to
+do is to specify which source tree we want to get.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Generalizing-VC-Systems">Generalizing VC Systems</a>
+<li><a accesskey="2" href="#Source-Tree-Specifications">Source Tree Specifications</a>
+<li><a accesskey="3" href="#How-Different-VC-Systems-Specify-Sources">How Different VC Systems Specify Sources</a>
+<li><a accesskey="4" href="#Attributes-of-Changes">Attributes of Changes</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Generalizing-VC-Systems"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Source-Tree-Specifications">Source Tree Specifications</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Version-Control-Systems">Version Control Systems</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Version-Control-Systems">Version Control Systems</a>
+
+</div>
+
+<h4 class="subsection">3.1.1 Generalizing VC Systems</h4>
+
+<p>For the purposes of the Buildbot, we will try to generalize all VC
+systems as having repositories that each provide sources for a variety
+of projects. Each project is defined as a directory tree with source
+files. The individual files may each have revisions, but we ignore
+that and treat the project as a whole as having a set of revisions
+(CVS is really the only VC system still in widespread use that has
+per-file revisions; everything modern has moved to atomic tree-wide
+changesets). Each time someone commits a change to the project, a new
+revision becomes available. These revisions can be described by a
+tuple with two items: the first is a branch tag, and the second is
+some kind of revision stamp or timestamp. Complex projects may have
+multiple branch tags, but there is always a default branch. The
+timestamp may be an actual timestamp (such as the -D option to CVS),
+or it may be a monotonically-increasing transaction number (such as
+the change number used by SVN and P4, or the revision number used by
+Arch/Baz/Bazaar, or a labeled tag used in CVS)<a rel="footnote" href="#fn-3" name="fnd-3"><sup>3</sup></a>. The SHA1 revision ID used by Monotone, Mercurial, and Git is
+also a kind of revision stamp, in that it specifies a unique copy of
+the source tree, as does a Darcs &ldquo;context&rdquo; file.
+
+ <p>When we aren't intending to make any changes to the sources we check out
+(at least not any that need to be committed back upstream), there are two
+basic ways to use a VC system:
+
+ <ul>
+<li>Retrieve a specific set of source revisions: some tag or key is used
+to index this set, which is fixed and cannot be changed by subsequent
+developers committing new changes to the tree. Releases are built from
+tagged revisions like this, so that they can be rebuilt again later
+(probably with controlled modifications).
+<li>Retrieve the latest sources along a specific branch: some tag is used
+to indicate which branch is to be used, but within that constraint we want
+to get the latest revisions.
+</ul>
+
+ <p>Build personnel or CM staff typically use the first approach: the
+build that results is (ideally) completely specified by the two
+parameters given to the VC system: repository and revision tag. This
+gives QA and end-users something concrete to point at when reporting
+bugs. Release engineers are also reportedly fond of shipping code that
+can be traced back to a concise revision tag of some sort.
+
+ <p>Developers are more likely to use the second approach: each morning
+the developer does an update to pull in the changes committed by the
+team over the last day. These builds are not easy to fully specify: it
+depends upon exactly when you did a checkout, and upon what local
+changes the developer has in their tree. Developers do not normally
+tag each build they produce, because there is usually significant
+overhead involved in creating these tags. Recreating the trees used by
+one of these builds can be a challenge. Some VC systems may provide
+implicit tags (like a revision number), while others may allow the use
+of timestamps to mean &ldquo;the state of the tree at time X&rdquo; as opposed
+to a tree-state that has been explicitly marked.
+
+ <p>The Buildbot is designed to help developers, so it usually works in
+terms of <em>the latest</em> sources as opposed to specific tagged
+revisions. However, it would really prefer to build from reproducible
+source trees, so implicit revisions are used whenever possible.
+
+<div class="node">
+<p><hr>
+<a name="Source-Tree-Specifications"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#How-Different-VC-Systems-Specify-Sources">How Different VC Systems Specify Sources</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Generalizing-VC-Systems">Generalizing VC Systems</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Version-Control-Systems">Version Control Systems</a>
+
+</div>
+
+<h4 class="subsection">3.1.2 Source Tree Specifications</h4>
+
+<p>So for the Buildbot's purposes we treat each VC system as a server
+which can take a list of specifications as input and produce a source
+tree as output. Some of these specifications are static: they are
+attributes of the builder and do not change over time. Others are more
+variable: each build will have a different value. The repository is
+changed over time by a sequence of Changes, each of which represents a
+single developer making changes to some set of files. These Changes
+are cumulative<a rel="footnote" href="#fn-4" name="fnd-4"><sup>4</sup></a>.
+
+ <p>For normal builds, the Buildbot wants to get well-defined source trees
+that contain specific Changes, and exclude other Changes that may have
+occurred after the desired ones. We assume that the Changes arrive at
+the buildbot (through one of the mechanisms described in <a href="#Change-Sources">Change Sources</a>) in the same order in which they are committed to the
+repository. The Buildbot waits for the tree to become &ldquo;stable&rdquo;
+before initiating a build, for two reasons. The first is that
+developers frequently make multiple related commits in quick
+succession, even when the VC system provides ways to make atomic
+transactions involving multiple files at the same time. Running a
+build in the middle of these sets of changes would use an inconsistent
+set of source files, and is likely to fail (and is certain to be less
+useful than a build which uses the full set of changes). The
+tree-stable-timer is intended to avoid these useless builds that
+include some of the developer's changes but not all. The second reason
+is that some VC systems (i.e. CVS) do not provide repository-wide
+transaction numbers, so that timestamps are the only way to refer to
+a specific repository state. These timestamps may be somewhat
+ambiguous, due to processing and notification delays. By waiting until
+the tree has been stable for, say, 10 minutes, we can choose a
+timestamp from the middle of that period to use for our source
+checkout, and then be reasonably sure that any clock-skew errors will
+not cause the build to be performed on an inconsistent set of source
+files.
+
+ <p>The Schedulers always use the tree-stable-timer, with a timeout that
+is configured to reflect a reasonable tradeoff between build latency
+and change frequency. When the VC system provides coherent
+repository-wide revision markers (such as Subversion's revision
+numbers, or in fact anything other than CVS's timestamps), the
+resulting Build is simply performed against a source tree defined by
+that revision marker. When the VC system does not provide this, a
+timestamp from the middle of the tree-stable period is used to
+generate the source tree<a rel="footnote" href="#fn-5" name="fnd-5"><sup>5</sup></a>.
+
+<div class="node">
+<p><hr>
+<a name="How-Different-VC-Systems-Specify-Sources"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Attributes-of-Changes">Attributes of Changes</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Source-Tree-Specifications">Source Tree Specifications</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Version-Control-Systems">Version Control Systems</a>
+
+</div>
+
+<h4 class="subsection">3.1.3 How Different VC Systems Specify Sources</h4>
+
+<p>For CVS, the static specifications are <code>repository</code> and
+<code>module</code>. In addition to those, each build uses a timestamp (or
+omits the timestamp to mean <code>the latest</code>) and <code>branch tag</code>
+(which defaults to HEAD). These parameters collectively specify a set
+of sources from which a build may be performed.
+
+ <p><a href="http://subversion.tigris.org">Subversion</a> combines the
+repository, module, and branch into a single <code>Subversion URL</code>
+parameter. Within that scope, source checkouts can be specified by a
+numeric <code>revision number</code> (a repository-wide
+monotonically-increasing marker, such that each transaction that
+changes the repository is indexed by a different revision number), or
+a revision timestamp. When branches are used, the repository and
+module form a static <code>baseURL</code>, while each build has a
+<code>revision number</code> and a <code>branch</code> (which defaults to a
+statically-specified <code>defaultBranch</code>). The <code>baseURL</code> and
+<code>branch</code> are simply concatenated together to derive the
+<code>svnurl</code> to use for the checkout.
+
+ <p><a href="http://www.perforce.com/">Perforce</a> is similar. The server
+is specified through a <code>P4PORT</code> parameter. Module and branch
+are specified in a single depot path, and revisions are
+depot-wide. When branches are used, the <code>p4base</code> and
+<code>defaultBranch</code> are concatenated together to produce the depot
+path.
+
+ <p><a href="http://wiki.gnuarch.org/">Arch</a> and
+<a href="http://bazaar.canonical.com/">Bazaar</a> specify a repository by
+URL, as well as a <code>version</code> which is kind of like a branch name.
+Arch uses the word <code>archive</code> to represent the repository. Arch
+lets you push changes from one archive to another, removing the strict
+centralization required by CVS and SVN. It retains the distinction
+between repository and working directory that most other VC systems
+use. For complex multi-module directory structures, Arch has a
+built-in <code>build config</code> layer with which the checkout process has
+two steps. First, an initial bootstrap checkout is performed to
+retrieve a set of build-config files. Second, one of these files is
+used to figure out which archives/modules should be used to populate
+subdirectories of the initial checkout.
+
+ <p>Builders which use Arch and Bazaar therefore have a static archive
+<code>url</code>, and a default &ldquo;branch&rdquo; (which is a string that specifies
+a complete category&ndash;branch&ndash;version triple). Each build can have its
+own branch (the category&ndash;branch&ndash;version string) to override the
+default, as well as a revision number (which is turned into a
+&ndash;patch-NN suffix when performing the checkout).
+
+ <p><a href="http://bazaar-vcs.org">Bzr</a> (which is a descendant of
+Arch/Bazaar, and is frequently referred to as &ldquo;Bazaar&rdquo;) has the same
+sort of repository-vs-workspace model as Arch, but the repository data
+can either be stored inside the working directory or kept elsewhere
+(either on the same machine or on an entirely different machine). For
+the purposes of Buildbot (which never commits changes), the repository
+is specified with a URL and a revision number.
+
+ <p>The most common way to obtain read-only access to a bzr tree is via
+HTTP, simply by making the repository visible through a web server
+like Apache. Bzr can also use FTP and SFTP servers, if the buildslave
+process has sufficient privileges to access them. Higher performance
+can be obtained by running a special Bazaar-specific server. None of
+these matter to the buildbot: the repository URL just has to match the
+kind of server being used. The <code>repoURL</code> argument provides the
+location of the repository.
+
+ <p>Branches are expressed as subdirectories of the main central
+repository, which means that if branches are being used, the BZR step
+is given a <code>baseURL</code> and <code>defaultBranch</code> instead of getting
+the <code>repoURL</code> argument.
+
+ <p><a href="http://darcs.net/">Darcs</a> doesn't really have the
+notion of a single master repository. Nor does it really have
+branches. In Darcs, each working directory is also a repository, and
+there are operations to push and pull patches from one of these
+<code>repositories</code> to another. For the Buildbot's purposes, all you
+need to do is specify the URL of a repository that you want to build
+from. The build slave will then pull the latest patches from that
+repository and build them. Multiple branches are implemented by using
+multiple repositories (possibly living on the same server).
+
+ <p>Builders which use Darcs therefore have a static <code>repourl</code> which
+specifies the location of the repository. If branches are being used,
+the source Step is instead configured with a <code>baseURL</code> and a
+<code>defaultBranch</code>, and the two strings are simply concatenated
+together to obtain the repository's URL. Each build then has a
+specific branch which replaces <code>defaultBranch</code>, or just uses the
+default one. Instead of a revision number, each build can have a
+&ldquo;context&rdquo;, which is a string that records all the patches that are
+present in a given tree (this is the output of <samp><span class="command">darcs changes
+--context</span></samp>, and is considerably less concise than, e.g. Subversion's
+revision number, but the patch-reordering flexibility of Darcs makes
+it impossible to provide a shorter useful specification).
+
+ <p><a href="http://selenic.com/mercurial">Mercurial</a> is like Darcs, in that
+each branch is stored in a separate repository. The <code>repourl</code>,
+<code>baseURL</code>, and <code>defaultBranch</code> arguments are all handled the
+same way as with Darcs. The &ldquo;revision&rdquo;, however, is the hash
+identifier returned by <samp><span class="command">hg identify</span></samp>.
+
+ <p><a href="http://git.or.cz/">Git</a> also follows a decentralized model, and
+each repository can have several branches and tags. The source Step is
+configured with a static <code>repourl</code> which specifies the location
+of the repository. In addition, an optional <code>branch</code> parameter
+can be specified to check out code from a specific branch instead of
+the default &ldquo;master&rdquo; branch. The &ldquo;revision&rdquo; is specified as a SHA1
+hash as returned by e.g. <samp><span class="command">git rev-parse</span></samp>. No attempt is made
+to ensure that the specified revision is actually a subset of the
+specified branch.
+
+<div class="node">
+<p><hr>
+<a name="Attributes-of-Changes"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#How-Different-VC-Systems-Specify-Sources">How Different VC Systems Specify Sources</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Version-Control-Systems">Version Control Systems</a>
+
+</div>
+
+<h4 class="subsection">3.1.4 Attributes of Changes</h4>
+
+<h3 class="heading">Who</h3>
+
+<p>Each Change has a <code>who</code> attribute, which specifies which
+developer is responsible for the change. This is a string which comes
+from a namespace controlled by the VC repository. Frequently this
+means it is a username on the host which runs the repository, but not
+all VC systems require this (Arch, for example, uses a fully-qualified
+<code>Arch ID</code>, which looks like an email address, as does Darcs).
+Each StatusNotifier will map the <code>who</code> attribute into something
+appropriate for their particular means of communication: an email
+address, an IRC handle, etc.
+
+<h3 class="heading">Files</h3>
+
+<p>It also has a list of <code>files</code>, which are just the tree-relative
+filenames of any files that were added, deleted, or modified for this
+Change. These filenames are used by the <code>fileIsImportant</code>
+function (in the Scheduler) to decide whether it is worth triggering a
+new build or not, e.g. the function could use the following function
+to only run a build if a C file were checked in:
+
+<pre class="example"> def has_C_files(change):
+ for name in change.files:
+ if name.endswith(".c"):
+ return True
+ return False
+</pre>
+ <p>Certain BuildSteps can also use the list of changed files
+to run a more targeted series of tests, e.g. the
+<code>python_twisted.Trial</code> step can run just the unit tests that
+provide coverage for the modified .py files instead of running the
+full test suite.
+
+<h3 class="heading">Comments</h3>
+
+<p>The Change also has a <code>comments</code> attribute, which is a string
+containing any checkin comments.
+
+<h3 class="heading">Revision</h3>
+
+<p>Each Change can have a <code>revision</code> attribute, which describes how
+to get a tree with a specific state: a tree which includes this Change
+(and all that came before it) but none that come after it. If this
+information is unavailable, the <code>.revision</code> attribute will be
+<code>None</code>. These revisions are provided by the ChangeSource, and
+consumed by the <code>computeSourceRevision</code> method in the appropriate
+<code>step.Source</code> class.
+
+ <dl>
+<dt>&lsquo;<samp><span class="samp">CVS</span></samp>&rsquo;<dd><code>revision</code> is an int, seconds since the epoch
+<br><dt>&lsquo;<samp><span class="samp">SVN</span></samp>&rsquo;<dd><code>revision</code> is an int, the changeset number (r%d)
+<br><dt>&lsquo;<samp><span class="samp">Darcs</span></samp>&rsquo;<dd><code>revision</code> is a large string, the output of <code>darcs changes --context</code>
+<br><dt>&lsquo;<samp><span class="samp">Mercurial</span></samp>&rsquo;<dd><code>revision</code> is a short string (a hash ID), the output of <code>hg identify</code>
+<br><dt>&lsquo;<samp><span class="samp">Arch/Bazaar</span></samp>&rsquo;<dd><code>revision</code> is the full revision ID (ending in &ndash;patch-%d)
+<br><dt>&lsquo;<samp><span class="samp">P4</span></samp>&rsquo;<dd><code>revision</code> is an int, the transaction number
+<br><dt>&lsquo;<samp><span class="samp">Git</span></samp>&rsquo;<dd><code>revision</code> is a short string (a SHA1 hash), the output of e.g.
+<code>git rev-parse</code>
+</dl>
+
+<h3 class="heading">Branches</h3>
+
+<p>The Change might also have a <code>branch</code> attribute. This indicates
+that all of the Change's files are in the same named branch. The
+Schedulers get to decide whether the branch should be built or not.
+
+ <p>For VC systems like CVS, Arch, Monotone, and Git, the <code>branch</code>
+name is unrelated to the filename. (that is, the branch name and the
+filename inhabit unrelated namespaces). For SVN, branches are
+expressed as subdirectories of the repository, so the file's
+&ldquo;svnurl&rdquo; is a combination of some base URL, the branch name, and the
+filename within the branch. (In a sense, the branch name and the
+filename inhabit the same namespace). Darcs branches are
+subdirectories of a base URL just like SVN. Mercurial branches are the
+same as Darcs.
+
+ <dl>
+<dt>&lsquo;<samp><span class="samp">CVS</span></samp>&rsquo;<dd>branch='warner-newfeature', files=['src/foo.c']
+<br><dt>&lsquo;<samp><span class="samp">SVN</span></samp>&rsquo;<dd>branch='branches/warner-newfeature', files=['src/foo.c']
+<br><dt>&lsquo;<samp><span class="samp">Darcs</span></samp>&rsquo;<dd>branch='warner-newfeature', files=['src/foo.c']
+<br><dt>&lsquo;<samp><span class="samp">Mercurial</span></samp>&rsquo;<dd>branch='warner-newfeature', files=['src/foo.c']
+<br><dt>&lsquo;<samp><span class="samp">Arch/Bazaar</span></samp>&rsquo;<dd>branch='buildbot&ndash;usebranches&ndash;0', files=['buildbot/master.py']
+<br><dt>&lsquo;<samp><span class="samp">Git</span></samp>&rsquo;<dd>branch='warner-newfeature', files=['src/foo.c']
+</dl>
+
+<h3 class="heading">Links</h3>
+
+<!-- TODO: who is using 'links'? how is it being used? -->
+<p>Finally, the Change might have a <code>links</code> list, which is intended
+to provide a list of URLs to a <em>viewcvs</em>-style web page that
+provides more detail for this Change, perhaps including the full file
+diffs.
+
+<div class="node">
+<p><hr>
+<a name="Schedulers"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#BuildSet">BuildSet</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Version-Control-Systems">Version Control Systems</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Concepts">Concepts</a>
+
+</div>
+
+<h3 class="section">3.2 Schedulers</h3>
+
+<p><a name="index-Scheduler-6"></a>
+Each Buildmaster has a set of <code>Scheduler</code> objects, each of which
+gets a copy of every incoming Change. The Schedulers are responsible
+for deciding when Builds should be run. Some Buildbot installations
+might have a single Scheduler, while others may have several, each for
+a different purpose.
+
+ <p>For example, a &ldquo;quick&rdquo; scheduler might exist to give immediate
+feedback to developers, hoping to catch obvious problems in the code
+that can be detected quickly. These typically do not run the full test
+suite, nor do they run on a wide variety of platforms. They also
+usually do a VC update rather than performing a brand-new checkout
+each time. You could have a &ldquo;quick&rdquo; scheduler which used a 30 second
+timeout, and feeds a single &ldquo;quick&rdquo; Builder that uses a VC
+<code>mode='update'</code> setting.
+
+ <p>A separate &ldquo;full&rdquo; scheduler would run more comprehensive tests a
+little while later, to catch more subtle problems. This scheduler
+would have a longer tree-stable-timer, maybe 30 minutes, and would
+feed multiple Builders (with a <code>mode=</code> of <code>'copy'</code>,
+<code>'clobber'</code>, or <code>'export'</code>).
+
+ <p>The <code>tree-stable-timer</code> and <code>fileIsImportant</code> decisions are
+made by the Scheduler. Dependencies are also implemented here.
+Periodic builds (those which are run every N seconds rather than after
+new Changes arrive) are triggered by a special <code>Periodic</code>
+Scheduler subclass. The default Scheduler class can also be told to
+watch for specific branches, ignoring Changes on other branches. This
+may be useful if you have a trunk and a few release branches which
+should be tracked, but when you don't want to have the Buildbot pay
+attention to several dozen private user branches.
+
+ <p>When the setup has multiple sources of Changes the <code>category</code>
+can be used for <code>Scheduler</code> objects to filter out a subset
+of the Changes. Note that not all change sources can attach a category.
+
+ <p>Some Schedulers may trigger builds for other reasons, other than
+recent Changes. For example, a Scheduler subclass could connect to a
+remote buildmaster and watch for builds of a library to succeed before
+triggering a local build that uses that library.
+
+ <p>Each Scheduler creates and submits <code>BuildSet</code> objects to the
+<code>BuildMaster</code>, which is then responsible for making sure the
+individual <code>BuildRequests</code> are delivered to the target
+<code>Builders</code>.
+
+ <p><code>Scheduler</code> instances are activated by placing them in the
+<code>c['schedulers']</code> list in the buildmaster config file. Each
+Scheduler has a unique name.
+
+<div class="node">
+<p><hr>
+<a name="BuildSet"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#BuildRequest">BuildRequest</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Schedulers">Schedulers</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Concepts">Concepts</a>
+
+</div>
+
+<h3 class="section">3.3 BuildSet</h3>
+
+<p><a name="index-BuildSet-7"></a>
+A <code>BuildSet</code> is the name given to a set of Builds that all
+compile/test the same version of the tree on multiple Builders. In
+general, all these component Builds will perform the same sequence of
+Steps, using the same source code, but on different platforms or
+against a different set of libraries.
+
+ <p>The <code>BuildSet</code> is tracked as a single unit, which fails if any of
+the component Builds have failed, and therefore can succeed only if
+<em>all</em> of the component Builds have succeeded. There are two kinds
+of status notification messages that can be emitted for a BuildSet:
+the <code>firstFailure</code> type (which fires as soon as we know the
+BuildSet will fail), and the <code>Finished</code> type (which fires once
+the BuildSet has completely finished, regardless of whether the
+overall set passed or failed).
+
+ <p>A <code>BuildSet</code> is created with a <em>source stamp</em> tuple of
+(branch, revision, changes, patch), some of which may be None, and a
+list of Builders on which it is to be run. They are then given to the
+BuildMaster, which is responsible for creating a separate
+<code>BuildRequest</code> for each Builder.
+
+ <p>There are a couple of different likely values for the
+<code>SourceStamp</code>:
+
+ <dl>
+<dt><code>(revision=None, changes=[CHANGES], patch=None)</code><dd>This is a <code>SourceStamp</code> used when a series of Changes have
+triggered a build. The VC step will attempt to check out a tree that
+contains CHANGES (and any changes that occurred before CHANGES, but
+not any that occurred after them).
+
+ <br><dt><code>(revision=None, changes=None, patch=None)</code><dd>This builds the most recent code on the default branch. This is the
+sort of <code>SourceStamp</code> that would be used on a Build that was
+triggered by a user request, or a Periodic scheduler. It is also
+possible to configure the VC Source Step to always check out the
+latest sources rather than paying attention to the Changes in the
+SourceStamp, which will result in same behavior as this.
+
+ <br><dt><code>(branch=BRANCH, revision=None, changes=None, patch=None)</code><dd>This builds the most recent code on the given BRANCH. Again, this is
+generally triggered by a user request or Periodic build.
+
+ <br><dt><code>(revision=REV, changes=None, patch=(LEVEL, DIFF))</code><dd>This checks out the tree at the given revision REV, then applies a
+patch (using <code>patch -pLEVEL &lt;DIFF</code>). The <a href="#try">try</a> feature uses
+this kind of <code>SourceStamp</code>. If <code>patch</code> is None, the patching
+step is bypassed.
+
+ </dl>
+
+ <p>The buildmaster is responsible for turning the <code>BuildSet</code> into a
+set of <code>BuildRequest</code> objects and queueing them on the
+appropriate Builders.
+
+<div class="node">
+<p><hr>
+<a name="BuildRequest"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Builder">Builder</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#BuildSet">BuildSet</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Concepts">Concepts</a>
+
+</div>
+
+<h3 class="section">3.4 BuildRequest</h3>
+
+<p><a name="index-BuildRequest-8"></a>
+A <code>BuildRequest</code> is a request to build a specific set of sources
+on a single specific <code>Builder</code>. Each <code>Builder</code> runs the
+<code>BuildRequest</code> as soon as it can (i.e. when an associated
+buildslave becomes free). <code>BuildRequest</code>s are prioritized from
+oldest to newest, so when a buildslave becomes free, the
+<code>Builder</code> with the oldest <code>BuildRequest</code> is run.
+
+ <p>The <code>BuildRequest</code> contains the <code>SourceStamp</code> specification.
+The actual process of running the build (the series of Steps that will
+be executed) is implemented by the <code>Build</code> object. In the future
+this might be changed, to have the <code>Build</code> define <em>what</em>
+gets built, and a separate <code>BuildProcess</code> (provided by the
+Builder) to define <em>how</em> it gets built.
+
+ <p><code>BuildRequest</code> is created with optional <code>Properties</code>. One
+of these, <code>owner</code>, is collected by the resultant <code>Build</code> and
+added to the set of <em>interested users</em> to which status
+notifications will be sent, depending on the configuration for each
+status object.
+
+ <p>The <code>BuildRequest</code> may be mergeable with other compatible
+<code>BuildRequest</code>s. Builds that are triggered by incoming Changes
+will generally be mergeable. Builds that are triggered by user
+requests are generally not, unless they are multiple requests to build
+the <em>latest sources</em> of the same branch.
+
+<div class="node">
+<p><hr>
+<a name="Builder"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Users">Users</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#BuildRequest">BuildRequest</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Concepts">Concepts</a>
+
+</div>
+
+<h3 class="section">3.5 Builder</h3>
+
+<p><a name="index-Builder-9"></a>
+The <code>Builder</code> is a long-lived object which controls all Builds of
+a given type. Each one is created when the config file is first
+parsed, and lives forever (or rather until it is removed from the
+config file). It mediates the connections to the buildslaves that do
+all the work, and is responsible for creating the <code>Build</code> objects
+that decide <em>how</em> a build is performed (i.e., which steps are
+executed in what order).
+
+ <p>Each <code>Builder</code> gets a unique name, and the path name of a
+directory where it gets to do all its work (there is a
+buildmaster-side directory for keeping status information, as well as
+a buildslave-side directory where the actual checkout/compile/test
+commands are executed). It also gets a <code>BuildFactory</code>, which is
+responsible for creating new <code>Build</code> instances: because the
+<code>Build</code> instance is what actually performs each build, choosing
+the <code>BuildFactory</code> is the way to specify what happens each time a
+build is done.
+
+ <p>Each <code>Builder</code> is associated with one or more <code>BuildSlaves</code>.
+A <code>Builder</code> which is used to perform OS-X builds (as opposed to
+Linux or Solaris builds) should naturally be associated with an
+OS-X-based buildslave.
+
+ <p>A <code>Builder</code> may be given a set of environment variables to be used
+in its <a href="#ShellCommand">ShellCommand</a>s. These variables will override anything in the
+buildslave's environment. Variables passed directly to a ShellCommand will
+override variables of the same name passed to the Builder.
+
+ <p>For example, if you have a pool of identical slaves it is often easier to manage
+variables like PATH from Buildbot rather than manually editing it inside of
+the slaves' environment.
+
+<pre class="example"> f = factory.BuildFactory()
+ f.addStep(ShellCommand(
+ command=['bash', './configure']))
+ f.addStep(Compile())
+
+ c['builders'] = [
+ {'name': 'test', 'slavenames': ['slave1', 'slave2', 'slave3', 'slave4',
+ 'slave5', 'slave6'],
+ 'builddir': 'test', 'factory': f,
+ 'env': {'PATH': '/opt/local/bin:/opt/app/bin:/usr/local/bin:/usr/bin'}}]
+
+</pre>
+ <div class="node">
+<p><hr>
+<a name="Users"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Build-Properties">Build Properties</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Builder">Builder</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Concepts">Concepts</a>
+
+</div>
+
+<h3 class="section">3.6 Users</h3>
+
+<p><a name="index-Users-10"></a>
+Buildbot has a somewhat limited awareness of <em>users</em>. It assumes
+the world consists of a set of developers, each of whom can be
+described by a couple of simple attributes. These developers make
+changes to the source code, causing builds which may succeed or fail.
+
+ <p>Each developer is primarily known through the source control system. Each
+Change object that arrives is tagged with a <code>who</code> field that
+typically gives the account name (on the repository machine) of the user
+responsible for that change. This string is the primary key by which the
+User is known, and is displayed on the HTML status pages and in each Build's
+&ldquo;blamelist&rdquo;.
+
+ <p>To do more with the User than just refer to them, this username needs to
+be mapped into an address of some sort. The responsibility for this mapping
+is left up to the status module which needs the address. The core code knows
+nothing about email addresses or IRC nicknames, just user names.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Doing-Things-With-Users">Doing Things With Users</a>
+<li><a accesskey="2" href="#Email-Addresses">Email Addresses</a>
+<li><a accesskey="3" href="#IRC-Nicknames">IRC Nicknames</a>
+<li><a accesskey="4" href="#Live-Status-Clients">Live Status Clients</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Doing-Things-With-Users"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Email-Addresses">Email Addresses</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Users">Users</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Users">Users</a>
+
+</div>
+
+<h4 class="subsection">3.6.1 Doing Things With Users</h4>
+
+<p>Each Change has a single User who is responsible for that Change. Most
+Builds have a set of Changes: the Build represents the first time these
+Changes have been built and tested by the Buildbot. The build has a
+&ldquo;blamelist&rdquo; that consists of a simple union of the Users responsible
+for all the Build's Changes.
+
+ <p>The Build provides (through the IBuildStatus interface) a list of Users
+who are &ldquo;involved&rdquo; in the build. For now this is equal to the
+blamelist, but in the future it will be expanded to include a &ldquo;build
+sheriff&rdquo; (a person who is &ldquo;on duty&rdquo; at that time and responsible for
+watching over all builds that occur during their shift), as well as
+per-module owners who simply want to keep watch over their domain (chosen by
+subdirectory or a regexp matched against the filenames pulled out of the
+Changes). The Involved Users are those who probably have an interest in the
+results of any given build.
+
+ <p>In the future, Buildbot will acquire the concept of &ldquo;Problems&rdquo;,
+which last longer than builds and have beginnings and ends. For example, a
+test case which passed in one build and then failed in the next is a
+Problem. The Problem lasts until the test case starts passing again, at
+which point the Problem is said to be &ldquo;resolved&rdquo;.
+
+ <p>If there appears to be a code change that went into the tree at the
+same time as the test started failing, that Change is marked as being
+responsible for the Problem, and the user who made the change is added
+to the Problem's &ldquo;Guilty&rdquo; list. In addition to this user, there may
+be others who share responsibility for the Problem (module owners,
+sponsoring developers). In addition to the Responsible Users, there
+may be a set of Interested Users, who take an interest in the fate of
+the Problem.
+
+ <p>Problems therefore have sets of Users who may want to be kept aware of
+the condition of the problem as it changes over time. If configured, the
+Buildbot can pester everyone on the Responsible list with increasing
+harshness until the problem is resolved, with the most harshness reserved
+for the Guilty parties themselves. The Interested Users may merely be told
+when the problem starts and stops, as they are not actually responsible for
+fixing anything.
+
+<div class="node">
+<p><hr>
+<a name="Email-Addresses"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#IRC-Nicknames">IRC Nicknames</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Doing-Things-With-Users">Doing Things With Users</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Users">Users</a>
+
+</div>
+
+<h4 class="subsection">3.6.2 Email Addresses</h4>
+
+<p>The <code>buildbot.status.mail.MailNotifier</code> class
+(see <a href="#MailNotifier">MailNotifier</a>) provides a status target which can send email
+about the results of each build. It accepts a static list of email
+addresses to which each message should be delivered, but it can also
+be configured to send mail to the Build's Interested Users. To do
+this, it needs a way to convert User names into email addresses.
+
+ <p>For many VC systems, the User Name is actually an account name on the
+system which hosts the repository. As such, turning the name into an
+email address is a simple matter of appending
+&ldquo;@repositoryhost.com&rdquo;. Some projects use other kinds of mappings
+(for example the preferred email address may be at &ldquo;project.org&rdquo;
+despite the repository host being named &ldquo;cvs.project.org&rdquo;), and some
+VC systems have full separation between the concept of a user and that
+of an account on the repository host (like Perforce). Some systems
+(like Arch) put a full contact email address in every change.
+
+ <p>To convert these names to addresses, the MailNotifier uses an EmailLookup
+object. This provides a .getAddress method which accepts a name and
+(eventually) returns an address. The default <code>MailNotifier</code>
+module provides an EmailLookup which simply appends a static string,
+configurable when the notifier is created. To create more complex behaviors
+(perhaps using an LDAP lookup, or using &ldquo;finger&rdquo; on a central host to
+determine a preferred address for the developer), provide a different object
+as the <code>lookup</code> argument.
+
+ <p>In the future, when the Problem mechanism has been set up, the Buildbot
+will need to send mail to arbitrary Users. It will do this by locating a
+MailNotifier-like object among all the buildmaster's status targets, and
+asking it to send messages to various Users. This means the User-to-address
+mapping only has to be set up once, in your MailNotifier, and every email
+message the buildbot emits will take advantage of it.
+
+<div class="node">
+<p><hr>
+<a name="IRC-Nicknames"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Live-Status-Clients">Live Status Clients</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Email-Addresses">Email Addresses</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Users">Users</a>
+
+</div>
+
+<h4 class="subsection">3.6.3 IRC Nicknames</h4>
+
+<p>Like MailNotifier, the <code>buildbot.status.words.IRC</code> class
+provides a status target which can announce the results of each build. It
+also provides an interactive interface by responding to online queries
+posted in the channel or sent as private messages.
+
+ <p>In the future, the buildbot can be configured to map User names to IRC
+nicknames, to watch for the recent presence of these nicknames, and to
+deliver build status messages to the interested parties. Like
+<code>MailNotifier</code> does for email addresses, the <code>IRC</code> object
+will have an <code>IRCLookup</code> which is responsible for nicknames. The
+mapping can be set up statically, or it can be updated by online users
+themselves (by claiming a username with some kind of &ldquo;buildbot: i am
+user warner&rdquo; commands).
+
+ <p>Once the mapping is established, the rest of the buildbot can ask the
+<code>IRC</code> object to send messages to various users. It can report on
+the likelihood that the user saw the given message (based upon how long the
+user has been inactive on the channel), which might prompt the Problem
+Hassler logic to send them an email message instead.
+
+<div class="node">
+<p><hr>
+<a name="Live-Status-Clients"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#IRC-Nicknames">IRC Nicknames</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Users">Users</a>
+
+</div>
+
+<h4 class="subsection">3.6.4 Live Status Clients</h4>
+
+<p>The Buildbot also offers a PB-based status client interface which can
+display real-time build status in a GUI panel on the developer's desktop.
+This interface is normally anonymous, but it could be configured to let the
+buildmaster know <em>which</em> developer is using the status client. The
+status client could then be used as a message-delivery service, providing an
+alternative way to deliver low-latency high-interruption messages to the
+developer (like &ldquo;hey, you broke the build&rdquo;).
+
+<div class="node">
+<p><hr>
+<a name="Build-Properties"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Users">Users</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Concepts">Concepts</a>
+
+</div>
+
+<h3 class="section">3.7 Build Properties</h3>
+
+<p><a name="index-Properties-11"></a>
+Each build has a set of &ldquo;Build Properties&rdquo;, which can be used by its
+BuildStep to modify their actions. These properties, in the form of
+key-value pairs, provide a general framework for dynamically altering
+the behavior of a build based on its circumstances.
+
+ <p>Properties come from a number of places:
+ <ul>
+<li>global configuration &ndash;
+These properties apply to all builds.
+<li>schedulers &ndash;
+A scheduler can specify properties available to all the builds it
+starts.
+<li>buildslaves &ndash;
+A buildslave can pass properties on to the builds it performs.
+<li>builds &ndash;
+A build automatically sets a number of properties on itself.
+<li>steps &ndash;
+Steps of a build can set properties that are available to subsequent
+steps. In particular, source steps set a number of properties.
+</ul>
+
+ <p>Properties are very flexible, and can be used to implement all manner
+of functionality. Here are some examples:
+
+ <p>Most Source steps record the revision that they checked out in
+the <code>got_revision</code> property. A later step could use this
+property to specify the name of a fully-built tarball, dropped in an
+easily-accessible directory for later testing.
+
+ <p>Some projects want to perform nightly builds as well as in response
+to committed changes. Such a project would run two schedulers,
+both pointing to the same set of builders, but could provide an
+<code>is_nightly</code> property so that steps can distinguish the nightly
+builds, perhaps to run more resource-intensive tests.
+
+ <p>Some projects have different build processes on different systems.
+Rather than create a build factory for each slave, the steps can use
+buildslave properties to identify the unique aspects of each slave
+and adapt the build process dynamically.
+
+<div class="node">
+<p><hr>
+<a name="Configuration"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Getting-Source-Code-Changes">Getting Source Code Changes</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Concepts">Concepts</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="chapter">4 Configuration</h2>
+
+<p><a name="index-Configuration-12"></a>
+The buildbot's behavior is defined by the &ldquo;config file&rdquo;, which
+normally lives in the <samp><span class="file">master.cfg</span></samp> file in the buildmaster's base
+directory (but this can be changed with an option to the
+<code>buildbot create-master</code> command). This file completely specifies
+which Builders are to be run, which slaves they should use, how
+Changes should be tracked, and where the status information is to be
+sent. The buildmaster's <samp><span class="file">buildbot.tac</span></samp> file names the base
+directory; everything else comes from the config file.
+
+ <p>A sample config file was installed for you when you created the
+buildmaster, but you will need to edit it before your buildbot will do
+anything useful.
+
+ <p>This chapter gives an overview of the format of this file and the
+various sections in it. You will need to read the later chapters to
+understand how to fill in each section properly.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Config-File-Format">Config File Format</a>
+<li><a accesskey="2" href="#Loading-the-Config-File">Loading the Config File</a>
+<li><a accesskey="3" href="#Testing-the-Config-File">Testing the Config File</a>
+<li><a accesskey="4" href="#Defining-the-Project">Defining the Project</a>
+<li><a accesskey="5" href="#Change-Sources-and-Schedulers">Change Sources and Schedulers</a>
+<li><a accesskey="6" href="#Merging-BuildRequests">Merging BuildRequests</a>
+<li><a accesskey="7" href="#Setting-the-slaveport">Setting the slaveport</a>
+<li><a accesskey="8" href="#Buildslave-Specifiers">Buildslave Specifiers</a>
+<li><a accesskey="9" href="#On_002dDemand-_0028_0022Latent_0022_0029-Buildslaves">On-Demand ("Latent") Buildslaves</a>
+<li><a href="#Defining-Global-Properties">Defining Global Properties</a>
+<li><a href="#Defining-Builders">Defining Builders</a>
+<li><a href="#Defining-Status-Targets">Defining Status Targets</a>
+<li><a href="#Debug-options">Debug options</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Config-File-Format"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Loading-the-Config-File">Loading the Config File</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Configuration">Configuration</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Configuration">Configuration</a>
+
+</div>
+
+<h3 class="section">4.1 Config File Format</h3>
+
+<p>The config file is, fundamentally, just a piece of Python code which
+defines a dictionary named <code>BuildmasterConfig</code>, with a number of
+keys that are treated specially. You don't need to know Python to do
+basic configuration, though, you can just copy the syntax of the
+sample file. If you <em>are</em> comfortable writing Python code,
+however, you can use all the power of a full programming language to
+achieve more complicated configurations.
+
+ <p>The <code>BuildmasterConfig</code> name is the only one which matters: all
+other names defined during the execution of the file are discarded.
+When parsing the config file, the Buildmaster generally compares the
+old configuration with the new one and performs the minimum set of
+actions necessary to bring the buildbot up to date: Builders which are
+not changed are left untouched, and Builders which are modified get to
+keep their old event history.
+
+ <p>Basic Python syntax: comments start with a hash character (&ldquo;#&rdquo;),
+tuples are defined with <code>(parenthesis, pairs)</code>, arrays are
+defined with <code>[square, brackets]</code>, tuples and arrays are mostly
+interchangeable. Dictionaries (data structures which map &ldquo;keys&rdquo; to
+&ldquo;values&rdquo;) are defined with curly braces: <code>{'key1': 'value1',
+'key2': 'value2'} </code>. Function calls (and object instantiation) can use
+named parameters, like <code>w = html.Waterfall(http_port=8010)</code>.
+
+ <p>The config file starts with a series of <code>import</code> statements,
+which make various kinds of Steps and Status targets available for
+later use. The main <code>BuildmasterConfig</code> dictionary is created,
+then it is populated with a variety of keys. These keys are broken
+roughly into the following sections, each of which is documented in
+the rest of this chapter:
+
+ <ul>
+<li>Project Definitions
+<li>Change Sources / Schedulers
+<li>Slaveport
+<li>Buildslave Configuration
+<li>Builders / Interlocks
+<li>Status Targets
+<li>Debug options
+</ul>
+
+ <p>The config file can use a few names which are placed into its namespace:
+
+ <dl>
+<dt><code>basedir</code><dd>the base directory for the buildmaster. This string has not been
+expanded, so it may start with a tilde. It needs to be expanded before
+use. The config file is located in
+<code>os.path.expanduser(os.path.join(basedir, 'master.cfg'))</code>
+
+ </dl>
+
+<div class="node">
+<p><hr>
+<a name="Loading-the-Config-File"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Testing-the-Config-File">Testing the Config File</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Config-File-Format">Config File Format</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Configuration">Configuration</a>
+
+</div>
+
+<h3 class="section">4.2 Loading the Config File</h3>
+
+<p>The config file is only read at specific points in time. It is first
+read when the buildmaster is launched. Once it is running, there are
+various ways to ask it to reload the config file. If you are on the
+system hosting the buildmaster, you can send a <code>SIGHUP</code> signal to
+it: the <samp><span class="command">buildbot</span></samp> tool has a shortcut for this:
+
+<pre class="example"> buildbot reconfig <var>BASEDIR</var>
+</pre>
+ <p>This command will show you all of the lines from <samp><span class="file">twistd.log</span></samp>
+that relate to the reconfiguration. If there are any problems during
+the config-file reload, they will be displayed in these lines.
+
+ <p>The debug tool (<code>buildbot debugclient --master HOST:PORT</code>) has a
+&ldquo;Reload .cfg&rdquo; button which will also trigger a reload. In the
+future, there will be other ways to accomplish this step (probably a
+password-protected button on the web page, as well as a privileged IRC
+command).
+
+ <p>When reloading the config file, the buildmaster will endeavor to
+change as little as possible about the running system. For example,
+although old status targets may be shut down and new ones started up,
+any status targets that were not changed since the last time the
+config file was read will be left running and untouched. Likewise any
+Builders which have not been changed will be left running. If a
+Builder is modified (say, the build process is changed) while a Build
+is currently running, that Build will keep running with the old
+process until it completes. Any previously queued Builds (or Builds
+which get queued after the reconfig) will use the new process.
+
+<div class="node">
+<p><hr>
+<a name="Testing-the-Config-File"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Defining-the-Project">Defining the Project</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Loading-the-Config-File">Loading the Config File</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Configuration">Configuration</a>
+
+</div>
+
+<h3 class="section">4.3 Testing the Config File</h3>
+
+<p>To verify that the config file is well-formed and contains no
+deprecated or invalid elements, use the &ldquo;checkconfig&rdquo; command:
+
+<pre class="example"> % buildbot checkconfig master.cfg
+ Config file is good!
+</pre>
+ <p>If the config file has deprecated features (perhaps because you've
+upgraded the buildmaster and need to update the config file to match),
+they will be announced by checkconfig. In this case, the config file
+will work, but you should really remove the deprecated items and use
+the recommended replacements instead:
+
+<pre class="example"> % buildbot checkconfig master.cfg
+ /usr/lib/python2.4/site-packages/buildbot/master.py:559: DeprecationWarning: c['sources'] is
+ deprecated as of 0.7.6 and will be removed by 0.8.0 . Please use c['change_source'] instead.
+ warnings.warn(m, DeprecationWarning)
+ Config file is good!
+</pre>
+ <p>If the config file is simply broken, that will be caught too:
+
+<pre class="example"> % buildbot checkconfig master.cfg
+ Traceback (most recent call last):
+ File "/usr/lib/python2.4/site-packages/buildbot/scripts/runner.py", line 834, in doCheckConfig
+ ConfigLoader(configFile)
+ File "/usr/lib/python2.4/site-packages/buildbot/scripts/checkconfig.py", line 31, in __init__
+ self.loadConfig(configFile)
+ File "/usr/lib/python2.4/site-packages/buildbot/master.py", line 480, in loadConfig
+ exec f in localDict
+ File "/home/warner/BuildBot/master/foolscap/master.cfg", line 90, in ?
+ c[bogus] = "stuff"
+ NameError: name 'bogus' is not defined
+</pre>
+ <div class="node">
+<p><hr>
+<a name="Defining-the-Project"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Change-Sources-and-Schedulers">Change Sources and Schedulers</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Testing-the-Config-File">Testing the Config File</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Configuration">Configuration</a>
+
+</div>
+
+<h3 class="section">4.4 Defining the Project</h3>
+
+<p>There are a couple of basic settings that you use to tell the buildbot
+what project it is working on. This information is used by status
+reporters to let users find out more about the codebase being
+exercised by this particular Buildbot installation.
+
+<pre class="example"> c['projectName'] = "Buildbot"
+ c['projectURL'] = "http://buildbot.sourceforge.net/"
+ c['buildbotURL'] = "http://localhost:8010/"
+</pre>
+ <p><a name="index-c_005b_0027projectName_0027_005d-13"></a><code>projectName</code> is a short string that will be used to describe the
+project that this buildbot is working on. For example, it is used as
+the title of the waterfall HTML page.
+
+ <p><a name="index-c_005b_0027projectURL_0027_005d-14"></a><code>projectURL</code> is a string that gives a URL for the project as a
+whole. HTML status displays will show <code>projectName</code> as a link to
+<code>projectURL</code>, to provide a link from buildbot HTML pages to your
+project's home page.
+
+ <p><a name="index-c_005b_0027buildbotURL_0027_005d-15"></a>The <code>buildbotURL</code> string should point to the location where the
+buildbot's internal web server (usually the <code>html.Waterfall</code>
+page) is visible. This typically uses the port number set when you
+create the <code>Waterfall</code> object: the buildbot needs your help to
+figure out a suitable externally-visible host name.
+
+ <p>When status notices are sent to users (either by email or over IRC),
+<code>buildbotURL</code> will be used to create a URL to the specific build
+or problem that they are being notified about. It will also be made
+available to queriers (over IRC) who want to find out where to get
+more information about this buildbot.
+
+ <p><a name="index-c_005b_0027logCompressionLimit_0027_005d-16"></a>The <code>logCompressionLimit</code> enables bz2-compression of build logs on
+disk for logs that are bigger than the given size, or disables that
+completely if given <code>False</code>. The default value is 4k, which should
+be a reasonable default on most file systems. This setting has no impact
+on status plugins, and merely affects the required disk space on the
+master for build logs.
+
+<div class="node">
+<p><hr>
+<a name="Change-Sources-and-Schedulers"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Merging-BuildRequests">Merging BuildRequests</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Defining-the-Project">Defining the Project</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Configuration">Configuration</a>
+
+</div>
+
+<h3 class="section">4.5 Change Sources and Schedulers</h3>
+
+<p><a name="index-c_005b_0027sources_0027_005d-17"></a><a name="index-c_005b_0027change_005fsource_0027_005d-18"></a>
+The <code>c['change_source']</code> key is the ChangeSource
+instance<a rel="footnote" href="#fn-6" name="fnd-6"><sup>6</sup></a> that
+defines how the buildmaster learns about source code changes. More
+information about what goes here is available in <a href="#Getting-Source-Code-Changes">Getting Source Code Changes</a>.
+
+<pre class="example"> from buildbot.changes.pb import PBChangeSource
+ c['change_source'] = PBChangeSource()
+</pre>
+ <p><a name="index-c_005b_0027schedulers_0027_005d-19"></a>
+(note: in buildbot-0.7.5 and earlier, this key was named
+<code>c['sources']</code>, and required a list. <code>c['sources']</code> is
+deprecated as of buildbot-0.7.6 and is scheduled to be removed in a
+future release).
+
+ <p><code>c['schedulers']</code> is a list of Scheduler instances, each
+of which causes builds to be started on a particular set of
+Builders. The two basic Scheduler classes you are likely to start
+with are <code>Scheduler</code> and <code>Periodic</code>, but you can write a
+customized subclass to implement more complicated build scheduling.
+
+ <p>Scheduler arguments
+should always be specified by name (as keyword arguments), to allow
+for future expansion:
+
+<pre class="example"> sched = Scheduler(name="quick", builderNames=['lin', 'win'])
+</pre>
+ <p>All schedulers have several arguments in common:
+
+ <dl>
+<dt><code>name</code><dd>
+Each Scheduler must have a unique name. This is used in status
+displays, and is also available in the build property <code>scheduler</code>.
+
+ <br><dt><code>builderNames</code><dd>
+This is the set of builders which this scheduler should trigger, specified
+as a list of names (strings).
+
+ <br><dt><code>properties</code><dd><a name="index-Properties-20"></a>
+This is a dictionary specifying properties that will be transmitted
+to all builds started by this scheduler.
+
+ </dl>
+
+ <p>Here is a brief catalog of the available Scheduler types. All these
+Schedulers are classes in <code>buildbot.scheduler</code>, and the
+docstrings there are the best source of documentation on the arguments
+taken by each one.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Scheduler-Scheduler">Scheduler Scheduler</a>
+<li><a accesskey="2" href="#AnyBranchScheduler">AnyBranchScheduler</a>
+<li><a accesskey="3" href="#Dependent-Scheduler">Dependent Scheduler</a>
+<li><a accesskey="4" href="#Periodic-Scheduler">Periodic Scheduler</a>
+<li><a accesskey="5" href="#Nightly-Scheduler">Nightly Scheduler</a>
+<li><a accesskey="6" href="#Try-Schedulers">Try Schedulers</a>
+<li><a accesskey="7" href="#Triggerable-Scheduler">Triggerable Scheduler</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Scheduler-Scheduler"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#AnyBranchScheduler">AnyBranchScheduler</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Change-Sources-and-Schedulers">Change Sources and Schedulers</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Change-Sources-and-Schedulers">Change Sources and Schedulers</a>
+
+</div>
+
+<h4 class="subsection">4.5.1 Scheduler Scheduler</h4>
+
+<p><a name="index-buildbot_002escheduler_002eScheduler-21"></a>
+This is the original and still most popular Scheduler class. It follows
+exactly one branch, and starts a configurable tree-stable-timer after
+each change on that branch. When the timer expires, it starts a build
+on some set of Builders. The Scheduler accepts a <code>fileIsImportant</code>
+function which can be used to ignore some Changes if they do not
+affect any &ldquo;important&rdquo; files.
+
+ <p>The arguments to this scheduler are:
+
+ <dl>
+<dt><code>name</code>
+<br><dt><code>builderNames</code>
+<br><dt><code>properties</code>
+<br><dt><code>branch</code><dd>This Scheduler will pay attention to a single branch, ignoring Changes
+that occur on other branches. Setting <code>branch</code> equal to the
+special value of <code>None</code> means it should only pay attention to
+the default branch. Note that <code>None</code> is a keyword, not a string,
+so you want to use <code>None</code> and not <code>"None"</code>.
+
+ <br><dt><code>treeStableTimer</code><dd>The Scheduler will wait for this many seconds before starting the
+build. If new changes are made during this interval, the timer will be
+restarted, so really the build will be started after a change and then
+after this many seconds of inactivity.
+
+ <br><dt><code>fileIsImportant</code><dd>A callable which takes one argument, a Change instance, and returns
+<code>True</code> if the change is worth building, and <code>False</code> if
+it is not. Unimportant Changes are accumulated until the build is
+triggered by an important change. The default value of None means
+that all Changes are important.
+
+ <br><dt><code>categories</code><dd>A list of categories of changes that this scheduler will respond to. If this
+is specified, then any non-matching changes are ignored.
+
+ </dl>
+
+ <p>Example:
+
+<pre class="example"> from buildbot import scheduler
+ quick = scheduler.Scheduler(name="quick",
+ branch=None,
+ treeStableTimer=60,
+ builderNames=["quick-linux", "quick-netbsd"])
+ full = scheduler.Scheduler(name="full",
+ branch=None,
+ treeStableTimer=5*60,
+ builderNames=["full-linux", "full-netbsd", "full-OSX"])
+ c['schedulers'] = [quick, full]
+</pre>
+ <p>In this example, the two &ldquo;quick&rdquo; builders are triggered 60 seconds
+after the tree has been changed. The &ldquo;full&rdquo; builds do not run quite
+so quickly (they wait 5 minutes), so hopefully if the quick builds
+fail due to a missing file or really simple typo, the developer can
+discover and fix the problem before the full builds are started. Both
+Schedulers only pay attention to the default branch: any changes
+on other branches are ignored by these Schedulers. Each Scheduler
+triggers a different set of Builders, referenced by name.
+
+<div class="node">
+<p><hr>
+<a name="AnyBranchScheduler"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Dependent-Scheduler">Dependent Scheduler</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Scheduler-Scheduler">Scheduler Scheduler</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Change-Sources-and-Schedulers">Change Sources and Schedulers</a>
+
+</div>
+
+<h4 class="subsection">4.5.2 AnyBranchScheduler</h4>
+
+<p><a name="index-buildbot_002escheduler_002eAnyBranchScheduler-22"></a>
+This scheduler uses a tree-stable-timer like the default one, but
+follows multiple branches at once. Each branch gets a separate timer.
+
+ <p>The arguments to this scheduler are:
+
+ <dl>
+<dt><code>name</code>
+<br><dt><code>builderNames</code>
+<br><dt><code>properties</code>
+<br><dt><code>branches</code><dd>This Scheduler will pay attention to any number of branches, ignoring
+Changes that occur on other branches. Branches are specified just as
+for the <code>Scheduler</code> class.
+
+ <br><dt><code>treeStableTimer</code><dd>The Scheduler will wait for this many seconds before starting the
+build. If new changes are made during this interval, the timer will be
+restarted, so really the build will be started after a change and then
+after this many seconds of inactivity.
+
+ <br><dt><code>fileIsImportant</code><dd>A callable which takes one argument, a Change instance, and returns
+<code>True</code> if the change is worth building, and <code>False</code> if
+it is not. Unimportant Changes are accumulated until the build is
+triggered by an important change. The default value of None means
+that all Changes are important.
+</dl>
+
+<div class="node">
+<p><hr>
+<a name="Dependent-Scheduler"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Periodic-Scheduler">Periodic Scheduler</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#AnyBranchScheduler">AnyBranchScheduler</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Change-Sources-and-Schedulers">Change Sources and Schedulers</a>
+
+</div>
+
+<h4 class="subsection">4.5.3 Dependent Scheduler</h4>
+
+<p><a name="index-Dependent-23"></a><a name="index-Dependencies-24"></a><a name="index-buildbot_002escheduler_002eDependent-25"></a>
+It is common to wind up with one kind of build which should only be
+performed if the same source code was successfully handled by some
+other kind of build first. An example might be a packaging step: you
+might only want to produce .deb or RPM packages from a tree that was
+known to compile successfully and pass all unit tests. You could put
+the packaging step in the same Build as the compile and testing steps,
+but there might be other reasons to not do this (in particular you
+might have several Builders worth of compiles/tests, but only wish to
+do the packaging once). Another example is if you want to skip the
+&ldquo;full&rdquo; builds after a failing &ldquo;quick&rdquo; build of the same source
+code. Or, if one Build creates a product (like a compiled library)
+that is used by some other Builder, you'd want to make sure the
+consuming Build is run <em>after</em> the producing one.
+
+ <p>You can use &ldquo;Dependencies&rdquo; to express this relationship
+to the Buildbot. There is a special kind of Scheduler named
+<code>scheduler.Dependent</code> that will watch an &ldquo;upstream&rdquo; Scheduler
+for builds to complete successfully (on all of its Builders). Each time
+that happens, the same source code (i.e. the same <code>SourceStamp</code>)
+will be used to start a new set of builds, on a different set of
+Builders. This &ldquo;downstream&rdquo; scheduler doesn't pay attention to
+Changes at all. It only pays attention to the upstream scheduler.
+
+ <p>If the build fails on any of the Builders in the upstream set,
+the downstream builds will not fire. Note that, for SourceStamps
+generated by a ChangeSource, the <code>revision</code> is None, meaning HEAD.
+If any changes are committed between the time the upstream scheduler
+begins its build and the time the dependent scheduler begins its
+build, then those changes will be included in the downstream build.
+See the <a href="#Triggerable-Scheduler">Triggerable Scheduler</a> for a more flexible dependency
+mechanism that can avoid this problem.
+
+ <p>The arguments to this scheduler are:
+
+ <dl>
+<dt><code>name</code>
+<br><dt><code>builderNames</code>
+<br><dt><code>properties</code>
+<br><dt><code>upstream</code><dd>The upstream scheduler to watch. Note that this is an &ldquo;instance&rdquo;,
+not the name of the scheduler.
+</dl>
+
+ <p>Example:
+
+<pre class="example"> from buildbot import scheduler
+ tests = scheduler.Scheduler("just-tests", None, 5*60,
+ ["full-linux", "full-netbsd", "full-OSX"])
+ package = scheduler.Dependent("build-package",
+ tests, # upstream scheduler -- no quotes!
+ ["make-tarball", "make-deb", "make-rpm"])
+ c['schedulers'] = [tests, package]
+</pre>
+ <div class="node">
+<p><hr>
+<a name="Periodic-Scheduler"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Nightly-Scheduler">Nightly Scheduler</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Dependent-Scheduler">Dependent Scheduler</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Change-Sources-and-Schedulers">Change Sources and Schedulers</a>
+
+</div>
+
+<h4 class="subsection">4.5.4 Periodic Scheduler</h4>
+
+<p><a name="index-buildbot_002escheduler_002ePeriodic-26"></a>
+This simple scheduler just triggers a build every N seconds.
+
+ <p>The arguments to this scheduler are:
+
+ <dl>
+<dt><code>name</code>
+<br><dt><code>builderNames</code>
+<br><dt><code>properties</code>
+<br><dt><code>periodicBuildTimer</code><dd>The time, in seconds, after which to start a build.
+</dl>
+
+ <p>Example:
+
+<pre class="example"> from buildbot import scheduler
+ nightly = scheduler.Periodic(name="nightly",
+ builderNames=["full-solaris"],
+ periodicBuildTimer=24*60*60)
+ c['schedulers'] = [nightly]
+</pre>
+ <p>The Scheduler in this example just runs the full solaris build once
+per day. Note that this Scheduler only lets you control the time
+between builds, not the absolute time-of-day of each Build, so this
+could easily wind up a &ldquo;daily&rdquo; or &ldquo;every afternoon&rdquo; scheduler
+depending upon when it was first activated.
+
+<div class="node">
+<p><hr>
+<a name="Nightly-Scheduler"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Try-Schedulers">Try Schedulers</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Periodic-Scheduler">Periodic Scheduler</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Change-Sources-and-Schedulers">Change Sources and Schedulers</a>
+
+</div>
+
+<h4 class="subsection">4.5.5 Nightly Scheduler</h4>
+
+<p><a name="index-buildbot_002escheduler_002eNightly-27"></a>
+This is a highly configurable periodic build scheduler, which triggers
+a build at particular times of day, week, month, or year. The
+configuration syntax is very similar to the well-known <code>crontab</code>
+format, in which you provide values for minute, hour, day, and month
+(some of which can be wildcards), and a build is triggered whenever
+the current time matches the given constraints. This can run a build
+every night, every morning, every weekend, alternate Thursdays,
+on your boss's birthday, etc.
+
+ <p>Pass some subset of <code>minute</code>, <code>hour</code>, <code>dayOfMonth</code>,
+<code>month</code>, and <code>dayOfWeek</code>; each may be a single number or
+a list of valid values. The builds will be triggered whenever the
+current time matches these values. Wildcards are represented by a
+'*' string. All fields default to a wildcard except 'minute', so
+with no fields this defaults to a build every hour, on the hour.
+The full list of parameters is:
+
+ <dl>
+<dt><code>name</code>
+<br><dt><code>builderNames</code>
+<br><dt><code>properties</code>
+<br><dt><code>branch</code><dd>The branch to build, just as for <code>Scheduler</code>.
+
+ <br><dt><code>minute</code><dd>The minute of the hour on which to start the build. This defaults
+to 0, meaning an hourly build.
+
+ <br><dt><code>hour</code><dd>The hour of the day on which to start the build, in 24-hour notation.
+This defaults to *, meaning every hour.
+
+ <br><dt><code>month</code><dd>The month in which to start the build, with January = 1. This defaults
+to *, meaning every month.
+
+ <br><dt><code>dayOfWeek</code><dd>The day of the week to start a build, with Monday = 0. This defaults
+to *, meaning every day of the week.
+
+ <br><dt><code>onlyIfChanged</code><dd>If this is true, then builds will not be scheduled at the designated time
+unless the source has changed since the previous build.
+</dl>
+
+ <p>For example, the following master.cfg clause will cause a build to be
+started every night at 3:00am:
+
+<pre class="example"> s = scheduler.Nightly(name='nightly',
+ builderNames=['builder1', 'builder2'],
+ hour=3,
+ minute=0)
+</pre>
+ <p>This scheduler will perform a build each monday morning at 6:23am and
+again at 8:23am, but only if someone has committed code in the interim:
+
+<pre class="example"> s = scheduler.Nightly(name='BeforeWork',
+ builderNames=['builder1'],
+ dayOfWeek=0,
+ hour=[6,8],
+ minute=23,
+ onlyIfChanged=True)
+</pre>
+ <p>The following runs a build every two hours, using Python's <code>range</code>
+function:
+
+<pre class="example"> s = Nightly(name='every2hours',
+ builderNames=['builder1'],
+ hour=range(0, 24, 2))
+</pre>
+ <p>Finally, this example will run only on December 24th:
+
+<pre class="example"> s = Nightly(name='SleighPreflightCheck',
+ builderNames=['flying_circuits', 'radar'],
+ month=12,
+ dayOfMonth=24,
+ hour=12,
+ minute=0)
+</pre>
+ <div class="node">
+<p><hr>
+<a name="Try-Schedulers"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Triggerable-Scheduler">Triggerable Scheduler</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Nightly-Scheduler">Nightly Scheduler</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Change-Sources-and-Schedulers">Change Sources and Schedulers</a>
+
+</div>
+
+<h4 class="subsection">4.5.6 Try Schedulers</h4>
+
+<p><a name="index-buildbot_002escheduler_002eTry_005fJobdir-28"></a><a name="index-buildbot_002escheduler_002eTry_005fUserpass-29"></a>
+This scheduler allows developers to use the <code>buildbot try</code>
+command to trigger builds of code they have not yet committed. See
+<a href="#try">try</a> for complete details.
+
+ <p>Two implementations are available: <code>Try_Jobdir</code> and
+<code>Try_Userpass</code>. The former monitors a job directory, specified
+by the <code>jobdir</code> parameter, while the latter listens for PB
+connections on a specific <code>port</code>, and authenticates against
+<code>userpass</code>, a list of (username, password) pairs.
+
+<div class="node">
+<p><hr>
+<a name="Triggerable-Scheduler"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Try-Schedulers">Try Schedulers</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Change-Sources-and-Schedulers">Change Sources and Schedulers</a>
+
+</div>
+
+<h4 class="subsection">4.5.7 Triggerable Scheduler</h4>
+
+<p><a name="index-Triggers-30"></a><a name="index-buildbot_002escheduler_002eTriggerable-31"></a>
+The <code>Triggerable</code> scheduler waits to be triggered by a Trigger
+step (see <a href="#Triggering-Schedulers">Triggering Schedulers</a>) in another build. That step
+can optionally wait for the scheduler's builds to complete. This
+provides two advantages over Dependent schedulers. First, the same
+scheduler can be triggered from multiple builds. Second, the ability
+to wait for a Triggerable's builds to complete provides a form of
+"subroutine call", where one or more builds can "call" a scheduler
+to perform some work for them, perhaps on other buildslaves.
+
+ <p>The parameters are just the basics:
+
+ <dl>
+<dt><code>name</code><br><dt><code>builderNames</code><br><dt><code>properties</code><dd></dl>
+
+ <p>This class is only useful in conjunction with the <code>Trigger</code> step.
+Here is a fully-worked example:
+
+<pre class="example"> from buildbot import scheduler
+ from buildbot.process import factory
+ from buildbot.steps import trigger
+
+ checkin = scheduler.Scheduler(name="checkin",
+ branch=None,
+ treeStableTimer=5*60,
+ builderNames=["checkin"])
+ nightly = scheduler.Nightly(name='nightly',
+ builderNames=['nightly'],
+ hour=3,
+ minute=0)
+
+ mktarball = scheduler.Triggerable(name="mktarball",
+ builderNames=["mktarball"])
+ build = scheduler.Triggerable(name="build-all-platforms",
+ builderNames=["build-all-platforms"])
+ test = scheduler.Triggerable(name="distributed-test",
+ builderNames=["distributed-test"])
+ package = scheduler.Triggerable(name="package-all-platforms",
+ builderNames=["package-all-platforms"])
+
+ c['schedulers'] = [checkin, nightly, build, test, package]
+
+ # on checkin, make a tarball, build it, and test it
+ checkin_factory = factory.BuildFactory()
+ checkin_factory.addStep(trigger.Trigger(schedulerNames=['mktarball'],
+ waitForFinish=True))
+ checkin_factory.addStep(trigger.Trigger(schedulerNames=['build-all-platforms'],
+ waitForFinish=True))
+ checkin_factory.addStep(trigger.Trigger(schedulerNames=['distributed-test'],
+ waitForFinish=True))
+
+ # and every night, make a tarball, build it, and package it
+ nightly_factory = factory.BuildFactory()
+ nightly_factory.addStep(trigger.Trigger(schedulerNames=['mktarball'],
+ waitForFinish=True))
+ nightly_factory.addStep(trigger.Trigger(schedulerNames=['build-all-platforms'],
+ waitForFinish=True))
+ nightly_factory.addStep(trigger.Trigger(schedulerNames=['package-all-platforms'],
+ waitForFinish=True))
+</pre>
+ <div class="node">
+<p><hr>
+<a name="Merging-BuildRequests"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Setting-the-slaveport">Setting the slaveport</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Change-Sources-and-Schedulers">Change Sources and Schedulers</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Configuration">Configuration</a>
+
+</div>
+
+<h3 class="section">4.6 Merging BuildRequests</h3>
+
+<p><a name="index-c_005b_0027mergeRequests_0027_005d-32"></a>
+By default, buildbot merges BuildRequests that have compatible
+SourceStamps. This behaviour can be customized with the
+<code>c['mergeRequests']</code> configuration key. This key specifies a function
+which is called with three arguments: a <code>Builder</code> and two
+<code>BuildRequest</code> objects. It should return true if the requests can be
+merged. For example:
+
+<pre class="example"> def mergeRequests(builder, req1, req2):
+ """Don't merge buildrequest at all"""
+ return False
+ c['mergeRequests'] = mergeRequests
+</pre>
+ <p>In many cases, the details of the SourceStamps and BuildRequests are important.
+In this example, only BuildRequests with the same "reason" are merged; thus
+developers forcing builds for different reasons will see distinct builds.
+
+<pre class="example"> def mergeRequests(builder, req1, req2):
+ if req1.source.canBeMergedWith(req2.source) and req1.reason == req2.reason:
+ return True
+ return False
+ c['mergeRequests'] = mergeRequests
+</pre>
+ <div class="node">
+<p><hr>
+<a name="Setting-the-slaveport"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Buildslave-Specifiers">Buildslave Specifiers</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Merging-BuildRequests">Merging BuildRequests</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Configuration">Configuration</a>
+
+</div>
+
+<h3 class="section">4.7 Setting the slaveport</h3>
+
+<p><a name="index-c_005b_0027slavePortnum_0027_005d-33"></a>
+The buildmaster will listen on a TCP port of your choosing for
+connections from buildslaves. It can also use this port for
+connections from remote Change Sources, status clients, and debug
+tools. This port should be visible to the outside world, and you'll
+need to tell your buildslave admins about your choice.
+
+ <p>It does not matter which port you pick, as long it is externally
+visible, however you should probably use something larger than 1024,
+since most operating systems don't allow non-root processes to bind to
+low-numbered ports. If your buildmaster is behind a firewall or a NAT
+box of some sort, you may have to configure your firewall to permit
+inbound connections to this port.
+
+<pre class="example"> c['slavePortnum'] = 10000
+</pre>
+ <p><code>c['slavePortnum']</code> is a <em>strports</em> specification string,
+defined in the <code>twisted.application.strports</code> module (try
+<samp><span class="command">pydoc twisted.application.strports</span></samp> to get documentation on
+the format). This means that you can have the buildmaster listen on a
+localhost-only port by doing:
+
+<pre class="example"> c['slavePortnum'] = "tcp:10000:interface=127.0.0.1"
+</pre>
+ <p>This might be useful if you only run buildslaves on the same machine,
+and they are all configured to contact the buildmaster at
+<code>localhost:10000</code>.
+
+<div class="node">
+<p><hr>
+<a name="Buildslave-Specifiers"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#On_002dDemand-_0028_0022Latent_0022_0029-Buildslaves">On-Demand (&quot;Latent&quot;) Buildslaves</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Setting-the-slaveport">Setting the slaveport</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Configuration">Configuration</a>
+
+</div>
+
+<h3 class="section">4.8 Buildslave Specifiers</h3>
+
+<p><a name="index-c_005b_0027slaves_0027_005d-34"></a>
+The <code>c['slaves']</code> key is a list of known buildslaves. In the common case,
+each buildslave is defined by an instance of the BuildSlave class. It
+represents a standard, manually started machine that will try to connect to
+the buildbot master as a slave. Contrast these with the "on-demand" latent
+buildslaves, such as the Amazon Web Service Elastic Compute Cloud latent
+buildslave discussed below.
+
+ <p>The BuildSlave class is instantiated with two values: (slavename,
+slavepassword). These are the same two values that need to be provided to the
+buildslave administrator when they create the buildslave.
+
+ <p>The slavenames must be unique, of course. The password exists to
+prevent evildoers from interfering with the buildbot by inserting
+their own (broken) buildslaves into the system and thus displacing the
+real ones.
+
+ <p>Buildslaves with an unrecognized slavename or a non-matching password
+will be rejected when they attempt to connect, and a message
+describing the problem will be put in the log file (see <a href="#Logfiles">Logfiles</a>).
+
+<pre class="example"> from buildbot.buildslave import BuildSlave
+ c['slaves'] = [BuildSlave('bot-solaris', 'solarispasswd'),
+ BuildSlave('bot-bsd', 'bsdpasswd')
+ ]
+</pre>
+ <p><a name="index-Properties-35"></a><code>BuildSlave</code> objects can also be created with an optional
+<code>properties</code> argument, a dictionary specifying properties that
+will be available to any builds performed on this slave. For example:
+
+<pre class="example"> from buildbot.buildslave import BuildSlave
+ c['slaves'] = [BuildSlave('bot-solaris', 'solarispasswd',
+ properties={'os':'solaris'}),
+ ]
+</pre>
+ <p>The <code>BuildSlave</code> constructor can also take an optional
+<code>max_builds</code> parameter to limit the number of builds that it
+will execute simultaneously:
+
+<pre class="example"> from buildbot.buildslave import BuildSlave
+ c['slaves'] = [BuildSlave("bot-linux", "linuxpassword", max_builds=2)]
+</pre>
+ <p>Historical note: in buildbot-0.7.5 and earlier, the <code>c['bots']</code>
+key was used instead, and it took a list of (name, password) tuples.
+This key is accepted for backwards compatibility, but is deprecated as
+of 0.7.6 and will go away in some future release.
+
+<ul class="menu">
+<li><a accesskey="1" href="#When-Buildslaves-Go-Missing">When Buildslaves Go Missing</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="When-Buildslaves-Go-Missing"></a>
+Up:&nbsp;<a rel="up" accesskey="u" href="#Buildslave-Specifiers">Buildslave Specifiers</a>
+
+</div>
+
+<h4 class="subsection">4.8.1 When Buildslaves Go Missing</h4>
+
+<p>Sometimes, the buildslaves go away. One very common reason for this is
+when the buildslave process is started once (manually) and left
+running, but then later the machine reboots and the process is not
+automatically restarted.
+
+ <p>If you'd like to have the administrator of the buildslave (or other
+people) be notified by email when the buildslave has been missing for
+too long, just add the <code>notify_on_missing=</code> argument to the
+<code>BuildSlave</code> definition:
+
+<pre class="example"> c['slaves'] = [BuildSlave('bot-solaris', 'solarispasswd',
+ notify_on_missing="bob@example.com"),
+ ]
+</pre>
+ <p>By default, this will send email when the buildslave has been
+disconnected for more than one hour. Only one email per
+connection-loss event will be sent. To change the timeout, use
+<code>missing_timeout=</code> and give it a number of seconds (the default
+is 3600).
+
+ <p>You can have the buildmaster send email to multiple recipients: just
+provide a list of addresses instead of a single one:
+
+<pre class="example"> c['slaves'] = [BuildSlave('bot-solaris', 'solarispasswd',
+ notify_on_missing=["bob@example.com",
+ "alice@example.org"],
+ missing_timeout=300, # notify after 5 minutes
+ ),
+ ]
+</pre>
+ <p>The email sent this way will use a MailNotifier (see <a href="#MailNotifier">MailNotifier</a>)
+status target, if one is configured. This provides a way for you to
+control the &ldquo;from&rdquo; address of the email, as well as the relayhost
+(aka &ldquo;smarthost&rdquo;) to use as an SMTP server. If no MailNotifier is
+configured on this buildmaster, the buildslave-missing emails will be
+sent using a default configuration.
+
+ <p>Note that if you want to have a MailNotifier for buildslave-missing
+emails but not for regular build emails, just create one with
+builders=[], as follows:
+
+<pre class="example"> from buildbot.status import mail
+ m = mail.MailNotifier(fromaddr="buildbot@localhost", builders=[],
+ relayhost="smtp.example.org")
+ c['status'].append(m)
+ c['slaves'] = [BuildSlave('bot-solaris', 'solarispasswd',
+ notify_on_missing="bob@example.com"),
+ ]
+</pre>
+ <div class="node">
+<p><hr>
+<a name="On-Demand-(%22Latent%22)-Buildslaves"></a>
+<a name="On_002dDemand-_0028_0022Latent_0022_0029-Buildslaves"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Defining-Global-Properties">Defining Global Properties</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Buildslave-Specifiers">Buildslave Specifiers</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Configuration">Configuration</a>
+
+</div>
+
+<h3 class="section">4.9 On-Demand ("Latent") Buildslaves</h3>
+
+<p>The standard buildbot model has slaves started manually. The previous section
+described how to configure the master for this approach.
+
+ <p>Another approach is to let the buildbot master start slaves when builds are
+ready, on-demand. Thanks to services such as Amazon Web Services' Elastic
+Compute Cloud ("AWS EC2"), this is relatively easy to set up, and can be
+very useful for some situations.
+
+ <p>The buildslaves that are started on-demand are called "latent" buildslaves.
+As of this writing, buildbot ships with an abstract base class for building
+latent buildslaves, and a concrete implementation for AWS EC2.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Amazon-Web-Services-Elastic-Compute-Cloud-_0028_0022AWS-EC2_0022_0029">Amazon Web Services Elastic Compute Cloud ("AWS EC2")</a>
+<li><a accesskey="2" href="#Dangers-with-Latent-Buildslaves">Dangers with Latent Buildslaves</a>
+<li><a accesskey="3" href="#Writing-New-Latent-Buildslaves">Writing New Latent Buildslaves</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Amazon-Web-Services-Elastic-Compute-Cloud-(%22AWS-EC2%22)"></a>
+<a name="Amazon-Web-Services-Elastic-Compute-Cloud-_0028_0022AWS-EC2_0022_0029"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Dangers-with-Latent-Buildslaves">Dangers with Latent Buildslaves</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#On_002dDemand-_0028_0022Latent_0022_0029-Buildslaves">On-Demand (&quot;Latent&quot;) Buildslaves</a>
+
+</div>
+
+<h4 class="subsection">4.9.1 Amazon Web Services Elastic Compute Cloud ("AWS EC2")</h4>
+
+<p><a href="http://aws.amazon.com/ec2/">AWS EC2</a> is a web service that allows you to
+start virtual machines in an Amazon data center. Please see their website for
+details, including costs. Using the AWS EC2 latent buildslaves involves getting
+an EC2 account with AWS and setting up payment; customizing one or more EC2
+machine images ("AMIs") on your desired operating system(s) and publishing
+them (privately if needed); and configuring the buildbot master to know how to
+start your customized images for "substantiating" your latent slaves.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Get-an-AWS-EC2-Account">Get an AWS EC2 Account</a>
+<li><a accesskey="2" href="#Create-an-AMI">Create an AMI</a>
+<li><a accesskey="3" href="#Configure-the-Master-with-an-EC2LatentBuildSlave">Configure the Master with an EC2LatentBuildSlave</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Get-an-AWS-EC2-Account"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Create-an-AMI">Create an AMI</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Amazon-Web-Services-Elastic-Compute-Cloud-_0028_0022AWS-EC2_0022_0029">Amazon Web Services Elastic Compute Cloud (&quot;AWS EC2&quot;)</a>
+
+</div>
+
+<h5 class="subsubsection">4.9.1.1 Get an AWS EC2 Account</h5>
+
+<p>To start off, to use the AWS EC2 latent buildslave, you need to get an AWS
+developer account and sign up for EC2. These instructions may help you get
+started:
+
+ <ul>
+<li>Go to http://aws.amazon.com/ and click to "Sign Up Now" for an AWS account.
+
+ <li>Once you are logged into your account, you need to sign up for EC2.
+Instructions for how to do this have changed over time because Amazon changes
+their website, so the best advice is to hunt for it. After signing up for EC2,
+it may say it wants you to upload an x.509 cert. You will need this to create
+images (see below) but it is not technically necessary for the buildbot master
+configuration.
+
+ <li>You must enter a valid credit card before you will be able to use EC2. Do that
+under 'Payment Method'.
+
+ <li>Make sure you're signed up for EC2 by going to 'Your Account'-&gt;'Account
+Activity' and verifying EC2 is listed.
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Create-an-AMI"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Configure-the-Master-with-an-EC2LatentBuildSlave">Configure the Master with an EC2LatentBuildSlave</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Get-an-AWS-EC2-Account">Get an AWS EC2 Account</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Amazon-Web-Services-Elastic-Compute-Cloud-_0028_0022AWS-EC2_0022_0029">Amazon Web Services Elastic Compute Cloud (&quot;AWS EC2&quot;)</a>
+
+</div>
+
+<h5 class="subsubsection">4.9.1.2 Create an AMI</h5>
+
+<p>Now you need to create an AMI and configure the master. You may need to
+run through this cycle a few times to get it working, but these instructions
+should get you started.
+
+ <p>Creating an AMI is out of the scope of this document. The
+<a href="http://docs.amazonwebservices.com/AWSEC2/latest/GettingStartedGuide/">EC2 Getting Started Guide</a>
+is a good resource for this task. Here are a few additional hints.
+
+ <ul>
+<li>When an instance of the image starts, it needs to automatically start a
+buildbot slave that connects to your master (to create a buildbot slave,
+see <a href="#Creating-a-buildslave">Creating a buildslave</a>; to make a daemon,
+see <a href="#Launching-the-daemons">Launching the daemons</a>).
+
+ <li>You may want to make an instance of the buildbot slave, configure it as a
+standard buildslave in the master (i.e., not as a latent slave), and test and
+debug it that way before you turn it into an AMI and convert to a latent
+slave in the master.
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Configure-the-Master-with-an-EC2LatentBuildSlave"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Create-an-AMI">Create an AMI</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Amazon-Web-Services-Elastic-Compute-Cloud-_0028_0022AWS-EC2_0022_0029">Amazon Web Services Elastic Compute Cloud (&quot;AWS EC2&quot;)</a>
+
+</div>
+
+<h5 class="subsubsection">4.9.1.3 Configure the Master with an EC2LatentBuildSlave</h5>
+
+<p>Now let's assume you have an AMI that should work with the
+EC2LatentBuildSlave. It's now time to set up your buildbot master
+configuration.
+
+ <p>You will need some information from your AWS account: the "Access Key Id" and
+the "Secret Access Key". If you've built the AMI yourself, you probably
+already are familiar with these values. If you have not, and someone has
+given you access to an AMI, these hints may help you find the necessary
+values:
+
+ <ul>
+<li>While logged into your AWS account, find the "Access Identifiers" link (either
+on the left, or via "Your Account" -&gt; "Access Identifiers").
+
+ <li>On the page, you'll see alphanumeric values for "Your Access Key Id:" and
+"Your Secret Access Key:". Make a note of these. Later on, we'll call the
+first one your "identifier" and the second one your "secret_identifier."
+</ul>
+
+ <p>When creating an EC2LatentBuildSlave in the buildbot master configuration,
+the first three arguments are required. The name and password are the first
+two arguments, and work the same as with normal buildslaves. The next
+argument specifies the type of the EC2 virtual machine (available options as
+of this writing include "m1.small", "m1.large", "m1.xlarge", "c1.medium",
+and "c1.xlarge"; see the EC2 documentation for descriptions of these
+machines).
+
+ <p>Here is the simplest example of configuring an EC2 latent buildslave. It
+specifies all necessary remaining values explicitly in the instantiation.
+
+<pre class="example"> from buildbot.ec2buildslave import EC2LatentBuildSlave
+ c['slaves'] = [EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ ami='ami-12345',
+ identifier='publickey',
+ secret_identifier='privatekey'
+ )]
+</pre>
+ <p>The "ami" argument specifies the AMI that the master should start. The
+"identifier" argument specifies the AWS "Access Key Id," and the
+"secret_identifier" specifies the AWS "Secret Access Key." Both the AMI and
+the account information can be specified in alternate ways.
+
+ <p>Note that whoever has your identifier and secret_identifier values can request
+AWS work charged to your account, so these values need to be carefully
+protected. Another way to specify these access keys is to put them in a
+separate file. You can then make the access privileges stricter for this
+separate file, and potentially let more people read your main configuration
+file.
+
+ <p>By default, you can make an .ec2 directory in the home folder of the user
+running the buildbot master. In that directory, create a file called aws_id.
+The first line of that file should be your access key id; the second line
+should be your secret access key id. Then you can instantiate the build slave
+as follows.
+
+<pre class="example"> from buildbot.ec2buildslave import EC2LatentBuildSlave
+ c['slaves'] = [EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ ami='ami-12345')]
+</pre>
+ <p>If you want to put the key information in another file, use the
+"aws_id_file_path" initialization argument.
+
+ <p>Previous examples used a particular AMI. If the Buildbot master will be
+deployed in a process-controlled environment, it may be convenient to
+specify the AMI more flexibly. Rather than specifying an individual AMI,
+specify one or two AMI filters.
+
+ <p>In all cases, the AMI that sorts last by its location (the S3 bucket and
+manifest name) will be preferred.
+
+ <p>One available filter is to specify the acceptable AMI owners, by AWS account
+number (the 12 digit number, usually rendered in AWS with hyphens like
+"1234-5678-9012", should be entered as an integer).
+
+<pre class="example"> from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ valid_ami_owners=[11111111111,
+ 22222222222],
+ identifier='publickey',
+ secret_identifier='privatekey'
+ )
+</pre>
+ <p>The other available filter is to provide a regular expression string that
+will be matched against each AMI's location (the S3 bucket and manifest name).
+
+<pre class="example"> from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large',
+ valid_ami_location_regex=r'buildbot\-.*/image.manifest.xml',
+ identifier='publickey', secret_identifier='privatekey')
+</pre>
+ <p>The regular expression can specify a group, which will be preferred for the
+sorting. Only the first group is used; subsequent groups are ignored.
+
+<pre class="example"> from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large',
+ valid_ami_location_regex=r'buildbot\-.*\-(.*)/image.manifest.xml',
+ identifier='publickey', secret_identifier='privatekey')
+</pre>
+ <p>If the group can be cast to an integer, it will be. This allows 10 to sort
+after 1, for instance.
+
+<pre class="example"> from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large',
+ valid_ami_location_regex=r'buildbot\-.*\-(\d+)/image.manifest.xml',
+ identifier='publickey', secret_identifier='privatekey')
+</pre>
+ <p>In addition to using the password as a handshake between the master and the
+slave, you may want to use a firewall to assert that only machines from a
+specific IP can connect as slaves. This is possible with AWS EC2 by using
+the Elastic IP feature. To configure, generate an Elastic IP in AWS, and then
+specify it in your configuration using the "elastic_ip" argument.
+
+<pre class="example"> from buildbot.ec2buildslave import EC2LatentBuildSlave
+ c['slaves'] = [EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ 'ami-12345',
+ identifier='publickey',
+ secret_identifier='privatekey',
+ elastic_ip='208.77.188.166'
+ )]
+</pre>
+ <p>The EC2LatentBuildSlave supports all other configuration from the standard
+BuildSlave. The "missing_timeout" and "notify_on_missing" specify how long
+to wait for an EC2 instance to attach before considering the attempt to have
+failed, and email addresses to alert, respectively. "missing_timeout"
+defaults to 20 minutes.
+
+ <p>The "build_wait_timeout" allows you to specify how long an EC2LatentBuildSlave
+should wait after a build for another build before it shuts down the EC2
+instance. It defaults to 10 minutes.
+
+ <p>"keypair_name" and "security_name" allow you to specify different names for
+these AWS EC2 values. They both default to "latent_buildbot_slave".
+
+<div class="node">
+<p><hr>
+<a name="Dangers-with-Latent-Buildslaves"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Writing-New-Latent-Buildslaves">Writing New Latent Buildslaves</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Amazon-Web-Services-Elastic-Compute-Cloud-_0028_0022AWS-EC2_0022_0029">Amazon Web Services Elastic Compute Cloud (&quot;AWS EC2&quot;)</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#On_002dDemand-_0028_0022Latent_0022_0029-Buildslaves">On-Demand (&quot;Latent&quot;) Buildslaves</a>
+
+</div>
+
+<h4 class="subsection">4.9.2 Dangers with Latent Buildslaves</h4>
+
+<p>Any latent build slave that interacts with a for-fee service, such as the
+EC2LatentBuildSlave, brings significant risks. As already identified, the
+configuration will need access to account information that, if obtained by a
+criminal, can be used to charge services to your account. Also, bugs in the
+buildbot software may lead to unnecessary charges. In particular, if the
+master neglects to shut down an instance for some reason, a virtual machine
+may be running unnecessarily, charging against your account. Manual and/or
+automatic (e.g. nagios with a plugin using a library like boto)
+double-checking may be appropriate.
+
+ <p>A comparatively trivial note is that currently if two instances try to attach
+to the same latent buildslave, it is likely that the system will become
+confused. This should not occur, unless, for instance, you configure a normal
+build slave to connect with the authentication of a latent buildbot. If the
+situation occurs, stop all attached instances and restart the master.
+
+<div class="node">
+<p><hr>
+<a name="Writing-New-Latent-Buildslaves"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Dangers-with-Latent-Buildslaves">Dangers with Latent Buildslaves</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#On_002dDemand-_0028_0022Latent_0022_0029-Buildslaves">On-Demand (&quot;Latent&quot;) Buildslaves</a>
+
+</div>
+
+<h4 class="subsection">4.9.3 Writing New Latent Buildslaves</h4>
+
+<p>Writing a new latent buildslave should only require subclassing
+<code>buildbot.buildslave.AbstractLatentBuildSlave</code> and implementing
+start_instance and stop_instance.
+
+<pre class="example"> def start_instance(self):
+ # responsible for starting instance that will try to connect with this
+ # master. Should return deferred. Problems should use an errback. The
+ # callback value can be None, or can be an iterable of short strings to
+ # include in the "substantiate success" status message, such as
+ # identifying the instance that started.
+ raise NotImplementedError
+
+ def stop_instance(self, fast=False):
+ # responsible for shutting down instance. Return a deferred. If `fast`,
+ # we're trying to shut the master down, so callback as soon as is safe.
+ # Callback value is ignored.
+ raise NotImplementedError
+</pre>
+ <p>See <code>buildbot.ec2buildslave.EC2LatentBuildSlave</code> for an example, or see the
+test example <code>buildbot.test_slaves.FakeLatentBuildSlave</code>.
+
+<div class="node">
+<p><hr>
+<a name="Defining-Global-Properties"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Defining-Builders">Defining Builders</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#On_002dDemand-_0028_0022Latent_0022_0029-Buildslaves">On-Demand (&quot;Latent&quot;) Buildslaves</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Configuration">Configuration</a>
+
+</div>
+
+<h3 class="section">4.10 Defining Global Properties</h3>
+
+<p><a name="index-c_005b_0027properties_0027_005d-36"></a><a name="index-Properties-37"></a>
+The <code>'properties'</code> configuration key defines a dictionary
+of properties that will be available to all builds started by the
+buildmaster:
+
+<pre class="example"> c['properties'] = {
+ 'Widget-version' : '1.2',
+ 'release-stage' : 'alpha'
+ }
+</pre>
+ <div class="node">
+<p><hr>
+<a name="Defining-Builders"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Defining-Status-Targets">Defining Status Targets</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Defining-Global-Properties">Defining Global Properties</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Configuration">Configuration</a>
+
+</div>
+
+<h3 class="section">4.11 Defining Builders</h3>
+
+<p><a name="index-c_005b_0027builders_0027_005d-38"></a>
+The <code>c['builders']</code> key is a list of dictionaries which specify
+the Builders. The Buildmaster runs a collection of Builders, each of
+which handles a single type of build (e.g. full versus quick), on a
+single build slave. A Buildbot which makes sure that the latest code
+(&ldquo;HEAD&rdquo;) compiles correctly across four separate architectures will
+have four Builders, each performing the same build but on different
+slaves (one per platform).
+
+ <p>Each Builder gets a separate column in the waterfall display. In
+general, each Builder runs independently (although various kinds of
+interlocks can cause one Builder to have an effect on another).
+
+ <p>Each Builder specification dictionary has several required keys:
+
+ <dl>
+<dt><code>name</code><dd>This specifies the Builder's name, which is used in status
+reports.
+
+ <br><dt><code>slavename</code><dd>This specifies which buildslave will be used by this Builder.
+<code>slavename</code> must appear in the <code>c['slaves']</code> list. Each
+buildslave can accommodate multiple Builders.
+
+ <br><dt><code>slavenames</code><dd>If you provide <code>slavenames</code> instead of <code>slavename</code>, you can
+give a list of buildslaves which are capable of running this Builder.
+If multiple buildslaves are available for any given Builder, you will
+have some measure of redundancy: in case one slave goes offline, the
+others can still keep the Builder working. In addition, multiple
+buildslaves will allow multiple simultaneous builds for the same
+Builder, which might be useful if you have a lot of forced or &ldquo;try&rdquo;
+builds taking place.
+
+ <p>If you use this feature, it is important to make sure that the
+buildslaves are all, in fact, capable of running the given build. The
+slave hosts should be configured similarly, otherwise you will spend a
+lot of time trying (unsuccessfully) to reproduce a failure that only
+occurs on some of the buildslaves and not the others. Different
+platforms, operating systems, versions of major programs or libraries,
+all these things mean you should use separate Builders.
+
+ <br><dt><code>builddir</code><dd>This specifies the name of a subdirectory (under the base directory)
+in which everything related to this builder will be placed. On the
+buildmaster, this holds build status information. On the buildslave,
+this is where checkouts, compiles, and tests are run.
+
+ <br><dt><code>factory</code><dd>This is a <code>buildbot.process.factory.BuildFactory</code> instance which
+controls how the build is performed. Full details appear in their own
+chapter, See <a href="#Build-Process">Build Process</a>. Parameters like the location of the CVS
+repository and the compile-time options used for the build are
+generally provided as arguments to the factory's constructor.
+
+ </dl>
+
+ <p>Other optional keys may be set on each Builder:
+
+ <dl>
+<dt><code>category</code><dd>If provided, this is a string that identifies a category for the
+builder to be a part of. Status clients can limit themselves to a
+subset of the available categories. A common use for this is to add
+new builders to your setup (for a new module, or for a new buildslave)
+that do not work correctly yet and allow you to integrate them with
+the active builders. You can put these new builders in a test
+category, make your main status clients ignore them, and have only
+private status clients pick them up. As soon as they work, you can
+move them over to the active category.
+
+ </dl>
+
+<div class="node">
+<p><hr>
+<a name="Defining-Status-Targets"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Debug-options">Debug options</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Defining-Builders">Defining Builders</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Configuration">Configuration</a>
+
+</div>
+
+<h3 class="section">4.12 Defining Status Targets</h3>
+
+<p>The Buildmaster has a variety of ways to present build status to
+various users. Each such delivery method is a &ldquo;Status Target&rdquo; object
+in the configuration's <code>status</code> list. To add status targets, you
+just append more objects to this list:
+
+ <p><a name="index-c_005b_0027status_0027_005d-39"></a>
+<pre class="example"> c['status'] = []
+
+ from buildbot.status import html
+ c['status'].append(html.Waterfall(http_port=8010))
+
+ from buildbot.status import mail
+ m = mail.MailNotifier(fromaddr="buildbot@localhost",
+ extraRecipients=["builds@lists.example.com"],
+ sendToInterestedUsers=False)
+ c['status'].append(m)
+
+ from buildbot.status import words
+ c['status'].append(words.IRC(host="irc.example.com", nick="bb",
+ channels=["#example"]))
+</pre>
+ <p>Status delivery has its own chapter, See <a href="#Status-Delivery">Status Delivery</a>, in which
+all the built-in status targets are documented.
+
+<div class="node">
+<p><hr>
+<a name="Debug-options"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Defining-Status-Targets">Defining Status Targets</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Configuration">Configuration</a>
+
+</div>
+
+<h3 class="section">4.13 Debug options</h3>
+
+<p><a name="index-c_005b_0027debugPassword_0027_005d-40"></a>If you set <code>c['debugPassword']</code>, then you can connect to the
+buildmaster with the diagnostic tool launched by <code>buildbot
+debugclient MASTER:PORT</code>. From this tool, you can reload the config
+file, manually force builds, and inject changes, which may be useful
+for testing your buildmaster without actually committing changes to
+your repository (or before you have the Change Sources set up). The
+debug tool uses the same port number as the slaves do:
+<code>c['slavePortnum']</code>, and is authenticated with this password.
+
+<pre class="example"> c['debugPassword'] = "debugpassword"
+</pre>
+ <p><a name="index-c_005b_0027manhole_0027_005d-41"></a>If you set <code>c['manhole']</code> to an instance of one of the classes in
+<code>buildbot.manhole</code>, you can telnet or ssh into the buildmaster
+and get an interactive Python shell, which may be useful for debugging
+buildbot internals. It is probably only useful for buildbot
+developers. It exposes full access to the buildmaster's account
+(including the ability to modify and delete files), so it should not
+be enabled with a weak or easily guessable password.
+
+ <p>There are three separate <code>Manhole</code> classes. Two of them use SSH,
+one uses unencrypted telnet. Two of them use a username+password
+combination to grant access, one of them uses an SSH-style
+<samp><span class="file">authorized_keys</span></samp> file which contains a list of ssh public keys.
+
+ <dl>
+<dt><code>manhole.AuthorizedKeysManhole</code><dd>You construct this with the name of a file that contains one SSH
+public key per line, just like <samp><span class="file">~/.ssh/authorized_keys</span></samp>. If you
+provide a non-absolute filename, it will be interpreted relative to
+the buildmaster's base directory.
+
+ <br><dt><code>manhole.PasswordManhole</code><dd>This one accepts SSH connections but asks for a username and password
+when authenticating. It accepts only one such pair.
+
+ <br><dt><code>manhole.TelnetManhole</code><dd>This accepts regular unencrypted telnet connections, and asks for a
+username/password pair before providing access. Because this
+username/password is transmitted in the clear, and because Manhole
+access to the buildmaster is equivalent to granting full shell
+privileges to both the buildmaster and all the buildslaves (and to all
+accounts which then run code produced by the buildslaves), it is
+highly recommended that you use one of the SSH manholes instead.
+
+ </dl>
+
+<pre class="example"> # some examples:
+ from buildbot import manhole
+ c['manhole'] = manhole.AuthorizedKeysManhole(1234, "authorized_keys")
+ c['manhole'] = manhole.PasswordManhole(1234, "alice", "mysecretpassword")
+ c['manhole'] = manhole.TelnetManhole(1234, "bob", "snoop_my_password_please")
+</pre>
+ <p>The <code>Manhole</code> instance can be configured to listen on a specific
+port. You may wish to have this listening port bind to the loopback
+interface (sometimes known as &ldquo;lo0&rdquo;, &ldquo;localhost&rdquo;, or 127.0.0.1) to
+restrict access to clients which are running on the same host.
+
+<pre class="example"> from buildbot.manhole import PasswordManhole
+ c['manhole'] = PasswordManhole("tcp:9999:interface=127.0.0.1","admin","passwd")
+</pre>
+ <p>To have the <code>Manhole</code> listen on all interfaces, use
+<code>"tcp:9999"</code> or simply 9999. This port specification uses
+<code>twisted.application.strports</code>, so you can make it listen on SSL
+or even UNIX-domain sockets if you want.
+
+ <p>Note that using any Manhole requires that the TwistedConch package be
+installed, and that you be using Twisted version 2.0 or later.
+
+ <p>The buildmaster's SSH server will use a different host key than the
+normal sshd running on a typical unix host. This will cause the ssh
+client to complain about a &ldquo;host key mismatch&rdquo;, because it does not
+realize there are two separate servers running on the same host. To
+avoid this, use a clause like the following in your <samp><span class="file">.ssh/config</span></samp>
+file:
+
+<pre class="example"> Host remotehost-buildbot
+ HostName remotehost
+ HostKeyAlias remotehost-buildbot
+ Port 9999
+ # use 'user' if you use PasswordManhole and your name is not 'admin'.
+ # if you use AuthorizedKeysManhole, this probably doesn't matter.
+ User admin
+</pre>
+ <div class="node">
+<p><hr>
+<a name="Getting-Source-Code-Changes"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Build-Process">Build Process</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Configuration">Configuration</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="chapter">5 Getting Source Code Changes</h2>
+
+<p>The most common way to use the Buildbot is centered around the idea of
+<code>Source Trees</code>: a directory tree filled with source code of some form
+which can be compiled and/or tested. Some projects use languages that don't
+involve any compilation step: nevertheless there may be a <code>build</code> phase
+where files are copied or rearranged into a form that is suitable for
+installation. Some projects do not have unit tests, and the Buildbot is
+merely helping to make sure that the sources can compile correctly. But in
+all of these cases, the thing-being-tested is a single source tree.
+
+ <p>A Version Control System maintains a source tree, and tells the
+buildmaster when it changes. The first step of each Build is typically
+to acquire a copy of some version of this tree.
+
+ <p>This chapter describes how the Buildbot learns about what Changes have
+occurred. For more information on VC systems and Changes, see
+<a href="#Version-Control-Systems">Version Control Systems</a>.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Change-Sources">Change Sources</a>
+<li><a accesskey="2" href="#Choosing-ChangeSources">Choosing ChangeSources</a>
+<li><a accesskey="3" href="#CVSToys-_002d-PBService">CVSToys - PBService</a>
+<li><a accesskey="4" href="#Mail_002dparsing-ChangeSources">Mail-parsing ChangeSources</a>
+<li><a accesskey="5" href="#PBChangeSource">PBChangeSource</a>
+<li><a accesskey="6" href="#P4Source">P4Source</a>
+<li><a accesskey="7" href="#BonsaiPoller">BonsaiPoller</a>
+<li><a accesskey="8" href="#SVNPoller">SVNPoller</a>
+<li><a accesskey="9" href="#MercurialHook">MercurialHook</a>
+<li><a href="#Bzr-Hook">Bzr Hook</a>
+<li><a href="#Bzr-Poller">Bzr Poller</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Change-Sources"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Choosing-ChangeSources">Choosing ChangeSources</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Getting-Source-Code-Changes">Getting Source Code Changes</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Getting-Source-Code-Changes">Getting Source Code Changes</a>
+
+</div>
+
+<h3 class="section">5.1 Change Sources</h3>
+
+<!-- TODO: rework this, the one-buildmaster-one-tree thing isn't quite -->
+<!-- so narrow-minded anymore -->
+<p>Each Buildmaster watches a single source tree. Changes can be provided
+by a variety of ChangeSource types, however any given project will
+typically have only a single ChangeSource active. This section
+provides a description of all available ChangeSource types and
+explains how to set up each of them.
+
+ <p>There are a variety of ChangeSources available, some of which are
+meant to be used in conjunction with other tools to deliver Change
+events from the VC repository to the buildmaster.
+
+ <ul>
+<li>CVSToys
+This ChangeSource opens a TCP connection from the buildmaster to a
+waiting FreshCVS daemon that lives on the repository machine, and
+subscribes to hear about Changes.
+
+ <li>MaildirSource
+This one watches a local maildir-format inbox for email sent out by
+the repository when a change is made. When a message arrives, it is
+parsed to create the Change object. A variety of parsing functions are
+available to accommodate different email-sending tools.
+
+ <li>PBChangeSource
+This ChangeSource listens on a local TCP socket for inbound
+connections from a separate tool. Usually, this tool would be run on
+the VC repository machine in a commit hook. It is expected to connect
+to the TCP socket and send a Change message over the network
+connection. The <samp><span class="command">buildbot sendchange</span></samp> command is one example
+of a tool that knows how to send these messages, so you can write a
+commit script for your VC system that calls it to deliver the Change.
+There are other tools in the contrib/ directory that use the same
+protocol.
+
+ </ul>
+
+ <p>As a quick guide, here is a list of VC systems and the ChangeSources
+that might be useful with them. All of these ChangeSources are in the
+<code>buildbot.changes</code> module.
+
+ <dl>
+<dt><code>CVS</code><dd>
+ <ul>
+<li>freshcvs.FreshCVSSource (connected via TCP to the freshcvs daemon)
+<li>mail.FCMaildirSource (watching for email sent by a freshcvs daemon)
+<li>mail.BonsaiMaildirSource (watching for email sent by Bonsai)
+<li>mail.SyncmailMaildirSource (watching for email sent by syncmail)
+<li>pb.PBChangeSource (listening for connections from <code>buildbot
+sendchange</code> run in a loginfo script)
+<li>pb.PBChangeSource (listening for connections from a long-running
+<code>contrib/viewcvspoll.py</code> polling process which examines the ViewCVS
+database directly)
+</ul>
+
+ <br><dt><code>SVN</code><dd>
+ <ul>
+<li>pb.PBChangeSource (listening for connections from
+<code>contrib/svn_buildbot.py</code> run in a postcommit script)
+<li>pb.PBChangeSource (listening for connections from a long-running
+<code>contrib/svn_watcher.py</code> or <code>contrib/svnpoller.py</code> polling
+process)
+<li>mail.SVNCommitEmailMaildirSource (watching for email sent by commit-email.pl)
+<li>svnpoller.SVNPoller (polling the SVN repository)
+</ul>
+
+ <br><dt><code>Darcs</code><dd>
+ <ul>
+<li>pb.PBChangeSource (listening for connections from
+<code>contrib/darcs_buildbot.py</code> in a commit script)
+</ul>
+
+ <br><dt><code>Mercurial</code><dd>
+ <ul>
+<li>pb.PBChangeSource (listening for connections from
+<code>contrib/hg_buildbot.py</code> run in an 'incoming' hook)
+<li>pb.PBChangeSource (listening for connections from
+<code>buildbot/changes/hgbuildbot.py</code> run as an in-process 'changegroup'
+hook)
+</ul>
+
+ <br><dt><code>Arch/Bazaar</code><dd>
+ <ul>
+<li>pb.PBChangeSource (listening for connections from
+<code>contrib/arch_buildbot.py</code> run in a commit hook)
+</ul>
+
+ <br><dt><code>Bzr (the newer Bazaar)</code><dd>
+ <ul>
+<li>pb.PBChangeSource (listening for connections from
+<code>contrib/bzr_buildbot.py</code> run in a post-change-branch-tip or commit hook)
+<li><code>contrib/bzr_buildbot.py</code>'s BzrPoller (polling the Bzr repository)
+</ul>
+
+ <br><dt><code>Git</code><dd>
+ <ul>
+<li>pb.PBChangeSource (listening for connections from
+<code>contrib/git_buildbot.py</code> run in the post-receive hook)
+</ul>
+
+ </dl>
+
+ <p>All VC systems can be driven by a PBChangeSource and the
+<code>buildbot sendchange</code> tool run from some form of commit script.
+If you write an email parsing function, they can also all be driven by
+a suitable <code>MaildirSource</code>.
+
+<div class="node">
+<p><hr>
+<a name="Choosing-ChangeSources"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#CVSToys-_002d-PBService">CVSToys - PBService</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Change-Sources">Change Sources</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Getting-Source-Code-Changes">Getting Source Code Changes</a>
+
+</div>
+
+<h3 class="section">5.2 Choosing ChangeSources</h3>
+
+<p>The <code>master.cfg</code> configuration file has a dictionary key named
+<code>BuildmasterConfig['change_source']</code>, which holds the active
+<code>IChangeSource</code> object. The config file will typically create an
+object from one of the classes described below and stuff it into this
+key.
+
+ <p>Each buildmaster typically has just a single ChangeSource, since it is
+only watching a single source tree. But if, for some reason, you need
+multiple sources, just set <code>c['change_source']</code> to a list of
+ChangeSources; it will accept that too.
+
+<pre class="example"> s = FreshCVSSourceNewcred(host="host", port=4519,
+ user="alice", passwd="secret",
+ prefix="Twisted")
+ BuildmasterConfig['change_source'] = [s]
+</pre>
+ <p>Each source tree has a nominal <code>top</code>. Each Change has a list of
+filenames, which are all relative to this top location. The
+ChangeSource is responsible for doing whatever is necessary to
+accomplish this. Most sources have a <code>prefix</code> argument: a partial
+pathname which is stripped from the front of all filenames provided to
+that <code>ChangeSource</code>. Files which are outside this sub-tree are
+ignored by the changesource: it does not generate Changes for those
+files.
+
+<div class="node">
+<p><hr>
+<a name="CVSToys---PBService"></a>
+<a name="CVSToys-_002d-PBService"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Mail_002dparsing-ChangeSources">Mail-parsing ChangeSources</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Choosing-ChangeSources">Choosing ChangeSources</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Getting-Source-Code-Changes">Getting Source Code Changes</a>
+
+</div>
+
+<h3 class="section">5.3 CVSToys - PBService</h3>
+
+<p><a name="index-buildbot_002echanges_002efreshcvs_002eFreshCVSSource-42"></a>
+The <a href="http://purl.net/net/CVSToys">CVSToys</a> package provides a
+server which runs on the machine that hosts the CVS repository it
+watches. It has a variety of ways to distribute commit notifications,
+and offers a flexible regexp-based way to filter out uninteresting
+changes. One of the notification options is named <code>PBService</code> and
+works by listening on a TCP port for clients. These clients subscribe
+to hear about commit notifications.
+
+ <p>The buildmaster has a CVSToys-compatible <code>PBService</code> client built
+in. There are two versions of it, one for old versions of CVSToys
+(1.0.9 and earlier) which used the <code>oldcred</code> authentication
+framework, and one for newer versions (1.0.10 and later) which use
+<code>newcred</code>. Both are classes in the
+<code>buildbot.changes.freshcvs</code> package.
+
+ <p><code>FreshCVSSourceNewcred</code> objects are created with the following
+parameters:
+
+ <dl>
+<dt>&lsquo;<samp><code>host</code><span class="samp"> and </span><code>port</code></samp>&rsquo;<dd>these specify where the CVSToys server can be reached
+
+ <br><dt>&lsquo;<samp><code>user</code><span class="samp"> and </span><code>passwd</code></samp>&rsquo;<dd>these specify the login information for the CVSToys server
+(<code>freshcvs</code>). These must match the server's values, which are
+defined in the <code>freshCfg</code> configuration file (which lives in the
+CVSROOT directory of the repository).
+
+ <br><dt>&lsquo;<samp><code>prefix</code></samp>&rsquo;<dd>this is the prefix to be found and stripped from filenames delivered
+by the CVSToys server. Most projects live in sub-directories of the
+main repository, as siblings of the CVSROOT sub-directory, so
+typically this prefix is set to that top sub-directory name.
+
+ </dl>
+
+<h3 class="heading">Example</h3>
+
+<p>To set up the freshCVS server, add a statement like the following to
+your <samp><span class="file">freshCfg</span></samp> file:
+
+<pre class="example"> pb = ConfigurationSet([
+ (None, None, None, PBService(userpass=('foo', 'bar'), port=4519)),
+ ])
+</pre>
+ <p>This will announce all changes to a client which connects to port 4519
+using a username of 'foo' and a password of 'bar'.
+
+ <p>Then add a clause like this to your buildmaster's <samp><span class="file">master.cfg</span></samp>:
+
+<pre class="example"> BuildmasterConfig['change_source'] = FreshCVSSource("cvs.example.com", 4519,
+ "foo", "bar",
+ prefix="glib/")
+</pre>
+ <p>where "cvs.example.com" is the host that is running the FreshCVS daemon, and
+"glib" is the top-level directory (relative to the repository's root) where
+all your source code lives. Most projects keep one or more projects in the
+same repository (along with CVSROOT/ to hold admin files like loginfo and
+freshCfg); the prefix= argument tells the buildmaster to ignore everything
+outside that directory, and to strip that common prefix from all pathnames
+it handles.
+
+<div class="node">
+<p><hr>
+<a name="Mail-parsing-ChangeSources"></a>
+<a name="Mail_002dparsing-ChangeSources"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#PBChangeSource">PBChangeSource</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#CVSToys-_002d-PBService">CVSToys - PBService</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Getting-Source-Code-Changes">Getting Source Code Changes</a>
+
+</div>
+
+<h3 class="section">5.4 Mail-parsing ChangeSources</h3>
+
+<p>Many projects publish information about changes to their source tree
+by sending an email message out to a mailing list, frequently named
+PROJECT-commits or PROJECT-changes. Each message usually contains a
+description of the change (who made the change, which files were
+affected) and sometimes a copy of the diff. Humans can subscribe to
+this list to stay informed about what's happening to the source tree.
+
+ <p>The Buildbot can also be subscribed to a -commits mailing list, and
+can trigger builds in response to Changes that it hears about. The
+buildmaster admin needs to arrange for these email messages to arrive
+in a place where the buildmaster can find them, and configure the
+buildmaster to parse the messages correctly. Once that is in place,
+the email parser will create Change objects and deliver them to the
+Schedulers (see <a href="#Change-Sources-and-Schedulers">Change Sources and Schedulers</a>) just
+like any other ChangeSource.
+
+ <p>There are two components to setting up an email-based ChangeSource.
+The first is to route the email messages to the buildmaster, which is
+done by dropping them into a &ldquo;maildir&rdquo;. The second is to actually
+parse the messages, which is highly dependent upon the tool that was
+used to create them. Each VC system has a collection of favorite
+change-emailing tools, and each has a slightly different format, so
+each has a different parsing function. There is a separate
+ChangeSource variant for each parsing function.
+
+ <p>Once you've chosen a maildir location and a parsing function, create
+the change source and put it in <code>c['change_source']</code>:
+
+<pre class="example"> from buildbot.changes.mail import SyncmailMaildirSource
+ c['change_source'] = SyncmailMaildirSource("~/maildir-buildbot",
+ prefix="/trunk/")
+</pre>
+ <ul class="menu">
+<li><a accesskey="1" href="#Subscribing-the-Buildmaster">Subscribing the Buildmaster</a>
+<li><a accesskey="2" href="#Using-Maildirs">Using Maildirs</a>
+<li><a accesskey="3" href="#Parsing-Email-Change-Messages">Parsing Email Change Messages</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Subscribing-the-Buildmaster"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Using-Maildirs">Using Maildirs</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Mail_002dparsing-ChangeSources">Mail-parsing ChangeSources</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Mail_002dparsing-ChangeSources">Mail-parsing ChangeSources</a>
+
+</div>
+
+<h4 class="subsection">5.4.1 Subscribing the Buildmaster</h4>
+
+<p>The recommended way to install the buildbot is to create a dedicated
+account for the buildmaster. If you do this, the account will probably
+have a distinct email address (perhaps
+<a href="mailto:buildmaster@example.org">buildmaster@example.org</a>). Then just arrange for this
+account's email to be delivered to a suitable maildir (described in
+the next section).
+
+ <p>If the buildbot does not have its own account, &ldquo;extension addresses&rdquo;
+can be used to distinguish between email intended for the buildmaster
+and email intended for the rest of the account. In most modern MTAs,
+the e.g. <code>foo@example.org</code> account has control over every email
+address at example.org which begins with "foo", such that email
+addressed to <a href="mailto:account-foo@example.org">account-foo@example.org</a> can be delivered to a
+different destination than <a href="mailto:account-bar@example.org">account-bar@example.org</a>. qmail
+does this by using separate .qmail files for the two destinations
+(<samp><span class="file">.qmail-foo</span></samp> and <samp><span class="file">.qmail-bar</span></samp>, with <samp><span class="file">.qmail</span></samp>
+controlling the base address and <samp><span class="file">.qmail-default</span></samp> controlling all
+other extensions). Other MTAs have similar mechanisms.
+
+ <p>Thus you can assign an extension address like
+<a href="mailto:foo-buildmaster@example.org">foo-buildmaster@example.org</a> to the buildmaster, and retain
+<a href="mailto:foo@example.org">foo@example.org</a> for your own use.
+
+<div class="node">
+<p><hr>
+<a name="Using-Maildirs"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Parsing-Email-Change-Messages">Parsing Email Change Messages</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Subscribing-the-Buildmaster">Subscribing the Buildmaster</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Mail_002dparsing-ChangeSources">Mail-parsing ChangeSources</a>
+
+</div>
+
+<h4 class="subsection">5.4.2 Using Maildirs</h4>
+
+<p>A &ldquo;maildir&rdquo; is a simple directory structure originally developed for
+qmail that allows safe atomic update without locking. Create a base
+directory with three subdirectories: &ldquo;new&rdquo;, &ldquo;tmp&rdquo;, and &ldquo;cur&rdquo;.
+When messages arrive, they are put into a uniquely-named file (using
+pids, timestamps, and random numbers) in &ldquo;tmp&rdquo;. When the file is
+complete, it is atomically renamed into &ldquo;new&rdquo;. Eventually the
+buildmaster notices the file in &ldquo;new&rdquo;, reads and parses the
+contents, then moves it into &ldquo;cur&rdquo;. A cronjob can be used to delete
+files in &ldquo;cur&rdquo; at leisure.
+
+ <p>Maildirs are frequently created with the <samp><span class="command">maildirmake</span></samp> tool,
+but a simple <samp><span class="command">mkdir -p ~/MAILDIR/{cur,new,tmp}</span></samp> is pretty much
+equivalent.
+
+ <p>Many modern MTAs can deliver directly to maildirs. The usual .forward
+or .procmailrc syntax is to name the base directory with a trailing
+slash, so something like <code>~/MAILDIR/</code> . qmail and postfix are
+maildir-capable MTAs, and procmail is a maildir-capable MDA (Mail
+Delivery Agent).
+
+ <p>For MTAs which cannot put files into maildirs directly, the
+&ldquo;safecat&rdquo; tool can be executed from a .forward file to accomplish
+the same thing.
+
+ <p>The Buildmaster uses the linux DNotify facility to receive immediate
+notification when the maildir's &ldquo;new&rdquo; directory has changed. When
+this facility is not available, it polls the directory for new
+messages, every 10 seconds by default.
+
+<div class="node">
+<p><hr>
+<a name="Parsing-Email-Change-Messages"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Using-Maildirs">Using Maildirs</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Mail_002dparsing-ChangeSources">Mail-parsing ChangeSources</a>
+
+</div>
+
+<h4 class="subsection">5.4.3 Parsing Email Change Messages</h4>
+
+<p>The second component to setting up an email-based ChangeSource is to
+parse the actual notices. This is highly dependent upon the VC system
+and commit script in use.
+
+ <p>A couple of common tools used to create these change emails are:
+
+ <dl>
+<dt>&lsquo;<samp><span class="samp">CVS</span></samp>&rsquo;<dd>
+ <dl>
+<dt>&lsquo;<samp><span class="samp">CVSToys MailNotifier</span></samp>&rsquo;<dd><a href="#FCMaildirSource">FCMaildirSource</a>
+<br><dt>&lsquo;<samp><span class="samp">Bonsai notification</span></samp>&rsquo;<dd><a href="#BonsaiMaildirSource">BonsaiMaildirSource</a>
+<br><dt>&lsquo;<samp><span class="samp">syncmail</span></samp>&rsquo;<dd><a href="#SyncmailMaildirSource">SyncmailMaildirSource</a>
+</dl>
+
+ <br><dt>&lsquo;<samp><span class="samp">SVN</span></samp>&rsquo;<dd>
+ <dl>
+<dt>&lsquo;<samp><span class="samp">svnmailer</span></samp>&rsquo;<dd>http://opensource.perlig.de/en/svnmailer/
+<br><dt>&lsquo;<samp><span class="samp">commit-email.pl</span></samp>&rsquo;<dd><a href="#SVNCommitEmailMaildirSource">SVNCommitEmailMaildirSource</a>
+</dl>
+
+ <br><dt>&lsquo;<samp><span class="samp">Mercurial</span></samp>&rsquo;<dd>
+ <dl>
+<dt>&lsquo;<samp><span class="samp">NotifyExtension</span></samp>&rsquo;<dd>http://www.selenic.com/mercurial/wiki/index.cgi/NotifyExtension
+</dl>
+
+ <br><dt>&lsquo;<samp><span class="samp">Git</span></samp>&rsquo;<dd>
+ <dl>
+<dt>&lsquo;<samp><span class="samp">post-receive-email</span></samp>&rsquo;<dd>http://git.kernel.org/?p=git/git.git;a=blob;f=contrib/hooks/post-receive-email;hb=HEAD
+</dl>
+
+ </dl>
+
+ <p>The following sections describe the parsers available for each of
+these tools.
+
+ <p>Most of these parsers accept a <code>prefix=</code> argument, which is used
+to limit the set of files that the buildmaster pays attention to. This
+is most useful for systems like CVS and SVN which put multiple
+projects in a single repository (or use repository names to indicate
+branches). Each filename that appears in the email is tested against
+the prefix: if the filename does not start with the prefix, the file
+is ignored. If the filename <em>does</em> start with the prefix, that
+prefix is stripped from the filename before any further processing is
+done. Thus the prefix usually ends with a slash.
+
+<ul class="menu">
+<li><a accesskey="1" href="#FCMaildirSource">FCMaildirSource</a>
+<li><a accesskey="2" href="#SyncmailMaildirSource">SyncmailMaildirSource</a>
+<li><a accesskey="3" href="#BonsaiMaildirSource">BonsaiMaildirSource</a>
+<li><a accesskey="4" href="#SVNCommitEmailMaildirSource">SVNCommitEmailMaildirSource</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="FCMaildirSource"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#SyncmailMaildirSource">SyncmailMaildirSource</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Parsing-Email-Change-Messages">Parsing Email Change Messages</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Parsing-Email-Change-Messages">Parsing Email Change Messages</a>
+
+</div>
+
+<h5 class="subsubsection">5.4.3.1 FCMaildirSource</h5>
+
+<p><a name="index-buildbot_002echanges_002email_002eFCMaildirSource-43"></a>
+http://twistedmatrix.com/users/acapnotic/wares/code/CVSToys/
+
+ <p>This parser works with the CVSToys <code>MailNotification</code> action,
+which will send email to a list of recipients for each commit. This
+tends to work better than using <code>/bin/mail</code> from within the
+CVSROOT/loginfo file directly, as CVSToys will batch together all
+files changed during the same CVS invocation, and can provide more
+information (like creating a ViewCVS URL for each file changed).
+
+ <p>The Buildbot's <code>FCMaildirSource</code> knows how to parse these CVSToys
+messages and turn them into Change objects. It can be given two
+parameters: the directory name of the maildir root, and the prefix to
+strip.
+
+<pre class="example"> from buildbot.changes.mail import FCMaildirSource
+ c['change_source'] = FCMaildirSource("~/maildir-buildbot")
+</pre>
+ <div class="node">
+<p><hr>
+<a name="SyncmailMaildirSource"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#BonsaiMaildirSource">BonsaiMaildirSource</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#FCMaildirSource">FCMaildirSource</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Parsing-Email-Change-Messages">Parsing Email Change Messages</a>
+
+</div>
+
+<h5 class="subsubsection">5.4.3.2 SyncmailMaildirSource</h5>
+
+<p><a name="index-buildbot_002echanges_002email_002eSyncmailMaildirSource-44"></a>
+http://sourceforge.net/projects/cvs-syncmail
+
+ <p><code>SyncmailMaildirSource</code> knows how to parse the message format used by
+the CVS &ldquo;syncmail&rdquo; script.
+
+<pre class="example"> from buildbot.changes.mail import SyncmailMaildirSource
+ c['change_source'] = SyncmailMaildirSource("~/maildir-buildbot")
+</pre>
+ <div class="node">
+<p><hr>
+<a name="BonsaiMaildirSource"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#SVNCommitEmailMaildirSource">SVNCommitEmailMaildirSource</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#SyncmailMaildirSource">SyncmailMaildirSource</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Parsing-Email-Change-Messages">Parsing Email Change Messages</a>
+
+</div>
+
+<h5 class="subsubsection">5.4.3.3 BonsaiMaildirSource</h5>
+
+<p><a name="index-buildbot_002echanges_002email_002eBonsaiMaildirSource-45"></a>
+http://www.mozilla.org/bonsai.html
+
+ <p><code>BonsaiMaildirSource</code> parses messages sent out by Bonsai, the CVS
+tree-management system built by Mozilla.
+
+<pre class="example"> from buildbot.changes.mail import BonsaiMaildirSource
+ c['change_source'] = BonsaiMaildirSource("~/maildir-buildbot")
+</pre>
+ <div class="node">
+<p><hr>
+<a name="SVNCommitEmailMaildirSource"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#BonsaiMaildirSource">BonsaiMaildirSource</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Parsing-Email-Change-Messages">Parsing Email Change Messages</a>
+
+</div>
+
+<h5 class="subsubsection">5.4.3.4 SVNCommitEmailMaildirSource</h5>
+
+<p><a name="index-buildbot_002echanges_002email_002eSVNCommitEmailMaildirSource-46"></a>
+<code>SVNCommitEmailMaildirSource</code> parses messages sent out by the
+<code>commit-email.pl</code> script, which is included in the Subversion
+distribution.
+
+ <p>It does not currently handle branches: all of the Change objects that
+it creates will be associated with the default (i.e. trunk) branch.
+
+<pre class="example"> from buildbot.changes.mail import SVNCommitEmailMaildirSource
+ c['change_source'] = SVNCommitEmailMaildirSource("~/maildir-buildbot")
+</pre>
+ <div class="node">
+<p><hr>
+<a name="PBChangeSource"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#P4Source">P4Source</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Mail_002dparsing-ChangeSources">Mail-parsing ChangeSources</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Getting-Source-Code-Changes">Getting Source Code Changes</a>
+
+</div>
+
+<h3 class="section">5.5 PBChangeSource</h3>
+
+<p><a name="index-buildbot_002echanges_002epb_002ePBChangeSource-47"></a>
+The last kind of ChangeSource actually listens on a TCP port for
+clients to connect and push change notices <em>into</em> the
+Buildmaster. This is used by the built-in <code>buildbot sendchange</code>
+notification tool, as well as the VC-specific
+<samp><span class="file">contrib/svn_buildbot.py</span></samp>, <samp><span class="file">contrib/arch_buildbot.py</span></samp>,
+<samp><span class="file">contrib/hg_buildbot.py</span></samp> tools, and the
+<code>buildbot.changes.hgbuildbot</code> hook. These tools are run by the
+repository (in a commit hook script), and connect to the buildmaster
+directly each time a file is committed. This is also useful for
+creating new kinds of change sources that work on a <code>push</code> model
+instead of some kind of subscription scheme, for example a script
+which is run out of an email .forward file.
+
+ <p>This ChangeSource can be configured to listen on its own TCP port, or
+it can share the port that the buildmaster is already using for the
+buildslaves to connect. (This is possible because the
+<code>PBChangeSource</code> uses the same protocol as the buildslaves, and
+they can be distinguished by the <code>username</code> attribute used when
+the initial connection is established). It might be useful to have it
+listen on a different port if, for example, you wanted to establish
+different firewall rules for that port. You could allow only the SVN
+repository machine access to the <code>PBChangeSource</code> port, while
+allowing only the buildslave machines access to the slave port. Or you
+could just expose one port and run everything over it. <em>Note:
+this feature is not yet implemented, the PBChangeSource will always
+share the slave port and will always have a </em><code>user</code><em> name of
+</em><code>change</code><em>, and a passwd of </em><code>changepw</code><em>. These limitations will
+be removed in the future.</em>.
+
+ <p>The <code>PBChangeSource</code> is created with the following arguments. All
+are optional.
+
+ <dl>
+<dt>&lsquo;<samp><code>port</code></samp>&rsquo;<dd>which port to listen on. If <code>None</code> (which is the default), it
+shares the port used for buildslave connections. <em>Not
+Implemented, always set to </em><code>None</code>.
+
+ <br><dt>&lsquo;<samp><code>user</code><span class="samp"> and </span><code>passwd</code></samp>&rsquo;<dd>The user/passwd account information that the client program must use
+to connect. Defaults to <code>change</code> and <code>changepw</code>. <em>Not
+Implemented, </em><code>user</code><em> is currently always set to </em><code>change</code><em>,
+</em><code>passwd</code><em> is always set to </em><code>changepw</code>.
+
+ <br><dt>&lsquo;<samp><code>prefix</code></samp>&rsquo;<dd>The prefix to be found and stripped from filenames delivered over the
+connection. Any filenames which do not start with this prefix will be
+removed. If all the filenames in a given Change are removed, then that
+whole Change will be dropped. This string should probably end with a
+directory separator.
+
+ <p>This is useful for changes coming from version control systems that
+represent branches as parent directories within the repository (like
+SVN and Perforce). Use a prefix of 'trunk/' or
+'project/branches/foobranch/' to only follow one branch and to get
+correct tree-relative filenames. Without a prefix, the PBChangeSource
+will probably deliver Changes with filenames like <samp><span class="file">trunk/foo.c</span></samp>
+instead of just <samp><span class="file">foo.c</span></samp>. Of course this also depends upon the
+tool sending the Changes in (like <samp><span class="command">buildbot sendchange</span></samp>) and
+what filenames it is delivering: that tool may be filtering and
+stripping prefixes at the sending end.
+
+ </dl>
+
+<div class="node">
+<p><hr>
+<a name="P4Source"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#BonsaiPoller">BonsaiPoller</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#PBChangeSource">PBChangeSource</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Getting-Source-Code-Changes">Getting Source Code Changes</a>
+
+</div>
+
+<h3 class="section">5.6 P4Source</h3>
+
+<p><a name="index-buildbot_002echanges_002ep4poller_002eP4Source-48"></a>
+The <code>P4Source</code> periodically polls a <a href="http://www.perforce.com/">Perforce</a> depot for changes. It accepts the following arguments:
+
+ <dl>
+<dt>&lsquo;<samp><code>p4base</code></samp>&rsquo;<dd>The base depot path to watch, without the trailing '/...'.
+
+ <br><dt>&lsquo;<samp><code>p4port</code></samp>&rsquo;<dd>The Perforce server to connect to (as host:port).
+
+ <br><dt>&lsquo;<samp><code>p4user</code></samp>&rsquo;<dd>The Perforce user.
+
+ <br><dt>&lsquo;<samp><code>p4passwd</code></samp>&rsquo;<dd>The Perforce password.
+
+ <br><dt>&lsquo;<samp><code>p4bin</code></samp>&rsquo;<dd>An optional string parameter. Specify the location of the perforce command
+line binary (p4). You only need to do this if the perforce binary is not
+in the path of the buildbot user. Defaults to &ldquo;p4&rdquo;.
+
+ <br><dt>&lsquo;<samp><code>split_file</code></samp>&rsquo;<dd>A function that maps a pathname, without the leading <code>p4base</code>, to a
+(branch, filename) tuple. The default just returns (None, branchfile),
+which effectively disables branch support. You should supply a function
+which understands your repository structure.
+
+ <br><dt>&lsquo;<samp><code>pollinterval</code></samp>&rsquo;<dd>How often to poll, in seconds. Defaults to 600 (10 minutes).
+
+ <br><dt>&lsquo;<samp><code>histmax</code></samp>&rsquo;<dd>The maximum number of changes to inspect at a time. If more than this
+number occur since the last poll, older changes will be silently
+ignored.
+</dl>
+
+<h3 class="heading">Example</h3>
+
+<p>This configuration uses the <code>P4PORT</code>, <code>P4USER</code>, and <code>P4PASSWD</code>
+specified in the buildmaster's environment. It watches a project in which the
+branch name is simply the next path component, and the file is all path
+components after.
+
+<pre class="example"> import buildbot.changes.p4poller
+ s = p4poller.P4Source(p4base='//depot/project/',
+ split_file=lambda branchfile: branchfile.split('/',1),
+ )
+ c['change_source'] = s
+</pre>
+ <div class="node">
+<p><hr>
+<a name="BonsaiPoller"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#SVNPoller">SVNPoller</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#P4Source">P4Source</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Getting-Source-Code-Changes">Getting Source Code Changes</a>
+
+</div>
+
+<h3 class="section">5.7 BonsaiPoller</h3>
+
+<p><a name="index-buildbot_002echanges_002ebonsaipoller_002eBonsaiPoller-49"></a>
+The <code>BonsaiPoller</code> periodically polls a Bonsai server. This is a
+CGI script accessed through a web server that provides information
+about a CVS tree, for example the Mozilla bonsai server at
+<a href="http://bonsai.mozilla.org">http://bonsai.mozilla.org</a>. Bonsai servers are usable by both
+humans and machines. In this case, the buildbot's change source forms
+a query which asks about any files in the specified branch which have
+changed since the last query.
+
+ <p>Please take a look at the BonsaiPoller docstring for details about the
+arguments it accepts.
+
+<div class="node">
+<p><hr>
+<a name="SVNPoller"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#MercurialHook">MercurialHook</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#BonsaiPoller">BonsaiPoller</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Getting-Source-Code-Changes">Getting Source Code Changes</a>
+
+</div>
+
+<h3 class="section">5.8 SVNPoller</h3>
+
+<p><a name="index-buildbot_002echanges_002esvnpoller_002eSVNPoller-50"></a>
+The <code>buildbot.changes.svnpoller.SVNPoller</code> is a ChangeSource
+which periodically polls a <a href="http://subversion.tigris.org/">Subversion</a> repository for new revisions, by running the <code>svn
+log</code> command in a subshell. It can watch a single branch or multiple
+branches.
+
+ <p><code>SVNPoller</code> accepts the following arguments:
+
+ <dl>
+<dt><code>svnurl</code><dd>The base URL path to watch, like
+<code>svn://svn.twistedmatrix.com/svn/Twisted/trunk</code>, or
+<code>http://divmod.org/svn/Divmod/</code>, or even
+<code>file:///home/svn/Repository/ProjectA/branches/1.5/</code>. This must
+include the access scheme, the location of the repository (both the
+hostname for remote ones, and any additional directory names necessary
+to get to the repository), and the sub-path within the repository's
+virtual filesystem for the project and branch of interest.
+
+ <p>The <code>SVNPoller</code> will only pay attention to files inside the
+subdirectory specified by the complete svnurl.
+
+ <br><dt><code>split_file</code><dd>A function to convert pathnames into (branch, relative_pathname)
+tuples. Use this to explain your repository's branch-naming policy to
+<code>SVNPoller</code>. This function must accept a single string and return
+a two-entry tuple. There are a few utility functions in
+<code>buildbot.changes.svnpoller</code> that can be used as a
+<code>split_file</code> function, see below for details.
+
+ <p>The default value always returns (None, path), which indicates that
+all files are on the trunk.
+
+ <p>Subclasses of <code>SVNPoller</code> can override the <code>split_file</code>
+method instead of using the <code>split_file=</code> argument.
+
+ <br><dt><code>svnuser</code><dd>An optional string parameter. If set, the <code>--user</code> argument will
+be added to all <code>svn</code> commands. Use this if you have to
+authenticate to the svn server before you can do <code>svn info</code> or
+<code>svn log</code> commands.
+
+ <br><dt><code>svnpasswd</code><dd>Like <code>svnuser</code>, this will cause a <code>--password</code> argument to
+be passed to all svn commands.
+
+ <br><dt><code>pollinterval</code><dd>How often to poll, in seconds. Defaults to 600 (checking once every 10
+minutes). Lower this if you want the buildbot to notice changes
+faster, raise it if you want to reduce the network and CPU load on
+your svn server. Please be considerate of public SVN repositories by
+using a large interval when polling them.
+
+ <br><dt><code>histmax</code><dd>The maximum number of changes to inspect at a time. Every POLLINTERVAL
+seconds, the <code>SVNPoller</code> asks for the last HISTMAX changes and
+looks through them for any ones it does not already know about. If
+more than HISTMAX revisions have been committed since the last poll,
+older changes will be silently ignored. Larger values of histmax will
+cause more time and memory to be consumed on each poll attempt.
+<code>histmax</code> defaults to 100.
+
+ <br><dt><code>svnbin</code><dd>This controls the <code>svn</code> executable to use. If subversion is
+installed in a weird place on your system (outside of the
+buildmaster's <code>$PATH</code>), use this to tell <code>SVNPoller</code> where
+to find it. The default value of &ldquo;svn&rdquo; will almost always be
+sufficient.
+
+ </dl>
+
+<h3 class="heading">Branches</h3>
+
+<p>Each source file that is tracked by a Subversion repository has a
+fully-qualified SVN URL in the following form:
+(REPOURL)(PROJECT-plus-BRANCH)(FILEPATH). When you create the
+<code>SVNPoller</code>, you give it a <code>svnurl</code> value that includes all
+of the REPOURL and possibly some portion of the PROJECT-plus-BRANCH
+string. The <code>SVNPoller</code> is responsible for producing Changes that
+contain a branch name and a FILEPATH (which is relative to the top of
+a checked-out tree). The details of how these strings are split up
+depend upon how your repository names its branches.
+
+<h4 class="subheading">PROJECT/BRANCHNAME/FILEPATH repositories</h4>
+
+<p>One common layout is to have all the various projects that share a
+repository get a single top-level directory each. Then under a given
+project's directory, you get two subdirectories, one named &ldquo;trunk&rdquo;
+and another named &ldquo;branches&rdquo;. Under &ldquo;branches&rdquo; you have a bunch of
+other directories, one per branch, with names like &ldquo;1.5.x&rdquo; and
+&ldquo;testing&rdquo;. It is also common to see directories like &ldquo;tags&rdquo; and
+&ldquo;releases&rdquo; next to &ldquo;branches&rdquo; and &ldquo;trunk&rdquo;.
+
+ <p>For example, the Twisted project has a subversion server on
+&ldquo;svn.twistedmatrix.com&rdquo; that hosts several sub-projects. The
+repository is available through a SCHEME of &ldquo;svn:&rdquo;. The primary
+sub-project is Twisted, of course, with a repository root of
+&ldquo;svn://svn.twistedmatrix.com/svn/Twisted&rdquo;. Another sub-project is
+Informant, with a root of
+&ldquo;svn://svn.twistedmatrix.com/svn/Informant&rdquo;, etc. Inside any
+checked-out Twisted tree, there is a file named bin/trial (which is
+used to run unit test suites).
+
+ <p>The trunk for Twisted is in
+&ldquo;svn://svn.twistedmatrix.com/svn/Twisted/trunk&rdquo;, and the
+fully-qualified SVN URL for the trunk version of <code>trial</code> would be
+&ldquo;svn://svn.twistedmatrix.com/svn/Twisted/trunk/bin/trial&rdquo;. The same
+SVNURL for that file on a branch named &ldquo;1.5.x&rdquo; would be
+&ldquo;svn://svn.twistedmatrix.com/svn/Twisted/branches/1.5.x/bin/trial&rdquo;.
+
+ <p>To set up a <code>SVNPoller</code> that watches the Twisted trunk (and
+nothing else), we would use the following:
+
+<pre class="example"> from buildbot.changes.svnpoller import SVNPoller
+ c['change_source'] = SVNPoller("svn://svn.twistedmatrix.com/svn/Twisted/trunk")
+</pre>
+ <p>In this case, every Change that our <code>SVNPoller</code> produces will
+have <code>.branch=None</code>, to indicate that the Change is on the trunk.
+No other sub-projects or branches will be tracked.
+
+ <p>If we want our ChangeSource to follow multiple branches, we have to do
+two things. First we have to change our <code>svnurl=</code> argument to
+watch more than just &ldquo;.../Twisted/trunk&rdquo;. We will set it to
+&ldquo;.../Twisted&rdquo; so that we'll see both the trunk and all the branches.
+Second, we have to tell <code>SVNPoller</code> how to split the
+(PROJECT-plus-BRANCH)(FILEPATH) strings it gets from the repository
+out into (BRANCH) and (FILEPATH) pairs.
+
+ <p>We do the latter by providing a &ldquo;split_file&rdquo; function. This function
+is responsible for splitting something like
+&ldquo;branches/1.5.x/bin/trial&rdquo; into <code>branch</code>=&ldquo;branches/1.5.x&rdquo; and
+<code>filepath</code>=&ldquo;bin/trial&rdquo;. This function is always given a string
+that names a file relative to the subdirectory pointed to by the
+<code>SVNPoller</code>'s <code>svnurl=</code> argument. It is expected to return a
+(BRANCHNAME, FILEPATH) tuple (in which FILEPATH is relative to the
+branch indicated), or None to indicate that the file is outside any
+project of interest.
+
+ <p>(note that we want to see &ldquo;branches/1.5.x&rdquo; rather than just
+&ldquo;1.5.x&rdquo; because when we perform the SVN checkout, we will probably
+append the branch name to the baseURL, which requires that we keep the
+&ldquo;branches&rdquo; component in there. Other VC schemes use a different
+approach towards branches and may not require this artifact.)
+
+ <p>If your repository uses this same PROJECT/BRANCH/FILEPATH naming
+scheme, the following function will work:
+
+<pre class="example"> def split_file_branches(path):
+ pieces = path.split('/')
+ if pieces[0] == 'trunk':
+ return (None, '/'.join(pieces[1:]))
+ elif pieces[0] == 'branches':
+ return ('/'.join(pieces[0:2]),
+ '/'.join(pieces[2:]))
+ else:
+ return None
+</pre>
+ <p>This function is provided as
+<code>buildbot.changes.svnpoller.split_file_branches</code> for your
+convenience. So to have our Twisted-watching <code>SVNPoller</code> follow
+multiple branches, we would use this:
+
+<pre class="example"> from buildbot.changes.svnpoller import SVNPoller, split_file_branches
+ c['change_source'] = SVNPoller("svn://svn.twistedmatrix.com/svn/Twisted",
+ split_file=split_file_branches)
+</pre>
+ <p>Changes for all sorts of branches (with names like &ldquo;branches/1.5.x&rdquo;,
+and None to indicate the trunk) will be delivered to the Schedulers.
+Each Scheduler is then free to use or ignore each branch as it sees
+fit.
+
+<h4 class="subheading">BRANCHNAME/PROJECT/FILEPATH repositories</h4>
+
+<p>Another common way to organize a Subversion repository is to put the
+branch name at the top, and the projects underneath. This is
+especially frequent when there are a number of related sub-projects
+that all get released in a group.
+
+ <p>For example, Divmod.org hosts a project named &ldquo;Nevow&rdquo; as well as one
+named &ldquo;Quotient&rdquo;. In a checked-out Nevow tree there is a directory
+named &ldquo;formless&rdquo; that contains a python source file named
+&ldquo;webform.py&rdquo;. This repository is accessible via webdav (and thus
+uses an &ldquo;http:&rdquo; scheme) through the divmod.org hostname. There are
+many branches in this repository, and they use a
+(BRANCHNAME)/(PROJECT) naming policy.
+
+ <p>The fully-qualified SVN URL for the trunk version of webform.py is
+<code>http://divmod.org/svn/Divmod/trunk/Nevow/formless/webform.py</code>.
+You can do an <code>svn co</code> with that URL and get a copy of the latest
+version. The 1.5.x branch version of this file would have a URL of
+<code>http://divmod.org/svn/Divmod/branches/1.5.x/Nevow/formless/webform.py</code>.
+The whole Nevow trunk would be checked out with
+<code>http://divmod.org/svn/Divmod/trunk/Nevow</code>, while the Quotient
+trunk would be checked out using
+<code>http://divmod.org/svn/Divmod/trunk/Quotient</code>.
+
+ <p>Now suppose we want to have an <code>SVNPoller</code> that only cares about
+the Nevow trunk. This case looks just like the PROJECT/BRANCH layout
+described earlier:
+
+<pre class="example"> from buildbot.changes.svnpoller import SVNPoller
+ c['change_source'] = SVNPoller("http://divmod.org/svn/Divmod/trunk/Nevow")
+</pre>
+ <p>But what happens when we want to track multiple Nevow branches? We
+have to point our <code>svnurl=</code> high enough to see all those
+branches, but we also don't want to include Quotient changes (since
+we're only building Nevow). To accomplish this, we must rely upon the
+<code>split_file</code> function to help us tell the difference between
+files that belong to Nevow and those that belong to Quotient, as well
+as figuring out which branch each one is on.
+
+<pre class="example"> from buildbot.changes.svnpoller import SVNPoller
+ c['change_source'] = SVNPoller("http://divmod.org/svn/Divmod",
+ split_file=my_file_splitter)
+</pre>
+ <p>The <code>my_file_splitter</code> function will be called with
+repository-relative pathnames like:
+
+ <dl>
+<dt><code>trunk/Nevow/formless/webform.py</code><dd>This is a Nevow file, on the trunk. We want the Change that includes this
+to see a filename of <code>formless/webform.py</code>, and a branch of None
+
+ <br><dt><code>branches/1.5.x/Nevow/formless/webform.py</code><dd>This is a Nevow file, on a branch. We want to get
+branch=&ldquo;branches/1.5.x&rdquo; and filename=&ldquo;formless/webform.py&rdquo;.
+
+ <br><dt><code>trunk/Quotient/setup.py</code><dd>This is a Quotient file, so we want to ignore it by having
+<code>my_file_splitter</code> return None.
+
+ <br><dt><code>branches/1.5.x/Quotient/setup.py</code><dd>This is also a Quotient file, which should be ignored.
+</dl>
+
+ <p>The following definition for <code>my_file_splitter</code> will do the job:
+
+<pre class="example"> def my_file_splitter(path):
+ pieces = path.split('/')
+ if pieces[0] == 'trunk':
+ branch = None
+ pieces.pop(0) # remove 'trunk'
+ elif pieces[0] == 'branches':
+ pieces.pop(0) # remove 'branches'
+ # grab branch name
+ branch = 'branches/' + pieces.pop(0)
+ else:
+ return None # something weird
+ projectname = pieces.pop(0)
+ if projectname != 'Nevow':
+ return None # wrong project
+ return (branch, '/'.join(pieces))
+</pre>
+ <div class="node">
+<p><hr>
+<a name="MercurialHook"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Bzr-Hook">Bzr Hook</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#SVNPoller">SVNPoller</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Getting-Source-Code-Changes">Getting Source Code Changes</a>
+
+</div>
+
+<h3 class="section">5.9 MercurialHook</h3>
+
+<p>Since Mercurial is written in python, the hook script can invoke
+Buildbot's <code>sendchange</code> function directly, rather than having to
+spawn an external process. This function delivers the same sort of
+changes as <code>buildbot sendchange</code> and the various hook scripts in
+contrib/, so you'll need to add a <code>pb.PBChangeSource</code> to your
+buildmaster to receive these changes.
+
+ <p>To set this up, first choose a Mercurial repository that represents
+your central &ldquo;official&rdquo; source tree. This will be the same
+repository that your buildslaves will eventually pull from. Install
+Buildbot on the machine that hosts this repository, using the same
+version of python as Mercurial is using (so that the Mercurial hook
+can import code from buildbot). Then add the following to the
+<code>.hg/hgrc</code> file in that repository, replacing the buildmaster
+hostname/portnumber as appropriate for your buildbot:
+
+<pre class="example"> [hooks]
+ changegroup.buildbot = python:buildbot.changes.hgbuildbot.hook
+
+ [hgbuildbot]
+ master = buildmaster.example.org:9987
+</pre>
+ <p>(Note that Mercurial lets you define multiple <code>changegroup</code> hooks
+by giving them distinct names, like <code>changegroup.foo</code> and
+<code>changegroup.bar</code>, which is why we use
+<code>changegroup.buildbot</code> in this example. There is nothing magical
+about the &ldquo;buildbot&rdquo; suffix in the hook name. The
+<code>[hgbuildbot]</code> section <em>is</em> special, however, as it is the
+only section that the buildbot hook pays attention to.)
+
+ <p>Also note that this runs as a <code>changegroup</code> hook, rather than as
+an <code>incoming</code> hook. The <code>changegroup</code> hook is run with
+multiple revisions at a time (say, if multiple revisions are being
+pushed to this repository in a single <samp><span class="command">hg push</span></samp> command),
+whereas the <code>incoming</code> hook is run with just one revision at a
+time. The <code>hgbuildbot.hook</code> function will only work with the
+<code>changegroup</code> hook.
+
+ <p>The <code>[hgbuildbot]</code> section has two other parameters that you
+might specify, both of which control the name of the branch that is
+attached to the changes coming from this hook.
+
+ <p>One common branch naming policy for Mercurial repositories is to use
+it just like Darcs: each branch goes into a separate repository, and
+all the branches for a single project share a common parent directory.
+For example, you might have <samp><span class="file">/var/repos/PROJECT/trunk/</span></samp> and
+<samp><span class="file">/var/repos/PROJECT/release</span></samp>. To use this style, use the
+<code>branchtype = dirname</code> setting, which simply uses the last
+component of the repository's enclosing directory as the branch name:
+
+<pre class="example"> [hgbuildbot]
+ master = buildmaster.example.org:9987
+ branchtype = dirname
+</pre>
+ <p>Another approach is to use Mercurial's built-in branches (the kind
+created with <samp><span class="command">hg branch</span></samp> and listed with <samp><span class="command">hg
+branches</span></samp>). This feature associates persistent names with particular
+lines of descent within a single repository. (note that the buildbot
+<code>source.Mercurial</code> checkout step does not yet support this kind
+of branch). To have the commit hook deliver this sort of branch name
+with the Change object, use <code>branchtype = inrepo</code>:
+
+<pre class="example"> [hgbuildbot]
+ master = buildmaster.example.org:9987
+ branchtype = inrepo
+</pre>
+ <p>Finally, if you want to simply specify the branchname directly, for
+all changes, use <code>branch = BRANCHNAME</code>. This overrides
+<code>branchtype</code>:
+
+<pre class="example"> [hgbuildbot]
+ master = buildmaster.example.org:9987
+ branch = trunk
+</pre>
+ <p>If you use <code>branch=</code> like this, you'll need to put a separate
+.hgrc in each repository. If you use <code>branchtype=</code>, you may be
+able to use the same .hgrc for all your repositories, stored in
+<samp><span class="file">~/.hgrc</span></samp> or <samp><span class="file">/etc/mercurial/hgrc</span></samp>.
+
+<div class="node">
+<p><hr>
+<a name="Bzr-Hook"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Bzr-Poller">Bzr Poller</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#MercurialHook">MercurialHook</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Getting-Source-Code-Changes">Getting Source Code Changes</a>
+
+</div>
+
+<h3 class="section">5.10 Bzr Hook</h3>
+
+<p>Bzr is also written in Python, and the Bzr hook depends on Twisted to send the
+changes.
+
+ <p>To install, put <code>contrib/bzr_buildbot.py</code> in one of your bzr
+plugins directories (e.g.,
+<code>~/.bazaar/plugins</code>). Then, in one of your bazaar conf files (e.g.,
+<code>~/.bazaar/locations.conf</code>), set the location you want to connect with buildbot
+with these keys:
+
+ <dl>
+<dt><code>buildbot_on</code><dd>one of 'commit', 'push', or 'change'. Turns the plugin on to report changes via
+commit, changes via push, or any changes to the trunk. 'change' is
+recommended.
+
+ <br><dt><code>buildbot_server</code><dd>(required to send to a buildbot master) the URL of the buildbot master to
+which you will connect (as of this writing, the same server and port to which
+slaves connect).
+
+ <br><dt><code>buildbot_port</code><dd>(optional, defaults to 9989) the port of the buildbot master to which you will
+connect (as of this writing, the same server and port to which slaves connect)
+
+ <br><dt><code>buildbot_pqm</code><dd>(optional, defaults to not pqm) Normally, the user that commits the revision
+is the user that is responsible for the change. When run in a pqm (Patch Queue
+Manager, see https://launchpad.net/pqm) environment, the user that commits is
+the Patch Queue Manager, and the user that committed the *parent* revision is
+responsible for the change. To turn on the pqm mode, set this value to any of
+(case-insensitive) "Yes", "Y", "True", or "T".
+
+ <br><dt><code>buildbot_dry_run</code><dd>(optional, defaults to not a dry run) Normally, the post-commit hook will
+attempt to communicate with the configured buildbot server and port. If this
+parameter is included and any of (case-insensitive) "Yes", "Y", "True", or
+"T", then the hook will simply print what it would have sent, but not attempt
+to contact the buildbot master.
+
+ <br><dt><code>buildbot_send_branch_name</code><dd>(optional, defaults to not sending the branch name) If your buildbot's bzr
+source build step uses a repourl, do *not* turn this on. If your buildbot's
+bzr build step uses a baseURL, then you may set this value to any of
+(case-insensitive) "Yes", "Y", "True", or "T" to have the buildbot master
+append the branch name to the baseURL.
+
+ </dl>
+
+ <p>When buildbot no longer has a hardcoded password, it will be a configuration
+option here as well.
+
+ <p>Here's a simple example that you might have in your
+<code>~/.bazaar/locations.conf</code>.
+
+<pre class="example"> [chroot-*:///var/local/myrepo/mybranch]
+ buildbot_on = change
+ buildbot_server = localhost
+</pre>
+ <div class="node">
+<p><hr>
+<a name="Bzr-Poller"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Bzr-Hook">Bzr Hook</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Getting-Source-Code-Changes">Getting Source Code Changes</a>
+
+</div>
+
+<h3 class="section">5.11 Bzr Poller</h3>
+
+<p>If you cannot insert a Bzr hook in the server, you can use the Bzr Poller. To
+use, put <code>contrib/bzr_buildbot.py</code> somewhere that your buildbot
+configuration can import it. Even putting it in the same directory as the master.cfg
+should work. Install the poller in the buildbot configuration as with any
+other change source. Minimally, provide a URL that you want to poll (bzr://,
+bzr+ssh://, or lp:), though make sure the buildbot user has necessary
+privileges. You may also want to specify these optional values.
+
+ <dl>
+<dt><code>poll_interval</code><dd>The number of seconds to wait between polls. Defaults to 10 minutes.
+
+ <br><dt><code>branch_name</code><dd>Any value to be used as the branch name. Defaults to None, or specify a
+string, or specify the constants from <code>bzr_buildbot.py</code> SHORT or FULL to
+get the short branch name or full branch address.
+
+ <br><dt><code>blame_merge_author</code><dd>normally, the user that commits the revision is the user that is responsible
+for the change. When run in a pqm (Patch Queue Manager, see
+https://launchpad.net/pqm) environment, the user that commits is the Patch
+Queue Manager, and the user that committed the merged, *parent* revision is
+responsible for the change. set this value to True if this is pointed against
+a PQM-managed branch.
+</dl>
+
+<div class="node">
+<p><hr>
+<a name="Build-Process"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Status-Delivery">Status Delivery</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Getting-Source-Code-Changes">Getting Source Code Changes</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="chapter">6 Build Process</h2>
+
+<p>A <code>Build</code> object is responsible for actually performing a build.
+It gets access to a remote <code>SlaveBuilder</code> where it may run
+commands, and a <code>BuildStatus</code> object where it must emit status
+events. The <code>Build</code> is created by the Builder's
+<code>BuildFactory</code>.
+
+ <p>The default <code>Build</code> class is made up of a fixed sequence of
+<code>BuildSteps</code>, executed one after another until all are complete
+(or one of them indicates that the build should be halted early). The
+default <code>BuildFactory</code> creates instances of this <code>Build</code>
+class with a list of <code>BuildSteps</code>, so the basic way to configure
+the build is to provide a list of <code>BuildSteps</code> to your
+<code>BuildFactory</code>.
+
+ <p>More complicated <code>Build</code> subclasses can make other decisions:
+execute some steps only if certain files were changed, or if certain
+previous steps passed or failed. The base class has been written to
+allow users to express basic control flow without writing code, but
+you can always subclass and customize to achieve more specialized
+behavior.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Build-Steps">Build Steps</a>
+<li><a accesskey="2" href="#Interlocks">Interlocks</a>
+<li><a accesskey="3" href="#Build-Factories">Build Factories</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Build-Steps"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Interlocks">Interlocks</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Build-Process">Build Process</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Process">Build Process</a>
+
+</div>
+
+<h3 class="section">6.1 Build Steps</h3>
+
+<p><code>BuildStep</code>s are usually specified in the buildmaster's
+configuration file, in a list that goes into the <code>BuildFactory</code>.
+The <code>BuildStep</code> instances in this list are used as templates to
+construct new independent copies for each build (so that state can be
+kept on the <code>BuildStep</code> in one build without affecting a later
+build). Each <code>BuildFactory</code> can be created with a list of steps,
+or the factory can be created empty and then steps added to it using
+the <code>addStep</code> method:
+
+<pre class="example"> from buildbot.steps import source, shell
+ from buildbot.process import factory
+
+ f = factory.BuildFactory()
+ f.addStep(source.SVN(svnurl="http://svn.example.org/Trunk/"))
+ f.addStep(shell.ShellCommand(command=["make", "all"]))
+ f.addStep(shell.ShellCommand(command=["make", "test"]))
+</pre>
+ <p>In earlier versions (0.7.5 and older), these steps were specified with
+a tuple of (step_class, keyword_arguments). Steps can still be
+specified this way, but the preferred form is to pass actual
+<code>BuildStep</code> instances to <code>addStep</code>, because that gives the
+<code>BuildStep</code> class a chance to do some validation on the
+arguments.
+
+ <p>If you have a common set of steps which are used in several factories, the
+<code>addSteps</code> method may be handy. It takes an iterable of <code>BuildStep</code>
+instances.
+
+<pre class="example">     setup_steps = [
+        source.SVN(svnurl="http://svn.example.org/Trunk/"),
+        shell.ShellCommand(command="./setup"),
+        ]
+ quick = factory.BuildFactory()
+ quick.addSteps(setup_steps)
+     quick.addStep(shell.ShellCommand(command="make quick"))
+</pre>
+ <p>The rest of this section lists all the standard BuildStep objects
+available for use in a Build, and the parameters which can be used to
+control each.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Common-Parameters">Common Parameters</a>
+<li><a accesskey="2" href="#Using-Build-Properties">Using Build Properties</a>
+<li><a accesskey="3" href="#Source-Checkout">Source Checkout</a>
+<li><a accesskey="4" href="#ShellCommand">ShellCommand</a>
+<li><a accesskey="5" href="#Simple-ShellCommand-Subclasses">Simple ShellCommand Subclasses</a>
+<li><a accesskey="6" href="#Python-BuildSteps">Python BuildSteps</a>
+<li><a accesskey="7" href="#Transferring-Files">Transferring Files</a>
+<li><a accesskey="8" href="#Steps-That-Run-on-the-Master">Steps That Run on the Master</a>
+<li><a accesskey="9" href="#Triggering-Schedulers">Triggering Schedulers</a>
+<li><a href="#Writing-New-BuildSteps">Writing New BuildSteps</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Common-Parameters"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Using-Build-Properties">Using Build Properties</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Build-Steps">Build Steps</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Steps">Build Steps</a>
+
+</div>
+
+<h4 class="subsection">6.1.1 Common Parameters</h4>
+
+<p>The standard <code>Build</code> runs a series of <code>BuildStep</code>s in order,
+only stopping when it runs out of steps or if one of them requests
+that the build be halted. It collects status information from each one
+to create an overall build status (of SUCCESS, WARNINGS, or FAILURE).
+
+ <p>All BuildSteps accept some common parameters. Some of these control
+how their individual status affects the overall build. Others are used
+to specify which <code>Locks</code> (see <a href="#Interlocks">Interlocks</a>) should be
+acquired before allowing the step to run.
+
+ <p>Arguments common to all <code>BuildStep</code> subclasses:
+
+ <dl>
+<dt><code>name</code><dd>the name used to describe the step on the status display. It is also
+used to give a name to any LogFiles created by this step.
+
+ <br><dt><code>haltOnFailure</code><dd>if True, a FAILURE of this build step will cause the build to halt
+immediately. Steps with <code>alwaysRun=True</code> are still run. Generally
+speaking, haltOnFailure implies flunkOnFailure (the default for most
+BuildSteps). In some cases, particularly series of tests, it makes sense
+to haltOnFailure if something fails early on but not flunkOnFailure.
+This can be achieved with haltOnFailure=True, flunkOnFailure=False.
+
+ <br><dt><code>flunkOnWarnings</code><dd>when True, a WARNINGS or FAILURE of this build step will mark the
+overall build as FAILURE. The remaining steps will still be executed.
+
+ <br><dt><code>flunkOnFailure</code><dd>when True, a FAILURE of this build step will mark the overall build as
+a FAILURE. The remaining steps will still be executed.
+
+ <br><dt><code>warnOnWarnings</code><dd>when True, a WARNINGS or FAILURE of this build step will mark the
+overall build as having WARNINGS. The remaining steps will still be
+executed.
+
+ <br><dt><code>warnOnFailure</code><dd>when True, a FAILURE of this build step will mark the overall build as
+having WARNINGS. The remaining steps will still be executed.
+
+ <br><dt><code>alwaysRun</code><dd>if True, this build step will always be run, even if a previous buildstep
+with <code>haltOnFailure=True</code> has failed.
+
+ <br><dt><code>locks</code><dd>a list of Locks (instances of <code>buildbot.locks.SlaveLock</code> or
+<code>buildbot.locks.MasterLock</code>) that should be acquired before
+starting this Step. The Locks will be released when the step is
+complete. Note that this is a list of actual Lock instances, not
+names. Also note that all Locks must have unique names.
+
+ </dl>
+
+<div class="node">
+<p><hr>
+<a name="Using-Build-Properties"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Source-Checkout">Source Checkout</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Common-Parameters">Common Parameters</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Steps">Build Steps</a>
+
+</div>
+
+<h4 class="subsection">6.1.2 Using Build Properties</h4>
+
+<p><a name="index-Properties-51"></a>
+Build properties are a generalized way to provide configuration
+information to build steps; see <a href="#Build-Properties">Build Properties</a>.
+
+ <p>Some build properties are inherited from external sources &ndash; global
+properties, schedulers, or buildslaves. Some build properties are
+set when the build starts, such as the SourceStamp information. Other
+properties can be set by BuildSteps as they run, for example the
+various Source steps will set the <code>got_revision</code> property to the
+source revision that was actually checked out (which can be useful
+when the SourceStamp in use merely requested the &ldquo;latest revision&rdquo;:
+<code>got_revision</code> will tell you what was actually built).
+
+ <p>In custom BuildSteps, you can get and set the build properties with
+the <code>getProperty</code>/<code>setProperty</code> methods. Each takes a string
+for the name of the property, and returns or accepts an
+arbitrary<a rel="footnote" href="#fn-7" name="fnd-7"><sup>7</sup></a> object. For example:
+
+<pre class="example"> class MakeTarball(ShellCommand):
+ def start(self):
+ if self.getProperty("os") == "win":
+ self.setCommand([ ... ]) # windows-only command
+ else:
+ self.setCommand([ ... ]) # equivalent for other systems
+ ShellCommand.start(self)
+</pre>
+ <h3 class="heading">WithProperties</h3>
+
+<p><a name="index-WithProperties-52"></a>
+You can use build properties in ShellCommands by using the
+<code>WithProperties</code> wrapper when setting the arguments of
+the ShellCommand. This interpolates the named build properties
+into the generated shell command. Most step parameters accept
+<code>WithProperties</code>. Please file bugs for any parameters which
+do not.
+
+<pre class="example"> from buildbot.steps.shell import ShellCommand
+ from buildbot.process.properties import WithProperties
+
+ f.addStep(ShellCommand(
+ command=["tar", "czf",
+ WithProperties("build-%s.tar.gz", "revision"),
+ "source"]))
+</pre>
+ <p>If this BuildStep were used in a tree obtained from Subversion, it
+would create a tarball with a name like <samp><span class="file">build-1234.tar.gz</span></samp>.
+
+ <p>The <code>WithProperties</code> function does <code>printf</code>-style string
+interpolation, using strings obtained by calling
+<code>build.getProperty(propname)</code>. Note that for every <code>%s</code> (or
+<code>%d</code>, etc), you must have exactly one additional argument to
+indicate which build property you want to insert.
+
+ <p>You can also use python dictionary-style string interpolation by using
+the <code>%(propname)s</code> syntax. In this form, the property name goes
+in the parentheses, and WithProperties takes <em>no</em> additional
+arguments:
+
+<pre class="example"> f.addStep(ShellCommand(
+ command=["tar", "czf",
+ WithProperties("build-%(revision)s.tar.gz"),
+ "source"]))
+</pre>
+ <p>Don't forget the extra &ldquo;s&rdquo; after the closing parenthesis! This is
+the cause of many confusing errors.
+
+ <p>The dictionary-style interpolation supports a number of more advanced
+syntaxes, too.
+
+ <dl>
+<dt><code>propname:-replacement</code><dd>If <code>propname</code> exists, substitute its value; otherwise,
+substitute <code>replacement</code>. <code>replacement</code> may be empty
+(<code>%(propname:-)s</code>)
+
+ <br><dt><code>propname:+replacement</code><dd>If <code>propname</code> exists, substitute <code>replacement</code>; otherwise,
+substitute an empty string.
+
+ </dl>
+
+ <p>Although these are similar to shell substitutions, no other
+substitutions are currently supported, and <code>replacement</code> in the
+above cannot contain more substitutions.
+
+ <p>Note: like python, you can either do positional-argument interpolation
+<em>or</em> keyword-argument interpolation, not both. Thus you cannot use
+a string like <code>WithProperties("foo-%(revision)s-%s", "branch")</code>.
+
+<h3 class="heading">Common Build Properties</h3>
+
+<p>The following build properties are set when the build is started, and
+are available to all steps.
+
+ <dl>
+<dt><code>branch</code><dd>
+This comes from the build's SourceStamp, and describes which branch is
+being checked out. This will be <code>None</code> (which interpolates into
+<code>WithProperties</code> as an empty string) if the build is on the
+default branch, which is generally the trunk. Otherwise it will be a
+string like &ldquo;branches/beta1.4&rdquo;. The exact syntax depends upon the VC
+system being used.
+
+ <br><dt><code>revision</code><dd>
+This also comes from the SourceStamp, and is the revision of the source code
+tree that was requested from the VC system. When a build is requested of a
+specific revision (as is generally the case when the build is triggered by
+Changes), this will contain the revision specification. This is always a
+string, although the syntax depends upon the VC system in use: for SVN it is an
+integer, for Mercurial it is a short string, for Darcs it is a rather large
+string, etc.
+
+ <p>If the &ldquo;force build&rdquo; button was pressed, the revision will be <code>None</code>,
+which means to use the most recent revision available. This is a &ldquo;trunk
+build&rdquo;. This will be interpolated as an empty string.
+
+ <br><dt><code>got_revision</code><dd>
+This is set when a Source step checks out the source tree, and
+provides the revision that was actually obtained from the VC system.
+In general this should be the same as <code>revision</code>, except for
+trunk builds, where <code>got_revision</code> indicates what revision was
+current when the checkout was performed. This can be used to rebuild
+the same source code later.
+
+ <p>Note that for some VC systems (Darcs in particular), the revision is a
+large string containing newlines, and is not suitable for interpolation
+into a filename.
+
+ <br><dt><code>buildername</code><dd>
+This is a string that indicates which Builder the build was a part of.
+The combination of buildername and buildnumber uniquely identify a
+build.
+
+ <br><dt><code>buildnumber</code><dd>
+Each build gets a number, scoped to the Builder (so the first build
+performed on any given Builder will have a build number of 0). This
+integer property contains the build's number.
+
+ <br><dt><code>slavename</code><dd>
+This is a string which identifies which buildslave the build is
+running on.
+
+ <br><dt><code>scheduler</code><dd>
+If the build was started from a scheduler, then this property will
+contain the name of that scheduler.
+
+ </dl>
+
+<div class="node">
+<p><hr>
+<a name="Source-Checkout"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#ShellCommand">ShellCommand</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Using-Build-Properties">Using Build Properties</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Steps">Build Steps</a>
+
+</div>
+
+<h4 class="subsection">6.1.3 Source Checkout</h4>
+
+<p>The first step of any build is typically to acquire the source code
+from which the build will be performed. There are several classes to
+handle this, one for each of the different source control system that
+Buildbot knows about. For a description of how Buildbot treats source
+control in general, see <a href="#Version-Control-Systems">Version Control Systems</a>.
+
+ <p>All source checkout steps accept some common parameters to control how
+they get the sources and where they should be placed. The remaining
+per-VC-system parameters are mostly to specify where exactly the
+sources are coming from.
+
+ <dl>
+<dt><code>mode</code><dd>
+a string describing the kind of VC operation that is desired. Defaults
+to <code>update</code>.
+
+ <dl>
+<dt><code>update</code><dd>specifies that the CVS checkout/update should be performed directly
+into the workdir. Each build is performed in the same directory,
+allowing for incremental builds. This minimizes disk space, bandwidth,
+and CPU time. However, it may encounter problems if the build process
+does not handle dependencies properly (sometimes you must do a &ldquo;clean
+build&rdquo; to make sure everything gets compiled), or if source files are
+deleted but generated files can influence test behavior (e.g. python's
+.pyc files), or when source directories are deleted but generated
+files prevent CVS from removing them. Builds ought to be correct
+regardless of whether they are done &ldquo;from scratch&rdquo; or incrementally,
+but it is useful to test both kinds: this mode exercises the
+incremental-build style.
+
+ <br><dt><code>copy</code><dd>specifies that the CVS workspace should be maintained in a separate
+directory (called the 'copydir'), using checkout or update as
+necessary. For each build, a new workdir is created with a copy of the
+source tree (rm -rf workdir; cp -r copydir workdir). This doubles the
+disk space required, but keeps the bandwidth low (update instead of a
+full checkout). A full 'clean' build is performed each time. This
+avoids any generated-file build problems, but is still occasionally
+vulnerable to CVS problems such as a repository being manually
+rearranged, causing CVS errors on update which are not an issue with a
+full checkout.
+
+ <!-- TODO: something is screwy about this, revisit. Is it the source -->
+ <!-- directory or the working directory that is deleted each time? -->
+ <br><dt><code>clobber</code><dd>specifies that the working directory should be deleted each time,
+necessitating a full checkout for each build. This ensures a clean
+build off a complete checkout, avoiding any of the problems described
+above. This mode exercises the &ldquo;from-scratch&rdquo; build style.
+
+ <br><dt><code>export</code><dd>this is like <code>clobber</code>, except that the 'cvs export' command is
+used to create the working directory. This command removes all CVS
+metadata files (the CVS/ directories) from the tree, which is
+sometimes useful for creating source tarballs (to avoid including the
+metadata in the tar file).
+</dl>
+
+ <br><dt><code>workdir</code><dd>like all Steps, this indicates the directory where the build will take
+place. Source Steps are special in that they perform some operations
+outside of the workdir (like creating the workdir itself).
+
+ <br><dt><code>alwaysUseLatest</code><dd>if True, bypass the usual &ldquo;update to the last Change&rdquo; behavior, and
+always update to the latest changes instead.
+
+ <br><dt><code>retry</code><dd>If set, this specifies a tuple of <code>(delay, repeats)</code> which means
+that when a full VC checkout fails, it should be retried up to
+<var>repeats</var> times, waiting <var>delay</var> seconds between attempts. If
+you don't provide this, it defaults to <code>None</code>, which means VC
+operations should not be retried. This is provided to make life easier
+for buildslaves which are stuck behind poor network connections.
+
+ </dl>
+
+ <p>My habit as a developer is to do a <code>cvs update</code> and <code>make</code> each
+morning. Problems can occur, either because of bad code being checked in, or
+by incomplete dependencies causing a partial rebuild to fail where a
+complete from-scratch build might succeed. A quick Builder which emulates
+this incremental-build behavior would use the <code>mode='update'</code>
+setting.
+
+ <p>On the other hand, other kinds of dependency problems can cause a clean
+build to fail where a partial build might succeed. This frequently results
+from a link step that depends upon an object file that was removed from a
+later version of the tree: in the partial tree, the object file is still
+around (even though the Makefiles no longer know how to create it).
+
+ <p>&ldquo;official&rdquo; builds (traceable builds performed from a known set of
+source revisions) are always done as clean builds, to make sure it is
+not influenced by any uncontrolled factors (like leftover files from a
+previous build). A &ldquo;full&rdquo; Builder which behaves this way would want
+to use the <code>mode='clobber'</code> setting.
+
+ <p>Each VC system has a corresponding source checkout class: their
+arguments are described on the following pages.
+
+<ul class="menu">
+<li><a accesskey="1" href="#CVS">CVS</a>
+<li><a accesskey="2" href="#SVN">SVN</a>
+<li><a accesskey="3" href="#Darcs">Darcs</a>
+<li><a accesskey="4" href="#Mercurial">Mercurial</a>
+<li><a accesskey="5" href="#Arch">Arch</a>
+<li><a accesskey="6" href="#Bazaar">Bazaar</a>
+<li><a accesskey="7" href="#Bzr">Bzr</a>
+<li><a accesskey="8" href="#P4">P4</a>
+<li><a accesskey="9" href="#Git">Git</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="CVS"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#SVN">SVN</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Source-Checkout">Source Checkout</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Source-Checkout">Source Checkout</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.3.1 CVS</h5>
+
+<p><a name="index-CVS-Checkout-53"></a><a name="index-buildbot_002esteps_002esource_002eCVS-54"></a>
+
+ <p>The <code>CVS</code> build step performs a <a href="http://www.nongnu.org/cvs/">CVS</a> checkout or update. It takes the following arguments:
+
+ <dl>
+<dt><code>cvsroot</code><dd>(required): specify the CVSROOT value, which points to a CVS
+repository, probably on a remote machine. For example, the cvsroot
+value you would use to get a copy of the Buildbot source code is
+<code>:pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot</code>
+
+ <br><dt><code>cvsmodule</code><dd>(required): specify the cvs <code>module</code>, which is generally a
+subdirectory of the CVSROOT. The cvsmodule for the Buildbot source
+code is <code>buildbot</code>.
+
+ <br><dt><code>branch</code><dd>a string which will be used in a <code>-r</code> argument. This is most
+useful for specifying a branch to work on. Defaults to <code>HEAD</code>.
+
+ <br><dt><code>global_options</code><dd>a list of flags to be put before the verb in the CVS command.
+
+ <br><dt><code>checkoutDelay</code><dd>if set, the number of seconds to put between the timestamp of the last
+known Change and the value used for the <code>-D</code> option. Defaults to
+half of the parent Build's treeStableTimer.
+
+ </dl>
+
+<div class="node">
+<p><hr>
+<a name="SVN"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Darcs">Darcs</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#CVS">CVS</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Source-Checkout">Source Checkout</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.3.2 SVN</h5>
+
+<p><a name="index-SVN-Checkout-55"></a><a name="index-buildbot_002esteps_002esource_002eSVN-56"></a>
+
+ <p>The <code>SVN</code> build step performs a
+<a href="http://subversion.tigris.org">Subversion</a> checkout or update.
+There are two basic ways of setting up the checkout step, depending
+upon whether you are using multiple branches or not.
+
+ <p>If all of your builds use the same branch, then you should create the
+<code>SVN</code> step with the <code>svnurl</code> argument:
+
+ <dl>
+<dt><code>svnurl</code><dd>(required): this specifies the <code>URL</code> argument that will be given
+to the <code>svn checkout</code> command. It dictates both where the
+repository is located and which sub-tree should be extracted. In this
+respect, it is like a combination of the CVS <code>cvsroot</code> and
+<code>cvsmodule</code> arguments. For example, if you are using a remote
+Subversion repository which is accessible through HTTP at a URL of
+<code>http://svn.example.com/repos</code>, and you wanted to check out the
+<code>trunk/calc</code> sub-tree, you would use
+<code>svnurl="http://svn.example.com/repos/trunk/calc"</code> as an argument
+to your <code>SVN</code> step.
+</dl>
+
+ <p>If, on the other hand, you are building from multiple branches, then
+you should create the <code>SVN</code> step with the <code>baseURL</code> and
+<code>defaultBranch</code> arguments instead:
+
+ <dl>
+<dt><code>baseURL</code><dd>(required): this specifies the base repository URL, to which a branch
+name will be appended. It should probably end in a slash.
+
+ <br><dt><code>defaultBranch</code><dd>this specifies the name of the branch to use when a Build does not
+provide one of its own. This will be appended to <code>baseURL</code> to
+create the string that will be passed to the <code>svn checkout</code>
+command.
+
+ <br><dt><code>username</code><dd>if specified, this will be passed to the <code>svn</code> binary with a
+<code>--username</code> option.
+
+ <br><dt><code>password</code><dd>if specified, this will be passed to the <code>svn</code> binary with a
+<code>--password</code> option. The password itself will be suitably obfuscated in
+the logs.
+
+ </dl>
+
+ <p>If you are using branches, you must also make sure your
+<code>ChangeSource</code> will report the correct branch names.
+
+<h3 class="heading">branch example</h3>
+
+<p>Let's suppose that the &ldquo;MyProject&rdquo; repository uses branches for the
+trunk, for various users' individual development efforts, and for
+several new features that will require some amount of work (involving
+multiple developers) before they are ready to merge onto the trunk.
+Such a repository might be organized as follows:
+
+<pre class="example"> svn://svn.example.org/MyProject/trunk
+ svn://svn.example.org/MyProject/branches/User1/foo
+ svn://svn.example.org/MyProject/branches/User1/bar
+ svn://svn.example.org/MyProject/branches/User2/baz
+ svn://svn.example.org/MyProject/features/newthing
+ svn://svn.example.org/MyProject/features/otherthing
+</pre>
+ <p>Further assume that we want the Buildbot to run tests against the
+trunk and against all the feature branches (i.e., do a
+checkout/compile/build of branch X when a file has been changed on
+branch X, when X is in the set [trunk, features/newthing,
+features/otherthing]). We do not want the Buildbot to automatically
+build any of the user branches, but it should be willing to build a
+user branch when explicitly requested (most likely by the user who
+owns that branch).
+
+ <p>There are three things that need to be set up to accommodate this
+system. The first is a ChangeSource that is capable of identifying the
+branch which owns any given file. This depends upon a user-supplied
+function, in an external program that runs in the SVN commit hook and
+connects to the buildmaster's <code>PBChangeSource</code> over a TCP
+connection. (you can use the &ldquo;<code>buildbot sendchange</code>&rdquo; utility
+for this purpose, but you will still need an external program to
+decide what value should be passed to the <code>--branch=</code> argument).
+For example, a change to a file with the SVN url of
+&ldquo;svn://svn.example.org/MyProject/features/newthing/src/foo.c&rdquo; should
+be broken down into a Change instance with
+<code>branch='features/newthing'</code> and <code>file='src/foo.c'</code>.
+
+ <p>The second piece is an <code>AnyBranchScheduler</code> which will pay
+attention to the desired branches. It will not pay attention to the
+user branches, so it will not automatically start builds in response
+to changes there. The AnyBranchScheduler class requires you to
+explicitly list all the branches you want it to use, but it would not
+be difficult to write a subclass which used
+<code>branch.startswith('features/')</code> to remove the need for this
+explicit list. Or, if you want to build user branches too, you can use
+AnyBranchScheduler with <code>branches=None</code> to indicate that you want
+it to pay attention to all branches.
+
+ <p>The third piece is an <code>SVN</code> checkout step that is configured to
+handle the branches correctly, with a <code>baseURL</code> value that
+matches the way the ChangeSource splits each file's URL into base,
+branch, and file.
+
+<pre class="example"> from buildbot.changes.pb import PBChangeSource
+ from buildbot.scheduler import AnyBranchScheduler
+ from buildbot.process import source, factory
+ from buildbot.steps import source, shell
+
+ c['change_source'] = PBChangeSource()
+ s1 = AnyBranchScheduler('main',
+ ['trunk', 'features/newthing', 'features/otherthing'],
+ 10*60, ['test-i386', 'test-ppc'])
+ c['schedulers'] = [s1]
+
+ f = factory.BuildFactory()
+ f.addStep(source.SVN(mode='update',
+ baseURL='svn://svn.example.org/MyProject/',
+ defaultBranch='trunk'))
+ f.addStep(shell.Compile(command="make all"))
+ f.addStep(shell.Test(command="make test"))
+
+ c['builders'] = [
+ {'name':'test-i386', 'slavename':'bot-i386', 'builddir':'test-i386',
+ 'factory':f },
+ {'name':'test-ppc', 'slavename':'bot-ppc', 'builddir':'test-ppc',
+ 'factory':f },
+ ]
+</pre>
+ <p>In this example, when a change arrives with a <code>branch</code> attribute
+of &ldquo;trunk&rdquo;, the resulting build will have an SVN step that
+concatenates &ldquo;svn://svn.example.org/MyProject/&rdquo; (the baseURL) with
+&ldquo;trunk&rdquo; (the branch name) to get the correct svn command. If the
+&ldquo;newthing&rdquo; branch has a change to &ldquo;src/foo.c&rdquo;, then the SVN step
+will concatenate &ldquo;svn://svn.example.org/MyProject/&rdquo; with
+&ldquo;features/newthing&rdquo; to get the svnurl for checkout.
+
+<div class="node">
+<p><hr>
+<a name="Darcs"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Mercurial">Mercurial</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#SVN">SVN</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Source-Checkout">Source Checkout</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.3.3 Darcs</h5>
+
+<p><a name="index-Darcs-Checkout-57"></a><a name="index-buildbot_002esteps_002esource_002eDarcs-58"></a>
+
+ <p>The <code>Darcs</code> build step performs a
+<a href="http://darcs.net/">Darcs</a> checkout or update.
+
+ <p>Like <a href="#SVN">SVN</a>, this step can either be configured to always check
+out a specific tree, or set up to pull from a particular branch that
+gets specified separately for each build. Also like SVN, the
+repository URL given to Darcs is created by concatenating a
+<code>baseURL</code> with the branch name, and if no particular branch is
+requested, it uses a <code>defaultBranch</code>. The only difference in
+usage is that each potential Darcs repository URL must point to a
+fully-fledged repository, whereas SVN URLs usually point to sub-trees
+of the main Subversion repository. In other words, doing an SVN
+checkout of <code>baseURL</code> is legal, but silly, since you'd probably
+wind up with a copy of every single branch in the whole repository.
+Doing a Darcs checkout of <code>baseURL</code> is just plain wrong, since
+the parent directory of a collection of Darcs repositories is not
+itself a valid repository.
+
+ <p>The Darcs step takes the following arguments:
+
+ <dl>
+<dt><code>repourl</code><dd>(required unless <code>baseURL</code> is provided): the URL at which the
+Darcs source repository is available.
+
+ <br><dt><code>baseURL</code><dd>(required unless <code>repourl</code> is provided): the base repository URL,
+to which a branch name will be appended. It should probably end in a
+slash.
+
+ <br><dt><code>defaultBranch</code><dd>(allowed if and only if <code>baseURL</code> is provided): this specifies
+the name of the branch to use when a Build does not provide one of its
+own. This will be appended to <code>baseURL</code> to create the string that
+will be passed to the <code>darcs get</code> command.
+</dl>
+
+<div class="node">
+<p><hr>
+<a name="Mercurial"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Arch">Arch</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Darcs">Darcs</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Source-Checkout">Source Checkout</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.3.4 Mercurial</h5>
+
+<p><a name="index-Mercurial-Checkout-59"></a><a name="index-buildbot_002esteps_002esource_002eMercurial-60"></a>
+
+ <p>The <code>Mercurial</code> build step performs a
+<a href="http://selenic.com/mercurial">Mercurial</a> (aka &ldquo;hg&rdquo;) checkout
+or update.
+
+ <p>Branches are handled just like <a href="#Darcs">Darcs</a>.
+
+ <p>The Mercurial step takes the following arguments:
+
+ <dl>
+<dt><code>repourl</code><dd>(required unless <code>baseURL</code> is provided): the URL at which the
+Mercurial source repository is available.
+
+ <br><dt><code>baseURL</code><dd>(required unless <code>repourl</code> is provided): the base repository URL,
+to which a branch name will be appended. It should probably end in a
+slash.
+
+ <br><dt><code>defaultBranch</code><dd>(allowed if and only if <code>baseURL</code> is provided): this specifies
+the name of the branch to use when a Build does not provide one of its
+own. This will be appended to <code>baseURL</code> to create the string that
+will be passed to the <code>hg clone</code> command.
+</dl>
+
+<div class="node">
+<p><hr>
+<a name="Arch"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Bazaar">Bazaar</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Mercurial">Mercurial</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Source-Checkout">Source Checkout</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.3.5 Arch</h5>
+
+<p><a name="index-Arch-Checkout-61"></a><a name="index-buildbot_002esteps_002esource_002eArch-62"></a>
+
+ <p>The <code>Arch</code> build step performs an <a href="http://gnuarch.org/">Arch</a> checkout or update using the <code>tla</code> client. It takes the
+following arguments:
+
+ <dl>
+<dt><code>url</code><dd>(required): this specifies the URL at which the Arch source archive is
+available.
+
+ <br><dt><code>version</code><dd>(required): this specifies which &ldquo;development line&rdquo; (like a branch)
+should be used. This provides the default branch name, but individual
+builds may specify a different one.
+
+ <br><dt><code>archive</code><dd>(optional): Each repository knows its own archive name. If this
+parameter is provided, it must match the repository's archive name.
+The parameter is accepted for compatibility with the <code>Bazaar</code>
+step, below.
+
+ </dl>
+
+<div class="node">
+<p><hr>
+<a name="Bazaar"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Bzr">Bzr</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Arch">Arch</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Source-Checkout">Source Checkout</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.3.6 Bazaar</h5>
+
+<p><a name="index-Bazaar-Checkout-63"></a><a name="index-buildbot_002esteps_002esource_002eBazaar-64"></a>
+
+ <p><code>Bazaar</code> is an alternate implementation of the Arch VC system,
+which uses a client named <code>baz</code>. The checkout semantics are just
+different enough from <code>tla</code> that there is a separate BuildStep for
+it.
+
+ <p>It takes exactly the same arguments as <code>Arch</code>, except that the
+<code>archive=</code> parameter is required. (baz does not emit the archive
+name when you do <code>baz register-archive</code>, so we must provide it
+ourselves).
+
+<div class="node">
+<p><hr>
+<a name="Bzr"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#P4">P4</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Bazaar">Bazaar</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Source-Checkout">Source Checkout</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.3.7 Bzr</h5>
+
+<p><a name="index-Bzr-Checkout-65"></a><a name="index-buildbot_002esteps_002esource_002eBzr-66"></a>
+<code>bzr</code> is a descendant of Arch/Baz, and is frequently referred to
+as simply &ldquo;Bazaar&rdquo;. The repository-vs-workspace model is similar to
+Darcs, but it uses a strictly linear sequence of revisions (one
+history per branch) like Arch. Branches are put in subdirectories.
+This makes it look very much like Mercurial, so it takes the same
+arguments:
+
+ <dl>
+<dt><code>repourl</code><dd>(required unless <code>baseURL</code> is provided): the URL at which the
+Bzr source repository is available.
+
+ <br><dt><code>baseURL</code><dd>(required unless <code>repourl</code> is provided): the base repository URL,
+to which a branch name will be appended. It should probably end in a
+slash.
+
+ <br><dt><code>defaultBranch</code><dd>(allowed if and only if <code>baseURL</code> is provided): this specifies
+the name of the branch to use when a Build does not provide one of its
+own. This will be appended to <code>baseURL</code> to create the string that
+will be passed to the <code>bzr checkout</code> command.
+</dl>
+
+<div class="node">
+<p><hr>
+<a name="P4"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Git">Git</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Bzr">Bzr</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Source-Checkout">Source Checkout</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.3.8 P4</h5>
+
+<p><a name="index-Perforce-Update-67"></a><a name="index-buildbot_002esteps_002esource_002eP4-68"></a><!-- TODO @bsindex buildbot.steps.source.P4Sync -->
+
+ <p>The <code>P4</code> build step creates a <a href="http://www.perforce.com/">Perforce</a> client specification and performs an update.
+
+ <dl>
+<dt><code>p4base</code><dd>A view into the Perforce depot without branch name or trailing "...".
+Typically "//depot/proj/".
+<br><dt><code>defaultBranch</code><dd>A branch name to append on build requests if none is specified.
+Typically "trunk".
+<br><dt><code>p4port</code><dd>(optional): the host:port string describing how to get to the P4 Depot
+(repository), used as the -p argument for all p4 commands.
+<br><dt><code>p4user</code><dd>(optional): the Perforce user, used as the -u argument to all p4
+commands.
+<br><dt><code>p4passwd</code><dd>(optional): the Perforce password, used as the -P argument to all p4
+commands.
+<br><dt><code>p4extra_views</code><dd>(optional): a list of (depotpath, clientpath) tuples containing extra
+views to be mapped into the client specification. Both will have
+"/..." appended automatically. The client name and source directory
+will be prepended to the client path.
+<br><dt><code>p4client</code><dd>(optional): The name of the client to use. In mode='copy' and
+mode='update', it's particularly important that a unique name is used
+for each checkout directory to avoid incorrect synchronization. For
+this reason, Python percent substitution will be performed on this value
+to replace %(slave)s with the slave name and %(builder)s with the
+builder name. The default is "buildbot_%(slave)s_%(build)s".
+</dl>
+
+<div class="node">
+<p><hr>
+<a name="Git"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#P4">P4</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Source-Checkout">Source Checkout</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.3.9 Git</h5>
+
+<p><a name="index-Git-Checkout-69"></a><a name="index-buildbot_002esteps_002esource_002eGit-70"></a>
+The <code>Git</code> build step clones or updates a <a href="http://git.or.cz/">Git</a> repository and checks out the specified branch or revision. Note
+that the buildbot supports Git version 1.2.0 and later: earlier
+versions (such as the one shipped in Ubuntu 'Dapper') do not support
+the <samp><span class="command">git init</span></samp> command that the buildbot uses.
+
+ <p>The Git step takes the following arguments:
+
+ <dl>
+<dt><code>repourl</code><dd>(required): the URL of the upstream Git repository.
+
+ <br><dt><code>branch</code><dd>(optional): this specifies the name of the branch to use when a Build
+does not provide one of its own. If this parameter is not
+specified, and the Build does not provide a branch, the &ldquo;master&rdquo;
+branch will be used.
+</dl>
+
+<div class="node">
+<p><hr>
+<a name="ShellCommand"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Simple-ShellCommand-Subclasses">Simple ShellCommand Subclasses</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Source-Checkout">Source Checkout</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Steps">Build Steps</a>
+
+</div>
+
+<h4 class="subsection">6.1.4 ShellCommand</h4>
+
+<p><a name="index-buildbot_002esteps_002eshell_002eShellCommand-71"></a><!-- TODO @bsindex buildbot.steps.shell.TreeSize -->
+
+ <p>This is a useful base class for just about everything you might want
+to do during a build (except for the initial source checkout). It runs
+a single command in a child shell on the buildslave. All stdout/stderr
+is recorded into a LogFile. The step finishes with a status of FAILURE
+if the command's exit code is non-zero, otherwise it has a status of
+SUCCESS.
+
+ <p>The preferred way to specify the command is with a list of argv strings,
+since this allows for spaces in filenames and avoids doing any fragile
+shell-escaping. You can also specify the command with a single string, in
+which case the string is given to '/bin/sh -c COMMAND' for parsing.
+
+ <p>On Windows, commands are run via <code>cmd.exe /c</code> which works well. However,
+if you're running a batch file, the error level does not get propagated
+correctly unless you add 'call' before your batch file's name:
+<code>cmd=['call', 'myfile.bat', ...]</code>.
+
+ <p>All ShellCommands are run by default in the &ldquo;workdir&rdquo;, which
+defaults to the &ldquo;<samp><span class="file">build</span></samp>&rdquo; subdirectory of the slave builder's
+base directory. The absolute path of the workdir will thus be the
+slave's basedir (set as an option to <code>buildbot create-slave</code>,
+see <a href="#Creating-a-buildslave">Creating a buildslave</a>) plus the builder's basedir (set in the
+builder's <code>c['builddir']</code> key in master.cfg) plus the workdir
+itself (a class-level attribute of the BuildFactory, defaults to
+&ldquo;<samp><span class="file">build</span></samp>&rdquo;).
+
+ <p><code>ShellCommand</code> arguments:
+
+ <dl>
+<dt><code>command</code><dd>a list of strings (preferred) or single string (discouraged) which
+specifies the command to be run. A list of strings is preferred
+because it can be used directly as an argv array. Using a single
+string (with embedded spaces) requires the buildslave to pass the
+string to /bin/sh for interpretation, which raises all sorts of
+difficult questions about how to escape or interpret shell
+metacharacters.
+
+ <br><dt><code>env</code><dd>a dictionary of environment strings which will be added to the child
+command's environment. For example, to run tests with a different i18n
+language setting, you might use
+
+ <pre class="example"> f.addStep(ShellCommand(command=["make", "test"],
+ env={'LANG': 'fr_FR'}))
+</pre>
+ <p>These variable settings will override any existing ones in the
+buildslave's environment or the environment specified in the
+Builder. The exception is PYTHONPATH, which is merged
+with (actually prepended to) any existing $PYTHONPATH setting. The
+value is treated as a list of directories to prepend, and a single
+string is treated like a one-item list. For example, to prepend both
+<samp><span class="file">/usr/local/lib/python2.3</span></samp> and <samp><span class="file">/home/buildbot/lib/python</span></samp>
+to any existing $PYTHONPATH setting, you would do something like the
+following:
+
+ <pre class="example"> f.addStep(ShellCommand(
+ command=["make", "test"],
+ env={'PYTHONPATH': ["/usr/local/lib/python2.3",
+ "/home/buildbot/lib/python"] }))
+</pre>
+ <br><dt><code>want_stdout</code><dd>if False, stdout from the child process is discarded rather than being
+sent to the buildmaster for inclusion in the step's LogFile.
+
+ <br><dt><code>want_stderr</code><dd>like <code>want_stdout</code> but for stderr. Note that commands run through
+a PTY do not have separate stdout/stderr streams: both are merged into
+stdout.
+
+ <br><dt><code>usePTY</code><dd>Should this command be run in a <code>pty</code>? The default is to observe the
+configuration of the client (see <a href="#Buildslave-Options">Buildslave Options</a>), but specifying
+<code>True</code> or <code>False</code> here will override the default.
+
+ <p>The advantage of using a PTY is that &ldquo;grandchild&rdquo; processes are more likely
+to be cleaned up if the build is interrupted or times out (since it enables the
+use of a &ldquo;process group&rdquo; in which all child processes will be placed). The
+disadvantages: some forms of Unix have problems with PTYs, some of your unit
+tests may behave differently when run under a PTY (generally those which check
+to see if they are being run interactively), and PTYs will merge the stdout and
+stderr streams into a single output stream (which means the red-vs-black
+coloring in the logfiles will be lost).
+
+ <br><dt><code>logfiles</code><dd>Sometimes commands will log interesting data to a local file, rather
+than emitting everything to stdout or stderr. For example, Twisted's
+&ldquo;trial&rdquo; command (which runs unit tests) only presents summary
+information to stdout, and puts the rest into a file named
+<samp><span class="file">_trial_temp/test.log</span></samp>. It is often useful to watch these files
+as the command runs, rather than using <samp><span class="command">/bin/cat</span></samp> to dump
+their contents afterwards.
+
+ <p>The <code>logfiles=</code> argument allows you to collect data from these
+secondary logfiles in near-real-time, as the step is running. It
+accepts a dictionary which maps from a local Log name (which is how
+the log data is presented in the build results) to a remote filename
+(interpreted relative to the build's working directory). Each named
+file will be polled on a regular basis (every couple of seconds) as
+the build runs, and any new text will be sent over to the buildmaster.
+
+ <pre class="example"> f.addStep(ShellCommand(
+ command=["make", "test"],
+ logfiles={"triallog": "_trial_temp/test.log"}))
+</pre>
+ <br><dt><code>timeout</code><dd>if the command fails to produce any output for this many seconds, it
+is assumed to be locked up and will be killed.
+
+ <br><dt><code>description</code><dd>This will be used to describe the command (on the Waterfall display)
+while the command is still running. It should be a single
+imperfect-tense verb, like &ldquo;compiling&rdquo; or &ldquo;testing&rdquo;. The preferred
+form is a list of short strings, which allows the HTML Waterfall
+display to create narrower columns by emitting a &lt;br&gt; tag between each
+word. You may also provide a single string.
+
+ <br><dt><code>descriptionDone</code><dd>This will be used to describe the command once it has finished. A
+simple noun like &ldquo;compile&rdquo; or &ldquo;tests&rdquo; should be used. Like
+<code>description</code>, this may either be a list of short strings or a
+single string.
+
+ <p>If neither <code>description</code> nor <code>descriptionDone</code> are set, the
+actual command arguments will be used to construct the description.
+This may be a bit too wide to fit comfortably on the Waterfall
+display.
+
+ <pre class="example"> f.addStep(ShellCommand(command=["make", "test"],
+ description=["testing"],
+ descriptionDone=["tests"]))
+</pre>
+ <br><dt><code>logEnviron</code><dd>If this option is true (the default), then the step's logfile will describe the
+environment variables on the slave. In situations where the environment is not
+relevant and is long, it may be easier to set <code>logEnviron=False</code>.
+
+ </dl>
+
+<div class="node">
+<p><hr>
+<a name="Simple-ShellCommand-Subclasses"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Python-BuildSteps">Python BuildSteps</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#ShellCommand">ShellCommand</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Steps">Build Steps</a>
+
+</div>
+
+<h4 class="subsection">6.1.5 Simple ShellCommand Subclasses</h4>
+
+<p>Several subclasses of ShellCommand are provided as starting points for
+common build steps. These are all very simple: they just override a few
+parameters so you don't have to specify them yourself, making the master.cfg
+file less verbose.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Configure">Configure</a>
+<li><a accesskey="2" href="#Compile">Compile</a>
+<li><a accesskey="3" href="#Test">Test</a>
+<li><a accesskey="4" href="#TreeSize">TreeSize</a>
+<li><a accesskey="5" href="#PerlModuleTest">PerlModuleTest</a>
+<li><a accesskey="6" href="#SetProperty">SetProperty</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Configure"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Compile">Compile</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Simple-ShellCommand-Subclasses">Simple ShellCommand Subclasses</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Simple-ShellCommand-Subclasses">Simple ShellCommand Subclasses</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.5.1 Configure</h5>
+
+<p><a name="index-buildbot_002esteps_002eshell_002eConfigure-72"></a>
+This is intended to handle the <code>./configure</code> step from
+autoconf-style projects, or the <code>perl Makefile.PL</code> step from perl
+MakeMaker.pm-style modules. The default command is <code>./configure</code>
+but you can change this by providing a <code>command=</code> parameter.
+
+<div class="node">
+<p><hr>
+<a name="Compile"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Test">Test</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Configure">Configure</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Simple-ShellCommand-Subclasses">Simple ShellCommand Subclasses</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.5.2 Compile</h5>
+
+<p><a name="index-buildbot_002esteps_002eshell_002eCompile-73"></a>
+This is meant to handle compiling or building a project written in C.
+The default command is <code>make all</code>. When the compile is finished,
+the log file is scanned for GCC warning messages, a summary log is
+created with any problems that were seen, and the step is marked as
+WARNINGS if any were discovered. The number of warnings is stored in a
+Build Property named &ldquo;warnings-count&rdquo;, which is accumulated over all
+Compile steps (so if two warnings are found in one step, and three are
+found in another step, the overall build will have a
+&ldquo;warnings-count&rdquo; property of 5).
+
+ <p>The default regular expression used to detect a warning is
+<code>'.*warning[: ].*'</code> , which is fairly liberal and may cause
+false-positives. To use a different regexp, provide a
+<code>warningPattern=</code> argument, or use a subclass which sets the
+<code>warningPattern</code> attribute:
+
+<pre class="example"> f.addStep(Compile(command=["make", "test"],
+ warningPattern="^Warning: "))
+</pre>
+ <p>The <code>warningPattern=</code> can also be a pre-compiled python regexp
+object: this makes it possible to add flags like <code>re.I</code> (to use
+case-insensitive matching).
+
+ <p>(TODO: this step needs to be extended to look for GCC error messages
+as well, and collect them into a separate logfile, along with the
+source code filenames involved).
+
+<div class="node">
+<p><hr>
+<a name="Test"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#TreeSize">TreeSize</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Compile">Compile</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Simple-ShellCommand-Subclasses">Simple ShellCommand Subclasses</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.5.3 Test</h5>
+
+<p><a name="index-buildbot_002esteps_002eshell_002eTest-74"></a>
+This is meant to handle unit tests. The default command is <code>make
+test</code>, and the <code>warnOnFailure</code> flag is set.
+
+<div class="node">
+<p><hr>
+<a name="TreeSize"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#PerlModuleTest">PerlModuleTest</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Test">Test</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Simple-ShellCommand-Subclasses">Simple ShellCommand Subclasses</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.5.4 TreeSize</h5>
+
+<p><a name="index-buildbot_002esteps_002eshell_002eTreeSize-75"></a>
+This is a simple command that uses the 'du' tool to measure the size
+of the code tree. It puts the size (as a count of 1024-byte blocks,
+aka 'KiB' or 'kibibytes') on the step's status text, and sets a build
+property named 'tree-size-KiB' with the same value.
+
+<div class="node">
+<p><hr>
+<a name="PerlModuleTest"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#SetProperty">SetProperty</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#TreeSize">TreeSize</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Simple-ShellCommand-Subclasses">Simple ShellCommand Subclasses</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.5.5 PerlModuleTest</h5>
+
+<p><a name="index-buildbot_002esteps_002eshell_002ePerlModuleTest-76"></a>
+This is a simple command that knows how to run tests of perl modules.
+It parses the output to determine the number of tests passed and
+failed and total number executed, saving the results for later query.
+
+<div class="node">
+<p><hr>
+<a name="SetProperty"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#PerlModuleTest">PerlModuleTest</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Simple-ShellCommand-Subclasses">Simple ShellCommand Subclasses</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.5.6 SetProperty</h5>
+
+<p><a name="index-buildbot_002esteps_002eshell_002eSetProperty-77"></a>
+This buildstep is similar to ShellCommand, except that it captures the
+output of the command into a property. It is usually used like this:
+
+<pre class="example"> f.addStep(SetProperty(command="uname -a", property="uname"))
+</pre>
+ <p>This runs <code>uname -a</code> and captures its stdout, stripped of leading
+and trailing whitespace, in the property "uname". To avoid stripping,
+add <code>strip=False</code>. The <code>property</code> argument can be specified
+as a <code>WithProperties</code> object.
+
+ <p>The more advanced usage allows you to specify a function to extract
+properties from the command output. Here you can use regular
+expressions, string interpolation, or whatever you would like.
+The function is called with three arguments: the exit status of the
+command, its standard output as a string, and its standard error as
+a string. It should return a dictionary containing all new properties.
+
+<pre class="example"> def glob2list(rc, stdout, stderr):
+ jpgs = [ l.strip() for l in stdout.split('\n') ]
+ return { 'jpgs' : jpgs }
+ f.addStep(SetProperty(command="ls -1 *.jpg", extract_fn=glob2list))
+</pre>
+ <p>Note that any ordering relationship of the contents of stdout and
+stderr is lost. For example, given
+
+<pre class="example"> f.addStep(SetProperty(
+ command="echo output1; echo error &gt;&amp;2; echo output2",
+ extract_fn=my_extract))
+</pre>
+ <p>Then <code>my_extract</code> will see <code>stdout="output1\noutput2\n"</code>
+and <code>stderr="error\n"</code>.
+
+<div class="node">
+<p><hr>
+<a name="Python-BuildSteps"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Transferring-Files">Transferring Files</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Simple-ShellCommand-Subclasses">Simple ShellCommand Subclasses</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Steps">Build Steps</a>
+
+</div>
+
+<h4 class="subsection">6.1.6 Python BuildSteps</h4>
+
+<p>Here are some BuildSteps that are specifically useful for projects
+implemented in Python.
+
+<ul class="menu">
+<li><a accesskey="1" href="#BuildEPYDoc">BuildEPYDoc</a>
+<li><a accesskey="2" href="#PyFlakes">PyFlakes</a>
+<li><a accesskey="3" href="#PyLint">PyLint</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="BuildEPYDoc"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#PyFlakes">PyFlakes</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Python-BuildSteps">Python BuildSteps</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.6.1 BuildEPYDoc</h5>
+
+<p><a name="index-buildbot_002esteps_002epython_002eBuildEPYDoc-78"></a>
+<a href="http://epydoc.sourceforge.net/">epydoc</a> is a tool for generating
+API documentation for Python modules from their docstrings. It reads
+all the .py files from your source tree, processes the docstrings
+therein, and creates a large tree of .html files (or a single .pdf
+file).
+
+ <p>The <code>buildbot.steps.python.BuildEPYDoc</code> step will run
+<samp><span class="command">epydoc</span></samp> to produce this API documentation, and will count the
+errors and warnings from its output.
+
+ <p>You must supply the command line to be used. The default is
+<samp><span class="command">make epydocs</span></samp>, which assumes that your project has a Makefile
+with an &ldquo;epydocs&rdquo; target. You might wish to use something like
+<samp><span class="command">epydoc -o apiref source/PKGNAME</span></samp> instead. You might also want
+to add <samp><span class="command">--pdf</span></samp> to generate a PDF file instead of a large tree
+of HTML files.
+
+ <p>The API docs are generated in-place in the build tree (under the
+workdir, in the subdirectory controlled by the &ldquo;-o&rdquo; argument). To
+make them useful, you will probably have to copy them to somewhere
+they can be read. A command like <samp><span class="command">rsync -ad apiref/
+dev.example.com:~public_html/current-apiref/</span></samp> might be useful. You
+might instead want to bundle them into a tarball and publish it in the
+same place where the generated install tarball is placed.
+
+<pre class="example"> from buildbot.steps.python import BuildEPYDoc
+
+ ...
+ f.addStep(BuildEPYDoc(command=["epydoc", "-o", "apiref", "source/mypkg"]))
+</pre>
+ <div class="node">
+<p><hr>
+<a name="PyFlakes"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#PyLint">PyLint</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#BuildEPYDoc">BuildEPYDoc</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Python-BuildSteps">Python BuildSteps</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.6.2 PyFlakes</h5>
+
+<p><a name="index-buildbot_002esteps_002epython_002ePyFlakes-79"></a>
+<a href="http://divmod.org/trac/wiki/DivmodPyflakes">PyFlakes</a> is a tool
+to perform basic static analysis of Python code to look for simple
+errors, like missing imports and references of undefined names. It is
+like a fast and simple form of the C &ldquo;lint&rdquo; program. Other tools
+(like pychecker) provide more detailed results but take longer to run.
+
+ <p>The <code>buildbot.steps.python.PyFlakes</code> step will run pyflakes and
+count the various kinds of errors and warnings it detects.
+
+ <p>You must supply the command line to be used. The default is
+<samp><span class="command">make pyflakes</span></samp>, which assumes you have a top-level Makefile
+with a &ldquo;pyflakes&rdquo; target. You might want to use something like
+<samp><span class="command">pyflakes .</span></samp> or <samp><span class="command">pyflakes src</span></samp>.
+
+<pre class="example"> from buildbot.steps.python import PyFlakes
+
+ ...
+ f.addStep(PyFlakes(command=["pyflakes", "src"]))
+</pre>
+ <div class="node">
+<p><hr>
+<a name="PyLint"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#PyFlakes">PyFlakes</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Python-BuildSteps">Python BuildSteps</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.6.3 PyLint</h5>
+
+<p><a name="index-buildbot_002esteps_002epython_002ePyLint-80"></a>
+Similarly, the <code>buildbot.steps.python.PyLint</code> step will run pylint and
+analyze the results.
+
+ <p>You must supply the command line to be used. There is no default.
+
+<pre class="example"> from buildbot.steps.python import PyLint
+
+ ...
+ f.addStep(PyLint(command=["pylint", "src"]))
+</pre>
+ <div class="node">
+<p><hr>
+<a name="Transferring-Files"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Steps-That-Run-on-the-Master">Steps That Run on the Master</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Python-BuildSteps">Python BuildSteps</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Steps">Build Steps</a>
+
+</div>
+
+<h4 class="subsection">6.1.7 Transferring Files</h4>
+
+<p><a name="index-File-Transfer-81"></a><a name="index-buildbot_002esteps_002etransfer_002eFileUpload-82"></a><a name="index-buildbot_002esteps_002etransfer_002eFileDownload-83"></a><a name="index-buildbot_002esteps_002etransfer_002eDirectoryUpload-84"></a>
+Most of the work involved in a build will take place on the
+buildslave. But occasionally it is useful to do some work on the
+buildmaster side. The most basic way to involve the buildmaster is
+simply to move a file from the slave to the master, or vice versa.
+There are a pair of BuildSteps named <code>FileUpload</code> and
+<code>FileDownload</code> to provide this functionality. <code>FileUpload</code>
+moves a file <em>up to</em> the master, while <code>FileDownload</code> moves
+a file <em>down from</em> the master.
+
+ <p>As an example, let's assume that there is a step which produces an
+HTML file within the source tree that contains some sort of generated
+project documentation. We want to move this file to the buildmaster,
+into a <samp><span class="file">~/public_html</span></samp> directory, so it can be visible to
+developers. This file will wind up in the slave-side working directory
+under the name <samp><span class="file">docs/reference.html</span></samp>. We want to put it into the
+master-side <samp><span class="file">~/public_html/ref.html</span></samp>.
+
+<pre class="example"> from buildbot.steps.shell import ShellCommand
+ from buildbot.steps.transfer import FileUpload
+
+ f.addStep(ShellCommand(command=["make", "docs"]))
+ f.addStep(FileUpload(slavesrc="docs/reference.html",
+ masterdest="~/public_html/ref.html"))
+</pre>
+ <p>The <code>masterdest=</code> argument will be passed to os.path.expanduser,
+so things like &ldquo;~&rdquo; will be expanded properly. Non-absolute paths
+will be interpreted relative to the buildmaster's base directory.
+Likewise, the <code>slavesrc=</code> argument will be expanded and
+interpreted relative to the builder's working directory.
+
+ <p>To move a file from the master to the slave, use the
+<code>FileDownload</code> command. For example, let's assume that some step
+requires a configuration file that, for whatever reason, could not be
+recorded in the source code repository or generated on the buildslave
+side:
+
+<pre class="example"> from buildbot.steps.shell import ShellCommand
+ from buildbot.steps.transfer import FileDownload
+
+ f.addStep(FileDownload(mastersrc="~/todays_build_config.txt",
+ slavedest="build_config.txt"))
+ f.addStep(ShellCommand(command=["make", "config"]))
+</pre>
+ <p>Like <code>FileUpload</code>, the <code>mastersrc=</code> argument is interpreted
+relative to the buildmaster's base directory, and the
+<code>slavedest=</code> argument is relative to the builder's working
+directory. If the buildslave is running in <samp><span class="file">~buildslave</span></samp>, and the
+builder's &ldquo;builddir&rdquo; is something like <samp><span class="file">tests-i386</span></samp>, then the
+workdir is going to be <samp><span class="file">~buildslave/tests-i386/build</span></samp>, and a
+<code>slavedest=</code> of <samp><span class="file">foo/bar.html</span></samp> will get put in
+<samp><span class="file">~buildslave/tests-i386/build/foo/bar.html</span></samp>. Both of these commands
+will create any missing intervening directories.
+
+<h4 class="subheading">Other Parameters</h4>
+
+<p>The <code>maxsize=</code> argument lets you set a maximum size for the file
+to be transferred. This may help to avoid surprises: transferring a
+100MB coredump when you were expecting to move a 10kB status file
+might take an awfully long time. The <code>blocksize=</code> argument
+controls how the file is sent over the network: larger blocksizes are
+slightly more efficient but also consume more memory on each end, and
+there is a hard-coded limit of about 640kB.
+
+ <p>The <code>mode=</code> argument allows you to control the access permissions
+of the target file, traditionally expressed as an octal integer. The
+most common value is probably 0755, which sets the &ldquo;x&rdquo; executable
+bit on the file (useful for shell scripts and the like). The default
+value for <code>mode=</code> is None, which means the permission bits will
+default to whatever the umask of the writing process is. The default
+umask tends to be fairly restrictive, but at least on the buildslave
+you can make it less restrictive with a &ndash;umask command-line option at
+creation time (see <a href="#Buildslave-Options">Buildslave Options</a>).
+
+<h4 class="subheading">Transferring Directories</h4>
+
+<p>To transfer complete directories from the buildslave to the master, there
+is a BuildStep named <code>DirectoryUpload</code>. It works like <code>FileUpload</code>,
+just for directories. However it does not support the <code>maxsize</code>,
+<code>blocksize</code> and <code>mode</code> arguments. As an example, let's assume some
+generated project documentation, which consists of many files (like the output
+of doxygen or epydoc). We want to move the entire documentation to the
+buildmaster, into a <code>~/public_html/docs</code> directory. On the slave-side
+the directory can be found under <code>docs</code>:
+
+<pre class="example"> from buildbot.steps.shell import ShellCommand
+ from buildbot.steps.transfer import DirectoryUpload
+
+ f.addStep(ShellCommand(command=["make", "docs"]))
+ f.addStep(DirectoryUpload(slavesrc="docs",
+ masterdest="~/public_html/docs"))
+</pre>
+ <p>The DirectoryUpload step will create all necessary directories and
+transfer empty directories, too.
+
+<div class="node">
+<p><hr>
+<a name="Steps-That-Run-on-the-Master"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Triggering-Schedulers">Triggering Schedulers</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Transferring-Files">Transferring Files</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Steps">Build Steps</a>
+
+</div>
+
+<h4 class="subsection">6.1.8 Steps That Run on the Master</h4>
+
+<p>Occasionally, it is useful to execute some task on the master, for example to
+create a directory, deploy a build result, or trigger some other centralized
+processing. This is possible, in a limited fashion, with the
+<code>MasterShellCommand</code> step.
+
+ <p>This step operates similarly to a regular <code>ShellCommand</code>, but executes on
+the master, instead of the slave. To be clear, the enclosing <code>Build</code>
+object must still have a slave object, just as for any other step &ndash; only, in
+this step, the slave does not do anything.
+
+ <p>In this example, the step renames a tarball based on the day of the week.
+
+<pre class="example"> from buildbot.steps.transfer import FileUpload
+ from buildbot.steps.master import MasterShellCommand
+
+ f.addStep(FileUpload(slavesrc="widgetsoft.tar.gz",
+ masterdest="/var/buildoutputs/widgetsoft-new.tar.gz"))
+ f.addStep(MasterShellCommand(command="""
+ cd /var/buildoutputs;
+ mv widgetsoft-new.tar.gz widgetsoft-`date +%a`.tar.gz"""))
+</pre>
+ <div class="node">
+<p><hr>
+<a name="Triggering-Schedulers"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Writing-New-BuildSteps">Writing New BuildSteps</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Steps-That-Run-on-the-Master">Steps That Run on the Master</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Steps">Build Steps</a>
+
+</div>
+
+<h4 class="subsection">6.1.9 Triggering Schedulers</h4>
+
+<p>The counterpart to the Triggerable described in
+<a href="#Triggerable-Scheduler">Triggerable Scheduler</a> is the Trigger BuildStep.
+
+<pre class="example"> from buildbot.steps.trigger import Trigger
+ f.addStep(Trigger(schedulerNames=['build-prep'],
+ waitForFinish=True,
+ updateSourceStamp=True))
+</pre>
+ <p>The <code>schedulerNames=</code> argument lists the Triggerables
+that should be triggered when this step is executed. Note that
+it is possible, but not advisable, to create a cycle where a build
+continually triggers itself, because the schedulers are specified
+by name.
+
+ <p>If <code>waitForFinish</code> is True, then the step will not finish until
+all of the builds from the triggered schedulers have finished. If this
+argument is False (the default) or not given, then the buildstep
+succeeds immediately after triggering the schedulers.
+
+ <p>If <code>updateSourceStamp</code> is True (the default), then step updates
+the SourceStamp given to the Triggerables to include
+<code>got_revision</code> (the revision actually used in this build) as
+<code>revision</code> (the revision to use in the triggered builds). This is
+useful to ensure that all of the builds use exactly the same
+SourceStamp, even if other Changes have occurred while the build was
+running.
+
+<div class="node">
+<p><hr>
+<a name="Writing-New-BuildSteps"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Triggering-Schedulers">Triggering Schedulers</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Steps">Build Steps</a>
+
+</div>
+
+<h4 class="subsection">6.1.10 Writing New BuildSteps</h4>
+
+<p>While it is a good idea to keep your build process self-contained in
+the source code tree, sometimes it is convenient to put more
+intelligence into your Buildbot configuration. One way to do this is
+to write a custom BuildStep. Once written, this Step can be used in
+the <samp><span class="file">master.cfg</span></samp> file.
+
+ <p>The best reason for writing a custom BuildStep is to better parse the
+results of the command being run. For example, a BuildStep that knows
+about JUnit could look at the logfiles to determine which tests had
+been run, how many passed and how many failed, and then report more
+detailed information than a simple <code>rc==0</code> -based &ldquo;good/bad&rdquo;
+decision.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Writing-BuildStep-Constructors">Writing BuildStep Constructors</a>
+<li><a accesskey="2" href="#BuildStep-LogFiles">BuildStep LogFiles</a>
+<li><a accesskey="3" href="#Reading-Logfiles">Reading Logfiles</a>
+<li><a accesskey="4" href="#Adding-LogObservers">Adding LogObservers</a>
+<li><a accesskey="5" href="#BuildStep-URLs">BuildStep URLs</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Writing-BuildStep-Constructors"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#BuildStep-LogFiles">BuildStep LogFiles</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Writing-New-BuildSteps">Writing New BuildSteps</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.10.1 Writing BuildStep Constructors</h5>
+
+<p>BuildStep classes have some extra equipment, because they are their own
+factories. Consider the use of a BuildStep in <samp><span class="file">master.cfg</span></samp>:
+
+<pre class="example"> f.addStep(MyStep(someopt="stuff", anotheropt=1))
+</pre>
+ <p>This creates a single instance of class <code>MyStep</code>. However, Buildbot needs
+a new object each time the step is executed. This is accomplished by storing
+the information required to instantiate a new object in the <code>factory</code>
+attribute. When the time comes to construct a new Build, BuildFactory consults
+this attribute (via <code>getStepFactory</code>) and instantiates a new step object.
+
+ <p>When writing a new step class, then, keep in mind that you cannot do
+anything "interesting" in the constructor &ndash; limit yourself to checking and
+storing arguments. To ensure that these arguments are provided to any new
+objects, call <code>self.addFactoryArguments</code> with any keyword arguments your
+constructor needs.
+
+ <p>Keep a <code>**kwargs</code> argument on the end of your options, and pass that up to
+the parent class's constructor.
+
+ <p>The whole thing looks like this:
+
+<pre class="example">     class Frobnify(LoggingBuildStep):
+        def __init__(self,
+                frob_what="frobee",
+                frob_how_many=None,
+                frob_how=None,
+                **kwargs):
+
+            # check
+            if frob_how_many is None:
+                raise TypeError("Frobnify argument frob_how_many is required")
+
+            # call parent
+            LoggingBuildStep.__init__(self, **kwargs)
+
+            # and record arguments for later
+            self.addFactoryArguments(
+                frob_what=frob_what,
+                frob_how_many=frob_how_many,
+                frob_how=frob_how)
+
+     class FastFrobnify(Frobnify):
+        def __init__(self,
+                speed=5,
+                **kwargs):
+            Frobnify.__init__(self, **kwargs)
+            self.addFactoryArguments(
+                speed=speed)
+</pre>
+ <div class="node">
+<p><hr>
+<a name="BuildStep-LogFiles"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Reading-Logfiles">Reading Logfiles</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Writing-BuildStep-Constructors">Writing BuildStep Constructors</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Writing-New-BuildSteps">Writing New BuildSteps</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.10.2 BuildStep LogFiles</h5>
+
+<p>Each BuildStep has a collection of &ldquo;logfiles&rdquo;. Each one has a short
+name, like &ldquo;stdio&rdquo; or &ldquo;warnings&rdquo;. Each LogFile contains an
+arbitrary amount of text, usually the contents of some output file
+generated during a build or test step, or a record of everything that
+was printed to stdout/stderr during the execution of some command.
+
+ <p>These LogFiles are stored to disk, so they can be retrieved later.
+
+ <p>Each can contain multiple &ldquo;channels&rdquo;, generally limited to three
+basic ones: stdout, stderr, and &ldquo;headers&rdquo;. For example, when a
+ShellCommand runs, it writes a few lines to the &ldquo;headers&rdquo; channel to
+indicate the exact argv strings being run, which directory the command
+is being executed in, and the contents of the current environment
+variables. Then, as the command runs, it adds a lot of &ldquo;stdout&rdquo; and
+&ldquo;stderr&rdquo; messages. When the command finishes, a final &ldquo;header&rdquo;
+line is added with the exit code of the process.
+
+ <p>Status display plugins can format these different channels in
+different ways. For example, the web page shows LogFiles as text/html,
+with header lines in blue text, stdout in black, and stderr in red. A
+different URL is available which provides a text/plain format, in
+which stdout and stderr are collapsed together, and header lines are
+stripped completely. This latter option makes it easy to save the
+results to a file and run <samp><span class="command">grep</span></samp> or whatever against the
+output.
+
+ <p>Each BuildStep contains a mapping (implemented in a python dictionary)
+from LogFile name to the actual LogFile objects. Status plugins can
+get a list of LogFiles to display, for example, a list of HREF links
+that, when clicked, provide the full contents of the LogFile.
+
+<h3 class="heading">Using LogFiles in custom BuildSteps</h3>
+
+<p>The most common way for a custom BuildStep to use a LogFile is to
+summarize the results of a ShellCommand (after the command has
+finished running). For example, a compile step with thousands of lines
+of output might want to create a summary of just the warning messages.
+If you were doing this from a shell, you would use something like:
+
+<pre class="example"> grep "warning:" output.log &gt;warnings.log
+</pre>
+ <p>In a custom BuildStep, you could instead create a &ldquo;warnings&rdquo; LogFile
+that contained the same text. To do this, you would add code to your
+<code>createSummary</code> method that pulls lines from the main output log
+and creates a new LogFile with the results:
+
+<pre class="example"> def createSummary(self, log):
+ warnings = []
+ for line in log.readlines():
+ if "warning:" in line:
+ warnings.append(line)
+ self.addCompleteLog('warnings', "".join(warnings))
+</pre>
+ <p>This example uses the <code>addCompleteLog</code> method, which creates a
+new LogFile, puts some text in it, and then &ldquo;closes&rdquo; it, meaning
+that no further contents will be added. This LogFile will appear in
+the HTML display under an HREF with the name &ldquo;warnings&rdquo;, since that
+is the name of the LogFile.
+
+ <p>You can also use <code>addHTMLLog</code> to create a complete (closed)
+LogFile that contains HTML instead of plain text. The normal LogFile
+will be HTML-escaped if presented through a web page, but the HTML
+LogFile will not. At the moment this is only used to present a pretty
+HTML representation of an otherwise ugly exception traceback when
+something goes badly wrong during the BuildStep.
+
+ <p>In contrast, you might want to create a new LogFile at the beginning
+of the step, and add text to it as the command runs. You can create
+the LogFile and attach it to the build by calling <code>addLog</code>, which
+returns the LogFile object. You then add text to this LogFile by
+calling methods like <code>addStdout</code> and <code>addHeader</code>. When you
+are done, you must call the <code>finish</code> method so the LogFile can be
+closed. It may be useful to create and populate a LogFile like this
+from a LogObserver method (see <a href="#Adding-LogObservers">Adding LogObservers</a>).
+
+ <p>The <code>logfiles=</code> argument to <code>ShellCommand</code> (see
+<a href="#ShellCommand">ShellCommand</a>) creates new LogFiles and fills them in realtime
+by asking the buildslave to watch an actual file on disk. The
+buildslave will look for additions in the target file and report them
+back to the BuildStep. These additions will be added to the LogFile by
+calling <code>addStdout</code>. These secondary LogFiles can be used as the
+source of a LogObserver just like the normal &ldquo;stdio&rdquo; LogFile.
+
+<div class="node">
+<p><hr>
+<a name="Reading-Logfiles"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Adding-LogObservers">Adding LogObservers</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#BuildStep-LogFiles">BuildStep LogFiles</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Writing-New-BuildSteps">Writing New BuildSteps</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.10.3 Reading Logfiles</h5>
+
+<p>Once a LogFile has been added to a BuildStep with <code>addLog()</code>,
+<code>addCompleteLog()</code>, <code>addHTMLLog()</code>, or <code>logfiles=</code>,
+your BuildStep can retrieve it by using <code>getLog()</code>:
+
+<pre class="example"> class MyBuildStep(ShellCommand):
+ logfiles = { "nodelog": "_test/node.log" }
+
+ def evaluateCommand(self, cmd):
+ nodelog = self.getLog("nodelog")
+ if "STARTED" in nodelog.getText():
+ return SUCCESS
+ else:
+ return FAILURE
+</pre>
+ <p>For a complete list of the methods you can call on a LogFile, please
+see the docstrings on the <code>IStatusLog</code> class in
+<samp><span class="file">buildbot/interfaces.py</span></samp>.
+
+<div class="node">
+<p><hr>
+<a name="Adding-LogObservers"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#BuildStep-URLs">BuildStep URLs</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Reading-Logfiles">Reading Logfiles</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Writing-New-BuildSteps">Writing New BuildSteps</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.10.4 Adding LogObservers</h5>
+
+<p><a name="index-LogObserver-85"></a><a name="index-LogLineObserver-86"></a>
+Most shell commands emit messages to stdout or stderr as they operate,
+especially if you ask them nicely with a <code>--verbose</code> flag of some
+sort. They may also write text to a log file while they run. Your
+BuildStep can watch this output as it arrives, to keep track of how
+much progress the command has made. You can get a better measure of
+progress by counting the number of source files compiled or test cases
+run than by merely tracking the number of bytes that have been written
+to stdout. This improves the accuracy and the smoothness of the ETA
+display.
+
+ <p>To accomplish this, you will need to attach a <code>LogObserver</code> to
+one of the log channels, most commonly to the &ldquo;stdio&rdquo; channel but
+perhaps to another one which tracks a log file. This observer is given
+all text as it is emitted from the command, and has the opportunity to
+parse that output incrementally. Once the observer has decided that
+some event has occurred (like a source file being compiled), it can
+use the <code>setProgress</code> method to tell the BuildStep about the
+progress that this event represents.
+
+ <p>There are a number of pre-built <code>LogObserver</code> classes that you
+can choose from (defined in <code>buildbot.process.buildstep</code>), and of
+course you can subclass them to add further customization. The
+<code>LogLineObserver</code> class handles the grunt work of buffering and
+scanning for end-of-line delimiters, allowing your parser to operate
+on complete stdout/stderr lines. (Lines longer than a set maximum
+length are dropped; the maximum defaults to 16384 bytes, but you can
+change it by calling <code>setMaxLineLength()</code> on your
+<code>LogLineObserver</code> instance. Use <code>sys.maxint</code> for effective
+infinity.)
+
+ <p>For example, let's take a look at the <code>TrialTestCaseCounter</code>,
+which is used by the Trial step to count test cases as they are run.
+As Trial executes, it emits lines like the following:
+
+<pre class="example"> buildbot.test.test_config.ConfigTest.testDebugPassword ... [OK]
+ buildbot.test.test_config.ConfigTest.testEmpty ... [OK]
+ buildbot.test.test_config.ConfigTest.testIRC ... [FAIL]
+ buildbot.test.test_config.ConfigTest.testLocks ... [OK]
+</pre>
+ <p>When the tests are finished, trial emits a long line of &ldquo;======&rdquo; and
+then some lines which summarize the tests that failed. We want to
+avoid parsing these trailing lines, because their format is less
+well-defined than the &ldquo;[OK]&rdquo; lines.
+
+ <p>The parser class looks like this:
+
+<pre class="example"> from buildbot.process.buildstep import LogLineObserver
+
+ class TrialTestCaseCounter(LogLineObserver):
+ _line_re = re.compile(r'^([\w\.]+) \.\.\. \[([^\]]+)\]$')
+ numTests = 0
+ finished = False
+
+ def outLineReceived(self, line):
+ if self.finished:
+ return
+ if line.startswith("=" * 40):
+ self.finished = True
+ return
+
+ m = self._line_re.search(line.strip())
+ if m:
+ testname, result = m.groups()
+ self.numTests += 1
+ self.step.setProgress('tests', self.numTests)
+</pre>
+ <p>This parser only pays attention to stdout, since that's where trial
+writes the progress lines. It has a mode flag named <code>finished</code> to
+ignore everything after the &ldquo;====&rdquo; marker, and a scary-looking
+regular expression to match each line while hopefully ignoring other
+messages that might get displayed as the test runs.
+
+ <p>Each time it identifies a test has been completed, it increments its
+counter and delivers the new progress value to the step with
+<code>self.step.setProgress</code>. This class is specifically measuring
+progress along the &ldquo;tests&rdquo; metric, in units of test cases (as
+opposed to other kinds of progress like the &ldquo;output&rdquo; metric, which
+measures in units of bytes). The Progress-tracking code uses each
+progress metric separately to come up with an overall completion
+percentage and an ETA value.
+
+ <p>To connect this parser into the <code>Trial</code> BuildStep,
+<code>Trial.__init__</code> ends with the following clause:
+
+<pre class="example"> # this counter will feed Progress along the 'test cases' metric
+ counter = TrialTestCaseCounter()
+ self.addLogObserver('stdio', counter)
+ self.progressMetrics += ('tests',)
+</pre>
+ <p>This creates a TrialTestCaseCounter and tells the step that the
+counter wants to watch the &ldquo;stdio&rdquo; log. The observer is
+automatically given a reference to the step in its <code>.step</code>
+attribute.
+
+<h4 class="subheading">A Somewhat Whimsical Example</h4>
+
+<p>Let's say that we've got some snazzy new unit-test framework called
+Framboozle. It's the hottest thing since sliced bread. It slices, it
+dices, it runs unit tests like there's no tomorrow. Plus if your unit
+tests fail, you can use its name for a Web 2.1 startup company, make
+millions of dollars, and hire engineers to fix the bugs for you, while
+you spend your afternoons lazily hang-gliding along a scenic pacific
+beach, blissfully unconcerned about the state of your
+tests.<a rel="footnote" href="#fn-8" name="fnd-8"><sup>8</sup></a>
+
+ <p>To run a Framboozle-enabled test suite, you just run the 'framboozler'
+command from the top of your source code tree. The 'framboozler'
+command emits a bunch of stuff to stdout, but the most interesting bit
+is that it emits the line "FNURRRGH!" every time it finishes running a
+test case<a rel="footnote" href="#fn-9" name="fnd-9"><sup>9</sup></a>. You'd like to have a test-case counting LogObserver that
+watches for these lines and counts them, because counting them will
+help the buildbot more accurately calculate how long the build will
+take, and this will let you know exactly how long you can sneak out of
+the office for your hang-gliding lessons without anyone noticing that
+you're gone.
+
+ <p>This will involve writing a new BuildStep (probably named
+"Framboozle") which inherits from ShellCommand. The BuildStep class
+definition itself will look something like this:
+
+<pre class="example"> # START
+ from buildbot.steps.shell import ShellCommand
+ from buildbot.process.buildstep import LogLineObserver
+
+ class FNURRRGHCounter(LogLineObserver):
+ numTests = 0
+ def outLineReceived(self, line):
+ if "FNURRRGH!" in line:
+ self.numTests += 1
+ self.step.setProgress('tests', self.numTests)
+
+ class Framboozle(ShellCommand):
+ command = ["framboozler"]
+
+ def __init__(self, **kwargs):
+ ShellCommand.__init__(self, **kwargs) # always upcall!
+ counter = FNURRRGHCounter()
+ self.addLogObserver('stdio', counter)
+ self.progressMetrics += ('tests',)
+ # FINISH
+</pre>
+ <p>So that's the code that we want to wind up using. How do we actually
+deploy it?
+
+ <p>You have a couple of different options.
+
+ <p>Option 1: The simplest technique is to simply put this text
+(everything from START to FINISH) in your master.cfg file, somewhere
+before the BuildFactory definition where you actually use it in a
+clause like:
+
+<pre class="example"> f = BuildFactory()
+ f.addStep(SVN(svnurl="stuff"))
+ f.addStep(Framboozle())
+</pre>
+ <p>Remember that master.cfg is secretly just a python program with one
+job: populating the BuildmasterConfig dictionary. And python programs
+are allowed to define as many classes as they like. So you can define
+classes and use them in the same file, just as long as the class is
+defined before some other code tries to use it.
+
+ <p>This is easy, and it keeps the point of definition very close to the
+point of use, and whoever replaces you after that unfortunate
+hang-gliding accident will appreciate being able to easily figure out
+what the heck this stupid "Framboozle" step is doing anyways. The
+downside is that every time you reload the config file, the Framboozle
+class will get redefined, which means that the buildmaster will think
+that you've reconfigured all the Builders that use it, even though
+nothing changed. Bleh.
+
+ <p>Option 2: Instead, we can put this code in a separate file, and import
+it into the master.cfg file just like we would the normal buildsteps
+like ShellCommand and SVN.
+
+ <p>Create a directory named ~/lib/python, put everything from START to
+FINISH in ~/lib/python/framboozle.py, and run your buildmaster using:
+
+<pre class="example"> PYTHONPATH=~/lib/python buildbot start MASTERDIR
+</pre>
+ <p>or use the <samp><span class="file">Makefile.buildbot</span></samp> to control the way
+<samp><span class="command">buildbot start</span></samp> works. Or add something like this to
+something like your ~/.bashrc or ~/.bash_profile or ~/.cshrc:
+
+<pre class="example"> export PYTHONPATH=~/lib/python
+</pre>
+ <p>Once we've done this, our master.cfg can look like:
+
+<pre class="example"> from framboozle import Framboozle
+ f = BuildFactory()
+ f.addStep(SVN(svnurl="stuff"))
+ f.addStep(Framboozle())
+</pre>
+ <p>or:
+
+<pre class="example"> import framboozle
+ f = BuildFactory()
+ f.addStep(SVN(svnurl="stuff"))
+ f.addStep(framboozle.Framboozle())
+</pre>
+ <p>(check out the python docs for details about how "import" and "from A
+import B" work).
+
+ <p>What we've done here is to tell python that every time it handles an
+"import" statement for some named module, it should look in our
+~/lib/python/ for that module before it looks anywhere else. After our
+directories, it will try in a bunch of standard directories too
+(including the one where buildbot is installed). By setting the
+PYTHONPATH environment variable, you can add directories to the front
+of this search list.
+
+ <p>Python knows that once it "import"s a file, it doesn't need to
+re-import it again. This means that reconfiguring the buildmaster
+(with "buildbot reconfig", for example) won't make it think the
+Framboozle class has changed every time, so the Builders that use it
+will not be spuriously restarted. On the other hand, you either have
+to start your buildmaster in a slightly weird way, or you have to
+modify your environment to set the PYTHONPATH variable.
+
+ <p>Option 3: Install this code into a standard python library directory
+
+ <p>Find out what your python's standard include path is by asking it:
+
+<pre class="example"> 80:warner@luther% python
+ Python 2.4.4c0 (#2, Oct 2 2006, 00:57:46)
+ [GCC 4.1.2 20060928 (prerelease) (Debian 4.1.1-15)] on linux2
+ Type "help", "copyright", "credits" or "license" for more information.
+ &gt;&gt;&gt; import sys
+ &gt;&gt;&gt; import pprint
+ &gt;&gt;&gt; pprint.pprint(sys.path)
+ ['',
+ '/usr/lib/python24.zip',
+ '/usr/lib/python2.4',
+ '/usr/lib/python2.4/plat-linux2',
+ '/usr/lib/python2.4/lib-tk',
+ '/usr/lib/python2.4/lib-dynload',
+ '/usr/local/lib/python2.4/site-packages',
+ '/usr/lib/python2.4/site-packages',
+ '/usr/lib/python2.4/site-packages/Numeric',
+ '/var/lib/python-support/python2.4',
+ '/usr/lib/site-python']
+</pre>
+ <p>In this case, putting the code into
+/usr/local/lib/python2.4/site-packages/framboozle.py would work just
+fine. We can use the same master.cfg "import framboozle" statement as
+in Option 2. By putting it in a standard include directory (instead of
+the decidedly non-standard ~/lib/python), we don't even have to set
+PYTHONPATH to anything special. The downside is that you probably have
+to be root to write to one of those standard include directories.
+
+ <p>Option 4: Submit the code for inclusion in the Buildbot distribution
+
+ <p>Make a fork of buildbot on http://github.com/djmitche/buildbot or post a patch
+in a bug at http://buildbot.net. In either case, post a note about your patch
+to the mailing list, so others can provide feedback and, eventually, commit it.
+
+<pre class="example"> from buildbot.steps import framboozle
+ f = BuildFactory()
+ f.addStep(SVN(svnurl="stuff"))
+ f.addStep(framboozle.Framboozle())
+</pre>
+ <p>And then you don't even have to install framboozle.py anywhere on your
+system, since it will ship with Buildbot. You don't have to be root,
+you don't have to set PYTHONPATH. But you do have to make a good case
+for Framboozle being worth going into the main distribution, you'll
+probably have to provide docs and some unit test cases, you'll need to
+figure out what kind of beer the author likes, and then you'll have to
+wait until the next release. But in some environments, all this is
+easier than getting root on your buildmaster box, so the tradeoffs may
+actually be worth it.
+
+ <p>Putting the code in master.cfg (1) makes it available to that
+buildmaster instance. Putting it in a file in a personal library
+directory (2) makes it available for any buildmasters you might be
+running. Putting it in a file in a system-wide shared library
+directory (3) makes it available for any buildmasters that anyone on
+that system might be running. Getting it into the buildbot's upstream
+repository (4) makes it available for any buildmasters that anyone in
+the world might be running. It's all a matter of how widely you want
+to deploy that new class.
+
+<div class="node">
+<p><hr>
+<a name="BuildStep-URLs"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Adding-LogObservers">Adding LogObservers</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Writing-New-BuildSteps">Writing New BuildSteps</a>
+
+</div>
+
+<h5 class="subsubsection">6.1.10.5 BuildStep URLs</h5>
+
+<p><a name="index-links-87"></a><a name="index-BuildStep-URLs-88"></a><a name="index-addURL-89"></a>
+Each BuildStep has a collection of &ldquo;links&rdquo;. Like its collection of
+LogFiles, each link has a name and a target URL. The web status page
+creates HREFs for each link in the same box as it does for LogFiles,
+except that the target of the link is the external URL instead of an
+internal link to a page that shows the contents of the LogFile.
+
+ <p>These external links can be used to point at build information hosted
+on other servers. For example, the test process might produce an
+intricate description of which tests passed and failed, or some sort
+of code coverage data in HTML form, or a PNG or GIF image with a graph
+of memory usage over time. The external link can provide an easy way
+for users to navigate from the buildbot's status page to these
+external web sites or file servers. Note that the step itself is
+responsible for ensuring that there will be a document available at
+the given URL (perhaps by using <samp><span class="command">scp</span></samp> to copy the HTML output
+to a <samp><span class="file">~/public_html/</span></samp> directory on a remote web server). Calling
+<code>addURL</code> does not magically populate a web server.
+
+ <p>To set one of these links, the BuildStep should call the <code>addURL</code>
+method with the name of the link and the target URL. Multiple URLs can
+be set.
+
+ <p>In this example, we assume that the <samp><span class="command">make test</span></samp> command causes
+a collection of HTML files to be created and put somewhere on the
+coverage.example.org web server, in a filename that incorporates the
+build number.
+
+<pre class="example"> class TestWithCodeCoverage(BuildStep):
+ command = ["make", "test",
+ WithProperties("buildnum=%s", "buildnumber")]
+
+ def createSummary(self, log):
+ buildnumber = self.getProperty("buildnumber")
+ url = "http://coverage.example.org/builds/%s.html" % buildnumber
+ self.addURL("coverage", url)
+</pre>
+ <p>You might also want to extract the URL from some special message
+output by the build process itself:
+
+<pre class="example"> class TestWithCodeCoverage(BuildStep):
+ command = ["make", "test",
+ WithProperties("buildnum=%s", "buildnumber")]
+
+ def createSummary(self, log):
+ output = StringIO(log.getText())
+ for line in output.readlines():
+ if line.startswith("coverage-url:"):
+ url = line[len("coverage-url:"):].strip()
+ self.addURL("coverage", url)
+ return
+</pre>
+ <p>Note that a build process which emits both stdout and stderr might
+cause this line to be split or interleaved between other lines. It
+might be necessary to restrict the getText() call to only stdout with
+something like this:
+
+<pre class="example"> output = StringIO("".join([c[1]
+ for c in log.getChunks()
+ if c[0] == LOG_CHANNEL_STDOUT]))
+</pre>
+ <p>Of course if the build is run under a PTY, then stdout and stderr will
+be merged before the buildbot ever sees them, so such interleaving
+will be unavoidable.
+
+<div class="node">
+<p><hr>
+<a name="Interlocks"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Build-Factories">Build Factories</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Build-Steps">Build Steps</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Process">Build Process</a>
+
+</div>
+
+<h3 class="section">6.2 Interlocks</h3>
+
+<p><a name="index-locks-90"></a><a name="index-buildbot_002elocks_002eMasterLock-91"></a><a name="index-buildbot_002elocks_002eSlaveLock-92"></a><a name="index-buildbot_002elocks_002eLockAccess-93"></a>
+Until now, we assumed that a master can run builds at any slave whenever
+needed or desired. Sometimes, you want to enforce additional constraints on
+builds. For reasons like limited network bandwidth, old slave machines, or a
+self-willed data base server, you may want to limit the number of builds (or
+build steps) that can access a resource.
+
+ <p>The mechanism used by Buildbot is known as the read/write lock.<a rel="footnote" href="#fn-10" name="fnd-10"><sup>10</sup></a> It
+allows either many readers or a single writer but not a combination of readers
+and writers. The general lock has been modified and extended for use in
+Buildbot. Firstly, the general lock allows an infinite number of readers. In
+Buildbot, we often want to put an upper limit on the number of readers, for
+example allowing two out of five possible builds at the same time. To do this,
+the lock counts the number of active readers. Secondly, the terms <em>read
+mode</em> and <em>write mode</em> are confusing in Buildbot context. They have been
+replaced by <em>counting mode</em> (since the lock counts them) and <em>exclusive
+mode</em>. As a result of these changes, locks in Buildbot allow a number of
+builds (up to some fixed number) in counting mode, or they allow one build in
+exclusive mode.
+
+ <p>Often, not all slaves are equal. To allow for this situation, Buildbot allows
+to have a separate upper limit on the count for each slave. In this way, you
+can have at most 3 concurrent builds at a fast slave, 2 at a slightly older
+slave, and 1 at all other slaves.
+
+ <p>The final thing you can specify when you introduce a new lock is its scope.
+Some constraints are global &ndash; they must be enforced over all slaves. Other
+constraints are local to each slave. A <em>master lock</em> is used for the
+global constraints. You can ensure for example that at most one build (of all
+builds running at all slaves) accesses the data base server. With a
+<em>slave lock</em> you can add a limit local to each slave. With such a lock,
+you can for example enforce an upper limit to the number of active builds at a
+slave, like above.
+
+ <p>Time for a few examples. Below a master lock is defined to protect a data base,
+and a slave lock is created to limit the number of builds at each slave.
+
+<pre class="example"> from buildbot import locks
+
+ db_lock = locks.MasterLock("database")
+ build_lock = locks.SlaveLock("slave_builds",
+ maxCount = 1,
+ maxCountForSlave = { 'fast': 3, 'new': 2 })
+</pre>
+ <p>After importing locks from buildbot, <code>db_lock</code> is defined to be a master
+lock. The <code>"database"</code> string is used for uniquely identifying the lock.
+At the next line, a slave lock called <code>build_lock</code> is created. It is
+identified by the <code>"slave_builds"</code> string. Since the requirements of the
+lock are a bit more complicated, two optional arguments are also specified. The
+<code>maxCount</code> parameter sets the default limit for builds in counting mode to
+<code>1</code>. For the slave called <code>'fast'</code> however, we want to have at most
+three builds, and for the slave called <code>'new'</code> the upper limit is two
+builds running at the same time.
+
+ <p>The next step is using the locks in builds. Buildbot allows a lock to be used
+during an entire build (from beginning to end), or only during a single build
+step. In the latter case, the lock is claimed for use just before the step
+starts, and released again when the step ends. To prevent
+deadlocks,<a rel="footnote" href="#fn-11" name="fnd-11"><sup>11</sup></a> it is not possible to claim or release
+locks at other times.
+
+ <p>To use locks, you should add them with a <code>locks</code> argument.
+Each use of a lock is either in counting mode (that is, possibly shared with
+other builds) or in exclusive mode. A build or build step proceeds only when it
+has acquired all locks. If a build or step needs a lot of locks, it may be
+starved<a rel="footnote" href="#fn-12" name="fnd-12"><sup>12</sup></a> by other builds that need fewer locks.
+
+ <p>To illustrate use of locks, a few examples.
+
+<pre class="example"> from buildbot import locks
+ from buildbot.steps import source, shell
+ from buildbot.process import factory
+
+ db_lock = locks.MasterLock("database")
+ build_lock = locks.SlaveLock("slave_builds",
+ maxCount = 1,
+ maxCountForSlave = { 'fast': 3, 'new': 2 })
+
+ f = factory.BuildFactory()
+ f.addStep(source.SVN(svnurl="http://example.org/svn/Trunk"))
+ f.addStep(shell.ShellCommand(command="make all"))
+ f.addStep(shell.ShellCommand(command="make test",
+ locks=[db_lock.access('exclusive')]))
+
+ b1 = {'name': 'full1', 'slavename': 'fast', 'builddir': 'f1', 'factory': f,
+ 'locks': [build_lock.access('counting')] }
+
+ b2 = {'name': 'full2', 'slavename': 'new', 'builddir': 'f2', 'factory': f,
+ 'locks': [build_lock.access('counting')] }
+
+ b3 = {'name': 'full3', 'slavename': 'old', 'builddir': 'f3', 'factory': f,
+ 'locks': [build_lock.access('counting')] }
+
+ b4 = {'name': 'full4', 'slavename': 'other', 'builddir': 'f4', 'factory': f,
+ 'locks': [build_lock.access('counting')] }
+
+ c['builders'] = [b1, b2, b3, b4]
+</pre>
+ <p>Here we have four builders <code>b1</code>, <code>b2</code>, <code>b3</code>, and <code>b4</code>. Each
+builder performs the same checkout, make, and test build step sequence.
+We want to enforce that at most one test step is executed between all slaves due
+to restrictions with the data base server. This is done by adding the
+<code>locks=</code> parameter with the third step. It takes a list of locks with their
+access mode. In this case only the <code>db_lock</code> is needed. The exclusive
+access mode is used to ensure there is at most one slave that executes the test
+step.
+
+ <p>In addition to exclusive accessing the data base, we also want slaves to stay
+responsive even under the load of a large number of builds being triggered.
+For this purpose, the slave lock called <code>build_lock</code> is defined. Since
+the restraint holds for entire builds, the lock is specified in the builder
+with <code>'locks': [build_lock.access('counting')]</code>.
+<div class="node">
+<p><hr>
+<a name="Build-Factories"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Interlocks">Interlocks</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Process">Build Process</a>
+
+</div>
+
+<h3 class="section">6.3 Build Factories</h3>
+
+<p>Each Builder is equipped with a &ldquo;build factory&rdquo;, which is
+responsible for producing the actual <code>Build</code> objects that perform
+each build. This factory is created in the configuration file, and
+attached to a Builder through the <code>factory</code> element of its
+dictionary.
+
+ <p>The standard <code>BuildFactory</code> object creates <code>Build</code> objects
+by default. These Builds will each execute a collection of BuildSteps
+in a fixed sequence. Each step can affect the results of the build,
+but in general there is little intelligence to tie the different steps
+together. You can create subclasses of <code>Build</code> to implement more
+sophisticated build processes, and then use a subclass of
+<code>BuildFactory</code> (or simply set the <code>buildClass</code> attribute) to
+create instances of your new Build subclass.
+
+<ul class="menu">
+<li><a accesskey="1" href="#BuildStep-Objects">BuildStep Objects</a>
+<li><a accesskey="2" href="#BuildFactory">BuildFactory</a>
+<li><a accesskey="3" href="#Process_002dSpecific-build-factories">Process-Specific build factories</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="BuildStep-Objects"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#BuildFactory">BuildFactory</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Build-Factories">Build Factories</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Factories">Build Factories</a>
+
+</div>
+
+<h4 class="subsection">6.3.1 BuildStep Objects</h4>
+
+<p>The steps used by these builds are all subclasses of <code>BuildStep</code>.
+The standard ones provided with Buildbot are documented later,
+See <a href="#Build-Steps">Build Steps</a>. You can also write your own subclasses to use in
+builds.
+
+ <p>The basic behavior for a <code>BuildStep</code> is to:
+
+ <ul>
+<li>run for a while, then stop
+<li>possibly invoke some RemoteCommands on the attached build slave
+<li>possibly produce a set of log files
+<li>finish with a status described by one of four values defined in
+buildbot.status.builder: SUCCESS, WARNINGS, FAILURE, SKIPPED
+<li>provide a list of short strings to describe the step
+<li>define a color (generally green, orange, or red) with which the
+step should be displayed
+</ul>
+
+ <p>More sophisticated steps may produce additional information and
+provide it to later build steps, or store it in the factory to provide
+to later builds.
+
+<ul class="menu">
+<li><a accesskey="1" href="#BuildFactory-Attributes">BuildFactory Attributes</a>
+<li><a accesskey="2" href="#Quick-builds">Quick builds</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="BuildFactory"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Process_002dSpecific-build-factories">Process-Specific build factories</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#BuildStep-Objects">BuildStep Objects</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Factories">Build Factories</a>
+
+</div>
+
+<h4 class="subsection">6.3.2 BuildFactory</h4>
+
+<p><a name="index-buildbot_002eprocess_002efactory_002eBuildFactory-94"></a><a name="index-buildbot_002eprocess_002efactory_002eBasicBuildFactory-95"></a><!-- TODO: what is BasicSVN anyway? -->
+<a name="index-buildbot_002eprocess_002efactory_002eBasicSVN-96"></a>
+The default <code>BuildFactory</code>, provided in the
+<code>buildbot.process.factory</code> module, contains an internal list of
+&ldquo;BuildStep specifications&rdquo;: a list of <code>(step_class, kwargs)</code>
+tuples for each. These specification tuples are constructed when the
+config file is read, by asking the instances passed to <code>addStep</code>
+for their subclass and arguments.
+
+ <p>When asked to create a Build, the <code>BuildFactory</code> puts a copy of
+the list of step specifications into the new Build object. When the
+Build is actually started, these step specifications are used to
+create the actual set of BuildSteps, which are then executed one at a
+time. This serves to give each Build an independent copy of each step.
+For example, a build which consists of a CVS checkout followed by a
+<code>make build</code> would be constructed as follows:
+
+<pre class="example"> from buildbot.steps import source, shell
+ from buildbot.process import factory
+
+ f = factory.BuildFactory()
+ f.addStep(source.CVS(cvsroot=CVSROOT, cvsmodule="project", mode="update"))
+ f.addStep(shell.Compile(command=["make", "build"]))
+</pre>
+ <p>(To support config files from buildbot-0.7.5 and earlier,
+<code>addStep</code> also accepts the <code>f.addStep(shell.Compile,
+command=["make","build"])</code> form, although its use is discouraged
+because then the <code>Compile</code> step doesn't get to validate or
+complain about its arguments until build time. The modern
+pass-by-instance approach allows this validation to occur while the
+config file is being loaded, where the admin has a better chance of
+noticing problems).
+
+ <p>It is also possible to pass a list of steps into the
+<code>BuildFactory</code> when it is created. Using <code>addStep</code> is
+usually simpler, but there are cases where it is more convenient to
+create the list of steps ahead of time:
+
+<pre class="example"> from buildbot.steps import source, shell
+ from buildbot.process import factory
+
+ all_steps = [source.CVS(cvsroot=CVSROOT, cvsmodule="project", mode="update"),
+ shell.Compile(command=["make", "build"]),
+ ]
+ f = factory.BuildFactory(all_steps)
+</pre>
+ <p>Each step can affect the build process in the following ways:
+
+ <ul>
+<li>If the step's <code>haltOnFailure</code> attribute is True, then a failure
+in the step (i.e. if it completes with a result of FAILURE) will cause
+the whole build to be terminated immediately: no further steps will be
+executed, with the exception of steps with <code>alwaysRun</code> set to
+True. <code>haltOnFailure</code> is useful for setup steps upon which the
+rest of the build depends: if the CVS checkout or <code>./configure</code>
+process fails, there is no point in trying to compile or test the
+resulting tree.
+
+ <li>If the step's <code>alwaysRun</code> attribute is True, then it will always
+be run, regardless of if previous steps have failed. This is useful
+for cleanup steps that should always be run to return the build
+directory or build slave into a good state.
+
+ <li>If the <code>flunkOnFailure</code> or <code>flunkOnWarnings</code> flag is set,
+then a result of FAILURE or WARNINGS will mark the build as a whole as
+FAILED. However, the remaining steps will still be executed. This is
+appropriate for things like multiple testing steps: a failure in any
+one of them will indicate that the build has failed, however it is
+still useful to run them all to completion.
+
+ <li>Similarly, if the <code>warnOnFailure</code> or <code>warnOnWarnings</code> flag
+is set, then a result of FAILURE or WARNINGS will mark the build as
+having WARNINGS, and the remaining steps will still be executed. This
+may be appropriate for certain kinds of optional build or test steps.
+For example, a failure experienced while building documentation files
+should be made visible with a WARNINGS result but not be serious
+enough to warrant marking the whole build with a FAILURE.
+
+ </ul>
+
+ <p>In addition, each Step produces its own results, may create logfiles,
+etc. However only the flags described above have any effect on the
+build as a whole.
+
+ <p>The pre-defined BuildSteps like <code>CVS</code> and <code>Compile</code> have
+reasonably appropriate flags set on them already. For example, without
+a source tree there is no point in continuing the build, so the
+<code>CVS</code> class has the <code>haltOnFailure</code> flag set to True. Look
+in <samp><span class="file">buildbot/steps/*.py</span></samp> to see how the other Steps are
+marked.
+
+ <p>Each Step is created with an additional <code>workdir</code> argument that
+indicates where its actions should take place. This is specified as a
+subdirectory of the slave builder's base directory, with a default
+value of <code>build</code>. This is only implemented as a step argument (as
+opposed to simply being a part of the base directory) because the
+CVS/SVN steps need to perform their checkouts from the parent
+directory.
+
+<ul class="menu">
+<li><a accesskey="1" href="#BuildFactory-Attributes">BuildFactory Attributes</a>
+<li><a accesskey="2" href="#Quick-builds">Quick builds</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="BuildFactory-Attributes"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Quick-builds">Quick builds</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#BuildFactory">BuildFactory</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#BuildFactory">BuildFactory</a>
+
+</div>
+
+<h5 class="subsubsection">6.3.2.1 BuildFactory Attributes</h5>
+
+<p>Some attributes from the BuildFactory are copied into each Build.
+
+ <p><a name="index-treeStableTimer-97"></a>
+ <dl>
+<dt><code>useProgress</code><dd>(defaults to True): if True, the buildmaster keeps track of how long
+each step takes, so it can provide estimates of how long future builds
+will take. If builds are not expected to take a consistent amount of
+time (such as incremental builds in which a random set of files are
+recompiled or tested each time), this should be set to False to
+inhibit progress-tracking.
+
+ </dl>
+
+<div class="node">
+<p><hr>
+<a name="Quick-builds"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#BuildFactory-Attributes">BuildFactory Attributes</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#BuildFactory">BuildFactory</a>
+
+</div>
+
+<h5 class="subsubsection">6.3.2.2 Quick builds</h5>
+
+<p><a name="index-buildbot_002eprocess_002efactory_002eQuickBuildFactory-98"></a>
+The difference between a &ldquo;full build&rdquo; and a &ldquo;quick build&rdquo; is that
+quick builds are generally done incrementally, starting with the tree
+where the previous build was performed. That simply means that the
+source-checkout step should be given a <code>mode='update'</code> flag, to
+do the source update in-place.
+
+ <p>In addition to that, the <code>useProgress</code> flag should be set to
+False. Incremental builds will (or at least they ought to) compile as
+few files as necessary, so they will take an unpredictable amount of
+time to run. Therefore it would be misleading to claim to predict how
+long the build will take.
+
+<div class="node">
+<p><hr>
+<a name="Process-Specific-build-factories"></a>
+<a name="Process_002dSpecific-build-factories"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#BuildFactory">BuildFactory</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Build-Factories">Build Factories</a>
+
+</div>
+
+<h4 class="subsection">6.3.3 Process-Specific build factories</h4>
+
+<p>Many projects use one of a few popular build frameworks to simplify
+the creation and maintenance of Makefiles or other compilation
+structures. Buildbot provides several pre-configured BuildFactory
+subclasses which let you build these projects with a minimum of fuss.
+
+<ul class="menu">
+<li><a accesskey="1" href="#GNUAutoconf">GNUAutoconf</a>
+<li><a accesskey="2" href="#CPAN">CPAN</a>
+<li><a accesskey="3" href="#Python-distutils">Python distutils</a>
+<li><a accesskey="4" href="#Python_002fTwisted_002ftrial-projects">Python/Twisted/trial projects</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="GNUAutoconf"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#CPAN">CPAN</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Process_002dSpecific-build-factories">Process-Specific build factories</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Process_002dSpecific-build-factories">Process-Specific build factories</a>
+
+</div>
+
+<h5 class="subsubsection">6.3.3.1 GNUAutoconf</h5>
+
+<p><a name="index-buildbot_002eprocess_002efactory_002eGNUAutoconf-99"></a>
+<a href="http://www.gnu.org/software/autoconf/">GNU Autoconf</a> is a
+software portability tool, intended to make it possible to write
+programs in C (and other languages) which will run on a variety of
+UNIX-like systems. Most GNU software is built using autoconf. It is
+frequently used in combination with GNU automake. These tools both
+encourage a build process which usually looks like this:
+
+<pre class="example"> % CONFIG_ENV=foo ./configure --with-flags
+ % make all
+ % make check
+ # make install
+</pre>
+ <p>(except of course the Buildbot always skips the <code>make install</code>
+part).
+
+ <p>The Buildbot's <code>buildbot.process.factory.GNUAutoconf</code> factory is
+designed to build projects which use GNU autoconf and/or automake. The
+configuration environment variables, the configure flags, and command
+lines used for the compile and test are all configurable, in general
+the default values will be suitable.
+
+ <p>Example:
+
+<pre class="example"> # use the s() convenience function defined earlier
+ f = factory.GNUAutoconf(source=s(step.SVN, svnurl=URL, mode="copy"),
+ flags=["--disable-nls"])
+</pre>
+ <p>Required Arguments:
+
+ <dl>
+<dt><code>source</code><dd>This argument must be a step specification tuple that provides a
+BuildStep to generate the source tree.
+</dl>
+
+ <p>Optional Arguments:
+
+ <dl>
+<dt><code>configure</code><dd>The command used to configure the tree. Defaults to
+<code>./configure</code>. Accepts either a string or a list of shell argv
+elements.
+
+ <br><dt><code>configureEnv</code><dd>The environment used for the initial configuration step. This accepts
+a dictionary which will be merged into the buildslave's normal
+environment. This is commonly used to provide things like
+<code>CFLAGS="-O2 -g"</code> (to enable optimization and debug symbols during the compile).
+Defaults to an empty dictionary.
+
+ <br><dt><code>configureFlags</code><dd>A list of flags to be appended to the argument list of the configure
+command. This is commonly used to enable or disable specific features
+of the autoconf-controlled package, like <code>["--without-x"]</code> to
+disable windowing support. Defaults to an empty list.
+
+ <br><dt><code>compile</code><dd>this is a shell command or list of argv values which is used to
+actually compile the tree. It defaults to <code>make all</code>. If set to
+None, the compile step is skipped.
+
+ <br><dt><code>test</code><dd>this is a shell command or list of argv values which is used to run
+the tree's self-tests. It defaults to <code>make check</code>. If set to
+None, the test step is skipped.
+
+ </dl>
+
+<div class="node">
+<p><hr>
+<a name="CPAN"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Python-distutils">Python distutils</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#GNUAutoconf">GNUAutoconf</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Process_002dSpecific-build-factories">Process-Specific build factories</a>
+
+</div>
+
+<h5 class="subsubsection">6.3.3.2 CPAN</h5>
+
+<p><a name="index-buildbot_002eprocess_002efactory_002eCPAN-100"></a>
+Most Perl modules available from the <a href="http://www.cpan.org/">CPAN</a>
+archive use the <code>MakeMaker</code> module to provide configuration,
+build, and test services. The standard build routine for these modules
+looks like:
+
+<pre class="example"> % perl Makefile.PL
+ % make
+ % make test
+ # make install
+</pre>
+ <p>(except again Buildbot skips the install step)
+
+ <p>Buildbot provides a <code>CPAN</code> factory to compile and test these
+projects.
+
+ <p>Arguments:
+ <dl>
+<dt><code>source</code><dd>(required): A step specification tuple, like that used by GNUAutoconf.
+
+ <br><dt><code>perl</code><dd>A string which specifies the <code>perl</code> executable to use. Defaults
+to just <code>perl</code>.
+
+ </dl>
+
+<div class="node">
+<p><hr>
+<a name="Python-distutils"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Python_002fTwisted_002ftrial-projects">Python/Twisted/trial projects</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#CPAN">CPAN</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Process_002dSpecific-build-factories">Process-Specific build factories</a>
+
+</div>
+
+<h5 class="subsubsection">6.3.3.3 Python distutils</h5>
+
+<p><a name="index-buildbot_002eprocess_002efactory_002eDistutils-101"></a>
+Most Python modules use the <code>distutils</code> package to provide
+configuration and build services. The standard build process looks
+like:
+
+<pre class="example"> % python ./setup.py build
+ % python ./setup.py install
+</pre>
+ <p>Unfortunately, although Python provides a standard unit-test framework
+named <code>unittest</code>, to the best of my knowledge <code>distutils</code>
+does not provide a standardized target to run such unit tests. (Please
+let me know if I'm wrong, and I will update this factory.)
+
+ <p>The <code>Distutils</code> factory provides support for running the build
+part of this process. It accepts the same <code>source=</code> parameter as
+the other build factories.
+
+ <p>Arguments:
+ <dl>
+<dt><code>source</code><dd>(required): A step specification tuple, like that used by GNUAutoconf.
+
+ <br><dt><code>python</code><dd>A string which specifies the <code>python</code> executable to use. Defaults
+to just <code>python</code>.
+
+ <br><dt><code>test</code><dd>Provides a shell command which runs unit tests. This accepts either a
+string or a list. The default value is None, which disables the test
+step (since there is no common default command to run unit tests in
+distutils modules).
+
+ </dl>
+
+<div class="node">
+<p><hr>
+<a name="Python%2fTwisted%2ftrial-projects"></a>
+<a name="Python_002fTwisted_002ftrial-projects"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Python-distutils">Python distutils</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Process_002dSpecific-build-factories">Process-Specific build factories</a>
+
+</div>
+
+<h5 class="subsubsection">6.3.3.4 Python/Twisted/trial projects</h5>
+
+<p><a name="index-buildbot_002eprocess_002efactory_002eTrial-102"></a><!-- TODO: document these steps better -->
+<a name="index-buildbot_002esteps_002epython_005ftwisted_002eHLint-103"></a><a name="index-buildbot_002esteps_002epython_005ftwisted_002eTrial-104"></a><a name="index-buildbot_002esteps_002epython_005ftwisted_002eProcessDocs-105"></a><a name="index-buildbot_002esteps_002epython_005ftwisted_002eBuildDebs-106"></a><a name="index-buildbot_002esteps_002epython_005ftwisted_002eRemovePYCs-107"></a>
+Twisted provides a unit test tool named <code>trial</code> which provides a
+few improvements over Python's built-in <code>unittest</code> module. Many
+python projects which use Twisted for their networking or application
+services also use trial for their unit tests. These modules are
+usually built and tested with something like the following:
+
+<pre class="example"> % python ./setup.py build
+ % PYTHONPATH=build/lib.linux-i686-2.3 trial -v PROJECTNAME.test
+ % python ./setup.py install
+</pre>
+ <p>Unfortunately, the <samp><span class="file">build/lib</span></samp> directory into which the
+built/copied .py files are placed is actually architecture-dependent,
+and I do not yet know of a simple way to calculate its value. For many
+projects it is sufficient to import their libraries &ldquo;in place&rdquo; from
+the tree's base directory (<code>PYTHONPATH=.</code>).
+
+ <p>In addition, the <var>PROJECTNAME</var> value where the test files are
+located is project-dependent: it is usually just the project's
+top-level library directory, as common practice suggests the unit test
+files are put in the <code>test</code> sub-module. This value cannot be
+guessed, the <code>Trial</code> class must be told where to find the test
+files.
+
+ <p>The <code>Trial</code> class provides support for building and testing
+projects which use distutils and trial. If the test module name is
+specified, trial will be invoked. The library path used for testing
+can also be set.
+
+ <p>One advantage of trial is that the Buildbot happens to know how to
+parse trial output, letting it identify which tests passed and which
+ones failed. The Buildbot can then provide fine-grained reports about
+how many tests have failed, when individual tests fail when they had
+been passing previously, etc.
+
+ <p>Another feature of trial is that you can give it a series of source
+.py files, and it will search them for special <code>test-case-name</code>
+tags that indicate which test cases provide coverage for that file.
+Trial can then run just the appropriate tests. This is useful for
+quick builds, where you want to only run the test cases that cover the
+changed functionality.
+
+ <p>Arguments:
+ <dl>
+<dt><code>source</code><dd>(required): A step specification tuple, like that used by GNUAutoconf.
+
+ <br><dt><code>buildpython</code><dd>A list (argv array) of strings which specifies the <code>python</code>
+executable to use when building the package. Defaults to just
+<code>['python']</code>. It may be useful to add flags here, to suppress
+warnings during compilation of extension modules. This list is
+extended with <code>['./setup.py', 'build']</code> and then executed in a
+ShellCommand.
+
+ <br><dt><code>testpath</code><dd>Provides a directory to add to <code>PYTHONPATH</code> when running the unit
+tests, if tests are being run. Defaults to <code>.</code> to include the
+project files in-place. The generated build library is frequently
+architecture-dependent, but may simply be <samp><span class="file">build/lib</span></samp> for
+pure-python modules.
+
+ <br><dt><code>trialpython</code><dd>Another list of strings used to build the command that actually runs
+trial. This is prepended to the contents of the <code>trial</code> argument
+below. It may be useful to add <code>-W</code> flags here to suppress
+warnings that occur while tests are being run. Defaults to an empty
+list, meaning <code>trial</code> will be run without an explicit
+interpreter, which is generally what you want if you're using
+<samp><span class="file">/usr/bin/trial</span></samp> instead of, say, the <samp><span class="file">./bin/trial</span></samp> that
+lives in the Twisted source tree.
+
+ <br><dt><code>trial</code><dd>provides the name of the <code>trial</code> command. It is occasionally
+useful to use an alternate executable, such as <code>trial2.2</code> which
+might run the tests under an older version of Python. Defaults to
+<code>trial</code>.
+
+ <br><dt><code>tests</code><dd>Provides a module name or names which contain the unit tests for this
+project. Accepts a string, typically <code>PROJECTNAME.test</code>, or a
+list of strings. Defaults to None, indicating that no tests should be
+run. You must either set this or <code>useTestCaseNames</code> to do anything
+useful with the Trial factory.
+
+ <br><dt><code>useTestCaseNames</code><dd>Tells the Step to provide the names of all changed .py files to trial,
+so it can look for test-case-name tags and run just the matching test
+cases. Suitable for use in quick builds. Defaults to False.
+
+ <br><dt><code>randomly</code><dd>If <code>True</code>, tells Trial (with the <code>--random=0</code> argument) to
+run the test cases in random order, which sometimes catches subtle
+inter-test dependency bugs. Defaults to <code>False</code>.
+
+ <br><dt><code>recurse</code><dd>If <code>True</code>, tells Trial (with the <code>--recurse</code> argument) to
+look in all subdirectories for additional test cases. It isn't clear
+to me how this works, but it may be useful to deal with the
+unknown-PROJECTNAME problem described above, and is currently used in
+the Twisted buildbot to accommodate the fact that test cases are now
+distributed through multiple twisted.SUBPROJECT.test directories.
+
+ </dl>
+
+ <p>Unless one of <code>tests</code> or <code>useTestCaseNames</code>
+is set, no tests will be run.
+
+ <p>Some quick examples follow. Most of these examples assume that the
+target python code (the &ldquo;code under test&rdquo;) can be reached directly
+from the root of the target tree, rather than being in a <samp><span class="file">lib/</span></samp>
+subdirectory.
+
+<pre class="example"> # Trial(source, tests="toplevel.test") does:
+ # python ./setup.py build
+ # PYTHONPATH=. trial -to toplevel.test
+
+ # Trial(source, tests=["toplevel.test", "other.test"]) does:
+ # python ./setup.py build
+ # PYTHONPATH=. trial -to toplevel.test other.test
+
+ # Trial(source, useTestCaseNames=True) does:
+ # python ./setup.py build
+ # PYTHONPATH=. trial -to --testmodule=foo/bar.py.. (from Changes)
+
+ # Trial(source, buildpython=["python2.3", "-Wall"], tests="foo.tests"):
+ # python2.3 -Wall ./setup.py build
+ # PYTHONPATH=. trial -to foo.tests
+
+ # Trial(source, trialpython="python2.3", trial="/usr/bin/trial",
+ # tests="foo.tests") does:
+ # python ./setup.py build
+ # PYTHONPATH=. python2.3 /usr/bin/trial -to foo.tests
+
+ # For running trial out of the tree being tested (only useful when the
+ # tree being built is Twisted itself):
+ # Trial(source, trialpython=["python2.3", "-Wall"], trial="./bin/trial",
+ # tests="foo.tests") does:
+ # python2.3 -Wall ./setup.py build
+ # PYTHONPATH=. python2.3 -Wall ./bin/trial -to foo.tests
+</pre>
+ <p>If the output directory of <code>./setup.py build</code> is known, you can
+pull the python code from the built location instead of the source
+directories. This should be able to handle variations in where the
+source comes from, as well as accommodating binary extension modules:
+
+<pre class="example"> # Trial(source,tests="toplevel.test",testpath='build/lib.linux-i686-2.3')
+ # does:
+ # python ./setup.py build
+ # PYTHONPATH=build/lib.linux-i686-2.3 trial -to toplevel.test
+</pre>
+ <div class="node">
+<p><hr>
+<a name="Status-Delivery"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Command_002dline-tool">Command-line tool</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Build-Process">Build Process</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="chapter">7 Status Delivery</h2>
+
+<p>More details are available in the docstrings for each class, use a
+command like <code>pydoc buildbot.status.html.WebStatus</code> to see them.
+Most status delivery objects take a <code>categories=</code> argument, which
+can contain a list of &ldquo;category&rdquo; names: in this case, it will only
+show status for Builders that are in one of the named categories.
+
+ <p>(implementor's note: each of these objects should be a
+service.MultiService which will be attached to the BuildMaster object
+when the configuration is processed. They should use
+<code>self.parent.getStatus()</code> to get access to the top-level IStatus
+object, either inside <code>startService</code> or later. They may call
+<code>status.subscribe()</code> in <code>startService</code> to receive
+notifications of builder events, in which case they must define
+<code>builderAdded</code> and related methods. See the docstrings in
+<samp><span class="file">buildbot/interfaces.py</span></samp> for full details.)
+
+<ul class="menu">
+<li><a accesskey="1" href="#WebStatus">WebStatus</a>
+<li><a accesskey="2" href="#MailNotifier">MailNotifier</a>
+<li><a accesskey="3" href="#IRC-Bot">IRC Bot</a>
+<li><a accesskey="4" href="#PBListener">PBListener</a>
+<li><a accesskey="5" href="#Writing-New-Status-Plugins">Writing New Status Plugins</a>
+</ul>
+
+<!-- @node Email Delivery, , Status Delivery, Status Delivery -->
+<!-- @subsection Email Delivery -->
+<!-- DOCUMENT THIS -->
+<div class="node">
+<p><hr>
+<a name="WebStatus"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#MailNotifier">MailNotifier</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Status-Delivery">Status Delivery</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Status-Delivery">Status Delivery</a>
+
+</div>
+
+<h3 class="section">7.1 WebStatus</h3>
+
+<p><a name="index-WebStatus-108"></a><a name="index-buildbot_002estatus_002eweb_002ebaseweb_002eWebStatus-109"></a>
+The <code>buildbot.status.html.WebStatus</code> status target runs a small
+web server inside the buildmaster. You can point a browser at this web
+server and retrieve information about every build the buildbot knows
+about, as well as find out what the buildbot is currently working on.
+
+ <p>The first page you will see is the &ldquo;Welcome Page&rdquo;, which contains
+links to all the other useful pages. This page is simply served from
+the <samp><span class="file">public_html/index.html</span></samp> file in the buildmaster's base
+directory, where it is created by the <samp><span class="command">buildbot create-master</span></samp>
+command along with the rest of the buildmaster.
+
+ <p>The most complex resource provided by <code>WebStatus</code> is the
+&ldquo;Waterfall Display&rdquo;, which shows a time-based chart of events. This
+somewhat-busy display provides detailed information about all steps of
+all recent builds, and provides hyperlinks to look at individual build
+logs and source changes. By simply reloading this page on a regular
+basis, you will see a complete description of everything the buildbot
+is currently working on.
+
+ <p>There are also pages with more specialized information. For example,
+there is a page which shows the last 20 builds performed by the
+buildbot, one line each. Each line is a link to detailed information
+about that build. By adding query arguments to the URL used to reach
+this page, you can narrow the display to builds that involved certain
+branches, or which ran on certain Builders. These pages are described
+in great detail below.
+
+ <p>When the buildmaster is created, a subdirectory named
+<samp><span class="file">public_html/</span></samp> is created in its base directory. By default, <code>WebStatus</code>
+will serve files from this directory: for example, when a user points
+their browser at the buildbot's <code>WebStatus</code> URL, they will see
+the contents of the <samp><span class="file">public_html/index.html</span></samp> file. Likewise,
+<samp><span class="file">public_html/robots.txt</span></samp>, <samp><span class="file">public_html/buildbot.css</span></samp>, and
+<samp><span class="file">public_html/favicon.ico</span></samp> are all useful things to have in there.
+The first time a buildmaster is created, the <samp><span class="file">public_html</span></samp>
+directory is populated with some sample files, which you will probably
+want to customize for your own project. These files are all static:
+the buildbot does not modify them in any way as it serves them to HTTP
+clients.
+
+<pre class="example"> from buildbot.status.html import WebStatus
+ c['status'].append(WebStatus(8080))
+</pre>
+ <p>Note that the initial robots.txt file has Disallow lines for all of
+the dynamically-generated buildbot pages, to discourage web spiders
+and search engines from consuming a lot of CPU time as they crawl
+through the entire history of your buildbot. If you are running the
+buildbot behind a reverse proxy, you'll probably need to put the
+robots.txt file somewhere else (at the top level of the parent web
+server), and replace the URL prefixes in it with more suitable values.
+
+ <p>If you would like to use an alternative root directory, add the
+<code>public_html=..</code> option to the <code>WebStatus</code> creation:
+
+<pre class="example"> c['status'].append(WebStatus(8080, public_html="/var/www/buildbot"))
+</pre>
+ <p>In addition, if you are familiar with twisted.web <em>Resource
+Trees</em>, you can write code to add additional pages at places inside
+this web space. Just use <code>webstatus.putChild</code> to place these
+resources.
+
+ <p>The following section describes the special URLs and the status views
+they provide.
+
+<ul class="menu">
+<li><a accesskey="1" href="#WebStatus-Configuration-Parameters">WebStatus Configuration Parameters</a>
+<li><a accesskey="2" href="#Buildbot-Web-Resources">Buildbot Web Resources</a>
+<li><a accesskey="3" href="#XMLRPC-server">XMLRPC server</a>
+<li><a accesskey="4" href="#HTML-Waterfall">HTML Waterfall</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="WebStatus-Configuration-Parameters"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Buildbot-Web-Resources">Buildbot Web Resources</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#WebStatus">WebStatus</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#WebStatus">WebStatus</a>
+
+</div>
+
+<h4 class="subsection">7.1.1 WebStatus Configuration Parameters</h4>
+
+<p>The most common way to run a <code>WebStatus</code> is on a regular TCP
+port. To do this, just pass in the TCP port number when you create the
+<code>WebStatus</code> instance; this is called the <code>http_port</code> argument:
+
+<pre class="example"> from buildbot.status.html import WebStatus
+ c['status'].append(WebStatus(8080))
+</pre>
+ <p>The <code>http_port</code> argument is actually a &ldquo;strports specification&rdquo;
+for the port that the web server should listen on. This can be a
+simple port number, or a string like
+<code>tcp:8080:interface=127.0.0.1</code> (to limit connections to the
+loopback interface, and therefore to clients running on the same
+host)<a rel="footnote" href="#fn-13" name="fnd-13"><sup>13</sup></a>.
+
+ <p>If instead (or in addition) you provide the <code>distrib_port</code>
+argument, a twisted.web distributed server will be started either on a
+TCP port (if <code>distrib_port</code> is like <code>"tcp:12345"</code>) or more
+likely on a UNIX socket (if <code>distrib_port</code> is like
+<code>"unix:/path/to/socket"</code>).
+
+ <p>The <code>distrib_port</code> option means that, on a host with a
+suitably-configured twisted-web server, you do not need to consume a
+separate TCP port for the buildmaster's status web page. When the web
+server is constructed with <code>mktap web --user</code>, URLs that point to
+<code>http://host/~username/</code> are dispatched to a sub-server that is
+listening on a UNIX socket at <code>~username/.twisted-web-pb</code>. On
+such a system, it is convenient to create a dedicated <code>buildbot</code>
+user, then set <code>distrib_port</code> to
+<code>"unix:"+os.path.expanduser("~/.twistd-web-pb")</code>. This
+configuration will make the HTML status page available at
+<code>http://host/~buildbot/</code> . Suitable URL remapping can make it
+appear at <code>http://host/buildbot/</code>, and the right virtual host
+setup can even place it at <code>http://buildbot.host/</code> .
+
+ <p>The other <code>WebStatus</code> argument is <code>allowForce</code>. If set to
+True, then the web page will provide a &ldquo;Force Build&rdquo; button that
+allows visitors to manually trigger builds. This is useful for
+developers to re-run builds that have failed because of intermittent
+problems in the test suite, or because of libraries that were not
+installed at the time of the previous build. You may not wish to allow
+strangers to cause a build to run: in that case, set this to False to
+remove these buttons. The default value is False.
+
+<div class="node">
+<p><hr>
+<a name="Buildbot-Web-Resources"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#XMLRPC-server">XMLRPC server</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#WebStatus-Configuration-Parameters">WebStatus Configuration Parameters</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#WebStatus">WebStatus</a>
+
+</div>
+
+<h4 class="subsection">7.1.2 Buildbot Web Resources</h4>
+
+<p>Certain URLs are &ldquo;magic&rdquo;, and the pages they serve are created by
+code in various classes in the <samp><span class="file">buildbot.status.web</span></samp> package
+instead of being read from disk. The most common way to access these
+pages is for the buildmaster admin to write or modify the
+<samp><span class="file">index.html</span></samp> page to contain links to them. Of course other
+project web pages can contain links to these buildbot pages as well.
+
+ <p>Many pages can be modified by adding query arguments to the URL. For
+example, a page which shows the results of the most recent build
+normally does this for all builders at once. But by appending
+&ldquo;?builder=i386&rdquo; to the end of the URL, the page will show only the
+results for the &ldquo;i386&rdquo; builder. When used in this way, you can add
+multiple &ldquo;builder=&rdquo; arguments to see multiple builders. Remembering
+that URL query arguments are separated <em>from each other</em> with
+ampersands, a URL that ends in &ldquo;?builder=i386&amp;builder=ppc&rdquo; would
+show builds for just those two Builders.
+
+ <p>The <code>branch=</code> query argument can be used on some pages. This
+filters the information displayed by that page down to only the builds
+or changes which involved the given branch. Use <code>branch=trunk</code> to
+reference the trunk: if you aren't intentionally using branches,
+you're probably using trunk. Multiple <code>branch=</code> arguments can be
+used to examine multiple branches at once (so appending
+<code>?branch=foo&amp;branch=bar</code> to the URL will show builds involving
+either branch). No <code>branch=</code> arguments means to show builds and
+changes for all branches.
+
+ <p>Some pages may include the Builder name or the build number in the
+main part of the URL itself. For example, a page that describes Build
+#7 of the &ldquo;i386&rdquo; builder would live at <samp><span class="file">/builders/i386/builds/7</span></samp>.
+
+ <p>The table below lists all of the internal pages and the URLs that can
+be used to access them.
+
+ <p>NOTE: of the pages described here, <code>/slave_status_timeline</code> and
+<code>/last_build</code> have not yet been implemented, and <code>/xmlrpc</code>
+has only a few methods so far. Future releases will improve this.
+
+ <dl>
+<dt><code>/waterfall</code><dd>
+This provides a chronologically-oriented display of the activity of
+all builders. It is the same display used by the Waterfall display.
+
+ <p>By adding one or more &ldquo;builder=&rdquo; query arguments, the Waterfall is
+restricted to only showing information about the given Builders. By
+adding one or more &ldquo;branch=&rdquo; query arguments, the display is
+restricted to showing information about the given branches. In
+addition, adding one or more &ldquo;category=&rdquo; query arguments to the URL
+will limit the display to Builders that were defined with one of the
+given categories.
+
+ <p>A 'show_events=true' query argument causes the display to include
+non-Build events, like slaves attaching and detaching, as well as
+reconfiguration events. 'show_events=false' hides these events. The
+default is to show them.
+
+ <p>The <code>last_time=</code>, <code>first_time=</code>, and <code>show_time=</code>
+arguments will control what interval of time is displayed. The default
+is to show the latest events, but these can be used to look at earlier
+periods in history. The <code>num_events=</code> argument also provides a
+limit on the size of the displayed page.
+
+ <p>The Waterfall has references to resources many of the other portions
+of the URL space: <samp><span class="file">/builders</span></samp> for access to individual builds,
+<samp><span class="file">/changes</span></samp> for access to information about source code changes,
+etc.
+
+ <br><dt><code>/rss</code><dd>
+This provides a rss feed summarizing all failed builds. The same
+query-arguments used by 'waterfall' can be added to filter the
+feed output.
+
+ <br><dt><code>/atom</code><dd>
+This provides an atom feed summarizing all failed builds. The same
+query-arguments used by 'waterfall' can be added to filter the feed
+output.
+
+ <br><dt><code>/builders/$BUILDERNAME</code><dd>
+This describes the given Builder, and provides buttons to force a build.
+
+ <br><dt><code>/builders/$BUILDERNAME/builds/$BUILDNUM</code><dd>
+This describes a specific Build.
+
+ <br><dt><code>/builders/$BUILDERNAME/builds/$BUILDNUM/steps/$STEPNAME</code><dd>
+This describes a specific BuildStep.
+
+ <br><dt><code>/builders/$BUILDERNAME/builds/$BUILDNUM/steps/$STEPNAME/logs/$LOGNAME</code><dd>
+This provides an HTML representation of a specific logfile.
+
+ <br><dt><code>/builders/$BUILDERNAME/builds/$BUILDNUM/steps/$STEPNAME/logs/$LOGNAME/text</code><dd>
+This returns the logfile as plain text, without any HTML coloring
+markup. It also removes the &ldquo;headers&rdquo;, which are the lines that
+describe what command was run and what the environment variable
+settings were like. This may be useful for saving to disk and
+feeding to tools like 'grep'.
+
+ <br><dt><code>/changes</code><dd>
+This provides a brief description of the ChangeSource in use
+(see <a href="#Change-Sources">Change Sources</a>).
+
+ <br><dt><code>/changes/NN</code><dd>
+This shows detailed information about the numbered Change: who was the
+author, what files were changed, what revision number was represented,
+etc.
+
+ <br><dt><code>/buildslaves</code><dd>
+This summarizes each BuildSlave, including which Builders are
+configured to use it, whether the buildslave is currently connected or
+not, and host information retrieved from the buildslave itself.
+
+ <br><dt><code>/one_line_per_build</code><dd>
+This page shows one line of text for each build, merging information
+from all Builders<a rel="footnote" href="#fn-14" name="fnd-14"><sup>14</sup></a>. Each line specifies
+the name of the Builder, the number of the Build, what revision it
+used, and a summary of the results. Successful builds are in green,
+while failing builds are in red. The date and time of the build are
+added to the right-hand edge of the line. The lines are ordered by
+build finish timestamp.
+
+ <p>One or more <code>builder=</code> or <code>branch=</code> arguments can be used to
+restrict the list. In addition, a <code>numbuilds=</code> argument will
+control how many lines are displayed (20 by default).
+
+ <br><dt><code>/one_box_per_builder</code><dd>
+This page shows a small table, with one box for each Builder,
+containing the results of the most recent Build. It does not show the
+individual steps, or the current status. This is a simple summary of
+buildbot status: if this page is green, then all tests are passing.
+
+ <p>As with <code>/one_line_per_build</code>, this page will also honor
+<code>builder=</code> and <code>branch=</code> arguments.
+
+ <br><dt><code>/about</code><dd>
+This page gives a brief summary of the Buildbot itself: software
+version, versions of some libraries that the Buildbot depends upon,
+etc. It also contains a link to the buildbot.net home page.
+
+ <br><dt><code>/slave_status_timeline</code><dd>
+(note: this page has not yet been implemented)
+
+ <p>This provides a chronological display of configuration and operational
+events: master startup/shutdown, slave connect/disconnect, and
+config-file changes. When a config-file reload is abandoned because of
+an error in the config file, the error is displayed on this page.
+
+ <p>This page does not show any builds.
+
+ <br><dt><code>/last_build/$BUILDERNAME/status.png</code><dd>
+This returns a PNG image that describes the results of the most recent
+build, which can be referenced in an IMG tag by other pages, perhaps
+from a completely different site. Use it as you would a webcounter.
+
+ </dl>
+
+ <p>There are also a set of web-status resources that are intended for use
+by other programs, rather than humans.
+
+ <dl>
+<dt><code>/xmlrpc</code><dd>
+This runs an XML-RPC server which can be used to query status
+information about various builds. See <a href="#XMLRPC-server">XMLRPC server</a> for more
+details.
+
+ </dl>
+
+<div class="node">
+<p><hr>
+<a name="XMLRPC-server"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#HTML-Waterfall">HTML Waterfall</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Buildbot-Web-Resources">Buildbot Web Resources</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#WebStatus">WebStatus</a>
+
+</div>
+
+<h4 class="subsection">7.1.3 XMLRPC server</h4>
+
+<p>When using WebStatus, the buildbot runs an XML-RPC server at
+<samp><span class="file">/xmlrpc</span></samp> that can be used by other programs to query build
+status. The following table lists the methods that can be invoked
+using this interface.
+
+ <dl>
+<dt><code>getAllBuildsInInterval(start, stop)</code><dd>
+Return a list of builds that have completed after the 'start'
+timestamp and before the 'stop' timestamp. This looks at all Builders.
+
+ <p>The timestamps are integers, interpreted as standard unix timestamps
+(seconds since epoch).
+
+ <p>Each Build is returned as a tuple in the form: <code>(buildername,
+buildnumber, build_end, branchname, revision, results, text)</code>
+
+ <p>The buildnumber is an integer. 'build_end' is an integer (seconds
+since epoch) specifying when the build finished.
+
+ <p>The branchname is a string, which may be an empty string to indicate
+None (i.e. the default branch). The revision is a string whose meaning
+is specific to the VC system in use, and comes from the 'got_revision'
+build property. The results are expressed as a string, one of
+('success', 'warnings', 'failure', 'exception'). The text is a list of
+short strings that ought to be joined by spaces and include slightly
+more data about the results of the build.
+
+ <br><dt><code>getBuild(builder_name, build_number)</code><dd>
+Return information about a specific build.
+
+ <p>This returns a dictionary (aka &ldquo;struct&rdquo; in XMLRPC terms) with
+complete information about the build. It does not include the contents
+of the log files, but it has just about everything else.
+
+ </dl>
+
+<div class="node">
+<p><hr>
+<a name="HTML-Waterfall"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#XMLRPC-server">XMLRPC server</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#WebStatus">WebStatus</a>
+
+</div>
+
+<h4 class="subsection">7.1.4 HTML Waterfall</h4>
+
+<p><a name="index-Waterfall-110"></a><a name="index-buildbot_002estatus_002ehtml_002eWaterfall-111"></a>
+The <code>Waterfall</code> status target, deprecated as of 0.7.6, is a
+subset of the regular <code>WebStatus</code> resource (see <a href="#WebStatus">WebStatus</a>).
+This section (and the <code>Waterfall</code> class itself) will be removed
+from a future release.
+
+<pre class="example"> from buildbot.status import html
+ w = html.WebStatus(http_port=8080)
+ c['status'].append(w)
+</pre>
+ <div class="node">
+<p><hr>
+<a name="MailNotifier"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#IRC-Bot">IRC Bot</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#WebStatus">WebStatus</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Status-Delivery">Status Delivery</a>
+
+</div>
+
+<h3 class="section">7.2 MailNotifier</h3>
+
+<p><a name="index-email-112"></a><a name="index-mail-113"></a><a name="index-buildbot_002estatus_002email_002eMailNotifier-114"></a>
+The buildbot can also send email when builds finish. The most common
+use of this is to tell developers when their change has caused the
+build to fail. It is also quite common to send a message to a mailing
+list (usually named &ldquo;builds&rdquo; or similar) about every build.
+
+ <p>The <code>MailNotifier</code> status target is used to accomplish this. You
+configure it by specifying who mail should be sent to, under what
+circumstances mail should be sent, and how to deliver the mail. It can
+be configured to only send out mail for certain builders, and only
+send messages when the build fails, or when the builder transitions
+from success to failure. It can also be configured to include various
+build logs in each message.
+
+ <p>By default, the message will be sent to the Interested Users list
+(see <a href="#Doing-Things-With-Users">Doing Things With Users</a>), which includes all developers who
+made changes in the build. You can add additional recipients with the
+extraRecipients argument.
+
+ <p>Each MailNotifier sends mail to a single set of recipients. To send
+different kinds of mail to different recipients, use multiple
+MailNotifiers.
+
+ <p>The following simple example will send an email upon the completion of
+each build, to just those developers whose Changes were included in
+the build. The email contains a description of the Build, its results,
+and URLs where more information can be obtained.
+
+<pre class="example"> from buildbot.status.mail import MailNotifier
+ mn = MailNotifier(fromaddr="buildbot@example.org", lookup="example.org")
+ c['status'].append(mn)
+</pre>
+ <p>To get a simple one-message-per-build (say, for a mailing list), use
+the following form instead. This form does not send mail to individual
+developers (and thus does not need the <code>lookup=</code> argument,
+explained below), instead it only ever sends mail to the &ldquo;extra
+recipients&rdquo; named in the arguments:
+
+<pre class="example"> mn = MailNotifier(fromaddr="buildbot@example.org",
+ sendToInterestedUsers=False,
+ extraRecipients=['listaddr@example.org'])
+</pre>
+ <p>In some cases it is desirable to have different information than what
+is provided in a standard MailNotifier message. For this purpose
+MailNotifier provides the argument customMesg (a function) which allows
+for the creation of messages with unique content.
+
+ <p>For example it can be useful to display the last few lines of a log file
+and recent changes when a builder fails:
+
+<pre class="example"> def message(attrs):
+ logLines = 10
+ text = list()
+ text.append("STATUS: %s" % attrs['result'].title())
+ text.append("")
+ text.extend([c.asText() for c in attrs['changes']])
+ text.append("")
+ name, url, lines = attrs['logs'][-1]
+ text.append("Last %d lines of '%s':" % (logLines, name))
+ text.extend(["\t%s\n" % line for line in lines[len(lines)-logLines:]])
+ text.append("")
+ text.append("-buildbot")
+ return ("\n".join(text), 'plain')
+
+ mn = MailNotifier(fromaddr="buildbot@example.org",
+ sendToInterestedUsers=False,
+ mode='problem',
+ extraRecipients=['listaddr@example.org'],
+ customMesg=message)
+</pre>
+ <p>A customMesg function takes a single dict argument (see below) and returns a
+tuple of strings. The first string is the complete text of the message and the
+second is the message type ('plain' or 'html'). The 'html' type should be used
+when generating an HTML message:
+
+<pre class="example"> def message(attrs):
+ logLines = 10
+ text = list()
+ text.append('&lt;h4&gt;Build status %s.&lt;/h4&gt;' % (attrs['result'].title()))
+ if attrs['changes']:
+ text.append('&lt;h4&gt;Recent Changes:&lt;/h4&gt;')
+ text.extend([c.asHTML() for c in attrs['changes']])
+ name, url, lines = attrs['logs'][-1]
+ text.append('&lt;h4&gt;Last %d lines of "%s":&lt;/h4&gt;' % (logLines, name))
+ text.append('&lt;p&gt;')
+ text.append('&lt;br&gt;'.join([line for line in lines[len(lines)-logLines:]]))
+ text.append('&lt;/p&gt;')
+ text.append('&lt;br&gt;&lt;br&gt;')
+ text.append('Full log at: %s' % url)
+ text.append('&lt;br&gt;&lt;br&gt;')
+ text.append('&lt;b&gt;-buildbot&lt;/b&gt;')
+ return ('\n'.join(text), 'html')
+</pre>
+ <h3 class="heading">MailNotifier arguments</h3>
+
+ <dl>
+<dt><code>fromaddr</code><dd>The email address to be used in the 'From' header.
+
+ <br><dt><code>sendToInterestedUsers</code><dd>(boolean). If True (the default), send mail to all of the Interested
+Users. If False, only send mail to the extraRecipients list.
+
+ <br><dt><code>extraRecipients</code><dd>(tuple of strings). A list of email addresses to which messages should
+be sent (in addition to the InterestedUsers list, which includes any
+developers who made Changes that went into this build). It is a good
+idea to create a small mailing list and deliver to that, then let
+subscribers come and go as they please.
+
+ <br><dt><code>subject</code><dd>(string). A string to be used as the subject line of the message.
+<code>%(builder)s</code> will be replaced with the name of the builder which
+provoked the message.
+
+ <br><dt><code>mode</code><dd>(string). Default to 'all'. One of:
+ <dl>
+<dt><code>all</code><dd>Send mail about all builds, both passing and failing
+<br><dt><code>failing</code><dd>Only send mail about builds which fail
+<br><dt><code>problem</code><dd>Only send mail about a build which failed when the previous build has passed.
+If your builds usually pass, then this will only send mail when a problem
+occurs.
+</dl>
+
+ <br><dt><code>builders</code><dd>(list of strings). A list of builder names for which mail should be
+sent. Defaults to None (send mail for all builds). Use either builders
+or categories, but not both.
+
+ <br><dt><code>categories</code><dd>(list of strings). A list of category names to serve status
+information for. Defaults to None (all categories). Use either
+builders or categories, but not both.
+
+ <br><dt><code>addLogs</code><dd>(boolean). If True, include all build logs as attachments to the
+messages. These can be quite large. This can also be set to a list of
+log names, to send a subset of the logs. Defaults to False.
+
+ <br><dt><code>relayhost</code><dd>(string). The host to which the outbound SMTP connection should be
+made. Defaults to 'localhost'
+
+ <br><dt><code>lookup</code><dd>(implementor of <code>IEmailLookup</code>). Object which provides
+IEmailLookup, which is responsible for mapping User names (which come
+from the VC system) into valid email addresses. If not provided, the
+notifier will only be able to send mail to the addresses in the
+extraRecipients list. Most of the time you can use a simple Domain
+instance. As a shortcut, you can pass as string: this will be treated
+as if you had provided Domain(str). For example,
+lookup='twistedmatrix.com' will allow mail to be sent to all
+developers whose SVN usernames match their twistedmatrix.com account
+names. See buildbot/status/mail.py for more details.
+
+ <br><dt><code>customMesg</code><dd>This is an optional function that can be used to generate a custom mail
+message. The customMesg function takes a single dict and must return a
+tuple containing the message text and type ('html' or 'plain'). Below is a list
+of available keys in the dict passed to customMesg:
+
+ <dl>
+<dt><code>builderName</code><dd>(str) Name of the builder that generated this event.
+<br><dt><code>projectName</code><dd>(str) Name of the project.
+<br><dt><code>mode</code><dd>(str) Mode set in MailNotifier. (failing, passing, problem).
+<br><dt><code>result</code><dd>(str) Builder result as a string. 'success', 'warnings', 'failure', 'skipped', or 'exception'
+<br><dt><code>buildURL</code><dd>(str) URL to build page.
+<br><dt><code>buildbotURL</code><dd>(str) URL to buildbot main page.
+<br><dt><code>buildText</code><dd>(str) Build text from build.getText().
+<br><dt><code>slavename</code><dd>(str) Slavename.
+<br><dt><code>reason</code><dd>(str) Build reason from build.getReason().
+<br><dt><code>responsibleUsers</code><dd>(List of str) List of responsible users.
+<br><dt><code>branch</code><dd>(str) Name of branch used. If no SourceStamp exists branch
+is an empty string.
+<br><dt><code>revision</code><dd>(str) Name of revision used. If no SourceStamp exists revision
+is an empty string.
+<br><dt><code>patch</code><dd>(str) Name of patch used. If no SourceStamp exists patch
+is an empty string.
+<br><dt><code>changes</code><dd>(list of objs) List of change objects from SourceStamp. A change
+object has the following useful information:
+ <dl>
+<dt><code>who</code><dd>(str) who made this change
+<br><dt><code>revision</code><dd>(str) what VC revision is this change
+<br><dt><code>branch</code><dd>(str) on what branch did this change occur
+<br><dt><code>when</code><dd>(str) when did this change occur
+<br><dt><code>files</code><dd>(list of str) what files were affected in this change
+<br><dt><code>comments</code><dd>(str) comments regarding the change.
+</dl>
+ The functions asText and asHTML return a list of strings with
+the above information formatted.
+<br><dt><code>logs</code><dd>(List of Tuples) List of tuples where each tuple contains the log name, log url,
+and log contents as a list of strings.
+</dl>
+ </dl>
+
+<div class="node">
+<p><hr>
+<a name="IRC-Bot"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#PBListener">PBListener</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#MailNotifier">MailNotifier</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Status-Delivery">Status Delivery</a>
+
+</div>
+
+<h3 class="section">7.3 IRC Bot</h3>
+
+<p><a name="index-IRC-115"></a><a name="index-buildbot_002estatus_002ewords_002eIRC-116"></a>
+
+ <p>The <code>buildbot.status.words.IRC</code> status target creates an IRC bot
+which will attach to certain channels and be available for status
+queries. It can also be asked to announce builds as they occur, or be
+told to shut up.
+
+<pre class="example"> from buildbot.status import words
+ irc = words.IRC("irc.example.org", "botnickname",
+ channels=["channel1", "channel2"],
+ password="mysecretpassword",
+ notify_events={
+ 'exception': 1,
+ 'successToFailure': 1,
+ 'failureToSuccess': 1,
+ })
+ c['status'].append(irc)
+</pre>
+ <p>Take a look at the docstring for <code>words.IRC</code> for more details on
+configuring this service. The <code>password</code> argument, if provided,
+will be sent to Nickserv to claim the nickname: some IRC servers will
+not allow clients to send private messages until they have logged in
+with a password.
+
+ <p>To use the service, you address messages at the buildbot, either
+normally (<code>botnickname: status</code>) or with private messages
+(<code>/msg botnickname status</code>). The buildbot will respond in kind.
+
+ <p>Some of the commands currently available:
+
+ <dl>
+<dt><code>list builders</code><dd>Emit a list of all configured builders
+<br><dt><code>status BUILDER</code><dd>Announce the status of a specific Builder: what it is doing right now.
+<br><dt><code>status all</code><dd>Announce the status of all Builders
+<br><dt><code>watch BUILDER</code><dd>If the given Builder is currently running, wait until the Build is
+finished and then announce the results.
+<br><dt><code>last BUILDER</code><dd>Return the results of the last build to run on the given Builder.
+<br><dt><code>join CHANNEL</code><dd>Join the given IRC channel
+<br><dt><code>leave CHANNEL</code><dd>Leave the given IRC channel
+<br><dt><code>notify on|off|list EVENT</code><dd>Report events relating to builds. If the command is issued as a
+private message, then the report will be sent back as a private
+message to the user who issued the command. Otherwise, the report
+will be sent to the channel. Available events to be notified are:
+
+ <dl>
+<dt><code>started</code><dd>A build has started
+<br><dt><code>finished</code><dd>A build has finished
+<br><dt><code>success</code><dd>A build finished successfully
+<br><dt><code>failed</code><dd>A build failed
+<br><dt><code>exception</code><dd>A build generated an exception
+<br><dt><code>xToY</code><dd>The previous build was x, but this one is Y, where x and Y are each
+one of success, warnings, failure, exception (except Y is
+capitalized). For example: successToFailure will notify if the
+previous build was successful, but this one failed
+</dl>
+
+ <br><dt><code>help COMMAND</code><dd>Describe a command. Use <code>help commands</code> to get a list of known
+commands.
+<br><dt><code>source</code><dd>Announce the URL of the Buildbot's home page.
+<br><dt><code>version</code><dd>Announce the version of this Buildbot.
+</dl>
+
+ <p>Additionally, the config file may specify default notification options
+as shown in the example earlier.
+
+ <p>If the <code>allowForce=True</code> option was used, some additional commands
+will be available:
+
+ <dl>
+<dt><code>force build BUILDER REASON</code><dd>Tell the given Builder to start a build of the latest code. The user
+requesting the build and REASON are recorded in the Build status. The
+buildbot will announce the build's status when it finishes.
+
+ <br><dt><code>stop build BUILDER REASON</code><dd>Terminate any running build in the given Builder. REASON will be added
+to the build status to explain why it was stopped. You might use this
+if you committed a bug, corrected it right away, and don't want to
+wait for the first build (which is destined to fail) to complete
+before starting the second (hopefully fixed) build.
+</dl>
+
+<div class="node">
+<p><hr>
+<a name="PBListener"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Writing-New-Status-Plugins">Writing New Status Plugins</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#IRC-Bot">IRC Bot</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Status-Delivery">Status Delivery</a>
+
+</div>
+
+<h3 class="section">7.4 PBListener</h3>
+
+<p><a name="index-PBListener-117"></a><a name="index-buildbot_002estatus_002eclient_002ePBListener-118"></a>
+
+<pre class="example"> import buildbot.status.client
+ pbl = buildbot.status.client.PBListener(port=int, user=str,
+ passwd=str)
+ c['status'].append(pbl)
+</pre>
+ <p>This sets up a PB listener on the given TCP port, to which a PB-based
+status client can connect and retrieve status information.
+<code>buildbot statusgui</code> (see <a href="#statusgui">statusgui</a>) is an example of such a
+status client. The <code>port</code> argument can also be a strports
+specification string.
+
+<div class="node">
+<p><hr>
+<a name="Writing-New-Status-Plugins"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#PBListener">PBListener</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Status-Delivery">Status Delivery</a>
+
+</div>
+
+<h3 class="section">7.5 Writing New Status Plugins</h3>
+
+<p>TODO: this needs a lot more examples
+
+ <p>Each status plugin is an object which provides the
+<code>twisted.application.service.IService</code> interface, which creates a
+tree of Services with the buildmaster at the top [not strictly true].
+The status plugins are all children of an object which implements
+<code>buildbot.interfaces.IStatus</code>, the main status object. From this
+object, the plugin can retrieve anything it wants about current and
+past builds. It can also subscribe to hear about new and upcoming
+builds.
+
+ <p>Status plugins which only react to human queries (like the Waterfall
+display) never need to subscribe to anything: they are idle until
+someone asks a question, then wake up and extract the information they
+need to answer it, then they go back to sleep. Plugins which need to
+act spontaneously when builds complete (like the MailNotifier plugin)
+need to subscribe to hear about new builds.
+
+ <p>If the status plugin needs to run network services (like the HTTP
+server used by the Waterfall plugin), they can be attached as Service
+children of the plugin itself, using the <code>IServiceCollection</code>
+interface.
+
+<div class="node">
+<p><hr>
+<a name="Command-line-tool"></a>
+<a name="Command_002dline-tool"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Resources">Resources</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Status-Delivery">Status Delivery</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="chapter">8 Command-line tool</h2>
+
+<p>The <samp><span class="command">buildbot</span></samp> command-line tool can be used to start or stop a
+buildmaster or buildbot, and to interact with a running buildmaster.
+Some of its subcommands are intended for buildmaster admins, while
+some are for developers who are editing the code that the buildbot is
+monitoring.
+
+<ul class="menu">
+<li><a accesskey="1" href="#Administrator-Tools">Administrator Tools</a>
+<li><a accesskey="2" href="#Developer-Tools">Developer Tools</a>
+<li><a accesskey="3" href="#Other-Tools">Other Tools</a>
+<li><a accesskey="4" href="#g_t_002ebuildbot-config-directory">.buildbot config directory</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="Administrator-Tools"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Developer-Tools">Developer Tools</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Command_002dline-tool">Command-line tool</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Command_002dline-tool">Command-line tool</a>
+
+</div>
+
+<h3 class="section">8.1 Administrator Tools</h3>
+
+<p>The following <samp><span class="command">buildbot</span></samp> sub-commands are intended for
+buildmaster administrators:
+
+<h3 class="heading">create-master</h3>
+
+<p>This creates a new directory and populates it with files that allow it
+to be used as a buildmaster's base directory.
+
+<pre class="example"> buildbot create-master BASEDIR
+</pre>
+ <h3 class="heading">create-slave</h3>
+
+<p>This creates a new directory and populates it with files that let it
+be used as a buildslave's base directory. You must provide several
+arguments, which are used to create the initial <samp><span class="file">buildbot.tac</span></samp>
+file.
+
+<pre class="example"> buildbot create-slave <var>BASEDIR</var> <var>MASTERHOST</var>:<var>PORT</var> <var>SLAVENAME</var> <var>PASSWORD</var>
+</pre>
+ <h3 class="heading">start</h3>
+
+<p>This starts a buildmaster or buildslave which was already created in
+the given base directory. The daemon is launched in the background,
+with events logged to a file named <samp><span class="file">twistd.log</span></samp>.
+
+<pre class="example"> buildbot start BASEDIR
+</pre>
+ <h3 class="heading">stop</h3>
+
+<p>This terminates the daemon (either buildmaster or buildslave) running
+in the given directory.
+
+<pre class="example"> buildbot stop BASEDIR
+</pre>
+ <h3 class="heading">sighup</h3>
+
+<p>This sends a SIGHUP to the buildmaster running in the given directory,
+which causes it to re-read its <samp><span class="file">master.cfg</span></samp> file.
+
+<pre class="example"> buildbot sighup BASEDIR
+</pre>
+ <div class="node">
+<p><hr>
+<a name="Developer-Tools"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Other-Tools">Other Tools</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Administrator-Tools">Administrator Tools</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Command_002dline-tool">Command-line tool</a>
+
+</div>
+
+<h3 class="section">8.2 Developer Tools</h3>
+
+<p>These tools are provided for use by the developers who are working on
+the code that the buildbot is monitoring.
+
+<ul class="menu">
+<li><a accesskey="1" href="#statuslog">statuslog</a>
+<li><a accesskey="2" href="#statusgui">statusgui</a>
+<li><a accesskey="3" href="#try">try</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="statuslog"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#statusgui">statusgui</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Developer-Tools">Developer Tools</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Developer-Tools">Developer Tools</a>
+
+</div>
+
+<h4 class="subsection">8.2.1 statuslog</h4>
+
+<pre class="example"> buildbot statuslog --master <var>MASTERHOST</var>:<var>PORT</var>
+</pre>
+ <p>This command starts a simple text-based status client, one which just
+prints out a new line each time an event occurs on the buildmaster.
+
+ <p>The <samp><span class="option">--master</span></samp> option provides the location of the
+<code>buildbot.status.client.PBListener</code> status port, used to deliver
+build information to realtime status clients. The option is always in
+the form of a string, with hostname and port number separated by a
+colon (<code>HOSTNAME:PORTNUM</code>). Note that this port is <em>not</em> the
+same as the slaveport (although a future version may allow the same
+port number to be used for both purposes). If you get an error message
+to the effect of &ldquo;Failure: twisted.cred.error.UnauthorizedLogin:&rdquo;,
+this may indicate that you are connecting to the slaveport rather than
+a <code>PBListener</code> port.
+
+ <p>The <samp><span class="option">--master</span></samp> option can also be provided by the
+<code>masterstatus</code> name in <samp><span class="file">.buildbot/options</span></samp> (see <a href="#g_t_002ebuildbot-config-directory">.buildbot config directory</a>).
+
+<div class="node">
+<p><hr>
+<a name="statusgui"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#try">try</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#statuslog">statuslog</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Developer-Tools">Developer Tools</a>
+
+</div>
+
+<h4 class="subsection">8.2.2 statusgui</h4>
+
+<p><a name="index-statusgui-119"></a>
+If you have set up a PBListener (see <a href="#PBListener">PBListener</a>), you will be able
+to monitor your Buildbot using a simple Gtk+ application invoked with
+the <code>buildbot statusgui</code> command:
+
+<pre class="example"> buildbot statusgui --master <var>MASTERHOST</var>:<var>PORT</var>
+</pre>
+ <p>This command starts a simple Gtk+-based status client, which contains
+a few boxes for each Builder that change color as events occur. It
+uses the same <samp><span class="option">--master</span></samp> argument as the <samp><span class="command">buildbot
+statuslog</span></samp> command (see <a href="#statuslog">statuslog</a>).
+
+<div class="node">
+<p><hr>
+<a name="try"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#statusgui">statusgui</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Developer-Tools">Developer Tools</a>
+
+</div>
+
+<h4 class="subsection">8.2.3 try</h4>
+
+<p>This lets a developer ask the question &ldquo;What would happen if I
+committed this patch right now?&rdquo;. It runs the unit test suite (across
+multiple build platforms) on the developer's current code, allowing
+them to make sure they will not break the tree when they finally
+commit their changes.
+
+ <p>The <samp><span class="command">buildbot try</span></samp> command is meant to be run from within a
+developer's local tree, and starts by figuring out the base revision
+of that tree (what revision was current the last time the tree was
+updated), and a patch that can be applied to that revision of the tree
+to make it match the developer's copy. This (revision, patch) pair is
+then sent to the buildmaster, which runs a build with that
+SourceStamp. If you want, the tool will emit status messages as the
+builds run, and will not terminate until the first failure has been
+detected (or the last success).
+
+ <p>There is an alternate form which accepts a pre-made patch file
+(typically the output of a command like 'svn diff'). This &ldquo;&ndash;diff&rdquo;
+form does not require a local tree to run from. See <a href="#try-_002d_002ddiff">try &ndash;diff</a>.
+
+ <p>For this command to work, several pieces must be in place:
+
+<h3 class="heading">TryScheduler</h3>
+
+<p><a name="index-buildbot_002escheduler_002eTry_005fJobdir-120"></a><a name="index-buildbot_002escheduler_002eTry_005fUserpass-121"></a>
+The buildmaster must have a <code>scheduler.Try</code> instance in
+the config file's <code>c['schedulers']</code> list. This lets the
+administrator control who may initiate these &ldquo;trial&rdquo; builds, which
+branches are eligible for trial builds, and which Builders should be
+used for them.
+
+ <p>The <code>TryScheduler</code> has various means to accept build requests:
+all of them enforce more security than the usual buildmaster ports do.
+Any source code being built can be used to compromise the buildslave
+accounts, but in general that code must be checked out from the VC
+repository first, so only people with commit privileges can get
+control of the buildslaves. The usual force-build control channels can
+waste buildslave time but do not allow arbitrary commands to be
+executed by people who don't have those commit privileges. However,
+the source code patch that is provided with the trial build does not
+have to go through the VC system first, so it is important to make
+sure these builds cannot be abused by a non-committer to acquire as
+much control over the buildslaves as a committer has. Ideally, only
+developers who have commit access to the VC repository would be able
+to start trial builds, but unfortunately the buildmaster does not, in
+general, have access to VC system's user list.
+
+ <p>As a result, the <code>TryScheduler</code> requires a bit more
+configuration. There are currently two ways to set this up:
+
+ <dl>
+<dt><strong>jobdir (ssh)</strong><dd>
+This approach creates a command queue directory, called the
+&ldquo;jobdir&rdquo;, in the buildmaster's working directory. The buildmaster
+admin sets the ownership and permissions of this directory to only
+grant write access to the desired set of developers, all of whom must
+have accounts on the machine. The <code>buildbot try</code> command creates
+a special file containing the source stamp information and drops it in
+the jobdir, just like a standard maildir. When the buildmaster notices
+the new file, it unpacks the information inside and starts the builds.
+
+ <p>The config file entries used by 'buildbot try' either specify a local
+queuedir (for which write and mv are used) or a remote one (using scp
+and ssh).
+
+ <p>The advantage of this scheme is that it is quite secure, the
+disadvantage is that it requires fiddling outside the buildmaster
+config (to set the permissions on the jobdir correctly). If the
+buildmaster machine happens to also house the VC repository, then it
+can be fairly easy to keep the VC userlist in sync with the
+trial-build userlist. If they are on different machines, this will be
+much more of a hassle. It may also involve granting developer accounts
+on a machine that would not otherwise require them.
+
+ <p>To implement this, the <code>buildbot try</code> command invokes 'ssh -l username host
+buildbot tryserver ARGS', passing the patch contents over stdin. The
+arguments must include the inlet directory and the revision
+information.
+
+ <br><dt><strong>user+password (PB)</strong><dd>
+In this approach, each developer gets a username/password pair, which
+are all listed in the buildmaster's configuration file. When the
+developer runs <code>buildbot try</code>, their machine connects to the
+buildmaster via PB and authenticates themselves using that username
+and password, then sends a PB command to start the trial build.
+
+ <p>The advantage of this scheme is that the entire configuration is
+performed inside the buildmaster's config file. The disadvantages are
+that it is less secure (while the &ldquo;cred&rdquo; authentication system does
+not expose the password in plaintext over the wire, it does not offer
+most of the other security properties that SSH does). In addition, the
+buildmaster admin is responsible for maintaining the username/password
+list, adding and deleting entries as developers come and go.
+
+ </dl>
+
+ <p>For example, to set up the &ldquo;jobdir&rdquo; style of trial build, using a
+command queue directory of <samp><span class="file">MASTERDIR/jobdir</span></samp> (and assuming that
+all your project developers were members of the <code>developers</code> unix
+group), you would first create that directory (with <samp><span class="command">mkdir
+MASTERDIR/jobdir MASTERDIR/jobdir/new MASTERDIR/jobdir/cur
+MASTERDIR/jobdir/tmp; chgrp developers MASTERDIR/jobdir
+MASTERDIR/jobdir/*; chmod g+rwx,o-rwx MASTERDIR/jobdir
+MASTERDIR/jobdir/*</span></samp>), and then use the following scheduler in the
+buildmaster's config file:
+
+<pre class="example"> from buildbot.scheduler import Try_Jobdir
+ s = Try_Jobdir("try1", ["full-linux", "full-netbsd", "full-OSX"],
+ jobdir="jobdir")
+ c['schedulers'] = [s]
+</pre>
+ <p>Note that you must create the jobdir before telling the buildmaster to
+use this configuration, otherwise you will get an error. Also remember
+that the buildmaster must be able to read and write to the jobdir as
+well. Be sure to watch the <samp><span class="file">twistd.log</span></samp> file (see <a href="#Logfiles">Logfiles</a>)
+as you start using the jobdir, to make sure the buildmaster is happy
+with it.
+
+ <p>To use the username/password form of authentication, create a
+<code>Try_Userpass</code> instance instead. It takes the same
+<code>builderNames</code> argument as the <code>Try_Jobdir</code> form, but
+accepts an additional <code>port</code> argument (to specify the TCP port to
+listen on) and a <code>userpass</code> list of username/password pairs to
+accept. Remember to use good passwords for this: the security of the
+buildslave accounts depends upon it:
+
+<pre class="example"> from buildbot.scheduler import Try_Userpass
+ s = Try_Userpass("try2", ["full-linux", "full-netbsd", "full-OSX"],
+ port=8031, userpass=[("alice","pw1"), ("bob", "pw2")] )
+ c['schedulers'] = [s]
+</pre>
+ <p>Like most places in the buildbot, the <code>port</code> argument takes a
+strports specification. See <code>twisted.application.strports</code> for
+details.
+
+<h3 class="heading">locating the master</h3>
+
+<p>The <samp><span class="command">try</span></samp> command needs to be told how to connect to the
+<code>TryScheduler</code>, and must know which of the authentication
+approaches described above is in use by the buildmaster. You specify
+the approach by using <samp><span class="option">--connect=ssh</span></samp> or <samp><span class="option">--connect=pb</span></samp>
+(or <code>try_connect = 'ssh'</code> or <code>try_connect = 'pb'</code> in
+<samp><span class="file">.buildbot/options</span></samp>).
+
+ <p>For the PB approach, the command must be given a <samp><span class="option">--master</span></samp>
+argument (in the form HOST:PORT) that points to TCP port that you
+picked in the <code>Try_Userpass</code> scheduler. It also takes a
+<samp><span class="option">--username</span></samp> and <samp><span class="option">--passwd</span></samp> pair of arguments that match
+one of the entries in the buildmaster's <code>userpass</code> list. These
+arguments can also be provided as <code>try_master</code>,
+<code>try_username</code>, and <code>try_password</code> entries in the
+<samp><span class="file">.buildbot/options</span></samp> file.
+
+ <p>For the SSH approach, the command must be given <samp><span class="option">--tryhost</span></samp>,
+<samp><span class="option">--username</span></samp>, and optionally <samp><span class="option">--password</span></samp> (TODO:
+really?) to get to the buildmaster host. It must also be given
+<samp><span class="option">--trydir</span></samp>, which points to the inlet directory configured
+above. The trydir can be relative to the user's home directory, but
+most of the time you will use an explicit path like
+<samp><span class="file">~buildbot/project/trydir</span></samp>. These arguments can be provided in
+<samp><span class="file">.buildbot/options</span></samp> as <code>try_host</code>, <code>try_username</code>,
+<code>try_password</code>, and <code>try_dir</code>.
+
+ <p>In addition, the SSH approach needs to connect to a PBListener status
+port, so it can retrieve and report the results of the build (the PB
+approach uses the existing connection to retrieve status information,
+so this step is not necessary). This requires a <samp><span class="option">--master</span></samp>
+argument, or a <code>masterstatus</code> entry in <samp><span class="file">.buildbot/options</span></samp>,
+in the form of a HOSTNAME:PORT string.
+
+<h3 class="heading">choosing the Builders</h3>
+
+<p>A trial build is performed on multiple Builders at the same time, and
+the developer gets to choose which Builders are used (limited to a set
+selected by the buildmaster admin with the TryScheduler's
+<code>builderNames=</code> argument). The set you choose will depend upon
+what your goals are: if you are concerned about cross-platform
+compatibility, you should use multiple Builders, one from each
+platform of interest. You might use just one builder if that platform
+has libraries or other facilities that allow better test coverage than
+what you can accomplish on your own machine, or faster test runs.
+
+ <p>The set of Builders to use can be specified with multiple
+<samp><span class="option">--builder</span></samp> arguments on the command line. It can also be
+specified with a single <code>try_builders</code> option in
+<samp><span class="file">.buildbot/options</span></samp> that uses a list of strings to specify all
+the Builder names:
+
+<pre class="example"> try_builders = ["full-OSX", "full-win32", "full-linux"]
+</pre>
+ <h3 class="heading">specifying the VC system</h3>
+
+<p>The <samp><span class="command">try</span></samp> command also needs to know how to take the
+developer's current tree and extract the (revision, patch)
+source-stamp pair. Each VC system uses a different process, so you
+start by telling the <samp><span class="command">try</span></samp> command which VC system you are
+using, with an argument like <samp><span class="option">--vc=cvs</span></samp> or <samp><span class="option">--vc=tla</span></samp>.
+This can also be provided as <code>try_vc</code> in
+<samp><span class="file">.buildbot/options</span></samp>.
+
+ <p>The following names are recognized: <code>cvs</code> <code>svn</code> <code>baz</code>
+<code>tla</code> <code>hg</code> <code>darcs</code>
+
+<h3 class="heading">finding the top of the tree</h3>
+
+<p>Some VC systems (notably CVS and SVN) track each directory
+more-or-less independently, which means the <samp><span class="command">try</span></samp> command
+needs to move up to the top of the project tree before it will be able
+to construct a proper full-tree patch. To accomplish this, the
+<samp><span class="command">try</span></samp> command will crawl up through the parent directories
+until it finds a marker file. The default name for this marker file is
+<samp><span class="file">.buildbot-top</span></samp>, so when you are using CVS or SVN you should
+<code>touch .buildbot-top</code> from the top of your tree before running
+<samp><span class="command">buildbot try</span></samp>. Alternatively, you can use a filename like
+<samp><span class="file">ChangeLog</span></samp> or <samp><span class="file">README</span></samp>, since many projects put one of
+these files in their top-most directory (and nowhere else). To set
+this filename, use <samp><span class="option">--try-topfile=ChangeLog</span></samp>, or set it in the
+options file with <code>try_topfile = 'ChangeLog'</code>.
+
+ <p>You can also manually set the top of the tree with
+<samp><span class="option">--try-topdir=~/trees/mytree</span></samp>, or <code>try_topdir =
+'~/trees/mytree'</code>. If you use <code>try_topdir</code>, in a
+<samp><span class="file">.buildbot/options</span></samp> file, you will need a separate options file
+for each tree you use, so it may be more convenient to use the
+<code>try_topfile</code> approach instead.
+
+ <p>Other VC systems which work on full projects instead of individual
+directories (tla, baz, darcs, monotone, mercurial, git) do not require
+<samp><span class="command">try</span></samp> to know the top directory, so the <samp><span class="option">--try-topfile</span></samp>
+and <samp><span class="option">--try-topdir</span></samp> arguments will be ignored.
+<!-- is this true? I think I currently require topdirs all the time. -->
+
+ <p>If the <samp><span class="command">try</span></samp> command cannot find the top directory, it will
+abort with an error message.
+
+<h3 class="heading">determining the branch name</h3>
+
+<p>Some VC systems record the branch information in a way that &ldquo;try&rdquo;
+can locate it, in particular Arch (both <samp><span class="command">tla</span></samp> and
+<samp><span class="command">baz</span></samp>). For the others, if you are using something other than
+the default branch, you will have to tell the buildbot which branch
+your tree is using. You can do this with either the <samp><span class="option">--branch</span></samp>
+argument, or a <samp><span class="option">try_branch</span></samp> entry in the
+<samp><span class="file">.buildbot/options</span></samp> file.
+
+<h3 class="heading">determining the revision and patch</h3>
+
+<p>Each VC system has a separate approach for determining the tree's base
+revision and computing a patch.
+
+ <dl>
+<dt><code>CVS</code><dd>
+<samp><span class="command">try</span></samp> pretends that the tree is up to date. It converts the
+current time into a <code>-D</code> time specification, uses it as the base
+revision, and computes the diff between the upstream tree as of that
+point in time versus the current contents. This works, more or less,
+but requires that the local clock be in reasonably good sync with the
+repository.
+
+ <br><dt><code>SVN</code><dd><samp><span class="command">try</span></samp> does a <code>svn status -u</code> to find the latest
+repository revision number (emitted on the last line in the &ldquo;Status
+against revision: NN&rdquo; message). It then performs an <code>svn diff
+-rNN</code> to find out how your tree differs from the repository version,
+and sends the resulting patch to the buildmaster. If your tree is not
+up to date, this will result in the &ldquo;try&rdquo; tree being created with
+the latest revision, then <em>backwards</em> patches applied to bring it
+&ldquo;back&rdquo; to the version you actually checked out (plus your actual
+code changes), but this will still result in the correct tree being
+used for the build.
+
+ <br><dt><code>baz</code><dd><samp><span class="command">try</span></samp> does a <code>baz tree-id</code> to determine the
+fully-qualified version and patch identifier for the tree
+(ARCHIVE/VERSION&ndash;patch-NN), and uses the VERSION&ndash;patch-NN component
+as the base revision. It then does a <code>baz diff</code> to obtain the
+patch.
+
+ <br><dt><code>tla</code><dd><samp><span class="command">try</span></samp> does a <code>tla tree-version</code> to get the
+fully-qualified version identifier (ARCHIVE/VERSION), then takes the
+first line of <code>tla logs --reverse</code> to figure out the base
+revision. Then it does <code>tla changes --diffs</code> to obtain the patch.
+
+ <br><dt><code>Darcs</code><dd><code>darcs changes --context</code> emits a text file that contains a list
+of all patches back to and including the last tag that was made. This text
+file (plus the location of a repository that contains all these
+patches) is sufficient to re-create the tree. Therefore the contents
+of this &ldquo;context&rdquo; file <em>are</em> the revision stamp for a
+Darcs-controlled source tree.
+
+ <p>So <samp><span class="command">try</span></samp> does a <code>darcs changes --context</code> to determine
+what your tree's base revision is, and then does a <code>darcs diff
+-u</code> to compute the patch relative to that revision.
+
+ <br><dt><code>Mercurial</code><dd><code>hg identify</code> emits a short revision ID (basically a truncated
+SHA1 hash of the current revision's contents), which is used as the
+base revision. <code>hg diff</code> then provides the patch relative to that
+revision. For <samp><span class="command">try</span></samp> to work, your working directory must only
+have patches that are available from the same remotely-available
+repository that the build process' <code>step.Mercurial</code> will use.
+
+ <br><dt><code>Git</code><dd><code>git branch -v</code> lists all the branches available in the local
+repository along with the revision ID it points to and a short summary
+of the last commit. The line containing the currently checked out
+branch begins with '* ' (star and space) while all the others start
+with ' ' (two spaces). <samp><span class="command">try</span></samp> scans for this line and extracts
+the branch name and revision from it. Then it generates a diff against
+the base revision.
+<!-- TODO: I'm not sure if this actually works the way it's intended -->
+<!-- since the extracted base revision might not actually exist in the -->
+<!-- upstream repository. Perhaps we need to add a -remote option to -->
+<!-- specify the remote tracking branch to generate a diff against. -->
+
+ <!-- TODO: monotone -->
+ </dl>
+
+<h3 class="heading">waiting for results</h3>
+
+<p>If you provide the <samp><span class="option">--wait</span></samp> option (or <code>try_wait = True</code>
+in <samp><span class="file">.buildbot/options</span></samp>), the <samp><span class="command">buildbot try</span></samp> command will
+wait until your changes have either been proven good or bad before
+exiting. Unless you use the <samp><span class="option">--quiet</span></samp> option (or
+<code>try_quiet=True</code>), it will emit a progress message every 60
+seconds until the builds have completed.
+
+<ul class="menu">
+<li><a accesskey="1" href="#try-_002d_002ddiff">try --diff</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="try---diff"></a>
+<a name="try-_002d_002ddiff"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#try">try</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#try">try</a>
+
+</div>
+
+<h5 class="subsubsection">8.2.3.1 try &ndash;diff</h5>
+
+<p>Sometimes you might have a patch from someone else that you want to
+submit to the buildbot. For example, a user may have created a patch
+to fix some specific bug and sent it to you by email. You've inspected
+the patch and suspect that it might do the job (and have at least
+confirmed that it doesn't do anything evil). Now you want to test it
+out.
+
+ <p>One approach would be to check out a new local tree, apply the patch,
+run your local tests, then use &ldquo;buildbot try&rdquo; to run the tests on
+other platforms. An alternate approach is to use the <samp><span class="command">buildbot
+try --diff</span></samp> form to have the buildbot test the patch without using a
+local tree.
+
+ <p>This form takes a <samp><span class="option">--diff</span></samp> argument which points to a file that
+contains the patch you want to apply. By default this patch will be
+applied to the TRUNK revision, but if you give the optional
+<samp><span class="option">--baserev</span></samp> argument, a tree of the given revision will be used
+as a starting point instead of TRUNK.
+
+ <p>You can also use <samp><span class="command">buildbot try --diff=-</span></samp> to read the patch
+from stdin.
+
+ <p>Each patch has a &ldquo;patchlevel&rdquo; associated with it. This indicates the
+number of slashes (and preceding pathnames) that should be stripped
+before applying the diff. This exactly corresponds to the <samp><span class="option">-p</span></samp>
+or <samp><span class="option">--strip</span></samp> argument to the <samp><span class="command">patch</span></samp> utility. By
+default <samp><span class="command">buildbot try --diff</span></samp> uses a patchlevel of 0, but you
+can override this with the <samp><span class="option">-p</span></samp> argument.
+
+ <p>When you use <samp><span class="option">--diff</span></samp>, you do not need to use any of the other
+options that relate to a local tree, specifically <samp><span class="option">--vc</span></samp>,
+<samp><span class="option">--try-topfile</span></samp>, or <samp><span class="option">--try-topdir</span></samp>. These options will
+be ignored. Of course you must still specify how to get to the
+buildmaster (with <samp><span class="option">--connect</span></samp>, <samp><span class="option">--tryhost</span></samp>, etc).
+
+<div class="node">
+<p><hr>
+<a name="Other-Tools"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#g_t_002ebuildbot-config-directory">.buildbot config directory</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Developer-Tools">Developer Tools</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Command_002dline-tool">Command-line tool</a>
+
+</div>
+
+<h3 class="section">8.3 Other Tools</h3>
+
+<p>These tools are generally used by buildmaster administrators.
+
+<ul class="menu">
+<li><a accesskey="1" href="#sendchange">sendchange</a>
+<li><a accesskey="2" href="#debugclient">debugclient</a>
+</ul>
+
+<div class="node">
+<p><hr>
+<a name="sendchange"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#debugclient">debugclient</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Other-Tools">Other Tools</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Other-Tools">Other Tools</a>
+
+</div>
+
+<h4 class="subsection">8.3.1 sendchange</h4>
+
+<p>This command is used to tell the buildmaster about source changes. It
+is intended to be used from within a commit script, installed on the
+VC server. It requires that you have a PBChangeSource
+(see <a href="#PBChangeSource">PBChangeSource</a>) running in the buildmaster (by being set in
+<code>c['change_source']</code>).
+
+<pre class="example"> buildbot sendchange --master <var>MASTERHOST</var>:<var>PORT</var> --username <var>USER</var> <var>FILENAMES..</var>
+</pre>
+ <p>There are other (optional) arguments which can influence the
+<code>Change</code> that gets submitted:
+
+ <dl>
+<dt><code>--branch</code><dd>This provides the (string) branch specifier. If omitted, it defaults
+to None, indicating the &ldquo;default branch&rdquo;. All files included in this
+Change must be on the same branch.
+
+ <br><dt><code>--category</code><dd>This provides the (string) category specifier. If omitted, it defaults
+to None, indicating &ldquo;no category&rdquo;. The category property is used
+by Schedulers to filter what changes they listen to.
+
+ <br><dt><code>--revision_number</code><dd>This provides a (numeric) revision number for the change, used for VC systems
+that use numeric transaction numbers (like Subversion).
+
+ <br><dt><code>--revision</code><dd>This provides a (string) revision specifier, for VC systems that use
+strings (Arch would use something like patch-42 etc).
+
+ <br><dt><code>--revision_file</code><dd>This provides a filename which will be opened and the contents used as
+the revision specifier. This is specifically for Darcs, which uses the
+output of <samp><span class="command">darcs changes --context</span></samp> as a revision specifier.
+This context file can be a couple of kilobytes long, spanning a couple
+lines per patch, and would be a hassle to pass as a command-line
+argument.
+
+ <br><dt><code>--comments</code><dd>This provides the change comments as a single argument. You may want
+to use <samp><span class="option">--logfile</span></samp> instead.
+
+ <br><dt><code>--logfile</code><dd>This instructs the tool to read the change comments from the given
+file. If you use <code>-</code> as the filename, the tool will read the
+change comments from stdin.
+</dl>
+
+<div class="node">
+<p><hr>
+<a name="debugclient"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#sendchange">sendchange</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Other-Tools">Other Tools</a>
+
+</div>
+
+<h4 class="subsection">8.3.2 debugclient</h4>
+
+<pre class="example"> buildbot debugclient --master <var>MASTERHOST</var>:<var>PORT</var> --passwd <var>DEBUGPW</var>
+</pre>
+ <p>This launches a small Gtk+/Glade-based debug tool, connecting to the
+buildmaster's &ldquo;debug port&rdquo;. This debug port shares the same port
+number as the slaveport (see <a href="#Setting-the-slaveport">Setting the slaveport</a>), but the
+<code>debugPort</code> is only enabled if you set a debug password in the
+buildmaster's config file (see <a href="#Debug-options">Debug options</a>). The
+<samp><span class="option">--passwd</span></samp> option must match the <code>c['debugPassword']</code>
+value.
+
+ <p><samp><span class="option">--master</span></samp> can also be provided in <samp><span class="file">.debug/options</span></samp> by the
+<code>master</code> key. <samp><span class="option">--passwd</span></samp> can be provided by the
+<code>debugPassword</code> key.
+
+ <p>The <code>Connect</code> button must be pressed before any of the other
+buttons will be active. This establishes the connection to the
+buildmaster. The other sections of the tool are as follows:
+
+ <dl>
+<dt><code>Reload .cfg</code><dd>Forces the buildmaster to reload its <samp><span class="file">master.cfg</span></samp> file. This is
+equivalent to sending a SIGHUP to the buildmaster, but can be done
+remotely through the debug port. Note that it is a good idea to be
+watching the buildmaster's <samp><span class="file">twistd.log</span></samp> as you reload the config
+file, as any errors which are detected in the config file will be
+announced there.
+
+ <br><dt><code>Rebuild .py</code><dd>(not yet implemented). The idea here is to use Twisted's &ldquo;rebuild&rdquo;
+facilities to replace the buildmaster's running code with a new
+version. Even if this worked, it would only be used by buildbot
+developers.
+
+ <br><dt><code>poke IRC</code><dd>This locates a <code>words.IRC</code> status target and causes it to emit a
+message on all the channels to which it is currently connected. This
+was used to debug a problem in which the buildmaster lost the
+connection to the IRC server and did not attempt to reconnect.
+
+ <br><dt><code>Commit</code><dd>This allows you to inject a Change, just as if a real one had been
+delivered by whatever VC hook you are using. You can set the name of
+the committed file and the name of the user who is doing the commit.
+Optionally, you can also set a revision for the change. If the
+revision you provide looks like a number, it will be sent as an
+integer, otherwise it will be sent as a string.
+
+ <br><dt><code>Force Build</code><dd>This lets you force a Builder (selected by name) to start a build of
+the current source tree.
+
+ <br><dt><code>Currently</code><dd>(obsolete). This was used to manually set the status of the given
+Builder, but the status-assignment code was changed in an incompatible
+way and these buttons are no longer meaningful.
+
+ </dl>
+
+<div class="node">
+<p><hr>
+<a name=".buildbot-config-directory"></a>
+<a name="g_t_002ebuildbot-config-directory"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Other-Tools">Other Tools</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Command_002dline-tool">Command-line tool</a>
+
+</div>
+
+<h3 class="section">8.4 .buildbot config directory</h3>
+
+<p>Many of the <samp><span class="command">buildbot</span></samp> tools must be told how to contact the
+buildmaster that they interact with. This specification can be
+provided as a command-line argument, but most of the time it will be
+easier to set them in an &ldquo;options&rdquo; file. The <samp><span class="command">buildbot</span></samp>
+command will look for a special directory named <samp><span class="file">.buildbot</span></samp>,
+starting from the current directory (where the command was run) and
+crawling upwards, eventually looking in the user's home directory. It
+will look for a file named <samp><span class="file">options</span></samp> in this directory, and will
+evaluate it as a python script, looking for certain names to be set.
+You can just put simple <code>name = 'value'</code> pairs in this file to
+set the options.
+
+ <p>For a description of the names used in this file, please see the
+documentation for the individual <samp><span class="command">buildbot</span></samp> sub-commands. The
+following is a brief sample of what this file's contents could be.
+
+<pre class="example"> # for status-reading tools
+ masterstatus = 'buildbot.example.org:12345'
+ # for 'sendchange' or the debug port
+ master = 'buildbot.example.org:18990'
+ debugPassword = 'eiv7Po'
+</pre>
+ <dl>
+<dt><code>masterstatus</code><dd>Location of the <code>client.PBListener</code> status port, used by
+<samp><span class="command">statuslog</span></samp> and <samp><span class="command">statusgui</span></samp>.
+
+ <br><dt><code>master</code><dd>Location of the <code>debugPort</code> (for <samp><span class="command">debugclient</span></samp>). Also the
+location of the <code>pb.PBChangeSource</code> (for <samp><span class="command">sendchange</span></samp>).
+Usually shares the slaveport, but a future version may make it
+possible to have these listen on a separate port number.
+
+ <br><dt><code>debugPassword</code><dd>Must match the value of <code>c['debugPassword']</code>, used to protect the
+debug port, for the <samp><span class="command">debugclient</span></samp> command.
+
+ <br><dt><code>username</code><dd>Provides a default username for the <samp><span class="command">sendchange</span></samp> command.
+
+ </dl>
+
+ <p>The following options are used by the <code>buildbot try</code> command
+(see <a href="#try">try</a>):
+
+ <dl>
+<dt><code>try_connect</code><dd>This specifies how the &ldquo;try&rdquo; command should deliver its request to
+the buildmaster. The currently accepted values are &ldquo;ssh&rdquo; and &ldquo;pb&rdquo;.
+<br><dt><code>try_builders</code><dd>Which builders should be used for the &ldquo;try&rdquo; build.
+<br><dt><code>try_vc</code><dd>This specifies the version control system being used.
+<br><dt><code>try_branch</code><dd>This indicates that the current tree is on a non-trunk branch.
+<br><dt><code>try_topdir</code><br><dt><code>try_topfile</code><dd>Use <code>try_topdir</code> to explicitly indicate the top of your working
+tree, or <code>try_topfile</code> to name a file that will only be found in
+that top-most directory.
+
+ <br><dt><code>try_host</code><br><dt><code>try_username</code><br><dt><code>try_dir</code><dd>When try_connect is &ldquo;ssh&rdquo;, the command will pay attention to
+<code>try_host</code>, <code>try_username</code>, and <code>try_dir</code>.
+
+ <br><dt><code>try_username</code><br><dt><code>try_password</code><br><dt><code>try_master</code><dd>Instead, when <code>try_connect</code> is &ldquo;pb&rdquo;, the command will pay
+attention to <code>try_username</code>, <code>try_password</code>, and
+<code>try_master</code>.
+
+ <br><dt><code>try_wait</code><br><dt><code>masterstatus</code><dd><code>try_wait</code> and <code>masterstatus</code> are used to ask the &ldquo;try&rdquo;
+command to wait for the requested build to complete.
+
+ </dl>
+
+<div class="node">
+<p><hr>
+<a name="Resources"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Developer_0027s-Appendix">Developer's Appendix</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Command_002dline-tool">Command-line tool</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="chapter">9 Resources</h2>
+
+<p>The Buildbot's home page is at <a href="http://buildbot.sourceforge.net/">http://buildbot.sourceforge.net/</a>
+
+ <p>For configuration questions and general discussion, please use the
+<code>buildbot-devel</code> mailing list. The subscription instructions and
+archives are available at
+<a href="http://lists.sourceforge.net/lists/listinfo/buildbot-devel">http://lists.sourceforge.net/lists/listinfo/buildbot-devel</a>
+
+<div class="node">
+<p><hr>
+<a name="Developer's-Appendix"></a>
+<a name="Developer_0027s-Appendix"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Index-of-Useful-Classes">Index of Useful Classes</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Resources">Resources</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="unnumbered">Developer's Appendix</h2>
+
+<p>This appendix contains random notes about the implementation of the
+Buildbot, and is likely to only be of use to people intending to
+extend the Buildbot's internals.
+
+ <p>The buildmaster consists of a tree of Service objects, which is shaped
+as follows:
+
+<pre class="example"> BuildMaster
+ ChangeMaster (in .change_svc)
+ [IChangeSource instances]
+ [IScheduler instances] (in .schedulers)
+ BotMaster (in .botmaster)
+ [IBuildSlave instances]
+ [IStatusTarget instances] (in .statusTargets)
+</pre>
+ <p>The BotMaster has a collection of Builder objects as values of its
+<code>.builders</code> dictionary.
+
+<div class="node">
+<p><hr>
+<a name="Index-of-Useful-Classes"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Index-of-master_002ecfg-keys">Index of master.cfg keys</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Developer_0027s-Appendix">Developer's Appendix</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="unnumbered">Index of Useful Classes</h2>
+
+<p>This is a list of all user-visible classes. These are the ones that
+are useful in <samp><span class="file">master.cfg</span></samp>, the buildmaster's configuration file.
+Classes that are not listed here are generally internal things that
+admins are unlikely to have much use for.
+
+<h3 class="heading">Change Sources</h3>
+
+<ul class="index-cs" compact>
+<li><a href="#index-buildbot_002echanges_002ebonsaipoller_002eBonsaiPoller-49"><code>buildbot.changes.bonsaipoller.BonsaiPoller</code></a>: <a href="#BonsaiPoller">BonsaiPoller</a></li>
+<li><a href="#index-buildbot_002echanges_002efreshcvs_002eFreshCVSSource-42"><code>buildbot.changes.freshcvs.FreshCVSSource</code></a>: <a href="#CVSToys-_002d-PBService">CVSToys - PBService</a></li>
+<li><a href="#index-buildbot_002echanges_002email_002eBonsaiMaildirSource-45"><code>buildbot.changes.mail.BonsaiMaildirSource</code></a>: <a href="#BonsaiMaildirSource">BonsaiMaildirSource</a></li>
+<li><a href="#index-buildbot_002echanges_002email_002eFCMaildirSource-43"><code>buildbot.changes.mail.FCMaildirSource</code></a>: <a href="#FCMaildirSource">FCMaildirSource</a></li>
+<li><a href="#index-buildbot_002echanges_002email_002eSVNCommitEmailMaildirSource-46"><code>buildbot.changes.mail.SVNCommitEmailMaildirSource</code></a>: <a href="#SVNCommitEmailMaildirSource">SVNCommitEmailMaildirSource</a></li>
+<li><a href="#index-buildbot_002echanges_002email_002eSyncmailMaildirSource-44"><code>buildbot.changes.mail.SyncmailMaildirSource</code></a>: <a href="#SyncmailMaildirSource">SyncmailMaildirSource</a></li>
+<li><a href="#index-buildbot_002echanges_002ep4poller_002eP4Source-48"><code>buildbot.changes.p4poller.P4Source</code></a>: <a href="#P4Source">P4Source</a></li>
+<li><a href="#index-buildbot_002echanges_002epb_002ePBChangeSource-47"><code>buildbot.changes.pb.PBChangeSource</code></a>: <a href="#PBChangeSource">PBChangeSource</a></li>
+<li><a href="#index-buildbot_002echanges_002esvnpoller_002eSVNPoller-50"><code>buildbot.changes.svnpoller.SVNPoller</code></a>: <a href="#SVNPoller">SVNPoller</a></li>
+ </ul><h3 class="heading">Schedulers and Locks</h3>
+
+
+
+<ul class="index-sl" compact>
+<li><a href="#index-buildbot_002elocks_002eLockAccess-93"><code>buildbot.locks.LockAccess</code></a>: <a href="#Interlocks">Interlocks</a></li>
+<li><a href="#index-buildbot_002elocks_002eMasterLock-91"><code>buildbot.locks.MasterLock</code></a>: <a href="#Interlocks">Interlocks</a></li>
+<li><a href="#index-buildbot_002elocks_002eSlaveLock-92"><code>buildbot.locks.SlaveLock</code></a>: <a href="#Interlocks">Interlocks</a></li>
+<li><a href="#index-buildbot_002escheduler_002eAnyBranchScheduler-22"><code>buildbot.scheduler.AnyBranchScheduler</code></a>: <a href="#AnyBranchScheduler">AnyBranchScheduler</a></li>
+<li><a href="#index-buildbot_002escheduler_002eDependent-25"><code>buildbot.scheduler.Dependent</code></a>: <a href="#Dependent-Scheduler">Dependent Scheduler</a></li>
+<li><a href="#index-buildbot_002escheduler_002eNightly-27"><code>buildbot.scheduler.Nightly</code></a>: <a href="#Nightly-Scheduler">Nightly Scheduler</a></li>
+<li><a href="#index-buildbot_002escheduler_002ePeriodic-26"><code>buildbot.scheduler.Periodic</code></a>: <a href="#Periodic-Scheduler">Periodic Scheduler</a></li>
+<li><a href="#index-buildbot_002escheduler_002eScheduler-21"><code>buildbot.scheduler.Scheduler</code></a>: <a href="#Scheduler-Scheduler">Scheduler Scheduler</a></li>
+<li><a href="#index-buildbot_002escheduler_002eTriggerable-31"><code>buildbot.scheduler.Triggerable</code></a>: <a href="#Triggerable-Scheduler">Triggerable Scheduler</a></li>
+<li><a href="#index-buildbot_002escheduler_002eTry_005fJobdir-120"><code>buildbot.scheduler.Try_Jobdir</code></a>: <a href="#try">try</a></li>
+<li><a href="#index-buildbot_002escheduler_002eTry_005fJobdir-28"><code>buildbot.scheduler.Try_Jobdir</code></a>: <a href="#Try-Schedulers">Try Schedulers</a></li>
+<li><a href="#index-buildbot_002escheduler_002eTry_005fUserpass-121"><code>buildbot.scheduler.Try_Userpass</code></a>: <a href="#try">try</a></li>
+<li><a href="#index-buildbot_002escheduler_002eTry_005fUserpass-29"><code>buildbot.scheduler.Try_Userpass</code></a>: <a href="#Try-Schedulers">Try Schedulers</a></li>
+ </ul><h3 class="heading">Build Factories</h3>
+
+
+
+<ul class="index-bf" compact>
+<li><a href="#index-buildbot_002eprocess_002efactory_002eBasicBuildFactory-95"><code>buildbot.process.factory.BasicBuildFactory</code></a>: <a href="#BuildFactory">BuildFactory</a></li>
+<li><a href="#index-buildbot_002eprocess_002efactory_002eBasicSVN-96"><code>buildbot.process.factory.BasicSVN</code></a>: <a href="#BuildFactory">BuildFactory</a></li>
+<li><a href="#index-buildbot_002eprocess_002efactory_002eBuildFactory-94"><code>buildbot.process.factory.BuildFactory</code></a>: <a href="#BuildFactory">BuildFactory</a></li>
+<li><a href="#index-buildbot_002eprocess_002efactory_002eCPAN-100"><code>buildbot.process.factory.CPAN</code></a>: <a href="#CPAN">CPAN</a></li>
+<li><a href="#index-buildbot_002eprocess_002efactory_002eDistutils-101"><code>buildbot.process.factory.Distutils</code></a>: <a href="#Python-distutils">Python distutils</a></li>
+<li><a href="#index-buildbot_002eprocess_002efactory_002eGNUAutoconf-99"><code>buildbot.process.factory.GNUAutoconf</code></a>: <a href="#GNUAutoconf">GNUAutoconf</a></li>
+<li><a href="#index-buildbot_002eprocess_002efactory_002eQuickBuildFactory-98"><code>buildbot.process.factory.QuickBuildFactory</code></a>: <a href="#Quick-builds">Quick builds</a></li>
+<li><a href="#index-buildbot_002eprocess_002efactory_002eTrial-102"><code>buildbot.process.factory.Trial</code></a>: <a href="#Python_002fTwisted_002ftrial-projects">Python/Twisted/trial projects</a></li>
+ </ul><h3 class="heading">Build Steps</h3>
+
+
+
+<ul class="index-bs" compact>
+<li><a href="#index-buildbot_002esteps_002emaxq_002eMaxQ-123"><code>buildbot.steps.maxq.MaxQ</code></a>: <a href="#Index-of-Useful-Classes">Index of Useful Classes</a></li>
+<li><a href="#index-buildbot_002esteps_002epython_002eBuildEPYDoc-78"><code>buildbot.steps.python.BuildEPYDoc</code></a>: <a href="#BuildEPYDoc">BuildEPYDoc</a></li>
+<li><a href="#index-buildbot_002esteps_002epython_002ePyFlakes-79"><code>buildbot.steps.python.PyFlakes</code></a>: <a href="#PyFlakes">PyFlakes</a></li>
+<li><a href="#index-buildbot_002esteps_002epython_002ePyLint-80"><code>buildbot.steps.python.PyLint</code></a>: <a href="#PyLint">PyLint</a></li>
+<li><a href="#index-buildbot_002esteps_002epython_005ftwisted_002eBuildDebs-106"><code>buildbot.steps.python_twisted.BuildDebs</code></a>: <a href="#Python_002fTwisted_002ftrial-projects">Python/Twisted/trial projects</a></li>
+<li><a href="#index-buildbot_002esteps_002epython_005ftwisted_002eHLint-103"><code>buildbot.steps.python_twisted.HLint</code></a>: <a href="#Python_002fTwisted_002ftrial-projects">Python/Twisted/trial projects</a></li>
+<li><a href="#index-buildbot_002esteps_002epython_005ftwisted_002eProcessDocs-105"><code>buildbot.steps.python_twisted.ProcessDocs</code></a>: <a href="#Python_002fTwisted_002ftrial-projects">Python/Twisted/trial projects</a></li>
+<li><a href="#index-buildbot_002esteps_002epython_005ftwisted_002eRemovePYCs-107"><code>buildbot.steps.python_twisted.RemovePYCs</code></a>: <a href="#Python_002fTwisted_002ftrial-projects">Python/Twisted/trial projects</a></li>
+<li><a href="#index-buildbot_002esteps_002epython_005ftwisted_002eTrial-104"><code>buildbot.steps.python_twisted.Trial</code></a>: <a href="#Python_002fTwisted_002ftrial-projects">Python/Twisted/trial projects</a></li>
+<li><a href="#index-buildbot_002esteps_002eshell_002eCompile-73"><code>buildbot.steps.shell.Compile</code></a>: <a href="#Compile">Compile</a></li>
+<li><a href="#index-buildbot_002esteps_002eshell_002eConfigure-72"><code>buildbot.steps.shell.Configure</code></a>: <a href="#Configure">Configure</a></li>
+<li><a href="#index-buildbot_002esteps_002eshell_002ePerlModuleTest-76"><code>buildbot.steps.shell.PerlModuleTest</code></a>: <a href="#PerlModuleTest">PerlModuleTest</a></li>
+<li><a href="#index-buildbot_002esteps_002eshell_002eSetProperty-77"><code>buildbot.steps.shell.SetProperty</code></a>: <a href="#SetProperty">SetProperty</a></li>
+<li><a href="#index-buildbot_002esteps_002eshell_002eShellCommand-71"><code>buildbot.steps.shell.ShellCommand</code></a>: <a href="#ShellCommand">ShellCommand</a></li>
+<li><a href="#index-buildbot_002esteps_002eshell_002eTest-74"><code>buildbot.steps.shell.Test</code></a>: <a href="#Test">Test</a></li>
+<li><a href="#index-buildbot_002esteps_002eshell_002eTreeSize-75"><code>buildbot.steps.shell.TreeSize</code></a>: <a href="#TreeSize">TreeSize</a></li>
+<li><a href="#index-buildbot_002esteps_002esource_002eArch-62"><code>buildbot.steps.source.Arch</code></a>: <a href="#Arch">Arch</a></li>
+<li><a href="#index-buildbot_002esteps_002esource_002eBazaar-64"><code>buildbot.steps.source.Bazaar</code></a>: <a href="#Bazaar">Bazaar</a></li>
+<li><a href="#index-buildbot_002esteps_002esource_002eBzr-66"><code>buildbot.steps.source.Bzr</code></a>: <a href="#Bzr">Bzr</a></li>
+<li><a href="#index-buildbot_002esteps_002esource_002eCVS-54"><code>buildbot.steps.source.CVS</code></a>: <a href="#CVS">CVS</a></li>
+<li><a href="#index-buildbot_002esteps_002esource_002eDarcs-58"><code>buildbot.steps.source.Darcs</code></a>: <a href="#Darcs">Darcs</a></li>
+<li><a href="#index-buildbot_002esteps_002esource_002eGit-122"><code>buildbot.steps.source.Git</code></a>: <a href="#Index-of-Useful-Classes">Index of Useful Classes</a></li>
+<li><a href="#index-buildbot_002esteps_002esource_002eGit-70"><code>buildbot.steps.source.Git</code></a>: <a href="#Git">Git</a></li>
+<li><a href="#index-buildbot_002esteps_002esource_002eMercurial-60"><code>buildbot.steps.source.Mercurial</code></a>: <a href="#Mercurial">Mercurial</a></li>
+<li><a href="#index-buildbot_002esteps_002esource_002eP4-68"><code>buildbot.steps.source.P4</code></a>: <a href="#P4">P4</a></li>
+<li><a href="#index-buildbot_002esteps_002esource_002eSVN-56"><code>buildbot.steps.source.SVN</code></a>: <a href="#SVN">SVN</a></li>
+<li><a href="#index-buildbot_002esteps_002etransfer_002eDirectoryUpload-84"><code>buildbot.steps.transfer.DirectoryUpload</code></a>: <a href="#Transferring-Files">Transferring Files</a></li>
+<li><a href="#index-buildbot_002esteps_002etransfer_002eFileDownload-83"><code>buildbot.steps.transfer.FileDownload</code></a>: <a href="#Transferring-Files">Transferring Files</a></li>
+<li><a href="#index-buildbot_002esteps_002etransfer_002eFileUpload-82"><code>buildbot.steps.transfer.FileUpload</code></a>: <a href="#Transferring-Files">Transferring Files</a></li>
+ </ul><!-- undocumented steps -->
+<p><a name="index-buildbot_002esteps_002esource_002eGit-122"></a><a name="index-buildbot_002esteps_002emaxq_002eMaxQ-123"></a>
+
+<h3 class="heading">Status Targets</h3>
+
+
+
+<ul class="index-st" compact>
+<li><a href="#index-buildbot_002estatus_002eclient_002ePBListener-118"><code>buildbot.status.client.PBListener</code></a>: <a href="#PBListener">PBListener</a></li>
+<li><a href="#index-buildbot_002estatus_002ehtml_002eWaterfall-111"><code>buildbot.status.html.Waterfall</code></a>: <a href="#HTML-Waterfall">HTML Waterfall</a></li>
+<li><a href="#index-buildbot_002estatus_002email_002eMailNotifier-114"><code>buildbot.status.mail.MailNotifier</code></a>: <a href="#MailNotifier">MailNotifier</a></li>
+<li><a href="#index-buildbot_002estatus_002eweb_002ebaseweb_002eWebStatus-109"><code>buildbot.status.web.baseweb.WebStatus</code></a>: <a href="#WebStatus">WebStatus</a></li>
+<li><a href="#index-buildbot_002estatus_002ewords_002eIRC-116"><code>buildbot.status.words.IRC</code></a>: <a href="#IRC-Bot">IRC Bot</a></li>
+ </ul><!-- TODO: undocumented targets -->
+<div class="node">
+<p><hr>
+<a name="Index-of-master.cfg-keys"></a>
+<a name="Index-of-master_002ecfg-keys"></a>
+Next:&nbsp;<a rel="next" accesskey="n" href="#Index">Index</a>,
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Index-of-Useful-Classes">Index of Useful Classes</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="unnumbered">Index of master.cfg keys</h2>
+
+<p>This is a list of all of the significant keys in master.cfg . Recall
+that master.cfg is effectively a small python program with exactly one
+responsibility: create a dictionary named <code>BuildmasterConfig</code>.
+The keys of this dictionary are listed here. The beginning of the
+master.cfg file typically starts with something like:
+
+<pre class="example"> BuildmasterConfig = c = {}
+</pre>
+ <p>Therefore a config key of <code>change_source</code> will usually appear in
+master.cfg as <code>c['change_source']</code>.
+
+
+
+<ul class="index-bc" compact>
+<li><a href="#index-c_005b_0027buildbotURL_0027_005d-15"><code>c['buildbotURL']</code></a>: <a href="#Defining-the-Project">Defining the Project</a></li>
+<li><a href="#index-c_005b_0027builders_0027_005d-38"><code>c['builders']</code></a>: <a href="#Defining-Builders">Defining Builders</a></li>
+<li><a href="#index-c_005b_0027change_005fsource_0027_005d-18"><code>c['change_source']</code></a>: <a href="#Change-Sources-and-Schedulers">Change Sources and Schedulers</a></li>
+<li><a href="#index-c_005b_0027debugPassword_0027_005d-40"><code>c['debugPassword']</code></a>: <a href="#Debug-options">Debug options</a></li>
+<li><a href="#index-c_005b_0027logCompressionLimit_0027_005d-16"><code>c['logCompressionLimit']</code></a>: <a href="#Defining-the-Project">Defining the Project</a></li>
+<li><a href="#index-c_005b_0027manhole_0027_005d-41"><code>c['manhole']</code></a>: <a href="#Debug-options">Debug options</a></li>
+<li><a href="#index-c_005b_0027mergeRequests_0027_005d-32"><code>c['mergeRequests']</code></a>: <a href="#Merging-BuildRequests">Merging BuildRequests</a></li>
+<li><a href="#index-c_005b_0027projectName_0027_005d-13"><code>c['projectName']</code></a>: <a href="#Defining-the-Project">Defining the Project</a></li>
+<li><a href="#index-c_005b_0027projectURL_0027_005d-14"><code>c['projectURL']</code></a>: <a href="#Defining-the-Project">Defining the Project</a></li>
+<li><a href="#index-c_005b_0027properties_0027_005d-36"><code>c['properties']</code></a>: <a href="#Defining-Global-Properties">Defining Global Properties</a></li>
+<li><a href="#index-c_005b_0027schedulers_0027_005d-19"><code>c['schedulers']</code></a>: <a href="#Change-Sources-and-Schedulers">Change Sources and Schedulers</a></li>
+<li><a href="#index-c_005b_0027slavePortnum_0027_005d-33"><code>c['slavePortnum']</code></a>: <a href="#Setting-the-slaveport">Setting the slaveport</a></li>
+<li><a href="#index-c_005b_0027slaves_0027_005d-34"><code>c['slaves']</code></a>: <a href="#Buildslave-Specifiers">Buildslave Specifiers</a></li>
+<li><a href="#index-c_005b_0027sources_0027_005d-17"><code>c['sources']</code></a>: <a href="#Change-Sources-and-Schedulers">Change Sources and Schedulers</a></li>
+<li><a href="#index-c_005b_0027status_0027_005d-39"><code>c['status']</code></a>: <a href="#Defining-Status-Targets">Defining Status Targets</a></li>
+ </ul><div class="node">
+<p><hr>
+<a name="Index"></a>
+Previous:&nbsp;<a rel="previous" accesskey="p" href="#Index-of-master_002ecfg-keys">Index of master.cfg keys</a>,
+Up:&nbsp;<a rel="up" accesskey="u" href="#Top">Top</a>
+
+</div>
+
+<h2 class="unnumbered">Index</h2>
+
+
+
+<ul class="index-cp" compact>
+<li><a href="#index-addURL-89">addURL</a>: <a href="#BuildStep-URLs">BuildStep URLs</a></li>
+<li><a href="#index-Arch-Checkout-61">Arch Checkout</a>: <a href="#Arch">Arch</a></li>
+<li><a href="#index-Bazaar-Checkout-63">Bazaar Checkout</a>: <a href="#Bazaar">Bazaar</a></li>
+<li><a href="#index-Builder-9">Builder</a>: <a href="#Builder">Builder</a></li>
+<li><a href="#index-BuildRequest-8">BuildRequest</a>: <a href="#BuildRequest">BuildRequest</a></li>
+<li><a href="#index-BuildSet-7">BuildSet</a>: <a href="#BuildSet">BuildSet</a></li>
+<li><a href="#index-BuildStep-URLs-88">BuildStep URLs</a>: <a href="#BuildStep-URLs">BuildStep URLs</a></li>
+<li><a href="#index-Bzr-Checkout-65">Bzr Checkout</a>: <a href="#Bzr">Bzr</a></li>
+<li><a href="#index-Configuration-12">Configuration</a>: <a href="#Configuration">Configuration</a></li>
+<li><a href="#index-CVS-Checkout-53">CVS Checkout</a>: <a href="#CVS">CVS</a></li>
+<li><a href="#index-Darcs-Checkout-57">Darcs Checkout</a>: <a href="#Darcs">Darcs</a></li>
+<li><a href="#index-Dependencies-24">Dependencies</a>: <a href="#Dependent-Scheduler">Dependent Scheduler</a></li>
+<li><a href="#index-Dependent-23">Dependent</a>: <a href="#Dependent-Scheduler">Dependent Scheduler</a></li>
+<li><a href="#index-email-112">email</a>: <a href="#MailNotifier">MailNotifier</a></li>
+<li><a href="#index-File-Transfer-81">File Transfer</a>: <a href="#Transferring-Files">Transferring Files</a></li>
+<li><a href="#index-Git-Checkout-69">Git Checkout</a>: <a href="#Git">Git</a></li>
+<li><a href="#index-installation-3">installation</a>: <a href="#Installing-the-code">Installing the code</a></li>
+<li><a href="#index-introduction-1">introduction</a>: <a href="#Introduction">Introduction</a></li>
+<li><a href="#index-IRC-115">IRC</a>: <a href="#IRC-Bot">IRC Bot</a></li>
+<li><a href="#index-links-87">links</a>: <a href="#BuildStep-URLs">BuildStep URLs</a></li>
+<li><a href="#index-locks-90">locks</a>: <a href="#Interlocks">Interlocks</a></li>
+<li><a href="#index-logfiles-4">logfiles</a>: <a href="#Logfiles">Logfiles</a></li>
+<li><a href="#index-LogLineObserver-86">LogLineObserver</a>: <a href="#Adding-LogObservers">Adding LogObservers</a></li>
+<li><a href="#index-LogObserver-85">LogObserver</a>: <a href="#Adding-LogObservers">Adding LogObservers</a></li>
+<li><a href="#index-mail-113">mail</a>: <a href="#MailNotifier">MailNotifier</a></li>
+<li><a href="#index-Mercurial-Checkout-59">Mercurial Checkout</a>: <a href="#Mercurial">Mercurial</a></li>
+<li><a href="#index-PBListener-117">PBListener</a>: <a href="#PBListener">PBListener</a></li>
+<li><a href="#index-Perforce-Update-67">Perforce Update</a>: <a href="#P4">P4</a></li>
+<li><a href="#index-Philosophy-of-operation-2">Philosophy of operation</a>: <a href="#History-and-Philosophy">History and Philosophy</a></li>
+<li><a href="#index-Properties-51">Properties</a>: <a href="#Using-Build-Properties">Using Build Properties</a></li>
+<li><a href="#index-Properties-37">Properties</a>: <a href="#Defining-Global-Properties">Defining Global Properties</a></li>
+<li><a href="#index-Properties-35">Properties</a>: <a href="#Buildslave-Specifiers">Buildslave Specifiers</a></li>
+<li><a href="#index-Properties-20">Properties</a>: <a href="#Change-Sources-and-Schedulers">Change Sources and Schedulers</a></li>
+<li><a href="#index-Properties-11">Properties</a>: <a href="#Build-Properties">Build Properties</a></li>
+<li><a href="#index-Scheduler-6">Scheduler</a>: <a href="#Schedulers">Schedulers</a></li>
+<li><a href="#index-statusgui-119">statusgui</a>: <a href="#statusgui">statusgui</a></li>
+<li><a href="#index-SVN-Checkout-55">SVN Checkout</a>: <a href="#SVN">SVN</a></li>
+<li><a href="#index-treeStableTimer-97">treeStableTimer</a>: <a href="#BuildFactory-Attributes">BuildFactory Attributes</a></li>
+<li><a href="#index-Triggers-30">Triggers</a>: <a href="#Triggerable-Scheduler">Triggerable Scheduler</a></li>
+<li><a href="#index-Users-10">Users</a>: <a href="#Users">Users</a></li>
+<li><a href="#index-Version-Control-5">Version Control</a>: <a href="#Version-Control-Systems">Version Control Systems</a></li>
+<li><a href="#index-Waterfall-110">Waterfall</a>: <a href="#HTML-Waterfall">HTML Waterfall</a></li>
+<li><a href="#index-WebStatus-108">WebStatus</a>: <a href="#WebStatus">WebStatus</a></li>
+<li><a href="#index-WithProperties-52">WithProperties</a>: <a href="#Using-Build-Properties">Using Build Properties</a></li>
+ </ul><div class="footnote">
+<hr>
+<a name="texinfo-footnotes-in-document"></a><h4>Footnotes</h4><p class="footnote"><small>[<a name="fn-1" href="#fnd-1">1</a>]</small> this
+@reboot syntax is understood by Vixie cron, which is the flavor
+usually provided with Linux systems. Other unices may have a cron that
+doesn't understand @reboot.</p>
+
+ <p class="footnote"><small>[<a name="fn-2" href="#fnd-2">2</a>]</small> except Darcs, but
+since the Buildbot never modifies its local source tree we can ignore
+the fact that Darcs uses a less centralized model</p>
+
+ <p class="footnote"><small>[<a name="fn-3" href="#fnd-3">3</a>]</small> many VC
+systems provide more complexity than this: in particular the local
+views that P4 and ClearCase can assemble out of various source
+directories are more complex than we're prepared to take advantage of
+here</p>
+
+ <p class="footnote"><small>[<a name="fn-4" href="#fnd-4">4</a>]</small> Monotone's <em>multiple heads</em> feature
+violates this assumption of cumulative Changes, but in most situations
+the changes don't occur frequently enough for this to be a significant
+problem</p>
+
+ <p class="footnote"><small>[<a name="fn-5" href="#fnd-5">5</a>]</small> this <code>checkoutDelay</code> defaults
+to half the tree-stable timer, but it can be overridden with an
+argument to the Source Step</p>
+
+ <p class="footnote"><small>[<a name="fn-6" href="#fnd-6">6</a>]</small> To be precise, it is an object or a list of objects
+which all implement the <code>buildbot.interfaces.IChangeSource</code>
+Interface. It is unusual to have multiple ChangeSources, so this key
+accepts either a single ChangeSource or a sequence of them.</p>
+
+ <p class="footnote"><small>[<a name="fn-7" href="#fnd-7">7</a>]</small> Build properties are serialized along with the
+build results, so they must be serializable. For this reason, the
+value of any build property should be simple inert data: strings,
+numbers, lists, tuples, and dictionaries. They should not contain
+class instances.</p>
+
+ <p class="footnote"><small>[<a name="fn-8" href="#fnd-8">8</a>]</small> framboozle.com is still available. Remember, I get 10%
+:).</p>
+
+ <p class="footnote"><small>[<a name="fn-9" href="#fnd-9">9</a>]</small> Framboozle gets very excited about running unit
+tests.</p>
+
+ <p class="footnote"><small>[<a name="fn-10" href="#fnd-10">10</a>]</small> See
+http://en.wikipedia.org/wiki/Read/write_lock_pattern for more information.</p>
+
+ <p class="footnote"><small>[<a name="fn-11" href="#fnd-11">11</a>]</small> Deadlock is the situation where two or more slaves each
+hold a lock in exclusive mode, and in addition want to claim the lock held by
+the other slave exclusively as well. Since locks allow at most one exclusive
+user, both slaves will wait forever.</p>
+
+ <p class="footnote"><small>[<a name="fn-12" href="#fnd-12">12</a>]</small> Starving is the situation that only a few locks are available,
+and they are immediately grabbed by another build. As a result, it may take a
+long time before all locks needed by the starved build are free at the same
+time.</p>
+
+ <p class="footnote"><small>[<a name="fn-13" href="#fnd-13">13</a>]</small> It may even be possible to provide SSL access by using
+a specification like
+<code>"ssl:12345:privateKey=mykey.pem:certKey=cert.pem"</code>, but this is
+completely untested</p>
+
+ <p class="footnote"><small>[<a name="fn-14" href="#fnd-14">14</a>]</small> Apparently this is the same way
+http://buildd.debian.org displays build status</p>
+
+ <hr></div>
+
+</body></html>
+
diff --git a/buildbot/docs/buildbot.info b/buildbot/docs/buildbot.info
new file mode 100644
index 0000000..3665210
--- /dev/null
+++ b/buildbot/docs/buildbot.info
@@ -0,0 +1,192 @@
+This is buildbot.info, produced by makeinfo version 4.11 from
+buildbot.texinfo.
+
+This is the BuildBot manual.
+
+ Copyright (C) 2005,2006 Brian Warner
+
+ Copying and distribution of this file, with or without
+modification, are permitted in any medium without royalty provided
+the copyright notice and this notice are preserved.
+
+
+Indirect:
+buildbot.info-1: 330
+buildbot.info-2: 300456
+
+Tag Table:
+(Indirect)
+Node: Top330
+Node: Introduction5164
+Node: History and Philosophy7041
+Node: System Architecture9766
+Node: BuildSlave Connections12363
+Node: Buildmaster Architecture14476
+Node: Status Delivery Architecture19913
+Node: Control Flow22109
+Node: Installation24947
+Node: Requirements25300
+Node: Installing the code27101
+Node: Creating a buildmaster29051
+Node: Upgrading an Existing Buildmaster31499
+Node: Creating a buildslave33109
+Node: Buildslave Options38471
+Node: Launching the daemons41338
+Ref: Launching the daemons-Footnote-144523
+Node: Logfiles44698
+Node: Shutdown45237
+Node: Maintenance46934
+Node: Troubleshooting48361
+Node: Starting the buildslave48634
+Node: Connecting to the buildmaster49767
+Node: Forcing Builds50810
+Node: Concepts51562
+Node: Version Control Systems51961
+Ref: Version Control Systems-Footnote-152803
+Node: Generalizing VC Systems52949
+Ref: Generalizing VC Systems-Footnote-156576
+Node: Source Tree Specifications56797
+Ref: Source Tree Specifications-Footnote-159670
+Ref: Source Tree Specifications-Footnote-259864
+Node: How Different VC Systems Specify Sources59994
+Node: Attributes of Changes66108
+Node: Schedulers70116
+Node: BuildSet72704
+Node: BuildRequest75364
+Node: Builder76761
+Node: Users78924
+Node: Doing Things With Users80073
+Node: Email Addresses82438
+Node: IRC Nicknames84517
+Node: Live Status Clients85752
+Node: Build Properties86374
+Node: Configuration88213
+Node: Config File Format89559
+Node: Loading the Config File91934
+Node: Testing the Config File93629
+Node: Defining the Project95328
+Node: Change Sources and Schedulers97299
+Ref: Change Sources and Schedulers-Footnote-199417
+Node: Scheduler Scheduler99666
+Node: AnyBranchScheduler102478
+Node: Dependent Scheduler103699
+Node: Periodic Scheduler106408
+Node: Nightly Scheduler107365
+Node: Try Schedulers110251
+Node: Triggerable Scheduler110844
+Node: Merging BuildRequests113765
+Node: Setting the slaveport114912
+Node: Buildslave Specifiers116314
+Node: When Buildslaves Go Missing118689
+Node: On-Demand ("Latent") Buildslaves121063
+Node: Amazon Web Services Elastic Compute Cloud ("AWS EC2")122044
+Node: Get an AWS EC2 Account122939
+Node: Create an AMI124055
+Node: Configure the Master with an EC2LatentBuildSlave125139
+Node: Dangers with Latent Buildslaves132190
+Node: Writing New Latent Buildslaves133475
+Node: Defining Global Properties134700
+Node: Defining Builders135151
+Node: Defining Status Targets138719
+Node: Debug options139801
+Node: Getting Source Code Changes143846
+Node: Change Sources145171
+Node: Choosing ChangeSources149378
+Node: CVSToys - PBService150763
+Node: Mail-parsing ChangeSources153575
+Node: Subscribing the Buildmaster155627
+Node: Using Maildirs157009
+Node: Parsing Email Change Messages158571
+Node: FCMaildirSource160359
+Node: SyncmailMaildirSource161344
+Node: BonsaiMaildirSource161825
+Node: SVNCommitEmailMaildirSource162310
+Node: PBChangeSource162930
+Node: P4Source166261
+Node: BonsaiPoller168020
+Node: SVNPoller168690
+Node: MercurialHook180073
+Node: Bzr Hook183738
+Node: Bzr Poller186391
+Node: Build Process187802
+Node: Build Steps189002
+Node: Common Parameters191091
+Node: Using Build Properties193575
+Ref: Using Build Properties-Footnote-1199730
+Node: Source Checkout200000
+Node: CVS205244
+Node: SVN206386
+Node: Darcs212558
+Node: Mercurial214252
+Node: Arch215166
+Node: Bazaar215962
+Node: Bzr216485
+Node: P4217505
+Node: Git219019
+Node: ShellCommand219788
+Node: Simple ShellCommand Subclasses226679
+Node: Configure227214
+Node: Compile227632
+Node: Test229039
+Node: TreeSize229282
+Node: PerlModuleTest229690
+Node: SetProperty230063
+Node: Python BuildSteps231576
+Node: BuildEPYDoc231898
+Node: PyFlakes233378
+Node: PyLint234310
+Node: Transferring Files234690
+Node: Steps That Run on the Master239471
+Node: Triggering Schedulers240662
+Node: Writing New BuildSteps242074
+Node: Writing BuildStep Constructors243011
+Node: BuildStep LogFiles245106
+Node: Reading Logfiles249588
+Node: Adding LogObservers250397
+Ref: Adding LogObservers-Footnote-1263069
+Ref: Adding LogObservers-Footnote-2263136
+Node: BuildStep URLs263199
+Node: Interlocks266299
+Ref: Interlocks-Footnote-1272349
+Ref: Interlocks-Footnote-2272436
+Ref: Interlocks-Footnote-3272692
+Node: Build Factories272923
+Node: BuildStep Objects273900
+Node: BuildFactory274972
+Node: BuildFactory Attributes279864
+Node: Quick builds280526
+Node: Process-Specific build factories281262
+Node: GNUAutoconf281806
+Node: CPAN284385
+Node: Python distutils285146
+Node: Python/Twisted/trial projects286420
+Node: Status Delivery293295
+Node: WebStatus294392
+Node: WebStatus Configuration Parameters297881
+Ref: WebStatus Configuration Parameters-Footnote-1300287
+Node: Buildbot Web Resources300456
+Ref: Buildbot Web Resources-Footnote-1308015
+Node: XMLRPC server308102
+Node: HTML Waterfall309810
+Node: MailNotifier310256
+Node: IRC Bot319439
+Node: PBListener323175
+Node: Writing New Status Plugins323786
+Node: Command-line tool325056
+Node: Administrator Tools325582
+Node: Developer Tools326858
+Node: statuslog327177
+Node: statusgui328258
+Node: try328842
+Node: try --diff344415
+Node: Other Tools346172
+Node: sendchange346435
+Node: debugclient348474
+Node: .buildbot config directory351050
+Node: Resources354006
+Node: Developer's Appendix354427
+Node: Index of Useful Classes355183
+Node: Index of master.cfg keys361528
+Node: Index364036
+
+End Tag Table
diff --git a/buildbot/docs/buildbot.info-1 b/buildbot/docs/buildbot.info-1
new file mode 100644
index 0000000..5dcf913
--- /dev/null
+++ b/buildbot/docs/buildbot.info-1
@@ -0,0 +1,7278 @@
+This is buildbot.info, produced by makeinfo version 4.11 from
+buildbot.texinfo.
+
+This is the BuildBot manual.
+
+ Copyright (C) 2005,2006 Brian Warner
+
+ Copying and distribution of this file, with or without
+modification, are permitted in any medium without royalty provided
+the copyright notice and this notice are preserved.
+
+
+File: buildbot.info, Node: Top, Next: Introduction, Prev: (dir), Up: (dir)
+
+BuildBot
+********
+
+This is the BuildBot manual.
+
+ Copyright (C) 2005,2006 Brian Warner
+
+ Copying and distribution of this file, with or without
+modification, are permitted in any medium without royalty provided
+the copyright notice and this notice are preserved.
+
+* Menu:
+
+* Introduction:: What the BuildBot does.
+* Installation:: Creating a buildmaster and buildslaves,
+ running them.
+* Concepts:: What goes on in the buildbot's little mind.
+* Configuration:: Controlling the buildbot.
+* Getting Source Code Changes:: Discovering when to run a build.
+* Build Process:: Controlling how each build is run.
+* Status Delivery:: Telling the world about the build's results.
+* Command-line tool::
+* Resources:: Getting help.
+* Developer's Appendix::
+* Index of Useful Classes::
+* Index of master.cfg keys::
+* Index:: Complete index.
+
+ --- The Detailed Node Listing ---
+
+Introduction
+
+* History and Philosophy::
+* System Architecture::
+* Control Flow::
+
+System Architecture
+
+* BuildSlave Connections::
+* Buildmaster Architecture::
+* Status Delivery Architecture::
+
+Installation
+
+* Requirements::
+* Installing the code::
+* Creating a buildmaster::
+* Upgrading an Existing Buildmaster::
+* Creating a buildslave::
+* Launching the daemons::
+* Logfiles::
+* Shutdown::
+* Maintenance::
+* Troubleshooting::
+
+Creating a buildslave
+
+* Buildslave Options::
+
+Troubleshooting
+
+* Starting the buildslave::
+* Connecting to the buildmaster::
+* Forcing Builds::
+
+Concepts
+
+* Version Control Systems::
+* Schedulers::
+* BuildSet::
+* BuildRequest::
+* Builder::
+* Users::
+* Build Properties::
+
+Version Control Systems
+
+* Generalizing VC Systems::
+* Source Tree Specifications::
+* How Different VC Systems Specify Sources::
+* Attributes of Changes::
+
+Users
+
+* Doing Things With Users::
+* Email Addresses::
+* IRC Nicknames::
+* Live Status Clients::
+
+Configuration
+
+* Config File Format::
+* Loading the Config File::
+* Testing the Config File::
+* Defining the Project::
+* Change Sources and Schedulers::
+* Setting the slaveport::
+* Buildslave Specifiers::
+* On-Demand ("Latent") Buildslaves::
+* Defining Global Properties::
+* Defining Builders::
+* Defining Status Targets::
+* Debug options::
+
+Change Sources and Schedulers
+
+* Scheduler Scheduler::
+* AnyBranchScheduler::
+* Dependent Scheduler::
+* Periodic Scheduler::
+* Nightly Scheduler::
+* Try Schedulers::
+* Triggerable Scheduler::
+
+Buildslave Specifiers
+* When Buildslaves Go Missing::
+
+On-Demand ("Latent") Buildslaves
+* Amazon Web Services Elastic Compute Cloud ("AWS EC2")::
+* Dangers with Latent Buildslaves::
+* Writing New Latent Buildslaves::
+
+Getting Source Code Changes
+
+* Change Sources::
+* Choosing ChangeSources::
+* CVSToys - PBService::
+* Mail-parsing ChangeSources::
+* PBChangeSource::
+* P4Source::
+* BonsaiPoller::
+* SVNPoller::
+* MercurialHook::
+* Bzr Hook::
+* Bzr Poller::
+
+Mail-parsing ChangeSources
+
+* Subscribing the Buildmaster::
+* Using Maildirs::
+* Parsing Email Change Messages::
+
+Parsing Email Change Messages
+
+* FCMaildirSource::
+* SyncmailMaildirSource::
+* BonsaiMaildirSource::
+* SVNCommitEmailMaildirSource::
+
+Build Process
+
+* Build Steps::
+* Interlocks::
+* Build Factories::
+
+Build Steps
+
+* Common Parameters::
+* Using Build Properties::
+* Source Checkout::
+* ShellCommand::
+* Simple ShellCommand Subclasses::
+* Python BuildSteps::
+* Transferring Files::
+* Steps That Run on the Master::
+* Triggering Schedulers::
+* Writing New BuildSteps::
+
+Source Checkout
+
+* CVS::
+* SVN::
+* Darcs::
+* Mercurial::
+* Arch::
+* Bazaar::
+* Bzr::
+* P4::
+* Git::
+
+Simple ShellCommand Subclasses
+
+* Configure::
+* Compile::
+* Test::
+* TreeSize::
+* PerlModuleTest::
+* SetProperty::
+
+Python BuildSteps
+
+* BuildEPYDoc::
+* PyFlakes::
+* PyLint::
+
+Writing New BuildSteps
+
+* BuildStep LogFiles::
+* Reading Logfiles::
+* Adding LogObservers::
+* BuildStep URLs::
+
+Build Factories
+
+* BuildStep Objects::
+* BuildFactory::
+* Process-Specific build factories::
+
+BuildStep Objects
+
+* BuildFactory Attributes::
+* Quick builds::
+
+BuildFactory
+
+* BuildFactory Attributes::
+* Quick builds::
+
+Process-Specific build factories
+
+* GNUAutoconf::
+* CPAN::
+* Python distutils::
+* Python/Twisted/trial projects::
+
+Status Delivery
+
+* WebStatus::
+* MailNotifier::
+* IRC Bot::
+* PBListener::
+* Writing New Status Plugins::
+
+WebStatus
+
+* WebStatus Configuration Parameters::
+* Buildbot Web Resources::
+* XMLRPC server::
+* HTML Waterfall::
+
+Command-line tool
+
+* Administrator Tools::
+* Developer Tools::
+* Other Tools::
+* .buildbot config directory::
+
+Developer Tools
+
+* statuslog::
+* statusgui::
+* try::
+
+waiting for results
+
+* try --diff::
+
+Other Tools
+
+* sendchange::
+* debugclient::
+
+
+File: buildbot.info, Node: Introduction, Next: Installation, Prev: Top, Up: Top
+
+1 Introduction
+**************
+
+The BuildBot is a system to automate the compile/test cycle required
+by most software projects to validate code changes. By automatically
+rebuilding and testing the tree each time something has changed,
+build problems are pinpointed quickly, before other developers are
+inconvenienced by the failure. The guilty developer can be identified
+and harassed without human intervention. By running the builds on a
+variety of platforms, developers who do not have the facilities to
+test their changes everywhere before checkin will at least know
+shortly afterwards whether they have broken the build or not. Warning
+counts, lint checks, image size, compile time, and other build
+parameters can be tracked over time, are more visible, and are
+therefore easier to improve.
+
+ The overall goal is to reduce tree breakage and provide a platform
+to run tests or code-quality checks that are too annoying or pedantic
+for any human to waste their time with. Developers get immediate (and
+potentially public) feedback about their changes, encouraging them to
+be more careful about testing before checkin.
+
+ Features:
+
+ * run builds on a variety of slave platforms
+
+ * arbitrary build process: handles projects using C, Python,
+ whatever
+
+ * minimal host requirements: python and Twisted
+
+ * slaves can be behind a firewall if they can still do checkout
+
+ * status delivery through web page, email, IRC, other protocols
+
+ * track builds in progress, provide estimated completion time
+
+ * flexible configuration by subclassing generic build process
+ classes
+
+ * debug tools to force a new build, submit fake Changes, query
+ slave status
+
+ * released under the GPL
+
+* Menu:
+
+* History and Philosophy::
+* System Architecture::
+* Control Flow::
+
+
+File: buildbot.info, Node: History and Philosophy, Next: System Architecture, Prev: Introduction, Up: Introduction
+
+1.1 History and Philosophy
+==========================
+
+The Buildbot was inspired by a similar project built for a development
+team writing a cross-platform embedded system. The various components
+of the project were supposed to compile and run on several flavors of
+unix (linux, solaris, BSD), but individual developers had their own
+preferences and tended to stick to a single platform. From time to
+time, incompatibilities would sneak in (some unix platforms want to
+use `string.h', some prefer `strings.h'), and then the tree would
+compile for some developers but not others. The buildbot was written
+to automate the human process of walking into the office, updating a
+tree, compiling (and discovering the breakage), finding the developer
+at fault, and complaining to them about the problem they had
+introduced. With multiple platforms it was difficult for developers to
+do the right thing (compile their potential change on all platforms);
+the buildbot offered a way to help.
+
+ Another problem was when programmers would change the behavior of a
+library without warning its users, or change internal aspects that
+other code was (unfortunately) depending upon. Adding unit tests to
+the codebase helps here: if an application's unit tests pass despite
+changes in the libraries it uses, you can have more confidence that
+the library changes haven't broken anything. Many developers
+complained that the unit tests were inconvenient or took too long to
+run: having the buildbot run them reduces the developer's workload to
+a minimum.
+
+ In general, having more visibility into the project is always good,
+and automation makes it easier for developers to do the right thing.
+When everyone can see the status of the project, developers are
+encouraged to keep the tree in good working order. Unit tests that
+aren't run on a regular basis tend to suffer from bitrot just like
+code does: exercising them on a regular basis helps to keep them
+functioning and useful.
+
+ The current version of the Buildbot is additionally targeted at
+distributed free-software projects, where resources and platforms are
+only available when provided by interested volunteers. The buildslaves
+are designed to require an absolute minimum of configuration, reducing
+the effort a potential volunteer needs to expend to be able to
+contribute a new test environment to the project. The goal is for
+anyone who wishes that a given project would run on their favorite
+platform should be able to offer that project a buildslave, running on
+that platform, where they can verify that their portability code
+works, and keeps working.
+
+
+File: buildbot.info, Node: System Architecture, Next: Control Flow, Prev: History and Philosophy, Up: Introduction
+
+1.2 System Architecture
+=======================
+
+The Buildbot consists of a single `buildmaster' and one or more
+`buildslaves', connected in a star topology. The buildmaster makes
+all decisions about what, when, and how to build. It sends commands
+to be run on the build slaves, which simply execute the commands and
+return the results. (certain steps involve more local decision
+making, where the overhead of sending a lot of commands back and
+forth would be inappropriate, but in general the buildmaster is
+responsible for everything).
+
+ The buildmaster is usually fed `Changes' by some sort of version
+control system (*note Change Sources::), which may cause builds to be
+run. As the builds are performed, various status messages are
+produced, which are then sent to any registered Status Targets (*note
+Status Delivery::).
+
+
+ +------------------+ +-----------+
+ | |---------->| Browser |
+ | BuildMaster | +-----------+
+ Changes | |--------------->+--------+
+ +----------->| | Build Status | email |
+ | | |------------+ +--------+
+ | | |-------+ | +---------------+
+ | +------------------+ | +---->| Status Client |
++----------+ | ^ | ^ | +---------------+
+| Change | | | C| | | +-----+
+| Sources | | | o| | +------------>| IRC |
+| | | | m| |R +-----+
+| CVS | v | m| |e
+| SVN | +---------+ a| |s
+| Darcs | | Build | n| |u
+| .. etc | | Slave | d| |l
+| | +---------+ s| |t
+| | v |s
++----------+ +---------+
+ | Build |
+ | Slave |
+ +---------+
+
+ The buildmaster is configured and maintained by the "buildmaster
+admin", who is generally the project team member responsible for
+build process issues. Each buildslave is maintained by a "buildslave
+admin", who does not need to be quite as involved. Generally slaves are
+run by anyone who has an interest in seeing the project work well on
+their favorite platform.
+
+* Menu:
+
+* BuildSlave Connections::
+* Buildmaster Architecture::
+* Status Delivery Architecture::
+
+
+File: buildbot.info, Node: BuildSlave Connections, Next: Buildmaster Architecture, Prev: System Architecture, Up: System Architecture
+
+1.2.1 BuildSlave Connections
+----------------------------
+
+The buildslaves are typically run on a variety of separate machines,
+at least one per platform of interest. These machines connect to the
+buildmaster over a TCP connection to a publically-visible port. As a
+result, the buildslaves can live behind a NAT box or similar
+firewalls, as long as they can get to buildmaster. The TCP connections
+are initiated by the buildslave and accepted by the buildmaster, but
+commands and results travel both ways within this connection. The
+buildmaster is always in charge, so all commands travel exclusively
+from the buildmaster to the buildslave.
+
+ To perform builds, the buildslaves must typically obtain source
+code from a CVS/SVN/etc repository. Therefore they must also be able
+to reach the repository. The buildmaster provides instructions for
+performing builds, but does not provide the source code itself.
+
+
+
+Repository| | BuildMaster | |
+ (CVS/SVN)| | ^|^^^ |
+ | | / c \ |
+----------+ +------------------/--o----\-+
+ ^ / m ^ \
+ | / m | \
+ checkout/update --+ a | +--
+ | TCP| n | |TCP
+ | | d | |
+ | | s | |
+ | | | | |
+ | | | r |
+ | | | e |
+ -N-A-T-|- - - - -N-A-T- - - - -|- |- s-|- - - - -N-A-T- - -
+ | | | u |
+ | | | l |
+ | +------------------|--|--t-|-+
+ | | | | s | |
+ +----| v | |
+ | | |
+ | | |
+ | |
+ | BuildSlave |
+ +----------------------------+
+
+
+File: buildbot.info, Node: Buildmaster Architecture, Next: Status Delivery Architecture, Prev: BuildSlave Connections, Up: System Architecture
+
+1.2.2 Buildmaster Architecture
+------------------------------
+
+The Buildmaster consists of several pieces:
+
+
+
+ +---------------+
+ | Change Source |----->----+
+ +---------------+ |
+ Changes
+ |
+ +---------------+ v
+ | Change Source |----->----+
+ +---------------+ v
+ +-----+-------+
+ | |
+ v v
+ +-----------+ +-----------+
+ | Scheduler | | Scheduler |
+ +-----------+ +-----------+
+ | | |
+ +------+---------+ +---+ +-----+
+ | | | |
+ v | | Build
+ : : : v v : Request
+ : : : : |
+ : ---- : : : |
+ : ---- : : ---- : |
+ +======+ +======+ : v :
+ | | : :
+ v v : :
+ +---------+ +---------+ :queue :
+ | Builder | | Builder | +======+
+ +---------+ +---------+ |
+ v
+ +---------+
+ | Builder |
+ +---------+
+
+ * Change Sources, which create a Change object each time something
+ is modified in the VC repository. Most ChangeSources listen for
+ messages from a hook script of some sort. Some sources actively
+ poll the repository on a regular basis. All Changes are fed to
+ the Schedulers.
+
+ * Schedulers, which decide when builds should be performed. They
+ collect Changes into BuildRequests, which are then queued for
+ delivery to Builders until a buildslave is available.
+
+ * Builders, which control exactly _how_ each build is performed
+ (with a series of BuildSteps, configured in a BuildFactory). Each
+ Build is run on a single buildslave.
+
+ * Status plugins, which deliver information about the build results
+ through protocols like HTTP, mail, and IRC.
+
+
+
+
+ +-----------------+
+ | BuildSlave |
+ | |
+ | |
+ +-------+ | +------------+ |
+ |Builder|----Build----->|SlaveBuilder| |
+ +-------+ | +------------+ |
+ | |
+ | +------------+ |
+ +-Build---->|SlaveBuilder| |
+ | | +------------+ |
+ +-------+ | | |
+ |Builder|---+ +-----------------+
+ +-------+ |
+ |
+ | +-----------------+
+ Build | BuildSlave |
+ | | |
+ | | |
+ | | +------------+ |
+ +------->|SlaveBuilder| |
+ | +------------+ |
+ +-------+ | |
+ |Builder|--+ | +------------+ |
+ +-------+ +-------->|SlaveBuilder| |
+ | +------------+ |
+ | |
+ +-----------------+
+
+ Each Builder is configured with a list of BuildSlaves that it will
+use for its builds. These buildslaves are expected to behave
+identically: the only reason to use multiple BuildSlaves for a single
+Builder is to provide a measure of load-balancing.
+
+ Within a single BuildSlave, each Builder creates its own
+SlaveBuilder instance. These SlaveBuilders operate independently from
+each other. Each gets its own base directory to work in. It is quite
+common to have many Builders sharing the same buildslave. For
+example, there might be two buildslaves: one for i386, and a second
+for PowerPC. There may then be a pair of Builders that do a full
+compile/test run, one for each architecture, and a lone Builder that
+creates snapshot source tarballs if the full builders complete
+successfully. The full builders would each run on a single
+buildslave, whereas the tarball creation step might run on either
+buildslave (since the platform doesn't matter when creating source
+tarballs). In this case, the mapping would look like:
+
+ Builder(full-i386) -> BuildSlaves(slave-i386)
+ Builder(full-ppc) -> BuildSlaves(slave-ppc)
+ Builder(source-tarball) -> BuildSlaves(slave-i386, slave-ppc)
+
+ and each BuildSlave would have two SlaveBuilders inside it, one
+for a full builder, and a second for the source-tarball builder.
+
+ Once a SlaveBuilder is available, the Builder pulls one or more
+BuildRequests off its incoming queue. (It may pull more than one if it
+determines that it can merge the requests together; for example, there
+may be multiple requests to build the current HEAD revision). These
+requests are merged into a single Build instance, which includes the
+SourceStamp that describes what exact version of the source code
+should be used for the build. The Build is then randomly assigned to a
+free SlaveBuilder and the build begins.
+
+ The behaviour when BuildRequests are merged can be customized,
+*note Merging BuildRequests::.
+
+
+File: buildbot.info, Node: Status Delivery Architecture, Prev: Buildmaster Architecture, Up: System Architecture
+
+1.2.3 Status Delivery Architecture
+----------------------------------
+
+The buildmaster maintains a central Status object, to which various
+status plugins are connected. Through this Status object, a full
+hierarchy of build status objects can be obtained.
+
+
+
+ Status Objects Status Plugins User Clients
+
+ +------+ +---------+ +-----------+
+ |Status|<--------------+-->|Waterfall|<-------|Web Browser|
+ +------+ | +---------+ +-----------+
+ | +-----+ |
+ v v |
++-------+ +-------+ | +---+ +----------+
+|Builder| |Builder| +---->|IRC|<----------->IRC Server|
+|Status | |Status | | +---+ +----------+
++-------+ +-------+ |
+ | +----+ |
+ v v | +------------+ +----+
++------+ +------+ +-->|MailNotifier|---->|SMTP|
+|Build | |Build | +------------+ +----+
+|Status| |Status|
++------+ +------+
+ | +-----+
+ v v
++------+ +------+
+|Step | |Step |
+|Status| |Status|
++------+ +------+
+ | +---+
+ v v
++----+ +----+
+|Log | |Log |
+|File| |File|
++----+ +----+
+
+ The configuration file controls which status plugins are active.
+Each status plugin gets a reference to the top-level Status object.
+From there they can request information on each Builder, Build, Step,
+and LogFile. This query-on-demand interface is used by the
+html.Waterfall plugin to create the main status page each time a web
+browser hits the main URL.
+
+ The status plugins can also subscribe to hear about new Builds as
+they occur: this is used by the MailNotifier to create new email
+messages for each recently-completed Build.
+
+ The Status object records the status of old builds on disk in the
+buildmaster's base directory. This allows it to return information
+about historical builds.
+
+ There are also status objects that correspond to Schedulers and
+BuildSlaves. These allow status plugins to report information about
+upcoming builds, and the online/offline status of each buildslave.
+
+
+File: buildbot.info, Node: Control Flow, Prev: System Architecture, Up: Introduction
+
+1.3 Control Flow
+================
+
+A day in the life of the buildbot:
+
+ * A developer commits some source code changes to the repository.
+ A hook script or commit trigger of some sort sends information
+ about this change to the buildmaster through one of its
+ configured Change Sources. This notification might arrive via
+ email, or over a network connection (either initiated by the
+ buildmaster as it "subscribes" to changes, or by the commit
+ trigger as it pushes Changes towards the buildmaster). The
+ Change contains information about who made the change, what
+ files were modified, which revision contains the change, and any
+ checkin comments.
+
+ * The buildmaster distributes this change to all of its configured
+ Schedulers. Any "important" changes cause the "tree-stable-timer"
+ to be started, and the Change is added to a list of those that
+ will go into a new Build. When the timer expires, a Build is
+ started on each of a set of configured Builders, all
+ compiling/testing the same source code. Unless configured
+ otherwise, all Builds run in parallel on the various buildslaves.
+
+ * The Build consists of a series of Steps. Each Step causes some
+ number of commands to be invoked on the remote buildslave
+ associated with that Builder. The first step is almost always to
+ perform a checkout of the appropriate revision from the same VC
+ system that produced the Change. The rest generally perform a
+ compile and run unit tests. As each Step runs, the buildslave
+ reports back command output and return status to the buildmaster.
+
+ * As the Build runs, status messages like "Build Started", "Step
+ Started", "Build Finished", etc, are published to a collection of
+ Status Targets. One of these targets is usually the HTML
+ "Waterfall" display, which shows a chronological list of events,
+ and summarizes the results of the most recent build at the top
+ of each column. Developers can periodically check this page to
+ see how their changes have fared. If they see red, they know
+ that they've made a mistake and need to fix it. If they see
+ green, they know that they've done their duty and don't need to
+ worry about their change breaking anything.
+
+ * If a MailNotifier status target is active, the completion of a
+ build will cause email to be sent to any developers whose
+ Changes were incorporated into this Build. The MailNotifier can
+ be configured to only send mail upon failing builds, or for
+ builds which have just transitioned from passing to failing.
+ Other status targets can provide similar real-time notification
+ via different communication channels, like IRC.
+
+
+
+File: buildbot.info, Node: Installation, Next: Concepts, Prev: Introduction, Up: Top
+
+2 Installation
+**************
+
+* Menu:
+
+* Requirements::
+* Installing the code::
+* Creating a buildmaster::
+* Upgrading an Existing Buildmaster::
+* Creating a buildslave::
+* Launching the daemons::
+* Logfiles::
+* Shutdown::
+* Maintenance::
+* Troubleshooting::
+
+
+File: buildbot.info, Node: Requirements, Next: Installing the code, Prev: Installation, Up: Installation
+
+2.1 Requirements
+================
+
+At a bare minimum, you'll need the following (for both the buildmaster
+and a buildslave):
+
+ * Python: http://www.python.org
+
+ Buildbot requires python-2.3 or later, and is primarily developed
+ against python-2.4. It is also tested against python-2.5 .
+
+ * Twisted: http://twistedmatrix.com
+
+ Both the buildmaster and the buildslaves require Twisted-2.0.x or
+ later. It has been tested against all releases of Twisted up to
+ Twisted-2.5.0 (the most recent as of this writing). As always,
+ the most recent version is recommended.
+
+ Twisted is delivered as a collection of subpackages. You'll need
+ at least "Twisted" (the core package), and you'll also want
+ TwistedMail, TwistedWeb, and TwistedWords (for sending email,
+ serving a web status page, and delivering build status via IRC,
+ respectively). You might also want TwistedConch (for the
+ encrypted Manhole debug port). Note that Twisted requires
+ ZopeInterface to be installed as well.
+
+
+ Certain other packages may be useful on the system running the
+buildmaster:
+
+ * CVSToys: http://purl.net/net/CVSToys
+
+ If your buildmaster uses FreshCVSSource to receive change
+ notification from a cvstoys daemon, it will require CVSToys be
+     installed (tested with CVSToys-1.0.10). If it doesn't use
+ that source (i.e. if you only use a mail-parsing change source,
+ or the SVN notification script), you will not need CVSToys.
+
+
+ And of course, your project's build process will impose additional
+requirements on the buildslaves. These hosts must have all the tools
+necessary to compile and test your project's source code.
+
+
+File: buildbot.info, Node: Installing the code, Next: Creating a buildmaster, Prev: Requirements, Up: Installation
+
+2.2 Installing the code
+=======================
+
+The Buildbot is installed using the standard python `distutils'
+module. After unpacking the tarball, the process is:
+
+ python setup.py build
+ python setup.py install
+
+ where the install step may need to be done as root. This will put
+the bulk of the code in somewhere like
+/usr/lib/python2.3/site-packages/buildbot . It will also install the
+`buildbot' command-line tool in /usr/bin/buildbot.
+
+ To test this, shift to a different directory (like /tmp), and run:
+
+ buildbot --version
+
+ If it shows you the versions of Buildbot and Twisted, the install
+went ok. If it says `no such command' or it gets an `ImportError'
+when it tries to load the libraries, then something went wrong.
+`pydoc buildbot' is another useful diagnostic tool.
+
+ Windows users will find these files in other places. You will need
+to make sure that python can find the libraries, and will probably
+find it convenient to have `buildbot' on your PATH.
+
+ If you wish, you can run the buildbot unit test suite like this:
+
+ PYTHONPATH=. trial buildbot.test
+
+ This should run up to 192 tests, depending upon what VC tools you
+have installed. On my desktop machine it takes about five minutes to
+complete. Nothing should fail, a few might be skipped. If any of the
+tests fail, you should stop and investigate the cause before
+continuing the installation process, as it will probably be easier to
+track down the bug early.
+
+ If you cannot or do not wish to install the buildbot into a
+site-wide location like `/usr' or `/usr/local', you can also install
+it into the account's home directory. Do the install command like
+this:
+
+ python setup.py install --home=~
+
+ That will populate `~/lib/python' and create `~/bin/buildbot'.
+Make sure this lib directory is on your `PYTHONPATH'.
+
+
+File: buildbot.info, Node: Creating a buildmaster, Next: Upgrading an Existing Buildmaster, Prev: Installing the code, Up: Installation
+
+2.3 Creating a buildmaster
+==========================
+
+As you learned earlier (*note System Architecture::), the buildmaster
+runs on a central host (usually one that is publically visible, so
+everybody can check on the status of the project), and controls all
+aspects of the buildbot system. Let us call this host
+`buildbot.example.org'.
+
+ You may wish to create a separate user account for the buildmaster,
+perhaps named `buildmaster'. This can help keep your personal
+configuration distinct from that of the buildmaster and is useful if
+you have to use a mail-based notification system (*note Change
+Sources::). However, the Buildbot will work just fine with your
+regular user account.
+
+ You need to choose a directory for the buildmaster, called the
+`basedir'. This directory will be owned by the buildmaster, which
+will use configuration files therein, and create status files as it
+runs. `~/Buildbot' is a likely value. If you run multiple
+buildmasters in the same account, or if you run both masters and
+slaves, you may want a more distinctive name like
+`~/Buildbot/master/gnomovision' or `~/Buildmasters/fooproject'. If
+you are using a separate user account, this might just be
+`~buildmaster/masters/fooproject'.
+
+ Once you've picked a directory, use the `buildbot create-master'
+command to create the directory and populate it with startup files:
+
+ buildbot create-master BASEDIR
+
+ You will need to create a configuration file (*note
+Configuration::) before starting the buildmaster. Most of the rest of
+this manual is dedicated to explaining how to do this. A sample
+configuration file is placed in the working directory, named
+`master.cfg.sample', which can be copied to `master.cfg' and edited
+to suit your purposes.
+
+ (Internal details: This command creates a file named
+`buildbot.tac' that contains all the state necessary to create the
+buildmaster. Twisted has a tool called `twistd' which can use this
+.tac file to create and launch a buildmaster instance. twistd takes
+care of logging and daemonization (running the program in the
+background). `/usr/bin/buildbot' is a front end which runs twistd for
+you.)
+
+ In addition to `buildbot.tac', a small `Makefile.sample' is
+installed. This can be used as the basis for customized daemon
+startup, *Note Launching the daemons::.
+
+
+File: buildbot.info, Node: Upgrading an Existing Buildmaster, Next: Creating a buildslave, Prev: Creating a buildmaster, Up: Installation
+
+2.4 Upgrading an Existing Buildmaster
+=====================================
+
+If you have just installed a new version of the Buildbot code, and you
+have buildmasters that were created using an older version, you'll
+need to upgrade these buildmasters before you can use them. The
+upgrade process adds and modifies files in the buildmaster's base
+directory to make it compatible with the new code.
+
+ buildbot upgrade-master BASEDIR
+
+ This command will also scan your `master.cfg' file for
+incompatibilities (by loading it and printing any errors or deprecation
+warnings that occur). Each buildbot release tries to be compatible
+with configurations that worked cleanly (i.e. without deprecation
+warnings) on the previous release: any functions or classes that are
+to be removed will first be deprecated in a release, to give users a
+chance to start using their replacement.
+
+ The 0.7.6 release introduced the `public_html/' directory, which
+contains `index.html' and other files served by the `WebStatus' and
+`Waterfall' status displays. The `upgrade-master' command will create
+these files if they do not already exist. It will not modify existing
+copies, but it will write a new copy in e.g. `index.html.new' if the
+new version differs from the version that already exists.
+
+ The `upgrade-master' command is idempotent. It is safe to run it
+multiple times. After each upgrade of the buildbot code, you should
+use `upgrade-master' on all your buildmasters.
+
+
+File: buildbot.info, Node: Creating a buildslave, Next: Launching the daemons, Prev: Upgrading an Existing Buildmaster, Up: Installation
+
+2.5 Creating a buildslave
+=========================
+
+Typically, you will be adding a buildslave to an existing buildmaster,
+to provide additional architecture coverage. The buildbot
+administrator will give you several pieces of information necessary to
+connect to the buildmaster. You should also be somewhat familiar with
+the project being tested, so you can troubleshoot build problems
+locally.
+
+ The buildbot exists to make sure that the project's stated "how to
+build it" process actually works. To this end, the buildslave should
+run in an environment just like that of your regular developers.
+Typically the project build process is documented somewhere
+(`README', `INSTALL', etc), in a document that should mention all
+library dependencies and contain a basic set of build instructions.
+This document will be useful as you configure the host and account in
+which the buildslave runs.
+
+ Here's a good checklist for setting up a buildslave:
+
+ 1. Set up the account
+
+ It is recommended (although not mandatory) to set up a separate
+ user account for the buildslave. This account is frequently named
+ `buildbot' or `buildslave'. This serves to isolate your personal
+ working environment from that of the slave's, and helps to
+ minimize the security threat posed by letting possibly-unknown
+ contributors run arbitrary code on your system. The account
+ should have a minimum of fancy init scripts.
+
+ 2. Install the buildbot code
+
+ Follow the instructions given earlier (*note Installing the
+ code::). If you use a separate buildslave account, and you
+ didn't install the buildbot code to a shared location, then you
+ will need to install it with `--home=~' for each account that
+ needs it.
+
+ 3. Set up the host
+
+ Make sure the host can actually reach the buildmaster. Usually
+ the buildmaster is running a status webserver on the same
+ machine, so simply point your web browser at it and see if you
+ can get there. Install whatever additional packages or
+ libraries the project's INSTALL document advises. (or not: if
+ your buildslave is supposed to make sure that building without
+ optional libraries still works, then don't install those
+ libraries).
+
+ Again, these libraries don't necessarily have to be installed to
+ a site-wide shared location, but they must be available to your
+ build process. Accomplishing this is usually very specific to
+ the build process, so installing them to `/usr' or `/usr/local'
+ is usually the best approach.
+
+ 4. Test the build process
+
+ Follow the instructions in the INSTALL document, in the
+ buildslave's account. Perform a full CVS (or whatever) checkout,
+ configure, make, run tests, etc. Confirm that the build works
+ without manual fussing. If it doesn't work when you do it by
+ hand, it will be unlikely to work when the buildbot attempts to
+ do it in an automated fashion.
+
+ 5. Choose a base directory
+
+ This should be somewhere in the buildslave's account, typically
+ named after the project which is being tested. The buildslave
+ will not touch any file outside of this directory. Something
+ like `~/Buildbot' or `~/Buildslaves/fooproject' is appropriate.
+
+ 6. Get the buildmaster host/port, botname, and password
+
+ When the buildbot admin configures the buildmaster to accept and
+ use your buildslave, they will provide you with the following
+ pieces of information:
+
+ * your buildslave's name
+
+ * the password assigned to your buildslave
+
+ * the hostname and port number of the buildmaster, i.e.
+ buildbot.example.org:8007
+
+ 7. Create the buildslave
+
+ Now run the 'buildbot' command as follows:
+
+ buildbot create-slave BASEDIR MASTERHOST:PORT SLAVENAME PASSWORD
+
+ This will create the base directory and a collection of files
+ inside, including the `buildbot.tac' file that contains all the
+ information you passed to the `buildbot' command.
+
+ 8. Fill in the hostinfo files
+
+ When it first connects, the buildslave will send a few files up
+ to the buildmaster which describe the host that it is running
+ on. These files are presented on the web status display so that
+ developers have more information to reproduce any test failures
+ that are witnessed by the buildbot. There are sample files in
+ the `info' subdirectory of the buildbot's base directory. You
+ should edit these to correctly describe you and your host.
+
+ `BASEDIR/info/admin' should contain your name and email address.
+ This is the "buildslave admin address", and will be visible from
+ the build status page (so you may wish to munge it a bit if
+ address-harvesting spambots are a concern).
+
+ `BASEDIR/info/host' should be filled with a brief description of
+ the host: OS, version, memory size, CPU speed, versions of
+ relevant libraries installed, and finally the version of the
+ buildbot code which is running the buildslave.
+
+ If you run many buildslaves, you may want to create a single
+ `~buildslave/info' file and share it among all the buildslaves
+ with symlinks.
+
+
+* Menu:
+
+* Buildslave Options::
+
+
+File: buildbot.info, Node: Buildslave Options, Prev: Creating a buildslave, Up: Creating a buildslave
+
+2.5.1 Buildslave Options
+------------------------
+
+There are a handful of options you might want to use when creating the
+buildslave with the `buildbot create-slave <options> DIR <params>'
+command. You can type `buildbot create-slave --help' for a summary.
+To use these, just include them on the `buildbot create-slave'
+command line, like this:
+
+ buildbot create-slave --umask=022 ~/buildslave buildmaster.example.org:42012 myslavename mypasswd
+
+`--usepty'
+ This is a boolean flag that tells the buildslave whether to
+ launch child processes in a PTY or with regular pipes (the
+ default) when the master does not specify. This option is
+ deprecated, as this particular parameter is better specified on
+ the master.
+
+`--umask'
+ This is a string (generally an octal representation of an
+ integer) which will cause the buildslave process' "umask" value
+ to be set shortly after initialization. The "twistd"
+ daemonization utility forces the umask to 077 at startup (which
+ means that all files created by the buildslave or its child
+ processes will be unreadable by any user other than the
+ buildslave account). If you want build products to be readable
+ by other accounts, you can add `--umask=022' to tell the
+ buildslave to fix the umask after twistd clobbers it. If you want
+ build products to be _writable_ by other accounts too, use
+ `--umask=000', but this is likely to be a security problem.
+
+`--keepalive'
+ This is a number that indicates how frequently "keepalive"
+ messages should be sent from the buildslave to the buildmaster,
+ expressed in seconds. The default (600) causes a message to be
+ sent to the buildmaster at least once every 10 minutes. To set
+ this to a lower value, use e.g. `--keepalive=120'.
+
+ If the buildslave is behind a NAT box or stateful firewall, these
+ messages may help to keep the connection alive: some NAT boxes
+ tend to forget about a connection if it has not been used in a
+ while. When this happens, the buildmaster will think that the
+ buildslave has disappeared, and builds will time out. Meanwhile
+the buildslave will not realize that anything is wrong.
+
+`--maxdelay'
+ This is a number that indicates the maximum amount of time the
+ buildslave will wait between connection attempts, expressed in
+ seconds. The default (300) causes the buildslave to wait at most
+ 5 minutes before trying to connect to the buildmaster again.
+
+`--log-size'
+     This is the size in bytes at which to rotate the Twisted log files.
+
+`--log-count'
+ This is the number of log rotations to keep around. You can
+ either specify a number or `None' (the default) to keep all
+ `twistd.log' files around.
+
+
+
+File: buildbot.info, Node: Launching the daemons, Next: Logfiles, Prev: Creating a buildslave, Up: Installation
+
+2.6 Launching the daemons
+=========================
+
+Both the buildmaster and the buildslave run as daemon programs. To
+launch them, pass the working directory to the `buildbot' command:
+
+ buildbot start BASEDIR
+
+ This command will start the daemon and then return, so normally it
+will not produce any output. To verify that the programs are indeed
+running, look for a pair of files named `twistd.log' and `twistd.pid'
+that should be created in the working directory. `twistd.pid'
+contains the process ID of the newly-spawned daemon.
+
+ When the buildslave connects to the buildmaster, new directories
+will start appearing in its base directory. The buildmaster tells the
+slave to create a directory for each Builder which will be using that
+slave. All build operations are performed within these directories:
+CVS checkouts, compiles, and tests.
+
+ Once you get everything running, you will want to arrange for the
+buildbot daemons to be started at boot time. One way is to use
+`cron', by putting them in a @reboot crontab entry(1):
+
+ @reboot buildbot start BASEDIR
+
+ When you run `crontab' to set this up, remember to do it as the
+buildmaster or buildslave account! If you add this to your crontab
+when running as your regular account (or worse yet, root), then the
+daemon will run as the wrong user, quite possibly as one with more
+authority than you intended to provide.
+
+ It is important to remember that the environment provided to cron
+jobs and init scripts can be quite different from your normal runtime.
+There may be fewer environment variables specified, and the PATH may
+be shorter than usual. It is a good idea to test out this method of
+launching the buildslave by using a cron job with a time in the near
+future, with the same command, and then check `twistd.log' to make
+sure the slave actually started correctly. Common problems here are
+for `/usr/local' or `~/bin' to not be on your `PATH', or for
+`PYTHONPATH' to not be set correctly. Sometimes `HOME' is messed up
+too.
+
+ To modify the way the daemons are started (perhaps you want to set
+some environment variables first, or perform some cleanup each time),
+you can create a file named `Makefile.buildbot' in the base
+directory. When the `buildbot' front-end tool is told to `start' the
+daemon, and it sees this file (and `/usr/bin/make' exists), it will
+do `make -f Makefile.buildbot start' instead of its usual action
+(which involves running `twistd'). When the buildmaster or buildslave
+is installed, a `Makefile.sample' is created which implements the
+same behavior as the `buildbot' tool uses, so if you want to
+customize the process, just copy `Makefile.sample' to
+`Makefile.buildbot' and edit it as necessary.
+
+ Some distributions may include conveniences to make starting
+buildbot at boot time easy. For instance, with the default buildbot
+package in Debian-based distributions, you may only need to modify
+`/etc/default/buildbot' (see also `/etc/init.d/buildbot', which reads
+the configuration in `/etc/default/buildbot').
+
+ ---------- Footnotes ----------
+
+ (1) this @reboot syntax is understood by Vixie cron, which is the
+flavor usually provided with linux systems. Other unices may have a
+cron that doesn't understand @reboot
+
+
+File: buildbot.info, Node: Logfiles, Next: Shutdown, Prev: Launching the daemons, Up: Installation
+
+2.7 Logfiles
+============
+
+While a buildbot daemon runs, it emits text to a logfile, named
+`twistd.log'. A command like `tail -f twistd.log' is useful to watch
+the command output as it runs.
+
+ The buildmaster will announce any errors with its configuration
+file in the logfile, so it is a good idea to look at the log at
+startup time to check for any problems. Most buildmaster activities
+will cause lines to be added to the log.
+
+
+File: buildbot.info, Node: Shutdown, Next: Maintenance, Prev: Logfiles, Up: Installation
+
+2.8 Shutdown
+============
+
+To stop a buildmaster or buildslave manually, use:
+
+ buildbot stop BASEDIR
+
+ This simply looks for the `twistd.pid' file and kills whatever
+process is identified within.
+
+ At system shutdown, all processes are sent a `SIGKILL'. The
+buildmaster and buildslave will respond to this by shutting down
+normally.
+
+ The buildmaster will respond to a `SIGHUP' by re-reading its
+config file. Of course, this only works on unix-like systems with
+signal support, and won't work on Windows. The following shortcut is
+available:
+
+ buildbot reconfig BASEDIR
+
+ When you update the Buildbot code to a new release, you will need
+to restart the buildmaster and/or buildslave before it can take
+advantage of the new code. You can do a `buildbot stop BASEDIR' and
+`buildbot start BASEDIR' in quick succession, or you can use the
+`restart' shortcut, which does both steps for you:
+
+ buildbot restart BASEDIR
+
+ There are certain configuration changes that are not handled
+cleanly by `buildbot reconfig'. If this occurs, `buildbot restart' is
+a more robust tool to fully switch over to the new configuration.
+
+ `buildbot restart' may also be used to start a stopped Buildbot
+instance. This behaviour is useful when writing scripts that stop,
+start and restart Buildbot.
+
+ A buildslave may also be gracefully shutdown from the *note
+WebStatus:: status plugin. This is useful to shutdown a buildslave
+without interrupting any current builds. The buildmaster will wait
+until the buildslave has finished all its current builds, and will
+then tell the buildslave to shutdown.
+
+
+File: buildbot.info, Node: Maintenance, Next: Troubleshooting, Prev: Shutdown, Up: Installation
+
+2.9 Maintenance
+===============
+
+It is a good idea to check the buildmaster's status page every once in
+a while, to see if your buildslave is still online. Eventually the
+buildbot will probably be enhanced to send you email (via the
+`info/admin' email address) when the slave has been offline for more
+than a few hours.
+
+ If you find you can no longer provide a buildslave to the project,
+please let the project admins know, so they can put out a call for a
+replacement.
+
+ The Buildbot records status and logs output continually, each time
+a build is performed. The status tends to be small, but the build logs
+can become quite large. Each build and log are recorded in a separate
+file, arranged hierarchically under the buildmaster's base directory.
+To prevent these files from growing without bound, you should
+periodically delete old build logs. A simple cron job to delete
+anything older than, say, two weeks should do the job. The only trick
+is to leave the `buildbot.tac' and other support files alone, for
+which find's `-mindepth' argument helps skip everything in the top
+directory. You can use something like the following:
+
+     @weekly cd BASEDIR && find . -mindepth 2 -path './public_html/*' -prune -o -type f -mtime +14 -exec rm {} \;
+ @weekly cd BASEDIR && find twistd.log* -mtime +14 -exec rm {} \;
+
+
+File: buildbot.info, Node: Troubleshooting, Prev: Maintenance, Up: Installation
+
+2.10 Troubleshooting
+====================
+
+Here are a few hints on diagnosing common problems.
+
+* Menu:
+
+* Starting the buildslave::
+* Connecting to the buildmaster::
+* Forcing Builds::
+
+
+File: buildbot.info, Node: Starting the buildslave, Next: Connecting to the buildmaster, Prev: Troubleshooting, Up: Troubleshooting
+
+2.10.1 Starting the buildslave
+------------------------------
+
+Cron jobs are typically run with a minimal shell (`/bin/sh', not
+`/bin/bash'), and tilde expansion is not always performed in such
+commands. You may want to use explicit paths, because the `PATH' is
+usually quite short and doesn't include anything set by your shell's
+startup scripts (`.profile', `.bashrc', etc). If you've installed
+buildbot (or other python libraries) to an unusual location, you may
+need to add a `PYTHONPATH' specification (note that python will do
+tilde-expansion on `PYTHONPATH' elements by itself). Sometimes it is
+safer to fully-specify everything:
+
+ @reboot PYTHONPATH=~/lib/python /usr/local/bin/buildbot start /usr/home/buildbot/basedir
+
+ Take the time to get the @reboot job set up. Otherwise, things
+will work fine for a while, but the first power outage or system
+reboot you have will stop the buildslave with nothing but the cries
+of sorrowful developers to remind you that it has gone away.
+
+
+File: buildbot.info, Node: Connecting to the buildmaster, Next: Forcing Builds, Prev: Starting the buildslave, Up: Troubleshooting
+
+2.10.2 Connecting to the buildmaster
+------------------------------------
+
+If the buildslave cannot connect to the buildmaster, the reason should
+be described in the `twistd.log' logfile. Some common problems are an
+incorrect master hostname or port number, or a mistyped bot name or
+password. If the buildslave loses the connection to the master, it is
+supposed to attempt to reconnect with an exponentially-increasing
+backoff. Each attempt (and the time of the next attempt) will be
+logged. If you get impatient, just manually stop and re-start the
+buildslave.
+
+ When the buildmaster is restarted, all slaves will be disconnected,
+and will attempt to reconnect as usual. The reconnect time will depend
+upon how long the buildmaster is offline (i.e. how far up the
+exponential backoff curve the slaves have travelled). Again,
+`buildbot stop BASEDIR; buildbot start BASEDIR' will speed up the
+process.
+
+
+File: buildbot.info, Node: Forcing Builds, Prev: Connecting to the buildmaster, Up: Troubleshooting
+
+2.10.3 Forcing Builds
+---------------------
+
+From the buildmaster's main status web page, you can force a build to
+be run on your build slave. Figure out which column is for a builder
+that runs on your slave, click on that builder's name, and the page
+that comes up will have a "Force Build" button. Fill in the form, hit
+the button, and a moment later you should see your slave's
+`twistd.log' filling with commands being run. Using `pstree' or `top'
+should also reveal the cvs/make/gcc/etc processes being run by the
+buildslave. Note that the same web page should also show the `admin'
+and `host' information files that you configured earlier.
+
+
+File: buildbot.info, Node: Concepts, Next: Configuration, Prev: Installation, Up: Top
+
+3 Concepts
+**********
+
+This chapter defines some of the basic concepts that the Buildbot
+uses. You'll need to understand how the Buildbot sees the world to
+configure it properly.
+
+* Menu:
+
+* Version Control Systems::
+* Schedulers::
+* BuildSet::
+* BuildRequest::
+* Builder::
+* Users::
+* Build Properties::
+
+
+File: buildbot.info, Node: Version Control Systems, Next: Schedulers, Prev: Concepts, Up: Concepts
+
+3.1 Version Control Systems
+===========================
+
+These source trees come from a Version Control System of some kind.
+CVS and Subversion are two popular ones, but the Buildbot supports
+others. All VC systems have some notion of an upstream `repository'
+which acts as a server(1), from which clients can obtain source trees
+according to various parameters. The VC repository provides source
+trees of various projects, for different branches, and from various
+points in time. The first thing we have to do is to specify which
+source tree we want to get.
+
+* Menu:
+
+* Generalizing VC Systems::
+* Source Tree Specifications::
+* How Different VC Systems Specify Sources::
+* Attributes of Changes::
+
+ ---------- Footnotes ----------
+
+ (1) except Darcs, but since the Buildbot never modifies its local
+source tree we can ignore the fact that Darcs uses a less centralized
+model
+
+
+File: buildbot.info, Node: Generalizing VC Systems, Next: Source Tree Specifications, Prev: Version Control Systems, Up: Version Control Systems
+
+3.1.1 Generalizing VC Systems
+-----------------------------
+
+For the purposes of the Buildbot, we will try to generalize all VC
+systems as having repositories that each provide sources for a variety
+of projects. Each project is defined as a directory tree with source
+files. The individual files may each have revisions, but we ignore
+that and treat the project as a whole as having a set of revisions
+(CVS is really the only VC system still in widespread use that has
+per-file revisions; everything modern has moved to atomic tree-wide
+changesets). Each time someone commits a change to the project, a new
+revision becomes available. These revisions can be described by a
+tuple with two items: the first is a branch tag, and the second is
+some kind of revision stamp or timestamp. Complex projects may have
+multiple branch tags, but there is always a default branch. The
+timestamp may be an actual timestamp (such as the -D option to CVS),
+or it may be a monotonically-increasing transaction number (such as
+the change number used by SVN and P4, or the revision number used by
+Arch/Baz/Bazaar, or a labeled tag used in CVS)(1). The SHA1 revision
+ID used by Monotone, Mercurial, and Git is also a kind of revision
+stamp, in that it specifies a unique copy of the source tree, as does
+a Darcs "context" file.
+
+ When we aren't intending to make any changes to the sources we
+check out (at least not any that need to be committed back upstream),
+there are two basic ways to use a VC system:
+
+ * Retrieve a specific set of source revisions: some tag or key is
+ used to index this set, which is fixed and cannot be changed by
+ subsequent developers committing new changes to the tree.
+ Releases are built from tagged revisions like this, so that they
+ can be rebuilt again later (probably with controlled
+ modifications).
+
+ * Retrieve the latest sources along a specific branch: some tag is
+ used to indicate which branch is to be used, but within that
+ constraint we want to get the latest revisions.
+
+ Build personnel or CM staff typically use the first approach: the
+build that results is (ideally) completely specified by the two
+parameters given to the VC system: repository and revision tag. This
+gives QA and end-users something concrete to point at when reporting
+bugs. Release engineers are also reportedly fond of shipping code that
+can be traced back to a concise revision tag of some sort.
+
+ Developers are more likely to use the second approach: each morning
+the developer does an update to pull in the changes committed by the
+team over the last day. These builds are not easy to fully specify: it
+depends upon exactly when you did a checkout, and upon what local
+changes the developer has in their tree. Developers do not normally
+tag each build they produce, because there is usually significant
+overhead involved in creating these tags. Recreating the trees used by
+one of these builds can be a challenge. Some VC systems may provide
+implicit tags (like a revision number), while others may allow the use
+of timestamps to mean "the state of the tree at time X" as opposed to
+a tree-state that has been explicitly marked.
+
+ The Buildbot is designed to help developers, so it usually works in
+terms of _the latest_ sources as opposed to specific tagged
+revisions. However, it would really prefer to build from reproducible
+source trees, so implicit revisions are used whenever possible.
+
+ ---------- Footnotes ----------
+
+ (1) many VC systems provide more complexity than this: in
+particular the local views that P4 and ClearCase can assemble out of
+various source directories are more complex than we're prepared to
+take advantage of here
+
+
+File: buildbot.info, Node: Source Tree Specifications, Next: How Different VC Systems Specify Sources, Prev: Generalizing VC Systems, Up: Version Control Systems
+
+3.1.2 Source Tree Specifications
+--------------------------------
+
+So for the Buildbot's purposes we treat each VC system as a server
+which can take a list of specifications as input and produce a source
+tree as output. Some of these specifications are static: they are
+attributes of the builder and do not change over time. Others are more
+variable: each build will have a different value. The repository is
+changed over time by a sequence of Changes, each of which represents a
+single developer making changes to some set of files. These Changes
+are cumulative(1).
+
+ For normal builds, the Buildbot wants to get well-defined source
+trees that contain specific Changes, and exclude other Changes that
+may have occurred after the desired ones. We assume that the Changes
+arrive at the buildbot (through one of the mechanisms described in
+*note Change Sources::) in the same order in which they are committed
+to the repository. The Buildbot waits for the tree to become "stable"
+before initiating a build, for two reasons. The first is that
+developers frequently make multiple related commits in quick
+succession, even when the VC system provides ways to make atomic
+transactions involving multiple files at the same time. Running a
+build in the middle of these sets of changes would use an inconsistent
+set of source files, and is likely to fail (and is certain to be less
+useful than a build which uses the full set of changes). The
+tree-stable-timer is intended to avoid these useless builds that
+include some of the developer's changes but not all. The second reason
+is that some VC systems (i.e. CVS) do not provide repository-wide
+transaction numbers, so that timestamps are the only way to refer to
+a specific repository state. These timestamps may be somewhat
+ambiguous, due to processing and notification delays. By waiting until
+the tree has been stable for, say, 10 minutes, we can choose a
+timestamp from the middle of that period to use for our source
+checkout, and then be reasonably sure that any clock-skew errors will
+not cause the build to be performed on an inconsistent set of source
+files.
+
+ The Schedulers always use the tree-stable-timer, with a timeout
+that is configured to reflect a reasonable tradeoff between build
+latency and change frequency. When the VC system provides coherent
+repository-wide revision markers (such as Subversion's revision
+numbers, or in fact anything other than CVS's timestamps), the
+resulting Build is simply performed against a source tree defined by
+that revision marker. When the VC system does not provide this, a
+timestamp from the middle of the tree-stable period is used to
+generate the source tree(2).
+
+ ---------- Footnotes ----------
+
+ (1) Monotone's _multiple heads_ feature violates this assumption
+of cumulative Changes, but in most situations the changes don't occur
+frequently enough for this to be a significant problem
+
+ (2) this `checkoutDelay' defaults to half the tree-stable timer,
+but it can be overridden with an argument to the Source Step
+
+
+File: buildbot.info, Node: How Different VC Systems Specify Sources, Next: Attributes of Changes, Prev: Source Tree Specifications, Up: Version Control Systems
+
+3.1.3 How Different VC Systems Specify Sources
+----------------------------------------------
+
+For CVS, the static specifications are `repository' and `module'. In
+addition to those, each build uses a timestamp (or omits the
+timestamp to mean `the latest') and `branch tag' (which defaults to
+HEAD). These parameters collectively specify a set of sources from
+which a build may be performed.
+
+ Subversion (http://subversion.tigris.org) combines the repository,
+module, and branch into a single `Subversion URL' parameter. Within
+that scope, source checkouts can be specified by a numeric `revision
+number' (a repository-wide monotonically-increasing marker, such that
+each transaction that changes the repository is indexed by a
+different revision number), or a revision timestamp. When branches
+are used, the repository and module form a static `baseURL', while
+each build has a `revision number' and a `branch' (which defaults to a
+statically-specified `defaultBranch'). The `baseURL' and `branch' are
+simply concatenated together to derive the `svnurl' to use for the
+checkout.
+
+ Perforce (http://www.perforce.com/) is similar. The server is
+specified through a `P4PORT' parameter. Module and branch are
+specified in a single depot path, and revisions are depot-wide. When
+branches are used, the `p4base' and `defaultBranch' are concatenated
+together to produce the depot path.
+
+ Arch (http://wiki.gnuarch.org/) and Bazaar
+(http://bazaar.canonical.com/) specify a repository by URL, as well
+as a `version' which is kind of like a branch name. Arch uses the
+word `archive' to represent the repository. Arch lets you push
+changes from one archive to another, removing the strict
+centralization required by CVS and SVN. It retains the distinction
+between repository and working directory that most other VC systems
+use. For complex multi-module directory structures, Arch has a
+built-in `build config' layer with which the checkout process has two
+steps. First, an initial bootstrap checkout is performed to retrieve
+a set of build-config files. Second, one of these files is used to
+figure out which archives/modules should be used to populate
+subdirectories of the initial checkout.
+
+ Builders which use Arch and Bazaar therefore have a static archive
+`url', and a default "branch" (which is a string that specifies a
+complete category-branch-version triple). Each build can have its own
+branch (the category-branch-version string) to override the default,
+as well as a revision number (which is turned into a -patch-NN suffix
+when performing the checkout).
+
+ Bzr (http://bazaar-vcs.org) (which is a descendant of Arch/Bazaar,
+and is frequently referred to as "Bazaar") has the same sort of
+repository-vs-workspace model as Arch, but the repository data can
+either be stored inside the working directory or kept elsewhere
+(either on the same machine or on an entirely different machine). For
+the purposes of Buildbot (which never commits changes), the repository
+is specified with a URL and a revision number.
+
+ The most common way to obtain read-only access to a bzr tree is via
+HTTP, simply by making the repository visible through a web server
+like Apache. Bzr can also use FTP and SFTP servers, if the buildslave
+process has sufficient privileges to access them. Higher performance
+can be obtained by running a special Bazaar-specific server. None of
+these matter to the buildbot: the repository URL just has to match the
+kind of server being used. The `repoURL' argument provides the
+location of the repository.
+
+ Branches are expressed as subdirectories of the main central
+repository, which means that if branches are being used, the BZR step
+is given a `baseURL' and `defaultBranch' instead of getting the
+`repoURL' argument.
+
+ Darcs (http://darcs.net/) doesn't really have the notion of a
+single master repository. Nor does it really have branches. In Darcs,
+each working directory is also a repository, and there are operations
+to push and pull patches from one of these `repositories' to another.
+For the Buildbot's purposes, all you need to do is specify the URL of
+a repository that you want to build from. The build slave will then
+pull the latest patches from that repository and build them. Multiple
+branches are implemented by using multiple repositories (possibly
+living on the same server).
+
+ Builders which use Darcs therefore have a static `repourl' which
+specifies the location of the repository. If branches are being used,
+the source Step is instead configured with a `baseURL' and a
+`defaultBranch', and the two strings are simply concatenated together
+to obtain the repository's URL. Each build then has a specific branch
+which replaces `defaultBranch', or just uses the default one. Instead
+of a revision number, each build can have a "context", which is a
+string that records all the patches that are present in a given tree
+(this is the output of `darcs changes --context', and is considerably
+less concise than, e.g. Subversion's revision number, but the
+patch-reordering flexibility of Darcs makes it impossible to provide
+a shorter useful specification).
+
+ Mercurial (http://selenic.com/mercurial) is like Darcs, in that
+each branch is stored in a separate repository. The `repourl',
+`baseURL', and `defaultBranch' arguments are all handled the same way
+as with Darcs. The "revision", however, is the hash identifier
+returned by `hg identify'.
+
+ Git (http://git.or.cz/) also follows a decentralized model, and
+each repository can have several branches and tags. The source Step is
+configured with a static `repourl' which specifies the location of
+the repository. In addition, an optional `branch' parameter can be
+specified to check out code from a specific branch instead of the
+default "master" branch. The "revision" is specified as a SHA1 hash
+as returned by e.g. `git rev-parse'. No attempt is made to ensure
+that the specified revision is actually a subset of the specified
+branch.
+
+
+File: buildbot.info, Node: Attributes of Changes, Prev: How Different VC Systems Specify Sources, Up: Version Control Systems
+
+3.1.4 Attributes of Changes
+---------------------------
+
+Who
+===
+
+Each Change has a `who' attribute, which specifies which developer is
+responsible for the change. This is a string which comes from a
+namespace controlled by the VC repository. Frequently this means it
+is a username on the host which runs the repository, but not all VC
+systems require this (Arch, for example, uses a fully-qualified `Arch
+ID', which looks like an email address, as does Darcs). Each
+StatusNotifier will map the `who' attribute into something
+appropriate for their particular means of communication: an email
+address, an IRC handle, etc.
+
+Files
+=====
+
+It also has a list of `files', which are just the tree-relative
+filenames of any files that were added, deleted, or modified for this
+Change. These filenames are used by the `fileIsImportant' function
+(in the Scheduler) to decide whether it is worth triggering a new
+build or not, e.g. the function could use the following function to
+only run a build if a C file were checked in:
+
+ def has_C_files(change):
+ for name in change.files:
+ if name.endswith(".c"):
+ return True
+ return False
+
+ Certain BuildSteps can also use the list of changed files to run a
+more targeted series of tests, e.g. the `python_twisted.Trial' step
+can run just the unit tests that provide coverage for the modified
+.py files instead of running the full test suite.
+
+Comments
+========
+
+The Change also has a `comments' attribute, which is a string
+containing any checkin comments.
+
+Revision
+========
+
+Each Change can have a `revision' attribute, which describes how to
+get a tree with a specific state: a tree which includes this Change
+(and all that came before it) but none that come after it. If this
+information is unavailable, the `.revision' attribute will be `None'.
+These revisions are provided by the ChangeSource, and consumed by the
+`computeSourceRevision' method in the appropriate `step.Source' class.
+
+`CVS'
+ `revision' is an int, seconds since the epoch
+
+`SVN'
+ `revision' is an int, the changeset number (r%d)
+
+`Darcs'
+ `revision' is a large string, the output of `darcs changes
+ --context'
+
+`Mercurial'
+ `revision' is a short string (a hash ID), the output of `hg
+ identify'
+
+`Arch/Bazaar'
+ `revision' is the full revision ID (ending in -patch-%d)
+
+`P4'
+ `revision' is an int, the transaction number
+
+`Git'
+ `revision' is a short string (a SHA1 hash), the output of e.g.
+ `git rev-parse'
+
+Branches
+========
+
+The Change might also have a `branch' attribute. This indicates that
+all of the Change's files are in the same named branch. The
+Schedulers get to decide whether the branch should be built or not.
+
+ For VC systems like CVS, Arch, Monotone, and Git, the `branch'
+name is unrelated to the filename. (that is, the branch name and the
+filename inhabit unrelated namespaces). For SVN, branches are
+expressed as subdirectories of the repository, so the file's "svnurl"
+is a combination of some base URL, the branch name, and the filename
+within the branch. (In a sense, the branch name and the filename
+inhabit the same namespace). Darcs branches are subdirectories of a
+base URL just like SVN. Mercurial branches are the same as Darcs.
+
+`CVS'
+ branch='warner-newfeature', files=['src/foo.c']
+
+`SVN'
+ branch='branches/warner-newfeature', files=['src/foo.c']
+
+`Darcs'
+ branch='warner-newfeature', files=['src/foo.c']
+
+`Mercurial'
+ branch='warner-newfeature', files=['src/foo.c']
+
+`Arch/Bazaar'
+ branch='buildbot-usebranches-0', files=['buildbot/master.py']
+
+`Git'
+ branch='warner-newfeature', files=['src/foo.c']
+
+Links
+=====
+
+Finally, the Change might have a `links' list, which is intended to
+provide a list of URLs to a _viewcvs_-style web page that provides
+more detail for this Change, perhaps including the full file diffs.
+
+
+File: buildbot.info, Node: Schedulers, Next: BuildSet, Prev: Version Control Systems, Up: Concepts
+
+3.2 Schedulers
+==============
+
+Each Buildmaster has a set of `Scheduler' objects, each of which gets
+a copy of every incoming Change. The Schedulers are responsible for
+deciding when Builds should be run. Some Buildbot installations might
+have a single Scheduler, while others may have several, each for a
+different purpose.
+
+ For example, a "quick" scheduler might exist to give immediate
+feedback to developers, hoping to catch obvious problems in the code
+that can be detected quickly. These typically do not run the full test
+suite, nor do they run on a wide variety of platforms. They also
+usually do a VC update rather than performing a brand-new checkout
+each time. You could have a "quick" scheduler which used a 30 second
+timeout, and feeds a single "quick" Builder that uses a VC
+`mode='update'' setting.
+
+ A separate "full" scheduler would run more comprehensive tests a
+little while later, to catch more subtle problems. This scheduler
+would have a longer tree-stable-timer, maybe 30 minutes, and would
+feed multiple Builders (with a `mode=' of `'copy'', `'clobber'', or
+`'export'').
+
+ The `tree-stable-timer' and `fileIsImportant' decisions are made
+by the Scheduler. Dependencies are also implemented here. Periodic
+builds (those which are run every N seconds rather than after new
+Changes arrive) are triggered by a special `Periodic' Scheduler
+subclass. The default Scheduler class can also be told to watch for
+specific branches, ignoring Changes on other branches. This may be
+useful if you have a trunk and a few release branches which should be
+tracked, but you don't want to have the Buildbot pay attention
+to several dozen private user branches.
+
+ When the setup has multiple sources of Changes the `category' can
+be used for `Scheduler' objects to filter out a subset of the
+Changes. Note that not all change sources can attach a category.
+
+ Some Schedulers may trigger builds for other reasons, other than
+recent Changes. For example, a Scheduler subclass could connect to a
+remote buildmaster and watch for builds of a library to succeed before
+triggering a local build that uses that library.
+
+ Each Scheduler creates and submits `BuildSet' objects to the
+`BuildMaster', which is then responsible for making sure the
+individual `BuildRequests' are delivered to the target `Builders'.
+
+ `Scheduler' instances are activated by placing them in the
+`c['schedulers']' list in the buildmaster config file. Each Scheduler
+has a unique name.
+
+
+File: buildbot.info, Node: BuildSet, Next: BuildRequest, Prev: Schedulers, Up: Concepts
+
+3.3 BuildSet
+============
+
+A `BuildSet' is the name given to a set of Builds that all
+compile/test the same version of the tree on multiple Builders. In
+general, all these component Builds will perform the same sequence of
+Steps, using the same source code, but on different platforms or
+against a different set of libraries.
+
+ The `BuildSet' is tracked as a single unit, which fails if any of
+the component Builds have failed, and therefore can succeed only if
+_all_ of the component Builds have succeeded. There are two kinds of
+status notification messages that can be emitted for a BuildSet: the
+`firstFailure' type (which fires as soon as we know the BuildSet will
+fail), and the `Finished' type (which fires once the BuildSet has
+completely finished, regardless of whether the overall set passed or
+failed).
+
+ A `BuildSet' is created with a _source stamp_ tuple of (branch,
+revision, changes, patch), some of which may be None, and a list of
+Builders on which it is to be run. They are then given to the
+BuildMaster, which is responsible for creating a separate
+`BuildRequest' for each Builder.
+
+ There are a couple of different likely values for the
+`SourceStamp':
+
+`(revision=None, changes=[CHANGES], patch=None)'
+ This is a `SourceStamp' used when a series of Changes have
+ triggered a build. The VC step will attempt to check out a tree
+ that contains CHANGES (and any changes that occurred before
+ CHANGES, but not any that occurred after them).
+
+`(revision=None, changes=None, patch=None)'
+ This builds the most recent code on the default branch. This is
+ the sort of `SourceStamp' that would be used on a Build that was
+ triggered by a user request, or a Periodic scheduler. It is also
+ possible to configure the VC Source Step to always check out the
+ latest sources rather than paying attention to the Changes in the
+ SourceStamp, which will result in the same behavior as this.
+
+`(branch=BRANCH, revision=None, changes=None, patch=None)'
+ This builds the most recent code on the given BRANCH. Again,
+ this is generally triggered by a user request or Periodic build.
+
+`(revision=REV, changes=None, patch=(LEVEL, DIFF))'
+ This checks out the tree at the given revision REV, then applies
+ a patch (using `patch -pLEVEL <DIFF'). The *note try:: feature
+ uses this kind of `SourceStamp'. If `patch' is None, the patching
+ step is bypassed.
+
+
+ The buildmaster is responsible for turning the `BuildSet' into a
+set of `BuildRequest' objects and queueing them on the appropriate
+Builders.
+
+
+File: buildbot.info, Node: BuildRequest, Next: Builder, Prev: BuildSet, Up: Concepts
+
+3.4 BuildRequest
+================
+
+A `BuildRequest' is a request to build a specific set of sources on a
+single specific `Builder'. Each `Builder' runs the `BuildRequest' as
+soon as it can (i.e. when an associated buildslave becomes free).
+`BuildRequest's are prioritized from oldest to newest, so when a
+buildslave becomes free, the `Builder' with the oldest `BuildRequest'
+is run.
+
+ The `BuildRequest' contains the `SourceStamp' specification. The
+actual process of running the build (the series of Steps that will be
+executed) is implemented by the `Build' object. In the future this
+might be changed, to have the `Build' define _what_ gets built, and a
+separate `BuildProcess' (provided by the Builder) to define _how_ it
+gets built.
+
+ `BuildRequest' is created with optional `Properties'. One of
+these, `owner', is collected by the resultant `Build' and added to
+the set of _interested users_ to which status notifications will be
+sent, depending on the configuration for each status object.
+
+ The `BuildRequest' may be mergeable with other compatible
+`BuildRequest's. Builds that are triggered by incoming Changes will
+generally be mergeable. Builds that are triggered by user requests
+are generally not, unless they are multiple requests to build the
+_latest sources_ of the same branch.
+
+
+File: buildbot.info, Node: Builder, Next: Users, Prev: BuildRequest, Up: Concepts
+
+3.5 Builder
+===========
+
+The `Builder' is a long-lived object which controls all Builds of a
+given type. Each one is created when the config file is first parsed,
+and lives forever (or rather until it is removed from the config
+file). It mediates the connections to the buildslaves that do all the
+work, and is responsible for creating the `Build' objects that decide
+_how_ a build is performed (i.e., which steps are executed in what
+order).
+
+ Each `Builder' gets a unique name, and the path name of a
+directory where it gets to do all its work (there is a
+buildmaster-side directory for keeping status information, as well as
+a buildslave-side directory where the actual checkout/compile/test
+commands are executed). It also gets a `BuildFactory', which is
+responsible for creating new `Build' instances: because the `Build'
+instance is what actually performs each build, choosing the
+`BuildFactory' is the way to specify what happens each time a build
+is done.
+
+ Each `Builder' is associated with one or more `BuildSlaves'. A
+`Builder' which is used to perform OS-X builds (as opposed to Linux
+or Solaris builds) should naturally be associated with an OS-X-based
+buildslave.
+
+ A `Builder' may be given a set of environment variables to be used
+in its *note ShellCommand::s. These variables will override anything
+in the buildslave's environment. Variables passed directly to a
+ShellCommand will override variables of the same name passed to the
+Builder.
+
+ For example, if you have a pool of identical slaves it is often easier
+to manage variables like PATH from Buildbot rather than manually
+editing it inside of the slaves' environment.
+
+ f = factory.BuildFactory()
+ f.addStep(ShellCommand(
+ command=['bash', './configure']))
+ f.addStep(Compile())
+
+ c['builders'] = [
+ {'name': 'test', 'slavenames': ['slave1', 'slave2', 'slave3', 'slave4',
+ 'slave5', 'slave6'],
+     'builddir': 'test', 'factory': f,
+     'env': {'PATH': '/opt/local/bin:/opt/app/bin:/usr/local/bin:/usr/bin'}}]
+
+
+File: buildbot.info, Node: Users, Next: Build Properties, Prev: Builder, Up: Concepts
+
+3.6 Users
+=========
+
+Buildbot has a somewhat limited awareness of _users_. It assumes the
+world consists of a set of developers, each of whom can be described
+by a couple of simple attributes. These developers make changes to
+the source code, causing builds which may succeed or fail.
+
+ Each developer is primarily known through the source control
+system. Each Change object that arrives is tagged with a `who' field
+that typically gives the account name (on the repository machine) of
+the user responsible for that change. This string is the primary key
+by which the User is known, and is displayed on the HTML status pages
+and in each Build's "blamelist".
+
+ To do more with the User than just refer to them, this username
+needs to be mapped into an address of some sort. The responsibility
+for this mapping is left up to the status module which needs the
+address. The core code knows nothing about email addresses or IRC
+nicknames, just user names.
+
+* Menu:
+
+* Doing Things With Users::
+* Email Addresses::
+* IRC Nicknames::
+* Live Status Clients::
+
+
+File: buildbot.info, Node: Doing Things With Users, Next: Email Addresses, Prev: Users, Up: Users
+
+3.6.1 Doing Things With Users
+-----------------------------
+
+Each Change has a single User who is responsible for that Change. Most
+Builds have a set of Changes: the Build represents the first time
+these Changes have been built and tested by the Buildbot. The build
+has a "blamelist" that consists of a simple union of the Users
+responsible for all the Build's Changes.
+
+ The Build provides (through the IBuildStatus interface) a list of
+Users who are "involved" in the build. For now this is equal to the
+blamelist, but in the future it will be expanded to include a "build
+sheriff" (a person who is "on duty" at that time and responsible for
+watching over all builds that occur during their shift), as well as
+per-module owners who simply want to keep watch over their domain
+(chosen by subdirectory or a regexp matched against the filenames
+pulled out of the Changes). The Involved Users are those who probably
+have an interest in the results of any given build.
+
+ In the future, Buildbot will acquire the concept of "Problems",
+which last longer than builds and have beginnings and ends. For
+example, a test case which passed in one build and then failed in the
+next is a Problem. The Problem lasts until the test case starts
+passing again, at which point the Problem is said to be "resolved".
+
+ If there appears to be a code change that went into the tree at the
+same time as the test started failing, that Change is marked as being
+responsible for the Problem, and the user who made the change is added
+to the Problem's "Guilty" list. In addition to this user, there may
+be others who share responsibility for the Problem (module owners,
+sponsoring developers). In addition to the Responsible Users, there
+may be a set of Interested Users, who take an interest in the fate of
+the Problem.
+
+ Problems therefore have sets of Users who may want to be kept
+aware of the condition of the problem as it changes over time. If
+configured, the Buildbot can pester everyone on the Responsible list
+with increasing harshness until the problem is resolved, with the
+most harshness reserved for the Guilty parties themselves. The
+Interested Users may merely be told when the problem starts and
+stops, as they are not actually responsible for fixing anything.
+
+
+File: buildbot.info, Node: Email Addresses, Next: IRC Nicknames, Prev: Doing Things With Users, Up: Users
+
+3.6.2 Email Addresses
+---------------------
+
+The `buildbot.status.mail.MailNotifier' class (*note MailNotifier::)
+provides a status target which can send email about the results of
+each build. It accepts a static list of email addresses to which each
+message should be delivered, but it can also be configured to send
+mail to the Build's Interested Users. To do this, it needs a way to
+convert User names into email addresses.
+
+ For many VC systems, the User Name is actually an account name on
+the system which hosts the repository. As such, turning the name into
+an email address is a simple matter of appending
+"@repositoryhost.com". Some projects use other kinds of mappings (for
+example the preferred email address may be at "project.org" despite
+the repository host being named "cvs.project.org"), and some VC
+systems have full separation between the concept of a user and that
+of an account on the repository host (like Perforce). Some systems
+(like Arch) put a full contact email address in every change.
+
+ To convert these names to addresses, the MailNotifier uses an
+EmailLookup object. This provides a .getAddress method which accepts
+a name and (eventually) returns an address. The default `MailNotifier'
+module provides an EmailLookup which simply appends a static string,
+configurable when the notifier is created. To create more complex
+behaviors (perhaps using an LDAP lookup, or using "finger" on a
+central host to determine a preferred address for the developer),
+provide a different object as the `lookup' argument.
+
+ In the future, when the Problem mechanism has been set up, the
+Buildbot will need to send mail to arbitrary Users. It will do this
+by locating a MailNotifier-like object among all the buildmaster's
+status targets, and asking it to send messages to various Users. This
+means the User-to-address mapping only has to be set up once, in your
+MailNotifier, and every email message the buildbot emits will take
+advantage of it.
+
+
+File: buildbot.info, Node: IRC Nicknames, Next: Live Status Clients, Prev: Email Addresses, Up: Users
+
+3.6.3 IRC Nicknames
+-------------------
+
+Like MailNotifier, the `buildbot.status.words.IRC' class provides a
+status target which can announce the results of each build. It also
+provides an interactive interface by responding to online queries
+posted in the channel or sent as private messages.
+
+ In the future, the buildbot can be configured to map User names to IRC
+nicknames, to watch for the recent presence of these nicknames, and to
+deliver build status messages to the interested parties. Like
+`MailNotifier' does for email addresses, the `IRC' object will have
+an `IRCLookup' which is responsible for nicknames. The mapping can be
+set up statically, or it can be updated by online users themselves
+(by claiming a username with some kind of "buildbot: i am user
+warner" commands).
+
+ Once the mapping is established, the rest of the buildbot can ask
+the `IRC' object to send messages to various users. It can report on
+the likelihood that the user saw the given message (based upon how
+long the user has been inactive on the channel), which might prompt
+the Problem Hassler logic to send them an email message instead.
+
+
+File: buildbot.info, Node: Live Status Clients, Prev: IRC Nicknames, Up: Users
+
+3.6.4 Live Status Clients
+-------------------------
+
+The Buildbot also offers a PB-based status client interface which can
+display real-time build status in a GUI panel on the developer's
+desktop. This interface is normally anonymous, but it could be
+configured to let the buildmaster know _which_ developer is using the
+status client. The status client could then be used as a
+message-delivery service, providing an alternative way to deliver
+low-latency high-interruption messages to the developer (like "hey,
+you broke the build").
+
+
+File: buildbot.info, Node: Build Properties, Prev: Users, Up: Concepts
+
+3.7 Build Properties
+====================
+
+Each build has a set of "Build Properties", which can be used by its
+BuildStep to modify their actions. These properties, in the form of
+key-value pairs, provide a general framework for dynamically altering
+the behavior of a build based on its circumstances.
+
+ Properties come from a number of places:
+ * global configuration - These properties apply to all builds.
+
+ * schedulers - A scheduler can specify properties available to all
+ the builds it starts.
+
+ * buildslaves - A buildslave can pass properties on to the builds
+ it performs.
+
+ * builds - A build automatically sets a number of properties on
+ itself.
+
+ * steps - Steps of a build can set properties that are available
+ to subsequent steps. In particular, source steps set a number
+ of properties.
+
+ Properties are very flexible, and can be used to implement all
+manner of functionality. Here are some examples:
+
+ Most Source steps record the revision that they checked out in the
+`got_revision' property. A later step could use this property to
+specify the name of a fully-built tarball, dropped in an
+easily-accessible directory for later testing.
+
+ Some projects want to perform nightly builds as well as in response
+to committed changes. Such a project would run two schedulers, both
+pointing to the same set of builders, but could provide an
+`is_nightly' property so that steps can distinguish the nightly
+builds, perhaps to run more resource-intensive tests.
+
+ Some projects have different build processes on different systems.
+Rather than create a build factory for each slave, the steps can use
+buildslave properties to identify the unique aspects of each slave
+and adapt the build process dynamically.
+
+
+File: buildbot.info, Node: Configuration, Next: Getting Source Code Changes, Prev: Concepts, Up: Top
+
+4 Configuration
+***************
+
+The buildbot's behavior is defined by the "config file", which
+normally lives in the `master.cfg' file in the buildmaster's base
+directory (but this can be changed with an option to the `buildbot
+create-master' command). This file completely specifies which
+Builders are to be run, which slaves they should use, how Changes
+should be tracked, and where the status information is to be sent.
+The buildmaster's `buildbot.tac' file names the base directory;
+everything else comes from the config file.
+
+ A sample config file was installed for you when you created the
+buildmaster, but you will need to edit it before your buildbot will do
+anything useful.
+
+ This chapter gives an overview of the format of this file and the
+various sections in it. You will need to read the later chapters to
+understand how to fill in each section properly.
+
+* Menu:
+
+* Config File Format::
+* Loading the Config File::
+* Testing the Config File::
+* Defining the Project::
+* Change Sources and Schedulers::
+* Merging BuildRequests::
+* Setting the slaveport::
+* Buildslave Specifiers::
+* On-Demand ("Latent") Buildslaves::
+* Defining Global Properties::
+* Defining Builders::
+* Defining Status Targets::
+* Debug options::
+
+
+File: buildbot.info, Node: Config File Format, Next: Loading the Config File, Prev: Configuration, Up: Configuration
+
+4.1 Config File Format
+======================
+
+The config file is, fundamentally, just a piece of Python code which
+defines a dictionary named `BuildmasterConfig', with a number of keys
+that are treated specially. You don't need to know Python to do basic
+configuration, though, you can just copy the syntax of the sample
+file. If you _are_ comfortable writing Python code, however, you can
+use all the power of a full programming language to achieve more
+complicated configurations.
+
+ The `BuildmasterConfig' name is the only one which matters: all
+other names defined during the execution of the file are discarded.
+When parsing the config file, the Buildmaster generally compares the
+old configuration with the new one and performs the minimum set of
+actions necessary to bring the buildbot up to date: Builders which are
+not changed are left untouched, and Builders which are modified get to
+keep their old event history.
+
+ Basic Python syntax: comments start with a hash character ("#"),
+tuples are defined with `(parenthesis, pairs)', arrays are defined
+with `[square, brackets]', tuples and arrays are mostly
+interchangeable. Dictionaries (data structures which map "keys" to
+"values") are defined with curly braces: `{'key1': 'value1', 'key2':
+'value2'} '. Function calls (and object instantiation) can use named
+parameters, like `w = html.Waterfall(http_port=8010)'.
+
+ The config file starts with a series of `import' statements, which
+make various kinds of Steps and Status targets available for later
+use. The main `BuildmasterConfig' dictionary is created, then it is
+populated with a variety of keys. These keys are broken roughly into
+the following sections, each of which is documented in the rest of
+this chapter:
+
+ * Project Definitions
+
+ * Change Sources / Schedulers
+
+ * Slaveport
+
+ * Buildslave Configuration
+
+ * Builders / Interlocks
+
+ * Status Targets
+
+ * Debug options
+
+ The config file can use a few names which are placed into its
+namespace:
+
+`basedir'
+ the base directory for the buildmaster. This string has not been
+ expanded, so it may start with a tilde. It needs to be expanded
+ before use. The config file is located in
+ `os.path.expanduser(os.path.join(basedir, 'master.cfg'))'
+
+
+
+File: buildbot.info, Node: Loading the Config File, Next: Testing the Config File, Prev: Config File Format, Up: Configuration
+
+4.2 Loading the Config File
+===========================
+
+The config file is only read at specific points in time. It is first
+read when the buildmaster is launched. Once it is running, there are
+various ways to ask it to reload the config file. If you are on the
+system hosting the buildmaster, you can send a `SIGHUP' signal to it:
+the `buildbot' tool has a shortcut for this:
+
+ buildbot reconfig BASEDIR
+
+ This command will show you all of the lines from `twistd.log' that
+relate to the reconfiguration. If there are any problems during the
+config-file reload, they will be displayed in these lines.
+
+ The debug tool (`buildbot debugclient --master HOST:PORT') has a
+"Reload .cfg" button which will also trigger a reload. In the future,
+there will be other ways to accomplish this step (probably a
+password-protected button on the web page, as well as a privileged IRC
+command).
+
+ When reloading the config file, the buildmaster will endeavor to
+change as little as possible about the running system. For example,
+although old status targets may be shut down and new ones started up,
+any status targets that were not changed since the last time the
+config file was read will be left running and untouched. Likewise any
+Builders which have not been changed will be left running. If a
+Builder is modified (say, the build process is changed) while a Build
+is currently running, that Build will keep running with the old
+process until it completes. Any previously queued Builds (or Builds
+which get queued after the reconfig) will use the new process.
+
+
+File: buildbot.info, Node: Testing the Config File, Next: Defining the Project, Prev: Loading the Config File, Up: Configuration
+
+4.3 Testing the Config File
+===========================
+
+To verify that the config file is well-formed and contains no
+deprecated or invalid elements, use the "checkconfig" command:
+
+ % buildbot checkconfig master.cfg
+ Config file is good!
+
+ If the config file has deprecated features (perhaps because you've
+upgraded the buildmaster and need to update the config file to match),
+they will be announced by checkconfig. In this case, the config file
+will work, but you should really remove the deprecated items and use
+the recommended replacements instead:
+
+ % buildbot checkconfig master.cfg
+ /usr/lib/python2.4/site-packages/buildbot/master.py:559: DeprecationWarning: c['sources'] is
+ deprecated as of 0.7.6 and will be removed by 0.8.0 . Please use c['change_source'] instead.
+ warnings.warn(m, DeprecationWarning)
+ Config file is good!
+
+ If the config file is simply broken, that will be caught too:
+
+ % buildbot checkconfig master.cfg
+ Traceback (most recent call last):
+ File "/usr/lib/python2.4/site-packages/buildbot/scripts/runner.py", line 834, in doCheckConfig
+ ConfigLoader(configFile)
+ File "/usr/lib/python2.4/site-packages/buildbot/scripts/checkconfig.py", line 31, in __init__
+ self.loadConfig(configFile)
+ File "/usr/lib/python2.4/site-packages/buildbot/master.py", line 480, in loadConfig
+ exec f in localDict
+ File "/home/warner/BuildBot/master/foolscap/master.cfg", line 90, in ?
+ c[bogus] = "stuff"
+ NameError: name 'bogus' is not defined
+
+
+File: buildbot.info, Node: Defining the Project, Next: Change Sources and Schedulers, Prev: Testing the Config File, Up: Configuration
+
+4.4 Defining the Project
+========================
+
+There are a couple of basic settings that you use to tell the buildbot
+what project it is working on. This information is used by status
+reporters to let users find out more about the codebase being
+exercised by this particular Buildbot installation.
+
+ c['projectName'] = "Buildbot"
+ c['projectURL'] = "http://buildbot.sourceforge.net/"
+ c['buildbotURL'] = "http://localhost:8010/"
+
+ `projectName' is a short string that will be used to describe the
+project that this buildbot is working on. For example, it is used as
+the title of the waterfall HTML page.
+
+ `projectURL' is a string that gives a URL for the project as a
+whole. HTML status displays will show `projectName' as a link to
+`projectURL', to provide a link from buildbot HTML pages to your
+project's home page.
+
+ The `buildbotURL' string should point to the location where the
+buildbot's internal web server (usually the `html.Waterfall' page) is
+visible. This typically uses the port number set when you create the
+`Waterfall' object: the buildbot needs your help to figure out a
+suitable externally-visible host name.
+
+ When status notices are sent to users (either by email or over
+IRC), `buildbotURL' will be used to create a URL to the specific build
+or problem that they are being notified about. It will also be made
+available to queriers (over IRC) who want to find out where to get
+more information about this buildbot.
+
+ The `logCompressionLimit' enables bz2-compression of build logs on
+disk for logs that are bigger than the given size, or disables that
+completely if given `False'. The default value is 4k, which should be
+a reasonable default on most file systems. This setting has no impact
+on status plugins, and merely affects the required disk space on the
+master for build logs.
+
+
+File: buildbot.info, Node: Change Sources and Schedulers, Next: Merging BuildRequests, Prev: Defining the Project, Up: Configuration
+
+4.5 Change Sources and Schedulers
+=================================
+
+The `c['change_source']' key is the ChangeSource instance(1) that
+defines how the buildmaster learns about source code changes. More
+information about what goes here is available in *Note Getting Source
+Code Changes::.
+
+ from buildbot.changes.pb import PBChangeSource
+ c['change_source'] = PBChangeSource()
+
+ (note: in buildbot-0.7.5 and earlier, this key was named
+`c['sources']', and required a list. `c['sources']' is deprecated as
+of buildbot-0.7.6 and is scheduled to be removed in a future release).
+
+ `c['schedulers']' is a list of Scheduler instances, each of which
+causes builds to be started on a particular set of Builders. The two
+basic Scheduler classes you are likely to start with are `Scheduler'
+and `Periodic', but you can write a customized subclass to implement
+more complicated build scheduling.
+
+ Scheduler arguments should always be specified by name (as keyword
+arguments), to allow for future expansion:
+
+ sched = Scheduler(name="quick", builderNames=['lin', 'win'])
+
+ All schedulers have several arguments in common:
+
+`name'
+ Each Scheduler must have a unique name. This is used in status
+ displays, and is also available in the build property
+ `scheduler'.
+
+`builderNames'
+ This is the set of builders which this scheduler should trigger,
+ specified as a list of names (strings).
+
+`properties'
+ This is a dictionary specifying properties that will be
+ transmitted to all builds started by this scheduler.
+
+
+ Here is a brief catalog of the available Scheduler types. All these
+Schedulers are classes in `buildbot.scheduler', and the docstrings
+there are the best source of documentation on the arguments taken by
+each one.
+
+* Menu:
+
+* Scheduler Scheduler::
+* AnyBranchScheduler::
+* Dependent Scheduler::
+* Periodic Scheduler::
+* Nightly Scheduler::
+* Try Schedulers::
+* Triggerable Scheduler::
+
+ ---------- Footnotes ----------
+
+ (1) To be precise, it is an object or a list of objects which all
+implement the `buildbot.interfaces.IChangeSource' Interface. It is
+unusual to have multiple ChangeSources, so this key accepts either a
+single ChangeSource or a sequence of them.
+
+
+File: buildbot.info, Node: Scheduler Scheduler, Next: AnyBranchScheduler, Prev: Change Sources and Schedulers, Up: Change Sources and Schedulers
+
+4.5.1 Scheduler Scheduler
+-------------------------
+
+This is the original and still most popular Scheduler class. It
+follows exactly one branch, and starts a configurable
+tree-stable-timer after each change on that branch. When the timer
+expires, it starts a build on some set of Builders. The Scheduler
+accepts a `fileIsImportant' function which can be used to ignore some
+Changes if they do not affect any "important" files.
+
+ The arguments to this scheduler are:
+
+`name'
+
+`builderNames'
+
+`properties'
+
+`branch'
+ This Scheduler will pay attention to a single branch, ignoring
+ Changes that occur on other branches. Setting `branch' equal to
+ the special value of `None' means it should only pay attention to
+ the default branch. Note that `None' is a keyword, not a string,
+ so you want to use `None' and not `"None"'.
+
+`treeStableTimer'
+ The Scheduler will wait for this many seconds before starting the
+ build. If new changes are made during this interval, the timer
+ will be restarted, so really the build will be started after a
+ change and then after this many seconds of inactivity.
+
+`fileIsImportant'
+ A callable which takes one argument, a Change instance, and
+ returns `True' if the change is worth building, and `False' if
+ it is not. Unimportant Changes are accumulated until the build
+ is triggered by an important change. The default value of None
+ means that all Changes are important.
+
+`categories'
+ A list of categories of changes that this scheduler will respond
+ to. If this is specified, then any non-matching changes are
+ ignored.
+
+
+ Example:
+
+ from buildbot import scheduler
+ quick = scheduler.Scheduler(name="quick",
+ branch=None,
+ treeStableTimer=60,
+ builderNames=["quick-linux", "quick-netbsd"])
+ full = scheduler.Scheduler(name="full",
+ branch=None,
+ treeStableTimer=5*60,
+ builderNames=["full-linux", "full-netbsd", "full-OSX"])
+ c['schedulers'] = [quick, full]
+
+ In this example, the two "quick" builders are triggered 60 seconds
+after the tree has been changed. The "full" builds do not run quite
+so quickly (they wait 5 minutes), so hopefully if the quick builds
+fail due to a missing file or really simple typo, the developer can
+discover and fix the problem before the full builds are started. Both
+Schedulers only pay attention to the default branch: any changes on
+other branches are ignored by these Schedulers. Each Scheduler
+triggers a different set of Builders, referenced by name.
+
+
+File: buildbot.info, Node: AnyBranchScheduler, Next: Dependent Scheduler, Prev: Scheduler Scheduler, Up: Change Sources and Schedulers
+
+4.5.2 AnyBranchScheduler
+------------------------
+
+This scheduler uses a tree-stable-timer like the default one, but
+follows multiple branches at once. Each branch gets a separate timer.
+
+ The arguments to this scheduler are:
+
+`name'
+
+`builderNames'
+
+`properties'
+
+`branches'
+ This Scheduler will pay attention to any number of branches,
+ ignoring Changes that occur on other branches. Branches are
+ specified just as for the `Scheduler' class.
+
+`treeStableTimer'
+ The Scheduler will wait for this many seconds before starting the
+ build. If new changes are made during this interval, the timer
+ will be restarted, so really the build will be started after a
+ change and then after this many seconds of inactivity.
+
+`fileIsImportant'
+ A callable which takes one argument, a Change instance, and
+ returns `True' if the change is worth building, and `False' if
+ it is not. Unimportant Changes are accumulated until the build
+ is triggered by an important change. The default value of None
+ means that all Changes are important.
+
+
+File: buildbot.info, Node: Dependent Scheduler, Next: Periodic Scheduler, Prev: AnyBranchScheduler, Up: Change Sources and Schedulers
+
+4.5.3 Dependent Scheduler
+-------------------------
+
+It is common to wind up with one kind of build which should only be
+performed if the same source code was successfully handled by some
+other kind of build first. An example might be a packaging step: you
+might only want to produce .deb or RPM packages from a tree that was
+known to compile successfully and pass all unit tests. You could put
+the packaging step in the same Build as the compile and testing steps,
+but there might be other reasons to not do this (in particular you
+might have several Builders worth of compiles/tests, but only wish to
+do the packaging once). Another example is if you want to skip the
+"full" builds after a failing "quick" build of the same source code.
+Or, if one Build creates a product (like a compiled library) that is
+used by some other Builder, you'd want to make sure the consuming
+Build is run _after_ the producing one.
+
+ You can use "Dependencies" to express this relationship to the
+Buildbot. There is a special kind of Scheduler named
+`scheduler.Dependent' that will watch an "upstream" Scheduler for
+builds to complete successfully (on all of its Builders). Each time
+that happens, the same source code (i.e. the same `SourceStamp') will
+be used to start a new set of builds, on a different set of Builders.
+This "downstream" scheduler doesn't pay attention to Changes at all.
+It only pays attention to the upstream scheduler.
+
+ If the build fails on any of the Builders in the upstream set, the
+downstream builds will not fire. Note that, for SourceStamps
+generated by a ChangeSource, the `revision' is None, meaning HEAD.
+If any changes are committed between the time the upstream scheduler
+begins its build and the time the dependent scheduler begins its
+build, then those changes will be included in the downstream build.
+See the *note Triggerable Scheduler:: for a more flexible dependency
+mechanism that can avoid this problem.
+
+ The arguments to this scheduler are:
+
+`name'
+
+`builderNames'
+
+`properties'
+
+`upstream'
+ The upstream scheduler to watch. Note that this is an
+ "instance", not the name of the scheduler.
+
+ Example:
+
+ from buildbot import scheduler
+ tests = scheduler.Scheduler("just-tests", None, 5*60,
+ ["full-linux", "full-netbsd", "full-OSX"])
+ package = scheduler.Dependent("build-package",
+ tests, # upstream scheduler -- no quotes!
+ ["make-tarball", "make-deb", "make-rpm"])
+ c['schedulers'] = [tests, package]
+
+
+File: buildbot.info, Node: Periodic Scheduler, Next: Nightly Scheduler, Prev: Dependent Scheduler, Up: Change Sources and Schedulers
+
+4.5.4 Periodic Scheduler
+------------------------
+
+This simple scheduler just triggers a build every N seconds.
+
+ The arguments to this scheduler are:
+
+`name'
+
+`builderNames'
+
+`properties'
+
+`periodicBuildTimer'
+ The time, in seconds, after which to start a build.
+
+ Example:
+
+ from buildbot import scheduler
+ nightly = scheduler.Periodic(name="nightly",
+ builderNames=["full-solaris"],
+ periodicBuildTimer=24*60*60)
+ c['schedulers'] = [nightly]
+
+ The Scheduler in this example just runs the full solaris build once
+per day. Note that this Scheduler only lets you control the time
+between builds, not the absolute time-of-day of each Build, so this
+could easily wind up a "daily" or "every afternoon" scheduler
+depending upon when it was first activated.
+
+
+File: buildbot.info, Node: Nightly Scheduler, Next: Try Schedulers, Prev: Periodic Scheduler, Up: Change Sources and Schedulers
+
+4.5.5 Nightly Scheduler
+-----------------------
+
+This is a highly configurable periodic build scheduler, which triggers
+a build at particular times of day, week, month, or year. The
+configuration syntax is very similar to the well-known `crontab'
+format, in which you provide values for minute, hour, day, and month
+(some of which can be wildcards), and a build is triggered whenever
+the current time matches the given constraints. This can run a build
+every night, every morning, every weekend, alternate Thursdays, on
+your boss's birthday, etc.
+
+ Pass some subset of `minute', `hour', `dayOfMonth', `month', and
+`dayOfWeek'; each may be a single number or a list of valid values.
+The builds will be triggered whenever the current time matches these
+values. Wildcards are represented by a '*' string. All fields default
+to a wildcard except 'minute', so with no fields this defaults to a
+build every hour, on the hour. The full list of parameters is:
+
+`name'
+
+`builderNames'
+
+`properties'
+
+`branch'
+ The branch to build, just as for `Scheduler'.
+
+`minute'
+ The minute of the hour on which to start the build. This
+ defaults to 0, meaning an hourly build.
+
+`hour'
+ The hour of the day on which to start the build, in 24-hour
+ notation. This defaults to *, meaning every hour.
+
+`month'
+ The month in which to start the build, with January = 1. This
+ defaults to *, meaning every month.
+
+`dayOfWeek'
+ The day of the week to start a build, with Monday = 0. This
+     defaults to *, meaning every day of the week.
+
+`onlyIfChanged'
+ If this is true, then builds will not be scheduled at the
+ designated time unless the source has changed since the previous
+ build.
+
+ For example, the following master.cfg clause will cause a build to
+be started every night at 3:00am:
+
+ s = scheduler.Nightly(name='nightly',
+ builderNames=['builder1', 'builder2'],
+ hour=3,
+ minute=0)
+
+ This scheduler will perform a build each monday morning at 6:23am
+and again at 8:23am, but only if someone has committed code in the
+interim:
+
+ s = scheduler.Nightly(name='BeforeWork',
+ builderNames=['builder1'],
+ dayOfWeek=0,
+ hour=[6,8],
+ minute=23,
+ onlyIfChanged=True)
+
+ The following runs a build every two hours, using Python's `range'
+function:
+
+ s = Nightly(name='every2hours',
+ builderNames=['builder1'],
+ hour=range(0, 24, 2))
+
+ Finally, this example will run only on December 24th:
+
+ s = Nightly(name='SleighPreflightCheck',
+ builderNames=['flying_circuits', 'radar'],
+ month=12,
+ dayOfMonth=24,
+ hour=12,
+ minute=0)
+
+
+File: buildbot.info, Node: Try Schedulers, Next: Triggerable Scheduler, Prev: Nightly Scheduler, Up: Change Sources and Schedulers
+
+4.5.6 Try Schedulers
+--------------------
+
+This scheduler allows developers to use the `buildbot try' command to
+trigger builds of code they have not yet committed. See *note try::
+for complete details.
+
+ Two implementations are available: `Try_Jobdir' and
+`Try_Userpass'. The former monitors a job directory, specified by
+the `jobdir' parameter, while the latter listens for PB connections
+on a specific `port', and authenticates against `userport'.
+
+
+File: buildbot.info, Node: Triggerable Scheduler, Prev: Try Schedulers, Up: Change Sources and Schedulers
+
+4.5.7 Triggerable Scheduler
+---------------------------
+
+The `Triggerable' scheduler waits to be triggered by a Trigger step
+(see *note Triggering Schedulers::) in another build. That step can
+optionally wait for the scheduler's builds to complete. This provides
+two advantages over Dependent schedulers. First, the same scheduler
+can be triggered from multiple builds. Second, the ability to wait
+for a Triggerable's builds to complete provides a form of "subroutine
+call", where one or more builds can "call" a scheduler to perform
+some work for them, perhaps on other buildslaves.
+
+ The parameters are just the basics:
+
+`name'
+
+`builderNames'
+
+`properties'
+
+ This class is only useful in conjunction with the `Trigger' step.
+Here is a fully-worked example:
+
+ from buildbot import scheduler
+ from buildbot.process import factory
+ from buildbot.steps import trigger
+
+ checkin = scheduler.Scheduler(name="checkin",
+ branch=None,
+ treeStableTimer=5*60,
+ builderNames=["checkin"])
+ nightly = scheduler.Nightly(name='nightly',
+ builderNames=['nightly'],
+ hour=3,
+ minute=0)
+
+ mktarball = scheduler.Triggerable(name="mktarball",
+ builderNames=["mktarball"])
+ build = scheduler.Triggerable(name="build-all-platforms",
+ builderNames=["build-all-platforms"])
+ test = scheduler.Triggerable(name="distributed-test",
+ builderNames=["distributed-test"])
+ package = scheduler.Triggerable(name="package-all-platforms",
+ builderNames=["package-all-platforms"])
+
+ c['schedulers'] = [checkin, nightly, build, test, package]
+
+ # on checkin, make a tarball, build it, and test it
+ checkin_factory = factory.BuildFactory()
+ checkin_factory.addStep(trigger.Trigger(schedulerNames=['mktarball'],
+ waitForFinish=True))
+ checkin_factory.addStep(trigger.Trigger(schedulerNames=['build-all-platforms'],
+ waitForFinish=True))
+ checkin_factory.addStep(trigger.Trigger(schedulerNames=['distributed-test'],
+ waitForFinish=True))
+
+ # and every night, make a tarball, build it, and package it
+ nightly_factory = factory.BuildFactory()
+ nightly_factory.addStep(trigger.Trigger(schedulerNames=['mktarball'],
+ waitForFinish=True))
+ nightly_factory.addStep(trigger.Trigger(schedulerNames=['build-all-platforms'],
+ waitForFinish=True))
+ nightly_factory.addStep(trigger.Trigger(schedulerNames=['package-all-platforms'],
+ waitForFinish=True))
+
+
+File: buildbot.info, Node: Merging BuildRequests, Next: Setting the slaveport, Prev: Change Sources and Schedulers, Up: Configuration
+
+4.6 Merging BuildRequests
+=========================
+
+By default, buildbot merges BuildRequests that have compatible
+SourceStamps. This behaviour can be customized with the
+`c['mergeRequests']' configuration key. This key specifies a function
+which is called with three arguments: a `Builder' and two
+`BuildRequest' objects. It should return true if the requests can be
+merged. For example:
+
+ def mergeRequests(builder, req1, req2):
+ """Don't merge buildrequest at all"""
+ return False
+ c['mergeRequests'] = mergeRequests
+
+ In many cases, the details of the SourceStamps and BuildRequests
+are important. In this example, only BuildRequests with the same
+"reason" are merged; thus developers forcing builds for different
+reasons will see distinct builds.
+
+ def mergeRequests(builder, req1, req2):
+ if req1.source.canBeMergedWith(req2.source) and req1.reason == req2.reason:
+ return True
+ return False
+ c['mergeRequests'] = mergeRequests
+
+
+File: buildbot.info, Node: Setting the slaveport, Next: Buildslave Specifiers, Prev: Merging BuildRequests, Up: Configuration
+
+4.7 Setting the slaveport
+=========================
+
+The buildmaster will listen on a TCP port of your choosing for
+connections from buildslaves. It can also use this port for
+connections from remote Change Sources, status clients, and debug
+tools. This port should be visible to the outside world, and you'll
+need to tell your buildslave admins about your choice.
+
+ It does not matter which port you pick, as long it is externally
+visible, however you should probably use something larger than 1024,
+since most operating systems don't allow non-root processes to bind to
+low-numbered ports. If your buildmaster is behind a firewall or a NAT
+box of some sort, you may have to configure your firewall to permit
+inbound connections to this port.
+
+ c['slavePortnum'] = 10000
+
+ `c['slavePortnum']' is a _strports_ specification string, defined
+in the `twisted.application.strports' module (try `pydoc
+twisted.application.strports' to get documentation on the format).
+This means that you can have the buildmaster listen on a
+localhost-only port by doing:
+
+ c['slavePortnum'] = "tcp:10000:interface=127.0.0.1"
+
+ This might be useful if you only run buildslaves on the same
+machine, and they are all configured to contact the buildmaster at
+`localhost:10000'.
+
+
+File: buildbot.info, Node: Buildslave Specifiers, Next: On-Demand ("Latent") Buildslaves, Prev: Setting the slaveport, Up: Configuration
+
+4.8 Buildslave Specifiers
+=========================
+
+The `c['slaves']' key is a list of known buildslaves. In the common
+case, each buildslave is defined by an instance of the BuildSlave
+class. It represents a standard, manually started machine that will
+try to connect to the buildbot master as a slave. Contrast these
+with the "on-demand" latent buildslaves, such as the Amazon Web
+Service Elastic Compute Cloud latent buildslave discussed below.
+
+ The BuildSlave class is instantiated with two values: (slavename,
+slavepassword). These are the same two values that need to be
+provided to the buildslave administrator when they create the
+buildslave.
+
+ The slavenames must be unique, of course. The password exists to
+prevent evildoers from interfering with the buildbot by inserting
+their own (broken) buildslaves into the system and thus displacing the
+real ones.
+
+ Buildslaves with an unrecognized slavename or a non-matching
+password will be rejected when they attempt to connect, and a message
+describing the problem will be put in the log file (see *note
+Logfiles::).
+
+ from buildbot.buildslave import BuildSlave
+     c['slaves'] = [BuildSlave('bot-solaris', 'solarispasswd'),
+ BuildSlave('bot-bsd', 'bsdpasswd')
+ ]
+
+ `BuildSlave' objects can also be created with an optional
+`properties' argument, a dictionary specifying properties that will
+be available to any builds performed on this slave. For example:
+
+ from buildbot.buildslave import BuildSlave
+ c['slaves'] = [BuildSlave('bot-solaris', 'solarispasswd',
+ properties={'os':'solaris'}),
+ ]
+
+ The `BuildSlave' constructor can also take an optional
+`max_builds' parameter to limit the number of builds that it will
+execute simultaneously:
+
+ from buildbot.buildslave import BuildSlave
+ c['slaves'] = [BuildSlave("bot-linux", "linuxpassword", max_builds=2)]
+
+ Historical note: in buildbot-0.7.5 and earlier, the `c['bots']'
+key was used instead, and it took a list of (name, password) tuples.
+This key is accepted for backwards compatibility, but is deprecated as
+of 0.7.6 and will go away in some future release.
+
+* Menu:
+
+* When Buildslaves Go Missing::
+
+
+File: buildbot.info, Node: When Buildslaves Go Missing, Up: Buildslave Specifiers
+
+4.8.1 When Buildslaves Go Missing
+---------------------------------
+
+Sometimes, the buildslaves go away. One very common reason for this is
+when the buildslave process is started once (manually) and left
+running, but then later the machine reboots and the process is not
+automatically restarted.
+
+ If you'd like to have the administrator of the buildslave (or other
+people) be notified by email when the buildslave has been missing for
+too long, just add the `notify_on_missing=' argument to the
+`BuildSlave' definition:
+
+ c['slaves'] = [BuildSlave('bot-solaris', 'solarispasswd',
+ notify_on_missing="bob@example.com"),
+ ]
+
+ By default, this will send email when the buildslave has been
+disconnected for more than one hour. Only one email per
+connection-loss event will be sent. To change the timeout, use
+`missing_timeout=' and give it a number of seconds (the default is
+3600).
+
+ You can have the buildmaster send email to multiple recipients:
+just provide a list of addresses instead of a single one:
+
+ c['slaves'] = [BuildSlave('bot-solaris', 'solarispasswd',
+ notify_on_missing=["bob@example.com",
+ "alice@example.org"],
+ missing_timeout=300, # notify after 5 minutes
+ ),
+ ]
+
+ The email sent this way will use a MailNotifier (*note
+MailNotifier::) status target, if one is configured. This provides a
+way for you to control the "from" address of the email, as well as
+the relayhost (aka "smarthost") to use as an SMTP server. If no
+MailNotifier is configured on this buildmaster, the
+buildslave-missing emails will be sent using a default configuration.
+
+ Note that if you want to have a MailNotifier for buildslave-missing
+emails but not for regular build emails, just create one with
+builders=[], as follows:
+
+ from buildbot.status import mail
+ m = mail.MailNotifier(fromaddr="buildbot@localhost", builders=[],
+ relayhost="smtp.example.org")
+ c['status'].append(m)
+ c['slaves'] = [BuildSlave('bot-solaris', 'solarispasswd',
+ notify_on_missing="bob@example.com"),
+ ]
+
+
+File: buildbot.info, Node: On-Demand ("Latent") Buildslaves, Next: Defining Global Properties, Prev: Buildslave Specifiers, Up: Configuration
+
+4.9 On-Demand ("Latent") Buildslaves
+====================================
+
+The standard buildbot model has slaves started manually. The
+previous section described how to configure the master for this
+approach.
+
+ Another approach is to let the buildbot master start slaves when
+builds are ready, on-demand. Thanks to services such as Amazon Web
+Services' Elastic Compute Cloud ("AWS EC2"), this is relatively easy
+to set up, and can be very useful for some situations.
+
+ The buildslaves that are started on-demand are called "latent"
+buildslaves. As of this writing, buildbot ships with an abstract
+base class for building latent buildslaves, and a concrete
+implementation for AWS EC2.
+
+* Menu:
+
+* Amazon Web Services Elastic Compute Cloud ("AWS EC2")::
+* Dangers with Latent Buildslaves::
+* Writing New Latent Buildslaves::
+
+
+File: buildbot.info, Node: Amazon Web Services Elastic Compute Cloud ("AWS EC2"), Next: Dangers with Latent Buildslaves, Up: On-Demand ("Latent") Buildslaves
+
+4.9.1 Amazon Web Services Elastic Compute Cloud ("AWS EC2")
+-----------------------------------------------------------
+
+AWS EC2 is a web service that allows you to start virtual machines in
+an Amazon data center. Please see their website for details, including
+costs. Using the AWS EC2 latent buildslaves involves getting an EC2
+account with AWS and setting up payment; customizing one or more EC2
+machine images ("AMIs") on your desired operating system(s) and
+publishing them (privately if needed); and configuring the buildbot
+master to know how to start your customized images for
+"substantiating" your latent slaves.
+
+* Menu:
+
+* Get an AWS EC2 Account::
+* Create an AMI::
+* Configure the Master with an EC2LatentBuildSlave::
+
+
+File: buildbot.info, Node: Get an AWS EC2 Account, Next: Create an AMI, Up: Amazon Web Services Elastic Compute Cloud ("AWS EC2")
+
+4.9.1.1 Get an AWS EC2 Account
+..............................
+
+To start off, to use the AWS EC2 latent buildslave, you need to get
+an AWS developer account and sign up for EC2. These instructions may
+help you get started:
+
+ * Go to http://aws.amazon.com/ and click to "Sign Up Now" for an
+ AWS account.
+
+ * Once you are logged into your account, you need to sign up for
+ EC2. Instructions for how to do this have changed over time
+ because Amazon changes their website, so the best advice is to
+ hunt for it. After signing up for EC2, it may say it wants you
+ to upload an x.509 cert. You will need this to create images
+ (see below) but it is not technically necessary for the buildbot
+ master configuration.
+
+ * You must enter a valid credit card before you will be able to
+ use EC2. Do that under 'Payment Method'.
+
+ * Make sure you're signed up for EC2 by going to 'Your
+ Account'->'Account Activity' and verifying EC2 is listed.
+
+
+File: buildbot.info, Node: Create an AMI, Next: Configure the Master with an EC2LatentBuildSlave, Prev: Get an AWS EC2 Account, Up: Amazon Web Services Elastic Compute Cloud ("AWS EC2")
+
+4.9.1.2 Create an AMI
+.....................
+
+Now you need to create an AMI and configure the master. You may need
+to run through this cycle a few times to get it working, but these
+instructions should get you started.
+
+ Creating an AMI is out of the scope of this document. The EC2
+Getting Started Guide is a good resource for this task. Here are a
+few additional hints.
+
+ * When an instance of the image starts, it needs to automatically
+ start a buildbot slave that connects to your master (to create a
+ buildbot slave, *note Creating a buildslave::; to make a daemon,
+ *note Launching the daemons::).
+
+ * You may want to make an instance of the buildbot slave,
+ configure it as a standard buildslave in the master (i.e., not
+ as a latent slave), and test and debug it that way before you
+ turn it into an AMI and convert to a latent slave in the master.
+
+
+File: buildbot.info, Node: Configure the Master with an EC2LatentBuildSlave, Prev: Create an AMI, Up: Amazon Web Services Elastic Compute Cloud ("AWS EC2")
+
+4.9.1.3 Configure the Master with an EC2LatentBuildSlave
+........................................................
+
+Now let's assume you have an AMI that should work with the
+EC2LatentBuildSlave. It's now time to set up your buildbot master
+configuration.
+
+ You will need some information from your AWS account: the "Access
+Key Id" and the "Secret Access Key". If you've built the AMI
+yourself, you probably already are familiar with these values. If
+you have not, and someone has given you access to an AMI, these hints
+may help you find the necessary values:
+
+ * While logged into your AWS account, find the "Access
+ Identifiers" link (either on the left, or via "Your Account" ->
+     "Access Identifiers").
+
+ * On the page, you'll see alphanumeric values for "Your Access Key
+ Id:" and "Your Secret Access Key:". Make a note of these. Later
+ on, we'll call the first one your "identifier" and the second
+ one your "secret_identifier."
+
+ When creating an EC2LatentBuildSlave in the buildbot master
+configuration, the first three arguments are required. The name and
+password are the first two arguments, and work the same as with
+normal buildslaves. The next argument specifies the type of the EC2
+virtual machine (available options as of this writing include
+"m1.small", "m1.large", "m1.xlarge", "c1.medium", and "c1.xlarge";
+see the EC2 documentation for descriptions of these machines).
+
+ Here is the simplest example of configuring an EC2 latent
+buildslave. It specifies all necessary remaining values explicitly in
+the instantiation.
+
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ c['slaves'] = [EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ ami='ami-12345',
+ identifier='publickey',
+ secret_identifier='privatekey'
+ )]
+
+ The "ami" argument specifies the AMI that the master should start.
+The "identifier" argument specifies the AWS "Access Key Id," and the
+"secret_identifier" specifies the AWS "Secret Access Key." Both the
+AMI and the account information can be specified in alternate ways.
+
+ Note that whoever has your identifier and secret_identifier values
+can request AWS work charged to your account, so these values need to
+be carefully protected. Another way to specify these access keys is
+to put them in a separate file. You can then make the access
+privileges stricter for this separate file, and potentially let more
+people read your main configuration file.
+
+ By default, you can make an .ec2 directory in the home folder of
+the user running the buildbot master. In that directory, create a
+file called aws_id. The first line of that file should be your
+access key id; the second line should be your secret access key id.
+Then you can instantiate the build slave as follows.
+
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ c['slaves'] = [EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ ami='ami-12345')]
+
+ If you want to put the key information in another file, use the
+"aws_id_file_path" initialization argument.
+
+ Previous examples used a particular AMI. If the Buildbot master
+will be deployed in a process-controlled environment, it may be
+convenient to specify the AMI more flexibly. Rather than specifying
+an individual AMI, specify one or two AMI filters.
+
+ In all cases, the AMI that sorts last by its location (the S3
+bucket and manifest name) will be preferred.
+
+ One available filter is to specify the acceptable AMI owners, by
+AWS account number (the 12 digit number, usually rendered in AWS with
+hyphens like "1234-5678-9012", should be entered as an integer).
+
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ valid_ami_owners=[11111111111,
+ 22222222222],
+ identifier='publickey',
+ secret_identifier='privatekey'
+ )
+
+ The other available filter is to provide a regular expression
+string that will be matched against each AMI's location (the S3
+bucket and manifest name).
+
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large',
+ valid_ami_location_regex=r'buildbot\-.*/image.manifest.xml',
+ identifier='publickey', secret_identifier='privatekey')
+
+ The regular expression can specify a group, which will be
+preferred for the sorting. Only the first group is used; subsequent
+groups are ignored.
+
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large',
+ valid_ami_location_regex=r'buildbot\-.*\-(.*)/image.manifest.xml',
+ identifier='publickey', secret_identifier='privatekey')
+
+ If the group can be cast to an integer, it will be. This allows
+10 to sort after 1, for instance.
+
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large',
+ valid_ami_location_regex=r'buildbot\-.*\-(\d+)/image.manifest.xml',
+ identifier='publickey', secret_identifier='privatekey')
+
+ In addition to using the password as a handshake between the
+master and the slave, you may want to use a firewall to assert that
+only machines from a specific IP can connect as slaves. This is
+possible with AWS EC2 by using the Elastic IP feature. To configure,
+generate an Elastic IP in AWS, and then specify it in your
+configuration using the "elastic_ip" argument.
+
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ c['slaves'] = [EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ 'ami-12345',
+ identifier='publickey',
+ secret_identifier='privatekey',
+ elastic_ip='208.77.188.166'
+ )]
+
+ The EC2LatentBuildSlave supports all other configuration from the
+standard BuildSlave. The "missing_timeout" and "notify_on_missing"
+specify how long to wait for an EC2 instance to attach before
+considering the attempt to have failed, and email addresses to alert,
+respectively. "missing_timeout" defaults to 20 minutes.
+
+ The "build_wait_timeout" allows you to specify how long an
+EC2LatentBuildSlave should wait after a build for another build
+before it shuts down the EC2 instance. It defaults to 10 minutes.
+
+ "keypair_name" and "security_name" allow you to specify different
+names for these AWS EC2 values. They both default to
+"latent_buildbot_slave".
+
+
+File: buildbot.info, Node: Dangers with Latent Buildslaves, Next: Writing New Latent Buildslaves, Prev: Amazon Web Services Elastic Compute Cloud ("AWS EC2"), Up: On-Demand ("Latent") Buildslaves
+
+4.9.2 Dangers with Latent Buildslaves
+-------------------------------------
+
+Any latent build slave that interacts with a for-fee service, such as
+the EC2LatentBuildSlave, brings significant risks. As already
+identified, the configuration will need access to account information
+that, if obtained by a criminal, can be used to charge services to
+your account. Also, bugs in the buildbot software may lead to
+unnecessary charges. In particular, if the master neglects to shut
+down an instance for some reason, a virtual machine may be running
+unnecessarily, charging against your account. Manual and/or automatic
+(e.g. nagios with a plugin using a library like boto) double-checking
+may be appropriate.
+
+ A comparatively trivial note is that currently if two instances
+try to attach to the same latent buildslave, it is likely that the
+system will become confused. This should not occur, unless, for
+instance, you configure a normal build slave to connect with the
+authentication of a latent buildbot. If the situation occurs, stop
+all attached instances and restart the master.
+
+
+File: buildbot.info, Node: Writing New Latent Buildslaves, Prev: Dangers with Latent Buildslaves, Up: On-Demand ("Latent") Buildslaves
+
+4.9.3 Writing New Latent Buildslaves
+------------------------------------
+
+Writing a new latent buildslave should only require subclassing
+`buildbot.buildslave.AbstractLatentBuildSlave' and implementing
+start_instance and stop_instance.
+
+ def start_instance(self):
+ # responsible for starting instance that will try to connect with this
+ # master. Should return deferred. Problems should use an errback. The
+ # callback value can be None, or can be an iterable of short strings to
+ # include in the "substantiate success" status message, such as
+ # identifying the instance that started.
+ raise NotImplementedError
+
+ def stop_instance(self, fast=False):
+ # responsible for shutting down instance. Return a deferred. If `fast`,
+ # we're trying to shut the master down, so callback as soon as is safe.
+ # Callback value is ignored.
+ raise NotImplementedError
+
+ See `buildbot.ec2buildslave.EC2LatentBuildSlave' for an example,
+or see the test example `buildbot.test_slaves.FakeLatentBuildSlave'.
+
+
+File: buildbot.info, Node: Defining Global Properties, Next: Defining Builders, Prev: On-Demand ("Latent") Buildslaves, Up: Configuration
+
+4.10 Defining Global Properties
+===============================
+
+The `'properties'' configuration key defines a dictionary of
+properties that will be available to all builds started by the
+buildmaster:
+
+ c['properties'] = {
+ 'Widget-version' : '1.2',
+ 'release-stage' : 'alpha'
+ }
+
+
+File: buildbot.info, Node: Defining Builders, Next: Defining Status Targets, Prev: Defining Global Properties, Up: Configuration
+
+4.11 Defining Builders
+======================
+
+The `c['builders']' key is a list of dictionaries which specify the
+Builders. The Buildmaster runs a collection of Builders, each of
+which handles a single type of build (e.g. full versus quick), on a
+single build slave. A Buildbot which makes sure that the latest code
+("HEAD") compiles correctly across four separate architectures will
+have four Builders, each performing the same build but on different
+slaves (one per platform).
+
+ Each Builder gets a separate column in the waterfall display. In
+general, each Builder runs independently (although various kinds of
+interlocks can cause one Builder to have an effect on another).
+
+ Each Builder specification dictionary has several required keys:
+
+`name'
+ This specifies the Builder's name, which is used in status
+ reports.
+
+`slavename'
+ This specifies which buildslave will be used by this Builder.
+ `slavename' must appear in the `c['slaves']' list. Each
+ buildslave can accommodate multiple Builders.
+
+`slavenames'
+ If you provide `slavenames' instead of `slavename', you can give
+ a list of buildslaves which are capable of running this Builder.
+ If multiple buildslaves are available for any given Builder, you
+ will have some measure of redundancy: in case one slave goes
+ offline, the others can still keep the Builder working. In
+ addition, multiple buildslaves will allow multiple simultaneous
+ builds for the same Builder, which might be useful if you have a
+ lot of forced or "try" builds taking place.
+
+ If you use this feature, it is important to make sure that the
+ buildslaves are all, in fact, capable of running the given
+ build. The slave hosts should be configured similarly, otherwise
+ you will spend a lot of time trying (unsuccessfully) to
+ reproduce a failure that only occurs on some of the buildslaves
+ and not the others. Different platforms, operating systems,
+ versions of major programs or libraries, all these things mean
+ you should use separate Builders.
+
+`builddir'
+ This specifies the name of a subdirectory (under the base
+ directory) in which everything related to this builder will be
+ placed. On the buildmaster, this holds build status information.
+ On the buildslave, this is where checkouts, compiles, and tests
+ are run.
+
+`factory'
+ This is a `buildbot.process.factory.BuildFactory' instance which
+ controls how the build is performed. Full details appear in
+ their own chapter, *Note Build Process::. Parameters like the
+ location of the CVS repository and the compile-time options used
+ for the build are generally provided as arguments to the
+ factory's constructor.
+
+
+ Other optional keys may be set on each Builder:
+
+`category'
+ If provided, this is a string that identifies a category for the
+ builder to be a part of. Status clients can limit themselves to a
+ subset of the available categories. A common use for this is to
+ add new builders to your setup (for a new module, or for a new
+ buildslave) that do not work correctly yet and allow you to
+ integrate them with the active builders. You can put these new
+ builders in a test category, make your main status clients
+ ignore them, and have only private status clients pick them up.
+ As soon as they work, you can move them over to the active
+ category.
+
+
+
+File: buildbot.info, Node: Defining Status Targets, Next: Debug options, Prev: Defining Builders, Up: Configuration
+
+4.12 Defining Status Targets
+============================
+
+The Buildmaster has a variety of ways to present build status to
+various users. Each such delivery method is a "Status Target" object
+in the configuration's `status' list. To add status targets, you just
+append more objects to this list:
+
+ c['status'] = []
+
+ from buildbot.status import html
+ c['status'].append(html.Waterfall(http_port=8010))
+
+ from buildbot.status import mail
+ m = mail.MailNotifier(fromaddr="buildbot@localhost",
+ extraRecipients=["builds@lists.example.com"],
+ sendToInterestedUsers=False)
+ c['status'].append(m)
+
+ from buildbot.status import words
+ c['status'].append(words.IRC(host="irc.example.com", nick="bb",
+ channels=["#example"]))
+
+ Status delivery has its own chapter, *Note Status Delivery::, in
+which all the built-in status targets are documented.
+
+
+File: buildbot.info, Node: Debug options, Prev: Defining Status Targets, Up: Configuration
+
+4.13 Debug options
+==================
+
+If you set `c['debugPassword']', then you can connect to the
+buildmaster with the diagnostic tool launched by `buildbot
+debugclient MASTER:PORT'. From this tool, you can reload the config
+file, manually force builds, and inject changes, which may be useful
+for testing your buildmaster without actually committing changes to
+your repository (or before you have the Change Sources set up). The
+debug tool uses the same port number as the slaves do:
+`c['slavePortnum']', and is authenticated with this password.
+
+ c['debugPassword'] = "debugpassword"
+
+ If you set `c['manhole']' to an instance of one of the classes in
+`buildbot.manhole', you can telnet or ssh into the buildmaster and
+get an interactive Python shell, which may be useful for debugging
+buildbot internals. It is probably only useful for buildbot
+developers. It exposes full access to the buildmaster's account
+(including the ability to modify and delete files), so it should not
+be enabled with a weak or easily guessable password.
+
+ There are three separate `Manhole' classes. Two of them use SSH,
+one uses unencrypted telnet. Two of them use a username+password
+combination to grant access, one of them uses an SSH-style
+`authorized_keys' file which contains a list of ssh public keys.
+
+`manhole.AuthorizedKeysManhole'
+ You construct this with the name of a file that contains one SSH
+ public key per line, just like `~/.ssh/authorized_keys'. If you
+ provide a non-absolute filename, it will be interpreted relative
+ to the buildmaster's base directory.
+
+`manhole.PasswordManhole'
+ This one accepts SSH connections but asks for a username and
+ password when authenticating. It accepts only one such pair.
+
+`manhole.TelnetManhole'
+ This accepts regular unencrypted telnet connections, and asks
+ for a username/password pair before providing access. Because
+ this username/password is transmitted in the clear, and because
+ Manhole access to the buildmaster is equivalent to granting full
+ shell privileges to both the buildmaster and all the buildslaves
+ (and to all accounts which then run code produced by the
+ buildslaves), it is highly recommended that you use one of the
+ SSH manholes instead.
+
+
+ # some examples:
+ from buildbot import manhole
+ c['manhole'] = manhole.AuthorizedKeysManhole(1234, "authorized_keys")
+ c['manhole'] = manhole.PasswordManhole(1234, "alice", "mysecretpassword")
+ c['manhole'] = manhole.TelnetManhole(1234, "bob", "snoop_my_password_please")
+
+ The `Manhole' instance can be configured to listen on a specific
+port. You may wish to have this listening port bind to the loopback
+interface (sometimes known as "lo0", "localhost", or 127.0.0.1) to
+restrict access to clients which are running on the same host.
+
+ from buildbot.manhole import PasswordManhole
+ c['manhole'] = PasswordManhole("tcp:9999:interface=127.0.0.1","admin","passwd")
+
+ To have the `Manhole' listen on all interfaces, use `"tcp:9999"'
+or simply 9999. This port specification uses
+`twisted.application.strports', so you can make it listen on SSL or
+even UNIX-domain sockets if you want.
+
+ Note that using any Manhole requires that the TwistedConch package
+be installed, and that you be using Twisted version 2.0 or later.
+
+ The buildmaster's SSH server will use a different host key than the
+normal sshd running on a typical unix host. This will cause the ssh
+client to complain about a "host key mismatch", because it does not
+realize there are two separate servers running on the same host. To
+avoid this, use a clause like the following in your `.ssh/config'
+file:
+
+ Host remotehost-buildbot
+ HostName remotehost
+ HostKeyAlias remotehost-buildbot
+ Port 9999
+ # use 'user' if you use PasswordManhole and your name is not 'admin'.
+ # if you use AuthorizedKeysManhole, this probably doesn't matter.
+ User admin
+
+
+File: buildbot.info, Node: Getting Source Code Changes, Next: Build Process, Prev: Configuration, Up: Top
+
+5 Getting Source Code Changes
+*****************************
+
+The most common way to use the Buildbot is centered around the idea of
+`Source Trees': a directory tree filled with source code of some form
+which can be compiled and/or tested. Some projects use languages that
+don't involve any compilation step: nevertheless there may be a
+`build' phase where files are copied or rearranged into a form that
+is suitable for installation. Some projects do not have unit tests,
+and the Buildbot is merely helping to make sure that the sources can
+compile correctly. But in all of these cases, the thing-being-tested
+is a single source tree.
+
+ A Version Control System maintains a source tree, and tells the
+buildmaster when it changes. The first step of each Build is typically
+to acquire a copy of some version of this tree.
+
+ This chapter describes how the Buildbot learns about what Changes
+have occurred. For more information on VC systems and Changes, see
+*note Version Control Systems::.
+
+* Menu:
+
+* Change Sources::
+* Choosing ChangeSources::
+* CVSToys - PBService::
+* Mail-parsing ChangeSources::
+* PBChangeSource::
+* P4Source::
+* BonsaiPoller::
+* SVNPoller::
+* MercurialHook::
+* Bzr Hook::
+* Bzr Poller::
+
+
+File: buildbot.info, Node: Change Sources, Next: Choosing ChangeSources, Prev: Getting Source Code Changes, Up: Getting Source Code Changes
+
+5.1 Change Sources
+==================
+
+Each Buildmaster watches a single source tree. Changes can be provided
+by a variety of ChangeSource types, however any given project will
+typically have only a single ChangeSource active. This section
+provides a description of all available ChangeSource types and
+explains how to set up each of them.
+
+ There are a variety of ChangeSources available, some of which are
+meant to be used in conjunction with other tools to deliver Change
+events from the VC repository to the buildmaster.
+
+ * CVSToys This ChangeSource opens a TCP connection from the
+ buildmaster to a waiting FreshCVS daemon that lives on the
+ repository machine, and subscribes to hear about Changes.
+
+ * MaildirSource This one watches a local maildir-format inbox for
+ email sent out by the repository when a change is made. When a
+ message arrives, it is parsed to create the Change object. A
+ variety of parsing functions are available to accommodate
+ different email-sending tools.
+
+ * PBChangeSource This ChangeSource listens on a local TCP socket
+ for inbound connections from a separate tool. Usually, this tool
+ would be run on the VC repository machine in a commit hook. It
+ is expected to connect to the TCP socket and send a Change
+ message over the network connection. The `buildbot sendchange'
+ command is one example of a tool that knows how to send these
+ messages, so you can write a commit script for your VC system
+ that calls it to deliver the Change. There are other tools in
+ the contrib/ directory that use the same protocol.
+
+
+ As a quick guide, here is a list of VC systems and the
+ChangeSources that might be useful with them. All of these
+ChangeSources are in the `buildbot.changes' module.
+
+`CVS'
+ * freshcvs.FreshCVSSource (connected via TCP to the freshcvs
+ daemon)
+
+ * mail.FCMaildirSource (watching for email sent by a freshcvs
+ daemon)
+
+ * mail.BonsaiMaildirSource (watching for email sent by Bonsai)
+
+ * mail.SyncmailMaildirSource (watching for email sent by
+ syncmail)
+
+ * pb.PBChangeSource (listening for connections from `buildbot
+ sendchange' run in a loginfo script)
+
+ * pb.PBChangeSource (listening for connections from a
+ long-running `contrib/viewcvspoll.py' polling process which
+ examines the ViewCVS database directly)
+
+`SVN'
+ * pb.PBChangeSource (listening for connections from
+ `contrib/svn_buildbot.py' run in a postcommit script)
+
+ * pb.PBChangeSource (listening for connections from a
+ long-running `contrib/svn_watcher.py' or
+ `contrib/svnpoller.py' polling process)
+
+ * mail.SVNCommitEmailMaildirSource (watching for email sent
+ by commit-email.pl)
+
+ * svnpoller.SVNPoller (polling the SVN repository)
+
+`Darcs'
+ * pb.PBChangeSource (listening for connections from
+ `contrib/darcs_buildbot.py' in a commit script)
+
+`Mercurial'
+ * pb.PBChangeSource (listening for connections from
+ `contrib/hg_buildbot.py' run in an 'incoming' hook)
+
+ * pb.PBChangeSource (listening for connections from
+ `buildbot/changes/hgbuildbot.py' run as an in-process
+ 'changegroup' hook)
+
+`Arch/Bazaar'
+ * pb.PBChangeSource (listening for connections from
+ `contrib/arch_buildbot.py' run in a commit hook)
+
+`Bzr (the newer Bazaar)'
+ * pb.PBChangeSource (listening for connections from
+ `contrib/bzr_buildbot.py' run in a post-change-branch-tip
+ or commit hook)
+
+ * `contrib/bzr_buildbot.py''s BzrPoller (polling the Bzr
+ repository)
+
+`Git'
+ * pb.PBChangeSource (listening for connections from
+ `contrib/git_buildbot.py' run in the post-receive hook)
+
+
+ All VC systems can be driven by a PBChangeSource and the `buildbot
+sendchange' tool run from some form of commit script. If you write
+an email parsing function, they can also all be driven by a suitable
+`MaildirSource'.
+
+
+File: buildbot.info, Node: Choosing ChangeSources, Next: CVSToys - PBService, Prev: Change Sources, Up: Getting Source Code Changes
+
+5.2 Choosing ChangeSources
+==========================
+
+The `master.cfg' configuration file has a dictionary key named
+`BuildmasterConfig['change_source']', which holds the active
+`IChangeSource' object. The config file will typically create an
+object from one of the classes described below and stuff it into this
+key.
+
+ Each buildmaster typically has just a single ChangeSource, since
+it is only watching a single source tree. But if, for some reason,
+you need multiple sources, just set `c['change_source']' to a list of
+ChangeSources.. it will accept that too.
+
+ s = FreshCVSSourceNewcred(host="host", port=4519,
+ user="alice", passwd="secret",
+ prefix="Twisted")
+ BuildmasterConfig['change_source'] = [s]
+
+ Each source tree has a nominal `top'. Each Change has a list of
+filenames, which are all relative to this top location. The
+ChangeSource is responsible for doing whatever is necessary to
+accomplish this. Most sources have a `prefix' argument: a partial
+pathname which is stripped from the front of all filenames provided to
+that `ChangeSource'. Files which are outside this sub-tree are
+ignored by the changesource: it does not generate Changes for those
+files.
+
+
+File: buildbot.info, Node: CVSToys - PBService, Next: Mail-parsing ChangeSources, Prev: Choosing ChangeSources, Up: Getting Source Code Changes
+
+5.3 CVSToys - PBService
+=======================
+
+The CVSToys (http://purl.net/net/CVSToys) package provides a server
+which runs on the machine that hosts the CVS repository it watches.
+It has a variety of ways to distribute commit notifications, and
+offers a flexible regexp-based way to filter out uninteresting
+changes. One of the notification options is named `PBService' and
+works by listening on a TCP port for clients. These clients subscribe
+to hear about commit notifications.
+
+ The buildmaster has a CVSToys-compatible `PBService' client built
+in. There are two versions of it, one for old versions of CVSToys
+(1.0.9 and earlier) which used the `oldcred' authentication
+framework, and one for newer versions (1.0.10 and later) which use
+`newcred'. Both are classes in the `buildbot.changes.freshcvs'
+package.
+
+ `FreshCVSSourceNewcred' objects are created with the following
+parameters:
+
+``host' and `port''
+ these specify where the CVSToys server can be reached
+
+``user' and `passwd''
+ these specify the login information for the CVSToys server
+ (`freshcvs'). These must match the server's values, which are
+ defined in the `freshCfg' configuration file (which lives in the
+ CVSROOT directory of the repository).
+
+``prefix''
+ this is the prefix to be found and stripped from filenames
+ delivered by the CVSToys server. Most projects live in
+ sub-directories of the main repository, as siblings of the
+ CVSROOT sub-directory, so typically this prefix is set to that
+ top sub-directory name.
+
+
+Example
+=======
+
+To set up the freshCVS server, add a statement like the following to
+your `freshCfg' file:
+
+ pb = ConfigurationSet([
+ (None, None, None, PBService(userpass=('foo', 'bar'), port=4519)),
+ ])
+
+ This will announce all changes to a client which connects to port
+4519 using a username of 'foo' and a password of 'bar'.
+
+ Then add a clause like this to your buildmaster's `master.cfg':
+
+ BuildmasterConfig['change_source'] = FreshCVSSource("cvs.example.com", 4519,
+ "foo", "bar",
+ prefix="glib/")
+
+ where "cvs.example.com" is the host that is running the FreshCVS
+daemon, and "glib" is the top-level directory (relative to the
+repository's root) where all your source code lives. Most projects
+keep one or more projects in the same repository (along with CVSROOT/
+to hold admin files like loginfo and freshCfg); the prefix= argument
+tells the buildmaster to ignore everything outside that directory,
+and to strip that common prefix from all pathnames it handles.
+
+
+File: buildbot.info, Node: Mail-parsing ChangeSources, Next: PBChangeSource, Prev: CVSToys - PBService, Up: Getting Source Code Changes
+
+5.4 Mail-parsing ChangeSources
+==============================
+
+Many projects publish information about changes to their source tree
+by sending an email message out to a mailing list, frequently named
+PROJECT-commits or PROJECT-changes. Each message usually contains a
+description of the change (who made the change, which files were
+affected) and sometimes a copy of the diff. Humans can subscribe to
+this list to stay informed about what's happening to the source tree.
+
+ The Buildbot can also be subscribed to a -commits mailing list, and
+can trigger builds in response to Changes that it hears about. The
+buildmaster admin needs to arrange for these email messages to arrive
+in a place where the buildmaster can find them, and configure the
+buildmaster to parse the messages correctly. Once that is in place,
+the email parser will create Change objects and deliver them to the
+Schedulers (see *note Change Sources and Schedulers::) just like any
+other ChangeSource.
+
+ There are two components to setting up an email-based ChangeSource.
+The first is to route the email messages to the buildmaster, which is
+done by dropping them into a "maildir". The second is to actually
+parse the messages, which is highly dependent upon the tool that was
+used to create them. Each VC system has a collection of favorite
+change-emailing tools, and each has a slightly different format, so
+each has a different parsing function. There is a separate
+ChangeSource variant for each parsing function.
+
+ Once you've chosen a maildir location and a parsing function,
+create the change source and put it in `c['change_source']':
+
+ from buildbot.changes.mail import SyncmailMaildirSource
+ c['change_source'] = SyncmailMaildirSource("~/maildir-buildbot",
+ prefix="/trunk/")
+
+* Menu:
+
+* Subscribing the Buildmaster::
+* Using Maildirs::
+* Parsing Email Change Messages::
+
+
+File: buildbot.info, Node: Subscribing the Buildmaster, Next: Using Maildirs, Prev: Mail-parsing ChangeSources, Up: Mail-parsing ChangeSources
+
+5.4.1 Subscribing the Buildmaster
+---------------------------------
+
+The recommended way to install the buildbot is to create a dedicated
+account for the buildmaster. If you do this, the account will probably
+have a distinct email address (perhaps <buildmaster@example.org>).
+Then just arrange for this account's email to be delivered to a
+suitable maildir (described in the next section).
+
+ If the buildbot does not have its own account, "extension
+addresses" can be used to distinguish between email intended for the
+buildmaster and email intended for the rest of the account. In most
+modern MTAs, the e.g. `foo@example.org' account has control over
+every email address at example.org which begins with "foo", such that
+email addressed to <account-foo@example.org> can be delivered to a
+different destination than <account-bar@example.org>. qmail does this
+by using separate .qmail files for the two destinations (`.qmail-foo'
+and `.qmail-bar', with `.qmail' controlling the base address and
+`.qmail-default' controlling all other extensions). Other MTAs have
+similar mechanisms.
+
+ Thus you can assign an extension address like
+<foo-buildmaster@example.org> to the buildmaster, and retain
+<foo@example.org> for your own use.
+
+
+File: buildbot.info, Node: Using Maildirs, Next: Parsing Email Change Messages, Prev: Subscribing the Buildmaster, Up: Mail-parsing ChangeSources
+
+5.4.2 Using Maildirs
+--------------------
+
+A "maildir" is a simple directory structure originally developed for
+qmail that allows safe atomic update without locking. Create a base
+directory with three subdirectories: "new", "tmp", and "cur". When
+messages arrive, they are put into a uniquely-named file (using pids,
+timestamps, and random numbers) in "tmp". When the file is complete,
+it is atomically renamed into "new". Eventually the buildmaster
+notices the file in "new", reads and parses the contents, then moves
+it into "cur". A cronjob can be used to delete files in "cur" at
+leisure.
+
+ Maildirs are frequently created with the `maildirmake' tool, but a
+simple `mkdir -p ~/MAILDIR/{cur,new,tmp}' is pretty much equivalent.
+
+ Many modern MTAs can deliver directly to maildirs. The usual
+.forward or .procmailrc syntax is to name the base directory with a
+trailing slash, so something like `~/MAILDIR/' . qmail and postfix are
+maildir-capable MTAs, and procmail is a maildir-capable MDA (Mail
+Delivery Agent).
+
+ For MTAs which cannot put files into maildirs directly, the
+"safecat" tool can be executed from a .forward file to accomplish the
+same thing.
+
+ The Buildmaster uses the linux DNotify facility to receive
+immediate notification when the maildir's "new" directory has
+changed. When this facility is not available, it polls the directory
+for new messages, every 10 seconds by default.
+
+
+File: buildbot.info, Node: Parsing Email Change Messages, Prev: Using Maildirs, Up: Mail-parsing ChangeSources
+
+5.4.3 Parsing Email Change Messages
+-----------------------------------
+
+The second component to setting up an email-based ChangeSource is to
+parse the actual notices. This is highly dependent upon the VC system
+and commit script in use.
+
+ A couple of common tools used to create these change emails are:
+
+`CVS'
+
+ `CVSToys MailNotifier'
+ *note FCMaildirSource::
+
+ `Bonsai notification'
+ *note BonsaiMaildirSource::
+
+ `syncmail'
+ *note SyncmailMaildirSource::
+
+`SVN'
+
+ `svnmailer'
+ http://opensource.perlig.de/en/svnmailer/
+
+ `commit-email.pl'
+ *note SVNCommitEmailMaildirSource::
+
+`Mercurial'
+
+ `NotifyExtension'
+ http://www.selenic.com/mercurial/wiki/index.cgi/NotifyExtension
+
+`Git'
+
+ `post-receive-email'
+ http://git.kernel.org/?p=git/git.git;a=blob;f=contrib/hooks/post-receive-email;hb=HEAD
+
+
+ The following sections describe the parsers available for each of
+these tools.
+
+ Most of these parsers accept a `prefix=' argument, which is used
+to limit the set of files that the buildmaster pays attention to. This
+is most useful for systems like CVS and SVN which put multiple
+projects in a single repository (or use repository names to indicate
+branches). Each filename that appears in the email is tested against
+the prefix: if the filename does not start with the prefix, the file
+is ignored. If the filename _does_ start with the prefix, that prefix
+is stripped from the filename before any further processing is done.
+Thus the prefix usually ends with a slash.
+
+* Menu:
+
+* FCMaildirSource::
+* SyncmailMaildirSource::
+* BonsaiMaildirSource::
+* SVNCommitEmailMaildirSource::
+
+
+File: buildbot.info, Node: FCMaildirSource, Next: SyncmailMaildirSource, Prev: Parsing Email Change Messages, Up: Parsing Email Change Messages
+
+5.4.3.1 FCMaildirSource
+.......................
+
+http://twistedmatrix.com/users/acapnotic/wares/code/CVSToys/
+
+ This parser works with the CVSToys `MailNotification' action,
+which will send email to a list of recipients for each commit. This
+tends to work better than using `/bin/mail' from within the
+CVSROOT/loginfo file directly, as CVSToys will batch together all
+files changed during the same CVS invocation, and can provide more
+information (like creating a ViewCVS URL for each file changed).
+
+ The Buildbot's `FCMaildirSource' knows how to parse these CVSToys
+messages and turn them into Change objects. It can be given two
+parameters: the directory name of the maildir root, and the prefix to
+strip.
+
+ from buildbot.changes.mail import FCMaildirSource
+ c['change_source'] = FCMaildirSource("~/maildir-buildbot")
+
+
+File: buildbot.info, Node: SyncmailMaildirSource, Next: BonsaiMaildirSource, Prev: FCMaildirSource, Up: Parsing Email Change Messages
+
+5.4.3.2 SyncmailMaildirSource
+.............................
+
+http://sourceforge.net/projects/cvs-syncmail
+
+ `SyncmailMaildirSource' knows how to parse the message format used
+by the CVS "syncmail" script.
+
+ from buildbot.changes.mail import SyncmailMaildirSource
+ c['change_source'] = SyncmailMaildirSource("~/maildir-buildbot")
+
+
+File: buildbot.info, Node: BonsaiMaildirSource, Next: SVNCommitEmailMaildirSource, Prev: SyncmailMaildirSource, Up: Parsing Email Change Messages
+
+5.4.3.3 BonsaiMaildirSource
+...........................
+
+http://www.mozilla.org/bonsai.html
+
+ `BonsaiMaildirSource' parses messages sent out by Bonsai, the CVS
+tree-management system built by Mozilla.
+
+ from buildbot.changes.mail import BonsaiMaildirSource
+ c['change_source'] = BonsaiMaildirSource("~/maildir-buildbot")
+
+
+File: buildbot.info, Node: SVNCommitEmailMaildirSource, Prev: BonsaiMaildirSource, Up: Parsing Email Change Messages
+
+5.4.3.4 SVNCommitEmailMaildirSource
+...................................
+
+`SVNCommitEmailMaildirSource' parses messages sent out by the
+`commit-email.pl' script, which is included in the Subversion
+distribution.
+
+ It does not currently handle branches: all of the Change objects
+that it creates will be associated with the default (i.e. trunk)
+branch.
+
+ from buildbot.changes.mail import SVNCommitEmailMaildirSource
+ c['change_source'] = SVNCommitEmailMaildirSource("~/maildir-buildbot")
+
+
+File: buildbot.info, Node: PBChangeSource, Next: P4Source, Prev: Mail-parsing ChangeSources, Up: Getting Source Code Changes
+
+5.5 PBChangeSource
+==================
+
+The last kind of ChangeSource actually listens on a TCP port for
+clients to connect and push change notices _into_ the Buildmaster.
+This is used by the built-in `buildbot sendchange' notification tool,
+as well as the VC-specific `contrib/svn_buildbot.py',
+`contrib/arch_buildbot.py', `contrib/hg_buildbot.py' tools, and the
+`buildbot.changes.hgbuildbot' hook. These tools are run by the
+repository (in a commit hook script), and connect to the buildmaster
+directly each time a file is committed. This is also useful for
+creating new kinds of change sources that work on a `push' model
+instead of some kind of subscription scheme, for example a script
+which is run out of an email .forward file.
+
+ This ChangeSource can be configured to listen on its own TCP port,
+or it can share the port that the buildmaster is already using for the
+buildslaves to connect. (This is possible because the
+`PBChangeSource' uses the same protocol as the buildslaves, and they
+can be distinguished by the `username' attribute used when the
+initial connection is established). It might be useful to have it
+listen on a different port if, for example, you wanted to establish
+different firewall rules for that port. You could allow only the SVN
+repository machine access to the `PBChangeSource' port, while
+allowing only the buildslave machines access to the slave port. Or you
+could just expose one port and run everything over it. _Note: this
+feature is not yet implemented, the PBChangeSource will always share
+the slave port and will always have a `user' name of `change', and a
+passwd of `changepw'. These limitations will be removed in the
+future._.
+
+ The `PBChangeSource' is created with the following arguments. All
+are optional.
+
+``port''
+ which port to listen on. If `None' (which is the default), it
+ shares the port used for buildslave connections. _Not
+ Implemented, always set to `None'_.
+
+``user' and `passwd''
+ The user/passwd account information that the client program must
+ use to connect. Defaults to `change' and `changepw'. _Not
+ Implemented, `user' is currently always set to `change',
+ `passwd' is always set to `changepw'_.
+
+``prefix''
+ The prefix to be found and stripped from filenames delivered
+ over the connection. Any filenames which do not start with this
+ prefix will be removed. If all the filenames in a given Change
+     are removed, then that whole Change will be dropped. This string
+ should probably end with a directory separator.
+
+ This is useful for changes coming from version control systems
+ that represent branches as parent directories within the
+ repository (like SVN and Perforce). Use a prefix of 'trunk/' or
+ 'project/branches/foobranch/' to only follow one branch and to
+ get correct tree-relative filenames. Without a prefix, the
+ PBChangeSource will probably deliver Changes with filenames like
+ `trunk/foo.c' instead of just `foo.c'. Of course this also
+ depends upon the tool sending the Changes in (like `buildbot
+ sendchange') and what filenames it is delivering: that tool may
+ be filtering and stripping prefixes at the sending end.
+
+
+
+File: buildbot.info, Node: P4Source, Next: BonsaiPoller, Prev: PBChangeSource, Up: Getting Source Code Changes
+
+5.6 P4Source
+============
+
+The `P4Source' periodically polls a Perforce
+(http://www.perforce.com/) depot for changes. It accepts the
+following arguments:
+
+``p4base''
+ The base depot path to watch, without the trailing '/...'.
+
+``p4port''
+ The Perforce server to connect to (as host:port).
+
+``p4user''
+ The Perforce user.
+
+``p4passwd''
+ The Perforce password.
+
+``p4bin''
+ An optional string parameter. Specify the location of the
+ perforce command line binary (p4). You only need to do this if
+ the perforce binary is not in the path of the buildbot user.
+ Defaults to "p4".
+
+``split_file''
+ A function that maps a pathname, without the leading `p4base',
+ to a (branch, filename) tuple. The default just returns (None,
+ branchfile), which effectively disables branch support. You
+ should supply a function which understands your repository
+ structure.
+
+``pollinterval''
+ How often to poll, in seconds. Defaults to 600 (10 minutes).
+
+``histmax''
+ The maximum number of changes to inspect at a time. If more than
+ this number occur since the last poll, older changes will be
+ silently ignored.
+
+Example
+=======
+
+This configuration uses the `P4PORT', `P4USER', and `P4PASSWD'
+specified in the buildmaster's environment. It watches a project in
+which the branch name is simply the next path component, and the file
+is all path components after.
+
+     from buildbot.changes import p4poller
+ s = p4poller.P4Source(p4base='//depot/project/',
+ split_file=lambda branchfile: branchfile.split('/',1),
+ )
+ c['change_source'] = s
+
+
+File: buildbot.info, Node: BonsaiPoller, Next: SVNPoller, Prev: P4Source, Up: Getting Source Code Changes
+
+5.7 BonsaiPoller
+================
+
+The `BonsaiPoller' periodically polls a Bonsai server. This is a CGI
+script accessed through a web server that provides information about
+a CVS tree, for example the Mozilla bonsai server at
+`http://bonsai.mozilla.org'. Bonsai servers are usable by both humans
+and machines. In this case, the buildbot's change source forms a
+query which asks about any files in the specified branch which have
+changed since the last query.
+
+ Please take a look at the BonsaiPoller docstring for details about
+the arguments it accepts.
+
+
+File: buildbot.info, Node: SVNPoller, Next: MercurialHook, Prev: BonsaiPoller, Up: Getting Source Code Changes
+
+5.8 SVNPoller
+=============
+
+The `buildbot.changes.svnpoller.SVNPoller' is a ChangeSource which
+periodically polls a Subversion (http://subversion.tigris.org/)
+repository for new revisions, by running the `svn log' command in a
+subshell. It can watch a single branch or multiple branches.
+
+ `SVNPoller' accepts the following arguments:
+
+`svnurl'
+ The base URL path to watch, like
+ `svn://svn.twistedmatrix.com/svn/Twisted/trunk', or
+ `http://divmod.org/svn/Divmod/', or even
+ `file:///home/svn/Repository/ProjectA/branches/1.5/'. This must
+ include the access scheme, the location of the repository (both
+ the hostname for remote ones, and any additional directory names
+ necessary to get to the repository), and the sub-path within the
+ repository's virtual filesystem for the project and branch of
+ interest.
+
+ The `SVNPoller' will only pay attention to files inside the
+ subdirectory specified by the complete svnurl.
+
+`split_file'
+ A function to convert pathnames into (branch, relative_pathname)
+ tuples. Use this to explain your repository's branch-naming
+ policy to `SVNPoller'. This function must accept a single string
+ and return a two-entry tuple. There are a few utility functions
+ in `buildbot.changes.svnpoller' that can be used as a
+ `split_file' function, see below for details.
+
+ The default value always returns (None, path), which indicates
+ that all files are on the trunk.
+
+ Subclasses of `SVNPoller' can override the `split_file' method
+ instead of using the `split_file=' argument.
+
+`svnuser'
+     An optional string parameter. If set, the `--username' argument will
+ be added to all `svn' commands. Use this if you have to
+ authenticate to the svn server before you can do `svn info' or
+ `svn log' commands.
+
+`svnpasswd'
+ Like `svnuser', this will cause a `--password' argument to be
+ passed to all svn commands.
+
+`pollinterval'
+ How often to poll, in seconds. Defaults to 600 (checking once
+ every 10 minutes). Lower this if you want the buildbot to notice
+ changes faster, raise it if you want to reduce the network and
+ CPU load on your svn server. Please be considerate of public SVN
+ repositories by using a large interval when polling them.
+
+`histmax'
+ The maximum number of changes to inspect at a time. Every
+ POLLINTERVAL seconds, the `SVNPoller' asks for the last HISTMAX
+ changes and looks through them for any ones it does not already
+ know about. If more than HISTMAX revisions have been committed
+ since the last poll, older changes will be silently ignored.
+ Larger values of histmax will cause more time and memory to be
+ consumed on each poll attempt. `histmax' defaults to 100.
+
+`svnbin'
+ This controls the `svn' executable to use. If subversion is
+ installed in a weird place on your system (outside of the
+ buildmaster's `$PATH'), use this to tell `SVNPoller' where to
+ find it. The default value of "svn" will almost always be
+ sufficient.
+
+
+Branches
+========
+
+Each source file that is tracked by a Subversion repository has a
+fully-qualified SVN URL in the following form:
+(REPOURL)(PROJECT-plus-BRANCH)(FILEPATH). When you create the
+`SVNPoller', you give it a `svnurl' value that includes all of the
+REPOURL and possibly some portion of the PROJECT-plus-BRANCH string.
+The `SVNPoller' is responsible for producing Changes that contain a
+branch name and a FILEPATH (which is relative to the top of a
+checked-out tree). The details of how these strings are split up
+depend upon how your repository names its branches.
+
+PROJECT/BRANCHNAME/FILEPATH repositories
+----------------------------------------
+
+One common layout is to have all the various projects that share a
+repository get a single top-level directory each. Then under a given
+project's directory, you get two subdirectories, one named "trunk"
+and another named "branches". Under "branches" you have a bunch of
+other directories, one per branch, with names like "1.5.x" and
+"testing". It is also common to see directories like "tags" and
+"releases" next to "branches" and "trunk".
+
+ For example, the Twisted project has a subversion server on
+"svn.twistedmatrix.com" that hosts several sub-projects. The
+repository is available through a SCHEME of "svn:". The primary
+sub-project is Twisted, of course, with a repository root of
+"svn://svn.twistedmatrix.com/svn/Twisted". Another sub-project is
+Informant, with a root of
+"svn://svn.twistedmatrix.com/svn/Informant", etc. Inside any
+checked-out Twisted tree, there is a file named bin/trial (which is
+used to run unit test suites).
+
+ The trunk for Twisted is in
+"svn://svn.twistedmatrix.com/svn/Twisted/trunk", and the
+fully-qualified SVN URL for the trunk version of `trial' would be
+"svn://svn.twistedmatrix.com/svn/Twisted/trunk/bin/trial". The same
+SVNURL for that file on a branch named "1.5.x" would be
+"svn://svn.twistedmatrix.com/svn/Twisted/branches/1.5.x/bin/trial".
+
+ To set up a `SVNPoller' that watches the Twisted trunk (and
+nothing else), we would use the following:
+
+ from buildbot.changes.svnpoller import SVNPoller
+ c['change_source'] = SVNPoller("svn://svn.twistedmatrix.com/svn/Twisted/trunk")
+
+ In this case, every Change that our `SVNPoller' produces will have
+`.branch=None', to indicate that the Change is on the trunk. No
+other sub-projects or branches will be tracked.
+
+ If we want our ChangeSource to follow multiple branches, we have
+to do two things. First we have to change our `svnurl=' argument to
+watch more than just ".../Twisted/trunk". We will set it to
+".../Twisted" so that we'll see both the trunk and all the branches.
+Second, we have to tell `SVNPoller' how to split the
+(PROJECT-plus-BRANCH)(FILEPATH) strings it gets from the repository
+out into (BRANCH) and (FILEPATH) pairs.
+
+ We do the latter by providing a "split_file" function. This
+function is responsible for splitting something like
+"branches/1.5.x/bin/trial" into `branch'="branches/1.5.x" and
+`filepath'="bin/trial". This function is always given a string that
+names a file relative to the subdirectory pointed to by the
+`SVNPoller''s `svnurl=' argument. It is expected to return a
+(BRANCHNAME, FILEPATH) tuple (in which FILEPATH is relative to the
+branch indicated), or None to indicate that the file is outside any
+project of interest.
+
+ (note that we want to see "branches/1.5.x" rather than just
+"1.5.x" because when we perform the SVN checkout, we will probably
+append the branch name to the baseURL, which requires that we keep the
+"branches" component in there. Other VC schemes use a different
+approach towards branches and may not require this artifact.)
+
+ If your repository uses this same PROJECT/BRANCH/FILEPATH naming
+scheme, the following function will work:
+
+ def split_file_branches(path):
+ pieces = path.split('/')
+ if pieces[0] == 'trunk':
+ return (None, '/'.join(pieces[1:]))
+ elif pieces[0] == 'branches':
+ return ('/'.join(pieces[0:2]),
+ '/'.join(pieces[2:]))
+ else:
+ return None
+
+ This function is provided as
+`buildbot.changes.svnpoller.split_file_branches' for your
+convenience. So to have our Twisted-watching `SVNPoller' follow
+multiple branches, we would use this:
+
+ from buildbot.changes.svnpoller import SVNPoller, split_file_branches
+ c['change_source'] = SVNPoller("svn://svn.twistedmatrix.com/svn/Twisted",
+ split_file=split_file_branches)
+
+ Changes for all sorts of branches (with names like
+"branches/1.5.x", and None to indicate the trunk) will be delivered
+to the Schedulers. Each Scheduler is then free to use or ignore each
+branch as it sees fit.
+
+BRANCHNAME/PROJECT/FILEPATH repositories
+----------------------------------------
+
+Another common way to organize a Subversion repository is to put the
+branch name at the top, and the projects underneath. This is
+especially frequent when there are a number of related sub-projects
+that all get released in a group.
+
+ For example, Divmod.org hosts a project named "Nevow" as well as
+one named "Quotient". In a checked-out Nevow tree there is a directory
+named "formless" that contains a python source file named
+"webform.py". This repository is accessible via webdav (and thus uses
+an "http:" scheme) through the divmod.org hostname. There are many
+branches in this repository, and they use a (BRANCHNAME)/(PROJECT)
+naming policy.
+
+ The fully-qualified SVN URL for the trunk version of webform.py is
+`http://divmod.org/svn/Divmod/trunk/Nevow/formless/webform.py'. You
+can do an `svn co' with that URL and get a copy of the latest
+version. The 1.5.x branch version of this file would have a URL of
+`http://divmod.org/svn/Divmod/branches/1.5.x/Nevow/formless/webform.py'.
+The whole Nevow trunk would be checked out with
+`http://divmod.org/svn/Divmod/trunk/Nevow', while the Quotient trunk
+would be checked out using
+`http://divmod.org/svn/Divmod/trunk/Quotient'.
+
+ Now suppose we want to have an `SVNPoller' that only cares about
+the Nevow trunk. This case looks just like the PROJECT/BRANCH layout
+described earlier:
+
+ from buildbot.changes.svnpoller import SVNPoller
+ c['change_source'] = SVNPoller("http://divmod.org/svn/Divmod/trunk/Nevow")
+
+ But what happens when we want to track multiple Nevow branches? We
+have to point our `svnurl=' high enough to see all those branches,
+but we also don't want to include Quotient changes (since we're only
+building Nevow). To accomplish this, we must rely upon the
+`split_file' function to help us tell the difference between files
+that belong to Nevow and those that belong to Quotient, as well as
+figuring out which branch each one is on.
+
+ from buildbot.changes.svnpoller import SVNPoller
+ c['change_source'] = SVNPoller("http://divmod.org/svn/Divmod",
+ split_file=my_file_splitter)
+
+ The `my_file_splitter' function will be called with
+repository-relative pathnames like:
+
+`trunk/Nevow/formless/webform.py'
+ This is a Nevow file, on the trunk. We want the Change that
+     includes this to see a filename of `formless/webform.py', and a
+ branch of None
+
+`branches/1.5.x/Nevow/formless/webform.py'
+ This is a Nevow file, on a branch. We want to get
+ branch="branches/1.5.x" and filename="formless/webform.py".
+
+`trunk/Quotient/setup.py'
+ This is a Quotient file, so we want to ignore it by having
+ `my_file_splitter' return None.
+
+`branches/1.5.x/Quotient/setup.py'
+ This is also a Quotient file, which should be ignored.
+
+ The following definition for `my_file_splitter' will do the job:
+
+ def my_file_splitter(path):
+ pieces = path.split('/')
+ if pieces[0] == 'trunk':
+ branch = None
+ pieces.pop(0) # remove 'trunk'
+ elif pieces[0] == 'branches':
+ pieces.pop(0) # remove 'branches'
+ # grab branch name
+ branch = 'branches/' + pieces.pop(0)
+ else:
+ return None # something weird
+ projectname = pieces.pop(0)
+ if projectname != 'Nevow':
+ return None # wrong project
+ return (branch, '/'.join(pieces))
+
+
+File: buildbot.info, Node: MercurialHook, Next: Bzr Hook, Prev: SVNPoller, Up: Getting Source Code Changes
+
+5.9 MercurialHook
+=================
+
+Since Mercurial is written in python, the hook script can invoke
+Buildbot's `sendchange' function directly, rather than having to
+spawn an external process. This function delivers the same sort of
+changes as `buildbot sendchange' and the various hook scripts in
+contrib/, so you'll need to add a `pb.PBChangeSource' to your
+buildmaster to receive these changes.
+
+ To set this up, first choose a Mercurial repository that represents
+your central "official" source tree. This will be the same repository
+that your buildslaves will eventually pull from. Install Buildbot on
+the machine that hosts this repository, using the same version of
+python as Mercurial is using (so that the Mercurial hook can import
+code from buildbot). Then add the following to the `.hg/hgrc' file in
+that repository, replacing the buildmaster hostname/portnumber as
+appropriate for your buildbot:
+
+ [hooks]
+ changegroup.buildbot = python:buildbot.changes.hgbuildbot.hook
+
+ [hgbuildbot]
+ master = buildmaster.example.org:9987
+
+ (Note that Mercurial lets you define multiple `changegroup' hooks
+by giving them distinct names, like `changegroup.foo' and
+`changegroup.bar', which is why we use `changegroup.buildbot' in this
+example. There is nothing magical about the "buildbot" suffix in the
+hook name. The `[hgbuildbot]' section _is_ special, however, as it is
+the only section that the buildbot hook pays attention to.)
+
+ Also note that this runs as a `changegroup' hook, rather than as
+an `incoming' hook. The `changegroup' hook is run with multiple
+revisions at a time (say, if multiple revisions are being pushed to
+this repository in a single `hg push' command), whereas the
+`incoming' hook is run with just one revision at a time. The
+`hgbuildbot.hook' function will only work with the `changegroup' hook.
+
+ The `[hgbuildbot]' section has two other parameters that you might
+specify, both of which control the name of the branch that is
+attached to the changes coming from this hook.
+
+ One common branch naming policy for Mercurial repositories is to
+use it just like Darcs: each branch goes into a separate repository,
+and all the branches for a single project share a common parent
+directory. For example, you might have `/var/repos/PROJECT/trunk/'
+and `/var/repos/PROJECT/release'. To use this style, use the
+`branchtype = dirname' setting, which simply uses the last component
+of the repository's enclosing directory as the branch name:
+
+ [hgbuildbot]
+ master = buildmaster.example.org:9987
+ branchtype = dirname
+
+ Another approach is to use Mercurial's built-in branches (the kind
+created with `hg branch' and listed with `hg branches'). This feature
+associates persistent names with particular lines of descent within a
+single repository. (note that the buildbot `source.Mercurial'
+checkout step does not yet support this kind of branch). To have the
+commit hook deliver this sort of branch name with the Change object,
+use `branchtype = inrepo':
+
+ [hgbuildbot]
+ master = buildmaster.example.org:9987
+ branchtype = inrepo
+
+ Finally, if you want to simply specify the branchname directly, for
+all changes, use `branch = BRANCHNAME'. This overrides `branchtype':
+
+ [hgbuildbot]
+ master = buildmaster.example.org:9987
+ branch = trunk
+
+ If you use `branch=' like this, you'll need to put a separate
+.hgrc in each repository. If you use `branchtype=', you may be able
+to use the same .hgrc for all your repositories, stored in `~/.hgrc'
+or `/etc/mercurial/hgrc'.
+
+
+File: buildbot.info, Node: Bzr Hook, Next: Bzr Poller, Prev: MercurialHook, Up: Getting Source Code Changes
+
+5.10 Bzr Hook
+=============
+
+Bzr is also written in Python, and the Bzr hook depends on Twisted to
+send the changes.
+
+ To install, put `contrib/bzr_buildbot.py' in one of your plugins
+locations a bzr plugins directory (e.g., `~/.bazaar/plugins'). Then,
+in one of your bazaar conf files (e.g., `~/.bazaar/locations.conf'),
+set the location you want to connect with buildbot with these keys:
+
+`buildbot_on'
+     one of 'commit', 'push', or 'change'. Turns the plugin on to
+ report changes via commit, changes via push, or any changes to
+ the trunk. 'change' is recommended.
+
+`buildbot_server'
+ (required to send to a buildbot master) the URL of the buildbot
+ master to which you will connect (as of this writing, the same
+ server and port to which slaves connect).
+
+`buildbot_port'
+ (optional, defaults to 9989) the port of the buildbot master to
+ which you will connect (as of this writing, the same server and
+ port to which slaves connect)
+
+`buildbot_pqm'
+ (optional, defaults to not pqm) Normally, the user that commits
+ the revision is the user that is responsible for the change.
+ When run in a pqm (Patch Queue Manager, see
+ https://launchpad.net/pqm) environment, the user that commits is
+ the Patch Queue Manager, and the user that committed the
+ *parent* revision is responsible for the change. To turn on the
+ pqm mode, set this value to any of (case-insensitive) "Yes",
+ "Y", "True", or "T".
+
+`buildbot_dry_run'
+ (optional, defaults to not a dry run) Normally, the post-commit
+ hook will attempt to communicate with the configured buildbot
+ server and port. If this parameter is included and any of
+ (case-insensitive) "Yes", "Y", "True", or "T", then the hook
+ will simply print what it would have sent, but not attempt to
+ contact the buildbot master.
+
+`buildbot_send_branch_name'
+ (optional, defaults to not sending the branch name) If your
+ buildbot's bzr source build step uses a repourl, do *not* turn
+ this on. If your buildbot's bzr build step uses a baseURL, then
+ you may set this value to any of (case-insensitive) "Yes", "Y",
+ "True", or "T" to have the buildbot master append the branch
+ name to the baseURL.
+
+
+ When buildbot no longer has a hardcoded password, it will be a
+configuration option here as well.
+
+ Here's a simple example that you might have in your
+`~/.bazaar/locations.conf'.
+
+ [chroot-*:///var/local/myrepo/mybranch]
+ buildbot_on = change
+ buildbot_server = localhost
+
+
+File: buildbot.info, Node: Bzr Poller, Prev: Bzr Hook, Up: Getting Source Code Changes
+
+5.11 Bzr Poller
+===============
+
+If you cannot insert a Bzr hook in the server, you can use the Bzr
+Poller. To use, put `contrib/bzr_buildbot.py' somewhere that your
+buildbot configuration can import it. Even putting it in the same
+directory as the master.cfg should work. Install the poller in the
+buildbot configuration as with any other change source. Minimally,
+provide a URL that you want to poll (bzr://, bzr+ssh://, or lp:),
+though make sure the buildbot user has necessary privileges. You may
+also want to specify these optional values.
+
+`poll_interval'
+ The number of seconds to wait between polls. Defaults to 10
+ minutes.
+
+`branch_name'
+ Any value to be used as the branch name. Defaults to None, or
+ specify a string, or specify the constants from
+ `bzr_buildbot.py' SHORT or FULL to get the short branch name or
+ full branch address.
+
+`blame_merge_author'
+ normally, the user that commits the revision is the user that is
+ responsible for the change. When run in a pqm (Patch Queue
+ Manager, see https://launchpad.net/pqm) environment, the user
+ that commits is the Patch Queue Manager, and the user that
+ committed the merged, *parent* revision is responsible for the
+     change. Set this value to True if this is pointed against a
+ PQM-managed branch.
+
+
+File: buildbot.info, Node: Build Process, Next: Status Delivery, Prev: Getting Source Code Changes, Up: Top
+
+6 Build Process
+***************
+
+A `Build' object is responsible for actually performing a build. It
+gets access to a remote `SlaveBuilder' where it may run commands, and
+a `BuildStatus' object where it must emit status events. The `Build'
+is created by the Builder's `BuildFactory'.
+
+ The default `Build' class is made up of a fixed sequence of
+`BuildSteps', executed one after another until all are complete (or
+one of them indicates that the build should be halted early). The
+default `BuildFactory' creates instances of this `Build' class with a
+list of `BuildSteps', so the basic way to configure the build is to
+provide a list of `BuildSteps' to your `BuildFactory'.
+
+ More complicated `Build' subclasses can make other decisions:
+execute some steps only if certain files were changed, or if certain
+previous steps passed or failed. The base class has been written to
+allow users to express basic control flow without writing code, but
+you can always subclass and customize to achieve more specialized
+behavior.
+
+* Menu:
+
+* Build Steps::
+* Interlocks::
+* Build Factories::
+
+
+File: buildbot.info, Node: Build Steps, Next: Interlocks, Prev: Build Process, Up: Build Process
+
+6.1 Build Steps
+===============
+
+`BuildStep's are usually specified in the buildmaster's configuration
+file, in a list that goes into the `BuildFactory'. The `BuildStep'
+instances in this list are used as templates to construct new
+independent copies for each build (so that state can be kept on the
+`BuildStep' in one build without affecting a later build). Each
+`BuildFactory' can be created with a list of steps, or the factory
+can be created empty and then steps added to it using the `addStep'
+method:
+
+ from buildbot.steps import source, shell
+ from buildbot.process import factory
+
+ f = factory.BuildFactory()
+ f.addStep(source.SVN(svnurl="http://svn.example.org/Trunk/"))
+ f.addStep(shell.ShellCommand(command=["make", "all"]))
+ f.addStep(shell.ShellCommand(command=["make", "test"]))
+
+ In earlier versions (0.7.5 and older), these steps were specified
+with a tuple of (step_class, keyword_arguments). Steps can still be
+specified this way, but the preferred form is to pass actual
+`BuildStep' instances to `addStep', because that gives the
+`BuildStep' class a chance to do some validation on the arguments.
+
+ If you have a common set of steps which are used in several
+factories, the `addSteps' method may be handy. It takes an iterable
+of `BuildStep' instances.
+
+ setup_steps = [
+      source.SVN(svnurl="http://svn.example.org/Trunk/"),
+ shell.ShellCommand(command="./setup")
+ ]
+ quick = factory.BuildFactory()
+ quick.addSteps(setup_steps)
+     quick.addStep(shell.ShellCommand(command="make quick"))
+
+ The rest of this section lists all the standard BuildStep objects
+available for use in a Build, and the parameters which can be used to
+control each.
+
+* Menu:
+
+* Common Parameters::
+* Using Build Properties::
+* Source Checkout::
+* ShellCommand::
+* Simple ShellCommand Subclasses::
+* Python BuildSteps::
+* Transferring Files::
+* Steps That Run on the Master::
+* Triggering Schedulers::
+* Writing New BuildSteps::
+
+
+File: buildbot.info, Node: Common Parameters, Next: Using Build Properties, Prev: Build Steps, Up: Build Steps
+
+6.1.1 Common Parameters
+-----------------------
+
+The standard `Build' runs a series of `BuildStep's in order, only
+stopping when it runs out of steps or if one of them requests that
+the build be halted. It collects status information from each one to
+create an overall build status (of SUCCESS, WARNINGS, or FAILURE).
+
+ All BuildSteps accept some common parameters. Some of these control
+how their individual status affects the overall build. Others are used
+to specify which `Locks' (see *note Interlocks::) should be acquired
+before allowing the step to run.
+
+ Arguments common to all `BuildStep' subclasses:
+
+`name'
+ the name used to describe the step on the status display. It is
+ also used to give a name to any LogFiles created by this step.
+
+`haltOnFailure'
+ if True, a FAILURE of this build step will cause the build to
+ halt immediately. Steps with `alwaysRun=True' are still run.
+ Generally speaking, haltOnFailure implies flunkOnFailure (the
+ default for most BuildSteps). In some cases, particularly series
+ of tests, it makes sense to haltOnFailure if something fails
+ early on but not flunkOnFailure. This can be achieved with
+ haltOnFailure=True, flunkOnFailure=False.
+
+`flunkOnWarnings'
+ when True, a WARNINGS or FAILURE of this build step will mark the
+ overall build as FAILURE. The remaining steps will still be
+ executed.
+
+`flunkOnFailure'
+ when True, a FAILURE of this build step will mark the overall
+ build as a FAILURE. The remaining steps will still be executed.
+
+`warnOnWarnings'
+ when True, a WARNINGS or FAILURE of this build step will mark the
+ overall build as having WARNINGS. The remaining steps will still
+ be executed.
+
+`warnOnFailure'
+ when True, a FAILURE of this build step will mark the overall
+ build as having WARNINGS. The remaining steps will still be
+ executed.
+
+`alwaysRun'
+ if True, this build step will always be run, even if a previous
+ buildstep with `haltOnFailure=True' has failed.
+
+`locks'
+ a list of Locks (instances of `buildbot.locks.SlaveLock' or
+ `buildbot.locks.MasterLock') that should be acquired before
+ starting this Step. The Locks will be released when the step is
+ complete. Note that this is a list of actual Lock instances, not
+ names. Also note that all Locks must have unique names.
+
+
+
+File: buildbot.info, Node: Using Build Properties, Next: Source Checkout, Prev: Common Parameters, Up: Build Steps
+
+6.1.2 Using Build Properties
+----------------------------
+
+Build properties are a generalized way to provide configuration
+information to build steps; see *note Build Properties::.
+
+ Some build properties are inherited from external sources - global
+properties, schedulers, or buildslaves. Some build properties are
+set when the build starts, such as the SourceStamp information. Other
+properties can be set by BuildSteps as they run, for example the
+various Source steps will set the `got_revision' property to the
+source revision that was actually checked out (which can be useful
+when the SourceStamp in use merely requested the "latest revision":
+`got_revision' will tell you what was actually built).
+
+ In custom BuildSteps, you can get and set the build properties with
+the `getProperty'/`setProperty' methods. Each takes a string for the
+name of the property, and returns or accepts an arbitrary(1) object.
+For example:
+
+ class MakeTarball(ShellCommand):
+ def start(self):
+ if self.getProperty("os") == "win":
+ self.setCommand([ ... ]) # windows-only command
+ else:
+ self.setCommand([ ... ]) # equivalent for other systems
+ ShellCommand.start(self)
+
+WithProperties
+==============
+
+You can use build properties in ShellCommands by using the
+`WithProperties' wrapper when setting the arguments of the
+ShellCommand. This interpolates the named build properties into the
+generated shell command. Most step parameters accept
+`WithProperties'. Please file bugs for any parameters which do not.
+
+ from buildbot.steps.shell import ShellCommand
+ from buildbot.process.properties import WithProperties
+
+ f.addStep(ShellCommand(
+ command=["tar", "czf",
+ WithProperties("build-%s.tar.gz", "revision"),
+ "source"]))
+
+ If this BuildStep were used in a tree obtained from Subversion, it
+would create a tarball with a name like `build-1234.tar.gz'.
+
+ The `WithProperties' function does `printf'-style string
+interpolation, using strings obtained by calling
+`build.getProperty(propname)'. Note that for every `%s' (or `%d',
+etc), you must have exactly one additional argument to indicate which
+build property you want to insert.
+
+ You can also use python dictionary-style string interpolation by
+using the `%(propname)s' syntax. In this form, the property name goes
+in the parentheses, and WithProperties takes _no_ additional
+arguments:
+
+ f.addStep(ShellCommand(
+ command=["tar", "czf",
+ WithProperties("build-%(revision)s.tar.gz"),
+ "source"]))
+
+ Don't forget the extra "s" after the closing parenthesis! This is
+the cause of many confusing errors.
+
+ The dictionary-style interpolation supports a number of more
+advanced syntaxes, too.
+
+`propname:-replacement'
+ If `propname' exists, substitute its value; otherwise,
+ substitute `replacement'. `replacement' may be empty
+ (`%(propname:-)s')
+
+`propname:+replacement'
+ If `propname' exists, substitute `replacement'; otherwise,
+ substitute an empty string.
+
+
+ Although these are similar to shell substitutions, no other
+substitutions are currently supported, and `replacement' in the above
+cannot contain more substitutions.
+
+ Note: like python, you can either do positional-argument
+interpolation _or_ keyword-argument interpolation, not both. Thus you
+cannot use a string like `WithProperties("foo-%(revision)s-%s",
+"branch")'.
+
+Common Build Properties
+=======================
+
+The following build properties are set when the build is started, and
+are available to all steps.
+
+`branch'
+ This comes from the build's SourceStamp, and describes which
+ branch is being checked out. This will be `None' (which
+ interpolates into `WithProperties' as an empty string) if the
+ build is on the default branch, which is generally the trunk.
+ Otherwise it will be a string like "branches/beta1.4". The exact
+ syntax depends upon the VC system being used.
+
+`revision'
+ This also comes from the SourceStamp, and is the revision of the
+ source code tree that was requested from the VC system. When a
+ build is requested of a specific revision (as is generally the
+ case when the build is triggered by Changes), this will contain
+ the revision specification. This is always a string, although
+ the syntax depends upon the VC system in use: for SVN it is an
+ integer, for Mercurial it is a short string, for Darcs it is a
+ rather large string, etc.
+
+ If the "force build" button was pressed, the revision will be
+ `None', which means to use the most recent revision available.
+ This is a "trunk build". This will be interpolated as an empty
+ string.
+
+`got_revision'
+ This is set when a Source step checks out the source tree, and
+ provides the revision that was actually obtained from the VC
+ system. In general this should be the same as `revision',
+ except for trunk builds, where `got_revision' indicates what
+ revision was current when the checkout was performed. This can
+ be used to rebuild the same source code later.
+
+ Note that for some VC systems (Darcs in particular), the
+ revision is a large string containing newlines, and is not
+ suitable for interpolation into a filename.
+
+`buildername'
+ This is a string that indicates which Builder the build was a
+ part of. The combination of buildername and buildnumber
+ uniquely identify a build.
+
+`buildnumber'
+ Each build gets a number, scoped to the Builder (so the first
+ build performed on any given Builder will have a build number of
+ 0). This integer property contains the build's number.
+
+`slavename'
+ This is a string which identifies which buildslave the build is
+ running on.
+
+`scheduler'
+ If the build was started from a scheduler, then this property
+ will contain the name of that scheduler.
+
+
+ ---------- Footnotes ----------
+
+ (1) Build properties are serialized along with the build results,
+so they must be serializable. For this reason, the value of any build
+property should be simple inert data: strings, numbers, lists,
+tuples, and dictionaries. They should not contain class instances.
+
+
+File: buildbot.info, Node: Source Checkout, Next: ShellCommand, Prev: Using Build Properties, Up: Build Steps
+
+6.1.3 Source Checkout
+---------------------
+
+The first step of any build is typically to acquire the source code
+from which the build will be performed. There are several classes to
+handle this, one for each of the different source control systems that
+Buildbot knows about. For a description of how Buildbot treats source
+control in general, see *note Version Control Systems::.
+
+ All source checkout steps accept some common parameters to control
+how they get the sources and where they should be placed. The
+remaining per-VC-system parameters are mostly to specify where
+exactly the sources are coming from.
+
+`mode'
+ a string describing the kind of VC operation that is desired.
+ Defaults to `update'.
+
+ `update'
+ specifies that the CVS checkout/update should be performed
+ directly into the workdir. Each build is performed in the
+ same directory, allowing for incremental builds. This
+ minimizes disk space, bandwidth, and CPU time. However, it
+ may encounter problems if the build process does not handle
+ dependencies properly (sometimes you must do a "clean
+ build" to make sure everything gets compiled), or if source
+ files are deleted but generated files can influence test
+ behavior (e.g. python's .pyc files), or when source
+ directories are deleted but generated files prevent CVS
+ from removing them. Builds ought to be correct regardless
+ of whether they are done "from scratch" or incrementally,
+ but it is useful to test both kinds: this mode exercises the
+ incremental-build style.
+
+ `copy'
+ specifies that the CVS workspace should be maintained in a
+ separate directory (called the 'copydir'), using checkout
+ or update as necessary. For each build, a new workdir is
+ created with a copy of the source tree (rm -rf workdir; cp
+ -r copydir workdir). This doubles the disk space required,
+ but keeps the bandwidth low (update instead of a full
+ checkout). A full 'clean' build is performed each time. This
+ avoids any generated-file build problems, but is still
+ occasionally vulnerable to CVS problems such as a
+ repository being manually rearranged, causing CVS errors on
+ update which are not an issue with a full checkout.
+
+ `clobber'
+        specifies that the working directory should be deleted each
+ time, necessitating a full checkout for each build. This
+        ensures a clean build off a complete checkout, avoiding any
+ of the problems described above. This mode exercises the
+ "from-scratch" build style.
+
+ `export'
+ this is like `clobber', except that the 'cvs export'
+ command is used to create the working directory. This
+ command removes all CVS metadata files (the CVS/
+ directories) from the tree, which is sometimes useful for
+ creating source tarballs (to avoid including the metadata
+ in the tar file).
+
+`workdir'
+ like all Steps, this indicates the directory where the build
+ will take place. Source Steps are special in that they perform
+ some operations outside of the workdir (like creating the
+ workdir itself).
+
+`alwaysUseLatest'
+ if True, bypass the usual "update to the last Change" behavior,
+ and always update to the latest changes instead.
+
+`retry'
+ If set, this specifies a tuple of `(delay, repeats)' which means
+ that when a full VC checkout fails, it should be retried up to
+ REPEATS times, waiting DELAY seconds between attempts. If you
+ don't provide this, it defaults to `None', which means VC
+ operations should not be retried. This is provided to make life
+ easier for buildslaves which are stuck behind poor network
+ connections.
+
+
+ My habit as a developer is to do a `cvs update' and `make' each
+morning. Problems can occur, either because of bad code being checked
+in, or by incomplete dependencies causing a partial rebuild to fail
+where a complete from-scratch build might succeed. A quick Builder
+which emulates this incremental-build behavior would use the
+`mode='update'' setting.
+
+ On the other hand, other kinds of dependency problems can cause a
+clean build to fail where a partial build might succeed. This
+frequently results from a link step that depends upon an object file
+that was removed from a later version of the tree: in the partial
+tree, the object file is still around (even though the Makefiles no
+longer know how to create it).
+
+ "official" builds (traceable builds performed from a known set of
+source revisions) are always done as clean builds, to make sure it is
+not influenced by any uncontrolled factors (like leftover files from a
+previous build). A "full" Builder which behaves this way would want
+to use the `mode='clobber'' setting.
+
+ Each VC system has a corresponding source checkout class: their
+arguments are described on the following pages.
+
+* Menu:
+
+* CVS::
+* SVN::
+* Darcs::
+* Mercurial::
+* Arch::
+* Bazaar::
+* Bzr::
+* P4::
+* Git::
+
+
+File: buildbot.info, Node: CVS, Next: SVN, Prev: Source Checkout, Up: Source Checkout
+
+6.1.3.1 CVS
+...........
+
+The `CVS' build step performs a CVS (http://www.nongnu.org/cvs/)
+checkout or update. It takes the following arguments:
+
+`cvsroot'
+ (required): specify the CVSROOT value, which points to a CVS
+ repository, probably on a remote machine. For example, the
+ cvsroot value you would use to get a copy of the Buildbot source
+ code is
+ `:pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot'
+
+`cvsmodule'
+ (required): specify the cvs `module', which is generally a
+ subdirectory of the CVSROOT. The cvsmodule for the Buildbot
+ source code is `buildbot'.
+
+`branch'
+ a string which will be used in a `-r' argument. This is most
+ useful for specifying a branch to work on. Defaults to `HEAD'.
+
+`global_options'
+ a list of flags to be put before the verb in the CVS command.
+
+`checkoutDelay'
+ if set, the number of seconds to put between the timestamp of
+ the last known Change and the value used for the `-D' option.
+ Defaults to half of the parent Build's treeStableTimer.
+
+
+
+File: buildbot.info, Node: SVN, Next: Darcs, Prev: CVS, Up: Source Checkout
+
+6.1.3.2 SVN
+...........
+
+The `SVN' build step performs a Subversion
+(http://subversion.tigris.org) checkout or update. There are two
+basic ways of setting up the checkout step, depending upon whether
+you are using multiple branches or not.
+
+ If all of your builds use the same branch, then you should create
+the `SVN' step with the `svnurl' argument:
+
+`svnurl'
+ (required): this specifies the `URL' argument that will be given
+ to the `svn checkout' command. It dictates both where the
+ repository is located and which sub-tree should be extracted. In
+ this respect, it is like a combination of the CVS `cvsroot' and
+ `cvsmodule' arguments. For example, if you are using a remote
+ Subversion repository which is accessible through HTTP at a URL
+ of `http://svn.example.com/repos', and you wanted to check out
+ the `trunk/calc' sub-tree, you would use
+ `svnurl="http://svn.example.com/repos/trunk/calc"' as an argument
+ to your `SVN' step.
+
+ If, on the other hand, you are building from multiple branches,
+then you should create the `SVN' step with the `baseURL' and
+`defaultBranch' arguments instead:
+
+`baseURL'
+ (required): this specifies the base repository URL, to which a
+ branch name will be appended. It should probably end in a slash.
+
+`defaultBranch'
+ this specifies the name of the branch to use when a Build does
+ not provide one of its own. This will be appended to `baseURL' to
+ create the string that will be passed to the `svn checkout'
+ command.
+
+`username'
+ if specified, this will be passed to the `svn' binary with a
+ `--username' option.
+
+`password'
+ if specified, this will be passed to the `svn' binary with a
+ `--password' option. The password itself will be suitably
+ obfuscated in the logs.
+
+
+ If you are using branches, you must also make sure your
+`ChangeSource' will report the correct branch names.
+
+branch example
+==============
+
+Let's suppose that the "MyProject" repository uses branches for the
+trunk, for various users' individual development efforts, and for
+several new features that will require some amount of work (involving
+multiple developers) before they are ready to merge onto the trunk.
+Such a repository might be organized as follows:
+
+ svn://svn.example.org/MyProject/trunk
+ svn://svn.example.org/MyProject/branches/User1/foo
+ svn://svn.example.org/MyProject/branches/User1/bar
+ svn://svn.example.org/MyProject/branches/User2/baz
+ svn://svn.example.org/MyProject/features/newthing
+ svn://svn.example.org/MyProject/features/otherthing
+
+ Further assume that we want the Buildbot to run tests against the
+trunk and against all the feature branches (i.e., do a
+checkout/compile/build of branch X when a file has been changed on
+branch X, when X is in the set [trunk, features/newthing,
+features/otherthing]). We do not want the Buildbot to automatically
+build any of the user branches, but it should be willing to build a
+user branch when explicitly requested (most likely by the user who
+owns that branch).
+
+   There are three things that need to be set up to accommodate this
+system. The first is a ChangeSource that is capable of identifying the
+branch which owns any given file. This depends upon a user-supplied
+function, in an external program that runs in the SVN commit hook and
+connects to the buildmaster's `PBChangeSource' over a TCP connection.
+(you can use the "`buildbot sendchange'" utility for this purpose,
+but you will still need an external program to decide what value
+should be passed to the `--branch=' argument). For example, a change
+to a file with the SVN url of
+"svn://svn.example.org/MyProject/features/newthing/src/foo.c" should
+be broken down into a Change instance with
+`branch='features/newthing'' and `file='src/foo.c''.
+
+ The second piece is an `AnyBranchScheduler' which will pay
+attention to the desired branches. It will not pay attention to the
+user branches, so it will not automatically start builds in response
+to changes there. The AnyBranchScheduler class requires you to
+explicitly list all the branches you want it to use, but it would not
+be difficult to write a subclass which used
+`branch.startswith('features/')' to remove the need for this explicit
+list. Or, if you want to build user branches too, you can use
+AnyBranchScheduler with `branches=None' to indicate that you want it
+to pay attention to all branches.
+
+ The third piece is an `SVN' checkout step that is configured to
+handle the branches correctly, with a `baseURL' value that matches
+the way the ChangeSource splits each file's URL into base, branch,
+and file.
+
+ from buildbot.changes.pb import PBChangeSource
+ from buildbot.scheduler import AnyBranchScheduler
+ from buildbot.process import source, factory
+ from buildbot.steps import source, shell
+
+ c['change_source'] = PBChangeSource()
+ s1 = AnyBranchScheduler('main',
+ ['trunk', 'features/newthing', 'features/otherthing'],
+ 10*60, ['test-i386', 'test-ppc'])
+ c['schedulers'] = [s1]
+
+ f = factory.BuildFactory()
+ f.addStep(source.SVN(mode='update',
+ baseURL='svn://svn.example.org/MyProject/',
+ defaultBranch='trunk'))
+ f.addStep(shell.Compile(command="make all"))
+ f.addStep(shell.Test(command="make test"))
+
+ c['builders'] = [
+ {'name':'test-i386', 'slavename':'bot-i386', 'builddir':'test-i386',
+ 'factory':f },
+ {'name':'test-ppc', 'slavename':'bot-ppc', 'builddir':'test-ppc',
+ 'factory':f },
+ ]
+
+ In this example, when a change arrives with a `branch' attribute
+of "trunk", the resulting build will have an SVN step that
+concatenates "svn://svn.example.org/MyProject/" (the baseURL) with
+"trunk" (the branch name) to get the correct svn command. If the
+"newthing" branch has a change to "src/foo.c", then the SVN step will
+concatenate "svn://svn.example.org/MyProject/" with
+"features/newthing" to get the svnurl for checkout.
+
+
+File: buildbot.info, Node: Darcs, Next: Mercurial, Prev: SVN, Up: Source Checkout
+
+6.1.3.3 Darcs
+.............
+
+The `Darcs' build step performs a Darcs (http://darcs.net/) checkout
+or update.
+
+ Like *Note SVN::, this step can either be configured to always
+check out a specific tree, or set up to pull from a particular branch
+that gets specified separately for each build. Also like SVN, the
+repository URL given to Darcs is created by concatenating a `baseURL'
+with the branch name, and if no particular branch is requested, it
+uses a `defaultBranch'. The only difference in usage is that each
+potential Darcs repository URL must point to a fully-fledged
+repository, whereas SVN URLs usually point to sub-trees of the main
+Subversion repository. In other words, doing an SVN checkout of
+`baseURL' is legal, but silly, since you'd probably wind up with a
+copy of every single branch in the whole repository. Doing a Darcs
+checkout of `baseURL' is just plain wrong, since the parent directory
+of a collection of Darcs repositories is not itself a valid
+repository.
+
+ The Darcs step takes the following arguments:
+
+`repourl'
+ (required unless `baseURL' is provided): the URL at which the
+ Darcs source repository is available.
+
+`baseURL'
+ (required unless `repourl' is provided): the base repository URL,
+ to which a branch name will be appended. It should probably end
+ in a slash.
+
+`defaultBranch'
+ (allowed if and only if `baseURL' is provided): this specifies
+ the name of the branch to use when a Build does not provide one
+ of its own. This will be appended to `baseURL' to create the
+ string that will be passed to the `darcs get' command.
+
+
+File: buildbot.info, Node: Mercurial, Next: Arch, Prev: Darcs, Up: Source Checkout
+
+6.1.3.4 Mercurial
+.................
+
+The `Mercurial' build step performs a Mercurial
+(http://selenic.com/mercurial) (aka "hg") checkout or update.
+
+ Branches are handled just like *Note Darcs::.
+
+ The Mercurial step takes the following arguments:
+
+`repourl'
+ (required unless `baseURL' is provided): the URL at which the
+ Mercurial source repository is available.
+
+`baseURL'
+ (required unless `repourl' is provided): the base repository URL,
+ to which a branch name will be appended. It should probably end
+ in a slash.
+
+`defaultBranch'
+ (allowed if and only if `baseURL' is provided): this specifies
+ the name of the branch to use when a Build does not provide one
+ of its own. This will be appended to `baseURL' to create the
+ string that will be passed to the `hg clone' command.
+
+
+File: buildbot.info, Node: Arch, Next: Bazaar, Prev: Mercurial, Up: Source Checkout
+
+6.1.3.5 Arch
+............
+
+The `Arch' build step performs an Arch (http://gnuarch.org/) checkout
+or update using the `tla' client. It takes the following arguments:
+
+`url'
+ (required): this specifies the URL at which the Arch source
+ archive is available.
+
+`version'
+ (required): this specifies which "development line" (like a
+ branch) should be used. This provides the default branch name,
+ but individual builds may specify a different one.
+
+`archive'
+ (optional): Each repository knows its own archive name. If this
+ parameter is provided, it must match the repository's archive
+ name. The parameter is accepted for compatibility with the
+ `Bazaar' step, below.
+
+
+
+File: buildbot.info, Node: Bazaar, Next: Bzr, Prev: Arch, Up: Source Checkout
+
+6.1.3.6 Bazaar
+..............
+
+`Bazaar' is an alternate implementation of the Arch VC system, which
+uses a client named `baz'. The checkout semantics are just different
+enough from `tla' that there is a separate BuildStep for it.
+
+ It takes exactly the same arguments as `Arch', except that the
+`archive=' parameter is required. (baz does not emit the archive name
+when you do `baz register-archive', so we must provide it ourselves).
+
+
+File: buildbot.info, Node: Bzr, Next: P4, Prev: Bazaar, Up: Source Checkout
+
+6.1.3.7 Bzr
+...........
+
+`bzr' is a descendant of Arch/Baz, and is frequently referred to as
+simply "Bazaar". The repository-vs-workspace model is similar to
+Darcs, but it uses a strictly linear sequence of revisions (one
+history per branch) like Arch. Branches are put in subdirectories.
+This makes it look very much like Mercurial, so it takes the same
+arguments:
+
+`repourl'
+ (required unless `baseURL' is provided): the URL at which the
+ Bzr source repository is available.
+
+`baseURL'
+ (required unless `repourl' is provided): the base repository URL,
+ to which a branch name will be appended. It should probably end
+ in a slash.
+
+`defaultBranch'
+ (allowed if and only if `baseURL' is provided): this specifies
+ the name of the branch to use when a Build does not provide one
+ of its own. This will be appended to `baseURL' to create the
+ string that will be passed to the `bzr checkout' command.
+
+
+File: buildbot.info, Node: P4, Next: Git, Prev: Bzr, Up: Source Checkout
+
+6.1.3.8 P4
+..........
+
+The `P4' build step creates a Perforce (http://www.perforce.com/)
+client specification and performs an update.
+
+`p4base'
+ A view into the Perforce depot without branch name or trailing
+ "...". Typically "//depot/proj/".
+
+`defaultBranch'
+ A branch name to append on build requests if none is specified.
+ Typically "trunk".
+
+`p4port'
+ (optional): the host:port string describing how to get to the P4
+ Depot (repository), used as the -p argument for all p4 commands.
+
+`p4user'
+ (optional): the Perforce user, used as the -u argument to all p4
+ commands.
+
+`p4passwd'
+     (optional): the Perforce password, used as the -P argument to
+     all p4 commands.
+
+`p4extra_views'
+ (optional): a list of (depotpath, clientpath) tuples containing
+ extra views to be mapped into the client specification. Both
+ will have "/..." appended automatically. The client name and
+ source directory will be prepended to the client path.
+
+`p4client'
+ (optional): The name of the client to use. In mode='copy' and
+ mode='update', it's particularly important that a unique name is
+ used for each checkout directory to avoid incorrect
+ synchronization. For this reason, Python percent substitution
+ will be performed on this value to replace %(slave)s with the
+ slave name and %(builder)s with the builder name. The default is
+     "buildbot_%(slave)s_%(builder)s".
+
+
+File: buildbot.info, Node: Git, Prev: P4, Up: Source Checkout
+
+6.1.3.9 Git
+...........
+
+The `Git' build step clones or updates a Git (http://git.or.cz/)
+repository and checks out the specified branch or revision. Note that
+the buildbot supports Git version 1.2.0 and later: earlier versions
+(such as the one shipped in Ubuntu 'Dapper') do not support the `git
+init' command that the buildbot uses.
+
+ The Git step takes the following arguments:
+
+`repourl'
+ (required): the URL of the upstream Git repository.
+
+`branch'
+ (optional): this specifies the name of the branch to use when a
+     Build does not provide one of its own. If this parameter is
+ not specified, and the Build does not provide a branch, the
+ "master" branch will be used.
+
+
+File: buildbot.info, Node: ShellCommand, Next: Simple ShellCommand Subclasses, Prev: Source Checkout, Up: Build Steps
+
+6.1.4 ShellCommand
+------------------
+
+This is a useful base class for just about everything you might want
+to do during a build (except for the initial source checkout). It runs
+a single command in a child shell on the buildslave. All stdout/stderr
+is recorded into a LogFile. The step finishes with a status of FAILURE
+if the command's exit code is non-zero, otherwise it has a status of
+SUCCESS.
+
+ The preferred way to specify the command is with a list of argv
+strings, since this allows for spaces in filenames and avoids doing
+any fragile shell-escaping. You can also specify the command with a
+single string, in which case the string is given to '/bin/sh -c
+COMMAND' for parsing.
+
+ On Windows, commands are run via `cmd.exe /c' which works well.
+However, if you're running a batch file, the error level does not get
+propagated correctly unless you add 'call' before your batch file's
+name: `cmd=['call', 'myfile.bat', ...]'.
+
+ All ShellCommands are run by default in the "workdir", which
+defaults to the "`build'" subdirectory of the slave builder's base
+directory. The absolute path of the workdir will thus be the slave's
+basedir (set as an option to `buildbot create-slave', *note Creating
+a buildslave::) plus the builder's basedir (set in the builder's
+`c['builddir']' key in master.cfg) plus the workdir itself (a
+class-level attribute of the BuildFactory, defaults to "`build'").
+
+ `ShellCommand' arguments:
+
+`command'
+ a list of strings (preferred) or single string (discouraged)
+ which specifies the command to be run. A list of strings is
+ preferred because it can be used directly as an argv array.
+ Using a single string (with embedded spaces) requires the
+ buildslave to pass the string to /bin/sh for interpretation,
+ which raises all sorts of difficult questions about how to
+ escape or interpret shell metacharacters.
+
+`env'
+ a dictionary of environment strings which will be added to the
+ child command's environment. For example, to run tests with a
+ different i18n language setting, you might use
+
+ f.addStep(ShellCommand(command=["make", "test"],
+ env={'LANG': 'fr_FR'}))
+
+ These variable settings will override any existing ones in the
+ buildslave's environment or the environment specified in the
+ Builder. The exception is PYTHONPATH, which is merged with
+ (actually prepended to) any existing $PYTHONPATH setting. The
+ value is treated as a list of directories to prepend, and a
+ single string is treated like a one-item list. For example, to
+ prepend both `/usr/local/lib/python2.3' and
+ `/home/buildbot/lib/python' to any existing $PYTHONPATH setting,
+ you would do something like the following:
+
+ f.addStep(ShellCommand(
+ command=["make", "test"],
+ env={'PYTHONPATH': ["/usr/local/lib/python2.3",
+ "/home/buildbot/lib/python"] }))
+
+`want_stdout'
+ if False, stdout from the child process is discarded rather than
+ being sent to the buildmaster for inclusion in the step's
+ LogFile.
+
+`want_stderr'
+ like `want_stdout' but for stderr. Note that commands run through
+ a PTY do not have separate stdout/stderr streams: both are
+ merged into stdout.
+
+`usePTY'
+ Should this command be run in a `pty'? The default is to
+ observe the configuration of the client (*note Buildslave
+ Options::), but specifying `True' or `False' here will override
+ the default.
+
+ The advantage of using a PTY is that "grandchild" processes are
+ more likely to be cleaned up if the build is interrupted or
+ times out (since it enables the use of a "process group" in
+ which all child processes will be placed). The disadvantages:
+ some forms of Unix have problems with PTYs, some of your unit
+ tests may behave differently when run under a PTY (generally
+ those which check to see if they are being run interactively),
+ and PTYs will merge the stdout and stderr streams into a single
+ output stream (which means the red-vs-black coloring in the
+ logfiles will be lost).
+
+`logfiles'
+ Sometimes commands will log interesting data to a local file,
+ rather than emitting everything to stdout or stderr. For
+ example, Twisted's "trial" command (which runs unit tests) only
+ presents summary information to stdout, and puts the rest into a
+ file named `_trial_temp/test.log'. It is often useful to watch
+ these files as the command runs, rather than using `/bin/cat' to
+ dump their contents afterwards.
+
+ The `logfiles=' argument allows you to collect data from these
+ secondary logfiles in near-real-time, as the step is running. It
+ accepts a dictionary which maps from a local Log name (which is
+ how the log data is presented in the build results) to a remote
+ filename (interpreted relative to the build's working
+ directory). Each named file will be polled on a regular basis
+ (every couple of seconds) as the build runs, and any new text
+ will be sent over to the buildmaster.
+
+ f.addStep(ShellCommand(
+ command=["make", "test"],
+ logfiles={"triallog": "_trial_temp/test.log"}))
+
+`timeout'
+ if the command fails to produce any output for this many
+ seconds, it is assumed to be locked up and will be killed.
+
+`description'
+ This will be used to describe the command (on the Waterfall
+ display) while the command is still running. It should be a
+ single imperfect-tense verb, like "compiling" or "testing". The
+ preferred form is a list of short strings, which allows the HTML
+ Waterfall display to create narrower columns by emitting a <br>
+ tag between each word. You may also provide a single string.
+
+`descriptionDone'
+ This will be used to describe the command once it has finished. A
+ simple noun like "compile" or "tests" should be used. Like
+ `description', this may either be a list of short strings or a
+ single string.
+
+ If neither `description' nor `descriptionDone' are set, the
+ actual command arguments will be used to construct the
+ description. This may be a bit too wide to fit comfortably on
+ the Waterfall display.
+
+ f.addStep(ShellCommand(command=["make", "test"],
+ description=["testing"],
+ descriptionDone=["tests"]))
+
+`logEnviron'
+ If this option is true (the default), then the step's logfile
+ will describe the environment variables on the slave. In
+ situations where the environment is not relevant and is long, it
+ may be easier to set `logEnviron=False'.
+
+
+
+File: buildbot.info, Node: Simple ShellCommand Subclasses, Next: Python BuildSteps, Prev: ShellCommand, Up: Build Steps
+
+6.1.5 Simple ShellCommand Subclasses
+------------------------------------
+
+Several subclasses of ShellCommand are provided as starting points for
+common build steps. These are all very simple: they just override a
+few parameters so you don't have to specify them yourself, making the
+master.cfg file less verbose.
+
+* Menu:
+
+* Configure::
+* Compile::
+* Test::
+* TreeSize::
+* PerlModuleTest::
+* SetProperty::
+
+
+File: buildbot.info, Node: Configure, Next: Compile, Prev: Simple ShellCommand Subclasses, Up: Simple ShellCommand Subclasses
+
+6.1.5.1 Configure
+.................
+
+This is intended to handle the `./configure' step from autoconf-style
+projects, or the `perl Makefile.PL' step from perl MakeMaker.pm-style
+modules. The default command is `./configure' but you can change this
+by providing a `command=' parameter.
+
+
+File: buildbot.info, Node: Compile, Next: Test, Prev: Configure, Up: Simple ShellCommand Subclasses
+
+6.1.5.2 Compile
+...............
+
+This is meant to handle compiling or building a project written in C.
+The default command is `make all'. When the compile is finished, the
+log file is scanned for GCC warning messages, a summary log is
+created with any problems that were seen, and the step is marked as
+WARNINGS if any were discovered. The number of warnings is stored in a
+Build Property named "warnings-count", which is accumulated over all
+Compile steps (so if two warnings are found in one step, and three are
+found in another step, the overall build will have a "warnings-count"
+property of 5).
+
+ The default regular expression used to detect a warning is
+`'.*warning[: ].*'' , which is fairly liberal and may cause
+false-positives. To use a different regexp, provide a
+`warningPattern=' argument, or use a subclass which sets the
+`warningPattern' attribute:
+
+ f.addStep(Compile(command=["make", "test"],
+ warningPattern="^Warning: "))
+
+ The `warningPattern=' can also be a pre-compiled python regexp
+object: this makes it possible to add flags like `re.I' (to use
+case-insensitive matching).
+
+ (TODO: this step needs to be extended to look for GCC error
+messages as well, and collect them into a separate logfile, along
+with the source code filenames involved).
+
+
+File: buildbot.info, Node: Test, Next: TreeSize, Prev: Compile, Up: Simple ShellCommand Subclasses
+
+6.1.5.3 Test
+............
+
+This is meant to handle unit tests. The default command is `make
+test', and the `warnOnFailure' flag is set.
+
+
+File: buildbot.info, Node: TreeSize, Next: PerlModuleTest, Prev: Test, Up: Simple ShellCommand Subclasses
+
+6.1.5.4 TreeSize
+................
+
+This is a simple command that uses the 'du' tool to measure the size
+of the code tree. It puts the size (as a count of 1024-byte blocks,
+aka 'KiB' or 'kibibytes') on the step's status text, and sets a build
+property named 'tree-size-KiB' with the same value.
+
+
+File: buildbot.info, Node: PerlModuleTest, Next: SetProperty, Prev: TreeSize, Up: Simple ShellCommand Subclasses
+
+6.1.5.5 PerlModuleTest
+......................
+
+This is a simple command that knows how to run tests of perl modules.
+It parses the output to determine the number of tests passed and
+failed and total number executed, saving the results for later query.
+
+
+File: buildbot.info, Node: SetProperty, Prev: PerlModuleTest, Up: Simple ShellCommand Subclasses
+
+6.1.5.6 SetProperty
+...................
+
+This buildstep is similar to ShellCommand, except that it captures the
+output of the command into a property. It is usually used like this:
+
+ f.addStep(SetProperty(command="uname -a", property="uname"))
+
+ This runs `uname -a' and captures its stdout, stripped of leading
+and trailing whitespace, in the property "uname". To avoid stripping,
+add `strip=False'. The `property' argument can be specified as a
+`WithProperties' object.
+
+ The more advanced usage allows you to specify a function to extract
+properties from the command output. Here you can use regular
+expressions, string interpolation, or whatever you would like. The
+function is called with three arguments: the exit status of the
+command, its standard output as a string, and its standard error as a
+string. It should return a dictionary containing all new properties.
+
+ def glob2list(rc, stdout, stderr):
+ jpgs = [ l.strip() for l in stdout.split('\n') ]
+ return { 'jpgs' : jpgs }
+ f.addStep(SetProperty(command="ls -1 *.jpg", extract_fn=glob2list))
+
+ Note that any ordering relationship of the contents of stdout and
+stderr is lost. For example, given
+
+ f.addStep(SetProperty(
+ command="echo output1; echo error >&2; echo output2",
+ extract_fn=my_extract))
+
+ Then `my_extract' will see `stdout="output1\noutput2\n"' and
+`stderr="error\n"'.
+
+
+File: buildbot.info, Node: Python BuildSteps, Next: Transferring Files, Prev: Simple ShellCommand Subclasses, Up: Build Steps
+
+6.1.6 Python BuildSteps
+-----------------------
+
+Here are some BuildSteps that are specifically useful for projects
+implemented in Python.
+
+* Menu:
+
+* BuildEPYDoc::
+* PyFlakes::
+* PyLint::
+
+
+File: buildbot.info, Node: BuildEPYDoc, Next: PyFlakes, Up: Python BuildSteps
+
+6.1.6.1 BuildEPYDoc
+...................
+
+epydoc (http://epydoc.sourceforge.net/) is a tool for generating API
+documentation for Python modules from their docstrings. It reads all
+the .py files from your source tree, processes the docstrings
+therein, and creates a large tree of .html files (or a single .pdf
+file).
+
+ The `buildbot.steps.python.BuildEPYDoc' step will run `epydoc' to
+produce this API documentation, and will count the errors and
+warnings from its output.
+
+ You must supply the command line to be used. The default is `make
+epydocs', which assumes that your project has a Makefile with an
+"epydocs" target. You might wish to use something like `epydoc -o
+apiref source/PKGNAME' instead. You might also want to add `--pdf' to
+generate a PDF file instead of a large tree of HTML files.
+
+ The API docs are generated in-place in the build tree (under the
+workdir, in the subdirectory controlled by the "-o" argument). To
+make them useful, you will probably have to copy them to somewhere
+they can be read. A command like `rsync -ad apiref/
+dev.example.com:~public_html/current-apiref/' might be useful. You
+might instead want to bundle them into a tarball and publish it in the
+same place where the generated install tarball is placed.
+
+ from buildbot.steps.python import BuildEPYDoc
+
+ ...
+ f.addStep(BuildEPYDoc(command=["epydoc", "-o", "apiref", "source/mypkg"]))
+
+
+File: buildbot.info, Node: PyFlakes, Next: PyLint, Prev: BuildEPYDoc, Up: Python BuildSteps
+
+6.1.6.2 PyFlakes
+................
+
+PyFlakes (http://divmod.org/trac/wiki/DivmodPyflakes) is a tool to
+perform basic static analysis of Python code to look for simple
+errors, like missing imports and references of undefined names. It is
+like a fast and simple form of the C "lint" program. Other tools
+(like pychecker) provide more detailed results but take longer to run.
+
+ The `buildbot.steps.python.PyFlakes' step will run pyflakes and
+count the various kinds of errors and warnings it detects.
+
+ You must supply the command line to be used. The default is `make
+pyflakes', which assumes you have a top-level Makefile with a
+"pyflakes" target. You might want to use something like `pyflakes .'
+or `pyflakes src'.
+
+ from buildbot.steps.python import PyFlakes
+
+ ...
+ f.addStep(PyFlakes(command=["pyflakes", "src"]))
+
+
+File: buildbot.info, Node: PyLint, Prev: PyFlakes, Up: Python BuildSteps
+
+6.1.6.3 PyLint
+..............
+
+Similarly, the `buildbot.steps.python.PyLint' step will run pylint and
+analyze the results.
+
+ You must supply the command line to be used. There is no default.
+
+ from buildbot.steps.python import PyLint
+
+ ...
+ f.addStep(PyLint(command=["pylint", "src"]))
+
+
+File: buildbot.info, Node: Transferring Files, Next: Steps That Run on the Master, Prev: Python BuildSteps, Up: Build Steps
+
+6.1.7 Transferring Files
+------------------------
+
+Most of the work involved in a build will take place on the
+buildslave. But occasionally it is useful to do some work on the
+buildmaster side. The most basic way to involve the buildmaster is
+simply to move a file from the slave to the master, or vice versa.
+There are a pair of BuildSteps named `FileUpload' and `FileDownload'
+to provide this functionality. `FileUpload' moves a file _up to_ the
+master, while `FileDownload' moves a file _down from_ the master.
+
+ As an example, let's assume that there is a step which produces an
+HTML file within the source tree that contains some sort of generated
+project documentation. We want to move this file to the buildmaster,
+into a `~/public_html' directory, so it can be visible to developers.
+This file will wind up in the slave-side working directory under the
+name `docs/reference.html'. We want to put it into the master-side
+`~/public_html/ref.html'.
+
+ from buildbot.steps.shell import ShellCommand
+ from buildbot.steps.transfer import FileUpload
+
+ f.addStep(ShellCommand(command=["make", "docs"]))
+ f.addStep(FileUpload(slavesrc="docs/reference.html",
+ masterdest="~/public_html/ref.html"))
+
+ The `masterdest=' argument will be passed to os.path.expanduser,
+so things like "~" will be expanded properly. Non-absolute paths will
+be interpreted relative to the buildmaster's base directory.
+Likewise, the `slavesrc=' argument will be expanded and interpreted
+relative to the builder's working directory.
+
+ To move a file from the master to the slave, use the
+`FileDownload' command. For example, let's assume that some step
+requires a configuration file that, for whatever reason, could not be
+recorded in the source code repository or generated on the buildslave
+side:
+
+ from buildbot.steps.shell import ShellCommand
+     from buildbot.steps.transfer import FileDownload
+
+ f.addStep(FileDownload(mastersrc="~/todays_build_config.txt",
+ slavedest="build_config.txt"))
+ f.addStep(ShellCommand(command=["make", "config"]))
+
+ Like `FileUpload', the `mastersrc=' argument is interpreted
+relative to the buildmaster's base directory, and the `slavedest='
+argument is relative to the builder's working directory. If the
+buildslave is running in `~buildslave', and the builder's "builddir"
+is something like `tests-i386', then the workdir is going to be
+`~buildslave/tests-i386/build', and a `slavedest=' of `foo/bar.html'
+will get put in `~buildslave/tests-i386/build/foo/bar.html'. Both of
+these commands will create any missing intervening directories.
+
+Other Parameters
+----------------
+
+The `maxsize=' argument lets you set a maximum size for the file to
+be transferred. This may help to avoid surprises: transferring a
+100MB coredump when you were expecting to move a 10kB status file
+might take an awfully long time. The `blocksize=' argument controls
+how the file is sent over the network: larger blocksizes are slightly
+more efficient but also consume more memory on each end, and there is
+a hard-coded limit of about 640kB.
+
+ The `mode=' argument allows you to control the access permissions
+of the target file, traditionally expressed as an octal integer. The
+most common value is probably 0755, which sets the "x" executable bit
+on the file (useful for shell scripts and the like). The default
+value for `mode=' is None, which means the permission bits will
+default to whatever the umask of the writing process is. The default
+umask tends to be fairly restrictive, but at least on the buildslave
+you can make it less restrictive with a -umask command-line option at
+creation time (*note Buildslave Options::).
+
+Transferring Directories
+------------------------
+
+To transfer complete directories from the buildslave to the master,
+there is a BuildStep named `DirectoryUpload'. It works like
+`FileUpload', just for directories. However it does not support the
+`maxsize', `blocksize' and `mode' arguments. As an example, let's
+assume a generated project documentation, which consists of many
+files (like the output of doxygen or epydoc). We want to move the
+entire documentation to the buildmaster, into a `~/public_html/docs'
+directory. On the slave-side the directory can be found under `docs':
+
+ from buildbot.steps.shell import ShellCommand
+ from buildbot.steps.transfer import DirectoryUpload
+
+ f.addStep(ShellCommand(command=["make", "docs"]))
+ f.addStep(DirectoryUpload(slavesrc="docs",
+ masterdest="~/public_html/docs"))
+
+   The DirectoryUpload step creates all necessary directories and
+transfers empty directories, too.
+
+
+File: buildbot.info, Node: Steps That Run on the Master, Next: Triggering Schedulers, Prev: Transferring Files, Up: Build Steps
+
+6.1.8 Steps That Run on the Master
+----------------------------------
+
+Occasionally, it is useful to execute some task on the master, for
+example to create a directory, deploy a build result, or trigger some
+other centralized processing. This is possible, in a limited
+fashion, with the `MasterShellCommand' step.
+
+ This step operates similarly to a regular `ShellCommand', but
+executes on the master, instead of the slave. To be clear, the
+enclosing `Build' object must still have a slave object, just as for
+any other step - only, in this step, the slave does not do anything.
+
+ In this example, the step renames a tarball based on the day of
+the week.
+
+ from buildbot.steps.transfer import FileUpload
+ from buildbot.steps.master import MasterShellCommand
+
+ f.addStep(FileUpload(slavesrc="widgetsoft.tar.gz",
+ masterdest="/var/buildoutputs/widgetsoft-new.tar.gz"))
+ f.addStep(MasterShellCommand(command="""
+ cd /var/buildoutputs;
+ mv widgetsoft-new.tar.gz widgetsoft-`date +%a`.tar.gz"""))
+
+
+File: buildbot.info, Node: Triggering Schedulers, Next: Writing New BuildSteps, Prev: Steps That Run on the Master, Up: Build Steps
+
+6.1.9 Triggering Schedulers
+---------------------------
+
+The counterpart to the Triggerable described in section *note
+Triggerable Scheduler:: is the Trigger BuildStep.
+
+ from buildbot.steps.trigger import Trigger
+ f.addStep(Trigger(schedulerNames=['build-prep'],
+ waitForFinish=True,
+ updateSourceStamp=True))
+
+ The `schedulerNames=' argument lists the Triggerables that should
+be triggered when this step is executed. Note that it is possible,
+but not advisable, to create a cycle where a build continually
+triggers itself, because the schedulers are specified by name.
+
+ If `waitForFinish' is True, then the step will not finish until
+all of the builds from the triggered schedulers have finished. If this
+argument is False (the default) or not given, then the buildstep
+succeeds immediately after triggering the schedulers.
+
+   If `updateSourceStamp' is True (the default), then the step updates
+the SourceStamp given to the Triggerables to include `got_revision'
+(the revision actually used in this build) as `revision' (the
+revision to use in the triggered builds). This is useful to ensure
+that all of the builds use exactly the same SourceStamp, even if
+other Changes have occurred while the build was running.
+
+
+File: buildbot.info, Node: Writing New BuildSteps, Prev: Triggering Schedulers, Up: Build Steps
+
+6.1.10 Writing New BuildSteps
+-----------------------------
+
+While it is a good idea to keep your build process self-contained in
+the source code tree, sometimes it is convenient to put more
+intelligence into your Buildbot configuration. One way to do this is
+to write a custom BuildStep. Once written, this Step can be used in
+the `master.cfg' file.
+
+ The best reason for writing a custom BuildStep is to better parse
+the results of the command being run. For example, a BuildStep that
+knows about JUnit could look at the logfiles to determine which tests
+had been run, how many passed and how many failed, and then report
+more detailed information than a simple `rc==0' -based "good/bad"
+decision.
+
+* Menu:
+
+* Writing BuildStep Constructors::
+* BuildStep LogFiles::
+* Reading Logfiles::
+* Adding LogObservers::
+* BuildStep URLs::
+
+
+File: buildbot.info, Node: Writing BuildStep Constructors, Next: BuildStep LogFiles, Up: Writing New BuildSteps
+
+6.1.10.1 Writing BuildStep Constructors
+.......................................
+
+BuildStep classes have some extra equipment, because they are their
+own factories. Consider the use of a BuildStep in `master.cfg':
+
+ f.addStep(MyStep(someopt="stuff", anotheropt=1))
+
+ This creates a single instance of class `MyStep'. However,
+Buildbot needs a new object each time the step is executed. This is
+accomplished by storing the information required to instantiate a new
+object in the `factory' attribute. When the time comes to construct
+a new Build, BuildFactory consults this attribute (via
+`getStepFactory') and instantiates a new step object.
+
+   When writing a new step class, then, keep in mind that you
+cannot do anything "interesting" in the constructor - limit yourself
+to checking and storing arguments. To ensure that these arguments
+are provided to any new objects, call `self.addFactoryArguments' with
+any keyword arguments your constructor needs.
+
+ Keep a `**kwargs' argument on the end of your options, and pass
+that up to the parent class's constructor.
+
+ The whole thing looks like this:
+
+     class Frobnify(LoggingBuildStep):
+        def __init__(self,
+                     frob_what="frobee",
+                     frob_how_many=None,
+                     frob_how=None,
+                     **kwargs):
+
+            # check
+            if frob_how_many is None:
+                raise TypeError("Frobnify argument how_many is required")
+
+            # call parent
+            LoggingBuildStep.__init__(self, **kwargs)
+
+            # and record arguments for later
+            self.addFactoryArguments(
+                frob_what=frob_what,
+                frob_how_many=frob_how_many,
+                frob_how=frob_how)
+
+     class FastFrobnify(Frobnify):
+        def __init__(self,
+                     speed=5,
+                     **kwargs):
+            Frobnify.__init__(self, **kwargs)
+            self.addFactoryArguments(
+                speed=speed)
+
+
+File: buildbot.info, Node: BuildStep LogFiles, Next: Reading Logfiles, Prev: Writing BuildStep Constructors, Up: Writing New BuildSteps
+
+6.1.10.2 BuildStep LogFiles
+...........................
+
+Each BuildStep has a collection of "logfiles". Each one has a short
+name, like "stdio" or "warnings". Each LogFile contains an arbitrary
+amount of text, usually the contents of some output file generated
+during a build or test step, or a record of everything that was
+printed to stdout/stderr during the execution of some command.
+
+ These LogFiles are stored to disk, so they can be retrieved later.
+
+ Each can contain multiple "channels", generally limited to three
+basic ones: stdout, stderr, and "headers". For example, when a
+ShellCommand runs, it writes a few lines to the "headers" channel to
+indicate the exact argv strings being run, which directory the command
+is being executed in, and the contents of the current environment
+variables. Then, as the command runs, it adds a lot of "stdout" and
+"stderr" messages. When the command finishes, a final "header" line
+is added with the exit code of the process.
+
+ Status display plugins can format these different channels in
+different ways. For example, the web page shows LogFiles as text/html,
+with header lines in blue text, stdout in black, and stderr in red. A
+different URL is available which provides a text/plain format, in
+which stdout and stderr are collapsed together, and header lines are
+stripped completely. This latter option makes it easy to save the
+results to a file and run `grep' or whatever against the output.
+
+ Each BuildStep contains a mapping (implemented in a python
+dictionary) from LogFile name to the actual LogFile objects. Status
+plugins can get a list of LogFiles to display, for example, a list of
+HREF links that, when clicked, provide the full contents of the
+LogFile.
+
+Using LogFiles in custom BuildSteps
+===================================
+
+The most common way for a custom BuildStep to use a LogFile is to
+summarize the results of a ShellCommand (after the command has
+finished running). For example, a compile step with thousands of lines
+of output might want to create a summary of just the warning messages.
+If you were doing this from a shell, you would use something like:
+
+ grep "warning:" output.log >warnings.log
+
+ In a custom BuildStep, you could instead create a "warnings"
+LogFile that contained the same text. To do this, you would add code
+to your `createSummary' method that pulls lines from the main output
+log and creates a new LogFile with the results:
+
+ def createSummary(self, log):
+ warnings = []
+ for line in log.readlines():
+ if "warning:" in line:
+                warnings.append(line)
+ self.addCompleteLog('warnings', "".join(warnings))
+
+ This example uses the `addCompleteLog' method, which creates a new
+LogFile, puts some text in it, and then "closes" it, meaning that no
+further contents will be added. This LogFile will appear in the HTML
+display under an HREF with the name "warnings", since that is the
+name of the LogFile.
+
+ You can also use `addHTMLLog' to create a complete (closed)
+LogFile that contains HTML instead of plain text. The normal LogFile
+will be HTML-escaped if presented through a web page, but the HTML
+LogFile will not. At the moment this is only used to present a pretty
+HTML representation of an otherwise ugly exception traceback when
+something goes badly wrong during the BuildStep.
+
+ In contrast, you might want to create a new LogFile at the
+beginning of the step, and add text to it as the command runs. You
+can create the LogFile and attach it to the build by calling
+`addLog', which returns the LogFile object. You then add text to this
+LogFile by calling methods like `addStdout' and `addHeader'. When you
+are done, you must call the `finish' method so the LogFile can be
+closed. It may be useful to create and populate a LogFile like this
+from a LogObserver method *Note Adding LogObservers::.
+
+ The `logfiles=' argument to `ShellCommand' (see *note
+ShellCommand::) creates new LogFiles and fills them in realtime by
+asking the buildslave to watch an actual file on disk. The buildslave
+will look for additions in the target file and report them back to
+the BuildStep. These additions will be added to the LogFile by
+calling `addStdout'. These secondary LogFiles can be used as the
+source of a LogObserver just like the normal "stdio" LogFile.
+
+
+File: buildbot.info, Node: Reading Logfiles, Next: Adding LogObservers, Prev: BuildStep LogFiles, Up: Writing New BuildSteps
+
+6.1.10.3 Reading Logfiles
+.........................
+
+Once a LogFile has been added to a BuildStep with `addLog()',
+`addCompleteLog()', `addHTMLLog()', or `logfiles=', your BuildStep
+can retrieve it by using `getLog()':
+
+ class MyBuildStep(ShellCommand):
+ logfiles = { "nodelog": "_test/node.log" }
+
+ def evaluateCommand(self, cmd):
+ nodelog = self.getLog("nodelog")
+ if "STARTED" in nodelog.getText():
+ return SUCCESS
+ else:
+ return FAILURE
+
+ For a complete list of the methods you can call on a LogFile,
+please see the docstrings on the `IStatusLog' class in
+`buildbot/interfaces.py'.
+
+
+File: buildbot.info, Node: Adding LogObservers, Next: BuildStep URLs, Prev: Reading Logfiles, Up: Writing New BuildSteps
+
+6.1.10.4 Adding LogObservers
+............................
+
+Most shell commands emit messages to stdout or stderr as they operate,
+especially if you ask them nicely with a `--verbose' flag of some
+sort. They may also write text to a log file while they run. Your
+BuildStep can watch this output as it arrives, to keep track of how
+much progress the command has made. You can get a better measure of
+progress by counting the number of source files compiled or test cases
+run than by merely tracking the number of bytes that have been written
+to stdout. This improves the accuracy and the smoothness of the ETA
+display.
+
+ To accomplish this, you will need to attach a `LogObserver' to one
+of the log channels, most commonly to the "stdio" channel but perhaps
+to another one which tracks a log file. This observer is given all
+text as it is emitted from the command, and has the opportunity to
+parse that output incrementally. Once the observer has decided that
+some event has occurred (like a source file being compiled), it can
+use the `setProgress' method to tell the BuildStep about the progress
+that this event represents.
+
+ There are a number of pre-built `LogObserver' classes that you can
+choose from (defined in `buildbot.process.buildstep'), and of course
+you can subclass them to add further customization. The
+`LogLineObserver' class handles the grunt work of buffering and
+scanning for end-of-line delimiters, allowing your parser to operate
+on complete stdout/stderr lines. (Lines longer than a set maximum
+length are dropped; the maximum defaults to 16384 bytes, but you can
+change it by calling `setMaxLineLength()' on your `LogLineObserver'
+instance. Use `sys.maxint' for effective infinity.)
+
+ For example, let's take a look at the `TrialTestCaseCounter',
+which is used by the Trial step to count test cases as they are run.
+As Trial executes, it emits lines like the following:
+
+ buildbot.test.test_config.ConfigTest.testDebugPassword ... [OK]
+ buildbot.test.test_config.ConfigTest.testEmpty ... [OK]
+ buildbot.test.test_config.ConfigTest.testIRC ... [FAIL]
+ buildbot.test.test_config.ConfigTest.testLocks ... [OK]
+
+ When the tests are finished, trial emits a long line of "======"
+and then some lines which summarize the tests that failed. We want to
+avoid parsing these trailing lines, because their format is less
+well-defined than the "[OK]" lines.
+
+ The parser class looks like this:
+
+ from buildbot.process.buildstep import LogLineObserver
+
+ class TrialTestCaseCounter(LogLineObserver):
+ _line_re = re.compile(r'^([\w\.]+) \.\.\. \[([^\]]+)\]$')
+ numTests = 0
+ finished = False
+
+ def outLineReceived(self, line):
+ if self.finished:
+ return
+ if line.startswith("=" * 40):
+ self.finished = True
+ return
+
+ m = self._line_re.search(line.strip())
+ if m:
+ testname, result = m.groups()
+ self.numTests += 1
+ self.step.setProgress('tests', self.numTests)
+
+ This parser only pays attention to stdout, since that's where trial
+writes the progress lines. It has a mode flag named `finished' to
+ignore everything after the "====" marker, and a scary-looking
+regular expression to match each line while hopefully ignoring other
+messages that might get displayed as the test runs.
+
+ Each time it identifies a test has been completed, it increments
+its counter and delivers the new progress value to the step with
+`self.step.setProgress'. This class is specifically measuring
+progress along the "tests" metric, in units of test cases (as opposed
+to other kinds of progress like the "output" metric, which measures
+in units of bytes). The Progress-tracking code uses each progress
+metric separately to come up with an overall completion percentage
+and an ETA value.
+
+ To connect this parser into the `Trial' BuildStep,
+`Trial.__init__' ends with the following clause:
+
+ # this counter will feed Progress along the 'test cases' metric
+ counter = TrialTestCaseCounter()
+ self.addLogObserver('stdio', counter)
+ self.progressMetrics += ('tests',)
+
+ This creates a TrialTestCaseCounter and tells the step that the
+counter wants to watch the "stdio" log. The observer is automatically
+given a reference to the step in its `.step' attribute.
+
+A Somewhat Whimsical Example
+----------------------------
+
+Let's say that we've got some snazzy new unit-test framework called
+Framboozle. It's the hottest thing since sliced bread. It slices, it
+dices, it runs unit tests like there's no tomorrow. Plus if your unit
+tests fail, you can use its name for a Web 2.1 startup company, make
+millions of dollars, and hire engineers to fix the bugs for you, while
+you spend your afternoons lazily hang-gliding along a scenic pacific
+beach, blissfully unconcerned about the state of your tests.(1)
+
+ To run a Framboozle-enabled test suite, you just run the
+'framboozler' command from the top of your source code tree. The
+'framboozler' command emits a bunch of stuff to stdout, but the most
+interesting bit is that it emits the line "FNURRRGH!" every time it
+finishes running a test case(2). You'd like to have a test-case
+counting LogObserver that watches for these lines and counts them,
+because counting them will help the buildbot more accurately
+calculate how long the build will take, and this will let you know
+exactly how long you can sneak out of the office for your
+hang-gliding lessons without anyone noticing that you're gone.
+
+ This will involve writing a new BuildStep (probably named
+"Framboozle") which inherits from ShellCommand. The BuildStep class
+definition itself will look something like this:
+
+ # START
+ from buildbot.steps.shell import ShellCommand
+ from buildbot.process.buildstep import LogLineObserver
+
+ class FNURRRGHCounter(LogLineObserver):
+ numTests = 0
+ def outLineReceived(self, line):
+ if "FNURRRGH!" in line:
+ self.numTests += 1
+ self.step.setProgress('tests', self.numTests)
+
+ class Framboozle(ShellCommand):
+ command = ["framboozler"]
+
+ def __init__(self, **kwargs):
+ ShellCommand.__init__(self, **kwargs) # always upcall!
+         counter = FNURRRGHCounter()
+ self.addLogObserver('stdio', counter)
+ self.progressMetrics += ('tests',)
+ # FINISH
+
+ So that's the code that we want to wind up using. How do we
+actually deploy it?
+
+ You have a couple of different options.
+
+ Option 1: The simplest technique is to simply put this text
+(everything from START to FINISH) in your master.cfg file, somewhere
+before the BuildFactory definition where you actually use it in a
+clause like:
+
+ f = BuildFactory()
+ f.addStep(SVN(svnurl="stuff"))
+ f.addStep(Framboozle())
+
+ Remember that master.cfg is secretly just a python program with one
+job: populating the BuildmasterConfig dictionary. And python programs
+are allowed to define as many classes as they like. So you can define
+classes and use them in the same file, just as long as the class is
+defined before some other code tries to use it.
+
+ This is easy, and it keeps the point of definition very close to
+the point of use, and whoever replaces you after that unfortunate
+hang-gliding accident will appreciate being able to easily figure out
+what the heck this stupid "Framboozle" step is doing anyways. The
+downside is that every time you reload the config file, the Framboozle
+class will get redefined, which means that the buildmaster will think
+that you've reconfigured all the Builders that use it, even though
+nothing changed. Bleh.
+
+ Option 2: Instead, we can put this code in a separate file, and
+import it into the master.cfg file just like we would the normal
+buildsteps like ShellCommand and SVN.
+
+ Create a directory named ~/lib/python, put everything from START to
+FINISH in ~/lib/python/framboozle.py, and run your buildmaster using:
+
+ PYTHONPATH=~/lib/python buildbot start MASTERDIR
+
+ or use the `Makefile.buildbot' to control the way `buildbot start'
+works. Or add something like this to something like your ~/.bashrc or
+~/.bash_profile or ~/.cshrc:
+
+ export PYTHONPATH=~/lib/python
+
+ Once we've done this, our master.cfg can look like:
+
+ from framboozle import Framboozle
+ f = BuildFactory()
+ f.addStep(SVN(svnurl="stuff"))
+ f.addStep(Framboozle())
+
+ or:
+
+ import framboozle
+ f = BuildFactory()
+ f.addStep(SVN(svnurl="stuff"))
+ f.addStep(framboozle.Framboozle())
+
+ (check out the python docs for details about how "import" and
+"from A import B" work).
+
+ What we've done here is to tell python that every time it handles
+an "import" statement for some named module, it should look in our
+~/lib/python/ for that module before it looks anywhere else. After our
+directories, it will try in a bunch of standard directories too
+(including the one where buildbot is installed). By setting the
+PYTHONPATH environment variable, you can add directories to the front
+of this search list.
+
+ Python knows that once it "import"s a file, it doesn't need to
+re-import it again. This means that reconfiguring the buildmaster
+(with "buildbot reconfig", for example) won't make it think the
+Framboozle class has changed every time, so the Builders that use it
+will not be spuriously restarted. On the other hand, you either have
+to start your buildmaster in a slightly weird way, or you have to
+modify your environment to set the PYTHONPATH variable.
+
+ Option 3: Install this code into a standard python library
+directory
+
+ Find out what your python's standard include path is by asking it:
+
+ 80:warner@luther% python
+ Python 2.4.4c0 (#2, Oct 2 2006, 00:57:46)
+ [GCC 4.1.2 20060928 (prerelease) (Debian 4.1.1-15)] on linux2
+ Type "help", "copyright", "credits" or "license" for more information.
+ >>> import sys
+ >>> import pprint
+ >>> pprint.pprint(sys.path)
+ ['',
+ '/usr/lib/python24.zip',
+ '/usr/lib/python2.4',
+ '/usr/lib/python2.4/plat-linux2',
+ '/usr/lib/python2.4/lib-tk',
+ '/usr/lib/python2.4/lib-dynload',
+ '/usr/local/lib/python2.4/site-packages',
+ '/usr/lib/python2.4/site-packages',
+ '/usr/lib/python2.4/site-packages/Numeric',
+ '/var/lib/python-support/python2.4',
+ '/usr/lib/site-python']
+
+ In this case, putting the code into
+/usr/local/lib/python2.4/site-packages/framboozle.py would work just
+fine. We can use the same master.cfg "import framboozle" statement as
+in Option 2. By putting it in a standard include directory (instead of
+the decidedly non-standard ~/lib/python), we don't even have to set
+PYTHONPATH to anything special. The downside is that you probably have
+to be root to write to one of those standard include directories.
+
+ Option 4: Submit the code for inclusion in the Buildbot
+distribution
+
+ Make a fork of buildbot on http://github.com/djmitche/buildbot or
+post a patch in a bug at http://buildbot.net. In either case, post a
+note about your patch to the mailing list, so others can provide
+feedback and, eventually, commit it.
+
+ from buildbot.steps import framboozle
+ f = BuildFactory()
+ f.addStep(SVN(svnurl="stuff"))
+ f.addStep(framboozle.Framboozle())
+
+ And then you don't even have to install framboozle.py anywhere on
+your system, since it will ship with Buildbot. You don't have to be
+root, you don't have to set PYTHONPATH. But you do have to make a
+good case for Framboozle being worth going into the main
+distribution, you'll probably have to provide docs and some unit test
+cases, you'll need to figure out what kind of beer the author likes,
+and then you'll have to wait until the next release. But in some
+environments, all this is easier than getting root on your
+buildmaster box, so the tradeoffs may actually be worth it.
+
+ Putting the code in master.cfg (1) makes it available to that
+buildmaster instance. Putting it in a file in a personal library
+directory (2) makes it available for any buildmasters you might be
+running. Putting it in a file in a system-wide shared library
+directory (3) makes it available for any buildmasters that anyone on
+that system might be running. Getting it into the buildbot's upstream
+repository (4) makes it available for any buildmasters that anyone in
+the world might be running. It's all a matter of how widely you want
+to deploy that new class.
+
+ ---------- Footnotes ----------
+
+ (1) framboozle.com is still available. Remember, I get 10% :).
+
+ (2) Framboozle gets very excited about running unit tests.
+
+
+File: buildbot.info, Node: BuildStep URLs, Prev: Adding LogObservers, Up: Writing New BuildSteps
+
+6.1.10.5 BuildStep URLs
+.......................
+
+Each BuildStep has a collection of "links". Like its collection of
+LogFiles, each link has a name and a target URL. The web status page
+creates HREFs for each link in the same box as it does for LogFiles,
+except that the target of the link is the external URL instead of an
+internal link to a page that shows the contents of the LogFile.
+
+ These external links can be used to point at build information
+hosted on other servers. For example, the test process might produce
+an intricate description of which tests passed and failed, or some
+sort of code coverage data in HTML form, or a PNG or GIF image with a
+graph of memory usage over time. The external link can provide an
+easy way for users to navigate from the buildbot's status page to
+these external web sites or file servers. Note that the step itself is
+responsible for ensuring that there will be a document available at
+the given URL (perhaps by using `scp' to copy the HTML output to a
+`~/public_html/' directory on a remote web server). Calling `addURL'
+does not magically populate a web server.
+
+ To set one of these links, the BuildStep should call the `addURL'
+method with the name of the link and the target URL. Multiple URLs can
+be set.
+
+ In this example, we assume that the `make test' command causes a
+collection of HTML files to be created and put somewhere on the
+coverage.example.org web server, in a filename that incorporates the
+build number.
+
+ class TestWithCodeCoverage(BuildStep):
+ command = ["make", "test",
+ WithProperties("buildnum=%s", "buildnumber")]
+
+ def createSummary(self, log):
+ buildnumber = self.getProperty("buildnumber")
+ url = "http://coverage.example.org/builds/%s.html" % buildnumber
+ self.addURL("coverage", url)
+
+ You might also want to extract the URL from some special message
+output by the build process itself:
+
+ class TestWithCodeCoverage(BuildStep):
+ command = ["make", "test",
+ WithProperties("buildnum=%s", "buildnumber")]
+
+ def createSummary(self, log):
+ output = StringIO(log.getText())
+ for line in output.readlines():
+ if line.startswith("coverage-url:"):
+ url = line[len("coverage-url:"):].strip()
+ self.addURL("coverage", url)
+ return
+
+ Note that a build process which emits both stdout and stderr might
+cause this line to be split or interleaved between other lines. It
+might be necessary to restrict the getText() call to only stdout with
+something like this:
+
+ output = StringIO("".join([c[1]
+ for c in log.getChunks()
+ if c[0] == LOG_CHANNEL_STDOUT]))
+
+ Of course if the build is run under a PTY, then stdout and stderr
+will be merged before the buildbot ever sees them, so such
+interleaving will be unavoidable.
+
+
+File: buildbot.info, Node: Interlocks, Next: Build Factories, Prev: Build Steps, Up: Build Process
+
+6.2 Interlocks
+==============
+
+Until now, we assumed that a master can run builds at any slave
+whenever needed or desired. Sometimes, you want to enforce
+additional constraints on builds. For reasons like limited network
+bandwidth, old slave machines, or a self-willed data base server, you
+may want to limit the number of builds (or build steps) that can
+access a resource.
+
+ The mechanism used by Buildbot is known as the read/write lock.(1)
+It allows either many readers or a single writer but not a
+combination of readers and writers. The general lock has been
+modified and extended for use in Buildbot. Firstly, the general lock
+allows an infinite number of readers. In Buildbot, we often want to
+put an upper limit on the number of readers, for example allowing two
+out of five possible builds at the same time. To do this, the lock
+counts the number of active readers. Secondly, the terms _read mode_
+and _write mode_ are confusing in Buildbot context. They have been
+replaced by _counting mode_ (since the lock counts them) and
+_exclusive mode_. As a result of these changes, locks in Buildbot
+allow a number of builds (up to some fixed number) in counting mode,
+or they allow one build in exclusive mode.
+
+ Often, not all slaves are equal. To allow for this situation,
+Buildbot allows to have a separate upper limit on the count for each
+slave. In this way, you can have at most 3 concurrent builds at a
+fast slave, 2 at a slightly older slave, and 1 at all other slaves.
+
+ The final thing you can specify when you introduce a new lock is
+its scope. Some constraints are global - they must be enforced over
+all slaves. Other constraints are local to each slave. A _master
+lock_ is used for the global constraints. You can ensure for example
+that at most one build (of all builds running at all slaves) accesses
+the data base server. With a _slave lock_ you can add a limit local
+to each slave. With such a lock, you can for example enforce an upper
+limit to the number of active builds at a slave, like above.
+
+ Time for a few examples. Below a master lock is defined to protect
+a data base, and a slave lock is created to limit the number of
+builds at each slave.
+
+ from buildbot import locks
+
+ db_lock = locks.MasterLock("database")
+ build_lock = locks.SlaveLock("slave_builds",
+ maxCount = 1,
+ maxCountForSlave = { 'fast': 3, 'new': 2 })
+
+ After importing locks from buildbot, `db_lock' is defined to be a
+master lock. The `"database"' string is used for uniquely identifying
+the lock. At the next line, a slave lock called `build_lock' is
+created. It is identified by the `"slave_builds"' string. Since the
+requirements of the lock are a bit more complicated, two optional
+arguments are also specified. The `maxCount' parameter sets the
+default limit for builds in counting mode to `1'. For the slave
+called `'fast'' however, we want to have at most three builds, and
+for the slave called `'new'' the upper limit is two builds running at
+the same time.
+
+ The next step is using the locks in builds. Buildbot allows a
+lock to be used during an entire build (from beginning to end), or
+only during a single build step. In the latter case, the lock is
+claimed for use just before the step starts, and released again when
+the step ends. To prevent deadlocks,(2) it is not possible to claim
+or release locks at other times.
+
+ To use locks, you should add them with a `locks' argument. Each
+use of a lock is either in counting mode (that is, possibly shared
+with other builds) or in exclusive mode. A build or build step
+proceeds only when it has acquired all locks. If a build or step
+needs a lot of locks, it may be starved(3) by other builds that need
+fewer locks.
+
+ To illustrate use of locks, a few examples.
+
+ from buildbot import locks
+ from buildbot.steps import source, shell
+ from buildbot.process import factory
+
+ db_lock = locks.MasterLock("database")
+ build_lock = locks.SlaveLock("slave_builds",
+ maxCount = 1,
+ maxCountForSlave = { 'fast': 3, 'new': 2 })
+
+ f = factory.BuildFactory()
+ f.addStep(source.SVN(svnurl="http://example.org/svn/Trunk"))
+ f.addStep(shell.ShellCommand(command="make all"))
+ f.addStep(shell.ShellCommand(command="make test",
+ locks=[db_lock.access('exclusive')]))
+
+ b1 = {'name': 'full1', 'slavename': 'fast', 'builddir': 'f1', 'factory': f,
+ 'locks': [build_lock.access('counting')] }
+
+ b2 = {'name': 'full2', 'slavename': 'new', 'builddir': 'f2', 'factory': f,
+ 'locks': [build_lock.access('counting')] }
+
+ b3 = {'name': 'full3', 'slavename': 'old', 'builddir': 'f3', 'factory': f,
+ 'locks': [build_lock.access('counting')] }
+
+ b4 = {'name': 'full4', 'slavename': 'other', 'builddir': 'f4', 'factory': f,
+ 'locks': [build_lock.access('counting')] }
+
+ c['builders'] = [b1, b2, b3, b4]
+
+ Here we have four builders `b1', `b2', `b3', and `b4'. Each builder
+performs the same checkout, make, and test build step sequence. We
+want to enforce that at most one test step is executed between all
+slaves due to restrictions with the data base server. This is done by
+adding the `locks=' parameter with the third step. It takes a list of
+locks with their access mode. In this case only the `db_lock' is
+needed. The exclusive access mode is used to ensure there is at most
+one slave that executes the test step.
+
+ In addition to exclusive accessing the data base, we also want
+slaves to stay responsive even under the load of a large number of
+builds being triggered. For this purpose, the slave lock called
+`build_lock' is defined. Since the restraint holds for entire builds,
+the lock is specified in the builder with `'locks':
+[build_lock.access('counting')]'.
+
+ ---------- Footnotes ----------
+
+ (1) See http://en.wikipedia.org/wiki/Read/write_lock_pattern for
+more information.
+
+ (2) Deadlock is the situation where two or more slaves each hold a
+lock in exclusive mode, and in addition want to claim the lock held by
+the other slave exclusively as well. Since locks allow at most one
+exclusive user, both slaves will wait forever.
+
+ (3) Starving is the situation that only a few locks are available,
+and they are immediately grabbed by another build. As a result, it
+may take a long time before all locks needed by the starved build are
+free at the same time.
+
+
+File: buildbot.info, Node: Build Factories, Prev: Interlocks, Up: Build Process
+
+6.3 Build Factories
+===================
+
+Each Builder is equipped with a "build factory", which is responsible
+for producing the actual `Build' objects that perform each build.
+This factory is created in the configuration file, and attached to a
+Builder through the `factory' element of its dictionary.
+
+ The standard `BuildFactory' object creates `Build' objects by
+default. These Builds will each execute a collection of BuildSteps in
+a fixed sequence. Each step can affect the results of the build, but
+in general there is little intelligence to tie the different steps
+together. You can create subclasses of `Build' to implement more
+sophisticated build processes, and then use a subclass of
+`BuildFactory' (or simply set the `buildClass' attribute) to create
+instances of your new Build subclass.
+
+* Menu:
+
+* BuildStep Objects::
+* BuildFactory::
+* Process-Specific build factories::
+
+
+File: buildbot.info, Node: BuildStep Objects, Next: BuildFactory, Prev: Build Factories, Up: Build Factories
+
+6.3.1 BuildStep Objects
+-----------------------
+
+The steps used by these builds are all subclasses of `BuildStep'.
+The standard ones provided with Buildbot are documented later, *Note
+Build Steps::. You can also write your own subclasses to use in
+builds.
+
+ The basic behavior for a `BuildStep' is to:
+
+ * run for a while, then stop
+
+ * possibly invoke some RemoteCommands on the attached build slave
+
+ * possibly produce a set of log files
+
+ * finish with a status described by one of four values defined in
+ buildbot.status.builder: SUCCESS, WARNINGS, FAILURE, SKIPPED
+
+ * provide a list of short strings to describe the step
+
+ * define a color (generally green, orange, or red) with which the
+ step should be displayed
+
+ More sophisticated steps may produce additional information and
+provide it to later build steps, or store it in the factory to provide
+to later builds.
+
+* Menu:
+
+* BuildFactory Attributes::
+* Quick builds::
+
+
+File: buildbot.info, Node: BuildFactory, Next: Process-Specific build factories, Prev: BuildStep Objects, Up: Build Factories
+
+6.3.2 BuildFactory
+------------------
+
+The default `BuildFactory', provided in the
+`buildbot.process.factory' module, contains an internal list of
+"BuildStep specifications": a list of `(step_class, kwargs)' tuples
+for each. These specification tuples are constructed when the config
+file is read, by asking the instances passed to `addStep' for their
+subclass and arguments.
+
+ When asked to create a Build, the `BuildFactory' puts a copy of
+the list of step specifications into the new Build object. When the
+Build is actually started, these step specifications are used to
+create the actual set of BuildSteps, which are then executed one at a
+time. This serves to give each Build an independent copy of each step.
+For example, a build which consists of a CVS checkout followed by a
+`make build' would be constructed as follows:
+
+ from buildbot.steps import source, shell
+ from buildbot.process import factory
+
+ f = factory.BuildFactory()
+ f.addStep(source.CVS(cvsroot=CVSROOT, cvsmodule="project", mode="update"))
+ f.addStep(shell.Compile(command=["make", "build"]))
+
+ (To support config files from buildbot-0.7.5 and earlier,
+`addStep' also accepts the `f.addStep(shell.Compile,
+command=["make","build"])' form, although its use is discouraged
+because then the `Compile' step doesn't get to validate or complain
+about its arguments until build time. The modern pass-by-instance
+approach allows this validation to occur while the config file is
+being loaded, where the admin has a better chance of noticing
+problems).
+
+ It is also possible to pass a list of steps into the
+`BuildFactory' when it is created. Using `addStep' is usually
+simpler, but there are cases where it is more convenient to create
+the list of steps ahead of time:
+
+ from buildbot.steps import source, shell
+ from buildbot.process import factory
+
+ all_steps = [source.CVS(cvsroot=CVSROOT, cvsmodule="project", mode="update"),
+ shell.Compile(command=["make", "build"]),
+ ]
+ f = factory.BuildFactory(all_steps)
+
+ Each step can affect the build process in the following ways:
+
+ * If the step's `haltOnFailure' attribute is True, then a failure
+ in the step (i.e. if it completes with a result of FAILURE) will
+ cause the whole build to be terminated immediately: no further
+ steps will be executed, with the exception of steps with
+ `alwaysRun' set to True. `haltOnFailure' is useful for setup
+ steps upon which the rest of the build depends: if the CVS
+ checkout or `./configure' process fails, there is no point in
+ trying to compile or test the resulting tree.
+
+ * If the step's `alwaysRun' attribute is True, then it will always
+ be run, regardless of if previous steps have failed. This is
+ useful for cleanup steps that should always be run to return the
+ build directory or build slave into a good state.
+
+ * If the `flunkOnFailure' or `flunkOnWarnings' flag is set, then a
+ result of FAILURE or WARNINGS will mark the build as a whole as
+ FAILED. However, the remaining steps will still be executed.
+ This is appropriate for things like multiple testing steps: a
+ failure in any one of them will indicate that the build has
+ failed, however it is still useful to run them all to completion.
+
+ * Similarly, if the `warnOnFailure' or `warnOnWarnings' flag is
+ set, then a result of FAILURE or WARNINGS will mark the build as
+ having WARNINGS, and the remaining steps will still be executed.
+ This may be appropriate for certain kinds of optional build or
+ test steps. For example, a failure experienced while building
+ documentation files should be made visible with a WARNINGS
+ result but not be serious enough to warrant marking the whole
+ build with a FAILURE.
+
+
+ In addition, each Step produces its own results, may create
+logfiles, etc. However only the flags described above have any effect
+on the build as a whole.
+
+ The pre-defined BuildSteps like `CVS' and `Compile' have
+reasonably appropriate flags set on them already. For example, without
+a source tree there is no point in continuing the build, so the `CVS'
+class has the `haltOnFailure' flag set to True. Look in
+`buildbot/steps/*.py' to see how the other Steps are marked.
+
+ Each Step is created with an additional `workdir' argument that
+indicates where its actions should take place. This is specified as a
+subdirectory of the slave builder's base directory, with a default
+value of `build'. This is only implemented as a step argument (as
+opposed to simply being a part of the base directory) because the
+CVS/SVN steps need to perform their checkouts from the parent
+directory.
+
+* Menu:
+
+* BuildFactory Attributes::
+* Quick builds::
+
+
+File: buildbot.info, Node: BuildFactory Attributes, Next: Quick builds, Prev: BuildFactory, Up: BuildFactory
+
+6.3.2.1 BuildFactory Attributes
+...............................
+
+Some attributes from the BuildFactory are copied into each Build.
+
+`useProgress'
+ (defaults to True): if True, the buildmaster keeps track of how
+ long each step takes, so it can provide estimates of how long
+ future builds will take. If builds are not expected to take a
+ consistent amount of time (such as incremental builds in which a
+ random set of files are recompiled or tested each time), this
+ should be set to False to inhibit progress-tracking.
+
+
+
+File: buildbot.info, Node: Quick builds, Prev: BuildFactory Attributes, Up: BuildFactory
+
+6.3.2.2 Quick builds
+....................
+
+The difference between a "full build" and a "quick build" is that
+quick builds are generally done incrementally, starting with the tree
+where the previous build was performed. That simply means that the
+source-checkout step should be given a `mode='update'' flag, to do
+the source update in-place.
+
+ In addition to that, the `useProgress' flag should be set to
+False. Incremental builds will (or at least they ought to) compile as
+few files as necessary, so they will take an unpredictable amount of
+time to run. Therefore it would be misleading to claim to predict how
+long the build will take.
+
+
+File: buildbot.info, Node: Process-Specific build factories, Prev: BuildFactory, Up: Build Factories
+
+6.3.3 Process-Specific build factories
+--------------------------------------
+
+Many projects use one of a few popular build frameworks to simplify
+the creation and maintenance of Makefiles or other compilation
+structures. Buildbot provides several pre-configured BuildFactory
+subclasses which let you build these projects with a minimum of fuss.
+
+* Menu:
+
+* GNUAutoconf::
+* CPAN::
+* Python distutils::
+* Python/Twisted/trial projects::
+
+
+File: buildbot.info, Node: GNUAutoconf, Next: CPAN, Prev: Process-Specific build factories, Up: Process-Specific build factories
+
+6.3.3.1 GNUAutoconf
+...................
+
+GNU Autoconf (http://www.gnu.org/software/autoconf/) is a software
+portability tool, intended to make it possible to write programs in C
+(and other languages) which will run on a variety of UNIX-like
+systems. Most GNU software is built using autoconf. It is frequently
+used in combination with GNU automake. These tools both encourage a
+build process which usually looks like this:
+
+ % CONFIG_ENV=foo ./configure --with-flags
+ % make all
+ % make check
+ # make install
+
+ (except of course the Buildbot always skips the `make install'
+part).
+
+ The Buildbot's `buildbot.process.factory.GNUAutoconf' factory is
+designed to build projects which use GNU autoconf and/or automake. The
+configuration environment variables, the configure flags, and command
+lines used for the compile and test are all configurable, in general
+the default values will be suitable.
+
+ Example:
+
+ # use the s() convenience function defined earlier
+ f = factory.GNUAutoconf(source=s(step.SVN, svnurl=URL, mode="copy"),
+ flags=["--disable-nls"])
+
+ Required Arguments:
+
+`source'
+ This argument must be a step specification tuple that provides a
+ BuildStep to generate the source tree.
+
+ Optional Arguments:
+
+`configure'
+ The command used to configure the tree. Defaults to
+ `./configure'. Accepts either a string or a list of shell argv
+ elements.
+
+`configureEnv'
+ The environment used for the initial configuration step. This
+ accepts a dictionary which will be merged into the buildslave's
+ normal environment. This is commonly used to provide things like
+ `CFLAGS="-O2 -g"' (to turn off debug symbols during the compile).
+ Defaults to an empty dictionary.
+
+`configureFlags'
+ A list of flags to be appended to the argument list of the
+ configure command. This is commonly used to enable or disable
+ specific features of the autoconf-controlled package, like
+ `["--without-x"]' to disable windowing support. Defaults to an
+ empty list.
+
+`compile'
+ this is a shell command or list of argv values which is used to
+ actually compile the tree. It defaults to `make all'. If set to
+ None, the compile step is skipped.
+
+`test'
+ this is a shell command or list of argv values which is used to
+ run the tree's self-tests. It defaults to `make check'. If set to
+ None, the test step is skipped.
+
+
+
+File: buildbot.info, Node: CPAN, Next: Python distutils, Prev: GNUAutoconf, Up: Process-Specific build factories
+
+6.3.3.2 CPAN
+............
+
+Most Perl modules available from the CPAN (http://www.cpan.org/)
+archive use the `MakeMaker' module to provide configuration, build,
+and test services. The standard build routine for these modules looks
+like:
+
+ % perl Makefile.PL
+ % make
+ % make test
+ # make install
+
+ (except again Buildbot skips the install step)
+
+ Buildbot provides a `CPAN' factory to compile and test these
+projects.
+
+ Arguments:
+`source'
+ (required): A step specification tuple, like that used by
+ GNUAutoconf.
+
+`perl'
+ A string which specifies the `perl' executable to use. Defaults
+ to just `perl'.
+
+
+
+File: buildbot.info, Node: Python distutils, Next: Python/Twisted/trial projects, Prev: CPAN, Up: Process-Specific build factories
+
+6.3.3.3 Python distutils
+........................
+
+Most Python modules use the `distutils' package to provide
+configuration and build services. The standard build process looks
+like:
+
+ % python ./setup.py build
+ % python ./setup.py install
+
+ Unfortunately, although Python provides a standard unit-test
+framework named `unittest', to the best of my knowledge `distutils'
+does not provide a standardized target to run such unit tests. (Please
+let me know if I'm wrong, and I will update this factory.)
+
+ The `Distutils' factory provides support for running the build
+part of this process. It accepts the same `source=' parameter as the
+other build factories.
+
+ Arguments:
+`source'
+ (required): A step specification tuple, like that used by
+ GNUAutoconf.
+
+`python'
+ A string which specifies the `python' executable to use. Defaults
+ to just `python'.
+
+`test'
+ Provides a shell command which runs unit tests. This accepts
+ either a string or a list. The default value is None, which
+ disables the test step (since there is no common default command
+ to run unit tests in distutils modules).
+
+
+
+File: buildbot.info, Node: Python/Twisted/trial projects, Prev: Python distutils, Up: Process-Specific build factories
+
+6.3.3.4 Python/Twisted/trial projects
+.....................................
+
+Twisted provides a unit test tool named `trial' which provides a few
+improvements over Python's built-in `unittest' module. Many python
+projects which use Twisted for their networking or application
+services also use trial for their unit tests. These modules are
+usually built and tested with something like the following:
+
+ % python ./setup.py build
+ % PYTHONPATH=build/lib.linux-i686-2.3 trial -v PROJECTNAME.test
+ % python ./setup.py install
+
+ Unfortunately, the `build/lib' directory into which the
+built/copied .py files are placed is actually architecture-dependent,
+and I do not yet know of a simple way to calculate its value. For many
+projects it is sufficient to import their libraries "in place" from
+the tree's base directory (`PYTHONPATH=.').
+
+ In addition, the PROJECTNAME value where the test files are
+located is project-dependent: it is usually just the project's
+top-level library directory, as common practice suggests the unit test
+files are put in the `test' sub-module. This value cannot be guessed,
+the `Trial' class must be told where to find the test files.
+
+ The `Trial' class provides support for building and testing
+projects which use distutils and trial. If the test module name is
+specified, trial will be invoked. The library path used for testing
+can also be set.
+
+ One advantage of trial is that the Buildbot happens to know how to
+parse trial output, letting it identify which tests passed and which
+ones failed. The Buildbot can then provide fine-grained reports about
+how many tests have failed, when individual tests fail when they had
+been passing previously, etc.
+
+ Another feature of trial is that you can give it a series of source
+.py files, and it will search them for special `test-case-name' tags
+that indicate which test cases provide coverage for that file. Trial
+can then run just the appropriate tests. This is useful for quick
+builds, where you want to only run the test cases that cover the
+changed functionality.
+
+ Arguments:
+`source'
+ (required): A step specification tuple, like that used by
+ GNUAutoconf.
+
+`buildpython'
+ A list (argv array) of strings which specifies the `python'
+ executable to use when building the package. Defaults to just
+ `['python']'. It may be useful to add flags here, to suppress
+ warnings during compilation of extension modules. This list is
+ extended with `['./setup.py', 'build']' and then executed in a
+ ShellCommand.
+
+`testpath'
+ Provides a directory to add to `PYTHONPATH' when running the unit
+ tests, if tests are being run. Defaults to `.' to include the
+ project files in-place. The generated build library is frequently
+ architecture-dependent, but may simply be `build/lib' for
+ pure-python modules.
+
+`trialpython'
+ Another list of strings used to build the command that actually
+ runs trial. This is prepended to the contents of the `trial'
+ argument below. It may be useful to add `-W' flags here to
+ supress warnings that occur while tests are being run. Defaults
+ to an empty list, meaning `trial' will be run without an explicit
+ interpreter, which is generally what you want if you're using
+ `/usr/bin/trial' instead of, say, the `./bin/trial' that lives
+ in the Twisted source tree.
+
+`trial'
+ provides the name of the `trial' command. It is occasionally
+ useful to use an alternate executable, such as `trial2.2' which
+ might run the tests under an older version of Python. Defaults to
+ `trial'.
+
+`tests'
+ Provides a module name or names which contain the unit tests for
+ this project. Accepts a string, typically `PROJECTNAME.test', or
+ a list of strings. Defaults to None, indicating that no tests
+ should be run. You must either set this or `useTestCaseNames' to
+ do anything useful with the Trial factory.
+
+`useTestCaseNames'
+ Tells the Step to provide the names of all changed .py files to
+ trial, so it can look for test-case-name tags and run just the
+ matching test cases. Suitable for use in quick builds. Defaults
+ to False.
+
+`randomly'
+ If `True', tells Trial (with the `--random=0' argument) to run
+ the test cases in random order, which sometimes catches subtle
+ inter-test dependency bugs. Defaults to `False'.
+
+`recurse'
+ If `True', tells Trial (with the `--recurse' argument) to look
+ in all subdirectories for additional test cases. It isn't clear
+ to me how this works, but it may be useful to deal with the
+ unknown-PROJECTNAME problem described above, and is currently
+ used in the Twisted buildbot to accommodate the fact that test
+ cases are now distributed through multiple
+ twisted.SUBPROJECT.test directories.
+
+
+ Unless one of `tests' or `useTestCaseNames' is set, no
+tests will be run.
+
+ Some quick examples follow. Most of these examples assume that the
+target python code (the "code under test") can be reached directly
+from the root of the target tree, rather than being in a `lib/'
+subdirectory.
+
+ # Trial(source, tests="toplevel.test") does:
+ # python ./setup.py build
+ # PYTHONPATH=. trial -to toplevel.test
+
+ # Trial(source, tests=["toplevel.test", "other.test"]) does:
+ # python ./setup.py build
+ # PYTHONPATH=. trial -to toplevel.test other.test
+
+ # Trial(source, useTestCaseNames=True) does:
+ # python ./setup.py build
+ # PYTHONPATH=. trial -to --testmodule=foo/bar.py.. (from Changes)
+
+ # Trial(source, buildpython=["python2.3", "-Wall"], tests="foo.tests"):
+ # python2.3 -Wall ./setup.py build
+ # PYTHONPATH=. trial -to foo.tests
+
+ # Trial(source, trialpython="python2.3", trial="/usr/bin/trial",
+ # tests="foo.tests") does:
+ # python ./setup.py build
+ # PYTHONPATH=. python2.3 /usr/bin/trial -to foo.tests
+
+ # For running trial out of the tree being tested (only useful when the
+ # tree being built is Twisted itself):
+ # Trial(source, trialpython=["python2.3", "-Wall"], trial="./bin/trial",
+ # tests="foo.tests") does:
+ # python2.3 -Wall ./setup.py build
+ # PYTHONPATH=. python2.3 -Wall ./bin/trial -to foo.tests
+
+ If the output directory of `./setup.py build' is known, you can
+pull the python code from the built location instead of the source
+directories. This should be able to handle variations in where the
+source comes from, as well as accommodating binary extension modules:
+
+ # Trial(source,tests="toplevel.test",testpath='build/lib.linux-i686-2.3')
+ # does:
+ # python ./setup.py build
+ # PYTHONPATH=build/lib.linux-i686-2.3 trial -to toplevel.test
+
+
+File: buildbot.info, Node: Status Delivery, Next: Command-line tool, Prev: Build Process, Up: Top
+
+7 Status Delivery
+*****************
+
+More details are available in the docstrings for each class, use a
+command like `pydoc buildbot.status.html.WebStatus' to see them.
+Most status delivery objects take a `categories=' argument, which can
+contain a list of "category" names: in this case, it will only show
+status for Builders that are in one of the named categories.
+
+ (implementor's note: each of these objects should be a
+service.MultiService which will be attached to the BuildMaster object
+when the configuration is processed. They should use
+`self.parent.getStatus()' to get access to the top-level IStatus
+object, either inside `startService' or later. They may call
+`status.subscribe()' in `startService' to receive notifications of
+builder events, in which case they must define `builderAdded' and
+related methods. See the docstrings in `buildbot/interfaces.py' for
+full details.)
+
+* Menu:
+
+* WebStatus::
+* MailNotifier::
+* IRC Bot::
+* PBListener::
+* Writing New Status Plugins::
+
+
+File: buildbot.info, Node: WebStatus, Next: MailNotifier, Prev: Status Delivery, Up: Status Delivery
+
+7.1 WebStatus
+=============
+
+The `buildbot.status.html.WebStatus' status target runs a small web
+server inside the buildmaster. You can point a browser at this web
+server and retrieve information about every build the buildbot knows
+about, as well as find out what the buildbot is currently working on.
+
+ The first page you will see is the "Welcome Page", which contains
+links to all the other useful pages. This page is simply served from
+the `public_html/index.html' file in the buildmaster's base
+directory, where it is created by the `buildbot create-master'
+command along with the rest of the buildmaster.
+
+ The most complex resource provided by `WebStatus' is the
+"Waterfall Display", which shows a time-based chart of events. This
+somewhat-busy display provides detailed information about all steps of
+all recent builds, and provides hyperlinks to look at individual build
+logs and source changes. By simply reloading this page on a regular
+basis, you will see a complete description of everything the buildbot
+is currently working on.
+
+ There are also pages with more specialized information. For
+example, there is a page which shows the last 20 builds performed by
+the buildbot, one line each. Each line is a link to detailed
+information about that build. By adding query arguments to the URL
+used to reach this page, you can narrow the display to builds that
+involved certain branches, or which ran on certain Builders. These
+pages are described in great detail below.
+
+ When the buildmaster is created, a subdirectory named
+`public_html/' is created in its base directory. By default,
+`WebStatus' will serve files from this directory: for example, when a
+user points their browser at the buildbot's `WebStatus' URL, they
+will see the contents of the `public_html/index.html' file. Likewise,
+`public_html/robots.txt', `public_html/buildbot.css', and
+`public_html/favicon.ico' are all useful things to have in there.
+The first time a buildmaster is created, the `public_html' directory
+is populated with some sample files, which you will probably want to
+customize for your own project. These files are all static: the
+buildbot does not modify them in any way as it serves them to HTTP
+clients.
+
+ from buildbot.status.html import WebStatus
+ c['status'].append(WebStatus(8080))
+
+ Note that the initial robots.txt file has Disallow lines for all of
+the dynamically-generated buildbot pages, to discourage web spiders
+and search engines from consuming a lot of CPU time as they crawl
+through the entire history of your buildbot. If you are running the
+buildbot behind a reverse proxy, you'll probably need to put the
+robots.txt file somewhere else (at the top level of the parent web
+server), and replace the URL prefixes in it with more suitable values.
+
+ If you would like to use an alternative root directory, add the
+`public_html=..' option to the `WebStatus' creation:
+
+ c['status'].append(WebStatus(8080, public_html="/var/www/buildbot"))
+
+ In addition, if you are familiar with twisted.web _Resource
+Trees_, you can write code to add additional pages at places inside
+this web space. Just use `webstatus.putChild' to place these
+resources.
+
+ The following section describes the special URLs and the status
+views they provide.
+
+* Menu:
+
+* WebStatus Configuration Parameters::
+* Buildbot Web Resources::
+* XMLRPC server::
+* HTML Waterfall::
+
+
+File: buildbot.info, Node: WebStatus Configuration Parameters, Next: Buildbot Web Resources, Prev: WebStatus, Up: WebStatus
+
+7.1.1 WebStatus Configuration Parameters
+----------------------------------------
+
+The most common way to run a `WebStatus' is on a regular TCP port. To
+do this, just pass in the TCP port number when you create the
+`WebStatus' instance; this is called the `http_port' argument:
+
+ from buildbot.status.html import WebStatus
+ c['status'].append(WebStatus(8080))
+
+ The `http_port' argument is actually a "strports specification"
+for the port that the web server should listen on. This can be a
+simple port number, or a string like `tcp:8080:interface=127.0.0.1'
+(to limit connections to the loopback interface, and therefore to
+clients running on the same host)(1).
+
+ If instead (or in addition) you provide the `distrib_port'
+argument, a twisted.web distributed server will be started either on a
+TCP port (if `distrib_port' is like `"tcp:12345"') or more likely on
+a UNIX socket (if `distrib_port' is like `"unix:/path/to/socket"').
+
+ The `distrib_port' option means that, on a host with a
+suitably-configured twisted-web server, you do not need to consume a
+separate TCP port for the buildmaster's status web page. When the web
+server is constructed with `mktap web --user', URLs that point to
+`http://host/~username/' are dispatched to a sub-server that is
+listening on a UNIX socket at `~username/.twisted-web-pb'. On such a
+system, it is convenient to create a dedicated `buildbot' user, then
+set `distrib_port' to
+`"unix:"+os.path.expanduser("~/.twistd-web-pb")'. This configuration
+will make the HTML status page available at `http://host/~buildbot/'
+. Suitable URL remapping can make it appear at
+`http://host/buildbot/', and the right virtual host setup can even
+place it at `http://buildbot.host/' .
+
+ The other `WebStatus' argument is `allowForce'. If set to True,
+then the web page will provide a "Force Build" button that allows
+visitors to manually trigger builds. This is useful for developers to
+re-run builds that have failed because of intermittent problems in
+the test suite, or because of libraries that were not installed at
+the time of the previous build. You may not wish to allow strangers
+to cause a build to run: in that case, set this to False to remove
+these buttons. The default value is False.
+
+ ---------- Footnotes ----------
+
+ (1) It may even be possible to provide SSL access by using a
+specification like
+`"ssl:12345:privateKey=mykey.pem:certKey=cert.pem"', but this is
+completely untested
+
diff --git a/buildbot/docs/buildbot.info-2 b/buildbot/docs/buildbot.info-2
new file mode 100644
index 0000000..bb7089a
--- /dev/null
+++ b/buildbot/docs/buildbot.info-2
@@ -0,0 +1,1654 @@
+This is buildbot.info, produced by makeinfo version 4.11 from
+buildbot.texinfo.
+
+This is the BuildBot manual.
+
+ Copyright (C) 2005,2006 Brian Warner
+
+ Copying and distribution of this file, with or without
+modification, are permitted in any medium without royalty provided
+the copyright notice and this notice are preserved.
+
+
+File: buildbot.info, Node: Buildbot Web Resources, Next: XMLRPC server, Prev: WebStatus Configuration Parameters, Up: WebStatus
+
+7.1.2 Buildbot Web Resources
+----------------------------
+
+Certain URLs are "magic", and the pages they serve are created by
+code in various classes in the `buildbot.status.web' package instead
+of being read from disk. The most common way to access these pages is
+for the buildmaster admin to write or modify the `index.html' page to
+contain links to them. Of course other project web pages can contain
+links to these buildbot pages as well.
+
+ Many pages can be modified by adding query arguments to the URL.
+For example, a page which shows the results of the most recent build
+normally does this for all builders at once. But by appending
+"?builder=i386" to the end of the URL, the page will show only the
+results for the "i386" builder. When used in this way, you can add
+multiple "builder=" arguments to see multiple builders. Remembering
+that URL query arguments are separated _from each other_ with
+ampersands, a URL that ends in "?builder=i386&builder=ppc" would show
+builds for just those two Builders.
+
+ The `branch=' query argument can be used on some pages. This
+filters the information displayed by that page down to only the builds
+or changes which involved the given branch. Use `branch=trunk' to
+reference the trunk: if you aren't intentionally using branches,
+you're probably using trunk. Multiple `branch=' arguments can be used
+to examine multiple branches at once (so appending
+`?branch=foo&branch=bar' to the URL will show builds involving either
+branch). No `branch=' arguments means to show builds and changes for
+all branches.
+
+ Some pages may include the Builder name or the build number in the
+main part of the URL itself. For example, a page that describes Build
+#7 of the "i386" builder would live at `/builders/i386/builds/7'.
+
+ The table below lists all of the internal pages and the URLs that
+can be used to access them.
+
+ NOTE: of the pages described here, `/slave_status_timeline' and
+`/last_build' have not yet been implemented, and `/xmlrpc' has only a
+few methods so far. Future releases will improve this.
+
+`/waterfall'
+ This provides a chronologically-oriented display of the activity
+ of all builders. It is the same display used by the Waterfall
+ display.
+
+ By adding one or more "builder=" query arguments, the Waterfall
+ is restricted to only showing information about the given
+ Builders. By adding one or more "branch=" query arguments, the
+ display is restricted to showing information about the given
+ branches. In addition, adding one or more "category=" query
+ arguments to the URL will limit the display to Builders that
+ were defined with one of the given categories.
+
+ A 'show_events=true' query argument causes the display to include
+ non-Build events, like slaves attaching and detaching, as well as
+ reconfiguration events. 'show_events=false' hides these events.
+ The default is to show them.
+
+ The `last_time=', `first_time=', and `show_time=' arguments will
+ control what interval of time is displayed. The default is to
+ show the latest events, but these can be used to look at earlier
+ periods in history. The `num_events=' argument also provides a
+ limit on the size of the displayed page.
+
+ The Waterfall has references to resources many of the other
+ portions of the URL space: `/builders' for access to individual
+ builds, `/changes' for access to information about source code
+ changes, etc.
+
+`/rss'
+     This provides an RSS feed summarizing all failed builds. The same
+ query-arguments used by 'waterfall' can be added to filter the
+ feed output.
+
+`/atom'
+ This provides an atom feed summarizing all failed builds. The
+ same query-arguments used by 'waterfall' can be added to filter
+ the feed output.
+
+`/builders/$BUILDERNAME'
+ This describes the given Builder, and provides buttons to force
+ a build.
+
+`/builders/$BUILDERNAME/builds/$BUILDNUM'
+ This describes a specific Build.
+
+`/builders/$BUILDERNAME/builds/$BUILDNUM/steps/$STEPNAME'
+ This describes a specific BuildStep.
+
+`/builders/$BUILDERNAME/builds/$BUILDNUM/steps/$STEPNAME/logs/$LOGNAME'
+ This provides an HTML representation of a specific logfile.
+
+`/builders/$BUILDERNAME/builds/$BUILDNUM/steps/$STEPNAME/logs/$LOGNAME/text'
+ This returns the logfile as plain text, without any HTML coloring
+ markup. It also removes the "headers", which are the lines that
+ describe what command was run and what the environment variable
+     settings were like. This may be useful for saving to disk and
+ feeding to tools like 'grep'.
+
+`/changes'
+ This provides a brief description of the ChangeSource in use
+ (*note Change Sources::).
+
+`/changes/NN'
+ This shows detailed information about the numbered Change: who
+ was the author, what files were changed, what revision number
+ was represented, etc.
+
+`/buildslaves'
+ This summarizes each BuildSlave, including which Builders are
+ configured to use it, whether the buildslave is currently
+ connected or not, and host information retrieved from the
+ buildslave itself.
+
+`/one_line_per_build'
+ This page shows one line of text for each build, merging
+ information from all Builders(1). Each line specifies the name
+ of the Builder, the number of the Build, what revision it used,
+ and a summary of the results. Successful builds are in green,
+ while failing builds are in red. The date and time of the build
+ are added to the right-hand edge of the line. The lines are
+ ordered by build finish timestamp.
+
+ One or more `builder=' or `branch=' arguments can be used to
+ restrict the list. In addition, a `numbuilds=' argument will
+ control how many lines are displayed (20 by default).
+
+`/one_box_per_builder'
+ This page shows a small table, with one box for each Builder,
+ containing the results of the most recent Build. It does not
+ show the individual steps, or the current status. This is a
+ simple summary of buildbot status: if this page is green, then
+ all tests are passing.
+
+ As with `/one_line_per_build', this page will also honor
+ `builder=' and `branch=' arguments.
+
+`/about'
+ This page gives a brief summary of the Buildbot itself: software
+ version, versions of some libraries that the Buildbot depends
+ upon, etc. It also contains a link to the buildbot.net home page.
+
+`/slave_status_timeline'
+ (note: this page has not yet been implemented)
+
+ This provides a chronological display of configuration and
+ operational events: master startup/shutdown, slave
+ connect/disconnect, and config-file changes. When a config-file
+ reload is abandoned because of an error in the config file, the
+ error is displayed on this page.
+
+ This page does not show any builds.
+
+`/last_build/$BUILDERNAME/status.png'
+ This returns a PNG image that describes the results of the most
+ recent build, which can be referenced in an IMG tag by other
+ pages, perhaps from a completely different site. Use it as you
+ would a webcounter.
+
+
+ There are also a set of web-status resources that are intended for
+use by other programs, rather than humans.
+
+`/xmlrpc'
+ This runs an XML-RPC server which can be used to query status
+ information about various builds. See *note XMLRPC server:: for
+ more details.
+
+
+ ---------- Footnotes ----------
+
+ (1) Apparently this is the same way http://buildd.debian.org
+displays build status
+
+
+File: buildbot.info, Node: XMLRPC server, Next: HTML Waterfall, Prev: Buildbot Web Resources, Up: WebStatus
+
+7.1.3 XMLRPC server
+-------------------
+
+When using WebStatus, the buildbot runs an XML-RPC server at
+`/xmlrpc' that can be used by other programs to query build status.
+The following table lists the methods that can be invoked using this
+interface.
+
+`getAllBuildsInInterval(start, stop)'
+ Return a list of builds that have completed after the 'start'
+ timestamp and before the 'stop' timestamp. This looks at all
+ Builders.
+
+ The timestamps are integers, interpreted as standard unix
+ timestamps (seconds since epoch).
+
+ Each Build is returned as a tuple in the form: `(buildername,
+ buildnumber, build_end, branchname, revision, results, text)'
+
+ The buildnumber is an integer. 'build_end' is an integer (seconds
+ since epoch) specifying when the build finished.
+
+ The branchname is a string, which may be an empty string to
+ indicate None (i.e. the default branch). The revision is a
+ string whose meaning is specific to the VC system in use, and
+ comes from the 'got_revision' build property. The results are
+ expressed as a string, one of ('success', 'warnings', 'failure',
+ 'exception'). The text is a list of short strings that ought to
+ be joined by spaces and include slightly more data about the
+ results of the build.
+
+`getBuild(builder_name, build_number)'
+ Return information about a specific build.
+
+ This returns a dictionary (aka "struct" in XMLRPC terms) with
+ complete information about the build. It does not include the
+ contents of the log files, but it has just about everything else.
+
+
+
+File: buildbot.info, Node: HTML Waterfall, Prev: XMLRPC server, Up: WebStatus
+
+7.1.4 HTML Waterfall
+--------------------
+
+The `Waterfall' status target, deprecated as of 0.7.6, is a subset of
+the regular `WebStatus' resource (*note WebStatus::). This section
+(and the `Waterfall' class itself) will be removed from a future
+release.
+
+ from buildbot.status import html
+ w = html.WebStatus(http_port=8080)
+ c['status'].append(w)
+
+
+File: buildbot.info, Node: MailNotifier, Next: IRC Bot, Prev: WebStatus, Up: Status Delivery
+
+7.2 MailNotifier
+================
+
+The buildbot can also send email when builds finish. The most common
+use of this is to tell developers when their change has caused the
+build to fail. It is also quite common to send a message to a mailing
+list (usually named "builds" or similar) about every build.
+
+ The `MailNotifier' status target is used to accomplish this. You
+configure it by specifying who mail should be sent to, under what
+circumstances mail should be sent, and how to deliver the mail. It can
+be configured to only send out mail for certain builders, and only
+send messages when the build fails, or when the builder transitions
+from success to failure. It can also be configured to include various
+build logs in each message.
+
+ By default, the message will be sent to the Interested Users list
+(*note Doing Things With Users::), which includes all developers who
+made changes in the build. You can add additional recipients with the
+extraRecipients argument.
+
+ Each MailNotifier sends mail to a single set of recipients. To send
+different kinds of mail to different recipients, use multiple
+MailNotifiers.
+
+ The following simple example will send an email upon the
+completion of each build, to just those developers whose Changes were
+included in the build. The email contains a description of the Build,
+its results, and URLs where more information can be obtained.
+
+ from buildbot.status.mail import MailNotifier
+ mn = MailNotifier(fromaddr="buildbot@example.org", lookup="example.org")
+ c['status'].append(mn)
+
+ To get a simple one-message-per-build (say, for a mailing list),
+use the following form instead. This form does not send mail to
+individual developers (and thus does not need the `lookup=' argument,
+explained below), instead it only ever sends mail to the "extra
+recipients" named in the arguments:
+
+ mn = MailNotifier(fromaddr="buildbot@example.org",
+ sendToInterestedUsers=False,
+ extraRecipients=['listaddr@example.org'])
+
+   In some cases it is desirable to have different information than
+what is provided in a standard MailNotifier message. For this purpose
+MailNotifier provides the argument customMesg (a function) which
+allows for the creation of messages with unique content.
+
+ For example it can be useful to display the last few lines of a
+log file and recent changes when a builder fails:
+
+ def message(attrs):
+ logLines = 10
+ text = list()
+ text.append("STATUS: %s" % attrs['result'].title())
+ text.append("")
+ text.extend([c.asText() for c in attrs['changes']])
+ text.append("")
+ name, url, lines = attrs['logs'][-1]
+ text.append("Last %d lines of '%s':" % (logLines, name))
+ text.extend(["\t%s\n" % line for line in lines[len(lines)-logLines:]])
+ text.append("")
+ text.append("-buildbot")
+ return ("\n".join(text), 'plain')
+
+ mn = MailNotifier(fromaddr="buildbot@example.org",
+ sendToInterestedUsers=False,
+ mode='problem',
+ extraRecipients=['listaddr@example.org'],
+ customMesg=message)
+
+ A customMesg function takes a single dict argument (see below) and
+returns a tuple of strings. The first string is the complete text of
+the message and the second is the message type ('plain' or 'html').
+The 'html' type should be used when generating an HTML message:
+
+ def message(attrs):
+ logLines = 10
+ text = list()
+ text.append('<h4>Build status %s.</h4>' % (attrs['result'].title()))
+ if attrs['changes']:
+ text.append('<h4>Recent Changes:</h4>')
+ text.extend([c.asHTML() for c in attrs['changes']])
+ name, url, lines = attrs['logs'][-1]
+ text.append('<h4>Last %d lines of "%s":</h4>' % (logLines, name))
+ text.append('<p>')
+ text.append('<br>'.join([line for line in lines[len(lines)-logLines:]]))
+ text.append('</p>')
+ text.append('<br><br>')
+ text.append('Full log at: %s' % url)
+ text.append('<br><br>')
+ text.append('<b>-buildbot</b>')
+ return ('\n'.join(text), 'html')
+
+MailNotifier arguments
+======================
+
+`fromaddr'
+ The email address to be used in the 'From' header.
+
+`sendToInterestedUsers'
+ (boolean). If True (the default), send mail to all of the
+ Interested Users. If False, only send mail to the
+ extraRecipients list.
+
+`extraRecipients'
+ (tuple of strings). A list of email addresses to which messages
+ should be sent (in addition to the InterestedUsers list, which
+ includes any developers who made Changes that went into this
+ build). It is a good idea to create a small mailing list and
+ deliver to that, then let subscribers come and go as they please.
+
+`subject'
+ (string). A string to be used as the subject line of the message.
+ `%(builder)s' will be replaced with the name of the builder which
+ provoked the message.
+
+`mode'
+ (string). Default to 'all'. One of:
+ `all'
+          Send mail about all builds, both passing and failing
+
+ `failing'
+ Only send mail about builds which fail
+
+ `problem'
+ Only send mail about a build which failed when the previous
+ build has passed. If your builds usually pass, then this
+ will only send mail when a problem occurs.
+
+`builders'
+ (list of strings). A list of builder names for which mail should
+ be sent. Defaults to None (send mail for all builds). Use either
+ builders or categories, but not both.
+
+`categories'
+ (list of strings). A list of category names to serve status
+ information for. Defaults to None (all categories). Use either
+ builders or categories, but not both.
+
+`addLogs'
+ (boolean). If True, include all build logs as attachments to the
+ messages. These can be quite large. This can also be set to a
+ list of log names, to send a subset of the logs. Defaults to
+ False.
+
+`relayhost'
+ (string). The host to which the outbound SMTP connection should
+ be made. Defaults to 'localhost'
+
+`lookup'
+ (implementor of `IEmailLookup'). Object which provides
+ IEmailLookup, which is responsible for mapping User names (which
+ come from the VC system) into valid email addresses. If not
+ provided, the notifier will only be able to send mail to the
+ addresses in the extraRecipients list. Most of the time you can
+ use a simple Domain instance. As a shortcut, you can pass as
+ string: this will be treated as if you had provided Domain(str).
+ For example, lookup='twistedmatrix.com' will allow mail to be
+ sent to all developers whose SVN usernames match their
+ twistedmatrix.com account names. See buildbot/status/mail.py for
+ more details.
+
+`customMesg'
+     This is an optional function that can be used to generate a
+     custom mail message. The customMesg function takes a single dict
+     and must return a tuple containing the message text and type
+     ('html' or 'plain'). Below is a list of available keys in the
+     dict passed to customMesg:
+
+ `builderName'
+ (str) Name of the builder that generated this event.
+
+ `projectName'
+ (str) Name of the project.
+
+ `mode'
+ (str) Mode set in MailNotifier. (failing, passing, problem).
+
+ `result'
+ (str) Builder result as a string. 'success', 'warnings',
+ 'failure', 'skipped', or 'exception'
+
+ `buildURL'
+ (str) URL to build page.
+
+ `buildbotURL'
+ (str) URL to buildbot main page.
+
+ `buildText'
+ (str) Build text from build.getText().
+
+ `slavename'
+ (str) Slavename.
+
+ `reason'
+ (str) Build reason from build.getReason().
+
+ `responsibleUsers'
+ (List of str) List of responsible users.
+
+ `branch'
+ (str) Name of branch used. If no SourceStamp exists branch
+ is an empty string.
+
+ `revision'
+ (str) Name of revision used. If no SourceStamp exists
+ revision is an empty string.
+
+ `patch'
+ (str) Name of patch used. If no SourceStamp exists patch is
+ an empty string.
+
+ `changes'
+ (list of objs) List of change objects from SourceStamp. A
+ change object has the following useful information:
+ `who'
+ (str) who made this change
+
+ `revision'
+ (str) what VC revision is this change
+
+ `branch'
+ (str) on what branch did this change occur
+
+ `when'
+ (str) when did this change occur
+
+ `files'
+ (list of str) what files were affected in this change
+
+ `comments'
+               (str) comments regarding the change.
+ The functions asText and asHTML return a list of strings
+ with the above information formatted.
+
+ `logs'
+ (List of Tuples) List of tuples where each tuple contains
+ the log name, log url, and log contents as a list of
+ strings.
+
+
+File: buildbot.info, Node: IRC Bot, Next: PBListener, Prev: MailNotifier, Up: Status Delivery
+
+7.3 IRC Bot
+===========
+
+The `buildbot.status.words.IRC' status target creates an IRC bot
+which will attach to certain channels and be available for status
+queries. It can also be asked to announce builds as they occur, or be
+told to shut up.
+
+ from buildbot.status import words
+ irc = words.IRC("irc.example.org", "botnickname",
+ channels=["channel1", "channel2"],
+ password="mysecretpassword",
+ notify_events={
+ 'exception': 1,
+ 'successToFailure': 1,
+ 'failureToSuccess': 1,
+ })
+ c['status'].append(irc)
+
+ Take a look at the docstring for `words.IRC' for more details on
+configuring this service. The `password' argument, if provided, will
+be sent to Nickserv to claim the nickname: some IRC servers will not
+allow clients to send private messages until they have logged in with
+a password.
+
+ To use the service, you address messages at the buildbot, either
+normally (`botnickname: status') or with private messages (`/msg
+botnickname status'). The buildbot will respond in kind.
+
+ Some of the commands currently available:
+
+`list builders'
+ Emit a list of all configured builders
+
+`status BUILDER'
+ Announce the status of a specific Builder: what it is doing
+ right now.
+
+`status all'
+ Announce the status of all Builders
+
+`watch BUILDER'
+ If the given Builder is currently running, wait until the Build
+ is finished and then announce the results.
+
+`last BUILDER'
+ Return the results of the last build to run on the given Builder.
+
+`join CHANNEL'
+ Join the given IRC channel
+
+`leave CHANNEL'
+ Leave the given IRC channel
+
+`notify on|off|list EVENT'
+ Report events relating to builds. If the command is issued as a
+ private message, then the report will be sent back as a private
+ message to the user who issued the command. Otherwise, the
+ report will be sent to the channel. Available events to be
+ notified are:
+
+ `started'
+ A build has started
+
+ `finished'
+ A build has finished
+
+ `success'
+ A build finished successfully
+
+ `failed'
+ A build failed
+
+ `exception'
+          A build generated an exception
+
+ `xToY'
+ The previous build was x, but this one is Y, where x and Y
+ are each one of success, warnings, failure, exception
+ (except Y is capitalized). For example: successToFailure
+ will notify if the previous build was successful, but this
+ one failed
+
+`help COMMAND'
+ Describe a command. Use `help commands' to get a list of known
+ commands.
+
+`source'
+ Announce the URL of the Buildbot's home page.
+
+`version'
+ Announce the version of this Buildbot.
+
+ Additionally, the config file may specify default notification
+options as shown in the example earlier.
+
+   If the `allowForce=True' option was used, some additional commands
+will be available:
+
+`force build BUILDER REASON'
+ Tell the given Builder to start a build of the latest code. The
+ user requesting the build and REASON are recorded in the Build
+ status. The buildbot will announce the build's status when it
+ finishes.
+
+`stop build BUILDER REASON'
+ Terminate any running build in the given Builder. REASON will be
+ added to the build status to explain why it was stopped. You
+ might use this if you committed a bug, corrected it right away,
+ and don't want to wait for the first build (which is destined to
+ fail) to complete before starting the second (hopefully fixed)
+ build.
+
+
+File: buildbot.info, Node: PBListener, Next: Writing New Status Plugins, Prev: IRC Bot, Up: Status Delivery
+
+7.4 PBListener
+==============
+
+ import buildbot.status.client
+ pbl = buildbot.status.client.PBListener(port=int, user=str,
+ passwd=str)
+ c['status'].append(pbl)
+
+ This sets up a PB listener on the given TCP port, to which a
+PB-based status client can connect and retrieve status information.
+`buildbot statusgui' (*note statusgui::) is an example of such a
+status client. The `port' argument can also be a strports
+specification string.
+
+
+File: buildbot.info, Node: Writing New Status Plugins, Prev: PBListener, Up: Status Delivery
+
+7.5 Writing New Status Plugins
+==============================
+
+TODO: this needs a lot more examples
+
+ Each status plugin is an object which provides the
+`twisted.application.service.IService' interface, which creates a
+tree of Services with the buildmaster at the top [not strictly true].
+The status plugins are all children of an object which implements
+`buildbot.interfaces.IStatus', the main status object. From this
+object, the plugin can retrieve anything it wants about current and
+past builds. It can also subscribe to hear about new and upcoming
+builds.
+
+ Status plugins which only react to human queries (like the
+Waterfall display) never need to subscribe to anything: they are idle
+until someone asks a question, then wake up and extract the
+information they need to answer it, then they go back to sleep.
+Plugins which need to act spontaneously when builds complete (like
+the MailNotifier plugin) need to subscribe to hear about new builds.
+
+ If the status plugin needs to run network services (like the HTTP
+server used by the Waterfall plugin), they can be attached as Service
+children of the plugin itself, using the `IServiceCollection'
+interface.
+
+
+File: buildbot.info, Node: Command-line tool, Next: Resources, Prev: Status Delivery, Up: Top
+
+8 Command-line tool
+*******************
+
+The `buildbot' command-line tool can be used to start or stop a
+buildmaster or buildbot, and to interact with a running buildmaster.
+Some of its subcommands are intended for buildmaster admins, while
+some are for developers who are editing the code that the buildbot is
+monitoring.
+
+* Menu:
+
+* Administrator Tools::
+* Developer Tools::
+* Other Tools::
+* .buildbot config directory::
+
+
+File: buildbot.info, Node: Administrator Tools, Next: Developer Tools, Prev: Command-line tool, Up: Command-line tool
+
+8.1 Administrator Tools
+=======================
+
+The following `buildbot' sub-commands are intended for buildmaster
+administrators:
+
+create-master
+=============
+
+This creates a new directory and populates it with files that allow it
+to be used as a buildmaster's base directory.
+
+ buildbot create-master BASEDIR
+
+create-slave
+============
+
+This creates a new directory and populates it with files that let it
+be used as a buildslave's base directory. You must provide several
+arguments, which are used to create the initial `buildbot.tac' file.
+
+ buildbot create-slave BASEDIR MASTERHOST:PORT SLAVENAME PASSWORD
+
+start
+=====
+
+This starts a buildmaster or buildslave which was already created in
+the given base directory. The daemon is launched in the background,
+with events logged to a file named `twistd.log'.
+
+ buildbot start BASEDIR
+
+stop
+====
+
+This terminates the daemon (either buildmaster or buildslave) running
+in the given directory.
+
+ buildbot stop BASEDIR
+
+sighup
+======
+
+This sends a SIGHUP to the buildmaster running in the given directory,
+which causes it to re-read its `master.cfg' file.
+
+ buildbot sighup BASEDIR
+
+
+File: buildbot.info, Node: Developer Tools, Next: Other Tools, Prev: Administrator Tools, Up: Command-line tool
+
+8.2 Developer Tools
+===================
+
+These tools are provided for use by the developers who are working on
+the code that the buildbot is monitoring.
+
+* Menu:
+
+* statuslog::
+* statusgui::
+* try::
+
+
+File: buildbot.info, Node: statuslog, Next: statusgui, Prev: Developer Tools, Up: Developer Tools
+
+8.2.1 statuslog
+---------------
+
+ buildbot statuslog --master MASTERHOST:PORT
+
+ This command starts a simple text-based status client, one which
+just prints out a new line each time an event occurs on the
+buildmaster.
+
+ The `--master' option provides the location of the
+`buildbot.status.client.PBListener' status port, used to deliver
+build information to realtime status clients. The option is always in
+the form of a string, with hostname and port number separated by a
+colon (`HOSTNAME:PORTNUM'). Note that this port is _not_ the same as
+the slaveport (although a future version may allow the same port
+number to be used for both purposes). If you get an error message to
+the effect of "Failure: twisted.cred.error.UnauthorizedLogin:", this
+may indicate that you are connecting to the slaveport rather than a
+`PBListener' port.
+
+ The `--master' option can also be provided by the `masterstatus'
+name in `.buildbot/options' (*note .buildbot config directory::).
+
+
+File: buildbot.info, Node: statusgui, Next: try, Prev: statuslog, Up: Developer Tools
+
+8.2.2 statusgui
+---------------
+
+If you have set up a PBListener (*note PBListener::), you will be able
+to monitor your Buildbot using a simple Gtk+ application invoked with
+the `buildbot statusgui' command:
+
+ buildbot statusgui --master MASTERHOST:PORT
+
+ This command starts a simple Gtk+-based status client, which
+contains a few boxes for each Builder that change color as events
+occur. It uses the same `--master' argument as the `buildbot
+statuslog' command (*note statuslog::).
+
+
+File: buildbot.info, Node: try, Prev: statusgui, Up: Developer Tools
+
+8.2.3 try
+---------
+
+This lets a developer ask the question "What would happen if I
+committed this patch right now?". It runs the unit test suite (across
+multiple build platforms) on the developer's current code, allowing
+them to make sure they will not break the tree when they finally
+commit their changes.
+
+ The `buildbot try' command is meant to be run from within a
+developer's local tree, and starts by figuring out the base revision
+of that tree (what revision was current the last time the tree was
+updated), and a patch that can be applied to that revision of the tree
+to make it match the developer's copy. This (revision, patch) pair is
+then sent to the buildmaster, which runs a build with that
+SourceStamp. If you want, the tool will emit status messages as the
+builds run, and will not terminate until the first failure has been
+detected (or the last success).
+
+ There is an alternate form which accepts a pre-made patch file
+(typically the output of a command like 'svn diff'). This "-diff"
+form does not require a local tree to run from. See *Note try
+--diff::.
+
+ For this command to work, several pieces must be in place:
+
+TryScheduler
+============
+
+The buildmaster must have a `scheduler.Try' instance in the config
+file's `c['schedulers']' list. This lets the administrator control
+who may initiate these "trial" builds, which branches are eligible
+for trial builds, and which Builders should be used for them.
+
+ The `TryScheduler' has various means to accept build requests: all
+of them enforce more security than the usual buildmaster ports do.
+Any source code being built can be used to compromise the buildslave
+accounts, but in general that code must be checked out from the VC
+repository first, so only people with commit privileges can get
+control of the buildslaves. The usual force-build control channels can
+waste buildslave time but do not allow arbitrary commands to be
+executed by people who don't have those commit privileges. However,
+the source code patch that is provided with the trial build does not
+have to go through the VC system first, so it is important to make
+sure these builds cannot be abused by a non-committer to acquire as
+much control over the buildslaves as a committer has. Ideally, only
+developers who have commit access to the VC repository would be able
+to start trial builds, but unfortunately the buildmaster does not, in
+general, have access to VC system's user list.
+
+ As a result, the `TryScheduler' requires a bit more configuration.
+There are currently two ways to set this up:
+
+*jobdir (ssh)*
+ This approach creates a command queue directory, called the
+ "jobdir", in the buildmaster's working directory. The buildmaster
+ admin sets the ownership and permissions of this directory to
+ only grant write access to the desired set of developers, all of
+ whom must have accounts on the machine. The `buildbot try'
+ command creates a special file containing the source stamp
+ information and drops it in the jobdir, just like a standard
+ maildir. When the buildmaster notices the new file, it unpacks
+ the information inside and starts the builds.
+
+ The config file entries used by 'buildbot try' either specify a
+ local queuedir (for which write and mv are used) or a remote one
+ (using scp and ssh).
+
+ The advantage of this scheme is that it is quite secure, the
+ disadvantage is that it requires fiddling outside the buildmaster
+ config (to set the permissions on the jobdir correctly). If the
+ buildmaster machine happens to also house the VC repository,
+ then it can be fairly easy to keep the VC userlist in sync with
+ the trial-build userlist. If they are on different machines,
+ this will be much more of a hassle. It may also involve granting
+ developer accounts on a machine that would not otherwise require
+ them.
+
+ To implement this, the buildslave invokes 'ssh -l username host
+ buildbot tryserver ARGS', passing the patch contents over stdin.
+ The arguments must include the inlet directory and the revision
+ information.
+
+*user+password (PB)*
+ In this approach, each developer gets a username/password pair,
+ which are all listed in the buildmaster's configuration file.
+ When the developer runs `buildbot try', their machine connects
+ to the buildmaster via PB and authenticates themselves using
+ that username and password, then sends a PB command to start the
+ trial build.
+
+ The advantage of this scheme is that the entire configuration is
+ performed inside the buildmaster's config file. The
+ disadvantages are that it is less secure (while the "cred"
+ authentication system does not expose the password in plaintext
+ over the wire, it does not offer most of the other security
+ properties that SSH does). In addition, the buildmaster admin is
+ responsible for maintaining the username/password list, adding
+ and deleting entries as developers come and go.
+
+
+ For example, to set up the "jobdir" style of trial build, using a
+command queue directory of `MASTERDIR/jobdir' (and assuming that all
+your project developers were members of the `developers' unix group),
+you would first create that directory (with `mkdir MASTERDIR/jobdir
+MASTERDIR/jobdir/new MASTERDIR/jobdir/cur MASTERDIR/jobdir/tmp; chgrp
+developers MASTERDIR/jobdir MASTERDIR/jobdir/*; chmod g+rwx,o-rwx
+MASTERDIR/jobdir MASTERDIR/jobdir/*'), and then use the following
+scheduler in the buildmaster's config file:
+
+ from buildbot.scheduler import Try_Jobdir
+ s = Try_Jobdir("try1", ["full-linux", "full-netbsd", "full-OSX"],
+ jobdir="jobdir")
+ c['schedulers'] = [s]
+
+ Note that you must create the jobdir before telling the
+buildmaster to use this configuration, otherwise you will get an
+error. Also remember that the buildmaster must be able to read and
+write to the jobdir as well. Be sure to watch the `twistd.log' file
+(*note Logfiles::) as you start using the jobdir, to make sure the
+buildmaster is happy with it.
+
+ To use the username/password form of authentication, create a
+`Try_Userpass' instance instead. It takes the same `builderNames'
+argument as the `Try_Jobdir' form, but accepts an additional `port'
+argument (to specify the TCP port to listen on) and a `userpass' list
+of username/password pairs to accept. Remember to use good passwords
+for this: the security of the buildslave accounts depends upon it:
+
+ from buildbot.scheduler import Try_Userpass
+ s = Try_Userpass("try2", ["full-linux", "full-netbsd", "full-OSX"],
+ port=8031, userpass=[("alice","pw1"), ("bob", "pw2")] )
+ c['schedulers'] = [s]
+
+ Like most places in the buildbot, the `port' argument takes a
+strports specification. See `twisted.application.strports' for
+details.
+
+locating the master
+===================
+
+The `try' command needs to be told how to connect to the
+`TryScheduler', and must know which of the authentication approaches
+described above is in use by the buildmaster. You specify the
+approach by using `--connect=ssh' or `--connect=pb' (or `try_connect
+= 'ssh'' or `try_connect = 'pb'' in `.buildbot/options').
+
+ For the PB approach, the command must be given a `--master'
+argument (in the form HOST:PORT) that points to TCP port that you
+picked in the `Try_Userpass' scheduler. It also takes a `--username'
+and `--passwd' pair of arguments that match one of the entries in the
+buildmaster's `userpass' list. These arguments can also be provided
+as `try_master', `try_username', and `try_password' entries in the
+`.buildbot/options' file.
+
+ For the SSH approach, the command must be given `--tryhost',
+`--username', and optionally `--password' (TODO: really?) to get to
+the buildmaster host. It must also be given `--trydir', which points
+to the inlet directory configured above. The trydir can be relative
+to the user's home directory, but most of the time you will use an
+explicit path like `~buildbot/project/trydir'. These arguments can be
+provided in `.buildbot/options' as `try_host', `try_username',
+`try_password', and `try_dir'.
+
+ In addition, the SSH approach needs to connect to a PBListener
+status port, so it can retrieve and report the results of the build
+(the PB approach uses the existing connection to retrieve status
+information, so this step is not necessary). This requires a
+`--master' argument, or a `masterstatus' entry in `.buildbot/options',
+in the form of a HOSTNAME:PORT string.
+
+choosing the Builders
+=====================
+
+A trial build is performed on multiple Builders at the same time, and
+the developer gets to choose which Builders are used (limited to a set
+selected by the buildmaster admin with the TryScheduler's
+`builderNames=' argument). The set you choose will depend upon what
+your goals are: if you are concerned about cross-platform
+compatibility, you should use multiple Builders, one from each
+platform of interest. You might use just one builder if that platform
+has libraries or other facilities that allow better test coverage than
+what you can accomplish on your own machine, or faster test runs.
+
+ The set of Builders to use can be specified with multiple
+`--builder' arguments on the command line. It can also be specified
+with a single `try_builders' option in `.buildbot/options' that uses
+a list of strings to specify all the Builder names:
+
+ try_builders = ["full-OSX", "full-win32", "full-linux"]
+
+specifying the VC system
+========================
+
+The `try' command also needs to know how to take the developer's
+current tree and extract the (revision, patch) source-stamp pair.
+Each VC system uses a different process, so you start by telling the
+`try' command which VC system you are using, with an argument like
+`--vc=cvs' or `--vc=tla'. This can also be provided as `try_vc' in
+`.buildbot/options'.
+
+ The following names are recognized: `cvs' `svn' `baz' `tla' `hg'
+`darcs'
+
+finding the top of the tree
+===========================
+
+Some VC systems (notably CVS and SVN) track each directory
+more-or-less independently, which means the `try' command needs to
+move up to the top of the project tree before it will be able to
+construct a proper full-tree patch. To accomplish this, the `try'
+command will crawl up through the parent directories until it finds a
+marker file. The default name for this marker file is
+`.buildbot-top', so when you are using CVS or SVN you should `touch
+.buildbot-top' from the top of your tree before running `buildbot
+try'. Alternatively, you can use a filename like `ChangeLog' or
+`README', since many projects put one of these files in their
+top-most directory (and nowhere else). To set this filename, use
+`--try-topfile=ChangeLog', or set it in the options file with
+`try_topfile = 'ChangeLog''.
+
+ You can also manually set the top of the tree with
+`--try-topdir=~/trees/mytree', or `try_topdir = '~/trees/mytree''. If
+you use `try_topdir', in a `.buildbot/options' file, you will need a
+separate options file for each tree you use, so it may be more
+convenient to use the `try_topfile' approach instead.
+
+ Other VC systems which work on full projects instead of individual
+directories (tla, baz, darcs, monotone, mercurial, git) do not require
+`try' to know the top directory, so the `--try-topfile' and
+`--try-topdir' arguments will be ignored.
+
+ If the `try' command cannot find the top directory, it will abort
+with an error message.
+
+determining the branch name
+===========================
+
+Some VC systems record the branch information in a way that "try" can
+locate it, in particular Arch (both `tla' and `baz'). For the others,
+if you are using something other than the default branch, you will
+have to tell the buildbot which branch your tree is using. You can do
+this with either the `--branch' argument, or a `try_branch' entry in
+the `.buildbot/options' file.
+
+determining the revision and patch
+==================================
+
+Each VC system has a separate approach for determining the tree's base
+revision and computing a patch.
+
+`CVS'
+ `try' pretends that the tree is up to date. It converts the
+ current time into a `-D' time specification, uses it as the base
+ revision, and computes the diff between the upstream tree as of
+ that point in time versus the current contents. This works, more
+ or less, but requires that the local clock be in reasonably good
+ sync with the repository.
+
+`SVN'
+ `try' does a `svn status -u' to find the latest repository
+ revision number (emitted on the last line in the "Status against
+ revision: NN" message). It then performs an `svn diff -rNN' to
+ find out how your tree differs from the repository version, and
+ sends the resulting patch to the buildmaster. If your tree is not
+ up to date, this will result in the "try" tree being created with
+ the latest revision, then _backwards_ patches applied to bring it
+ "back" to the version you actually checked out (plus your actual
+ code changes), but this will still result in the correct tree
+ being used for the build.
+
+`baz'
+ `try' does a `baz tree-id' to determine the fully-qualified
+ version and patch identifier for the tree
+ (ARCHIVE/VERSION-patch-NN), and uses the VERSION-patch-NN
+ component as the base revision. It then does a `baz diff' to
+ obtain the patch.
+
+`tla'
+ `try' does a `tla tree-version' to get the fully-qualified
+ version identifier (ARCHIVE/VERSION), then takes the first line
+ of `tla logs --reverse' to figure out the base revision. Then it
+ does `tla changes --diffs' to obtain the patch.
+
+`Darcs'
+ `darcs changes --context' emits a text file that contains a list
+     of all patches back to and including the last tag that was made.
+     This
+ text file (plus the location of a repository that contains all
+ these patches) is sufficient to re-create the tree. Therefore
+ the contents of this "context" file _are_ the revision stamp for
+ a Darcs-controlled source tree.
+
+ So `try' does a `darcs changes --context' to determine what your
+ tree's base revision is, and then does a `darcs diff -u' to
+ compute the patch relative to that revision.
+
+`Mercurial'
+ `hg identify' emits a short revision ID (basically a truncated
+ SHA1 hash of the current revision's contents), which is used as
+ the base revision. `hg diff' then provides the patch relative to
+ that revision. For `try' to work, your working directory must
+ only have patches that are available from the same
+ remotely-available repository that the build process'
+ `step.Mercurial' will use.
+
+`Git'
+ `git branch -v' lists all the branches available in the local
+ repository along with the revision ID it points to and a short
+ summary of the last commit. The line containing the currently
+ checked out branch begins with '* ' (star and space) while all
+ the others start with ' ' (two spaces). `try' scans for this
+ line and extracts the branch name and revision from it. Then it
+ generates a diff against the base revision.
+
+
+waiting for results
+===================
+
+If you provide the `--wait' option (or `try_wait = True' in
+`.buildbot/options'), the `buildbot try' command will wait until your
+changes have either been proven good or bad before exiting. Unless
+you use the `--quiet' option (or `try_quiet=True'), it will emit a
+progress message every 60 seconds until the builds have completed.
+
+* Menu:
+
+* try --diff::
+
+
+File: buildbot.info, Node: try --diff, Prev: try, Up: try
+
+8.2.3.1 try -diff
+.................
+
+Sometimes you might have a patch from someone else that you want to
+submit to the buildbot. For example, a user may have created a patch
+to fix some specific bug and sent it to you by email. You've inspected
+the patch and suspect that it might do the job (and have at least
+confirmed that it doesn't do anything evil). Now you want to test it
+out.
+
+ One approach would be to check out a new local tree, apply the
+patch, run your local tests, then use "buildbot try" to run the tests
+on other platforms. An alternate approach is to use the `buildbot try
+--diff' form to have the buildbot test the patch without using a
+local tree.
+
+ This form takes a `--diff' argument which points to a file that
+contains the patch you want to apply. By default this patch will be
+applied to the TRUNK revision, but if you give the optional
+`--baserev' argument, a tree of the given revision will be used as a
+starting point instead of TRUNK.
+
+ You can also use `buildbot try --diff=-' to read the patch from
+stdin.
+
+ Each patch has a "patchlevel" associated with it. This indicates
+the number of slashes (and preceding pathnames) that should be
+stripped before applying the diff. This exactly corresponds to the
+`-p' or `--strip' argument to the `patch' utility. By default
+`buildbot try --diff' uses a patchlevel of 0, but you can override
+this with the `-p' argument.
+
+ When you use `--diff', you do not need to use any of the other
+options that relate to a local tree, specifically `--vc',
+`--try-topfile', or `--try-topdir'. These options will be ignored. Of
+course you must still specify how to get to the buildmaster (with
+`--connect', `--tryhost', etc).
+
+
+File: buildbot.info, Node: Other Tools, Next: .buildbot config directory, Prev: Developer Tools, Up: Command-line tool
+
+8.3 Other Tools
+===============
+
+These tools are generally used by buildmaster administrators.
+
+* Menu:
+
+* sendchange::
+* debugclient::
+
+
+File: buildbot.info, Node: sendchange, Next: debugclient, Prev: Other Tools, Up: Other Tools
+
+8.3.1 sendchange
+----------------
+
+This command is used to tell the buildmaster about source changes. It
+is intended to be used from within a commit script, installed on the
+VC server. It requires that you have a PBChangeSource (*note
+PBChangeSource::) running in the buildmaster (by being set in
+`c['change_source']').
+
+ buildbot sendchange --master MASTERHOST:PORT --username USER FILENAMES..
+
+ There are other (optional) arguments which can influence the
+`Change' that gets submitted:
+
+`--branch'
+ This provides the (string) branch specifier. If omitted, it
+ defaults to None, indicating the "default branch". All files
+ included in this Change must be on the same branch.
+
+`--category'
+ This provides the (string) category specifier. If omitted, it
+ defaults to None, indicating "no category". The category
+ property is used by Schedulers to filter what changes they
+ listen to.
+
+`--revision_number'
+ This provides a (numeric) revision number for the change, used
+ for VC systems that use numeric transaction numbers (like
+ Subversion).
+
+`--revision'
+ This provides a (string) revision specifier, for VC systems that
+ use strings (Arch would use something like patch-42 etc).
+
+`--revision_file'
+ This provides a filename which will be opened and the contents
+ used as the revision specifier. This is specifically for Darcs,
+ which uses the output of `darcs changes --context' as a revision
+ specifier. This context file can be a couple of kilobytes long,
+ spanning a couple lines per patch, and would be a hassle to pass
+ as a command-line argument.
+
+`--comments'
+ This provides the change comments as a single argument. You may
+ want to use `--logfile' instead.
+
+`--logfile'
+ This instructs the tool to read the change comments from the
+ given file. If you use `-' as the filename, the tool will read
+ the change comments from stdin.
+
+
+File: buildbot.info, Node: debugclient, Prev: sendchange, Up: Other Tools
+
+8.3.2 debugclient
+-----------------
+
+ buildbot debugclient --master MASTERHOST:PORT --passwd DEBUGPW
+
+ This launches a small Gtk+/Glade-based debug tool, connecting to
+the buildmaster's "debug port". This debug port shares the same port
+number as the slaveport (*note Setting the slaveport::), but the
+`debugPort' is only enabled if you set a debug password in the
+buildmaster's config file (*note Debug options::). The `--passwd'
+option must match the `c['debugPassword']' value.
+
+ `--master' can also be provided in `.debug/options' by the
+`master' key. `--passwd' can be provided by the `debugPassword' key.
+
+ The `Connect' button must be pressed before any of the other
+buttons will be active. This establishes the connection to the
+buildmaster. The other sections of the tool are as follows:
+
+`Reload .cfg'
+ Forces the buildmaster to reload its `master.cfg' file. This is
+ equivalent to sending a SIGHUP to the buildmaster, but can be
+ done remotely through the debug port. Note that it is a good
+ idea to be watching the buildmaster's `twistd.log' as you reload
+ the config file, as any errors which are detected in the config
+ file will be announced there.
+
+`Rebuild .py'
+ (not yet implemented). The idea here is to use Twisted's
+ "rebuild" facilities to replace the buildmaster's running code
+ with a new version. Even if this worked, it would only be used
+ by buildbot developers.
+
+`poke IRC'
+ This locates a `words.IRC' status target and causes it to emit a
+ message on all the channels to which it is currently connected.
+ This was used to debug a problem in which the buildmaster lost
+ the connection to the IRC server and did not attempt to
+ reconnect.
+
+`Commit'
+ This allows you to inject a Change, just as if a real one had
+ been delivered by whatever VC hook you are using. You can set
+ the name of the committed file and the name of the user who is
+ doing the commit. Optionally, you can also set a revision for
+ the change. If the revision you provide looks like a number, it
+ will be sent as an integer, otherwise it will be sent as a
+ string.
+
+`Force Build'
+ This lets you force a Builder (selected by name) to start a
+ build of the current source tree.
+
+`Currently'
+ (obsolete). This was used to manually set the status of the given
+ Builder, but the status-assignment code was changed in an
+ incompatible way and these buttons are no longer meaningful.
+
+
+
+File: buildbot.info, Node: .buildbot config directory, Prev: Other Tools, Up: Command-line tool
+
+8.4 .buildbot config directory
+==============================
+
+Many of the `buildbot' tools must be told how to contact the
+buildmaster that they interact with. This specification can be
+provided as a command-line argument, but most of the time it will be
+easier to set them in an "options" file. The `buildbot' command will
+look for a special directory named `.buildbot', starting from the
+current directory (where the command was run) and crawling upwards,
+eventually looking in the user's home directory. It will look for a
+file named `options' in this directory, and will evaluate it as a
+python script, looking for certain names to be set. You can just put
+simple `name = 'value'' pairs in this file to set the options.
+
+ For a description of the names used in this file, please see the
+documentation for the individual `buildbot' sub-commands. The
+following is a brief sample of what this file's contents could be.
+
+ # for status-reading tools
+ masterstatus = 'buildbot.example.org:12345'
+ # for 'sendchange' or the debug port
+ master = 'buildbot.example.org:18990'
+ debugPassword = 'eiv7Po'
+
+`masterstatus'
+ Location of the `client.PBListener' status port, used by
+ `statuslog' and `statusgui'.
+
+`master'
+ Location of the `debugPort' (for `debugclient'). Also the
+ location of the `pb.PBChangeSource' (for `sendchange'). Usually
+ shares the slaveport, but a future version may make it possible
+ to have these listen on a separate port number.
+
+`debugPassword'
+ Must match the value of `c['debugPassword']', used to protect the
+ debug port, for the `debugclient' command.
+
+`username'
+ Provides a default username for the `sendchange' command.
+
+
+ The following options are used by the `buildbot try' command
+(*note try::):
+
+`try_connect'
+ This specifies how the "try" command should deliver its request
+ to the buildmaster. The currently accepted values are "ssh" and
+ "pb".
+
+`try_builders'
+ Which builders should be used for the "try" build.
+
+`try_vc'
+ This specifies the version control system being used.
+
+`try_branch'
+ This indicates that the current tree is on a non-trunk branch.
+
+`try_topdir'
+
+`try_topfile'
+ Use `try_topdir' to explicitly indicate the top of your working
+ tree, or `try_topfile' to name a file that will only be found in
+ that top-most directory.
+
+`try_host'
+
+`try_username'
+
+`try_dir'
+ When try_connect is "ssh", the command will pay attention to
+ `try_host', `try_username', and `try_dir'.
+
+`try_username'
+
+`try_password'
+
+`try_master'
+ Instead, when `try_connect' is "pb", the command will pay
+ attention to `try_username', `try_password', and `try_master'.
+
+`try_wait'
+
+`masterstatus'
+ `try_wait' and `masterstatus' are used to ask the "try" command
+ to wait for the requested build to complete.
+
+
+
+File: buildbot.info, Node: Resources, Next: Developer's Appendix, Prev: Command-line tool, Up: Top
+
+9 Resources
+***********
+
+The Buildbot's home page is at `http://buildbot.sourceforge.net/'
+
+ For configuration questions and general discussion, please use the
+`buildbot-devel' mailing list. The subscription instructions and
+archives are available at
+`http://lists.sourceforge.net/lists/listinfo/buildbot-devel'
+
+
+File: buildbot.info, Node: Developer's Appendix, Next: Index of Useful Classes, Prev: Resources, Up: Top
+
+Developer's Appendix
+********************
+
+This appendix contains random notes about the implementation of the
+Buildbot, and is likely to only be of use to people intending to
+extend the Buildbot's internals.
+
+ The buildmaster consists of a tree of Service objects, which is
+shaped as follows:
+
+ BuildMaster
+ ChangeMaster (in .change_svc)
+ [IChangeSource instances]
+ [IScheduler instances] (in .schedulers)
+ BotMaster (in .botmaster)
+ [IBuildSlave instances]
+ [IStatusTarget instances] (in .statusTargets)
+
+ The BotMaster has a collection of Builder objects as values of its
+`.builders' dictionary.
+
+
+File: buildbot.info, Node: Index of Useful Classes, Next: Index of master.cfg keys, Prev: Developer's Appendix, Up: Top
+
+Index of Useful Classes
+***********************
+
+This is a list of all user-visible classes. There are the ones that
+are useful in `master.cfg', the buildmaster's configuration file.
+Classes that are not listed here are generally internal things that
+admins are unlikely to have much use for.
+
+Change Sources
+==============
+
+
+* Menu:
+
+* buildbot.changes.bonsaipoller.BonsaiPoller: BonsaiPoller. (line 6)
+* buildbot.changes.freshcvs.FreshCVSSource: CVSToys - PBService.
+ (line 6)
+* buildbot.changes.mail.BonsaiMaildirSource: BonsaiMaildirSource.
+ (line 6)
+* buildbot.changes.mail.FCMaildirSource: FCMaildirSource. (line 6)
+* buildbot.changes.mail.SVNCommitEmailMaildirSource: SVNCommitEmailMaildirSource.
+ (line 6)
+* buildbot.changes.mail.SyncmailMaildirSource: SyncmailMaildirSource.
+ (line 6)
+* buildbot.changes.p4poller.P4Source: P4Source. (line 6)
+* buildbot.changes.pb.PBChangeSource: PBChangeSource. (line 6)
+* buildbot.changes.svnpoller.SVNPoller: SVNPoller. (line 6)
+
+Schedulers and Locks
+====================
+
+
+* Menu:
+
+* buildbot.locks.LockAccess: Interlocks. (line 6)
+* buildbot.locks.MasterLock: Interlocks. (line 6)
+* buildbot.locks.SlaveLock: Interlocks. (line 6)
+* buildbot.scheduler.AnyBranchScheduler: AnyBranchScheduler. (line 6)
+* buildbot.scheduler.Dependent: Dependent Scheduler.
+ (line 6)
+* buildbot.scheduler.Nightly: Nightly Scheduler. (line 6)
+* buildbot.scheduler.Periodic: Periodic Scheduler. (line 6)
+* buildbot.scheduler.Scheduler: Scheduler Scheduler.
+ (line 6)
+* buildbot.scheduler.Triggerable: Triggerable Scheduler.
+ (line 6)
+* buildbot.scheduler.Try_Jobdir <1>: try. (line 32)
+* buildbot.scheduler.Try_Jobdir: Try Schedulers. (line 6)
+* buildbot.scheduler.Try_Userpass <1>: try. (line 32)
+* buildbot.scheduler.Try_Userpass: Try Schedulers. (line 6)
+
+Build Factories
+===============
+
+
+* Menu:
+
+* buildbot.process.factory.BasicBuildFactory: BuildFactory. (line 6)
+* buildbot.process.factory.BasicSVN: BuildFactory. (line 6)
+* buildbot.process.factory.BuildFactory: BuildFactory. (line 6)
+* buildbot.process.factory.CPAN: CPAN. (line 6)
+* buildbot.process.factory.Distutils: Python distutils. (line 6)
+* buildbot.process.factory.GNUAutoconf: GNUAutoconf. (line 6)
+* buildbot.process.factory.QuickBuildFactory: Quick builds. (line 6)
+* buildbot.process.factory.Trial: Python/Twisted/trial projects.
+ (line 6)
+
+Build Steps
+===========
+
+
+* Menu:
+
+* buildbot.steps.maxq.MaxQ: Index of Useful Classes.
+ (line 73)
+* buildbot.steps.python.BuildEPYDoc: BuildEPYDoc. (line 6)
+* buildbot.steps.python.PyFlakes: PyFlakes. (line 6)
+* buildbot.steps.python.PyLint: PyLint. (line 6)
+* buildbot.steps.python_twisted.BuildDebs: Python/Twisted/trial projects.
+ (line 6)
+* buildbot.steps.python_twisted.HLint: Python/Twisted/trial projects.
+ (line 6)
+* buildbot.steps.python_twisted.ProcessDocs: Python/Twisted/trial projects.
+ (line 6)
+* buildbot.steps.python_twisted.RemovePYCs: Python/Twisted/trial projects.
+ (line 6)
+* buildbot.steps.python_twisted.Trial: Python/Twisted/trial projects.
+ (line 6)
+* buildbot.steps.shell.Compile: Compile. (line 6)
+* buildbot.steps.shell.Configure: Configure. (line 6)
+* buildbot.steps.shell.PerlModuleTest: PerlModuleTest. (line 6)
+* buildbot.steps.shell.SetProperty: SetProperty. (line 6)
+* buildbot.steps.shell.ShellCommand: ShellCommand. (line 6)
+* buildbot.steps.shell.Test: Test. (line 6)
+* buildbot.steps.shell.TreeSize: TreeSize. (line 6)
+* buildbot.steps.source.Arch: Arch. (line 6)
+* buildbot.steps.source.Bazaar: Bazaar. (line 6)
+* buildbot.steps.source.Bzr: Bzr. (line 6)
+* buildbot.steps.source.CVS: CVS. (line 6)
+* buildbot.steps.source.Darcs: Darcs. (line 6)
+* buildbot.steps.source.Git <1>: Index of Useful Classes.
+ (line 73)
+* buildbot.steps.source.Git: Git. (line 6)
+* buildbot.steps.source.Mercurial: Mercurial. (line 6)
+* buildbot.steps.source.P4: P4. (line 6)
+* buildbot.steps.source.SVN: SVN. (line 6)
+* buildbot.steps.transfer.DirectoryUpload: Transferring Files.
+ (line 6)
+* buildbot.steps.transfer.FileDownload: Transferring Files. (line 6)
+* buildbot.steps.transfer.FileUpload: Transferring Files. (line 6)
+
+Status Targets
+==============
+
+
+* Menu:
+
+* buildbot.status.client.PBListener: PBListener. (line 6)
+* buildbot.status.html.Waterfall: HTML Waterfall. (line 6)
+* buildbot.status.mail.MailNotifier: MailNotifier. (line 6)
+* buildbot.status.web.baseweb.WebStatus: WebStatus. (line 6)
+* buildbot.status.words.IRC: IRC Bot. (line 6)
+
+
+File: buildbot.info, Node: Index of master.cfg keys, Next: Index, Prev: Index of Useful Classes, Up: Top
+
+Index of master.cfg keys
+************************
+
+This is a list of all of the significant keys in master.cfg . Recall
+that master.cfg is effectively a small python program with exactly one
+responsibility: create a dictionary named `BuildmasterConfig'. The
+keys of this dictionary are listed here. The beginning of the
+master.cfg file typically starts with something like:
+
+ BuildmasterConfig = c = {}
+
+ Therefore a config key of `change_source' will usually appear in
+master.cfg as `c['change_source']'.
+
+
+* Menu:
+
+* c['buildbotURL']: Defining the Project.
+ (line 24)
+* c['builders']: Defining Builders. (line 6)
+* c['change_source']: Change Sources and Schedulers.
+ (line 6)
+* c['debugPassword']: Debug options. (line 6)
+* c['logCompressionLimit']: Defining the Project.
+ (line 36)
+* c['manhole']: Debug options. (line 17)
+* c['mergeRequests']: Merging BuildRequests.
+ (line 6)
+* c['projectName']: Defining the Project.
+ (line 15)
+* c['projectURL']: Defining the Project.
+ (line 19)
+* c['properties']: Defining Global Properties.
+ (line 6)
+* c['schedulers']: Change Sources and Schedulers.
+ (line 13)
+* c['slavePortnum']: Setting the slaveport.
+ (line 6)
+* c['slaves']: Buildslave Specifiers.
+ (line 6)
+* c['sources']: Change Sources and Schedulers.
+ (line 6)
+* c['status']: Defining Status Targets.
+ (line 11)
+
+
+File: buildbot.info, Node: Index, Prev: Index of master.cfg keys, Up: Top
+
+Index
+*****
+
+
+* Menu:
+
+* addURL: BuildStep URLs. (line 6)
+* Arch Checkout: Arch. (line 6)
+* Bazaar Checkout: Bazaar. (line 6)
+* Builder: Builder. (line 6)
+* BuildRequest: BuildRequest. (line 6)
+* BuildSet: BuildSet. (line 6)
+* BuildStep URLs: BuildStep URLs. (line 6)
+* Bzr Checkout: Bzr. (line 6)
+* Configuration: Configuration. (line 6)
+* CVS Checkout: CVS. (line 6)
+* Darcs Checkout: Darcs. (line 6)
+* Dependencies: Dependent Scheduler.
+ (line 6)
+* Dependent: Dependent Scheduler.
+ (line 6)
+* email: MailNotifier. (line 6)
+* File Transfer: Transferring Files. (line 6)
+* Git Checkout: Git. (line 6)
+* installation: Installing the code.
+ (line 6)
+* introduction: Introduction. (line 6)
+* IRC: IRC Bot. (line 6)
+* links: BuildStep URLs. (line 6)
+* locks: Interlocks. (line 6)
+* logfiles: Logfiles. (line 6)
+* LogLineObserver: Adding LogObservers.
+ (line 6)
+* LogObserver: Adding LogObservers.
+ (line 6)
+* mail: MailNotifier. (line 6)
+* Mercurial Checkout: Mercurial. (line 6)
+* PBListener: PBListener. (line 6)
+* Perforce Update: P4. (line 6)
+* Philosophy of operation: History and Philosophy.
+ (line 6)
+* Properties <1>: Using Build Properties.
+ (line 6)
+* Properties <2>: Defining Global Properties.
+ (line 6)
+* Properties <3>: Buildslave Specifiers.
+ (line 33)
+* Properties <4>: Change Sources and Schedulers.
+ (line 41)
+* Properties: Build Properties. (line 6)
+* Scheduler: Schedulers. (line 6)
+* statusgui: statusgui. (line 6)
+* SVN Checkout: SVN. (line 6)
+* treeStableTimer: BuildFactory Attributes.
+ (line 8)
+* Triggers: Triggerable Scheduler.
+ (line 6)
+* Users: Users. (line 6)
+* Version Control: Version Control Systems.
+ (line 6)
+* Waterfall: HTML Waterfall. (line 6)
+* WebStatus: WebStatus. (line 6)
+* WithProperties: Using Build Properties.
+ (line 34)
+
+
diff --git a/buildbot/docs/buildbot.texinfo b/buildbot/docs/buildbot.texinfo
new file mode 100644
index 0000000..639103b
--- /dev/null
+++ b/buildbot/docs/buildbot.texinfo
@@ -0,0 +1,8807 @@
+\input texinfo @c -*-texinfo-*-
+@c %**start of header
+@setfilename buildbot.info
+@settitle BuildBot Manual 0.7.10
+@defcodeindex cs
+@defcodeindex sl
+@defcodeindex bf
+@defcodeindex bs
+@defcodeindex st
+@defcodeindex bc
+@c %**end of header
+
+@c these indices are for classes useful in a master.cfg config file
+@c @csindex : Change Sources
+@c @slindex : Schedulers and Locks
+@c @bfindex : Build Factories
+@c @bsindex : Build Steps
+@c @stindex : Status Targets
+
+@c @bcindex : keys that make up BuildmasterConfig
+
+@copying
+This is the BuildBot manual.
+
+Copyright (C) 2005,2006 Brian Warner
+
+Copying and distribution of this file, with or without
+modification, are permitted in any medium without royalty
+provided the copyright notice and this notice are preserved.
+
+@end copying
+
+@titlepage
+@title BuildBot
+@page
+@vskip 0pt plus 1filll
+@insertcopying
+@end titlepage
+
+@c Output the table of the contents at the beginning.
+@contents
+
+@ifnottex
+@node Top, Introduction, (dir), (dir)
+@top BuildBot
+
+@insertcopying
+@end ifnottex
+
+@menu
+* Introduction:: What the BuildBot does.
+* Installation:: Creating a buildmaster and buildslaves,
+ running them.
+* Concepts:: What goes on in the buildbot's little mind.
+* Configuration:: Controlling the buildbot.
+* Getting Source Code Changes:: Discovering when to run a build.
+* Build Process:: Controlling how each build is run.
+* Status Delivery:: Telling the world about the build's results.
+* Command-line tool::
+* Resources:: Getting help.
+* Developer's Appendix::
+* Index of Useful Classes::
+* Index of master.cfg keys::
+* Index:: Complete index.
+
+@detailmenu
+ --- The Detailed Node Listing ---
+
+Introduction
+
+* History and Philosophy::
+* System Architecture::
+* Control Flow::
+
+System Architecture
+
+* BuildSlave Connections::
+* Buildmaster Architecture::
+* Status Delivery Architecture::
+
+Installation
+
+* Requirements::
+* Installing the code::
+* Creating a buildmaster::
+* Upgrading an Existing Buildmaster::
+* Creating a buildslave::
+* Launching the daemons::
+* Logfiles::
+* Shutdown::
+* Maintenance::
+* Troubleshooting::
+
+Creating a buildslave
+
+* Buildslave Options::
+
+Troubleshooting
+
+* Starting the buildslave::
+* Connecting to the buildmaster::
+* Forcing Builds::
+
+Concepts
+
+* Version Control Systems::
+* Schedulers::
+* BuildSet::
+* BuildRequest::
+* Builder::
+* Users::
+* Build Properties::
+
+Version Control Systems
+
+* Generalizing VC Systems::
+* Source Tree Specifications::
+* How Different VC Systems Specify Sources::
+* Attributes of Changes::
+
+Users
+
+* Doing Things With Users::
+* Email Addresses::
+* IRC Nicknames::
+* Live Status Clients::
+
+Configuration
+
+* Config File Format::
+* Loading the Config File::
+* Testing the Config File::
+* Defining the Project::
+* Change Sources and Schedulers::
+* Setting the slaveport::
+* Buildslave Specifiers::
+* On-Demand ("Latent") Buildslaves::
+* Defining Global Properties::
+* Defining Builders::
+* Defining Status Targets::
+* Debug options::
+
+Change Sources and Schedulers
+
+* Scheduler Scheduler::
+* AnyBranchScheduler::
+* Dependent Scheduler::
+* Periodic Scheduler::
+* Nightly Scheduler::
+* Try Schedulers::
+* Triggerable Scheduler::
+
+Buildslave Specifiers
+* When Buildslaves Go Missing::
+
+On-Demand ("Latent") Buildslaves
+* Amazon Web Services Elastic Compute Cloud ("AWS EC2")::
+* Dangers with Latent Buildslaves::
+* Writing New Latent Buildslaves::
+
+Getting Source Code Changes
+
+* Change Sources::
+* Choosing ChangeSources::
+* CVSToys - PBService::
+* Mail-parsing ChangeSources::
+* PBChangeSource::
+* P4Source::
+* BonsaiPoller::
+* SVNPoller::
+* MercurialHook::
+* Bzr Hook::
+* Bzr Poller::
+
+Mail-parsing ChangeSources
+
+* Subscribing the Buildmaster::
+* Using Maildirs::
+* Parsing Email Change Messages::
+
+Parsing Email Change Messages
+
+* FCMaildirSource::
+* SyncmailMaildirSource::
+* BonsaiMaildirSource::
+* SVNCommitEmailMaildirSource::
+
+Build Process
+
+* Build Steps::
+* Interlocks::
+* Build Factories::
+
+Build Steps
+
+* Common Parameters::
+* Using Build Properties::
+* Source Checkout::
+* ShellCommand::
+* Simple ShellCommand Subclasses::
+* Python BuildSteps::
+* Transferring Files::
+* Steps That Run on the Master::
+* Triggering Schedulers::
+* Writing New BuildSteps::
+
+Source Checkout
+
+* CVS::
+* SVN::
+* Darcs::
+* Mercurial::
+* Arch::
+* Bazaar::
+* Bzr::
+* P4::
+* Git::
+
+Simple ShellCommand Subclasses
+
+* Configure::
+* Compile::
+* Test::
+* TreeSize::
+* PerlModuleTest::
+* SetProperty::
+
+Python BuildSteps
+
+* BuildEPYDoc::
+* PyFlakes::
+* PyLint::
+
+Writing New BuildSteps
+
+* BuildStep LogFiles::
+* Reading Logfiles::
+* Adding LogObservers::
+* BuildStep URLs::
+
+Build Factories
+
+* BuildStep Objects::
+* BuildFactory::
+* Process-Specific build factories::
+
+BuildStep Objects
+
+* BuildFactory Attributes::
+* Quick builds::
+
+BuildFactory
+
+* BuildFactory Attributes::
+* Quick builds::
+
+Process-Specific build factories
+
+* GNUAutoconf::
+* CPAN::
+* Python distutils::
+* Python/Twisted/trial projects::
+
+Status Delivery
+
+* WebStatus::
+* MailNotifier::
+* IRC Bot::
+* PBListener::
+* Writing New Status Plugins::
+
+WebStatus
+
+* WebStatus Configuration Parameters::
+* Buildbot Web Resources::
+* XMLRPC server::
+* HTML Waterfall::
+
+Command-line tool
+
+* Administrator Tools::
+* Developer Tools::
+* Other Tools::
+* .buildbot config directory::
+
+Developer Tools
+
+* statuslog::
+* statusgui::
+* try::
+
+waiting for results
+
+* try --diff::
+
+Other Tools
+
+* sendchange::
+* debugclient::
+
+@end detailmenu
+@end menu
+
+@node Introduction, Installation, Top, Top
+@chapter Introduction
+
+@cindex introduction
+
+The BuildBot is a system to automate the compile/test cycle required by most
+software projects to validate code changes. By automatically rebuilding and
+testing the tree each time something has changed, build problems are
+pinpointed quickly, before other developers are inconvenienced by the
+failure. The guilty developer can be identified and harassed without human
+intervention. By running the builds on a variety of platforms, developers
+who do not have the facilities to test their changes everywhere before
+checkin will at least know shortly afterwards whether they have broken the
+build or not. Warning counts, lint checks, image size, compile time, and
+other build parameters can be tracked over time, are more visible, and
+are therefore easier to improve.
+
+The overall goal is to reduce tree breakage and provide a platform to
+run tests or code-quality checks that are too annoying or pedantic for
+any human to waste their time with. Developers get immediate (and
+potentially public) feedback about their changes, encouraging them to
+be more careful about testing before checkin.
+
+Features:
+
+@itemize @bullet
+@item
+run builds on a variety of slave platforms
+@item
+arbitrary build process: handles projects using C, Python, whatever
+@item
+minimal host requirements: python and Twisted
+@item
+slaves can be behind a firewall if they can still do checkout
+@item
+status delivery through web page, email, IRC, other protocols
+@item
+track builds in progress, provide estimated completion time
+@item
+flexible configuration by subclassing generic build process classes
+@item
+debug tools to force a new build, submit fake Changes, query slave status
+@item
+released under the GPL
+@end itemize
+
+@menu
+* History and Philosophy::
+* System Architecture::
+* Control Flow::
+@end menu
+
+
+@node History and Philosophy, System Architecture, Introduction, Introduction
+@section History and Philosophy
+
+@cindex Philosophy of operation
+
+The Buildbot was inspired by a similar project built for a development
+team writing a cross-platform embedded system. The various components
+of the project were supposed to compile and run on several flavors of
+unix (linux, solaris, BSD), but individual developers had their own
+preferences and tended to stick to a single platform. From time to
+time, incompatibilities would sneak in (some unix platforms want to
+use @code{string.h}, some prefer @code{strings.h}), and then the tree
+would compile for some developers but not others. The buildbot was
+written to automate the human process of walking into the office,
+updating a tree, compiling (and discovering the breakage), finding the
+developer at fault, and complaining to them about the problem they had
+introduced. With multiple platforms it was difficult for developers to
+do the right thing (compile their potential change on all platforms);
+the buildbot offered a way to help.
+
+Another problem was when programmers would change the behavior of a
+library without warning its users, or change internal aspects that
+other code was (unfortunately) depending upon. Adding unit tests to
+the codebase helps here: if an application's unit tests pass despite
+changes in the libraries it uses, you can have more confidence that
+the library changes haven't broken anything. Many developers
+complained that the unit tests were inconvenient or took too long to
+run: having the buildbot run them reduces the developer's workload to
+a minimum.
+
+In general, having more visibility into the project is always good,
+and automation makes it easier for developers to do the right thing.
+When everyone can see the status of the project, developers are
+encouraged to keep the tree in good working order. Unit tests that
+aren't run on a regular basis tend to suffer from bitrot just like
+code does: exercising them on a regular basis helps to keep them
+functioning and useful.
+
+The current version of the Buildbot is additionally targeted at
+distributed free-software projects, where resources and platforms are
+only available when provided by interested volunteers. The buildslaves
+are designed to require an absolute minimum of configuration, reducing
+the effort a potential volunteer needs to expend to be able to
+contribute a new test environment to the project. The goal is for
+anyone who wishes that a given project would run on their favorite
+platform should be able to offer that project a buildslave, running on
+that platform, where they can verify that their portability code
+works, and keeps working.
+
+@node System Architecture, Control Flow, History and Philosophy, Introduction
+@comment node-name, next, previous, up
+@section System Architecture
+
+The Buildbot consists of a single @code{buildmaster} and one or more
+@code{buildslaves}, connected in a star topology. The buildmaster
+makes all decisions about what, when, and how to build. It sends
+commands to be run on the build slaves, which simply execute the
+commands and return the results. (certain steps involve more local
+decision making, where the overhead of sending a lot of commands back
+and forth would be inappropriate, but in general the buildmaster is
+responsible for everything).
+
+The buildmaster is usually fed @code{Changes} by some sort of version
+control system (@pxref{Change Sources}), which may cause builds to be
+run. As the builds are performed, various status messages are
+produced, which are then sent to any registered Status Targets
+(@pxref{Status Delivery}).
+
+@c @image{FILENAME, WIDTH, HEIGHT, ALTTEXT, EXTENSION}
+@image{images/overview,,,Overview Diagram,}
+
+The buildmaster is configured and maintained by the ``buildmaster
+admin'', who is generally the project team member responsible for
+build process issues. Each buildslave is maintained by a ``buildslave
+admin'', who does not need to be quite as involved. Generally slaves are
+run by anyone who has an interest in seeing the project work well on
+their favorite platform.
+
+@menu
+* BuildSlave Connections::
+* Buildmaster Architecture::
+* Status Delivery Architecture::
+@end menu
+
+@node BuildSlave Connections, Buildmaster Architecture, System Architecture, System Architecture
+@subsection BuildSlave Connections
+
+The buildslaves are typically run on a variety of separate machines,
+at least one per platform of interest. These machines connect to the
+buildmaster over a TCP connection to a publicly-visible port. As a
+result, the buildslaves can live behind a NAT box or similar
+firewalls, as long as they can get to buildmaster. The TCP connections
+are initiated by the buildslave and accepted by the buildmaster, but
+commands and results travel both ways within this connection. The
+buildmaster is always in charge, so all commands travel exclusively
+from the buildmaster to the buildslave.
+
+To perform builds, the buildslaves must typically obtain source code
+from a CVS/SVN/etc repository. Therefore they must also be able to
+reach the repository. The buildmaster provides instructions for
+performing builds, but does not provide the source code itself.
+
+@image{images/slaves,,,BuildSlave Connections,}
+
+@node Buildmaster Architecture, Status Delivery Architecture, BuildSlave Connections, System Architecture
+@subsection Buildmaster Architecture
+
+The Buildmaster consists of several pieces:
+
+@image{images/master,,,BuildMaster Architecture,}
+
+@itemize @bullet
+
+@item
+Change Sources, which create a Change object each time something is
+modified in the VC repository. Most ChangeSources listen for messages
+from a hook script of some sort. Some sources actively poll the
+repository on a regular basis. All Changes are fed to the Schedulers.
+
+@item
+Schedulers, which decide when builds should be performed. They collect
+Changes into BuildRequests, which are then queued for delivery to
+Builders until a buildslave is available.
+
+@item
+Builders, which control exactly @emph{how} each build is performed
+(with a series of BuildSteps, configured in a BuildFactory). Each
+Build is run on a single buildslave.
+
+@item
+Status plugins, which deliver information about the build results
+through protocols like HTTP, mail, and IRC.
+
+@end itemize
+
+@image{images/slavebuilder,,,SlaveBuilders,}
+
+Each Builder is configured with a list of BuildSlaves that it will use
+for its builds. These buildslaves are expected to behave identically:
+the only reason to use multiple BuildSlaves for a single Builder is to
+provide a measure of load-balancing.
+
+Within a single BuildSlave, each Builder creates its own SlaveBuilder
+instance. These SlaveBuilders operate independently from each other.
+Each gets its own base directory to work in. It is quite common to
+have many Builders sharing the same buildslave. For example, there
+might be two buildslaves: one for i386, and a second for PowerPC.
+There may then be a pair of Builders that do a full compile/test run,
+one for each architecture, and a lone Builder that creates snapshot
+source tarballs if the full builders complete successfully. The full
+builders would each run on a single buildslave, whereas the tarball
+creation step might run on either buildslave (since the platform
+doesn't matter when creating source tarballs). In this case, the
+mapping would look like:
+
+@example
+Builder(full-i386) -> BuildSlaves(slave-i386)
+Builder(full-ppc) -> BuildSlaves(slave-ppc)
+Builder(source-tarball) -> BuildSlaves(slave-i386, slave-ppc)
+@end example
+
+and each BuildSlave would have two SlaveBuilders inside it, one for a
+full builder, and a second for the source-tarball builder.
+
+Once a SlaveBuilder is available, the Builder pulls one or more
+BuildRequests off its incoming queue. (It may pull more than one if it
+determines that it can merge the requests together; for example, there
+may be multiple requests to build the current HEAD revision). These
+requests are merged into a single Build instance, which includes the
+SourceStamp that describes what exact version of the source code
+should be used for the build. The Build is then randomly assigned to a
+free SlaveBuilder and the build begins.
+
+The behaviour when BuildRequests are merged can be customized, @pxref{Merging
+BuildRequests}.
+
+@node Status Delivery Architecture, , Buildmaster Architecture, System Architecture
+@subsection Status Delivery Architecture
+
+The buildmaster maintains a central Status object, to which various
+status plugins are connected. Through this Status object, a full
+hierarchy of build status objects can be obtained.
+
+@image{images/status,,,Status Delivery,}
+
+The configuration file controls which status plugins are active. Each
+status plugin gets a reference to the top-level Status object. From
+there they can request information on each Builder, Build, Step, and
+LogFile. This query-on-demand interface is used by the html.Waterfall
+plugin to create the main status page each time a web browser hits the
+main URL.
+
+The status plugins can also subscribe to hear about new Builds as they
+occur: this is used by the MailNotifier to create new email messages
+for each recently-completed Build.
+
+The Status object records the status of old builds on disk in the
+buildmaster's base directory. This allows it to return information
+about historical builds.
+
+There are also status objects that correspond to Schedulers and
+BuildSlaves. These allow status plugins to report information about
+upcoming builds, and the online/offline status of each buildslave.
+
+
+@node Control Flow, , System Architecture, Introduction
+@comment node-name, next, previous, up
+@section Control Flow
+
+A day in the life of the buildbot:
+
+@itemize @bullet
+
+@item
+A developer commits some source code changes to the repository. A hook
+script or commit trigger of some sort sends information about this
+change to the buildmaster through one of its configured Change
+Sources. This notification might arrive via email, or over a network
+connection (either initiated by the buildmaster as it ``subscribes''
+to changes, or by the commit trigger as it pushes Changes towards the
+buildmaster). The Change contains information about who made the
+change, what files were modified, which revision contains the change,
+and any checkin comments.
+
+@item
+The buildmaster distributes this change to all of its configured
+Schedulers. Any ``important'' changes cause the ``tree-stable-timer''
+to be started, and the Change is added to a list of those that will go
+into a new Build. When the timer expires, a Build is started on each
+of a set of configured Builders, all compiling/testing the same source
+code. Unless configured otherwise, all Builds run in parallel on the
+various buildslaves.
+
+@item
+The Build consists of a series of Steps. Each Step causes some number
+of commands to be invoked on the remote buildslave associated with
+that Builder. The first step is almost always to perform a checkout of
+the appropriate revision from the same VC system that produced the
+Change. The rest generally perform a compile and run unit tests. As
+each Step runs, the buildslave reports back command output and return
+status to the buildmaster.
+
+@item
+As the Build runs, status messages like ``Build Started'', ``Step
+Started'', ``Build Finished'', etc, are published to a collection of
+Status Targets. One of these targets is usually the HTML ``Waterfall''
+display, which shows a chronological list of events, and summarizes
+the results of the most recent build at the top of each column.
+Developers can periodically check this page to see how their changes
+have fared. If they see red, they know that they've made a mistake and
+need to fix it. If they see green, they know that they've done their
+duty and don't need to worry about their change breaking anything.
+
+@item
+If a MailNotifier status target is active, the completion of a build
+will cause email to be sent to any developers whose Changes were
+incorporated into this Build. The MailNotifier can be configured to
+only send mail upon failing builds, or for builds which have just
+transitioned from passing to failing. Other status targets can provide
+similar real-time notification via different communication channels,
+like IRC.
+
+@end itemize
+
+
+@node Installation, Concepts, Introduction, Top
+@chapter Installation
+
+@menu
+* Requirements::
+* Installing the code::
+* Creating a buildmaster::
+* Upgrading an Existing Buildmaster::
+* Creating a buildslave::
+* Launching the daemons::
+* Logfiles::
+* Shutdown::
+* Maintenance::
+* Troubleshooting::
+@end menu
+
+@node Requirements, Installing the code, Installation, Installation
+@section Requirements
+
+At a bare minimum, you'll need the following (for both the buildmaster
+and a buildslave):
+
+@itemize @bullet
+@item
+Python: http://www.python.org
+
+Buildbot requires python-2.3 or later, and is primarily developed
+against python-2.4. It is also tested against python-2.5.
+
+@item
+Twisted: http://twistedmatrix.com
+
+Both the buildmaster and the buildslaves require Twisted-2.0.x or
+later. It has been tested against all releases of Twisted up to
+Twisted-2.5.0 (the most recent as of this writing). As always, the
+most recent version is recommended.
+
+Twisted is delivered as a collection of subpackages. You'll need at
+least "Twisted" (the core package), and you'll also want TwistedMail,
+TwistedWeb, and TwistedWords (for sending email, serving a web status
+page, and delivering build status via IRC, respectively). You might
+also want TwistedConch (for the encrypted Manhole debug port). Note
+that Twisted requires ZopeInterface to be installed as well.
+
+@end itemize
+
+Certain other packages may be useful on the system running the
+buildmaster:
+
+@itemize @bullet
+@item
+CVSToys: http://purl.net/net/CVSToys
+
+If your buildmaster uses FreshCVSSource to receive change notification
+from a cvstoys daemon, it will require CVSToys be installed (tested
+with CVSToys-1.0.10). If it doesn't use that source (i.e. if you
+only use a mail-parsing change source, or the SVN notification
+script), you will not need CVSToys.
+
+@end itemize
+
+And of course, your project's build process will impose additional
+requirements on the buildslaves. These hosts must have all the tools
+necessary to compile and test your project's source code.
+
+
+@node Installing the code, Creating a buildmaster, Requirements, Installation
+@section Installing the code
+
+@cindex installation
+
+The Buildbot is installed using the standard python @code{distutils}
+module. After unpacking the tarball, the process is:
+
+@example
+python setup.py build
+python setup.py install
+@end example
+
+where the install step may need to be done as root. This will put the
+bulk of the code in somewhere like
+/usr/lib/python2.3/site-packages/buildbot . It will also install the
+@code{buildbot} command-line tool in /usr/bin/buildbot.
+
+To test this, shift to a different directory (like /tmp), and run:
+
+@example
+buildbot --version
+@end example
+
+If it shows you the versions of Buildbot and Twisted, the install went
+ok. If it says @code{no such command} or it gets an @code{ImportError}
+when it tries to load the libraries, then something went wrong.
+@code{pydoc buildbot} is another useful diagnostic tool.
+
+Windows users will find these files in other places. You will need to
+make sure that python can find the libraries, and will probably find
+it convenient to have @code{buildbot} on your PATH.
+
+If you wish, you can run the buildbot unit test suite like this:
+
+@example
+PYTHONPATH=. trial buildbot.test
+@end example
+
+This should run up to 192 tests, depending upon what VC tools you have
+installed. On my desktop machine it takes about five minutes to
+complete. Nothing should fail, a few might be skipped. If any of the
+tests fail, you should stop and investigate the cause before
+continuing the installation process, as it will probably be easier to
+track down the bug early.
+
+If you cannot or do not wish to install the buildbot into a site-wide
+location like @file{/usr} or @file{/usr/local}, you can also install
+it into the account's home directory. Do the install command like
+this:
+
+@example
+python setup.py install --home=~
+@end example
+
+That will populate @file{~/lib/python} and create
+@file{~/bin/buildbot}. Make sure this lib directory is on your
+@code{PYTHONPATH}.
+
+
+@node Creating a buildmaster, Upgrading an Existing Buildmaster, Installing the code, Installation
+@section Creating a buildmaster
+
+As you learned earlier (@pxref{System Architecture}), the buildmaster
+runs on a central host (usually one that is publicly visible, so
+everybody can check on the status of the project), and controls all
+aspects of the buildbot system. Let us call this host
+@code{buildbot.example.org}.
+
+You may wish to create a separate user account for the buildmaster,
+perhaps named @code{buildmaster}. This can help keep your personal
+configuration distinct from that of the buildmaster and is useful if
+you have to use a mail-based notification system (@pxref{Change
+Sources}). However, the Buildbot will work just fine with your regular
+user account.
+
+You need to choose a directory for the buildmaster, called the
+@code{basedir}. This directory will be owned by the buildmaster, which
+will use configuration files therein, and create status files as it
+runs. @file{~/Buildbot} is a likely value. If you run multiple
+buildmasters in the same account, or if you run both masters and
+slaves, you may want a more distinctive name like
+@file{~/Buildbot/master/gnomovision} or
+@file{~/Buildmasters/fooproject}. If you are using a separate user
+account, this might just be @file{~buildmaster/masters/fooproject}.
+
+Once you've picked a directory, use the @command{buildbot
+create-master} command to create the directory and populate it with
+startup files:
+
+@example
+buildbot create-master @var{basedir}
+@end example
+
+You will need to create a configuration file (@pxref{Configuration})
+before starting the buildmaster. Most of the rest of this manual is
+dedicated to explaining how to do this. A sample configuration file is
+placed in the working directory, named @file{master.cfg.sample}, which
+can be copied to @file{master.cfg} and edited to suit your purposes.
+
+(Internal details: This command creates a file named
+@file{buildbot.tac} that contains all the state necessary to create
+the buildmaster. Twisted has a tool called @code{twistd} which can use
+this .tac file to create and launch a buildmaster instance. twistd
+takes care of logging and daemonization (running the program in the
+background). @file{/usr/bin/buildbot} is a front end which runs twistd
+for you.)
+
+In addition to @file{buildbot.tac}, a small @file{Makefile.sample} is
+installed. This can be used as the basis for customized daemon startup,
+@xref{Launching the daemons}.
+
+@node Upgrading an Existing Buildmaster, Creating a buildslave, Creating a buildmaster, Installation
+@section Upgrading an Existing Buildmaster
+
+If you have just installed a new version of the Buildbot code, and you
+have buildmasters that were created using an older version, you'll
+need to upgrade these buildmasters before you can use them. The
+upgrade process adds and modifies files in the buildmaster's base
+directory to make it compatible with the new code.
+
+@example
+buildbot upgrade-master @var{basedir}
+@end example
+
+This command will also scan your @file{master.cfg} file for
+incompatibilities (by loading it and printing any errors or deprecation
+warnings that occur). Each buildbot release tries to be compatible
+with configurations that worked cleanly (i.e. without deprecation
+warnings) on the previous release: any functions or classes that are
+to be removed will first be deprecated in a release, to give users a
+chance to start using their replacement.
+
+The 0.7.6 release introduced the @file{public_html/} directory, which
+contains @file{index.html} and other files served by the
+@code{WebStatus} and @code{Waterfall} status displays. The
+@code{upgrade-master} command will create these files if they do not
+already exist. It will not modify existing copies, but it will write a
+new copy in e.g. @file{index.html.new} if the new version differs from
+the version that already exists.
+
+The @code{upgrade-master} command is idempotent. It is safe to run it
+multiple times. After each upgrade of the buildbot code, you should
+use @code{upgrade-master} on all your buildmasters.
+
+
+@node Creating a buildslave, Launching the daemons, Upgrading an Existing Buildmaster, Installation
+@section Creating a buildslave
+
+Typically, you will be adding a buildslave to an existing buildmaster,
+to provide additional architecture coverage. The buildbot
+administrator will give you several pieces of information necessary to
+connect to the buildmaster. You should also be somewhat familiar with
+the project being tested, so you can troubleshoot build problems
+locally.
+
+The buildbot exists to make sure that the project's stated ``how to
+build it'' process actually works. To this end, the buildslave should
+run in an environment just like that of your regular developers.
+Typically the project build process is documented somewhere
+(@file{README}, @file{INSTALL}, etc), in a document that should
+mention all library dependencies and contain a basic set of build
+instructions. This document will be useful as you configure the host
+and account in which the buildslave runs.
+
+Here's a good checklist for setting up a buildslave:
+
+@enumerate
+@item
+Set up the account
+
+It is recommended (although not mandatory) to set up a separate user
+account for the buildslave. This account is frequently named
+@code{buildbot} or @code{buildslave}. This serves to isolate your
+personal working environment from that of the slave's, and helps to
+minimize the security threat posed by letting possibly-unknown
+contributors run arbitrary code on your system. The account should
+have a minimum of fancy init scripts.
+
+@item
+Install the buildbot code
+
+Follow the instructions given earlier (@pxref{Installing the code}).
+If you use a separate buildslave account, and you didn't install the
+buildbot code to a shared location, then you will need to install it
+with @code{--home=~} for each account that needs it.
+
+@item
+Set up the host
+
+Make sure the host can actually reach the buildmaster. Usually the
+buildmaster is running a status webserver on the same machine, so
+simply point your web browser at it and see if you can get there.
+Install whatever additional packages or libraries the project's
+INSTALL document advises. (or not: if your buildslave is supposed to
+make sure that building without optional libraries still works, then
+don't install those libraries).
+
+Again, these libraries don't necessarily have to be installed to a
+site-wide shared location, but they must be available to your build
+process. Accomplishing this is usually very specific to the build
+process, so installing them to @file{/usr} or @file{/usr/local} is
+usually the best approach.
+
+@item
+Test the build process
+
+Follow the instructions in the INSTALL document, in the buildslave's
+account. Perform a full CVS (or whatever) checkout, configure, make,
+run tests, etc. Confirm that the build works without manual fussing.
+If it doesn't work when you do it by hand, it will be unlikely to work
+when the buildbot attempts to do it in an automated fashion.
+
+@item
+Choose a base directory
+
+This should be somewhere in the buildslave's account, typically named
+after the project which is being tested. The buildslave will not touch
+any file outside of this directory. Something like @file{~/Buildbot}
+or @file{~/Buildslaves/fooproject} is appropriate.
+
+@item
+Get the buildmaster host/port, botname, and password
+
+When the buildbot admin configures the buildmaster to accept and use
+your buildslave, they will provide you with the following pieces of
+information:
+
+@itemize @bullet
+@item
+your buildslave's name
+@item
+the password assigned to your buildslave
+@item
+the hostname and port number of the buildmaster, i.e. buildbot.example.org:8007
+@end itemize
+
+@item
+Create the buildslave
+
+Now run the 'buildbot' command as follows:
+
+@example
+buildbot create-slave @var{BASEDIR} @var{MASTERHOST}:@var{PORT} @var{SLAVENAME} @var{PASSWORD}
+@end example
+
+This will create the base directory and a collection of files inside,
+including the @file{buildbot.tac} file that contains all the
+information you passed to the @code{buildbot} command.
+
+@item
+Fill in the hostinfo files
+
+When it first connects, the buildslave will send a few files up to the
+buildmaster which describe the host that it is running on. These files
+are presented on the web status display so that developers have more
+information to reproduce any test failures that are witnessed by the
+buildbot. There are sample files in the @file{info} subdirectory of
+the buildbot's base directory. You should edit these to correctly
+describe you and your host.
+
+@file{BASEDIR/info/admin} should contain your name and email address.
+This is the ``buildslave admin address'', and will be visible from the
+build status page (so you may wish to munge it a bit if
+address-harvesting spambots are a concern).
+
+@file{BASEDIR/info/host} should be filled with a brief description of
+the host: OS, version, memory size, CPU speed, versions of relevant
+libraries installed, and finally the version of the buildbot code
+which is running the buildslave.
+
+If you run many buildslaves, you may want to create a single
+@file{~buildslave/info} file and share it among all the buildslaves
+with symlinks.
+
+@end enumerate
+
+@menu
+* Buildslave Options::
+@end menu
+
+@node Buildslave Options, , Creating a buildslave, Creating a buildslave
+@subsection Buildslave Options
+
+There are a handful of options you might want to use when creating the
+buildslave with the @command{buildbot create-slave <options> DIR <params>}
+command. You can type @command{buildbot create-slave --help} for a summary.
+To use these, just include them on the @command{buildbot create-slave}
+command line, like this:
+
+@example
+buildbot create-slave --umask=022 ~/buildslave buildmaster.example.org:42012 myslavename mypasswd
+@end example
+
+@table @code
+@item --usepty
+This is a boolean flag that tells the buildslave whether to launch child
+processes in a PTY or with regular pipes (the default) when the master does not
+specify. This option is deprecated, as this particular parameter is better
+specified on the master.
+
+@item --umask
+This is a string (generally an octal representation of an integer)
+which will cause the buildslave process' ``umask'' value to be set
+shortly after initialization. The ``twistd'' daemonization utility
+forces the umask to 077 at startup (which means that all files created
+by the buildslave or its child processes will be unreadable by any
+user other than the buildslave account). If you want build products to
+be readable by other accounts, you can add @code{--umask=022} to tell
+the buildslave to fix the umask after twistd clobbers it. If you want
+build products to be @emph{writable} by other accounts too, use
+@code{--umask=000}, but this is likely to be a security problem.
+
+@item --keepalive
+This is a number that indicates how frequently ``keepalive'' messages
+should be sent from the buildslave to the buildmaster, expressed in
+seconds. The default (600) causes a message to be sent to the
+buildmaster at least once every 10 minutes. To set this to a lower
+value, use e.g. @code{--keepalive=120}.
+
+If the buildslave is behind a NAT box or stateful firewall, these
+messages may help to keep the connection alive: some NAT boxes tend to
+forget about a connection if it has not been used in a while. When
+this happens, the buildmaster will think that the buildslave has
+disappeared, and builds will time out. Meanwhile the buildslave will
+not realize that anything is wrong.
+
+@item --maxdelay
+This is a number that indicates the maximum amount of time the
+buildslave will wait between connection attempts, expressed in
+seconds. The default (300) causes the buildslave to wait at most 5
+minutes before trying to connect to the buildmaster again.
+
+@item --log-size
+This is the size in bytes at which the Twisted log files are rotated.
+
+@item --log-count
+This is the number of log rotations to keep around. You can either
+specify a number or @code{None} (the default) to keep all
+@file{twistd.log} files around.
+
+@end table
+
+
+@node Launching the daemons, Logfiles, Creating a buildslave, Installation
+@section Launching the daemons
+
+Both the buildmaster and the buildslave run as daemon programs. To
+launch them, pass the working directory to the @code{buildbot}
+command:
+
+@example
+buildbot start @var{BASEDIR}
+@end example
+
+This command will start the daemon and then return, so normally it
+will not produce any output. To verify that the programs are indeed
+running, look for a pair of files named @file{twistd.log} and
+@file{twistd.pid} that should be created in the working directory.
+@file{twistd.pid} contains the process ID of the newly-spawned daemon.
+
+When the buildslave connects to the buildmaster, new directories will
+start appearing in its base directory. The buildmaster tells the slave
+to create a directory for each Builder which will be using that slave.
+All build operations are performed within these directories: CVS
+checkouts, compiles, and tests.
+
+Once you get everything running, you will want to arrange for the
+buildbot daemons to be started at boot time. One way is to use
+@code{cron}, by putting them in a @@reboot crontab entry@footnote{this
+@@reboot syntax is understood by Vixie cron, which is the flavor
+usually provided with linux systems. Other unices may have a cron that
+doesn't understand @@reboot}:
+
+@example
+@@reboot buildbot start @var{BASEDIR}
+@end example
+
+When you run @command{crontab} to set this up, remember to do it as
+the buildmaster or buildslave account! If you add this to your crontab
+when running as your regular account (or worse yet, root), then the
+daemon will run as the wrong user, quite possibly as one with more
+authority than you intended to provide.
+
+It is important to remember that the environment provided to cron jobs
+and init scripts can be quite different from your normal runtime.
+There may be fewer environment variables specified, and the PATH may
+be shorter than usual. It is a good idea to test out this method of
+launching the buildslave by using a cron job with a time in the near
+future, with the same command, and then check @file{twistd.log} to
+make sure the slave actually started correctly. Common problems here
+are for @file{/usr/local} or @file{~/bin} to not be on your
+@code{PATH}, or for @code{PYTHONPATH} to not be set correctly.
+Sometimes @code{HOME} is messed up too.
+
+To modify the way the daemons are started (perhaps you want to set
+some environment variables first, or perform some cleanup each time),
+you can create a file named @file{Makefile.buildbot} in the base
+directory. When the @file{buildbot} front-end tool is told to
+@command{start} the daemon, and it sees this file (and
+@file{/usr/bin/make} exists), it will do @command{make -f
+Makefile.buildbot start} instead of its usual action (which involves
+running @command{twistd}). When the buildmaster or buildslave is
+installed, a @file{Makefile.sample} is created which implements the
+same behavior as the @file{buildbot} tool uses, so if you want to
+customize the process, just copy @file{Makefile.sample} to
+@file{Makefile.buildbot} and edit it as necessary.
+
+Some distributions may include conveniences to make starting buildbot
+at boot time easy. For instance, with the default buildbot package in
+Debian-based distributions, you may only need to modify
+@code{/etc/default/buildbot} (see also @code{/etc/init.d/buildbot}, which
+reads the configuration in @code{/etc/default/buildbot}).
+
+@node Logfiles, Shutdown, Launching the daemons, Installation
+@section Logfiles
+
+@cindex logfiles
+
+While a buildbot daemon runs, it emits text to a logfile, named
+@file{twistd.log}. A command like @code{tail -f twistd.log} is useful
+to watch the command output as it runs.
+
+The buildmaster will announce any errors with its configuration file
+in the logfile, so it is a good idea to look at the log at startup
+time to check for any problems. Most buildmaster activities will cause
+lines to be added to the log.
+
+@node Shutdown, Maintenance, Logfiles, Installation
+@section Shutdown
+
+To stop a buildmaster or buildslave manually, use:
+
+@example
+buildbot stop @var{BASEDIR}
+@end example
+
+This simply looks for the @file{twistd.pid} file and kills whatever
+process is identified within.
+
+At system shutdown, all processes are sent a @code{SIGTERM}. The
+buildmaster and buildslave will respond to this by shutting down
+normally.
+
+The buildmaster will respond to a @code{SIGHUP} by re-reading its
+config file. Of course, this only works on unix-like systems with
+signal support, and won't work on Windows. The following shortcut is
+available:
+
+@example
+buildbot reconfig @var{BASEDIR}
+@end example
+
+When you update the Buildbot code to a new release, you will need to
+restart the buildmaster and/or buildslave before it can take advantage
+of the new code. You can do a @code{buildbot stop @var{BASEDIR}} and
+@code{buildbot start @var{BASEDIR}} in quick succession, or you can
+use the @code{restart} shortcut, which does both steps for you:
+
+@example
+buildbot restart @var{BASEDIR}
+@end example
+
+There are certain configuration changes that are not handled cleanly
+by @code{buildbot reconfig}. If this occurs, @code{buildbot restart}
+is a more robust tool to fully switch over to the new configuration.
+
+@code{buildbot restart} may also be used to start a stopped Buildbot
+instance. This behaviour is useful when writing scripts that stop, start
+and restart Buildbot.
+
+A buildslave may also be gracefully shut down from the
+@ref{WebStatus} status plugin. This is useful to shut down a
+buildslave without interrupting any current builds. The buildmaster
+will wait until the buildslave has finished all its current builds, and
+will then tell the buildslave to shut down.
+
+@node Maintenance, Troubleshooting, Shutdown, Installation
+@section Maintenance
+
+It is a good idea to check the buildmaster's status page every once in
+a while, to see if your buildslave is still online. Eventually the
+buildbot will probably be enhanced to send you email (via the
+@file{info/admin} email address) when the slave has been offline for
+more than a few hours.
+
+If you find you can no longer provide a buildslave to the project, please
+let the project admins know, so they can put out a call for a
+replacement.
+
+The Buildbot records status and logs output continually, each time a
+build is performed. The status tends to be small, but the build logs
+can become quite large. Each build and log are recorded in a separate
+file, arranged hierarchically under the buildmaster's base directory.
+To prevent these files from growing without bound, you should
+periodically delete old build logs. A simple cron job to delete
+anything older than, say, two weeks should do the job. The only trick
+is to leave the @file{buildbot.tac} and other support files alone, for
+which find's @code{-mindepth} argument helps skip everything in the
+top directory. You can use something like the following:
+
+@example
+@@weekly cd BASEDIR && find . -mindepth 2 -ipath './public_html/*' -prune -o -type f -mtime +14 -exec rm @{@} \;
+@@weekly cd BASEDIR && find twistd.log* -mtime +14 -exec rm @{@} \;
+@end example
+
+@node Troubleshooting, , Maintenance, Installation
+@section Troubleshooting
+
+Here are a few hints on diagnosing common problems.
+
+@menu
+* Starting the buildslave::
+* Connecting to the buildmaster::
+* Forcing Builds::
+@end menu
+
+@node Starting the buildslave, Connecting to the buildmaster, Troubleshooting, Troubleshooting
+@subsection Starting the buildslave
+
+Cron jobs are typically run with a minimal shell (@file{/bin/sh}, not
+@file{/bin/bash}), and tilde expansion is not always performed in such
+commands. You may want to use explicit paths, because the @code{PATH}
+is usually quite short and doesn't include anything set by your
+shell's startup scripts (@file{.profile}, @file{.bashrc}, etc). If
+you've installed buildbot (or other python libraries) to an unusual
+location, you may need to add a @code{PYTHONPATH} specification (note
+that python will do tilde-expansion on @code{PYTHONPATH} elements by
+itself). Sometimes it is safer to fully-specify everything:
+
+@example
+@@reboot PYTHONPATH=~/lib/python /usr/local/bin/buildbot start /usr/home/buildbot/basedir
+@end example
+
+Take the time to get the @@reboot job set up. Otherwise, things will work
+fine for a while, but the first power outage or system reboot you have will
+stop the buildslave with nothing but the cries of sorrowful developers to
+remind you that it has gone away.
+
+@node Connecting to the buildmaster, Forcing Builds, Starting the buildslave, Troubleshooting
+@subsection Connecting to the buildmaster
+
+If the buildslave cannot connect to the buildmaster, the reason should
+be described in the @file{twistd.log} logfile. Some common problems
+are an incorrect master hostname or port number, or a mistyped bot
+name or password. If the buildslave loses the connection to the
+master, it is supposed to attempt to reconnect with an
+exponentially-increasing backoff. Each attempt (and the time of the
+next attempt) will be logged. If you get impatient, just manually stop
+and re-start the buildslave.
+
+When the buildmaster is restarted, all slaves will be disconnected,
+and will attempt to reconnect as usual. The reconnect time will depend
+upon how long the buildmaster is offline (i.e. how far up the
+exponential backoff curve the slaves have travelled). Again,
+@code{buildbot stop @var{BASEDIR}; buildbot start @var{BASEDIR}} will
+speed up the process.
+
+@node Forcing Builds, , Connecting to the buildmaster, Troubleshooting
+@subsection Forcing Builds
+
+From the buildmaster's main status web page, you can force a build to
+be run on your build slave. Figure out which column is for a builder
+that runs on your slave, click on that builder's name, and the page
+that comes up will have a ``Force Build'' button. Fill in the form,
+hit the button, and a moment later you should see your slave's
+@file{twistd.log} filling with commands being run. Using @code{pstree}
+or @code{top} should also reveal the cvs/make/gcc/etc processes being
+run by the buildslave. Note that the same web page should also show
+the @file{admin} and @file{host} information files that you configured
+earlier.
+
+@node Concepts, Configuration, Installation, Top
+@chapter Concepts
+
+This chapter defines some of the basic concepts that the Buildbot
+uses. You'll need to understand how the Buildbot sees the world to
+configure it properly.
+
+@menu
+* Version Control Systems::
+* Schedulers::
+* BuildSet::
+* BuildRequest::
+* Builder::
+* Users::
+* Build Properties::
+@end menu
+
+@node Version Control Systems, Schedulers, Concepts, Concepts
+@section Version Control Systems
+
+@cindex Version Control
+
+These source trees come from a Version Control System of some kind.
+CVS and Subversion are two popular ones, but the Buildbot supports
+others. All VC systems have some notion of an upstream
+@code{repository} which acts as a server@footnote{except Darcs, but
+since the Buildbot never modifies its local source tree we can ignore
+the fact that Darcs uses a less centralized model}, from which clients
+can obtain source trees according to various parameters. The VC
+repository provides source trees of various projects, for different
+branches, and from various points in time. The first thing we have to
+do is to specify which source tree we want to get.
+
+@menu
+* Generalizing VC Systems::
+* Source Tree Specifications::
+* How Different VC Systems Specify Sources::
+* Attributes of Changes::
+@end menu
+
+@node Generalizing VC Systems, Source Tree Specifications, Version Control Systems, Version Control Systems
+@subsection Generalizing VC Systems
+
+For the purposes of the Buildbot, we will try to generalize all VC
+systems as having repositories that each provide sources for a variety
+of projects. Each project is defined as a directory tree with source
+files. The individual files may each have revisions, but we ignore
+that and treat the project as a whole as having a set of revisions
+(CVS is really the only VC system still in widespread use that has
+per-file revisions---everything modern has moved to atomic tree-wide
+changesets). Each time someone commits a change to the project, a new
+revision becomes available. These revisions can be described by a
+tuple with two items: the first is a branch tag, and the second is
+some kind of revision stamp or timestamp. Complex projects may have
+multiple branch tags, but there is always a default branch. The
+timestamp may be an actual timestamp (such as the -D option to CVS),
+or it may be a monotonically-increasing transaction number (such as
+the change number used by SVN and P4, or the revision number used by
+Arch/Baz/Bazaar, or a labeled tag used in CVS)@footnote{many VC
+systems provide more complexity than this: in particular the local
+views that P4 and ClearCase can assemble out of various source
+directories are more complex than we're prepared to take advantage of
+here}. The SHA1 revision ID used by Monotone, Mercurial, and Git is
+also a kind of revision stamp, in that it specifies a unique copy of
+the source tree, as does a Darcs ``context'' file.
+
+When we aren't intending to make any changes to the sources we check out
+(at least not any that need to be committed back upstream), there are two
+basic ways to use a VC system:
+
+@itemize @bullet
+@item
+Retrieve a specific set of source revisions: some tag or key is used
+to index this set, which is fixed and cannot be changed by subsequent
+developers committing new changes to the tree. Releases are built from
+tagged revisions like this, so that they can be rebuilt again later
+(probably with controlled modifications).
+@item
+Retrieve the latest sources along a specific branch: some tag is used
+to indicate which branch is to be used, but within that constraint we want
+to get the latest revisions.
+@end itemize
+
+Build personnel or CM staff typically use the first approach: the
+build that results is (ideally) completely specified by the two
+parameters given to the VC system: repository and revision tag. This
+gives QA and end-users something concrete to point at when reporting
+bugs. Release engineers are also reportedly fond of shipping code that
+can be traced back to a concise revision tag of some sort.
+
+Developers are more likely to use the second approach: each morning
+the developer does an update to pull in the changes committed by the
+team over the last day. These builds are not easy to fully specify: it
+depends upon exactly when you did a checkout, and upon what local
+changes the developer has in their tree. Developers do not normally
+tag each build they produce, because there is usually significant
+overhead involved in creating these tags. Recreating the trees used by
+one of these builds can be a challenge. Some VC systems may provide
+implicit tags (like a revision number), while others may allow the use
+of timestamps to mean ``the state of the tree at time X'' as opposed
+to a tree-state that has been explicitly marked.
+
+The Buildbot is designed to help developers, so it usually works in
+terms of @emph{the latest} sources as opposed to specific tagged
+revisions. However, it would really prefer to build from reproducible
+source trees, so implicit revisions are used whenever possible.
+
+@node Source Tree Specifications, How Different VC Systems Specify Sources, Generalizing VC Systems, Version Control Systems
+@subsection Source Tree Specifications
+
+So for the Buildbot's purposes we treat each VC system as a server
+which can take a list of specifications as input and produce a source
+tree as output. Some of these specifications are static: they are
+attributes of the builder and do not change over time. Others are more
+variable: each build will have a different value. The repository is
+changed over time by a sequence of Changes, each of which represents a
+single developer making changes to some set of files. These Changes
+are cumulative@footnote{Monotone's @emph{multiple heads} feature
+violates this assumption of cumulative Changes, but in most situations
+the changes don't occur frequently enough for this to be a significant
+problem}.
+
+For normal builds, the Buildbot wants to get well-defined source trees
+that contain specific Changes, and exclude other Changes that may have
+occurred after the desired ones. We assume that the Changes arrive at
+the buildbot (through one of the mechanisms described in @pxref{Change
+Sources}) in the same order in which they are committed to the
+repository. The Buildbot waits for the tree to become ``stable''
+before initiating a build, for two reasons. The first is that
+developers frequently make multiple related commits in quick
+succession, even when the VC system provides ways to make atomic
+transactions involving multiple files at the same time. Running a
+build in the middle of these sets of changes would use an inconsistent
+set of source files, and is likely to fail (and is certain to be less
+useful than a build which uses the full set of changes). The
+tree-stable-timer is intended to avoid these useless builds that
+include some of the developer's changes but not all. The second reason
+is that some VC systems (i.e. CVS) do not provide repository-wide
+transaction numbers, so that timestamps are the only way to refer to
+a specific repository state. These timestamps may be somewhat
+ambiguous, due to processing and notification delays. By waiting until
+the tree has been stable for, say, 10 minutes, we can choose a
+timestamp from the middle of that period to use for our source
+checkout, and then be reasonably sure that any clock-skew errors will
+not cause the build to be performed on an inconsistent set of source
+files.
+
+The Schedulers always use the tree-stable-timer, with a timeout that
+is configured to reflect a reasonable tradeoff between build latency
+and change frequency. When the VC system provides coherent
+repository-wide revision markers (such as Subversion's revision
+numbers, or in fact anything other than CVS's timestamps), the
+resulting Build is simply performed against a source tree defined by
+that revision marker. When the VC system does not provide this, a
+timestamp from the middle of the tree-stable period is used to
+generate the source tree@footnote{this @code{checkoutDelay} defaults
+to half the tree-stable timer, but it can be overridden with an
+argument to the Source Step}.
+
+@node How Different VC Systems Specify Sources, Attributes of Changes, Source Tree Specifications, Version Control Systems
+@subsection How Different VC Systems Specify Sources
+
+For CVS, the static specifications are @code{repository} and
+@code{module}. In addition to those, each build uses a timestamp (or
+omits the timestamp to mean @code{the latest}) and @code{branch tag}
+(which defaults to HEAD). These parameters collectively specify a set
+of sources from which a build may be performed.
+
+@uref{http://subversion.tigris.org, Subversion} combines the
+repository, module, and branch into a single @code{Subversion URL}
+parameter. Within that scope, source checkouts can be specified by a
+numeric @code{revision number} (a repository-wide
+monotonically-increasing marker, such that each transaction that
+changes the repository is indexed by a different revision number), or
+a revision timestamp. When branches are used, the repository and
+module form a static @code{baseURL}, while each build has a
+@code{revision number} and a @code{branch} (which defaults to a
+statically-specified @code{defaultBranch}). The @code{baseURL} and
+@code{branch} are simply concatenated together to derive the
+@code{svnurl} to use for the checkout.
+
+@uref{http://www.perforce.com/, Perforce} is similar. The server
+is specified through a @code{P4PORT} parameter. Module and branch
+are specified in a single depot path, and revisions are
+depot-wide. When branches are used, the @code{p4base} and
+@code{defaultBranch} are concatenated together to produce the depot
+path.
+
+@uref{http://wiki.gnuarch.org/, Arch} and
+@uref{http://bazaar.canonical.com/, Bazaar} specify a repository by
+URL, as well as a @code{version} which is kind of like a branch name.
+Arch uses the word @code{archive} to represent the repository. Arch
+lets you push changes from one archive to another, removing the strict
+centralization required by CVS and SVN. It retains the distinction
+between repository and working directory that most other VC systems
+use. For complex multi-module directory structures, Arch has a
+built-in @code{build config} layer with which the checkout process has
+two steps. First, an initial bootstrap checkout is performed to
+retrieve a set of build-config files. Second, one of these files is
+used to figure out which archives/modules should be used to populate
+subdirectories of the initial checkout.
+
+Builders which use Arch and Bazaar therefore have a static archive
+@code{url}, and a default ``branch'' (which is a string that specifies
+a complete category--branch--version triple). Each build can have its
+own branch (the category--branch--version string) to override the
+default, as well as a revision number (which is turned into a
+--patch-NN suffix when performing the checkout).
+
+
+@uref{http://bazaar-vcs.org, Bzr} (which is a descendant of
+Arch/Bazaar, and is frequently referred to as ``Bazaar'') has the same
+sort of repository-vs-workspace model as Arch, but the repository data
+can either be stored inside the working directory or kept elsewhere
+(either on the same machine or on an entirely different machine). For
+the purposes of Buildbot (which never commits changes), the repository
+is specified with a URL and a revision number.
+
+The most common way to obtain read-only access to a bzr tree is via
+HTTP, simply by making the repository visible through a web server
+like Apache. Bzr can also use FTP and SFTP servers, if the buildslave
+process has sufficient privileges to access them. Higher performance
+can be obtained by running a special Bazaar-specific server. None of
+these matter to the buildbot: the repository URL just has to match the
+kind of server being used. The @code{repoURL} argument provides the
+location of the repository.
+
+Branches are expressed as subdirectories of the main central
+repository, which means that if branches are being used, the BZR step
+is given a @code{baseURL} and @code{defaultBranch} instead of getting
+the @code{repoURL} argument.
+
+
+@uref{http://darcs.net/, Darcs} doesn't really have the
+notion of a single master repository. Nor does it really have
+branches. In Darcs, each working directory is also a repository, and
+there are operations to push and pull patches from one of these
+@code{repositories} to another. For the Buildbot's purposes, all you
+need to do is specify the URL of a repository that you want to build
+from. The build slave will then pull the latest patches from that
+repository and build them. Multiple branches are implemented by using
+multiple repositories (possibly living on the same server).
+
+Builders which use Darcs therefore have a static @code{repourl} which
+specifies the location of the repository. If branches are being used,
+the source Step is instead configured with a @code{baseURL} and a
+@code{defaultBranch}, and the two strings are simply concatenated
+together to obtain the repository's URL. Each build then has a
+specific branch which replaces @code{defaultBranch}, or just uses the
+default one. Instead of a revision number, each build can have a
+``context'', which is a string that records all the patches that are
+present in a given tree (this is the output of @command{darcs changes
+--context}, and is considerably less concise than, e.g. Subversion's
+revision number, but the patch-reordering flexibility of Darcs makes
+it impossible to provide a shorter useful specification).
+
+@uref{http://selenic.com/mercurial, Mercurial} is like Darcs, in that
+each branch is stored in a separate repository. The @code{repourl},
+@code{baseURL}, and @code{defaultBranch} arguments are all handled the
+same way as with Darcs. The ``revision'', however, is the hash
+identifier returned by @command{hg identify}.
+
+@uref{http://git.or.cz/, Git} also follows a decentralized model, and
+each repository can have several branches and tags. The source Step is
+configured with a static @code{repourl} which specifies the location
+of the repository. In addition, an optional @code{branch} parameter
+can be specified to check out code from a specific branch instead of
+the default ``master'' branch. The ``revision'' is specified as a SHA1
+hash as returned by e.g. @command{git rev-parse}. No attempt is made
+to ensure that the specified revision is actually a subset of the
+specified branch.
+
+
+@node Attributes of Changes, , How Different VC Systems Specify Sources, Version Control Systems
+@subsection Attributes of Changes
+
+@heading Who
+
+Each Change has a @code{who} attribute, which specifies which
+developer is responsible for the change. This is a string which comes
+from a namespace controlled by the VC repository. Frequently this
+means it is a username on the host which runs the repository, but not
+all VC systems require this (Arch, for example, uses a fully-qualified
+@code{Arch ID}, which looks like an email address, as does Darcs).
+Each StatusNotifier will map the @code{who} attribute into something
+appropriate for their particular means of communication: an email
+address, an IRC handle, etc.
+
+@heading Files
+
+It also has a list of @code{files}, which are just the tree-relative
+filenames of any files that were added, deleted, or modified for this
+Change. These filenames are used by the @code{fileIsImportant}
+function (in the Scheduler) to decide whether it is worth triggering a
+new build or not, e.g. the function could use the following function
+to only run a build if a C file were checked in:
+
+@example
+def has_C_files(change):
+ for name in change.files:
+ if name.endswith(".c"):
+ return True
+ return False
+@end example
+
+Certain BuildSteps can also use the list of changed files
+to run a more targeted series of tests, e.g. the
+@code{python_twisted.Trial} step can run just the unit tests that
+provide coverage for the modified .py files instead of running the
+full test suite.
+
+@heading Comments
+
+The Change also has a @code{comments} attribute, which is a string
+containing any checkin comments.
+
+@heading Revision
+
+Each Change can have a @code{revision} attribute, which describes how
+to get a tree with a specific state: a tree which includes this Change
+(and all that came before it) but none that come after it. If this
+information is unavailable, the @code{.revision} attribute will be
+@code{None}. These revisions are provided by the ChangeSource, and
+consumed by the @code{computeSourceRevision} method in the appropriate
+@code{step.Source} class.
+
+@table @samp
+@item CVS
+@code{revision} is an int, seconds since the epoch
+@item SVN
+@code{revision} is an int, the changeset number (r%d)
+@item Darcs
+@code{revision} is a large string, the output of @code{darcs changes --context}
+@item Mercurial
+@code{revision} is a short string (a hash ID), the output of @code{hg identify}
+@item Arch/Bazaar
+@code{revision} is the full revision ID (ending in --patch-%d)
+@item P4
+@code{revision} is an int, the transaction number
+@item Git
+@code{revision} is a short string (a SHA1 hash), the output of e.g.
+@code{git rev-parse}
+@end table
+
+@heading Branches
+
+The Change might also have a @code{branch} attribute. This indicates
+that all of the Change's files are in the same named branch. The
+Schedulers get to decide whether the branch should be built or not.
+
+For VC systems like CVS, Arch, Monotone, and Git, the @code{branch}
+name is unrelated to the filename. (that is, the branch name and the
+filename inhabit unrelated namespaces). For SVN, branches are
+expressed as subdirectories of the repository, so the file's
+``svnurl'' is a combination of some base URL, the branch name, and the
+filename within the branch. (In a sense, the branch name and the
+filename inhabit the same namespace). Darcs branches are
+subdirectories of a base URL just like SVN. Mercurial branches are the
+same as Darcs.
+
+@table @samp
+@item CVS
+branch='warner-newfeature', files=['src/foo.c']
+@item SVN
+branch='branches/warner-newfeature', files=['src/foo.c']
+@item Darcs
+branch='warner-newfeature', files=['src/foo.c']
+@item Mercurial
+branch='warner-newfeature', files=['src/foo.c']
+@item Arch/Bazaar
+branch='buildbot--usebranches--0', files=['buildbot/master.py']
+@item Git
+branch='warner-newfeature', files=['src/foo.c']
+@end table
+
+@heading Links
+
+@c TODO: who is using 'links'? how is it being used?
+
+Finally, the Change might have a @code{links} list, which is intended
+to provide a list of URLs to a @emph{viewcvs}-style web page that
+provides more detail for this Change, perhaps including the full file
+diffs.
+
+
+@node Schedulers, BuildSet, Version Control Systems, Concepts
+@section Schedulers
+
+@cindex Scheduler
+
+Each Buildmaster has a set of @code{Scheduler} objects, each of which
+gets a copy of every incoming Change. The Schedulers are responsible
+for deciding when Builds should be run. Some Buildbot installations
+might have a single Scheduler, while others may have several, each for
+a different purpose.
+
+For example, a ``quick'' scheduler might exist to give immediate
+feedback to developers, hoping to catch obvious problems in the code
+that can be detected quickly. These typically do not run the full test
+suite, nor do they run on a wide variety of platforms. They also
+usually do a VC update rather than performing a brand-new checkout
+each time. You could have a ``quick'' scheduler which used a 30 second
+timeout, and feeds a single ``quick'' Builder that uses a VC
+@code{mode='update'} setting.
+
+A separate ``full'' scheduler would run more comprehensive tests a
+little while later, to catch more subtle problems. This scheduler
+would have a longer tree-stable-timer, maybe 30 minutes, and would
+feed multiple Builders (with a @code{mode=} of @code{'copy'},
+@code{'clobber'}, or @code{'export'}).
+
+The @code{tree-stable-timer} and @code{fileIsImportant} decisions are
+made by the Scheduler. Dependencies are also implemented here.
+Periodic builds (those which are run every N seconds rather than after
+new Changes arrive) are triggered by a special @code{Periodic}
+Scheduler subclass. The default Scheduler class can also be told to
+watch for specific branches, ignoring Changes on other branches. This
+may be useful if you have a trunk and a few release branches which
+should be tracked, but you don't want to have the Buildbot pay
+attention to several dozen private user branches.
+
+When the setup has multiple sources of Changes the @code{category}
+can be used for @code{Scheduler} objects to filter out a subset
+of the Changes. Note that not all change sources can attach a category.
+
+Some Schedulers may trigger builds for other reasons, other than
+recent Changes. For example, a Scheduler subclass could connect to a
+remote buildmaster and watch for builds of a library to succeed before
+triggering a local build that uses that library.
+
+Each Scheduler creates and submits @code{BuildSet} objects to the
+@code{BuildMaster}, which is then responsible for making sure the
+individual @code{BuildRequests} are delivered to the target
+@code{Builders}.
+
+@code{Scheduler} instances are activated by placing them in the
+@code{c['schedulers']} list in the buildmaster config file. Each
+Scheduler has a unique name.
+
+
+@node BuildSet, BuildRequest, Schedulers, Concepts
+@section BuildSet
+
+@cindex BuildSet
+
+A @code{BuildSet} is the name given to a set of Builds that all
+compile/test the same version of the tree on multiple Builders. In
+general, all these component Builds will perform the same sequence of
+Steps, using the same source code, but on different platforms or
+against a different set of libraries.
+
+The @code{BuildSet} is tracked as a single unit, which fails if any of
+the component Builds have failed, and therefore can succeed only if
+@emph{all} of the component Builds have succeeded. There are two kinds
+of status notification messages that can be emitted for a BuildSet:
+the @code{firstFailure} type (which fires as soon as we know the
+BuildSet will fail), and the @code{Finished} type (which fires once
+the BuildSet has completely finished, regardless of whether the
+overall set passed or failed).
+
+A @code{BuildSet} is created with a @emph{source stamp} tuple of
+(branch, revision, changes, patch), some of which may be None, and a
+list of Builders on which it is to be run. They are then given to the
+BuildMaster, which is responsible for creating a separate
+@code{BuildRequest} for each Builder.
+
+There are a couple of different likely values for the
+@code{SourceStamp}:
+
+@table @code
+@item (revision=None, changes=[CHANGES], patch=None)
+This is a @code{SourceStamp} used when a series of Changes have
+triggered a build. The VC step will attempt to check out a tree that
+contains CHANGES (and any changes that occurred before CHANGES, but
+not any that occurred after them).
+
+@item (revision=None, changes=None, patch=None)
+This builds the most recent code on the default branch. This is the
+sort of @code{SourceStamp} that would be used on a Build that was
+triggered by a user request, or a Periodic scheduler. It is also
+possible to configure the VC Source Step to always check out the
+latest sources rather than paying attention to the Changes in the
+SourceStamp, which will result in the same behavior as this.
+
+@item (branch=BRANCH, revision=None, changes=None, patch=None)
+This builds the most recent code on the given BRANCH. Again, this is
+generally triggered by a user request or Periodic build.
+
+@item (revision=REV, changes=None, patch=(LEVEL, DIFF))
+This checks out the tree at the given revision REV, then applies a
+patch (using @code{patch -pLEVEL <DIFF}). The @ref{try} feature uses
+this kind of @code{SourceStamp}. If @code{patch} is None, the patching
+step is bypassed.
+
+@end table
+
+The buildmaster is responsible for turning the @code{BuildSet} into a
+set of @code{BuildRequest} objects and queueing them on the
+appropriate Builders.
+
+
+@node BuildRequest, Builder, BuildSet, Concepts
+@section BuildRequest
+
+@cindex BuildRequest
+
+A @code{BuildRequest} is a request to build a specific set of sources
+on a single specific @code{Builder}. Each @code{Builder} runs the
+@code{BuildRequest} as soon as it can (i.e. when an associated
+buildslave becomes free). @code{BuildRequest}s are prioritized from
+oldest to newest, so when a buildslave becomes free, the
+@code{Builder} with the oldest @code{BuildRequest} is run.
+
+The @code{BuildRequest} contains the @code{SourceStamp} specification.
+The actual process of running the build (the series of Steps that will
+be executed) is implemented by the @code{Build} object. In the future
+this might be changed, to have the @code{Build} define @emph{what}
+gets built, and a separate @code{BuildProcess} (provided by the
+Builder) to define @emph{how} it gets built.
+
+@code{BuildRequest} is created with optional @code{Properties}. One
+of these, @code{owner}, is collected by the resultant @code{Build} and
+added to the set of @emph{interested users} to which status
+notifications will be sent, depending on the configuration for each
+status object.
+
+The @code{BuildRequest} may be mergeable with other compatible
+@code{BuildRequest}s. Builds that are triggered by incoming Changes
+will generally be mergeable. Builds that are triggered by user
+requests are generally not, unless they are multiple requests to build
+the @emph{latest sources} of the same branch.
+
+@node Builder, Users, BuildRequest, Concepts
+@section Builder
+
+@cindex Builder
+
+The @code{Builder} is a long-lived object which controls all Builds of
+a given type. Each one is created when the config file is first
+parsed, and lives forever (or rather until it is removed from the
+config file). It mediates the connections to the buildslaves that do
+all the work, and is responsible for creating the @code{Build} objects
+that decide @emph{how} a build is performed (i.e., which steps are
+executed in what order).
+
+Each @code{Builder} gets a unique name, and the path name of a
+directory where it gets to do all its work (there is a
+buildmaster-side directory for keeping status information, as well as
+a buildslave-side directory where the actual checkout/compile/test
+commands are executed). It also gets a @code{BuildFactory}, which is
+responsible for creating new @code{Build} instances: because the
+@code{Build} instance is what actually performs each build, choosing
+the @code{BuildFactory} is the way to specify what happens each time a
+build is done.
+
+Each @code{Builder} is associated with one or more @code{BuildSlaves}.
+A @code{Builder} which is used to perform OS-X builds (as opposed to
+Linux or Solaris builds) should naturally be associated with an
+OS-X-based buildslave.
+
+A @code{Builder} may be given a set of environment variables to be used
+in its @pxref{ShellCommand}s. These variables will override anything in the
+buildslave's environment. Variables passed directly to a ShellCommand will
+override variables of the same name passed to the Builder.
+
+For example, if you have a pool of identical slaves it is often easier to
+manage variables like PATH from Buildbot rather than manually editing them
+inside of the slaves' environment.
+
+@example
+f = factory.BuildFactory()
+f.addStep(ShellCommand(
+ command=['bash', './configure']))
+f.addStep(Compile())
+
+c['builders'] = [
+ @{'name': 'test', 'slavenames': ['slave1', 'slave2', 'slave3', 'slave4',
+ 'slave5', 'slave6'],
+ 'builddir': 'test', 'factory': f,
+ 'env': @{'PATH': '/opt/local/bin:/opt/app/bin:/usr/local/bin:/usr/bin'@}@}
+]
+@end example
+
+@node Users, Build Properties, Builder, Concepts
+@section Users
+
+@cindex Users
+
+Buildbot has a somewhat limited awareness of @emph{users}. It assumes
+the world consists of a set of developers, each of whom can be
+described by a couple of simple attributes. These developers make
+changes to the source code, causing builds which may succeed or fail.
+
+Each developer is primarily known through the source control system. Each
+Change object that arrives is tagged with a @code{who} field that
+typically gives the account name (on the repository machine) of the user
+responsible for that change. This string is the primary key by which the
+User is known, and is displayed on the HTML status pages and in each Build's
+``blamelist''.
+
+To do more with the User than just refer to them, this username needs to
+be mapped into an address of some sort. The responsibility for this mapping
+is left up to the status module which needs the address. The core code knows
+nothing about email addresses or IRC nicknames, just user names.
+
+@menu
+* Doing Things With Users::
+* Email Addresses::
+* IRC Nicknames::
+* Live Status Clients::
+@end menu
+
+@node Doing Things With Users, Email Addresses, Users, Users
+@subsection Doing Things With Users
+
+Each Change has a single User who is responsible for that Change. Most
+Builds have a set of Changes: the Build represents the first time these
+Changes have been built and tested by the Buildbot. The build has a
+``blamelist'' that consists of a simple union of the Users responsible
+for all the Build's Changes.
+
+The Build provides (through the IBuildStatus interface) a list of Users
+who are ``involved'' in the build. For now this is equal to the
+blamelist, but in the future it will be expanded to include a ``build
+sheriff'' (a person who is ``on duty'' at that time and responsible for
+watching over all builds that occur during their shift), as well as
+per-module owners who simply want to keep watch over their domain (chosen by
+subdirectory or a regexp matched against the filenames pulled out of the
+Changes). The Involved Users are those who probably have an interest in the
+results of any given build.
+
+In the future, Buildbot will acquire the concept of ``Problems'',
+which last longer than builds and have beginnings and ends. For example, a
+test case which passed in one build and then failed in the next is a
+Problem. The Problem lasts until the test case starts passing again, at
+which point the Problem is said to be ``resolved''.
+
+If there appears to be a code change that went into the tree at the
+same time as the test started failing, that Change is marked as being
+responsible for the Problem, and the user who made the change is added
+to the Problem's ``Guilty'' list. In addition to this user, there may
+be others who share responsibility for the Problem (module owners,
+sponsoring developers). In addition to the Responsible Users, there
+may be a set of Interested Users, who take an interest in the fate of
+the Problem.
+
+Problems therefore have sets of Users who may want to be kept aware of
+the condition of the problem as it changes over time. If configured, the
+Buildbot can pester everyone on the Responsible list with increasing
+harshness until the problem is resolved, with the most harshness reserved
+for the Guilty parties themselves. The Interested Users may merely be told
+when the problem starts and stops, as they are not actually responsible for
+fixing anything.
+
+@node Email Addresses, IRC Nicknames, Doing Things With Users, Users
+@subsection Email Addresses
+
+The @code{buildbot.status.mail.MailNotifier} class
+(@pxref{MailNotifier}) provides a status target which can send email
+about the results of each build. It accepts a static list of email
+addresses to which each message should be delivered, but it can also
+be configured to send mail to the Build's Interested Users. To do
+this, it needs a way to convert User names into email addresses.
+
+For many VC systems, the User Name is actually an account name on the
+system which hosts the repository. As such, turning the name into an
+email address is a simple matter of appending
+``@@repositoryhost.com''. Some projects use other kinds of mappings
+(for example the preferred email address may be at ``project.org''
+despite the repository host being named ``cvs.project.org''), and some
+VC systems have full separation between the concept of a user and that
+of an account on the repository host (like Perforce). Some systems
+(like Arch) put a full contact email address in every change.
+
+To convert these names to addresses, the MailNotifier uses an EmailLookup
+object. This provides a .getAddress method which accepts a name and
+(eventually) returns an address. The default @code{MailNotifier}
+module provides an EmailLookup which simply appends a static string,
+configurable when the notifier is created. To create more complex behaviors
+(perhaps using an LDAP lookup, or using ``finger'' on a central host to
+determine a preferred address for the developer), provide a different object
+as the @code{lookup} argument.
+
+In the future, when the Problem mechanism has been set up, the Buildbot
+will need to send mail to arbitrary Users. It will do this by locating a
+MailNotifier-like object among all the buildmaster's status targets, and
+asking it to send messages to various Users. This means the User-to-address
+mapping only has to be set up once, in your MailNotifier, and every email
+message the buildbot emits will take advantage of it.
+
+@node IRC Nicknames, Live Status Clients, Email Addresses, Users
+@subsection IRC Nicknames
+
+Like MailNotifier, the @code{buildbot.status.words.IRC} class
+provides a status target which can announce the results of each build. It
+also provides an interactive interface by responding to online queries
+posted in the channel or sent as private messages.
+
+In the future, the buildbot can be configured to map User names to IRC
+nicknames, to watch for the recent presence of these nicknames, and to
+deliver build status messages to the interested parties. Like
+@code{MailNotifier} does for email addresses, the @code{IRC} object
+will have an @code{IRCLookup} which is responsible for nicknames. The
+mapping can be set up statically, or it can be updated by online users
+themselves (by claiming a username with some kind of ``buildbot: i am
+user warner'' commands).
+
+Once the mapping is established, the rest of the buildbot can ask the
+@code{IRC} object to send messages to various users. It can report on
+the likelihood that the user saw the given message (based upon how long the
+user has been inactive on the channel), which might prompt the Problem
+Hassler logic to send them an email message instead.
+
+@node Live Status Clients, , IRC Nicknames, Users
+@subsection Live Status Clients
+
+The Buildbot also offers a PB-based status client interface which can
+display real-time build status in a GUI panel on the developer's desktop.
+This interface is normally anonymous, but it could be configured to let the
+buildmaster know @emph{which} developer is using the status client. The
+status client could then be used as a message-delivery service, providing an
+alternative way to deliver low-latency high-interruption messages to the
+developer (like ``hey, you broke the build'').
+
+@node Build Properties, , Users, Concepts
+@section Build Properties
+@cindex Properties
+
+Each build has a set of ``Build Properties'', which can be used by its
+BuildStep to modify their actions. These properties, in the form of
+key-value pairs, provide a general framework for dynamically altering
+the behavior of a build based on its circumstances.
+
+Properties come from a number of places:
+@itemize
+@item global configuration --
+These properties apply to all builds.
+@item schedulers --
+A scheduler can specify properties available to all the builds it
+starts.
+@item buildslaves --
+A buildslave can pass properties on to the builds it performs.
+@item builds --
+A build automatically sets a number of properties on itself.
+@item steps --
+Steps of a build can set properties that are available to subsequent
+steps. In particular, source steps set a number of properties.
+@end itemize
+
+Properties are very flexible, and can be used to implement all manner
+of functionality. Here are some examples:
+
+Most Source steps record the revision that they checked out in
+the @code{got_revision} property. A later step could use this
+property to specify the name of a fully-built tarball, dropped in an
+easily-accessible directory for later testing.
+
+Some projects want to perform nightly builds as well as in response
+to committed changes. Such a project would run two schedulers,
+both pointing to the same set of builders, but could provide an
+@code{is_nightly} property so that steps can distinguish the nightly
+builds, perhaps to run more resource-intensive tests.
+
+Some projects have different build processes on different systems.
+Rather than create a build factory for each slave, the steps can use
+buildslave properties to identify the unique aspects of each slave
+and adapt the build process dynamically.
+
+@node Configuration, Getting Source Code Changes, Concepts, Top
+@chapter Configuration
+
+@cindex Configuration
+
+The buildbot's behavior is defined by the ``config file'', which
+normally lives in the @file{master.cfg} file in the buildmaster's base
+directory (but this can be changed with an option to the
+@code{buildbot create-master} command). This file completely specifies
+which Builders are to be run, which slaves they should use, how
+Changes should be tracked, and where the status information is to be
+sent. The buildmaster's @file{buildbot.tac} file names the base
+directory; everything else comes from the config file.
+
+A sample config file was installed for you when you created the
+buildmaster, but you will need to edit it before your buildbot will do
+anything useful.
+
+This chapter gives an overview of the format of this file and the
+various sections in it. You will need to read the later chapters to
+understand how to fill in each section properly.
+
+@menu
+* Config File Format::
+* Loading the Config File::
+* Testing the Config File::
+* Defining the Project::
+* Change Sources and Schedulers::
+* Merging BuildRequests::
+* Setting the slaveport::
+* Buildslave Specifiers::
+* On-Demand ("Latent") Buildslaves::
+* Defining Global Properties::
+* Defining Builders::
+* Defining Status Targets::
+* Debug options::
+@end menu
+
+@node Config File Format, Loading the Config File, Configuration, Configuration
+@section Config File Format
+
+The config file is, fundamentally, just a piece of Python code which
+defines a dictionary named @code{BuildmasterConfig}, with a number of
+keys that are treated specially. You don't need to know Python to do
+basic configuration, though, you can just copy the syntax of the
+sample file. If you @emph{are} comfortable writing Python code,
+however, you can use all the power of a full programming language to
+achieve more complicated configurations.
+
+The @code{BuildmasterConfig} name is the only one which matters: all
+other names defined during the execution of the file are discarded.
+When parsing the config file, the Buildmaster generally compares the
+old configuration with the new one and performs the minimum set of
+actions necessary to bring the buildbot up to date: Builders which are
+not changed are left untouched, and Builders which are modified get to
+keep their old event history.
+
+Basic Python syntax: comments start with a hash character (``#''),
+tuples are defined with @code{(parenthesis, pairs)}, arrays are
+defined with @code{[square, brackets]}, tuples and arrays are mostly
+interchangeable. Dictionaries (data structures which map ``keys'' to
+``values'') are defined with curly braces: @code{@{'key1': 'value1',
+'key2': 'value2'@} }. Function calls (and object instantiation) can use
+named parameters, like @code{w = html.Waterfall(http_port=8010)}.
+
+The config file starts with a series of @code{import} statements,
+which make various kinds of Steps and Status targets available for
+later use. The main @code{BuildmasterConfig} dictionary is created,
+then it is populated with a variety of keys. These keys are broken
+roughly into the following sections, each of which is documented in
+the rest of this chapter:
+
+@itemize @bullet
+@item
+Project Definitions
+@item
+Change Sources / Schedulers
+@item
+Slaveport
+@item
+Buildslave Configuration
+@item
+Builders / Interlocks
+@item
+Status Targets
+@item
+Debug options
+@end itemize
+
+The config file can use a few names which are placed into its namespace:
+
+@table @code
+@item basedir
+the base directory for the buildmaster. This string has not been
+expanded, so it may start with a tilde. It needs to be expanded before
+use. The config file is located in
+@code{os.path.expanduser(os.path.join(basedir, 'master.cfg'))}
+
+@end table
+
+
+@node Loading the Config File, Testing the Config File, Config File Format, Configuration
+@section Loading the Config File
+
+The config file is only read at specific points in time. It is first
+read when the buildmaster is launched. Once it is running, there are
+various ways to ask it to reload the config file. If you are on the
+system hosting the buildmaster, you can send a @code{SIGHUP} signal to
+it: the @command{buildbot} tool has a shortcut for this:
+
+@example
+buildbot reconfig @var{BASEDIR}
+@end example
+
+This command will show you all of the lines from @file{twistd.log}
+that relate to the reconfiguration. If there are any problems during
+the config-file reload, they will be displayed in these lines.
+
+The debug tool (@code{buildbot debugclient --master HOST:PORT}) has a
+``Reload .cfg'' button which will also trigger a reload. In the
+future, there will be other ways to accomplish this step (probably a
+password-protected button on the web page, as well as a privileged IRC
+command).
+
+When reloading the config file, the buildmaster will endeavor to
+change as little as possible about the running system. For example,
+although old status targets may be shut down and new ones started up,
+any status targets that were not changed since the last time the
+config file was read will be left running and untouched. Likewise any
+Builders which have not been changed will be left running. If a
+Builder is modified (say, the build process is changed) while a Build
+is currently running, that Build will keep running with the old
+process until it completes. Any previously queued Builds (or Builds
+which get queued after the reconfig) will use the new process.
+
+@node Testing the Config File, Defining the Project, Loading the Config File, Configuration
+@section Testing the Config File
+
+To verify that the config file is well-formed and contains no
+deprecated or invalid elements, use the ``checkconfig'' command:
+
+@example
+% buildbot checkconfig master.cfg
+Config file is good!
+@end example
+
+If the config file has deprecated features (perhaps because you've
+upgraded the buildmaster and need to update the config file to match),
+they will be announced by checkconfig. In this case, the config file
+will work, but you should really remove the deprecated items and use
+the recommended replacements instead:
+
+@example
+% buildbot checkconfig master.cfg
+/usr/lib/python2.4/site-packages/buildbot/master.py:559: DeprecationWarning: c['sources'] is
+deprecated as of 0.7.6 and will be removed by 0.8.0 . Please use c['change_source'] instead.
+ warnings.warn(m, DeprecationWarning)
+Config file is good!
+@end example
+
+If the config file is simply broken, that will be caught too:
+
+@example
+% buildbot checkconfig master.cfg
+Traceback (most recent call last):
+ File "/usr/lib/python2.4/site-packages/buildbot/scripts/runner.py", line 834, in doCheckConfig
+ ConfigLoader(configFile)
+ File "/usr/lib/python2.4/site-packages/buildbot/scripts/checkconfig.py", line 31, in __init__
+ self.loadConfig(configFile)
+ File "/usr/lib/python2.4/site-packages/buildbot/master.py", line 480, in loadConfig
+ exec f in localDict
+ File "/home/warner/BuildBot/master/foolscap/master.cfg", line 90, in ?
+ c[bogus] = "stuff"
+NameError: name 'bogus' is not defined
+@end example
+
+
+@node Defining the Project, Change Sources and Schedulers, Testing the Config File, Configuration
+@section Defining the Project
+
+There are a couple of basic settings that you use to tell the buildbot
+what project it is working on. This information is used by status
+reporters to let users find out more about the codebase being
+exercised by this particular Buildbot installation.
+
+@example
+c['projectName'] = "Buildbot"
+c['projectURL'] = "http://buildbot.sourceforge.net/"
+c['buildbotURL'] = "http://localhost:8010/"
+@end example
+
+@bcindex c['projectName']
+@code{projectName} is a short string that will be used to describe the
+project that this buildbot is working on. For example, it is used as
+the title of the waterfall HTML page.
+
+@bcindex c['projectURL']
+@code{projectURL} is a string that gives a URL for the project as a
+whole. HTML status displays will show @code{projectName} as a link to
+@code{projectURL}, to provide a link from buildbot HTML pages to your
+project's home page.
+
+@bcindex c['buildbotURL']
+The @code{buildbotURL} string should point to the location where the
+buildbot's internal web server (usually the @code{html.Waterfall}
+page) is visible. This typically uses the port number set when you
+create the @code{Waterfall} object: the buildbot needs your help to
+figure out a suitable externally-visible host name.
+
+When status notices are sent to users (either by email or over IRC),
+@code{buildbotURL} will be used to create a URL to the specific build
+or problem that they are being notified about. It will also be made
+available to queriers (over IRC) who want to find out where to get
+more information about this buildbot.
+
+@bcindex c['logCompressionLimit']
+The @code{logCompressionLimit} enables bz2-compression of build logs on
+disk for logs that are bigger than the given size, or disables that
+completely if given @code{False}. The default value is 4k, which should
+be a reasonable default on most file systems. This setting has no impact
+on status plugins, and merely affects the required disk space on the
+master for build logs.
+
+
+@node Change Sources and Schedulers, Merging BuildRequests, Defining the Project, Configuration
+@section Change Sources and Schedulers
+
+@bcindex c['sources']
+@bcindex c['change_source']
+
+The @code{c['change_source']} key is the ChangeSource
+instance@footnote{To be precise, it is an object or a list of objects
+which all implement the @code{buildbot.interfaces.IChangeSource}
+Interface. It is unusual to have multiple ChangeSources, so this key
+accepts either a single ChangeSource or a sequence of them.} that
+defines how the buildmaster learns about source code changes. More
+information about what goes here is available in @xref{Getting Source
+Code Changes}.
+
+@example
+from buildbot.changes.pb import PBChangeSource
+c['change_source'] = PBChangeSource()
+@end example
+@bcindex c['schedulers']
+
+(note: in buildbot-0.7.5 and earlier, this key was named
+@code{c['sources']}, and required a list. @code{c['sources']} is
+deprecated as of buildbot-0.7.6 and is scheduled to be removed in a
+future release).
+
+@code{c['schedulers']} is a list of Scheduler instances, each
+of which causes builds to be started on a particular set of
+Builders. The two basic Scheduler classes you are likely to start
+with are @code{Scheduler} and @code{Periodic}, but you can write a
+customized subclass to implement more complicated build scheduling.
+
+Scheduler arguments
+should always be specified by name (as keyword arguments), to allow
+for future expansion:
+
+@example
+sched = Scheduler(name="quick", builderNames=['lin', 'win'])
+@end example
+
+All schedulers have several arguments in common:
+
+@table @code
+@item name
+
+Each Scheduler must have a unique name. This is used in status
+displays, and is also available in the build property @code{scheduler}.
+
+@item builderNames
+
+This is the set of builders which this scheduler should trigger, specified
+as a list of names (strings).
+
+@item properties
+@cindex Properties
+
+This is a dictionary specifying properties that will be transmitted
+to all builds started by this scheduler.
+
+@end table
+
+Here is a brief catalog of the available Scheduler types. All these
+Schedulers are classes in @code{buildbot.scheduler}, and the
+docstrings there are the best source of documentation on the arguments
+taken by each one.
+
+@menu
+* Scheduler Scheduler::
+* AnyBranchScheduler::
+* Dependent Scheduler::
+* Periodic Scheduler::
+* Nightly Scheduler::
+* Try Schedulers::
+* Triggerable Scheduler::
+@end menu
+
+@node Scheduler Scheduler, AnyBranchScheduler, Change Sources and Schedulers, Change Sources and Schedulers
+@subsection Scheduler Scheduler
+@slindex buildbot.scheduler.Scheduler
+
+This is the original and still most popular Scheduler class. It follows
+exactly one branch, and starts a configurable tree-stable-timer after
+each change on that branch. When the timer expires, it starts a build
+on some set of Builders. The Scheduler accepts a @code{fileIsImportant}
+function which can be used to ignore some Changes if they do not
+affect any ``important'' files.
+
+The arguments to this scheduler are:
+
+@table @code
+@item name
+
+@item builderNames
+
+@item properties
+
+@item branch
+This Scheduler will pay attention to a single branch, ignoring Changes
+that occur on other branches. Setting @code{branch} equal to the
+special value of @code{None} means it should only pay attention to
+the default branch. Note that @code{None} is a keyword, not a string,
+so you want to use @code{None} and not @code{"None"}.
+
+@item treeStableTimer
+The Scheduler will wait for this many seconds before starting the
+build. If new changes are made during this interval, the timer will be
+restarted, so really the build will be started after a change and then
+after this many seconds of inactivity.
+
+@item fileIsImportant
+A callable which takes one argument, a Change instance, and returns
+@code{True} if the change is worth building, and @code{False} if
+it is not. Unimportant Changes are accumulated until the build is
+triggered by an important change. The default value of None means
+that all Changes are important.
+
+@item categories
+A list of categories of changes that this scheduler will respond to. If this
+is specified, then any non-matching changes are ignored.
+
+@end table
+
+Example:
+
+@example
+from buildbot import scheduler
+quick = scheduler.Scheduler(name="quick",
+ branch=None,
+ treeStableTimer=60,
+ builderNames=["quick-linux", "quick-netbsd"])
+full = scheduler.Scheduler(name="full",
+ branch=None,
+ treeStableTimer=5*60,
+ builderNames=["full-linux", "full-netbsd", "full-OSX"])
+c['schedulers'] = [quick, full]
+@end example
+
+In this example, the two ``quick'' builders are triggered 60 seconds
+after the tree has been changed. The ``full'' builds do not run quite
+so quickly (they wait 5 minutes), so hopefully if the quick builds
+fail due to a missing file or really simple typo, the developer can
+discover and fix the problem before the full builds are started. Both
+Schedulers only pay attention to the default branch: any changes
+on other branches are ignored by these Schedulers. Each Scheduler
+triggers a different set of Builders, referenced by name.
+
+@node AnyBranchScheduler, Dependent Scheduler, Scheduler Scheduler, Change Sources and Schedulers
+@subsection AnyBranchScheduler
+@slindex buildbot.scheduler.AnyBranchScheduler
+
+This scheduler uses a tree-stable-timer like the default one, but
+follows multiple branches at once. Each branch gets a separate timer.
+
+The arguments to this scheduler are:
+
+@table @code
+@item name
+
+@item builderNames
+
+@item properties
+
+@item branches
+This Scheduler will pay attention to any number of branches, ignoring
+Changes that occur on other branches. Branches are specified just as
+for the @code{Scheduler} class.
+
+@item treeStableTimer
+The Scheduler will wait for this many seconds before starting the
+build. If new changes are made during this interval, the timer will be
+restarted, so really the build will be started after a change and then
+after this many seconds of inactivity.
+
+@item fileIsImportant
+A callable which takes one argument, a Change instance, and returns
+@code{True} if the change is worth building, and @code{False} if
+it is not. Unimportant Changes are accumulated until the build is
+triggered by an important change. The default value of None means
+that all Changes are important.
+@end table
+
+@node Dependent Scheduler, Periodic Scheduler, AnyBranchScheduler, Change Sources and Schedulers
+@subsection Dependent Scheduler
+@cindex Dependent
+@cindex Dependencies
+@slindex buildbot.scheduler.Dependent
+
+It is common to wind up with one kind of build which should only be
+performed if the same source code was successfully handled by some
+other kind of build first. An example might be a packaging step: you
+might only want to produce .deb or RPM packages from a tree that was
+known to compile successfully and pass all unit tests. You could put
+the packaging step in the same Build as the compile and testing steps,
+but there might be other reasons to not do this (in particular you
+might have several Builders worth of compiles/tests, but only wish to
+do the packaging once). Another example is if you want to skip the
+``full'' builds after a failing ``quick'' build of the same source
+code. Or, if one Build creates a product (like a compiled library)
+that is used by some other Builder, you'd want to make sure the
+consuming Build is run @emph{after} the producing one.
+
+You can use ``Dependencies'' to express this relationship
+to the Buildbot. There is a special kind of Scheduler named
+@code{scheduler.Dependent} that will watch an ``upstream'' Scheduler
+for builds to complete successfully (on all of its Builders). Each time
+that happens, the same source code (i.e. the same @code{SourceStamp})
+will be used to start a new set of builds, on a different set of
+Builders. This ``downstream'' scheduler doesn't pay attention to
+Changes at all. It only pays attention to the upstream scheduler.
+
+If the build fails on any of the Builders in the upstream set,
+the downstream builds will not fire. Note that, for SourceStamps
+generated by a ChangeSource, the @code{revision} is None, meaning HEAD.
+If any changes are committed between the time the upstream scheduler
+begins its build and the time the dependent scheduler begins its
+build, then those changes will be included in the downstream build.
+See the @pxref{Triggerable Scheduler} for a more flexible dependency
+mechanism that can avoid this problem.
+
+The arguments to this scheduler are:
+
+@table @code
+@item name
+
+@item builderNames
+
+@item properties
+
+@item upstream
+The upstream scheduler to watch. Note that this is an ``instance'',
+not the name of the scheduler.
+@end table
+
+Example:
+
+@example
+from buildbot import scheduler
+tests = scheduler.Scheduler("just-tests", None, 5*60,
+ ["full-linux", "full-netbsd", "full-OSX"])
+package = scheduler.Dependent("build-package",
+ tests, # upstream scheduler -- no quotes!
+ ["make-tarball", "make-deb", "make-rpm"])
+c['schedulers'] = [tests, package]
+@end example
+
+@node Periodic Scheduler, Nightly Scheduler, Dependent Scheduler, Change Sources and Schedulers
+@subsection Periodic Scheduler
+@slindex buildbot.scheduler.Periodic
+
+This simple scheduler just triggers a build every N seconds.
+
+The arguments to this scheduler are:
+
+@table @code
+@item name
+
+@item builderNames
+
+@item properties
+
+@item periodicBuildTimer
+The time, in seconds, after which to start a build.
+@end table
+
+Example:
+
+@example
+from buildbot import scheduler
+nightly = scheduler.Periodic(name="nightly",
+ builderNames=["full-solaris"],
+ periodicBuildTimer=24*60*60)
+c['schedulers'] = [nightly]
+@end example
+
+The Scheduler in this example just runs the full solaris build once
+per day. Note that this Scheduler only lets you control the time
+between builds, not the absolute time-of-day of each Build, so this
+could easily wind up a ``daily'' or ``every afternoon'' scheduler
+depending upon when it was first activated.
+
+@node Nightly Scheduler, Try Schedulers, Periodic Scheduler, Change Sources and Schedulers
+@subsection Nightly Scheduler
+@slindex buildbot.scheduler.Nightly
+
+This is a highly configurable periodic build scheduler, which triggers
+a build at particular times of day, week, month, or year. The
+configuration syntax is very similar to the well-known @code{crontab}
+format, in which you provide values for minute, hour, day, and month
+(some of which can be wildcards), and a build is triggered whenever
+the current time matches the given constraints. This can run a build
+every night, every morning, every weekend, alternate Thursdays,
+on your boss's birthday, etc.
+
+Pass some subset of @code{minute}, @code{hour}, @code{dayOfMonth},
+@code{month}, and @code{dayOfWeek}; each may be a single number or
+a list of valid values. The builds will be triggered whenever the
+current time matches these values. Wildcards are represented by a
+'*' string. All fields default to a wildcard except 'minute', so
+with no fields this defaults to a build every hour, on the hour.
+The full list of parameters is:
+
+@table @code
+@item name
+
+@item builderNames
+
+@item properties
+
+@item branch
+The branch to build, just as for @code{Scheduler}.
+
+@item minute
+The minute of the hour on which to start the build. This defaults
+to 0, meaning an hourly build.
+
+@item hour
+The hour of the day on which to start the build, in 24-hour notation.
+This defaults to *, meaning every hour.
+
+@item month
+The month in which to start the build, with January = 1. This defaults
+to *, meaning every month.
+
+@item dayOfWeek
+The day of the week to start a build, with Monday = 0. This defaults
+to *, meaning every day of the week.
+
+@item onlyIfChanged
+If this is true, then builds will not be scheduled at the designated time
+unless the source has changed since the previous build.
+@end table
+
+For example, the following master.cfg clause will cause a build to be
+started every night at 3:00am:
+
+@example
+s = scheduler.Nightly(name='nightly',
+ builderNames=['builder1', 'builder2'],
+ hour=3,
+ minute=0)
+@end example
+
+This scheduler will perform a build each monday morning at 6:23am and
+again at 8:23am, but only if someone has committed code in the interim:
+
+@example
+s = scheduler.Nightly(name='BeforeWork',
+ builderNames=['builder1'],
+ dayOfWeek=0,
+ hour=[6,8],
+ minute=23,
+ onlyIfChanged=True)
+@end example
+
+The following runs a build every two hours, using Python's @code{range}
+function:
+
+@example
+s = Nightly(name='every2hours',
+ builderNames=['builder1'],
+ hour=range(0, 24, 2))
+@end example
+
+Finally, this example will run only on December 24th:
+
+@example
+s = Nightly(name='SleighPreflightCheck',
+ builderNames=['flying_circuits', 'radar'],
+ month=12,
+ dayOfMonth=24,
+ hour=12,
+ minute=0)
+@end example
+
+@node Try Schedulers, Triggerable Scheduler, Nightly Scheduler, Change Sources and Schedulers
+@subsection Try Schedulers
+@slindex buildbot.scheduler.Try_Jobdir
+@slindex buildbot.scheduler.Try_Userpass
+
+This scheduler allows developers to use the @code{buildbot try}
+command to trigger builds of code they have not yet committed. See
+@ref{try} for complete details.
+
+Two implementations are available: @code{Try_Jobdir} and
+@code{Try_Userpass}. The former monitors a job directory, specified
+by the @code{jobdir} parameter, while the latter listens for PB
+connections on a specific @code{port}, and authenticates against
+@code{userpass}.
+
+@node Triggerable Scheduler, , Try Schedulers, Change Sources and Schedulers
+@subsection Triggerable Scheduler
+@cindex Triggers
+@slindex buildbot.scheduler.Triggerable
+
+The @code{Triggerable} scheduler waits to be triggered by a Trigger
+step (see @ref{Triggering Schedulers}) in another build. That step
+can optionally wait for the scheduler's builds to complete. This
+provides two advantages over Dependent schedulers. First, the same
+scheduler can be triggered from multiple builds. Second, the ability
+to wait for a Triggerable's builds to complete provides a form of
+"subroutine call", where one or more builds can "call" a scheduler
+to perform some work for them, perhaps on other buildslaves.
+
+The parameters are just the basics:
+
+@table @code
+@item name
+@item builderNames
+@item properties
+@end table
+
+This class is only useful in conjunction with the @code{Trigger} step.
+Here is a fully-worked example:
+
+@example
+from buildbot import scheduler
+from buildbot.process import factory
+from buildbot.steps import trigger
+
+checkin = scheduler.Scheduler(name="checkin",
+ branch=None,
+ treeStableTimer=5*60,
+ builderNames=["checkin"])
+nightly = scheduler.Nightly(name='nightly',
+ builderNames=['nightly'],
+ hour=3,
+ minute=0)
+
+mktarball = scheduler.Triggerable(name="mktarball",
+ builderNames=["mktarball"])
+build = scheduler.Triggerable(name="build-all-platforms",
+ builderNames=["build-all-platforms"])
+test = scheduler.Triggerable(name="distributed-test",
+ builderNames=["distributed-test"])
+package = scheduler.Triggerable(name="package-all-platforms",
+ builderNames=["package-all-platforms"])
+
+c['schedulers'] = [checkin, nightly, build, test, package]
+
+# on checkin, make a tarball, build it, and test it
+checkin_factory = factory.BuildFactory()
+checkin_factory.addStep(trigger.Trigger(schedulerNames=['mktarball'],
+ waitForFinish=True))
+checkin_factory.addStep(trigger.Trigger(schedulerNames=['build-all-platforms'],
+ waitForFinish=True))
+checkin_factory.addStep(trigger.Trigger(schedulerNames=['distributed-test'],
+ waitForFinish=True))
+
+# and every night, make a tarball, build it, and package it
+nightly_factory = factory.BuildFactory()
+nightly_factory.addStep(trigger.Trigger(schedulerNames=['mktarball'],
+ waitForFinish=True))
+nightly_factory.addStep(trigger.Trigger(schedulerNames=['build-all-platforms'],
+ waitForFinish=True))
+nightly_factory.addStep(trigger.Trigger(schedulerNames=['package-all-platforms'],
+ waitForFinish=True))
+@end example
+
+@node Merging BuildRequests, Setting the slaveport, Change Sources and Schedulers, Configuration
+@section Merging BuildRequests
+
+@bcindex c['mergeRequests']
+
+By default, buildbot merges BuildRequests that have compatible
+SourceStamps. This behaviour can be customized with the
+@code{c['mergeRequests']} configuration key. This key specifies a function
+which is called with three arguments: a @code{Builder} and two
+@code{BuildRequest} objects. It should return true if the requests can be
+merged. For example:
+
+@example
+def mergeRequests(builder, req1, req2):
+ """Don't merge buildrequest at all"""
+ return False
+c['mergeRequests'] = mergeRequests
+@end example
+
+In many cases, the details of the SourceStamps and BuildRequests are important.
+In this example, only BuildRequests with the same "reason" are merged; thus
+developers forcing builds for different reasons will see distinct builds.
+
+@example
+def mergeRequests(builder, req1, req2):
+ if req1.source.canBeMergedWith(req2.source) and req1.reason == req2.reason:
+ return True
+ return False
+c['mergeRequests'] = mergeRequests
+@end example
+
+@node Setting the slaveport, Buildslave Specifiers, Merging BuildRequests, Configuration
+@section Setting the slaveport
+
+@bcindex c['slavePortnum']
+
+The buildmaster will listen on a TCP port of your choosing for
+connections from buildslaves. It can also use this port for
+connections from remote Change Sources, status clients, and debug
+tools. This port should be visible to the outside world, and you'll
+need to tell your buildslave admins about your choice.
+
+It does not matter which port you pick, as long as it is externally
+visible, however you should probably use something larger than 1024,
+since most operating systems don't allow non-root processes to bind to
+low-numbered ports. If your buildmaster is behind a firewall or a NAT
+box of some sort, you may have to configure your firewall to permit
+inbound connections to this port.
+
+@example
+c['slavePortnum'] = 10000
+@end example
+
+@code{c['slavePortnum']} is a @emph{strports} specification string,
+defined in the @code{twisted.application.strports} module (try
+@command{pydoc twisted.application.strports} to get documentation on
+the format). This means that you can have the buildmaster listen on a
+localhost-only port by doing:
+
+@example
+c['slavePortnum'] = "tcp:10000:interface=127.0.0.1"
+@end example
+
+This might be useful if you only run buildslaves on the same machine,
+and they are all configured to contact the buildmaster at
+@code{localhost:10000}.
+
+
+@node Buildslave Specifiers, On-Demand ("Latent") Buildslaves, Setting the slaveport, Configuration
+@section Buildslave Specifiers
+@bcindex c['slaves']
+
+The @code{c['slaves']} key is a list of known buildslaves. In the common case,
+each buildslave is defined by an instance of the BuildSlave class. It
+represents a standard, manually started machine that will try to connect to
+the buildbot master as a slave. Contrast these with the "on-demand" latent
+buildslaves, such as the Amazon Web Service Elastic Compute Cloud latent
+buildslave discussed below.
+
+The BuildSlave class is instantiated with two values: (slavename,
+slavepassword). These are the same two values that need to be provided to the
+buildslave administrator when they create the buildslave.
+
+The slavenames must be unique, of course. The password exists to
+prevent evildoers from interfering with the buildbot by inserting
+their own (broken) buildslaves into the system and thus displacing the
+real ones.
+
+Buildslaves with an unrecognized slavename or a non-matching password
+will be rejected when they attempt to connect, and a message
+describing the problem will be put in the log file (see @ref{Logfiles}).
+
+@example
+from buildbot.buildslave import BuildSlave
+c['slaves'] = [BuildSlave('bot-solaris', 'solarispasswd'),
+ BuildSlave('bot-bsd', 'bsdpasswd')
+ ]
+@end example
+
+@cindex Properties
+@code{BuildSlave} objects can also be created with an optional
+@code{properties} argument, a dictionary specifying properties that
+will be available to any builds performed on this slave. For example:
+
+@example
+from buildbot.buildslave import BuildSlave
+c['slaves'] = [BuildSlave('bot-solaris', 'solarispasswd',
+ properties=@{'os':'solaris'@}),
+ ]
+@end example
+
+The @code{BuildSlave} constructor can also take an optional
+@code{max_builds} parameter to limit the number of builds that it
+will execute simultaneously:
+
+@example
+from buildbot.buildslave import BuildSlave
+c['slaves'] = [BuildSlave("bot-linux", "linuxpassword", max_builds=2)]
+@end example
+
+Historical note: in buildbot-0.7.5 and earlier, the @code{c['bots']}
+key was used instead, and it took a list of (name, password) tuples.
+This key is accepted for backwards compatibility, but is deprecated as
+of 0.7.6 and will go away in some future release.
+
+@menu
+* When Buildslaves Go Missing::
+@end menu
+
+@node When Buildslaves Go Missing, , , Buildslave Specifiers
+@subsection When Buildslaves Go Missing
+
+Sometimes, the buildslaves go away. One very common reason for this is
+when the buildslave process is started once (manually) and left
+running, but then later the machine reboots and the process is not
+automatically restarted.
+
+If you'd like to have the administrator of the buildslave (or other
+people) be notified by email when the buildslave has been missing for
+too long, just add the @code{notify_on_missing=} argument to the
+@code{BuildSlave} definition:
+
+@example
+c['slaves'] = [BuildSlave('bot-solaris', 'solarispasswd',
+ notify_on_missing="bob@@example.com"),
+ ]
+@end example
+
+By default, this will send email when the buildslave has been
+disconnected for more than one hour. Only one email per
+connection-loss event will be sent. To change the timeout, use
+@code{missing_timeout=} and give it a number of seconds (the default
+is 3600).
+
+You can have the buildmaster send email to multiple recipients: just
+provide a list of addresses instead of a single one:
+
+@example
+c['slaves'] = [BuildSlave('bot-solaris', 'solarispasswd',
+ notify_on_missing=["bob@@example.com",
+ "alice@@example.org"],
+ missing_timeout=300, # notify after 5 minutes
+ ),
+ ]
+@end example
+
+The email sent this way will use a MailNotifier (@pxref{MailNotifier})
+status target, if one is configured. This provides a way for you to
+control the ``from'' address of the email, as well as the relayhost
+(aka ``smarthost'') to use as an SMTP server. If no MailNotifier is
+configured on this buildmaster, the buildslave-missing emails will be
+sent using a default configuration.
+
+Note that if you want to have a MailNotifier for buildslave-missing
+emails but not for regular build emails, just create one with
+builders=[], as follows:
+
+@example
+from buildbot.status import mail
+m = mail.MailNotifier(fromaddr="buildbot@@localhost", builders=[],
+ relayhost="smtp.example.org")
+c['status'].append(m)
+c['slaves'] = [BuildSlave('bot-solaris', 'solarispasswd',
+ notify_on_missing="bob@@example.com"),
+ ]
+@end example
+
+@node On-Demand ("Latent") Buildslaves, Defining Global Properties, Buildslave Specifiers, Configuration
+@section On-Demand ("Latent") Buildslaves
+
+The standard buildbot model has slaves started manually. The previous section
+described how to configure the master for this approach.
+
+Another approach is to let the buildbot master start slaves when builds are
+ready, on-demand. Thanks to services such as Amazon Web Services' Elastic
+Compute Cloud ("AWS EC2"), this is relatively easy to set up, and can be
+very useful for some situations.
+
+The buildslaves that are started on-demand are called "latent" buildslaves.
+As of this writing, buildbot ships with an abstract base class for building
+latent buildslaves, and a concrete implementation for AWS EC2.
+
+@menu
+* Amazon Web Services Elastic Compute Cloud ("AWS EC2")::
+* Dangers with Latent Buildslaves::
+* Writing New Latent Buildslaves::
+@end menu
+
+@node Amazon Web Services Elastic Compute Cloud ("AWS EC2"), Dangers with Latent Buildslaves, , On-Demand ("Latent") Buildslaves
+@subsection Amazon Web Services Elastic Compute Cloud ("AWS EC2")
+
+@url{http://aws.amazon.com/ec2/,,AWS EC2} is a web service that allows you to
+start virtual machines in an Amazon data center. Please see their website for
+details, including costs. Using the AWS EC2 latent buildslaves involves getting
+an EC2 account with AWS and setting up payment; customizing one or more EC2
+machine images ("AMIs") on your desired operating system(s) and publishing
+them (privately if needed); and configuring the buildbot master to know how to
+start your customized images for "substantiating" your latent slaves.
+
+@menu
+* Get an AWS EC2 Account::
+* Create an AMI::
+* Configure the Master with an EC2LatentBuildSlave::
+@end menu
+
+@node Get an AWS EC2 Account, Create an AMI, , Amazon Web Services Elastic Compute Cloud ("AWS EC2")
+@subsubsection Get an AWS EC2 Account
+
+To start off, to use the AWS EC2 latent buildslave, you need to get an AWS
+developer account and sign up for EC2. These instructions may help you get
+started:
+
+@itemize @bullet
+@item
+Go to http://aws.amazon.com/ and click to "Sign Up Now" for an AWS account.
+
+@item
+Once you are logged into your account, you need to sign up for EC2.
+Instructions for how to do this have changed over time because Amazon changes
+their website, so the best advice is to hunt for it. After signing up for EC2,
+it may say it wants you to upload an x.509 cert. You will need this to create
+images (see below) but it is not technically necessary for the buildbot master
+configuration.
+
+@item
+You must enter a valid credit card before you will be able to use EC2. Do that
+under 'Payment Method'.
+
+@item
+Make sure you're signed up for EC2 by going to 'Your Account'->'Account
+Activity' and verifying EC2 is listed.
+@end itemize
+
+@node Create an AMI, Configure the Master with an EC2LatentBuildSlave, Get an AWS EC2 Account, Amazon Web Services Elastic Compute Cloud ("AWS EC2")
+@subsubsection Create an AMI
+
+Now you need to create an AMI and configure the master. You may need to
+run through this cycle a few times to get it working, but these instructions
+should get you started.
+
+Creating an AMI is out of the scope of this document. The
+@url{http://docs.amazonwebservices.com/AWSEC2/latest/GettingStartedGuide/,,EC2 Getting Started Guide}
+is a good resource for this task. Here are a few additional hints.
+
+@itemize @bullet
+@item
+When an instance of the image starts, it needs to automatically start a
+buildbot slave that connects to your master (to create a buildbot slave,
+@pxref{Creating a buildslave}; to make a daemon,
+@pxref{Launching the daemons}).
+
+@item
+You may want to make an instance of the buildbot slave, configure it as a
+standard buildslave in the master (i.e., not as a latent slave), and test and
+debug it that way before you turn it into an AMI and convert to a latent
+slave in the master.
+@end itemize
+
+@node Configure the Master with an EC2LatentBuildSlave, , Create an AMI, Amazon Web Services Elastic Compute Cloud ("AWS EC2")
+@subsubsection Configure the Master with an EC2LatentBuildSlave
+
+Now let's assume you have an AMI that should work with the
+EC2LatentBuildSlave. It's now time to set up your buildbot master
+configuration.
+
+You will need some information from your AWS account: the "Access Key Id" and
+the "Secret Access Key". If you've built the AMI yourself, you probably
+already are familiar with these values. If you have not, and someone has
+given you access to an AMI, these hints may help you find the necessary
+values:
+
+@itemize @bullet
+@item
+While logged into your AWS account, find the "Access Identifiers" link (either
+on the left, or via "Your Account" -> "Access Identifiers").
+
+@item
+On the page, you'll see alphanumeric values for "Your Access Key Id:" and
+"Your Secret Access Key:". Make a note of these. Later on, we'll call the
+first one your "identifier" and the second one your "secret_identifier."
+@end itemize
+
+When creating an EC2LatentBuildSlave in the buildbot master configuration,
+the first three arguments are required. The name and password are the first
+two arguments, and work the same as with normal buildslaves. The next
+argument specifies the type of the EC2 virtual machine (available options as
+of this writing include "m1.small", "m1.large", "m1.xlarge", "c1.medium",
+and "c1.xlarge"; see the EC2 documentation for descriptions of these
+machines).
+
+Here is the simplest example of configuring an EC2 latent buildslave. It
+specifies all necessary remaining values explicitly in the instantiation.
+
+@example
+from buildbot.ec2buildslave import EC2LatentBuildSlave
+c['slaves'] = [EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ ami='ami-12345',
+ identifier='publickey',
+ secret_identifier='privatekey'
+ )]
+@end example
+
+The "ami" argument specifies the AMI that the master should start. The
+"identifier" argument specifies the AWS "Access Key Id," and the
+"secret_identifier" specifies the AWS "Secret Access Key." Both the AMI and
+the account information can be specified in alternate ways.
+
+Note that whoever has your identifier and secret_identifier values can request
+AWS work charged to your account, so these values need to be carefully
+protected. Another way to specify these access keys is to put them in a
+separate file. You can then make the access privileges stricter for this
+separate file, and potentially let more people read your main configuration
+file.
+
+By default, you can make an .ec2 directory in the home folder of the user
+running the buildbot master. In that directory, create a file called aws_id.
+The first line of that file should be your access key id; the second line
+should be your secret access key id. Then you can instantiate the build slave
+as follows.
+
+@example
+from buildbot.ec2buildslave import EC2LatentBuildSlave
+c['slaves'] = [EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ ami='ami-12345')]
+@end example
+
+If you want to put the key information in another file, use the
+"aws_id_file_path" initialization argument.
+
+Previous examples used a particular AMI. If the Buildbot master will be
+deployed in a process-controlled environment, it may be convenient to
+specify the AMI more flexibly. Rather than specifying an individual AMI,
+specify one or two AMI filters.
+
+In all cases, the AMI that sorts last by its location (the S3 bucket and
+manifest name) will be preferred.
+
+One available filter is to specify the acceptable AMI owners, by AWS account
+number (the 12 digit number, usually rendered in AWS with hyphens like
+"1234-5678-9012", should be entered as an integer).
+
+@example
+from buildbot.ec2buildslave import EC2LatentBuildSlave
+bot1 = EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ valid_ami_owners=[11111111111,
+ 22222222222],
+ identifier='publickey',
+ secret_identifier='privatekey'
+ )
+@end example
+
+The other available filter is to provide a regular expression string that
+will be matched against each AMI's location (the S3 bucket and manifest name).
+
+@example
+from buildbot.ec2buildslave import EC2LatentBuildSlave
+bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large',
+ valid_ami_location_regex=r'buildbot\-.*/image.manifest.xml',
+ identifier='publickey', secret_identifier='privatekey')
+@end example
+
+The regular expression can specify a group, which will be preferred for the
+sorting. Only the first group is used; subsequent groups are ignored.
+
+@example
+from buildbot.ec2buildslave import EC2LatentBuildSlave
+bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large',
+ valid_ami_location_regex=r'buildbot\-.*\-(.*)/image.manifest.xml',
+ identifier='publickey', secret_identifier='privatekey')
+@end example
+
+If the group can be cast to an integer, it will be. This allows 10 to sort
+after 1, for instance.
+
+@example
+from buildbot.ec2buildslave import EC2LatentBuildSlave
+bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large',
+ valid_ami_location_regex=r'buildbot\-.*\-(\d+)/image.manifest.xml',
+ identifier='publickey', secret_identifier='privatekey')
+@end example
+
+In addition to using the password as a handshake between the master and the
+slave, you may want to use a firewall to assert that only machines from a
+specific IP can connect as slaves. This is possible with AWS EC2 by using
+the Elastic IP feature. To configure, generate an Elastic IP in AWS, and then
+specify it in your configuration using the "elastic_ip" argument.
+
+@example
+from buildbot.ec2buildslave import EC2LatentBuildSlave
+c['slaves'] = [EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ 'ami-12345',
+ identifier='publickey',
+ secret_identifier='privatekey',
+ elastic_ip='208.77.188.166'
+ )]
+@end example
+
+The EC2LatentBuildSlave supports all other configuration from the standard
+BuildSlave. The "missing_timeout" and "notify_on_missing" specify how long
+to wait for an EC2 instance to attach before considering the attempt to have
+failed, and email addresses to alert, respectively. "missing_timeout"
+defaults to 20 minutes.
+
+The "build_wait_timeout" allows you to specify how long an EC2LatentBuildSlave
+should wait after a build for another build before it shuts down the EC2
+instance. It defaults to 10 minutes.
+
+"keypair_name" and "security_name" allow you to specify different names for
+these AWS EC2 values. They both default to "latent_buildbot_slave".
+
+@node Dangers with Latent Buildslaves, Writing New Latent Buildslaves, Amazon Web Services Elastic Compute Cloud ("AWS EC2"), On-Demand ("Latent") Buildslaves
+@subsection Dangers with Latent Buildslaves
+
+Any latent build slave that interacts with a for-fee service, such as the
+EC2LatentBuildSlave, brings significant risks. As already identified, the
+configuration will need access to account information that, if obtained by a
+criminal, can be used to charge services to your account. Also, bugs in the
+buildbot software may lead to unnecessary charges. In particular, if the
+master neglects to shut down an instance for some reason, a virtual machine
+may be running unnecessarily, charging against your account. Manual and/or
+automatic (e.g. nagios with a plugin using a library like boto)
+double-checking may be appropriate.
+
+A comparatively trivial note is that currently if two instances try to attach
+to the same latent buildslave, it is likely that the system will become
+confused. This should not occur, unless, for instance, you configure a normal
+build slave to connect with the authentication of a latent buildbot. If the
+situation occurs, stop all attached instances and restart the master.
+
+@node Writing New Latent Buildslaves, , Dangers with Latent Buildslaves, On-Demand ("Latent") Buildslaves
+@subsection Writing New Latent Buildslaves
+
+Writing a new latent buildslave should only require subclassing
+@code{buildbot.buildslave.AbstractLatentBuildSlave} and implementing
+start_instance and stop_instance.
+
+@example
+def start_instance(self):
+ # responsible for starting instance that will try to connect with this
+ # master. Should return deferred. Problems should use an errback. The
+ # callback value can be None, or can be an iterable of short strings to
+ # include in the "substantiate success" status message, such as
+ # identifying the instance that started.
+ raise NotImplementedError
+
+def stop_instance(self, fast=False):
+ # responsible for shutting down instance. Return a deferred. If `fast`,
+ # we're trying to shut the master down, so callback as soon as is safe.
+ # Callback value is ignored.
+ raise NotImplementedError
+@end example
+
+See @code{buildbot.ec2buildslave.EC2LatentBuildSlave} for an example, or see the
+test example @code{buildbot.test_slaves.FakeLatentBuildSlave}.
+
+@node Defining Global Properties, Defining Builders, On-Demand ("Latent") Buildslaves, Configuration
+@section Defining Global Properties
+@bcindex c['properties']
+@cindex Properties
+
+The @code{'properties'} configuration key defines a dictionary
+of properties that will be available to all builds started by the
+buildmaster:
+
+@example
+c['properties'] = @{
+ 'Widget-version' : '1.2',
+ 'release-stage' : 'alpha'
+@}
+@end example
+
+@node Defining Builders, Defining Status Targets, Defining Global Properties, Configuration
+@section Defining Builders
+
+@bcindex c['builders']
+
+The @code{c['builders']} key is a list of dictionaries which specify
+the Builders. The Buildmaster runs a collection of Builders, each of
+which handles a single type of build (e.g. full versus quick), on a
+single build slave. A Buildbot which makes sure that the latest code
+(``HEAD'') compiles correctly across four separate architectures will
+have four Builders, each performing the same build but on different
+slaves (one per platform).
+
+Each Builder gets a separate column in the waterfall display. In
+general, each Builder runs independently (although various kinds of
+interlocks can cause one Builder to have an effect on another).
+
+Each Builder specification dictionary has several required keys:
+
+@table @code
+@item name
+This specifies the Builder's name, which is used in status
+reports.
+
+@item slavename
+This specifies which buildslave will be used by this Builder.
+@code{slavename} must appear in the @code{c['slaves']} list. Each
+buildslave can accommodate multiple Builders.
+
+@item slavenames
+If you provide @code{slavenames} instead of @code{slavename}, you can
+give a list of buildslaves which are capable of running this Builder.
+If multiple buildslaves are available for any given Builder, you will
+have some measure of redundancy: in case one slave goes offline, the
+others can still keep the Builder working. In addition, multiple
+buildslaves will allow multiple simultaneous builds for the same
+Builder, which might be useful if you have a lot of forced or ``try''
+builds taking place.
+
+If you use this feature, it is important to make sure that the
+buildslaves are all, in fact, capable of running the given build. The
+slave hosts should be configured similarly, otherwise you will spend a
+lot of time trying (unsuccessfully) to reproduce a failure that only
+occurs on some of the buildslaves and not the others. Different
+platforms, operating systems, versions of major programs or libraries,
+all these things mean you should use separate Builders.
+
+@item builddir
+This specifies the name of a subdirectory (under the base directory)
+in which everything related to this builder will be placed. On the
+buildmaster, this holds build status information. On the buildslave,
+this is where checkouts, compiles, and tests are run.
+
+@item factory
+This is a @code{buildbot.process.factory.BuildFactory} instance which
+controls how the build is performed. Full details appear in their own
+chapter, @xref{Build Process}. Parameters like the location of the CVS
+repository and the compile-time options used for the build are
+generally provided as arguments to the factory's constructor.
+
+@end table
+
+Other optional keys may be set on each Builder:
+
+@table @code
+
+@item category
+If provided, this is a string that identifies a category for the
+builder to be a part of. Status clients can limit themselves to a
+subset of the available categories. A common use for this is to add
+new builders to your setup (for a new module, or for a new buildslave)
+that do not work correctly yet and allow you to integrate them with
+the active builders. You can put these new builders in a test
+category, make your main status clients ignore them, and have only
+private status clients pick them up. As soon as they work, you can
+move them over to the active category.
+
+@end table
+
+
+@node Defining Status Targets, Debug options, Defining Builders, Configuration
+@section Defining Status Targets
+
+The Buildmaster has a variety of ways to present build status to
+various users. Each such delivery method is a ``Status Target'' object
+in the configuration's @code{status} list. To add status targets, you
+just append more objects to this list:
+
+@bcindex c['status']
+
+@example
+c['status'] = []
+
+from buildbot.status import html
+c['status'].append(html.Waterfall(http_port=8010))
+
+from buildbot.status import mail
+m = mail.MailNotifier(fromaddr="buildbot@@localhost",
+ extraRecipients=["builds@@lists.example.com"],
+ sendToInterestedUsers=False)
+c['status'].append(m)
+
+from buildbot.status import words
+c['status'].append(words.IRC(host="irc.example.com", nick="bb",
+ channels=["#example"]))
+@end example
+
+Status delivery has its own chapter, @xref{Status Delivery}, in which
+all the built-in status targets are documented.
+
+
+@node Debug options, , Defining Status Targets, Configuration
+@section Debug options
+
+
+@bcindex c['debugPassword']
+If you set @code{c['debugPassword']}, then you can connect to the
+buildmaster with the diagnostic tool launched by @code{buildbot
+debugclient MASTER:PORT}. From this tool, you can reload the config
+file, manually force builds, and inject changes, which may be useful
+for testing your buildmaster without actually committing changes to
+your repository (or before you have the Change Sources set up). The
+debug tool uses the same port number as the slaves do:
+@code{c['slavePortnum']}, and is authenticated with this password.
+
+@example
+c['debugPassword'] = "debugpassword"
+@end example
+
+@bcindex c['manhole']
+If you set @code{c['manhole']} to an instance of one of the classes in
+@code{buildbot.manhole}, you can telnet or ssh into the buildmaster
+and get an interactive Python shell, which may be useful for debugging
+buildbot internals. It is probably only useful for buildbot
+developers. It exposes full access to the buildmaster's account
+(including the ability to modify and delete files), so it should not
+be enabled with a weak or easily guessable password.
+
+There are three separate @code{Manhole} classes. Two of them use SSH,
+one uses unencrypted telnet. Two of them use a username+password
+combination to grant access, one of them uses an SSH-style
+@file{authorized_keys} file which contains a list of ssh public keys.
+
+@table @code
+@item manhole.AuthorizedKeysManhole
+You construct this with the name of a file that contains one SSH
+public key per line, just like @file{~/.ssh/authorized_keys}. If you
+provide a non-absolute filename, it will be interpreted relative to
+the buildmaster's base directory.
+
+@item manhole.PasswordManhole
+This one accepts SSH connections but asks for a username and password
+when authenticating. It accepts only one such pair.
+
+
+@item manhole.TelnetManhole
+This accepts regular unencrypted telnet connections, and asks for a
+username/password pair before providing access. Because this
+username/password is transmitted in the clear, and because Manhole
+access to the buildmaster is equivalent to granting full shell
+privileges to both the buildmaster and all the buildslaves (and to all
+accounts which then run code produced by the buildslaves), it is
+highly recommended that you use one of the SSH manholes instead.
+
+@end table
+
+@example
+# some examples:
+from buildbot import manhole
+c['manhole'] = manhole.AuthorizedKeysManhole(1234, "authorized_keys")
+c['manhole'] = manhole.PasswordManhole(1234, "alice", "mysecretpassword")
+c['manhole'] = manhole.TelnetManhole(1234, "bob", "snoop_my_password_please")
+@end example
+
+The @code{Manhole} instance can be configured to listen on a specific
+port. You may wish to have this listening port bind to the loopback
+interface (sometimes known as ``lo0'', ``localhost'', or 127.0.0.1) to
+restrict access to clients which are running on the same host.
+
+@example
+from buildbot.manhole import PasswordManhole
+c['manhole'] = PasswordManhole("tcp:9999:interface=127.0.0.1","admin","passwd")
+@end example
+
+To have the @code{Manhole} listen on all interfaces, use
+@code{"tcp:9999"} or simply 9999. This port specification uses
+@code{twisted.application.strports}, so you can make it listen on SSL
+or even UNIX-domain sockets if you want.
+
+Note that using any Manhole requires that the TwistedConch package be
+installed, and that you be using Twisted version 2.0 or later.
+
+The buildmaster's SSH server will use a different host key than the
+normal sshd running on a typical unix host. This will cause the ssh
+client to complain about a ``host key mismatch'', because it does not
+realize there are two separate servers running on the same host. To
+avoid this, use a clause like the following in your @file{.ssh/config}
+file:
+
+@example
+Host remotehost-buildbot
+ HostName remotehost
+ HostKeyAlias remotehost-buildbot
+ Port 9999
+ # use 'user' if you use PasswordManhole and your name is not 'admin'.
+ # if you use AuthorizedKeysManhole, this probably doesn't matter.
+ User admin
+@end example
+
+
+@node Getting Source Code Changes, Build Process, Configuration, Top
+@chapter Getting Source Code Changes
+
+The most common way to use the Buildbot is centered around the idea of
+@code{Source Trees}: a directory tree filled with source code of some form
+which can be compiled and/or tested. Some projects use languages that don't
+involve any compilation step: nevertheless there may be a @code{build} phase
+where files are copied or rearranged into a form that is suitable for
+installation. Some projects do not have unit tests, and the Buildbot is
+merely helping to make sure that the sources can compile correctly. But in
+all of these cases, the thing-being-tested is a single source tree.
+
+A Version Control System maintains a source tree, and tells the
+buildmaster when it changes. The first step of each Build is typically
+to acquire a copy of some version of this tree.
+
+This chapter describes how the Buildbot learns about what Changes have
+occurred. For more information on VC systems and Changes, see
+@ref{Version Control Systems}.
+
+
+@menu
+* Change Sources::
+* Choosing ChangeSources::
+* CVSToys - PBService::
+* Mail-parsing ChangeSources::
+* PBChangeSource::
+* P4Source::
+* BonsaiPoller::
+* SVNPoller::
+* MercurialHook::
+* Bzr Hook::
+* Bzr Poller::
+@end menu
+
+
+
+@node Change Sources, Choosing ChangeSources, Getting Source Code Changes, Getting Source Code Changes
+@section Change Sources
+
+@c TODO: rework this, the one-buildmaster-one-tree thing isn't quite
+@c so narrow-minded anymore
+
+Each Buildmaster watches a single source tree. Changes can be provided
+by a variety of ChangeSource types, however any given project will
+typically have only a single ChangeSource active. This section
+provides a description of all available ChangeSource types and
+explains how to set up each of them.
+
+There are a variety of ChangeSources available, some of which are
+meant to be used in conjunction with other tools to deliver Change
+events from the VC repository to the buildmaster.
+
+@itemize @bullet
+
+@item CVSToys
+This ChangeSource opens a TCP connection from the buildmaster to a
+waiting FreshCVS daemon that lives on the repository machine, and
+subscribes to hear about Changes.
+
+@item MaildirSource
+This one watches a local maildir-format inbox for email sent out by
+the repository when a change is made. When a message arrives, it is
+parsed to create the Change object. A variety of parsing functions are
+available to accommodate different email-sending tools.
+
+@item PBChangeSource
+This ChangeSource listens on a local TCP socket for inbound
+connections from a separate tool. Usually, this tool would be run on
+the VC repository machine in a commit hook. It is expected to connect
+to the TCP socket and send a Change message over the network
+connection. The @command{buildbot sendchange} command is one example
+of a tool that knows how to send these messages, so you can write a
+commit script for your VC system that calls it to deliver the Change.
+There are other tools in the contrib/ directory that use the same
+protocol.
+
+@end itemize
+
+As a quick guide, here is a list of VC systems and the ChangeSources
+that might be useful with them. All of these ChangeSources are in the
+@code{buildbot.changes} module.
+
+@table @code
+@item CVS
+
+@itemize @bullet
+@item freshcvs.FreshCVSSource (connected via TCP to the freshcvs daemon)
+@item mail.FCMaildirSource (watching for email sent by a freshcvs daemon)
+@item mail.BonsaiMaildirSource (watching for email sent by Bonsai)
+@item mail.SyncmailMaildirSource (watching for email sent by syncmail)
+@item pb.PBChangeSource (listening for connections from @code{buildbot
+sendchange} run in a loginfo script)
+@item pb.PBChangeSource (listening for connections from a long-running
+@code{contrib/viewcvspoll.py} polling process which examines the ViewCVS
+database directly)
+@end itemize
+
+@item SVN
+@itemize @bullet
+@item pb.PBChangeSource (listening for connections from
+@code{contrib/svn_buildbot.py} run in a postcommit script)
+@item pb.PBChangeSource (listening for connections from a long-running
+@code{contrib/svn_watcher.py} or @code{contrib/svnpoller.py} polling
+process)
+@item mail.SVNCommitEmailMaildirSource (watching for email sent by commit-email.pl)
+@item svnpoller.SVNPoller (polling the SVN repository)
+@end itemize
+
+@item Darcs
+@itemize @bullet
+@item pb.PBChangeSource (listening for connections from
+@code{contrib/darcs_buildbot.py} in a commit script)
+@end itemize
+
+@item Mercurial
+@itemize @bullet
+@item pb.PBChangeSource (listening for connections from
+@code{contrib/hg_buildbot.py} run in an 'incoming' hook)
+@item pb.PBChangeSource (listening for connections from
+@code{buildbot/changes/hgbuildbot.py} run as an in-process 'changegroup'
+hook)
+@end itemize
+
+@item Arch/Bazaar
+@itemize @bullet
+@item pb.PBChangeSource (listening for connections from
+@code{contrib/arch_buildbot.py} run in a commit hook)
+@end itemize
+
+@item Bzr (the newer Bazaar)
+@itemize @bullet
+@item pb.PBChangeSource (listening for connections from
+@code{contrib/bzr_buildbot.py} run in a post-change-branch-tip or commit hook)
+@item @code{contrib/bzr_buildbot.py}'s BzrPoller (polling the Bzr repository)
+@end itemize
+
+@item Git
+@itemize @bullet
+@item pb.PBChangeSource (listening for connections from
+@code{contrib/git_buildbot.py} run in the post-receive hook)
+@end itemize
+
+@end table
+
+All VC systems can be driven by a PBChangeSource and the
+@code{buildbot sendchange} tool run from some form of commit script.
+If you write an email parsing function, they can also all be driven by
+a suitable @code{MaildirSource}.
+
+
+@node Choosing ChangeSources, CVSToys - PBService, Change Sources, Getting Source Code Changes
+@section Choosing ChangeSources
+
+The @code{master.cfg} configuration file has a dictionary key named
+@code{BuildmasterConfig['change_source']}, which holds the active
+@code{IChangeSource} object. The config file will typically create an
+object from one of the classes described below and stuff it into this
+key.
+
+Each buildmaster typically has just a single ChangeSource, since it is
+only watching a single source tree. But if, for some reason, you need
+multiple sources, just set @code{c['change_source']} to a list of
+ChangeSources; it will accept that too.
+
+@example
+s = FreshCVSSourceNewcred(host="host", port=4519,
+ user="alice", passwd="secret",
+ prefix="Twisted")
+BuildmasterConfig['change_source'] = [s]
+@end example
+
+Each source tree has a nominal @code{top}. Each Change has a list of
+filenames, which are all relative to this top location. The
+ChangeSource is responsible for doing whatever is necessary to
+accomplish this. Most sources have a @code{prefix} argument: a partial
+pathname which is stripped from the front of all filenames provided to
+that @code{ChangeSource}. Files which are outside this sub-tree are
+ignored by the changesource: it does not generate Changes for those
+files.
+
+
+@node CVSToys - PBService, Mail-parsing ChangeSources, Choosing ChangeSources, Getting Source Code Changes
+@section CVSToys - PBService
+
+@csindex buildbot.changes.freshcvs.FreshCVSSource
+
+The @uref{http://purl.net/net/CVSToys, CVSToys} package provides a
+server which runs on the machine that hosts the CVS repository it
+watches. It has a variety of ways to distribute commit notifications,
+and offers a flexible regexp-based way to filter out uninteresting
+changes. One of the notification options is named @code{PBService} and
+works by listening on a TCP port for clients. These clients subscribe
+to hear about commit notifications.
+
+The buildmaster has a CVSToys-compatible @code{PBService} client built
+in. There are two versions of it, one for old versions of CVSToys
+(1.0.9 and earlier) which used the @code{oldcred} authentication
+framework, and one for newer versions (1.0.10 and later) which use
+@code{newcred}. Both are classes in the
+@code{buildbot.changes.freshcvs} package.
+
+@code{FreshCVSSourceNewcred} objects are created with the following
+parameters:
+
+@table @samp
+
+@item @code{host} and @code{port}
+these specify where the CVSToys server can be reached
+
+@item @code{user} and @code{passwd}
+these specify the login information for the CVSToys server
+(@code{freshcvs}). These must match the server's values, which are
+defined in the @code{freshCfg} configuration file (which lives in the
+CVSROOT directory of the repository).
+
+@item @code{prefix}
+this is the prefix to be found and stripped from filenames delivered
+by the CVSToys server. Most projects live in sub-directories of the
+main repository, as siblings of the CVSROOT sub-directory, so
+typically this prefix is set to that top sub-directory name.
+
+@end table
+
+@heading Example
+
+To set up the freshCVS server, add a statement like the following to
+your @file{freshCfg} file:
+
+@example
+pb = ConfigurationSet([
+ (None, None, None, PBService(userpass=('foo', 'bar'), port=4519)),
+ ])
+@end example
+
+This will announce all changes to a client which connects to port 4519
+using a username of 'foo' and a password of 'bar'.
+
+Then add a clause like this to your buildmaster's @file{master.cfg}:
+
+@example
+BuildmasterConfig['change_source'] = FreshCVSSource("cvs.example.com", 4519,
+ "foo", "bar",
+ prefix="glib/")
+@end example
+
+where "cvs.example.com" is the host that is running the FreshCVS daemon, and
+"glib" is the top-level directory (relative to the repository's root) where
+all your source code lives. Most projects keep one or more projects in the
+same repository (along with CVSROOT/ to hold admin files like loginfo and
+freshCfg); the prefix= argument tells the buildmaster to ignore everything
+outside that directory, and to strip that common prefix from all pathnames
+it handles.
+
+
+@node Mail-parsing ChangeSources, PBChangeSource, CVSToys - PBService, Getting Source Code Changes
+@section Mail-parsing ChangeSources
+
+Many projects publish information about changes to their source tree
+by sending an email message out to a mailing list, frequently named
+PROJECT-commits or PROJECT-changes. Each message usually contains a
+description of the change (who made the change, which files were
+affected) and sometimes a copy of the diff. Humans can subscribe to
+this list to stay informed about what's happening to the source tree.
+
+The Buildbot can also be subscribed to a -commits mailing list, and
+can trigger builds in response to Changes that it hears about. The
+buildmaster admin needs to arrange for these email messages to arrive
+in a place where the buildmaster can find them, and configure the
+buildmaster to parse the messages correctly. Once that is in place,
+the email parser will create Change objects and deliver them to the
+Schedulers (see @pxref{Change Sources and Schedulers}) just
+like any other ChangeSource.
+
+There are two components to setting up an email-based ChangeSource.
+The first is to route the email messages to the buildmaster, which is
+done by dropping them into a ``maildir''. The second is to actually
+parse the messages, which is highly dependent upon the tool that was
+used to create them. Each VC system has a collection of favorite
+change-emailing tools, and each has a slightly different format, so
+each has a different parsing function. There is a separate
+ChangeSource variant for each parsing function.
+
+Once you've chosen a maildir location and a parsing function, create
+the change source and put it in @code{c['change_source']}:
+
+@example
+from buildbot.changes.mail import SyncmailMaildirSource
+c['change_source'] = SyncmailMaildirSource("~/maildir-buildbot",
+ prefix="/trunk/")
+@end example
+
+@menu
+* Subscribing the Buildmaster::
+* Using Maildirs::
+* Parsing Email Change Messages::
+@end menu
+
+@node Subscribing the Buildmaster, Using Maildirs, Mail-parsing ChangeSources, Mail-parsing ChangeSources
+@subsection Subscribing the Buildmaster
+
+The recommended way to install the buildbot is to create a dedicated
+account for the buildmaster. If you do this, the account will probably
+have a distinct email address (perhaps
+@email{buildmaster@@example.org}). Then just arrange for this
+account's email to be delivered to a suitable maildir (described in
+the next section).
+
+If the buildbot does not have its own account, ``extension addresses''
+can be used to distinguish between email intended for the buildmaster
+and email intended for the rest of the account. In most modern MTAs,
+the e.g. @code{foo@@example.org} account has control over every email
+address at example.org which begins with "foo", such that email
+addressed to @email{account-foo@@example.org} can be delivered to a
+different destination than @email{account-bar@@example.org}. qmail
+does this by using separate .qmail files for the two destinations
+(@file{.qmail-foo} and @file{.qmail-bar}, with @file{.qmail}
+controlling the base address and @file{.qmail-default} controlling all
+other extensions). Other MTAs have similar mechanisms.
+
+Thus you can assign an extension address like
+@email{foo-buildmaster@@example.org} to the buildmaster, and retain
+@email{foo@@example.org} for your own use.
+
+
+@node Using Maildirs, Parsing Email Change Messages, Subscribing the Buildmaster, Mail-parsing ChangeSources
+@subsection Using Maildirs
+
+A ``maildir'' is a simple directory structure originally developed for
+qmail that allows safe atomic update without locking. Create a base
+directory with three subdirectories: ``new'', ``tmp'', and ``cur''.
+When messages arrive, they are put into a uniquely-named file (using
+pids, timestamps, and random numbers) in ``tmp''. When the file is
+complete, it is atomically renamed into ``new''. Eventually the
+buildmaster notices the file in ``new'', reads and parses the
+contents, then moves it into ``cur''. A cronjob can be used to delete
+files in ``cur'' at leisure.
+
+Maildirs are frequently created with the @command{maildirmake} tool,
+but a simple @command{mkdir -p ~/MAILDIR/@{cur,new,tmp@}} is pretty much
+equivalent.
+
+Many modern MTAs can deliver directly to maildirs. The usual .forward
+or .procmailrc syntax is to name the base directory with a trailing
+slash, so something like @code{~/MAILDIR/} . qmail and postfix are
+maildir-capable MTAs, and procmail is a maildir-capable MDA (Mail
+Delivery Agent).
+
+For MTAs which cannot put files into maildirs directly, the
+``safecat'' tool can be executed from a .forward file to accomplish
+the same thing.
+
+The Buildmaster uses the linux DNotify facility to receive immediate
+notification when the maildir's ``new'' directory has changed. When
+this facility is not available, it polls the directory for new
+messages, every 10 seconds by default.
+
+@node Parsing Email Change Messages, , Using Maildirs, Mail-parsing ChangeSources
+@subsection Parsing Email Change Messages
+
+The second component to setting up an email-based ChangeSource is to
+parse the actual notices. This is highly dependent upon the VC system
+and commit script in use.
+
+A couple of common tools used to create these change emails are:
+
+@table @samp
+
+@item CVS
+@table @samp
+@item CVSToys MailNotifier
+@ref{FCMaildirSource}
+@item Bonsai notification
+@ref{BonsaiMaildirSource}
+@item syncmail
+@ref{SyncmailMaildirSource}
+@end table
+
+@item SVN
+@table @samp
+@item svnmailer
+http://opensource.perlig.de/en/svnmailer/
+@item commit-email.pl
+@ref{SVNCommitEmailMaildirSource}
+@end table
+
+@item Mercurial
+@table @samp
+@item NotifyExtension
+http://www.selenic.com/mercurial/wiki/index.cgi/NotifyExtension
+@end table
+
+@item Git
+@table @samp
+@item post-receive-email
+http://git.kernel.org/?p=git/git.git;a=blob;f=contrib/hooks/post-receive-email;hb=HEAD
+@end table
+
+@end table
+
+
+The following sections describe the parsers available for each of
+these tools.
+
+Most of these parsers accept a @code{prefix=} argument, which is used
+to limit the set of files that the buildmaster pays attention to. This
+is most useful for systems like CVS and SVN which put multiple
+projects in a single repository (or use repository names to indicate
+branches). Each filename that appears in the email is tested against
+the prefix: if the filename does not start with the prefix, the file
+is ignored. If the filename @emph{does} start with the prefix, that
+prefix is stripped from the filename before any further processing is
+done. Thus the prefix usually ends with a slash.
+
+@menu
+* FCMaildirSource::
+* SyncmailMaildirSource::
+* BonsaiMaildirSource::
+* SVNCommitEmailMaildirSource::
+@end menu
+
+@node FCMaildirSource, SyncmailMaildirSource, Parsing Email Change Messages, Parsing Email Change Messages
+@subsubsection FCMaildirSource
+
+
+@csindex buildbot.changes.mail.FCMaildirSource
+
+http://twistedmatrix.com/users/acapnotic/wares/code/CVSToys/
+
+This parser works with the CVSToys @code{MailNotification} action,
+which will send email to a list of recipients for each commit. This
+tends to work better than using @code{/bin/mail} from within the
+CVSROOT/loginfo file directly, as CVSToys will batch together all
+files changed during the same CVS invocation, and can provide more
+information (like creating a ViewCVS URL for each file changed).
+
+The Buildbot's @code{FCMaildirSource} knows how to parse these CVSToys
+messages and turn them into Change objects. It can be given two
+parameters: the directory name of the maildir root, and the prefix to
+strip.
+
+@example
+from buildbot.changes.mail import FCMaildirSource
+c['change_source'] = FCMaildirSource("~/maildir-buildbot")
+@end example
+
+@node SyncmailMaildirSource, BonsaiMaildirSource, FCMaildirSource, Parsing Email Change Messages
+@subsubsection SyncmailMaildirSource
+
+@csindex buildbot.changes.mail.SyncmailMaildirSource
+
+http://sourceforge.net/projects/cvs-syncmail
+
+@code{SyncmailMaildirSource} knows how to parse the message format used by
+the CVS ``syncmail'' script.
+
+@example
+from buildbot.changes.mail import SyncmailMaildirSource
+c['change_source'] = SyncmailMaildirSource("~/maildir-buildbot")
+@end example
+
+@node BonsaiMaildirSource, SVNCommitEmailMaildirSource, SyncmailMaildirSource, Parsing Email Change Messages
+@subsubsection BonsaiMaildirSource
+
+@csindex buildbot.changes.mail.BonsaiMaildirSource
+
+http://www.mozilla.org/bonsai.html
+
+@code{BonsaiMaildirSource} parses messages sent out by Bonsai, the CVS
+tree-management system built by Mozilla.
+
+@example
+from buildbot.changes.mail import BonsaiMaildirSource
+c['change_source'] = BonsaiMaildirSource("~/maildir-buildbot")
+@end example
+
+@node SVNCommitEmailMaildirSource, , BonsaiMaildirSource, Parsing Email Change Messages
+@subsubsection SVNCommitEmailMaildirSource
+
+@csindex buildbot.changes.mail.SVNCommitEmailMaildirSource
+
+@code{SVNCommitEmailMaildirSource} parses messages sent out by the
+@code{commit-email.pl} script, which is included in the Subversion
+distribution.
+
+It does not currently handle branches: all of the Change objects that
+it creates will be associated with the default (i.e. trunk) branch.
+
+@example
+from buildbot.changes.mail import SVNCommitEmailMaildirSource
+c['change_source'] = SVNCommitEmailMaildirSource("~/maildir-buildbot")
+@end example
+
+
+@node PBChangeSource, P4Source, Mail-parsing ChangeSources, Getting Source Code Changes
+@section PBChangeSource
+
+@csindex buildbot.changes.pb.PBChangeSource
+
+The last kind of ChangeSource actually listens on a TCP port for
+clients to connect and push change notices @emph{into} the
+Buildmaster. This is used by the built-in @code{buildbot sendchange}
+notification tool, as well as the VC-specific
+@file{contrib/svn_buildbot.py}, @file{contrib/arch_buildbot.py},
+@file{contrib/hg_buildbot.py} tools, and the
+@code{buildbot.changes.hgbuildbot} hook. These tools are run by the
+repository (in a commit hook script), and connect to the buildmaster
+directly each time a file is committed. This is also useful for
+creating new kinds of change sources that work on a @code{push} model
+instead of some kind of subscription scheme, for example a script
+which is run out of an email .forward file.
+
+This ChangeSource can be configured to listen on its own TCP port, or
+it can share the port that the buildmaster is already using for the
+buildslaves to connect. (This is possible because the
+@code{PBChangeSource} uses the same protocol as the buildslaves, and
+they can be distinguished by the @code{username} attribute used when
+the initial connection is established). It might be useful to have it
+listen on a different port if, for example, you wanted to establish
+different firewall rules for that port. You could allow only the SVN
+repository machine access to the @code{PBChangeSource} port, while
+allowing only the buildslave machines access to the slave port. Or you
+could just expose one port and run everything over it. @emph{Note:
+this feature is not yet implemented, the PBChangeSource will always
+share the slave port and will always have a @code{user} name of
+@code{change}, and a passwd of @code{changepw}. These limitations will
+be removed in the future.}.
+
+
+The @code{PBChangeSource} is created with the following arguments. All
+are optional.
+
+@table @samp
+@item @code{port}
+which port to listen on. If @code{None} (which is the default), it
+shares the port used for buildslave connections. @emph{Not
+Implemented, always set to @code{None}}.
+
+@item @code{user} and @code{passwd}
+The user/passwd account information that the client program must use
+to connect. Defaults to @code{change} and @code{changepw}. @emph{Not
+Implemented, @code{user} is currently always set to @code{change},
+@code{passwd} is always set to @code{changepw}}.
+
+@item @code{prefix}
+The prefix to be found and stripped from filenames delivered over the
+connection. Any filenames which do not start with this prefix will be
+removed. If all the filenames in a given Change are removed, then that
+whole Change will be dropped. This string should probably end with a
+directory separator.
+
+This is useful for changes coming from version control systems that
+represent branches as parent directories within the repository (like
+SVN and Perforce). Use a prefix of 'trunk/' or
+'project/branches/foobranch/' to only follow one branch and to get
+correct tree-relative filenames. Without a prefix, the PBChangeSource
+will probably deliver Changes with filenames like @file{trunk/foo.c}
+instead of just @file{foo.c}. Of course this also depends upon the
+tool sending the Changes in (like @command{buildbot sendchange}) and
+what filenames it is delivering: that tool may be filtering and
+stripping prefixes at the sending end.
+
+@end table
+
+@node P4Source, BonsaiPoller, PBChangeSource, Getting Source Code Changes
+@section P4Source
+
+@csindex buildbot.changes.p4poller.P4Source
+
+The @code{P4Source} periodically polls a @uref{http://www.perforce.com/,
+Perforce} depot for changes. It accepts the following arguments:
+
+@table @samp
+@item @code{p4base}
+The base depot path to watch, without the trailing '/...'.
+
+@item @code{p4port}
+The Perforce server to connect to (as host:port).
+
+@item @code{p4user}
+The Perforce user.
+
+@item @code{p4passwd}
+The Perforce password.
+
+@item @code{p4bin}
+An optional string parameter. Specify the location of the perforce command
+line binary (p4). You only need to do this if the perforce binary is not
+in the path of the buildbot user. Defaults to ``p4''.
+
+@item @code{split_file}
+A function that maps a pathname, without the leading @code{p4base}, to a
+(branch, filename) tuple. The default just returns (None, branchfile),
+which effectively disables branch support. You should supply a function
+which understands your repository structure.
+
+@item @code{pollinterval}
+How often to poll, in seconds. Defaults to 600 (10 minutes).
+
+@item @code{histmax}
+The maximum number of changes to inspect at a time. If more than this
+number occur since the last poll, older changes will be silently
+ignored.
+@end table
+
+@heading Example
+
+This configuration uses the @code{P4PORT}, @code{P4USER}, and @code{P4PASSWD}
+specified in the buildmaster's environment. It watches a project in which the
+branch name is simply the next path component, and the file is all path
+components after.
+
+@example
+from buildbot.changes import p4poller
+s = p4poller.P4Source(p4base='//depot/project/',
+ split_file=lambda branchfile: branchfile.split('/',1),
+ )
+c['change_source'] = s
+@end example
+
+@node BonsaiPoller, SVNPoller, P4Source, Getting Source Code Changes
+@section BonsaiPoller
+
+@csindex buildbot.changes.bonsaipoller.BonsaiPoller
+
+The @code{BonsaiPoller} periodically polls a Bonsai server. This is a
+CGI script accessed through a web server that provides information
+about a CVS tree, for example the Mozilla bonsai server at
+@uref{http://bonsai.mozilla.org}. Bonsai servers are usable by both
+humans and machines. In this case, the buildbot's change source forms
+a query which asks about any files in the specified branch which have
+changed since the last query.
+
+Please take a look at the BonsaiPoller docstring for details about the
+arguments it accepts.
+
+
+@node SVNPoller, MercurialHook, BonsaiPoller, Getting Source Code Changes
+@section SVNPoller
+
+@csindex buildbot.changes.svnpoller.SVNPoller
+
+The @code{buildbot.changes.svnpoller.SVNPoller} is a ChangeSource
+which periodically polls a @uref{http://subversion.tigris.org/,
+Subversion} repository for new revisions, by running the @code{svn
+log} command in a subshell. It can watch a single branch or multiple
+branches.
+
+@code{SVNPoller} accepts the following arguments:
+
+@table @code
+@item svnurl
+The base URL path to watch, like
+@code{svn://svn.twistedmatrix.com/svn/Twisted/trunk}, or
+@code{http://divmod.org/svn/Divmod/}, or even
+@code{file:///home/svn/Repository/ProjectA/branches/1.5/}. This must
+include the access scheme, the location of the repository (both the
+hostname for remote ones, and any additional directory names necessary
+to get to the repository), and the sub-path within the repository's
+virtual filesystem for the project and branch of interest.
+
+The @code{SVNPoller} will only pay attention to files inside the
+subdirectory specified by the complete svnurl.
+
+@item split_file
+A function to convert pathnames into (branch, relative_pathname)
+tuples. Use this to explain your repository's branch-naming policy to
+@code{SVNPoller}. This function must accept a single string and return
+a two-entry tuple. There are a few utility functions in
+@code{buildbot.changes.svnpoller} that can be used as a
+@code{split_file} function, see below for details.
+
+The default value always returns (None, path), which indicates that
+all files are on the trunk.
+
+Subclasses of @code{SVNPoller} can override the @code{split_file}
+method instead of using the @code{split_file=} argument.
+
+@item svnuser
+An optional string parameter. If set, the @code{--username} argument will
+be added to all @code{svn} commands. Use this if you have to
+authenticate to the svn server before you can do @code{svn info} or
+@code{svn log} commands.
+
+@item svnpasswd
+Like @code{svnuser}, this will cause a @code{--password} argument to
+be passed to all svn commands.
+
+@item pollinterval
+How often to poll, in seconds. Defaults to 600 (checking once every 10
+minutes). Lower this if you want the buildbot to notice changes
+faster, raise it if you want to reduce the network and CPU load on
+your svn server. Please be considerate of public SVN repositories by
+using a large interval when polling them.
+
+@item histmax
+The maximum number of changes to inspect at a time. Every POLLINTERVAL
+seconds, the @code{SVNPoller} asks for the last HISTMAX changes and
+looks through them for any ones it does not already know about. If
+more than HISTMAX revisions have been committed since the last poll,
+older changes will be silently ignored. Larger values of histmax will
+cause more time and memory to be consumed on each poll attempt.
+@code{histmax} defaults to 100.
+
+@item svnbin
+This controls the @code{svn} executable to use. If subversion is
+installed in a weird place on your system (outside of the
+buildmaster's @code{$PATH}), use this to tell @code{SVNPoller} where
+to find it. The default value of ``svn'' will almost always be
+sufficient.
+
+@end table
+
+@heading Branches
+
+Each source file that is tracked by a Subversion repository has a
+fully-qualified SVN URL in the following form:
+(REPOURL)(PROJECT-plus-BRANCH)(FILEPATH). When you create the
+@code{SVNPoller}, you give it a @code{svnurl} value that includes all
+of the REPOURL and possibly some portion of the PROJECT-plus-BRANCH
+string. The @code{SVNPoller} is responsible for producing Changes that
+contain a branch name and a FILEPATH (which is relative to the top of
+a checked-out tree). The details of how these strings are split up
+depend upon how your repository names its branches.
+
+@subheading PROJECT/BRANCHNAME/FILEPATH repositories
+
+One common layout is to have all the various projects that share a
+repository get a single top-level directory each. Then under a given
+project's directory, you get two subdirectories, one named ``trunk''
+and another named ``branches''. Under ``branches'' you have a bunch of
+other directories, one per branch, with names like ``1.5.x'' and
+``testing''. It is also common to see directories like ``tags'' and
+``releases'' next to ``branches'' and ``trunk''.
+
+For example, the Twisted project has a subversion server on
+``svn.twistedmatrix.com'' that hosts several sub-projects. The
+repository is available through a SCHEME of ``svn:''. The primary
+sub-project is Twisted, of course, with a repository root of
+``svn://svn.twistedmatrix.com/svn/Twisted''. Another sub-project is
+Informant, with a root of
+``svn://svn.twistedmatrix.com/svn/Informant'', etc. Inside any
+checked-out Twisted tree, there is a file named bin/trial (which is
+used to run unit test suites).
+
+The trunk for Twisted is in
+``svn://svn.twistedmatrix.com/svn/Twisted/trunk'', and the
+fully-qualified SVN URL for the trunk version of @code{trial} would be
+``svn://svn.twistedmatrix.com/svn/Twisted/trunk/bin/trial''. The same
+SVNURL for that file on a branch named ``1.5.x'' would be
+``svn://svn.twistedmatrix.com/svn/Twisted/branches/1.5.x/bin/trial''.
+
+To set up a @code{SVNPoller} that watches the Twisted trunk (and
+nothing else), we would use the following:
+
+@example
+from buildbot.changes.svnpoller import SVNPoller
+c['change_source'] = SVNPoller("svn://svn.twistedmatrix.com/svn/Twisted/trunk")
+@end example
+
+In this case, every Change that our @code{SVNPoller} produces will
+have @code{.branch=None}, to indicate that the Change is on the trunk.
+No other sub-projects or branches will be tracked.
+
+If we want our ChangeSource to follow multiple branches, we have to do
+two things. First we have to change our @code{svnurl=} argument to
+watch more than just ``.../Twisted/trunk''. We will set it to
+``.../Twisted'' so that we'll see both the trunk and all the branches.
+Second, we have to tell @code{SVNPoller} how to split the
+(PROJECT-plus-BRANCH)(FILEPATH) strings it gets from the repository
+out into (BRANCH) and (FILEPATH) pairs.
+
+We do the latter by providing a ``split_file'' function. This function
+is responsible for splitting something like
+``branches/1.5.x/bin/trial'' into @code{branch}=''branches/1.5.x'' and
+@code{filepath}=''bin/trial''. This function is always given a string
+that names a file relative to the subdirectory pointed to by the
+@code{SVNPoller}'s @code{svnurl=} argument. It is expected to return a
+(BRANCHNAME, FILEPATH) tuple (in which FILEPATH is relative to the
+branch indicated), or None to indicate that the file is outside any
+project of interest.
+
+(note that we want to see ``branches/1.5.x'' rather than just
+``1.5.x'' because when we perform the SVN checkout, we will probably
+append the branch name to the baseURL, which requires that we keep the
+``branches'' component in there. Other VC schemes use a different
+approach towards branches and may not require this artifact.)
+
+If your repository uses this same PROJECT/BRANCH/FILEPATH naming
+scheme, the following function will work:
+
+@example
+def split_file_branches(path):
+ pieces = path.split('/')
+ if pieces[0] == 'trunk':
+ return (None, '/'.join(pieces[1:]))
+ elif pieces[0] == 'branches':
+ return ('/'.join(pieces[0:2]),
+ '/'.join(pieces[2:]))
+ else:
+ return None
+@end example
+
+This function is provided as
+@code{buildbot.changes.svnpoller.split_file_branches} for your
+convenience. So to have our Twisted-watching @code{SVNPoller} follow
+multiple branches, we would use this:
+
+@example
+from buildbot.changes.svnpoller import SVNPoller, split_file_branches
+c['change_source'] = SVNPoller("svn://svn.twistedmatrix.com/svn/Twisted",
+ split_file=split_file_branches)
+@end example
+
+Changes for all sorts of branches (with names like ``branches/1.5.x'',
+and None to indicate the trunk) will be delivered to the Schedulers.
+Each Scheduler is then free to use or ignore each branch as it sees
+fit.
+
+@subheading BRANCHNAME/PROJECT/FILEPATH repositories
+
+Another common way to organize a Subversion repository is to put the
+branch name at the top, and the projects underneath. This is
+especially frequent when there are a number of related sub-projects
+that all get released in a group.
+
+For example, Divmod.org hosts a project named ``Nevow'' as well as one
+named ``Quotient''. In a checked-out Nevow tree there is a directory
+named ``formless'' that contains a python source file named
+``webform.py''. This repository is accessible via webdav (and thus
+uses an ``http:'' scheme) through the divmod.org hostname. There are
+many branches in this repository, and they use a
+(BRANCHNAME)/(PROJECT) naming policy.
+
+The fully-qualified SVN URL for the trunk version of webform.py is
+@code{http://divmod.org/svn/Divmod/trunk/Nevow/formless/webform.py}.
+You can do an @code{svn co} with that URL and get a copy of the latest
+version. The 1.5.x branch version of this file would have a URL of
+@code{http://divmod.org/svn/Divmod/branches/1.5.x/Nevow/formless/webform.py}.
+The whole Nevow trunk would be checked out with
+@code{http://divmod.org/svn/Divmod/trunk/Nevow}, while the Quotient
+trunk would be checked out using
+@code{http://divmod.org/svn/Divmod/trunk/Quotient}.
+
+Now suppose we want to have an @code{SVNPoller} that only cares about
+the Nevow trunk. This case looks just like the PROJECT/BRANCH layout
+described earlier:
+
+@example
+from buildbot.changes.svnpoller import SVNPoller
+c['change_source'] = SVNPoller("http://divmod.org/svn/Divmod/trunk/Nevow")
+@end example
+
+But what happens when we want to track multiple Nevow branches? We
+have to point our @code{svnurl=} high enough to see all those
+branches, but we also don't want to include Quotient changes (since
+we're only building Nevow). To accomplish this, we must rely upon the
+@code{split_file} function to help us tell the difference between
+files that belong to Nevow and those that belong to Quotient, as well
+as figuring out which branch each one is on.
+
+@example
+from buildbot.changes.svnpoller import SVNPoller
+c['change_source'] = SVNPoller("http://divmod.org/svn/Divmod",
+ split_file=my_file_splitter)
+@end example
+
+The @code{my_file_splitter} function will be called with
+repository-relative pathnames like:
+
+@table @code
+@item trunk/Nevow/formless/webform.py
+This is a Nevow file, on the trunk. We want the Change that includes this
+to see a filename of @code{formless/webform.py}, and a branch of None.
+
+@item branches/1.5.x/Nevow/formless/webform.py
+This is a Nevow file, on a branch. We want to get
+branch=''branches/1.5.x'' and filename=''formless/webform.py''.
+
+@item trunk/Quotient/setup.py
+This is a Quotient file, so we want to ignore it by having
+@code{my_file_splitter} return None.
+
+@item branches/1.5.x/Quotient/setup.py
+This is also a Quotient file, which should be ignored.
+@end table
+
+The following definition for @code{my_file_splitter} will do the job:
+
+@example
+def my_file_splitter(path):
+ pieces = path.split('/')
+ if pieces[0] == 'trunk':
+ branch = None
+ pieces.pop(0) # remove 'trunk'
+ elif pieces[0] == 'branches':
+ pieces.pop(0) # remove 'branches'
+ # grab branch name
+ branch = 'branches/' + pieces.pop(0)
+ else:
+ return None # something weird
+ projectname = pieces.pop(0)
+ if projectname != 'Nevow':
+ return None # wrong project
+ return (branch, '/'.join(pieces))
+@end example
+
+@node MercurialHook, Bzr Hook, SVNPoller, Getting Source Code Changes
+@section MercurialHook
+
+Since Mercurial is written in python, the hook script can invoke
+Buildbot's @code{sendchange} function directly, rather than having to
+spawn an external process. This function delivers the same sort of
+changes as @code{buildbot sendchange} and the various hook scripts in
+contrib/, so you'll need to add a @code{pb.PBChangeSource} to your
+buildmaster to receive these changes.
+
+To set this up, first choose a Mercurial repository that represents
+your central ``official'' source tree. This will be the same
+repository that your buildslaves will eventually pull from. Install
+Buildbot on the machine that hosts this repository, using the same
+version of python as Mercurial is using (so that the Mercurial hook
+can import code from buildbot). Then add the following to the
+@code{.hg/hgrc} file in that repository, replacing the buildmaster
+hostname/portnumber as appropriate for your buildbot:
+
+@example
+[hooks]
+changegroup.buildbot = python:buildbot.changes.hgbuildbot.hook
+
+[hgbuildbot]
+master = buildmaster.example.org:9987
+@end example
+
+(Note that Mercurial lets you define multiple @code{changegroup} hooks
+by giving them distinct names, like @code{changegroup.foo} and
+@code{changegroup.bar}, which is why we use
+@code{changegroup.buildbot} in this example. There is nothing magical
+about the ``buildbot'' suffix in the hook name. The
+@code{[hgbuildbot]} section @emph{is} special, however, as it is the
+only section that the buildbot hook pays attention to.)
+
+Also note that this runs as a @code{changegroup} hook, rather than as
+an @code{incoming} hook. The @code{changegroup} hook is run with
+multiple revisions at a time (say, if multiple revisions are being
+pushed to this repository in a single @command{hg push} command),
+whereas the @code{incoming} hook is run with just one revision at a
+time. The @code{hgbuildbot.hook} function will only work with the
+@code{changegroup} hook.
+
+The @code{[hgbuildbot]} section has two other parameters that you
+might specify, both of which control the name of the branch that is
+attached to the changes coming from this hook.
+
+One common branch naming policy for Mercurial repositories is to use
+it just like Darcs: each branch goes into a separate repository, and
+all the branches for a single project share a common parent directory.
+For example, you might have @file{/var/repos/PROJECT/trunk/} and
+@file{/var/repos/PROJECT/release}. To use this style, use the
+@code{branchtype = dirname} setting, which simply uses the last
+component of the repository's enclosing directory as the branch name:
+
+@example
+[hgbuildbot]
+master = buildmaster.example.org:9987
+branchtype = dirname
+@end example
+
+Another approach is to use Mercurial's built-in branches (the kind
+created with @command{hg branch} and listed with @command{hg
+branches}). This feature associates persistent names with particular
+lines of descent within a single repository. (note that the buildbot
+@code{source.Mercurial} checkout step does not yet support this kind
+of branch). To have the commit hook deliver this sort of branch name
+with the Change object, use @code{branchtype = inrepo}:
+
+@example
+[hgbuildbot]
+master = buildmaster.example.org:9987
+branchtype = inrepo
+@end example
+
+Finally, if you want to simply specify the branchname directly, for
+all changes, use @code{branch = BRANCHNAME}. This overrides
+@code{branchtype}:
+
+@example
+[hgbuildbot]
+master = buildmaster.example.org:9987
+branch = trunk
+@end example
+
+If you use @code{branch=} like this, you'll need to put a separate
+.hgrc in each repository. If you use @code{branchtype=}, you may be
+able to use the same .hgrc for all your repositories, stored in
+@file{~/.hgrc} or @file{/etc/mercurial/hgrc}.
+
+
+@node Bzr Hook, Bzr Poller, MercurialHook, Getting Source Code Changes
+@section Bzr Hook
+
+Bzr is also written in Python, and the Bzr hook depends on Twisted to send the
+changes.
+
+To install, put @code{contrib/bzr_buildbot.py} in one of your plugin
+locations, i.e. a bzr plugins directory (e.g.,
+@code{~/.bazaar/plugins}). Then, in one of your bazaar conf files (e.g.,
+@code{~/.bazaar/locations.conf}), set the location you want to connect with buildbot
+with these keys:
+
+@table @code
+@item buildbot_on
+one of 'commit', 'push', or 'change'. Turns the plugin on to report changes via
+commit, changes via push, or any changes to the trunk. 'change' is
+recommended.
+
+@item buildbot_server
+(required to send to a buildbot master) the URL of the buildbot master to
+which you will connect (as of this writing, the same server and port to which
+slaves connect).
+
+@item buildbot_port
+(optional, defaults to 9989) the port of the buildbot master to which you will
+connect (as of this writing, the same server and port to which slaves connect)
+
+@item buildbot_pqm
+(optional, defaults to not pqm) Normally, the user that commits the revision
+is the user that is responsible for the change. When run in a pqm (Patch Queue
+Manager, see https://launchpad.net/pqm) environment, the user that commits is
+the Patch Queue Manager, and the user that committed the @emph{parent} revision is
+responsible for the change. To turn on the pqm mode, set this value to any of
+(case-insensitive) "Yes", "Y", "True", or "T".
+
+@item buildbot_dry_run
+(optional, defaults to not a dry run) Normally, the post-commit hook will
+attempt to communicate with the configured buildbot server and port. If this
+parameter is included and any of (case-insensitive) "Yes", "Y", "True", or
+"T", then the hook will simply print what it would have sent, but not attempt
+to contact the buildbot master.
+
+@item buildbot_send_branch_name
+(optional, defaults to not sending the branch name) If your buildbot's bzr
+source build step uses a repourl, do *not* turn this on. If your buildbot's
+bzr build step uses a baseURL, then you may set this value to any of
+(case-insensitive) "Yes", "Y", "True", or "T" to have the buildbot master
+append the branch name to the baseURL.
+
+@end table
+
+When buildbot no longer has a hardcoded password, it will be a configuration
+option here as well.
+
+Here's a simple example that you might have in your
+@code{~/.bazaar/locations.conf}.
+
+@example
+[chroot-*:///var/local/myrepo/mybranch]
+buildbot_on = change
+buildbot_server = localhost
+@end example
+
+@node Bzr Poller, , Bzr Hook, Getting Source Code Changes
+@section Bzr Poller
+
+If you cannot insert a Bzr hook in the server, you can use the Bzr Poller. To
+use, put @code{contrib/bzr_buildbot.py} somewhere that your buildbot
+configuration can import it. Even putting it in the same directory as the master.cfg
+should work. Install the poller in the buildbot configuration as with any
+other change source. Minimally, provide a URL that you want to poll (bzr://,
+bzr+ssh://, or lp:), though make sure the buildbot user has necessary
+privileges. You may also want to specify these optional values.
+
+@table @code
+@item poll_interval
+The number of seconds to wait between polls. Defaults to 10 minutes.
+
+@item branch_name
+Any value to be used as the branch name. Defaults to None, or specify a
+string, or specify the constants from @code{bzr_buildbot.py} SHORT or FULL to
+get the short branch name or full branch address.
+
+@item blame_merge_author
+normally, the user that commits the revision is the user that is responsible
+for the change. When run in a pqm (Patch Queue Manager, see
+https://launchpad.net/pqm) environment, the user that commits is the Patch
+Queue Manager, and the user that committed the merged, @emph{parent} revision is
+responsible for the change. set this value to True if this is pointed against
+a PQM-managed branch.
+@end table
+
+@node Build Process, Status Delivery, Getting Source Code Changes, Top
+@chapter Build Process
+
+A @code{Build} object is responsible for actually performing a build.
+It gets access to a remote @code{SlaveBuilder} where it may run
+commands, and a @code{BuildStatus} object where it must emit status
+events. The @code{Build} is created by the Builder's
+@code{BuildFactory}.
+
+The default @code{Build} class is made up of a fixed sequence of
+@code{BuildSteps}, executed one after another until all are complete
+(or one of them indicates that the build should be halted early). The
+default @code{BuildFactory} creates instances of this @code{Build}
+class with a list of @code{BuildSteps}, so the basic way to configure
+the build is to provide a list of @code{BuildSteps} to your
+@code{BuildFactory}.
+
+More complicated @code{Build} subclasses can make other decisions:
+execute some steps only if certain files were changed, or if certain
+previous steps passed or failed. The base class has been written to
+allow users to express basic control flow without writing code, but
+you can always subclass and customize to achieve more specialized
+behavior.
+
+@menu
+* Build Steps::
+* Interlocks::
+* Build Factories::
+@end menu
+
+@node Build Steps, Interlocks, Build Process, Build Process
+@section Build Steps
+
+@code{BuildStep}s are usually specified in the buildmaster's
+configuration file, in a list that goes into the @code{BuildFactory}.
+The @code{BuildStep} instances in this list are used as templates to
+construct new independent copies for each build (so that state can be
+kept on the @code{BuildStep} in one build without affecting a later
+build). Each @code{BuildFactory} can be created with a list of steps,
+or the factory can be created empty and then steps added to it using
+the @code{addStep} method:
+
+@example
+from buildbot.steps import source, shell
+from buildbot.process import factory
+
+f = factory.BuildFactory()
+f.addStep(source.SVN(svnurl="http://svn.example.org/Trunk/"))
+f.addStep(shell.ShellCommand(command=["make", "all"]))
+f.addStep(shell.ShellCommand(command=["make", "test"]))
+@end example
+
+In earlier versions (0.7.5 and older), these steps were specified with
+a tuple of (step_class, keyword_arguments). Steps can still be
+specified this way, but the preferred form is to pass actual
+@code{BuildStep} instances to @code{addStep}, because that gives the
+@code{BuildStep} class a chance to do some validation on the
+arguments.
+
+If you have a common set of steps which are used in several factories, the
+@code{addSteps} method may be handy. It takes an iterable of @code{BuildStep}
+instances.
+
+@example
+setup_steps = [
+ source.SVN(svnurl="http://svn.example.org/Trunk/"),
+ shell.ShellCommand(command="./setup"),
+]
+quick = factory.BuildFactory()
+quick.addSteps(setup_steps)
+quick.addStep(shell.ShellCommand(command="make quick"))
+@end example
+
+The rest of this section lists all the standard BuildStep objects
+available for use in a Build, and the parameters which can be used to
+control each.
+
+@menu
+* Common Parameters::
+* Using Build Properties::
+* Source Checkout::
+* ShellCommand::
+* Simple ShellCommand Subclasses::
+* Python BuildSteps::
+* Transferring Files::
+* Steps That Run on the Master::
+* Triggering Schedulers::
+* Writing New BuildSteps::
+@end menu
+
+@node Common Parameters, Using Build Properties, Build Steps, Build Steps
+@subsection Common Parameters
+
+The standard @code{Build} runs a series of @code{BuildStep}s in order,
+only stopping when it runs out of steps or if one of them requests
+that the build be halted. It collects status information from each one
+to create an overall build status (of SUCCESS, WARNINGS, or FAILURE).
+
+All BuildSteps accept some common parameters. Some of these control
+how their individual status affects the overall build. Others are used
+to specify which @code{Locks} (see @pxref{Interlocks}) should be
+acquired before allowing the step to run.
+
+Arguments common to all @code{BuildStep} subclasses:
+
+
+@table @code
+@item name
+the name used to describe the step on the status display. It is also
+used to give a name to any LogFiles created by this step.
+
+@item haltOnFailure
+if True, a FAILURE of this build step will cause the build to halt
+immediately. Steps with @code{alwaysRun=True} are still run. Generally
+speaking, haltOnFailure implies flunkOnFailure (the default for most
+BuildSteps). In some cases, particularly series of tests, it makes sense
+to haltOnFailure if something fails early on but not flunkOnFailure.
+This can be achieved with haltOnFailure=True, flunkOnFailure=False.
+
+@item flunkOnWarnings
+when True, a WARNINGS or FAILURE of this build step will mark the
+overall build as FAILURE. The remaining steps will still be executed.
+
+@item flunkOnFailure
+when True, a FAILURE of this build step will mark the overall build as
+a FAILURE. The remaining steps will still be executed.
+
+@item warnOnWarnings
+when True, a WARNINGS or FAILURE of this build step will mark the
+overall build as having WARNINGS. The remaining steps will still be
+executed.
+
+@item warnOnFailure
+when True, a FAILURE of this build step will mark the overall build as
+having WARNINGS. The remaining steps will still be executed.
+
+@item alwaysRun
+if True, this build step will always be run, even if a previous buildstep
+with @code{haltOnFailure=True} has failed.
+
+@item locks
+a list of Locks (instances of @code{buildbot.locks.SlaveLock} or
+@code{buildbot.locks.MasterLock}) that should be acquired before
+starting this Step. The Locks will be released when the step is
+complete. Note that this is a list of actual Lock instances, not
+names. Also note that all Locks must have unique names.
+
+@end table
+
+@node Using Build Properties, Source Checkout, Common Parameters, Build Steps
+@subsection Using Build Properties
+@cindex Properties
+
+Build properties are a generalized way to provide configuration
+information to build steps; see @ref{Build Properties}.
+
+Some build properties are inherited from external sources -- global
+properties, schedulers, or buildslaves. Some build properties are
+set when the build starts, such as the SourceStamp information. Other
+properties can be set by BuildSteps as they run, for example the
+various Source steps will set the @code{got_revision} property to the
+source revision that was actually checked out (which can be useful
+when the SourceStamp in use merely requested the ``latest revision'':
+@code{got_revision} will tell you what was actually built).
+
+In custom BuildSteps, you can get and set the build properties with
+the @code{getProperty}/@code{setProperty} methods. Each takes a string
+for the name of the property, and returns or accepts an
+arbitrary@footnote{Build properties are serialized along with the
+build results, so they must be serializable. For this reason, the
+value of any build property should be simple inert data: strings,
+numbers, lists, tuples, and dictionaries. They should not contain
+class instances.} object. For example:
+
+@example
+class MakeTarball(ShellCommand):
+ def start(self):
+ if self.getProperty("os") == "win":
+ self.setCommand([ ... ]) # windows-only command
+ else:
+ self.setCommand([ ... ]) # equivalent for other systems
+ ShellCommand.start(self)
+@end example
+
+@heading WithProperties
+@cindex WithProperties
+
+You can use build properties in ShellCommands by using the
+@code{WithProperties} wrapper when setting the arguments of
+the ShellCommand. This interpolates the named build properties
+into the generated shell command. Most step parameters accept
+@code{WithProperties}. Please file bugs for any parameters which
+do not.
+
+@example
+from buildbot.steps.shell import ShellCommand
+from buildbot.process.properties import WithProperties
+
+f.addStep(ShellCommand(
+ command=["tar", "czf",
+ WithProperties("build-%s.tar.gz", "revision"),
+ "source"]))
+@end example
+
+If this BuildStep were used in a tree obtained from Subversion, it
+would create a tarball with a name like @file{build-1234.tar.gz}.
+
+The @code{WithProperties} function does @code{printf}-style string
+interpolation, using strings obtained by calling
+@code{build.getProperty(propname)}. Note that for every @code{%s} (or
+@code{%d}, etc), you must have exactly one additional argument to
+indicate which build property you want to insert.
+
+You can also use python dictionary-style string interpolation by using
+the @code{%(propname)s} syntax. In this form, the property name goes
+in the parentheses, and WithProperties takes @emph{no} additional
+arguments:
+
+@example
+f.addStep(ShellCommand(
+ command=["tar", "czf",
+ WithProperties("build-%(revision)s.tar.gz"),
+ "source"]))
+@end example
+
+Don't forget the extra ``s'' after the closing parenthesis! This is
+the cause of many confusing errors.
+
+The dictionary-style interpolation supports a number of more advanced
+syntaxes, too.
+
+@table @code
+
+@item propname:-replacement
+If @code{propname} exists, substitute its value; otherwise,
+substitute @code{replacement}. @code{replacement} may be empty
+(@code{%(propname:-)s})
+
+@item propname:+replacement
+If @code{propname} exists, substitute @code{replacement}; otherwise,
+substitute an empty string.
+
+@end table
+
+Although these are similar to shell substitutions, no other
+substitutions are currently supported, and @code{replacement} in the
+above cannot contain more substitutions.
+
+Note: like python, you can either do positional-argument interpolation
+@emph{or} keyword-argument interpolation, not both. Thus you cannot use
+a string like @code{WithProperties("foo-%(revision)s-%s", "branch")}.
+
+@heading Common Build Properties
+
+The following build properties are set when the build is started, and
+are available to all steps.
+
+@table @code
+@item branch
+
+This comes from the build's SourceStamp, and describes which branch is
+being checked out. This will be @code{None} (which interpolates into
+@code{WithProperties} as an empty string) if the build is on the
+default branch, which is generally the trunk. Otherwise it will be a
+string like ``branches/beta1.4''. The exact syntax depends upon the VC
+system being used.
+
+@item revision
+
+This also comes from the SourceStamp, and is the revision of the source code
+tree that was requested from the VC system. When a build is requested of a
+specific revision (as is generally the case when the build is triggered by
+Changes), this will contain the revision specification. This is always a
+string, although the syntax depends upon the VC system in use: for SVN it is an
+integer, for Mercurial it is a short string, for Darcs it is a rather large
+string, etc.
+
+If the ``force build'' button was pressed, the revision will be @code{None},
+which means to use the most recent revision available. This is a ``trunk
+build''. This will be interpolated as an empty string.
+
+@item got_revision
+
+This is set when a Source step checks out the source tree, and
+provides the revision that was actually obtained from the VC system.
+In general this should be the same as @code{revision}, except for
+trunk builds, where @code{got_revision} indicates what revision was
+current when the checkout was performed. This can be used to rebuild
+the same source code later.
+
+Note that for some VC systems (Darcs in particular), the revision is a
+large string containing newlines, and is not suitable for interpolation
+into a filename.
+
+@item buildername
+
+This is a string that indicates which Builder the build was a part of.
+The combination of buildername and buildnumber uniquely identify a
+build.
+
+@item buildnumber
+
+Each build gets a number, scoped to the Builder (so the first build
+performed on any given Builder will have a build number of 0). This
+integer property contains the build's number.
+
+@item slavename
+
+This is a string which identifies which buildslave the build is
+running on.
+
+@item scheduler
+
+If the build was started from a scheduler, then this property will
+contain the name of that scheduler.
+
+@end table
+
+
+@node Source Checkout, ShellCommand, Using Build Properties, Build Steps
+@subsection Source Checkout
+
+The first step of any build is typically to acquire the source code
+from which the build will be performed. There are several classes to
+handle this, one for each of the different source control system that
+Buildbot knows about. For a description of how Buildbot treats source
+control in general, see @ref{Version Control Systems}.
+
+All source checkout steps accept some common parameters to control how
+they get the sources and where they should be placed. The remaining
+per-VC-system parameters are mostly to specify where exactly the
+sources are coming from.
+
+@table @code
+@item mode
+
+a string describing the kind of VC operation that is desired. Defaults
+to @code{update}.
+
+@table @code
+@item update
+specifies that the CVS checkout/update should be performed directly
+into the workdir. Each build is performed in the same directory,
+allowing for incremental builds. This minimizes disk space, bandwidth,
+and CPU time. However, it may encounter problems if the build process
+does not handle dependencies properly (sometimes you must do a ``clean
+build'' to make sure everything gets compiled), or if source files are
+deleted but generated files can influence test behavior (e.g. python's
+.pyc files), or when source directories are deleted but generated
+files prevent CVS from removing them. Builds ought to be correct
+regardless of whether they are done ``from scratch'' or incrementally,
+but it is useful to test both kinds: this mode exercises the
+incremental-build style.
+
+@item copy
+specifies that the CVS workspace should be maintained in a separate
+directory (called the 'copydir'), using checkout or update as
+necessary. For each build, a new workdir is created with a copy of the
+source tree (rm -rf workdir; cp -r copydir workdir). This doubles the
+disk space required, but keeps the bandwidth low (update instead of a
+full checkout). A full 'clean' build is performed each time. This
+avoids any generated-file build problems, but is still occasionally
+vulnerable to CVS problems such as a repository being manually
+rearranged, causing CVS errors on update which are not an issue with a
+full checkout.
+
+@c TODO: something is screwy about this, revisit. Is it the source
+@c directory or the working directory that is deleted each time?
+
+@item clobber
+specifies that the working directory should be deleted each time,
+necessitating a full checkout for each build. This ensures a clean
+build off a complete checkout, avoiding any of the problems described
+above. This mode exercises the ``from-scratch'' build style.
+
+@item export
+this is like @code{clobber}, except that the 'cvs export' command is
+used to create the working directory. This command removes all CVS
+metadata files (the CVS/ directories) from the tree, which is
+sometimes useful for creating source tarballs (to avoid including the
+metadata in the tar file).
+@end table
+
+@item workdir
+like all Steps, this indicates the directory where the build will take
+place. Source Steps are special in that they perform some operations
+outside of the workdir (like creating the workdir itself).
+
+@item alwaysUseLatest
+if True, bypass the usual ``update to the last Change'' behavior, and
+always update to the latest changes instead.
+
+@item retry
+If set, this specifies a tuple of @code{(delay, repeats)} which means
+that when a full VC checkout fails, it should be retried up to
+@var{repeats} times, waiting @var{delay} seconds between attempts. If
+you don't provide this, it defaults to @code{None}, which means VC
+operations should not be retried. This is provided to make life easier
+for buildslaves which are stuck behind poor network connections.
+
+@end table
+
+
+My habit as a developer is to do a @code{cvs update} and @code{make} each
+morning. Problems can occur, either because of bad code being checked in, or
+by incomplete dependencies causing a partial rebuild to fail where a
+complete from-scratch build might succeed. A quick Builder which emulates
+this incremental-build behavior would use the @code{mode='update'}
+setting.
+
+On the other hand, other kinds of dependency problems can cause a clean
+build to fail where a partial build might succeed. This frequently results
+from a link step that depends upon an object file that was removed from a
+later version of the tree: in the partial tree, the object file is still
+around (even though the Makefiles no longer know how to create it).
+
+``official'' builds (traceable builds performed from a known set of
+source revisions) are always done as clean builds, to make sure it is
+not influenced by any uncontrolled factors (like leftover files from a
+previous build). A ``full'' Builder which behaves this way would want
+to use the @code{mode='clobber'} setting.
+
+Each VC system has a corresponding source checkout class: their
+arguments are described on the following pages.
+
+
+@menu
+* CVS::
+* SVN::
+* Darcs::
+* Mercurial::
+* Arch::
+* Bazaar::
+* Bzr::
+* P4::
+* Git::
+@end menu
+
+@node CVS, SVN, Source Checkout, Source Checkout
+@subsubsection CVS
+@cindex CVS Checkout
+@bsindex buildbot.steps.source.CVS
+
+
+The @code{CVS} build step performs a @uref{http://www.nongnu.org/cvs/,
+CVS} checkout or update. It takes the following arguments:
+
+@table @code
+@item cvsroot
+(required): specify the CVSROOT value, which points to a CVS
+repository, probably on a remote machine. For example, the cvsroot
+value you would use to get a copy of the Buildbot source code is
+@code{:pserver:anonymous@@cvs.sourceforge.net:/cvsroot/buildbot}
+
+@item cvsmodule
+(required): specify the cvs @code{module}, which is generally a
+subdirectory of the CVSROOT. The cvsmodule for the Buildbot source
+code is @code{buildbot}.
+
+@item branch
+a string which will be used in a @code{-r} argument. This is most
+useful for specifying a branch to work on. Defaults to @code{HEAD}.
+
+@item global_options
+a list of flags to be put before the verb in the CVS command.
+
+@item checkoutDelay
+if set, the number of seconds to put between the timestamp of the last
+known Change and the value used for the @code{-D} option. Defaults to
+half of the parent Build's treeStableTimer.
+
+@end table
+
+
+@node SVN, Darcs, CVS, Source Checkout
+@subsubsection SVN
+
+@cindex SVN Checkout
+@bsindex buildbot.steps.source.SVN
+
+
+The @code{SVN} build step performs a
+@uref{http://subversion.tigris.org, Subversion} checkout or update.
+There are two basic ways of setting up the checkout step, depending
+upon whether you are using multiple branches or not.
+
+If all of your builds use the same branch, then you should create the
+@code{SVN} step with the @code{svnurl} argument:
+
+@table @code
+@item svnurl
+(required): this specifies the @code{URL} argument that will be given
+to the @code{svn checkout} command. It dictates both where the
+repository is located and which sub-tree should be extracted. In this
+respect, it is like a combination of the CVS @code{cvsroot} and
+@code{cvsmodule} arguments. For example, if you are using a remote
+Subversion repository which is accessible through HTTP at a URL of
+@code{http://svn.example.com/repos}, and you wanted to check out the
+@code{trunk/calc} sub-tree, you would use
+@code{svnurl="http://svn.example.com/repos/trunk/calc"} as an argument
+to your @code{SVN} step.
+@end table
+
+If, on the other hand, you are building from multiple branches, then
+you should create the @code{SVN} step with the @code{baseURL} and
+@code{defaultBranch} arguments instead:
+
+@table @code
+@item baseURL
+(required): this specifies the base repository URL, to which a branch
+name will be appended. It should probably end in a slash.
+
+@item defaultBranch
+this specifies the name of the branch to use when a Build does not
+provide one of its own. This will be appended to @code{baseURL} to
+create the string that will be passed to the @code{svn checkout}
+command.
+
+@item username
+if specified, this will be passed to the @code{svn} binary with a
+@code{--username} option.
+
+@item password
+if specified, this will be passed to the @code{svn} binary with a
+@code{--password} option. The password itself will be suitably obfuscated in
+the logs.
+
+@end table
+
+If you are using branches, you must also make sure your
+@code{ChangeSource} will report the correct branch names.
+
+@heading branch example
+
+Let's suppose that the ``MyProject'' repository uses branches for the
+trunk, for various users' individual development efforts, and for
+several new features that will require some amount of work (involving
+multiple developers) before they are ready to merge onto the trunk.
+Such a repository might be organized as follows:
+
+@example
+svn://svn.example.org/MyProject/trunk
+svn://svn.example.org/MyProject/branches/User1/foo
+svn://svn.example.org/MyProject/branches/User1/bar
+svn://svn.example.org/MyProject/branches/User2/baz
+svn://svn.example.org/MyProject/features/newthing
+svn://svn.example.org/MyProject/features/otherthing
+@end example
+
+Further assume that we want the Buildbot to run tests against the
+trunk and against all the feature branches (i.e., do a
+checkout/compile/build of branch X when a file has been changed on
+branch X, when X is in the set [trunk, features/newthing,
+features/otherthing]). We do not want the Buildbot to automatically
+build any of the user branches, but it should be willing to build a
+user branch when explicitly requested (most likely by the user who
+owns that branch).
+
+There are three things that need to be set up to accommodate this
+system. The first is a ChangeSource that is capable of identifying the
+branch which owns any given file. This depends upon a user-supplied
+function, in an external program that runs in the SVN commit hook and
+connects to the buildmaster's @code{PBChangeSource} over a TCP
+connection. (you can use the ``@code{buildbot sendchange}'' utility
+for this purpose, but you will still need an external program to
+decide what value should be passed to the @code{--branch=} argument).
+For example, a change to a file with the SVN url of
+``svn://svn.example.org/MyProject/features/newthing/src/foo.c'' should
+be broken down into a Change instance with
+@code{branch='features/newthing'} and @code{file='src/foo.c'}.
+
+The second piece is an @code{AnyBranchScheduler} which will pay
+attention to the desired branches. It will not pay attention to the
+user branches, so it will not automatically start builds in response
+to changes there. The AnyBranchScheduler class requires you to
+explicitly list all the branches you want it to use, but it would not
+be difficult to write a subclass which used
+@code{branch.startswith('features/')} to remove the need for this
+explicit list. Or, if you want to build user branches too, you can use
+AnyBranchScheduler with @code{branches=None} to indicate that you want
+it to pay attention to all branches.
+
+The third piece is an @code{SVN} checkout step that is configured to
+handle the branches correctly, with a @code{baseURL} value that
+matches the way the ChangeSource splits each file's URL into base,
+branch, and file.
+
+@example
+from buildbot.changes.pb import PBChangeSource
+from buildbot.scheduler import AnyBranchScheduler
+from buildbot.process import source, factory
+from buildbot.steps import source, shell
+
+c['change_source'] = PBChangeSource()
+s1 = AnyBranchScheduler('main',
+ ['trunk', 'features/newthing', 'features/otherthing'],
+ 10*60, ['test-i386', 'test-ppc'])
+c['schedulers'] = [s1]
+
+f = factory.BuildFactory()
+f.addStep(source.SVN(mode='update',
+ baseURL='svn://svn.example.org/MyProject/',
+ defaultBranch='trunk'))
+f.addStep(shell.Compile(command="make all"))
+f.addStep(shell.Test(command="make test"))
+
+c['builders'] = [
+ @{'name':'test-i386', 'slavename':'bot-i386', 'builddir':'test-i386',
+ 'factory':f @},
+ @{'name':'test-ppc', 'slavename':'bot-ppc', 'builddir':'test-ppc',
+ 'factory':f @},
+ ]
+@end example
+
+In this example, when a change arrives with a @code{branch} attribute
+of ``trunk'', the resulting build will have an SVN step that
+concatenates ``svn://svn.example.org/MyProject/'' (the baseURL) with
+``trunk'' (the branch name) to get the correct svn command. If the
+``newthing'' branch has a change to ``src/foo.c'', then the SVN step
+will concatenate ``svn://svn.example.org/MyProject/'' with
+``features/newthing'' to get the svnurl for checkout.
+
+@node Darcs, Mercurial, SVN, Source Checkout
+@subsubsection Darcs
+
+@cindex Darcs Checkout
+@bsindex buildbot.steps.source.Darcs
+
+
+The @code{Darcs} build step performs a
+@uref{http://darcs.net/, Darcs} checkout or update.
+
+Like @xref{SVN}, this step can either be configured to always check
+out a specific tree, or set up to pull from a particular branch that
+gets specified separately for each build. Also like SVN, the
+repository URL given to Darcs is created by concatenating a
+@code{baseURL} with the branch name, and if no particular branch is
+requested, it uses a @code{defaultBranch}. The only difference in
+usage is that each potential Darcs repository URL must point to a
+fully-fledged repository, whereas SVN URLs usually point to sub-trees
+of the main Subversion repository. In other words, doing an SVN
+checkout of @code{baseURL} is legal, but silly, since you'd probably
+wind up with a copy of every single branch in the whole repository.
+Doing a Darcs checkout of @code{baseURL} is just plain wrong, since
+the parent directory of a collection of Darcs repositories is not
+itself a valid repository.
+
+The Darcs step takes the following arguments:
+
+@table @code
+@item repourl
+(required unless @code{baseURL} is provided): the URL at which the
+Darcs source repository is available.
+
+@item baseURL
+(required unless @code{repourl} is provided): the base repository URL,
+to which a branch name will be appended. It should probably end in a
+slash.
+
+@item defaultBranch
+(allowed if and only if @code{baseURL} is provided): this specifies
+the name of the branch to use when a Build does not provide one of its
+own. This will be appended to @code{baseURL} to create the string that
+will be passed to the @code{darcs get} command.
+@end table
+
+@node Mercurial, Arch, Darcs, Source Checkout
+@subsubsection Mercurial
+
+@cindex Mercurial Checkout
+@bsindex buildbot.steps.source.Mercurial
+
+
+The @code{Mercurial} build step performs a
+@uref{http://selenic.com/mercurial, Mercurial} (aka ``hg'') checkout
+or update.
+
+Branches are handled just like @xref{Darcs}.
+
+The Mercurial step takes the following arguments:
+
+@table @code
+@item repourl
+(required unless @code{baseURL} is provided): the URL at which the
+Mercurial source repository is available.
+
+@item baseURL
+(required unless @code{repourl} is provided): the base repository URL,
+to which a branch name will be appended. It should probably end in a
+slash.
+
+@item defaultBranch
+(allowed if and only if @code{baseURL} is provided): this specifies
+the name of the branch to use when a Build does not provide one of its
+own. This will be appended to @code{baseURL} to create the string that
+will be passed to the @code{hg clone} command.
+@end table
+
+
+@node Arch, Bazaar, Mercurial, Source Checkout
+@subsubsection Arch
+
+@cindex Arch Checkout
+@bsindex buildbot.steps.source.Arch
+
+
+The @code{Arch} build step performs an @uref{http://gnuarch.org/,
+Arch} checkout or update using the @code{tla} client. It takes the
+following arguments:
+
+@table @code
+@item url
+(required): this specifies the URL at which the Arch source archive is
+available.
+
+@item version
+(required): this specifies which ``development line'' (like a branch)
+should be used. This provides the default branch name, but individual
+builds may specify a different one.
+
+@item archive
+(optional): Each repository knows its own archive name. If this
+parameter is provided, it must match the repository's archive name.
+The parameter is accepted for compatibility with the @code{Bazaar}
+step, below.
+
+@end table
+
+@node Bazaar, Bzr, Arch, Source Checkout
+@subsubsection Bazaar
+
+@cindex Bazaar Checkout
+@bsindex buildbot.steps.source.Bazaar
+
+
+@code{Bazaar} is an alternate implementation of the Arch VC system,
+which uses a client named @code{baz}. The checkout semantics are just
+different enough from @code{tla} that there is a separate BuildStep for
+it.
+
+It takes exactly the same arguments as @code{Arch}, except that the
+@code{archive=} parameter is required. (baz does not emit the archive
+name when you do @code{baz register-archive}, so we must provide it
+ourselves).
+
+
+@node Bzr, P4, Bazaar, Source Checkout
+@subsubsection Bzr
+
+@cindex Bzr Checkout
+@bsindex buildbot.steps.source.Bzr
+
+@code{bzr} is a descendant of Arch/Baz, and is frequently referred to
+as simply ``Bazaar''. The repository-vs-workspace model is similar to
+Darcs, but it uses a strictly linear sequence of revisions (one
+history per branch) like Arch. Branches are put in subdirectories.
+This makes it look very much like Mercurial, so it takes the same
+arguments:
+
+@table @code
+
+@item repourl
+(required unless @code{baseURL} is provided): the URL at which the
+Bzr source repository is available.
+
+@item baseURL
+(required unless @code{repourl} is provided): the base repository URL,
+to which a branch name will be appended. It should probably end in a
+slash.
+
+@item defaultBranch
+(allowed if and only if @code{baseURL} is provided): this specifies
+the name of the branch to use when a Build does not provide one of its
+own. This will be appended to @code{baseURL} to create the string that
+will be passed to the @code{bzr checkout} command.
+@end table
+
+
+
+@node P4, Git, Bzr, Source Checkout
+@subsubsection P4
+
+@cindex Perforce Update
+@bsindex buildbot.steps.source.P4
+@c TODO @bsindex buildbot.steps.source.P4Sync
+
+
+The @code{P4} build step creates a @uref{http://www.perforce.com/,
+Perforce} client specification and performs an update.
+
+@table @code
+@item p4base
+A view into the Perforce depot without branch name or trailing "...".
+Typically "//depot/proj/".
+@item defaultBranch
+A branch name to append on build requests if none is specified.
+Typically "trunk".
+@item p4port
+(optional): the host:port string describing how to get to the P4 Depot
+(repository), used as the -p argument for all p4 commands.
+@item p4user
+(optional): the Perforce user, used as the -u argument to all p4
+commands.
+@item p4passwd
+(optional): the Perforce password, used as the -p argument to all p4
+commands.
+@item p4extra_views
+(optional): a list of (depotpath, clientpath) tuples containing extra
+views to be mapped into the client specification. Both will have
+"/..." appended automatically. The client name and source directory
+will be prepended to the client path.
+@item p4client
+(optional): The name of the client to use. In mode='copy' and
+mode='update', it's particularly important that a unique name is used
+for each checkout directory to avoid incorrect synchronization. For
+this reason, Python percent substitution will be performed on this value
+to replace %(slave)s with the slave name and %(builder)s with the
+builder name. The default is "buildbot_%(slave)s_%(builder)s".
+@end table
+
+
+@node Git, , P4, Source Checkout
+@subsubsection Git
+
+@cindex Git Checkout
+@bsindex buildbot.steps.source.Git
+
+The @code{Git} build step clones or updates a @uref{http://git.or.cz/,
+Git} repository and checks out the specified branch or revision. Note
+that the buildbot supports Git version 1.2.0 and later: earlier
+versions (such as the one shipped in Ubuntu 'Dapper') do not support
+the @command{git init} command that the buildbot uses.
+
+The Git step takes the following arguments:
+
+@table @code
+@item repourl
+(required): the URL of the upstream Git repository.
+
+@item branch
+(optional): this specifies the name of the branch to use when a Build
+does not provide one of its own. If this parameter is not
+specified, and the Build does not provide a branch, the ``master''
+branch will be used.
+@end table
+
+
+@node ShellCommand, Simple ShellCommand Subclasses, Source Checkout, Build Steps
+@subsection ShellCommand
+
+@bsindex buildbot.steps.shell.ShellCommand
+@c TODO @bsindex buildbot.steps.shell.TreeSize
+
+This is a useful base class for just about everything you might want
+to do during a build (except for the initial source checkout). It runs
+a single command in a child shell on the buildslave. All stdout/stderr
+is recorded into a LogFile. The step finishes with a status of FAILURE
+if the command's exit code is non-zero, otherwise it has a status of
+SUCCESS.
+
+The preferred way to specify the command is with a list of argv strings,
+since this allows for spaces in filenames and avoids doing any fragile
+shell-escaping. You can also specify the command with a single string, in
+which case the string is given to '/bin/sh -c COMMAND' for parsing.
+
+On Windows, commands are run via @code{cmd.exe /c} which works well. However,
+if you're running a batch file, the error level does not get propagated
+correctly unless you add 'call' before your batch file's name:
+@code{cmd=['call', 'myfile.bat', ...]}.
+
+All ShellCommands are run by default in the ``workdir'', which
+defaults to the ``@file{build}'' subdirectory of the slave builder's
+base directory. The absolute path of the workdir will thus be the
+slave's basedir (set as an option to @code{buildbot create-slave},
+@pxref{Creating a buildslave}) plus the builder's basedir (set in the
+builder's @code{c['builddir']} key in master.cfg) plus the workdir
+itself (a class-level attribute of the BuildFactory, defaults to
+``@file{build}'').
+
+@code{ShellCommand} arguments:
+
+@table @code
+@item command
+a list of strings (preferred) or single string (discouraged) which
+specifies the command to be run. A list of strings is preferred
+because it can be used directly as an argv array. Using a single
+string (with embedded spaces) requires the buildslave to pass the
+string to /bin/sh for interpretation, which raises all sorts of
+difficult questions about how to escape or interpret shell
+metacharacters.
+
+@item env
+a dictionary of environment strings which will be added to the child
+command's environment. For example, to run tests with a different i18n
+language setting, you might use
+
+@example
+f.addStep(ShellCommand(command=["make", "test"],
+ env=@{'LANG': 'fr_FR'@}))
+@end example
+
+These variable settings will override any existing ones in the
+buildslave's environment or the environment specified in the
+Builder. The exception is PYTHONPATH, which is merged
+with (actually prepended to) any existing $PYTHONPATH setting. The
+value is treated as a list of directories to prepend, and a single
+string is treated like a one-item list. For example, to prepend both
+@file{/usr/local/lib/python2.3} and @file{/home/buildbot/lib/python}
+to any existing $PYTHONPATH setting, you would do something like the
+following:
+
+@example
+f.addStep(ShellCommand(
+ command=["make", "test"],
+ env=@{'PYTHONPATH': ["/usr/local/lib/python2.3",
+ "/home/buildbot/lib/python"] @}))
+@end example
+
+@item want_stdout
+if False, stdout from the child process is discarded rather than being
+sent to the buildmaster for inclusion in the step's LogFile.
+
+@item want_stderr
+like @code{want_stdout} but for stderr. Note that commands run through
+a PTY do not have separate stdout/stderr streams: both are merged into
+stdout.
+
+@item usePTY
+Should this command be run in a @code{pty}? The default is to observe the
+configuration of the client (@pxref{Buildslave Options}), but specifying
+@code{True} or @code{False} here will override the default.
+
+The advantage of using a PTY is that ``grandchild'' processes are more likely
+to be cleaned up if the build is interrupted or times out (since it enables the
+use of a ``process group'' in which all child processes will be placed). The
+disadvantages: some forms of Unix have problems with PTYs, some of your unit
+tests may behave differently when run under a PTY (generally those which check
+to see if they are being run interactively), and PTYs will merge the stdout and
+stderr streams into a single output stream (which means the red-vs-black
+coloring in the logfiles will be lost).
+
+@item logfiles
+Sometimes commands will log interesting data to a local file, rather
+than emitting everything to stdout or stderr. For example, Twisted's
+``trial'' command (which runs unit tests) only presents summary
+information to stdout, and puts the rest into a file named
+@file{_trial_temp/test.log}. It is often useful to watch these files
+as the command runs, rather than using @command{/bin/cat} to dump
+their contents afterwards.
+
+The @code{logfiles=} argument allows you to collect data from these
+secondary logfiles in near-real-time, as the step is running. It
+accepts a dictionary which maps from a local Log name (which is how
+the log data is presented in the build results) to a remote filename
+(interpreted relative to the build's working directory). Each named
+file will be polled on a regular basis (every couple of seconds) as
+the build runs, and any new text will be sent over to the buildmaster.
+
+@example
+f.addStep(ShellCommand(
+ command=["make", "test"],
+ logfiles=@{"triallog": "_trial_temp/test.log"@}))
+@end example
+
+
+@item timeout
+if the command fails to produce any output for this many seconds, it
+is assumed to be locked up and will be killed.
+
+@item description
+This will be used to describe the command (on the Waterfall display)
+while the command is still running. It should be a single
+imperfect-tense verb, like ``compiling'' or ``testing''. The preferred
+form is a list of short strings, which allows the HTML Waterfall
+display to create narrower columns by emitting a <br> tag between each
+word. You may also provide a single string.
+
+@item descriptionDone
+This will be used to describe the command once it has finished. A
+simple noun like ``compile'' or ``tests'' should be used. Like
+@code{description}, this may either be a list of short strings or a
+single string.
+
+If neither @code{description} nor @code{descriptionDone} are set, the
+actual command arguments will be used to construct the description.
+This may be a bit too wide to fit comfortably on the Waterfall
+display.
+
+@example
+f.addStep(ShellCommand(command=["make", "test"],
+ description=["testing"],
+ descriptionDone=["tests"]))
+@end example
+
+@item logEnviron
+If this option is true (the default), then the step's logfile will describe the
+environment variables on the slave. In situations where the environment is not
+relevant and is long, it may be easier to set @code{logEnviron=False}.
+
+@end table
+
+@node Simple ShellCommand Subclasses, Python BuildSteps, ShellCommand, Build Steps
+@subsection Simple ShellCommand Subclasses
+
+Several subclasses of ShellCommand are provided as starting points for
+common build steps. These are all very simple: they just override a few
+parameters so you don't have to specify them yourself, making the master.cfg
+file less verbose.
+
+@menu
+* Configure::
+* Compile::
+* Test::
+* TreeSize::
+* PerlModuleTest::
+* SetProperty::
+@end menu
+
+@node Configure, Compile, Simple ShellCommand Subclasses, Simple ShellCommand Subclasses
+@subsubsection Configure
+
+@bsindex buildbot.steps.shell.Configure
+
+This is intended to handle the @code{./configure} step from
+autoconf-style projects, or the @code{perl Makefile.PL} step from perl
+MakeMaker.pm-style modules. The default command is @code{./configure}
+but you can change this by providing a @code{command=} parameter.
+
+@node Compile, Test, Configure, Simple ShellCommand Subclasses
+@subsubsection Compile
+
+@bsindex buildbot.steps.shell.Compile
+
+This is meant to handle compiling or building a project written in C.
+The default command is @code{make all}. When the compile is finished,
+the log file is scanned for GCC warning messages, a summary log is
+created with any problems that were seen, and the step is marked as
+WARNINGS if any were discovered. The number of warnings is stored in a
+Build Property named ``warnings-count'', which is accumulated over all
+Compile steps (so if two warnings are found in one step, and three are
+found in another step, the overall build will have a
+``warnings-count'' property of 5.)
+
+The default regular expression used to detect a warning is
+@code{'.*warning[: ].*'}, which is fairly liberal and may cause
+false-positives. To use a different regexp, provide a
+@code{warningPattern=} argument, or use a subclass which sets the
+@code{warningPattern} attribute:
+
+@example
+f.addStep(Compile(command=["make", "test"],
+ warningPattern="^Warning: "))
+@end example
+
+The @code{warningPattern=} can also be a pre-compiled python regexp
+object: this makes it possible to add flags like @code{re.I} (to use
+case-insensitive matching).
+
+(TODO: this step needs to be extended to look for GCC error messages
+as well, and collect them into a separate logfile, along with the
+source code filenames involved).
+
+
+@node Test, TreeSize, Compile, Simple ShellCommand Subclasses
+@subsubsection Test
+
+@bsindex buildbot.steps.shell.Test
+
+This is meant to handle unit tests. The default command is @code{make
+test}, and the @code{warnOnFailure} flag is set.
+
+@node TreeSize, PerlModuleTest, Test, Simple ShellCommand Subclasses
+@subsubsection TreeSize
+
+@bsindex buildbot.steps.shell.TreeSize
+
+This is a simple command that uses the 'du' tool to measure the size
+of the code tree. It puts the size (as a count of 1024-byte blocks,
+aka 'KiB' or 'kibibytes') on the step's status text, and sets a build
+property named 'tree-size-KiB' with the same value.
+
+@node PerlModuleTest, SetProperty, TreeSize, Simple ShellCommand Subclasses
+@subsubsection PerlModuleTest
+
+@bsindex buildbot.steps.shell.PerlModuleTest
+
+This is a simple command that knows how to run tests of perl modules.
+It parses the output to determine the number of tests passed and
+failed and total number executed, saving the results for later query.
+
+@node SetProperty, , PerlModuleTest, Simple ShellCommand Subclasses
+@subsubsection SetProperty
+
+@bsindex buildbot.steps.shell.SetProperty
+
+This buildstep is similar to ShellCommand, except that it captures the
+output of the command into a property. It is usually used like this:
+
+@example
+f.addStep(SetProperty(command="uname -a", property="uname"))
+@end example
+
+This runs @code{uname -a} and captures its stdout, stripped of leading
+and trailing whitespace, in the property "uname". To avoid stripping,
+add @code{strip=False}. The @code{property} argument can be specified
+as a @code{WithProperties} object.
+
+The more advanced usage allows you to specify a function to extract
+properties from the command output. Here you can use regular
+expressions, string interpolation, or whatever you would like.
+The function is called with three arguments: the exit status of the
+command, its standard output as a string, and its standard error as
+a string. It should return a dictionary containing all new properties.
+
+@example
+def glob2list(rc, stdout, stderr):
+ jpgs = [ l.strip() for l in stdout.split('\n') ]
+ return @{ 'jpgs' : jpgs @}
+f.addStep(SetProperty(command="ls -1 *.jpg", extract_fn=glob2list))
+@end example
+
+Note that any ordering relationship of the contents of stdout and
+stderr is lost. For example, given
+
+@example
+f.addStep(SetProperty(
+ command="echo output1; echo error >&2; echo output2",
+ extract_fn=my_extract))
+@end example
+
+Then @code{my_extract} will see @code{stdout="output1\noutput2\n"}
+and @code{stderr="error\n"}.
+
+@node Python BuildSteps, Transferring Files, Simple ShellCommand Subclasses, Build Steps
+@subsection Python BuildSteps
+
+Here are some BuildSteps that are specifically useful for projects
+implemented in Python.
+
+@menu
+* BuildEPYDoc::
+* PyFlakes::
+* PyLint::
+@end menu
+
+@node BuildEPYDoc
+@subsubsection BuildEPYDoc
+
+@bsindex buildbot.steps.python.BuildEPYDoc
+
+@url{http://epydoc.sourceforge.net/, epydoc} is a tool for generating
+API documentation for Python modules from their docstrings. It reads
+all the .py files from your source tree, processes the docstrings
+therein, and creates a large tree of .html files (or a single .pdf
+file).
+
+The @code{buildbot.steps.python.BuildEPYDoc} step will run
+@command{epydoc} to produce this API documentation, and will count the
+errors and warnings from its output.
+
+You must supply the command line to be used. The default is
+@command{make epydocs}, which assumes that your project has a Makefile
+with an ``epydocs'' target. You might wish to use something like
+@command{epydoc -o apiref source/PKGNAME} instead. You might also want
+to add @command{--pdf} to generate a PDF file instead of a large tree
+of HTML files.
+
+The API docs are generated in-place in the build tree (under the
+workdir, in the subdirectory controlled by the ``-o'' argument). To
+make them useful, you will probably have to copy them to somewhere
+they can be read. A command like @command{rsync -ad apiref/
+dev.example.com:~public_html/current-apiref/} might be useful. You
+might instead want to bundle them into a tarball and publish it in the
+same place where the generated install tarball is placed.
+
+@example
+from buildbot.steps.python import BuildEPYDoc
+
+...
+f.addStep(BuildEPYDoc(command=["epydoc", "-o", "apiref", "source/mypkg"]))
+@end example
+
+
+@node PyFlakes
+@subsubsection PyFlakes
+
+@bsindex buildbot.steps.python.PyFlakes
+
+@url{http://divmod.org/trac/wiki/DivmodPyflakes, PyFlakes} is a tool
+to perform basic static analysis of Python code to look for simple
+errors, like missing imports and references of undefined names. It is
+like a fast and simple form of the C ``lint'' program. Other tools
+(like pychecker) provide more detailed results but take longer to run.
+
+The @code{buildbot.steps.python.PyFlakes} step will run pyflakes and
+count the various kinds of errors and warnings it detects.
+
+You must supply the command line to be used. The default is
+@command{make pyflakes}, which assumes you have a top-level Makefile
+with a ``pyflakes'' target. You might want to use something like
+@command{pyflakes .} or @command{pyflakes src}.
+
+@example
+from buildbot.steps.python import PyFlakes
+
+...
+f.addStep(PyFlakes(command=["pyflakes", "src"]))
+@end example
+
+@node PyLint
+@subsubsection PyLint
+
+@bsindex buildbot.steps.python.PyLint
+
+Similarly, the @code{buildbot.steps.python.PyLint} step will run pylint and
+analyze the results.
+
+You must supply the command line to be used. There is no default.
+
+@example
+from buildbot.steps.python import PyLint
+
+...
+f.addStep(PyLint(command=["pylint", "src"]))
+@end example
+
+
+@node Transferring Files
+@subsection Transferring Files
+
+@cindex File Transfer
+@bsindex buildbot.steps.transfer.FileUpload
+@bsindex buildbot.steps.transfer.FileDownload
+@bsindex buildbot.steps.transfer.DirectoryUpload
+
+Most of the work involved in a build will take place on the
+buildslave. But occasionally it is useful to do some work on the
+buildmaster side. The most basic way to involve the buildmaster is
+simply to move a file from the slave to the master, or vice versa.
+There are a pair of BuildSteps named @code{FileUpload} and
+@code{FileDownload} to provide this functionality. @code{FileUpload}
+moves a file @emph{up to} the master, while @code{FileDownload} moves
+a file @emph{down from} the master.
+
+As an example, let's assume that there is a step which produces an
+HTML file within the source tree that contains some sort of generated
+project documentation. We want to move this file to the buildmaster,
+into a @file{~/public_html} directory, so it can be visible to
+developers. This file will wind up in the slave-side working directory
+under the name @file{docs/reference.html}. We want to put it into the
+master-side @file{~/public_html/ref.html}.
+
+@example
+from buildbot.steps.shell import ShellCommand
+from buildbot.steps.transfer import FileUpload
+
+f.addStep(ShellCommand(command=["make", "docs"]))
+f.addStep(FileUpload(slavesrc="docs/reference.html",
+ masterdest="~/public_html/ref.html"))
+@end example
+
+The @code{masterdest=} argument will be passed to os.path.expanduser,
+so things like ``~'' will be expanded properly. Non-absolute paths
+will be interpreted relative to the buildmaster's base directory.
+Likewise, the @code{slavesrc=} argument will be expanded and
+interpreted relative to the builder's working directory.
+
+
+To move a file from the master to the slave, use the
+@code{FileDownload} command. For example, let's assume that some step
+requires a configuration file that, for whatever reason, could not be
+recorded in the source code repository or generated on the buildslave
+side:
+
+@example
+from buildbot.steps.shell import ShellCommand
+from buildbot.steps.transfer import FileUpload
+
+f.addStep(FileDownload(mastersrc="~/todays_build_config.txt",
+ slavedest="build_config.txt"))
+f.addStep(ShellCommand(command=["make", "config"]))
+@end example
+
+Like @code{FileUpload}, the @code{mastersrc=} argument is interpreted
+relative to the buildmaster's base directory, and the
+@code{slavedest=} argument is relative to the builder's working
+directory. If the buildslave is running in @file{~buildslave}, and the
+builder's ``builddir'' is something like @file{tests-i386}, then the
+workdir is going to be @file{~buildslave/tests-i386/build}, and a
+@code{slavedest=} of @file{foo/bar.html} will get put in
+@file{~buildslave/tests-i386/build/foo/bar.html}. Both of these commands
+will create any missing intervening directories.
+
+@subheading Other Parameters
+
+The @code{maxsize=} argument lets you set a maximum size for the file
+to be transferred. This may help to avoid surprises: transferring a
+100MB coredump when you were expecting to move a 10kB status file
+might take an awfully long time. The @code{blocksize=} argument
+controls how the file is sent over the network: larger blocksizes are
+slightly more efficient but also consume more memory on each end, and
+there is a hard-coded limit of about 640kB.
+
+The @code{mode=} argument allows you to control the access permissions
+of the target file, traditionally expressed as an octal integer. The
+most common value is probably 0755, which sets the ``x'' executable
+bit on the file (useful for shell scripts and the like). The default
+value for @code{mode=} is None, which means the permission bits will
+default to whatever the umask of the writing process is. The default
+umask tends to be fairly restrictive, but at least on the buildslave
+you can make it less restrictive with a --umask command-line option at
+creation time (@pxref{Buildslave Options}).
+
+@subheading Transferring Directories
+
+To transfer complete directories from the buildslave to the master, there
+is a BuildStep named @code{DirectoryUpload}. It works like @code{FileUpload},
+just for directories. However it does not support the @code{maxsize},
+@code{blocksize} and @code{mode} arguments. As an example, let's assume a
+generated project documentation, which consists of many files (like the output
+of doxygen or epydoc). We want to move the entire documentation to the
+buildmaster, into a @code{~/public_html/docs} directory. On the slave-side
+the directory can be found under @code{docs}:
+
+@example
+from buildbot.steps.shell import ShellCommand
+from buildbot.steps.transfer import DirectoryUpload
+
+f.addStep(ShellCommand(command=["make", "docs"]))
+f.addStep(DirectoryUpload(slavesrc="docs",
+ masterdest="~/public_html/docs"))
+@end example
+
+The DirectoryUpload step creates all necessary directories and
+transfers empty directories, too.
+
+@node Steps That Run on the Master
+@subsection Steps That Run on the Master
+
+Occasionally, it is useful to execute some task on the master, for example to
+create a directory, deploy a build result, or trigger some other centralized
+processing. This is possible, in a limited fashion, with the
+@code{MasterShellCommand} step.
+
+This step operates similarly to a regular @code{ShellCommand}, but executes on
+the master, instead of the slave. To be clear, the enclosing @code{Build}
+object must still have a slave object, just as for any other step -- only, in
+this step, the slave does not do anything.
+
+In this example, the step renames a tarball based on the day of the week.
+
+@example
+from buildbot.steps.transfer import FileUpload
+from buildbot.steps.master import MasterShellCommand
+
+f.addStep(FileUpload(slavesrc="widgetsoft.tar.gz",
+ masterdest="/var/buildoutputs/widgetsoft-new.tar.gz"))
+f.addStep(MasterShellCommand(command="""
+ cd /var/buildoutputs;
+ mv widgetsoft-new.tar.gz widgetsoft-`date +%a`.tar.gz"""))
+@end example
+
+@node Triggering Schedulers
+@subsection Triggering Schedulers
+
+The counterpart to the Triggerable described in section
+@pxref{Triggerable Scheduler} is the Trigger BuildStep.
+
+@example
+from buildbot.steps.trigger import Trigger
+f.addStep(Trigger(schedulerNames=['build-prep'],
+ waitForFinish=True,
+ updateSourceStamp=True))
+@end example
+
+The @code{schedulerNames=} argument lists the Triggerables
+that should be triggered when this step is executed. Note that
+it is possible, but not advisable, to create a cycle where a build
+continually triggers itself, because the schedulers are specified
+by name.
+
+If @code{waitForFinish} is True, then the step will not finish until
+all of the builds from the triggered schedulers have finished. If this
+argument is False (the default) or not given, then the buildstep
+succeeds immediately after triggering the schedulers.
+
+If @code{updateSourceStamp} is True (the default), then step updates
+the SourceStamp given to the Triggerables to include
+@code{got_revision} (the revision actually used in this build) as
+@code{revision} (the revision to use in the triggered builds). This is
+useful to ensure that all of the builds use exactly the same
+SourceStamp, even if other Changes have occurred while the build was
+running.
+
+@node Writing New BuildSteps
+@subsection Writing New BuildSteps
+
+While it is a good idea to keep your build process self-contained in
+the source code tree, sometimes it is convenient to put more
+intelligence into your Buildbot configuration. One way to do this is
+to write a custom BuildStep. Once written, this Step can be used in
+the @file{master.cfg} file.
+
+The best reason for writing a custom BuildStep is to better parse the
+results of the command being run. For example, a BuildStep that knows
+about JUnit could look at the logfiles to determine which tests had
+been run, how many passed and how many failed, and then report more
+detailed information than a simple @code{rc==0} -based ``good/bad''
+decision.
+
+@menu
+* Writing BuildStep Constructors::
+* BuildStep LogFiles::
+* Reading Logfiles::
+* Adding LogObservers::
+* BuildStep URLs::
+@end menu
+
+@node Writing BuildStep Constructors
+@subsubsection Writing BuildStep Constructors
+
+BuildStep classes have some extra equipment, because they are their own
+factories. Consider the use of a BuildStep in @file{master.cfg}:
+
+@example
+f.addStep(MyStep(someopt="stuff", anotheropt=1))
+@end example
+
+This creates a single instance of class @code{MyStep}. However, Buildbot needs
+a new object each time the step is executed. This is accomplished by storing
+the information required to instantiate a new object in the @code{factory}
+attribute. When the time comes to construct a new Build, BuildFactory consults
+this attribute (via @code{getStepFactory}) and instantiates a new step object.
+
+When writing a new step class, then, keep in mind that you cannot do
+anything "interesting" in the constructor -- limit yourself to checking and
+storing arguments. To ensure that these arguments are provided to any new
+objects, call @code{self.addFactoryArguments} with any keyword arguments your
+constructor needs.
+
+Keep a @code{**kwargs} argument on the end of your options, and pass that up to
+the parent class's constructor.
+
+The whole thing looks like this:
+
+@example
+class Frobnify(LoggingBuildStep):
+ def __init__(self,
+ frob_what="frobee",
+ frob_how_many=None,
+ frob_how=None,
+ **kwargs):
+
+ # check
+ if frob_how_many is None:
+ raise TypeError("Frobnify argument frob_how_many is required")
+
+ # call parent
+ LoggingBuildStep.__init__(self, **kwargs)
+
+ # and record arguments for later
+ self.addFactoryArguments(
+ frob_what=frob_what,
+ frob_how_many=frob_how_many,
+ frob_how=frob_how)
+
+class FastFrobnify(Frobnify):
+ def __init__(self,
+ speed=5,
+ **kwargs):
+ Frobnify.__init__(self, **kwargs)
+ self.addFactoryArguments(
+ speed=speed)
+@end example
+
+@node BuildStep LogFiles
+@subsubsection BuildStep LogFiles
+
+Each BuildStep has a collection of ``logfiles''. Each one has a short
+name, like ``stdio'' or ``warnings''. Each LogFile contains an
+arbitrary amount of text, usually the contents of some output file
+generated during a build or test step, or a record of everything that
+was printed to stdout/stderr during the execution of some command.
+
+These LogFiles are stored to disk, so they can be retrieved later.
+
+Each can contain multiple ``channels'', generally limited to three
+basic ones: stdout, stderr, and ``headers''. For example, when a
+ShellCommand runs, it writes a few lines to the ``headers'' channel to
+indicate the exact argv strings being run, which directory the command
+is being executed in, and the contents of the current environment
+variables. Then, as the command runs, it adds a lot of ``stdout'' and
+``stderr'' messages. When the command finishes, a final ``header''
+line is added with the exit code of the process.
+
+Status display plugins can format these different channels in
+different ways. For example, the web page shows LogFiles as text/html,
+with header lines in blue text, stdout in black, and stderr in red. A
+different URL is available which provides a text/plain format, in
+which stdout and stderr are collapsed together, and header lines are
+stripped completely. This latter option makes it easy to save the
+results to a file and run @command{grep} or whatever against the
+output.
+
+Each BuildStep contains a mapping (implemented in a python dictionary)
+from LogFile name to the actual LogFile objects. Status plugins can
+get a list of LogFiles to display, for example, a list of HREF links
+that, when clicked, provide the full contents of the LogFile.
+
+@heading Using LogFiles in custom BuildSteps
+
+The most common way for a custom BuildStep to use a LogFile is to
+summarize the results of a ShellCommand (after the command has
+finished running). For example, a compile step with thousands of lines
+of output might want to create a summary of just the warning messages.
+If you were doing this from a shell, you would use something like:
+
+@example
+grep "warning:" output.log >warnings.log
+@end example
+
+In a custom BuildStep, you could instead create a ``warnings'' LogFile
+that contained the same text. To do this, you would add code to your
+@code{createSummary} method that pulls lines from the main output log
+and creates a new LogFile with the results:
+
+@example
+ def createSummary(self, log):
+ warnings = []
+ for line in log.readlines():
+ if "warning:" in line:
+ warnings.append(line)
+ self.addCompleteLog('warnings', "".join(warnings))
+@end example
+
+This example uses the @code{addCompleteLog} method, which creates a
+new LogFile, puts some text in it, and then ``closes'' it, meaning
+that no further contents will be added. This LogFile will appear in
+the HTML display under an HREF with the name ``warnings'', since that
+is the name of the LogFile.
+
+You can also use @code{addHTMLLog} to create a complete (closed)
+LogFile that contains HTML instead of plain text. The normal LogFile
+will be HTML-escaped if presented through a web page, but the HTML
+LogFile will not. At the moment this is only used to present a pretty
+HTML representation of an otherwise ugly exception traceback when
+something goes badly wrong during the BuildStep.
+
+In contrast, you might want to create a new LogFile at the beginning
+of the step, and add text to it as the command runs. You can create
+the LogFile and attach it to the build by calling @code{addLog}, which
+returns the LogFile object. You then add text to this LogFile by
+calling methods like @code{addStdout} and @code{addHeader}. When you
+are done, you must call the @code{finish} method so the LogFile can be
+closed. It may be useful to create and populate a LogFile like this
+from a LogObserver method (@pxref{Adding LogObservers}).
+
+The @code{logfiles=} argument to @code{ShellCommand} (see
+@pxref{ShellCommand}) creates new LogFiles and fills them in realtime
+by asking the buildslave to watch an actual file on disk. The
+buildslave will look for additions in the target file and report them
+back to the BuildStep. These additions will be added to the LogFile by
+calling @code{addStdout}. These secondary LogFiles can be used as the
+source of a LogObserver just like the normal ``stdio'' LogFile.
+
+@node Reading Logfiles
+@subsubsection Reading Logfiles
+
+Once a LogFile has been added to a BuildStep with @code{addLog()},
+@code{addCompleteLog()}, @code{addHTMLLog()}, or @code{logfiles=},
+your BuildStep can retrieve it by using @code{getLog()}:
+
+@example
+class MyBuildStep(ShellCommand):
+ logfiles = @{ "nodelog": "_test/node.log" @}
+
+ def evaluateCommand(self, cmd):
+ nodelog = self.getLog("nodelog")
+ if "STARTED" in nodelog.getText():
+ return SUCCESS
+ else:
+ return FAILURE
+@end example
+
+For a complete list of the methods you can call on a LogFile, please
+see the docstrings on the @code{IStatusLog} class in
+@file{buildbot/interfaces.py}.
+
+
+@node Adding LogObservers, BuildStep URLs, Reading Logfiles, Writing New BuildSteps
+@subsubsection Adding LogObservers
+
+@cindex LogObserver
+@cindex LogLineObserver
+
+Most shell commands emit messages to stdout or stderr as they operate,
+especially if you ask them nicely with a @code{--verbose} flag of some
+sort. They may also write text to a log file while they run. Your
+BuildStep can watch this output as it arrives, to keep track of how
+much progress the command has made. You can get a better measure of
+progress by counting the number of source files compiled or test cases
+run than by merely tracking the number of bytes that have been written
+to stdout. This improves the accuracy and the smoothness of the ETA
+display.
+
+To accomplish this, you will need to attach a @code{LogObserver} to
+one of the log channels, most commonly to the ``stdio'' channel but
+perhaps to another one which tracks a log file. This observer is given
+all text as it is emitted from the command, and has the opportunity to
+parse that output incrementally. Once the observer has decided that
+some event has occurred (like a source file being compiled), it can
+use the @code{setProgress} method to tell the BuildStep about the
+progress that this event represents.
+
+There are a number of pre-built @code{LogObserver} classes that you
+can choose from (defined in @code{buildbot.process.buildstep}), and of
+course you can subclass them to add further customization. The
+@code{LogLineObserver} class handles the grunt work of buffering and
+scanning for end-of-line delimiters, allowing your parser to operate
+on complete stdout/stderr lines. (Lines longer than a set maximum
+length are dropped; the maximum defaults to 16384 bytes, but you can
+change it by calling @code{setMaxLineLength()} on your
+@code{LogLineObserver} instance. Use @code{sys.maxint} for effective
+infinity.)
+
+For example, let's take a look at the @code{TrialTestCaseCounter},
+which is used by the Trial step to count test cases as they are run.
+As Trial executes, it emits lines like the following:
+
+@example
+buildbot.test.test_config.ConfigTest.testDebugPassword ... [OK]
+buildbot.test.test_config.ConfigTest.testEmpty ... [OK]
+buildbot.test.test_config.ConfigTest.testIRC ... [FAIL]
+buildbot.test.test_config.ConfigTest.testLocks ... [OK]
+@end example
+
+When the tests are finished, trial emits a long line of ``======'' and
+then some lines which summarize the tests that failed. We want to
+avoid parsing these trailing lines, because their format is less
+well-defined than the ``[OK]'' lines.
+
+The parser class looks like this:
+
+@example
+from buildbot.process.buildstep import LogLineObserver
+
+class TrialTestCaseCounter(LogLineObserver):
+ _line_re = re.compile(r'^([\w\.]+) \.\.\. \[([^\]]+)\]$')
+ numTests = 0
+ finished = False
+
+ def outLineReceived(self, line):
+ if self.finished:
+ return
+ if line.startswith("=" * 40):
+ self.finished = True
+ return
+
+ m = self._line_re.search(line.strip())
+ if m:
+ testname, result = m.groups()
+ self.numTests += 1
+ self.step.setProgress('tests', self.numTests)
+@end example
+
+This parser only pays attention to stdout, since that's where trial
+writes the progress lines. It has a mode flag named @code{finished} to
+ignore everything after the ``===='' marker, and a scary-looking
+regular expression to match each line while hopefully ignoring other
+messages that might get displayed as the test runs.
+
+Each time it identifies a test has been completed, it increments its
+counter and delivers the new progress value to the step with
+@code{self.step.setProgress}. This class is specifically measuring
+progress along the ``tests'' metric, in units of test cases (as
+opposed to other kinds of progress like the ``output'' metric, which
+measures in units of bytes). The Progress-tracking code uses each
+progress metric separately to come up with an overall completion
+percentage and an ETA value.
+
+To connect this parser into the @code{Trial} BuildStep,
+@code{Trial.__init__} ends with the following clause:
+
+@example
+ # this counter will feed Progress along the 'test cases' metric
+ counter = TrialTestCaseCounter()
+ self.addLogObserver('stdio', counter)
+ self.progressMetrics += ('tests',)
+@end example
+
+This creates a TrialTestCaseCounter and tells the step that the
+counter wants to watch the ``stdio'' log. The observer is
+automatically given a reference to the step in its @code{.step}
+attribute.
+
+@subheading A Somewhat Whimsical Example
+
+Let's say that we've got some snazzy new unit-test framework called
+Framboozle. It's the hottest thing since sliced bread. It slices, it
+dices, it runs unit tests like there's no tomorrow. Plus if your unit
+tests fail, you can use its name for a Web 2.1 startup company, make
+millions of dollars, and hire engineers to fix the bugs for you, while
+you spend your afternoons lazily hang-gliding along a scenic pacific
+beach, blissfully unconcerned about the state of your
+tests.@footnote{framboozle.com is still available. Remember, I get 10%
+:).}
+
+To run a Framboozle-enabled test suite, you just run the 'framboozler'
+command from the top of your source code tree. The 'framboozler'
+command emits a bunch of stuff to stdout, but the most interesting bit
+is that it emits the line "FNURRRGH!" every time it finishes running a
+test case@footnote{Framboozle gets very excited about running unit
+tests.}. You'd like to have a test-case counting LogObserver that
+watches for these lines and counts them, because counting them will
+help the buildbot more accurately calculate how long the build will
+take, and this will let you know exactly how long you can sneak out of
+the office for your hang-gliding lessons without anyone noticing that
+you're gone.
+
+This will involve writing a new BuildStep (probably named
+"Framboozle") which inherits from ShellCommand. The BuildStep class
+definition itself will look something like this:
+
+@example
+# START
+from buildbot.steps.shell import ShellCommand
+from buildbot.process.buildstep import LogLineObserver
+
+class FNURRRGHCounter(LogLineObserver):
+ numTests = 0
+ def outLineReceived(self, line):
+ if "FNURRRGH!" in line:
+ self.numTests += 1
+ self.step.setProgress('tests', self.numTests)
+
+class Framboozle(ShellCommand):
+ command = ["framboozler"]
+
+ def __init__(self, **kwargs):
+ ShellCommand.__init__(self, **kwargs) # always upcall!
+ counter = FNURRRGHCounter()
+ self.addLogObserver('stdio', counter)
+ self.progressMetrics += ('tests',)
+# FINISH
+@end example
+
+So that's the code that we want to wind up using. How do we actually
+deploy it?
+
+You have a couple of different options.
+
+Option 1: The simplest technique is to simply put this text
+(everything from START to FINISH) in your master.cfg file, somewhere
+before the BuildFactory definition where you actually use it in a
+clause like:
+
+@example
+f = BuildFactory()
+f.addStep(SVN(svnurl="stuff"))
+f.addStep(Framboozle())
+@end example
+
+Remember that master.cfg is secretly just a python program with one
+job: populating the BuildmasterConfig dictionary. And python programs
+are allowed to define as many classes as they like. So you can define
+classes and use them in the same file, just as long as the class is
+defined before some other code tries to use it.
+
+This is easy, and it keeps the point of definition very close to the
+point of use, and whoever replaces you after that unfortunate
+hang-gliding accident will appreciate being able to easily figure out
+what the heck this stupid "Framboozle" step is doing anyways. The
+downside is that every time you reload the config file, the Framboozle
+class will get redefined, which means that the buildmaster will think
+that you've reconfigured all the Builders that use it, even though
+nothing changed. Bleh.
+
+Option 2: Instead, we can put this code in a separate file, and import
+it into the master.cfg file just like we would the normal buildsteps
+like ShellCommand and SVN.
+
+Create a directory named ~/lib/python, put everything from START to
+FINISH in ~/lib/python/framboozle.py, and run your buildmaster using:
+
+@example
+ PYTHONPATH=~/lib/python buildbot start MASTERDIR
+@end example
+
+or use the @file{Makefile.buildbot} to control the way
+@command{buildbot start} works. Or add something like this to
+something like your ~/.bashrc or ~/.bash_profile or ~/.cshrc:
+
+@example
+ export PYTHONPATH=~/lib/python
+@end example
+
+Once we've done this, our master.cfg can look like:
+
+@example
+from framboozle import Framboozle
+f = BuildFactory()
+f.addStep(SVN(svnurl="stuff"))
+f.addStep(Framboozle())
+@end example
+
+or:
+
+@example
+import framboozle
+f = BuildFactory()
+f.addStep(SVN(svnurl="stuff"))
+f.addStep(framboozle.Framboozle())
+@end example
+
+(check out the python docs for details about how "import" and "from A
+import B" work).
+
+What we've done here is to tell python that every time it handles an
+"import" statement for some named module, it should look in our
+~/lib/python/ for that module before it looks anywhere else. After our
+directories, it will try in a bunch of standard directories too
+(including the one where buildbot is installed). By setting the
+PYTHONPATH environment variable, you can add directories to the front
+of this search list.
+
+Python knows that once it "import"s a file, it doesn't need to
+re-import it again. This means that reconfiguring the buildmaster
+(with "buildbot reconfig", for example) won't make it think the
+Framboozle class has changed every time, so the Builders that use it
+will not be spuriously restarted. On the other hand, you either have
+to start your buildmaster in a slightly weird way, or you have to
+modify your environment to set the PYTHONPATH variable.
+
+
+Option 3: Install this code into a standard python library directory
+
+Find out what your python's standard include path is by asking it:
+
+@example
+80:warner@@luther% python
+Python 2.4.4c0 (#2, Oct 2 2006, 00:57:46)
+[GCC 4.1.2 20060928 (prerelease) (Debian 4.1.1-15)] on linux2
+Type "help", "copyright", "credits" or "license" for more information.
+>>> import sys
+>>> import pprint
+>>> pprint.pprint(sys.path)
+['',
+ '/usr/lib/python24.zip',
+ '/usr/lib/python2.4',
+ '/usr/lib/python2.4/plat-linux2',
+ '/usr/lib/python2.4/lib-tk',
+ '/usr/lib/python2.4/lib-dynload',
+ '/usr/local/lib/python2.4/site-packages',
+ '/usr/lib/python2.4/site-packages',
+ '/usr/lib/python2.4/site-packages/Numeric',
+ '/var/lib/python-support/python2.4',
+ '/usr/lib/site-python']
+@end example
+
+In this case, putting the code into
+/usr/local/lib/python2.4/site-packages/framboozle.py would work just
+fine. We can use the same master.cfg "import framboozle" statement as
+in Option 2. By putting it in a standard include directory (instead of
+the decidedly non-standard ~/lib/python), we don't even have to set
+PYTHONPATH to anything special. The downside is that you probably have
+to be root to write to one of those standard include directories.
+
+
+Option 4: Submit the code for inclusion in the Buildbot distribution
+
+Make a fork of buildbot on http://github.com/djmitche/buildbot or post a patch
+in a bug at http://buildbot.net. In either case, post a note about your patch
+to the mailing list, so others can provide feedback and, eventually, commit it.
+
+@example
+from buildbot.steps import framboozle
+f = BuildFactory()
+f.addStep(SVN(svnurl="stuff"))
+f.addStep(framboozle.Framboozle())
+@end example
+
+And then you don't even have to install framboozle.py anywhere on your
+system, since it will ship with Buildbot. You don't have to be root,
+you don't have to set PYTHONPATH. But you do have to make a good case
+for Framboozle being worth going into the main distribution, you'll
+probably have to provide docs and some unit test cases, you'll need to
+figure out what kind of beer the author likes, and then you'll have to
+wait until the next release. But in some environments, all this is
+easier than getting root on your buildmaster box, so the tradeoffs may
+actually be worth it.
+
+
+
+Putting the code in master.cfg (1) makes it available to that
+buildmaster instance. Putting it in a file in a personal library
+directory (2) makes it available for any buildmasters you might be
+running. Putting it in a file in a system-wide shared library
+directory (3) makes it available for any buildmasters that anyone on
+that system might be running. Getting it into the buildbot's upstream
+repository (4) makes it available for any buildmasters that anyone in
+the world might be running. It's all a matter of how widely you want
+to deploy that new class.
+
+
+
+@node BuildStep URLs, , Adding LogObservers, Writing New BuildSteps
+@subsubsection BuildStep URLs
+
+@cindex links
+@cindex BuildStep URLs
+@cindex addURL
+
+Each BuildStep has a collection of ``links''. Like its collection of
+LogFiles, each link has a name and a target URL. The web status page
+creates HREFs for each link in the same box as it does for LogFiles,
+except that the target of the link is the external URL instead of an
+internal link to a page that shows the contents of the LogFile.
+
+These external links can be used to point at build information hosted
+on other servers. For example, the test process might produce an
+intricate description of which tests passed and failed, or some sort
+of code coverage data in HTML form, or a PNG or GIF image with a graph
+of memory usage over time. The external link can provide an easy way
+for users to navigate from the buildbot's status page to these
+external web sites or file servers. Note that the step itself is
+responsible for ensuring that there will be a document available at
+the given URL (perhaps by using @command{scp} to copy the HTML output
+to a @file{~/public_html/} directory on a remote web server). Calling
+@code{addURL} does not magically populate a web server.
+
+To set one of these links, the BuildStep should call the @code{addURL}
+method with the name of the link and the target URL. Multiple URLs can
+be set.
+
+In this example, we assume that the @command{make test} command causes
+a collection of HTML files to be created and put somewhere on the
+coverage.example.org web server, in a filename that incorporates the
+build number.
+
+@example
+class TestWithCodeCoverage(BuildStep):
+ command = ["make", "test",
+ WithProperties("buildnum=%s", "buildnumber")]
+
+ def createSummary(self, log):
+ buildnumber = self.getProperty("buildnumber")
+ url = "http://coverage.example.org/builds/%s.html" % buildnumber
+ self.addURL("coverage", url)
+@end example
+
+You might also want to extract the URL from some special message
+output by the build process itself:
+
+@example
+class TestWithCodeCoverage(BuildStep):
+ command = ["make", "test",
+ WithProperties("buildnum=%s", "buildnumber")]
+
+ def createSummary(self, log):
+ output = StringIO(log.getText())
+ for line in output.readlines():
+ if line.startswith("coverage-url:"):
+ url = line[len("coverage-url:"):].strip()
+ self.addURL("coverage", url)
+ return
+@end example
+
+Note that a build process which emits both stdout and stderr might
+cause this line to be split or interleaved between other lines. It
+might be necessary to restrict the getText() call to only stdout with
+something like this:
+
+@example
+ output = StringIO("".join([c[1]
+ for c in log.getChunks()
+ if c[0] == LOG_CHANNEL_STDOUT]))
+@end example
+
+Of course if the build is run under a PTY, then stdout and stderr will
+be merged before the buildbot ever sees them, so such interleaving
+will be unavoidable.
+
+
+@node Interlocks, Build Factories, Build Steps, Build Process
+@section Interlocks
+
+@cindex locks
+@slindex buildbot.locks.MasterLock
+@slindex buildbot.locks.SlaveLock
+@slindex buildbot.locks.LockAccess
+
+Until now, we assumed that a master can run builds at any slave whenever
+needed or desired. Sometimes, you want to enforce additional constraints on
+builds. For reasons like limited network bandwidth, old slave machines, or a
+self-willed database server, you may want to limit the number of builds (or
+build steps) that can access a resource.
+
+The mechanism used by Buildbot is known as the read/write lock.@footnote{See
+http://en.wikipedia.org/wiki/Read/write_lock_pattern for more information.} It
+allows either many readers or a single writer but not a combination of readers
+and writers. The general lock has been modified and extended for use in
+Buildbot. Firstly, the general lock allows an infinite number of readers. In
+Buildbot, we often want to put an upper limit on the number of readers, for
+example allowing two out of five possible builds at the same time. To do this,
+the lock counts the number of active readers. Secondly, the terms @emph{read
+mode} and @emph{write mode} are confusing in Buildbot context. They have been
+replaced by @emph{counting mode} (since the lock counts them) and @emph{exclusive
+mode}. As a result of these changes, locks in Buildbot allow a number of
+builds (up to some fixed number) in counting mode, or they allow one build in
+exclusive mode.
+
+Often, not all slaves are equal. To allow for this situation, Buildbot allows
+you to have a separate upper limit on the count for each slave. In this way, you
+can have at most 3 concurrent builds at a fast slave, 2 at a slightly older
+slave, and 1 at all other slaves.
+
+The final thing you can specify when you introduce a new lock is its scope.
+Some constraints are global -- they must be enforced over all slaves. Other
+constraints are local to each slave. A @emph{master lock} is used for the
+global constraints. You can ensure for example that at most one build (of all
+builds running at all slaves) accesses the database server. With a
+@emph{slave lock} you can add a limit local to each slave. With such a lock,
+you can for example enforce an upper limit to the number of active builds at a
+slave, like above.
+
+Time for a few examples. Below, a master lock is defined to protect a database,
+and a slave lock is created to limit the number of builds at each slave.
+
+@example
+from buildbot import locks
+
+db_lock = locks.MasterLock("database")
+build_lock = locks.SlaveLock("slave_builds",
+ maxCount = 1,
+ maxCountForSlave = @{ 'fast': 3, 'new': 2 @})
+@end example
+
+After importing locks from buildbot, @code{db_lock} is defined to be a master
+lock. The @code{"database"} string is used for uniquely identifying the lock.
+At the next line, a slave lock called @code{build_lock} is created. It is
+identified by the @code{"slave_builds"} string. Since the requirements of the
+lock are a bit more complicated, two optional arguments are also specified. The
+@code{maxCount} parameter sets the default limit for builds in counting mode to
+@code{1}. For the slave called @code{'fast'} however, we want to have at most
+three builds, and for the slave called @code{'new'} the upper limit is two
+builds running at the same time.
+
+The next step is using the locks in builds. Buildbot allows a lock to be used
+during an entire build (from beginning to end), or only during a single build
+step. In the latter case, the lock is claimed for use just before the step
+starts, and released again when the step ends. To prevent
+deadlocks,@footnote{Deadlock is the situation where two or more slaves each
+hold a lock in exclusive mode, and in addition want to claim the lock held by
+the other slave exclusively as well. Since locks allow at most one exclusive
+user, both slaves will wait forever.} it is not possible to claim or release
+locks at other times.
+
+To use locks, you should add them with a @code{locks} argument.
+Each use of a lock is either in counting mode (that is, possibly shared with
+other builds) or in exclusive mode. A build or build step proceeds only when it
+has acquired all locks. If a build or step needs a lot of locks, it may be
+starved@footnote{Starving is the situation that only a few locks are available,
+and they are immediately grabbed by another build. As a result, it may take a
+long time before all locks needed by the starved build are free at the same
+time.} by other builds that need fewer locks.
+
+To illustrate use of locks, a few examples.
+
+@example
+from buildbot import locks
+from buildbot.steps import source, shell
+from buildbot.process import factory
+
+db_lock = locks.MasterLock("database")
+build_lock = locks.SlaveLock("slave_builds",
+ maxCount = 1,
+ maxCountForSlave = @{ 'fast': 3, 'new': 2 @})
+
+f = factory.BuildFactory()
+f.addStep(source.SVN(svnurl="http://example.org/svn/Trunk"))
+f.addStep(shell.ShellCommand(command="make all"))
+f.addStep(shell.ShellCommand(command="make test",
+ locks=[db_lock.access('exclusive')]))
+
+b1 = @{'name': 'full1', 'slavename': 'fast', 'builddir': 'f1', 'factory': f,
+ 'locks': [build_lock.access('counting')] @}
+
+b2 = @{'name': 'full2', 'slavename': 'new', 'builddir': 'f2', 'factory': f,
+ 'locks': [build_lock.access('counting')] @}
+
+b3 = @{'name': 'full3', 'slavename': 'old', 'builddir': 'f3', 'factory': f,
+ 'locks': [build_lock.access('counting')] @}
+
+b4 = @{'name': 'full4', 'slavename': 'other', 'builddir': 'f4', 'factory': f,
+ 'locks': [build_lock.access('counting')] @}
+
+c['builders'] = [b1, b2, b3, b4]
+@end example
+
+Here we have four builders @code{b1}, @code{b2}, @code{b3}, and @code{b4}. Each
+builder performs the same checkout, make, and test build step sequence.
+We want to enforce that at most one test step is executed between all slaves due
+to restrictions with the database server. This is done by adding the
+@code{locks=} parameter with the third step. It takes a list of locks with their
+access mode. In this case only the @code{db_lock} is needed. The exclusive
+access mode is used to ensure there is at most one slave that executes the test
+step.
+
+In addition to exclusively accessing the database, we also want slaves to stay
+responsive even under the load of a large number of builds being triggered.
+For this purpose, the slave lock called @code{build_lock} is defined. Since
+the restraint holds for entire builds, the lock is specified in the builder
+with @code{'locks': [build_lock.access('counting')]}.
+@node Build Factories, , Interlocks, Build Process
+@section Build Factories
+
+
+Each Builder is equipped with a ``build factory'', which is
+responsible for producing the actual @code{Build} objects that perform
+each build. This factory is created in the configuration file, and
+attached to a Builder through the @code{factory} element of its
+dictionary.
+
+The standard @code{BuildFactory} object creates @code{Build} objects
+by default. These Builds will each execute a collection of BuildSteps
+in a fixed sequence. Each step can affect the results of the build,
+but in general there is little intelligence to tie the different steps
+together. You can create subclasses of @code{Build} to implement more
+sophisticated build processes, and then use a subclass of
+@code{BuildFactory} (or simply set the @code{buildClass} attribute) to
+create instances of your new Build subclass.
+
+
+@menu
+* BuildStep Objects::
+* BuildFactory::
+* Process-Specific build factories::
+@end menu
+
+@node BuildStep Objects, BuildFactory, Build Factories, Build Factories
+@subsection BuildStep Objects
+
+The steps used by these builds are all subclasses of @code{BuildStep}.
+The standard ones provided with Buildbot are documented later,
+@xref{Build Steps}. You can also write your own subclasses to use in
+builds.
+
+The basic behavior for a @code{BuildStep} is to:
+
+@itemize @bullet
+@item
+run for a while, then stop
+@item
+possibly invoke some RemoteCommands on the attached build slave
+@item
+possibly produce a set of log files
+@item
+finish with a status described by one of four values defined in
+buildbot.status.builder: SUCCESS, WARNINGS, FAILURE, SKIPPED
+@item
+provide a list of short strings to describe the step
+@item
+define a color (generally green, orange, or red) with which the
+step should be displayed
+@end itemize
+
+
+More sophisticated steps may produce additional information and
+provide it to later build steps, or store it in the factory to provide
+to later builds.
+
+
+@menu
+* BuildFactory Attributes::
+* Quick builds::
+@end menu
+
+@node BuildFactory, Process-Specific build factories, BuildStep Objects, Build Factories
+@subsection BuildFactory
+
+@bfindex buildbot.process.factory.BuildFactory
+@bfindex buildbot.process.factory.BasicBuildFactory
+@c TODO: what is BasicSVN anyway?
+@bfindex buildbot.process.factory.BasicSVN
+
+The default @code{BuildFactory}, provided in the
+@code{buildbot.process.factory} module, contains an internal list of
+``BuildStep specifications'': a list of @code{(step_class, kwargs)}
+tuples for each. These specification tuples are constructed when the
+config file is read, by asking the instances passed to @code{addStep}
+for their subclass and arguments.
+
+When asked to create a Build, the @code{BuildFactory} puts a copy of
+the list of step specifications into the new Build object. When the
+Build is actually started, these step specifications are used to
+create the actual set of BuildSteps, which are then executed one at a
+time. This serves to give each Build an independent copy of each step.
+For example, a build which consists of a CVS checkout followed by a
+@code{make build} would be constructed as follows:
+
+@example
+from buildbot.steps import source, shell
+from buildbot.process import factory
+
+f = factory.BuildFactory()
+f.addStep(source.CVS(cvsroot=CVSROOT, cvsmodule="project", mode="update"))
+f.addStep(shell.Compile(command=["make", "build"]))
+@end example
+
+(To support config files from buildbot-0.7.5 and earlier,
+@code{addStep} also accepts the @code{f.addStep(shell.Compile,
+command=["make","build"])} form, although its use is discouraged
+because then the @code{Compile} step doesn't get to validate or
+complain about its arguments until build time. The modern
+pass-by-instance approach allows this validation to occur while the
+config file is being loaded, where the admin has a better chance of
+noticing problems).
+
+It is also possible to pass a list of steps into the
+@code{BuildFactory} when it is created. Using @code{addStep} is
+usually simpler, but there are cases where it is more convenient to
+create the list of steps ahead of time:
+
+@example
+from buildbot.steps import source, shell
+from buildbot.process import factory
+
+all_steps = [source.CVS(cvsroot=CVSROOT, cvsmodule="project", mode="update"),
+ shell.Compile(command=["make", "build"]),
+ ]
+f = factory.BuildFactory(all_steps)
+@end example
+
+
+Each step can affect the build process in the following ways:
+
+@itemize @bullet
+@item
+If the step's @code{haltOnFailure} attribute is True, then a failure
+in the step (i.e. if it completes with a result of FAILURE) will cause
+the whole build to be terminated immediately: no further steps will be
+executed, with the exception of steps with @code{alwaysRun} set to
+True. @code{haltOnFailure} is useful for setup steps upon which the
+rest of the build depends: if the CVS checkout or @code{./configure}
+process fails, there is no point in trying to compile or test the
+resulting tree.
+
+@item
+If the step's @code{alwaysRun} attribute is True, then it will always
+be run, regardless of if previous steps have failed. This is useful
+for cleanup steps that should always be run to return the build
+directory or build slave into a good state.
+
+@item
+If the @code{flunkOnFailure} or @code{flunkOnWarnings} flag is set,
+then a result of FAILURE or WARNINGS will mark the build as a whole as
+FAILED. However, the remaining steps will still be executed. This is
+appropriate for things like multiple testing steps: a failure in any
+one of them will indicate that the build has failed, however it is
+still useful to run them all to completion.
+
+@item
+Similarly, if the @code{warnOnFailure} or @code{warnOnWarnings} flag
+is set, then a result of FAILURE or WARNINGS will mark the build as
+having WARNINGS, and the remaining steps will still be executed. This
+may be appropriate for certain kinds of optional build or test steps.
+For example, a failure experienced while building documentation files
+should be made visible with a WARNINGS result but not be serious
+enough to warrant marking the whole build with a FAILURE.
+
+@end itemize
+
+In addition, each Step produces its own results, may create logfiles,
+etc. However only the flags described above have any effect on the
+build as a whole.
+
+The pre-defined BuildSteps like @code{CVS} and @code{Compile} have
+reasonably appropriate flags set on them already. For example, without
+a source tree there is no point in continuing the build, so the
+@code{CVS} class has the @code{haltOnFailure} flag set to True. Look
+in @file{buildbot/steps/*.py} to see how the other Steps are
+marked.
+
+Each Step is created with an additional @code{workdir} argument that
+indicates where its actions should take place. This is specified as a
+subdirectory of the slave builder's base directory, with a default
+value of @code{build}. This is only implemented as a step argument (as
+opposed to simply being a part of the base directory) because the
+CVS/SVN steps need to perform their checkouts from the parent
+directory.
+
+@menu
+* BuildFactory Attributes::
+* Quick builds::
+@end menu
+
+@node BuildFactory Attributes, Quick builds, BuildFactory, BuildFactory
+@subsubsection BuildFactory Attributes
+
+Some attributes from the BuildFactory are copied into each Build.
+
+@cindex treeStableTimer
+
+@table @code
+@item useProgress
+(defaults to True): if True, the buildmaster keeps track of how long
+each step takes, so it can provide estimates of how long future builds
+will take. If builds are not expected to take a consistent amount of
+time (such as incremental builds in which a random set of files are
+recompiled or tested each time), this should be set to False to
+inhibit progress-tracking.
+
+@end table
+
+
+@node Quick builds, , BuildFactory Attributes, BuildFactory
+@subsubsection Quick builds
+
+@bfindex buildbot.process.factory.QuickBuildFactory
+
+The difference between a ``full build'' and a ``quick build'' is that
+quick builds are generally done incrementally, starting with the tree
+where the previous build was performed. That simply means that the
+source-checkout step should be given a @code{mode='update'} flag, to
+do the source update in-place.
+
+In addition to that, the @code{useProgress} flag should be set to
+False. Incremental builds will (or at least they ought to) compile as
+few files as necessary, so they will take an unpredictable amount of
+time to run. Therefore it would be misleading to claim to predict how
+long the build will take.
+
+
+@node Process-Specific build factories, , BuildFactory, Build Factories
+@subsection Process-Specific build factories
+
+Many projects use one of a few popular build frameworks to simplify
+the creation and maintenance of Makefiles or other compilation
+structures. Buildbot provides several pre-configured BuildFactory
+subclasses which let you build these projects with a minimum of fuss.
+
+@menu
+* GNUAutoconf::
+* CPAN::
+* Python distutils::
+* Python/Twisted/trial projects::
+@end menu
+
+@node GNUAutoconf, CPAN, Process-Specific build factories, Process-Specific build factories
+@subsubsection GNUAutoconf
+
+@bfindex buildbot.process.factory.GNUAutoconf
+
+@uref{http://www.gnu.org/software/autoconf/, GNU Autoconf} is a
+software portability tool, intended to make it possible to write
+programs in C (and other languages) which will run on a variety of
+UNIX-like systems. Most GNU software is built using autoconf. It is
+frequently used in combination with GNU automake. These tools both
+encourage a build process which usually looks like this:
+
+@example
+% CONFIG_ENV=foo ./configure --with-flags
+% make all
+% make check
+# make install
+@end example
+
+(except of course the Buildbot always skips the @code{make install}
+part).
+
+The Buildbot's @code{buildbot.process.factory.GNUAutoconf} factory is
+designed to build projects which use GNU autoconf and/or automake. The
+configuration environment variables, the configure flags, and command
+lines used for the compile and test are all configurable, in general
+the default values will be suitable.
+
+Example:
+
+@example
+# use the s() convenience function defined earlier
+f = factory.GNUAutoconf(source=s(step.SVN, svnurl=URL, mode="copy"),
+ flags=["--disable-nls"])
+@end example
+
+Required Arguments:
+
+@table @code
+@item source
+This argument must be a step specification tuple that provides a
+BuildStep to generate the source tree.
+@end table
+
+Optional Arguments:
+
+@table @code
+@item configure
+The command used to configure the tree. Defaults to
+@code{./configure}. Accepts either a string or a list of shell argv
+elements.
+
+@item configureEnv
+The environment used for the initial configuration step. This accepts
+a dictionary which will be merged into the buildslave's normal
+environment. This is commonly used to provide things like
+@code{CFLAGS="-O2 -g"} (to turn off debug symbols during the compile).
+Defaults to an empty dictionary.
+
+@item configureFlags
+A list of flags to be appended to the argument list of the configure
+command. This is commonly used to enable or disable specific features
+of the autoconf-controlled package, like @code{["--without-x"]} to
+disable windowing support. Defaults to an empty list.
+
+@item compile
+this is a shell command or list of argv values which is used to
+actually compile the tree. It defaults to @code{make all}. If set to
+None, the compile step is skipped.
+
+@item test
+this is a shell command or list of argv values which is used to run
+the tree's self-tests. It defaults to @code{make check}. If set to
+None, the test step is skipped.
+
+@end table
+
+
+@node CPAN, Python distutils, GNUAutoconf, Process-Specific build factories
+@subsubsection CPAN
+
+@bfindex buildbot.process.factory.CPAN
+
+Most Perl modules available from the @uref{http://www.cpan.org/, CPAN}
+archive use the @code{MakeMaker} module to provide configuration,
+build, and test services. The standard build routine for these modules
+looks like:
+
+@example
+% perl Makefile.PL
+% make
+% make test
+# make install
+@end example
+
+(except again Buildbot skips the install step)
+
+Buildbot provides a @code{CPAN} factory to compile and test these
+projects.
+
+
+Arguments:
+@table @code
+@item source
+(required): A step specification tuple, like that used by GNUAutoconf.
+
+@item perl
+A string which specifies the @code{perl} executable to use. Defaults
+to just @code{perl}.
+
+@end table
+
+
+@node Python distutils, Python/Twisted/trial projects, CPAN, Process-Specific build factories
+@subsubsection Python distutils
+
+@bfindex buildbot.process.factory.Distutils
+
+Most Python modules use the @code{distutils} package to provide
+configuration and build services. The standard build process looks
+like:
+
+@example
+% python ./setup.py build
+% python ./setup.py install
+@end example
+
+Unfortunately, although Python provides a standard unit-test framework
+named @code{unittest}, to the best of my knowledge @code{distutils}
+does not provide a standardized target to run such unit tests. (Please
+let me know if I'm wrong, and I will update this factory.)
+
+The @code{Distutils} factory provides support for running the build
+part of this process. It accepts the same @code{source=} parameter as
+the other build factories.
+
+
+Arguments:
+@table @code
+@item source
+(required): A step specification tuple, like that used by GNUAutoconf.
+
+@item python
+A string which specifies the @code{python} executable to use. Defaults
+to just @code{python}.
+
+@item test
+Provides a shell command which runs unit tests. This accepts either a
+string or a list. The default value is None, which disables the test
+step (since there is no common default command to run unit tests in
+distutils modules).
+
+@end table
+
+
+@node Python/Twisted/trial projects, , Python distutils, Process-Specific build factories
+@subsubsection Python/Twisted/trial projects
+
+@bfindex buildbot.process.factory.Trial
+@c TODO: document these steps better
+@bsindex buildbot.steps.python_twisted.HLint
+@bsindex buildbot.steps.python_twisted.Trial
+@bsindex buildbot.steps.python_twisted.ProcessDocs
+@bsindex buildbot.steps.python_twisted.BuildDebs
+@bsindex buildbot.steps.python_twisted.RemovePYCs
+
+Twisted provides a unit test tool named @code{trial} which provides a
+few improvements over Python's built-in @code{unittest} module. Many
+python projects which use Twisted for their networking or application
+services also use trial for their unit tests. These modules are
+usually built and tested with something like the following:
+
+@example
+% python ./setup.py build
+% PYTHONPATH=build/lib.linux-i686-2.3 trial -v PROJECTNAME.test
+% python ./setup.py install
+@end example
+
+Unfortunately, the @file{build/lib} directory into which the
+built/copied .py files are placed is actually architecture-dependent,
+and I do not yet know of a simple way to calculate its value. For many
+projects it is sufficient to import their libraries ``in place'' from
+the tree's base directory (@code{PYTHONPATH=.}).
+
+In addition, the @var{PROJECTNAME} value where the test files are
+located is project-dependent: it is usually just the project's
+top-level library directory, as common practice suggests the unit test
+files are put in the @code{test} sub-module. This value cannot be
+guessed, the @code{Trial} class must be told where to find the test
+files.
+
+The @code{Trial} class provides support for building and testing
+projects which use distutils and trial. If the test module name is
+specified, trial will be invoked. The library path used for testing
+can also be set.
+
+One advantage of trial is that the Buildbot happens to know how to
+parse trial output, letting it identify which tests passed and which
+ones failed. The Buildbot can then provide fine-grained reports about
+how many tests have failed, when individual tests fail when they had
+been passing previously, etc.
+
+Another feature of trial is that you can give it a series of source
+.py files, and it will search them for special @code{test-case-name}
+tags that indicate which test cases provide coverage for that file.
+Trial can then run just the appropriate tests. This is useful for
+quick builds, where you want to only run the test cases that cover the
+changed functionality.
+
+Arguments:
+@table @code
+@item source
+(required): A step specification tuple, like that used by GNUAutoconf.
+
+@item buildpython
+A list (argv array) of strings which specifies the @code{python}
+executable to use when building the package. Defaults to just
+@code{['python']}. It may be useful to add flags here, to suppress
+warnings during compilation of extension modules. This list is
+extended with @code{['./setup.py', 'build']} and then executed in a
+ShellCommand.
+
+@item testpath
+Provides a directory to add to @code{PYTHONPATH} when running the unit
+tests, if tests are being run. Defaults to @code{.} to include the
+project files in-place. The generated build library is frequently
+architecture-dependent, but may simply be @file{build/lib} for
+pure-python modules.
+
+@item trialpython
+Another list of strings used to build the command that actually runs
+trial. This is prepended to the contents of the @code{trial} argument
+below. It may be useful to add @code{-W} flags here to suppress
+warnings that occur while tests are being run. Defaults to an empty
+list, meaning @code{trial} will be run without an explicit
+interpreter, which is generally what you want if you're using
+@file{/usr/bin/trial} instead of, say, the @file{./bin/trial} that
+lives in the Twisted source tree.
+
+@item trial
+provides the name of the @code{trial} command. It is occasionally
+useful to use an alternate executable, such as @code{trial2.2} which
+might run the tests under an older version of Python. Defaults to
+@code{trial}.
+
+@item tests
+Provides a module name or names which contain the unit tests for this
+project. Accepts a string, typically @code{PROJECTNAME.test}, or a
+list of strings. Defaults to None, indicating that no tests should be
+run. You must either set this or @code{useTestCaseNames} to do anything
+useful with the Trial factory.
+
+@item useTestCaseNames
+Tells the Step to provide the names of all changed .py files to trial,
+so it can look for test-case-name tags and run just the matching test
+cases. Suitable for use in quick builds. Defaults to False.
+
+@item randomly
+If @code{True}, tells Trial (with the @code{--random=0} argument) to
+run the test cases in random order, which sometimes catches subtle
+inter-test dependency bugs. Defaults to @code{False}.
+
+@item recurse
+If @code{True}, tells Trial (with the @code{--recurse} argument) to
+look in all subdirectories for additional test cases. It isn't clear
+to me how this works, but it may be useful to deal with the
+unknown-PROJECTNAME problem described above, and is currently used in
+the Twisted buildbot to accommodate the fact that test cases are now
+distributed through multiple twisted.SUBPROJECT.test directories.
+
+@end table
+
+Unless one of @code{tests} or @code{useTestCaseNames}
+is set, no tests will be run.
+
+Some quick examples follow. Most of these examples assume that the
+target python code (the ``code under test'') can be reached directly
+from the root of the target tree, rather than being in a @file{lib/}
+subdirectory.
+
+@example
+# Trial(source, tests="toplevel.test") does:
+# python ./setup.py build
+# PYTHONPATH=. trial -to toplevel.test
+
+# Trial(source, tests=["toplevel.test", "other.test"]) does:
+# python ./setup.py build
+# PYTHONPATH=. trial -to toplevel.test other.test
+
+# Trial(source, useTestCaseNames=True) does:
+# python ./setup.py build
+# PYTHONPATH=. trial -to --testmodule=foo/bar.py.. (from Changes)
+
+# Trial(source, buildpython=["python2.3", "-Wall"], tests="foo.tests"):
+# python2.3 -Wall ./setup.py build
+# PYTHONPATH=. trial -to foo.tests
+
+# Trial(source, trialpython="python2.3", trial="/usr/bin/trial",
+# tests="foo.tests") does:
+# python ./setup.py build
+# PYTHONPATH=. python2.3 /usr/bin/trial -to foo.tests
+
+# For running trial out of the tree being tested (only useful when the
+# tree being built is Twisted itself):
+# Trial(source, trialpython=["python2.3", "-Wall"], trial="./bin/trial",
+# tests="foo.tests") does:
+# python ./setup.py build
+# PYTHONPATH=. python2.3 -Wall ./bin/trial -to foo.tests
+@end example
+
+If the output directory of @code{./setup.py build} is known, you can
+pull the python code from the built location instead of the source
+directories. This should be able to handle variations in where the
+source comes from, as well as accommodating binary extension modules:
+
+@example
+# Trial(source,tests="toplevel.test",testpath='build/lib.linux-i686-2.3')
+# does:
+# python ./setup.py build
+# PYTHONPATH=build/lib.linux-i686-2.3 trial -to toplevel.test
+@end example
+
+
+@node Status Delivery, Command-line tool, Build Process, Top
+@chapter Status Delivery
+
+More details are available in the docstrings for each class, use a
+command like @code{pydoc buildbot.status.html.WebStatus} to see them.
+Most status delivery objects take a @code{categories=} argument, which
+can contain a list of ``category'' names: in this case, it will only
+show status for Builders that are in one of the named categories.
+
+(implementor's note: each of these objects should be a
+service.MultiService which will be attached to the BuildMaster object
+when the configuration is processed. They should use
+@code{self.parent.getStatus()} to get access to the top-level IStatus
+object, either inside @code{startService} or later. They may call
+@code{status.subscribe()} in @code{startService} to receive
+notifications of builder events, in which case they must define
+@code{builderAdded} and related methods. See the docstrings in
+@file{buildbot/interfaces.py} for full details.)
+
+@menu
+* WebStatus::
+* MailNotifier::
+* IRC Bot::
+* PBListener::
+* Writing New Status Plugins::
+@end menu
+
+@c @node Email Delivery, , Status Delivery, Status Delivery
+@c @subsection Email Delivery
+
+@c DOCUMENT THIS
+
+
+@node WebStatus, MailNotifier, Status Delivery, Status Delivery
+@section WebStatus
+
+@cindex WebStatus
+@stindex buildbot.status.web.baseweb.WebStatus
+
+The @code{buildbot.status.html.WebStatus} status target runs a small
+web server inside the buildmaster. You can point a browser at this web
+server and retrieve information about every build the buildbot knows
+about, as well as find out what the buildbot is currently working on.
+
+The first page you will see is the ``Welcome Page'', which contains
+links to all the other useful pages. This page is simply served from
+the @file{public_html/index.html} file in the buildmaster's base
+directory, where it is created by the @command{buildbot create-master}
+command along with the rest of the buildmaster.
+
+The most complex resource provided by @code{WebStatus} is the
+``Waterfall Display'', which shows a time-based chart of events. This
+somewhat-busy display provides detailed information about all steps of
+all recent builds, and provides hyperlinks to look at individual build
+logs and source changes. By simply reloading this page on a regular
+basis, you will see a complete description of everything the buildbot
+is currently working on.
+
+There are also pages with more specialized information. For example,
+there is a page which shows the last 20 builds performed by the
+buildbot, one line each. Each line is a link to detailed information
+about that build. By adding query arguments to the URL used to reach
+this page, you can narrow the display to builds that involved certain
+branches, or which ran on certain Builders. These pages are described
+in great detail below.
+
+
+When the buildmaster is created, a subdirectory named
+@file{public_html/} is created in its base directory. By default, @code{WebStatus}
+will serve files from this directory: for example, when a user points
+their browser at the buildbot's @code{WebStatus} URL, they will see
+the contents of the @file{public_html/index.html} file. Likewise,
+@file{public_html/robots.txt}, @file{public_html/buildbot.css}, and
+@file{public_html/favicon.ico} are all useful things to have in there.
+The first time a buildmaster is created, the @file{public_html}
+directory is populated with some sample files, which you will probably
+want to customize for your own project. These files are all static:
+the buildbot does not modify them in any way as it serves them to HTTP
+clients.
+
+@example
+from buildbot.status.html import WebStatus
+c['status'].append(WebStatus(8080))
+@end example
+
+Note that the initial robots.txt file has Disallow lines for all of
+the dynamically-generated buildbot pages, to discourage web spiders
+and search engines from consuming a lot of CPU time as they crawl
+through the entire history of your buildbot. If you are running the
+buildbot behind a reverse proxy, you'll probably need to put the
+robots.txt file somewhere else (at the top level of the parent web
+server), and replace the URL prefixes in it with more suitable values.
+
+If you would like to use an alternative root directory, add the
+@code{public_html=..} option to the @code{WebStatus} creation:
+
+@example
+c['status'].append(WebStatus(8080, public_html="/var/www/buildbot"))
+@end example
+
+In addition, if you are familiar with twisted.web @emph{Resource
+Trees}, you can write code to add additional pages at places inside
+this web space. Just use @code{webstatus.putChild} to place these
+resources.
+
+The following section describes the special URLs and the status views
+they provide.
+
+
+@menu
+* WebStatus Configuration Parameters::
+* Buildbot Web Resources::
+* XMLRPC server::
+* HTML Waterfall::
+@end menu
+
+@node WebStatus Configuration Parameters, Buildbot Web Resources, WebStatus, WebStatus
+@subsection WebStatus Configuration Parameters
+
+The most common way to run a @code{WebStatus} is on a regular TCP
+port. To do this, just pass in the TCP port number when you create the
+@code{WebStatus} instance; this is called the @code{http_port} argument:
+
+@example
+from buildbot.status.html import WebStatus
+c['status'].append(WebStatus(8080))
+@end example
+
+The @code{http_port} argument is actually a ``strports specification''
+for the port that the web server should listen on. This can be a
+simple port number, or a string like
+@code{tcp:8080:interface=127.0.0.1} (to limit connections to the
+loopback interface, and therefore to clients running on the same
+host)@footnote{It may even be possible to provide SSL access by using
+a specification like
+@code{"ssl:12345:privateKey=mykey.pem:certKey=cert.pem"}, but this is
+completely untested}.
+
+If instead (or in addition) you provide the @code{distrib_port}
+argument, a twisted.web distributed server will be started either on a
+TCP port (if @code{distrib_port} is like @code{"tcp:12345"}) or more
+likely on a UNIX socket (if @code{distrib_port} is like
+@code{"unix:/path/to/socket"}).
+
+The @code{distrib_port} option means that, on a host with a
+suitably-configured twisted-web server, you do not need to consume a
+separate TCP port for the buildmaster's status web page. When the web
+server is constructed with @code{mktap web --user}, URLs that point to
+@code{http://host/~username/} are dispatched to a sub-server that is
+listening on a UNIX socket at @code{~username/.twisted-web-pb}. On
+such a system, it is convenient to create a dedicated @code{buildbot}
+user, then set @code{distrib_port} to
+@code{"unix:"+os.path.expanduser("~/.twistd-web-pb")}. This
+configuration will make the HTML status page available at
+@code{http://host/~buildbot/} . Suitable URL remapping can make it
+appear at @code{http://host/buildbot/}, and the right virtual host
+setup can even place it at @code{http://buildbot.host/} .
+
+The other @code{WebStatus} argument is @code{allowForce}. If set to
+True, then the web page will provide a ``Force Build'' button that
+allows visitors to manually trigger builds. This is useful for
+developers to re-run builds that have failed because of intermittent
+problems in the test suite, or because of libraries that were not
+installed at the time of the previous build. You may not wish to allow
+strangers to cause a build to run: in that case, set this to False to
+remove these buttons. The default value is False.
+
+
+
+@node Buildbot Web Resources, XMLRPC server, WebStatus Configuration Parameters, WebStatus
+@subsection Buildbot Web Resources
+
+Certain URLs are ``magic'', and the pages they serve are created by
+code in various classes in the @file{buildbot.status.web} package
+instead of being read from disk. The most common way to access these
+pages is for the buildmaster admin to write or modify the
+@file{index.html} page to contain links to them. Of course other
+project web pages can contain links to these buildbot pages as well.
+
+Many pages can be modified by adding query arguments to the URL. For
+example, a page which shows the results of the most recent build
+normally does this for all builders at once. But by appending
+``?builder=i386'' to the end of the URL, the page will show only the
+results for the ``i386'' builder. When used in this way, you can add
+multiple ``builder='' arguments to see multiple builders. Remembering
+that URL query arguments are separated @emph{from each other} with
+ampersands, a URL that ends in ``?builder=i386&builder=ppc'' would
+show builds for just those two Builders.
+
+The @code{branch=} query argument can be used on some pages. This
+filters the information displayed by that page down to only the builds
+or changes which involved the given branch. Use @code{branch=trunk} to
+reference the trunk: if you aren't intentionally using branches,
+you're probably using trunk. Multiple @code{branch=} arguments can be
+used to examine multiple branches at once (so appending
+@code{?branch=foo&branch=bar} to the URL will show builds involving
+either branch). No @code{branch=} arguments means to show builds and
+changes for all branches.
+
+Some pages may include the Builder name or the build number in the
+main part of the URL itself. For example, a page that describes Build
+#7 of the ``i386'' builder would live at @file{/builders/i386/builds/7}.
+
+The table below lists all of the internal pages and the URLs that can
+be used to access them.
+
+NOTE: of the pages described here, @code{/slave_status_timeline} and
+@code{/last_build} have not yet been implemented, and @code{/xmlrpc}
+has only a few methods so far. Future releases will improve this.
+
+@table @code
+
+@item /waterfall
+
+This provides a chronologically-oriented display of the activity of
+all builders. It is the same display used by the Waterfall display.
+
+By adding one or more ``builder='' query arguments, the Waterfall is
+restricted to only showing information about the given Builders. By
+adding one or more ``branch='' query arguments, the display is
+restricted to showing information about the given branches. In
+addition, adding one or more ``category='' query arguments to the URL
+will limit the display to Builders that were defined with one of the
+given categories.
+
+A 'show_events=true' query argument causes the display to include
+non-Build events, like slaves attaching and detaching, as well as
+reconfiguration events. 'show_events=false' hides these events. The
+default is to show them.
+
+The @code{last_time=}, @code{first_time=}, and @code{show_time=}
+arguments will control what interval of time is displayed. The default
+is to show the latest events, but these can be used to look at earlier
+periods in history. The @code{num_events=} argument also provides a
+limit on the size of the displayed page.
+
+The Waterfall has references to resources many of the other portions
+of the URL space: @file{/builders} for access to individual builds,
+@file{/changes} for access to information about source code changes,
+etc.
+
+@item /rss
+
+This provides an RSS feed summarizing all failed builds. The same
+query-arguments used by 'waterfall' can be added to filter the
+feed output.
+
+@item /atom
+
+This provides an atom feed summarizing all failed builds. The same
+query-arguments used by 'waterfall' can be added to filter the feed
+output.
+
+@item /builders/$BUILDERNAME
+
+This describes the given Builder, and provides buttons to force a build.
+
+@item /builders/$BUILDERNAME/builds/$BUILDNUM
+
+This describes a specific Build.
+
+@item /builders/$BUILDERNAME/builds/$BUILDNUM/steps/$STEPNAME
+
+This describes a specific BuildStep.
+
+@item /builders/$BUILDERNAME/builds/$BUILDNUM/steps/$STEPNAME/logs/$LOGNAME
+
+This provides an HTML representation of a specific logfile.
+
+@item /builders/$BUILDERNAME/builds/$BUILDNUM/steps/$STEPNAME/logs/$LOGNAME/text
+
+This returns the logfile as plain text, without any HTML coloring
+markup. It also removes the ``headers'', which are the lines that
+describe what command was run and what the environment variable
+settings were like. This may be useful for saving to disk and
+feeding to tools like 'grep'.
+
+@item /changes
+
+This provides a brief description of the ChangeSource in use
+(@pxref{Change Sources}).
+
+@item /changes/NN
+
+This shows detailed information about the numbered Change: who was the
+author, what files were changed, what revision number was represented,
+etc.
+
+@item /buildslaves
+
+This summarizes each BuildSlave, including which Builders are
+configured to use it, whether the buildslave is currently connected or
+not, and host information retrieved from the buildslave itself.
+
+@item /one_line_per_build
+
+This page shows one line of text for each build, merging information
+from all Builders@footnote{Apparently this is the same way
+http://buildd.debian.org displays build status}. Each line specifies
+the name of the Builder, the number of the Build, what revision it
+used, and a summary of the results. Successful builds are in green,
+while failing builds are in red. The date and time of the build are
+added to the right-hand edge of the line. The lines are ordered by
+build finish timestamp.
+
+One or more @code{builder=} or @code{branch=} arguments can be used to
+restrict the list. In addition, a @code{numbuilds=} argument will
+control how many lines are displayed (20 by default).
+
+@item /one_box_per_builder
+
+This page shows a small table, with one box for each Builder,
+containing the results of the most recent Build. It does not show the
+individual steps, or the current status. This is a simple summary of
+buildbot status: if this page is green, then all tests are passing.
+
+As with @code{/one_line_per_build}, this page will also honor
+@code{builder=} and @code{branch=} arguments.
+
+@item /about
+
+This page gives a brief summary of the Buildbot itself: software
+version, versions of some libraries that the Buildbot depends upon,
+etc. It also contains a link to the buildbot.net home page.
+
+@item /slave_status_timeline
+
+(note: this page has not yet been implemented)
+
+This provides a chronological display of configuration and operational
+events: master startup/shutdown, slave connect/disconnect, and
+config-file changes. When a config-file reload is abandoned because of
+an error in the config file, the error is displayed on this page.
+
+This page does not show any builds.
+
+@item /last_build/$BUILDERNAME/status.png
+
+This returns a PNG image that describes the results of the most recent
+build, which can be referenced in an IMG tag by other pages, perhaps
+from a completely different site. Use it as you would a webcounter.
+
+@end table
+
+There are also a set of web-status resources that are intended for use
+by other programs, rather than humans.
+
+@table @code
+
+@item /xmlrpc
+
+This runs an XML-RPC server which can be used to query status
+information about various builds. See @ref{XMLRPC server} for more
+details.
+
+@end table
+
+@node XMLRPC server, HTML Waterfall, Buildbot Web Resources, WebStatus
+@subsection XMLRPC server
+
+When using WebStatus, the buildbot runs an XML-RPC server at
+@file{/xmlrpc} that can be used by other programs to query build
+status. The following table lists the methods that can be invoked
+using this interface.
+
+@table @code
+@item getAllBuildsInInterval(start, stop)
+
+Return a list of builds that have completed after the 'start'
+timestamp and before the 'stop' timestamp. This looks at all Builders.
+
+The timestamps are integers, interpreted as standard unix timestamps
+(seconds since epoch).
+
+Each Build is returned as a tuple in the form: @code{(buildername,
+buildnumber, build_end, branchname, revision, results, text)}
+
+The buildnumber is an integer. 'build_end' is an integer (seconds
+since epoch) specifying when the build finished.
+
+The branchname is a string, which may be an empty string to indicate
+None (i.e. the default branch). The revision is a string whose meaning
+is specific to the VC system in use, and comes from the 'got_revision'
+build property. The results are expressed as a string, one of
+('success', 'warnings', 'failure', 'exception'). The text is a list of
+short strings that ought to be joined by spaces and include slightly
+more data about the results of the build.
+
+@item getBuild(builder_name, build_number)
+
+Return information about a specific build.
+
+This returns a dictionary (aka ``struct'' in XMLRPC terms) with
+complete information about the build. It does not include the contents
+of the log files, but it has just about everything else.
+
+@end table
+
+@node HTML Waterfall, , XMLRPC server, WebStatus
+@subsection HTML Waterfall
+
+@cindex Waterfall
+@stindex buildbot.status.html.Waterfall
+
+The @code{Waterfall} status target, deprecated as of 0.7.6, is a
+subset of the regular @code{WebStatus} resource (@pxref{WebStatus}).
+This section (and the @code{Waterfall} class itself) will be removed
+from a future release.
+
+@example
+from buildbot.status import html
+w = html.WebStatus(http_port=8080)
+c['status'].append(w)
+@end example
+
+
+
+@node MailNotifier, IRC Bot, WebStatus, Status Delivery
+@section MailNotifier
+
+@cindex email
+@cindex mail
+@stindex buildbot.status.mail.MailNotifier
+
+The buildbot can also send email when builds finish. The most common
+use of this is to tell developers when their change has caused the
+build to fail. It is also quite common to send a message to a mailing
+list (usually named ``builds'' or similar) about every build.
+
+The @code{MailNotifier} status target is used to accomplish this. You
+configure it by specifying who mail should be sent to, under what
+circumstances mail should be sent, and how to deliver the mail. It can
+be configured to only send out mail for certain builders, and only
+send messages when the build fails, or when the builder transitions
+from success to failure. It can also be configured to include various
+build logs in each message.
+
+
+By default, the message will be sent to the Interested Users list
+(@pxref{Doing Things With Users}), which includes all developers who
+made changes in the build. You can add additional recipients with the
+extraRecipients argument.
+
+Each MailNotifier sends mail to a single set of recipients. To send
+different kinds of mail to different recipients, use multiple
+MailNotifiers.
+
+The following simple example will send an email upon the completion of
+each build, to just those developers whose Changes were included in
+the build. The email contains a description of the Build, its results,
+and URLs where more information can be obtained.
+
+@example
+from buildbot.status.mail import MailNotifier
+mn = MailNotifier(fromaddr="buildbot@@example.org", lookup="example.org")
+c['status'].append(mn)
+@end example
+
+To get a simple one-message-per-build (say, for a mailing list), use
+the following form instead. This form does not send mail to individual
+developers (and thus does not need the @code{lookup=} argument,
+explained below), instead it only ever sends mail to the ``extra
+recipients'' named in the arguments:
+
+@example
+mn = MailNotifier(fromaddr="buildbot@@example.org",
+ sendToInterestedUsers=False,
+ extraRecipients=['listaddr@@example.org'])
+@end example
+
+In some cases it is desirable to have different information than what
+is provided in a standard MailNotifier message. For this purpose
+MailNotifier provides the argument customMesg (a function) which allows
+for the creation of messages with unique content.
+
+For example it can be useful to display the last few lines of a log file
+and recent changes when a builder fails:
+
+@example
+def message(attrs):
+ logLines = 10
+ text = list()
+ text.append("STATUS: %s" % attrs['result'].title())
+ text.append("")
+ text.extend([c.asText() for c in attrs['changes']])
+ text.append("")
+ name, url, lines = attrs['logs'][-1]
+ text.append("Last %d lines of '%s':" % (logLines, name))
+ text.extend(["\t%s\n" % line for line in lines[len(lines)-logLines:]])
+ text.append("")
+ text.append("-buildbot")
+ return ("\n".join(text), 'plain')
+
+mn = MailNotifier(fromaddr="buildbot@@example.org",
+ sendToInterestedUsers=False,
+ mode='problem',
+ extraRecipients=['listaddr@@example.org'],
+ customMesg=message)
+@end example
+
+A customMesg function takes a single dict argument (see below) and returns a
+tuple of strings. The first string is the complete text of the message and the
+second is the message type ('plain' or 'html'). The 'html' type should be used
+when generating an HTML message:
+
+@example
+def message(attrs):
+ logLines = 10
+ text = list()
+ text.append('<h4>Build status %s.</h4>' % (attrs['result'].title()))
+ if attrs['changes']:
+ text.append('<h4>Recent Changes:</h4>')
+ text.extend([c.asHTML() for c in attrs['changes']])
+ name, url, lines = attrs['logs'][-1]
+ text.append('<h4>Last %d lines of "%s":</h4>' % (logLines, name))
+ text.append('<p>')
+ text.append('<br>'.join([line for line in lines[len(lines)-logLines:]]))
+ text.append('</p>')
+ text.append('<br><br>')
+ text.append('Full log at: %s' % url)
+ text.append('<br><br>')
+ text.append('<b>-buildbot</b>')
+ return ('\n'.join(text), 'html')
+@end example
+
+@heading MailNotifier arguments
+
+@table @code
+@item fromaddr
+The email address to be used in the 'From' header.
+
+@item sendToInterestedUsers
+(boolean). If True (the default), send mail to all of the Interested
+Users. If False, only send mail to the extraRecipients list.
+
+@item extraRecipients
+(tuple of strings). A list of email addresses to which messages should
+be sent (in addition to the InterestedUsers list, which includes any
+developers who made Changes that went into this build). It is a good
+idea to create a small mailing list and deliver to that, then let
+subscribers come and go as they please.
+
+@item subject
+(string). A string to be used as the subject line of the message.
+@code{%(builder)s} will be replaced with the name of the builder which
+provoked the message.
+
+@item mode
+(string). Default to 'all'. One of:
+@table @code
+@item all
+Send mail about all builds, both passing and failing
+@item failing
+Only send mail about builds which fail
+@item problem
+Only send mail about a build which failed when the previous build has passed.
+If your builds usually pass, then this will only send mail when a problem
+occurs.
+@end table
+
+@item builders
+(list of strings). A list of builder names for which mail should be
+sent. Defaults to None (send mail for all builds). Use either builders
+or categories, but not both.
+
+@item categories
+(list of strings). A list of category names to serve status
+information for. Defaults to None (all categories). Use either
+builders or categories, but not both.
+
+@item addLogs
+(boolean). If True, include all build logs as attachments to the
+messages. These can be quite large. This can also be set to a list of
+log names, to send a subset of the logs. Defaults to False.
+
+@item relayhost
+(string). The host to which the outbound SMTP connection should be
+made. Defaults to 'localhost'
+
+@item lookup
+(implementor of @code{IEmailLookup}). Object which provides
+IEmailLookup, which is responsible for mapping User names (which come
+from the VC system) into valid email addresses. If not provided, the
+notifier will only be able to send mail to the addresses in the
+extraRecipients list. Most of the time you can use a simple Domain
+instance. As a shortcut, you can pass as string: this will be treated
+as if you had provided Domain(str). For example,
+lookup='twistedmatrix.com' will allow mail to be sent to all
+developers whose SVN usernames match their twistedmatrix.com account
+names. See buildbot/status/mail.py for more details.
+
+@item customMesg
+This is an optional function that can be used to generate a custom mail
+message. The customMesg function takes a single dict and must return a
+tuple containing the message text and type ('html' or 'plain'). Below is a list
+of available keys in the dict passed to customMesg:
+
+@table @code
+@item builderName
+(str) Name of the builder that generated this event.
+@item projectName
+(str) Name of the project.
+@item mode
+(str) Mode set in MailNotifier. (all, failing, problem).
+@item result
+(str) Builder result as a string. 'success', 'warnings', 'failure', 'skipped', or 'exception'
+@item buildURL
+(str) URL to build page.
+@item buildbotURL
+(str) URL to buildbot main page.
+@item buildText
+(str) Build text from build.getText().
+@item slavename
+(str) Slavename.
+@item reason
+(str) Build reason from build.getReason().
+@item responsibleUsers
+(List of str) List of responsible users.
+@item branch
+(str) Name of branch used. If no SourceStamp exists branch
+is an empty string.
+@item revision
+(str) Name of revision used. If no SourceStamp exists revision
+is an empty string.
+@item patch
+(str) Name of patch used. If no SourceStamp exists patch
+is an empty string.
+@item changes
+(list of objs) List of change objects from SourceStamp. A change
+object has the following useful information:
+@table @code
+@item who
+(str) who made this change
+@item revision
+(str) what VC revision is this change
+@item branch
+(str) on what branch did this change occur
+@item when
+(str) when did this change occur
+@item files
+(list of str) what files were affected in this change
+@item comments
+(str) comments regarding the change.
+@end table
+The functions asText and asHTML return a list of strings with
+the above information formatted.
+@item logs
+(List of Tuples) List of tuples where each tuple contains the log name, log url,
+and log contents as a list of strings.
+@end table
+@end table
+
+@node IRC Bot, PBListener, MailNotifier, Status Delivery
+@section IRC Bot
+
+@cindex IRC
+@stindex buildbot.status.words.IRC
+
+
+The @code{buildbot.status.words.IRC} status target creates an IRC bot
+which will attach to certain channels and be available for status
+queries. It can also be asked to announce builds as they occur, or be
+told to shut up.
+
+@example
+from buildbot.status import words
+irc = words.IRC("irc.example.org", "botnickname",
+ channels=["channel1", "channel2"],
+ password="mysecretpassword",
+ notify_events=@{
+ 'exception': 1,
+ 'successToFailure': 1,
+ 'failureToSuccess': 1,
+ @})
+c['status'].append(irc)
+@end example
+
+Take a look at the docstring for @code{words.IRC} for more details on
+configuring this service. The @code{password} argument, if provided,
+will be sent to Nickserv to claim the nickname: some IRC servers will
+not allow clients to send private messages until they have logged in
+with a password.
+
+To use the service, you address messages at the buildbot, either
+normally (@code{botnickname: status}) or with private messages
+(@code{/msg botnickname status}). The buildbot will respond in kind.
+
+Some of the commands currently available:
+
+@table @code
+
+@item list builders
+Emit a list of all configured builders
+@item status BUILDER
+Announce the status of a specific Builder: what it is doing right now.
+@item status all
+Announce the status of all Builders
+@item watch BUILDER
+If the given Builder is currently running, wait until the Build is
+finished and then announce the results.
+@item last BUILDER
+Return the results of the last build to run on the given Builder.
+@item join CHANNEL
+Join the given IRC channel
+@item leave CHANNEL
+Leave the given IRC channel
+@item notify on|off|list EVENT
+Report events relating to builds. If the command is issued as a
+private message, then the report will be sent back as a private
+message to the user who issued the command. Otherwise, the report
+will be sent to the channel. Available events to be notified are:
+
+@table @code
+@item started
+A build has started
+@item finished
+A build has finished
+@item success
+A build finished successfully
+@item failed
+A build failed
+@item exception
+A build generated an exception
+@item xToY
+The previous build was x, but this one is Y, where x and Y are each
+one of success, warnings, failure, exception (except Y is
+capitalized). For example: successToFailure will notify if the
+previous build was successful, but this one failed
+@end table
+
+@item help COMMAND
+Describe a command. Use @code{help commands} to get a list of known
+commands.
+@item source
+Announce the URL of the Buildbot's home page.
+@item version
+Announce the version of this Buildbot.
+@end table
+
+Additionally, the config file may specify default notification options
+as shown in the example earlier.
+
+If the @code{allowForce=True} option was used, some additional commands
+will be available:
+
+@table @code
+@item force build BUILDER REASON
+Tell the given Builder to start a build of the latest code. The user
+requesting the build and REASON are recorded in the Build status. The
+buildbot will announce the build's status when it finishes.
+
+@item stop build BUILDER REASON
+Terminate any running build in the given Builder. REASON will be added
+to the build status to explain why it was stopped. You might use this
+if you committed a bug, corrected it right away, and don't want to
+wait for the first build (which is destined to fail) to complete
+before starting the second (hopefully fixed) build.
+@end table
+
+@node PBListener, Writing New Status Plugins, IRC Bot, Status Delivery
+@section PBListener
+
+@cindex PBListener
+@stindex buildbot.status.client.PBListener
+
+
+@example
+import buildbot.status.client
+pbl = buildbot.status.client.PBListener(port=int, user=str,
+ passwd=str)
+c['status'].append(pbl)
+@end example
+
+This sets up a PB listener on the given TCP port, to which a PB-based
+status client can connect and retrieve status information.
+@code{buildbot statusgui} (@pxref{statusgui}) is an example of such a
+status client. The @code{port} argument can also be a strports
+specification string.
+
+@node Writing New Status Plugins, , PBListener, Status Delivery
+@section Writing New Status Plugins
+
+TODO: this needs a lot more examples
+
+Each status plugin is an object which provides the
+@code{twisted.application.service.IService} interface, which creates a
+tree of Services with the buildmaster at the top [not strictly true].
+The status plugins are all children of an object which implements
+@code{buildbot.interfaces.IStatus}, the main status object. From this
+object, the plugin can retrieve anything it wants about current and
+past builds. It can also subscribe to hear about new and upcoming
+builds.
+
+Status plugins which only react to human queries (like the Waterfall
+display) never need to subscribe to anything: they are idle until
+someone asks a question, then wake up and extract the information they
+need to answer it, then they go back to sleep. Plugins which need to
+act spontaneously when builds complete (like the MailNotifier plugin)
+need to subscribe to hear about new builds.
+
+If the status plugin needs to run network services (like the HTTP
+server used by the Waterfall plugin), they can be attached as Service
+children of the plugin itself, using the @code{IServiceCollection}
+interface.
+
+
+
+@node Command-line tool, Resources, Status Delivery, Top
+@chapter Command-line tool
+
+The @command{buildbot} command-line tool can be used to start or stop a
+buildmaster or buildbot, and to interact with a running buildmaster.
+Some of its subcommands are intended for buildmaster admins, while
+some are for developers who are editing the code that the buildbot is
+monitoring.
+
+@menu
+* Administrator Tools::
+* Developer Tools::
+* Other Tools::
+* .buildbot config directory::
+@end menu
+
+@node Administrator Tools, Developer Tools, Command-line tool, Command-line tool
+@section Administrator Tools
+
+The following @command{buildbot} sub-commands are intended for
+buildmaster administrators:
+
+@heading create-master
+
+This creates a new directory and populates it with files that allow it
+to be used as a buildmaster's base directory.
+
+@example
+buildbot create-master BASEDIR
+@end example
+
+@heading create-slave
+
+This creates a new directory and populates it with files that let it
+be used as a buildslave's base directory. You must provide several
+arguments, which are used to create the initial @file{buildbot.tac}
+file.
+
+@example
+buildbot create-slave @var{BASEDIR} @var{MASTERHOST}:@var{PORT} @var{SLAVENAME} @var{PASSWORD}
+@end example
+
+@heading start
+
+This starts a buildmaster or buildslave which was already created in
+the given base directory. The daemon is launched in the background,
+with events logged to a file named @file{twistd.log}.
+
+@example
+buildbot start BASEDIR
+@end example
+
+@heading stop
+
+This terminates the daemon (either buildmaster or buildslave) running
+in the given directory.
+
+@example
+buildbot stop BASEDIR
+@end example
+
+@heading sighup
+
+This sends a SIGHUP to the buildmaster running in the given directory,
+which causes it to re-read its @file{master.cfg} file.
+
+@example
+buildbot sighup BASEDIR
+@end example
+
+@node Developer Tools, Other Tools, Administrator Tools, Command-line tool
+@section Developer Tools
+
+These tools are provided for use by the developers who are working on
+the code that the buildbot is monitoring.
+
+@menu
+* statuslog::
+* statusgui::
+* try::
+@end menu
+
+@node statuslog, statusgui, Developer Tools, Developer Tools
+@subsection statuslog
+
+@example
+buildbot statuslog --master @var{MASTERHOST}:@var{PORT}
+@end example
+
+This command starts a simple text-based status client, one which just
+prints out a new line each time an event occurs on the buildmaster.
+
+The @option{--master} option provides the location of the
+@code{buildbot.status.client.PBListener} status port, used to deliver
+build information to realtime status clients. The option is always in
+the form of a string, with hostname and port number separated by a
+colon (@code{HOSTNAME:PORTNUM}). Note that this port is @emph{not} the
+same as the slaveport (although a future version may allow the same
+port number to be used for both purposes). If you get an error message
+to the effect of ``Failure: twisted.cred.error.UnauthorizedLogin:'',
+this may indicate that you are connecting to the slaveport rather than
+a @code{PBListener} port.
+
+The @option{--master} option can also be provided by the
+@code{masterstatus} name in @file{.buildbot/options} (@pxref{.buildbot
+config directory}).
+
+@node statusgui, try, statuslog, Developer Tools
+@subsection statusgui
+
+@cindex statusgui
+
+If you have set up a PBListener (@pxref{PBListener}), you will be able
+to monitor your Buildbot using a simple Gtk+ application invoked with
+the @code{buildbot statusgui} command:
+
+@example
+buildbot statusgui --master @var{MASTERHOST}:@var{PORT}
+@end example
+
+This command starts a simple Gtk+-based status client, which contains
+a few boxes for each Builder that change color as events occur. It
+uses the same @option{--master} argument as the @command{buildbot
+statuslog} command (@pxref{statuslog}).
+
+@node try, , statusgui, Developer Tools
+@subsection try
+
+This lets a developer ask the question ``What would happen if I
+committed this patch right now?''. It runs the unit test suite (across
+multiple build platforms) on the developer's current code, allowing
+them to make sure they will not break the tree when they finally
+commit their changes.
+
+The @command{buildbot try} command is meant to be run from within a
+developer's local tree, and starts by figuring out the base revision
+of that tree (what revision was current the last time the tree was
+updated), and a patch that can be applied to that revision of the tree
+to make it match the developer's copy. This (revision, patch) pair is
+then sent to the buildmaster, which runs a build with that
+SourceStamp. If you want, the tool will emit status messages as the
+builds run, and will not terminate until the first failure has been
+detected (or the last success).
+
+There is an alternate form which accepts a pre-made patch file
+(typically the output of a command like 'svn diff'). This ``--diff''
+form does not require a local tree to run from. See @ref{try --diff}.
+
+For this command to work, several pieces must be in place:
+
+
+@heading TryScheduler
+
+@slindex buildbot.scheduler.Try_Jobdir
+@slindex buildbot.scheduler.Try_Userpass
+
+The buildmaster must have a @code{scheduler.Try} instance in
+the config file's @code{c['schedulers']} list. This lets the
+administrator control who may initiate these ``trial'' builds, which
+branches are eligible for trial builds, and which Builders should be
+used for them.
+
+The @code{TryScheduler} has various means to accept build requests:
+all of them enforce more security than the usual buildmaster ports do.
+Any source code being built can be used to compromise the buildslave
+accounts, but in general that code must be checked out from the VC
+repository first, so only people with commit privileges can get
+control of the buildslaves. The usual force-build control channels can
+waste buildslave time but do not allow arbitrary commands to be
+executed by people who don't have those commit privileges. However,
+the source code patch that is provided with the trial build does not
+have to go through the VC system first, so it is important to make
+sure these builds cannot be abused by a non-committer to acquire as
+much control over the buildslaves as a committer has. Ideally, only
+developers who have commit access to the VC repository would be able
+to start trial builds, but unfortunately the buildmaster does not, in
+general, have access to VC system's user list.
+
+As a result, the @code{TryScheduler} requires a bit more
+configuration. There are currently two ways to set this up:
+
+@table @strong
+@item jobdir (ssh)
+
+This approach creates a command queue directory, called the
+``jobdir'', in the buildmaster's working directory. The buildmaster
+admin sets the ownership and permissions of this directory to only
+grant write access to the desired set of developers, all of whom must
+have accounts on the machine. The @code{buildbot try} command creates
+a special file containing the source stamp information and drops it in
+the jobdir, just like a standard maildir. When the buildmaster notices
+the new file, it unpacks the information inside and starts the builds.
+
+The config file entries used by 'buildbot try' either specify a local
+queuedir (for which write and mv are used) or a remote one (using scp
+and ssh).
+
+The advantage of this scheme is that it is quite secure, the
+disadvantage is that it requires fiddling outside the buildmaster
+config (to set the permissions on the jobdir correctly). If the
+buildmaster machine happens to also house the VC repository, then it
+can be fairly easy to keep the VC userlist in sync with the
+trial-build userlist. If they are on different machines, this will be
+much more of a hassle. It may also involve granting developer accounts
+on a machine that would not otherwise require them.
+
+To implement this, the buildslave invokes 'ssh -l username host
+buildbot tryserver ARGS', passing the patch contents over stdin. The
+arguments must include the inlet directory and the revision
+information.
+
+@item user+password (PB)
+
+In this approach, each developer gets a username/password pair, which
+are all listed in the buildmaster's configuration file. When the
+developer runs @code{buildbot try}, their machine connects to the
+buildmaster via PB and authenticates themselves using that username
+and password, then sends a PB command to start the trial build.
+
+The advantage of this scheme is that the entire configuration is
+performed inside the buildmaster's config file. The disadvantages are
+that it is less secure (while the ``cred'' authentication system does
+not expose the password in plaintext over the wire, it does not offer
+most of the other security properties that SSH does). In addition, the
+buildmaster admin is responsible for maintaining the username/password
+list, adding and deleting entries as developers come and go.
+
+@end table
+
+
+For example, to set up the ``jobdir'' style of trial build, using a
+command queue directory of @file{MASTERDIR/jobdir} (and assuming that
+all your project developers were members of the @code{developers} unix
+group), you would first create that directory (with @command{mkdir
+MASTERDIR/jobdir MASTERDIR/jobdir/new MASTERDIR/jobdir/cur
+MASTERDIR/jobdir/tmp; chgrp developers MASTERDIR/jobdir
+MASTERDIR/jobdir/*; chmod g+rwx,o-rwx MASTERDIR/jobdir
+MASTERDIR/jobdir/*}), and then use the following scheduler in the
+buildmaster's config file:
+
+@example
+from buildbot.scheduler import Try_Jobdir
+s = Try_Jobdir("try1", ["full-linux", "full-netbsd", "full-OSX"],
+ jobdir="jobdir")
+c['schedulers'] = [s]
+@end example
+
+Note that you must create the jobdir before telling the buildmaster to
+use this configuration, otherwise you will get an error. Also remember
+that the buildmaster must be able to read and write to the jobdir as
+well. Be sure to watch the @file{twistd.log} file (@pxref{Logfiles})
+as you start using the jobdir, to make sure the buildmaster is happy
+with it.
+
+To use the username/password form of authentication, create a
+@code{Try_Userpass} instance instead. It takes the same
+@code{builderNames} argument as the @code{Try_Jobdir} form, but
+accepts an additional @code{port} argument (to specify the TCP port to
+listen on) and a @code{userpass} list of username/password pairs to
+accept. Remember to use good passwords for this: the security of the
+buildslave accounts depends upon it:
+
+@example
+from buildbot.scheduler import Try_Userpass
+s = Try_Userpass("try2", ["full-linux", "full-netbsd", "full-OSX"],
+ port=8031, userpass=[("alice","pw1"), ("bob", "pw2")] )
+c['schedulers'] = [s]
+@end example
+
+Like most places in the buildbot, the @code{port} argument takes a
+strports specification. See @code{twisted.application.strports} for
+details.
+
+
+@heading locating the master
+
+The @command{try} command needs to be told how to connect to the
+@code{TryScheduler}, and must know which of the authentication
+approaches described above is in use by the buildmaster. You specify
+the approach by using @option{--connect=ssh} or @option{--connect=pb}
+(or @code{try_connect = 'ssh'} or @code{try_connect = 'pb'} in
+@file{.buildbot/options}).
+
+For the PB approach, the command must be given a @option{--master}
+argument (in the form HOST:PORT) that points to TCP port that you
+picked in the @code{Try_Userpass} scheduler. It also takes a
+@option{--username} and @option{--passwd} pair of arguments that match
+one of the entries in the buildmaster's @code{userpass} list. These
+arguments can also be provided as @code{try_master},
+@code{try_username}, and @code{try_password} entries in the
+@file{.buildbot/options} file.
+
+For the SSH approach, the command must be given @option{--tryhost},
+@option{--username}, and optionally @option{--password} (TODO:
+really?) to get to the buildmaster host. It must also be given
+@option{--trydir}, which points to the inlet directory configured
+above. The trydir can be relative to the user's home directory, but
+most of the time you will use an explicit path like
+@file{~buildbot/project/trydir}. These arguments can be provided in
+@file{.buildbot/options} as @code{try_host}, @code{try_username},
+@code{try_password}, and @code{try_dir}.
+
+In addition, the SSH approach needs to connect to a PBListener status
+port, so it can retrieve and report the results of the build (the PB
+approach uses the existing connection to retrieve status information,
+so this step is not necessary). This requires a @option{--master}
+argument, or a @code{masterstatus} entry in @file{.buildbot/options},
+in the form of a HOSTNAME:PORT string.
+
+
+@heading choosing the Builders
+
+A trial build is performed on multiple Builders at the same time, and
+the developer gets to choose which Builders are used (limited to a set
+selected by the buildmaster admin with the TryScheduler's
+@code{builderNames=} argument). The set you choose will depend upon
+what your goals are: if you are concerned about cross-platform
+compatibility, you should use multiple Builders, one from each
+platform of interest. You might use just one builder if that platform
+has libraries or other facilities that allow better test coverage than
+what you can accomplish on your own machine, or faster test runs.
+
+The set of Builders to use can be specified with multiple
+@option{--builder} arguments on the command line. It can also be
+specified with a single @code{try_builders} option in
+@file{.buildbot/options} that uses a list of strings to specify all
+the Builder names:
+
+@example
+try_builders = ["full-OSX", "full-win32", "full-linux"]
+@end example
+
+@heading specifying the VC system
+
+The @command{try} command also needs to know how to take the
+developer's current tree and extract the (revision, patch)
+source-stamp pair. Each VC system uses a different process, so you
+start by telling the @command{try} command which VC system you are
+using, with an argument like @option{--vc=cvs} or @option{--vc=tla}.
+This can also be provided as @code{try_vc} in
+@file{.buildbot/options}.
+
+The following names are recognized: @code{cvs} @code{svn} @code{baz}
+@code{tla} @code{hg} @code{darcs}
+
+
+@heading finding the top of the tree
+
+Some VC systems (notably CVS and SVN) track each directory
+more-or-less independently, which means the @command{try} command
+needs to move up to the top of the project tree before it will be able
+to construct a proper full-tree patch. To accomplish this, the
+@command{try} command will crawl up through the parent directories
+until it finds a marker file. The default name for this marker file is
+@file{.buildbot-top}, so when you are using CVS or SVN you should
+@code{touch .buildbot-top} from the top of your tree before running
+@command{buildbot try}. Alternatively, you can use a filename like
+@file{ChangeLog} or @file{README}, since many projects put one of
+these files in their top-most directory (and nowhere else). To set
+this filename, use @option{--try-topfile=ChangeLog}, or set it in the
+options file with @code{try_topfile = 'ChangeLog'}.
+
+You can also manually set the top of the tree with
+@option{--try-topdir=~/trees/mytree}, or @code{try_topdir =
+'~/trees/mytree'}. If you use @code{try_topdir}, in a
+@file{.buildbot/options} file, you will need a separate options file
+for each tree you use, so it may be more convenient to use the
+@code{try_topfile} approach instead.
+
+Other VC systems which work on full projects instead of individual
+directories (tla, baz, darcs, monotone, mercurial, git) do not require
+@command{try} to know the top directory, so the @option{--try-topfile}
+and @option{--try-topdir} arguments will be ignored.
+@c is this true? I think I currently require topdirs all the time.
+
+If the @command{try} command cannot find the top directory, it will
+abort with an error message.
+
+@heading determining the branch name
+
+Some VC systems record the branch information in a way that ``try''
+can locate it, in particular Arch (both @command{tla} and
+@command{baz}). For the others, if you are using something other than
+the default branch, you will have to tell the buildbot which branch
+your tree is using. You can do this with either the @option{--branch}
+argument, or a @option{try_branch} entry in the
+@file{.buildbot/options} file.
+
+@heading determining the revision and patch
+
+Each VC system has a separate approach for determining the tree's base
+revision and computing a patch.
+
+@table @code
+
+@item CVS
+
+@command{try} pretends that the tree is up to date. It converts the
+current time into a @code{-D} time specification, uses it as the base
+revision, and computes the diff between the upstream tree as of that
+point in time versus the current contents. This works, more or less,
+but requires that the local clock be in reasonably good sync with the
+repository.
+
+@item SVN
+@command{try} does a @code{svn status -u} to find the latest
+repository revision number (emitted on the last line in the ``Status
+against revision: NN'' message). It then performs an @code{svn diff
+-rNN} to find out how your tree differs from the repository version,
+and sends the resulting patch to the buildmaster. If your tree is not
+up to date, this will result in the ``try'' tree being created with
+the latest revision, then @emph{backwards} patches applied to bring it
+``back'' to the version you actually checked out (plus your actual
+code changes), but this will still result in the correct tree being
+used for the build.
+
+@item baz
+@command{try} does a @code{baz tree-id} to determine the
+fully-qualified version and patch identifier for the tree
+(ARCHIVE/VERSION--patch-NN), and uses the VERSION--patch-NN component
+as the base revision. It then does a @code{baz diff} to obtain the
+patch.
+
+@item tla
+@command{try} does a @code{tla tree-version} to get the
+fully-qualified version identifier (ARCHIVE/VERSION), then takes the
+first line of @code{tla logs --reverse} to figure out the base
+revision. Then it does @code{tla changes --diffs} to obtain the patch.
+
+@item Darcs
+@code{darcs changes --context} emits a text file that contains a list
+of all patches back to and including the last tag that was made. This text
+file (plus the location of a repository that contains all these
+patches) is sufficient to re-create the tree. Therefore the contents
+of this ``context'' file @emph{are} the revision stamp for a
+Darcs-controlled source tree.
+
+So @command{try} does a @code{darcs changes --context} to determine
+what your tree's base revision is, and then does a @code{darcs diff
+-u} to compute the patch relative to that revision.
+
+@item Mercurial
+@code{hg identify} emits a short revision ID (basically a truncated
+SHA1 hash of the current revision's contents), which is used as the
+base revision. @code{hg diff} then provides the patch relative to that
+revision. For @command{try} to work, your working directory must only
+have patches that are available from the same remotely-available
+repository that the build process' @code{step.Mercurial} will use.
+
+@item Git
+@code{git branch -v} lists all the branches available in the local
+repository along with the revision ID it points to and a short summary
+of the last commit. The line containing the currently checked out
+branch begins with '* ' (star and space) while all the others start
+with ' ' (two spaces). @command{try} scans for this line and extracts
+the branch name and revision from it. Then it generates a diff against
+the base revision.
+@c TODO: I'm not sure if this actually works the way it's intended
+@c since the extracted base revision might not actually exist in the
+@c upstream repository. Perhaps we need to add a --remote option to
+@c specify the remote tracking branch to generate a diff against.
+
+@c TODO: monotone
+@end table
+
+@heading waiting for results
+
+If you provide the @option{--wait} option (or @code{try_wait = True}
+in @file{.buildbot/options}), the @command{buildbot try} command will
+wait until your changes have either been proven good or bad before
+exiting. Unless you use the @option{--quiet} option (or
+@code{try_quiet=True}), it will emit a progress message every 60
+seconds until the builds have completed.
+
+@menu
+* try --diff::
+@end menu
+
+@node try --diff, , try, try
+@subsubsection try --diff
+
+Sometimes you might have a patch from someone else that you want to
+submit to the buildbot. For example, a user may have created a patch
+to fix some specific bug and sent it to you by email. You've inspected
+the patch and suspect that it might do the job (and have at least
+confirmed that it doesn't do anything evil). Now you want to test it
+out.
+
+One approach would be to check out a new local tree, apply the patch,
+run your local tests, then use ``buildbot try'' to run the tests on
+other platforms. An alternate approach is to use the @command{buildbot
+try --diff} form to have the buildbot test the patch without using a
+local tree.
+
+This form takes a @option{--diff} argument which points to a file that
+contains the patch you want to apply. By default this patch will be
+applied to the TRUNK revision, but if you give the optional
+@option{--baserev} argument, a tree of the given revision will be used
+as a starting point instead of TRUNK.
+
+You can also use @command{buildbot try --diff=-} to read the patch
+from stdin.
+
+Each patch has a ``patchlevel'' associated with it. This indicates the
+number of slashes (and preceding pathnames) that should be stripped
+before applying the diff. This exactly corresponds to the @option{-p}
+or @option{--strip} argument to the @command{patch} utility. By
+default @command{buildbot try --diff} uses a patchlevel of 0, but you
+can override this with the @option{-p} argument.
+
+When you use @option{--diff}, you do not need to use any of the other
+options that relate to a local tree, specifically @option{--vc},
+@option{--try-topfile}, or @option{--try-topdir}. These options will
+be ignored. Of course you must still specify how to get to the
+buildmaster (with @option{--connect}, @option{--tryhost}, etc).
+
+
+@node Other Tools, .buildbot config directory, Developer Tools, Command-line tool
+@section Other Tools
+
+These tools are generally used by buildmaster administrators.
+
+@menu
+* sendchange::
+* debugclient::
+@end menu
+
+@node sendchange, debugclient, Other Tools, Other Tools
+@subsection sendchange
+
+This command is used to tell the buildmaster about source changes. It
+is intended to be used from within a commit script, installed on the
+VC server. It requires that you have a PBChangeSource
+(@pxref{PBChangeSource}) running in the buildmaster (by being set in
+@code{c['change_source']}).
+
+
+@example
+buildbot sendchange --master @var{MASTERHOST}:@var{PORT} --username @var{USER} @var{FILENAMES..}
+@end example
+
+There are other (optional) arguments which can influence the
+@code{Change} that gets submitted:
+
+@table @code
+@item --branch
+This provides the (string) branch specifier. If omitted, it defaults
+to None, indicating the ``default branch''. All files included in this
+Change must be on the same branch.
+
+@item --category
+This provides the (string) category specifier. If omitted, it defaults
+to None, indicating ``no category''. The category property is used
+by Schedulers to filter what changes they listen to.
+
+@item --revision_number
+This provides a (numeric) revision number for the change, used for VC systems
+that use numeric transaction numbers (like Subversion).
+
+@item --revision
+This provides a (string) revision specifier, for VC systems that use
+strings (Arch would use something like patch-42 etc).
+
+@item --revision_file
+This provides a filename which will be opened and the contents used as
+the revision specifier. This is specifically for Darcs, which uses the
+output of @command{darcs changes --context} as a revision specifier.
+This context file can be a couple of kilobytes long, spanning a couple
+lines per patch, and would be a hassle to pass as a command-line
+argument.
+
+@item --comments
+This provides the change comments as a single argument. You may want
+to use @option{--logfile} instead.
+
+@item --logfile
+This instructs the tool to read the change comments from the given
+file. If you use @code{-} as the filename, the tool will read the
+change comments from stdin.
+@end table
+
+
+@node debugclient, , sendchange, Other Tools
+@subsection debugclient
+
+@example
+buildbot debugclient --master @var{MASTERHOST}:@var{PORT} --passwd @var{DEBUGPW}
+@end example
+
+This launches a small Gtk+/Glade-based debug tool, connecting to the
+buildmaster's ``debug port''. This debug port shares the same port
+number as the slaveport (@pxref{Setting the slaveport}), but the
+@code{debugPort} is only enabled if you set a debug password in the
+buildmaster's config file (@pxref{Debug options}). The
+@option{--passwd} option must match the @code{c['debugPassword']}
+value.
+
+@option{--master} can also be provided in @file{.debug/options} by the
+@code{master} key. @option{--passwd} can be provided by the
+@code{debugPassword} key.
+
+The @code{Connect} button must be pressed before any of the other
+buttons will be active. This establishes the connection to the
+buildmaster. The other sections of the tool are as follows:
+
+@table @code
+@item Reload .cfg
+Forces the buildmaster to reload its @file{master.cfg} file. This is
+equivalent to sending a SIGHUP to the buildmaster, but can be done
+remotely through the debug port. Note that it is a good idea to be
+watching the buildmaster's @file{twistd.log} as you reload the config
+file, as any errors which are detected in the config file will be
+announced there.
+
+@item Rebuild .py
+(not yet implemented). The idea here is to use Twisted's ``rebuild''
+facilities to replace the buildmaster's running code with a new
+version. Even if this worked, it would only be used by buildbot
+developers.
+
+@item poke IRC
+This locates a @code{words.IRC} status target and causes it to emit a
+message on all the channels to which it is currently connected. This
+was used to debug a problem in which the buildmaster lost the
+connection to the IRC server and did not attempt to reconnect.
+
+@item Commit
+This allows you to inject a Change, just as if a real one had been
+delivered by whatever VC hook you are using. You can set the name of
+the committed file and the name of the user who is doing the commit.
+Optionally, you can also set a revision for the change. If the
+revision you provide looks like a number, it will be sent as an
+integer, otherwise it will be sent as a string.
+
+@item Force Build
+This lets you force a Builder (selected by name) to start a build of
+the current source tree.
+
+@item Currently
+(obsolete). This was used to manually set the status of the given
+Builder, but the status-assignment code was changed in an incompatible
+way and these buttons are no longer meaningful.
+
+@end table
+
+
+@node .buildbot config directory, , Other Tools, Command-line tool
+@section .buildbot config directory
+
+Many of the @command{buildbot} tools must be told how to contact the
+buildmaster that they interact with. This specification can be
+provided as a command-line argument, but most of the time it will be
+easier to set them in an ``options'' file. The @command{buildbot}
+command will look for a special directory named @file{.buildbot},
+starting from the current directory (where the command was run) and
+crawling upwards, eventually looking in the user's home directory. It
+will look for a file named @file{options} in this directory, and will
+evaluate it as a python script, looking for certain names to be set.
+You can just put simple @code{name = 'value'} pairs in this file to
+set the options.
+
+For a description of the names used in this file, please see the
+documentation for the individual @command{buildbot} sub-commands. The
+following is a brief sample of what this file's contents could be.
+
+@example
+# for status-reading tools
+masterstatus = 'buildbot.example.org:12345'
+# for 'sendchange' or the debug port
+master = 'buildbot.example.org:18990'
+debugPassword = 'eiv7Po'
+@end example
+
+@table @code
+@item masterstatus
+Location of the @code{client.PBListener} status port, used by
+@command{statuslog} and @command{statusgui}.
+
+@item master
+Location of the @code{debugPort} (for @command{debugclient}). Also the
+location of the @code{pb.PBChangeSource} (for @command{sendchange}).
+Usually shares the slaveport, but a future version may make it
+possible to have these listen on a separate port number.
+
+@item debugPassword
+Must match the value of @code{c['debugPassword']}, used to protect the
+debug port, for the @command{debugclient} command.
+
+@item username
+Provides a default username for the @command{sendchange} command.
+
+@end table
+
+
+The following options are used by the @code{buildbot try} command
+(@pxref{try}):
+
+@table @code
+@item try_connect
+This specifies how the ``try'' command should deliver its request to
+the buildmaster. The currently accepted values are ``ssh'' and ``pb''.
+@item try_builders
+Which builders should be used for the ``try'' build.
+@item try_vc
+This specifies the version control system being used.
+@item try_branch
+This indicates that the current tree is on a non-trunk branch.
+@item try_topdir
+@item try_topfile
+Use @code{try_topdir} to explicitly indicate the top of your working
+tree, or @code{try_topfile} to name a file that will only be found in
+that top-most directory.
+
+@item try_host
+@item try_username
+@item try_dir
+When try_connect is ``ssh'', the command will pay attention to
+@code{try_host}, @code{try_username}, and @code{try_dir}.
+
+@item try_username
+@item try_password
+@item try_master
+Instead, when @code{try_connect} is ``pb'', the command will pay
+attention to @code{try_username}, @code{try_password}, and
+@code{try_master}.
+
+@item try_wait
+@item masterstatus
+@code{try_wait} and @code{masterstatus} are used to ask the ``try''
+command to wait for the requested build to complete.
+
+@end table
+
+
+
+@node Resources, Developer's Appendix, Command-line tool, Top
+@chapter Resources
+
+The Buildbot's home page is at @uref{http://buildbot.sourceforge.net/}
+
+For configuration questions and general discussion, please use the
+@code{buildbot-devel} mailing list. The subscription instructions and
+archives are available at
+@uref{http://lists.sourceforge.net/lists/listinfo/buildbot-devel}
+
+@node Developer's Appendix, Index of Useful Classes, Resources, Top
+@unnumbered Developer's Appendix
+
+This appendix contains random notes about the implementation of the
+Buildbot, and is likely to only be of use to people intending to
+extend the Buildbot's internals.
+
+The buildmaster consists of a tree of Service objects, which is shaped
+as follows:
+
+@example
+BuildMaster
+ ChangeMaster (in .change_svc)
+ [IChangeSource instances]
+ [IScheduler instances] (in .schedulers)
+ BotMaster (in .botmaster)
+ [IBuildSlave instances]
+ [IStatusTarget instances] (in .statusTargets)
+@end example
+
+The BotMaster has a collection of Builder objects as values of its
+@code{.builders} dictionary.
+
+
+@node Index of Useful Classes, Index of master.cfg keys, Developer's Appendix, Top
+@unnumbered Index of Useful Classes
+
+This is a list of all user-visible classes. There are the ones that
+are useful in @file{master.cfg}, the buildmaster's configuration file.
+Classes that are not listed here are generally internal things that
+admins are unlikely to have much use for.
+
+
+@heading Change Sources
+@printindex cs
+
+@heading Schedulers and Locks
+@printindex sl
+
+@heading Build Factories
+@printindex bf
+
+@heading Build Steps
+@printindex bs
+
+@c undocumented steps
+@bsindex buildbot.steps.source.Git
+@bsindex buildbot.steps.maxq.MaxQ
+
+
+@heading Status Targets
+@printindex st
+
+@c TODO: undocumented targets
+
+@node Index of master.cfg keys, Index, Index of Useful Classes, Top
+@unnumbered Index of master.cfg keys
+
+This is a list of all of the significant keys in master.cfg . Recall
+that master.cfg is effectively a small python program with exactly one
+responsibility: create a dictionary named @code{BuildmasterConfig}.
+The keys of this dictionary are listed here. The beginning of the
+master.cfg file typically starts with something like:
+
+@example
+BuildmasterConfig = c = @{@}
+@end example
+
+Therefore a config key of @code{change_source} will usually appear in
+master.cfg as @code{c['change_source']}.
+
+@printindex bc
+
+
+@node Index, , Index of master.cfg keys, Top
+@unnumbered Index
+
+@printindex cp
+
+
+@bye
diff --git a/buildbot/docs/epyrun b/buildbot/docs/epyrun
new file mode 100644
index 0000000..db60b5a
--- /dev/null
+++ b/buildbot/docs/epyrun
@@ -0,0 +1,195 @@
+#!/usr/bin/env python
+
+import sys
+import os
+
+from twisted.python import reflect
+from twisted.internet import reactor
+
+# epydoc
+import epydoc
+assert epydoc.__version__[0] == '2', "You need epydoc 2.x!"
+from epydoc.cli import cli
+
+class FakeModule:
+
+ def __init__(self, name, level):
+ self.__level = level
+ self.__name__ = name
+
+ def __repr__(self):
+ return '<Fake %s>' % self.__name__
+ __str__ = __repr__
+
+ def __nonzero__(self):
+ return 1
+
+ def __call__(self, *args, **kw):
+ pass #print 'Called:', args
+
+ def __getattr__(self, attr):
+ if self.__level == 0:
+ raise AttributeError
+ return FakeModule(self.__name__+'.'+attr, self.__level-1)
+
+ def __cmp__(self, other):
+ if not hasattr(other, '___name__'):
+ return -1
+ return cmp(self.__name__, other.__name__)
+
+
+def fakeOut(modname):
+ modpath = modname.split('.')
+ prevmod = None
+ for m in range(len(modpath)):
+ mp = '.'.join(modpath[:m+1])
+ nm = FakeModule(mp, 4)
+ if prevmod:
+ setattr(prevmod, modpath[m], nm)
+ sys.modules[mp] = nm
+ prevmod = nm
+
+#fakeOut("twisted")
+
+# HACK: Another "only doc what we tell you". We don't want epydoc to
+# automatically recurse into subdirectories: "twisted"'s presence was
+# causing "twisted/test" to be docced, even though we explicitly
+# didn't put any twisted/test in our modnames.
+
+from epydoc import imports
+orig_find_modules = imports.find_modules
+
+import re
+
+def find_modules(dirname):
+ if not os.path.isdir(dirname): return []
+ found_init = 0
+ modules = {}
+ dirs = []
+
+ # Search for directories & modules, and check for __init__.py.
+ # Don't include duplicates (like foo.py and foo.pyc), and give
+    # precedence to the .py files.
+ for file in os.listdir(dirname):
+ filepath = os.path.join(dirname, file)
+ if os.path.isdir(filepath): dirs.append(filepath)
+ elif not re.match(r'\w+.py.?', file):
+ continue # Ignore things like ".#foo.py" or "a-b.py"
+ elif file[-3:] == '.py':
+ modules[file] = os.path.join(dirname, file)
+ if file == '__init__.py': found_init = 1
+ elif file[-4:-1] == '.py':
+ modules.setdefault(file[:-1], file)
+ if file[:-1] == '__init__.py': found_init = 1
+ modules = modules.values()
+
+ # If there was no __init__.py, then this isn't a package
+ # directory; return nothing.
+ if not found_init: return []
+
+ # Recurse to the child directories.
+ # **twisted** here's the change: commented next line out
+ #for d in dirs: modules += find_modules(d)
+ return modules
+
+imports.find_modules = find_modules
+
+
+
+# Now, set up the list of modules for epydoc to document
+modnames = []
+def addMod(arg, path, files):
+ for fn in files:
+ file = os.path.join(path, fn).replace('%s__init__'%os.sep, '')
+ if file[-3:] == '.py' and not file.count('%stest%s' % (os.sep,os.sep)):
+ modName = file[:-3].replace(os.sep,'.')
+ try:
+ #print 'pre-loading', modName
+ reflect.namedModule(modName)
+ except ImportError, e:
+ print 'import error:', modName, e
+ except Exception, e:
+ print 'other error:', modName, e
+ else:
+ modnames.append(modName)
+
+document_all = True # are we doing a full build?
+names = ['buildbot/'] #default, may be overridden below
+
+#get list of modules/pkgs on cmd-line
+try:
+ i = sys.argv.index("--modules")
+except:
+ pass
+else:
+ names = sys.argv[i+1:]
+ document_all = False
+ sys.argv[i:] = []
+ #sanity check on names
+ for i in range(len(names)):
+ try:
+ j = names[i].rindex('buildbot/')
+ except:
+ raise SystemExit, 'You can only specify buildbot modules or packages'
+ else:
+            #strip off any leading directories before the 'buildbot/'
+ #dir. this makes it easy to specify full paths, such as
+ #from TwistedEmacs
+ names[i] = names[i][j:]
+
+ old_out_dir = "html"
+    #if -o was specified, change it to a tmp dir -- NOTE(review): tmp_dir is never defined in this script, so both branches below raise NameError
+ #otherwise add our own -o option
+ try:
+ i = sys.argv.index('-o')
+ old_out_dir = sys.argv[i+1]
+ try:
+ os.mkdir(tmp_dir)
+ except OSError:
+ pass
+ sys.argv[i+1] = tmp_dir
+ except ValueError:
+ sys.argv[1:1] = ['-o', tmp_dir]
+
+osrv = sys.argv
+sys.argv=["IGNORE"]
+
+for name in names:
+ if name.endswith(".py"):
+ # turn it in to a python module name
+ name = name[:-3].replace(os.sep, ".")
+ try:
+ reflect.namedModule(name)
+ except ImportError:
+ print 'import error:', name
+ except:
+ print 'other error:', name
+ else:
+ modnames.append(name)
+ else: #assume it's a dir
+ os.path.walk(name, addMod, None)
+
+sys.argv = osrv
+
+if 'buildbot.test' in modnames:
+ modnames.remove('buildbot.test')
+##if 'twisted' in modnames:
+## modnames.remove('twisted')
+
+sys.argv.extend(modnames)
+
+import buildbot
+
+
+sys.argv[1:1] = [
+ '-n', 'BuildBot %s' % buildbot.version,
+ '-u', 'http://buildbot.sourceforge.net/', '--no-private']
+
+# Make it easy to profile epyrun
+if 0:
+ import profile
+ profile.run('cli()', 'epyrun.prof')
+else:
+ cli()
+
+print 'Done!'
diff --git a/buildbot/docs/examples/hello.cfg b/buildbot/docs/examples/hello.cfg
new file mode 100644
index 0000000..d6642a2
--- /dev/null
+++ b/buildbot/docs/examples/hello.cfg
@@ -0,0 +1,92 @@
+#! /usr/bin/python
+
+from buildbot import master
+from buildbot.buildslave import BuildSlave
+from buildbot.process import factory
+from buildbot.steps.source import CVS, SVN, Darcs, Arch
+from buildbot.steps.shell import Configure, Compile, Test
+from buildbot.status import html, client
+from buildbot.changes.pb import PBChangeSource
+
+BuildmasterConfig = c = {}
+
+c['slaves'] = [BuildSlave("bot1", "sekrit")]
+
+c['change_source'] = PBChangeSource(prefix="trunk")
+c['builders'] = []
+
+if True:
+ f = factory.BuildFactory()
+ f.addStep(CVS(cvsroot="/usr/home/warner/stuff/Projects/BuildBot/demo/Repository",
+ cvsmodule="hello",
+ mode="clobber",
+ checkoutDelay=6,
+ alwaysUseLatest=True,
+ ))
+ f.addStep(Configure())
+ f.addStep(Compile())
+ f.addStep(Test(command=["make", "check"]))
+ b1 = {"name": "cvs-hello",
+ "slavename": "bot1",
+ "builddir": "cvs-hello",
+ "factory": f,
+ }
+ c['builders'].append(b1)
+
+if True:
+ svnrep="file:///usr/home/warner/stuff/Projects/BuildBot/demo/SVN-Repository"
+ f = factory.BuildFactory()
+ f.addStep(SVN(svnurl=svnrep+"/hello", mode="update"))
+ f.addStep(Configure())
+ f.addStep(Compile()),
+ f.addStep(Test(command=["make", "check"]))
+ b1 = {"name": "svn-hello",
+ "slavename": "bot1",
+ "builddir": "svn-hello",
+ "factory": f,
+ }
+ c['builders'].append(b1)
+
+if True:
+ f = factory.BuildFactory()
+ f.addStep(Darcs(repourl="http://localhost/~warner/hello-darcs",
+ mode="copy"))
+ f.addStep(Configure(command=["/bin/sh", "./configure"]))
+ f.addStep(Compile())
+ f.addStep(Test(command=["make", "check"]))
+ b1 = {"name": "darcs-hello",
+ "slavename": "bot1",
+ "builddir": "darcs-hello",
+ "factory": f,
+ }
+ c['builders'].append(b1)
+
+if True:
+ f = factory.BuildFactory()
+ f.addStep(Arch(url="http://localhost/~warner/hello-arch",
+ version="gnu-hello--release--2.1.1",
+ mode="copy",
+ ))
+ f.addStep(Configure(command=["/bin/sh", "./configure"]))
+ f.addStep(Compile())
+ f.addStep(Test(command=["make", "check"]))
+ b1 = {"name": "arch-hello",
+ "slavename": "bot1",
+ "builddir": "arch-hello",
+ "factory": f,
+ }
+ c['builders'].append(b1)
+
+
+c['projectName'] = "Hello"
+c['projectURL'] = "http://www.hello.example.com"
+c['buildbotURL'] = "http://localhost:8080"
+
+c['slavePortnum'] = 8007
+c['debugPassword'] = "asdf"
+c['manhole'] = master.Manhole(9900, "username", "password")
+
+c['status'] = [html.WebStatus(http_port=8080),
+ client.PBListener(port=8008),
+ ]
+
diff --git a/buildbot/docs/examples/twisted_master.cfg b/buildbot/docs/examples/twisted_master.cfg
new file mode 100644
index 0000000..7185ef3
--- /dev/null
+++ b/buildbot/docs/examples/twisted_master.cfg
@@ -0,0 +1,329 @@
+#! /usr/bin/python
+
+# NOTE: this configuration file is from the buildbot-0.7.5 era or earlier. It
+# has not been brought up-to-date with the standards of buildbot-0.7.6 . For
+# examples of modern usage, please see hello.cfg, or the sample.cfg which is
+# installed when you run 'buildbot create-master'.
+
+# This configuration file is described in $BUILDBOT/docs/config.xhtml
+
+# This is used (with online=True) to run the Twisted Buildbot at
+# http://www.twistedmatrix.com/buildbot/ . Passwords and other secret
+# information are loaded from a neighboring file called 'private.py'.
+
+import sys
+sys.path.append('/home/buildbot/BuildBot/support-master')
+
+import os.path
+
+from buildbot.changes.pb import PBChangeSource
+from buildbot.scheduler import Scheduler, Try_Userpass
+from buildbot.steps.source import SVN
+from buildbot.process.factory import s
+from buildbot.process.process_twisted import \
+ QuickTwistedBuildFactory, \
+ FullTwistedBuildFactory, \
+ TwistedReactorsBuildFactory
+from buildbot.status import html, words, client, mail
+
+import extra_factory
+reload(extra_factory)
+from extra_factory import GoodTwistedBuildFactory
+
+import private # holds passwords
+reload(private) # make it possible to change the contents without a restart
+
+BuildmasterConfig = c = {}
+
+# I set really=False when testing this configuration at home
+really = True
+usePBChangeSource = True
+
+
+c['bots'] = []
+for bot in private.bot_passwords.keys():
+ c['bots'].append((bot, private.bot_passwords[bot]))
+
+c['sources'] = []
+
+# the Twisted buildbot currently uses the contrib/svn_buildbot.py script.
+# This makes a TCP connection to the ChangeMaster service to push Changes
+# into the build master. The script is invoked by
+# /svn/Twisted/hooks/post-commit, so it will only be run for things inside
+# the Twisted repository. However, the standard SVN practice is to put the
+# actual trunk in a subdirectory named "trunk/" (to leave room for
+# "branches/" and "tags/"). We want to only pay attention to the trunk, so
+# we use "trunk" as a prefix for the ChangeSource. This also strips off that
+# prefix, so that the Builders all see sensible pathnames (which means they
+# can do things like ignore the sandbox properly).
+
+source = PBChangeSource(prefix="trunk/")
+c['sources'].append(source)
+
+
+## configure the builders
+
+if 0:
+ # always build on trunk
+ svnurl = "svn://svn.twistedmatrix.com/svn/Twisted/trunk"
+ source_update = s(SVN, svnurl=svnurl, mode="update")
+ source_copy = s(SVN, svnurl=svnurl, mode="copy")
+ source_export = s(SVN, svnurl=svnurl, mode="export")
+else:
+ # for build-on-branch, we use these instead
+ baseURL = "svn://svn.twistedmatrix.com/svn/Twisted/"
+ defaultBranch = "trunk"
+ source_update = s(SVN, baseURL=baseURL, defaultBranch=defaultBranch,
+ mode="update")
+ source_copy = s(SVN, baseURL=baseURL, defaultBranch=defaultBranch,
+ mode="copy")
+ source_export = s(SVN, baseURL=baseURL, defaultBranch=defaultBranch,
+ mode="export")
+
+
+builders = []
+
+
+
+b24compile_opts = [
+ "-Wignore::PendingDeprecationWarning:distutils.command.build_py",
+ "-Wignore::PendingDeprecationWarning:distutils.command.build_ext",
+ ]
+
+
+b25compile_opts = b24compile_opts # FIXME
+
+
+b1 = {'name': "quick",
+ 'slavename': "bot1",
+ 'builddir': "quick",
+ 'factory': QuickTwistedBuildFactory(source_update,
+ python=["python2.3", "python2.4"]),
+ }
+builders.append(b1)
+
+b23compile_opts = [
+ "-Wignore::PendingDeprecationWarning:distutils.command.build_py",
+ "-Wignore::PendingDeprecationWarning:distutils.command.build_ext",
+ ]
+b23 = {'name': "debian-py2.3-select",
+ 'slavename': "bot-exarkun",
+ 'builddir': "full2.3",
+ 'factory': FullTwistedBuildFactory(source_copy,
+ python=["python2.3", "-Wall"],
+ # use -Werror soon
+ compileOpts=b23compile_opts,
+ processDocs=1,
+ runTestsRandomly=1),
+ }
+builders.append(b23)
+
+b24 = {'name': "debian-py2.4-select",
+ 'slavenames': ["bot-exarkun"],
+ 'builddir': "full2.4",
+ 'factory': FullTwistedBuildFactory(source_copy,
+ python=["python2.4", "-Wall"],
+ # use -Werror soon
+ compileOpts=b24compile_opts,
+ runTestsRandomly=1),
+ }
+builders.append(b24)
+
+b24debian64 = {
+ 'name': 'debian64-py2.4-select',
+ 'slavenames': ['bot-idnar-debian64'],
+ 'builddir': 'full2.4-debian64',
+ 'factory': FullTwistedBuildFactory(source_copy,
+ python=["python2.4", "-Wall"],
+ compileOpts=b24compile_opts),
+ }
+builders.append(b24debian64)
+
+b25debian = {
+ 'name': 'debian-py2.5-select',
+ 'slavenames': ['bot-idnar-debian'],
+ 'builddir': 'full2.5-debian',
+ 'factory': FullTwistedBuildFactory(source_copy,
+ python=["python2.5", "-Wall"],
+ compileOpts=b24compile_opts)}
+builders.append(b25debian)
+
+
+b25suse = {
+ 'name': 'suse-py2.5-select',
+ 'slavenames': ['bot-scmikes-2.5'],
+ 'builddir': 'bot-scmikes-2.5',
+ 'factory': FullTwistedBuildFactory(source_copy,
+ python=["python2.5", "-Wall"],
+ compileOpts=b24compile_opts),
+ }
+builders.append(b25suse)
+
+reactors = ['poll', 'epoll', 'gtk', 'gtk2']
+b4 = {'name': "debian-py2.4-reactors",
+ 'slavename': "bot2",
+ 'builddir': "reactors",
+ 'factory': TwistedReactorsBuildFactory(source_copy,
+ python="python2.4",
+ reactors=reactors),
+ }
+builders.append(b4)
+
+bosx24 = {
+ 'name': 'osx-py2.4-select',
+ 'slavenames': ['bot-exarkun-osx'],
+ 'builddir': 'full2.4-exarkun-osx',
+ 'factory': FullTwistedBuildFactory(source_copy,
+ python=["python2.4", "-Wall"],
+ compileOpts=b24compile_opts,
+ runTestsRandomly=1)}
+builders.append(bosx24)
+
+forcegc = {
+ 'name': 'osx-py2.4-select-gc',
+ 'slavenames': ['bot-exarkun-osx'],
+ 'builddir': 'full2.4-force-gc-exarkun-osx',
+ 'factory': GoodTwistedBuildFactory(source_copy,
+ python="python2.4")}
+builders.append(forcegc)
+
+
+# debuild is offline while we figure out how to build 2.0 .debs from SVN
+# b3 = {'name': "debuild",
+# 'slavename': "bot2",
+# 'builddir': "debuild",
+# 'factory': TwistedDebsBuildFactory(source_export,
+# python="python2.4"),
+# }
+# builders.append(b3)
+
+b24w32_scmikes_select = {
+ 'name': "win32-py2.4-select",
+ 'slavename': "bot-scmikes-win32",
+ 'builddir': "W32-full2.4-scmikes-select",
+ 'factory': TwistedReactorsBuildFactory(source_copy,
+ python="python",
+ compileOpts2=["-c","mingw32"],
+ reactors=["default"]),
+ }
+builders.append(b24w32_scmikes_select)
+
+b25w32_scmikes_select = {
+ 'name': "win32-py2.5-select",
+ 'slavename': "bot-scmikes-win32-2.5",
+ 'builddir': "W32-full2.5-scmikes-select",
+ 'factory': TwistedReactorsBuildFactory(source_copy,
+ python="python",
+ compileOpts2=["-c","mingw32"],
+ reactors=["default"]),
+ }
+builders.append(b25w32_scmikes_select)
+
+b24w32_win32er = {
+ 'name': "win32-py2.4-er",
+ 'slavename': "bot-win32-win32er",
+ 'builddir': "W32-full2.4-win32er",
+ 'factory': TwistedReactorsBuildFactory(source_copy,
+ python="python",
+ compileOpts2=["-c","mingw32"],
+ reactors=["win32"]),
+ }
+builders.append(b24w32_win32er)
+
+
+b24w32_iocp = {
+ 'name': "win32-py2.4-iocp",
+ 'slavename': "bot-win32-iocp",
+ 'builddir': "W32-full2.4-iocp",
+ 'factory': TwistedReactorsBuildFactory(source_copy,
+ python="python",
+ compileOpts2=[],
+ reactors=["iocp"]),
+ }
+builders.append(b24w32_iocp)
+
+
+b24freebsd = {'name': "freebsd-py2.4-select-kq",
+ 'slavename': "bot-landonf",
+ 'builddir': "freebsd-full2.4",
+ 'factory':
+ TwistedReactorsBuildFactory(source_copy,
+ python="python2.4",
+ reactors=["default",
+ "kqueue",
+ ]),
+ }
+builders.append(b24freebsd)
+
+
+osxtsr = {'name': "osx-py2.4-tsr",
+ 'slavename': "bot-exarkun-osx",
+ 'builddir': "osx-tsr",
+ 'factory': TwistedReactorsBuildFactory(
+ source_copy,
+ python="python2.4",
+ reactors=["tsr"])}
+builders.append(osxtsr)
+
+
+bpypyc = {'name': 'osx-pypyc-select',
+ 'slavename': 'bot-jerub-pypy',
+ 'builddir': 'pypy-c',
+ 'factory': TwistedReactorsBuildFactory(source_copy,
+ python="pypy-c",
+ reactors=["default"])}
+builders.append(bpypyc)
+
+c['builders'] = builders
+
+# now set up the schedulers. We do this after setting up c['builders'] so we
+# can auto-generate a list of all of them.
+all_builders = [b['name'] for b in c['builders']]
+all_builders.sort()
+all_builders.remove("quick")
+
+## configure the schedulers
+s_quick = Scheduler(name="quick", branch=None, treeStableTimer=30,
+ builderNames=["quick"])
+s_try = Try_Userpass("try", all_builders, port=9989,
+ userpass=private.try_users)
+
+s_all = []
+for i, builderName in enumerate(all_builders):
+ s_all.append(Scheduler(name="all-" + builderName,
+ branch=None, builderNames=[builderName],
+ treeStableTimer=(5 * 60 + i * 30)))
+c['schedulers'] = [s_quick, s_try] + s_all
+
+
+
+# configure other status things
+
+c['slavePortnum'] = 9987
+c['status'] = []
+if really:
+ p = os.path.expanduser("~/.twistd-web-pb")
+ c['status'].append(html.Waterfall(distrib_port=p))
+else:
+ c['status'].append(html.Waterfall(http_port=9988))
+if really:
+ c['status'].append(words.IRC(host="irc.freenode.net",
+ nick='buildbot',
+ channels=["twisted"]))
+
+c['debugPassword'] = private.debugPassword
+#c['interlocks'] = [("do-deb", ["full-2.2"], ["debuild"])]
+if hasattr(private, "manhole"):
+ from buildbot import manhole
+ c['manhole'] = manhole.PasswordManhole(*private.manhole)
+c['status'].append(client.PBListener(9936))
+m = mail.MailNotifier(fromaddr="buildbot@twistedmatrix.com",
+ builders=["quick", "debian-py2.3-select"],
+ sendToInterestedUsers=True,
+ extraRecipients=["warner@lothar.com"],
+ mode="problem",
+ )
+c['status'].append(m)
+c['projectName'] = "Twisted"
+c['projectURL'] = "http://twistedmatrix.com/"
+c['buildbotURL'] = "http://twistedmatrix.com/buildbot/"
diff --git a/buildbot/docs/gen-reference b/buildbot/docs/gen-reference
new file mode 100644
index 0000000..1094c16
--- /dev/null
+++ b/buildbot/docs/gen-reference
@@ -0,0 +1 @@
+cd .. && python docs/epyrun -o docs/reference
diff --git a/buildbot/docs/hexnut32.png b/buildbot/docs/hexnut32.png
new file mode 100644
index 0000000..c07d4dc
--- /dev/null
+++ b/buildbot/docs/hexnut32.png
Binary files differ
diff --git a/buildbot/docs/hexnut48.png b/buildbot/docs/hexnut48.png
new file mode 100644
index 0000000..1c79c38
--- /dev/null
+++ b/buildbot/docs/hexnut48.png
Binary files differ
diff --git a/buildbot/docs/hexnut64.png b/buildbot/docs/hexnut64.png
new file mode 100644
index 0000000..101eb55
--- /dev/null
+++ b/buildbot/docs/hexnut64.png
Binary files differ
diff --git a/buildbot/docs/images/master.png b/buildbot/docs/images/master.png
new file mode 100644
index 0000000..a4167da
--- /dev/null
+++ b/buildbot/docs/images/master.png
Binary files differ
diff --git a/buildbot/docs/images/master.svg b/buildbot/docs/images/master.svg
new file mode 100644
index 0000000..40a2698
--- /dev/null
+++ b/buildbot/docs/images/master.svg
@@ -0,0 +1,508 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Sodipodi ("http://www.sodipodi.com/") -->
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://web.resource.org/cc/"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ id="svg101"
+ sodipodi:version="0.32"
+ width="500"
+ height="300"
+ sodipodi:docname="master.svg"
+ inkscape:version="0.44"
+ sodipodi:docbase="/usr/home/warner/stuff/Projects/BuildBot/trees/docs/docs/images"
+ version="1.0">
+ <metadata
+ id="metadata71">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs103">
+ <marker
+ inkscape:stockid="Arrow2Send"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow2Send"
+ style="overflow:visible;">
+ <path
+ id="path3088"
+ style="font-size:12.0;fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+ d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+ transform="scale(0.3) rotate(180) translate(-2.3,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow2Mend"
+ style="overflow:visible;">
+ <path
+ id="path3094"
+ style="font-size:12.0;fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+ d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+ transform="scale(0.6) rotate(180) translate(0,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow1Mend"
+ style="overflow:visible;">
+ <path
+ id="path3112"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none;"
+ transform="scale(0.4) rotate(180) translate(10,0)" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ showgrid="true"
+ snaptoguides="false"
+ width="500px"
+ height="300px"
+ inkscape:zoom="1"
+ inkscape:cx="250"
+ inkscape:cy="150"
+ inkscape:window-width="930"
+ inkscape:window-height="578"
+ inkscape:window-x="45"
+ inkscape:window-y="111"
+ inkscape:current-layer="svg101" />
+ <use
+ x="0"
+ y="0"
+ xlink:href="#g2850"
+ id="use3128"
+ transform="translate(-4.078128,9.375008)"
+ width="500"
+ height="300" />
+ <g
+ id="g2850"
+ transform="translate(7.8125,-84.24993)">
+ <rect
+ rx="18.750004"
+ ry="18.749989"
+ y="103.12496"
+ x="8.5937519"
+ height="40.624969"
+ width="72.656258"
+ id="rect1954"
+ style="fill:#e6e6e6;fill-opacity:1;stroke:black;stroke-width:0.99999976;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text2844"
+ y="119.53125"
+ x="45.084755"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ y="119.53125"
+ x="45.084755"
+ id="tspan2846"
+ sodipodi:role="line">Change</tspan><tspan
+ id="tspan2848"
+ y="135.53125"
+ x="45.084755"
+ sodipodi:role="line">Source</tspan></text>
+ </g>
+ <g
+ id="g2872"
+ transform="translate(8.437492,3.90625)">
+ <rect
+ style="fill:#e6e6e6;fill-opacity:1;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect2858"
+ width="72.656258"
+ height="29.624987"
+ x="99.40625"
+ y="83.875061"
+ ry="18.75"
+ rx="18.750004" />
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="136.22226"
+ y="102.82355"
+ id="text2860"
+ sodipodi:linespacing="100%"><tspan
+ y="102.82355"
+ x="136.22226"
+ id="tspan2870"
+ sodipodi:role="line">Scheduler</tspan></text>
+ </g>
+ <use
+ x="0"
+ y="0"
+ xlink:href="#g2872"
+ id="use2885"
+ transform="translate(89,0)"
+ width="500"
+ height="300" />
+ <g
+ id="g2900"
+ transform="translate(156.5076,127.5182)">
+ <rect
+ rx="18.750013"
+ ry="18.75"
+ y="140.98706"
+ x="72.414238"
+ height="29.624987"
+ width="62.656288"
+ id="rect2889"
+ style="fill:#e6e6e6;fill-opacity:1;stroke:black;stroke-width:1.00000024;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text2891"
+ y="160.82355"
+ x="103.22226"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ y="160.82355"
+ x="103.22226"
+ id="tspan2898"
+ sodipodi:role="line">Builder</tspan></text>
+ </g>
+ <g
+ transform="translate(19.00767,126.1275)"
+ id="use2905">
+ <rect
+ style="fill:#e6e6e6;fill-opacity:1;stroke:black;stroke-width:1.00000024;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3305"
+ width="62.656288"
+ height="29.624987"
+ x="72.414238"
+ y="140.98706"
+ ry="18.75"
+ rx="18.750013" />
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="103.22226"
+ y="160.82355"
+ id="text3307"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan3309"
+ x="103.22226"
+ y="160.82355">Builder</tspan></text>
+ </g>
+ <g
+ transform="translate(-52.99233,120.8125)"
+ id="use2907">
+ <rect
+ style="fill:#e6e6e6;fill-opacity:1;stroke:black;stroke-width:1.00000024;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3297"
+ width="62.656288"
+ height="29.624987"
+ x="72.414238"
+ y="140.98706"
+ ry="18.75"
+ rx="18.750013" />
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="103.22226"
+ y="160.82355"
+ id="text3299"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan3301"
+ x="103.22226"
+ y="160.82355">Builder</tspan></text>
+ </g>
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+ d="M 90.831754,40.831754 C 132.28357,41.383096 185.66074,32.793444 189.52962,61.893365 C 189.52962,61.893365 219.88078,68.249407 235.0585,86.259627"
+ id="path3181"
+ sodipodi:nodetypes="ccc" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+ d="M 188.99882,62.52888 C 188.99882,62.52888 160.5391,72.881369 145.78125,87.384479"
+ id="path3189"
+ sodipodi:nodetypes="cc" />
+ <g
+ id="g3213"
+ transform="translate(145.4062,29.875)">
+ <path
+ id="path3195"
+ d="M 83.59375,166.40625 L 83.59375,216.40625 L 146.09375,216.40625 L 146.09375,166.40625"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ id="path3205"
+ d="M 87.329101,203.10058 L 142.3584,203.10058"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 87.329099,207.28759 L 142.3584,207.28759"
+ id="path3207" />
+ <path
+ id="path3209"
+ d="M 87.329099,211.4746 L 142.3584,211.4746"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ id="path3211"
+ d="M 114.84375,181.56738 L 114.84375,201.10677"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ </g>
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 155,117 C 155,117 243,143.78851 243,203"
+ id="path3244"
+ sodipodi:nodetypes="cc" />
+ <path
+ sodipodi:nodetypes="cc"
+ id="path3246"
+ d="M 127,118 C 127,118 52,133 53,198"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <g
+ id="use3222"
+ transform="matrix(0.585953,0,0,0.585953,-15.943,58.2413)">
+ <path
+ sodipodi:type="arc"
+ style="fill:#ffc4c4;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.36707859pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ id="path3236"
+ sodipodi:cx="111.71875"
+ sodipodi:cy="217.1875"
+ sodipodi:rx="27.34375"
+ sodipodi:ry="28.125"
+ d="M 95.442041,239.78682 A 27.34375,28.125 0 1 1 96.201877,240.34538"
+ sodipodi:start="2.2083886"
+ sodipodi:end="8.4574167"
+ sodipodi:open="true"
+ transform="matrix(0.747745,0,0,0.715583,54.81875,14.55241)" />
+ <text
+ xml:space="preserve"
+ style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="138.20795"
+ y="166.51833"
+ id="text3238"
+ sodipodi:linespacing="100%"><tspan
+ y="166.51833"
+ x="138.20795"
+ id="tspan3240"
+ sodipodi:role="line">build</tspan><tspan
+ y="178.51833"
+ x="138.20795"
+ id="tspan3242"
+ sodipodi:role="line">request</tspan></text>
+ </g>
+ <g
+ id="g3265"
+ transform="translate(-68,2)">
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 176.72446,196.26159 L 176.72446,244.30079 L 204.77565,244.30079 L 204.77565,196.26159"
+ id="path3250" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 178.40096,231.51692 L 203.09915,231.51692"
+ id="path3252" />
+ <path
+ id="path3254"
+ d="M 178.40096,235.53973 L 203.09915,235.53973"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 178.40096,239.56254 L 203.09915,239.56254"
+ id="path3256" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 190.75006,210.82816 L 190.75006,229.6013"
+ id="path3258" />
+ </g>
+ <g
+ transform="translate(-140,-2)"
+ id="g3272">
+ <path
+ id="path3274"
+ d="M 176.72446,196.26159 L 176.72446,244.30079 L 204.77565,244.30079 L 204.77565,196.26159"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ id="path3276"
+ d="M 178.40096,231.51692 L 203.09915,231.51692"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 178.40096,235.53973 L 203.09915,235.53973"
+ id="path3278" />
+ <path
+ id="path3280"
+ d="M 178.40096,239.56254 L 203.09915,239.56254"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ id="path3282"
+ d="M 190.75006,210.82816 L 190.75006,229.6013"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ </g>
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 229,116 C 229,116 271,136 272,201"
+ id="path3284"
+ sodipodi:nodetypes="cc" />
+ <g
+ id="g3151"
+ transform="translate(129.5745,-12.6876)">
+ <path
+ sodipodi:type="arc"
+ style="fill:#ffc4c4;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.36707859pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ id="path3136"
+ sodipodi:cx="111.71875"
+ sodipodi:cy="217.1875"
+ sodipodi:rx="27.34375"
+ sodipodi:ry="28.125"
+ d="M 95.442041,239.78682 A 27.34375,28.125 0 1 1 96.201877,240.34538"
+ sodipodi:start="2.2083886"
+ sodipodi:end="8.4574167"
+ sodipodi:open="true"
+ transform="matrix(0.747745,0,0,0.715583,54.81875,14.55241)" />
+ <text
+ xml:space="preserve"
+ style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="138.20795"
+ y="166.51833"
+ id="text3138"
+ sodipodi:linespacing="100%"><tspan
+ y="166.51833"
+ x="138.20795"
+ id="tspan3147"
+ sodipodi:role="line">build</tspan><tspan
+ y="178.51833"
+ x="138.20795"
+ id="tspan3149"
+ sodipodi:role="line">request</tspan></text>
+ </g>
+ <g
+ transform="matrix(0.585953,0,0,0.585953,149.5842,69.56477)"
+ id="use3220">
+ <path
+ transform="matrix(0.747745,0,0,0.715583,54.81875,14.55241)"
+ sodipodi:open="true"
+ sodipodi:end="8.4574167"
+ sodipodi:start="2.2083886"
+ d="M 95.442041,239.78682 A 27.34375,28.125 0 1 1 96.201877,240.34538"
+ sodipodi:ry="28.125"
+ sodipodi:rx="27.34375"
+ sodipodi:cy="217.1875"
+ sodipodi:cx="111.71875"
+ id="path3226"
+ style="fill:#ffc4c4;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.36707859pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text3228"
+ y="166.51833"
+ x="138.20795"
+ style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ sodipodi:role="line"
+ id="tspan3230"
+ x="138.20795"
+ y="166.51833">build</tspan><tspan
+ sodipodi:role="line"
+ id="tspan3232"
+ x="138.20795"
+ y="178.51833">request</tspan></text>
+ </g>
+ <path
+ sodipodi:nodetypes="cc"
+ id="path3286"
+ d="M 211,116 C 211,116 131,135 130,200"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 143,119 C 143,119 117,136 118,201"
+ id="path3288"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 50.750055,243.89323 L 50.750055,260.03906"
+ id="path3311"
+ sodipodi:nodetypes="cc" />
+ <path
+ sodipodi:nodetypes="cc"
+ id="path3313"
+ d="M 122.75006,248.33334 L 122.75006,264.47917"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 260.24995,249.14063 L 260.24995,265.28646"
+ id="path3315"
+ sodipodi:nodetypes="cc" />
+ <g
+ id="g3332">
+ <path
+ sodipodi:type="arc"
+ style="fill:#ffc4c4;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.49475012pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ id="path3161"
+ sodipodi:cx="111.71875"
+ sodipodi:cy="217.1875"
+ sodipodi:rx="27.34375"
+ sodipodi:ry="28.125"
+ d="M 95.442041,239.78682 A 27.34375,28.125 0 1 1 96.201877,240.34538"
+ sodipodi:start="2.2083886"
+ sodipodi:end="8.4574167"
+ sodipodi:open="true"
+ transform="matrix(0.805479,0,0,0.555659,95.75979,-85.11798)" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text3328"
+ y="39.175049"
+ x="167.54092"
+ style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ y="39.175049"
+ x="167.54092"
+ id="tspan3330"
+ sodipodi:role="line">Change</tspan></text>
+ </g>
+ <g
+ id="g3345"
+ transform="matrix(0.533875,0,0,0.533875,41.79461,9.441594)">
+ <path
+ transform="matrix(0.805479,0,0,0.555659,95.75979,-85.11798)"
+ sodipodi:open="true"
+ sodipodi:end="8.4574167"
+ sodipodi:start="2.2083886"
+ d="M 95.442041,239.78682 A 27.34375,28.125 0 1 1 96.201877,240.34538"
+ sodipodi:ry="28.125"
+ sodipodi:rx="27.34375"
+ sodipodi:cy="217.1875"
+ sodipodi:cx="111.71875"
+ id="path3347"
+ style="fill:#ffc4c4;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:2.7998148pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ sodipodi:type="arc" />
+ <text
+ xml:space="preserve"
+ style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="167.54092"
+ y="39.175049"
+ id="text3349"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan3351"
+ x="167.54092"
+ y="39.175049">Change</tspan></text>
+ </g>
+ <use
+ x="0"
+ y="0"
+ xlink:href="#g3345"
+ id="use3353"
+ transform="translate(-31.35071,0)"
+ width="500"
+ height="300" />
+</svg>
diff --git a/buildbot/docs/images/master.txt b/buildbot/docs/images/master.txt
new file mode 100644
index 0000000..a8034f4
--- /dev/null
+++ b/buildbot/docs/images/master.txt
@@ -0,0 +1,34 @@
+
+
+ +---------------+
+ | Change Source |----->----+
+ +---------------+ |
+ Changes
+ |
+ +---------------+ v
+ | Change Source |----->----+
+ +---------------+ v
+ +-----+-------+
+ | |
+ v v
+ +-----------+ +-----------+
+ | Scheduler | | Scheduler |
+ +-----------+ +-----------+
+ | | |
+ +------+---------+ +---+ +-----+
+ | | | |
+ v | | Build
+ : : : v v : Request
+ : : : : |
+ : ---- : : : |
+ : ---- : : ---- : |
+ +======+ +======+ : v :
+ | | : :
+ v v : :
+ +---------+ +---------+ :queue :
+ | Builder | | Builder | +======+
+ +---------+ +---------+ |
+ v
+ +---------+
+ | Builder |
+ +---------+
diff --git a/buildbot/docs/images/overview.png b/buildbot/docs/images/overview.png
new file mode 100644
index 0000000..86189eb
--- /dev/null
+++ b/buildbot/docs/images/overview.png
Binary files differ
diff --git a/buildbot/docs/images/overview.svg b/buildbot/docs/images/overview.svg
new file mode 100644
index 0000000..bcd03ec
--- /dev/null
+++ b/buildbot/docs/images/overview.svg
@@ -0,0 +1,396 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Sodipodi ("http://www.sodipodi.com/") -->
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://web.resource.org/cc/"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ id="svg101"
+ sodipodi:version="0.32"
+ width="500"
+ height="300"
+ sodipodi:docname="overview.svg"
+ inkscape:version="0.44"
+ sodipodi:docbase="/usr/home/warner/stuff/Projects/BuildBot/trees/docs/docs/images"
+ version="1.0">
+ <metadata
+ id="metadata71">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs103" />
+ <sodipodi:namedview
+ id="base"
+ showgrid="true"
+ snaptoguides="false"
+ width="500px"
+ height="300px"
+ inkscape:zoom="1"
+ inkscape:cx="250"
+ inkscape:cy="149.027"
+ inkscape:window-width="853"
+ inkscape:window-height="578"
+ inkscape:window-x="393"
+ inkscape:window-y="177"
+ inkscape:current-layer="svg101" />
+ <path
+ sodipodi:type="arc"
+ style="font-size:12px;fill:#e6e6e6;fill-rule:evenodd;stroke:black;stroke-width:1.32591999"
+ id="path134"
+ d="M 334.57774 117.91791 A 70.437424 62.398376 0 1 1 193.7029,117.91791 A 70.437424 62.398376 0 1 1 334.57774 117.91791 z"
+ sodipodi:cx="264.14032"
+ sodipodi:cy="117.91791"
+ sodipodi:rx="70.437424"
+ sodipodi:ry="62.398376"
+ transform="matrix(0.866355,0,0,0.977968,-21.56583,-35.43663)" />
+ <text
+ style="font-size:16;font-weight:normal;stroke-width:1pt;font-family:Nimbus Roman No9 L;font-style:normal;font-stretch:normal;font-variant:normal;text-anchor:start;text-align:start;writing-mode:lr;line-height:125%"
+ x="167.34743"
+ y="51.167366"
+ id="text109"
+ sodipodi:linespacing="125%">
+ <tspan
+ x="167.34743"
+ y="51.167366"
+ sodipodi:role="line"
+ id="tspan112">BuildMaster</tspan>
+ </text>
+ <path
+ style="font-size:12px;fill:none;fill-rule:evenodd;stroke:black;stroke-width:1.62730002"
+ d="M 130.94278,218.01587 L 171.40853,129.3176"
+ id="path155"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="font-size:12px;fill:none;fill-rule:evenodd;stroke:black;stroke-width:1.62730002"
+ d="M 215.17964,208.5925 L 210.28277,141.01854"
+ id="path124" />
+ <g
+ id="g2215">
+ <path
+ sodipodi:type="arc"
+ style="font-size:12px;fill:#e6e6e6;fill-rule:evenodd;stroke:black;stroke-width:1.49836004"
+ id="path105"
+ d="M 179.3815 607.44885 A 73.16877 76.709229 0 1 1 33.043961,607.44885 A 73.16877 76.709229 0 1 1 179.3815 607.44885 z"
+ sodipodi:cx="106.21273"
+ sodipodi:cy="607.44885"
+ sodipodi:rx="73.16877"
+ sodipodi:ry="76.709229"
+ transform="matrix(0.556009,0,0,0.530347,57.1479,-66.29883)" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:16;font-style:normal;font-weight:normal;writing-mode:lr;text-anchor:start;stroke-width:1pt;font-family:Nimbus Roman No9 L;font-stretch:normal;font-variant:normal;text-align:start;line-height:125%"
+ x="98.274147"
+ y="233.58066"
+ id="text114">
+ <tspan
+ x="98.274147"
+ y="233.58066"
+ sodipodi:role="line"
+ id="tspan130">Build</tspan>
+ <tspan
+ x="98.274147"
+ y="253.58066"
+ sodipodi:role="line"
+ id="tspan132">Slave</tspan>
+ </text>
+ </g>
+ <path
+ style="font-size:12px;fill:none;fill-rule:evenodd;stroke:black;stroke-width:1.22047496"
+ d="M 259.53398,110.94929 L 306.46076,195.22325"
+ id="path183"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="font-size:12px;fill:none;fill-rule:evenodd;stroke:black;stroke-width:1.22047496"
+ d="M 305.31741,185.05459 L 306.46173,195.22261"
+ id="path185"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="font-size:12px;fill:none;fill-rule:evenodd;stroke:black;stroke-width:1.22047496"
+ d="M 255.81397,116.36431 L 303.00828,198.44011"
+ id="path187"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="font-size:12px;fill:none;fill-rule:evenodd;stroke:black;stroke-width:1.22047496"
+ d="M 257.98167,128.59521 L 255.8141,116.36442"
+ id="path188"
+ sodipodi:nodetypes="cc" />
+ <text
+ style="font-size:12.00000501;font-weight:normal;stroke-width:1pt;font-family:Nimbus Roman No9 L;font-style:normal;font-stretch:normal;font-variant:normal;text-anchor:start;text-align:start;writing-mode:lr;line-height:125%"
+ x="225.89719"
+ y="-172.93504"
+ id="text189"
+ transform="matrix(0.495343,0.868697,-0.868697,0.495343,0,0)"
+ sodipodi:linespacing="125%">
+ <tspan
+ x="225.89719"
+ y="-172.93504"
+ sodipodi:role="line"
+ id="tspan197">Commands</tspan>
+ </text>
+ <rect
+ style="font-size:12px;fill:#e6e6e6;fill-rule:evenodd;stroke:black;stroke-width:0.81365001;stroke-dasharray:none"
+ id="rect157"
+ width="58.8563"
+ height="84.86161"
+ x="20.406824"
+ y="86.368195" />
+ <text
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;stroke-width:1pt;font-family:Nimbus Roman No9 L"
+ x="31.440001"
+ y="103.20953"
+ id="text161"
+ sodipodi:linespacing="125%">
+ <tspan
+ id="tspan162"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Nimbus Roman No9 L">CVS</tspan>
+ </text>
+ <path
+ style="font-size:12px;fill:none;fill-rule:evenodd;stroke:black;stroke-width:0.65092001pt"
+ d="M 146.585,86.837627 L 79.044239,105.19032"
+ id="path170"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="font-size:12px;fill:none;fill-rule:evenodd;stroke:black;stroke-width:0.65092001pt"
+ d="M 138.11006,85.14806 L 146.58504,86.837849"
+ id="path171"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="font-size:12px;fill:none;fill-rule:evenodd;stroke:black;stroke-width:0.65092001pt"
+ d="M 139.97169,92.942189 L 146.58504,86.837862"
+ id="path172"
+ sodipodi:nodetypes="cc" />
+ <text
+ style="font-size:12.0000001;font-weight:normal;stroke-width:1pt;font-family:Nimbus Roman No9 L;font-style:normal;font-stretch:normal;font-variant:normal;text-anchor:start;text-align:start;writing-mode:lr;line-height:125%"
+ x="53.389557"
+ y="119.39153"
+ id="text175"
+ transform="matrix(0.962657,-0.270724,0.270724,0.962657,0,0)"
+ sodipodi:linespacing="125%">
+ <tspan
+ x="53.389557"
+ y="119.39153"
+ sodipodi:role="line"
+ id="tspan178">Changes</tspan>
+ </text>
+ <g
+ id="g2258">
+ <rect
+ style="font-size:12px;fill:#e6e6e6;fill-rule:evenodd;stroke:black;stroke-width:0.81365001;stroke-dasharray:none"
+ id="rect318"
+ width="63.643059"
+ height="37.788033"
+ x="301.17245"
+ y="10.073251" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:12;font-style:normal;font-weight:normal;writing-mode:lr;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L;font-stretch:normal;font-variant:normal;text-align:start;line-height:125%"
+ x="311.77988"
+ y="25.983994"
+ id="text321">
+ <tspan
+ style="font-family:Nimbus Roman No9 L;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;font-size:12;text-anchor:start;text-align:start;writing-mode:lr;line-height:125%"
+ id="tspan322">Browser</tspan>
+ </text>
+ </g>
+ <g
+ id="g2274">
+ <rect
+ style="font-size:12;fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:0.65092001pt;stroke-opacity:1;font-family:Nimbus Roman No9 L;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;text-anchor:start;text-align:start;writing-mode:lr;line-height:125%"
+ id="rect319"
+ width="80.87941"
+ height="40.439838"
+ x="402.60297"
+ y="82.334839" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:12;font-style:normal;font-weight:normal;writing-mode:lr;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L;font-stretch:normal;font-variant:normal;text-align:start;line-height:125%"
+ x="413.87363"
+ y="95.59343"
+ id="text324">
+ <tspan
+ style="font-family:Nimbus Roman No9 L;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;font-size:12;text-anchor:start;text-align:start;writing-mode:lr;line-height:125%"
+ id="tspan325">Status Client</tspan>
+ </text>
+ </g>
+ <g
+ id="g2282">
+ <rect
+ style="font-size:12px;fill:#e6e6e6;fill-rule:evenodd;stroke:black;stroke-width:0.81365001;stroke-dasharray:none"
+ id="rect320"
+ width="66.295143"
+ height="37.125122"
+ x="419.17731"
+ y="130.72952" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:12;font-style:normal;font-weight:normal;writing-mode:lr;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L;font-stretch:normal;font-variant:normal;text-align:start;line-height:125%"
+ x="442.38065"
+ y="144.65211"
+ id="text327">
+ <tspan
+ style="font-family:Nimbus Roman No9 L;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;font-size:12;text-anchor:start;text-align:start;writing-mode:lr;line-height:125%"
+ id="tspan328">IRC</tspan>
+ </text>
+ </g>
+ <g
+ id="g2266">
+ <rect
+ style="font-size:12px;fill:#e6e6e6;fill-rule:evenodd;stroke:black;stroke-width:0.81365001;stroke-dasharray:none"
+ id="rect333"
+ width="54.361813"
+ height="30.495623"
+ x="370.11908"
+ y="45.209362" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:12;font-style:normal;font-weight:normal;writing-mode:lr;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L;font-stretch:normal;font-variant:normal;text-align:start;line-height:125%"
+ x="382.71585"
+ y="57.142616"
+ id="text330">
+ <tspan
+ style="font-family:Nimbus Roman No9 L;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;font-size:12;text-anchor:start;text-align:start;writing-mode:lr;line-height:125%"
+ id="tspan331">email</tspan>
+ </text>
+ </g>
+ <path
+ style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.22047496;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
+ d="M 259.40705,48.524307 L 301.13558,35.710426"
+ id="path355"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.22047496;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
+ d="M 294.29786,42.033658 L 301.13577,35.955498"
+ id="path356"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.22047496;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
+ d="M 292.28466,34.44018 L 301.10853,35.519796"
+ id="path357"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.22047496;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
+ d="M 262.72153,55.153797 L 369.45614,61.120325"
+ id="path358" />
+ <path
+ style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.22047496;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
+ d="M 360.8705,65.81157 L 369.45614,61.120325"
+ id="path359"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.22047496;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
+ d="M 360.30681,55.2694 L 369.45614,61.120325"
+ id="path360"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.22047496;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
+ d="M 268.02523,69.075934 L 401.94095,90.952704"
+ id="path361" />
+ <path
+ style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.22047496;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
+ d="M 391.43315,95.196703 L 401.94095,90.952704"
+ id="path362"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.22047496;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
+ d="M 392.39391,84.340008 L 401.94095,90.952704"
+ id="path364"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.22047496;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
+ d="M 268.02523,80.345963 L 418.51468,136.03347"
+ id="path365" />
+ <path
+ style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.22047496;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
+ d="M 410.57541,128.47499 L 418.51468,136.03347"
+ id="path366"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.22047496;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
+ d="M 407.82462,136.84647 L 418.51468,136.03347"
+ id="path367"
+ sodipodi:nodetypes="cc" />
+ <text
+ style="font-size:11.99999927;font-style:normal;font-weight:normal;writing-mode:lr;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L;font-stretch:normal;font-variant:normal;text-align:start;line-height:125%"
+ x="302.55466"
+ y="21.314074"
+ id="text368"
+ transform="matrix(0.976454,0.215726,-0.215726,0.976454,0,0)"
+ sodipodi:linespacing="125%">
+ <tspan
+ id="tspan369"
+ style="font-family:Nimbus Roman No9 L;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;font-size:11.99999927;text-anchor:start;text-align:start;writing-mode:lr;line-height:125%">Build Status</tspan>
+ </text>
+ <text
+ style="font-size:11.99999717;font-style:normal;font-weight:normal;writing-mode:lr;text-anchor:start;stroke-width:1pt;font-family:Nimbus Roman No9 L;font-stretch:normal;font-variant:normal;text-align:start;line-height:125%"
+ x="-318.30679"
+ y="161.43933"
+ id="text196"
+ transform="matrix(-0.498825,-0.866703,0.866703,-0.498825,0,0)"
+ sodipodi:linespacing="125%">
+ <tspan
+ x="-318.30679"
+ y="161.43933"
+ sodipodi:role="line"
+ id="tspan198">Results</tspan>
+ </text>
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="32"
+ y="122.77704"
+ id="text2199"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan2201"
+ x="32"
+ y="122.77704">SVN</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="30"
+ y="143.08051"
+ id="text2203"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan2205"
+ x="30"
+ y="143.08051">Darcs</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="29.695999"
+ y="161"
+ id="text2207"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan2209"
+ x="29.695999"
+ y="161">.. etc</tspan></text>
+ <use
+ x="0"
+ y="0"
+ xlink:href="#g2215"
+ id="use2251"
+ transform="translate(109,-9)"
+ width="500"
+ height="300" />
+ <use
+ x="0"
+ y="0"
+ xlink:href="#use2251"
+ id="use2253"
+ transform="translate(101,-17)"
+ width="500"
+ height="300" />
+</svg>
diff --git a/buildbot/docs/images/overview.txt b/buildbot/docs/images/overview.txt
new file mode 100644
index 0000000..29f03fc
--- /dev/null
+++ b/buildbot/docs/images/overview.txt
@@ -0,0 +1,23 @@
+
+ +------------------+ +-----------+
+ | |---------->| Browser |
+ | BuildMaster | +-----------+
+ Changes | |--------------->+--------+
+ +----------->| | Build Status | email |
+ | | |------------+ +--------+
+ | | |-------+ | +---------------+
+ | +------------------+ | +---->| Status Client |
++----------+ | ^ | ^ | +---------------+
+| Change | | | C| | | +-----+
+| Sources | | | o| | +------------>| IRC |
+| | | | m| |R +-----+
+| CVS | v | m| |e
+| SVN | +---------+ a| |s
+| Darcs | | Build | n| |u
+| .. etc | | Slave | d| |l
+| | +---------+ s| |t
+| | v |s
++----------+ +---------+
+ | Build |
+ | Slave |
+ +---------+
diff --git a/buildbot/docs/images/slavebuilder.png b/buildbot/docs/images/slavebuilder.png
new file mode 100644
index 0000000..5655d18
--- /dev/null
+++ b/buildbot/docs/images/slavebuilder.png
Binary files differ
diff --git a/buildbot/docs/images/slavebuilder.svg b/buildbot/docs/images/slavebuilder.svg
new file mode 100644
index 0000000..b04f767
--- /dev/null
+++ b/buildbot/docs/images/slavebuilder.svg
@@ -0,0 +1,593 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Sodipodi ("http://www.sodipodi.com/") -->
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://web.resource.org/cc/"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ id="svg101"
+ sodipodi:version="0.32"
+ width="500"
+ height="300"
+ sodipodi:docname="slavebuilder.svg"
+ inkscape:version="0.44"
+ sodipodi:docbase="/usr/home/warner/stuff/Projects/BuildBot/trees/docs/docs/images"
+ version="1.0">
+ <metadata
+ id="metadata71">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs103">
+ <marker
+ inkscape:stockid="Arrow2Send"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow2Send"
+ style="overflow:visible;">
+ <path
+ id="path3088"
+ style="font-size:12.0;fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+ d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+ transform="scale(0.3) rotate(180) translate(-2.3,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow2Mend"
+ style="overflow:visible;">
+ <path
+ id="path3094"
+ style="font-size:12.0;fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+ d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+ transform="scale(0.6) rotate(180) translate(0,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow1Mend"
+ style="overflow:visible;">
+ <path
+ id="path3112"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none;"
+ transform="scale(0.4) rotate(180) translate(10,0)" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ showgrid="true"
+ snaptoguides="false"
+ width="500px"
+ height="300px"
+ inkscape:zoom="1.27"
+ inkscape:cx="250"
+ inkscape:cy="150"
+ inkscape:window-width="853"
+ inkscape:window-height="578"
+ inkscape:window-x="16"
+ inkscape:window-y="93"
+ inkscape:current-layer="svg101" />
+ <g
+ id="g2900"
+ transform="translate(40.15327,-62.11205)">
+ <rect
+ rx="18.750013"
+ ry="18.75"
+ y="140.98706"
+ x="72.414238"
+ height="29.624987"
+ width="62.656288"
+ id="rect2889"
+ style="fill:#e6e6e6;fill-opacity:1;stroke:black;stroke-width:1.00000024;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text2891"
+ y="160.82355"
+ x="103.22226"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ y="160.82355"
+ x="103.22226"
+ id="tspan2898"
+ sodipodi:role="line">Builder</tspan></text>
+ </g>
+ <g
+ transform="translate(-65.57053,-24.70747)"
+ id="use2905">
+ <rect
+ style="fill:#e6e6e6;fill-opacity:1;stroke:black;stroke-width:1.00000024;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3305"
+ width="62.656288"
+ height="29.624987"
+ x="72.414238"
+ y="140.98706"
+ ry="18.75"
+ rx="18.750013" />
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="103.22226"
+ y="160.82355"
+ id="text3307"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan3309"
+ x="103.22226"
+ y="160.82355">Builder</tspan></text>
+ </g>
+ <g
+ id="g3213"
+ transform="translate(29.05187,-159.7552)">
+ <path
+ id="path3195"
+ d="M 83.59375,166.40625 L 83.59375,216.40625 L 146.09375,216.40625 L 146.09375,166.40625"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ id="path3205"
+ d="M 87.329101,203.10058 L 142.3584,203.10058"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 87.329099,207.28759 L 142.3584,207.28759"
+ id="path3207" />
+ <path
+ id="path3209"
+ d="M 87.329099,211.4746 L 142.3584,211.4746"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ id="path3211"
+ d="M 114.84375,181.56738 L 114.84375,201.10677"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ </g>
+ <g
+ id="g3265"
+ transform="translate(-152.5782,-148.8349)">
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 176.72446,196.26159 L 176.72446,244.30079 L 204.77565,244.30079 L 204.77565,196.26159"
+ id="path3250" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 178.40096,231.51692 L 203.09915,231.51692"
+ id="path3252" />
+ <path
+ id="path3254"
+ d="M 178.40096,235.53973 L 203.09915,235.53973"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 178.40096,239.56254 L 203.09915,239.56254"
+ id="path3256" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 190.75006,210.82816 L 190.75006,229.6013"
+ id="path3258" />
+ </g>
+ <g
+ transform="matrix(0.585953,0,0,0.585953,78.6865,-72.7587)"
+ id="use3220">
+ <path
+ transform="matrix(0.747745,0,0,0.715583,54.81875,14.55241)"
+ sodipodi:open="true"
+ sodipodi:end="8.4574167"
+ sodipodi:start="2.2083886"
+ d="M 95.442041,239.78682 A 27.34375,28.125 0 1 1 96.201877,240.34538"
+ sodipodi:ry="28.125"
+ sodipodi:rx="27.34375"
+ sodipodi:cy="217.1875"
+ sodipodi:cx="111.71875"
+ id="path3226"
+ style="fill:#ffc4c4;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.36707859pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text3228"
+ y="166.51833"
+ x="138.20795"
+ style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ sodipodi:role="line"
+ id="tspan3230"
+ x="138.20795"
+ y="166.51833">build</tspan><tspan
+ sodipodi:role="line"
+ id="tspan3232"
+ x="138.20795"
+ y="178.51833">request</tspan></text>
+ </g>
+ <path
+ sodipodi:nodetypes="cc"
+ id="path3313"
+ d="M 38.171863,97.498369 L 38.171863,113.6442"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 143.89562,59.510383 L 143.89562,75.656213"
+ id="path3315"
+ sodipodi:nodetypes="cc" />
+ <use
+ x="0"
+ y="0"
+ xlink:href="#use3220"
+ id="use3688"
+ transform="translate(-29,-11)"
+ width="500"
+ height="300" />
+ <g
+ id="use3690"
+ transform="matrix(0.396106,0,0,0.396106,-12.37326,-14.63467)">
+ <path
+ sodipodi:type="arc"
+ style="fill:#ffc4c4;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.36707859pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ id="path3694"
+ sodipodi:cx="111.71875"
+ sodipodi:cy="217.1875"
+ sodipodi:rx="27.34375"
+ sodipodi:ry="28.125"
+ d="M 95.442041,239.78682 A 27.34375,28.125 0 1 1 96.201877,240.34538"
+ sodipodi:start="2.2083886"
+ sodipodi:end="8.4574167"
+ sodipodi:open="true"
+ transform="matrix(0.747745,0,0,0.715583,54.81875,14.55241)" />
+ <text
+ xml:space="preserve"
+ style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="138.20795"
+ y="166.51833"
+ id="text3696"
+ sodipodi:linespacing="100%"><tspan
+ y="166.51833"
+ x="138.20795"
+ id="tspan3698"
+ sodipodi:role="line">build</tspan><tspan
+ y="178.51833"
+ x="138.20795"
+ id="tspan3700"
+ sodipodi:role="line">request</tspan></text>
+ </g>
+ <path
+ sodipodi:type="arc"
+ style="fill:#ebc7ff;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none"
+ id="path3702"
+ sodipodi:cx="147.5"
+ sodipodi:cy="311.5"
+ sodipodi:rx="109.5"
+ sodipodi:ry="109.5"
+ d="M 41.731121,283.15932 A 109.5,109.5 0 0 1 253.26888,283.15931"
+ sodipodi:start="3.403392"
+ sodipodi:end="6.0213859"
+ transform="translate(-26.82926,17.79048)"
+ sodipodi:open="true" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 143.94488,113.01158 L 160.09744,241.00715"
+ id="path2946"
+ sodipodi:nodetypes="cc" />
+ <g
+ transform="translate(52.3417,-17.27854)"
+ id="use2942">
+ <path
+ sodipodi:type="arc"
+ style="fill:#ffc4c4;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.80011616pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ id="path3365"
+ sodipodi:cx="111.71875"
+ sodipodi:cy="217.1875"
+ sodipodi:rx="27.34375"
+ sodipodi:ry="28.125"
+ d="M 95.442041,239.78682 A 27.34375,28.125 0 1 1 96.201877,240.34538"
+ sodipodi:start="2.2083886"
+ sodipodi:end="8.4574167"
+ sodipodi:open="true"
+ transform="matrix(0.55538,0,0,0.555659,41.42196,95.56953)" />
+ <text
+ xml:space="preserve"
+ style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="91.230194"
+ y="219.43657"
+ id="text3367"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan3369"
+ x="91.230194"
+ y="219.43657">build</tspan></text>
+ </g>
+ <g
+ id="g3714"
+ transform="translate(-26.77165,-5.02953)">
+ <path
+ transform="translate(33.85827,-40.16241)"
+ d="M 536.22047 226.37796 A 138.18898 138.18898 0 1 1 259.84251,226.37796 A 138.18898 138.18898 0 1 1 536.22047 226.37796 z"
+ sodipodi:ry="138.18898"
+ sodipodi:rx="138.18898"
+ sodipodi:cy="226.37796"
+ sodipodi:cx="398.03149"
+ id="path3704"
+ style="fill:#ebc7ff;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text3706"
+ y="74.015747"
+ x="402.36221"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ y="74.015747"
+ x="402.36221"
+ id="tspan3708"
+ sodipodi:role="line">BuildSlave</tspan></text>
+ </g>
+ <text
+ sodipodi:linespacing="100%"
+ id="text3710"
+ y="295.2756"
+ x="87.401581"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ y="295.2756"
+ x="87.401581"
+ id="tspan3712"
+ sodipodi:role="line">BuildSlave</tspan></text>
+ <g
+ id="g3725"
+ transform="translate(22.04724,-3.937008)">
+ <rect
+ ry="25.19685"
+ y="100"
+ x="316.53543"
+ height="81.102364"
+ width="122.83465"
+ id="rect3719"
+ style="fill:#e6e6e6;fill-opacity:1;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ rx="25.19685" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text3721"
+ y="115.74802"
+ x="341.83472"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ y="115.74802"
+ x="341.83472"
+ id="tspan3723"
+ sodipodi:role="line">SlaveBuilder</tspan></text>
+ </g>
+ <g
+ id="g3730"
+ transform="translate(0.787406,98.42518)">
+ <rect
+ style="fill:#e6e6e6;fill-opacity:1;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3732"
+ width="122.83465"
+ height="81.102364"
+ x="316.53543"
+ y="100"
+ ry="25.19685"
+ rx="25.19685" />
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="341.83472"
+ y="115.74802"
+ id="text3734"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan3736"
+ x="341.83472"
+ y="115.74802">SlaveBuilder</tspan></text>
+ </g>
+ <g
+ id="g3749">
+ <rect
+ rx="11.023631"
+ ry="11.023631"
+ y="245.72591"
+ x="136.43611"
+ height="34.705647"
+ width="52.563892"
+ id="rect3740"
+ style="fill:#e6e6e6;fill-opacity:1;stroke:black;stroke-width:2.00000072;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text3742"
+ y="256.62399"
+ x="142.07918"
+ style="font-size:8px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ y="256.62399"
+ x="142.07918"
+ id="tspan3744"
+ sodipodi:role="line">SlaveBuilder</tspan></text>
+ </g>
+ <g
+ id="g3754"
+ transform="translate(-79.52756,-0.787402)">
+ <rect
+ style="fill:#e6e6e6;fill-opacity:1;stroke:black;stroke-width:2.00000072;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3756"
+ width="52.563892"
+ height="34.705647"
+ x="136.43611"
+ y="245.72591"
+ ry="11.023631"
+ rx="11.023631" />
+ <text
+ xml:space="preserve"
+ style="font-size:8px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="142.07918"
+ y="256.62399"
+ id="text3758"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan3760"
+ x="142.07918"
+ y="256.62399">SlaveBuilder</tspan></text>
+ </g>
+ <path
+ sodipodi:nodetypes="cc"
+ id="path3764"
+ d="M 154.9685,111.43678 C 154.9685,111.43678 237.26279,212.66069 317.57776,218.1725"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <g
+ id="g2937"
+ transform="translate(84.2399,-71.0155)">
+ <path
+ transform="matrix(0.55538,0,0,0.555659,41.42196,95.56953)"
+ sodipodi:open="true"
+ sodipodi:end="8.4574167"
+ sodipodi:start="2.2083886"
+ d="M 95.442041,239.78682 A 27.34375,28.125 0 1 1 96.201877,240.34538"
+ sodipodi:ry="28.125"
+ sodipodi:rx="27.34375"
+ sodipodi:cy="217.1875"
+ sodipodi:cx="111.71875"
+ id="path2923"
+ style="fill:#ffc4c4;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.80011616pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text2925"
+ y="219.43657"
+ x="91.230194"
+ style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ y="219.43657"
+ x="91.230194"
+ id="tspan2927"
+ sodipodi:role="line">build</tspan></text>
+ </g>
+ <g
+ transform="translate(143.7911,-24.9589)"
+ id="use2944">
+ <path
+ sodipodi:type="arc"
+ style="fill:#ffc4c4;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.80011616pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ id="path3357"
+ sodipodi:cx="111.71875"
+ sodipodi:cy="217.1875"
+ sodipodi:rx="27.34375"
+ sodipodi:ry="28.125"
+ d="M 95.442041,239.78682 A 27.34375,28.125 0 1 1 96.201877,240.34538"
+ sodipodi:start="2.2083886"
+ sodipodi:end="8.4574167"
+ sodipodi:open="true"
+ transform="matrix(0.55538,0,0,0.555659,41.42196,95.56953)" />
+ <text
+ xml:space="preserve"
+ style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="91.230194"
+ y="219.43657"
+ id="text3359"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan3361"
+ x="91.230194"
+ y="219.43657">build</tspan></text>
+ </g>
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 39.220471,148.44465 L 81.357287,242.58195"
+ id="path3766"
+ sodipodi:nodetypes="cc" />
+ <g
+ transform="translate(-54.34278,-20.6218)"
+ id="g3768">
+ <path
+ sodipodi:type="arc"
+ style="fill:#ffc4c4;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.80011616pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ id="path3770"
+ sodipodi:cx="111.71875"
+ sodipodi:cy="217.1875"
+ sodipodi:rx="27.34375"
+ sodipodi:ry="28.125"
+ d="M 95.442041,239.78682 A 27.34375,28.125 0 1 1 96.201877,240.34538"
+ sodipodi:start="2.2083886"
+ sodipodi:end="8.4574167"
+ sodipodi:open="true"
+ transform="matrix(0.55538,0,0,0.555659,41.42196,95.56953)" />
+ <text
+ xml:space="preserve"
+ style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="91.230194"
+ y="219.43657"
+ id="text3772"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan3774"
+ x="91.230194"
+ y="219.43657">build</tspan></text>
+ </g>
+ <g
+ id="g3776"
+ transform="matrix(0.7155,0,0,0.7155,159.6135,-98.12468)">
+ <rect
+ rx="18.750013"
+ ry="18.75"
+ y="140.98706"
+ x="72.414238"
+ height="29.624987"
+ width="62.656288"
+ id="rect3778"
+ style="fill:#e6e6e6;fill-opacity:1;stroke:black;stroke-width:1.39762509;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text3780"
+ y="160.82355"
+ x="103.22226"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ y="160.82355"
+ x="103.22226"
+ id="tspan3782"
+ sodipodi:role="line">Builder</tspan></text>
+ </g>
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 233.70865,31.12182 C 233.70865,31.12182 254.58562,121.3221 334.90059,126.83391"
+ id="path3784"
+ sodipodi:nodetypes="cc" />
+ <g
+ transform="translate(148.0194,-141.0942)"
+ id="g3786">
+ <path
+ sodipodi:type="arc"
+ style="fill:#ffc4c4;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.80011616pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ id="path3788"
+ sodipodi:cx="111.71875"
+ sodipodi:cy="217.1875"
+ sodipodi:rx="27.34375"
+ sodipodi:ry="28.125"
+ d="M 95.442041,239.78682 A 27.34375,28.125 0 1 1 96.201877,240.34538"
+ sodipodi:start="2.2083886"
+ sodipodi:end="8.4574167"
+ sodipodi:open="true"
+ transform="matrix(0.55538,0,0,0.555659,41.42196,95.56953)" />
+ <text
+ xml:space="preserve"
+ style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="91.230194"
+ y="219.43657"
+ id="text3790"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan3792"
+ x="91.230194"
+ y="219.43657">build</tspan></text>
+ </g>
+</svg>
diff --git a/buildbot/docs/images/slavebuilder.txt b/buildbot/docs/images/slavebuilder.txt
new file mode 100644
index 0000000..2b892ca
--- /dev/null
+++ b/buildbot/docs/images/slavebuilder.txt
@@ -0,0 +1,31 @@
+
+
+ +-----------------+
+ | BuildSlave |
+ | |
+ | |
+ +-------+ | +------------+ |
+ |Builder|----Build----->|SlaveBuilder| |
+ +-------+ | +------------+ |
+ | |
+ | +------------+ |
+ +-Build---->|SlaveBuilder| |
+ | | +------------+ |
+ +-------+ | | |
+ |Builder|---+ +-----------------+
+ +-------+ |
+ |
+ | +-----------------+
+ Build | BuildSlave |
+ | | |
+ | | |
+ | | +------------+ |
+ +------->|SlaveBuilder| |
+ | +------------+ |
+ +-------+ | |
+ |Builder|--+ | +------------+ |
+ +-------+ +-------->|SlaveBuilder| |
+ | +------------+ |
+ | |
+ +-----------------+
+
diff --git a/buildbot/docs/images/slaves.png b/buildbot/docs/images/slaves.png
new file mode 100644
index 0000000..4303e5b
--- /dev/null
+++ b/buildbot/docs/images/slaves.png
Binary files differ
diff --git a/buildbot/docs/images/slaves.svg b/buildbot/docs/images/slaves.svg
new file mode 100644
index 0000000..d1442c7
--- /dev/null
+++ b/buildbot/docs/images/slaves.svg
@@ -0,0 +1,336 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Sodipodi ("http://www.sodipodi.com/") -->
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://web.resource.org/cc/"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ id="svg101"
+ sodipodi:version="0.32"
+ width="500"
+ height="300"
+ sodipodi:docname="slaves.svg"
+ inkscape:version="0.44"
+ sodipodi:docbase="/usr/home/warner/stuff/Projects/BuildBot/trees/docs/docs/images"
+ version="1.0">
+ <metadata
+ id="metadata71">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs103">
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow1Mend"
+ style="overflow:visible;">
+ <path
+ id="path3555"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none;"
+ transform="scale(0.4) rotate(180) translate(10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mstart"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow1Mstart"
+ style="overflow:visible">
+ <path
+ id="path3558"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none"
+ transform="scale(0.4) translate(10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow1Lend"
+ style="overflow:visible;">
+ <path
+ id="path3561"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none;"
+ transform="scale(0.8) rotate(180) translate(12.5,0)" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ showgrid="true"
+ snaptoguides="false"
+ width="500px"
+ height="300px"
+ inkscape:zoom="1.28"
+ inkscape:cx="250"
+ inkscape:cy="150"
+ inkscape:window-width="853"
+ inkscape:window-height="578"
+ inkscape:window-x="336"
+ inkscape:window-y="154"
+ inkscape:current-layer="svg101"
+ showguides="false"
+ inkscape:grid-points="false"
+ inkscape:object-nodes="false"
+ gridtolerance="50" />
+ <path
+ style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:6,6;stroke-dashoffset:0"
+ d="M 440,170 L 361,108 L 196.5,131.1875 L 143,249 L 160,290"
+ id="path2492"
+ sodipodi:nodetypes="ccccc" />
+ <path
+ style="fill:#e9afaf;fill-opacity:0.50196081;fill-rule:evenodd;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none"
+ d="M 260,220 L 97.107874,105.01355 L 109.43903,91.911698 L 71.67486,101.93076 L 69.362767,138.15354 L 77.069742,128.90517 L 245.08178,247.59257 L 260,220 z "
+ id="path4496"
+ sodipodi:nodetypes="ccccccc" />
+ <path
+ style="fill:#e9afaf;fill-opacity:0.50196081;fill-rule:evenodd;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none"
+ d="M 312.90315,182.85399 L 257.41293,58.771709 L 289.78223,54.147524 L 213.48318,34.880089 L 180.34319,101.93076 L 201.92272,81.892631 L 259.72503,209.0577 L 312.90315,182.85399 z "
+ id="path4509" />
+ <path
+ sodipodi:type="arc"
+ style="font-size:12px;fill:#e6e6e6;fill-rule:evenodd;stroke:black;stroke-width:1.32591999"
+ id="path134"
+ d="M 334.57774,117.91791 A 70.437424,62.398376 0 0 1 212.19462,160.06031"
+ sodipodi:cx="264.14032"
+ sodipodi:cy="117.91791"
+ sodipodi:rx="70.437424"
+ sodipodi:ry="62.398376"
+ transform="matrix(0.812994,0.299353,-0.337919,0.917732,17.22677,-208.2934)"
+ sodipodi:start="0"
+ sodipodi:end="2.4001174"
+ sodipodi:open="true" />
+ <text
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;stroke-width:1pt;font-family:Nimbus Roman No9 L"
+ x="153.34743"
+ y="18.167366"
+ id="text109"
+ sodipodi:linespacing="125%">
+ <tspan
+ x="153.34743"
+ y="18.167366"
+ sodipodi:role="line"
+ id="tspan112">BuildMaster</tspan>
+ </text>
+ <g
+ id="g2215"
+ transform="translate(193.7969,-25.85985)">
+ <path
+ sodipodi:type="arc"
+ style="font-size:12px;fill:#e6e6e6;fill-rule:evenodd;stroke:black;stroke-width:1.49836004"
+ id="path105"
+ d="M 179.3815 607.44885 A 73.16877 76.709229 0 1 1 33.043961,607.44885 A 73.16877 76.709229 0 1 1 179.3815 607.44885 z"
+ sodipodi:cx="106.21273"
+ sodipodi:cy="607.44885"
+ sodipodi:rx="73.16877"
+ sodipodi:ry="76.709229"
+ transform="matrix(0.556009,0,0,0.530347,57.1479,-66.29883)" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;stroke-width:1pt;font-family:Nimbus Roman No9 L"
+ x="98.274147"
+ y="233.58066"
+ id="text114">
+ <tspan
+ x="98.274147"
+ y="233.58066"
+ sodipodi:role="line"
+ id="tspan130">Build</tspan>
+ <tspan
+ x="98.274147"
+ y="253.58066"
+ sodipodi:role="line"
+ id="tspan132">Slave</tspan>
+ </text>
+ </g>
+ <rect
+ style="font-size:12px;fill:#e6e6e6;fill-rule:evenodd;stroke:black;stroke-width:0.81365001;stroke-dasharray:none"
+ id="rect157"
+ width="58.8563"
+ height="84.86161"
+ x="15.406824"
+ y="19.368195" />
+ <text
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;stroke-width:1pt;font-family:Nimbus Roman No9 L"
+ x="26.440001"
+ y="36.209534"
+ id="text161"
+ sodipodi:linespacing="125%">
+ <tspan
+ id="tspan162"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Nimbus Roman No9 L">CVS</tspan>
+ </text>
+ <path
+ style="font-size:12px;fill:none;fill-rule:evenodd;stroke:black;stroke-width:0.65092001pt"
+ d="M 141.585,19.837627 L 74.044239,38.19032"
+ id="path170"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="font-size:12px;fill:none;fill-rule:evenodd;stroke:black;stroke-width:0.65092001pt"
+ d="M 133.11006,18.14806 L 141.58504,19.837849"
+ id="path171"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="font-size:12px;fill:none;fill-rule:evenodd;stroke:black;stroke-width:0.65092001pt"
+ d="M 134.97169,25.942189 L 141.58504,19.837862"
+ id="path172"
+ sodipodi:nodetypes="cc" />
+ <text
+ style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;stroke-width:1pt;font-family:Nimbus Roman No9 L"
+ x="66.714783"
+ y="53.539894"
+ id="text175"
+ transform="matrix(0.962657,-0.270724,0.270724,0.962657,0,0)"
+ sodipodi:linespacing="125%">
+ <tspan
+ x="66.714783"
+ y="53.539894"
+ sodipodi:role="line"
+ id="tspan178">Changes</tspan>
+ </text>
+ <path
+ style="font-size:12px;fill:none;fill-rule:evenodd;stroke:black;stroke-width:1.22047448;marker-start:none;marker-end:url(#Arrow1Mend)"
+ d="M 235.85484,77.079632 L 286.1293,192.03176"
+ id="path183"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="font-size:12px;fill:none;fill-rule:evenodd;stroke:black;stroke-width:1.22047448;marker-start:url(#Arrow1Mstart);marker-end:none"
+ d="M 218.02941,47.040953 L 270.63285,168.37662"
+ id="path187"
+ sodipodi:nodetypes="cc" />
+ <text
+ style="font-size:12.00000381px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;stroke-width:1pt;font-family:Nimbus Roman No9 L"
+ x="-189.68027"
+ y="175.41019"
+ id="text196"
+ transform="matrix(-0.412356,-0.911023,0.911023,-0.412356,0,0)"
+ sodipodi:linespacing="125%">
+ <tspan
+ x="-189.68027"
+ y="175.41019"
+ sodipodi:role="line"
+ id="tspan198">Results</tspan>
+ </text>
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="27"
+ y="55.777039"
+ id="text2199"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan2201"
+ x="27"
+ y="55.777039">SVN</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="25"
+ y="76.080505"
+ id="text2203"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan2205"
+ x="25"
+ y="76.080505">Darcs</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="24.695999"
+ y="94"
+ id="text2207"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan2209"
+ x="24.695999"
+ y="94">.. etc</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="9.7895994"
+ y="14.21875"
+ id="text2474"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan2476"
+ x="9.7895994"
+ y="14.21875">Repository</tspan></text>
+ <text
+ style="font-size:11.99998665px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;stroke-width:1pt;font-family:Nimbus Roman No9 L"
+ x="223.4664"
+ y="-185.52684"
+ id="text189"
+ transform="matrix(0.408697,0.91267,-0.91267,0.408697,0,0)"
+ sodipodi:linespacing="125%">
+ <tspan
+ x="223.4664"
+ y="-185.52684"
+ sodipodi:role="line"
+ id="tspan197">Commands</tspan>
+ </text>
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ id="text3379"
+ sodipodi:linespacing="100%"
+ x="297.55264"
+ y="140.451"
+ transform="matrix(0.993577,-0.113154,0.113154,0.993577,0,0)"><tspan
+ id="tspan3381">NAT</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:15.99999809px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="254.95837"
+ y="108.82023"
+ id="text4472"
+ sodipodi:linespacing="100%"
+ transform="matrix(0.990118,-0.140236,0.140236,0.990118,0,0)"><tspan
+ sodipodi:role="line"
+ id="tspan4474"
+ x="254.95837"
+ y="108.82023">TCP</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:15.99999714px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="-12.571026"
+ y="157.93489"
+ id="text4498"
+ sodipodi:linespacing="100%"
+ transform="matrix(0.652028,-0.758195,0.758195,0.652028,0,0)"
+ inkscape:transform-center-y="-24.108441"
+ inkscape:transform-center-x="-6.0006673"><tspan
+ sodipodi:role="line"
+ id="tspan4500"
+ x="-12.571026"
+ y="157.93489">TCP</tspan></text>
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 84.043356,121.61734 L 265.30978,243.04434 L 260.6856,230.71318 M 264.53909,243.04434 L 253.74932,243.04434"
+ id="path4502" />
+ <text
+ xml:space="preserve"
+ style="font-size:11.99998474px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="256.48093"
+ y="50.965839"
+ id="text4504"
+ sodipodi:linespacing="100%"
+ transform="matrix(0.831631,0.555329,-0.555329,0.831631,0,0)"><tspan
+ sodipodi:role="line"
+ id="tspan4506"
+ x="256.48093"
+ y="50.965839">checkout/update</tspan></text>
+</svg>
diff --git a/buildbot/docs/images/slaves.txt b/buildbot/docs/images/slaves.txt
new file mode 100644
index 0000000..5a89cc4
--- /dev/null
+++ b/buildbot/docs/images/slaves.txt
@@ -0,0 +1,27 @@
+
+
+Repository| | BuildMaster | |
+ (CVS/SVN)| | ^|^^^ |
+ | | / c \ |
+----------+ +------------------/--o----\-+
+ ^ / m ^ \
+ | / m | \
+ checkout/update --+ a | +--
+ | TCP| n | |TCP
+ | | d | |
+ | | s | |
+ | | | | |
+ | | | r |
+ | | | e |
+ -N-A-T-|- - - - -N-A-T- - - - -|- |- s-|- - - - -N-A-T- - -
+ | | | u |
+ | | | l |
+ | +------------------|--|--t-|-+
+ | | | | s | |
+ +----| v | |
+ | | |
+ | | |
+ | |
+ | BuildSlave |
+ +----------------------------+
+
diff --git a/buildbot/docs/images/status.png b/buildbot/docs/images/status.png
new file mode 100644
index 0000000..4160443
--- /dev/null
+++ b/buildbot/docs/images/status.png
Binary files differ
diff --git a/buildbot/docs/images/status.svg b/buildbot/docs/images/status.svg
new file mode 100644
index 0000000..a5c06c7
--- /dev/null
+++ b/buildbot/docs/images/status.svg
@@ -0,0 +1,853 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Sodipodi ("http://www.sodipodi.com/") -->
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://web.resource.org/cc/"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ id="svg101"
+ sodipodi:version="0.32"
+ width="500"
+ height="300"
+ sodipodi:docname="status.svg"
+ inkscape:version="0.44"
+ sodipodi:docbase="/usr/home/warner/stuff/Projects/BuildBot/trees/docs/docs/images"
+ version="1.0">
+ <metadata
+ id="metadata71">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs103">
+ <marker
+ inkscape:stockid="Arrow1Mstart"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow1Mstart"
+ style="overflow:visible">
+ <path
+ id="path3115"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none"
+ transform="scale(0.4) translate(10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Send"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow2Send"
+ style="overflow:visible;">
+ <path
+ id="path3088"
+ style="font-size:12.0;fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+ d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+ transform="scale(0.3) rotate(180) translate(-2.3,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow2Mend"
+ style="overflow:visible;">
+ <path
+ id="path3094"
+ style="font-size:12.0;fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+ d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+ transform="scale(0.6) rotate(180) translate(0,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow1Mend"
+ style="overflow:visible;">
+ <path
+ id="path3112"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none;"
+ transform="scale(0.4) rotate(180) translate(10,0)" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ showgrid="true"
+ snaptoguides="false"
+ width="500px"
+ height="300px"
+ inkscape:zoom="1.534"
+ inkscape:cx="250"
+ inkscape:cy="150"
+ inkscape:window-width="853"
+ inkscape:window-height="732"
+ inkscape:window-x="2"
+ inkscape:window-y="72"
+ inkscape:current-layer="svg101" />
+ <g
+ transform="translate(-65.57053,-24.70747)"
+ id="use2905">
+ <rect
+ style="fill:#e6e6e6;fill-opacity:1;stroke:black;stroke-width:1.00000024;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect3305"
+ width="62.656288"
+ height="29.624987"
+ x="72.414238"
+ y="140.98706"
+ ry="18.75"
+ rx="18.750013" />
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="103.22226"
+ y="160.82355"
+ id="text3307"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan3309"
+ x="103.22226"
+ y="160.82355">Builder</tspan></text>
+ </g>
+ <g
+ id="g3265"
+ transform="translate(-152.5782,-148.8349)">
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 176.72446,196.26159 L 176.72446,244.30079 L 204.77565,244.30079 L 204.77565,196.26159"
+ id="path3250" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 178.40096,231.51692 L 203.09915,231.51692"
+ id="path3252" />
+ <path
+ id="path3254"
+ d="M 178.40096,235.53973 L 203.09915,235.53973"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 178.40096,239.56254 L 203.09915,239.56254"
+ id="path3256" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 190.75006,210.82816 L 190.75006,229.6013"
+ id="path3258" />
+ </g>
+ <path
+ sodipodi:nodetypes="cc"
+ id="path3313"
+ d="M 38.171863,97.498369 L 38.171863,113.6442"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <g
+ id="use3690"
+ transform="matrix(0.396106,0,0,0.396106,-12.37326,-14.63467)">
+ <path
+ sodipodi:type="arc"
+ style="fill:#ffc4c4;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.36707859pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ id="path3694"
+ sodipodi:cx="111.71875"
+ sodipodi:cy="217.1875"
+ sodipodi:rx="27.34375"
+ sodipodi:ry="28.125"
+ d="M 95.442041,239.78682 A 27.34375,28.125 0 1 1 96.201877,240.34538"
+ sodipodi:start="2.2083886"
+ sodipodi:end="8.4574167"
+ sodipodi:open="true"
+ transform="matrix(0.747745,0,0,0.715583,54.81875,14.55241)" />
+ <text
+ xml:space="preserve"
+ style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="138.20795"
+ y="166.51833"
+ id="text3696"
+ sodipodi:linespacing="100%"><tspan
+ y="166.51833"
+ x="138.20795"
+ id="tspan3698"
+ sodipodi:role="line">build</tspan><tspan
+ y="178.51833"
+ x="138.20795"
+ id="tspan3700"
+ sodipodi:role="line">request</tspan></text>
+ </g>
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 39.220471,148.44465 L 33.325791,266.204"
+ id="path3766"
+ sodipodi:nodetypes="cc" />
+ <g
+ transform="translate(-54.34278,-21.4092)"
+ id="g3768">
+ <path
+ sodipodi:type="arc"
+ style="fill:#ffc4c4;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1.80011616pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ id="path3770"
+ sodipodi:cx="111.71875"
+ sodipodi:cy="217.1875"
+ sodipodi:rx="27.34375"
+ sodipodi:ry="28.125"
+ d="M 95.442041,239.78682 A 27.34375,28.125 0 1 1 96.201877,240.34538"
+ sodipodi:start="2.2083886"
+ sodipodi:end="8.4574167"
+ sodipodi:open="true"
+ transform="matrix(0.55538,0,0,0.555659,41.42196,95.56953)" />
+ <text
+ xml:space="preserve"
+ style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="91.230194"
+ y="219.43657"
+ id="text3772"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan3774"
+ x="91.230194"
+ y="219.43657">build</tspan></text>
+ </g>
+ <g
+ id="g4313">
+ <path
+ transform="matrix(0.82454,0,0,0.82454,-23.70518,-14.97447)"
+ d="M 287.00788 64.960632 A 39.763783 39.763783 0 1 1 207.48031,64.960632 A 39.763783 39.763783 0 1 1 287.00788 64.960632 z"
+ sodipodi:ry="39.763783"
+ sodipodi:rx="39.763783"
+ sodipodi:cy="64.960632"
+ sodipodi:cx="247.24409"
+ id="path4010"
+ style="fill:#ffbb7e;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1.21279736pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text4012"
+ y="43.53817"
+ x="158.65028"
+ style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ y="43.53817"
+ x="158.65028"
+ id="tspan4014"
+ sodipodi:role="line">Status</tspan></text>
+ </g>
+ <g
+ id="g4036"
+ transform="translate(-12,-19.24958)">
+ <path
+ sodipodi:type="arc"
+ style="fill:#ffbb7e;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1.38131847pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ id="path4023"
+ sodipodi:cx="247.24409"
+ sodipodi:cy="64.960632"
+ sodipodi:rx="39.763783"
+ sodipodi:ry="39.763783"
+ d="M 287.00788 64.960632 A 39.763783 39.763783 0 1 1 207.48031,64.960632 A 39.763783 39.763783 0 1 1 287.00788 64.960632 z"
+ transform="matrix(0.723946,0,0,0.723946,-22.83389,81.63342)" />
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="139.42967"
+ y="137.662"
+ id="text4025"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan4027"
+ x="139.42967"
+ y="137.662">Status</tspan></text>
+ <text
+ sodipodi:linespacing="100%"
+ id="text4032"
+ y="123"
+ x="134.70238"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ y="123"
+ x="134.70238"
+ id="tspan4034"
+ sodipodi:role="line">Builder</tspan></text>
+ </g>
+ <g
+ id="g4043"
+ transform="translate(62.48819,-17.52517)">
+ <path
+ transform="matrix(0.723946,0,0,0.723946,-22.83389,81.63342)"
+ d="M 287.00788 64.960632 A 39.763783 39.763783 0 1 1 207.48031,64.960632 A 39.763783 39.763783 0 1 1 287.00788 64.960632 z"
+ sodipodi:ry="39.763783"
+ sodipodi:rx="39.763783"
+ sodipodi:cy="64.960632"
+ sodipodi:cx="247.24409"
+ id="path4045"
+ style="fill:#ffbb7e;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1.38131847pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text4047"
+ y="137.662"
+ x="139.42967"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ y="137.662"
+ x="139.42967"
+ id="tspan4049"
+ sodipodi:role="line">Status</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="134.70238"
+ y="123"
+ id="text4051"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan4053"
+ x="134.70238"
+ y="123">Builder</tspan></text>
+ </g>
+ <g
+ id="g4074"
+ transform="translate(-12,-23)">
+ <path
+ transform="matrix(0.610778,0,0,0.610778,-23.35369,153.4849)"
+ d="M 287.00788 64.960632 A 39.763783 39.763783 0 1 1 207.48031,64.960632 A 39.763783 39.763783 0 1 1 287.00788 64.960632 z"
+ sodipodi:ry="39.763783"
+ sodipodi:rx="39.763783"
+ sodipodi:cy="64.960632"
+ sodipodi:cx="247.24409"
+ id="path4057"
+ style="fill:#ffbb7e;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1.63725681pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text4059"
+ y="205.662"
+ x="110.92975"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ y="205.662"
+ x="110.92975"
+ id="tspan4061"
+ sodipodi:role="line">Status</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="112.04297"
+ y="191"
+ id="text4063"
+ sodipodi:linespacing="100%"><tspan
+ y="191"
+ x="112.04297"
+ id="tspan4072"
+ sodipodi:role="line">Build</tspan></text>
+ </g>
+ <g
+ id="g4081"
+ transform="translate(53,-28.07327)">
+ <path
+ sodipodi:type="arc"
+ style="fill:#ffbb7e;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1.63725681pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ id="path4083"
+ sodipodi:cx="247.24409"
+ sodipodi:cy="64.960632"
+ sodipodi:rx="39.763783"
+ sodipodi:ry="39.763783"
+ d="M 287.00788 64.960632 A 39.763783 39.763783 0 1 1 207.48031,64.960632 A 39.763783 39.763783 0 1 1 287.00788 64.960632 z"
+ transform="matrix(0.610778,0,0,0.610778,-23.35369,153.4849)" />
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="110.92975"
+ y="205.662"
+ id="text4085"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan4087"
+ x="110.92975"
+ y="205.662">Status</tspan></text>
+ <text
+ sodipodi:linespacing="100%"
+ id="text4089"
+ y="191"
+ x="112.04297"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ sodipodi:role="line"
+ id="tspan4091"
+ x="112.04297"
+ y="191">Build</tspan></text>
+ </g>
+ <g
+ id="g4112"
+ transform="translate(-1,-15.07332)">
+ <path
+ sodipodi:type="arc"
+ style="fill:#ffbb7e;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2.00960808pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ id="path4095"
+ sodipodi:cx="247.24409"
+ sodipodi:cy="64.960632"
+ sodipodi:rx="39.763783"
+ sodipodi:ry="39.763783"
+ d="M 287.00788 64.960632 A 39.763783 39.763783 0 1 1 207.48031,64.960632 A 39.763783 39.763783 0 1 1 287.00788 64.960632 z"
+ transform="matrix(0.49761,0,0,0.49761,-10.87349,211.3364)" />
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="95.429832"
+ y="251.66199"
+ id="text4097"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan4099"
+ x="95.429832"
+ y="251.66199">Status</tspan></text>
+ <text
+ sodipodi:linespacing="100%"
+ id="text4101"
+ y="239"
+ x="99.681236"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ y="239"
+ x="99.681236"
+ id="tspan4110"
+ sodipodi:role="line">Step</tspan></text>
+ </g>
+ <g
+ id="g4119"
+ transform="translate(51.25421,-17.07332)">
+ <path
+ transform="matrix(0.49761,0,0,0.49761,-10.87349,211.3364)"
+ d="M 287.00788 64.960632 A 39.763783 39.763783 0 1 1 207.48031,64.960632 A 39.763783 39.763783 0 1 1 287.00788 64.960632 z"
+ sodipodi:ry="39.763783"
+ sodipodi:rx="39.763783"
+ sodipodi:cy="64.960632"
+ sodipodi:cx="247.24409"
+ id="path4121"
+ style="fill:#ffbb7e;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2.00960808pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text4123"
+ y="251.66199"
+ x="95.429832"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ y="251.66199"
+ x="95.429832"
+ id="tspan4125"
+ sodipodi:role="line">Status</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="99.681236"
+ y="239"
+ id="text4127"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan4129"
+ x="99.681236"
+ y="239">Step</tspan></text>
+ </g>
+ <g
+ id="g4152"
+ transform="translate(-6,5.823648)">
+ <path
+ sodipodi:type="arc"
+ style="fill:#ffbb7e;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2.3002346pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ id="path4133"
+ sodipodi:cx="247.24409"
+ sodipodi:cy="64.960632"
+ sodipodi:rx="39.763783"
+ sodipodi:ry="39.763783"
+ d="M 287.00788 64.960632 A 39.763783 39.763783 0 1 1 207.48031,64.960632 A 39.763783 39.763783 0 1 1 287.00788 64.960632 z"
+ transform="matrix(0.434739,0,0,0.434739,6.425273,243.8473)" />
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="102.56963"
+ y="282.58865"
+ id="text4135"
+ sodipodi:linespacing="100%"><tspan
+ y="282.58865"
+ x="102.56963"
+ id="tspan4150"
+ sodipodi:role="line">File</tspan></text>
+ <text
+ sodipodi:linespacing="100%"
+ id="text4139"
+ y="269.92667"
+ x="101.96763"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ y="269.92667"
+ x="101.96763"
+ id="tspan4148"
+ sodipodi:role="line">Log</tspan></text>
+ </g>
+ <g
+ transform="translate(39,5.823648)"
+ id="g4159">
+ <path
+ transform="matrix(0.434739,0,0,0.434739,6.425273,243.8473)"
+ d="M 287.00788 64.960632 A 39.763783 39.763783 0 1 1 207.48031,64.960632 A 39.763783 39.763783 0 1 1 287.00788 64.960632 z"
+ sodipodi:ry="39.763783"
+ sodipodi:rx="39.763783"
+ sodipodi:cy="64.960632"
+ sodipodi:cx="247.24409"
+ id="path4161"
+ style="fill:#ffbb7e;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2.3002346pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text4163"
+ y="282.58865"
+ x="102.56963"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ sodipodi:role="line"
+ id="tspan4165"
+ x="102.56963"
+ y="282.58865">File</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="101.96763"
+ y="269.92667"
+ id="text4167"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan4169"
+ x="101.96763"
+ y="269.92667">Log</tspan></text>
+ </g>
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:none;marker-end:url(#Arrow1Mend)"
+ d="M 165.3998,66.157483 L 156.04728,86.95538"
+ id="path4171"
+ inkscape:connector-type="polyline"
+ inkscape:connection-end="#g4036"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-opacity:1"
+ d="M 195.52091,67.275594 L 204.61691,84.874015"
+ id="path4173"
+ inkscape:connector-type="polyline"
+ inkscape:connection-end="#g4043"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend)"
+ d="M 133.49195,131.1344 L 124.21199,152.08447"
+ id="path4175"
+ inkscape:connector-type="polyline"
+ inkscape:connection-start="#g4036"
+ inkscape:connection-end="#g4074"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend)"
+ d="M 154.61078,128.00175 L 168.31297,145.58727"
+ id="path4177"
+ inkscape:connector-type="polyline"
+ inkscape:connection-start="#g4036"
+ inkscape:connection-end="#g4081"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend)"
+ d="M 113.7389,195.07327 L 112.72973,208.17628"
+ id="path4179"
+ inkscape:connector-type="polyline"
+ inkscape:connection-start="#g4074"
+ inkscape:connection-end="#g4112" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend)"
+ d="M 130.4753,186.24487 L 150.69377,210.1633"
+ id="path4181"
+ inkscape:connector-type="polyline"
+ inkscape:connection-start="#g4074"
+ inkscape:connection-end="#g4119"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend)"
+ d="M 109.81445,249 L 109.0906,260"
+ id="path4183"
+ inkscape:connector-type="polyline"
+ inkscape:connection-start="#g4112"
+ inkscape:connection-end="#g4152" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Mend)"
+ d="M 123.8804,242.73468 L 141.45112,264.27181"
+ id="path4185"
+ inkscape:connector-type="polyline"
+ inkscape:connection-start="#g4112"
+ inkscape:connection-end="#g4159"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:2,4;stroke-dashoffset:0"
+ d="M 63.779528,190 C 92.125984,180 92.125984,179.52756 92.125984,179.52756"
+ id="path4187" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:2,4;stroke-dashoffset:0"
+ d="M 69.291339,128.34646 L 114.17323,114.96063"
+ id="path4189" />
+ <g
+ id="g4802"
+ transform="translate(-16.29921,-0.603675)">
+ <rect
+ ry="8.3841658"
+ rx="9.8084297"
+ y="15.748026"
+ x="303.93707"
+ height="42.519661"
+ width="88.976402"
+ id="rect4191"
+ style="fill:#c2ffa4;fill-opacity:0.75;stroke:black;stroke-width:0.99999994;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text4193"
+ y="40.991856"
+ x="318.42453"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ y="40.991856"
+ x="318.42453"
+ id="tspan4195"
+ sodipodi:role="line">Waterfall</tspan></text>
+ </g>
+ <g
+ id="g4797"
+ transform="translate(-16.29921,-0.603675)">
+ <rect
+ style="fill:#c2ffa4;fill-opacity:0.75;stroke:black;stroke-width:0.99999994;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect4197"
+ width="88.976402"
+ height="42.519661"
+ x="303.14966"
+ y="77.952751"
+ rx="9.8084297"
+ ry="8.3841658" />
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="333.88623"
+ y="105.22058"
+ id="text4199"
+ sodipodi:linespacing="100%"><tspan
+ y="105.22058"
+ x="333.88623"
+ id="tspan4203"
+ sodipodi:role="line">IRC</tspan></text>
+ </g>
+ <g
+ id="g4789"
+ transform="translate(-16.29921,-0.603675)">
+ <rect
+ ry="8.3841658"
+ rx="9.8084297"
+ y="146.98035"
+ x="303.14966"
+ height="42.519661"
+ width="88.976402"
+ id="rect4205"
+ style="fill:#c2ffa4;fill-opacity:0.75;stroke:black;stroke-width:0.99999994;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text4207"
+ y="172.81618"
+ x="306.73587"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ y="172.81618"
+ x="306.73587"
+ id="tspan4787"
+ sodipodi:role="line">MailNotifier</tspan></text>
+ </g>
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="M 213.5693,38.274381 L 286.53418,36.826696"
+ id="path4221"
+ inkscape:connector-type="polyline"
+ inkscape:connection-end="#rect4191"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="M 211.9945,45.170748 L 289.97249,79.909419"
+ id="path4223"
+ inkscape:connector-type="polyline"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="M 208.84489,54.217135 L 310.47407,146.48035"
+ id="path4225"
+ inkscape:connector-type="polyline"
+ sodipodi:nodetypes="cc" />
+ <g
+ id="g4290"
+ transform="translate(-4.829396,0)">
+ <path
+ transform="translate(-5.511811,-4.724409)"
+ d="M 510.23622 49.212597 A 32.677166 18.503937 0 1 1 444.88189,49.212597 A 32.677166 18.503937 0 1 1 510.23622 49.212597 z"
+ sodipodi:ry="18.503937"
+ sodipodi:rx="32.677166"
+ sodipodi:cy="49.212597"
+ sodipodi:cx="477.55905"
+ id="path4227"
+ style="fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text4229"
+ y="40.522835"
+ x="471.59213"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ y="40.522835"
+ x="471.59213"
+ id="tspan4231"
+ sodipodi:role="line">Web</tspan><tspan
+ id="tspan4233"
+ y="54.522835"
+ x="471.59213"
+ sodipodi:role="line">Browser</tspan></text>
+ </g>
+ <g
+ id="g4280"
+ transform="translate(-3.622047,0)">
+ <path
+ sodipodi:type="arc"
+ style="fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ id="path4243"
+ sodipodi:cx="477.55905"
+ sodipodi:cy="49.212597"
+ sodipodi:rx="32.677166"
+ sodipodi:ry="18.503937"
+ d="M 510.23622 49.212597 A 32.677166 18.503937 0 1 1 444.88189,49.212597 A 32.677166 18.503937 0 1 1 510.23622 49.212597 z"
+ transform="translate(-6.299213,52.7559)" />
+ <text
+ xml:space="preserve"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="471.68674"
+ y="99.735497"
+ id="text4245"
+ sodipodi:linespacing="100%"><tspan
+ y="99.735497"
+ x="471.68674"
+ id="tspan4265"
+ sodipodi:role="line">IRC</tspan><tspan
+ y="113.7355"
+ x="471.68674"
+ id="tspan4267"
+ sodipodi:role="line">Server</tspan></text>
+ </g>
+ <g
+ id="g4275"
+ transform="translate(-4.562004,0)">
+ <path
+ transform="translate(-6.299213,127.5591)"
+ d="M 510.23622 49.212597 A 32.677166 18.503937 0 1 1 444.88189,49.212597 A 32.677166 18.503937 0 1 1 510.23622 49.212597 z"
+ sodipodi:ry="18.503937"
+ sodipodi:rx="32.677166"
+ sodipodi:cy="49.212597"
+ sodipodi:cx="477.55905"
+ id="path4253"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:black;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text4255"
+ y="181.35669"
+ x="471.43231"
+ style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ xml:space="preserve"><tspan
+ y="181.35669"
+ x="471.43231"
+ id="tspan4273"
+ sodipodi:role="line">SMTP</tspan></text>
+ </g>
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+ d="M 434.35696,44.094488 L 377.63779,38.16273"
+ id="path4298"
+ sodipodi:nodetypes="cc" />
+ <path
+ id="path4300"
+ d="M 432.91339,100.18373 L 377.40157,96.062992"
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow1Mstart)"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;marker-end:none;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow1Mstart)"
+ d="M 433.70079,174.25196 L 375.03936,165.11812"
+ id="path4302"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="400.94119"
+ y="1.1406202"
+ id="text4306"
+ sodipodi:linespacing="100%"
+ transform="matrix(0.995875,9.074051e-2,-9.074051e-2,0.995875,0,0)"><tspan
+ sodipodi:role="line"
+ id="tspan4308"
+ x="400.94119"
+ y="1.1406202">HTTP</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:black;fill-opacity:1;stroke:none;stroke-width:1pt;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Nimbus Roman No9 L"
+ x="310.32733"
+ y="246.64044"
+ id="text4318"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan4320"
+ x="310.32733"
+ y="246.64044">Status</tspan><tspan
+ sodipodi:role="line"
+ x="310.32733"
+ y="262.64044"
+ id="tspan4322">Plugins</tspan></text>
+ <rect
+ style="fill:none;fill-opacity:1;stroke:black;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:2, 4;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect4324"
+ width="114.96063"
+ height="268.50394"
+ x="275.03937"
+ y="4.9081368"
+ rx="0"
+ ry="0" />
+ <g
+ id="g2215"
+ transform="matrix(0.645481,0,0,0.645481,-38.34031,130.372)">
+ <path
+ sodipodi:type="arc"
+ style="font-size:12px;fill:#e6e6e6;fill-rule:evenodd;stroke:black;stroke-width:2.32130599"
+ id="path105"
+ d="M 179.3815 607.44885 A 73.16877 76.709229 0 1 1 33.043961,607.44885 A 73.16877 76.709229 0 1 1 179.3815 607.44885 z"
+ sodipodi:cx="106.21273"
+ sodipodi:cy="607.44885"
+ sodipodi:rx="73.16877"
+ sodipodi:ry="76.709229"
+ transform="matrix(0.556009,0,0,0.530347,57.1479,-66.29883)" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;stroke-width:1pt;font-family:Nimbus Roman No9 L"
+ x="98.274147"
+ y="233.58066"
+ id="text114">
+ <tspan
+ x="98.274147"
+ y="233.58066"
+ sodipodi:role="line"
+ id="tspan130">Build</tspan>
+ <tspan
+ x="98.274147"
+ y="253.58066"
+ sodipodi:role="line"
+ id="tspan132">Slave</tspan>
+ </text>
+ </g>
+</svg>
diff --git a/buildbot/docs/images/status.txt b/buildbot/docs/images/status.txt
new file mode 100644
index 0000000..40a20ef
--- /dev/null
+++ b/buildbot/docs/images/status.txt
@@ -0,0 +1,32 @@
+
+
+ Status Objects Status Plugins User Clients
+
+ +------+ +---------+ +-----------+
+ |Status|<--------------+-->|Waterfall|<-------|Web Browser|
+ +------+ | +---------+ +-----------+
+ | +-----+ |
+ v v |
++-------+ +-------+ | +---+ +----------+
+|Builder| |Builder| +---->|IRC|<---------->|IRC Server|
+|Status | |Status | | +---+ +----------+
++-------+ +-------+ |
+ | +----+ |
+ v v | +------------+ +----+
++------+ +------+ +-->|MailNotifier|---->|SMTP|
+|Build | |Build | +------------+ +----+
+|Status| |Status|
++------+ +------+
+ | +-----+
+ v v
++------+ +------+
+|Step | |Step |
+|Status| |Status|
++------+ +------+
+ | +---+
+ v v
++----+ +----+
+|Log | |Log |
+|File| |File|
++----+ +----+
+
diff --git a/buildbot/setup.cfg b/buildbot/setup.cfg
new file mode 100644
index 0000000..861a9f5
--- /dev/null
+++ b/buildbot/setup.cfg
@@ -0,0 +1,5 @@
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/buildbot/setup.py b/buildbot/setup.py
new file mode 100644
index 0000000..451f15f
--- /dev/null
+++ b/buildbot/setup.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+#
+# This software may be freely redistributed under the terms of the GNU
+# general public license.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+"""
+Standard setup script.
+"""
+
+import sys
+import os
+import re
+
+from distutils.core import setup
+from buildbot import version
+
+# Path: twisted!cvstoys!buildbot
+from distutils.command.install_data import install_data
+
+
+class install_data_twisted(install_data):
+    """Install data files into the package tree (under install_lib)
+    instead of the default data prefix, so they end up next to the
+    code that loads them.  This is evil; copied from Twisted/setup.py.
+    """
+
+    def finalize_options(self):
+        # If --install-dir was not given explicitly, inherit the parent
+        # 'install' command's install_lib path, redirecting data files
+        # into the package directory.
+        self.set_undefined_options('install',
+                                   ('install_lib', 'install_dir'),
+                                   )
+        install_data.finalize_options(self)
+
+# PyPI long description (keep as a plain string: it is user-facing text).
+long_description="""
+The BuildBot is a system to automate the compile/test cycle required by
+most software projects to validate code changes. By automatically
+rebuilding and testing the tree each time something has changed, build
+problems are pinpointed quickly, before other developers are
+inconvenienced by the failure. The guilty developer can be identified
+and harassed without human intervention. By running the builds on a
+variety of platforms, developers who do not have the facilities to test
+their changes everywhere before checkin will at least know shortly
+afterwards whether they have broken the build or not. Warning counts,
+lint checks, image size, compile time, and other build parameters can
+be tracked over time, are more visible, and are therefore easier to
+improve.
+"""
+
+scripts = ["bin/buildbot"]
+# On Windows, also ship the batch-file wrapper and the NT service script.
+if sys.platform == "win32":
+    scripts.append("contrib/windows/buildbot.bat")
+    scripts.append("contrib/windows/buildbot_service.py")
+
+# Collect the numbered sample mail messages used by the test suite
+# (files ending in ".<digits>"), skipping editor backups ("~" suffix).
+testmsgs = []
+for f in os.listdir("buildbot/test/mail"):
+    if f.endswith("~"):
+        continue
+    if re.search(r'\.\d+$', f):
+        testmsgs.append("buildbot/test/mail/%s" % f)
+
+# Keyword arguments for distutils.core.setup(), applied below via
+# setup(**setup_args).  Built as a dict so the setuptools-only keys can
+# be added conditionally afterwards.
+setup_args = {
+    'name': "buildbot",
+    'version': version,
+    'description': "BuildBot build automation system",
+    'long_description': long_description,
+    'author': "Brian Warner",
+    'author_email': "warner-buildbot@lothar.com",
+    'url': "http://buildbot.net/",
+    'license': "GNU GPL",
+    # does this classifiers= mean that this can't be installed on 2.2/2.3?
+    'classifiers': [
+        'Development Status :: 4 - Beta',
+        'Environment :: No Input/Output (Daemon)',
+        'Environment :: Web Environment',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: GNU General Public License (GPL)',
+        'Topic :: Software Development :: Build Tools',
+        'Topic :: Software Development :: Testing',
+        ],
+
+    'packages': ["buildbot",
+                 "buildbot.status", "buildbot.status.web",
+                 "buildbot.changes",
+                 "buildbot.steps",
+                 "buildbot.steps.package",
+                 "buildbot.steps.package.rpm",
+                 "buildbot.process",
+                 "buildbot.clients",
+                 "buildbot.slave",
+                 "buildbot.scripts",
+                 "buildbot.test",
+                 ],
+    # Non-Python files; install_data_twisted redirects these into the
+    # package tree so the code can find them at runtime.
+    'data_files': [("buildbot", ["buildbot/buildbot.png"]),
+                   ("buildbot/clients", ["buildbot/clients/debug.glade"]),
+                   ("buildbot/status/web",
+                    ["buildbot/status/web/classic.css",
+                     "buildbot/status/web/index.html",
+                     "buildbot/status/web/robots.txt",
+                     ]),
+                   ("buildbot/scripts", ["buildbot/scripts/sample.cfg"]),
+                   ("buildbot/test/mail", testmsgs),
+                   ("buildbot/test/subdir", ["buildbot/test/subdir/emit.py"]),
+                   ],
+    'scripts': scripts,
+    'cmdclass': {'install_data': install_data_twisted},
+    }
+
+try:
+ # If setuptools is installed, then we'll add setuptools-specific arguments
+ # to the setup args.
+ import setuptools
+except ImportError:
+ pass
+else:
+ setup_args['install_requires'] = ['twisted >= 2.0.0']
+ entry_points={
+ 'console_scripts': [
+ 'buildbot = buildbot.scripts.runner:run'],
+ },
+
+setup(**setup_args)
+
+# Local Variables:
+# fill-column: 71
+# End: