Diffstat (limited to 'buildbot/buildbot/test')
-rw-r--r--  buildbot/buildbot/test/__init__.py | 0
-rw-r--r--  buildbot/buildbot/test/emit.py | 11
-rw-r--r--  buildbot/buildbot/test/emitlogs.py | 42
-rw-r--r--  buildbot/buildbot/test/mail/freshcvs.1 | 68
-rw-r--r--  buildbot/buildbot/test/mail/freshcvs.2 | 101
-rw-r--r--  buildbot/buildbot/test/mail/freshcvs.3 | 97
-rw-r--r--  buildbot/buildbot/test/mail/freshcvs.4 | 45
-rw-r--r--  buildbot/buildbot/test/mail/freshcvs.5 | 54
-rw-r--r--  buildbot/buildbot/test/mail/freshcvs.6 | 70
-rw-r--r--  buildbot/buildbot/test/mail/freshcvs.7 | 68
-rw-r--r--  buildbot/buildbot/test/mail/freshcvs.8 | 61
-rw-r--r--  buildbot/buildbot/test/mail/freshcvs.9 | 18
-rw-r--r--  buildbot/buildbot/test/mail/svn-commit.1 | 67
-rw-r--r--  buildbot/buildbot/test/mail/svn-commit.2 | 1218
-rw-r--r--  buildbot/buildbot/test/mail/syncmail.1 | 152
-rw-r--r--  buildbot/buildbot/test/mail/syncmail.2 | 56
-rw-r--r--  buildbot/buildbot/test/mail/syncmail.3 | 39
-rw-r--r--  buildbot/buildbot/test/mail/syncmail.4 | 290
-rw-r--r--  buildbot/buildbot/test/mail/syncmail.5 | 70
-rw-r--r--  buildbot/buildbot/test/runutils.py | 516
-rw-r--r--  buildbot/buildbot/test/sleep.py | 8
-rw-r--r--  buildbot/buildbot/test/subdir/emit.py | 11
-rw-r--r--  buildbot/buildbot/test/test__versions.py | 16
-rw-r--r--  buildbot/buildbot/test/test_bonsaipoller.py | 244
-rw-r--r--  buildbot/buildbot/test/test_buildreq.py | 182
-rw-r--r--  buildbot/buildbot/test/test_buildstep.py | 144
-rw-r--r--  buildbot/buildbot/test/test_changes.py | 243
-rw-r--r--  buildbot/buildbot/test/test_config.py | 1277
-rw-r--r--  buildbot/buildbot/test/test_control.py | 104
-rw-r--r--  buildbot/buildbot/test/test_dependencies.py | 166
-rw-r--r--  buildbot/buildbot/test/test_ec2buildslave.py | 552
-rw-r--r--  buildbot/buildbot/test/test_limitlogs.py | 94
-rw-r--r--  buildbot/buildbot/test/test_locks.py | 495
-rw-r--r--  buildbot/buildbot/test/test_maildir.py | 92
-rw-r--r--  buildbot/buildbot/test/test_mailparse.py | 293
-rw-r--r--  buildbot/buildbot/test/test_mergerequests.py | 196
-rw-r--r--  buildbot/buildbot/test/test_p4poller.py | 213
-rw-r--r--  buildbot/buildbot/test/test_package_rpm.py | 132
-rw-r--r--  buildbot/buildbot/test/test_properties.py | 274
-rw-r--r--  buildbot/buildbot/test/test_reconfig.py | 91
-rw-r--r--  buildbot/buildbot/test/test_run.py | 1199
-rw-r--r--  buildbot/buildbot/test/test_runner.py | 392
-rw-r--r--  buildbot/buildbot/test/test_scheduler.py | 348
-rw-r--r--  buildbot/buildbot/test/test_shell.py | 138
-rw-r--r--  buildbot/buildbot/test/test_slavecommand.py | 294
-rw-r--r--  buildbot/buildbot/test/test_slaves.py | 991
-rw-r--r--  buildbot/buildbot/test/test_status.py | 1631
-rw-r--r--  buildbot/buildbot/test/test_steps.py | 788
-rw-r--r--  buildbot/buildbot/test/test_svnpoller.py | 476
-rw-r--r--  buildbot/buildbot/test/test_transfer.py | 721
-rw-r--r--  buildbot/buildbot/test/test_twisted.py | 219
-rw-r--r--  buildbot/buildbot/test/test_util.py | 26
-rw-r--r--  buildbot/buildbot/test/test_vc.py | 3023
-rw-r--r--  buildbot/buildbot/test/test_web.py | 594
-rw-r--r--  buildbot/buildbot/test/test_webparts.py | 141
55 files changed, 18851 insertions, 0 deletions
diff --git a/buildbot/buildbot/test/__init__.py b/buildbot/buildbot/test/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/buildbot/buildbot/test/__init__.py
diff --git a/buildbot/buildbot/test/emit.py b/buildbot/buildbot/test/emit.py
new file mode 100644
index 0000000..1e23e92
--- /dev/null
+++ b/buildbot/buildbot/test/emit.py
@@ -0,0 +1,11 @@
+
+import os, sys
+
+sys.stdout.write("this is stdout\n")
+sys.stderr.write("this is stderr\n")
+if os.environ.has_key("EMIT_TEST"):
+ sys.stdout.write("EMIT_TEST: %s\n" % os.environ["EMIT_TEST"])
+open("log1.out","wt").write("this is log1\n")
+
+rc = int(sys.argv[1])
+sys.exit(rc)
diff --git a/buildbot/buildbot/test/emitlogs.py b/buildbot/buildbot/test/emitlogs.py
new file mode 100644
index 0000000..1430235
--- /dev/null
+++ b/buildbot/buildbot/test/emitlogs.py
@@ -0,0 +1,42 @@
+import sys, time, os.path, StringIO
+
+mode = 0
+if len(sys.argv) > 1:
+    mode = int(sys.argv[1])
+
+if mode == 0:
+ log2 = open("log2.out", "wt")
+ log3 = open("log3.out", "wt")
+elif mode == 1:
+    # delete the logfiles first, and wait a moment to exercise a failure path
+    if os.path.exists("log2.out"):
+        os.unlink("log2.out")
+    if os.path.exists("log3.out"):
+        os.unlink("log3.out")
+    time.sleep(2)
+    log2 = open("log2.out", "wt")
+    log3 = open("log3.out", "wt")
+elif mode == 2:
+    # don't create the logfiles at all
+    log2 = StringIO.StringIO()
+    log3 = StringIO.StringIO()
+
+def write(i):
+ log2.write("this is log2 %d\n" % i)
+ log2.flush()
+ log3.write("this is log3 %d\n" % i)
+ log3.flush()
+ sys.stdout.write("this is stdout %d\n" % i)
+ sys.stdout.flush()
+
+write(0)
+time.sleep(1)
+write(1)
+sys.stdin.read(1)
+write(2)
+
+log2.close()
+log3.close()
+
+sys.exit(0)
+
diff --git a/buildbot/buildbot/test/mail/freshcvs.1 b/buildbot/buildbot/test/mail/freshcvs.1
new file mode 100644
index 0000000..cc8442e
--- /dev/null
+++ b/buildbot/buildbot/test/mail/freshcvs.1
@@ -0,0 +1,68 @@
+Return-Path: <twisted-commits-admin@twistedmatrix.com>
+Delivered-To: warner-twistedcvs@luther.lothar.com
+Received: (qmail 11151 invoked by uid 1000); 11 Jan 2003 17:10:04 -0000
+Delivered-To: warner-twistedcvs@lothar.com
+Received: (qmail 1548 invoked by uid 13574); 11 Jan 2003 17:06:39 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-twistedcvs@lothar.com>; 11 Jan 2003 17:06:39 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+ by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+ id 18XP0U-0002Mq-00; Sat, 11 Jan 2003 11:01:14 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+ id 18XP02-0002MN-00
+ for <twisted-commits@twistedmatrix.com>; Sat, 11 Jan 2003 11:00:46 -0600
+To: twisted-commits@twistedmatrix.com
+From: moshez CVS <moshez@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+X-Mailer: CVSToys
+From: moshez CVS <moshez@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+Message-Id: <E18XP02-0002MN-00@pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] Instance massenger, apparently
+Sender: twisted-commits-admin@twistedmatrix.com
+Errors-To: twisted-commits-admin@twistedmatrix.com
+X-BeenThere: twisted-commits@twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits@twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Sat, 11 Jan 2003 11:00:46 -0600
+Status:
+
+Modified files:
+Twisted/debian/python-twisted.menu.in 1.3 1.4
+
+Log message:
+Instance massenger, apparently
+
+
+ViewCVS links:
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/debian/python-twisted.menu.in.diff?r1=text&tr1=1.3&r2=text&tr2=1.4&cvsroot=Twisted
+
+Index: Twisted/debian/python-twisted.menu.in
+diff -u Twisted/debian/python-twisted.menu.in:1.3 Twisted/debian/python-twisted.menu.in:1.4
+--- Twisted/debian/python-twisted.menu.in:1.3 Sat Dec 28 10:02:12 2002
++++ Twisted/debian/python-twisted.menu.in Sat Jan 11 09:00:44 2003
+@@ -1,7 +1,7 @@
+ ?package(python@VERSION@-twisted):\
+ needs=x11\
+ section="Apps/Net"\
+-title="Twisted Instant Messenger (@VERSION@)"\
++title="Twisted Instance Messenger (@VERSION@)"\
+ command="/usr/bin/t-im@VERSION@"
+
+ ?package(python@VERSION@-twisted):\
+
+.
+
+_______________________________________________
+Twisted-commits mailing list
+Twisted-commits@twistedmatrix.com
+http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
diff --git a/buildbot/buildbot/test/mail/freshcvs.2 b/buildbot/buildbot/test/mail/freshcvs.2
new file mode 100644
index 0000000..ada1311
--- /dev/null
+++ b/buildbot/buildbot/test/mail/freshcvs.2
@@ -0,0 +1,101 @@
+Return-Path: <twisted-commits-admin@twistedmatrix.com>
+Delivered-To: warner-twistedcvs@luther.lothar.com
+Received: (qmail 32220 invoked by uid 1000); 14 Jan 2003 21:50:04 -0000
+Delivered-To: warner-twistedcvs@lothar.com
+Received: (qmail 7923 invoked by uid 13574); 14 Jan 2003 21:49:48 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-twistedcvs@lothar.com>; 14 Jan 2003 21:49:48 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+ by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+ id 18YYr0-0005en-00; Tue, 14 Jan 2003 15:44:14 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+ id 18YYq7-0005eQ-00
+ for <twisted-commits@twistedmatrix.com>; Tue, 14 Jan 2003 15:43:19 -0600
+To: twisted-commits@twistedmatrix.com
+From: itamarst CVS <itamarst@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+X-Mailer: CVSToys
+From: itamarst CVS <itamarst@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+Message-Id: <E18YYq7-0005eQ-00@pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] submit formmethod now subclass of Choice
+Sender: twisted-commits-admin@twistedmatrix.com
+Errors-To: twisted-commits-admin@twistedmatrix.com
+X-BeenThere: twisted-commits@twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits@twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Tue, 14 Jan 2003 15:43:19 -0600
+Status:
+
+Modified files:
+Twisted/twisted/web/woven/form.py 1.20 1.21
+Twisted/twisted/python/formmethod.py 1.12 1.13
+
+Log message:
+submit formmethod now subclass of Choice
+
+
+ViewCVS links:
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/twisted/web/woven/form.py.diff?r1=text&tr1=1.20&r2=text&tr2=1.21&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/twisted/python/formmethod.py.diff?r1=text&tr1=1.12&r2=text&tr2=1.13&cvsroot=Twisted
+
+Index: Twisted/twisted/web/woven/form.py
+diff -u Twisted/twisted/web/woven/form.py:1.20 Twisted/twisted/web/woven/form.py:1.21
+--- Twisted/twisted/web/woven/form.py:1.20 Tue Jan 14 12:07:29 2003
++++ Twisted/twisted/web/woven/form.py Tue Jan 14 13:43:16 2003
+@@ -140,8 +140,8 @@
+
+ def input_submit(self, request, content, arg):
+ div = content.div()
+- for value in arg.buttons:
+- div.input(type="submit", name=arg.name, value=value)
++ for tag, value, desc in arg.choices:
++ div.input(type="submit", name=arg.name, value=tag)
+ div.text(" ")
+ if arg.reset:
+ div.input(type="reset")
+
+Index: Twisted/twisted/python/formmethod.py
+diff -u Twisted/twisted/python/formmethod.py:1.12 Twisted/twisted/python/formmethod.py:1.13
+--- Twisted/twisted/python/formmethod.py:1.12 Tue Jan 14 12:07:30 2003
++++ Twisted/twisted/python/formmethod.py Tue Jan 14 13:43:17 2003
+@@ -180,19 +180,13 @@
+ return 1
+
+
+-class Submit(Argument):
++class Submit(Choice):
+ """Submit button or a reasonable facsimile thereof."""
+
+- def __init__(self, name, buttons=["Submit"], reset=0, shortDesc=None, longDesc=None):
+- Argument.__init__(self, name, shortDesc=shortDesc, longDesc=longDesc)
+- self.buttons = buttons
++ def __init__(self, name, choices=[("Submit", "submit", "Submit form")],
++ reset=0, shortDesc=None, longDesc=None):
++ Choice.__init__(self, name, choices=choices, shortDesc=shortDesc, longDesc=longDesc)
+ self.reset = reset
+-
+- def coerce(self, val):
+- if val in self.buttons:
+- return val
+- else:
+- raise InputError, "no such action"
+
+
+ class PresentationHint:
+
+.
+
+_______________________________________________
+Twisted-commits mailing list
+Twisted-commits@twistedmatrix.com
+http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
diff --git a/buildbot/buildbot/test/mail/freshcvs.3 b/buildbot/buildbot/test/mail/freshcvs.3
new file mode 100644
index 0000000..f9ff199
--- /dev/null
+++ b/buildbot/buildbot/test/mail/freshcvs.3
@@ -0,0 +1,97 @@
+Return-Path: <twisted-commits-admin@twistedmatrix.com>
+Delivered-To: warner-twistedcvs@luther.lothar.com
+Received: (qmail 32220 invoked by uid 1000); 14 Jan 2003 21:50:04 -0000
+Delivered-To: warner-twistedcvs@lothar.com
+Received: (qmail 7923 invoked by uid 13574); 14 Jan 2003 21:49:48 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-twistedcvs@lothar.com>; 14 Jan 2003 21:49:48 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+ by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+ id 18YYr0-0005en-00; Tue, 14 Jan 2003 15:44:14 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+ id 18YYq7-0005eQ-00
+ for <twisted-commits@twistedmatrix.com>; Tue, 14 Jan 2003 15:43:19 -0600
+To: twisted-commits@twistedmatrix.com
+From: itamarst CVS <itamarst@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+X-Mailer: CVSToys
+From: itamarst CVS <itamarst@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+Message-Id: <E18YYq7-0005eQ-00@pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] submit formmethod now subclass of Choice
+Sender: twisted-commits-admin@twistedmatrix.com
+Errors-To: twisted-commits-admin@twistedmatrix.com
+X-BeenThere: twisted-commits@twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits@twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Tue, 14 Jan 2003 15:43:19 -0600
+Status:
+
+Modified files:
+Twisted/twisted/web/woven/form.py 1.20 1.21
+Twisted/twisted/python/formmethod.py 1.12 1.13
+
+Log message:
+submit formmethod now subclass of Choice
+
+
+Index: Twisted/twisted/web/woven/form.py
+diff -u Twisted/twisted/web/woven/form.py:1.20 Twisted/twisted/web/woven/form.py:1.21
+--- Twisted/twisted/web/woven/form.py:1.20 Tue Jan 14 12:07:29 2003
++++ Twisted/twisted/web/woven/form.py Tue Jan 14 13:43:16 2003
+@@ -140,8 +140,8 @@
+
+ def input_submit(self, request, content, arg):
+ div = content.div()
+- for value in arg.buttons:
+- div.input(type="submit", name=arg.name, value=value)
++ for tag, value, desc in arg.choices:
++ div.input(type="submit", name=arg.name, value=tag)
+ div.text(" ")
+ if arg.reset:
+ div.input(type="reset")
+
+Index: Twisted/twisted/python/formmethod.py
+diff -u Twisted/twisted/python/formmethod.py:1.12 Twisted/twisted/python/formmethod.py:1.13
+--- Twisted/twisted/python/formmethod.py:1.12 Tue Jan 14 12:07:30 2003
++++ Twisted/twisted/python/formmethod.py Tue Jan 14 13:43:17 2003
+@@ -180,19 +180,13 @@
+ return 1
+
+
+-class Submit(Argument):
++class Submit(Choice):
+ """Submit button or a reasonable facsimile thereof."""
+
+- def __init__(self, name, buttons=["Submit"], reset=0, shortDesc=None, longDesc=None):
+- Argument.__init__(self, name, shortDesc=shortDesc, longDesc=longDesc)
+- self.buttons = buttons
++ def __init__(self, name, choices=[("Submit", "submit", "Submit form")],
++ reset=0, shortDesc=None, longDesc=None):
++ Choice.__init__(self, name, choices=choices, shortDesc=shortDesc, longDesc=longDesc)
+ self.reset = reset
+-
+- def coerce(self, val):
+- if val in self.buttons:
+- return val
+- else:
+- raise InputError, "no such action"
+
+
+ class PresentationHint:
+
+.
+
+_______________________________________________
+Twisted-commits mailing list
+Twisted-commits@twistedmatrix.com
+http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
diff --git a/buildbot/buildbot/test/mail/freshcvs.4 b/buildbot/buildbot/test/mail/freshcvs.4
new file mode 100644
index 0000000..9e674dc
--- /dev/null
+++ b/buildbot/buildbot/test/mail/freshcvs.4
@@ -0,0 +1,45 @@
+Return-Path: <twisted-commits-admin@twistedmatrix.com>
+Delivered-To: warner-twistedcvs@luther.lothar.com
+Received: (qmail 32220 invoked by uid 1000); 14 Jan 2003 21:50:04 -0000
+Delivered-To: warner-twistedcvs@lothar.com
+Received: (qmail 7923 invoked by uid 13574); 14 Jan 2003 21:49:48 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-twistedcvs@lothar.com>; 14 Jan 2003 21:49:48 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+ by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+ id 18YYr0-0005en-00; Tue, 14 Jan 2003 15:44:14 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+ id 18YYq7-0005eQ-00
+ for <twisted-commits@twistedmatrix.com>; Tue, 14 Jan 2003 15:43:19 -0600
+To: twisted-commits@twistedmatrix.com
+From: itamarst CVS <itamarst@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+X-Mailer: CVSToys
+From: itamarst CVS <itamarst@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+Message-Id: <E18YYq7-0005eQ-00@pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] submit formmethod now subclass of Choice
+Sender: twisted-commits-admin@twistedmatrix.com
+Errors-To: twisted-commits-admin@twistedmatrix.com
+X-BeenThere: twisted-commits@twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits@twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Tue, 14 Jan 2003 15:43:19 -0600
+Status:
+
+Modified files:
+Twisted/twisted/web/woven/form.py 1.20 1.21
+Twisted/twisted/python/formmethod.py 1.12 1.13
+
+Log message:
+submit formmethod now subclass of Choice
+
diff --git a/buildbot/buildbot/test/mail/freshcvs.5 b/buildbot/buildbot/test/mail/freshcvs.5
new file mode 100644
index 0000000..f20a958
--- /dev/null
+++ b/buildbot/buildbot/test/mail/freshcvs.5
@@ -0,0 +1,54 @@
+Return-Path: <twisted-commits-admin@twistedmatrix.com>
+Delivered-To: warner-twistedcvs@luther.lothar.com
+Received: (qmail 5865 invoked by uid 1000); 17 Jan 2003 07:00:04 -0000
+Delivered-To: warner-twistedcvs@lothar.com
+Received: (qmail 40460 invoked by uid 13574); 17 Jan 2003 06:51:55 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-twistedcvs@lothar.com>; 17 Jan 2003 06:51:55 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+ by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+ id 18ZQGk-0003WL-00; Fri, 17 Jan 2003 00:46:22 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+ id 18ZQFy-0003VP-00
+ for <twisted-commits@twistedmatrix.com>; Fri, 17 Jan 2003 00:45:34 -0600
+To: twisted-commits@twistedmatrix.com
+From: etrepum CVS <etrepum@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+X-Mailer: CVSToys
+From: etrepum CVS <etrepum@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+Message-Id: <E18ZQFy-0003VP-00@pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] Directory /cvs/Twisted/doc/examples/cocoaDemo added to the repository
+Sender: twisted-commits-admin@twistedmatrix.com
+Errors-To: twisted-commits-admin@twistedmatrix.com
+X-BeenThere: twisted-commits@twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits@twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Fri, 17 Jan 2003 00:45:34 -0600
+Status:
+
+Modified files:
+Twisted/doc/examples/cocoaDemo 0 0
+
+Log message:
+Directory /cvs/Twisted/doc/examples/cocoaDemo added to the repository
+
+
+ViewCVS links:
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo.diff?r1=text&tr1=NONE&r2=text&tr2=NONE&cvsroot=Twisted
+
+.
+
+_______________________________________________
+Twisted-commits mailing list
+Twisted-commits@twistedmatrix.com
+http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
diff --git a/buildbot/buildbot/test/mail/freshcvs.6 b/buildbot/buildbot/test/mail/freshcvs.6
new file mode 100644
index 0000000..20719f4
--- /dev/null
+++ b/buildbot/buildbot/test/mail/freshcvs.6
@@ -0,0 +1,70 @@
+Return-Path: <twisted-commits-admin@twistedmatrix.com>
+Delivered-To: warner-twistedcvs@luther.lothar.com
+Received: (qmail 7252 invoked by uid 1000); 17 Jan 2003 07:10:04 -0000
+Delivered-To: warner-twistedcvs@lothar.com
+Received: (qmail 43115 invoked by uid 13574); 17 Jan 2003 07:07:57 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-twistedcvs@lothar.com>; 17 Jan 2003 07:07:57 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+ by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+ id 18ZQW6-0003dA-00; Fri, 17 Jan 2003 01:02:14 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+ id 18ZQV7-0003cm-00
+ for <twisted-commits@twistedmatrix.com>; Fri, 17 Jan 2003 01:01:13 -0600
+To: twisted-commits@twistedmatrix.com
+From: etrepum CVS <etrepum@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+X-Mailer: CVSToys
+From: etrepum CVS <etrepum@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+Message-Id: <E18ZQV7-0003cm-00@pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] Cocoa (OS X) clone of the QT demo, using polling reactor
+Sender: twisted-commits-admin@twistedmatrix.com
+Errors-To: twisted-commits-admin@twistedmatrix.com
+X-BeenThere: twisted-commits@twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits@twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Fri, 17 Jan 2003 01:01:13 -0600
+Status:
+
+Modified files:
+Twisted/doc/examples/cocoaDemo/MyAppDelegate.py None 1.1
+Twisted/doc/examples/cocoaDemo/__main__.py None 1.1
+Twisted/doc/examples/cocoaDemo/bin-python-main.m None 1.1
+Twisted/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings None 1.1
+Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib None 1.1
+Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib None 1.1
+Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib None 1.1
+Twisted/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj None 1.1
+
+Log message:
+Cocoa (OS X) clone of the QT demo, using polling reactor
+
+Requires pyobjc ( http://pyobjc.sourceforge.net ), it's not much different than the template project. The reactor is iterated periodically by a repeating NSTimer.
+
+
+ViewCVS links:
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/MyAppDelegate.py.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/__main__.py.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/bin-python-main.m.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj.diff?r1=text&tr1=None&r2=text&tr2=1.1&cvsroot=Twisted
+
+.
+
+_______________________________________________
+Twisted-commits mailing list
+Twisted-commits@twistedmatrix.com
+http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
diff --git a/buildbot/buildbot/test/mail/freshcvs.7 b/buildbot/buildbot/test/mail/freshcvs.7
new file mode 100644
index 0000000..515be1d
--- /dev/null
+++ b/buildbot/buildbot/test/mail/freshcvs.7
@@ -0,0 +1,68 @@
+Return-Path: <twisted-commits-admin@twistedmatrix.com>
+Delivered-To: warner-twistedcvs@luther.lothar.com
+Received: (qmail 8665 invoked by uid 1000); 17 Jan 2003 08:00:03 -0000
+Delivered-To: warner-twistedcvs@lothar.com
+Received: (qmail 50728 invoked by uid 13574); 17 Jan 2003 07:51:14 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-twistedcvs@lothar.com>; 17 Jan 2003 07:51:14 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+ by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+ id 18ZRBm-0003pN-00; Fri, 17 Jan 2003 01:45:18 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+ id 18ZRBQ-0003ou-00
+ for <twisted-commits@twistedmatrix.com>; Fri, 17 Jan 2003 01:44:56 -0600
+To: twisted-commits@twistedmatrix.com
+From: etrepum CVS <etrepum@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+X-Mailer: CVSToys
+From: etrepum CVS <etrepum@twistedmatrix.com>
+Reply-To: twisted-python@twistedmatrix.com
+Message-Id: <E18ZRBQ-0003ou-00@pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] Directories break debian build script, waiting for reasonable fix
+Sender: twisted-commits-admin@twistedmatrix.com
+Errors-To: twisted-commits-admin@twistedmatrix.com
+X-BeenThere: twisted-commits@twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits@twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Fri, 17 Jan 2003 01:44:56 -0600
+Status:
+
+Modified files:
+Twisted/doc/examples/cocoaDemo/MyAppDelegate.py 1.1 None
+Twisted/doc/examples/cocoaDemo/__main__.py 1.1 None
+Twisted/doc/examples/cocoaDemo/bin-python-main.m 1.1 None
+Twisted/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings 1.1 None
+Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib 1.1 None
+Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib 1.1 None
+Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib 1.1 None
+Twisted/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj 1.1 None
+
+Log message:
+Directories break debian build script, waiting for reasonable fix
+
+
+ViewCVS links:
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/MyAppDelegate.py.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/__main__.py.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/bin-python-main.m.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+http://twistedmatrix.com/users/jh.twistd/viewcvs/cgi/viewcvs.cgi/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj.diff?r1=text&tr1=1.1&r2=text&tr2=None&cvsroot=Twisted
+
+.
+
+_______________________________________________
+Twisted-commits mailing list
+Twisted-commits@twistedmatrix.com
+http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
diff --git a/buildbot/buildbot/test/mail/freshcvs.8 b/buildbot/buildbot/test/mail/freshcvs.8
new file mode 100644
index 0000000..9b1e4fd
--- /dev/null
+++ b/buildbot/buildbot/test/mail/freshcvs.8
@@ -0,0 +1,61 @@
+Return-Path: <twisted-commits-admin@twistedmatrix.com>
+Delivered-To: warner-twistedcvs@luther.lothar.com
+Received: (qmail 10804 invoked by uid 1000); 19 Jan 2003 14:10:03 -0000
+Delivered-To: warner-twistedcvs@lothar.com
+Received: (qmail 6704 invoked by uid 13574); 19 Jan 2003 14:00:20 -0000
+Received: from unknown (HELO pyramid.twistedmatrix.com) ([64.123.27.105]) (envelope-sender <twisted-commits-admin@twistedmatrix.com>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-twistedcvs@lothar.com>; 19 Jan 2003 14:00:20 -0000
+Received: from localhost ([127.0.0.1] helo=pyramid.twistedmatrix.com)
+ by pyramid.twistedmatrix.com with esmtp (Exim 3.35 #1 (Debian))
+ id 18aFtx-0002WS-00; Sun, 19 Jan 2003 07:54:17 -0600
+Received: from acapnotic by pyramid.twistedmatrix.com with local (Exim 3.35 #1 (Debian))
+ id 18aFtH-0002W3-00
+ for <twisted-commits@twistedmatrix.com>; Sun, 19 Jan 2003 07:53:35 -0600
+To: twisted-commits@twistedmatrix.com
+From: acapnotic CVS <acapnotic@twistedmatrix.com>
+X-Mailer: CVSToys
+Message-Id: <E18aFtH-0002W3-00@pyramid.twistedmatrix.com>
+Subject: [Twisted-commits] it doesn't work with invalid syntax
+Sender: twisted-commits-admin@twistedmatrix.com
+Errors-To: twisted-commits-admin@twistedmatrix.com
+X-BeenThere: twisted-commits@twistedmatrix.com
+X-Mailman-Version: 2.0.11
+Precedence: bulk
+List-Help: <mailto:twisted-commits-request@twistedmatrix.com?subject=help>
+List-Post: <mailto:twisted-commits@twistedmatrix.com>
+List-Subscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=subscribe>
+List-Id: <twisted-commits.twistedmatrix.com>
+List-Unsubscribe: <http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits>,
+ <mailto:twisted-commits-request@twistedmatrix.com?subject=unsubscribe>
+List-Archive: <http://twistedmatrix.com/pipermail/twisted-commits/>
+Date: Sun, 19 Jan 2003 07:53:35 -0600
+Status:
+
+Modified files:
+CVSROOT/freshCfg 1.16 1.17
+
+Log message:
+it doesn't work with invalid syntax
+
+
+Index: CVSROOT/freshCfg
+diff -u CVSROOT/freshCfg:1.16 CVSROOT/freshCfg:1.17
+--- CVSROOT/freshCfg:1.16 Sun Jan 19 05:52:34 2003
++++ CVSROOT/freshCfg Sun Jan 19 05:53:34 2003
+@@ -27,7 +27,7 @@
+ ('/cvs', '^Reality', None, MailNotification(['reality-commits'])),
+ ('/cvs', '^Twistby', None, MailNotification(['acapnotic'])),
+ ('/cvs', '^CVSToys', None,
+- MailNotification(['CVSToys-list']
++ MailNotification(['CVSToys-list'],
+ "http://twistedmatrix.com/users/jh.twistd/"
+ "viewcvs/cgi/viewcvs.cgi/",
+ replyTo="cvstoys-list@twistedmatrix.com"),)
+
+
+_______________________________________________
+Twisted-commits mailing list
+Twisted-commits@twistedmatrix.com
+http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-commits
diff --git a/buildbot/buildbot/test/mail/freshcvs.9 b/buildbot/buildbot/test/mail/freshcvs.9
new file mode 100644
index 0000000..fd4f785
--- /dev/null
+++ b/buildbot/buildbot/test/mail/freshcvs.9
@@ -0,0 +1,18 @@
+From twisted-python@twistedmatrix.com Fri Dec 26 07:25:13 2003
+From: twisted-python@twistedmatrix.com (exarkun CVS)
+Date: Fri, 26 Dec 2003 00:25:13 -0700
+Subject: [Twisted-commits] Directory /cvs/Twisted/sandbox/exarkun/persist-plugin added to the repository
+Message-ID: <E1AZmLR-0000Tl-00@wolfwood>
+
+Modified files:
+Twisted/sandbox/exarkun/persist-plugin
+
+Log message:
+Directory /cvs/Twisted/sandbox/exarkun/persist-plugin added to the repository
+
+
+ViewCVS links:
+http://cvs.twistedmatrix.com/cvs/sandbox/exarkun/persist-plugin?cvsroot=Twisted
+
+
+
diff --git a/buildbot/buildbot/test/mail/svn-commit.1 b/buildbot/buildbot/test/mail/svn-commit.1
new file mode 100644
index 0000000..591dfee
--- /dev/null
+++ b/buildbot/buildbot/test/mail/svn-commit.1
@@ -0,0 +1,67 @@
+X-Original-To: jm@jmason.org
+Delivered-To: jm@dogma.boxhost.net
+Received: from localhost [127.0.0.1]
+ by localhost with IMAP (fetchmail-6.2.5)
+ for jm@localhost (single-drop); Wed, 12 Apr 2006 01:52:04 +0100 (IST)
+Received: from mail.apache.org (hermes.apache.org [209.237.227.199])
+ by dogma.boxhost.net (Postfix) with SMTP id 34F07310051
+ for <jm@jmason.org>; Wed, 12 Apr 2006 01:44:17 +0100 (IST)
+Received: (qmail 71414 invoked by uid 500); 12 Apr 2006 00:44:16 -0000
+Mailing-List: contact commits-help@spamassassin.apache.org; run by ezmlm
+Precedence: bulk
+list-help: <mailto:commits-help@spamassassin.apache.org>
+list-unsubscribe: <mailto:commits-unsubscribe@spamassassin.apache.org>
+List-Post: <mailto:commits@spamassassin.apache.org>
+Reply-To: "SpamAssassin Dev" <dev@spamassassin.apache.org>
+List-Id: <commits.spamassassin.apache.org>
+Delivered-To: mailing list commits@spamassassin.apache.org
+Received: (qmail 71403 invoked by uid 99); 12 Apr 2006 00:44:16 -0000
+Received: from asf.osuosl.org (HELO asf.osuosl.org) (140.211.166.49)
+ by apache.org (qpsmtpd/0.29) with ESMTP; Tue, 11 Apr 2006 17:44:16 -0700
+X-ASF-Spam-Status: No, hits=-9.4 required=10.0
+ tests=ALL_TRUSTED,NO_REAL_NAME
+Received: from [209.237.227.194] (HELO minotaur.apache.org) (209.237.227.194)
+ by apache.org (qpsmtpd/0.29) with SMTP; Tue, 11 Apr 2006 17:44:15 -0700
+Received: (qmail 51950 invoked by uid 65534); 12 Apr 2006 00:43:55 -0000
+Message-ID: <20060412004355.51949.qmail@minotaur.apache.org>
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: svn commit: r393348 - /spamassassin/trunk/sa-update.raw
+Date: Wed, 12 Apr 2006 00:43:54 -0000
+To: commits@spamassassin.apache.org
+From: felicity@apache.org
+X-Mailer: svnmailer-1.0.7
+X-Virus-Checked: Checked by ClamAV on apache.org
+Status: O
+X-UID: 62932
+X-Keywords:
+
+Author: felicity
+Date: Tue Apr 11 17:43:54 2006
+New Revision: 393348
+
+URL: http://svn.apache.org/viewcvs?rev=393348&view=rev
+Log:
+bug 4864: remove extraneous front-slash from gpghomedir path
+
+Modified:
+ spamassassin/trunk/sa-update.raw
+
+Modified: spamassassin/trunk/sa-update.raw
+URL: http://svn.apache.org/viewcvs/spamassassin/trunk/sa-update.raw?rev=393348&r1=393347&r2=393348&view=diff
+==============================================================================
+--- spamassassin/trunk/sa-update.raw (original)
++++ spamassassin/trunk/sa-update.raw Tue Apr 11 17:43:54 2006
+@@ -120,7 +120,7 @@
+ @{$opt{'channel'}} = ();
+ my $GPG_ENABLED = 1;
+
+-$opt{'gpghomedir'} = File::Spec->catfile($LOCAL_RULES_DIR, '/sa-update-keys');
++$opt{'gpghomedir'} = File::Spec->catfile($LOCAL_RULES_DIR, 'sa-update-keys');
+
+ Getopt::Long::Configure(
+ qw(bundling no_getopt_compat no_auto_abbrev no_ignore_case));
+
+
+
diff --git a/buildbot/buildbot/test/mail/svn-commit.2 b/buildbot/buildbot/test/mail/svn-commit.2
new file mode 100644
index 0000000..eeef001
--- /dev/null
+++ b/buildbot/buildbot/test/mail/svn-commit.2
@@ -0,0 +1,1218 @@
+X-Original-To: jm@jmason.org
+Delivered-To: jm@dogma.boxhost.net
+Received: from localhost [127.0.0.1]
+ by localhost with IMAP (fetchmail-6.2.5)
+ for jm@localhost (single-drop); Thu, 09 Mar 2006 21:44:57 +0000 (GMT)
+Received: from minotaur.apache.org (minotaur.apache.org [209.237.227.194])
+ by dogma.boxhost.net (Postfix) with SMTP id 0D3463105BF
+ for <jm@jmason.org>; Thu, 9 Mar 2006 19:52:50 +0000 (GMT)
+Received: (qmail 30661 invoked by uid 1833); 9 Mar 2006 19:52:44 -0000
+Delivered-To: jm@locus.apache.org
+Received: (qmail 30451 invoked from network); 9 Mar 2006 19:52:38 -0000
+Received: from hermes.apache.org (HELO mail.apache.org) (209.237.227.199)
+ by minotaur.apache.org with SMTP; 9 Mar 2006 19:52:38 -0000
+Received: (qmail 97860 invoked by uid 500); 9 Mar 2006 19:52:29 -0000
+Delivered-To: apmail-jm@apache.org
+Received: (qmail 97837 invoked by uid 500); 9 Mar 2006 19:52:28 -0000
+Mailing-List: contact commits-help@spamassassin.apache.org; run by ezmlm
+Precedence: bulk
+list-help: <mailto:commits-help@spamassassin.apache.org>
+list-unsubscribe: <mailto:commits-unsubscribe@spamassassin.apache.org>
+List-Post: <mailto:commits@spamassassin.apache.org>
+Reply-To: "SpamAssassin Dev" <dev@spamassassin.apache.org>
+List-Id: <commits.spamassassin.apache.org>
+Delivered-To: mailing list commits@spamassassin.apache.org
+Received: (qmail 97826 invoked by uid 99); 9 Mar 2006 19:52:28 -0000
+Received: from asf.osuosl.org (HELO asf.osuosl.org) (140.211.166.49)
+ by apache.org (qpsmtpd/0.29) with ESMTP; Thu, 09 Mar 2006 11:52:28 -0800
+X-ASF-Spam-Status: No, hits=-9.4 required=10.0
+ tests=ALL_TRUSTED,NO_REAL_NAME
+Received: from [209.237.227.194] (HELO minotaur.apache.org) (209.237.227.194)
+ by apache.org (qpsmtpd/0.29) with SMTP; Thu, 09 Mar 2006 11:52:26 -0800
+Received: (qmail 29644 invoked by uid 65534); 9 Mar 2006 19:52:06 -0000
+Message-ID: <20060309195206.29643.qmail@minotaur.apache.org>
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: svn commit: r384590 - in /spamassassin/branches/3.1: ./
+ lib/Mail/SpamAssassin/ lib/Mail/SpamAssassin/Plugin/ spamd/
+Date: Thu, 09 Mar 2006 19:52:02 -0000
+To: commits@spamassassin.apache.org
+From: sidney@apache.org
+X-Mailer: svnmailer-1.0.7
+X-Virus-Checked: Checked by ClamAV on apache.org
+Status: O
+X-UID: 60795
+X-Keywords:
+
+Author: sidney
+Date: Thu Mar 9 11:51:59 2006
+New Revision: 384590
+
+URL: http://svn.apache.org/viewcvs?rev=384590&view=rev
+Log:
+Bug 4696: consolidated fixes for timeout bugs
+
+Added:
+ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Timeout.pm
+Modified:
+ spamassassin/branches/3.1/MANIFEST
+ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Logger.pm
+ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DCC.pm
+ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DomainKeys.pm
+ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Pyzor.pm
+ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Razor2.pm
+ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/SPF.pm
+ spamassassin/branches/3.1/lib/Mail/SpamAssassin/SpamdForkScaling.pm
+ spamassassin/branches/3.1/spamd/spamd.raw
+
+Modified: spamassassin/branches/3.1/MANIFEST
+URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/MANIFEST?rev=384590&r1=384589&r2=384590&view=diff
+==============================================================================
+--- spamassassin/branches/3.1/MANIFEST (original)
++++ spamassassin/branches/3.1/MANIFEST Thu Mar 9 11:51:59 2006
+@@ -89,6 +89,7 @@
+ lib/Mail/SpamAssassin/SQLBasedAddrList.pm
+ lib/Mail/SpamAssassin/SpamdForkScaling.pm
+ lib/Mail/SpamAssassin/SubProcBackChannel.pm
++lib/Mail/SpamAssassin/Timeout.pm
+ lib/Mail/SpamAssassin/Util.pm
+ lib/Mail/SpamAssassin/Util/DependencyInfo.pm
+ lib/Mail/SpamAssassin/Util/Progress.pm
+
+Modified: spamassassin/branches/3.1/lib/Mail/SpamAssassin/Logger.pm
+URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/Logger.pm?rev=384590&r1=384589&r2=384590&view=diff
+==============================================================================
+--- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Logger.pm (original)
++++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Logger.pm Thu Mar 9 11:51:59 2006
+@@ -142,7 +142,7 @@
+
+ if ($level eq "error") {
+ # don't log alarm timeouts or broken pipes of various plugins' network checks
+- return if ($message[0] =~ /__(?:alarm|brokenpipe)__ignore__/);
++ return if ($message[0] =~ /__ignore__/);
+
+ # dos: we can safely ignore any die's that we eval'd in our own modules so
+ # don't log them -- this is caller 0, the use'ing package is 1, the eval is 2
+
+Modified: spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DCC.pm
+URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DCC.pm?rev=384590&r1=384589&r2=384590&view=diff
+==============================================================================
+--- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DCC.pm (original)
++++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DCC.pm Thu Mar 9 11:51:59 2006
+@@ -44,6 +44,7 @@
+
+ use Mail::SpamAssassin::Plugin;
+ use Mail::SpamAssassin::Logger;
++use Mail::SpamAssassin::Timeout;
+ use IO::Socket;
+ use strict;
+ use warnings;
+@@ -375,15 +376,10 @@
+
+ $permsgstatus->enter_helper_run_mode();
+
+- my $oldalarm = 0;
++ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
++ my $err = $timer->run_and_catch(sub {
+
+- eval {
+- # safe to use $SIG{ALRM} here instead of Util::trap_sigalrm_fully(),
+- # since there are no killer regexp hang dangers here
+- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
+- local $SIG{__DIE__}; # bug 4631
+-
+- $oldalarm = alarm $timeout;
++ local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
+
+ my $sock = IO::Socket::UNIX->new(Type => SOCK_STREAM,
+ Peer => $sockpath) || dbg("dcc: failed to open socket") && die;
+@@ -419,28 +415,20 @@
+ }
+
+ dbg("dcc: dccifd got response: $response");
++
++ });
+
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
+- };
++ $permsgstatus->leave_helper_run_mode();
+
+- my $err = $@;
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
++ if ($timer->timed_out()) {
++ dbg("dcc: dccifd check timed out after $timeout secs.");
++ return 0;
+ }
+- $permsgstatus->leave_helper_run_mode();
+
+ if ($err) {
+ chomp $err;
+- $response = undef;
+- if ($err eq "__alarm__ignore__") {
+- dbg("dcc: dccifd check timed out after $timeout secs.");
+- return 0;
+- } else {
+- warn("dcc: dccifd -> check skipped: $! $err");
+- return 0;
+- }
++ warn("dcc: dccifd -> check skipped: $! $err");
++ return 0;
+ }
+
+ if (!defined $response || $response !~ /^X-DCC/) {
+@@ -494,17 +482,12 @@
+
+ # use a temp file here -- open2() is unreliable, buffering-wise, under spamd
+ my $tmpf = $permsgstatus->create_fulltext_tmpfile($fulltext);
+- my $oldalarm = 0;
+-
+ my $pid;
+- eval {
+- # safe to use $SIG{ALRM} here instead of Util::trap_sigalrm_fully(),
+- # since there are no killer regexp hang dangers here
+- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
+- local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
+- local $SIG{__DIE__}; # bug 4631
+
+- $oldalarm = alarm $timeout;
++ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
++ my $err = $timer->run_and_catch(sub {
++
++ local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
+
+ # note: not really tainted, this came from system configuration file
+ my $path = Mail::SpamAssassin::Util::untaint_file_path($self->{main}->{conf}->{dcc_path});
+@@ -542,17 +525,7 @@
+
+ dbg("dcc: got response: $response");
+
+- # note: this must be called BEFORE leave_helper_run_mode()
+- # $self->cleanup_kids($pid);
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
+- };
+-
+- my $err = $@;
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
++ });
+
+ if (defined(fileno(*DCC))) { # still open
+ if ($pid) {
+@@ -564,11 +537,14 @@
+ }
+ $permsgstatus->leave_helper_run_mode();
+
++ if ($timer->timed_out()) {
++ dbg("dcc: check timed out after $timeout seconds");
++ return 0;
++ }
++
+ if ($err) {
+ chomp $err;
+- if ($err eq "__alarm__ignore__") {
+- dbg("dcc: check timed out after $timeout seconds");
+- } elsif ($err eq "__brokenpipe__ignore__") {
++ if ($err eq "__brokenpipe__ignore__") {
+ dbg("dcc: check failed: broken pipe");
+ } elsif ($err eq "no response") {
+ dbg("dcc: check failed: no response");
+@@ -645,47 +621,37 @@
+ my ($self, $options, $tmpf) = @_;
+ my $timeout = $options->{report}->{conf}->{dcc_timeout};
+
+- $options->{report}->enter_helper_run_mode();
++ # note: not really tainted, this came from system configuration file
++ my $path = Mail::SpamAssassin::Util::untaint_file_path($options->{report}->{conf}->{dcc_path});
+
+- my $oldalarm = 0;
++ my $opts = $options->{report}->{conf}->{dcc_options} || '';
+
+- eval {
+- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
+- local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
+- local $SIG{__DIE__}; # bug 4631
++ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
+
+- $oldalarm = alarm $timeout;
+-
+- # note: not really tainted, this came from system configuration file
+- my $path = Mail::SpamAssassin::Util::untaint_file_path($options->{report}->{conf}->{dcc_path});
++ $options->{report}->enter_helper_run_mode();
++ my $err = $timer->run_and_catch(sub {
+
+- my $opts = $options->{report}->{conf}->{dcc_options} || '';
++ local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
+
+ my $pid = Mail::SpamAssassin::Util::helper_app_pipe_open(*DCC,
+- $tmpf, 1, $path, "-t", "many", split(' ', $opts));
++ $tmpf, 1, $path, "-t", "many", split(' ', $opts));
+ $pid or die "$!\n";
+
+ my @ignored = <DCC>;
+ $options->{report}->close_pipe_fh(\*DCC);
+-
+ waitpid ($pid, 0);
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
+- };
++
++ });
++ $options->{report}->leave_helper_run_mode();
+
+- my $err = $@;
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
++ if ($timer->timed_out()) {
++ dbg("reporter: DCC report timed out after $timeout seconds");
++ return 0;
+ }
+
+- $options->{report}->leave_helper_run_mode();
+-
+ if ($err) {
+ chomp $err;
+- if ($err eq "__alarm__ignore__") {
+- dbg("reporter: DCC report timed out after $timeout seconds");
+- } elsif ($err eq "__brokenpipe__ignore__") {
++ if ($err eq "__brokenpipe__ignore__") {
+ dbg("reporter: DCC report failed: broken pipe");
+ } else {
+ warn("reporter: DCC report failed: $err\n");
+
+Modified: spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DomainKeys.pm
+URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DomainKeys.pm?rev=384590&r1=384589&r2=384590&view=diff
+==============================================================================
+--- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DomainKeys.pm (original)
++++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/DomainKeys.pm Thu Mar 9 11:51:59 2006
+@@ -34,6 +34,8 @@
+
+ use Mail::SpamAssassin::Plugin;
+ use Mail::SpamAssassin::Logger;
++use Mail::SpamAssassin::Timeout;
++
+ use strict;
+ use warnings;
+ use bytes;
+@@ -165,30 +167,22 @@
+ }
+
+ my $timeout = $scan->{conf}->{domainkeys_timeout};
+- my $oldalarm = 0;
+
+- eval {
+- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
+- local $SIG{__DIE__}; # bug 4631
+- $oldalarm = alarm($timeout);
++ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
++ my $err = $timer->run_and_catch(sub {
++
+ $self->_dk_lookup_trapped($scan, $message, $domain);
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
+- };
+-
+- my $err = $@;
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
++
++ });
++
++ if ($timer->timed_out()) {
++ dbg("dk: lookup timed out after $timeout seconds");
++ return 0;
+ }
+
+ if ($err) {
+ chomp $err;
+- if ($err eq "__alarm__ignore__") {
+- dbg("dk: lookup timed out after $timeout seconds");
+- } else {
+- warn("dk: lookup failed: $err\n");
+- }
++ warn("dk: lookup failed: $err\n");
+ return 0;
+ }
+
+
+Modified: spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Pyzor.pm
+URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Pyzor.pm?rev=384590&r1=384589&r2=384590&view=diff
+==============================================================================
+--- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Pyzor.pm (original)
++++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Pyzor.pm Thu Mar 9 11:51:59 2006
+@@ -35,6 +35,7 @@
+
+ use Mail::SpamAssassin::Plugin;
+ use Mail::SpamAssassin::Logger;
++use Mail::SpamAssassin::Timeout;
+ use strict;
+ use warnings;
+ use bytes;
+@@ -229,27 +230,22 @@
+
+ $pyzor_count = 0;
+ $pyzor_whitelisted = 0;
+-
+- $permsgstatus->enter_helper_run_mode();
++ my $pid;
+
+ # use a temp file here -- open2() is unreliable, buffering-wise, under spamd
+ my $tmpf = $permsgstatus->create_fulltext_tmpfile($fulltext);
+- my $oldalarm = 0;
+
+- my $pid;
+- eval {
+- # safe to use $SIG{ALRM} here instead of Util::trap_sigalrm_fully(),
+- # since there are no killer regexp hang dangers here
+- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
+- local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
+- local $SIG{__DIE__}; # bug 4631
++ # note: not really tainted, this came from system configuration file
++ my $path = Mail::SpamAssassin::Util::untaint_file_path($self->{main}->{conf}->{pyzor_path});
++
++ my $opts = $self->{main}->{conf}->{pyzor_options} || '';
+
+- $oldalarm = alarm $timeout;
++ $permsgstatus->enter_helper_run_mode();
+
+- # note: not really tainted, this came from system configuration file
+- my $path = Mail::SpamAssassin::Util::untaint_file_path($self->{main}->{conf}->{pyzor_path});
++ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
++ my $err = $timer->run_and_catch(sub {
+
+- my $opts = $self->{main}->{conf}->{pyzor_options} || '';
++ local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
+
+ dbg("pyzor: opening pipe: " . join(' ', $path, $opts, "check", "< $tmpf"));
+
+@@ -273,21 +269,7 @@
+ die("internal error\n");
+ }
+
+- # note: this must be called BEFORE leave_helper_run_mode()
+- # $self->cleanup_kids($pid);
+-
+- # attempt to call this inside the eval, as leaving this scope is
+- # a slow operation and timing *that* out is pointless
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
+- };
+-
+- # clear the alarm before doing lots of time-consuming hard work
+- my $err = $@;
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
++ });
+
+ if (defined(fileno(*PYZOR))) { # still open
+ if ($pid) {
+@@ -299,11 +281,14 @@
+ }
+ $permsgstatus->leave_helper_run_mode();
+
++ if ($timer->timed_out()) {
++ dbg("pyzor: check timed out after $timeout seconds");
++ return 0;
++ }
++
+ if ($err) {
+ chomp $err;
+- if ($err eq "__alarm__ignore__") {
+- dbg("pyzor: check timed out after $timeout seconds");
+- } elsif ($err eq "__brokenpipe__ignore__") {
++ if ($err eq "__brokenpipe__ignore__") {
+ dbg("pyzor: check failed: broken pipe");
+ } elsif ($err eq "no response") {
+ dbg("pyzor: check failed: no response");
+@@ -364,23 +349,19 @@
+
+ sub pyzor_report {
+ my ($self, $options, $tmpf) = @_;
++
++ # note: not really tainted, this came from system configuration file
++ my $path = Mail::SpamAssassin::Util::untaint_file_path($options->{report}->{conf}->{pyzor_path});
++
++ my $opts = $options->{report}->{conf}->{pyzor_options} || '';
+ my $timeout = $self->{main}->{conf}->{pyzor_timeout};
+
+ $options->{report}->enter_helper_run_mode();
+
+- my $oldalarm = 0;
++ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
++ my $err = $timer->run_and_catch(sub {
+
+- eval {
+- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
+ local $SIG{PIPE} = sub { die "__brokenpipe__ignore__\n" };
+- local $SIG{__DIE__}; # bug 4631
+-
+- $oldalarm = alarm $timeout;
+-
+- # note: not really tainted, this came from system configuration file
+- my $path = Mail::SpamAssassin::Util::untaint_file_path($options->{report}->{conf}->{pyzor_path});
+-
+- my $opts = $options->{report}->{conf}->{pyzor_options} || '';
+
+ dbg("pyzor: opening pipe: " . join(' ', $path, $opts, "report", "< $tmpf"));
+
+@@ -391,23 +372,19 @@
+ my @ignored = <PYZOR>;
+ $options->{report}->close_pipe_fh(\*PYZOR);
+
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
+ waitpid ($pid, 0);
+- };
++ });
+
+- my $err = $@;
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
+ $options->{report}->leave_helper_run_mode();
+
++ if ($timer->timed_out()) {
++ dbg("reporter: pyzor report timed out after $timeout seconds");
++ return 0;
++ }
++
+ if ($err) {
+ chomp $err;
+- if ($err eq '__alarm__ignore__') {
+- dbg("reporter: pyzor report timed out after $timeout seconds");
+- } elsif ($err eq '__brokenpipe__ignore__') {
++ if ($err eq '__brokenpipe__ignore__') {
+ dbg("reporter: pyzor report failed: broken pipe");
+ } else {
+ warn("reporter: pyzor report failed: $err\n");
+
+Modified: spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Razor2.pm
+URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Razor2.pm?rev=384590&r1=384589&r2=384590&view=diff
+==============================================================================
+--- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Razor2.pm (original)
++++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/Razor2.pm Thu Mar 9 11:51:59 2006
+@@ -143,14 +143,11 @@
+ }
+
+ Mail::SpamAssassin::PerMsgStatus::enter_helper_run_mode($self);
+- my $oldalarm = 0;
+
+- eval {
+- local ($^W) = 0; # argh, warnings in Razor
++ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
++ my $err = $timer->run_and_catch(sub {
+
+- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
+- local $SIG{__DIE__}; # bug 4631
+- $oldalarm = alarm $timeout;
++ local ($^W) = 0; # argh, warnings in Razor
+
+ # everything's in the module!
+ my $rc = Razor2::Client::Agent->new("razor-$type");
+@@ -184,7 +181,7 @@
+ # let's reset the alarm since get_server_info() calls
+ # nextserver() which calls discover() which very likely will
+ # reset the alarm for us ... how polite. :(
+- alarm $timeout;
++ $timer->reset();
+
+ # no facility prefix on this die
+ my $sigs = $rc->compute_sigs($objects)
+@@ -219,100 +216,96 @@
+ my $error = $rc->errprefix("$debug: spamassassin") || "$debug: razor2 had unknown error during disconnect";
+ die $error;
+ }
++ }
+
+- # if we got here, we're done doing remote stuff, abort the alert
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
+-
+- # Razor 2.14 says that if we get here, we did ok.
+- $return = 1;
++ # Razor 2.14 says that if we get here, we did ok.
++ $return = 1;
+
+- # figure out if we have a log file we need to close...
+- if (ref($rc->{logref}) && exists $rc->{logref}->{fd}) {
+- # the fd can be stdout or stderr, so we need to find out if it is
+- # so we don't close them by accident. Note: we can't just
+- # undef the fd here (like the IO::Handle manpage says we can)
+- # because it won't actually close, unfortunately. :(
+- my $untie = 1;
+- foreach my $log (*STDOUT{IO}, *STDERR{IO}) {
+- if ($log == $rc->{logref}->{fd}) {
+- $untie = 0;
+- last;
+- }
+- }
+- close $rc->{logref}->{fd} if ($untie);
+- }
+-
+- if ($type eq 'check') {
+- # so $objects->[0] is the first (only) message, and ->{spam} is a general yes/no
+- push(@results, { result => $objects->[0]->{spam} });
++ # figure out if we have a log file we need to close...
++ if (ref($rc->{logref}) && exists $rc->{logref}->{fd}) {
++ # the fd can be stdout or stderr, so we need to find out if it is
++ # so we don't close them by accident. Note: we can't just
++ # undef the fd here (like the IO::Handle manpage says we can)
++ # because it won't actually close, unfortunately. :(
++ my $untie = 1;
++ foreach my $log (*STDOUT{IO}, *STDERR{IO}) {
++ if ($log == $rc->{logref}->{fd}) {
++ $untie = 0;
++ last;
++ }
++ }
++ close $rc->{logref}->{fd} if ($untie);
++ }
+
+- # great for debugging, but leave this off!
+- #use Data::Dumper;
+- #print Dumper($objects),"\n";
+-
+- # ->{p} is for each part of the message
+- # so go through each part, taking the highest cf we find
+- # of any part that isn't contested (ct). This helps avoid false
+- # positives. equals logic_method 4.
+- #
+- # razor-agents < 2.14 have a different object format, so we now support both.
+- # $objects->[0]->{resp} vs $objects->[0]->{p}->[part #]->{resp}
+- my $part = 0;
+- my $arrayref = $objects->[0]->{p} || $objects;
+- if (defined $arrayref) {
+- foreach my $cf (@{$arrayref}) {
+- if (exists $cf->{resp}) {
+- for (my $response=0; $response<@{$cf->{resp}}; $response++) {
+- my $tmp = $cf->{resp}->[$response];
+- my $tmpcf = $tmp->{cf}; # Part confidence
+- my $tmpct = $tmp->{ct}; # Part contested?
+- my $engine = $cf->{sent}->[$response]->{e};
+-
+- # These should always be set, but just in case ...
+- $tmpcf = 0 unless defined $tmpcf;
+- $tmpct = 0 unless defined $tmpct;
+- $engine = 0 unless defined $engine;
+-
+- push(@results,
+- { part => $part, engine => $engine, contested => $tmpct, confidence => $tmpcf });
+- }
+- }
+- else {
+- push(@results, { part => $part, noresponse => 1 });
+- }
+- $part++;
+- }
+- }
+- else {
+- # If we have some new $objects format that isn't close to
+- # the current razor-agents 2.x version, we won't FP but we
+- # should alert in debug.
+- dbg("$debug: it looks like the internal Razor object has changed format!");
+- }
+- }
++ if ($type eq 'check') {
++ # so $objects->[0] is the first (only) message, and ->{spam} is a general yes/no
++ push(@results, { result => $objects->[0]->{spam} });
++
++ # great for debugging, but leave this off!
++ #use Data::Dumper;
++ #print Dumper($objects),"\n";
++
++ # ->{p} is for each part of the message
++ # so go through each part, taking the highest cf we find
++ # of any part that isn't contested (ct). This helps avoid false
++ # positives. equals logic_method 4.
++ #
++ # razor-agents < 2.14 have a different object format, so we now support both.
++ # $objects->[0]->{resp} vs $objects->[0]->{p}->[part #]->{resp}
++ my $part = 0;
++ my $arrayref = $objects->[0]->{p} || $objects;
++ if (defined $arrayref) {
++ foreach my $cf (@{$arrayref}) {
++ if (exists $cf->{resp}) {
++ for (my $response=0; $response<@{$cf->{resp}}; $response++) {
++ my $tmp = $cf->{resp}->[$response];
++ my $tmpcf = $tmp->{cf}; # Part confidence
++ my $tmpct = $tmp->{ct}; # Part contested?
++ my $engine = $cf->{sent}->[$response]->{e};
++
++ # These should always be set, but just in case ...
++ $tmpcf = 0 unless defined $tmpcf;
++ $tmpct = 0 unless defined $tmpct;
++ $engine = 0 unless defined $engine;
++
++ push(@results,
++ { part => $part, engine => $engine, contested => $tmpct, confidence => $tmpcf });
++ }
++ }
++ else {
++ push(@results, { part => $part, noresponse => 1 });
++ }
++ $part++;
++ }
++ }
++ else {
++ # If we have some new $objects format that isn't close to
++ # the current razor-agents 2.x version, we won't FP but we
++ # should alert in debug.
++ dbg("$debug: it looks like the internal Razor object has changed format!");
++ }
+ }
+ }
+ else {
+ warn "$debug: undefined Razor2::Client::Agent\n";
+ }
+
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
+- };
++ });
++
++ # OK, that's enough Razor stuff. now, reset all that global
++ # state it futzes with :(
++ # work around serious brain damage in Razor2 (constant seed)
++ srand;
+
+- my $err = $@;
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
++ Mail::SpamAssassin::PerMsgStatus::leave_helper_run_mode($self);
++
++ if ($timer->timed_out()) {
++ dbg("$debug: razor2 $type timed out after $timeout seconds");
+ }
+
+ if ($err) {
+ chomp $err;
+- if ($err eq "__alarm__ignore__") {
+- dbg("$debug: razor2 $type timed out after $timeout seconds");
+- } elsif ($err =~ /(?:could not connect|network is unreachable)/) {
++ if ($err =~ /(?:could not connect|network is unreachable)/) {
+ # make this a dbg(); SpamAssassin will still continue,
+ # but without Razor checking. otherwise there may be
+ # DSNs and errors in syslog etc., yuck
+@@ -323,11 +316,6 @@
+ warn("$debug: razor2 $type failed: $! $err");
+ }
+ }
+-
+- # work around serious brain damage in Razor2 (constant seed)
+- srand;
+-
+- Mail::SpamAssassin::PerMsgStatus::leave_helper_run_mode($self);
+
+ # razor also debugs to stdout. argh. fix it to stderr...
+ if (would_log('dbg', $debug)) {
+
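The comments in the hunk above spell out the scoring rule the refactored code preserves: for a "check", take the highest confidence (cf) reported for any message part whose response is not contested (ct), i.e. razor-agents' logic_method 4. A minimal Python sketch of that selection rule follows; the dict keys are illustrative stand-ins, not the real razor-agents field names:

    def highest_uncontested_confidence(parts):
        # parts: e.g. [{'confidence': 9, 'contested': 1}, {'confidence': 3, 'contested': 0}]
        best = 0
        for p in parts:
            if not p.get('contested') and p.get('confidence', 0) > best:
                best = p['confidence']
        return best

    # the example list above yields 3: the cf of 9 is ignored because that part is contested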
+Modified: spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/SPF.pm
+URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/SPF.pm?rev=384590&r1=384589&r2=384590&view=diff
+==============================================================================
+--- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/SPF.pm (original)
++++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Plugin/SPF.pm Thu Mar 9 11:51:59 2006
+@@ -34,6 +34,7 @@
+
+ use Mail::SpamAssassin::Plugin;
+ use Mail::SpamAssassin::Logger;
++use Mail::SpamAssassin::Timeout;
+ use strict;
+ use warnings;
+ use bytes;
+@@ -300,30 +301,17 @@
+
+ my ($result, $comment);
+ my $timeout = $scanner->{conf}->{spf_timeout};
+- my $oldalarm = 0;
+
+- eval {
+- local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
+- local $SIG{__DIE__}; # bug 4631
+- $oldalarm = alarm($timeout);
++ my $timer = Mail::SpamAssassin::Timeout->new({ secs => $timeout });
++ my $err = $timer->run_and_catch(sub {
++
+ ($result, $comment) = $query->result();
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
+- };
+
+- my $err = $@;
+- if (defined $oldalarm) {
+- alarm $oldalarm; $oldalarm = undef;
+- }
++ });
+
+ if ($err) {
+ chomp $err;
+- if ($err eq "__alarm__ignore__") {
+- dbg("spf: lookup timed out after $timeout seconds");
+- } else {
+- warn("spf: lookup failed: $err\n");
+- }
++ warn("spf: lookup failed: $err\n");
+ return 0;
+ }
+
+
+Modified: spamassassin/branches/3.1/lib/Mail/SpamAssassin/SpamdForkScaling.pm
+URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/SpamdForkScaling.pm?rev=384590&r1=384589&r2=384590&view=diff
+==============================================================================
+--- spamassassin/branches/3.1/lib/Mail/SpamAssassin/SpamdForkScaling.pm (original)
++++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/SpamdForkScaling.pm Thu Mar 9 11:51:59 2006
+@@ -25,6 +25,7 @@
+
+ use Mail::SpamAssassin::Util;
+ use Mail::SpamAssassin::Logger;
++use Mail::SpamAssassin::Timeout;
+
+ use vars qw {
+ @PFSTATE_VARS %EXPORT_TAGS @EXPORT_OK
+@@ -109,6 +110,9 @@
+
+ delete $self->{kids}->{$pid};
+
++ # note this for the select()-caller's benefit
++ $self->{child_just_exited} = 1;
++
+ # remove the child from the backchannel list, too
+ $self->{backchannel}->delete_socket_for_child($pid);
+
+@@ -188,24 +192,63 @@
+ vec($rin, $self->{server_fileno}, 1) = 0;
+ }
+
+- my ($rout, $eout, $nfound, $timeleft);
++ my ($rout, $eout, $nfound, $timeleft, $selerr);
++
++ # use alarm to back up select()'s built-in alarm, to debug Theo's bug.
++ # not that I can remember what Theo's bug was, but hey ;) A good
++ # 60 seconds extra on the alarm() should make that quite rare...
++
++ my $timer = Mail::SpamAssassin::Timeout->new({ secs => ($tout*2) + 60 });
+
+- # use alarm to back up select()'s built-in alarm, to debug theo's bug
+- eval {
+- Mail::SpamAssassin::Util::trap_sigalrm_fully(sub { die "tcp timeout"; });
+- alarm ($tout*2) if ($tout);
++ $timer->run(sub {
++
++ $self->{child_just_exited} = 0;
+ ($nfound, $timeleft) = select($rout=$rin, undef, $eout=$rin, $tout);
+- };
+- alarm 0;
++ $selerr = $!;
+
+- if ($@) {
+- warn "prefork: select timeout failed! recovering\n";
+- sleep 1; # avoid overload
+- return;
+- }
++ });
++
++ # bug 4696: under load, the process can go for such a long time without
++ # being context-switched in, that when it does return the alarm() fires
++ # before the select() timeout does. Treat this as a select() timeout
++ if ($timer->timed_out) {
++ dbg("prefork: select timed out (via alarm)");
++ $nfound = 0;
++ $timeleft = 0;
++ }
++
++ # errors; handle undef *or* -1 returned. do this before "errors on
++ # the handle" below, since an error condition is signalled both via
++ # a -1 return and a $eout bit.
++ if (!defined $nfound || $nfound < 0)
++ {
++ if (exists &Errno::EINTR && $selerr == &Errno::EINTR)
++ {
++ # this happens if the process is signalled during the select(),
++ # for example if someone sends SIGHUP to reload the configuration.
++ # just return immediately
++ dbg("prefork: select returned err $selerr, probably signalled");
++ return;
++ }
++
++ # if a child exits during that select() call, it generates a spurious
++ # error, like this:
++ #
++ # Jan 29 12:53:17 dogma spamd[18518]: prefork: child states: BI
++ # Jan 29 12:53:17 dogma spamd[18518]: spamd: handled cleanup of child pid 13101 due to SIGCHLD
++ # Jan 29 12:53:17 dogma spamd[18518]: prefork: select returned -1! recovering:
++ #
++ # avoid by setting a boolean in the child_exited() callback and checking
++ # it here. log $! just in case, though.
++ if ($self->{child_just_exited} && $nfound == -1) {
++ dbg("prefork: select returned -1 due to child exiting, ignored ($selerr)");
++ return;
++ }
++
++ warn "prefork: select returned ".
++ (defined $nfound ? $nfound : "undef").
++ "! recovering: $selerr\n";
+
+- if (!defined $nfound) {
+- warn "prefork: select returned undef! recovering\n";
+ sleep 1; # avoid overload
+ return;
+ }
+@@ -213,7 +256,7 @@
+ # errors on the handle?
+ # return them immediately, they may be from a SIGHUP restart signal
+ if (vec ($eout, $self->{server_fileno}, 1)) {
+- warn "prefork: select returned error on server filehandle: $!\n";
++ warn "prefork: select returned error on server filehandle: $selerr $!\n";
+ return;
+ }
+
+@@ -282,7 +325,7 @@
+
+ my ($sock, $kid);
+ while (($kid, $sock) = each %{$self->{backchannel}->{kids}}) {
+- $self->syswrite_with_retry($sock, PF_PING_ORDER) and next;
++ $self->syswrite_with_retry($sock, PF_PING_ORDER, $kid, 3) and next;
+
+ warn "prefork: write of ping failed to $kid fd=".$sock->fileno.": ".$!;
+
+@@ -353,7 +396,7 @@
+ return $self->order_idle_child_to_accept();
+ }
+
+- if (!$self->syswrite_with_retry($sock, PF_ACCEPT_ORDER))
++ if (!$self->syswrite_with_retry($sock, PF_ACCEPT_ORDER, $kid))
+ {
+ # failure to write to the child; bad news. call it dead
+ warn "prefork: killing rogue child $kid, failed to write on fd ".$sock->fileno.": $!\n";
+@@ -396,7 +439,7 @@
+ my ($self, $kid) = @_;
+ if ($self->{waiting_for_idle_child}) {
+ my $sock = $self->{backchannel}->get_socket_for_child($kid);
+- $self->syswrite_with_retry($sock, PF_ACCEPT_ORDER)
++ $self->syswrite_with_retry($sock, PF_ACCEPT_ORDER, $kid)
+ or die "prefork: $kid claimed it was ready, but write failed on fd ".
+ $sock->fileno.": ".$!;
+ $self->{waiting_for_idle_child} = 0;
+@@ -426,7 +469,7 @@
+ sub report_backchannel_socket {
+ my ($self, $str) = @_;
+ my $sock = $self->{backchannel}->get_parent_socket();
+- $self->syswrite_with_retry($sock, $str)
++ $self->syswrite_with_retry($sock, $str, 'parent')
+ or write "syswrite() to parent failed: $!";
+ }
+
+@@ -537,12 +580,31 @@
+ }
+
+ sub syswrite_with_retry {
+- my ($self, $sock, $buf) = @_;
++ my ($self, $sock, $buf, $targetname, $numretries) = @_;
++ $numretries ||= 10; # default 10 retries
+
+ my $written = 0;
++ my $try = 0;
+
+ retry_write:
++
++ $try++;
++ if ($try > 1) {
++ warn "prefork: syswrite(".$sock->fileno.") to $targetname failed on try $try";
++ if ($try > $numretries) {
++ warn "prefork: giving up";
++ return undef;
++ }
++ else {
++ # give it 1 second to recover before the next attempt
++ my $rout = '';
++ vec($rout, $sock->fileno, 1) = 1;
++ select(undef, $rout, undef, 1);
++ }
++ }
++
+ my $nbytes = $sock->syswrite($buf);
++
+ if (!defined $nbytes) {
+ unless ((exists &Errno::EAGAIN && $! == &Errno::EAGAIN)
+ || (exists &Errno::EWOULDBLOCK && $! == &Errno::EWOULDBLOCK))
+@@ -551,13 +613,7 @@
+ return undef;
+ }
+
+- warn "prefork: syswrite(".$sock->fileno.") failed, retrying...";
+-
+- # give it 5 seconds to recover. we retry indefinitely.
+- my $rout = '';
+- vec($rout, $sock->fileno, 1) = 1;
+- select(undef, $rout, undef, 5);
+-
++ warn "prefork: retrying syswrite(): $!";
+ goto retry_write;
+ }
+ else {
+@@ -568,7 +624,8 @@
+ return $written; # it's complete, we can return
+ }
+ else {
+- warn "prefork: partial write of $nbytes, towrite=".length($buf).
++ warn "prefork: partial write of $nbytes to ".
++ $targetname.", towrite=".length($buf).
+ " sofar=".$written." fd=".$sock->fileno.", recovering";
+ goto retry_write;
+ }
+
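The syswrite_with_retry() changes above turn an indefinite retry loop into a bounded one: on EAGAIN/EWOULDBLOCK the parent waits up to a second for the descriptor to become writable, retries, and gives up after a configurable number of attempts, naming the target child in its warnings. A rough Python sketch of the same pattern, assuming a non-blocking socket; this is an illustration only, not buildbot or spamd code:

    import errno, select, socket

    def write_with_retry(sock, data, retries=10):
        written = 0
        for attempt in range(retries + 1):
            if attempt:
                # wait up to a second for the fd to become writable again
                select.select([], [sock], [], 1.0)
            try:
                while written < len(data):
                    written += sock.send(data[written:])
                return written                  # write is complete
            except socket.error as e:
                if e.errno not in (errno.EAGAIN, errno.EWOULDBLOCK):
                    raise                       # a real error, not "try again"
        return None                             # gave up after too many attempts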
+Added: spamassassin/branches/3.1/lib/Mail/SpamAssassin/Timeout.pm
+URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/lib/Mail/SpamAssassin/Timeout.pm?rev=384590&view=auto
+==============================================================================
+--- spamassassin/branches/3.1/lib/Mail/SpamAssassin/Timeout.pm (added)
++++ spamassassin/branches/3.1/lib/Mail/SpamAssassin/Timeout.pm Thu Mar 9 11:51:59 2006
+@@ -0,0 +1,215 @@
++# <@LICENSE>
++# Copyright 2004 Apache Software Foundation
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++# </@LICENSE>
++
++=head1 NAME
++
++Mail::SpamAssassin::Timeout - safe, reliable timeouts in perl
++
++=head1 SYNOPSIS
++
++ # non-timeout code...
++
++ my $t = Mail::SpamAssassin::Timeout->new({ secs => 5 });
++
++ $t->run(sub {
++ # code to run with a 5-second timeout...
++ });
++
++ if ($t->timed_out()) {
++ # do something...
++ }
++
++ # more non-timeout code...
++
++=head1 DESCRIPTION
++
++This module provides a safe, reliable and clean API to provide
++C<alarm(2)>-based timeouts for perl code.
++
++Note that C<$SIG{ALRM}> is used to provide the timeout, so this will not
++interrupt out-of-control regular expression matches.
++
++Nested timeouts are supported.
++
++=head1 PUBLIC METHODS
++
++=over 4
++
++=cut
++
++package Mail::SpamAssassin::Timeout;
++
++use strict;
++use warnings;
++use bytes;
++
++use vars qw{
++ @ISA
++};
++
++@ISA = qw();
++
++###########################################################################
++
++=item my $t = Mail::SpamAssassin::Timeout->new({ ... options ... });
++
++Constructor. Options include:
++
++=over 4
++
++=item secs => $seconds
++
++timeout, in seconds. Optional; if not specified, no timeouts will be applied.
++
++=back
++
++=cut
++
++sub new {
++ my ($class, $opts) = @_;
++ $class = ref($class) || $class;
++ my %selfval = $opts ? %{$opts} : ();
++ my $self = \%selfval;
++
++ bless ($self, $class);
++ $self;
++}
++
++###########################################################################
++
++=item $t->run($coderef)
++
++Run a code reference within the currently-defined timeout.
++
++The timeout is as defined by the B<secs> parameter to the constructor.
++
++Returns whatever the subroutine returns, or C<undef> on timeout.
++If the timer times out, C<$t-E<gt>timed_out()> will return C<1>.
++
++Time elapsed is not cumulative; multiple runs of C<run> will restart the
++timeout from scratch.
++
++=item $t->run_and_catch($coderef)
++
++Run a code reference, as per C<$t-E<gt>run()>, but also catching any
++C<die()> calls within the code reference.
++
++Returns C<undef> if no C<die()> call was executed and C<$@> was unset, or the
++value of C<$@> if it was set. (The timeout event doesn't count as a C<die()>.)
++
++=cut
++
++sub run { $_[0]->_run($_[1], 0); }
++
++sub run_and_catch { $_[0]->_run($_[1], 1); }
++
++sub _run { # private
++ my ($self, $sub, $and_catch) = @_;
++
++ delete $self->{timed_out};
++
++ if (!$self->{secs}) { # no timeout! just call the sub and return.
++ return &$sub;
++ }
++
++ # assertion
++ if ($self->{secs} < 0) {
++ die "Mail::SpamAssassin::Timeout: oops? neg value for 'secs': $self->{secs}";
++ }
++
++ my $oldalarm = 0;
++ my $ret;
++
++ eval {
++ # note use of local to ensure closed scope here
++ local $SIG{ALRM} = sub { die "__alarm__ignore__\n" };
++ local $SIG{__DIE__}; # bug 4631
++
++ $oldalarm = alarm($self->{secs});
++
++ $ret = &$sub;
++
++ # Unset the alarm() before we leave eval{ } scope, as that stack-pop
++ # operation can take a second or two under load. Note: previous versions
++ # restored $oldalarm here; however, that is NOT what we want to do, since
++ # it creates a new race condition, namely that an old alarm could then fire
++ # while the stack-pop was underway, thereby appearing to be *this* timeout
++ # timing out. In terms of how we might possibly have nested timeouts in
++ # SpamAssassin, this is an academic issue with little impact, but it's
++ # still worth avoiding anyway.
++
++ alarm 0;
++ };
++
++ my $err = $@;
++
++ if (defined $oldalarm) {
++ # now, we could have died from a SIGALRM == timed out. if so,
++ # restore the previously-active one, or zero all timeouts if none
++ # were previously active.
++ alarm $oldalarm;
++ }
++
++ if ($err) {
++ if ($err =~ /__alarm__ignore__/) {
++ $self->{timed_out} = 1;
++ } else {
++ if ($and_catch) {
++ return $@;
++ } else {
++ die $@; # propagate any "real" errors
++ }
++ }
++ }
++
++ if ($and_catch) {
++ return; # undef
++ } else {
++ return $ret;
++ }
++}
++
++###########################################################################
++
++=item $t->timed_out()
++
++Returns C<1> if the most recent code executed in C<run()> timed out, or
++C<undef> if it did not.
++
++=cut
++
++sub timed_out {
++ my ($self) = @_;
++ return $self->{timed_out};
++}
++
++###########################################################################
++
++=item $t->reset()
++
++If called within a C<run()> code reference, causes the current alarm timer to
++be reset to its starting value.
++
++=cut
++
++sub reset {
++ my ($self) = @_;
++ alarm($self->{secs});
++}
++
++###########################################################################
++
++1;
+
+Modified: spamassassin/branches/3.1/spamd/spamd.raw
+URL: http://svn.apache.org/viewcvs/spamassassin/branches/3.1/spamd/spamd.raw?rev=384590&r1=384589&r2=384590&view=diff
+==============================================================================
+--- spamassassin/branches/3.1/spamd/spamd.raw (original)
++++ spamassassin/branches/3.1/spamd/spamd.raw Thu Mar 9 11:51:59 2006
+@@ -2049,6 +2049,9 @@
+ foreach (keys %children) {
+ kill 'INT' => $_;
+ my $pid = waitpid($_, 0);
++ if ($scaling) {
++ $scaling->child_exited($pid);
++ }
+ info("spamd: child $pid killed successfully");
+ }
+ %children = ();
+
+
+
+
+ \ No newline at end of file
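The Mail::SpamAssassin::Timeout module added in the commit mail above wraps a block of code in a SIGALRM-based timeout, remembers whether it fired, and re-arms any previously scheduled alarm afterwards. A minimal Python sketch of the same idea, for illustration only: the class and method names below are invented (they are not part of buildbot or SpamAssassin), and like the Perl original it only works in the main thread on Unix:

    import signal

    class _TimedOut(Exception):
        pass

    class AlarmTimeout:
        # illustrative analogue of Mail::SpamAssassin::Timeout's run()/timed_out()
        def __init__(self, secs):
            self.secs = secs
            self.timed_out = False

        def run(self, func):
            if not self.secs:
                return func()                 # no timeout requested
            self.timed_out = False
            def handler(signum, frame):
                raise _TimedOut()
            old_handler = signal.signal(signal.SIGALRM, handler)
            old_alarm = signal.alarm(self.secs)   # seconds left on any outer alarm
            try:
                return func()
            except _TimedOut:
                self.timed_out = True
                return None
            finally:
                signal.alarm(0)               # cancel our alarm before unwinding
                signal.signal(signal.SIGALRM, old_handler)
                if old_alarm:
                    signal.alarm(old_alarm)   # re-arm the outer timeout, if any

Typical use mirrors the POD synopsis quoted above: t = AlarmTimeout(5); result = t.run(some_network_call); then check t.timed_out.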
diff --git a/buildbot/buildbot/test/mail/syncmail.1 b/buildbot/buildbot/test/mail/syncmail.1
new file mode 100644
index 0000000..eb35e25
--- /dev/null
+++ b/buildbot/buildbot/test/mail/syncmail.1
@@ -0,0 +1,152 @@
+Return-Path: <warner@users.sourceforge.net>
+Delivered-To: warner-sourceforge@luther.lothar.com
+Received: (qmail 23758 invoked by uid 1000); 28 Jul 2003 07:22:14 -0000
+Delivered-To: warner-sourceforge@lothar.com
+Received: (qmail 62715 invoked by uid 13574); 28 Jul 2003 07:22:03 -0000
+Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-sourceforge@lothar.com>; 28 Jul 2003 07:22:03 -0000
+Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
+ by sc8-sf-list1.sourceforge.net with esmtp
+ (Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
+ id 19h2KY-0004Nr-00
+ for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
+Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
+ id 19h2KY-0001rv-00
+ for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
+Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
+ id 19h2KY-0003r4-00
+ for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
+From: warner@users.sourceforge.net
+To: warner@users.sourceforge.net
+Subject: buildbot/buildbot/changes freshcvsmail.py,1.2,1.3
+Message-Id: <E19h2KY-0003r4-00@sc8-pr-cvs1.sourceforge.net>
+Date: Mon, 28 Jul 2003 00:22:02 -0700
+Status:
+
+Update of /cvsroot/buildbot/buildbot/buildbot/changes
+In directory sc8-pr-cvs1:/tmp/cvs-serv14795/buildbot/changes
+
+Modified Files:
+ freshcvsmail.py
+Log Message:
+remove leftover code, leave a temporary compatibility import. Note! Start
+importing FCMaildirSource from changes.mail instead of changes.freshcvsmail
+
+
+Index: freshcvsmail.py
+===================================================================
+RCS file: /cvsroot/buildbot/buildbot/buildbot/changes/freshcvsmail.py,v
+retrieving revision 1.2
+retrieving revision 1.3
+diff -C2 -d -r1.2 -r1.3
+*** freshcvsmail.py 27 Jul 2003 18:54:08 -0000 1.2
+--- freshcvsmail.py 28 Jul 2003 07:22:00 -0000 1.3
+***************
+*** 1,96 ****
+ #! /usr/bin/python
+
+! from buildbot.interfaces import IChangeSource
+! from buildbot.changes.maildirtwisted import MaildirTwisted
+! from buildbot.changes.changes import Change
+! from rfc822 import Message
+! import os, os.path
+!
+! def parseFreshCVSMail(fd, prefix=None):
+! """Parse mail sent by FreshCVS"""
+! # this uses rfc822.Message so it can run under python2.1 . In the future
+! # it will be updated to use python2.2's "email" module.
+!
+! m = Message(fd)
+! # FreshCVS sets From: to "user CVS <user>", but the <> part may be
+! # modified by the MTA (to include a local domain)
+! name, addr = m.getaddr("from")
+! if not name:
+! return None # no From means this message isn't from FreshCVS
+! cvs = name.find(" CVS")
+! if cvs == -1:
+! return None # this message isn't from FreshCVS
+! who = name[:cvs]
+!
+! # we take the time of receipt as the time of checkin. Not correct,
+! # but it avoids the out-of-order-changes issue
+! #when = m.getdate() # and convert from 9-tuple, and handle timezone
+!
+! files = []
+! comments = ""
+! isdir = 0
+! lines = m.fp.readlines()
+! while lines:
+! line = lines.pop(0)
+! if line == "Modified files:\n":
+! break
+! while lines:
+! line = lines.pop(0)
+! if line == "\n":
+! break
+! line = line.rstrip("\n")
+! file, junk = line.split(None, 1)
+! if prefix:
+! # insist that the file start with the prefix: FreshCVS sends
+! # changes we don't care about too
+! bits = file.split(os.sep)
+! if bits[0] == prefix:
+! file = apply(os.path.join, bits[1:])
+! else:
+! break
+! if junk == "0 0":
+! isdir = 1
+! files.append(file)
+! while lines:
+! line = lines.pop(0)
+! if line == "Log message:\n":
+! break
+! # message is terminated by "ViewCVS links:" or "Index:..." (patch)
+! while lines:
+! line = lines.pop(0)
+! if line == "ViewCVS links:\n":
+! break
+! if line.find("Index: ") == 0:
+! break
+! comments += line
+! comments = comments.rstrip() + "\n"
+!
+! if not files:
+! return None
+!
+! change = Change(who, files, comments, isdir)
+!
+! return change
+!
+!
+!
+! class FCMaildirSource(MaildirTwisted):
+! """This source will watch a maildir that is subscribed to a FreshCVS
+! change-announcement mailing list.
+! """
+!
+! __implements__ = IChangeSource,
+
+! def __init__(self, maildir, prefix=None):
+! MaildirTwisted.__init__(self, maildir)
+! self.changemaster = None # filled in when added
+! self.prefix = prefix
+! def describe(self):
+! return "FreshCVS mailing list in maildir %s" % self.maildir.where
+! def messageReceived(self, filename):
+! path = os.path.join(self.basedir, "new", filename)
+! change = parseFreshCVSMail(open(path, "r"), self.prefix)
+! if change:
+! self.changemaster.addChange(change)
+! os.rename(os.path.join(self.basedir, "new", filename),
+! os.path.join(self.basedir, "cur", filename))
+--- 1,5 ----
+ #! /usr/bin/python
+
+! # leftover import for compatibility
+
+! from buildbot.changes.mail import FCMaildirSource
+
+
diff --git a/buildbot/buildbot/test/mail/syncmail.2 b/buildbot/buildbot/test/mail/syncmail.2
new file mode 100644
index 0000000..5296cbe
--- /dev/null
+++ b/buildbot/buildbot/test/mail/syncmail.2
@@ -0,0 +1,56 @@
+Return-Path: <warner@users.sourceforge.net>
+Delivered-To: warner-sourceforge@luther.lothar.com
+Received: (qmail 23221 invoked by uid 1000); 28 Jul 2003 06:53:15 -0000
+Delivered-To: warner-sourceforge@lothar.com
+Received: (qmail 58537 invoked by uid 13574); 28 Jul 2003 06:53:09 -0000
+Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-sourceforge@lothar.com>; 28 Jul 2003 06:53:09 -0000
+Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
+ by sc8-sf-list1.sourceforge.net with esmtp
+ (Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
+ id 19h1sb-0003nw-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:09 -0700
+Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
+ id 19h1sa-00018t-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:08 -0700
+Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
+ id 19h1sa-0002mX-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:08 -0700
+From: warner@users.sourceforge.net
+To: warner@users.sourceforge.net
+Subject: buildbot ChangeLog,1.93,1.94
+Message-Id: <E19h1sa-0002mX-00@sc8-pr-cvs1.sourceforge.net>
+Date: Sun, 27 Jul 2003 23:53:08 -0700
+Status:
+
+Update of /cvsroot/buildbot/buildbot
+In directory sc8-pr-cvs1:/tmp/cvs-serv10689
+
+Modified Files:
+ ChangeLog
+Log Message:
+ * NEWS: started adding new features
+
+
+Index: ChangeLog
+===================================================================
+RCS file: /cvsroot/buildbot/buildbot/ChangeLog,v
+retrieving revision 1.93
+retrieving revision 1.94
+diff -C2 -d -r1.93 -r1.94
+*** ChangeLog 27 Jul 2003 22:53:27 -0000 1.93
+--- ChangeLog 28 Jul 2003 06:53:06 -0000 1.94
+***************
+*** 1,4 ****
+--- 1,6 ----
+ 2003-07-27 Brian Warner <warner@lothar.com>
+
++ * NEWS: started adding new features
++
+ * buildbot/changes/mail.py: start work on Syncmail parser, move
+ mail sources into their own file
+
+
diff --git a/buildbot/buildbot/test/mail/syncmail.3 b/buildbot/buildbot/test/mail/syncmail.3
new file mode 100644
index 0000000..eee19b1
--- /dev/null
+++ b/buildbot/buildbot/test/mail/syncmail.3
@@ -0,0 +1,39 @@
+Return-Path: <warner@users.sourceforge.net>
+Delivered-To: warner-sourceforge@luther.lothar.com
+Received: (qmail 23196 invoked by uid 1000); 28 Jul 2003 06:51:53 -0000
+Delivered-To: warner-sourceforge@lothar.com
+Received: (qmail 58269 invoked by uid 13574); 28 Jul 2003 06:51:46 -0000
+Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-sourceforge@lothar.com>; 28 Jul 2003 06:51:46 -0000
+Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
+ by sc8-sf-list1.sourceforge.net with esmtp
+ (Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
+ id 19h1rF-00027s-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:46 -0700
+Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
+ id 19h1rF-00017O-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:45 -0700
+Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
+ id 19h1rF-0002jg-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:45 -0700
+From: warner@users.sourceforge.net
+To: warner@users.sourceforge.net
+Subject: CVSROOT syncmail,1.1,NONE
+Message-Id: <E19h1rF-0002jg-00@sc8-pr-cvs1.sourceforge.net>
+Date: Sun, 27 Jul 2003 23:51:45 -0700
+Status:
+
+Update of /cvsroot/buildbot/CVSROOT
+In directory sc8-pr-cvs1:/tmp/cvs-serv10515
+
+Removed Files:
+ syncmail
+Log Message:
+nevermind
+
+--- syncmail DELETED ---
+
+
diff --git a/buildbot/buildbot/test/mail/syncmail.4 b/buildbot/buildbot/test/mail/syncmail.4
new file mode 100644
index 0000000..44bda5d
--- /dev/null
+++ b/buildbot/buildbot/test/mail/syncmail.4
@@ -0,0 +1,290 @@
+Return-Path: <warner@users.sourceforge.net>
+Delivered-To: warner-sourceforge@luther.lothar.com
+Received: (qmail 24111 invoked by uid 1000); 28 Jul 2003 08:01:54 -0000
+Delivered-To: warner-sourceforge@lothar.com
+Received: (qmail 68756 invoked by uid 13574); 28 Jul 2003 08:01:46 -0000
+Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-sourceforge@lothar.com>; 28 Jul 2003 08:01:46 -0000
+Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
+ by sc8-sf-list1.sourceforge.net with esmtp
+ (Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
+ id 19h2wz-00029d-00
+ for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 01:01:45 -0700
+Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
+ id 19h2wz-0002XB-00
+ for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 01:01:45 -0700
+Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
+ id 19h2wz-0005a9-00
+ for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 01:01:45 -0700
+From: warner@users.sourceforge.net
+To: warner@users.sourceforge.net
+Subject: buildbot/test/mail syncmail.1,NONE,1.1 syncmail.2,NONE,1.1 syncmail.3,NONE,1.1
+Message-Id: <E19h2wz-0005a9-00@sc8-pr-cvs1.sourceforge.net>
+Date: Mon, 28 Jul 2003 01:01:45 -0700
+Status:
+
+Update of /cvsroot/buildbot/buildbot/test/mail
+In directory sc8-pr-cvs1:/tmp/cvs-serv21445
+
+Added Files:
+ syncmail.1 syncmail.2 syncmail.3
+Log Message:
+test cases for syncmail parser
+
+--- NEW FILE: syncmail.1 ---
+Return-Path: <warner@users.sourceforge.net>
+Delivered-To: warner-sourceforge@luther.lothar.com
+Received: (qmail 23758 invoked by uid 1000); 28 Jul 2003 07:22:14 -0000
+Delivered-To: warner-sourceforge@lothar.com
+Received: (qmail 62715 invoked by uid 13574); 28 Jul 2003 07:22:03 -0000
+Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-sourceforge@lothar.com>; 28 Jul 2003 07:22:03 -0000
+Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
+ by sc8-sf-list1.sourceforge.net with esmtp
+ (Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
+ id 19h2KY-0004Nr-00
+ for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
+Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
+ id 19h2KY-0001rv-00
+ for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
+Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
+ id 19h2KY-0003r4-00
+ for <warner@users.sourceforge.net>; Mon, 28 Jul 2003 00:22:02 -0700
+From: warner@users.sourceforge.net
+To: warner@users.sourceforge.net
+Subject: buildbot/buildbot/changes freshcvsmail.py,1.2,1.3
+Message-Id: <E19h2KY-0003r4-00@sc8-pr-cvs1.sourceforge.net>
+Date: Mon, 28 Jul 2003 00:22:02 -0700
+Status:
+
+Update of /cvsroot/buildbot/buildbot/buildbot/changes
+In directory sc8-pr-cvs1:/tmp/cvs-serv14795/buildbot/changes
+
+Modified Files:
+ freshcvsmail.py
+Log Message:
+remove leftover code, leave a temporary compatibility import. Note! Start
+importing FCMaildirSource from changes.mail instead of changes.freshcvsmail
+
+
+Index: freshcvsmail.py
+===================================================================
+RCS file: /cvsroot/buildbot/buildbot/buildbot/changes/freshcvsmail.py,v
+retrieving revision 1.2
+retrieving revision 1.3
+diff -C2 -d -r1.2 -r1.3
+*** freshcvsmail.py 27 Jul 2003 18:54:08 -0000 1.2
+--- freshcvsmail.py 28 Jul 2003 07:22:00 -0000 1.3
+***************
+*** 1,96 ****
+ #! /usr/bin/python
+
+! from buildbot.interfaces import IChangeSource
+! from buildbot.changes.maildirtwisted import MaildirTwisted
+! from buildbot.changes.changes import Change
+! from rfc822 import Message
+! import os, os.path
+!
+! def parseFreshCVSMail(fd, prefix=None):
+! """Parse mail sent by FreshCVS"""
+! # this uses rfc822.Message so it can run under python2.1 . In the future
+! # it will be updated to use python2.2's "email" module.
+!
+! m = Message(fd)
+! # FreshCVS sets From: to "user CVS <user>", but the <> part may be
+! # modified by the MTA (to include a local domain)
+! name, addr = m.getaddr("from")
+! if not name:
+! return None # no From means this message isn't from FreshCVS
+! cvs = name.find(" CVS")
+! if cvs == -1:
+! return None # this message isn't from FreshCVS
+! who = name[:cvs]
+!
+! # we take the time of receipt as the time of checkin. Not correct,
+! # but it avoids the out-of-order-changes issue
+! #when = m.getdate() # and convert from 9-tuple, and handle timezone
+!
+! files = []
+! comments = ""
+! isdir = 0
+! lines = m.fp.readlines()
+! while lines:
+! line = lines.pop(0)
+! if line == "Modified files:\n":
+! break
+! while lines:
+! line = lines.pop(0)
+! if line == "\n":
+! break
+! line = line.rstrip("\n")
+! file, junk = line.split(None, 1)
+! if prefix:
+! # insist that the file start with the prefix: FreshCVS sends
+! # changes we don't care about too
+! bits = file.split(os.sep)
+! if bits[0] == prefix:
+! file = apply(os.path.join, bits[1:])
+! else:
+! break
+! if junk == "0 0":
+! isdir = 1
+! files.append(file)
+! while lines:
+! line = lines.pop(0)
+! if line == "Log message:\n":
+! break
+! # message is terminated by "ViewCVS links:" or "Index:..." (patch)
+! while lines:
+! line = lines.pop(0)
+! if line == "ViewCVS links:\n":
+! break
+! if line.find("Index: ") == 0:
+! break
+! comments += line
+! comments = comments.rstrip() + "\n"
+!
+! if not files:
+! return None
+!
+! change = Change(who, files, comments, isdir)
+!
+! return change
+!
+!
+!
+! class FCMaildirSource(MaildirTwisted):
+! """This source will watch a maildir that is subscribed to a FreshCVS
+! change-announcement mailing list.
+! """
+!
+! __implements__ = IChangeSource,
+
+! def __init__(self, maildir, prefix=None):
+! MaildirTwisted.__init__(self, maildir)
+! self.changemaster = None # filled in when added
+! self.prefix = prefix
+! def describe(self):
+! return "FreshCVS mailing list in maildir %s" % self.maildir.where
+! def messageReceived(self, filename):
+! path = os.path.join(self.basedir, "new", filename)
+! change = parseFreshCVSMail(open(path, "r"), self.prefix)
+! if change:
+! self.changemaster.addChange(change)
+! os.rename(os.path.join(self.basedir, "new", filename),
+! os.path.join(self.basedir, "cur", filename))
+--- 1,5 ----
+ #! /usr/bin/python
+
+! # leftover import for compatibility
+
+! from buildbot.changes.mail import FCMaildirSource
+
+
+
+--- NEW FILE: syncmail.2 ---
+Return-Path: <warner@users.sourceforge.net>
+Delivered-To: warner-sourceforge@luther.lothar.com
+Received: (qmail 23221 invoked by uid 1000); 28 Jul 2003 06:53:15 -0000
+Delivered-To: warner-sourceforge@lothar.com
+Received: (qmail 58537 invoked by uid 13574); 28 Jul 2003 06:53:09 -0000
+Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-sourceforge@lothar.com>; 28 Jul 2003 06:53:09 -0000
+Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
+ by sc8-sf-list1.sourceforge.net with esmtp
+ (Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
+ id 19h1sb-0003nw-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:09 -0700
+Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
+ id 19h1sa-00018t-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:08 -0700
+Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
+ id 19h1sa-0002mX-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:53:08 -0700
+From: warner@users.sourceforge.net
+To: warner@users.sourceforge.net
+Subject: buildbot ChangeLog,1.93,1.94
+Message-Id: <E19h1sa-0002mX-00@sc8-pr-cvs1.sourceforge.net>
+Date: Sun, 27 Jul 2003 23:53:08 -0700
+Status:
+
+Update of /cvsroot/buildbot/buildbot
+In directory sc8-pr-cvs1:/tmp/cvs-serv10689
+
+Modified Files:
+ ChangeLog
+Log Message:
+ * NEWS: started adding new features
+
+
+Index: ChangeLog
+===================================================================
+RCS file: /cvsroot/buildbot/buildbot/ChangeLog,v
+retrieving revision 1.93
+retrieving revision 1.94
+diff -C2 -d -r1.93 -r1.94
+*** ChangeLog 27 Jul 2003 22:53:27 -0000 1.93
+--- ChangeLog 28 Jul 2003 06:53:06 -0000 1.94
+***************
+*** 1,4 ****
+--- 1,6 ----
+ 2003-07-27 Brian Warner <warner@lothar.com>
+
++ * NEWS: started adding new features
++
+ * buildbot/changes/mail.py: start work on Syncmail parser, move
+ mail sources into their own file
+
+
+
+--- NEW FILE: syncmail.3 ---
+Return-Path: <warner@users.sourceforge.net>
+Delivered-To: warner-sourceforge@luther.lothar.com
+Received: (qmail 23196 invoked by uid 1000); 28 Jul 2003 06:51:53 -0000
+Delivered-To: warner-sourceforge@lothar.com
+Received: (qmail 58269 invoked by uid 13574); 28 Jul 2003 06:51:46 -0000
+Received: from unknown (HELO sc8-sf-list1.sourceforge.net) ([66.35.250.206]) (envelope-sender <warner@users.sourceforge.net>)
+ by 130.94.181.6 (qmail-ldap-1.03) with SMTP
+ for <warner-sourceforge@lothar.com>; 28 Jul 2003 06:51:46 -0000
+Received: from sc8-sf-sshgate.sourceforge.net ([66.35.250.220] helo=sc8-sf-netmisc.sourceforge.net)
+ by sc8-sf-list1.sourceforge.net with esmtp
+ (Cipher TLSv1:DES-CBC3-SHA:168) (Exim 3.31-VA-mm2 #1 (Debian))
+ id 19h1rF-00027s-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:46 -0700
+Received: from sc8-pr-cvs1-b.sourceforge.net ([10.5.1.7] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-sf-netmisc.sourceforge.net with esmtp (Exim 3.36 #1 (Debian))
+ id 19h1rF-00017O-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:45 -0700
+Received: from localhost ([127.0.0.1] helo=sc8-pr-cvs1.sourceforge.net)
+ by sc8-pr-cvs1.sourceforge.net with esmtp (Exim 3.22 #1 (Debian))
+ id 19h1rF-0002jg-00
+ for <warner@users.sourceforge.net>; Sun, 27 Jul 2003 23:51:45 -0700
+From: warner@users.sourceforge.net
+To: warner@users.sourceforge.net
+Subject: CVSROOT syncmail,1.1,NONE
+Message-Id: <E19h1rF-0002jg-00@sc8-pr-cvs1.sourceforge.net>
+Date: Sun, 27 Jul 2003 23:51:45 -0700
+Status:
+
+Update of /cvsroot/buildbot/CVSROOT
+In directory sc8-pr-cvs1:/tmp/cvs-serv10515
+
+Removed Files:
+ syncmail
+Log Message:
+nevermind
+
+--- syncmail DELETED ---
+
+
+
+
diff --git a/buildbot/buildbot/test/mail/syncmail.5 b/buildbot/buildbot/test/mail/syncmail.5
new file mode 100644
index 0000000..82ba451
--- /dev/null
+++ b/buildbot/buildbot/test/mail/syncmail.5
@@ -0,0 +1,70 @@
+From thomas@otto.amantes Mon Feb 21 17:46:45 2005
+Return-Path: <thomas@otto.amantes>
+Received: from otto.amantes (otto.amantes [127.0.0.1]) by otto.amantes
+ (8.13.1/8.13.1) with ESMTP id j1LGkjr3011986 for <thomas@localhost>; Mon,
+ 21 Feb 2005 17:46:45 +0100
+Message-Id: <200502211646.j1LGkjr3011986@otto.amantes>
+From: Thomas Vander Stichele <thomas@otto.amantes>
+To: thomas@otto.amantes
+Subject: test1 s
+Date: Mon, 21 Feb 2005 16:46:45 +0000
+X-Mailer: Python syncmail $Revision: 1.1 $
+ <http://sf.net/projects/cvs-syncmail>
+Content-Transfer-Encoding: 8bit
+Mime-Version: 1.0
+
+Update of /home/cvs/test/test1
+In directory otto.amantes:/home/thomas/dev/tests/cvs/test1
+
+Added Files:
+ Tag: BRANCH-DEVEL
+ MANIFEST Makefile.am autogen.sh configure.in
+Log Message:
+stuff on the branch
+
+--- NEW FILE: Makefile.am ---
+SUBDIRS = src
+
+# normally I wouldn't distribute autogen.sh and friends with a tarball
+# but this one is specifically distributed for demonstration purposes
+
+EXTRA_DIST = autogen.sh
+
+# target for making the "import this into svn" tarball
+test:
+ mkdir test
+ for a in `cat MANIFEST`; do \
+ cp -pr $$a test/$$a; done
+ tar czf test.tar.gz test
+ rm -rf test
+
+--- NEW FILE: MANIFEST ---
+MANIFEST
+autogen.sh
+configure.in
+Makefile.am
+src
+src/Makefile.am
+src/test.c
+
+--- NEW FILE: autogen.sh ---
+#!/bin/sh
+
+set -x
+
+aclocal && \
+autoheader && \
+autoconf && \
+automake -a --foreign && \
+./configure $@
+
+--- NEW FILE: configure.in ---
+dnl configure.ac for version macro
+AC_INIT
+
+AM_CONFIG_HEADER(config.h)
+
+AM_INIT_AUTOMAKE(test, 0.0.0)
+AC_PROG_CC
+
+AC_OUTPUT(Makefile src/Makefile)
diff --git a/buildbot/buildbot/test/runutils.py b/buildbot/buildbot/test/runutils.py
new file mode 100644
index 0000000..2be85d6
--- /dev/null
+++ b/buildbot/buildbot/test/runutils.py
@@ -0,0 +1,516 @@
+
+import signal
+import shutil, os, errno
+from cStringIO import StringIO
+from twisted.internet import defer, reactor, protocol
+from twisted.python import log, util
+
+from buildbot import master, interfaces
+from buildbot.slave import bot
+from buildbot.buildslave import BuildSlave
+from buildbot.process.builder import Builder
+from buildbot.process.base import BuildRequest, Build
+from buildbot.process.buildstep import BuildStep
+from buildbot.sourcestamp import SourceStamp
+from buildbot.status import builder
+from buildbot.process.properties import Properties
+
+
+
+class _PutEverythingGetter(protocol.ProcessProtocol):
+ def __init__(self, deferred, stdin):
+ self.deferred = deferred
+ self.outBuf = StringIO()
+ self.errBuf = StringIO()
+ self.outReceived = self.outBuf.write
+ self.errReceived = self.errBuf.write
+ self.stdin = stdin
+
+ def connectionMade(self):
+ if self.stdin is not None:
+ self.transport.write(self.stdin)
+ self.transport.closeStdin()
+
+ def processEnded(self, reason):
+ out = self.outBuf.getvalue()
+ err = self.errBuf.getvalue()
+ e = reason.value
+ code = e.exitCode
+ if e.signal:
+ self.deferred.errback((out, err, e.signal))
+ else:
+ self.deferred.callback((out, err, code))
+
+def myGetProcessOutputAndValue(executable, args=(), env={}, path='.',
+ _reactor_ignored=None, stdin=None):
+ """Like twisted.internet.utils.getProcessOutputAndValue but takes
+ stdin, too."""
+ d = defer.Deferred()
+ p = _PutEverythingGetter(d, stdin)
+ reactor.spawnProcess(p, executable, (executable,)+tuple(args), env, path)
+ return d
+
+
+class MyBot(bot.Bot):
+ def remote_getSlaveInfo(self):
+ return self.parent.info
+
+class MyBuildSlave(bot.BuildSlave):
+ botClass = MyBot
+
+def rmtree(d):
+ try:
+ shutil.rmtree(d, ignore_errors=1)
+ except OSError, e:
+ # stupid 2.2 appears to ignore ignore_errors
+ if e.errno != errno.ENOENT:
+ raise
+
+class RunMixin:
+ master = None
+
+ def rmtree(self, d):
+ rmtree(d)
+
+ def setUp(self):
+ self.slaves = {}
+ self.rmtree("basedir")
+ os.mkdir("basedir")
+ self.master = master.BuildMaster("basedir")
+ self.status = self.master.getStatus()
+ self.control = interfaces.IControl(self.master)
+
+ def connectOneSlave(self, slavename, opts={}):
+ port = self.master.slavePort._port.getHost().port
+ self.rmtree("slavebase-%s" % slavename)
+ os.mkdir("slavebase-%s" % slavename)
+ slave = MyBuildSlave("localhost", port, slavename, "sekrit",
+ "slavebase-%s" % slavename,
+ keepalive=0, usePTY=False, debugOpts=opts)
+ slave.info = {"admin": "one"}
+ self.slaves[slavename] = slave
+ slave.startService()
+
+ def connectSlave(self, builders=["dummy"], slavename="bot1",
+ opts={}):
+ # connect buildslave 'slavename' and wait for it to connect to all of
+ # the given builders
+ dl = []
+ # initiate call for all of them, before waiting on result,
+ # otherwise we might miss some
+ for b in builders:
+ dl.append(self.master.botmaster.waitUntilBuilderAttached(b))
+ d = defer.DeferredList(dl)
+ self.connectOneSlave(slavename, opts)
+ return d
+
+ def connectSlaves(self, slavenames, builders):
+ dl = []
+ # initiate call for all of them, before waiting on result,
+ # otherwise we might miss some
+ for b in builders:
+ dl.append(self.master.botmaster.waitUntilBuilderAttached(b))
+ d = defer.DeferredList(dl)
+ for name in slavenames:
+ self.connectOneSlave(name)
+ return d
+
+ def connectSlave2(self):
+ # this takes over for bot1, so it has to share the slavename
+ port = self.master.slavePort._port.getHost().port
+ self.rmtree("slavebase-bot2")
+ os.mkdir("slavebase-bot2")
+ # this uses bot1, really
+ slave = MyBuildSlave("localhost", port, "bot1", "sekrit",
+ "slavebase-bot2", keepalive=0, usePTY=False)
+ slave.info = {"admin": "two"}
+ self.slaves['bot2'] = slave
+ slave.startService()
+
+ def connectSlaveFastTimeout(self):
+ # this slave has a very fast keepalive timeout
+ port = self.master.slavePort._port.getHost().port
+ self.rmtree("slavebase-bot1")
+ os.mkdir("slavebase-bot1")
+ slave = MyBuildSlave("localhost", port, "bot1", "sekrit",
+ "slavebase-bot1", keepalive=2, usePTY=False,
+ keepaliveTimeout=1)
+ slave.info = {"admin": "one"}
+ self.slaves['bot1'] = slave
+ slave.startService()
+ d = self.master.botmaster.waitUntilBuilderAttached("dummy")
+ return d
+
+ # things to start builds
+ def requestBuild(self, builder):
+ # returns a Deferred that fires with an IBuildStatus object when the
+ # build is finished
+ req = BuildRequest("forced build", SourceStamp(), 'test_builder')
+ self.control.getBuilder(builder).requestBuild(req)
+ return req.waitUntilFinished()
+
+ def failUnlessBuildSucceeded(self, bs):
+ if bs.getResults() != builder.SUCCESS:
+ log.msg("failUnlessBuildSucceeded noticed that the build failed")
+ self.logBuildResults(bs)
+ self.failUnlessEqual(bs.getResults(), builder.SUCCESS)
+ return bs # useful for chaining
+
+ def logBuildResults(self, bs):
+ # emit the build status and the contents of all logs to test.log
+ log.msg("logBuildResults starting")
+ log.msg(" bs.getResults() == %s" % builder.Results[bs.getResults()])
+ log.msg(" bs.isFinished() == %s" % bs.isFinished())
+ for s in bs.getSteps():
+ for l in s.getLogs():
+ log.msg("--- START step %s / log %s ---" % (s.getName(),
+ l.getName()))
+ if not l.getName().endswith(".html"):
+ log.msg(l.getTextWithHeaders())
+ log.msg("--- STOP ---")
+ log.msg("logBuildResults finished")
+
+ def tearDown(self):
+ log.msg("doing tearDown")
+ d = self.shutdownAllSlaves()
+ d.addCallback(self._tearDown_1)
+ d.addCallback(self._tearDown_2)
+ return d
+ def _tearDown_1(self, res):
+ if self.master:
+ return defer.maybeDeferred(self.master.stopService)
+ def _tearDown_2(self, res):
+ self.master = None
+ log.msg("tearDown done")
+
+
+ # various forms of slave death
+
+ def shutdownAllSlaves(self):
+ # the slave has disconnected normally: they SIGINT'ed it, or it shut
+ # down willingly. This will kill child processes and give them a
+ # chance to finish up. We return a Deferred that will fire when
+ # everything is finished shutting down.
+
+ log.msg("doing shutdownAllSlaves")
+ dl = []
+ for slave in self.slaves.values():
+ dl.append(slave.waitUntilDisconnected())
+ dl.append(defer.maybeDeferred(slave.stopService))
+ d = defer.DeferredList(dl)
+ d.addCallback(self._shutdownAllSlavesDone)
+ return d
+ def _shutdownAllSlavesDone(self, res):
+ for name in self.slaves.keys():
+ del self.slaves[name]
+ return self.master.botmaster.waitUntilBuilderFullyDetached("dummy")
+
+ def shutdownSlave(self, slavename, buildername):
+ # this slave has disconnected normally: they SIGINT'ed it, or it shut
+ # down willingly. This will kill child processes and give them a
+ # chance to finish up. We return a Deferred that will fire when
+ # everything is finished shutting down, and the given Builder knows
+ # that the slave has gone away.
+
+ s = self.slaves[slavename]
+ dl = [self.master.botmaster.waitUntilBuilderDetached(buildername),
+ s.waitUntilDisconnected()]
+ d = defer.DeferredList(dl)
+ d.addCallback(self._shutdownSlave_done, slavename)
+ s.stopService()
+ return d
+ def _shutdownSlave_done(self, res, slavename):
+ del self.slaves[slavename]
+
+ def killSlave(self):
+ # the slave has died, its host sent a FIN. The .notifyOnDisconnect
+ # callbacks will terminate the current step, so the build should be
+ # flunked (no further steps should be started).
+ self.slaves['bot1'].bf.continueTrying = 0
+ bot = self.slaves['bot1'].getServiceNamed("bot")
+ broker = bot.builders["dummy"].remote.broker
+ broker.transport.loseConnection()
+ del self.slaves['bot1']
+
+ def disappearSlave(self, slavename="bot1", buildername="dummy",
+ allowReconnect=False):
+ # the slave's host has vanished off the net, leaving the connection
+ # dangling. This will be detected quickly by app-level keepalives or
+ # a ping, or slowly by TCP timeouts.
+
+ # simulate this by replacing the slave Broker's .dataReceived method
+ # with one that just throws away all data.
+ def discard(data):
+ pass
+ bot = self.slaves[slavename].getServiceNamed("bot")
+ broker = bot.builders[buildername].remote.broker
+ broker.dataReceived = discard # seal its ears
+ broker.transport.write = discard # and take away its voice
+ if not allowReconnect:
+ # also discourage it from reconnecting once the connection goes away
+ assert self.slaves[slavename].bf.continueTrying
+ self.slaves[slavename].bf.continueTrying = False
+
+ def ghostSlave(self):
+ # the slave thinks it has lost the connection, and initiated a
+ # reconnect. The master doesn't yet realize it has lost the previous
+ # connection, and sees two connections at once.
+ raise NotImplementedError
+
+
+def setupBuildStepStatus(basedir):
+ """Return a BuildStep with a suitable BuildStepStatus object, ready to
+ use."""
+ os.mkdir(basedir)
+ botmaster = None
+ s0 = builder.Status(botmaster, basedir)
+ s1 = s0.builderAdded("buildername", "buildername")
+ s2 = builder.BuildStatus(s1, 1)
+ s3 = builder.BuildStepStatus(s2)
+ s3.setName("foostep")
+ s3.started = True
+ s3.stepStarted()
+ return s3
+
+def fake_slaveVersion(command, oldversion=None):
+ from buildbot.slave.registry import commandRegistry
+ return commandRegistry[command]
+
+class FakeBuildMaster:
+ properties = Properties(masterprop="master")
+
+class FakeBotMaster:
+ parent = FakeBuildMaster()
+
+def makeBuildStep(basedir, step_class=BuildStep, **kwargs):
+ bss = setupBuildStepStatus(basedir)
+
+ ss = SourceStamp()
+ setup = {'name': "builder1", "slavename": "bot1",
+ 'builddir': "builddir", 'factory': None}
+ b0 = Builder(setup, bss.getBuild().getBuilder())
+ b0.botmaster = FakeBotMaster()
+ br = BuildRequest("reason", ss, 'test_builder')
+ b = Build([br])
+ b.setBuilder(b0)
+ s = step_class(**kwargs)
+ s.setBuild(b)
+ s.setStepStatus(bss)
+ b.build_status = bss.getBuild()
+ b.setupProperties()
+ s.slaveVersion = fake_slaveVersion
+ return s
+
+
+def findDir():
+ # the same directory that holds this script
+ return util.sibpath(__file__, ".")
+
+class SignalMixin:
+ sigchldHandler = None
+
+ def setUpClass(self):
+ # make sure the SIGCHLD handler is installed, as it would be after
+ # reactor.run(); the problem is that the reactor may not have been
+ # run yet by the time this test runs.
+ if hasattr(reactor, "_handleSigchld") and hasattr(signal, "SIGCHLD"):
+ self.sigchldHandler = signal.signal(signal.SIGCHLD,
+ reactor._handleSigchld)
+
+ def tearDownClass(self):
+ if self.sigchldHandler:
+ signal.signal(signal.SIGCHLD, self.sigchldHandler)
+
+# these classes are used to test SlaveCommands in isolation
+
+class FakeSlaveBuilder:
+ debug = False
+ def __init__(self, usePTY, basedir):
+ self.updates = []
+ self.basedir = basedir
+ self.usePTY = usePTY
+
+ def sendUpdate(self, data):
+ if self.debug:
+ print "FakeSlaveBuilder.sendUpdate", data
+ self.updates.append(data)
+
+
+class SlaveCommandTestBase(SignalMixin):
+ usePTY = False
+
+ def setUpBuilder(self, basedir):
+ if not os.path.exists(basedir):
+ os.mkdir(basedir)
+ self.builder = FakeSlaveBuilder(self.usePTY, basedir)
+
+ def startCommand(self, cmdclass, args):
+ stepId = 0
+ self.cmd = c = cmdclass(self.builder, stepId, args)
+ c.running = True
+ d = c.doStart()
+ return d
+
+ def collectUpdates(self, res=None):
+ logs = {}
+ for u in self.builder.updates:
+ for k in u.keys():
+ if k == "log":
+ logname,data = u[k]
+ oldlog = logs.get(("log",logname), "")
+ logs[("log",logname)] = oldlog + data
+ elif k == "rc":
+ pass
+ else:
+ logs[k] = logs.get(k, "") + u[k]
+ return logs
+
+ def findRC(self):
+ for u in self.builder.updates:
+ if "rc" in u:
+ return u["rc"]
+ return None
+
+ def printStderr(self):
+ for u in self.builder.updates:
+ if "stderr" in u:
+ print u["stderr"]
+
+# ----------------------------------------
+
+class LocalWrapper:
+ # r = pb.Referenceable()
+ # w = LocalWrapper(r)
+ # now you can do things like w.callRemote()
+ def __init__(self, target):
+ self.target = target
+
+ def callRemote(self, name, *args, **kwargs):
+ # callRemote is not allowed to fire its Deferred in the same turn
+ d = defer.Deferred()
+ d.addCallback(self._callRemote, *args, **kwargs)
+ reactor.callLater(0, d.callback, name)
+ return d
+
+ def _callRemote(self, name, *args, **kwargs):
+ method = getattr(self.target, "remote_"+name)
+ return method(*args, **kwargs)
+
+ def notifyOnDisconnect(self, observer):
+ pass
+ def dontNotifyOnDisconnect(self, observer):
+ pass
+
+
+class LocalSlaveBuilder(bot.SlaveBuilder):
+ """I am object that behaves like a pb.RemoteReference, but in fact I
+ invoke methods locally."""
+ _arg_filter = None
+
+ def setArgFilter(self, filter):
+ self._arg_filter = filter
+
+ def remote_startCommand(self, stepref, stepId, command, args):
+ if self._arg_filter:
+ args = self._arg_filter(args)
+ # stepref should be a RemoteReference to the RemoteCommand
+ return bot.SlaveBuilder.remote_startCommand(self,
+ LocalWrapper(stepref),
+ stepId, command, args)
+
+class StepTester:
+ """Utility class to exercise BuildSteps and RemoteCommands, without
+ really using a Build or a Bot. No networks are used.
+
+ Use this as follows::
+
+ class MyTest(StepTester, unittest.TestCase):
+ def testOne(self):
+ self.slavebase = 'testOne.slave'
+ self.masterbase = 'testOne.master'
+ sb = self.makeSlaveBuilder()
+ step = self.makeStep(stepclass, **kwargs)
+ d = self.runStep(step)
+ d.addCallback(_checkResults)
+ return d
+ """
+
+ #slavebase = "slavebase"
+ slavebuilderbase = "slavebuilderbase"
+ #masterbase = "masterbase"
+
+ def makeSlaveBuilder(self):
+ os.mkdir(self.slavebase)
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase))
+ b = bot.Bot(self.slavebase, False)
+ b.startService()
+ sb = LocalSlaveBuilder("slavebuildername", False)
+ sb.setArgFilter(self.filterArgs)
+ sb.usePTY = False
+ sb.setServiceParent(b)
+ sb.setBuilddir(self.slavebuilderbase)
+ self.remote = LocalWrapper(sb)
+ return sb
+
+ workdir = "build"
+ def makeStep(self, factory, **kwargs):
+ step = makeBuildStep(self.masterbase, factory, **kwargs)
+ step.setBuildSlave(BuildSlave("name", "password"))
+ step.setDefaultWorkdir(self.workdir)
+ return step
+
+ def runStep(self, step):
+ d = defer.maybeDeferred(step.startStep, self.remote)
+ return d
+
+ def wrap(self, target):
+ return LocalWrapper(target)
+
+ def filterArgs(self, args):
+ # this can be overridden
+ return args
+
+# ----------------------------------------
+
+_flags = {}
+
+def setTestFlag(flagname, value):
+ _flags[flagname] = value
+
+class SetTestFlagStep(BuildStep):
+ """
+ A special BuildStep to set a named flag; this can be used with the
+ TestFlagMixin to monitor what has and has not run in a particular
+ configuration.
+ """
+ def __init__(self, flagname='flag', value=1, **kwargs):
+ BuildStep.__init__(self, **kwargs)
+ self.addFactoryArguments(flagname=flagname, value=value)
+
+ self.flagname = flagname
+ self.value = value
+
+ def start(self):
+ properties = self.build.getProperties()
+ _flags[self.flagname] = properties.render(self.value)
+ self.finished(builder.SUCCESS)
+
+class TestFlagMixin:
+ def clearFlags(self):
+ """
+ Set up for a test by clearing all flags; call this from your test
+ function.
+ """
+ _flags.clear()
+
+ def failIfFlagSet(self, flagname, msg=None):
+ if not msg: msg = "flag '%s' is set" % flagname
+ self.failIf(_flags.has_key(flagname), msg=msg)
+
+ def failIfFlagNotSet(self, flagname, msg=None):
+ if not msg: msg = "flag '%s' is not set" % flagname
+ self.failUnless(_flags.has_key(flagname), msg=msg)
+
+ def getFlag(self, flagname):
+ self.failIfFlagNotSet(flagname, "flag '%s' not set" % flagname)
+ return _flags.get(flagname)
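SetTestFlagStep and TestFlagMixin are meant to be used together: a builder's factory includes a SetTestFlagStep, and the test case (which mixes in TestFlagMixin) checks the shared flag store after the build has run. A minimal sketch of the pattern, with the build itself replaced by a direct setTestFlag() call for brevity:

    from twisted.trial import unittest
    from buildbot.test.runutils import TestFlagMixin, setTestFlag

    class FlagExample(TestFlagMixin, unittest.TestCase):
        def testFlag(self):
            self.clearFlags()            # start each test with an empty store
            self.failIfFlagSet("ran")
            # in a real test, a SetTestFlagStep(flagname="ran", value="yes")
            # inside a build factory would do this
            setTestFlag("ran", "yes")
            self.failUnlessEqual(self.getFlag("ran"), "yes")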
diff --git a/buildbot/buildbot/test/sleep.py b/buildbot/buildbot/test/sleep.py
new file mode 100644
index 0000000..4662852
--- /dev/null
+++ b/buildbot/buildbot/test/sleep.py
@@ -0,0 +1,8 @@
+
+import sys, time
+delay = int(sys.argv[1])
+
+sys.stdout.write("sleeping for %d seconds\n" % delay)
+time.sleep(delay)
+sys.stdout.write("woke up\n")
+sys.exit(0)
diff --git a/buildbot/buildbot/test/subdir/emit.py b/buildbot/buildbot/test/subdir/emit.py
new file mode 100644
index 0000000..42d2ca9
--- /dev/null
+++ b/buildbot/buildbot/test/subdir/emit.py
@@ -0,0 +1,11 @@
+#! /usr/bin/python
+
+import os, sys
+
+sys.stdout.write("this is stdout in subdir\n")
+sys.stderr.write("this is stderr\n")
+if os.environ.has_key("EMIT_TEST"):
+ sys.stdout.write("EMIT_TEST: %s\n" % os.environ["EMIT_TEST"])
+open("log1.out","wt").write("this is log1\n")
+rc = int(sys.argv[1])
+sys.exit(rc)
diff --git a/buildbot/buildbot/test/test__versions.py b/buildbot/buildbot/test/test__versions.py
new file mode 100644
index 0000000..a69fcc4
--- /dev/null
+++ b/buildbot/buildbot/test/test__versions.py
@@ -0,0 +1,16 @@
+
+# This is a fake test which just logs the versions of Python, Twisted, and
+# Buildbot, to make it easier to track down failures in other tests.
+
+from twisted.trial import unittest
+from twisted.python import log
+from twisted import copyright
+import sys
+import buildbot
+
+class Versions(unittest.TestCase):
+ def test_versions(self):
+ log.msg("Python Version: %s" % sys.version)
+ log.msg("Twisted Version: %s" % copyright.version)
+ log.msg("Buildbot Version: %s" % buildbot.version)
+
diff --git a/buildbot/buildbot/test/test_bonsaipoller.py b/buildbot/buildbot/test/test_bonsaipoller.py
new file mode 100644
index 0000000..f4ca233
--- /dev/null
+++ b/buildbot/buildbot/test/test_bonsaipoller.py
@@ -0,0 +1,244 @@
+# -*- test-case-name: buildbot.test.test_bonsaipoller -*-
+
+from twisted.trial import unittest
+from buildbot.changes.bonsaipoller import FileNode, CiNode, BonsaiResult, \
+ BonsaiParser, BonsaiPoller, InvalidResultError, EmptyResult
+from buildbot.changes.changes import ChangeMaster
+
+from copy import deepcopy
+import re
+
+log1 = "Add Bug 338541a"
+who1 = "sar@gmail.com"
+date1 = 1161908700
+log2 = "bug 357427 add static ctor/dtor methods"
+who2 = "aarrg@ooacm.org"
+date2 = 1161910620
+log3 = "Testing log #3 lbah blah"
+who3 = "huoents@hueont.net"
+date3 = 1889822728
+rev1 = "1.8"
+file1 = "mozilla/testing/mochitest/tests/index.html"
+rev2 = "1.1"
+file2 = "mozilla/testing/mochitest/tests/test_bug338541.xhtml"
+rev3 = "1.1812"
+file3 = "mozilla/xpcom/threads/nsAutoLock.cpp"
+rev4 = "1.3"
+file4 = "mozilla/xpcom/threads/nsAutoLock.h"
+rev5 = "2.4"
+file5 = "mozilla/xpcom/threads/test.cpp"
+
+nodes = []
+files = []
+files.append(FileNode(rev1,file1))
+nodes.append(CiNode(log1, who1, date1, files))
+
+files = []
+files.append(FileNode(rev2, file2))
+files.append(FileNode(rev3, file3))
+nodes.append(CiNode(log2, who2, date2, files))
+
+nodes.append(CiNode(log3, who3, date3, []))
+
+goodParsedResult = BonsaiResult(nodes)
+
+goodUnparsedResult = """\
+<?xml version="1.0"?>
+<queryResults>
+<ci who="%s" date="%d">
+ <log>%s</log>
+ <files>
+ <f rev="%s">%s</f>
+ </files>
+</ci>
+<ci who="%s" date="%d">
+ <log>%s</log>
+ <files>
+ <f rev="%s">%s</f>
+ <f rev="%s">%s</f>
+ </files>
+</ci>
+<ci who="%s" date="%d">
+ <log>%s</log>
+ <files>
+ </files>
+</ci>
+</queryResults>
+""" % (who1, date1, log1, rev1, file1,
+ who2, date2, log2, rev2, file2, rev3, file3,
+ who3, date3, log3)
+
+badUnparsedResult = deepcopy(goodUnparsedResult)
+badUnparsedResult = badUnparsedResult.replace("</queryResults>", "")
+
+invalidDateResult = deepcopy(goodUnparsedResult)
+invalidDateResult = invalidDateResult.replace(str(date1), "foobar")
+
+missingFilenameResult = deepcopy(goodUnparsedResult)
+missingFilenameResult = missingFilenameResult.replace(file2, "")
+
+duplicateLogResult = deepcopy(goodUnparsedResult)
+duplicateLogResult = re.sub("<log>"+log1+"</log>",
+ "<log>blah</log><log>blah</log>",
+ duplicateLogResult)
+
+duplicateFilesResult = deepcopy(goodUnparsedResult)
+duplicateFilesResult = re.sub("<files>\s*</files>",
+ "<files></files><files></files>",
+ duplicateFilesResult)
+
+missingCiResult = deepcopy(goodUnparsedResult)
+r = re.compile("<ci.*</ci>", re.DOTALL | re.MULTILINE)
+missingCiResult = re.sub(r, "", missingCiResult)
+
+badResultMsgs = { 'badUnparsedResult':
+ "BonsaiParser did not raise an exception when given a bad query",
+ 'invalidDateResult':
+ "BonsaiParser did not raise an exception when given an invalid date",
+ 'missingRevisionResult':
+ "BonsaiParser did not raise an exception when a revision was missing",
+ 'missingFilenameResult':
+ "BonsaiParser did not raise an exception when a filename was missing",
+ 'duplicateLogResult':
+ "BonsaiParser did not raise an exception when there was two <log> tags",
+ 'duplicateFilesResult':
+ "BonsaiParser did not raise an exception when there was two <files> tags",
+ 'missingCiResult':
+ "BonsaiParser did not raise an exception when there was no <ci> tags"
+}
+
+noCheckinMsgResult = """\
+<?xml version="1.0"?>
+<queryResults>
+<ci who="johndoe@domain.tld" date="12345678">
+ <log></log>
+ <files>
+ <f rev="1.1">first/file.ext</f>
+ </files>
+</ci>
+<ci who="johndoe@domain.tld" date="12345678">
+ <log></log>
+ <files>
+ <f rev="1.2">second/file.ext</f>
+ </files>
+</ci>
+<ci who="johndoe@domain.tld" date="12345678">
+ <log></log>
+ <files>
+ <f rev="1.3">third/file.ext</f>
+ </files>
+</ci>
+</queryResults>
+"""
+
+noCheckinMsgRef = [dict(filename="first/file.ext",
+ revision="1.1"),
+ dict(filename="second/file.ext",
+ revision="1.2"),
+ dict(filename="third/file.ext",
+ revision="1.3")]
+
+class FakeChangeMaster(ChangeMaster):
+ def __init__(self):
+ ChangeMaster.__init__(self)
+
+ def addChange(self, change):
+ pass
+
+class FakeBonsaiPoller(BonsaiPoller):
+ def __init__(self):
+ BonsaiPoller.__init__(self, "fake url", "fake module", "fake branch")
+ self.parent = FakeChangeMaster()
+
+class TestBonsaiPoller(unittest.TestCase):
+ def testFullyFormedResult(self):
+ br = BonsaiParser(goodUnparsedResult)
+ result = br.getData()
+ # make sure the result is a BonsaiResult
+ self.failUnless(isinstance(result, BonsaiResult))
+ # test for successful parsing
+ self.failUnlessEqual(goodParsedResult, result,
+ "BonsaiParser did not return the expected BonsaiResult")
+
+ def testBadUnparsedResult(self):
+ try:
+ BonsaiParser(badUnparsedResult)
+ self.fail(badResultMsgs["badUnparsedResult"])
+ except InvalidResultError:
+ pass
+
+ def testInvalidDateResult(self):
+ try:
+ BonsaiParser(invalidDateResult)
+ self.fail(badResultMsgs["invalidDateResult"])
+ except InvalidResultError:
+ pass
+
+ def testMissingFilenameResult(self):
+ try:
+ BonsaiParser(missingFilenameResult)
+ self.fail(badResultMsgs["missingFilenameResult"])
+ except InvalidResultError:
+ pass
+
+ def testDuplicateLogResult(self):
+ try:
+ BonsaiParser(duplicateLogResult)
+ self.fail(badResultMsgs["duplicateLogResult"])
+ except InvalidResultError:
+ pass
+
+ def testDuplicateFilesResult(self):
+ try:
+ BonsaiParser(duplicateFilesResult)
+ self.fail(badResultMsgs["duplicateFilesResult"])
+ except InvalidResultError:
+ pass
+
+ def testMissingCiResult(self):
+ try:
+ BonsaiParser(missingCiResult)
+ self.fail(badResultMsgs["missingCiResult"])
+ except EmptyResult:
+ pass
+
+ def testChangeNotSubmitted(self):
+ "Make sure a change is not submitted if the BonsaiParser fails"
+ poller = FakeBonsaiPoller()
+ lastChangeBefore = poller.lastChange
+ poller._process_changes(badUnparsedResult)
+ # self.lastChange will not be updated if the change was not submitted
+ self.failUnlessEqual(lastChangeBefore, poller.lastChange)
+
+ def testParserWorksAfterInvalidResult(self):
+ """Make sure the BonsaiPoller still works after catching an
+ InvalidResultError"""
+
+ poller = FakeBonsaiPoller()
+
+ lastChangeBefore = poller.lastChange
+ # generate an exception first. pretend that we're doing a poll and
+ # increment the timestamp, otherwise the failIfEqual test at the
+ # bottom will depend upon there being a noticeable difference between
+ # two successive calls to time.time().
+ poller.lastPoll += 1.0
+ poller._process_changes(badUnparsedResult)
+ # now give it a valid one...
+ poller.lastPoll += 1.0
+ poller._process_changes(goodUnparsedResult)
+ # if poller.lastChange has not been updated then the good result
+ # was not parsed
+ self.failIfEqual(lastChangeBefore, poller.lastChange)
+
+ def testMergeEmptyLogMsg(self):
+ """Ensure that BonsaiPoller works around the bonsai xml output
+ issue when the check-in comment is empty"""
+ bp = BonsaiParser(noCheckinMsgResult)
+ result = bp.getData()
+ self.failUnlessEqual(len(result.nodes), 1)
+ self.failUnlessEqual(result.nodes[0].who, "johndoe@domain.tld")
+ self.failUnlessEqual(result.nodes[0].date, 12345678)
+ self.failUnlessEqual(result.nodes[0].log, "")
+ for file, ref in zip(result.nodes[0].files, noCheckinMsgRef):
+ self.failUnlessEqual(file.filename, ref['filename'])
+ self.failUnlessEqual(file.revision, ref['revision'])
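For reference, FakeBonsaiPoller mirrors how a real BonsaiPoller is constructed in a master config: the first three positional arguments are the Bonsai URL, the module, and the branch to poll. A hypothetical master.cfg fragment (the URL, module, and branch are placeholders, not values taken from these tests):

    from buildbot.changes.bonsaipoller import BonsaiPoller

    BuildmasterConfig = c = {}
    c['change_source'] = BonsaiPoller("http://bonsai.example.org/",
                                      "MyModule", "MyBranch")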
diff --git a/buildbot/buildbot/test/test_buildreq.py b/buildbot/buildbot/test/test_buildreq.py
new file mode 100644
index 0000000..6f7f3a9
--- /dev/null
+++ b/buildbot/buildbot/test/test_buildreq.py
@@ -0,0 +1,182 @@
+# -*- test-case-name: buildbot.test.test_buildreq -*-
+
+from twisted.trial import unittest
+
+from buildbot import buildset, interfaces, sourcestamp
+from buildbot.process import base
+from buildbot.status import builder
+from buildbot.changes.changes import Change
+
+class Request(unittest.TestCase):
+ def testMerge(self):
+ R = base.BuildRequest
+ S = sourcestamp.SourceStamp
+ N = 'test_builder'
+ b1 = R("why", S("branch1", None, None, None), N)
+ b1r1 = R("why2", S("branch1", "rev1", None, None), N)
+ b1r1a = R("why not", S("branch1", "rev1", None, None), N)
+ b1r2 = R("why3", S("branch1", "rev2", None, None), N)
+ b2r2 = R("why4", S("branch2", "rev2", None, None), N)
+ b1r1p1 = R("why5", S("branch1", "rev1", (3, "diff"), None), N)
+ c1 = Change("alice", [], "changed stuff", branch="branch1")
+ c2 = Change("alice", [], "changed stuff", branch="branch1")
+ c3 = Change("alice", [], "changed stuff", branch="branch1")
+ c4 = Change("alice", [], "changed stuff", branch="branch1")
+ c5 = Change("alice", [], "changed stuff", branch="branch1")
+ c6 = Change("alice", [], "changed stuff", branch="branch1")
+ b1c1 = R("changes", S("branch1", None, None, [c1,c2,c3]), N)
+ b1c2 = R("changes", S("branch1", None, None, [c4,c5,c6]), N)
+
+ self.failUnless(b1.canBeMergedWith(b1))
+ self.failIf(b1.canBeMergedWith(b1r1))
+ self.failIf(b1.canBeMergedWith(b2r2))
+ self.failIf(b1.canBeMergedWith(b1r1p1))
+ self.failIf(b1.canBeMergedWith(b1c1))
+
+ self.failIf(b1r1.canBeMergedWith(b1))
+ self.failUnless(b1r1.canBeMergedWith(b1r1))
+ self.failIf(b1r1.canBeMergedWith(b2r2))
+ self.failIf(b1r1.canBeMergedWith(b1r1p1))
+ self.failIf(b1r1.canBeMergedWith(b1c1))
+
+ self.failIf(b1r2.canBeMergedWith(b1))
+ self.failIf(b1r2.canBeMergedWith(b1r1))
+ self.failUnless(b1r2.canBeMergedWith(b1r2))
+ self.failIf(b1r2.canBeMergedWith(b2r2))
+ self.failIf(b1r2.canBeMergedWith(b1r1p1))
+
+ self.failIf(b1r1p1.canBeMergedWith(b1))
+ self.failIf(b1r1p1.canBeMergedWith(b1r1))
+ self.failIf(b1r1p1.canBeMergedWith(b1r2))
+ self.failIf(b1r1p1.canBeMergedWith(b2r2))
+ self.failIf(b1r1p1.canBeMergedWith(b1c1))
+
+ self.failIf(b1c1.canBeMergedWith(b1))
+ self.failIf(b1c1.canBeMergedWith(b1r1))
+ self.failIf(b1c1.canBeMergedWith(b1r2))
+ self.failIf(b1c1.canBeMergedWith(b2r2))
+ self.failIf(b1c1.canBeMergedWith(b1r1p1))
+ self.failUnless(b1c1.canBeMergedWith(b1c1))
+ self.failUnless(b1c1.canBeMergedWith(b1c2))
+
+ sm = b1.mergeWith([])
+ self.failUnlessEqual(sm.branch, "branch1")
+ self.failUnlessEqual(sm.revision, None)
+ self.failUnlessEqual(sm.patch, None)
+ self.failUnlessEqual(sm.changes, ())
+
+ ss = b1r1.mergeWith([b1r1])
+ self.failUnlessEqual(ss, S("branch1", "rev1", None, None))
+ why = b1r1.mergeReasons([b1r1])
+ self.failUnlessEqual(why, "why2")
+ why = b1r1.mergeReasons([b1r1a])
+ self.failUnlessEqual(why, "why2, why not")
+
+ ss = b1c1.mergeWith([b1c2])
+ self.failUnlessEqual(ss, S("branch1", None, None, [c1,c2,c3,c4,c5,c6]))
+ why = b1c1.mergeReasons([b1c2])
+ self.failUnlessEqual(why, "changes")
+
+
+class FakeBuilder:
+ name = "fake"
+ def __init__(self):
+ self.requests = []
+ def submitBuildRequest(self, req):
+ self.requests.append(req)
+
+
+class Set(unittest.TestCase):
+ def testBuildSet(self):
+ S = buildset.BuildSet
+ a,b = FakeBuilder(), FakeBuilder()
+
+ # two builds, the first one fails, the second one succeeds. The
+ # waitUntilSuccess watcher fires as soon as the first one fails,
+ # while the waitUntilFinished watcher doesn't fire until all builds
+ # are complete.
+
+ source = sourcestamp.SourceStamp()
+ s = S(["a","b"], source, "forced build")
+ s.start([a,b])
+ self.failUnlessEqual(len(a.requests), 1)
+ self.failUnlessEqual(len(b.requests), 1)
+ r1 = a.requests[0]
+ self.failUnlessEqual(r1.reason, s.reason)
+ self.failUnlessEqual(r1.source, s.source)
+
+ st = s.status
+ self.failUnlessEqual(st.getSourceStamp(), source)
+ self.failUnlessEqual(st.getReason(), "forced build")
+ self.failUnlessEqual(st.getBuilderNames(), ["a","b"])
+ self.failIf(st.isFinished())
+ brs = st.getBuildRequests()
+ self.failUnlessEqual(len(brs), 2)
+
+ res = []
+ d1 = s.waitUntilSuccess()
+ d1.addCallback(lambda r: res.append(("success", r)))
+ d2 = s.waitUntilFinished()
+ d2.addCallback(lambda r: res.append(("finished", r)))
+
+ self.failUnlessEqual(res, [])
+
+ # the first build finishes here, with FAILURE
+ builderstatus_a = builder.BuilderStatus("a")
+ bsa = builder.BuildStatus(builderstatus_a, 1)
+ bsa.setResults(builder.FAILURE)
+ a.requests[0].finished(bsa)
+
+ # any FAILURE flunks the BuildSet immediately, so the
+ # waitUntilSuccess deferred fires right away. However, the
+ # waitUntilFinished deferred must wait until all builds have
+ # completed.
+ self.failUnlessEqual(len(res), 1)
+ self.failUnlessEqual(res[0][0], "success")
+ bss = res[0][1]
+ self.failUnless(interfaces.IBuildSetStatus(bss, None))
+ self.failUnlessEqual(bss.getResults(), builder.FAILURE)
+
+ # here we finish the second build
+ builderstatus_b = builder.BuilderStatus("b")
+ bsb = builder.BuildStatus(builderstatus_b, 1)
+ bsb.setResults(builder.SUCCESS)
+ b.requests[0].finished(bsb)
+
+ # .. which ought to fire the waitUntilFinished deferred
+ self.failUnlessEqual(len(res), 2)
+ self.failUnlessEqual(res[1][0], "finished")
+ self.failUnlessEqual(res[1][1], bss)
+
+ # and finish the BuildSet overall
+ self.failUnless(st.isFinished())
+ self.failUnlessEqual(st.getResults(), builder.FAILURE)
+
+ def testSuccess(self):
+ S = buildset.BuildSet
+ a,b = FakeBuilder(), FakeBuilder()
+ # this time, both builds succeed
+
+ source = sourcestamp.SourceStamp()
+ s = S(["a","b"], source, "forced build")
+ s.start([a,b])
+
+ st = s.status
+ self.failUnlessEqual(st.getSourceStamp(), source)
+ self.failUnlessEqual(st.getReason(), "forced build")
+ self.failUnlessEqual(st.getBuilderNames(), ["a","b"])
+ self.failIf(st.isFinished())
+
+ builderstatus_a = builder.BuilderStatus("a")
+ bsa = builder.BuildStatus(builderstatus_a, 1)
+ bsa.setResults(builder.SUCCESS)
+ a.requests[0].finished(bsa)
+
+ builderstatus_b = builder.BuilderStatus("b")
+ bsb = builder.BuildStatus(builderstatus_b, 1)
+ bsb.setResults(builder.SUCCESS)
+ b.requests[0].finished(bsb)
+
+ self.failUnless(st.isFinished())
+ self.failUnlessEqual(st.getResults(), builder.SUCCESS)
+
diff --git a/buildbot/buildbot/test/test_buildstep.py b/buildbot/buildbot/test/test_buildstep.py
new file mode 100644
index 0000000..0e9c620
--- /dev/null
+++ b/buildbot/buildbot/test/test_buildstep.py
@@ -0,0 +1,144 @@
+# -*- test-case-name: buildbot.test.test_buildstep -*-
+
+# test cases for buildbot.process.buildstep
+
+from twisted.trial import unittest
+
+from buildbot import interfaces
+from buildbot.process import buildstep
+
+# have to subclass LogObserver in order to test it, since the default
+# implementations of outReceived() and errReceived() do nothing
+class MyLogObserver(buildstep.LogObserver):
+ def __init__(self):
+ self._out = [] # list of chunks
+ self._err = []
+
+ def outReceived(self, data):
+ self._out.append(data)
+
+ def errReceived(self, data):
+ self._err.append(data)
+
+class ObserverTestCase(unittest.TestCase):
+ observer_cls = None # must be set by subclass
+
+ def setUp(self):
+ self.observer = self.observer_cls()
+
+ def _logStdout(self, chunk):
+ # why does LogObserver.logChunk() take 'build', 'step', and
+ # 'log' arguments when it clearly doesn't use them for anything?
+ self.observer.logChunk(None, None, None, interfaces.LOG_CHANNEL_STDOUT, chunk)
+
+ def _logStderr(self, chunk):
+ self.observer.logChunk(None, None, None, interfaces.LOG_CHANNEL_STDERR, chunk)
+
+ def _assertStdout(self, expect_lines):
+ self.assertEqual(self.observer._out, expect_lines)
+
+ def _assertStderr(self, expect_lines):
+ self.assertEqual(self.observer._err, expect_lines)
+
+class LogObserver(ObserverTestCase):
+
+ observer_cls = MyLogObserver
+
+ def testLogChunk(self):
+ self._logStdout("foo")
+ self._logStderr("argh")
+ self._logStdout(" wubba\n")
+ self._logStderr("!!!\n")
+
+ self._assertStdout(["foo", " wubba\n"])
+ self._assertStderr(["argh", "!!!\n"])
+
+# again, have to subclass LogLineObserver in order to test it, because the
+# default implementations of data-receiving methods are empty
+class MyLogLineObserver(buildstep.LogLineObserver):
+ def __init__(self):
+ #super(MyLogLineObserver, self).__init__()
+ buildstep.LogLineObserver.__init__(self)
+
+ self._out = [] # list of lines
+ self._err = []
+
+ def outLineReceived(self, line):
+ self._out.append(line)
+
+ def errLineReceived(self, line):
+ self._err.append(line)
+
+class LogLineObserver(ObserverTestCase):
+ observer_cls = MyLogLineObserver
+
+ def testLineBuffered(self):
+ # no challenge here: we feed it chunks that are already lines
+ # (like a program writing to stdout in line-buffered mode)
+ self._logStdout("stdout line 1\n")
+ self._logStdout("stdout line 2\n")
+ self._logStderr("stderr line 1\n")
+ self._logStdout("stdout line 3\n")
+
+ self._assertStdout(["stdout line 1",
+ "stdout line 2",
+ "stdout line 3"])
+ self._assertStderr(["stderr line 1"])
+
+ def testShortBrokenLines(self):
+ self._logStdout("stdout line 1 starts ")
+ self._logStderr("an intervening line of error\n")
+ self._logStdout("and continues ")
+ self._logStdout("but finishes here\n")
+ self._logStderr("more error\n")
+ self._logStdout("and another line of stdout\n")
+
+ self._assertStdout(["stdout line 1 starts and continues but finishes here",
+ "and another line of stdout"])
+ self._assertStderr(["an intervening line of error",
+ "more error"])
+
+ def testLongLine(self):
+ chunk = "." * 1024
+ self._logStdout(chunk)
+ self._logStdout(chunk)
+ self._logStdout(chunk)
+ self._logStdout(chunk)
+ self._logStdout(chunk)
+ self._logStdout("\n")
+
+ self._assertStdout([chunk * 5])
+ self._assertStderr([])
+
+ def testBigChunk(self):
+ chunk = "." * 5000
+ self._logStdout(chunk)
+ self._logStdout("\n")
+
+ self._assertStdout([chunk])
+ self._assertStderr([])
+
+ def testReallyLongLine(self):
+ # A single line of > 16384 bytes is dropped on the floor (bug #201).
+ # In real life, I observed such a line being broken into chunks of
+ # 4095 bytes, so that's how I'm breaking it here.
+ self.observer.setMaxLineLength(65536)
+ chunk = "." * 4095
+ self._logStdout(chunk)
+ self._logStdout(chunk)
+ self._logStdout(chunk)
+ self._logStdout(chunk) # now we're up to 16380 bytes
+ self._logStdout("12345\n")
+
+ self._assertStdout([chunk*4 + "12345"])
+ self._assertStderr([])
+
+class RemoteShellTest(unittest.TestCase):
+ def testRepr(self):
+ # Test for #352
+ rsc = buildstep.RemoteShellCommand('.', ('sh', 'make'))
+ testval = repr(rsc)
+ rsc = buildstep.RemoteShellCommand('.', ['sh', 'make'])
+ testval = repr(rsc)
+ rsc = buildstep.RemoteShellCommand('.', 'make')
+ testval = repr(rsc)
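Outside of these unit tests, a LogLineObserver subclass is normally attached to a step's 'stdio' log via BuildStep.addLogObserver(), so that outLineReceived()/errLineReceived() fire as output streams in. A minimal sketch, assuming the addLogObserver() API and the standard 'stdio' log name used by ShellCommand; the line-counting logic is purely illustrative:

    from buildbot.process import buildstep
    from buildbot.steps.shell import ShellCommand

    class LineCounter(buildstep.LogLineObserver):
        numLines = 0
        def outLineReceived(self, line):
            self.numLines += 1           # one count per complete stdout line

    class CountingCompile(ShellCommand):
        command = ["make", "all"]
        def __init__(self, **kwargs):
            ShellCommand.__init__(self, **kwargs)
            self.counter = LineCounter()
            self.addLogObserver('stdio', self.counter)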
diff --git a/buildbot/buildbot/test/test_changes.py b/buildbot/buildbot/test/test_changes.py
new file mode 100644
index 0000000..faebe7b
--- /dev/null
+++ b/buildbot/buildbot/test/test_changes.py
@@ -0,0 +1,243 @@
+# -*- test-case-name: buildbot.test.test_changes -*-
+
+from twisted.trial import unittest
+from twisted.internet import defer, reactor
+
+from buildbot import master
+from buildbot.changes import pb
+from buildbot.scripts import runner
+
+d1 = {'files': ["Project/foo.c", "Project/bar/boo.c"],
+ 'who': "marvin",
+ 'comments': "Some changes in Project"}
+d2 = {'files': ["OtherProject/bar.c"],
+ 'who': "zaphod",
+ 'comments': "other changes"}
+d3 = {'files': ["Project/baz.c", "OtherProject/bloo.c"],
+ 'who': "alice",
+ 'comments': "mixed changes"}
+d4 = {'files': ["trunk/baz.c", "branches/foobranch/foo.c", "trunk/bar.c"],
+ 'who': "alice",
+ 'comments': "mixed changes"}
+d5 = {'files': ["Project/foo.c"],
+ 'who': "trillian",
+ 'comments': "Some changes in Project",
+ 'category': "categoryA"}
+
+class TestChangePerspective(unittest.TestCase):
+
+ def setUp(self):
+ self.changes = []
+
+ def addChange(self, c):
+ self.changes.append(c)
+
+ def testNoPrefix(self):
+ p = pb.ChangePerspective(self, None)
+ p.perspective_addChange(d1)
+ self.failUnlessEqual(len(self.changes), 1)
+ c1 = self.changes[0]
+ self.failUnlessEqual(set(c1.files),
+ set(["Project/foo.c", "Project/bar/boo.c"]))
+ self.failUnlessEqual(c1.comments, "Some changes in Project")
+ self.failUnlessEqual(c1.who, "marvin")
+
+ def testPrefix(self):
+ p = pb.ChangePerspective(self, "Project/")
+
+ p.perspective_addChange(d1)
+ self.failUnlessEqual(len(self.changes), 1)
+ c1 = self.changes[-1]
+ self.failUnlessEqual(set(c1.files), set(["foo.c", "bar/boo.c"]))
+ self.failUnlessEqual(c1.comments, "Some changes in Project")
+ self.failUnlessEqual(c1.who, "marvin")
+
+ p.perspective_addChange(d2) # should be ignored
+ self.failUnlessEqual(len(self.changes), 1)
+
+ p.perspective_addChange(d3) # should ignore the OtherProject file
+ self.failUnlessEqual(len(self.changes), 2)
+
+ c3 = self.changes[-1]
+ self.failUnlessEqual(set(c3.files), set(["baz.c"]))
+ self.failUnlessEqual(c3.comments, "mixed changes")
+ self.failUnlessEqual(c3.who, "alice")
+
+ def testPrefix2(self):
+ p = pb.ChangePerspective(self, "Project/bar/")
+
+ p.perspective_addChange(d1)
+ self.failUnlessEqual(len(self.changes), 1)
+ c1 = self.changes[-1]
+ self.failUnlessEqual(set(c1.files), set(["boo.c"]))
+ self.failUnlessEqual(c1.comments, "Some changes in Project")
+ self.failUnlessEqual(c1.who, "marvin")
+
+ p.perspective_addChange(d2) # should be ignored
+ self.failUnlessEqual(len(self.changes), 1)
+
+ p.perspective_addChange(d3) # should ignore this too
+ self.failUnlessEqual(len(self.changes), 1)
+
+ def testPrefix3(self):
+ p = pb.ChangePerspective(self, "trunk/")
+
+ p.perspective_addChange(d4)
+ self.failUnlessEqual(len(self.changes), 1)
+ c1 = self.changes[-1]
+ self.failUnlessEqual(set(c1.files), set(["baz.c", "bar.c"]))
+ self.failUnlessEqual(c1.comments, "mixed changes")
+
+ def testPrefix4(self):
+ p = pb.ChangePerspective(self, "branches/foobranch/")
+
+ p.perspective_addChange(d4)
+ self.failUnlessEqual(len(self.changes), 1)
+ c1 = self.changes[-1]
+ self.failUnlessEqual(set(c1.files), set(["foo.c"]))
+ self.failUnlessEqual(c1.comments, "mixed changes")
+
+ def testCategory(self):
+ p = pb.ChangePerspective(self, None)
+ p.perspective_addChange(d5)
+ self.failUnlessEqual(len(self.changes), 1)
+ c1 = self.changes[0]
+ self.failUnlessEqual(c1.category, "categoryA")
+
+config_empty = """
+BuildmasterConfig = c = {}
+c['slaves'] = []
+c['builders'] = []
+c['schedulers'] = []
+c['slavePortnum'] = 0
+"""
+
+config_sender = config_empty + \
+"""
+from buildbot.changes import pb
+c['change_source'] = pb.PBChangeSource(port=None)
+"""
+
+class Sender(unittest.TestCase):
+ def setUp(self):
+ self.master = master.BuildMaster(".")
+ def tearDown(self):
+ d = defer.maybeDeferred(self.master.stopService)
+ # TODO: something in Twisted-2.0.0 (and probably 2.0.1) doesn't shut
+ # down the Broker listening socket when it's supposed to.
+ # Twisted-1.3.0, and current SVN (which will be post-2.0.1) are ok.
+ # This stall() is a quick hack to deal with the problem. I need to
+ # investigate more thoroughly and find a better solution.
+ d.addCallback(self.stall, 0.1)
+ return d
+
+ def stall(self, res, timeout):
+ d = defer.Deferred()
+ reactor.callLater(timeout, d.callback, res)
+ return d
+
+ def testSender(self):
+ self.master.loadConfig(config_empty)
+ self.master.startService()
+ # TODO: BuildMaster.loadChanges replaces the change_svc object, so we
+ # have to load it twice. Clean this up.
+ d = self.master.loadConfig(config_sender)
+ d.addCallback(self._testSender_1)
+ return d
+
+ def _testSender_1(self, res):
+ self.cm = cm = self.master.change_svc
+ s1 = list(self.cm)[0]
+ port = self.master.slavePort._port.getHost().port
+
+ self.options = {'username': "alice",
+ 'master': "localhost:%d" % port,
+ 'files': ["foo.c"],
+ 'category': "categoryA",
+ }
+
+ d = runner.sendchange(self.options)
+ d.addCallback(self._testSender_2)
+ return d
+
+ def _testSender_2(self, res):
+ # now check that the change was received
+ self.failUnlessEqual(len(self.cm.changes), 1)
+ c = self.cm.changes.pop()
+ self.failUnlessEqual(c.who, "alice")
+ self.failUnlessEqual(c.files, ["foo.c"])
+ self.failUnlessEqual(c.comments, "")
+ self.failUnlessEqual(c.revision, None)
+ self.failUnlessEqual(c.category, "categoryA")
+
+ self.options['revision'] = "r123"
+ self.options['comments'] = "test change"
+
+ d = runner.sendchange(self.options)
+ d.addCallback(self._testSender_3)
+ return d
+
+ def _testSender_3(self, res):
+ self.failUnlessEqual(len(self.cm.changes), 1)
+ c = self.cm.changes.pop()
+ self.failUnlessEqual(c.who, "alice")
+ self.failUnlessEqual(c.files, ["foo.c"])
+ self.failUnlessEqual(c.comments, "test change")
+ self.failUnlessEqual(c.revision, "r123")
+ self.failUnlessEqual(c.category, "categoryA")
+
+ # test options['logfile'] by creating a temporary file
+ logfile = self.mktemp()
+ f = open(logfile, "wt")
+ f.write("longer test change")
+ f.close()
+ self.options['comments'] = None
+ self.options['logfile'] = logfile
+
+ d = runner.sendchange(self.options)
+ d.addCallback(self._testSender_4)
+ return d
+
+ def _testSender_4(self, res):
+ self.failUnlessEqual(len(self.cm.changes), 1)
+ c = self.cm.changes.pop()
+ self.failUnlessEqual(c.who, "alice")
+ self.failUnlessEqual(c.files, ["foo.c"])
+ self.failUnlessEqual(c.comments, "longer test change")
+ self.failUnlessEqual(c.revision, "r123")
+ self.failUnlessEqual(c.category, "categoryA")
+
+ # make sure that numeric revisions work too
+ self.options['logfile'] = None
+ del self.options['revision']
+ self.options['revision_number'] = 42
+
+ d = runner.sendchange(self.options)
+ d.addCallback(self._testSender_5)
+ return d
+
+ def _testSender_5(self, res):
+ self.failUnlessEqual(len(self.cm.changes), 1)
+ c = self.cm.changes.pop()
+ self.failUnlessEqual(c.who, "alice")
+ self.failUnlessEqual(c.files, ["foo.c"])
+ self.failUnlessEqual(c.comments, "")
+ self.failUnlessEqual(c.revision, 42)
+ self.failUnlessEqual(c.category, "categoryA")
+
+ # verify --branch too
+ self.options['branch'] = "branches/test"
+
+ d = runner.sendchange(self.options)
+ d.addCallback(self._testSender_6)
+ return d
+
+ def _testSender_6(self, res):
+ self.failUnlessEqual(len(self.cm.changes), 1)
+ c = self.cm.changes.pop()
+ self.failUnlessEqual(c.who, "alice")
+ self.failUnlessEqual(c.files, ["foo.c"])
+ self.failUnlessEqual(c.comments, "")
+ self.failUnlessEqual(c.revision, 42)
+ self.failUnlessEqual(c.branch, "branches/test")
+ self.failUnlessEqual(c.category, "categoryA")
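The prefix tests above correspond to the prefix= argument of PBChangeSource in a master config: files that do not start with the prefix are dropped, and the prefix is stripped from the ones that remain before the Change is recorded. A hypothetical fragment, assuming PBChangeSource forwards prefix= to the ChangePerspective exercised directly here:

    from buildbot.changes.pb import PBChangeSource

    BuildmasterConfig = c = {}
    # only changes under trunk/ are accepted; filenames lose the "trunk/" part
    c['change_source'] = PBChangeSource(prefix="trunk/")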
diff --git a/buildbot/buildbot/test/test_config.py b/buildbot/buildbot/test/test_config.py
new file mode 100644
index 0000000..900dcad
--- /dev/null
+++ b/buildbot/buildbot/test/test_config.py
@@ -0,0 +1,1277 @@
+# -*- test-case-name: buildbot.test.test_config -*-
+
+import os, warnings, exceptions
+
+from twisted.trial import unittest
+from twisted.python import failure
+from twisted.internet import defer
+
+from buildbot.master import BuildMaster
+from buildbot import scheduler
+from twisted.application import service, internet
+from twisted.spread import pb
+from twisted.web.server import Site
+from twisted.web.distrib import ResourcePublisher
+from buildbot.process.builder import Builder
+from buildbot.process.factory import BasicBuildFactory
+from buildbot.changes.pb import PBChangeSource
+from buildbot.changes.mail import SyncmailMaildirSource
+from buildbot.steps.source import CVS, Darcs
+from buildbot.steps.shell import Compile, Test, ShellCommand
+from buildbot.status import base
+from buildbot.steps import dummy, maxq, python, python_twisted, shell, \
+ source, transfer
+words = None
+try:
+ from buildbot.status import words
+except ImportError:
+ pass
+
+emptyCfg = \
+"""
+from buildbot.buildslave import BuildSlave
+BuildmasterConfig = c = {}
+c['slaves'] = []
+c['schedulers'] = []
+c['builders'] = []
+c['slavePortnum'] = 9999
+c['projectName'] = 'dummy project'
+c['projectURL'] = 'http://dummy.example.com'
+c['buildbotURL'] = 'http://dummy.example.com/buildbot'
+"""
+
+buildersCfg = \
+"""
+from buildbot.process.factory import BasicBuildFactory
+from buildbot.buildslave import BuildSlave
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+c['schedulers'] = []
+c['slavePortnum'] = 9999
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+c['builders'] = [{'name':'builder1', 'slavename':'bot1',
+ 'builddir':'workdir', 'factory':f1}]
+"""
+
+buildersCfg2 = buildersCfg + \
+"""
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule2')
+c['builders'] = [{'name':'builder1', 'slavename':'bot1',
+ 'builddir':'workdir', 'factory':f1}]
+"""
+
+buildersCfg3 = buildersCfg2 + \
+"""
+c['builders'].append({'name': 'builder2', 'slavename': 'bot1',
+ 'builddir': 'workdir2', 'factory': f1 })
+"""
+
+buildersCfg4 = buildersCfg2 + \
+"""
+c['builders'] = [{ 'name': 'builder1', 'slavename': 'bot1',
+ 'builddir': 'newworkdir', 'factory': f1 },
+ { 'name': 'builder2', 'slavename': 'bot1',
+ 'builddir': 'workdir2', 'factory': f1 }]
+"""
+
+wpCfg1 = buildersCfg + \
+"""
+from buildbot.steps import shell
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+f1.addStep(shell.ShellCommand, command=[shell.WithProperties('echo')])
+c['builders'] = [{'name':'builder1', 'slavename':'bot1',
+ 'builddir':'workdir1', 'factory': f1}]
+"""
+
+wpCfg2 = buildersCfg + \
+"""
+from buildbot.steps import shell
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+f1.addStep(shell.ShellCommand,
+ command=[shell.WithProperties('echo %s', 'revision')])
+c['builders'] = [{'name':'builder1', 'slavename':'bot1',
+ 'builddir':'workdir1', 'factory': f1}]
+"""
+
+
+
+ircCfg1 = emptyCfg + \
+"""
+from buildbot.status import words
+c['status'] = [words.IRC('irc.us.freenode.net', 'buildbot', ['twisted'])]
+"""
+
+ircCfg2 = emptyCfg + \
+"""
+from buildbot.status import words
+c['status'] = [words.IRC('irc.us.freenode.net', 'buildbot', ['twisted']),
+ words.IRC('irc.example.com', 'otherbot', ['chan1', 'chan2'])]
+"""
+
+ircCfg3 = emptyCfg + \
+"""
+from buildbot.status import words
+c['status'] = [words.IRC('irc.us.freenode.net', 'buildbot', ['knotted'])]
+"""
+
+webCfg1 = emptyCfg + \
+"""
+from buildbot.status import html
+c['status'] = [html.Waterfall(http_port=9980)]
+"""
+
+webCfg2 = emptyCfg + \
+"""
+from buildbot.status import html
+c['status'] = [html.Waterfall(http_port=9981)]
+"""
+
+webCfg3 = emptyCfg + \
+"""
+from buildbot.status import html
+c['status'] = [html.Waterfall(http_port='tcp:9981:interface=127.0.0.1')]
+"""
+
+webNameCfg1 = emptyCfg + \
+"""
+from buildbot.status import html
+c['status'] = [html.Waterfall(distrib_port='~/.twistd-web-pb')]
+"""
+
+webNameCfg2 = emptyCfg + \
+"""
+from buildbot.status import html
+c['status'] = [html.Waterfall(distrib_port='./bar.socket')]
+"""
+
+debugPasswordCfg = emptyCfg + \
+"""
+c['debugPassword'] = 'sekrit'
+"""
+
+interlockCfgBad = \
+"""
+from buildbot.process.factory import BasicBuildFactory
+from buildbot.buildslave import BuildSlave
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+c['schedulers'] = []
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+c['builders'] = [
+ { 'name': 'builder1', 'slavename': 'bot1',
+ 'builddir': 'workdir', 'factory': f1 },
+ { 'name': 'builder2', 'slavename': 'bot1',
+ 'builddir': 'workdir2', 'factory': f1 },
+ ]
+# interlocks have been removed
+c['interlocks'] = [('lock1', ['builder1'], ['builder2', 'builder3']),
+ ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+lockCfgBad1 = \
+"""
+from buildbot.steps.dummy import Dummy
+from buildbot.process.factory import BuildFactory, s
+from buildbot.locks import MasterLock
+from buildbot.buildslave import BuildSlave
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+c['schedulers'] = []
+l1 = MasterLock('lock1')
+l2 = MasterLock('lock1') # duplicate lock name
+f1 = BuildFactory([s(Dummy, locks=[])])
+c['builders'] = [
+ { 'name': 'builder1', 'slavename': 'bot1',
+ 'builddir': 'workdir', 'factory': f1, 'locks': [l1, l2] },
+ { 'name': 'builder2', 'slavename': 'bot1',
+ 'builddir': 'workdir2', 'factory': f1 },
+ ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+lockCfgBad2 = \
+"""
+from buildbot.steps.dummy import Dummy
+from buildbot.process.factory import BuildFactory, s
+from buildbot.locks import MasterLock, SlaveLock
+from buildbot.buildslave import BuildSlave
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+c['schedulers'] = []
+l1 = MasterLock('lock1')
+l2 = SlaveLock('lock1') # duplicate lock name
+f1 = BuildFactory([s(Dummy, locks=[])])
+c['builders'] = [
+ { 'name': 'builder1', 'slavename': 'bot1',
+ 'builddir': 'workdir', 'factory': f1, 'locks': [l1, l2] },
+ { 'name': 'builder2', 'slavename': 'bot1',
+ 'builddir': 'workdir2', 'factory': f1 },
+ ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+lockCfgBad3 = \
+"""
+from buildbot.steps.dummy import Dummy
+from buildbot.process.factory import BuildFactory, s
+from buildbot.locks import MasterLock
+from buildbot.buildslave import BuildSlave
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+c['schedulers'] = []
+l1 = MasterLock('lock1')
+l2 = MasterLock('lock1') # duplicate lock name
+f1 = BuildFactory([s(Dummy, locks=[l2])])
+f2 = BuildFactory([s(Dummy)])
+c['builders'] = [
+ { 'name': 'builder1', 'slavename': 'bot1',
+ 'builddir': 'workdir', 'factory': f2, 'locks': [l1] },
+ { 'name': 'builder2', 'slavename': 'bot1',
+ 'builddir': 'workdir2', 'factory': f1 },
+ ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+lockCfg1a = \
+"""
+from buildbot.process.factory import BasicBuildFactory
+from buildbot.locks import MasterLock
+from buildbot.buildslave import BuildSlave
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+c['schedulers'] = []
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+l1 = MasterLock('lock1')
+l2 = MasterLock('lock2')
+c['builders'] = [
+ { 'name': 'builder1', 'slavename': 'bot1',
+ 'builddir': 'workdir', 'factory': f1, 'locks': [l1, l2] },
+ { 'name': 'builder2', 'slavename': 'bot1',
+ 'builddir': 'workdir2', 'factory': f1 },
+ ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+lockCfg1b = \
+"""
+from buildbot.process.factory import BasicBuildFactory
+from buildbot.locks import MasterLock
+from buildbot.buildslave import BuildSlave
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+c['schedulers'] = []
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+l1 = MasterLock('lock1')
+l2 = MasterLock('lock2')
+c['builders'] = [
+ { 'name': 'builder1', 'slavename': 'bot1',
+ 'builddir': 'workdir', 'factory': f1, 'locks': [l1] },
+ { 'name': 'builder2', 'slavename': 'bot1',
+ 'builddir': 'workdir2', 'factory': f1 },
+ ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+# test out step Locks
+lockCfg2a = \
+"""
+from buildbot.steps.dummy import Dummy
+from buildbot.process.factory import BuildFactory, s
+from buildbot.locks import MasterLock
+from buildbot.buildslave import BuildSlave
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+c['schedulers'] = []
+l1 = MasterLock('lock1')
+l2 = MasterLock('lock2')
+f1 = BuildFactory([s(Dummy, locks=[l1,l2])])
+f2 = BuildFactory([s(Dummy)])
+
+c['builders'] = [
+ { 'name': 'builder1', 'slavename': 'bot1',
+ 'builddir': 'workdir', 'factory': f1 },
+ { 'name': 'builder2', 'slavename': 'bot1',
+ 'builddir': 'workdir2', 'factory': f2 },
+ ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+lockCfg2b = \
+"""
+from buildbot.steps.dummy import Dummy
+from buildbot.process.factory import BuildFactory, s
+from buildbot.locks import MasterLock
+from buildbot.buildslave import BuildSlave
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+c['schedulers'] = []
+l1 = MasterLock('lock1')
+l2 = MasterLock('lock2')
+f1 = BuildFactory([s(Dummy, locks=[l1])])
+f2 = BuildFactory([s(Dummy)])
+
+c['builders'] = [
+ { 'name': 'builder1', 'slavename': 'bot1',
+ 'builddir': 'workdir', 'factory': f1 },
+ { 'name': 'builder2', 'slavename': 'bot1',
+ 'builddir': 'workdir2', 'factory': f2 },
+ ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+lockCfg2c = \
+"""
+from buildbot.steps.dummy import Dummy
+from buildbot.process.factory import BuildFactory, s
+from buildbot.locks import MasterLock
+from buildbot.buildslave import BuildSlave
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+c['schedulers'] = []
+l1 = MasterLock('lock1')
+l2 = MasterLock('lock2')
+f1 = BuildFactory([s(Dummy)])
+f2 = BuildFactory([s(Dummy)])
+
+c['builders'] = [
+ { 'name': 'builder1', 'slavename': 'bot1',
+ 'builddir': 'workdir', 'factory': f1 },
+ { 'name': 'builder2', 'slavename': 'bot1',
+ 'builddir': 'workdir2', 'factory': f2 },
+ ]
+c['slavePortnum'] = 9999
+BuildmasterConfig = c
+"""
+
+schedulersCfg = \
+"""
+from buildbot.scheduler import Scheduler, Dependent
+from buildbot.process.factory import BasicBuildFactory
+from buildbot.buildslave import BuildSlave
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+b1 = {'name':'builder1', 'slavename':'bot1',
+ 'builddir':'workdir', 'factory':f1}
+c['builders'] = [b1]
+c['schedulers'] = [Scheduler('full', None, 60, ['builder1'])]
+c['slavePortnum'] = 9999
+c['projectName'] = 'dummy project'
+c['projectURL'] = 'http://dummy.example.com'
+c['buildbotURL'] = 'http://dummy.example.com/buildbot'
+BuildmasterConfig = c
+"""
+
+class ConfigTest(unittest.TestCase):
+ def setUp(self):
+ # this class generates several deprecation warnings, which the user
+ # doesn't need to see.
+ warnings.simplefilter('ignore', exceptions.DeprecationWarning)
+ self.buildmaster = BuildMaster(".")
+
+ def failUnlessListsEquivalent(self, list1, list2):
+ l1 = list1[:]
+ l1.sort()
+ l2 = list2[:]
+ l2.sort()
+ self.failUnlessEqual(l1, l2)
+
+ def servers(self, s, types):
+ # perform a recursive search of s's child services, yielding instances
+ # of the given types (such as twisted.application.internet.TCPServer);
+ # callers then extract their .args values to find the ports they want
+ # to listen on
+ for child in s:
+ if service.IServiceCollection.providedBy(child):
+ for gc in self.servers(child, types):
+ yield gc
+ if isinstance(child, types):
+ yield child
+
+ def TCPports(self, s):
+ return list(self.servers(s, internet.TCPServer))
+ def UNIXports(self, s):
+ return list(self.servers(s, internet.UNIXServer))
+ def TCPclients(self, s):
+ return list(self.servers(s, internet.TCPClient))
+
+ def checkPorts(self, svc, expected):
+ """Verify that the TCPServer and UNIXServer children of the given
+ service have the expected portnum/pathname and factory classes. As a
+ side-effect, return a list of servers in the same order as the
+ 'expected' list. This can be used to verify properties of the
+ factories contained therein."""
+
+ expTCP = [e for e in expected if type(e[0]) == int]
+ expUNIX = [e for e in expected if type(e[0]) == str]
+ haveTCP = [(p.args[0], p.args[1].__class__)
+ for p in self.TCPports(svc)]
+ haveUNIX = [(p.args[0], p.args[1].__class__)
+ for p in self.UNIXports(svc)]
+ self.failUnlessListsEquivalent(expTCP, haveTCP)
+ self.failUnlessListsEquivalent(expUNIX, haveUNIX)
+ ret = []
+ for e in expected:
+ for have in self.TCPports(svc) + self.UNIXports(svc):
+ if have.args[0] == e[0]:
+ ret.append(have)
+ continue
+ assert(len(ret) == len(expected))
+ return ret
+
+ def testEmpty(self):
+ self.failUnlessRaises(KeyError, self.buildmaster.loadConfig, "")
+
+ def testSimple(self):
+ # covers slavePortnum, base checker passwords
+ master = self.buildmaster
+ master.loadChanges()
+
+ master.loadConfig(emptyCfg)
+ # note: this doesn't actually start listening, because the app
+ # hasn't started running yet
+ self.failUnlessEqual(master.slavePortnum, "tcp:9999")
+ self.checkPorts(master, [(9999, pb.PBServerFactory)])
+ self.failUnlessEqual(list(master.change_svc), [])
+ self.failUnlessEqual(master.botmaster.builders, {})
+ self.failUnlessEqual(master.checker.users,
+ {"change": "changepw"})
+ self.failUnlessEqual(master.projectName, "dummy project")
+ self.failUnlessEqual(master.projectURL, "http://dummy.example.com")
+ self.failUnlessEqual(master.buildbotURL,
+ "http://dummy.example.com/buildbot")
+
+ def testSlavePortnum(self):
+ master = self.buildmaster
+ master.loadChanges()
+
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(master.slavePortnum, "tcp:9999")
+ ports = self.checkPorts(master, [(9999, pb.PBServerFactory)])
+ p = ports[0]
+
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(master.slavePortnum, "tcp:9999")
+ ports = self.checkPorts(master, [(9999, pb.PBServerFactory)])
+ self.failUnlessIdentical(p, ports[0],
+ "the slave port was changed even " + \
+ "though the configuration was not")
+
+ master.loadConfig(emptyCfg + "c['slavePortnum'] = 9000\n")
+ self.failUnlessEqual(master.slavePortnum, "tcp:9000")
+ ports = self.checkPorts(master, [(9000, pb.PBServerFactory)])
+ self.failIf(p is ports[0],
+ "slave port was unchanged but configuration was changed")
+
+ def testSlaves(self):
+ master = self.buildmaster
+ master.loadChanges()
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(master.botmaster.builders, {})
+ self.failUnlessEqual(master.checker.users,
+ {"change": "changepw"})
+ # 'botsCfg' is testing backwards compatibility, for 0.7.5 config
+ # files that have not yet been updated to 0.7.6. This compatibility
+ # (and this test) is scheduled for removal in 0.8.0.
+ botsCfg = (emptyCfg +
+ "c['bots'] = [('bot1', 'pw1'), ('bot2', 'pw2')]\n")
+ master.loadConfig(botsCfg)
+ self.failUnlessEqual(master.checker.users,
+ {"change": "changepw",
+ "bot1": "pw1",
+ "bot2": "pw2"})
+ master.loadConfig(botsCfg)
+ self.failUnlessEqual(master.checker.users,
+ {"change": "changepw",
+ "bot1": "pw1",
+ "bot2": "pw2"})
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(master.checker.users,
+ {"change": "changepw"})
+ slavesCfg = (emptyCfg +
+ "from buildbot.buildslave import BuildSlave\n"
+ "c['slaves'] = [BuildSlave('bot1','pw1'), "
+ "BuildSlave('bot2','pw2')]\n")
+ master.loadConfig(slavesCfg)
+ self.failUnlessEqual(master.checker.users,
+ {"change": "changepw",
+ "bot1": "pw1",
+ "bot2": "pw2"})
+
+
+ def testChangeSource(self):
+ master = self.buildmaster
+ master.loadChanges()
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(list(master.change_svc), [])
+
+ sourcesCfg = emptyCfg + \
+"""
+from buildbot.changes.pb import PBChangeSource
+c['change_source'] = PBChangeSource()
+"""
+
+ d = master.loadConfig(sourcesCfg)
+ def _check1(res):
+ self.failUnlessEqual(len(list(self.buildmaster.change_svc)), 1)
+ s1 = list(self.buildmaster.change_svc)[0]
+ self.failUnless(isinstance(s1, PBChangeSource))
+ self.failUnlessEqual(s1, list(self.buildmaster.change_svc)[0])
+ self.failUnless(s1.parent)
+
+ # verify that unchanged sources are not interrupted
+ d1 = self.buildmaster.loadConfig(sourcesCfg)
+
+ def _check2(res):
+ self.failUnlessEqual(len(list(self.buildmaster.change_svc)), 1)
+ s2 = list(self.buildmaster.change_svc)[0]
+ self.failUnlessIdentical(s1, s2)
+ self.failUnless(s1.parent)
+ d1.addCallback(_check2)
+ return d1
+ d.addCallback(_check1)
+
+ # make sure we can get rid of the sources too
+ d.addCallback(lambda res: self.buildmaster.loadConfig(emptyCfg))
+
+ def _check3(res):
+ self.failUnlessEqual(list(self.buildmaster.change_svc), [])
+ d.addCallback(_check3)
+
+ return d
+
+ def testChangeSources(self):
+ # make sure we can accept a list
+ master = self.buildmaster
+ master.loadChanges()
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(list(master.change_svc), [])
+
+ sourcesCfg = emptyCfg + \
+"""
+from buildbot.changes.pb import PBChangeSource
+from buildbot.changes.mail import SyncmailMaildirSource
+c['change_source'] = [PBChangeSource(),
+ SyncmailMaildirSource('.'),
+ ]
+"""
+
+ d = master.loadConfig(sourcesCfg)
+ def _check1(res):
+ self.failUnlessEqual(len(list(self.buildmaster.change_svc)), 2)
+ s1,s2 = list(self.buildmaster.change_svc)
+ if isinstance(s2, PBChangeSource):
+ s1,s2 = s2,s1
+ self.failUnless(isinstance(s1, PBChangeSource))
+ self.failUnless(s1.parent)
+ self.failUnless(isinstance(s2, SyncmailMaildirSource))
+ self.failUnless(s2.parent)
+ d.addCallback(_check1)
+ return d
+
+ def testSources(self):
+ # test backwards compatibility. c['sources'] is deprecated.
+ master = self.buildmaster
+ master.loadChanges()
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(list(master.change_svc), [])
+
+ sourcesCfg = emptyCfg + \
+"""
+from buildbot.changes.pb import PBChangeSource
+c['sources'] = [PBChangeSource()]
+"""
+
+ d = master.loadConfig(sourcesCfg)
+ def _check1(res):
+ self.failUnlessEqual(len(list(self.buildmaster.change_svc)), 1)
+ s1 = list(self.buildmaster.change_svc)[0]
+ self.failUnless(isinstance(s1, PBChangeSource))
+ self.failUnless(s1.parent)
+ d.addCallback(_check1)
+ return d
+
+ def shouldBeFailure(self, res, *expected):
+ self.failUnless(isinstance(res, failure.Failure),
+ "we expected this to fail, not produce %s" % (res,))
+ res.trap(*expected)
+ return None # all is good
+
+ def testSchedulerErrors(self):
+ master = self.buildmaster
+ master.loadChanges()
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(master.allSchedulers(), [])
+
+ def _shouldBeFailure(res, hint=None):
+ self.shouldBeFailure(res, AssertionError, ValueError)
+ if hint:
+ self.failUnless(str(res).find(hint) != -1)
+
+ def _loadConfig(res, newcfg):
+ return self.buildmaster.loadConfig(newcfg)
+ d = defer.succeed(None)
+
+ # c['schedulers'] must be a list
+ badcfg = schedulersCfg + \
+"""
+c['schedulers'] = Scheduler('full', None, 60, ['builder1'])
+"""
+ d.addCallback(_loadConfig, badcfg)
+ d.addBoth(_shouldBeFailure,
+ "c['schedulers'] must be a list of Scheduler instances")
+
+ # c['schedulers'] must be a list of IScheduler objects
+ badcfg = schedulersCfg + \
+"""
+c['schedulers'] = ['oops', 'problem']
+"""
+ d.addCallback(_loadConfig, badcfg)
+ d.addBoth(_shouldBeFailure,
+ "c['schedulers'] must be a list of Scheduler instances")
+
+ # c['schedulers'] must point at real builders
+ badcfg = schedulersCfg + \
+"""
+c['schedulers'] = [Scheduler('full', None, 60, ['builder-bogus'])]
+"""
+ d.addCallback(_loadConfig, badcfg)
+ d.addBoth(_shouldBeFailure, "uses unknown builder")
+
+ # builderNames= must be a list
+ badcfg = schedulersCfg + \
+"""
+c['schedulers'] = [Scheduler('full', None, 60, 'builder1')]
+"""
+ d.addCallback(_loadConfig, badcfg)
+ d.addBoth(_shouldBeFailure,
+ "must be a list of Builder description names")
+
+ # builderNames= must be a list of strings, not dicts
+ badcfg = schedulersCfg + \
+"""
+c['schedulers'] = [Scheduler('full', None, 60, [b1])]
+"""
+ d.addCallback(_loadConfig, badcfg)
+ d.addBoth(_shouldBeFailure,
+ "must be a list of Builder description names")
+
+ # builderNames= must be a list of strings, not a dict
+ badcfg = schedulersCfg + \
+"""
+c['schedulers'] = [Scheduler('full', None, 60, b1)]
+"""
+ d.addCallback(_loadConfig, badcfg)
+ d.addBoth(_shouldBeFailure,
+ "must be a list of Builder description names")
+
+ # each Scheduler must have a unique name
+ badcfg = schedulersCfg + \
+"""
+c['schedulers'] = [Scheduler('dup', None, 60, []),
+ Scheduler('dup', None, 60, [])]
+"""
+ d.addCallback(_loadConfig, badcfg)
+ d.addBoth(_shouldBeFailure, "Schedulers must have unique names")
+
+ return d
+
+ def testSchedulers(self):
+ master = self.buildmaster
+ master.loadChanges()
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(master.allSchedulers(), [])
+
+ d = self.buildmaster.loadConfig(schedulersCfg)
+ d.addCallback(self._testSchedulers_1)
+ return d
+
+ def _testSchedulers_1(self, res):
+ sch = self.buildmaster.allSchedulers()
+ self.failUnlessEqual(len(sch), 1)
+ s = sch[0]
+ self.failUnless(isinstance(s, scheduler.Scheduler))
+ self.failUnlessEqual(s.name, "full")
+ self.failUnlessEqual(s.branch, None)
+ self.failUnlessEqual(s.treeStableTimer, 60)
+ self.failUnlessEqual(s.builderNames, ['builder1'])
+
+ newcfg = schedulersCfg + \
+"""
+s1 = Scheduler('full', None, 60, ['builder1'])
+c['schedulers'] = [s1, Dependent('downstream', s1, ['builder1'])]
+"""
+ d = self.buildmaster.loadConfig(newcfg)
+ d.addCallback(self._testSchedulers_2, newcfg)
+ return d
+ def _testSchedulers_2(self, res, newcfg):
+ sch = self.buildmaster.allSchedulers()
+ self.failUnlessEqual(len(sch), 2)
+ s = sch[0]
+ self.failUnless(isinstance(s, scheduler.Scheduler))
+ s = sch[1]
+ self.failUnless(isinstance(s, scheduler.Dependent))
+ self.failUnlessEqual(s.name, "downstream")
+ self.failUnlessEqual(s.builderNames, ['builder1'])
+
+ # reloading the same config file should leave the schedulers in place
+ d = self.buildmaster.loadConfig(newcfg)
+ d.addCallback(self._testSchedulers_3, sch)
+ return d
+ def _testSchedulers_3(self, res, sch1):
+ sch2 = self.buildmaster.allSchedulers()
+ self.failUnlessEqual(len(sch2), 2)
+ sch1.sort()
+ sch2.sort()
+ self.failUnlessEqual(sch1, sch2)
+ self.failUnlessIdentical(sch1[0], sch2[0])
+ self.failUnlessIdentical(sch1[1], sch2[1])
+ self.failUnlessIdentical(sch1[0].parent, self.buildmaster)
+ self.failUnlessIdentical(sch1[1].parent, self.buildmaster)
+
+
+
+ def testBuilders(self):
+ master = self.buildmaster
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(master.botmaster.builders, {})
+
+ master.loadConfig(buildersCfg)
+ self.failUnlessEqual(master.botmaster.builderNames, ["builder1"])
+ self.failUnlessEqual(master.botmaster.builders.keys(), ["builder1"])
+ b = master.botmaster.builders["builder1"]
+ self.failUnless(isinstance(b, Builder))
+ self.failUnlessEqual(b.name, "builder1")
+ self.failUnlessEqual(b.slavenames, ["bot1"])
+ self.failUnlessEqual(b.builddir, "workdir")
+ f1 = b.buildFactory
+ self.failUnless(isinstance(f1, BasicBuildFactory))
+ steps = f1.steps
+ self.failUnlessEqual(len(steps), 3)
+ self.failUnlessEqual(steps[0], (CVS,
+ {'cvsroot': 'cvsroot',
+ 'cvsmodule': 'cvsmodule',
+ 'mode': 'clobber'}))
+ self.failUnlessEqual(steps[1], (Compile,
+ {'command': 'make all'}))
+ self.failUnlessEqual(steps[2], (Test,
+ {'command': 'make check'}))
+
+
+ # make sure a reload of the same data doesn't interrupt the Builder
+ master.loadConfig(buildersCfg)
+ self.failUnlessEqual(master.botmaster.builderNames, ["builder1"])
+ self.failUnlessEqual(master.botmaster.builders.keys(), ["builder1"])
+ b2 = master.botmaster.builders["builder1"]
+ self.failUnlessIdentical(b, b2)
+ # TODO: test that the BuilderStatus object doesn't change
+ #statusbag2 = master.client_svc.statusbags["builder1"]
+ #self.failUnlessIdentical(statusbag, statusbag2)
+
+ # but changing something should result in a new Builder
+ master.loadConfig(buildersCfg2)
+ self.failUnlessEqual(master.botmaster.builderNames, ["builder1"])
+ self.failUnlessEqual(master.botmaster.builders.keys(), ["builder1"])
+ b3 = master.botmaster.builders["builder1"]
+ self.failIf(b is b3)
+ # the statusbag remains the same TODO
+ #statusbag3 = master.client_svc.statusbags["builder1"]
+ #self.failUnlessIdentical(statusbag, statusbag3)
+
+ # adding new builder
+ master.loadConfig(buildersCfg3)
+ self.failUnlessEqual(master.botmaster.builderNames, ["builder1",
+ "builder2"])
+ self.failUnlessListsEquivalent(master.botmaster.builders.keys(),
+ ["builder1", "builder2"])
+ b4 = master.botmaster.builders["builder1"]
+ self.failUnlessIdentical(b3, b4)
+
+ # changing first builder should leave it at the same place in the list
+ master.loadConfig(buildersCfg4)
+ self.failUnlessEqual(master.botmaster.builderNames, ["builder1",
+ "builder2"])
+ self.failUnlessListsEquivalent(master.botmaster.builders.keys(),
+ ["builder1", "builder2"])
+ b5 = master.botmaster.builders["builder1"]
+ self.failIf(b4 is b5)
+
+ # and removing it should make the Builder go away
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(master.botmaster.builderNames, [])
+ self.failUnlessEqual(master.botmaster.builders, {})
+ #self.failUnlessEqual(master.client_svc.statusbags, {}) # TODO
+
+ def testWithProperties(self):
+ master = self.buildmaster
+ master.loadConfig(wpCfg1)
+ self.failUnlessEqual(master.botmaster.builderNames, ["builder1"])
+ self.failUnlessEqual(master.botmaster.builders.keys(), ["builder1"])
+ b1 = master.botmaster.builders["builder1"]
+
+ # reloading the same config should leave the builder unchanged
+ master.loadConfig(wpCfg1)
+ b2 = master.botmaster.builders["builder1"]
+ self.failUnlessIdentical(b1, b2)
+
+ # but changing the parameters of the WithProperties should change it
+ master.loadConfig(wpCfg2)
+ b3 = master.botmaster.builders["builder1"]
+ self.failIf(b1 is b3)
+
+ # again, reloading same config should leave the builder unchanged
+ master.loadConfig(wpCfg2)
+ b4 = master.botmaster.builders["builder1"]
+ self.failUnlessIdentical(b3, b4)
+
+ def checkIRC(self, m, expected):
+ ircs = {}
+ for irc in self.servers(m, words.IRC):
+ ircs[irc.host] = (irc.nick, irc.channels)
+ self.failUnlessEqual(ircs, expected)
+
+ def testIRC(self):
+ if not words:
+ raise unittest.SkipTest("Twisted Words package is not installed")
+ master = self.buildmaster
+ master.loadChanges()
+ d = master.loadConfig(emptyCfg)
+ e1 = {}
+ d.addCallback(lambda res: self.checkIRC(master, e1))
+ d.addCallback(lambda res: master.loadConfig(ircCfg1))
+ e2 = {'irc.us.freenode.net': ('buildbot', ['twisted'])}
+ d.addCallback(lambda res: self.checkIRC(master, e2))
+ d.addCallback(lambda res: master.loadConfig(ircCfg2))
+ e3 = {'irc.us.freenode.net': ('buildbot', ['twisted']),
+ 'irc.example.com': ('otherbot', ['chan1', 'chan2'])}
+ d.addCallback(lambda res: self.checkIRC(master, e3))
+ d.addCallback(lambda res: master.loadConfig(ircCfg3))
+ e4 = {'irc.us.freenode.net': ('buildbot', ['knotted'])}
+ d.addCallback(lambda res: self.checkIRC(master, e4))
+ d.addCallback(lambda res: master.loadConfig(ircCfg1))
+ e5 = {'irc.us.freenode.net': ('buildbot', ['twisted'])}
+ d.addCallback(lambda res: self.checkIRC(master, e5))
+ return d
+
+ def testWebPortnum(self):
+ master = self.buildmaster
+ master.loadChanges()
+
+ d = master.loadConfig(webCfg1)
+ def _check1(res):
+ ports = self.checkPorts(self.buildmaster,
+ [(9999, pb.PBServerFactory), (9980, Site)])
+ p = ports[1]
+ self.p = p
+ # nothing should be changed
+ d.addCallback(_check1)
+
+ d.addCallback(lambda res: self.buildmaster.loadConfig(webCfg1))
+ def _check2(res):
+ ports = self.checkPorts(self.buildmaster,
+ [(9999, pb.PBServerFactory), (9980, Site)])
+ self.failUnlessIdentical(self.p, ports[1],
+ "web port was changed even though "
+ "configuration was not")
+ # WebStatus is no longer a ComparableMixin, so it will be
+ # rebuilt on each reconfig
+ #d.addCallback(_check2)
+
+ d.addCallback(lambda res: self.buildmaster.loadConfig(webCfg2))
+ # changes port to 9981
+ def _check3(p):
+ ports = self.checkPorts(self.buildmaster,
+ [(9999, pb.PBServerFactory), (9981, Site)])
+ self.failIf(self.p is ports[1],
+ "configuration was changed but web port was unchanged")
+ d.addCallback(_check3)
+
+ d.addCallback(lambda res: self.buildmaster.loadConfig(webCfg3))
+        # bind port 9981 to localhost only
+ def _check4(p):
+ ports = self.checkPorts(self.buildmaster,
+ [(9999, pb.PBServerFactory), (9981, Site)])
+ self.failUnlessEqual(ports[1].kwargs['interface'], "127.0.0.1")
+ d.addCallback(_check4)
+
+ d.addCallback(lambda res: self.buildmaster.loadConfig(emptyCfg))
+ d.addCallback(lambda res:
+ self.checkPorts(self.buildmaster,
+ [(9999, pb.PBServerFactory)]))
+ return d
+
+ def testWebPathname(self):
+ master = self.buildmaster
+ master.loadChanges()
+
+ d = master.loadConfig(webNameCfg1)
+ def _check1(res):
+ self.checkPorts(self.buildmaster,
+ [(9999, pb.PBServerFactory),
+ ('~/.twistd-web-pb', pb.PBServerFactory)])
+ unixports = self.UNIXports(self.buildmaster)
+ self.f = f = unixports[0].args[1]
+ self.failUnless(isinstance(f.root, ResourcePublisher))
+ d.addCallback(_check1)
+
+ d.addCallback(lambda res: self.buildmaster.loadConfig(webNameCfg1))
+ # nothing should be changed
+ def _check2(res):
+ self.checkPorts(self.buildmaster,
+ [(9999, pb.PBServerFactory),
+ ('~/.twistd-web-pb', pb.PBServerFactory)])
+ newf = self.UNIXports(self.buildmaster)[0].args[1]
+ self.failUnlessIdentical(self.f, newf,
+ "web factory was changed even though "
+ "configuration was not")
+ # WebStatus is no longer a ComparableMixin, so it will be
+ # rebuilt on each reconfig
+ #d.addCallback(_check2)
+
+ d.addCallback(lambda res: self.buildmaster.loadConfig(webNameCfg2))
+ def _check3(res):
+ self.checkPorts(self.buildmaster,
+ [(9999, pb.PBServerFactory),
+ ('./bar.socket', pb.PBServerFactory)])
+            newf = self.UNIXports(self.buildmaster)[0].args[1]
+ self.failIf(self.f is newf,
+ "web factory was unchanged but "
+ "configuration was changed")
+ d.addCallback(_check3)
+
+ d.addCallback(lambda res: self.buildmaster.loadConfig(emptyCfg))
+ d.addCallback(lambda res:
+ self.checkPorts(self.buildmaster,
+ [(9999, pb.PBServerFactory)]))
+ return d
+
+ def testDebugPassword(self):
+ master = self.buildmaster
+
+ master.loadConfig(debugPasswordCfg)
+ self.failUnlessEqual(master.checker.users,
+ {"change": "changepw",
+ "debug": "sekrit"})
+
+ master.loadConfig(debugPasswordCfg)
+ self.failUnlessEqual(master.checker.users,
+ {"change": "changepw",
+ "debug": "sekrit"})
+
+ master.loadConfig(emptyCfg)
+ self.failUnlessEqual(master.checker.users,
+ {"change": "changepw"})
+
+ def testLocks(self):
+ master = self.buildmaster
+ botmaster = master.botmaster
+
+ # make sure that c['interlocks'] is rejected properly
+ self.failUnlessRaises(KeyError, master.loadConfig, interlockCfgBad)
+ # and that duplicate-named Locks are caught
+ self.failUnlessRaises(ValueError, master.loadConfig, lockCfgBad1)
+ self.failUnlessRaises(ValueError, master.loadConfig, lockCfgBad2)
+ self.failUnlessRaises(ValueError, master.loadConfig, lockCfgBad3)
+
+ # create a Builder that uses Locks
+ master.loadConfig(lockCfg1a)
+ b1 = master.botmaster.builders["builder1"]
+ self.failUnlessEqual(len(b1.locks), 2)
+
+ # reloading the same config should not change the Builder
+ master.loadConfig(lockCfg1a)
+ self.failUnlessIdentical(b1, master.botmaster.builders["builder1"])
+ # but changing the set of locks used should change it
+ master.loadConfig(lockCfg1b)
+ self.failIfIdentical(b1, master.botmaster.builders["builder1"])
+ b1 = master.botmaster.builders["builder1"]
+ self.failUnlessEqual(len(b1.locks), 1)
+
+ # similar test with step-scoped locks
+ master.loadConfig(lockCfg2a)
+ b1 = master.botmaster.builders["builder1"]
+ # reloading the same config should not change the Builder
+ master.loadConfig(lockCfg2a)
+ self.failUnlessIdentical(b1, master.botmaster.builders["builder1"])
+ # but changing the set of locks used should change it
+ master.loadConfig(lockCfg2b)
+ self.failIfIdentical(b1, master.botmaster.builders["builder1"])
+ b1 = master.botmaster.builders["builder1"]
+ # remove the locks entirely
+ master.loadConfig(lockCfg2c)
+ self.failIfIdentical(b1, master.botmaster.builders["builder1"])
+
+class ConfigElements(unittest.TestCase):
+ # verify that ComparableMixin is working
+ def testSchedulers(self):
+ s1 = scheduler.Scheduler(name='quick', branch=None,
+ treeStableTimer=30,
+ builderNames=['quick'])
+ s2 = scheduler.Scheduler(name="all", branch=None,
+ treeStableTimer=5*60,
+ builderNames=["a", "b"])
+ s3 = scheduler.Try_Userpass("try", ["a","b"], port=9989,
+ userpass=[("foo","bar")])
+ s1a = scheduler.Scheduler(name='quick', branch=None,
+ treeStableTimer=30,
+ builderNames=['quick'])
+ s2a = scheduler.Scheduler(name="all", branch=None,
+ treeStableTimer=5*60,
+ builderNames=["a", "b"])
+ s3a = scheduler.Try_Userpass("try", ["a","b"], port=9989,
+ userpass=[("foo","bar")])
+ self.failUnless(s1 == s1)
+ self.failUnless(s1 == s1a)
+ self.failUnless(s1a in [s1, s2, s3])
+ self.failUnless(s2a in [s1, s2, s3])
+ self.failUnless(s3a in [s1, s2, s3])
+
+
+
+class ConfigFileTest(unittest.TestCase):
+
+ def testFindConfigFile(self):
+ os.mkdir("test_cf")
+ open(os.path.join("test_cf", "master.cfg"), "w").write(emptyCfg)
+ slaveportCfg = emptyCfg + "c['slavePortnum'] = 9000\n"
+ open(os.path.join("test_cf", "alternate.cfg"), "w").write(slaveportCfg)
+
+ m = BuildMaster("test_cf")
+ m.loadTheConfigFile()
+ self.failUnlessEqual(m.slavePortnum, "tcp:9999")
+
+ m = BuildMaster("test_cf", "alternate.cfg")
+ m.loadTheConfigFile()
+ self.failUnlessEqual(m.slavePortnum, "tcp:9000")
+
+
+class MyTarget(base.StatusReceiverMultiService):
+ def __init__(self, name):
+ self.name = name
+ base.StatusReceiverMultiService.__init__(self)
+ def startService(self):
+ # make a note in a list stashed in the BuildMaster
+ self.parent.targetevents.append(("start", self.name))
+ return base.StatusReceiverMultiService.startService(self)
+ def stopService(self):
+ self.parent.targetevents.append(("stop", self.name))
+ return base.StatusReceiverMultiService.stopService(self)
+
+class MySlowTarget(MyTarget):
+ def stopService(self):
+ from twisted.internet import reactor
+ d = base.StatusReceiverMultiService.stopService(self)
+ def stall(res):
+ d2 = defer.Deferred()
+ reactor.callLater(0.1, d2.callback, res)
+ return d2
+ d.addCallback(stall)
+ m = self.parent
+ def finishedStalling(res):
+ m.targetevents.append(("stop", self.name))
+ return res
+ d.addCallback(finishedStalling)
+ return d
+
+# we can't actually startService a buildmaster with a config that uses a
+# fixed slavePortnum like 9999, so instead this makes it possible to pass '0'
+# for the first time, and then substitute back in the allocated port number
+# on subsequent passes.
+startableEmptyCfg = emptyCfg + \
+"""
+c['slavePortnum'] = %d
+"""
+
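+# Illustration only (not part of the original tests): a minimal sketch of how
+# the '%d' placeholder above is meant to be filled in.  On the first pass it
+# gets 0 so the reactor picks a free port; on later reloads the tests
+# substitute whatever port was actually allocated.  The 12345 below is a
+# hypothetical stand-in for that allocated port.
+_example_first_cfg = startableEmptyCfg % 0       # OS picks a free slave port
+_example_reload_cfg = startableEmptyCfg % 12345  # reuse the allocated port
+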
+targetCfg1 = startableEmptyCfg + \
+"""
+from buildbot.test.test_config import MyTarget
+c['status'] = [MyTarget('a')]
+"""
+
+targetCfg2 = startableEmptyCfg + \
+"""
+from buildbot.test.test_config import MySlowTarget
+c['status'] = [MySlowTarget('b')]
+"""
+
+class StartService(unittest.TestCase):
+ def tearDown(self):
+ return self.master.stopService()
+
+ def testStartService(self):
+ os.mkdir("test_ss")
+ self.master = m = BuildMaster("test_ss")
+ # inhibit the usual read-config-on-startup behavior
+ m.readConfig = True
+ m.startService()
+ d = m.loadConfig(startableEmptyCfg % 0)
+ d.addCallback(self._testStartService_0)
+ return d
+
+ def _testStartService_0(self, res):
+ m = self.master
+ m.targetevents = []
+ # figure out what port got allocated
+ self.portnum = m.slavePort._port.getHost().port
+ d = m.loadConfig(targetCfg1 % self.portnum)
+ d.addCallback(self._testStartService_1)
+ return d
+
+ def _testStartService_1(self, res):
+ self.failUnlessEqual(len(self.master.statusTargets), 1)
+ self.failUnless(isinstance(self.master.statusTargets[0], MyTarget))
+ self.failUnlessEqual(self.master.targetevents,
+ [('start', 'a')])
+ self.master.targetevents = []
+ # reloading the same config should not start or stop the target
+ d = self.master.loadConfig(targetCfg1 % self.portnum)
+ d.addCallback(self._testStartService_2)
+ return d
+
+ def _testStartService_2(self, res):
+ self.failUnlessEqual(self.master.targetevents, [])
+ # but loading a new config file should stop the old one, then
+ # start the new one
+ d = self.master.loadConfig(targetCfg2 % self.portnum)
+ d.addCallback(self._testStartService_3)
+ return d
+
+ def _testStartService_3(self, res):
+ self.failUnlessEqual(self.master.targetevents,
+ [('stop', 'a'), ('start', 'b')])
+ self.master.targetevents = []
+ # and going back to the old one should do the same, in the same
+ # order, even though the current MySlowTarget takes a moment to shut
+ # down
+ d = self.master.loadConfig(targetCfg1 % self.portnum)
+ d.addCallback(self._testStartService_4)
+ return d
+
+ def _testStartService_4(self, res):
+ self.failUnlessEqual(self.master.targetevents,
+ [('stop', 'b'), ('start', 'a')])
+
+cfg1 = \
+"""
+from buildbot.process.factory import BuildFactory, s
+from buildbot.steps.shell import ShellCommand
+from buildbot.steps.source import Darcs
+from buildbot.buildslave import BuildSlave
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'pw1')]
+c['schedulers'] = []
+c['slavePortnum'] = 9999
+f1 = BuildFactory([ShellCommand(command='echo yes'),
+ s(ShellCommand, command='old-style'),
+ ])
+f1.addStep(Darcs(repourl='http://buildbot.net/repos/trunk'))
+f1.addStep(ShellCommand, command='echo old-style')
+c['builders'] = [{'name':'builder1', 'slavename':'bot1',
+ 'builddir':'workdir', 'factory':f1}]
+"""
+
+class Factories(unittest.TestCase):
+
+ def failUnlessExpectedShell(self, factory, defaults=True, **kwargs):
+ shell_args = {}
+ if defaults:
+ shell_args.update({'descriptionDone': None,
+ 'description': None,
+ 'workdir': None,
+ 'logfiles': {},
+ 'usePTY': "slave-config",
+ })
+ shell_args.update(kwargs)
+ self.failUnlessIdentical(factory[0], ShellCommand)
+ if factory[1] != shell_args:
+ print
+ print "factory had:"
+ for k in sorted(factory[1].keys()):
+ print k
+ print "but we were expecting:"
+ for k in sorted(shell_args.keys()):
+ print k
+ self.failUnlessEqual(factory[1], shell_args)
+
+ def failUnlessExpectedDarcs(self, factory, **kwargs):
+ darcs_args = {'workdir': None,
+ 'alwaysUseLatest': False,
+ 'mode': 'update',
+ 'timeout': 1200,
+ 'retry': None,
+ 'baseURL': None,
+ 'defaultBranch': None,
+ 'logfiles': {},
+ }
+ darcs_args.update(kwargs)
+ self.failUnlessIdentical(factory[0], Darcs)
+ if factory[1] != darcs_args:
+ print
+ print "factory had:"
+ for k in sorted(factory[1].keys()):
+ print k
+ print "but we were expecting:"
+ for k in sorted(darcs_args.keys()):
+ print k
+ self.failUnlessEqual(factory[1], darcs_args)
+
+ def testSteps(self):
+ m = BuildMaster(".")
+ m.loadConfig(cfg1)
+ b = m.botmaster.builders["builder1"]
+ steps = b.buildFactory.steps
+ self.failUnlessEqual(len(steps), 4)
+
+ self.failUnlessExpectedShell(steps[0], command="echo yes")
+ self.failUnlessExpectedShell(steps[1], defaults=False,
+ command="old-style")
+ self.failUnlessExpectedDarcs(steps[2],
+ repourl="http://buildbot.net/repos/trunk")
+ self.failUnlessExpectedShell(steps[3], defaults=False,
+ command="echo old-style")
+
+ def _loop(self, orig):
+ step_class, kwargs = orig.getStepFactory()
+ newstep = step_class(**kwargs)
+ return newstep
+
+ def testAllSteps(self):
+ # make sure that steps can be created from the factories that they
+ # return
+ for s in ( dummy.Dummy(), dummy.FailingDummy(), dummy.RemoteDummy(),
+ maxq.MaxQ("testdir"),
+ python.BuildEPYDoc(), python.PyFlakes(),
+ python_twisted.HLint(),
+ python_twisted.Trial(testpath=None, tests="tests"),
+ python_twisted.ProcessDocs(), python_twisted.BuildDebs(),
+ python_twisted.RemovePYCs(),
+ shell.ShellCommand(), shell.TreeSize(),
+ shell.Configure(), shell.Compile(), shell.Test(),
+ source.CVS("cvsroot", "module"),
+ source.SVN("svnurl"), source.Darcs("repourl"),
+ source.Git("repourl"),
+ source.Arch("url", "version"),
+ source.Bazaar("url", "version", "archive"),
+ source.Bzr("repourl"),
+ source.Mercurial("repourl"),
+ source.P4("p4base"),
+ source.P4Sync(1234, "p4user", "passwd", "client",
+ mode="copy"),
+ source.Monotone("server", "branch"),
+ transfer.FileUpload("src", "dest"),
+ transfer.FileDownload("src", "dest"),
+ ):
+ try:
+ self._loop(s)
+ except:
+ print "error checking %s" % s
+ raise
+
diff --git a/buildbot/buildbot/test/test_control.py b/buildbot/buildbot/test/test_control.py
new file mode 100644
index 0000000..298d48a
--- /dev/null
+++ b/buildbot/buildbot/test/test_control.py
@@ -0,0 +1,104 @@
+# -*- test-case-name: buildbot.test.test_control -*-
+
+import os
+
+from twisted.trial import unittest
+from twisted.internet import defer
+
+from buildbot import master, interfaces
+from buildbot.sourcestamp import SourceStamp
+from buildbot.slave import bot
+from buildbot.status.builder import SUCCESS
+from buildbot.process import base
+from buildbot.test.runutils import rmtree
+
+config = """
+from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.buildslave import BuildSlave
+
+def s(klass, **kwargs):
+ return (klass, kwargs)
+
+f1 = factory.BuildFactory([
+ s(dummy.Dummy, timeout=1),
+ ])
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit')]
+c['schedulers'] = []
+c['builders'] = [{'name': 'force', 'slavename': 'bot1',
+ 'builddir': 'force-dir', 'factory': f1}]
+c['slavePortnum'] = 0
+BuildmasterConfig = c
+"""
+
+class FakeBuilder:
+ name = "fake"
+ def getSlaveCommandVersion(self, command, oldversion=None):
+ return "1.10"
+
+
+class Force(unittest.TestCase):
+
+ def rmtree(self, d):
+ rmtree(d)
+
+ def setUp(self):
+ self.master = None
+ self.slave = None
+ self.rmtree("control_basedir")
+ os.mkdir("control_basedir")
+ self.master = master.BuildMaster("control_basedir")
+ self.slavebase = os.path.abspath("control_slavebase")
+ self.rmtree(self.slavebase)
+ os.mkdir("control_slavebase")
+
+ def connectSlave(self):
+ port = self.master.slavePort._port.getHost().port
+ slave = bot.BuildSlave("localhost", port, "bot1", "sekrit",
+ self.slavebase, keepalive=0, usePTY=1)
+ self.slave = slave
+ slave.startService()
+ d = self.master.botmaster.waitUntilBuilderAttached("force")
+ return d
+
+ def tearDown(self):
+ dl = []
+ if self.slave:
+ dl.append(self.master.botmaster.waitUntilBuilderDetached("force"))
+ dl.append(defer.maybeDeferred(self.slave.stopService))
+ if self.master:
+ dl.append(defer.maybeDeferred(self.master.stopService))
+ return defer.DeferredList(dl)
+
+ def testRequest(self):
+ m = self.master
+ m.loadConfig(config)
+ m.startService()
+ d = self.connectSlave()
+ d.addCallback(self._testRequest_1)
+ return d
+ def _testRequest_1(self, res):
+ c = interfaces.IControl(self.master)
+ req = base.BuildRequest("I was bored", SourceStamp(), 'test_builder')
+ builder_control = c.getBuilder("force")
+ d = defer.Deferred()
+ req.subscribe(d.callback)
+ builder_control.requestBuild(req)
+ d.addCallback(self._testRequest_2)
+ # we use the same check-the-results code as testForce
+ return d
+
+ def _testRequest_2(self, build_control):
+ self.failUnless(interfaces.IBuildControl.providedBy(build_control))
+ d = build_control.getStatus().waitUntilFinished()
+ d.addCallback(self._testRequest_3)
+ return d
+
+ def _testRequest_3(self, bs):
+ self.failUnless(interfaces.IBuildStatus.providedBy(bs))
+ self.failUnless(bs.isFinished())
+ self.failUnlessEqual(bs.getResults(), SUCCESS)
+ #self.failUnlessEqual(bs.getResponsibleUsers(), ["bob"]) # TODO
+ self.failUnlessEqual(bs.getChanges(), ())
+ #self.failUnlessEqual(bs.getReason(), "forced") # TODO
diff --git a/buildbot/buildbot/test/test_dependencies.py b/buildbot/buildbot/test/test_dependencies.py
new file mode 100644
index 0000000..624efc4
--- /dev/null
+++ b/buildbot/buildbot/test/test_dependencies.py
@@ -0,0 +1,166 @@
+# -*- test-case-name: buildbot.test.test_dependencies -*-
+
+from twisted.trial import unittest
+
+from twisted.internet import reactor, defer
+
+from buildbot.test.runutils import RunMixin
+from buildbot.status import base
+
+config_1 = """
+from buildbot import scheduler
+from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.buildslave import BuildSlave
+s = factory.s
+from buildbot.test.test_locks import LockStep
+
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit'), BuildSlave('bot2', 'sekrit')]
+c['schedulers'] = []
+c['slavePortnum'] = 0
+
+# upstream1 (fastfail, slowpass)
+# -> downstream2 (b3, b4)
+# upstream3 (fastpass, slowpass)
+# -> downstream4 (b3, b4)
+# -> downstream5 (b5)
+
+s1 = scheduler.Scheduler('upstream1', None, 10, ['slowpass', 'fastfail'])
+s2 = scheduler.Dependent('downstream2', s1, ['b3', 'b4'])
+s3 = scheduler.Scheduler('upstream3', None, 10, ['fastpass', 'slowpass'])
+s4 = scheduler.Dependent('downstream4', s3, ['b3', 'b4'])
+s5 = scheduler.Dependent('downstream5', s4, ['b5'])
+c['schedulers'] = [s1, s2, s3, s4, s5]
+
+f_fastpass = factory.BuildFactory([s(dummy.Dummy, timeout=1)])
+f_slowpass = factory.BuildFactory([s(dummy.Dummy, timeout=2)])
+f_fastfail = factory.BuildFactory([s(dummy.FailingDummy, timeout=1)])
+
+def builder(name, f):
+ d = {'name': name, 'slavename': 'bot1', 'builddir': name, 'factory': f}
+ return d
+
+c['builders'] = [builder('slowpass', f_slowpass),
+ builder('fastfail', f_fastfail),
+ builder('fastpass', f_fastpass),
+ builder('b3', f_fastpass),
+ builder('b4', f_fastpass),
+ builder('b5', f_fastpass),
+ ]
+"""
+
+class Logger(base.StatusReceiverMultiService):
+ def __init__(self, master):
+ base.StatusReceiverMultiService.__init__(self)
+ self.builds = []
+ for bn in master.status.getBuilderNames():
+ master.status.getBuilder(bn).subscribe(self)
+
+ def buildStarted(self, builderName, build):
+ self.builds.append(builderName)
+
+class Dependencies(RunMixin, unittest.TestCase):
+ def setUp(self):
+ RunMixin.setUp(self)
+ self.master.loadConfig(config_1)
+ self.master.startService()
+ d = self.connectSlave(["slowpass", "fastfail", "fastpass",
+ "b3", "b4", "b5"])
+ return d
+
+ def findScheduler(self, name):
+ for s in self.master.allSchedulers():
+ if s.name == name:
+ return s
+ raise KeyError("No Scheduler named '%s'" % name)
+
+ def testParse(self):
+ self.master.loadConfig(config_1)
+ # that's it, just make sure this config file is loaded successfully
+
+ def testRun_Fail(self):
+        # add an extra status target so we can pay attention to which builds
+        # start and which don't.
+ self.logger = Logger(self.master)
+
+        # kick off upstream1, which has a failing Builder and thus will not
+        # trigger downstream2
+ s = self.findScheduler("upstream1")
+ # this is an internal function of the Scheduler class
+ s.fireTimer() # fires a build
+ # t=0: two builders start: 'slowpass' and 'fastfail'
+ # t=1: builder 'fastfail' finishes
+ # t=2: builder 'slowpass' finishes
+ d = defer.Deferred()
+ d.addCallback(self._testRun_Fail_1)
+ reactor.callLater(5, d.callback, None)
+ return d
+
+ def _testRun_Fail_1(self, res):
+ # 'slowpass' and 'fastfail' should have run one build each
+ b = self.status.getBuilder('slowpass').getLastFinishedBuild()
+ self.failUnless(b)
+ self.failUnlessEqual(b.getNumber(), 0)
+ b = self.status.getBuilder('fastfail').getLastFinishedBuild()
+ self.failUnless(b)
+ self.failUnlessEqual(b.getNumber(), 0)
+
+ # none of the other builders should have run
+ self.failIf(self.status.getBuilder('b3').getLastFinishedBuild())
+ self.failIf(self.status.getBuilder('b4').getLastFinishedBuild())
+ self.failIf(self.status.getBuilder('b5').getLastFinishedBuild())
+
+ # in fact, none of them should have even started
+ self.failUnlessEqual(len(self.logger.builds), 2)
+ self.failUnless("slowpass" in self.logger.builds)
+ self.failUnless("fastfail" in self.logger.builds)
+ self.failIf("b3" in self.logger.builds)
+ self.failIf("b4" in self.logger.builds)
+ self.failIf("b5" in self.logger.builds)
+
+ def testRun_Pass(self):
+ # kick off upstream3, which will fire downstream4 and then
+ # downstream5
+ s = self.findScheduler("upstream3")
+ # this is an internal function of the Scheduler class
+ s.fireTimer() # fires a build
+ # t=0: slowpass and fastpass start
+ # t=1: builder 'fastpass' finishes
+ # t=2: builder 'slowpass' finishes
+ # scheduler 'downstream4' fires
+ # builds b3 and b4 are started
+ # t=3: builds b3 and b4 finish
+ # scheduler 'downstream5' fires
+ # build b5 is started
+ # t=4: build b5 is finished
+ d = defer.Deferred()
+ d.addCallback(self._testRun_Pass_1)
+ reactor.callLater(5, d.callback, None)
+ return d
+
+ def _testRun_Pass_1(self, res):
+ # 'fastpass' and 'slowpass' should have run one build each
+ b = self.status.getBuilder('fastpass').getLastFinishedBuild()
+ self.failUnless(b)
+ self.failUnlessEqual(b.getNumber(), 0)
+
+ b = self.status.getBuilder('slowpass').getLastFinishedBuild()
+ self.failUnless(b)
+ self.failUnlessEqual(b.getNumber(), 0)
+
+ self.failIf(self.status.getBuilder('fastfail').getLastFinishedBuild())
+
+ b = self.status.getBuilder('b3').getLastFinishedBuild()
+ self.failUnless(b)
+ self.failUnlessEqual(b.getNumber(), 0)
+
+ b = self.status.getBuilder('b4').getLastFinishedBuild()
+ self.failUnless(b)
+ self.failUnlessEqual(b.getNumber(), 0)
+
+        # downstream5 should have fired b5 as well
+        b = self.status.getBuilder('b5').getLastFinishedBuild()
+ self.failUnless(b)
+ self.failUnlessEqual(b.getNumber(), 0)
+
+
diff --git a/buildbot/buildbot/test/test_ec2buildslave.py b/buildbot/buildbot/test/test_ec2buildslave.py
new file mode 100644
index 0000000..d0f1644
--- /dev/null
+++ b/buildbot/buildbot/test/test_ec2buildslave.py
@@ -0,0 +1,552 @@
+# Portions copyright Canonical Ltd. 2009
+
+import os
+import sys
+import StringIO
+import textwrap
+
+from twisted.trial import unittest
+from twisted.internet import defer, reactor
+
+from buildbot.process.base import BuildRequest
+from buildbot.sourcestamp import SourceStamp
+from buildbot.status.builder import SUCCESS
+from buildbot.test.runutils import RunMixin
+
+
+PENDING = 'pending'
+RUNNING = 'running'
+SHUTTINGDOWN = 'shutting-down'
+TERMINATED = 'terminated'
+
+
+class EC2ResponseError(Exception):
+ def __init__(self, code):
+ self.code = code
+
+
+class Stub:
+ def __init__(self, **kwargs):
+ self.__dict__.update(kwargs)
+
+
+class Instance:
+
+ def __init__(self, data, ami, **kwargs):
+ self.data = data
+ self.state = PENDING
+ self.id = ami
+ self.public_dns_name = 'ec2-012-345-678-901.compute-1.amazonaws.com'
+ self.__dict__.update(kwargs)
+ self.output = Stub(name='output', output='example_output')
+
+ def update(self):
+ if self.state == PENDING:
+ self.data.testcase.connectOneSlave(self.data.slave.slavename)
+ self.state = RUNNING
+ elif self.state == SHUTTINGDOWN:
+ slavename = self.data.slave.slavename
+ slaves = self.data.testcase.slaves
+ if slavename in slaves:
+ def discard(data):
+ pass
+ s = slaves.pop(slavename)
+ bot = s.getServiceNamed("bot")
+ for buildername in self.data.slave.slavebuilders:
+ remote = bot.builders[buildername].remote
+ if remote is None:
+ continue
+ broker = remote.broker
+ broker.dataReceived = discard # seal its ears
+ # and take away its voice
+ broker.transport.write = discard
+ # also discourage it from reconnecting once the connection
+ # goes away
+ s.bf.continueTrying = False
+ # stop the service for cleanliness
+ s.stopService()
+ self.state = TERMINATED
+
+ def get_console_output(self):
+ return self.output
+
+ def use_ip(self, elastic_ip):
+ if isinstance(elastic_ip, Stub):
+ elastic_ip = elastic_ip.public_ip
+ if self.data.addresses[elastic_ip] is not None:
+ raise ValueError('elastic ip already used')
+ self.data.addresses[elastic_ip] = self
+
+ def stop(self):
+ self.state = SHUTTINGDOWN
+
+class Image:
+
+ def __init__(self, data, ami, owner, location):
+ self.data = data
+ self.id = ami
+ self.owner = owner
+ self.location = location
+
+ def run(self, **kwargs):
+ return Stub(name='reservation',
+ instances=[Instance(self.data, self.id, **kwargs)])
+
+ @classmethod
+ def create(klass, data, ami, owner, location):
+ assert ami not in data.images
+ self = klass(data, ami, owner, location)
+ data.images[ami] = self
+ return self
+
+
+class Connection:
+
+ def __init__(self, data):
+ self.data = data
+
+ def get_all_key_pairs(self, keypair_name):
+ try:
+ return [self.data.keys[keypair_name]]
+ except KeyError:
+ raise EC2ResponseError('InvalidKeyPair.NotFound')
+
+ def create_key_pair(self, keypair_name):
+ return Key.create(keypair_name, self.data.keys)
+
+ def get_all_security_groups(self, security_name):
+ try:
+ return [self.data.security_groups[security_name]]
+ except KeyError:
+ raise EC2ResponseError('InvalidGroup.NotFound')
+
+ def create_security_group(self, security_name, description):
+ assert security_name not in self.data.security_groups
+ res = Stub(name='security_group', value=security_name,
+ description=description)
+ self.data.security_groups[security_name] = res
+ return res
+
+ def get_all_images(self, owners=None):
+ # return a list of images. images have .location and .id.
+ res = self.data.images.values()
+ if owners:
+ res = [image for image in res if image.owner in owners]
+ return res
+
+ def get_image(self, machine_id):
+ # return image or raise an error
+ return self.data.images[machine_id]
+
+ def get_all_addresses(self, elastic_ips):
+ res = []
+ for ip in elastic_ips:
+ if ip in self.data.addresses:
+ res.append(Stub(public_ip=ip))
+ else:
+ raise EC2ResponseError('...bad address...')
+ return res
+
+ def disassociate_address(self, address):
+ if address not in self.data.addresses:
+ raise EC2ResponseError('...unknown address...')
+ self.data.addresses[address] = None
+
+
+class Key:
+
+ # this is what we would need to do if we actually needed a real key.
+ # We don't right now.
+ #def __init__(self):
+ # self.raw = paramiko.RSAKey.generate(256)
+ # f = StringIO.StringIO()
+ # self.raw.write_private_key(f)
+ # self.material = f.getvalue()
+
+ @classmethod
+ def create(klass, name, keys):
+ self = klass()
+ self.name = name
+ self.keys = keys
+ assert name not in keys
+ keys[name] = self
+ return self
+
+ def delete(self):
+ del self.keys[self.name]
+
+
+class Boto:
+
+ slave = None # must be set in setUp
+
+ def __init__(self, testcase):
+ self.testcase = testcase
+ self.keys = {}
+ Key.create('latent_buildbot_slave', self.keys)
+ Key.create('buildbot_slave', self.keys)
+ assert sorted(self.keys.keys()) == ['buildbot_slave',
+ 'latent_buildbot_slave']
+ self.original_keys = dict(self.keys)
+ self.security_groups = {
+ 'latent_buildbot_slave': Stub(name='security_group',
+ value='latent_buildbot_slave')}
+ self.addresses = {'127.0.0.1': None}
+ self.images = {}
+ Image.create(self, 'ami-12345', 12345667890,
+ 'test-xx/image.manifest.xml')
+ Image.create(self, 'ami-AF000', 11111111111,
+ 'test-f0a/image.manifest.xml')
+ Image.create(self, 'ami-CE111', 22222222222,
+ 'test-e1b/image.manifest.xml')
+ Image.create(self, 'ami-ED222', 22222222222,
+ 'test-d2c/image.manifest.xml')
+ Image.create(self, 'ami-FC333', 22222222222,
+ 'test-c30d/image.manifest.xml')
+ Image.create(self, 'ami-DB444', 11111111111,
+ 'test-b4e/image.manifest.xml')
+ Image.create(self, 'ami-BA555', 11111111111,
+ 'test-a5f/image.manifest.xml')
+
+ def connect_ec2(self, identifier, secret_identifier):
+ assert identifier == 'publickey', identifier
+ assert secret_identifier == 'privatekey', secret_identifier
+ return Connection(self)
+
+ exception = Stub(EC2ResponseError=EC2ResponseError)
+
+
+class Mixin(RunMixin):
+
+ def doBuild(self):
+ br = BuildRequest("forced", SourceStamp(), 'test_builder')
+ d = br.waitUntilFinished()
+ self.control.getBuilder('b1').requestBuild(br)
+ return d
+
+ def setUp(self):
+ self.boto_setUp1()
+ self.master.loadConfig(self.config)
+ self.boto_setUp2()
+ self.boto_setUp3()
+
+ def boto_setUp1(self):
+ # debugging
+ #import twisted.internet.base
+ #twisted.internet.base.DelayedCall.debug = True
+ # debugging
+ RunMixin.setUp(self)
+ self.boto = boto = Boto(self)
+ if 'boto' not in sys.modules:
+ sys.modules['boto'] = boto
+ sys.modules['boto.exception'] = boto.exception
+ if 'buildbot.ec2buildslave' in sys.modules:
+ sys.modules['buildbot.ec2buildslave'].boto = boto
+
+ def boto_setUp2(self):
+ if sys.modules['boto'] is self.boto:
+ del sys.modules['boto']
+ del sys.modules['boto.exception']
+
+ def boto_setUp3(self):
+ self.master.startService()
+ self.boto.slave = self.bot1 = self.master.botmaster.slaves['bot1']
+ self.bot1._poll_resolution = 0.1
+ self.b1 = self.master.botmaster.builders['b1']
+
+ def tearDown(self):
+ try:
+ import boto
+ import boto.exception
+ except ImportError:
+ pass
+ else:
+ sys.modules['buildbot.ec2buildslave'].boto = boto
+ return RunMixin.tearDown(self)
+
+
+class BasicConfig(Mixin, unittest.TestCase):
+ config = textwrap.dedent("""\
+ from buildbot.process import factory
+ from buildbot.steps import dummy
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ s = factory.s
+
+ BuildmasterConfig = c = {}
+ c['slaves'] = [EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ 'ami-12345',
+ identifier='publickey',
+ secret_identifier='privatekey'
+ )]
+ c['schedulers'] = []
+ c['slavePortnum'] = 0
+ c['schedulers'] = []
+
+ f1 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=1)])
+
+ c['builders'] = [
+ {'name': 'b1', 'slavenames': ['bot1'],
+ 'builddir': 'b1', 'factory': f1},
+ ]
+ """)
+
+ def testSequence(self):
+        # test with secrets in config, a single AMI, and defaults.
+ self.assertEqual(self.bot1.ami, 'ami-12345')
+ self.assertEqual(self.bot1.instance_type, 'm1.large')
+ self.assertEqual(self.bot1.keypair_name, 'latent_buildbot_slave')
+ self.assertEqual(self.bot1.security_name, 'latent_buildbot_slave')
+ # this would be appropriate if we were recreating keys.
+ #self.assertNotEqual(self.boto.keys['latent_buildbot_slave'],
+ # self.boto.original_keys['latent_buildbot_slave'])
+ self.failUnless(isinstance(self.bot1.get_image(), Image))
+ self.assertEqual(self.bot1.get_image().id, 'ami-12345')
+ self.assertIdentical(self.bot1.elastic_ip, None)
+ self.assertIdentical(self.bot1.instance, None)
+ # let's start a build...
+ self.build_deferred = self.doBuild()
+ # ...and wait for the ec2 slave to show up
+ d = self.bot1.substantiation_deferred
+ d.addCallback(self._testSequence_1)
+ return d
+ def _testSequence_1(self, res):
+ # bot 1 is substantiated.
+ self.assertNotIdentical(self.bot1.slave, None)
+ self.failUnless(self.bot1.substantiated)
+ self.failUnless(isinstance(self.bot1.instance, Instance))
+ self.assertEqual(self.bot1.instance.id, 'ami-12345')
+ self.assertEqual(self.bot1.instance.state, RUNNING)
+ self.assertEqual(self.bot1.instance.key_name, 'latent_buildbot_slave')
+ self.assertEqual(self.bot1.instance.security_groups,
+ ['latent_buildbot_slave'])
+ self.assertEqual(self.bot1.instance.instance_type, 'm1.large')
+ self.assertEqual(self.bot1.output.output, 'example_output')
+ # now we'll wait for the build to complete
+ d = self.build_deferred
+ del self.build_deferred
+ d.addCallback(self._testSequence_2)
+ return d
+ def _testSequence_2(self, res):
+ # build was a success!
+ self.failUnlessEqual(res.getResults(), SUCCESS)
+ self.failUnlessEqual(res.getSlavename(), "bot1")
+ # Let's let it shut down. We'll set the build_wait_timer to fire
+ # sooner, and wait for it to fire.
+ self.bot1.build_wait_timer.reset(0)
+ # we'll stash the instance around to look at it
+ self.instance = self.bot1.instance
+ # now we wait.
+ d = defer.Deferred()
+ reactor.callLater(0.5, d.callback, None)
+ d.addCallback(self._testSequence_3)
+ return d
+ def _testSequence_3(self, res):
+ # slave is insubstantiated
+ self.assertIdentical(self.bot1.slave, None)
+ self.failIf(self.bot1.substantiated)
+ self.assertIdentical(self.bot1.instance, None)
+ self.assertEqual(self.instance.state, TERMINATED)
+ del self.instance
+
+class ElasticIP(Mixin, unittest.TestCase):
+ config = textwrap.dedent("""\
+ from buildbot.process import factory
+ from buildbot.steps import dummy
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ s = factory.s
+
+ BuildmasterConfig = c = {}
+ c['slaves'] = [EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ 'ami-12345',
+ identifier='publickey',
+ secret_identifier='privatekey',
+ elastic_ip='127.0.0.1'
+ )]
+ c['schedulers'] = []
+ c['slavePortnum'] = 0
+ c['schedulers'] = []
+
+ f1 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=1)])
+
+ c['builders'] = [
+ {'name': 'b1', 'slavenames': ['bot1'],
+ 'builddir': 'b1', 'factory': f1},
+ ]
+ """)
+
+ def testSequence(self):
+ self.assertEqual(self.bot1.elastic_ip.public_ip, '127.0.0.1')
+ self.assertIdentical(self.boto.addresses['127.0.0.1'], None)
+ # let's start a build...
+ d = self.doBuild()
+ d.addCallback(self._testSequence_1)
+ return d
+ def _testSequence_1(self, res):
+ # build was a success!
+ self.failUnlessEqual(res.getResults(), SUCCESS)
+ self.failUnlessEqual(res.getSlavename(), "bot1")
+ # we have our address
+ self.assertIdentical(self.boto.addresses['127.0.0.1'],
+ self.bot1.instance)
+ # Let's let it shut down. We'll set the build_wait_timer to fire
+ # sooner, and wait for it to fire.
+ self.bot1.build_wait_timer.reset(0)
+ d = defer.Deferred()
+ reactor.callLater(0.5, d.callback, None)
+ d.addCallback(self._testSequence_2)
+ return d
+ def _testSequence_2(self, res):
+ # slave is insubstantiated
+ self.assertIdentical(self.bot1.slave, None)
+ self.failIf(self.bot1.substantiated)
+ self.assertIdentical(self.bot1.instance, None)
+ # the address is free again
+ self.assertIdentical(self.boto.addresses['127.0.0.1'], None)
+
+
+class Initialization(Mixin, unittest.TestCase):
+
+ def setUp(self):
+ self.boto_setUp1()
+
+ def tearDown(self):
+ self.boto_setUp2()
+ return Mixin.tearDown(self)
+
+ def testDefaultSeparateFile(self):
+ # set up .ec2/aws_id
+ home = os.environ['HOME']
+ fake_home = os.path.join(os.getcwd(), 'basedir') # see RunMixin.setUp
+ os.environ['HOME'] = fake_home
+ dir = os.path.join(fake_home, '.ec2')
+ os.mkdir(dir)
+ f = open(os.path.join(dir, 'aws_id'), 'w')
+ f.write('publickey\nprivatekey')
+ f.close()
+ # The Connection checks the file, so if the secret file is not parsed
+ # correctly, *this* is where it would fail. This is the real test.
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ 'ami-12345')
+ # for completeness, we'll show that the connection actually exists.
+ self.failUnless(isinstance(bot1.conn, Connection))
+ # clean up.
+ os.environ['HOME'] = home
+ self.rmtree(dir)
+
+ def testCustomSeparateFile(self):
+ # set up .ec2/aws_id
+ file_path = os.path.join(os.getcwd(), 'basedir', 'custom_aws_id')
+ f = open(file_path, 'w')
+ f.write('publickey\nprivatekey')
+ f.close()
+ # The Connection checks the file, so if the secret file is not parsed
+ # correctly, *this* is where it would fail. This is the real test.
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ 'ami-12345', aws_id_file_path=file_path)
+ # for completeness, we'll show that the connection actually exists.
+ self.failUnless(isinstance(bot1.conn, Connection))
+
+ def testNoAMIBroken(self):
+ # you must specify an AMI, or at least one of valid_ami_owners or
+ # valid_ami_location_regex
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ self.assertRaises(ValueError, EC2LatentBuildSlave, 'bot1', 'sekrit',
+ 'm1.large', identifier='publickey',
+ secret_identifier='privatekey')
+
+ def testAMIOwnerFilter(self):
+ # if you only specify an owner, you get the image owned by any of the
+ # owners that sorts last by the AMI's location.
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ valid_ami_owners=[11111111111],
+ identifier='publickey',
+ secret_identifier='privatekey'
+ )
+ self.assertEqual(bot1.get_image().location,
+ 'test-f0a/image.manifest.xml')
+ bot1 = EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ valid_ami_owners=[11111111111,
+ 22222222222],
+ identifier='publickey',
+ secret_identifier='privatekey'
+ )
+ self.assertEqual(bot1.get_image().location,
+ 'test-f0a/image.manifest.xml')
+ bot1 = EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ valid_ami_owners=[22222222222],
+ identifier='publickey',
+ secret_identifier='privatekey'
+ )
+ self.assertEqual(bot1.get_image().location,
+ 'test-e1b/image.manifest.xml')
+ bot1 = EC2LatentBuildSlave('bot1', 'sekrit', 'm1.large',
+ valid_ami_owners=12345667890,
+ identifier='publickey',
+ secret_identifier='privatekey'
+ )
+ self.assertEqual(bot1.get_image().location,
+ 'test-xx/image.manifest.xml')
+
+ def testAMISimpleRegexFilter(self):
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large',
+ valid_ami_location_regex=r'test\-[a-z]\w+/image.manifest.xml',
+ identifier='publickey', secret_identifier='privatekey')
+ self.assertEqual(bot1.get_image().location,
+ 'test-xx/image.manifest.xml')
+ bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large',
+ valid_ami_location_regex=r'test\-[a-z]\d+\w/image.manifest.xml',
+ identifier='publickey', secret_identifier='privatekey')
+ self.assertEqual(bot1.get_image().location,
+ 'test-f0a/image.manifest.xml')
+ bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large', valid_ami_owners=[22222222222],
+ valid_ami_location_regex=r'test\-[a-z]\d+\w/image.manifest.xml',
+ identifier='publickey', secret_identifier='privatekey')
+ self.assertEqual(bot1.get_image().location,
+ 'test-e1b/image.manifest.xml')
+
+ def testAMIRegexAlphaSortFilter(self):
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large',
+ valid_ami_owners=[11111111111, 22222222222],
+ valid_ami_location_regex=r'test\-[a-z]\d+([a-z])/image.manifest.xml',
+ identifier='publickey', secret_identifier='privatekey')
+ self.assertEqual(bot1.get_image().location,
+ 'test-a5f/image.manifest.xml')
+
+ def testAMIRegexIntSortFilter(self):
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large',
+ valid_ami_owners=[11111111111, 22222222222],
+ valid_ami_location_regex=r'test\-[a-z](\d+)[a-z]/image.manifest.xml',
+ identifier='publickey', secret_identifier='privatekey')
+ self.assertEqual(bot1.get_image().location,
+ 'test-c30d/image.manifest.xml')
+
+ def testNewSecurityGroup(self):
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large', 'ami-12345',
+ identifier='publickey', secret_identifier='privatekey',
+ security_name='custom_security_name')
+ self.assertEqual(
+ self.boto.security_groups['custom_security_name'].value,
+ 'custom_security_name')
+ self.assertEqual(bot1.security_name, 'custom_security_name')
+
+ def testNewKeypairName(self):
+ from buildbot.ec2buildslave import EC2LatentBuildSlave
+ bot1 = EC2LatentBuildSlave(
+ 'bot1', 'sekrit', 'm1.large', 'ami-12345',
+ identifier='publickey', secret_identifier='privatekey',
+ keypair_name='custom_keypair_name')
+ self.assertIn('custom_keypair_name', self.boto.keys)
+ self.assertEqual(bot1.keypair_name, 'custom_keypair_name')
diff --git a/buildbot/buildbot/test/test_limitlogs.py b/buildbot/buildbot/test/test_limitlogs.py
new file mode 100644
index 0000000..9fd5bea
--- /dev/null
+++ b/buildbot/buildbot/test/test_limitlogs.py
@@ -0,0 +1,94 @@
+# -*- test-case-name: buildbot.test.test_limitlogs -*-
+
+from twisted.trial import unittest
+from twisted.internet import reactor, defer
+from twisted.internet.utils import getProcessValue, getProcessOutput
+import twisted
+from twisted.python.versions import Version
+from twisted.python.procutils import which
+from twisted.python import log, logfile
+import os
+
+'''Testcases to verify that the --log-size and --log-count options to
+create-master and create-slave actually work.
+
+These features require Twisted 8.2.0 to work.
+
+Currently only testing the master side of it.
+'''
+
+
+master_cfg = """from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.buildslave import BuildSlave
+s = factory.s
+
+f1 = factory.QuickBuildFactory('fakerep', 'cvsmodule', configure=None)
+
+f2 = factory.BuildFactory([
+ dummy.Dummy(timeout=1),
+ dummy.RemoteDummy(timeout=2),
+ ])
+
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit')]
+c['schedulers'] = []
+c['builders'] = []
+c['builders'].append({'name':'quick', 'slavename':'bot1',
+ 'builddir': 'quickdir', 'factory': f1})
+c['slavePortnum'] = 0
+
+from twisted.python import log
+for i in xrange(100):
+ log.msg("this is a mighty long string and I'm going to write it into the log often")
+"""
+
+class MasterLogs(unittest.TestCase):
+ '''Limit master log size and count.'''
+
+ def setUp(self):
+ if twisted.version < Version("twisted", 8, 2, 0):
+ self.skip = True
+ raise unittest.SkipTest("Twisted 8.2.0 or higher required")
+
+ def testLog(self):
+ exes = which('buildbot')
+ if not exes:
+ raise unittest.SkipTest("Buildbot needs to be installed")
+ self.buildbotexe = exes[0]
+ d = getProcessValue(self.buildbotexe,
+ ['create-master', '--log-size=1000', '--log-count=2',
+ 'master'])
+ d.addCallback(self._master_created)
+ return d
+
+ def _master_created(self, res):
+ open('master/master.cfg', 'w').write(master_cfg)
+ d = getProcessOutput(self.buildbotexe,
+ ['start', 'master'])
+ d.addBoth(self._master_running)
+ return d
+
+ def _master_running(self, res):
+ self.addCleanup(self._stop_master)
+ d = defer.Deferred()
+ reactor.callLater(2, d.callback, None)
+ d.addCallback(self._do_tests)
+ return d
+
+ def _do_tests(self, rv):
+ '''The actual method doing the tests on the master twistd.log'''
+ lf = logfile.LogFile.fromFullPath(os.path.join('master', 'twistd.log'))
+ self.failUnlessEqual(lf.listLogs(), [1,2])
+ lr = lf.getLog(1)
+ firstline = lr.readLines()[0]
+ self.failUnless(firstline.endswith("this is a mighty long string and I'm going to write it into the log often\n"))
+
+ def _stop_master(self):
+ d = getProcessOutput(self.buildbotexe,
+ ['stop', 'master'])
+ d.addBoth(self._master_stopped)
+ return d
+
+ def _master_stopped(self, res):
+ print "master stopped"
diff --git a/buildbot/buildbot/test/test_locks.py b/buildbot/buildbot/test/test_locks.py
new file mode 100644
index 0000000..0c1e0b5
--- /dev/null
+++ b/buildbot/buildbot/test/test_locks.py
@@ -0,0 +1,495 @@
+# -*- test-case-name: buildbot.test.test_locks -*-
+
+import random
+
+from twisted.trial import unittest
+from twisted.internet import defer, reactor
+
+from buildbot import master
+from buildbot.steps import dummy
+from buildbot.sourcestamp import SourceStamp
+from buildbot.process.base import BuildRequest
+from buildbot.test.runutils import RunMixin
+from buildbot import locks
+
+def claimHarder(lock, owner, la):
+ """Return a Deferred that will fire when the lock is claimed. Keep trying
+ until we succeed."""
+ if lock.isAvailable(la):
+ #print "claimHarder(%s): claiming" % owner
+ lock.claim(owner, la)
+ return defer.succeed(lock)
+ #print "claimHarder(%s): waiting" % owner
+ d = lock.waitUntilMaybeAvailable(owner, la)
+ d.addCallback(claimHarder, owner, la)
+ return d
+
+def hold(lock, owner, la, mode="now"):
+ if mode == "now":
+ lock.release(owner, la)
+ elif mode == "very soon":
+ reactor.callLater(0, lock.release, owner, la)
+ elif mode == "soon":
+ reactor.callLater(0.1, lock.release, owner, la)
+
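+# Illustration only (not part of the original tests): a small sketch of how
+# the helpers above compose.  The lock name and owner string are hypothetical.
+def _example_claim_hold_release():
+    lid = locks.MasterLock('example')
+    la = locks.LockAccess(lid, 'counting')
+    lock = locks.BaseLock("example")
+    d = claimHarder(lock, "owner1", la)        # fires with the lock once claimed
+    d.addCallback(hold, "owner1", la, "soon")  # releases it about 0.1s later
+    return d
+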
+class Unit(unittest.TestCase):
+ def testNowCounting(self):
+ lid = locks.MasterLock('dummy')
+ la = locks.LockAccess(lid, 'counting')
+ return self._testNow(la)
+
+ def testNowExclusive(self):
+ lid = locks.MasterLock('dummy')
+ la = locks.LockAccess(lid, 'exclusive')
+ return self._testNow(la)
+
+ def _testNow(self, la):
+ l = locks.BaseLock("name")
+ self.failUnless(l.isAvailable(la))
+ l.claim("owner1", la)
+ self.failIf(l.isAvailable(la))
+ l.release("owner1", la)
+ self.failUnless(l.isAvailable(la))
+
+ def testNowMixed1(self):
+        """ Test that an exclusive claim is not possible while a counting claim holds the lock """
+ lid = locks.MasterLock('dummy')
+ lac = locks.LockAccess(lid, 'counting')
+ lae = locks.LockAccess(lid, 'exclusive')
+ l = locks.BaseLock("name", maxCount=2)
+ self.failUnless(l.isAvailable(lac))
+ l.claim("count-owner", lac)
+ self.failIf(l.isAvailable(lae))
+ l.release("count-owner", lac)
+ self.failUnless(l.isAvailable(lac))
+
+ def testNowMixed2(self):
+        """ Test that a counting claim is not possible while an exclusive claim holds the lock """
+ lid = locks.MasterLock('dummy')
+ lac = locks.LockAccess(lid, 'counting')
+ lae = locks.LockAccess(lid, 'exclusive')
+ l = locks.BaseLock("name", maxCount=2)
+ self.failUnless(l.isAvailable(lae))
+ l.claim("count-owner", lae)
+ self.failIf(l.isAvailable(lac))
+ l.release("count-owner", lae)
+ self.failUnless(l.isAvailable(lae))
+
+ def testLaterCounting(self):
+ lid = locks.MasterLock('dummy')
+ la = locks.LockAccess(lid, 'counting')
+ return self._testLater(la)
+
+ def testLaterExclusive(self):
+ lid = locks.MasterLock('dummy')
+ la = locks.LockAccess(lid, 'exclusive')
+ return self._testLater(la)
+
+ def _testLater(self, la):
+ lock = locks.BaseLock("name")
+ d = claimHarder(lock, "owner1", la)
+ d.addCallback(lambda lock: lock.release("owner1", la))
+ return d
+
+ def testCompetitionCounting(self):
+ lid = locks.MasterLock('dummy')
+ la = locks.LockAccess(lid, 'counting')
+ return self._testCompetition(la)
+
+ def testCompetitionExclusive(self):
+ lid = locks.MasterLock('dummy')
+ la = locks.LockAccess(lid, 'exclusive')
+ return self._testCompetition(la)
+
+ def _testCompetition(self, la):
+ lock = locks.BaseLock("name")
+ d = claimHarder(lock, "owner1", la)
+ d.addCallback(self._claim1, la)
+ return d
+ def _claim1(self, lock, la):
+ # we should have claimed it by now
+ self.failIf(lock.isAvailable(la))
+ # now set up two competing owners. We don't know which will get the
+ # lock first.
+ d2 = claimHarder(lock, "owner2", la)
+ d2.addCallback(hold, "owner2", la, "now")
+ d3 = claimHarder(lock, "owner3", la)
+ d3.addCallback(hold, "owner3", la, "soon")
+ dl = defer.DeferredList([d2,d3])
+ dl.addCallback(self._cleanup, lock, la)
+ # and release the lock in a moment
+ reactor.callLater(0.1, lock.release, "owner1", la)
+ return dl
+
+ def _cleanup(self, res, lock, la):
+ d = claimHarder(lock, "cleanup", la)
+ d.addCallback(lambda lock: lock.release("cleanup", la))
+ return d
+
+ def testRandomCounting(self):
+ lid = locks.MasterLock('dummy')
+ la = locks.LockAccess(lid, 'counting')
+ return self._testRandom(la)
+
+ def testRandomExclusive(self):
+ lid = locks.MasterLock('dummy')
+ la = locks.LockAccess(lid, 'exclusive')
+ return self._testRandom(la)
+
+ def _testRandom(self, la):
+ lock = locks.BaseLock("name")
+ dl = []
+ for i in range(100):
+ owner = "owner%d" % i
+ mode = random.choice(["now", "very soon", "soon"])
+ d = claimHarder(lock, owner, la)
+ d.addCallback(hold, owner, la, mode)
+ dl.append(d)
+ d = defer.DeferredList(dl)
+ d.addCallback(self._cleanup, lock, la)
+ return d
+
+class Multi(unittest.TestCase):
+ def testNowCounting(self):
+ lid = locks.MasterLock('dummy')
+ la = locks.LockAccess(lid, 'counting')
+ lock = locks.BaseLock("name", 2)
+ self.failUnless(lock.isAvailable(la))
+ lock.claim("owner1", la)
+ self.failUnless(lock.isAvailable(la))
+ lock.claim("owner2", la)
+ self.failIf(lock.isAvailable(la))
+ lock.release("owner1", la)
+ self.failUnless(lock.isAvailable(la))
+ lock.release("owner2", la)
+ self.failUnless(lock.isAvailable(la))
+
+ def testLaterCounting(self):
+ lid = locks.MasterLock('dummy')
+ la = locks.LockAccess(lid, 'counting')
+ lock = locks.BaseLock("name", 2)
+ lock.claim("owner1", la)
+ lock.claim("owner2", la)
+ d = claimHarder(lock, "owner3", la)
+ d.addCallback(lambda lock: lock.release("owner3", la))
+ lock.release("owner2", la)
+ lock.release("owner1", la)
+ return d
+
+ def _cleanup(self, res, lock, count, la):
+ dl = []
+ for i in range(count):
+ d = claimHarder(lock, "cleanup%d" % i, la)
+ dl.append(d)
+ d2 = defer.DeferredList(dl)
+ # once all locks are claimed, we know that any previous owners have
+ # been flushed out
+ def _release(res):
+ for i in range(count):
+ lock.release("cleanup%d" % i, la)
+ d2.addCallback(_release)
+ return d2
+
+ def testRandomCounting(self):
+ lid = locks.MasterLock('dummy')
+ la = locks.LockAccess(lid, 'counting')
+ COUNT = 5
+ lock = locks.BaseLock("name", COUNT)
+ dl = []
+ for i in range(100):
+ owner = "owner%d" % i
+ mode = random.choice(["now", "very soon", "soon"])
+ d = claimHarder(lock, owner, la)
+ def _check(lock):
+ self.failIf(len(lock.owners) > COUNT)
+ return lock
+ d.addCallback(_check)
+ d.addCallback(hold, owner, la, mode)
+ dl.append(d)
+ d = defer.DeferredList(dl)
+ d.addCallback(self._cleanup, lock, COUNT, la)
+ return d
+
+class Dummy:
+ pass
+
+def slave(slavename):
+ slavebuilder = Dummy()
+ slavebuilder.slave = Dummy()
+ slavebuilder.slave.slavename = slavename
+ return slavebuilder
+
+class MakeRealLock(unittest.TestCase):
+
+ def make(self, lockid):
+ return lockid.lockClass(lockid)
+
+ def testMaster(self):
+ mid1 = locks.MasterLock("name1")
+ mid2 = locks.MasterLock("name1")
+ mid3 = locks.MasterLock("name3")
+ mid4 = locks.MasterLock("name1", 3)
+ self.failUnlessEqual(mid1, mid2)
+ self.failIfEqual(mid1, mid3)
+ # they should all be hashable
+ d = {mid1: 1, mid2: 2, mid3: 3, mid4: 4}
+
+ l1 = self.make(mid1)
+ self.failUnlessEqual(l1.name, "name1")
+ self.failUnlessEqual(l1.maxCount, 1)
+ self.failUnlessIdentical(l1.getLock(slave("slave1")), l1)
+ l4 = self.make(mid4)
+ self.failUnlessEqual(l4.name, "name1")
+ self.failUnlessEqual(l4.maxCount, 3)
+ self.failUnlessIdentical(l4.getLock(slave("slave1")), l4)
+
+ def testSlave(self):
+ sid1 = locks.SlaveLock("name1")
+ sid2 = locks.SlaveLock("name1")
+ sid3 = locks.SlaveLock("name3")
+ sid4 = locks.SlaveLock("name1", maxCount=3)
+ mcfs = {"bigslave": 4, "smallslave": 1}
+ sid5 = locks.SlaveLock("name1", maxCount=3, maxCountForSlave=mcfs)
+ mcfs2 = {"bigslave": 4, "smallslave": 1}
+ sid5a = locks.SlaveLock("name1", maxCount=3, maxCountForSlave=mcfs2)
+ mcfs3 = {"bigslave": 1, "smallslave": 99}
+ sid5b = locks.SlaveLock("name1", maxCount=3, maxCountForSlave=mcfs3)
+ self.failUnlessEqual(sid1, sid2)
+ self.failIfEqual(sid1, sid3)
+ self.failIfEqual(sid1, sid4)
+ self.failIfEqual(sid1, sid5)
+ self.failUnlessEqual(sid5, sid5a)
+ self.failIfEqual(sid5a, sid5b)
+ # they should all be hashable
+ d = {sid1: 1, sid2: 2, sid3: 3, sid4: 4, sid5: 5, sid5a: 6, sid5b: 7}
+
+ l1 = self.make(sid1)
+ self.failUnlessEqual(l1.name, "name1")
+ self.failUnlessEqual(l1.maxCount, 1)
+ l1s1 = l1.getLock(slave("slave1"))
+ self.failIfIdentical(l1s1, l1)
+
+ l4 = self.make(sid4)
+ self.failUnlessEqual(l4.maxCount, 3)
+ l4s1 = l4.getLock(slave("slave1"))
+ self.failUnlessEqual(l4s1.maxCount, 3)
+
+ l5 = self.make(sid5)
+ l5s1 = l5.getLock(slave("bigslave"))
+ l5s2 = l5.getLock(slave("smallslave"))
+ l5s3 = l5.getLock(slave("unnamedslave"))
+ self.failUnlessEqual(l5s1.maxCount, 4)
+ self.failUnlessEqual(l5s2.maxCount, 1)
+ self.failUnlessEqual(l5s3.maxCount, 3)
+
+class GetLock(unittest.TestCase):
+ def testGet(self):
+ # the master.cfg file contains "lock ids", which are instances of
+ # MasterLock and SlaveLock but which are not actually Locks per se.
+ # When the build starts, these markers are turned into RealMasterLock
+        # and RealSlaveLock instances. This ensures that any builds running
+ # on slaves that were unaffected by the config change are still
+ # referring to the same Lock instance as new builds by builders that
+ # *were* affected by the change. There have been bugs in the past in
+ # which this didn't happen, and the Locks were bypassed because half
+ # the builders were using one incarnation of the lock while the other
+ # half were using a separate (but equal) incarnation.
+ #
+ # Changing the lock id in any way should cause it to be replaced in
+ # the BotMaster. This will result in a couple of funky artifacts:
+ # builds in progress might pay attention to a different lock, so we
+ # might bypass the locking for the duration of a couple builds.
+ # There's also the problem of old Locks lingering around in
+ # BotMaster.locks, but they're small and shouldn't really cause a
+ # problem.
+
+ b = master.BotMaster()
+ l1 = locks.MasterLock("one")
+ l1a = locks.MasterLock("one")
+ l2 = locks.MasterLock("one", maxCount=4)
+
+ rl1 = b.getLockByID(l1)
+ rl2 = b.getLockByID(l1a)
+ self.failUnlessIdentical(rl1, rl2)
+ rl3 = b.getLockByID(l2)
+ self.failIfIdentical(rl1, rl3)
+
+ s1 = locks.SlaveLock("one")
+ s1a = locks.SlaveLock("one")
+ s2 = locks.SlaveLock("one", maxCount=4)
+ s3 = locks.SlaveLock("one", maxCount=4,
+ maxCountForSlave={"a":1, "b":2})
+ s3a = locks.SlaveLock("one", maxCount=4,
+ maxCountForSlave={"a":1, "b":2})
+ s4 = locks.SlaveLock("one", maxCount=4,
+ maxCountForSlave={"a":4, "b":4})
+
+ rl1 = b.getLockByID(s1)
+ rl2 = b.getLockByID(s1a)
+ self.failUnlessIdentical(rl1, rl2)
+ rl3 = b.getLockByID(s2)
+ self.failIfIdentical(rl1, rl3)
+ rl4 = b.getLockByID(s3)
+ self.failIfIdentical(rl1, rl4)
+ self.failIfIdentical(rl3, rl4)
+ rl5 = b.getLockByID(s3a)
+ self.failUnlessIdentical(rl4, rl5)
+ rl6 = b.getLockByID(s4)
+ self.failIfIdentical(rl5, rl6)
+
+
+
+class LockStep(dummy.Dummy):
+ def start(self):
+ number = self.build.requests[0].number
+ self.build.requests[0].events.append(("start", number))
+ dummy.Dummy.start(self)
+ def done(self):
+ number = self.build.requests[0].number
+ self.build.requests[0].events.append(("done", number))
+ dummy.Dummy.done(self)
+
+config_1 = """
+from buildbot import locks
+from buildbot.process import factory
+from buildbot.buildslave import BuildSlave
+s = factory.s
+from buildbot.test.test_locks import LockStep
+
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit'), BuildSlave('bot2', 'sekrit')]
+c['schedulers'] = []
+c['slavePortnum'] = 0
+
+first_lock = locks.SlaveLock('first')
+second_lock = locks.MasterLock('second')
+f1 = factory.BuildFactory([s(LockStep, timeout=2, locks=[first_lock])])
+f2 = factory.BuildFactory([s(LockStep, timeout=3, locks=[second_lock])])
+f3 = factory.BuildFactory([s(LockStep, timeout=2, locks=[])])
+
+b1a = {'name': 'full1a', 'slavename': 'bot1', 'builddir': '1a', 'factory': f1}
+b1b = {'name': 'full1b', 'slavename': 'bot1', 'builddir': '1b', 'factory': f1}
+b1c = {'name': 'full1c', 'slavename': 'bot1', 'builddir': '1c', 'factory': f3,
+ 'locks': [first_lock, second_lock]}
+b1d = {'name': 'full1d', 'slavename': 'bot1', 'builddir': '1d', 'factory': f2}
+b2a = {'name': 'full2a', 'slavename': 'bot2', 'builddir': '2a', 'factory': f1}
+b2b = {'name': 'full2b', 'slavename': 'bot2', 'builddir': '2b', 'factory': f3,
+ 'locks': [second_lock]}
+c['builders'] = [b1a, b1b, b1c, b1d, b2a, b2b]
+"""
+
+config_1a = config_1 + \
+"""
+b1b = {'name': 'full1b', 'slavename': 'bot1', 'builddir': '1B', 'factory': f1}
+c['builders'] = [b1a, b1b, b1c, b1d, b2a, b2b]
+"""
+
+
+class Locks(RunMixin, unittest.TestCase):
+ def setUp(self):
+ N = 'test_builder'
+ RunMixin.setUp(self)
+ self.req1 = req1 = BuildRequest("forced build", SourceStamp(), N)
+ req1.number = 1
+ self.req2 = req2 = BuildRequest("forced build", SourceStamp(), N)
+ req2.number = 2
+ self.req3 = req3 = BuildRequest("forced build", SourceStamp(), N)
+ req3.number = 3
+ req1.events = req2.events = req3.events = self.events = []
+ d = self.master.loadConfig(config_1)
+ d.addCallback(lambda res: self.master.startService())
+ d.addCallback(lambda res: self.connectSlaves(["bot1", "bot2"],
+ ["full1a", "full1b",
+ "full1c", "full1d",
+ "full2a", "full2b"]))
+ return d
+
+ def testLock1(self):
+ self.control.getBuilder("full1a").requestBuild(self.req1)
+ self.control.getBuilder("full1b").requestBuild(self.req2)
+ d = defer.DeferredList([self.req1.waitUntilFinished(),
+ self.req2.waitUntilFinished()])
+ d.addCallback(self._testLock1_1)
+ return d
+
+ def _testLock1_1(self, res):
+        # full1a should complete its step before full1b starts its own
+ self.failUnlessEqual(self.events,
+ [("start", 1), ("done", 1),
+ ("start", 2), ("done", 2)])
+
+ def testLock1a(self):
+ # just like testLock1, but we reload the config file first, with a
+ # change that causes full1b to be changed. This tickles a design bug
+ # in which full1a and full1b wind up with distinct Lock instances.
+ d = self.master.loadConfig(config_1a)
+ d.addCallback(self._testLock1a_1)
+ return d
+ def _testLock1a_1(self, res):
+ self.control.getBuilder("full1a").requestBuild(self.req1)
+ self.control.getBuilder("full1b").requestBuild(self.req2)
+ d = defer.DeferredList([self.req1.waitUntilFinished(),
+ self.req2.waitUntilFinished()])
+ d.addCallback(self._testLock1a_2)
+ return d
+
+ def _testLock1a_2(self, res):
+        # full1a should complete its step before full1b starts its own
+ self.failUnlessEqual(self.events,
+ [("start", 1), ("done", 1),
+ ("start", 2), ("done", 2)])
+
+ def testLock2(self):
+ # two builds run on separate slaves with slave-scoped locks should
+ # not interfere
+ self.control.getBuilder("full1a").requestBuild(self.req1)
+ self.control.getBuilder("full2a").requestBuild(self.req2)
+ d = defer.DeferredList([self.req1.waitUntilFinished(),
+ self.req2.waitUntilFinished()])
+ d.addCallback(self._testLock2_1)
+ return d
+
+ def _testLock2_1(self, res):
+ # full2a should start its step before full1a finishes it. They run on
+ # different slaves, however, so they might start in either order.
+ self.failUnless(self.events[:2] == [("start", 1), ("start", 2)] or
+ self.events[:2] == [("start", 2), ("start", 1)])
+
+ def testLock3(self):
+ # two builds run on separate slaves with master-scoped locks should
+ # not overlap
+ self.control.getBuilder("full1c").requestBuild(self.req1)
+ self.control.getBuilder("full2b").requestBuild(self.req2)
+ d = defer.DeferredList([self.req1.waitUntilFinished(),
+ self.req2.waitUntilFinished()])
+ d.addCallback(self._testLock3_1)
+ return d
+
+ def _testLock3_1(self, res):
+ # full2b should not start until after full1c finishes. The builds run
+ # on different slaves, so we can't really predict which will start
+ # first. The important thing is that they don't overlap.
+ self.failUnless(self.events == [("start", 1), ("done", 1),
+ ("start", 2), ("done", 2)]
+ or self.events == [("start", 2), ("done", 2),
+ ("start", 1), ("done", 1)]
+ )
+
+ def testLock4(self):
+ self.control.getBuilder("full1a").requestBuild(self.req1)
+ self.control.getBuilder("full1c").requestBuild(self.req2)
+ self.control.getBuilder("full1d").requestBuild(self.req3)
+ d = defer.DeferredList([self.req1.waitUntilFinished(),
+ self.req2.waitUntilFinished(),
+ self.req3.waitUntilFinished()])
+ d.addCallback(self._testLock4_1)
+ return d
+
+ def _testLock4_1(self, res):
+ # full1a starts, then full1d starts (because they do not interfere).
+ # Once both are done, full1c can run.
+ self.failUnlessEqual(self.events,
+ [("start", 1), ("start", 3),
+ ("done", 1), ("done", 3),
+ ("start", 2), ("done", 2)])
+
diff --git a/buildbot/buildbot/test/test_maildir.py b/buildbot/buildbot/test/test_maildir.py
new file mode 100644
index 0000000..b79cbd3
--- /dev/null
+++ b/buildbot/buildbot/test/test_maildir.py
@@ -0,0 +1,92 @@
+# -*- test-case-name: buildbot.test.test_maildir -*-
+
+from twisted.trial import unittest
+import os, shutil
+from buildbot.changes.mail import FCMaildirSource
+from twisted.internet import defer, reactor, task
+from twisted.python import util, log
+
+class TimeOutError(Exception):
+    """The messages were not received in a timely fashion"""
+
+class MaildirTest(unittest.TestCase):
+ SECONDS_PER_MESSAGE = 1.0
+
+ def setUp(self):
+ log.msg("creating empty maildir")
+ self.maildir = "test-maildir"
+ if os.path.isdir(self.maildir):
+ shutil.rmtree(self.maildir)
+ log.msg("removing stale maildir")
+ os.mkdir(self.maildir)
+ os.mkdir(os.path.join(self.maildir, "cur"))
+ os.mkdir(os.path.join(self.maildir, "new"))
+ os.mkdir(os.path.join(self.maildir, "tmp"))
+ self.source = None
+
+ def tearDown(self):
+ log.msg("removing old maildir")
+ shutil.rmtree(self.maildir)
+ if self.source:
+ return self.source.stopService()
+
+ def addChange(self, c):
+ # NOTE: this assumes every message results in a Change, which isn't
+ # true for msg8-prefix
+ log.msg("got change")
+ self.changes.append(c)
+
+ def deliverMail(self, msg):
+ log.msg("delivering", msg)
+ newdir = os.path.join(self.maildir, "new")
+ # to do this right, use safecat
+ shutil.copy(msg, newdir)
+
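+    def deliverMailSafely(self, msg):
+        # A sketch of the "do this right" delivery hinted at above: copy the
+        # message into tmp/ under a unique name, then rename it into new/, so
+        # a reader never observes a partially written file (this is what
+        # maildir-aware tools such as safecat arrange).  Not used by this
+        # test; the plain shutil.copy above is kept for simplicity.
+        import time
+        name = "%d.%d.buildbot-test" % (time.time(), os.getpid())
+        tmpfile = os.path.join(self.maildir, "tmp", name)
+        newfile = os.path.join(self.maildir, "new", name)
+        shutil.copy(msg, tmpfile)
+        os.rename(tmpfile, newfile)
+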
+ def poll(self, changes, count, d):
+ if len(changes) == count:
+ d.callback("passed")
+
+ def testMaildir(self):
+ self.changes = []
+ s = self.source = FCMaildirSource(self.maildir)
+ s.parent = self
+ s.startService()
+ testfiles_dir = util.sibpath(__file__, "mail")
+ testfiles = [msg for msg in os.listdir(testfiles_dir)
+ if msg.startswith("freshcvs")]
+ assert testfiles
+ testfiles.sort()
+ count = len(testfiles)
+ d = defer.Deferred()
+
+ for i in range(count):
+ msg = testfiles[i]
+ reactor.callLater(self.SECONDS_PER_MESSAGE*i, self.deliverMail,
+ os.path.join(testfiles_dir, msg))
+ self.loop = task.LoopingCall(self.poll, self.changes, count, d)
+ self.loop.start(0.1)
+ t = reactor.callLater(self.SECONDS_PER_MESSAGE*count + 15,
+ d.errback, TimeOutError)
+ # TODO: verify the messages, should use code from test_mailparse but
+ # I'm not sure how to factor the verification routines out in a
+ # useful fashion
+
+ #for i in range(count):
+ # msg, check = test_messages[i]
+ # check(self, self.changes[i])
+
+ def _shutdown(res):
+ if t.active():
+ t.cancel()
+ self.loop.stop()
+ return res
+ d.addBoth(_shutdown)
+
+ return d
+
+ # TODO: it would be nice to set this timeout after counting the number of
+ # messages in buildbot/test/mail/msg*, but I suspect trial wants to have
+ # this number before the method starts, and maybe even before setUp()
+ testMaildir.timeout = SECONDS_PER_MESSAGE*9 + 15
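+
+    # If trial only needs the timeout attribute at class-definition (import)
+    # time, the hard-coded 9 above could instead be derived from the fixture
+    # files; left as a commented sketch so the explicit value above stays
+    # authoritative:
+    #
+    #   _n = len([m for m in os.listdir(util.sibpath(__file__, "mail"))
+    #             if m.startswith("freshcvs")])
+    #   testMaildir.timeout = SECONDS_PER_MESSAGE * _n + 15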
+
diff --git a/buildbot/buildbot/test/test_mailparse.py b/buildbot/buildbot/test/test_mailparse.py
new file mode 100644
index 0000000..dc60269
--- /dev/null
+++ b/buildbot/buildbot/test/test_mailparse.py
@@ -0,0 +1,293 @@
+# -*- test-case-name: buildbot.test.test_mailparse -*-
+
+from twisted.trial import unittest
+from twisted.python import util
+from buildbot.changes import mail
+
+class TestFreshCVS(unittest.TestCase):
+
+ def get(self, msg):
+ msg = util.sibpath(__file__, msg)
+ s = mail.FCMaildirSource(None)
+ return s.parse_file(open(msg, "r"))
+
+ def testMsg1(self):
+ c = self.get("mail/freshcvs.1")
+ self.assertEqual(c.who, "moshez")
+ self.assertEqual(set(c.files), set(["Twisted/debian/python-twisted.menu.in"]))
+ self.assertEqual(c.comments, "Instance massenger, apparently\n")
+ self.assertEqual(c.isdir, 0)
+
+ def testMsg2(self):
+ c = self.get("mail/freshcvs.2")
+ self.assertEqual(c.who, "itamarst")
+ self.assertEqual(set(c.files), set(["Twisted/twisted/web/woven/form.py",
+ "Twisted/twisted/python/formmethod.py"]))
+ self.assertEqual(c.comments,
+ "submit formmethod now subclass of Choice\n")
+ self.assertEqual(c.isdir, 0)
+
+ def testMsg3(self):
+ # same as msg2 but missing the ViewCVS section
+ c = self.get("mail/freshcvs.3")
+ self.assertEqual(c.who, "itamarst")
+ self.assertEqual(set(c.files), set(["Twisted/twisted/web/woven/form.py",
+ "Twisted/twisted/python/formmethod.py"]))
+ self.assertEqual(c.comments,
+ "submit formmethod now subclass of Choice\n")
+ self.assertEqual(c.isdir, 0)
+
+ def testMsg4(self):
+ # same as msg3 but also missing CVS patch section
+ c = self.get("mail/freshcvs.4")
+ self.assertEqual(c.who, "itamarst")
+ self.assertEqual(set(c.files), set(["Twisted/twisted/web/woven/form.py",
+ "Twisted/twisted/python/formmethod.py"]))
+ self.assertEqual(c.comments,
+ "submit formmethod now subclass of Choice\n")
+ self.assertEqual(c.isdir, 0)
+
+ def testMsg5(self):
+ # creates a directory
+ c = self.get("mail/freshcvs.5")
+ self.assertEqual(c.who, "etrepum")
+ self.assertEqual(set(c.files), set(["Twisted/doc/examples/cocoaDemo"]))
+ self.assertEqual(c.comments,
+ "Directory /cvs/Twisted/doc/examples/cocoaDemo added to the repository\n")
+ self.assertEqual(c.isdir, 1)
+
+ def testMsg6(self):
+ # adds files
+ c = self.get("mail/freshcvs.6")
+ self.assertEqual(c.who, "etrepum")
+ self.assertEqual(set(c.files), set([
+ "Twisted/doc/examples/cocoaDemo/MyAppDelegate.py",
+ "Twisted/doc/examples/cocoaDemo/__main__.py",
+ "Twisted/doc/examples/cocoaDemo/bin-python-main.m",
+ "Twisted/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings",
+ "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib",
+ "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib",
+ "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib",
+ "Twisted/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj"]))
+ self.assertEqual(c.comments,
+ "Cocoa (OS X) clone of the QT demo, using polling reactor\n\nRequires pyobjc ( http://pyobjc.sourceforge.net ), it's not much different than the template project. The reactor is iterated periodically by a repeating NSTimer.\n")
+ self.assertEqual(c.isdir, 0)
+
+ def testMsg7(self):
+ # deletes files
+ c = self.get("mail/freshcvs.7")
+ self.assertEqual(c.who, "etrepum")
+ self.assertEqual(set(c.files), set([
+ "Twisted/doc/examples/cocoaDemo/MyAppDelegate.py",
+ "Twisted/doc/examples/cocoaDemo/__main__.py",
+ "Twisted/doc/examples/cocoaDemo/bin-python-main.m",
+ "Twisted/doc/examples/cocoaDemo/English.lproj/InfoPlist.strings",
+ "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib",
+ "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib",
+ "Twisted/doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib",
+ "Twisted/doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj"]))
+ self.assertEqual(c.comments,
+ "Directories break debian build script, waiting for reasonable fix\n")
+ self.assertEqual(c.isdir, 0)
+
+ def testMsg8(self):
+ # files outside Twisted/
+ c = self.get("mail/freshcvs.8")
+ self.assertEqual(c.who, "acapnotic")
+ self.assertEqual(set(c.files), set([ "CVSROOT/freshCfg" ]))
+ self.assertEqual(c.comments, "it doesn't work with invalid syntax\n")
+ self.assertEqual(c.isdir, 0)
+
+ def testMsg9(self):
+ # also creates a directory
+ c = self.get("mail/freshcvs.9")
+ self.assertEqual(c.who, "exarkun")
+ self.assertEqual(set(c.files), set(["Twisted/sandbox/exarkun/persist-plugin"]))
+ self.assertEqual(c.comments,
+ "Directory /cvs/Twisted/sandbox/exarkun/persist-plugin added to the repository\n")
+ self.assertEqual(c.isdir, 1)
+
+
+class TestFreshCVS_Prefix(unittest.TestCase):
+ def get(self, msg):
+ msg = util.sibpath(__file__, msg)
+ s = mail.FCMaildirSource(None)
+ return s.parse_file(open(msg, "r"), prefix="Twisted/")
+
+ def testMsg1p(self):
+ c = self.get("mail/freshcvs.1")
+ self.assertEqual(c.who, "moshez")
+ self.assertEqual(set(c.files), set(["debian/python-twisted.menu.in"]))
+ self.assertEqual(c.comments, "Instance massenger, apparently\n")
+
+ def testMsg2p(self):
+ c = self.get("mail/freshcvs.2")
+ self.assertEqual(c.who, "itamarst")
+ self.assertEqual(set(c.files), set(["twisted/web/woven/form.py",
+ "twisted/python/formmethod.py"]))
+ self.assertEqual(c.comments,
+ "submit formmethod now subclass of Choice\n")
+
+ def testMsg3p(self):
+ # same as msg2 but missing the ViewCVS section
+ c = self.get("mail/freshcvs.3")
+ self.assertEqual(c.who, "itamarst")
+ self.assertEqual(set(c.files), set(["twisted/web/woven/form.py",
+ "twisted/python/formmethod.py"]))
+ self.assertEqual(c.comments,
+ "submit formmethod now subclass of Choice\n")
+
+ def testMsg4p(self):
+ # same as msg3 but also missing CVS patch section
+ c = self.get("mail/freshcvs.4")
+ self.assertEqual(c.who, "itamarst")
+ self.assertEqual(set(c.files), set(["twisted/web/woven/form.py",
+ "twisted/python/formmethod.py"]))
+ self.assertEqual(c.comments,
+ "submit formmethod now subclass of Choice\n")
+
+ def testMsg5p(self):
+ # creates a directory
+ c = self.get("mail/freshcvs.5")
+ self.assertEqual(c.who, "etrepum")
+ self.assertEqual(set(c.files), set(["doc/examples/cocoaDemo"]))
+ self.assertEqual(c.comments,
+ "Directory /cvs/Twisted/doc/examples/cocoaDemo added to the repository\n")
+ self.assertEqual(c.isdir, 1)
+
+ def testMsg6p(self):
+ # adds files
+ c = self.get("mail/freshcvs.6")
+ self.assertEqual(c.who, "etrepum")
+ self.assertEqual(set(c.files), set([
+ "doc/examples/cocoaDemo/MyAppDelegate.py",
+ "doc/examples/cocoaDemo/__main__.py",
+ "doc/examples/cocoaDemo/bin-python-main.m",
+ "doc/examples/cocoaDemo/English.lproj/InfoPlist.strings",
+ "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib",
+ "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib",
+ "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib",
+ "doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj"]))
+ self.assertEqual(c.comments,
+ "Cocoa (OS X) clone of the QT demo, using polling reactor\n\nRequires pyobjc ( http://pyobjc.sourceforge.net ), it's not much different than the template project. The reactor is iterated periodically by a repeating NSTimer.\n")
+ self.assertEqual(c.isdir, 0)
+
+ def testMsg7p(self):
+ # deletes files
+ c = self.get("mail/freshcvs.7")
+ self.assertEqual(c.who, "etrepum")
+ self.assertEqual(set(c.files), set([
+ "doc/examples/cocoaDemo/MyAppDelegate.py",
+ "doc/examples/cocoaDemo/__main__.py",
+ "doc/examples/cocoaDemo/bin-python-main.m",
+ "doc/examples/cocoaDemo/English.lproj/InfoPlist.strings",
+ "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/classes.nib",
+ "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/info.nib",
+ "doc/examples/cocoaDemo/English.lproj/MainMenu.nib/keyedobjects.nib",
+ "doc/examples/cocoaDemo/cocoaDemo.pbproj/project.pbxproj"]))
+ self.assertEqual(c.comments,
+ "Directories break debian build script, waiting for reasonable fix\n")
+ self.assertEqual(c.isdir, 0)
+
+ def testMsg8p(self):
+ # files outside Twisted/
+ c = self.get("mail/freshcvs.8")
+ self.assertEqual(c, None)
+
+
+class TestSyncmail(unittest.TestCase):
+ def get(self, msg):
+ msg = util.sibpath(__file__, msg)
+ s = mail.SyncmailMaildirSource(None)
+ return s.parse_file(open(msg, "r"), prefix="buildbot/")
+
+ def getNoPrefix(self, msg):
+ msg = util.sibpath(__file__, msg)
+ s = mail.SyncmailMaildirSource(None)
+ return s.parse_file(open(msg, "r"))
+
+ def testMsgS1(self):
+ c = self.get("mail/syncmail.1")
+ self.failUnless(c is not None)
+ self.assertEqual(c.who, "warner")
+ self.assertEqual(set(c.files), set(["buildbot/changes/freshcvsmail.py"]))
+ self.assertEqual(c.comments,
+ "remove leftover code, leave a temporary compatibility import. Note! Start\nimporting FCMaildirSource from changes.mail instead of changes.freshcvsmail\n")
+ self.assertEqual(c.isdir, 0)
+
+ def testMsgS2(self):
+ c = self.get("mail/syncmail.2")
+ self.assertEqual(c.who, "warner")
+ self.assertEqual(set(c.files), set(["ChangeLog"]))
+ self.assertEqual(c.comments, "\t* NEWS: started adding new features\n")
+ self.assertEqual(c.isdir, 0)
+
+ def testMsgS3(self):
+ c = self.get("mail/syncmail.3")
+ self.failUnless(c == None)
+
+ def testMsgS4(self):
+ c = self.get("mail/syncmail.4")
+ self.assertEqual(c.who, "warner")
+ self.assertEqual(set(c.files),
+ set(["test/mail/syncmail.1",
+ "test/mail/syncmail.2",
+ "test/mail/syncmail.3"]))
+ self.assertEqual(c.comments, "test cases for syncmail parser\n")
+ self.assertEqual(c.isdir, 0)
+ self.assertEqual(c.branch, None)
+
+ # tests a tag
+ def testMsgS5(self):
+ c = self.getNoPrefix("mail/syncmail.5")
+ self.failUnless(c)
+ self.assertEqual(c.who, "thomas")
+ self.assertEqual(set(c.files),
+ set(['test1/MANIFEST',
+ 'test1/Makefile.am',
+ 'test1/autogen.sh',
+ 'test1/configure.in']))
+ self.assertEqual(c.branch, "BRANCH-DEVEL")
+ self.assertEqual(c.isdir, 0)
+
+
+class TestSVNCommitEmail(unittest.TestCase):
+ def get(self, msg, prefix):
+ msg = util.sibpath(__file__, msg)
+ s = mail.SVNCommitEmailMaildirSource(None)
+ return s.parse_file(open(msg, "r"), prefix)
+
+ def test1(self):
+ c = self.get("mail/svn-commit.1", "spamassassin/trunk/")
+ self.failUnless(c)
+ self.failUnlessEqual(c.who, "felicity")
+ self.failUnlessEqual(set(c.files), set(["sa-update.raw"]))
+ self.failUnlessEqual(c.branch, None)
+ self.failUnlessEqual(c.comments,
+ "bug 4864: remove extraneous front-slash "
+ "from gpghomedir path\n")
+
+ def test2a(self):
+ c = self.get("mail/svn-commit.2", "spamassassin/trunk/")
+ self.failIf(c)
+
+ def test2b(self):
+ c = self.get("mail/svn-commit.2", "spamassassin/branches/3.1/")
+ self.failUnless(c)
+ self.failUnlessEqual(c.who, "sidney")
+ self.failUnlessEqual(set(c.files),
+ set(["lib/Mail/SpamAssassin/Timeout.pm",
+ "MANIFEST",
+ "lib/Mail/SpamAssassin/Logger.pm",
+ "lib/Mail/SpamAssassin/Plugin/DCC.pm",
+ "lib/Mail/SpamAssassin/Plugin/DomainKeys.pm",
+ "lib/Mail/SpamAssassin/Plugin/Pyzor.pm",
+ "lib/Mail/SpamAssassin/Plugin/Razor2.pm",
+ "lib/Mail/SpamAssassin/Plugin/SPF.pm",
+ "lib/Mail/SpamAssassin/SpamdForkScaling.pm",
+ "spamd/spamd.raw",
+ ]))
+ self.failUnlessEqual(c.comments,
+ "Bug 4696: consolidated fixes for timeout bugs\n")
+
+
diff --git a/buildbot/buildbot/test/test_mergerequests.py b/buildbot/buildbot/test/test_mergerequests.py
new file mode 100644
index 0000000..e176cf1
--- /dev/null
+++ b/buildbot/buildbot/test/test_mergerequests.py
@@ -0,0 +1,196 @@
+from twisted.internet import defer, reactor
+from twisted.trial import unittest
+
+from buildbot.sourcestamp import SourceStamp
+from buildbot.process.base import BuildRequest
+from buildbot.process.properties import Properties
+from buildbot.status import builder, base, words
+from buildbot.changes.changes import Change
+
+from buildbot.test.runutils import RunMixin
+
+"""Testcases for master.botmaster.shouldMergeRequests.
+
+"""
+
+master_cfg = """from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.buildslave import BuildSlave
+
+f = factory.BuildFactory([
+ dummy.Dummy(timeout=0),
+ ])
+
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit')]
+c['schedulers'] = []
+c['builders'] = []
+c['builders'].append({'name':'dummy', 'slavename':'bot1',
+ 'builddir': 'dummy', 'factory': f})
+c['slavePortnum'] = 0
+
+%s
+c['mergeRequests'] = mergeRequests
+"""
+
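+# For reference: the mergeRequests hook exercised below receives the builder
+# and two pending requests and returns True when they may be combined.  The
+# sketch below (a hypothetical name, unused by the tests) shows a policy
+# stricter than the default, relying only on request attributes the tests
+# themselves use:
+def _example_mergeRequests(builder, req1, req2):
+    return (req1.reason == req2.reason and
+            req1.properties == req2.properties)
+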
+class MergeRequestsTest(RunMixin, unittest.TestCase):
+ def do_test(self, mergefun, results, reqs = None):
+ R = BuildRequest
+ S = SourceStamp
+ c1 = Change("alice", [], "changed stuff", branch="branch1")
+ c2 = Change("alice", [], "changed stuff", branch="branch1")
+ c3 = Change("alice", [], "changed stuff", branch="branch1")
+ c4 = Change("alice", [], "changed stuff", branch="branch1")
+ c5 = Change("alice", [], "changed stuff", branch="branch1")
+ c6 = Change("alice", [], "changed stuff", branch="branch1")
+ if reqs is None:
+ reqs = (R("why", S("branch1", None, None, None), 'test_builder'),
+ R("why2", S("branch1", "rev1", None, None), 'test_builder'),
+ R("why not", S("branch1", "rev1", None, None), 'test_builder'),
+ R("why3", S("branch1", "rev2", None, None), 'test_builder'),
+ R("why4", S("branch2", "rev2", None, None), 'test_builder'),
+ R("why5", S("branch1", "rev1", (3, "diff"), None), 'test_builder'),
+ R("changes", S("branch1", None, None, [c1,c2,c3]), 'test_builder'),
+ R("changes", S("branch1", None, None, [c4,c5,c6]), 'test_builder'),
+ )
+
+ m = self.master
+ m.loadConfig(master_cfg % mergefun)
+ m.readConfig = True
+ m.startService()
+ builder = self.control.getBuilder('dummy')
+ for req in reqs:
+ builder.requestBuild(req)
+
+ d = self.connectSlave()
+ d.addCallback(self.waitForBuilds, results)
+
+ return d
+
+ def waitForBuilds(self, r, results):
+ d = self.master.botmaster.waitUntilBuilderIdle('dummy')
+ d.addCallback(self.checkresults, results)
+ return d
+
+ def checkresults(self, builder, results):
+ s = builder.builder_status
+ builds = list(s.generateFinishedBuilds())
+ builds.reverse()
+ self.assertEqual(len(builds), len(results))
+ for i in xrange(len(builds)):
+ b = builds[i]
+ r = results[i]
+ ss = b.getSourceStamp()
+ self.assertEquals(b.getReason(), r['reason'])
+ self.assertEquals(ss.branch, r['branch'])
+ self.assertEquals(len(ss.changes), r['changecount'])
+ # print b.getReason(), ss.branch, len(ss.changes), ss.revision
+
+ def testDefault(self):
+ return self.do_test('mergeRequests = None',
+ ({'reason': 'why',
+ 'branch': 'branch1',
+ 'changecount': 0},
+ {'reason': 'why2, why not',
+ 'branch': 'branch1',
+ 'changecount': 0},
+ {'reason': 'why3',
+ 'branch': 'branch1',
+ 'changecount': 0},
+ {'reason': 'why4',
+ 'branch': 'branch2',
+ 'changecount': 0},
+ {'reason': 'why5',
+ 'branch': 'branch1',
+ 'changecount': 0},
+ {'reason': 'changes',
+ 'branch': 'branch1',
+ 'changecount': 6},
+ ))
+
+ def testNoMerges(self):
+ mergefun = """def mergeRequests(builder, req1, req2):
+ return False
+"""
+ return self.do_test(mergefun,
+ ({'reason': 'why',
+ 'branch': 'branch1',
+ 'changecount': 0},
+ {'reason': 'why2',
+ 'branch': 'branch1',
+ 'changecount': 0},
+ {'reason': 'why not',
+ 'branch': 'branch1',
+ 'changecount': 0},
+ {'reason': 'why3',
+ 'branch': 'branch1',
+ 'changecount': 0},
+ {'reason': 'why4',
+ 'branch': 'branch2',
+ 'changecount': 0},
+ {'reason': 'why5',
+ 'branch': 'branch1',
+ 'changecount': 0},
+ {'reason': 'changes',
+ 'branch': 'branch1',
+ 'changecount': 3},
+ {'reason': 'changes',
+ 'branch': 'branch1',
+ 'changecount': 3},
+ ))
+
+ def testReasons(self):
+ mergefun = """def mergeRequests(builder, req1, req2):
+ return req1.reason == req2.reason
+"""
+ return self.do_test(mergefun,
+ ({'reason': 'why',
+ 'branch': 'branch1',
+ 'changecount': 0},
+ {'reason': 'why2',
+ 'branch': 'branch1',
+ 'changecount': 0},
+ {'reason': 'why not',
+ 'branch': 'branch1',
+ 'changecount': 0},
+ {'reason': 'why3',
+ 'branch': 'branch1',
+ 'changecount': 0},
+ {'reason': 'why4',
+ 'branch': 'branch2',
+ 'changecount': 0},
+ {'reason': 'why5',
+ 'branch': 'branch1',
+ 'changecount': 0},
+ {'reason': 'changes',
+ 'branch': 'branch1',
+ 'changecount': 6},
+ ))
+
+
+ def testProperties(self):
+ mergefun = """def mergeRequests(builder, req1, req2):
+ return req1.properties == req2.properties
+"""
+ R = BuildRequest
+ S = SourceStamp
+ p1 = Properties(first="value")
+ p2 = Properties(first="other value")
+ reqs = (R("why", S("branch1", None, None, None), 'test_builder',
+ properties = p1),
+ R("why", S("branch1", None, None, None), 'test_builder',
+ properties = p1),
+ R("why", S("branch1", None, None, None), 'test_builder',
+ properties = p2),
+ R("why", S("branch1", None, None, None), 'test_builder',
+ properties = p2),
+ )
+ return self.do_test(mergefun,
+ ({'reason': 'why',
+ 'branch': 'branch1',
+ 'changecount': 0},
+ {'reason': 'why',
+ 'branch': 'branch1',
+ 'changecount': 0},
+ ),
+ reqs=reqs)
diff --git a/buildbot/buildbot/test/test_p4poller.py b/buildbot/buildbot/test/test_p4poller.py
new file mode 100644
index 0000000..54c6325
--- /dev/null
+++ b/buildbot/buildbot/test/test_p4poller.py
@@ -0,0 +1,213 @@
+import time
+
+from twisted.internet import defer
+from twisted.trial import unittest
+
+from buildbot.changes.changes import Change
+from buildbot.changes.p4poller import P4Source, get_simple_split
+
+first_p4changes = \
+"""Change 1 on 2006/04/13 by slamb@testclient 'first rev'
+"""
+
+second_p4changes = \
+"""Change 3 on 2006/04/13 by bob@testclient 'short desc truncated'
+Change 2 on 2006/04/13 by slamb@testclient 'bar'
+"""
+
+third_p4changes = \
+"""Change 5 on 2006/04/13 by mpatel@testclient 'first rev'
+"""
+
+change_4_log = \
+"""Change 4 by mpatel@testclient on 2006/04/13 21:55:39
+
+ short desc truncated because this is a long description.
+"""
+change_3_log = \
+"""Change 3 by bob@testclient on 2006/04/13 21:51:39
+
+ short desc truncated because this is a long description.
+"""
+
+change_2_log = \
+"""Change 2 by slamb@testclient on 2006/04/13 21:46:23
+
+ creation
+"""
+
+p4change = {
+ 3: change_3_log +
+"""Affected files ...
+
+... //depot/myproject/branch_b/branch_b_file#1 add
+... //depot/myproject/branch_b/whatbranch#1 branch
+... //depot/myproject/branch_c/whatbranch#1 branch
+""",
+ 2: change_2_log +
+"""Affected files ...
+
+... //depot/myproject/trunk/whatbranch#1 add
+... //depot/otherproject/trunk/something#1 add
+""",
+ 5: change_4_log +
+"""Affected files ...
+
+... //depot/myproject/branch_b/branch_b_file#1 add
+... //depot/myproject/branch_b#75 edit
+... //depot/myproject/branch_c/branch_c_file#1 add
+""",
+}
+
+
+class MockP4Source(P4Source):
+ """Test P4Source which doesn't actually invoke p4."""
+ invocation = 0
+
+ def __init__(self, p4changes, p4change, *args, **kwargs):
+ P4Source.__init__(self, *args, **kwargs)
+ self.p4changes = p4changes
+ self.p4change = p4change
+
+ def _get_changes(self):
+ assert self.working
+ result = self.p4changes[self.invocation]
+ self.invocation += 1
+ return defer.succeed(result)
+
+ def _get_describe(self, dummy, num):
+ assert self.working
+ return defer.succeed(self.p4change[num])
+
+class TestP4Poller(unittest.TestCase):
+ def setUp(self):
+ self.changes = []
+ self.addChange = self.changes.append
+
+ def failUnlessIn(self, substr, string):
+ # this is for compatibility with python2.2
+ if isinstance(string, str):
+ self.failUnless(string.find(substr) != -1)
+ else:
+ self.assertIn(substr, string)
+
+ def testCheck(self):
+ """successful checks"""
+ self.t = MockP4Source(p4changes=[first_p4changes, second_p4changes],
+ p4change=p4change,
+ p4port=None, p4user=None,
+ p4base='//depot/myproject/',
+ split_file=lambda x: x.split('/', 1))
+ self.t.parent = self
+
+ # The first time, it just learns the change to start at.
+ self.assert_(self.t.last_change is None)
+ self.assert_(not self.t.working)
+ return self.t.checkp4().addCallback(self._testCheck2)
+
+ def _testCheck2(self, res):
+ self.assertEquals(self.changes, [])
+ self.assertEquals(self.t.last_change, 1)
+
+ # Subsequent times, it returns Change objects for new changes.
+ return self.t.checkp4().addCallback(self._testCheck3)
+
+ def _testCheck3(self, res):
+ self.assertEquals(len(self.changes), 3)
+ self.assertEquals(self.t.last_change, 3)
+ self.assert_(not self.t.working)
+
+ # They're supposed to go oldest to newest, so this one must be first.
+ self.assertEquals(self.changes[0].asText(),
+ Change(who='slamb',
+ files=['whatbranch'],
+ comments=change_2_log,
+ revision='2',
+ when=self.makeTime("2006/04/13 21:46:23"),
+ branch='trunk').asText())
+
+ # These two can happen in either order, since they're from the same
+ # Perforce change.
+ self.failUnlessIn(
+ Change(who='bob',
+ files=['branch_b_file',
+ 'whatbranch'],
+ comments=change_3_log,
+ revision='3',
+ when=self.makeTime("2006/04/13 21:51:39"),
+ branch='branch_b').asText(),
+ [c.asText() for c in self.changes])
+ self.failUnlessIn(
+ Change(who='bob',
+ files=['whatbranch'],
+ comments=change_3_log,
+ revision='3',
+ when=self.makeTime("2006/04/13 21:51:39"),
+ branch='branch_c').asText(),
+ [c.asText() for c in self.changes])
+
+ def makeTime(self, timestring):
+ datefmt = '%Y/%m/%d %H:%M:%S'
+ when = time.mktime(time.strptime(timestring, datefmt))
+ return when
+
+ def testFailedChanges(self):
+ """'p4 changes' failure is properly ignored"""
+ self.t = MockP4Source(p4changes=['Perforce client error:\n...'],
+ p4change={},
+ p4port=None, p4user=None)
+ self.t.parent = self
+ d = self.t.checkp4()
+ d.addCallback(self._testFailedChanges2)
+ return d
+
+ def _testFailedChanges2(self, f):
+ self.failUnlessEqual(f, None)
+ self.assert_(not self.t.working)
+
+ def testFailedDescribe(self):
+ """'p4 describe' failure is properly ignored"""
+ c = dict(p4change)
+ c[3] = 'Perforce client error:\n...'
+ self.t = MockP4Source(p4changes=[first_p4changes, second_p4changes],
+ p4change=c, p4port=None, p4user=None)
+ self.t.parent = self
+ d = self.t.checkp4()
+ d.addCallback(self._testFailedDescribe2)
+ return d
+
+ def _testFailedDescribe2(self, res):
+ # first time finds nothing; check again.
+ return self.t.checkp4().addCallback(self._testFailedDescribe3)
+
+ def _testFailedDescribe3(self, f):
+ self.failUnlessEqual(f, None)
+ self.assert_(not self.t.working)
+ self.assertEquals(self.t.last_change, 2)
+
+ def testAlreadyWorking(self):
+ """don't launch a new poll while old is still going"""
+ self.t = P4Source()
+ self.t.working = True
+ self.assert_(self.t.last_change is None)
+ d = self.t.checkp4()
+        d.addCallback(self._testAlreadyWorking2)
+        return d
+
+ def _testAlreadyWorking2(self, res):
+ self.assert_(self.t.last_change is None)
+
+ def testSplitFile(self):
+ """Make sure split file works on branch only changes"""
+ self.t = MockP4Source(p4changes=[third_p4changes],
+ p4change=p4change,
+ p4port=None, p4user=None,
+ p4base='//depot/myproject/',
+ split_file=get_simple_split)
+ self.t.parent = self
+ self.t.last_change = 50
+ d = self.t.checkp4()
+        d.addCallback(self._testSplitFile)
+        return d
+
+ def _testSplitFile(self, res):
+ self.assertEquals(len(self.changes), 2)
+ self.assertEquals(self.t.last_change, 5)
diff --git a/buildbot/buildbot/test/test_package_rpm.py b/buildbot/buildbot/test/test_package_rpm.py
new file mode 100644
index 0000000..05d2841
--- /dev/null
+++ b/buildbot/buildbot/test/test_package_rpm.py
@@ -0,0 +1,132 @@
+# test steps.package.rpm.*
+
+from twisted.trial import unittest
+
+from buildbot.test.runutils import SlaveCommandTestBase
+from buildbot.steps.package.rpm import RpmBuild, RpmLint, RpmSpec
+
+
+class TestRpmBuild(unittest.TestCase):
+ """
+ Tests the package.rpm.RpmBuild class.
+ """
+
+ def test_creation(self):
+ """
+ Test that instances are created with proper data.
+ """
+ rb = RpmBuild()
+ self.assertEquals(rb.specfile, None)
+ self.assertFalse(rb.autoRelease)
+ self.assertFalse(rb.vcsRevision)
+
+ rb2 = RpmBuild('aspec.spec', autoRelease=True, vcsRevision=True)
+ self.assertEquals(rb2.specfile, 'aspec.spec')
+ self.assertTrue(rb2.autoRelease)
+ self.assertTrue(rb2.vcsRevision)
+
+ def test_rpmbuild(self):
+ """
+ Verifies the rpmbuild string is what we would expect.
+ """
+ rb = RpmBuild('topdir', 'buildir', 'rpmdir', 'sourcedir',
+ 'specdir', 'dist')
+ expected_result = ('rpmbuild --define "_topdir buildir"'
+ ' --define "_builddir rpmdir" --define "_rpmdir sourcedir"'
+ ' --define "_sourcedir specdir" --define "_specdir dist"'
+ ' --define "_srcrpmdir `pwd`" --define "dist .el5"')
+ self.assertEquals(rb.rpmbuild, expected_result)
+
+
+class TestRpmLint(unittest.TestCase):
+ """
+ Tests the package.rpm.RpmLint class.
+ """
+
+ def test_command(self):
+ """
+ Test that instance command variable is created with proper data.
+ """
+ rl = RpmLint()
+ expected_result = ["/usr/bin/rpmlint", "-i", '*rpm']
+ self.assertEquals(rl.command, expected_result)
+
+
+class TestRpmSpec(unittest.TestCase):
+ """
+ Tests the package.rpm.RpmSpec class.
+ """
+
+ def test_creation(self):
+ """
+ Test that instances are created with proper data.
+ """
+ rs = RpmSpec()
+ self.assertEquals(rs.specfile, None)
+ self.assertEquals(rs.pkg_name, None)
+ self.assertEquals(rs.pkg_version, None)
+ self.assertFalse(rs.loaded)
+
+ def test_load(self):
+ try:
+ from cStringIO import StringIO
+ except ImportError, ie:
+ from StringIO import StringIO
+
+ specfile = StringIO()
+ specfile.write("""\
+Name: example
+Version: 1.0.0
+Release: 1%{?dist}
+Summary: An example spec
+
+Group: Development/Libraries
+License: GPLv2+
+URL: http://www.example.dom
+Source0: %{name}-%{version}.tar.gz
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
+
+BuildArch: noarch
+Requires: python >= 2.4
+BuildRequires: python-setuptools
+
+
+%description
+An example spec for an rpm.
+
+
+%prep
+%setup -q
+
+
+%build
+%{__python} setup.py build
+
+
+%install
+rm -rf $RPM_BUILD_ROOT
+%{__python} setup.py install -O1 --skip-build --root $RPM_BUILD_ROOT/
+
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+
+%files
+%defattr(-,root,root,-)
+%doc INSTALL LICENSE AUTHORS COPYING
+# For noarch packages: sitelib
+%{python_sitelib}/*
+
+
+%changelog
+* Wed Jan 7 2009 Steve 'Ashcrow' Milner <smilner+buildbot@redhat.com> - \
+1.0.0-1
+- example""")
+ specfile.flush()
+ specfile.seek(0)
+ rs = RpmSpec(specfile)
+ rs.load()
+ self.assertTrue(rs.loaded)
+ self.assertEquals(rs.pkg_name, 'example')
+ self.assertEquals(rs.pkg_version, '1.0.0')
diff --git a/buildbot/buildbot/test/test_properties.py b/buildbot/buildbot/test/test_properties.py
new file mode 100644
index 0000000..a8973dd
--- /dev/null
+++ b/buildbot/buildbot/test/test_properties.py
@@ -0,0 +1,274 @@
+# -*- test-case-name: buildbot.test.test_properties -*-
+
+import os
+
+from twisted.trial import unittest
+
+from buildbot.sourcestamp import SourceStamp
+from buildbot.process import base
+from buildbot.process.properties import WithProperties, Properties
+from buildbot.status import builder
+from buildbot.slave.commands import rmdirRecursive
+from buildbot.test.runutils import RunMixin
+
+
+class FakeBuild:
+ pass
+class FakeBuildMaster:
+ properties = Properties(masterprop="master")
+class FakeBotMaster:
+ parent = FakeBuildMaster()
+class FakeBuilder:
+ statusbag = None
+ name = "fakebuilder"
+ botmaster = FakeBotMaster()
+class FakeSlave:
+ slavename = "bot12"
+ properties = Properties(slavename="bot12")
+class FakeSlaveBuilder:
+ slave = FakeSlave()
+ def getSlaveCommandVersion(self, command, oldversion=None):
+ return "1.10"
+class FakeScheduler:
+ name = "fakescheduler"
+
+class TestProperties(unittest.TestCase):
+ def setUp(self):
+ self.props = Properties()
+
+ def testDictBehavior(self):
+ self.props.setProperty("do-tests", 1, "scheduler")
+ self.props.setProperty("do-install", 2, "scheduler")
+
+ self.assert_(self.props.has_key('do-tests'))
+ self.failUnlessEqual(self.props['do-tests'], 1)
+ self.failUnlessEqual(self.props['do-install'], 2)
+ self.assertRaises(KeyError, lambda : self.props['do-nothing'])
+ self.failUnlessEqual(self.props.getProperty('do-install'), 2)
+
+ def testUpdate(self):
+ self.props.setProperty("x", 24, "old")
+ newprops = { 'a' : 1, 'b' : 2 }
+ self.props.update(newprops, "new")
+
+ self.failUnlessEqual(self.props.getProperty('x'), 24)
+ self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
+ self.failUnlessEqual(self.props.getProperty('a'), 1)
+ self.failUnlessEqual(self.props.getPropertySource('a'), 'new')
+
+ def testUpdateFromProperties(self):
+ self.props.setProperty("x", 24, "old")
+ newprops = Properties()
+ newprops.setProperty('a', 1, "new")
+ newprops.setProperty('b', 2, "new")
+ self.props.updateFromProperties(newprops)
+
+ self.failUnlessEqual(self.props.getProperty('x'), 24)
+ self.failUnlessEqual(self.props.getPropertySource('x'), 'old')
+ self.failUnlessEqual(self.props.getProperty('a'), 1)
+ self.failUnlessEqual(self.props.getPropertySource('a'), 'new')
+
+ # render() is pretty well tested by TestWithProperties
+
+class TestWithProperties(unittest.TestCase):
+ def setUp(self):
+ self.props = Properties()
+
+ def testBasic(self):
+ # test basic substitution with WithProperties
+ self.props.setProperty("revision", "47", "test")
+ command = WithProperties("build-%s.tar.gz", "revision")
+ self.failUnlessEqual(self.props.render(command),
+ "build-47.tar.gz")
+
+ def testDict(self):
+ # test dict-style substitution with WithProperties
+ self.props.setProperty("other", "foo", "test")
+ command = WithProperties("build-%(other)s.tar.gz")
+ self.failUnlessEqual(self.props.render(command),
+ "build-foo.tar.gz")
+
+ def testDictColonMinus(self):
+ # test dict-style substitution with WithProperties
+ self.props.setProperty("prop1", "foo", "test")
+ command = WithProperties("build-%(prop1:-empty)s-%(prop2:-empty)s.tar.gz")
+ self.failUnlessEqual(self.props.render(command),
+ "build-foo-empty.tar.gz")
+
+ def testDictColonPlus(self):
+ # test dict-style substitution with WithProperties
+ self.props.setProperty("prop1", "foo", "test")
+ command = WithProperties("build-%(prop1:+exists)s-%(prop2:+exists)s.tar.gz")
+ self.failUnlessEqual(self.props.render(command),
+ "build-exists-.tar.gz")
+
+ def testEmpty(self):
+ # None should render as ''
+ self.props.setProperty("empty", None, "test")
+ command = WithProperties("build-%(empty)s.tar.gz")
+ self.failUnlessEqual(self.props.render(command),
+ "build-.tar.gz")
+
+ def testRecursiveList(self):
+ self.props.setProperty("x", 10, "test")
+ self.props.setProperty("y", 20, "test")
+ command = [ WithProperties("%(x)s %(y)s"), "and",
+ WithProperties("%(y)s %(x)s") ]
+ self.failUnlessEqual(self.props.render(command),
+ ["10 20", "and", "20 10"])
+
+ def testRecursiveTuple(self):
+ self.props.setProperty("x", 10, "test")
+ self.props.setProperty("y", 20, "test")
+ command = ( WithProperties("%(x)s %(y)s"), "and",
+ WithProperties("%(y)s %(x)s") )
+ self.failUnlessEqual(self.props.render(command),
+ ("10 20", "and", "20 10"))
+
+ def testRecursiveDict(self):
+ self.props.setProperty("x", 10, "test")
+ self.props.setProperty("y", 20, "test")
+ command = { WithProperties("%(x)s %(y)s") :
+ WithProperties("%(y)s %(x)s") }
+ self.failUnlessEqual(self.props.render(command),
+ {"10 20" : "20 10"})
+
+class BuildProperties(unittest.TestCase):
+ """Test the properties that a build should have."""
+ def setUp(self):
+ self.builder = FakeBuilder()
+ self.builder_status = builder.BuilderStatus("fakebuilder")
+ self.builder_status.basedir = "test_properties"
+ self.builder_status.nextBuildNumber = 5
+ rmdirRecursive(self.builder_status.basedir)
+ os.mkdir(self.builder_status.basedir)
+ self.build_status = self.builder_status.newBuild()
+ req = base.BuildRequest("reason",
+ SourceStamp(branch="branch2", revision="1234"),
+ 'test_builder',
+ properties=Properties(scheduler="fakescheduler"))
+ self.build = base.Build([req])
+ self.build.build_status = self.build_status
+ self.build.setBuilder(self.builder)
+ self.build.setupProperties()
+ self.build.setupSlaveBuilder(FakeSlaveBuilder())
+
+ def testProperties(self):
+ self.failUnlessEqual(self.build.getProperty("scheduler"), "fakescheduler")
+ self.failUnlessEqual(self.build.getProperty("branch"), "branch2")
+ self.failUnlessEqual(self.build.getProperty("revision"), "1234")
+ self.failUnlessEqual(self.build.getProperty("slavename"), "bot12")
+ self.failUnlessEqual(self.build.getProperty("buildnumber"), 5)
+ self.failUnlessEqual(self.build.getProperty("buildername"), "fakebuilder")
+ self.failUnlessEqual(self.build.getProperty("masterprop"), "master")
+
+run_config = """
+from buildbot.process import factory
+from buildbot.steps.shell import ShellCommand, WithProperties
+from buildbot.buildslave import BuildSlave
+s = factory.s
+
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit', properties={'slprop':'slprop'})]
+c['schedulers'] = []
+c['slavePortnum'] = 0
+c['properties'] = { 'global' : 'global' }
+
+# Note: when run against twisted-1.3.0, this locks up about 5% of the time. I
+# suspect that a command with no output that finishes quickly triggers a race
+# condition in 1.3.0's process-reaping code. The 'touch' process becomes a
+# zombie and the step never completes. To keep this from messing up the unit
+# tests too badly, this step runs with a reduced timeout.
+
+f1 = factory.BuildFactory([s(ShellCommand,
+ flunkOnFailure=True,
+ command=['touch',
+ WithProperties('%s-%s-%s',
+ 'slavename', 'global', 'slprop'),
+ ],
+ workdir='.',
+ timeout=10,
+ )])
+
+b1 = {'name': 'full1', 'slavename': 'bot1', 'builddir': 'bd1', 'factory': f1}
+c['builders'] = [b1]
+
+"""
+
+class Run(RunMixin, unittest.TestCase):
+ def testInterpolate(self):
+ # run an actual build with a step that interpolates a build property
+ d = self.master.loadConfig(run_config)
+ d.addCallback(lambda res: self.master.startService())
+ d.addCallback(lambda res: self.connectOneSlave("bot1"))
+ d.addCallback(lambda res: self.requestBuild("full1"))
+ d.addCallback(self.failUnlessBuildSucceeded)
+ def _check_touch(res):
+ f = os.path.join("slavebase-bot1", "bd1", "bot1-global-slprop")
+ self.failUnless(os.path.exists(f))
+ return res
+ d.addCallback(_check_touch)
+ return d
+
+ SetProperty_base_config = """
+from buildbot.process import factory
+from buildbot.steps.shell import ShellCommand, SetProperty, WithProperties
+from buildbot.buildslave import BuildSlave
+s = factory.s
+
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit')]
+c['schedulers'] = []
+c['slavePortnum'] = 0
+
+f1 = factory.BuildFactory([
+##STEPS##
+])
+
+b1 = {'name': 'full1', 'slavename': 'bot1', 'builddir': 'bd1', 'factory': f1}
+c['builders'] = [b1]
+"""
+
+ SetPropertySimple_config = SetProperty_base_config.replace("##STEPS##", """
+ SetProperty(property='foo', command="echo foo"),
+ SetProperty(property=WithProperties('wp'), command="echo wp"),
+ SetProperty(property='bar', command="echo bar", strip=False),
+ """)
+
+ def testSetPropertySimple(self):
+ d = self.master.loadConfig(self.SetPropertySimple_config)
+ d.addCallback(lambda res: self.master.startService())
+ d.addCallback(lambda res: self.connectOneSlave("bot1"))
+ d.addCallback(lambda res: self.requestBuild("full1"))
+ d.addCallback(self.failUnlessBuildSucceeded)
+ def _check_props(bs):
+ self.failUnlessEqual(bs.getProperty("foo"), "foo")
+ self.failUnlessEqual(bs.getProperty("wp"), "wp")
+ # (will this fail on some platforms, due to newline differences?)
+ self.failUnlessEqual(bs.getProperty("bar"), "bar\n")
+ return bs
+ d.addCallback(_check_props)
+ return d
+
+ SetPropertyExtractFn_config = SetProperty_base_config.replace("##STEPS##", """
+ SetProperty(
+ extract_fn=lambda rc,stdout,stderr : {
+ 'foo' : stdout.strip(),
+ 'bar' : stderr.strip() },
+ command="echo foo; echo bar >&2"),
+ """)
+
+ def testSetPropertyExtractFn(self):
+ d = self.master.loadConfig(self.SetPropertyExtractFn_config)
+ d.addCallback(lambda res: self.master.startService())
+ d.addCallback(lambda res: self.connectOneSlave("bot1"))
+ d.addCallback(lambda res: self.requestBuild("full1"))
+ d.addCallback(self.failUnlessBuildSucceeded)
+ def _check_props(bs):
+ self.failUnlessEqual(bs.getProperty("foo"), "foo")
+ self.failUnlessEqual(bs.getProperty("bar"), "bar")
+ return bs
+ d.addCallback(_check_props)
+ return d
+
+# we test got_revision in test_vc
diff --git a/buildbot/buildbot/test/test_reconfig.py b/buildbot/buildbot/test/test_reconfig.py
new file mode 100644
index 0000000..c4c3922
--- /dev/null
+++ b/buildbot/buildbot/test/test_reconfig.py
@@ -0,0 +1,91 @@
+from twisted.trial import unittest
+from twisted.internet import reactor, defer
+from twisted.python import log
+
+from buildbot.test.runutils import RunMixin
+from buildbot.sourcestamp import SourceStamp
+
+config_base = """
+from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.buildslave import BuildSlave
+from buildbot.scheduler import Triggerable, Dependent
+
+BuildmasterConfig = c = {}
+
+f = factory.BuildFactory()
+f.addStep(dummy.Dummy, timeout=%d)
+
+c['slaves'] = [BuildSlave('bot1', 'sekrit')]
+
+upstream = Triggerable('s_upstream', ['upstream'], {'prop': '%s'})
+dep = Dependent('s_dep', upstream, ['depend'], {'dep prop': '%s'})
+c['schedulers'] = [upstream, dep]
+c['builders'] = [{'name':'upstream', 'slavename':'bot1',
+ 'builddir': 'upstream', 'factory': f},
+ {'name':'depend', 'slavename':'bot1',
+ 'builddir': 'depend', 'factory': f}]
+c['slavePortnum'] = 0
+"""
+
+class DependingScheduler(RunMixin, unittest.TestCase):
+ '''Test an upstream and a dependent scheduler while reconfiguring.'''
+
+ def testReconfig(self):
+ self.reconfigured = 0
+ self.master.loadConfig(config_base % (1, 'prop value', 'dep prop value'))
+ self.prop_value = 'prop value'
+ self.dep_prop_value = 'dep prop value'
+ self.master.readConfig = True
+ self.master.startService()
+ d = self.connectSlave(builders=['upstream', 'depend'])
+ d.addCallback(self._triggerUpstream)
+ return d
+ def _triggerUpstream(self, res):
+ log.msg("trigger upstream")
+ ss = SourceStamp()
+ upstream = [s for s in self.master.allSchedulers()
+ if s.name == 's_upstream'][0]
+ d = upstream.trigger(ss)
+ d.addCallback(self._gotBuild)
+ return d
+
+ def _gotBuild(self, res):
+ log.msg("done")
+ d = defer.Deferred()
+ d.addCallback(self._doChecks)
+ reactor.callLater(2, d.callback, None)
+ return d
+
+ def _doChecks(self, res):
+ log.msg("starting tests")
+ ub = self.status.getBuilder('upstream').getLastFinishedBuild()
+ tb = self.status.getBuilder('depend').getLastFinishedBuild()
+ self.assertEqual(ub.getProperty('prop'), self.prop_value)
+ self.assertEqual(ub.getNumber(), self.reconfigured)
+ self.assertEqual(tb.getProperty('dep prop'), self.dep_prop_value)
+ self.assertEqual(tb.getNumber(), self.reconfigured)
+
+ # now further on to the reconfig
+ if self.reconfigured > 2:
+ # actually, we're done,
+ return
+ if self.reconfigured == 0:
+ # reconfig without changes now
+ d = self.master.loadConfig(config_base% (1, 'prop value',
+ 'dep prop value'))
+ elif self.reconfigured == 1:
+ # reconfig with changes to upstream now
+ d = self.master.loadConfig(config_base% (1, 'other prop value',
+ 'dep prop value'))
+ self.prop_value = 'other prop value'
+ self.dep_prop_value = 'dep prop value'
+ else:
+ # reconfig with changes to dep now
+ d = self.master.loadConfig(config_base% (1, 'other prop value',
+ 'other dep prop value'))
+ self.prop_value = 'other prop value'
+ self.dep_prop_value = 'other dep prop value'
+ self.reconfigured += 1
+ d.addCallback(self._triggerUpstream)
+ return d
diff --git a/buildbot/buildbot/test/test_run.py b/buildbot/buildbot/test/test_run.py
new file mode 100644
index 0000000..a04ea5b
--- /dev/null
+++ b/buildbot/buildbot/test/test_run.py
@@ -0,0 +1,1199 @@
+# -*- test-case-name: buildbot.test.test_run -*-
+
+from twisted.trial import unittest
+from twisted.internet import reactor, defer
+import os
+
+from buildbot import master, interfaces
+from buildbot.sourcestamp import SourceStamp
+from buildbot.changes import changes
+from buildbot.status import builder
+from buildbot.process.base import BuildRequest
+
+from buildbot.test.runutils import RunMixin, TestFlagMixin, rmtree
+
+config_base = """
+from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.buildslave import BuildSlave
+s = factory.s
+
+f1 = factory.QuickBuildFactory('fakerep', 'cvsmodule', configure=None)
+
+f2 = factory.BuildFactory([
+ dummy.Dummy(timeout=1),
+ dummy.RemoteDummy(timeout=2),
+ ])
+
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit')]
+c['schedulers'] = []
+c['builders'] = []
+c['builders'].append({'name':'quick', 'slavename':'bot1',
+ 'builddir': 'quickdir', 'factory': f1})
+c['slavePortnum'] = 0
+"""
+
+config_run = config_base + """
+from buildbot.scheduler import Scheduler
+c['schedulers'] = [Scheduler('quick', None, 120, ['quick'])]
+"""
+
+config_can_build = config_base + """
+from buildbot.buildslave import BuildSlave
+c['slaves'] = [ BuildSlave('bot1', 'sekrit') ]
+
+from buildbot.scheduler import Scheduler
+c['schedulers'] = [Scheduler('dummy', None, 0.1, ['dummy'])]
+
+c['builders'] = [{'name': 'dummy', 'slavename': 'bot1',
+ 'builddir': 'dummy1', 'factory': f2}]
+"""
+
+config_cant_build = config_can_build + """
+class MyBuildSlave(BuildSlave):
+ def canStartBuild(self): return False
+c['slaves'] = [ MyBuildSlave('bot1', 'sekrit') ]
+"""
+
+config_concurrency = config_base + """
+from buildbot.buildslave import BuildSlave
+c['slaves'] = [ BuildSlave('bot1', 'sekrit', max_builds=1) ]
+
+from buildbot.scheduler import Scheduler
+c['schedulers'] = [Scheduler('dummy', None, 0.1, ['dummy', 'dummy2'])]
+
+c['builders'].append({'name': 'dummy', 'slavename': 'bot1',
+ 'builddir': 'dummy', 'factory': f2})
+c['builders'].append({'name': 'dummy2', 'slavename': 'bot1',
+ 'builddir': 'dummy2', 'factory': f2})
+"""
+
+config_2 = config_base + """
+c['builders'] = [{'name': 'dummy', 'slavename': 'bot1',
+ 'builddir': 'dummy1', 'factory': f2},
+ {'name': 'testdummy', 'slavename': 'bot1',
+ 'builddir': 'dummy2', 'factory': f2, 'category': 'test'}]
+"""
+
+config_3 = config_2 + """
+c['builders'].append({'name': 'adummy', 'slavename': 'bot1',
+ 'builddir': 'adummy3', 'factory': f2})
+c['builders'].append({'name': 'bdummy', 'slavename': 'bot1',
+ 'builddir': 'adummy4', 'factory': f2,
+ 'category': 'test'})
+"""
+
+config_4 = config_base + """
+c['builders'] = [{'name': 'dummy', 'slavename': 'bot1',
+ 'builddir': 'dummy', 'factory': f2}]
+"""
+
+config_4_newbasedir = config_4 + """
+c['builders'] = [{'name': 'dummy', 'slavename': 'bot1',
+ 'builddir': 'dummy2', 'factory': f2}]
+"""
+
+config_4_newbuilder = config_4_newbasedir + """
+c['builders'].append({'name': 'dummy2', 'slavename': 'bot1',
+ 'builddir': 'dummy23', 'factory': f2})
+"""
+
+class Run(unittest.TestCase):
+ def rmtree(self, d):
+ rmtree(d)
+
+ def testMaster(self):
+ self.rmtree("basedir")
+ os.mkdir("basedir")
+ m = master.BuildMaster("basedir")
+ m.loadConfig(config_run)
+ m.readConfig = True
+ m.startService()
+ cm = m.change_svc
+ c = changes.Change("bob", ["Makefile", "foo/bar.c"], "changed stuff")
+ cm.addChange(c)
+ # verify that the Scheduler is now waiting
+ s = m.allSchedulers()[0]
+ self.failUnless(s.timer)
+ # halting the service will also stop the timer
+ d = defer.maybeDeferred(m.stopService)
+ return d
+
+class CanStartBuild(RunMixin, unittest.TestCase):
+ def rmtree(self, d):
+ rmtree(d)
+
+ def testCanStartBuild(self):
+ return self.do_test(config_can_build, True)
+
+ def testCantStartBuild(self):
+ return self.do_test(config_cant_build, False)
+
+ def do_test(self, config, builder_should_run):
+ self.master.loadConfig(config)
+ self.master.readConfig = True
+ self.master.startService()
+ d = self.connectSlave()
+
+ # send a change
+ cm = self.master.change_svc
+ c = changes.Change("bob", ["Makefile", "foo/bar.c"], "changed stuff")
+ cm.addChange(c)
+
+ d.addCallback(self._do_test1, builder_should_run)
+
+ return d
+
+ def _do_test1(self, res, builder_should_run):
+ # delay a little bit. Note that relying upon timers is a bit fragile,
+ # in this case we're hoping that our 0.5 second timer will land us
+ # somewhere in the middle of the [0.1s, 3.1s] window (after the 0.1
+ # second Scheduler fires, then during the 3-second build), so that
+ # when we sample BuildSlave.state, we'll see BUILDING (or IDLE if the
+ # slave was told to be unavailable). On a heavily loaded system, our
+ # 0.5 second timer might not actually fire until after the build has
+ # completed. In the long run, it would be good to change this test to
+ # pass under those circumstances too.
+ d = defer.Deferred()
+ reactor.callLater(.5, d.callback, builder_should_run)
+ d.addCallback(self._do_test2)
+ return d
+
+ def _do_test2(self, builder_should_run):
+ b = self.master.botmaster.builders['dummy']
+ self.failUnless(len(b.slaves) == 1)
+
+ bs = b.slaves[0]
+ from buildbot.process.builder import IDLE, BUILDING
+ if builder_should_run:
+ self.failUnlessEqual(bs.state, BUILDING)
+ else:
+ self.failUnlessEqual(bs.state, IDLE)
+
+
+class ConcurrencyLimit(RunMixin, unittest.TestCase):
+
+ def testConcurrencyLimit(self):
+ d = self.master.loadConfig(config_concurrency)
+ d.addCallback(lambda res: self.master.startService())
+ d.addCallback(lambda res: self.connectSlave())
+
+ def _send(res):
+ # send a change. This will trigger both builders at the same
+ # time, but since they share a slave, the max_builds=1 setting
+            # will ensure that only one of the two builds gets to run.
+ cm = self.master.change_svc
+ c = changes.Change("bob", ["Makefile", "foo/bar.c"],
+ "changed stuff")
+ cm.addChange(c)
+ d.addCallback(_send)
+
+ def _delay(res):
+ d1 = defer.Deferred()
+ reactor.callLater(1, d1.callback, None)
+ # this test depends upon this 1s delay landing us in the middle
+ # of one of the builds.
+ return d1
+ d.addCallback(_delay)
+
+ def _check(res):
+ builders = [ self.master.botmaster.builders[bn]
+ for bn in ('dummy', 'dummy2') ]
+ for builder in builders:
+ self.failUnless(len(builder.slaves) == 1)
+
+ from buildbot.process.builder import BUILDING
+ building_bs = [ builder
+ for builder in builders
+ if builder.slaves[0].state == BUILDING ]
+ # assert that only one build is running right now. If the
+ # max_builds= weren't in effect, this would be 2.
+ self.failUnlessEqual(len(building_bs), 1)
+ d.addCallback(_check)
+
+ return d
+
+
+class Ping(RunMixin, unittest.TestCase):
+ def testPing(self):
+ self.master.loadConfig(config_2)
+ self.master.readConfig = True
+ self.master.startService()
+
+ d = self.connectSlave()
+ d.addCallback(self._testPing_1)
+ return d
+
+ def _testPing_1(self, res):
+ d = interfaces.IControl(self.master).getBuilder("dummy").ping(1)
+ d.addCallback(self._testPing_2)
+ return d
+
+ def _testPing_2(self, res):
+ pass
+
+class BuilderNames(unittest.TestCase):
+
+ def testGetBuilderNames(self):
+ os.mkdir("bnames")
+ m = master.BuildMaster("bnames")
+ s = m.getStatus()
+
+ m.loadConfig(config_3)
+ m.readConfig = True
+
+ self.failUnlessEqual(s.getBuilderNames(),
+ ["dummy", "testdummy", "adummy", "bdummy"])
+ self.failUnlessEqual(s.getBuilderNames(categories=['test']),
+ ["testdummy", "bdummy"])
+
+class Disconnect(RunMixin, unittest.TestCase):
+
+ def setUp(self):
+ RunMixin.setUp(self)
+
+ # verify that disconnecting the slave during a build properly
+ # terminates the build
+ m = self.master
+ s = self.status
+ c = self.control
+
+ m.loadConfig(config_2)
+ m.readConfig = True
+ m.startService()
+
+ self.failUnlessEqual(s.getBuilderNames(), ["dummy", "testdummy"])
+ self.s1 = s1 = s.getBuilder("dummy")
+ self.failUnlessEqual(s1.getName(), "dummy")
+ self.failUnlessEqual(s1.getState(), ("offline", []))
+ self.failUnlessEqual(s1.getCurrentBuilds(), [])
+ self.failUnlessEqual(s1.getLastFinishedBuild(), None)
+ self.failUnlessEqual(s1.getBuild(-1), None)
+
+ d = self.connectSlave()
+ d.addCallback(self._disconnectSetup_1)
+ return d
+
+ def _disconnectSetup_1(self, res):
+ self.failUnlessEqual(self.s1.getState(), ("idle", []))
+
+
+ def verifyDisconnect(self, bs):
+ self.failUnless(bs.isFinished())
+
+ step1 = bs.getSteps()[0]
+ self.failUnlessEqual(step1.getText(), ["delay", "interrupted"])
+ self.failUnlessEqual(step1.getResults()[0], builder.FAILURE)
+
+ self.failUnlessEqual(bs.getResults(), builder.FAILURE)
+
+ def verifyDisconnect2(self, bs):
+ self.failUnless(bs.isFinished())
+
+ step1 = bs.getSteps()[1]
+ self.failUnlessEqual(step1.getText(), ["remote", "delay", "2 secs",
+ "failed", "slave", "lost"])
+ self.failUnlessEqual(step1.getResults()[0], builder.FAILURE)
+
+ self.failUnlessEqual(bs.getResults(), builder.FAILURE)
+
+ def submitBuild(self):
+ ss = SourceStamp()
+ br = BuildRequest("forced build", ss, "dummy")
+ self.control.getBuilder("dummy").requestBuild(br)
+ d = defer.Deferred()
+ def _started(bc):
+ br.unsubscribe(_started)
+ d.callback(bc)
+ br.subscribe(_started)
+ return d
+
+ def testIdle2(self):
+ # now suppose the slave goes missing
+ self.disappearSlave(allowReconnect=False)
+
+        # forcing a build will work: the build detects that the slave is no
+ # longer available and will be re-queued. Wait 5 seconds, then check
+ # to make sure the build is still in the 'waiting for a slave' queue.
+ self.control.getBuilder("dummy").original.START_BUILD_TIMEOUT = 1
+ req = BuildRequest("forced build", SourceStamp(), "test_builder")
+ self.failUnlessEqual(req.startCount, 0)
+ self.control.getBuilder("dummy").requestBuild(req)
+ # this should ping the slave, which doesn't respond, and then give up
+ # after a second. The BuildRequest will be re-queued, and its
+ # .startCount will be incremented.
+ d = defer.Deferred()
+ d.addCallback(self._testIdle2_1, req)
+ reactor.callLater(3, d.callback, None)
+ return d
+ testIdle2.timeout = 5
+
+ def _testIdle2_1(self, res, req):
+ self.failUnlessEqual(req.startCount, 1)
+ cancelled = req.cancel()
+ self.failUnless(cancelled)
+
+
+ def testBuild1(self):
+ # this next sequence is timing-dependent. The dummy build takes at
+ # least 3 seconds to complete, and this batch of commands must
+ # complete within that time.
+ #
+ d = self.submitBuild()
+ d.addCallback(self._testBuild1_1)
+ return d
+
+ def _testBuild1_1(self, bc):
+ bs = bc.getStatus()
+ # now kill the slave before it gets to start the first step
+ d = self.shutdownAllSlaves() # dies before it gets started
+ d.addCallback(self._testBuild1_2, bs)
+ return d # TODO: this used to have a 5-second timeout
+
+ def _testBuild1_2(self, res, bs):
+ # now examine the just-stopped build and make sure it is really
+ # stopped. This is checking for bugs in which the slave-detach gets
+ # missed or causes an exception which prevents the build from being
+ # marked as "finished due to an error".
+ d = bs.waitUntilFinished()
+ d2 = self.master.botmaster.waitUntilBuilderDetached("dummy")
+ dl = defer.DeferredList([d, d2])
+ dl.addCallback(self._testBuild1_3, bs)
+ return dl # TODO: this had a 5-second timeout too
+
+ def _testBuild1_3(self, res, bs):
+ self.failUnlessEqual(self.s1.getState()[0], "offline")
+ self.verifyDisconnect(bs)
+
+
+ def testBuild2(self):
+ # this next sequence is timing-dependent
+ d = self.submitBuild()
+ d.addCallback(self._testBuild2_1)
+ return d
+ testBuild2.timeout = 30
+
+ def _testBuild2_1(self, bc):
+ bs = bc.getStatus()
+ # shutdown the slave while it's running the first step
+ reactor.callLater(0.5, self.shutdownAllSlaves)
+
+ d = bs.waitUntilFinished()
+ d.addCallback(self._testBuild2_2, bs)
+ return d
+
+ def _testBuild2_2(self, res, bs):
+ # we hit here when the build has finished. The builder is still being
+ # torn down, however, so spin for another second to allow the
+ # callLater(0) in Builder.detached to fire.
+ d = defer.Deferred()
+ reactor.callLater(1, d.callback, None)
+ d.addCallback(self._testBuild2_3, bs)
+ return d
+
+ def _testBuild2_3(self, res, bs):
+ self.failUnlessEqual(self.s1.getState()[0], "offline")
+ self.verifyDisconnect(bs)
+
+
+ def testBuild3(self):
+ # this next sequence is timing-dependent
+ d = self.submitBuild()
+ d.addCallback(self._testBuild3_1)
+ return d
+ testBuild3.timeout = 30
+
+ def _testBuild3_1(self, bc):
+ bs = bc.getStatus()
+ # kill the slave while it's running the first step
+ reactor.callLater(0.5, self.killSlave)
+ d = bs.waitUntilFinished()
+ d.addCallback(self._testBuild3_2, bs)
+ return d
+
+ def _testBuild3_2(self, res, bs):
+ # the builder is still being torn down, so give it another second
+ d = defer.Deferred()
+ reactor.callLater(1, d.callback, None)
+ d.addCallback(self._testBuild3_3, bs)
+ return d
+
+ def _testBuild3_3(self, res, bs):
+ self.failUnlessEqual(self.s1.getState()[0], "offline")
+ self.verifyDisconnect(bs)
+
+
+ def testBuild4(self):
+ # this next sequence is timing-dependent
+ d = self.submitBuild()
+ d.addCallback(self._testBuild4_1)
+ return d
+ testBuild4.timeout = 30
+
+ def _testBuild4_1(self, bc):
+ bs = bc.getStatus()
+ # kill the slave while it's running the second (remote) step
+ reactor.callLater(1.5, self.killSlave)
+ d = bs.waitUntilFinished()
+ d.addCallback(self._testBuild4_2, bs)
+ return d
+
+ def _testBuild4_2(self, res, bs):
+ # at this point, the slave is in the process of being removed, so it
+ # could either be 'idle' or 'offline'. I think there is a
+ # reactor.callLater(0) standing between here and the offline state.
+ #reactor.iterate() # TODO: remove the need for this
+
+ self.failUnlessEqual(self.s1.getState()[0], "offline")
+ self.verifyDisconnect2(bs)
+
+
+ def testInterrupt(self):
+ # this next sequence is timing-dependent
+ d = self.submitBuild()
+ d.addCallback(self._testInterrupt_1)
+ return d
+ testInterrupt.timeout = 30
+
+ def _testInterrupt_1(self, bc):
+ bs = bc.getStatus()
+ # halt the build while it's running the first step
+ reactor.callLater(0.5, bc.stopBuild, "bang go splat")
+ d = bs.waitUntilFinished()
+ d.addCallback(self._testInterrupt_2, bs)
+ return d
+
+ def _testInterrupt_2(self, res, bs):
+ self.verifyDisconnect(bs)
+
+
+ def testDisappear(self):
+ bc = self.control.getBuilder("dummy")
+
+ # ping should succeed
+ d = bc.ping(1)
+ d.addCallback(self._testDisappear_1, bc)
+ return d
+
+ def _testDisappear_1(self, res, bc):
+ self.failUnlessEqual(res, True)
+
+ # now, before any build is run, make the slave disappear
+ self.disappearSlave(allowReconnect=False)
+
+ # at this point, a ping to the slave should timeout
+ d = bc.ping(1)
+ d.addCallback(self._testDisappear_2)
+ return d
+
+ def _testDisappear_2(self, res):
+ self.failUnlessEqual(res, False)
+
+ def testDuplicate(self):
+ bc = self.control.getBuilder("dummy")
+ bs = self.status.getBuilder("dummy")
+ ss = bs.getSlaves()[0]
+
+ self.failUnless(ss.isConnected())
+ self.failUnlessEqual(ss.getAdmin(), "one")
+
+ # now, before any build is run, make the first slave disappear
+ self.disappearSlave(allowReconnect=False)
+
+ d = self.master.botmaster.waitUntilBuilderDetached("dummy")
+ # now let the new slave take over
+ self.connectSlave2()
+ d.addCallback(self._testDuplicate_1, ss)
+ return d
+ testDuplicate.timeout = 5
+
+ def _testDuplicate_1(self, res, ss):
+ d = self.master.botmaster.waitUntilBuilderAttached("dummy")
+ d.addCallback(self._testDuplicate_2, ss)
+ return d
+
+ def _testDuplicate_2(self, res, ss):
+ self.failUnless(ss.isConnected())
+ self.failUnlessEqual(ss.getAdmin(), "two")
+
+
+class Disconnect2(RunMixin, unittest.TestCase):
+
+ def setUp(self):
+ RunMixin.setUp(self)
+ # verify that disconnecting the slave during a build properly
+ # terminates the build
+ m = self.master
+ s = self.status
+ c = self.control
+
+ m.loadConfig(config_2)
+ m.readConfig = True
+ m.startService()
+
+ self.failUnlessEqual(s.getBuilderNames(), ["dummy", "testdummy"])
+ self.s1 = s1 = s.getBuilder("dummy")
+ self.failUnlessEqual(s1.getName(), "dummy")
+ self.failUnlessEqual(s1.getState(), ("offline", []))
+ self.failUnlessEqual(s1.getCurrentBuilds(), [])
+ self.failUnlessEqual(s1.getLastFinishedBuild(), None)
+ self.failUnlessEqual(s1.getBuild(-1), None)
+
+ d = self.connectSlaveFastTimeout()
+ d.addCallback(self._setup_disconnect2_1)
+ return d
+
+ def _setup_disconnect2_1(self, res):
+ self.failUnlessEqual(self.s1.getState(), ("idle", []))
+
+
+ def testSlaveTimeout(self):
+ # now suppose the slave goes missing. We want to find out when it
+ # creates a new Broker, so we reach inside and mark it with the
+ # well-known sigil of impending messy death.
+ bd = self.slaves['bot1'].getServiceNamed("bot").builders["dummy"]
+ broker = bd.remote.broker
+ broker.redshirt = 1
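+ # ('redshirt' is just an arbitrary attribute: a reconnect creates a
+ # brand-new Broker that won't carry it, which is how
+ # _testSlaveTimeout_3 below can tell the two connections apart)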
+
+ # make sure the keepalives will keep the connection up
+ d = defer.Deferred()
+ reactor.callLater(5, d.callback, None)
+ d.addCallback(self._testSlaveTimeout_1)
+ return d
+ testSlaveTimeout.timeout = 20
+
+ def _testSlaveTimeout_1(self, res):
+ bd = self.slaves['bot1'].getServiceNamed("bot").builders["dummy"]
+ if not bd.remote or not hasattr(bd.remote.broker, "redshirt"):
+ self.fail("slave disconnected when it shouldn't have")
+
+ d = self.master.botmaster.waitUntilBuilderDetached("dummy")
+ # whoops! how careless of me.
+ self.disappearSlave(allowReconnect=True)
+ # the slave will realize the connection is lost within 2 seconds, and
+ # reconnect.
+ d.addCallback(self._testSlaveTimeout_2)
+ return d
+
+ def _testSlaveTimeout_2(self, res):
+ # the ReconnectingPBClientFactory will attempt a reconnect in two
+ # seconds.
+ d = self.master.botmaster.waitUntilBuilderAttached("dummy")
+ d.addCallback(self._testSlaveTimeout_3)
+ return d
+
+ def _testSlaveTimeout_3(self, res):
+ # make sure it is a new connection (i.e. a new Broker)
+ bd = self.slaves['bot1'].getServiceNamed("bot").builders["dummy"]
+ self.failUnless(bd.remote, "hey, slave isn't really connected")
+ self.failIf(hasattr(bd.remote.broker, "redshirt"),
+ "hey, slave's Broker is still marked for death")
+
+
+class Basedir(RunMixin, unittest.TestCase):
+ def testChangeBuilddir(self):
+ m = self.master
+ m.loadConfig(config_4)
+ m.readConfig = True
+ m.startService()
+
+ d = self.connectSlave()
+ d.addCallback(self._testChangeBuilddir_1)
+ return d
+
+ def _testChangeBuilddir_1(self, res):
+ self.bot = bot = self.slaves['bot1'].bot
+ self.builder = builder = bot.builders.get("dummy")
+ self.failUnless(builder)
+ self.failUnlessEqual(builder.builddir, "dummy")
+ self.failUnlessEqual(builder.basedir,
+ os.path.join("slavebase-bot1", "dummy"))
+
+ d = self.master.loadConfig(config_4_newbasedir)
+ d.addCallback(self._testChangeBuilddir_2)
+ return d
+
+ def _testChangeBuilddir_2(self, res):
+ bot = self.bot
+ # this does NOT cause the builder to be replaced
+ builder = bot.builders.get("dummy")
+ self.failUnless(builder)
+ self.failUnlessIdentical(self.builder, builder)
+ # the basedir should be updated
+ self.failUnlessEqual(builder.builddir, "dummy2")
+ self.failUnlessEqual(builder.basedir,
+ os.path.join("slavebase-bot1", "dummy2"))
+
+ # add a new builder, which causes the basedir list to be reloaded
+ d = self.master.loadConfig(config_4_newbuilder)
+ return d
+
+class Triggers(RunMixin, TestFlagMixin, unittest.TestCase):
+ config_trigger = config_base + """
+from buildbot.scheduler import Triggerable, Scheduler
+from buildbot.steps.trigger import Trigger
+from buildbot.steps.dummy import Dummy
+from buildbot.test.runutils import SetTestFlagStep
+c['schedulers'] = [
+ Scheduler('triggerer', None, 0.1, ['triggerer']),
+ Triggerable('triggeree', ['triggeree'])
+]
+triggerer = factory.BuildFactory()
+triggerer.addSteps([
+ SetTestFlagStep(flagname='triggerer_started'),
+ Trigger(flunkOnFailure=True, @ARGS@),
+ SetTestFlagStep(flagname='triggerer_finished'),
+ ])
+triggeree = factory.BuildFactory([
+ s(SetTestFlagStep, flagname='triggeree_started'),
+ s(@DUMMYCLASS@),
+ s(SetTestFlagStep, flagname='triggeree_finished'),
+ ])
+c['builders'] = [{'name': 'triggerer', 'slavename': 'bot1',
+ 'builddir': 'triggerer', 'factory': triggerer},
+ {'name': 'triggeree', 'slavename': 'bot1',
+ 'builddir': 'triggeree', 'factory': triggeree}]
+"""
+
+ def mkConfig(self, args, dummyclass="Dummy"):
+ return self.config_trigger.replace("@ARGS@", args).replace("@DUMMYCLASS@", dummyclass)
+
+ def setupTest(self, args, dummyclass, checkFn):
+ self.clearFlags()
+ m = self.master
+ m.loadConfig(self.mkConfig(args, dummyclass))
+ m.readConfig = True
+ m.startService()
+
+ c = changes.Change("bob", ["Makefile", "foo/bar.c"], "changed stuff")
+ m.change_svc.addChange(c)
+
+ d = self.connectSlave(builders=['triggerer', 'triggeree'])
+ d.addCallback(self.startTimer, 0.5, checkFn)
+ return d
+
+ def startTimer(self, res, time, next_fn):
+ d = defer.Deferred()
+ reactor.callLater(time, d.callback, None)
+ d.addCallback(next_fn)
+ return d
+
+ def testTriggerBuild(self):
+ return self.setupTest("schedulerNames=['triggeree']",
+ "Dummy",
+ self._checkTriggerBuild)
+
+ def _checkTriggerBuild(self, res):
+ self.failIfFlagNotSet('triggerer_started')
+ self.failIfFlagNotSet('triggeree_started')
+ self.failIfFlagSet('triggeree_finished')
+ self.failIfFlagNotSet('triggerer_finished')
+
+ def testTriggerBuildWait(self):
+ return self.setupTest("schedulerNames=['triggeree'], waitForFinish=1",
+ "Dummy",
+ self._checkTriggerBuildWait)
+
+ def _checkTriggerBuildWait(self, res):
+ self.failIfFlagNotSet('triggerer_started')
+ self.failIfFlagNotSet('triggeree_started')
+ self.failIfFlagSet('triggeree_finished')
+ self.failIfFlagSet('triggerer_finished')
+
+class PropertyPropagation(RunMixin, TestFlagMixin, unittest.TestCase):
+ def setupTest(self, config, builders, checkFn):
+ self.clearFlags()
+ m = self.master
+ m.loadConfig(config)
+ m.readConfig = True
+ m.startService()
+
+ c = changes.Change("bob", ["Makefile", "foo/bar.c"], "changed stuff")
+ m.change_svc.addChange(c)
+
+ d = self.connectSlave(builders=builders)
+ d.addCallback(self.startTimer, 0.5, checkFn)
+ return d
+
+ def startTimer(self, res, time, next_fn):
+ d = defer.Deferred()
+ reactor.callLater(time, d.callback, None)
+ d.addCallback(next_fn)
+ return d
+
+ config_schprop = config_base + """
+from buildbot.scheduler import Scheduler
+from buildbot.steps.dummy import Dummy
+from buildbot.test.runutils import SetTestFlagStep
+from buildbot.process.properties import WithProperties
+c['schedulers'] = [
+ Scheduler('mysched', None, 0.1, ['flagcolor'], properties={'color':'red'}),
+]
+factory = factory.BuildFactory([
+ s(SetTestFlagStep, flagname='testresult',
+ value=WithProperties('color=%(color)s sched=%(scheduler)s')),
+ ])
+c['builders'] = [{'name': 'flagcolor', 'slavename': 'bot1',
+ 'builddir': 'test', 'factory': factory},
+ ]
+"""
+
+ def testScheduler(self):
+ def _check(res):
+ self.failUnlessEqual(self.getFlag('testresult'),
+ 'color=red sched=mysched')
+ return self.setupTest(self.config_schprop, ['flagcolor'], _check)
+
+ config_slaveprop = config_base + """
+from buildbot.scheduler import Scheduler
+from buildbot.steps.dummy import Dummy
+from buildbot.test.runutils import SetTestFlagStep
+from buildbot.process.properties import WithProperties
+c['schedulers'] = [
+ Scheduler('mysched', None, 0.1, ['flagcolor'])
+]
+c['slaves'] = [BuildSlave('bot1', 'sekrit', properties={'color':'orange'})]
+factory = factory.BuildFactory([
+ s(SetTestFlagStep, flagname='testresult',
+ value=WithProperties('color=%(color)s slavename=%(slavename)s')),
+ ])
+c['builders'] = [{'name': 'flagcolor', 'slavename': 'bot1',
+ 'builddir': 'test', 'factory': factory},
+ ]
+"""
+ def testSlave(self):
+ def _check(res):
+ self.failUnlessEqual(self.getFlag('testresult'),
+ 'color=orange slavename=bot1')
+ return self.setupTest(self.config_slaveprop, ['flagcolor'], _check)
+
+ config_trigger = config_base + """
+from buildbot.scheduler import Triggerable, Scheduler
+from buildbot.steps.trigger import Trigger
+from buildbot.steps.dummy import Dummy
+from buildbot.test.runutils import SetTestFlagStep
+from buildbot.process.properties import WithProperties
+c['schedulers'] = [
+ Scheduler('triggerer', None, 0.1, ['triggerer'],
+ properties={'color':'mauve', 'pls_trigger':'triggeree'}),
+ Triggerable('triggeree', ['triggeree'], properties={'color':'invisible'})
+]
+triggerer = factory.BuildFactory([
+ s(SetTestFlagStep, flagname='testresult', value='wrongone'),
+ s(Trigger, flunkOnFailure=True,
+ schedulerNames=[WithProperties('%(pls_trigger)s')],
+ set_properties={'color' : WithProperties('%(color)s')}),
+ s(SetTestFlagStep, flagname='testresult', value='triggered'),
+ ])
+triggeree = factory.BuildFactory([
+ s(SetTestFlagStep, flagname='testresult',
+ value=WithProperties('sched=%(scheduler)s color=%(color)s')),
+ ])
+c['builders'] = [{'name': 'triggerer', 'slavename': 'bot1',
+ 'builddir': 'triggerer', 'factory': triggerer},
+ {'name': 'triggeree', 'slavename': 'bot1',
+ 'builddir': 'triggeree', 'factory': triggeree}]
+"""
+ def testTrigger(self):
+ def _check(res):
+ self.failUnlessEqual(self.getFlag('testresult'),
+ 'sched=triggeree color=mauve')
+ return self.setupTest(self.config_trigger,
+ ['triggerer', 'triggeree'], _check)
+
+
+config_test_flag = config_base + """
+from buildbot.scheduler import Scheduler
+c['schedulers'] = [Scheduler('quick', None, 0.1, ['dummy'])]
+
+from buildbot.test.runutils import SetTestFlagStep
+f3 = factory.BuildFactory([
+ s(SetTestFlagStep, flagname='foo', value='bar'),
+ ])
+
+c['builders'] = [{'name': 'dummy', 'slavename': 'bot1',
+ 'builddir': 'dummy', 'factory': f3}]
+"""
+
+class TestFlag(RunMixin, TestFlagMixin, unittest.TestCase):
+ """Test for the TestFlag functionality in runutils"""
+ def testTestFlag(self):
+ m = self.master
+ m.loadConfig(config_test_flag)
+ m.readConfig = True
+ m.startService()
+
+ c = changes.Change("bob", ["Makefile", "foo/bar.c"], "changed stuff")
+ m.change_svc.addChange(c)
+
+ d = self.connectSlave()
+ d.addCallback(self._testTestFlag_1)
+ return d
+
+ def _testTestFlag_1(self, res):
+ d = defer.Deferred()
+ reactor.callLater(0.5, d.callback, None)
+ d.addCallback(self._testTestFlag_2)
+ return d
+
+ def _testTestFlag_2(self, res):
+ self.failUnlessEqual(self.getFlag('foo'), 'bar')
+
+# TODO: test everything, from Change submission to Scheduler to Build to
+# Status. Use all the status types. Specifically I want to catch recurrences
+# of the bug where I forgot to make Waterfall inherit from StatusReceiver
+# such that buildSetSubmitted failed.
+
+config_test_builder = config_base + """
+from buildbot.scheduler import Scheduler
+c['schedulers'] = [Scheduler('quick', 'dummy', 0.1, ['dummy']),
+ Scheduler('quick2', 'dummy2', 0.1, ['dummy2']),
+ Scheduler('quick3', 'dummy3', 0.1, ['dummy3'])]
+
+from buildbot.steps.shell import ShellCommand
+f3 = factory.BuildFactory([
+ s(ShellCommand, command="sleep 3", env={'blah':'blah'})
+ ])
+
+c['builders'] = [{'name': 'dummy', 'slavename': 'bot1', 'env': {'foo':'bar'},
+ 'builddir': 'dummy', 'factory': f3}]
+
+c['builders'].append({'name': 'dummy2', 'slavename': 'bot1',
+ 'env': {'blah':'bar'}, 'builddir': 'dummy2',
+ 'factory': f3})
+
+f4 = factory.BuildFactory([
+ s(ShellCommand, command="sleep 3")
+ ])
+
+c['builders'].append({'name': 'dummy3', 'slavename': 'bot1',
+ 'env': {'blah':'bar'}, 'builddir': 'dummy3',
+ 'factory': f4})
+"""
+
+class TestBuilder(RunMixin, unittest.TestCase):
+ def setUp(self):
+ RunMixin.setUp(self)
+ self.master.loadConfig(config_test_builder)
+ self.master.readConfig = True
+ self.master.startService()
+ self.connectSlave(builders=["dummy", "dummy2", "dummy3"])
+
+ def doBuilderEnvTest(self, branch, cb):
+ c = changes.Change("bob", ["Makefile", "foo/bar.c"], "changed",
+ branch=branch)
+ self.master.change_svc.addChange(c)
+
+ d = defer.Deferred()
+ reactor.callLater(0.5, d.callback, None)
+ d.addCallback(cb)
+
+ return d
+
+ def testBuilderEnv(self):
+ return self.doBuilderEnvTest("dummy", self._testBuilderEnv1)
+
+ def _testBuilderEnv1(self, res):
+ b = self.master.botmaster.builders['dummy']
+ build = b.building[0]
+ s = build.currentStep
+ self.failUnless('foo' in s.cmd.args['env'])
+ self.failUnlessEqual('bar', s.cmd.args['env']['foo'])
+ self.failUnless('blah' in s.cmd.args['env'])
+ self.failUnlessEqual('blah', s.cmd.args['env']['blah'])
+
+ def testBuilderEnvOverride(self):
+ return self.doBuilderEnvTest("dummy2", self._testBuilderEnvOverride1)
+
+ def _testBuilderEnvOverride1(self, res):
+ b = self.master.botmaster.builders['dummy2']
+ build = b.building[0]
+ s = build.currentStep
+ self.failUnless('blah' in s.cmd.args['env'])
+ self.failUnlessEqual('blah', s.cmd.args['env']['blah'])
+
+ def testBuilderNoStepEnv(self):
+ return self.doBuilderEnvTest("dummy3", self._testBuilderNoStepEnv1)
+
+ def _testBuilderNoStepEnv1(self, res):
+ b = self.master.botmaster.builders['dummy3']
+ build = b.building[0]
+ s = build.currentStep
+ self.failUnless('blah' in s.cmd.args['env'])
+ self.failUnlessEqual('bar', s.cmd.args['env']['blah'])
+
+class SchedulerWatchers(RunMixin, TestFlagMixin, unittest.TestCase):
+ config_watchable = config_base + """
+from buildbot.scheduler import AnyBranchScheduler
+from buildbot.steps.dummy import Dummy
+from buildbot.test.runutils import setTestFlag, SetTestFlagStep
+s = AnyBranchScheduler(
+ name='abs',
+ branches=None,
+ treeStableTimer=0,
+ builderNames=['a', 'b'])
+c['schedulers'] = [ s ]
+
+# count the number of times a success watcher is called
+numCalls = [ 0 ]
+def watcher(ss):
+ numCalls[0] += 1
+ setTestFlag("numCalls", numCalls[0])
+s.subscribeToSuccessfulBuilds(watcher)
+
+f = factory.BuildFactory()
+f.addStep(Dummy(timeout=0))
+c['builders'] = [{'name': 'a', 'slavename': 'bot1',
+ 'builddir': 'a', 'factory': f},
+ {'name': 'b', 'slavename': 'bot1',
+ 'builddir': 'b', 'factory': f}]
+"""
+
+ def testWatchers(self):
+ self.clearFlags()
+ m = self.master
+ m.loadConfig(self.config_watchable)
+ m.readConfig = True
+ m.startService()
+
+ c = changes.Change("bob", ["Makefile", "foo/bar.c"], "changed stuff")
+ m.change_svc.addChange(c)
+
+ d = self.connectSlave(builders=['a', 'b'])
+
+ def pause(res):
+ d = defer.Deferred()
+ reactor.callLater(1, d.callback, res)
+ return d
+ d.addCallback(pause)
+
+ def checkFn(res):
+ self.failUnlessEqual(self.getFlag('numCalls'), 1)
+ d.addCallback(checkFn)
+ return d
+
+config_priority = """
+from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.buildslave import BuildSlave
+s = factory.s
+
+from buildbot.steps.shell import ShellCommand
+f1 = factory.BuildFactory([
+ s(ShellCommand, command="sleep 3", env={'blah':'blah'})
+ ])
+
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit', max_builds=1)]
+c['schedulers'] = []
+c['builders'] = []
+c['builders'].append({'name':'quick1', 'slavename':'bot1', 'builddir': 'quickdir1', 'factory': f1})
+c['builders'].append({'name':'quick2', 'slavename':'bot1', 'builddir': 'quickdir2', 'factory': f1})
+c['slavePortnum'] = 0
+"""
+
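+# With max_builds=1 the single slave runs one build at a time, so the order
+# in which the queued requests are built should follow their (fudged)
+# submittedAt values; that is what the test below checks.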
+class BuildPrioritization(RunMixin, unittest.TestCase):
+ def rmtree(self, d):
+ rmtree(d)
+
+ def testPriority(self):
+ self.rmtree("basedir")
+ os.mkdir("basedir")
+ self.master.loadConfig(config_priority)
+ self.master.readConfig = True
+ self.master.startService()
+
+ d = self.connectSlave(builders=['quick1', 'quick2'])
+ d.addCallback(self._connected)
+
+ return d
+
+ def _connected(self, *args):
+ # Our fake source stamp
+ # we override canBeMergedWith so that our requests don't get merged together
+ ss = SourceStamp()
+ ss.canBeMergedWith = lambda x: False
+
+ # Send one request to tie up the slave before sending future requests
+ req0 = BuildRequest("reason", ss, "test_builder")
+ self.master.botmaster.builders['quick1'].submitBuildRequest(req0)
+
+ # Send 10 requests to alternating builders
+ # We fudge the submittedAt field after submitting since they're all
+ # getting submitted so close together according to time.time()
+ # and all we care about is what order they're run in.
+ reqs = []
+ self.finish_order = []
+ for i in range(10):
+ req = BuildRequest(str(i), ss, "test_builder")
+ j = i % 2 + 1
+ self.master.botmaster.builders['quick%i' % j].submitBuildRequest(req)
+ req.submittedAt = i
+ # Keep track of what order the builds finished in
+ def append(item, arg):
+ self.finish_order.append(item)
+ req.waitUntilFinished().addCallback(append, req)
+ reqs.append(req.waitUntilFinished())
+
+ dl = defer.DeferredList(reqs)
+ dl.addCallback(self._all_finished)
+
+ # After our first build finishes, we should wait for the rest to finish
+ d = req0.waitUntilFinished()
+ d.addCallback(lambda x: dl)
+ return d
+
+ def _all_finished(self, *args):
+ # The builds should have finished in proper order
+ self.failUnlessEqual([int(b.reason) for b in self.finish_order], range(10))
+
+# Test graceful shutdown when no builds are active, as well as
+# canStartBuild after graceful shutdown is initiated
+config_graceful_shutdown_idle = config_base
+class GracefulShutdownIdle(RunMixin, unittest.TestCase):
+ def testShutdown(self):
+ self.rmtree("basedir")
+ os.mkdir("basedir")
+ self.master.loadConfig(config_graceful_shutdown_idle)
+ self.master.readConfig = True
+ self.master.startService()
+ d = self.connectSlave(builders=['quick'])
+ d.addCallback(self._do_shutdown)
+ return d
+
+ def _do_shutdown(self, res):
+ bs = self.master.botmaster.builders['quick'].slaves[0]
+ # Check that the slave is accepting builds once it's connected
+ self.assertEquals(bs.slave.canStartBuild(), True)
+
+ # Monkeypatch the slave's shutdown routine since the real shutdown
+ # interrupts the test harness
+ self.did_shutdown = False
+ def _shutdown():
+ self.did_shutdown = True
+ bs.slave.shutdown = _shutdown
+
+ # Start a graceful shutdown
+ bs.slave.slave_status.setGraceful(True)
+ # Check that the slave isn't accepting builds any more
+ self.assertEquals(bs.slave.canStartBuild(), False)
+
+ # Wait a little bit and then check that we (pretended to) shut down
+ d = defer.Deferred()
+ d.addCallback(self._check_shutdown)
+ reactor.callLater(0.5, d.callback, None)
+ return d
+
+ def _check_shutdown(self, res):
+ self.assertEquals(self.did_shutdown, True)
+
+# Test graceful shutdown when two builds are active
+config_graceful_shutdown_busy = config_base + """
+from buildbot.buildslave import BuildSlave
+c['slaves'] = [ BuildSlave('bot1', 'sekrit', max_builds=2) ]
+
+from buildbot.scheduler import Scheduler
+c['schedulers'] = [Scheduler('dummy', None, 0.1, ['dummy', 'dummy2'])]
+
+c['builders'].append({'name': 'dummy', 'slavename': 'bot1',
+ 'builddir': 'dummy', 'factory': f2})
+c['builders'].append({'name': 'dummy2', 'slavename': 'bot1',
+ 'builddir': 'dummy2', 'factory': f2})
+"""
+class GracefulShutdownBusy(RunMixin, unittest.TestCase):
+ def testShutdown(self):
+ self.rmtree("basedir")
+ os.mkdir("basedir")
+ d = self.master.loadConfig(config_graceful_shutdown_busy)
+ d.addCallback(lambda res: self.master.startService())
+ d.addCallback(lambda res: self.connectSlave())
+
+ def _send(res):
+ # send a change. This will trigger both builders at the same
+ # time, and since the slave is configured with max_builds=2, both
+ # builds get to run at once.
+ cm = self.master.change_svc
+ c = changes.Change("bob", ["Makefile", "foo/bar.c"],
+ "changed stuff")
+ cm.addChange(c)
+ d.addCallback(_send)
+
+ def _delay(res):
+ d1 = defer.Deferred()
+ reactor.callLater(0.5, d1.callback, None)
+ # this test depends upon this 0.5s delay landing us in the middle
+ # of one of the builds.
+ return d1
+ d.addCallback(_delay)
+
+ # Start a graceful shutdown. We should be in the middle of two builds
+ def _shutdown(res):
+ bs = self.master.botmaster.builders['dummy'].slaves[0]
+ # Monkeypatch the slave's shutdown routine since the real shutdown
+ # interrupts the test harness
+ self.did_shutdown = False
+ def _shutdown():
+ self.did_shutdown = True
+ return defer.succeed(None)
+ bs.slave.shutdown = _shutdown
+ # Start a graceful shutdown
+ bs.slave.slave_status.setGraceful(True)
+
+ builders = [ self.master.botmaster.builders[bn]
+ for bn in ('dummy', 'dummy2') ]
+ for builder in builders:
+ self.failUnless(len(builder.slaves) == 1)
+ from buildbot.process.builder import BUILDING
+ building_bs = [ builder
+ for builder in builders
+ if builder.slaves[0].state == BUILDING ]
+ # assert that both builds are running right now.
+ self.failUnlessEqual(len(building_bs), 2)
+
+ d.addCallback(_shutdown)
+
+ # Wait a little bit again, and then make sure that we are still running
+ # the two builds, and haven't shutdown yet
+ d.addCallback(_delay)
+ def _check(res):
+ self.assertEquals(self.did_shutdown, False)
+ builders = [ self.master.botmaster.builders[bn]
+ for bn in ('dummy', 'dummy2') ]
+ for builder in builders:
+ self.failUnless(len(builder.slaves) == 1)
+ from buildbot.process.builder import BUILDING
+ building_bs = [ builder
+ for builder in builders
+ if builder.slaves[0].state == BUILDING ]
+ # assert that both builds are running right now.
+ self.failUnlessEqual(len(building_bs), 2)
+ d.addCallback(_check)
+
+ # Wait for all the builds to finish
+ def _wait_finish(res):
+ builders = [ self.master.botmaster.builders[bn]
+ for bn in ('dummy', 'dummy2') ]
+ builds = []
+ for builder in builders:
+ builds.append(builder.builder_status.currentBuilds[0].waitUntilFinished())
+ dl = defer.DeferredList(builds)
+ return dl
+ d.addCallback(_wait_finish)
+
+ # Wait a little bit after the builds finish, and then
+ # check that the slave has shutdown
+ d.addCallback(_delay)
+ def _check_shutdown(res):
+ # assert that we shutdown the slave
+ self.assertEquals(self.did_shutdown, True)
+ builders = [ self.master.botmaster.builders[bn]
+ for bn in ('dummy', 'dummy2') ]
+ from buildbot.process.builder import BUILDING
+ building_bs = [ builder
+ for builder in builders
+ if builder.slaves[0].state == BUILDING ]
+ # assert that no builds are running right now.
+ self.failUnlessEqual(len(building_bs), 0)
+ d.addCallback(_check_shutdown)
+
+ return d
diff --git a/buildbot/buildbot/test/test_runner.py b/buildbot/buildbot/test/test_runner.py
new file mode 100644
index 0000000..d94ef5f
--- /dev/null
+++ b/buildbot/buildbot/test/test_runner.py
@@ -0,0 +1,392 @@
+
+# this file tests the 'buildbot' command, with its various sub-commands
+
+from twisted.trial import unittest
+from twisted.python import usage
+import os, shutil, shlex
+import sets
+
+from buildbot.scripts import runner, tryclient
+
+class Options(unittest.TestCase):
+ optionsFile = "SDFsfsFSdfsfsFSD"
+
+ def make(self, d, key):
+ # we use a wacky filename here in case the test code discovers the
+ # user's real ~/.buildbot/ directory
+ os.makedirs(os.sep.join(d + [".buildbot"]))
+ f = open(os.sep.join(d + [".buildbot", self.optionsFile]), "w")
+ f.write("key = '%s'\n" % key)
+ f.close()
+
+ def check(self, d, key):
+ basedir = os.sep.join(d)
+ options = runner.loadOptions(self.optionsFile, here=basedir,
+ home=self.home)
+ if key is None:
+ self.failIf(options.has_key('key'))
+ else:
+ self.failUnlessEqual(options['key'], key)
+
+ def testFindOptions(self):
+ self.make(["home", "dir1", "dir2", "dir3"], "one")
+ self.make(["home", "dir1", "dir2"], "two")
+ self.make(["home"], "home")
+ self.home = os.path.abspath("home")
+
+ self.check(["home", "dir1", "dir2", "dir3"], "one")
+ self.check(["home", "dir1", "dir2"], "two")
+ self.check(["home", "dir1"], "home")
+
+ self.home = os.path.abspath("nothome")
+ os.makedirs(os.sep.join(["nothome", "dir1"]))
+ self.check(["nothome", "dir1"], None)
+
+ def doForce(self, args, expected):
+ o = runner.ForceOptions()
+ o.parseOptions(args)
+ self.failUnlessEqual(o.keys(), expected.keys())
+ for k in o.keys():
+ self.failUnlessEqual(o[k], expected[k],
+ "[%s] got %s instead of %s" % (k, o[k],
+ expected[k]))
+
+ def testForceOptions(self):
+ if not hasattr(shlex, "split"):
+ raise unittest.SkipTest("need python>=2.3 for shlex.split")
+
+ exp = {"builder": "b1", "reason": "reason",
+ "branch": None, "revision": None}
+ self.doForce(shlex.split("b1 reason"), exp)
+ self.doForce(shlex.split("b1 'reason'"), exp)
+ self.failUnlessRaises(usage.UsageError, self.doForce,
+ shlex.split("--builder b1 'reason'"), exp)
+ self.doForce(shlex.split("--builder b1 --reason reason"), exp)
+ self.doForce(shlex.split("--builder b1 --reason 'reason'"), exp)
+ self.doForce(shlex.split("--builder b1 --reason \"reason\""), exp)
+
+ exp['reason'] = "longer reason"
+ self.doForce(shlex.split("b1 'longer reason'"), exp)
+ self.doForce(shlex.split("b1 longer reason"), exp)
+ self.doForce(shlex.split("--reason 'longer reason' b1"), exp)
+
+
+class Create(unittest.TestCase):
+ def failUnlessIn(self, substring, string, msg=None):
+ # trial provides its own failUnlessIn, but that version requires
+ # python >= 2.3 when used on strings, so we roll our own here.
+ self.failUnless(string.find(substring) != -1, msg)
+ def failUnlessExists(self, filename):
+ self.failUnless(os.path.exists(filename), "%s should exist" % filename)
+ def failIfExists(self, filename):
+ self.failIf(os.path.exists(filename), "%s should not exist" % filename)
+
+ def setUp(self):
+ self.cwd = os.getcwd()
+
+ def tearDown(self):
+ os.chdir(self.cwd)
+
+ def testMaster(self):
+ basedir = "test_runner.master"
+ options = runner.MasterOptions()
+ options.parseOptions(["-q", basedir])
+ cwd = os.getcwd()
+ runner.createMaster(options)
+ os.chdir(cwd)
+
+ tac = os.path.join(basedir, "buildbot.tac")
+ self.failUnless(os.path.exists(tac))
+ tacfile = open(tac,"rt").read()
+ self.failUnlessIn("basedir", tacfile)
+ self.failUnlessIn("configfile = r'master.cfg'", tacfile)
+ self.failUnlessIn("BuildMaster(basedir, configfile)", tacfile)
+
+ cfg = os.path.join(basedir, "master.cfg")
+ self.failIfExists(cfg)
+ samplecfg = os.path.join(basedir, "master.cfg.sample")
+ self.failUnlessExists(samplecfg)
+ cfgfile = open(samplecfg,"rt").read()
+ self.failUnlessIn("This is a sample buildmaster config file", cfgfile)
+
+ makefile = os.path.join(basedir, "Makefile.sample")
+ self.failUnlessExists(makefile)
+
+ # now verify that running it a second time (with the same options)
+ # does the right thing: nothing changes
+ runner.createMaster(options)
+ os.chdir(cwd)
+
+ self.failIfExists(os.path.join(basedir, "buildbot.tac.new"))
+ self.failUnlessExists(os.path.join(basedir, "master.cfg.sample"))
+
+ oldtac = open(os.path.join(basedir, "buildbot.tac"), "rt").read()
+
+ # mutate Makefile.sample, since it should be rewritten
+ f = open(os.path.join(basedir, "Makefile.sample"), "rt")
+ oldmake = f.read()
+ f = open(os.path.join(basedir, "Makefile.sample"), "wt")
+ f.write(oldmake)
+ f.write("# additional line added\n")
+ f.close()
+
+ # also mutate master.cfg.sample
+ f = open(os.path.join(basedir, "master.cfg.sample"), "rt")
+ oldsamplecfg = f.read()
+ f = open(os.path.join(basedir, "master.cfg.sample"), "wt")
+ f.write(oldsamplecfg)
+ f.write("# additional line added\n")
+ f.close()
+
+ # now run it again (with different options)
+ options = runner.MasterOptions()
+ options.parseOptions(["-q", "--config", "other.cfg", basedir])
+ runner.createMaster(options)
+ os.chdir(cwd)
+
+ tac = open(os.path.join(basedir, "buildbot.tac"), "rt").read()
+ self.failUnlessEqual(tac, oldtac, "shouldn't change existing .tac")
+ self.failUnlessExists(os.path.join(basedir, "buildbot.tac.new"))
+
+ make = open(os.path.join(basedir, "Makefile.sample"), "rt").read()
+ self.failUnlessEqual(make, oldmake, "*should* rewrite Makefile.sample")
+
+ samplecfg = open(os.path.join(basedir, "master.cfg.sample"),
+ "rt").read()
+ self.failUnlessEqual(samplecfg, oldsamplecfg,
+ "*should* rewrite master.cfg.sample")
+
+ def testUpgradeMaster(self):
+ # first, create a master, run it briefly, then upgrade it. Nothing
+ # should change.
+ basedir = "test_runner.master2"
+ options = runner.MasterOptions()
+ options.parseOptions(["-q", basedir])
+ cwd = os.getcwd()
+ runner.createMaster(options)
+ os.chdir(cwd)
+
+ f = open(os.path.join(basedir, "master.cfg"), "w")
+ f.write(open(os.path.join(basedir, "master.cfg.sample"), "r").read())
+ f.close()
+
+ # the upgrade process (specifically the verify-master.cfg step) will
+ # create any builder status directories that weren't already created.
+ # Create those ahead of time.
+ os.mkdir(os.path.join(basedir, "full"))
+
+ files1 = self.record_files(basedir)
+
+ # upgrade it
+ options = runner.UpgradeMasterOptions()
+ options.parseOptions(["--quiet", basedir])
+ cwd = os.getcwd()
+ runner.upgradeMaster(options)
+ os.chdir(cwd)
+
+ files2 = self.record_files(basedir)
+ self.failUnlessSameFiles(files1, files2)
+
+ # now make it look like the one that 0.7.5 creates: no public_html
+ for fn in os.listdir(os.path.join(basedir, "public_html")):
+ os.unlink(os.path.join(basedir, "public_html", fn))
+ os.rmdir(os.path.join(basedir, "public_html"))
+
+ # and make sure that upgrading it re-populates public_html
+ options = runner.UpgradeMasterOptions()
+ options.parseOptions(["-q", basedir])
+ cwd = os.getcwd()
+ runner.upgradeMaster(options)
+ os.chdir(cwd)
+
+ files3 = self.record_files(basedir)
+ self.failUnlessSameFiles(files1, files3)
+
+ # now induce an error in master.cfg and make sure that upgrade
+ # notices it.
+ f = open(os.path.join(basedir, "master.cfg"), "a")
+ f.write("raise RuntimeError('catch me please')\n")
+ f.close()
+
+ options = runner.UpgradeMasterOptions()
+ options.parseOptions(["-q", basedir])
+ cwd = os.getcwd()
+ rc = runner.upgradeMaster(options)
+ os.chdir(cwd)
+ self.failUnless(rc != 0, rc)
+ # TODO: change the way runner.py works to let us pass in a stderr
+ # filehandle, and use a StringIO to capture its output, and make sure
+ # the right error messages appear therein.
+
+
+ def failUnlessSameFiles(self, files1, files2):
+ f1 = sets.Set(files1.keys())
+ f2 = sets.Set(files2.keys())
+ msg = ""
+ if f2 - f1:
+ msg += "Missing from files1: %s\n" % (list(f2-f1),)
+ if f1 - f2:
+ msg += "Missing from files2: %s\n" % (list(f1-f2),)
+ if msg:
+ self.fail(msg)
+
+ def record_files(self, basedir):
+ allfiles = {}
+ for root, dirs, files in os.walk(basedir):
+ for f in files:
+ fn = os.path.join(root, f)
+ allfiles[fn] = ("FILE", open(fn,"rb").read())
+ for d in dirs:
+ allfiles[os.path.join(root, d)] = ("DIR",)
+ return allfiles
+
+
+ def testSlave(self):
+ basedir = "test_runner.slave"
+ options = runner.SlaveOptions()
+ options.parseOptions(["-q", basedir, "buildmaster:1234",
+ "botname", "passwd"])
+ cwd = os.getcwd()
+ runner.createSlave(options)
+ os.chdir(cwd)
+
+ tac = os.path.join(basedir, "buildbot.tac")
+ self.failUnless(os.path.exists(tac))
+ tacfile = open(tac,"rt").read()
+ self.failUnlessIn("basedir", tacfile)
+ self.failUnlessIn("buildmaster_host = 'buildmaster'", tacfile)
+ self.failUnlessIn("port = 1234", tacfile)
+ self.failUnlessIn("slavename = 'botname'", tacfile)
+ self.failUnlessIn("passwd = 'passwd'", tacfile)
+ self.failUnlessIn("keepalive = 600", tacfile)
+ self.failUnlessIn("BuildSlave(buildmaster_host, port, slavename",
+ tacfile)
+
+ makefile = os.path.join(basedir, "Makefile.sample")
+ self.failUnlessExists(makefile)
+
+ self.failUnlessExists(os.path.join(basedir, "info", "admin"))
+ self.failUnlessExists(os.path.join(basedir, "info", "host"))
+ # edit one to make sure the later install doesn't change it
+ f = open(os.path.join(basedir, "info", "admin"), "wt")
+ f.write("updated@buildbot.example.org\n")
+ f.close()
+
+ # now verify that running it a second time (with the same options)
+ # does the right thing: nothing changes
+ runner.createSlave(options)
+ os.chdir(cwd)
+
+ self.failIfExists(os.path.join(basedir, "buildbot.tac.new"))
+ admin = open(os.path.join(basedir, "info", "admin"), "rt").read()
+ self.failUnlessEqual(admin, "updated@buildbot.example.org\n")
+
+
+ # mutate Makefile.sample, since it should be rewritten
+ oldmake = open(os.path.join(basedir, "Makefile.sample"), "rt").read()
+ f = open(os.path.join(basedir, "Makefile.sample"), "wt")
+ f.write(oldmake)
+ f.write("# additional line added\n")
+ f.close()
+ oldtac = open(os.path.join(basedir, "buildbot.tac"), "rt").read()
+
+ # now run it again (with different options)
+ options = runner.SlaveOptions()
+ options.parseOptions(["-q", "--keepalive", "30",
+ basedir, "buildmaster:9999",
+ "newbotname", "passwd"])
+ runner.createSlave(options)
+ os.chdir(cwd)
+
+ tac = open(os.path.join(basedir, "buildbot.tac"), "rt").read()
+ self.failUnlessEqual(tac, oldtac, "shouldn't change existing .tac")
+ self.failUnlessExists(os.path.join(basedir, "buildbot.tac.new"))
+ tacfile = open(os.path.join(basedir, "buildbot.tac.new"),"rt").read()
+ self.failUnlessIn("basedir", tacfile)
+ self.failUnlessIn("buildmaster_host = 'buildmaster'", tacfile)
+ self.failUnlessIn("port = 9999", tacfile)
+ self.failUnlessIn("slavename = 'newbotname'", tacfile)
+ self.failUnlessIn("passwd = 'passwd'", tacfile)
+ self.failUnlessIn("keepalive = 30", tacfile)
+ self.failUnlessIn("BuildSlave(buildmaster_host, port, slavename",
+ tacfile)
+
+ make = open(os.path.join(basedir, "Makefile.sample"), "rt").read()
+ self.failUnlessEqual(make, oldmake, "*should* rewrite Makefile.sample")
+
+class Try(unittest.TestCase):
+ # test some aspects of the 'buildbot try' command
+ def makeOptions(self, contents):
+ if os.path.exists(".buildbot"):
+ shutil.rmtree(".buildbot")
+ os.mkdir(".buildbot")
+ open(os.path.join(".buildbot", "options"), "w").write(contents)
+
+ def testGetopt1(self):
+ opts = "try_connect = 'ssh'\n" + "try_builders = ['a']\n"
+ self.makeOptions(opts)
+ config = runner.TryOptions()
+ config.parseOptions([])
+ t = tryclient.Try(config)
+ self.failUnlessEqual(t.connect, "ssh")
+ self.failUnlessEqual(t.builderNames, ['a'])
+
+ def testGetopt2(self):
+ opts = ""
+ self.makeOptions(opts)
+ config = runner.TryOptions()
+ config.parseOptions(['--connect=ssh', '--builder', 'a'])
+ t = tryclient.Try(config)
+ self.failUnlessEqual(t.connect, "ssh")
+ self.failUnlessEqual(t.builderNames, ['a'])
+
+ def testGetopt3(self):
+ opts = ""
+ self.makeOptions(opts)
+ config = runner.TryOptions()
+ config.parseOptions(['--connect=ssh',
+ '--builder', 'a', '--builder=b'])
+ t = tryclient.Try(config)
+ self.failUnlessEqual(t.connect, "ssh")
+ self.failUnlessEqual(t.builderNames, ['a', 'b'])
+
+ def testGetopt4(self):
+ opts = "try_connect = 'ssh'\n" + "try_builders = ['a']\n"
+ self.makeOptions(opts)
+ config = runner.TryOptions()
+ config.parseOptions(['--builder=b'])
+ t = tryclient.Try(config)
+ self.failUnlessEqual(t.connect, "ssh")
+ self.failUnlessEqual(t.builderNames, ['b'])
+
+ def testGetTopdir(self):
+ os.mkdir("gettopdir")
+ os.mkdir(os.path.join("gettopdir", "foo"))
+ os.mkdir(os.path.join("gettopdir", "foo", "bar"))
+ open(os.path.join("gettopdir", "1"),"w").write("1")
+ open(os.path.join("gettopdir", "foo", "2"),"w").write("2")
+ open(os.path.join("gettopdir", "foo", "bar", "3"),"w").write("3")
+
+ target = os.path.abspath("gettopdir")
+ t = tryclient.getTopdir("1", "gettopdir")
+ self.failUnlessEqual(os.path.abspath(t), target)
+ t = tryclient.getTopdir("1", os.path.join("gettopdir", "foo"))
+ self.failUnlessEqual(os.path.abspath(t), target)
+ t = tryclient.getTopdir("1", os.path.join("gettopdir", "foo", "bar"))
+ self.failUnlessEqual(os.path.abspath(t), target)
+
+ target = os.path.abspath(os.path.join("gettopdir", "foo"))
+ t = tryclient.getTopdir("2", os.path.join("gettopdir", "foo"))
+ self.failUnlessEqual(os.path.abspath(t), target)
+ t = tryclient.getTopdir("2", os.path.join("gettopdir", "foo", "bar"))
+ self.failUnlessEqual(os.path.abspath(t), target)
+
+ target = os.path.abspath(os.path.join("gettopdir", "foo", "bar"))
+ t = tryclient.getTopdir("3", os.path.join("gettopdir", "foo", "bar"))
+ self.failUnlessEqual(os.path.abspath(t), target)
+
+ nonexistent = "nonexistent\n29fis3kq\tBAR"
+ # hopefully there won't be a real file with that name between here
+ # and the filesystem root.
+ self.failUnlessRaises(ValueError, tryclient.getTopdir, nonexistent)
+
diff --git a/buildbot/buildbot/test/test_scheduler.py b/buildbot/buildbot/test/test_scheduler.py
new file mode 100644
index 0000000..667e349
--- /dev/null
+++ b/buildbot/buildbot/test/test_scheduler.py
@@ -0,0 +1,348 @@
+# -*- test-case-name: buildbot.test.test_scheduler -*-
+
+import os, time
+
+from twisted.trial import unittest
+from twisted.internet import defer, reactor
+from twisted.application import service
+from twisted.spread import pb
+
+from buildbot import scheduler, sourcestamp, buildset, status
+from buildbot.changes.changes import Change
+from buildbot.scripts import tryclient
+
+
+class FakeMaster(service.MultiService):
+ d = None
+ def submitBuildSet(self, bs):
+ self.sets.append(bs)
+ if self.d:
+ reactor.callLater(0, self.d.callback, bs)
+ self.d = None
+ return pb.Referenceable() # makes the cleanup work correctly
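+ # (presumably the real master would return a BuildSetStatus here; a
+ # bare Referenceable is enough for the PB round-trip exercised by the
+ # Try_Userpass test below)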
+
+class Scheduling(unittest.TestCase):
+ def setUp(self):
+ self.master = master = FakeMaster()
+ master.sets = []
+ master.startService()
+
+ def tearDown(self):
+ d = self.master.stopService()
+ return d
+
+ def addScheduler(self, s):
+ s.setServiceParent(self.master)
+
+ def testPeriodic1(self):
+ self.addScheduler(scheduler.Periodic("quickly", ["a","b"], 2))
+ d = defer.Deferred()
+ reactor.callLater(5, d.callback, None)
+ d.addCallback(self._testPeriodic1_1)
+ return d
+ def _testPeriodic1_1(self, res):
+ self.failUnless(len(self.master.sets) > 1)
+ s1 = self.master.sets[0]
+ self.failUnlessEqual(s1.builderNames, ["a","b"])
+ self.failUnlessEqual(s1.reason, "The Periodic scheduler named 'quickly' triggered this build")
+
+ def testNightly(self):
+ # now == 15-Nov-2005, 00:05:36 AM. By using mktime, this is
+ # converted into the local timezone, which happens to match what
+ # Nightly is going to do anyway.
+ MIN=60; HOUR=60*MIN; DAY=24*3600
+ now = time.mktime((2005, 11, 15, 0, 5, 36, 1, 319, 0))
+
+ s = scheduler.Nightly('nightly', ["a"], hour=3)
+ t = s.calculateNextRunTimeFrom(now)
+ self.failUnlessEqual(int(t-now), 2*HOUR+54*MIN+24)
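+ # (sanity check of the arithmetic: from 00:05:36 the next 03:00:00 is
+ # 2h 54m 24s away, i.e. 2*HOUR + 54*MIN + 24 seconds)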
+
+ s = scheduler.Nightly('nightly', ["a"], minute=[3,8,54])
+ t = s.calculateNextRunTimeFrom(now)
+ self.failUnlessEqual(int(t-now), 2*MIN+24)
+
+ s = scheduler.Nightly('nightly', ["a"],
+ dayOfMonth=16, hour=1, minute=6)
+ t = s.calculateNextRunTimeFrom(now)
+ self.failUnlessEqual(int(t-now), DAY+HOUR+24)
+
+ s = scheduler.Nightly('nightly', ["a"],
+ dayOfMonth=16, hour=1, minute=3)
+ t = s.calculateNextRunTimeFrom(now)
+ self.failUnlessEqual(int(t-now), DAY+57*MIN+24)
+
+ s = scheduler.Nightly('nightly', ["a"],
+ dayOfMonth=15, hour=1, minute=3)
+ t = s.calculateNextRunTimeFrom(now)
+ self.failUnlessEqual(int(t-now), 57*MIN+24)
+
+ s = scheduler.Nightly('nightly', ["a"],
+ dayOfMonth=15, hour=0, minute=3)
+ t = s.calculateNextRunTimeFrom(now)
+ self.failUnlessEqual(int(t-now), 30*DAY-3*MIN+24)
+
+
+ def isImportant(self, change):
+ if "important" in change.files:
+ return True
+ return False
+
+ def testBranch(self):
+ s = scheduler.Scheduler("b1", "branch1", 2, ["a","b"],
+ fileIsImportant=self.isImportant)
+ self.addScheduler(s)
+
+ c0 = Change("carol", ["important"], "other branch", branch="other")
+ s.addChange(c0)
+ self.failIf(s.timer)
+ self.failIf(s.importantChanges)
+
+ c1 = Change("alice", ["important", "not important"], "some changes",
+ branch="branch1")
+ s.addChange(c1)
+ c2 = Change("bob", ["not important", "boring"], "some more changes",
+ branch="branch1")
+ s.addChange(c2)
+ c3 = Change("carol", ["important", "dull"], "even more changes",
+ branch="branch1")
+ s.addChange(c3)
+
+ self.failUnlessEqual(s.importantChanges, [c1,c3])
+ self.failUnlessEqual(s.unimportantChanges, [c2])
+ self.failUnless(s.timer)
+
+ d = defer.Deferred()
+ reactor.callLater(4, d.callback, None)
+ d.addCallback(self._testBranch_1)
+ return d
+ def _testBranch_1(self, res):
+ self.failUnlessEqual(len(self.master.sets), 1)
+ s = self.master.sets[0].source
+ self.failUnlessEqual(s.branch, "branch1")
+ self.failUnlessEqual(s.revision, None)
+ self.failUnlessEqual(len(s.changes), 3)
+ self.failUnlessEqual(s.patch, None)
+
+
+ def testAnyBranch(self):
+ s = scheduler.AnyBranchScheduler("b1", None, 1, ["a","b"],
+ fileIsImportant=self.isImportant)
+ self.addScheduler(s)
+
+ c1 = Change("alice", ["important", "not important"], "some changes",
+ branch="branch1")
+ s.addChange(c1)
+ c2 = Change("bob", ["not important", "boring"], "some more changes",
+ branch="branch1")
+ s.addChange(c2)
+ c3 = Change("carol", ["important", "dull"], "even more changes",
+ branch="branch1")
+ s.addChange(c3)
+
+ c4 = Change("carol", ["important"], "other branch", branch="branch2")
+ s.addChange(c4)
+
+ c5 = Change("carol", ["important"], "default branch", branch=None)
+ s.addChange(c5)
+
+ d = defer.Deferred()
+ reactor.callLater(2, d.callback, None)
+ d.addCallback(self._testAnyBranch_1)
+ return d
+ def _testAnyBranch_1(self, res):
+ self.failUnlessEqual(len(self.master.sets), 3)
+ self.master.sets.sort(lambda a,b: cmp(a.source.branch,
+ b.source.branch))
+
+ s1 = self.master.sets[0].source
+ self.failUnlessEqual(s1.branch, None)
+ self.failUnlessEqual(s1.revision, None)
+ self.failUnlessEqual(len(s1.changes), 1)
+ self.failUnlessEqual(s1.patch, None)
+
+ s2 = self.master.sets[1].source
+ self.failUnlessEqual(s2.branch, "branch1")
+ self.failUnlessEqual(s2.revision, None)
+ self.failUnlessEqual(len(s2.changes), 3)
+ self.failUnlessEqual(s2.patch, None)
+
+ s3 = self.master.sets[2].source
+ self.failUnlessEqual(s3.branch, "branch2")
+ self.failUnlessEqual(s3.revision, None)
+ self.failUnlessEqual(len(s3.changes), 1)
+ self.failUnlessEqual(s3.patch, None)
+
+ def testAnyBranch2(self):
+ # like testAnyBranch but without fileIsImportant
+ s = scheduler.AnyBranchScheduler("b1", None, 2, ["a","b"])
+ self.addScheduler(s)
+ c1 = Change("alice", ["important", "not important"], "some changes",
+ branch="branch1")
+ s.addChange(c1)
+ c2 = Change("bob", ["not important", "boring"], "some more changes",
+ branch="branch1")
+ s.addChange(c2)
+ c3 = Change("carol", ["important", "dull"], "even more changes",
+ branch="branch1")
+ s.addChange(c3)
+
+ c4 = Change("carol", ["important"], "other branch", branch="branch2")
+ s.addChange(c4)
+
+ d = defer.Deferred()
+ reactor.callLater(2, d.callback, None)
+ d.addCallback(self._testAnyBranch2_1)
+ return d
+ def _testAnyBranch2_1(self, res):
+ self.failUnlessEqual(len(self.master.sets), 2)
+ self.master.sets.sort(lambda a,b: cmp(a.source.branch,
+ b.source.branch))
+ s1 = self.master.sets[0].source
+ self.failUnlessEqual(s1.branch, "branch1")
+ self.failUnlessEqual(s1.revision, None)
+ self.failUnlessEqual(len(s1.changes), 3)
+ self.failUnlessEqual(s1.patch, None)
+
+ s2 = self.master.sets[1].source
+ self.failUnlessEqual(s2.branch, "branch2")
+ self.failUnlessEqual(s2.revision, None)
+ self.failUnlessEqual(len(s2.changes), 1)
+ self.failUnlessEqual(s2.patch, None)
+
+
+ def createMaildir(self, jobdir):
+ os.mkdir(jobdir)
+ os.mkdir(os.path.join(jobdir, "new"))
+ os.mkdir(os.path.join(jobdir, "cur"))
+ os.mkdir(os.path.join(jobdir, "tmp"))
+
+ jobcounter = 1
+ def pushJob(self, jobdir, job):
+ while 1:
+ filename = "job_%d" % self.jobcounter
+ self.jobcounter += 1
+ if os.path.exists(os.path.join(jobdir, "new", filename)):
+ continue
+ if os.path.exists(os.path.join(jobdir, "tmp", filename)):
+ continue
+ if os.path.exists(os.path.join(jobdir, "cur", filename)):
+ continue
+ break
+ f = open(os.path.join(jobdir, "tmp", filename), "w")
+ f.write(job)
+ f.close()
+ os.rename(os.path.join(jobdir, "tmp", filename),
+ os.path.join(jobdir, "new", filename))
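+ # writing into tmp/ and then rename()ing into new/ follows the usual
+ # Maildir convention: the rename is atomic, so the jobdir watcher never
+ # sees a half-written job file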
+
+ def testTryJobdir(self):
+ self.master.basedir = "try_jobdir"
+ os.mkdir(self.master.basedir)
+ jobdir = "jobdir1"
+ jobdir_abs = os.path.join(self.master.basedir, jobdir)
+ self.createMaildir(jobdir_abs)
+ s = scheduler.Try_Jobdir("try1", ["a", "b"], jobdir)
+ self.addScheduler(s)
+ self.failIf(self.master.sets)
+ job1 = tryclient.createJobfile("buildsetID",
+ "branch1", "123", 1, "diff",
+ ["a", "b"])
+ self.master.d = d = defer.Deferred()
+ self.pushJob(jobdir_abs, job1)
+ d.addCallback(self._testTryJobdir_1)
+ # N.B.: if we don't have DNotify, we poll every 10 seconds, so don't
+ # set a .timeout here shorter than that. TODO: make it possible to
+ # set the polling interval, so we can make it shorter.
+ return d
+
+ def _testTryJobdir_1(self, bs):
+ self.failUnlessEqual(bs.builderNames, ["a", "b"])
+ self.failUnlessEqual(bs.source.branch, "branch1")
+ self.failUnlessEqual(bs.source.revision, "123")
+ self.failUnlessEqual(bs.source.patch, (1, "diff"))
+
+
+ def testTryUserpass(self):
+ up = [("alice","pw1"), ("bob","pw2")]
+ s = scheduler.Try_Userpass("try2", ["a", "b"], 0, userpass=up)
+ self.addScheduler(s)
+ port = s.getPort()
+ config = {'connect': 'pb',
+ 'username': 'alice',
+ 'passwd': 'pw1',
+ 'master': "localhost:%d" % port,
+ 'builders': ["a", "b"],
+ }
+ t = tryclient.Try(config)
+ ss = sourcestamp.SourceStamp("branch1", "123", (1, "diff"))
+ t.sourcestamp = ss
+ d2 = self.master.d = defer.Deferred()
+ d = t.deliverJob()
+ d.addCallback(self._testTryUserpass_1, t, d2)
+ return d
+ testTryUserpass.timeout = 5
+ def _testTryUserpass_1(self, res, t, d2):
+ # at this point, the Try object should have a RemoteReference to the
+ # status object. The FakeMaster returns a stub.
+ self.failUnless(t.buildsetStatus)
+ d2.addCallback(self._testTryUserpass_2, t)
+ return d2
+ def _testTryUserpass_2(self, bs, t):
+ # this should be the BuildSet submitted by the TryScheduler
+ self.failUnlessEqual(bs.builderNames, ["a", "b"])
+ self.failUnlessEqual(bs.source.branch, "branch1")
+ self.failUnlessEqual(bs.source.revision, "123")
+ self.failUnlessEqual(bs.source.patch, (1, "diff"))
+
+ t.cleanup()
+
+ # twisted-2.0.1 (but not later versions) seems to require a reactor
+ # iteration before stopListening actually works. TODO: investigate
+ # this.
+ d = defer.Deferred()
+ reactor.callLater(0, d.callback, None)
+ return d
+
+ def testGetBuildSets(self):
+ # validate IStatus.getBuildSets
+ s = status.builder.Status(None, ".")
+ bs1 = buildset.BuildSet(["a","b"], sourcestamp.SourceStamp(),
+ reason="one", bsid="1")
+ s.buildsetSubmitted(bs1.status)
+ self.failUnlessEqual(s.getBuildSets(), [bs1.status])
+ bs1.status.notifyFinishedWatchers()
+ self.failUnlessEqual(s.getBuildSets(), [])
+
+ def testCategory(self):
+ s1 = scheduler.Scheduler("b1", "branch1", 2, ["a","b"], categories=["categoryA", "both"])
+ self.addScheduler(s1)
+ s2 = scheduler.Scheduler("b2", "branch1", 2, ["a","b"], categories=["categoryB", "both"])
+ self.addScheduler(s2)
+
+ c0 = Change("carol", ["important"], "branch1", branch="branch1", category="categoryA")
+ s1.addChange(c0)
+ s2.addChange(c0)
+
+ c1 = Change("carol", ["important"], "branch1", branch="branch1", category="categoryB")
+ s1.addChange(c1)
+ s2.addChange(c1)
+
+ c2 = Change("carol", ["important"], "branch1", branch="branch1")
+ s1.addChange(c2)
+ s2.addChange(c2)
+
+ c3 = Change("carol", ["important"], "branch1", branch="branch1", category="both")
+ s1.addChange(c3)
+ s2.addChange(c3)
+
+ self.failUnlessEqual(s1.importantChanges, [c0, c3])
+ self.failUnlessEqual(s2.importantChanges, [c1, c3])
+
+ s = scheduler.Scheduler("b3", "branch1", 2, ["a","b"])
+ self.addScheduler(s)
+
+ c0 = Change("carol", ["important"], "branch1", branch="branch1", category="categoryA")
+ s.addChange(c0)
+ c1 = Change("carol", ["important"], "branch1", branch="branch1", category="categoryB")
+ s.addChange(c1)
+
+ self.failUnlessEqual(s.importantChanges, [c0, c1])
diff --git a/buildbot/buildbot/test/test_shell.py b/buildbot/buildbot/test/test_shell.py
new file mode 100644
index 0000000..52a17f4
--- /dev/null
+++ b/buildbot/buildbot/test/test_shell.py
@@ -0,0 +1,138 @@
+
+
+# test step.ShellCommand and the slave-side commands.ShellCommand
+
+import sys, time, os
+from twisted.trial import unittest
+from twisted.internet import reactor, defer
+from twisted.python import util
+from buildbot.slave.commands import SlaveShellCommand
+from buildbot.test.runutils import SlaveCommandTestBase
+
+class SlaveSide(SlaveCommandTestBase, unittest.TestCase):
+ def testOne(self):
+ self.setUpBuilder("test_shell.testOne")
+ emitcmd = util.sibpath(__file__, "emit.py")
+ args = {
+ 'command': [sys.executable, emitcmd, "0"],
+ 'workdir': ".",
+ }
+ d = self.startCommand(SlaveShellCommand, args)
+ d.addCallback(self.collectUpdates)
+ def _check(logs):
+ self.failUnlessEqual(logs['stdout'], "this is stdout\n")
+ self.failUnlessEqual(logs['stderr'], "this is stderr\n")
+ d.addCallback(_check)
+ return d
+
+ # TODO: move test_slavecommand.Shell and .ShellPTY over here
+
+ def _generateText(self, filename):
+ lines = []
+ for i in range(3):
+ lines.append("this is %s %d\n" % (filename, i))
+ return "".join(lines)
+
+ def testLogFiles_0(self):
+ return self._testLogFiles(0)
+
+ def testLogFiles_1(self):
+ return self._testLogFiles(1)
+
+ def testLogFiles_2(self):
+ return self._testLogFiles(2)
+
+ def testLogFiles_3(self):
+ return self._testLogFiles(3)
+
+ def _testLogFiles(self, mode):
+ basedir = "test_shell.testLogFiles"
+ self.setUpBuilder(basedir)
+ # emitlogs.py writes two lines to stdout and two logfiles, one second
+ # apart. Then it waits for us to write something to stdin, then it
+ # writes one more line.
+
+ if mode != 3:
+ # we write something to the log file first, to exercise the logic
+ # that distinguishes between the old file and the one as modified
+ # by the ShellCommand. We set the timestamp back 5 seconds so
+ # that timestamps can be used to distinguish old from new.
+ log2file = os.path.join(basedir, "log2.out")
+ f = open(log2file, "w")
+ f.write("dummy text\n")
+ f.close()
+ earlier = time.time() - 5
+ os.utime(log2file, (earlier, earlier))
+
+ if mode == 3:
+ # mode=3 doesn't create the old logfiles in the first place, but
+ # then behaves like mode=1 (where the command pauses before
+ # creating them).
+ mode = 1
+
+ # mode=1 will cause emitlogs.py to delete the old logfiles first, and
+ # then wait two seconds before creating the new files. mode=0 does
+ # not do this.
+ args = {
+ 'command': [sys.executable,
+ util.sibpath(__file__, "emitlogs.py"),
+ "%s" % mode],
+ 'workdir': ".",
+ 'logfiles': {"log2": "log2.out",
+ "log3": "log3.out"},
+ 'keep_stdin_open': True,
+ }
+ finishd = self.startCommand(SlaveShellCommand, args)
+ # The first batch of lines is written immediately. The second is
+ # written after a pause of one second. We poll once per second until
+ # we see both batches.
+
+ self._check_timeout = 10
+ d = self._check_and_wait()
+ def _wait_for_finish(res, finishd):
+ return finishd
+ d.addCallback(_wait_for_finish, finishd)
+ d.addCallback(self.collectUpdates)
+ def _check(logs):
+ self.failUnlessEqual(logs['stdout'], self._generateText("stdout"))
+ if mode == 2:
+ self.failIf(('log','log2') in logs)
+ self.failIf(('log','log3') in logs)
+ else:
+ self.failUnlessEqual(logs[('log','log2')],
+ self._generateText("log2"))
+ self.failUnlessEqual(logs[('log','log3')],
+ self._generateText("log3"))
+ d.addCallback(_check)
+ d.addBoth(self._maybePrintError)
+ return d
+
+ def _check_and_wait(self, res=None):
+ self._check_timeout -= 1
+ if self._check_timeout <= 0:
+ raise defer.TimeoutError("gave up on command")
+ logs = self.collectUpdates()
+ if logs.get('stdout') == "this is stdout 0\nthis is stdout 1\n":
+ # the emitlogs.py process is now waiting for something to arrive
+ # on stdin
+ self.cmd.command.pp.transport.write("poke\n")
+ return
+ if not self.cmd.running:
+ self.fail("command finished too early")
+ spin = defer.Deferred()
+ spin.addCallback(self._check_and_wait)
+ reactor.callLater(1, spin.callback, None)
+ return spin
+
+ def _maybePrintError(self, res):
+ rc = self.findRC()
+ if rc != 0:
+ print "Command ended with rc=%s" % rc
+ print "STDERR:"
+ self.printStderr()
+ return res
+
+ # MAYBE TODO: a command which appends to an existing logfile should
+ # result in only the new text being sent up to the master. I need to
+ # think about this more first.
+
diff --git a/buildbot/buildbot/test/test_slavecommand.py b/buildbot/buildbot/test/test_slavecommand.py
new file mode 100644
index 0000000..9809163
--- /dev/null
+++ b/buildbot/buildbot/test/test_slavecommand.py
@@ -0,0 +1,294 @@
+# -*- test-case-name: buildbot.test.test_slavecommand -*-
+
+from twisted.trial import unittest
+from twisted.internet import reactor, interfaces
+from twisted.python import runtime, failure, util
+
+import os, sys
+
+from buildbot.slave import commands
+SlaveShellCommand = commands.SlaveShellCommand
+
+from buildbot.test.runutils import SignalMixin, FakeSlaveBuilder
+
+# test slavecommand.py by running the various commands with a fake
+# SlaveBuilder object that logs the calls to sendUpdate()
+
+class Utilities(unittest.TestCase):
+ def mkdir(self, basedir, path, mode=None):
+ fn = os.path.join(basedir, path)
+ os.makedirs(fn)
+ if mode is not None:
+ os.chmod(fn, mode)
+
+ def touch(self, basedir, path, mode=None):
+ fn = os.path.join(basedir, path)
+ f = open(fn, "w")
+ f.write("touch\n")
+ f.close()
+ if mode is not None:
+ os.chmod(fn, mode)
+
+ def test_rmdirRecursive(self):
+ basedir = "slavecommand/Utilities/test_rmdirRecursive"
+ os.makedirs(basedir)
+ d = os.path.join(basedir, "doomed")
+ self.mkdir(d, "a/b")
+ self.touch(d, "a/b/1.txt")
+ self.touch(d, "a/b/2.txt", 0444)
+ self.touch(d, "a/b/3.txt", 0)
+ self.mkdir(d, "a/c")
+ self.touch(d, "a/c/1.txt")
+ self.touch(d, "a/c/2.txt", 0444)
+ self.touch(d, "a/c/3.txt", 0)
+ os.chmod(os.path.join(d, "a/c"), 0444)
+ self.mkdir(d, "a/d")
+ self.touch(d, "a/d/1.txt")
+ self.touch(d, "a/d/2.txt", 0444)
+ self.touch(d, "a/d/3.txt", 0)
+ os.chmod(os.path.join(d, "a/d"), 0)
+
+ commands.rmdirRecursive(d)
+ self.failIf(os.path.exists(d))
+
+
+class ShellBase(SignalMixin):
+
+ def setUp(self):
+ self.basedir = "test_slavecommand"
+ if not os.path.isdir(self.basedir):
+ os.mkdir(self.basedir)
+ self.subdir = os.path.join(self.basedir, "subdir")
+ if not os.path.isdir(self.subdir):
+ os.mkdir(self.subdir)
+ self.builder = FakeSlaveBuilder(self.usePTY, self.basedir)
+ self.emitcmd = util.sibpath(__file__, "emit.py")
+ self.subemitcmd = os.path.join(util.sibpath(__file__, "subdir"),
+ "emit.py")
+ self.sleepcmd = util.sibpath(__file__, "sleep.py")
+
+ def failUnlessIn(self, substring, string):
+ self.failUnless(string.find(substring) != -1,
+ "'%s' not in '%s'" % (substring, string))
+
+ def getfile(self, which):
+ got = ""
+ for r in self.builder.updates:
+ if r.has_key(which):
+ got += r[which]
+ return got
+
+ def checkOutput(self, expected):
+ """
+ @type expected: list of (streamname, contents) tuples
+ @param expected: the expected output
+ """
+ expected_linesep = os.linesep
+ if self.usePTY:
+ # PTYs change the line ending. I'm not sure why.
+ expected_linesep = "\r\n"
+ expected = [(stream, contents.replace("\n", expected_linesep, 1000))
+ for (stream, contents) in expected]
+ if self.usePTY:
+ # PTYs merge stdout+stderr into a single stream
+ expected = [('stdout', contents)
+ for (stream, contents) in expected]
+ # now merge everything into one string per stream
+ streams = {}
+ for (stream, contents) in expected:
+ streams[stream] = streams.get(stream, "") + contents
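+ # e.g. with usePTY=True, an expected list of
+ #   [('stdout', 'a\n'), ('stderr', 'b\n')]
+ # has collapsed into {'stdout': 'a\r\nb\r\n'} at this point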
+ for (stream, contents) in streams.items():
+ got = self.getfile(stream)
+ self.assertEquals(got, contents)
+
+ def getrc(self):
+ # updates[-2] is the rc, unless the step was interrupted
+ # updates[-1] is the elapsed-time header
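+ # (illustrative: a normal run ends with [..., {'rc': 0}, {'header': ...}],
+ # while an interrupted run may have the rc dict as the very last update)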
+ u = self.builder.updates[-1]
+ if "rc" not in u:
+ self.failUnless(len(self.builder.updates) >= 2)
+ u = self.builder.updates[-2]
+ self.failUnless("rc" in u)
+ return u['rc']
+ def checkrc(self, expected):
+ got = self.getrc()
+ self.assertEquals(got, expected)
+
+ def testShell1(self):
+ targetfile = os.path.join(self.basedir, "log1.out")
+ if os.path.exists(targetfile):
+ os.unlink(targetfile)
+ cmd = "%s %s 0" % (sys.executable, self.emitcmd)
+ args = {'command': cmd, 'workdir': '.', 'timeout': 60}
+ c = SlaveShellCommand(self.builder, None, args)
+ d = c.start()
+ expected = [('stdout', "this is stdout\n"),
+ ('stderr', "this is stderr\n")]
+ d.addCallback(self._checkPass, expected, 0)
+ def _check_targetfile(res):
+ self.failUnless(os.path.exists(targetfile))
+ d.addCallback(_check_targetfile)
+ return d
+
+ def _checkPass(self, res, expected, rc):
+ self.checkOutput(expected)
+ self.checkrc(rc)
+
+ def testShell2(self):
+ cmd = [sys.executable, self.emitcmd, "0"]
+ args = {'command': cmd, 'workdir': '.', 'timeout': 60}
+ c = SlaveShellCommand(self.builder, None, args)
+ d = c.start()
+ expected = [('stdout', "this is stdout\n"),
+ ('stderr', "this is stderr\n")]
+ d.addCallback(self._checkPass, expected, 0)
+ return d
+
+ def testShellRC(self):
+ cmd = [sys.executable, self.emitcmd, "1"]
+ args = {'command': cmd, 'workdir': '.', 'timeout': 60}
+ c = SlaveShellCommand(self.builder, None, args)
+ d = c.start()
+ expected = [('stdout', "this is stdout\n"),
+ ('stderr', "this is stderr\n")]
+ d.addCallback(self._checkPass, expected, 1)
+ return d
+
+ def testShellEnv(self):
+ cmd = "%s %s 0" % (sys.executable, self.emitcmd)
+ args = {'command': cmd, 'workdir': '.',
+ 'env': {'EMIT_TEST': "envtest"}, 'timeout': 60}
+ c = SlaveShellCommand(self.builder, None, args)
+ d = c.start()
+ expected = [('stdout', "this is stdout\n"),
+ ('stderr', "this is stderr\n"),
+ ('stdout', "EMIT_TEST: envtest\n"),
+ ]
+ d.addCallback(self._checkPass, expected, 0)
+ return d
+
+ def testShellSubdir(self):
+ targetfile = os.path.join(self.basedir, "subdir", "log1.out")
+ if os.path.exists(targetfile):
+ os.unlink(targetfile)
+ cmd = "%s %s 0" % (sys.executable, self.subemitcmd)
+ args = {'command': cmd, 'workdir': "subdir", 'timeout': 60}
+ c = SlaveShellCommand(self.builder, None, args)
+ d = c.start()
+ expected = [('stdout', "this is stdout in subdir\n"),
+ ('stderr', "this is stderr\n")]
+ d.addCallback(self._checkPass, expected, 0)
+ def _check_targetfile(res):
+ self.failUnless(os.path.exists(targetfile))
+ d.addCallback(_check_targetfile)
+ return d
+
+ def testShellMissingCommand(self):
+ args = {'command': "/bin/EndWorldHungerAndMakePigsFly",
+ 'workdir': '.', 'timeout': 10,
+ 'env': {"LC_ALL": "C"},
+ }
+ c = SlaveShellCommand(self.builder, None, args)
+ d = c.start()
+ d.addCallback(self._testShellMissingCommand_1)
+ return d
+ def _testShellMissingCommand_1(self, res):
+ self.failIfEqual(self.getrc(), 0)
+ # we used to check the error message to make sure it said something
+ # about a missing command, but there are a variety of shells out
+ # there, and they emit messages in a variety of languages, so we
+ # stopped trying.
+
+ def testTimeout(self):
+ args = {'command': [sys.executable, self.sleepcmd, "10"],
+ 'workdir': '.', 'timeout': 2}
+ c = SlaveShellCommand(self.builder, None, args)
+ d = c.start()
+ d.addCallback(self._testTimeout_1)
+ return d
+ def _testTimeout_1(self, res):
+ self.failIfEqual(self.getrc(), 0)
+ got = self.getfile('header')
+ self.failUnlessIn("command timed out: 2 seconds without output", got)
+ if runtime.platformType == "posix":
+ # the "killing pid" message is not present in windows
+ self.failUnlessIn("killing pid", got)
+ # but the process *ought* to be killed somehow
+ self.failUnlessIn("process killed by signal", got)
+ #print got
+ if runtime.platformType != 'posix':
+ testTimeout.todo = "timeout doesn't appear to work under windows"
+
+ def testInterrupt1(self):
+ args = {'command': [sys.executable, self.sleepcmd, "10"],
+ 'workdir': '.', 'timeout': 20}
+ c = SlaveShellCommand(self.builder, None, args)
+ d = c.start()
+ reactor.callLater(1, c.interrupt)
+ d.addCallback(self._testInterrupt1_1)
+ return d
+ def _testInterrupt1_1(self, res):
+ self.failIfEqual(self.getrc(), 0)
+ got = self.getfile('header')
+ self.failUnlessIn("command interrupted", got)
+ if runtime.platformType == "posix":
+ self.failUnlessIn("process killed by signal", got)
+ if runtime.platformType != 'posix':
+ testInterrupt1.todo = "interrupt doesn't appear to work under windows"
+
+
+ # todo: twisted-specific command tests
+
+class Shell(ShellBase, unittest.TestCase):
+ usePTY = False
+
+ def testInterrupt2(self):
+ # test the backup timeout. This doesn't work under a PTY, because the
+ # transport.loseConnection we do in the timeout handler actually
+ # *does* kill the process.
+ args = {'command': [sys.executable, self.sleepcmd, "5"],
+ 'workdir': '.', 'timeout': 20}
+ c = SlaveShellCommand(self.builder, None, args)
+ d = c.start()
+ c.command.BACKUP_TIMEOUT = 1
+ # make it unable to kill the child, by changing the signal it uses
+ # from SIGKILL to the do-nothing signal 0.
+ c.command.KILL = None
+ reactor.callLater(1, c.interrupt)
+ d.addBoth(self._testInterrupt2_1)
+ return d
+ def _testInterrupt2_1(self, res):
+ # the slave should raise a TimeoutError exception. In a normal build
+ # process (i.e. one that uses step.RemoteShellCommand), this
+ # exception will be handed to the Step, which will acquire an ERROR
+ # status. In our test environment, it isn't such a big deal.
+ self.failUnless(isinstance(res, failure.Failure),
+ "res is not a Failure: %s" % (res,))
+ self.failUnless(res.check(commands.TimeoutError))
+ self.checkrc(-1)
+ return
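+ # NOTE: the early return above means the rest of this method (starting
+ # a second command while the first one lingers) is currently skipped.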
+ # the command is still actually running. Start another command, to
+ # make sure that a) the old command's output doesn't interfere with
+ # the new one, and b) the old command's actual termination doesn't
+ # break anything
+ args = {'command': [sys.executable, self.sleepcmd, "5"],
+ 'workdir': '.', 'timeout': 20}
+ c = SlaveShellCommand(self.builder, None, args)
+ d = c.start()
+ d.addCallback(self._testInterrupt2_2)
+ return d
+ def _testInterrupt2_2(self, res):
+ self.checkrc(0)
+ # N.B.: under windows, the trial process hangs out for another few
+ # seconds. I assume that the win32eventreactor is waiting for one of
+ # the lingering child processes to really finish.
+
+haveProcess = interfaces.IReactorProcess(reactor, None)
+if runtime.platformType == 'posix':
+ # test with PTYs also
+ class ShellPTY(ShellBase, unittest.TestCase):
+ usePTY = True
+ if not haveProcess:
+ ShellPTY.skip = "this reactor doesn't support IReactorProcess"
+if not haveProcess:
+ Shell.skip = "this reactor doesn't support IReactorProcess"
diff --git a/buildbot/buildbot/test/test_slaves.py b/buildbot/buildbot/test/test_slaves.py
new file mode 100644
index 0000000..4005fc6
--- /dev/null
+++ b/buildbot/buildbot/test/test_slaves.py
@@ -0,0 +1,991 @@
+# -*- test-case-name: buildbot.test.test_slaves -*-
+
+# Portions copyright Canonical Ltd. 2009
+
+from twisted.trial import unittest
+from twisted.internet import defer, reactor
+from twisted.python import log, runtime, failure
+
+from buildbot.buildslave import AbstractLatentBuildSlave
+from buildbot.test.runutils import RunMixin
+from buildbot.sourcestamp import SourceStamp
+from buildbot.process.base import BuildRequest
+from buildbot.status.builder import SUCCESS
+from buildbot.status import mail
+from buildbot.slave import bot
+
+config_1 = """
+from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.buildslave import BuildSlave
+s = factory.s
+
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit'), BuildSlave('bot2', 'sekrit'),
+ BuildSlave('bot3', 'sekrit')]
+c['schedulers'] = []
+c['slavePortnum'] = 0
+c['schedulers'] = []
+
+f1 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=1)])
+f2 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=2)])
+f3 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=3)])
+f4 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=5)])
+
+c['builders'] = [
+ {'name': 'b1', 'slavenames': ['bot1','bot2','bot3'],
+ 'builddir': 'b1', 'factory': f1},
+ ]
+"""
+
+config_2 = config_1 + """
+
+c['builders'] = [
+ {'name': 'b1', 'slavenames': ['bot1','bot2','bot3'],
+ 'builddir': 'b1', 'factory': f2},
+ ]
+
+"""
+
+config_busyness = config_1 + """
+c['builders'] = [
+ {'name': 'b1', 'slavenames': ['bot1'],
+ 'builddir': 'b1', 'factory': f3},
+ {'name': 'b2', 'slavenames': ['bot1'],
+ 'builddir': 'b2', 'factory': f4},
+ ]
+"""
+
+class Slave(RunMixin, unittest.TestCase):
+
+ def setUp(self):
+ RunMixin.setUp(self)
+ self.master.loadConfig(config_1)
+ self.master.startService()
+ d = self.connectSlave(["b1"])
+ d.addCallback(lambda res: self.connectSlave(["b1"], "bot2"))
+ return d
+
+ def doBuild(self, buildername):
+ br = BuildRequest("forced", SourceStamp(), 'test_builder')
+ d = br.waitUntilFinished()
+ self.control.getBuilder(buildername).requestBuild(br)
+ return d
+
+ def testSequence(self):
+ # make sure both slaves appear in the list.
+ attached_slaves = [c for c in self.master.botmaster.slaves.values()
+ if c.slave]
+ self.failUnlessEqual(len(attached_slaves), 2)
+ b = self.master.botmaster.builders["b1"]
+ self.failUnlessEqual(len(b.slaves), 2)
+
+ # since the current scheduling algorithm is simple and does not
+ # rotate or attempt any sort of load-balancing, two builds in
+ # sequence should both use the first slave. This may change later if
+ # we move to a more sophisticated scheme.
+ b.CHOOSE_SLAVES_RANDOMLY = False
+
+ d = self.doBuild("b1")
+ d.addCallback(self._testSequence_1)
+ return d
+ def _testSequence_1(self, res):
+ self.failUnlessEqual(res.getResults(), SUCCESS)
+ self.failUnlessEqual(res.getSlavename(), "bot1")
+
+ d = self.doBuild("b1")
+ d.addCallback(self._testSequence_2)
+ return d
+ def _testSequence_2(self, res):
+ self.failUnlessEqual(res.getSlavename(), "bot1")
+
+
+ def testSimultaneous(self):
+ # make sure we can actually run two builds at the same time
+ d1 = self.doBuild("b1")
+ d2 = self.doBuild("b1")
+ d1.addCallback(self._testSimultaneous_1, d2)
+ return d1
+ def _testSimultaneous_1(self, res, d2):
+ self.failUnlessEqual(res.getResults(), SUCCESS)
+ b1_slavename = res.getSlavename()
+ d2.addCallback(self._testSimultaneous_2, b1_slavename)
+ return d2
+ def _testSimultaneous_2(self, res, b1_slavename):
+ self.failUnlessEqual(res.getResults(), SUCCESS)
+ b2_slavename = res.getSlavename()
+ # make sure the two builds were run by different slaves
+ slavenames = [b1_slavename, b2_slavename]
+ slavenames.sort()
+ self.failUnlessEqual(slavenames, ["bot1", "bot2"])
+
+ def testFallback1(self):
+ # detach the first slave, verify that a build is run using the second
+ # slave instead
+ d = self.shutdownSlave("bot1", "b1")
+ d.addCallback(self._testFallback1_1)
+ return d
+ def _testFallback1_1(self, res):
+ attached_slaves = [c for c in self.master.botmaster.slaves.values()
+ if c.slave]
+ self.failUnlessEqual(len(attached_slaves), 1)
+ self.failUnlessEqual(len(self.master.botmaster.builders["b1"].slaves),
+ 1)
+ d = self.doBuild("b1")
+ d.addCallback(self._testFallback1_2)
+ return d
+ def _testFallback1_2(self, res):
+ self.failUnlessEqual(res.getResults(), SUCCESS)
+ self.failUnlessEqual(res.getSlavename(), "bot2")
+
+ def testFallback2(self):
+ # Disable the first slave, so that a slaveping will timeout. Then
+ # start a build, and verify that the non-failing (second) one is
+ # claimed for the build, and that the failing one is removed from the
+ # list.
+
+ b1 = self.master.botmaster.builders["b1"]
+ # reduce the ping time so we'll failover faster
+ b1.START_BUILD_TIMEOUT = 1
+ assert b1.CHOOSE_SLAVES_RANDOMLY
+ b1.CHOOSE_SLAVES_RANDOMLY = False
+ self.disappearSlave("bot1", "b1", allowReconnect=False)
+ d = self.doBuild("b1")
+ d.addCallback(self._testFallback2_1)
+ return d
+ def _testFallback2_1(self, res):
+ self.failUnlessEqual(res.getResults(), SUCCESS)
+ self.failUnlessEqual(res.getSlavename(), "bot2")
+ b1slaves = self.master.botmaster.builders["b1"].slaves
+ self.failUnlessEqual(len(b1slaves), 1, "whoops: %s" % (b1slaves,))
+ self.failUnlessEqual(b1slaves[0].slave.slavename, "bot2")
+
+
+ def notFinished(self, brs):
+ # utility method
+ builds = brs.getBuilds()
+ self.failIf(len(builds) > 1)
+ if builds:
+ self.failIf(builds[0].isFinished())
+
+ def testDontClaimPingingSlave(self):
+ # have two slaves connect for the same builder. Do something to the
+ # first one so that slavepings are delayed (but do not fail
+ # outright).
+ timers = []
+ self.slaves['bot1'].debugOpts["stallPings"] = (10, timers)
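+ # ("stallPings" asks the fake slave to delay its ping replies by 10
+ # seconds and to record the DelayedCall in `timers`, so we can release
+ # the ping early with timers[0].reset(0) further down.)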
+ br = BuildRequest("forced", SourceStamp(), 'test_builder')
+ d1 = br.waitUntilFinished()
+ self.master.botmaster.builders["b1"].CHOOSE_SLAVES_RANDOMLY = False
+ self.control.getBuilder("b1").requestBuild(br)
+ s1 = br.status # this is a BuildRequestStatus
+ # give it a chance to start pinging
+ d2 = defer.Deferred()
+ d2.addCallback(self._testDontClaimPingingSlave_1, d1, s1, timers)
+ reactor.callLater(1, d2.callback, None)
+ return d2
+ def _testDontClaimPingingSlave_1(self, res, d1, s1, timers):
+ # now the first build is running (waiting on the ping), so start the
+ # second build. This should claim the second slave, not the first,
+ # because the first is busy doing the ping.
+ self.notFinished(s1)
+ d3 = self.doBuild("b1")
+ d3.addCallback(self._testDontClaimPingingSlave_2, d1, s1, timers)
+ return d3
+ def _testDontClaimPingingSlave_2(self, res, d1, s1, timers):
+ self.failUnlessEqual(res.getSlavename(), "bot2")
+ self.notFinished(s1)
+ # now let the ping complete
+ self.failUnlessEqual(len(timers), 1)
+ timers[0].reset(0)
+ d1.addCallback(self._testDontClaimPingingSlave_3)
+ return d1
+ def _testDontClaimPingingSlave_3(self, res):
+ self.failUnlessEqual(res.getSlavename(), "bot1")
+
+class FakeLatentBuildSlave(AbstractLatentBuildSlave):
+
+ testcase = None
+ stop_wait = None
+ start_message = None
+ stopped = testing_substantiation_timeout = False
+
+ def start_instance(self):
+ # responsible for starting the instance that will try to connect to
+ # this master
+ # simulate having to do some work.
+ d = defer.Deferred()
+ if not self.testing_substantiation_timeout:
+ reactor.callLater(0, self._start_instance, d)
+ return d
+
+ def _start_instance(self, d):
+ self.testcase.connectOneSlave(self.slavename)
+ d.callback(self.start_message)
+
+ def stop_instance(self, fast=False):
+ # responsible for shutting down the instance
+ # we're going to emulate dropping off the net.
+
+ # simulate this by replacing the slave Broker's .dataReceived method
+ # with one that just throws away all data.
+ self.fast_stop_request = fast
+ if self.slavename not in self.testcase.slaves:
+ assert self.testing_substantiation_timeout
+ self.stopped = True
+ return defer.succeed(None)
+ d = defer.Deferred()
+ if self.stop_wait is None:
+ self._stop_instance(d)
+ else:
+ reactor.callLater(self.stop_wait, self._stop_instance, d)
+ return d
+
+ def _stop_instance(self, d):
+ try:
+ s = self.testcase.slaves.pop(self.slavename)
+ except KeyError:
+ pass
+ else:
+ def discard(data):
+ pass
+ bot = s.getServiceNamed("bot")
+ for buildername in self.slavebuilders:
+ remote = bot.builders[buildername].remote
+ if remote is None:
+ continue
+ broker = remote.broker
+ broker.dataReceived = discard # seal its ears
+ broker.transport.write = discard # and take away its voice
+ # also discourage it from reconnecting once the connection goes away
+ s.bf.continueTrying = False
+ # stop the service for cleanliness
+ s.stopService()
+ d.callback(None)
+
+latent_config = """
+from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.buildslave import BuildSlave
+from buildbot.test.test_slaves import FakeLatentBuildSlave
+s = factory.s
+
+BuildmasterConfig = c = {}
+c['slaves'] = [FakeLatentBuildSlave('bot1', 'sekrit',
+ ),
+ FakeLatentBuildSlave('bot2', 'sekrit',
+ ),
+ BuildSlave('bot3', 'sekrit')]
+c['schedulers'] = []
+c['slavePortnum'] = 0
+c['schedulers'] = []
+
+f1 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=1)])
+f2 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=2)])
+f3 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=3)])
+f4 = factory.BuildFactory([s(dummy.RemoteDummy, timeout=5)])
+
+c['builders'] = [
+ {'name': 'b1', 'slavenames': ['bot1','bot2','bot3'],
+ 'builddir': 'b1', 'factory': f1},
+ ]
+"""
+
+
+class LatentSlave(RunMixin, unittest.TestCase):
+
+ def setUp(self):
+ # debugging
+ #import twisted.internet.base
+ #twisted.internet.base.DelayedCall.debug = True
+ # debugging
+ RunMixin.setUp(self)
+ self.master.loadConfig(latent_config)
+ self.master.startService()
+ self.bot1 = self.master.botmaster.slaves['bot1']
+ self.bot2 = self.master.botmaster.slaves['bot2']
+ self.bot3 = self.master.botmaster.slaves['bot3']
+ self.bot1.testcase = self
+ self.bot2.testcase = self
+ self.b1 = self.master.botmaster.builders['b1']
+
+ def doBuild(self, buildername):
+ br = BuildRequest("forced", SourceStamp(), 'test_builder')
+ d = br.waitUntilFinished()
+ self.control.getBuilder(buildername).requestBuild(br)
+ return d
+
+ def testSequence(self):
+ # make sure both slaves appear in the builder. This happens
+ # automatically, without any attaching.
+ self.assertEqual(len(self.b1.slaves), 2)
+ self.assertEqual(sorted(sb.slave.slavename for sb in self.b1.slaves),
+ ['bot1', 'bot2'])
+ # These have not substantiated
+ self.assertEqual([sb.slave.substantiated for sb in self.b1.slaves],
+ [False, False])
+ self.assertEqual([sb.slave.slave for sb in self.b1.slaves],
+ [None, None])
+ # we can mix and match latent slaves and normal slaves. ATM, they
+ # are treated identically in terms of selecting slaves.
+ d = self.connectSlave(builders=['b1'], slavename='bot3')
+ d.addCallback(self._testSequence_1)
+ return d
+ def _testSequence_1(self, res):
+ # now we have all three slaves. Two are latent slaves, and one is a
+ # standard slave.
+ self.assertEqual(sorted(sb.slave.slavename for sb in self.b1.slaves),
+ ['bot1', 'bot2', 'bot3'])
+ # Now it's time to try a build on one of the latent slaves,
+ # substantiating it.
+ # since the current scheduling algorithm is simple and does not
+ # rotate or attempt any sort of load-balancing, two builds in
+ # sequence should both use the first slave. This may change later if
+ # we move to a more sophisticated scheme.
+ self.b1.CHOOSE_SLAVES_RANDOMLY = False
+
+ self.build_deferred = self.doBuild("b1")
+ # now there's an event waiting for the slave to substantiate.
+ e = self.b1.builder_status.getEvent(-1)
+ self.assertEqual(e.text, ['substantiating'])
+ # the substantiation_deferred is an internal stash of a deferred
+ # that we'll grab so we can find the point at which the slave is
+ # substantiated but the build has not yet started.
+ d = self.bot1.substantiation_deferred
+ self.assertNotIdentical(d, None)
+ d.addCallback(self._testSequence_2)
+ return d
+ def _testSequence_2(self, res):
+ # bot 1 is substantiated.
+ self.assertNotIdentical(self.bot1.slave, None)
+ self.failUnless(self.bot1.substantiated)
+ # the event has announced its success
+ e = self.b1.builder_status.getEvent(-1)
+ self.assertEqual(e.text, ['substantiate', 'success'])
+ self.assertNotIdentical(e.finished, None)
+ # now we'll wait for the build to complete
+ d = self.build_deferred
+ del self.build_deferred
+ d.addCallback(self._testSequence_3)
+ return d
+ def _testSequence_3(self, res):
+ # build was a success!
+ self.failUnlessEqual(res.getResults(), SUCCESS)
+ self.failUnlessEqual(res.getSlavename(), "bot1")
+ # bot1 is substantiated now; bot2 is not.
+ self.failUnless(self.bot1.substantiated)
+ self.failIf(self.bot2.substantiated)
+ # bot1 is waiting a bit to see if there will be another build before
+ # it shuts down the instance ("insubstantiates")
+ self.build_wait_timer = self.bot1.build_wait_timer
+ self.assertNotIdentical(self.build_wait_timer, None)
+ self.failUnless(self.build_wait_timer.active())
+ self.assertApproximates(
+ self.bot1.build_wait_timeout,
+ self.build_wait_timer.time - runtime.seconds(),
+ 2)
+ # now we'll do another build
+ d = self.doBuild("b1")
+ # the slave is already substantiated, so no event is created
+ e = self.b1.builder_status.getEvent(-1)
+ self.assertNotEqual(e.text, ['substantiating'])
+ # wait for the next build
+ d.addCallback(self._testSequence_4)
+ return d
+ def _testSequence_4(self, res):
+ # build was a success!
+ self.failUnlessEqual(res.getResults(), SUCCESS)
+ self.failUnlessEqual(res.getSlavename(), "bot1")
+ # bot1 is still waiting, but with a new timer
+ self.assertNotIdentical(self.bot1.build_wait_timer, None)
+ self.assertNotIdentical(self.build_wait_timer,
+ self.bot1.build_wait_timer)
+ self.assertApproximates(
+ self.bot1.build_wait_timeout,
+ self.bot1.build_wait_timer.time - runtime.seconds(),
+ 2)
+ del self.build_wait_timer
+ # We'll set the timer to fire sooner, and wait for it to fire.
+ self.bot1.build_wait_timer.reset(0)
+ d = defer.Deferred()
+ reactor.callLater(1, d.callback, None)
+ d.addCallback(self._testSequence_5)
+ return d
+ def _testSequence_5(self, res):
+ # slave is insubstantiated
+ self.assertIdentical(self.bot1.slave, None)
+ self.failIf(self.bot1.substantiated)
+ # Now we'll start up another build, to show that the shutdown left
+ # things in such a state that we can restart.
+ d = self.doBuild("b1")
+ # the bot can return an informative message on success that the event
+ # will render. Let's use a mechanism of our test latent bot to
+ # demonstrate that.
+ self.bot1.start_message = ['[instance id]', '[start-up time]']
+ # here's our event again:
+ self.e = self.b1.builder_status.getEvent(-1)
+ self.assertEqual(self.e.text, ['substantiating'])
+ d.addCallback(self._testSequence_6)
+ return d
+ def _testSequence_6(self, res):
+ # build was a success!
+ self.failUnlessEqual(res.getResults(), SUCCESS)
+ self.failUnlessEqual(res.getSlavename(), "bot1")
+ # the event has announced its success. (Just imagine that
+ # [instance id] and [start-up time] were actually valuable
+ # information.)
+ e = self.e
+ del self.e
+ self.assertEqual(
+ e.text,
+ ['substantiate', 'success', '[instance id]', '[start-up time]'])
+ # Now we need to clean up the timer. We could just cancel it, but
+ # we'll go through the full dance once more to show we can.
+ # We'll set the timer to fire sooner, and wait for it to fire.
+ # Also, we'll set the build_slave to take a little bit longer to shut
+ # down, to see that it doesn't affect anything.
+ self.bot1.stop_wait = 2
+ self.bot1.build_wait_timer.reset(0)
+ d = defer.Deferred()
+ reactor.callLater(1, d.callback, None)
+ d.addCallback(self._testSequence_7)
+ return d
+ def _testSequence_7(self, res):
+ # slave is insubstantiated
+ self.assertIdentical(self.bot1.slave, None)
+ self.assertNot(self.bot1.substantiated)
+ # the remote is still not cleaned out. We'll wait for it.
+ d = defer.Deferred()
+ reactor.callLater(1, d.callback, None)
+ return d
+
+ def testNeverSubstantiated(self):
+ # When a substantiation is requested, the slave may never appear.
+ # This is a serious problem, and recovering from it is not really
+ # handled well right now (in part because a way to handle it is not
+ # clear). However, at the least, the status event will show a
+ # failure, and the slave will be told to insubstantiate, and to be
+ # removed from the botmaster's list of available slaves.
+ # This tells our test bot to never start, and to not complain about
+ # being told to stop without ever starting
+ self.bot1.testing_substantiation_timeout = True
+ # normally (by default) we have 20 minutes to try and connect to the
+ # remote
+ self.assertEqual(self.bot1.missing_timeout, 20*60)
+ # for testing purposes, we'll put that down to a tenth of a second!
+ self.bot1.missing_timeout = 0.1
+ # since the current scheduling algorithm is simple and does not
+ # rotate or attempt any sort of load-balancing, two builds in
+ # sequence should both use the first slave. This may change later if
+ # we move to a more sophisticated scheme.
+ self.b1.CHOOSE_SLAVES_RANDOMLY = False
+ # start a build
+ self.build_deferred = self.doBuild('b1')
+ # the event tells us we are substantiating, as usual
+ e = self.b1.builder_status.getEvent(-1)
+ self.assertEqual(e.text, ['substantiating'])
+ # in a moment we'll see that the test flag we use to show that the
+ # bot was told to insubstantiate has been fired. Here, we just verify
+ # that it has not yet fired.
+ self.failIf(self.bot1.stopped)
+ # That substantiation is going to fail. Let's wait for it.
+ d = self.bot1.substantiation_deferred
+ self.assertNotIdentical(d, None)
+ d.addCallbacks(self._testNeverSubstantiated_BadSuccess,
+ self._testNeverSubstantiated_1)
+ return d
+ def _testNeverSubstantiated_BadSuccess(self, res):
+ self.fail('we should not have succeeded here.')
+ def _testNeverSubstantiated_1(self, res):
+ # ok, we failed.
+ self.assertIdentical(self.bot1.slave, None)
+ self.failIf(self.bot1.substantiated)
+ self.failUnless(isinstance(res, failure.Failure))
+ self.assertIdentical(self.bot1.substantiation_deferred, None)
+ # our event informs us of this
+ e1 = self.b1.builder_status.getEvent(-3)
+ self.assertEqual(e1.text, ['substantiate', 'failed'])
+ self.assertNotIdentical(e1.finished, None)
+ # the slave is no longer available to build. The events show it...
+ e2 = self.b1.builder_status.getEvent(-2)
+ self.assertEqual(e2.text, ['removing', 'latent', 'bot1'])
+ e3 = self.b1.builder_status.getEvent(-1)
+ self.assertEqual(e3.text, ['disconnect', 'bot1'])
+ # ...and the builder shows it.
+ self.assertEqual(['bot2'],
+ [sb.slave.slavename for sb in self.b1.slaves])
+ # ideally, we would retry the build, but that infrastructure (which
+ # would be used for other situations in the builder as well) does not
+ # yet exist. Therefore the build never completes one way or the
+ # other, just as if a normal slave detached.
+
+ def testServiceStop(self):
+ # if the slave has an instance when it is stopped, the slave should
+ # be told to shut down.
+ self.b1.CHOOSE_SLAVES_RANDOMLY = False
+ d = self.doBuild("b1")
+ d.addCallback(self._testServiceStop_1)
+ return d
+ def _testServiceStop_1(self, res):
+ # build was a success!
+ self.failUnlessEqual(res.getResults(), SUCCESS)
+ self.failUnlessEqual(res.getSlavename(), "bot1")
+ # bot 1 is substantiated.
+ self.assertNotIdentical(self.bot1.slave, None)
+ self.failUnless(self.bot1.substantiated)
+ # now let's stop the bot.
+ d = self.bot1.stopService()
+ d.addCallback(self._testServiceStop_2)
+ return d
+ def _testServiceStop_2(self, res):
+ # bot 1 is NOT substantiated.
+ self.assertIdentical(self.bot1.slave, None)
+ self.failIf(self.bot1.substantiated)
+
+ def testPing(self):
+ # While a latent slave pings normally when it is substantiated (as
+ # happens behind the scenes when a build is requested), the ping is
+ # a no-op success while it is insubstantiated.
+ self.assertIdentical(self.bot1.slave, None)
+ self.failIf(self.bot1.substantiated)
+ d = self.connectSlave(builders=['b1'], slavename='bot3')
+ d.addCallback(self._testPing_1)
+ return d
+ def _testPing_1(self, res):
+ self.assertEqual(sorted(sb.slave.slavename for sb in self.b1.slaves),
+ ['bot1', 'bot2', 'bot3'])
+ d = self.control.getBuilder('b1').ping()
+ d.addCallback(self._testPing_2)
+ return d
+ def _testPing_2(self, res):
+ # all three pings were successful
+ self.assert_(res)
+ # but neither bot1 nor bot2 substantiated.
+ self.assertIdentical(self.bot1.slave, None)
+ self.failIf(self.bot1.substantiated)
+ self.assertIdentical(self.bot2.slave, None)
+ self.failIf(self.bot2.substantiated)
+
+
+class SlaveBusyness(RunMixin, unittest.TestCase):
+
+ def setUp(self):
+ RunMixin.setUp(self)
+ self.master.loadConfig(config_busyness)
+ self.master.startService()
+ d = self.connectSlave(["b1", "b2"])
+ return d
+
+ def doBuild(self, buildername):
+ br = BuildRequest("forced", SourceStamp(), 'test_builder')
+ d = br.waitUntilFinished()
+ self.control.getBuilder(buildername).requestBuild(br)
+ return d
+
+ def getRunningBuilds(self):
+ return len(self.status.getSlave("bot1").getRunningBuilds())
+
+ def testSlaveNotBusy(self):
+ self.failUnlessEqual(self.getRunningBuilds(), 0)
+ # now kick a build, wait for it to finish, then check again
+ d = self.doBuild("b1")
+ d.addCallback(self._testSlaveNotBusy_1)
+ return d
+
+ def _testSlaveNotBusy_1(self, res):
+ self.failUnlessEqual(self.getRunningBuilds(), 0)
+
+ def testSlaveBusyOneBuild(self):
+ d1 = self.doBuild("b1")
+ d2 = defer.Deferred()
+ reactor.callLater(.5, d2.callback, None)
+ d2.addCallback(self._testSlaveBusyOneBuild_1)
+ d1.addCallback(self._testSlaveBusyOneBuild_finished_1)
+ return defer.DeferredList([d1,d2])
+
+ def _testSlaveBusyOneBuild_1(self, res):
+ self.failUnlessEqual(self.getRunningBuilds(), 1)
+
+ def _testSlaveBusyOneBuild_finished_1(self, res):
+ self.failUnlessEqual(self.getRunningBuilds(), 0)
+
+ def testSlaveBusyTwoBuilds(self):
+ d1 = self.doBuild("b1")
+ d2 = self.doBuild("b2")
+ d3 = defer.Deferred()
+ reactor.callLater(.5, d3.callback, None)
+ d3.addCallback(self._testSlaveBusyTwoBuilds_1)
+ d1.addCallback(self._testSlaveBusyTwoBuilds_finished_1, d2)
+ return defer.DeferredList([d1,d3])
+
+ def _testSlaveBusyTwoBuilds_1(self, res):
+ self.failUnlessEqual(self.getRunningBuilds(), 2)
+
+ def _testSlaveBusyTwoBuilds_finished_1(self, res, d2):
+ self.failUnlessEqual(self.getRunningBuilds(), 1)
+ d2.addCallback(self._testSlaveBusyTwoBuilds_finished_2)
+ return d2
+
+ def _testSlaveBusyTwoBuilds_finished_2(self, res):
+ self.failUnlessEqual(self.getRunningBuilds(), 0)
+
+ def testSlaveDisconnect(self):
+ d1 = self.doBuild("b1")
+ d2 = defer.Deferred()
+ reactor.callLater(.5, d2.callback, None)
+ d2.addCallback(self._testSlaveDisconnect_1)
+ d1.addCallback(self._testSlaveDisconnect_finished_1)
+ return defer.DeferredList([d1, d2])
+
+ def _testSlaveDisconnect_1(self, res):
+ self.failUnlessEqual(self.getRunningBuilds(), 1)
+ return self.shutdownAllSlaves()
+
+ def _testSlaveDisconnect_finished_1(self, res):
+ self.failUnlessEqual(self.getRunningBuilds(), 0)
+
+config_3 = """
+from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.buildslave import BuildSlave
+s = factory.s
+
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit')]
+c['schedulers'] = []
+c['slavePortnum'] = 0
+c['schedulers'] = []
+
+f1 = factory.BuildFactory([s(dummy.Wait, handle='one')])
+f2 = factory.BuildFactory([s(dummy.Wait, handle='two')])
+f3 = factory.BuildFactory([s(dummy.Wait, handle='three')])
+
+c['builders'] = [
+ {'name': 'b1', 'slavenames': ['bot1'],
+ 'builddir': 'b1', 'factory': f1},
+ ]
+"""
+
+config_4 = config_3 + """
+c['builders'] = [
+ {'name': 'b1', 'slavenames': ['bot1'],
+ 'builddir': 'b1', 'factory': f2},
+ ]
+"""
+
+config_5 = config_3 + """
+c['builders'] = [
+ {'name': 'b1', 'slavenames': ['bot1'],
+ 'builddir': 'b1', 'factory': f3},
+ ]
+"""
+
+from buildbot.slave.commands import waitCommandRegistry
+
+class Reconfig(RunMixin, unittest.TestCase):
+
+ def setUp(self):
+ RunMixin.setUp(self)
+ self.master.loadConfig(config_3)
+ self.master.startService()
+ d = self.connectSlave(["b1"])
+ return d
+
+ def _one_started(self):
+ log.msg("testReconfig._one_started")
+ self.build1_started = True
+ self.d1.callback(None)
+ return self.d2
+
+ def _two_started(self):
+ log.msg("testReconfig._two_started")
+ self.build2_started = True
+ self.d3.callback(None)
+ return self.d4
+
+ def _three_started(self):
+ log.msg("testReconfig._three_started")
+ self.build3_started = True
+ self.d5.callback(None)
+ return self.d6
+
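+ # The three callbacks above are registered in waitCommandRegistry below,
+ # keyed by (handle, reason); each one fires its "started" Deferred and
+ # then returns a second Deferred that keeps the corresponding 'wait'
+ # step (and hence the build) open until the test fires it.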
+ def testReconfig(self):
+ # reconfiguring a Builder should not interrupt any running Builds. No
+ # queued BuildRequests should be lost. The next Build started should
+ # use the new process.
+ slave1 = self.slaves['bot1']
+ bot1 = slave1.getServiceNamed('bot')
+ sb1 = bot1.builders['b1']
+ self.failUnless(isinstance(sb1, bot.SlaveBuilder))
+ self.failUnless(sb1.running)
+ b1 = self.master.botmaster.builders['b1']
+ self.orig_b1 = b1
+
+ self.d1 = d1 = defer.Deferred()
+ self.d2 = d2 = defer.Deferred()
+ self.d3, self.d4 = defer.Deferred(), defer.Deferred()
+ self.d5, self.d6 = defer.Deferred(), defer.Deferred()
+ self.build1_started = False
+ self.build2_started = False
+ self.build3_started = False
+ waitCommandRegistry[("one","build1")] = self._one_started
+ waitCommandRegistry[("two","build2")] = self._two_started
+ waitCommandRegistry[("three","build3")] = self._three_started
+
+ # use different branches to make sure these cannot be merged
+ br1 = BuildRequest("build1", SourceStamp(branch="1"), 'test_builder')
+ b1.submitBuildRequest(br1)
+ br2 = BuildRequest("build2", SourceStamp(branch="2"), 'test_builder')
+ b1.submitBuildRequest(br2)
+ br3 = BuildRequest("build3", SourceStamp(branch="3"), 'test_builder')
+ b1.submitBuildRequest(br3)
+ self.requests = (br1, br2, br3)
+ # all three are now in the queue
+
+ # wait until the first one has started
+ d1.addCallback(self._testReconfig_2)
+ return d1
+
+ def _testReconfig_2(self, res):
+ log.msg("_testReconfig_2")
+ # confirm that it is building
+ brs = self.requests[0].status.getBuilds()
+ self.failUnlessEqual(len(brs), 1)
+ self.build1 = brs[0]
+ self.failUnlessEqual(self.build1.getCurrentStep().getName(), "wait")
+ # br1 is building, br2 and br3 are in the queue (in that order). Now
+ # we reconfigure the Builder.
+ self.failUnless(self.build1_started)
+ d = self.master.loadConfig(config_4)
+ d.addCallback(self._testReconfig_3)
+ return d
+
+ def _testReconfig_3(self, res):
+ log.msg("_testReconfig_3")
+ # now check to see that br1 is still building, and that br2 and br3
+ # are in the queue of the new builder
+ b1 = self.master.botmaster.builders['b1']
+ self.failIfIdentical(b1, self.orig_b1)
+ self.failIf(self.build1.isFinished())
+ self.failUnlessEqual(self.build1.getCurrentStep().getName(), "wait")
+ self.failUnlessEqual(len(b1.buildable), 2)
+ self.failUnless(self.requests[1] in b1.buildable)
+ self.failUnless(self.requests[2] in b1.buildable)
+
+ # allow br1 to finish, and make sure its status is delivered normally
+ d = self.requests[0].waitUntilFinished()
+ d.addCallback(self._testReconfig_4)
+ self.d2.callback(None)
+ return d
+
+ def _testReconfig_4(self, bs):
+ log.msg("_testReconfig_4")
+ self.failUnlessEqual(bs.getReason(), "build1")
+ self.failUnless(bs.isFinished())
+ self.failUnlessEqual(bs.getResults(), SUCCESS)
+
+ # at this point, the first build has finished, and there is a pending
+ # call to start the second build. Once that pending call fires, there
+ # is a network roundtrip before the 'wait' RemoteCommand is delivered
+ # to the slave. We need to wait for both events to happen before we
+ # can check to make sure it is using the correct process. Just wait a
+ # full second.
+ d = defer.Deferred()
+ d.addCallback(self._testReconfig_5)
+ reactor.callLater(1, d.callback, None)
+ return d
+
+ def _testReconfig_5(self, res):
+ log.msg("_testReconfig_5")
+ # at this point the next build ought to be running
+ b1 = self.master.botmaster.builders['b1']
+ self.failUnlessEqual(len(b1.buildable), 1)
+ self.failUnless(self.requests[2] in b1.buildable)
+ self.failUnlessEqual(len(b1.building), 1)
+ # and it ought to be using the new process
+ self.failUnless(self.build2_started)
+
+ # now, while the second build is running, change the config multiple
+ # times.
+
+ d = self.master.loadConfig(config_3)
+ d.addCallback(lambda res: self.master.loadConfig(config_4))
+ d.addCallback(lambda res: self.master.loadConfig(config_5))
+ def _done(res):
+ # then once that's done, allow the second build to finish and
+ # wait for it to complete
+ da = self.requests[1].waitUntilFinished()
+ self.d4.callback(None)
+ return da
+ d.addCallback(_done)
+ def _done2(res):
+ # and once *that*'s done, wait another second to let the third
+ # build start
+ db = defer.Deferred()
+ reactor.callLater(1, db.callback, None)
+ return db
+ d.addCallback(_done2)
+ d.addCallback(self._testReconfig_6)
+ return d
+
+ def _testReconfig_6(self, res):
+ log.msg("_testReconfig_6")
+ # now check to see that the third build is running
+ self.failUnless(self.build3_started)
+
+ # we're done
+
+
+
+class Slave2(RunMixin, unittest.TestCase):
+
+ revision = 0
+
+ def setUp(self):
+ RunMixin.setUp(self)
+ self.master.loadConfig(config_1)
+ self.master.startService()
+
+ def doBuild(self, buildername, reason="forced"):
+ # we need to prevent these builds from being merged, so we create
+ # each of them with a different revision specifier. The revision is
+ # ignored because our build process does not have a source checkout
+ # step.
+ self.revision += 1
+ br = BuildRequest(reason, SourceStamp(revision=self.revision),
+ 'test_builder')
+ d = br.waitUntilFinished()
+ self.control.getBuilder(buildername).requestBuild(br)
+ return d
+
+ def testFirstComeFirstServed(self):
+ # submit two builds, then connect a slave which fails the
+ # slaveping. The first build will claim the slave, do the slaveping,
+ # give up, and re-queue the build. Verify that the build gets
+ # re-queued in front of all other builds. This may be tricky, because
+ # the other builds may attempt to claim the just-failed slave.
+
+ d1 = self.doBuild("b1", "first")
+ d2 = self.doBuild("b1", "second")
+ #buildable = self.master.botmaster.builders["b1"].buildable
+ #print [b.reason for b in buildable]
+
+ # specifically, I want the poor build to get precedence over any
+ # others that were waiting. To test this, we need more builds than
+ # slaves.
+
+ # now connect a broken slave. The first build starts as soon as the
+ # slave connects, so by the time we get to our _1 method, the ill-fated
+ # build has already started.
+ d = self.connectSlave(["b1"], opts={"failPingOnce": True})
+ d.addCallback(self._testFirstComeFirstServed_1, d1, d2)
+ return d
+ def _testFirstComeFirstServed_1(self, res, d1, d2):
+ # the master has sent the slaveping. When this is received, it will
+ # fail, causing the master to hang up on the slave. When it
+ # reconnects, it should find the first build at the front of the
+ # queue. If we simply wait for both builds to complete, then look at
+ # the status logs, we should see that the builds ran in the correct
+ # order.
+
+ d = defer.DeferredList([d1,d2])
+ d.addCallback(self._testFirstComeFirstServed_2)
+ return d
+ def _testFirstComeFirstServed_2(self, res):
+ b = self.status.getBuilder("b1")
+ builds = b.getBuild(0), b.getBuild(1)
+ reasons = [build.getReason() for build in builds]
+ self.failUnlessEqual(reasons, ["first", "second"])
+
+config_multi_builders = config_1 + """
+c['builders'] = [
+ {'name': 'dummy', 'slavenames': ['bot1','bot2','bot3'],
+ 'builddir': 'b1', 'factory': f2},
+ {'name': 'dummy2', 'slavenames': ['bot1','bot2','bot3'],
+ 'builddir': 'b2', 'factory': f2},
+ {'name': 'dummy3', 'slavenames': ['bot1','bot2','bot3'],
+ 'builddir': 'b3', 'factory': f2},
+ ]
+
+"""
+
+config_mail_missing = config_1 + """
+c['slaves'] = [BuildSlave('bot1', 'sekrit', notify_on_missing='admin',
+ missing_timeout=1)]
+c['builders'] = [
+ {'name': 'dummy', 'slavenames': ['bot1'],
+ 'builddir': 'b1', 'factory': f1},
+ ]
+c['projectName'] = 'myproject'
+c['projectURL'] = 'myURL'
+"""
+
+class FakeMailer(mail.MailNotifier):
+ def sendMessage(self, m, recipients):
+ self.messages.append((m,recipients))
+ return defer.succeed(None)
+
+class BuildSlave(RunMixin, unittest.TestCase):
+ def test_track_builders(self):
+ self.master.loadConfig(config_multi_builders)
+ self.master.readConfig = True
+ self.master.startService()
+ d = self.connectSlave()
+
+ def _check(res):
+ b = self.master.botmaster.builders['dummy']
+ self.failUnless(len(b.slaves) == 1) # just bot1
+
+ bs = b.slaves[0].slave
+ self.failUnless(len(bs.slavebuilders) == 3)
+ self.failUnless(b in [sb.builder for sb in
+ bs.slavebuilders.values()])
+
+ d.addCallback(_check)
+ return d
+
+ def test_mail_on_missing(self):
+ self.master.loadConfig(config_mail_missing)
+ self.master.readConfig = True
+ self.master.startService()
+ fm = FakeMailer("buildbot@example.org")
+ fm.messages = []
+ fm.setServiceParent(self.master)
+ self.master.statusTargets.append(fm)
+
+ d = self.connectSlave()
+ d.addCallback(self.stall, 1)
+ d.addCallback(lambda res: self.shutdownSlave("bot1", "dummy"))
+ def _not_yet(res):
+ self.failIf(fm.messages)
+ d.addCallback(_not_yet)
+ # we reconnect right away, so the timer shouldn't fire
+ d.addCallback(lambda res: self.connectSlave())
+ d.addCallback(self.stall, 3)
+ d.addCallback(_not_yet)
+ d.addCallback(lambda res: self.shutdownSlave("bot1", "dummy"))
+ d.addCallback(_not_yet)
+ # now we let it sit disconnected for long enough for the timer to
+ # fire
+ d.addCallback(self.stall, 3)
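+ # (missing_timeout is set to 1 second in config_mail_missing, so a
+ # 3-second stall is comfortably long enough for the notification)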
+ def _check(res):
+ self.failUnlessEqual(len(fm.messages), 1)
+ msg,recips = fm.messages[0]
+ self.failUnlessEqual(recips, ["admin"])
+ body = msg.as_string()
+ self.failUnlessIn("To: admin", body)
+ self.failUnlessIn("Subject: Buildbot: buildslave bot1 was lost",
+ body)
+ self.failUnlessIn("From: buildbot@example.org", body)
+ self.failUnlessIn("working for 'myproject'", body)
+ self.failUnlessIn("has noticed that the buildslave named bot1 went away",
+ body)
+ self.failUnlessIn("was 'one'", body)
+ self.failUnlessIn("myURL", body)
+ d.addCallback(_check)
+ return d
+
+ def stall(self, result, delay=1):
+ d = defer.Deferred()
+ reactor.callLater(delay, d.callback, result)
+ return d
diff --git a/buildbot/buildbot/test/test_status.py b/buildbot/buildbot/test/test_status.py
new file mode 100644
index 0000000..b3c162a
--- /dev/null
+++ b/buildbot/buildbot/test/test_status.py
@@ -0,0 +1,1631 @@
+# -*- test-case-name: buildbot.test.test_status -*-
+
+import email, os
+import operator
+
+from zope.interface import implements
+from twisted.internet import defer, reactor
+from twisted.trial import unittest
+
+from buildbot import interfaces
+from buildbot.sourcestamp import SourceStamp
+from buildbot.process.base import BuildRequest, Build
+from buildbot.status import builder, base, words, progress
+from buildbot.changes.changes import Change
+from buildbot.process.builder import Builder
+from time import sleep
+
+mail = None
+try:
+ from buildbot.status import mail
+except ImportError:
+ pass
+from buildbot.status import progress, client # NEEDS COVERAGE
+from buildbot.test.runutils import RunMixin, setupBuildStepStatus
+
+class MyStep:
+ build = None
+ def getName(self):
+ return "step"
+
+class MyLogFileProducer(builder.LogFileProducer):
+ # The reactor.callLater(0) in LogFileProducer.resumeProducing is a bit of
+ # a nuisance from a testing point of view. This subclass adds a Deferred
+ # to that call so we can find out when it is complete.
+ def resumeProducing(self):
+ d = defer.Deferred()
+ reactor.callLater(0, self._resumeProducing, d)
+ return d
+ def _resumeProducing(self, d):
+ builder.LogFileProducer._resumeProducing(self)
+ reactor.callLater(0, d.callback, None)
+
+class MyLog(builder.LogFile):
+ def __init__(self, basedir, name, text=None, step=None):
+ self.fakeBuilderBasedir = basedir
+ if not step:
+ step = MyStep()
+ builder.LogFile.__init__(self, step, name, name)
+ if text:
+ self.addStdout(text)
+ self.finish()
+ def getFilename(self):
+ return os.path.join(self.fakeBuilderBasedir, self.name)
+
+ def subscribeConsumer(self, consumer):
+ p = MyLogFileProducer(self, consumer)
+ d = p.resumeProducing()
+ return d
+
+class MyHTMLLog(builder.HTMLLogFile):
+ def __init__(self, basedir, name, html):
+ step = MyStep()
+ builder.HTMLLogFile.__init__(self, step, name, name, html)
+
+class MyLogSubscriber:
+ def __init__(self):
+ self.chunks = []
+ def logChunk(self, build, step, log, channel, text):
+ self.chunks.append((channel, text))
+
+class MyLogConsumer:
+ def __init__(self, limit=None):
+ self.chunks = []
+ self.finished = False
+ self.limit = limit
+ def registerProducer(self, producer, streaming):
+ self.producer = producer
+ self.streaming = streaming
+ def unregisterProducer(self):
+ self.producer = None
+ def writeChunk(self, chunk):
+ self.chunks.append(chunk)
+ if self.limit:
+ self.limit -= 1
+ if self.limit == 0:
+ self.producer.pauseProducing()
+ def finish(self):
+ self.finished = True
+
+if mail:
+ class MyMailer(mail.MailNotifier):
+ def sendMessage(self, m, recipients):
+ self.parent.messages.append((m, recipients))
+
+class MyStatus:
+ def getBuildbotURL(self):
+ return self.url
+ def getURLForThing(self, thing):
+ return None
+ def getProjectName(self):
+ return "myproj"
+
+class MyBuilder(builder.BuilderStatus):
+ nextBuildNumber = 0
+
+class MyBuild(builder.BuildStatus):
+ testlogs = []
+ def __init__(self, parent, number, results):
+ builder.BuildStatus.__init__(self, parent, number)
+ self.results = results
+ self.source = SourceStamp(revision="1.14")
+ self.reason = "build triggered by changes"
+ self.finished = True
+ def getLogs(self):
+ return self.testlogs
+
+class MyLookup:
+ implements(interfaces.IEmailLookup)
+
+ def getAddress(self, user):
+ d = defer.Deferred()
+ # With me now is Mr Thomas Walters of West Hartlepool who is totally
+ # invisible.
+ if user == "Thomas_Walters":
+ d.callback(None)
+ else:
+ d.callback(user + "@" + "dev.com")
+ return d
+
+def customTextMailMessage(attrs):
+ logLines = 3
+ text = list()
+ text.append("STATUS: %s" % attrs['result'].title())
+ text.append("")
+ text.extend([c.asText() for c in attrs['changes']])
+ text.append("")
+ name, url, lines = attrs['logs'][-1]
+ text.append("Last %d lines of '%s':" % (logLines, name))
+ text.extend(["\t%s\n" % line for line in lines[len(lines)-logLines:]])
+ text.append("")
+ text.append("-buildbot")
+ return ("\n".join(text), 'plain')
+
+def customHTMLMailMessage(attrs):
+ logLines = 3
+ text = list()
+ text.append("<h3>STATUS <a href='%s'>%s</a>:</h3>" % (attrs['buildURL'],
+ attrs['result'].title()))
+ text.append("<h4>Recent Changes:</h4>")
+ text.extend([c.asHTML() for c in attrs['changes']])
+ name, url, lines = attrs['logs'][-1]
+ text.append("<h4>Last %d lines of '%s':</h4>" % (logLines, name))
+ text.append("<p>")
+ text.append("<br>".join([line for line in lines[len(lines)-logLines:]]))
+ text.append("</p>")
+ text.append("<br>")
+ text.append("<b>-<a href='%s'>buildbot</a></b>" % attrs['buildbotURL'])
+ return ("\n".join(text), 'html')
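+# Both custom message callables above return a (body, subtype) pair; the
+# subtype ('plain' or 'html') is presumably used as the text/<subtype> of
+# the generated mail body.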
+
+class Mail(unittest.TestCase):
+
+ def setUp(self):
+ self.builder = MyBuilder("builder1")
+
+ def stall(self, res, timeout):
+ d = defer.Deferred()
+ reactor.callLater(timeout, d.callback, res)
+ return d
+
+ def makeBuild(self, number, results):
+ return MyBuild(self.builder, number, results)
+
+ def failUnlessIn(self, substring, string):
+ self.failUnless(string.find(substring) != -1,
+ "didn't see '%s' in '%s'" % (substring, string))
+
+ def getProjectName(self):
+ return "PROJECT"
+
+ def getBuildbotURL(self):
+ return "BUILDBOT_URL"
+
+ def getURLForThing(self, thing):
+ return None
+
+ def testBuild1(self):
+ mailer = MyMailer(fromaddr="buildbot@example.com",
+ extraRecipients=["recip@example.com",
+ "recip2@example.com"],
+ lookup=mail.Domain("dev.com"))
+ mailer.parent = self
+ mailer.status = self
+ self.messages = []
+
+ b1 = self.makeBuild(3, builder.SUCCESS)
+ b1.blamelist = ["bob"]
+
+ mailer.buildFinished("builder1", b1, b1.results)
+ self.failUnless(len(self.messages) == 1)
+ m,r = self.messages.pop()
+ t = m.as_string()
+ self.failUnlessIn("To: bob@dev.com\n", t)
+ self.failUnlessIn("CC: recip2@example.com, recip@example.com\n", t)
+ self.failUnlessIn("From: buildbot@example.com\n", t)
+ self.failUnlessIn("Subject: buildbot success in PROJECT on builder1\n", t)
+ self.failUnlessIn("Date: ", t)
+ self.failUnlessIn("Build succeeded!\n", t)
+ self.failUnlessIn("Buildbot URL: BUILDBOT_URL\n", t)
+
+ def testBuild2(self):
+ mailer = MyMailer(fromaddr="buildbot@example.com",
+ extraRecipients=["recip@example.com",
+ "recip2@example.com"],
+ lookup="dev.com",
+ sendToInterestedUsers=False)
+ mailer.parent = self
+ mailer.status = self
+ self.messages = []
+
+ b1 = self.makeBuild(3, builder.SUCCESS)
+ b1.blamelist = ["bob"]
+
+ mailer.buildFinished("builder1", b1, b1.results)
+ self.failUnless(len(self.messages) == 1)
+ m,r = self.messages.pop()
+ t = m.as_string()
+ self.failUnlessIn("To: recip2@example.com, "
+ "recip@example.com\n", t)
+ self.failUnlessIn("From: buildbot@example.com\n", t)
+ self.failUnlessIn("Subject: buildbot success in PROJECT on builder1\n", t)
+ self.failUnlessIn("Build succeeded!\n", t)
+ self.failUnlessIn("Buildbot URL: BUILDBOT_URL\n", t)
+
+ def testBuildStatusCategory(self):
+ # a status client only interested in a category should only receive
+ # from that category
+ mailer = MyMailer(fromaddr="buildbot@example.com",
+ extraRecipients=["recip@example.com",
+ "recip2@example.com"],
+ lookup="dev.com",
+ sendToInterestedUsers=False,
+ categories=["debug"])
+
+ mailer.parent = self
+ mailer.status = self
+ self.messages = []
+
+ b1 = self.makeBuild(3, builder.SUCCESS)
+ b1.blamelist = ["bob"]
+
+ mailer.buildFinished("builder1", b1, b1.results)
+ self.failIf(self.messages)
+
+ def testBuilderCategory(self):
+ # a builder in a certain category should notify status clients that
+ # did not list categories, or categories including this one
+ mailer1 = MyMailer(fromaddr="buildbot@example.com",
+ extraRecipients=["recip@example.com",
+ "recip2@example.com"],
+ lookup="dev.com",
+ sendToInterestedUsers=False)
+ mailer2 = MyMailer(fromaddr="buildbot@example.com",
+ extraRecipients=["recip@example.com",
+ "recip2@example.com"],
+ lookup="dev.com",
+ sendToInterestedUsers=False,
+ categories=["active"])
+ mailer3 = MyMailer(fromaddr="buildbot@example.com",
+ extraRecipients=["recip@example.com",
+ "recip2@example.com"],
+ lookup="dev.com",
+ sendToInterestedUsers=False,
+ categories=["active", "debug"])
+
+ builderd = MyBuilder("builder2", "debug")
+
+ mailer1.parent = self
+ mailer1.status = self
+ mailer2.parent = self
+ mailer2.status = self
+ mailer3.parent = self
+ mailer3.status = self
+ self.messages = []
+
+ t = mailer1.builderAdded("builder2", builderd)
+ self.assertEqual(len(mailer1.watched), 1)
+ self.assertEqual(t, mailer1)
+ t = mailer2.builderAdded("builder2", builderd)
+ self.assertEqual(len(mailer2.watched), 0)
+ self.assertEqual(t, None)
+ t = mailer3.builderAdded("builder2", builderd)
+ self.assertEqual(len(mailer3.watched), 1)
+ self.assertEqual(t, mailer3)
+
+ b2 = MyBuild(builderd, 3, builder.SUCCESS)
+ b2.blamelist = ["bob"]
+
+ mailer1.buildFinished("builder2", b2, b2.results)
+ self.failUnlessEqual(len(self.messages), 1)
+ self.messages = []
+ mailer2.buildFinished("builder2", b2, b2.results)
+ self.failUnlessEqual(len(self.messages), 0)
+ self.messages = []
+ mailer3.buildFinished("builder2", b2, b2.results)
+ self.failUnlessEqual(len(self.messages), 1)
+
+ def testCustomTextMessage(self):
+ basedir = "test_custom_text_mesg"
+ os.mkdir(basedir)
+ mailer = MyMailer(fromaddr="buildbot@example.com", mode="problem",
+ extraRecipients=["recip@example.com",
+ "recip2@example.com"],
+ lookup=MyLookup(),
+ customMesg=customTextMailMessage)
+ mailer.parent = self
+ mailer.status = self
+ self.messages = []
+
+ b1 = self.makeBuild(4, builder.FAILURE)
+ b1.setText(["snarkleack", "polarization", "failed"])
+ b1.blamelist = ["dev3", "dev3", "dev3", "dev4",
+ "Thomas_Walters"]
+ b1.source.changes = (Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 123),
+ Change(who = 'author2', files = ['file2'], comments = 'comment2', revision = 456))
+ b1.testlogs = [MyLog(basedir, 'compile', "Compile log here\n"),
+ MyLog(basedir, 'test', "Test log here\nTest 1 failed\nTest 2 failed\nTest 3 failed\nTest 4 failed\n")]
+
+ mailer.buildFinished("builder1", b1, b1.results)
+ m,r = self.messages.pop()
+ t = m.as_string()
+ #
+ # Uncomment to review custom message
+ #
+ #self.fail(t)
+ self.failUnlessIn("comment1", t)
+ self.failUnlessIn("comment2", t)
+ self.failUnlessIn("Test 4 failed", t)
+
+
+ def testCustomHTMLMessage(self):
+ basedir = "test_custom_HTML_mesg"
+ os.mkdir(basedir)
+ mailer = MyMailer(fromaddr="buildbot@example.com", mode="problem",
+ extraRecipients=["recip@example.com",
+ "recip2@example.com"],
+ lookup=MyLookup(),
+ customMesg=customHTMLMailMessage)
+ mailer.parent = self
+ mailer.status = self
+ self.messages = []
+
+ b1 = self.makeBuild(4, builder.FAILURE)
+ b1.setText(["snarkleack", "polarization", "failed"])
+ b1.blamelist = ["dev3", "dev3", "dev3", "dev4",
+ "Thomas_Walters"]
+ b1.source.changes = (Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 123),
+ Change(who = 'author2', files = ['file2'], comments = 'comment2', revision = 456))
+ b1.testlogs = [MyLog(basedir, 'compile', "Compile log here\n"),
+ MyLog(basedir, 'test', "Test log here\nTest 1 failed\nTest 2 failed\nTest 3 failed\nTest 4 failed\n")]
+
+ mailer.buildFinished("builder1", b1, b1.results)
+ m,r = self.messages.pop()
+ t = m.as_string()
+ #
+ # Uncomment to review custom message
+ #
+ #self.fail(t)
+ self.failUnlessIn("<h4>Last 3 lines of 'step.test':</h4>", t)
+ self.failUnlessIn("<p>Changed by: <b>author2</b><br />", t)
+ self.failUnlessIn("Test 3 failed", t)
+
+ def testShouldAttachLog(self):
+ mailer = mail.MailNotifier(fromaddr="buildbot@example.com", addLogs=True)
+ self.assertTrue(mailer._shouldAttachLog('anything'))
+ mailer = mail.MailNotifier(fromaddr="buildbot@example.com", addLogs=False)
+ self.assertFalse(mailer._shouldAttachLog('anything'))
+ mailer = mail.MailNotifier(fromaddr="buildbot@example.com", addLogs=['something'])
+ self.assertFalse(mailer._shouldAttachLog('anything'))
+ self.assertTrue(mailer._shouldAttachLog('something'))
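+        # Judging by these assertions, addLogs=True attaches every log,
+        # addLogs=False attaches none, and a list of names attaches only the
+        # logs whose names appear in that list.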
+
+ def testFailure(self):
+ mailer = MyMailer(fromaddr="buildbot@example.com", mode="problem",
+ extraRecipients=["recip@example.com",
+ "recip2@example.com"],
+ lookup=MyLookup())
+ mailer.parent = self
+ mailer.status = self
+ self.messages = []
+
+ b1 = self.makeBuild(3, builder.SUCCESS)
+ b1.blamelist = ["dev1", "dev2"]
+ b2 = self.makeBuild(4, builder.FAILURE)
+ b2.setText(["snarkleack", "polarization", "failed"])
+ b2.blamelist = ["dev3", "dev3", "dev3", "dev4",
+ "Thomas_Walters"]
+ mailer.buildFinished("builder1", b1, b1.results)
+ self.failIf(self.messages)
+ mailer.buildFinished("builder1", b2, b2.results)
+ self.failUnless(len(self.messages) == 1)
+ m,r = self.messages.pop()
+ t = m.as_string()
+ self.failUnlessIn("To: dev3@dev.com, dev4@dev.com\n", t)
+ self.failUnlessIn("CC: recip2@example.com, recip@example.com\n", t)
+ self.failUnlessIn("From: buildbot@example.com\n", t)
+ self.failUnlessIn("Subject: buildbot failure in PROJECT on builder1\n", t)
+ self.failUnlessIn("The Buildbot has detected a new failure", t)
+ self.failUnlessIn("BUILD FAILED: snarkleack polarization failed\n", t)
+ self.failUnlessEqual(set(r), set(["dev3@dev.com", "dev4@dev.com",
+ "recip2@example.com", "recip@example.com"]))
+
+ def testLogs(self):
+ basedir = "test_status_logs"
+ os.mkdir(basedir)
+ mailer = MyMailer(fromaddr="buildbot@example.com", addLogs=True,
+ extraRecipients=["recip@example.com",
+ "recip2@example.com"])
+ mailer.parent = self
+ mailer.status = self
+ self.messages = []
+
+ b1 = self.makeBuild(3, builder.WARNINGS)
+ b1.testlogs = [MyLog(basedir, 'compile', "Compile log here\n"),
+ MyLog(basedir,
+ 'test', "Test log here\nTest 4 failed\n"),
+ ]
+ b1.text = ["unusual", "gnarzzler", "output"]
+ mailer.buildFinished("builder1", b1, b1.results)
+ self.failUnless(len(self.messages) == 1)
+ m,r = self.messages.pop()
+ t = m.as_string()
+ self.failUnlessIn("Subject: buildbot warnings in PROJECT on builder1\n", t)
+ m2 = email.message_from_string(t)
+ p = m2.get_payload()
+ self.failUnlessEqual(len(p), 3)
+
+ self.failUnlessIn("Build Had Warnings: unusual gnarzzler output\n",
+ p[0].get_payload())
+
+ self.failUnlessEqual(p[1].get_filename(), "step.compile")
+ self.failUnlessEqual(p[1].get_payload(), "Compile log here\n")
+
+ self.failUnlessEqual(p[2].get_filename(), "step.test")
+ self.failUnlessIn("Test log here\n", p[2].get_payload())
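+        # In other words, with addLogs=True the notification is expected to be
+        # a three-part MIME message: the text body first, then one attachment
+        # per build log, named "step.<logname>" (hence the filename checks).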
+
+ def testMail(self):
+ basedir = "test_status_mail"
+ os.mkdir(basedir)
+ dest = os.environ.get("BUILDBOT_TEST_MAIL")
+ if not dest:
+ raise unittest.SkipTest("define BUILDBOT_TEST_MAIL=dest to run this")
+ mailer = mail.MailNotifier(fromaddr="buildbot@example.com",
+ addLogs=True,
+ extraRecipients=[dest])
+ s = MyStatus()
+ s.url = "project URL"
+ mailer.status = s
+
+ b1 = self.makeBuild(3, builder.SUCCESS)
+ b1.testlogs = [MyLog(basedir, 'compile', "Compile log here\n"),
+ MyLog(basedir,
+ 'test', "Test log here\nTest 4 failed\n"),
+ ]
+
+ d = mailer.buildFinished("builder1", b1, b1.results)
+ # When this fires, the mail has been sent, but the SMTP connection is
+ # still up (because smtp.sendmail relies upon the server to hang up).
+ # Spin for a moment to avoid the "unclean reactor" warning that Trial
+ # gives us if we finish before the socket is disconnected. Really,
+ # sendmail() ought to hang up the connection once it is finished:
+ # otherwise a malicious SMTP server could make us consume lots of
+ # memory.
+ d.addCallback(self.stall, 0.1)
+ return d
+
+if not mail:
+ Mail.skip = "the Twisted Mail package is not installed"
+
+class Progress(unittest.TestCase):
+ def testWavg(self):
+ bp = progress.BuildProgress([])
+ e = progress.Expectations(bp)
+ # wavg(old, current)
+ self.failUnlessEqual(e.wavg(None, None), None)
+ self.failUnlessEqual(e.wavg(None, 3), 3)
+ self.failUnlessEqual(e.wavg(3, None), 3)
+ self.failUnlessEqual(e.wavg(3, 4), 3.5)
+ e.decay = 0.1
+ self.failUnlessEqual(e.wavg(3, 4), 3.1)
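+        # Worked arithmetic for the assertions above, assuming wavg() is the
+        # usual exponentially weighted average new = (1-decay)*old + decay*current:
+        #   default decay: wavg(3, 4) == 3.5, i.e. the plain mean (decay 0.5)
+        #   decay = 0.1:   0.9*3 + 0.1*4 == 3.1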
+
+
+class Results(unittest.TestCase):
+
+ def testAddResults(self):
+ b = builder.BuildStatus(builder.BuilderStatus("test"), 12)
+ testname = ("buildbot", "test", "test_status", "Results",
+ "testAddResults")
+ r1 = builder.TestResult(name=testname,
+ results=builder.SUCCESS,
+ text=["passed"],
+ logs={'output': ""},
+ )
+ b.addTestResult(r1)
+
+ res = b.getTestResults()
+ self.failUnlessEqual(res.keys(), [testname])
+ t = res[testname]
+ self.failUnless(interfaces.ITestResult.providedBy(t))
+ self.failUnlessEqual(t.getName(), testname)
+ self.failUnlessEqual(t.getResults(), builder.SUCCESS)
+ self.failUnlessEqual(t.getText(), ["passed"])
+ self.failUnlessEqual(t.getLogs(), {'output': ""})
+
+class Log(unittest.TestCase):
+ def setUpClass(self):
+ self.basedir = "status_log_add"
+ os.mkdir(self.basedir)
+
+ def testAdd(self):
+ l = MyLog(self.basedir, "compile", step=13)
+ self.failUnlessEqual(l.getName(), "compile")
+ self.failUnlessEqual(l.getStep(), 13)
+ l.addHeader("HEADER\n")
+ l.addStdout("Some text\n")
+ l.addStderr("Some error\n")
+ l.addStdout("Some more text\n")
+ self.failIf(l.isFinished())
+ l.finish()
+ self.failUnless(l.isFinished())
+ self.failUnlessEqual(l.getText(),
+ "Some text\nSome error\nSome more text\n")
+ self.failUnlessEqual(l.getTextWithHeaders(),
+ "HEADER\n" +
+ "Some text\nSome error\nSome more text\n")
+ self.failUnlessEqual(len(list(l.getChunks())), 4)
+
+ self.failUnless(l.hasContents())
+ try:
+ os.unlink(l.getFilename())
+ except OSError:
+ os.unlink(l.getFilename() + ".bz2")
+ self.failIf(l.hasContents())
+
+ def TODO_testDuplicate(self):
+ # create multiple logs for the same step with the same logname, make
+ # sure their on-disk filenames are suitably uniquified. This
+ # functionality actually lives in BuildStepStatus and BuildStatus, so
+ # this test must involve more than just the MyLog class.
+
+        # naive approach, doesn't work
+ l1 = MyLog(self.basedir, "duplicate")
+ l1.addStdout("Some text\n")
+ l1.finish()
+ l2 = MyLog(self.basedir, "duplicate")
+ l2.addStdout("Some more text\n")
+ l2.finish()
+ self.failIfEqual(l1.getFilename(), l2.getFilename())
+
+ def testMerge1(self):
+ l = MyLog(self.basedir, "merge1")
+ l.addHeader("HEADER\n")
+ l.addStdout("Some text\n")
+ l.addStdout("Some more text\n")
+ l.addStdout("more\n")
+ l.finish()
+ self.failUnlessEqual(l.getText(),
+ "Some text\nSome more text\nmore\n")
+ self.failUnlessEqual(l.getTextWithHeaders(),
+ "HEADER\n" +
+ "Some text\nSome more text\nmore\n")
+ self.failUnlessEqual(len(list(l.getChunks())), 2)
+
+ def testMerge2(self):
+ l = MyLog(self.basedir, "merge2")
+ l.addHeader("HEADER\n")
+ for i in xrange(1000):
+ l.addStdout("aaaa")
+ for i in xrange(30):
+ l.addStderr("bbbb")
+ for i in xrange(10):
+ l.addStdout("cc")
+ target = 1000*"aaaa" + 30 * "bbbb" + 10 * "cc"
+ self.failUnlessEqual(len(l.getText()), len(target))
+ self.failUnlessEqual(l.getText(), target)
+ l.finish()
+ self.failUnlessEqual(len(l.getText()), len(target))
+ self.failUnlessEqual(l.getText(), target)
+ self.failUnlessEqual(len(list(l.getChunks())), 4)
+
+ def testMerge3(self):
+ l = MyLog(self.basedir, "merge3")
+ l.chunkSize = 100
+ l.addHeader("HEADER\n")
+ for i in xrange(8):
+ l.addStdout(10*"a")
+ for i in xrange(8):
+ l.addStdout(10*"a")
+ self.failUnlessEqual(list(l.getChunks()),
+ [(builder.HEADER, "HEADER\n"),
+ (builder.STDOUT, 100*"a"),
+ (builder.STDOUT, 60*"a")])
+ l.finish()
+ self.failUnlessEqual(l.getText(), 160*"a")
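+        # Chunk bookkeeping behind the assertions above: 16 writes of 10 "a"
+        # characters give 160 characters in total; with chunkSize=100 the
+        # first 100 end up merged into one chunk and the remaining 60 in a
+        # second chunk, alongside the separate HEADER chunk.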
+
+ def testReadlines(self):
+ l = MyLog(self.basedir, "chunks1")
+ l.addHeader("HEADER\n") # should be ignored
+ l.addStdout("Some text\n")
+ l.addStdout("Some More Text\nAnd Some More\n")
+ l.addStderr("Some Stderr\n")
+ l.addStdout("Last line\n")
+ l.finish()
+ alllines = list(l.readlines())
+ self.failUnlessEqual(len(alllines), 4)
+ self.failUnlessEqual(alllines[0], "Some text\n")
+ self.failUnlessEqual(alllines[2], "And Some More\n")
+ self.failUnlessEqual(alllines[3], "Last line\n")
+ stderr = list(l.readlines(interfaces.LOG_CHANNEL_STDERR))
+ self.failUnlessEqual(len(stderr), 1)
+ self.failUnlessEqual(stderr[0], "Some Stderr\n")
+ lines = l.readlines()
+ if False: # TODO: l.readlines() is not yet an iterator
+ # verify that it really is an iterator
+ line0 = lines.next()
+ self.failUnlessEqual(line0, "Some text\n")
+ line1 = lines.next()
+ line2 = lines.next()
+ self.failUnlessEqual(line2, "And Some More\n")
+
+
+ def testChunks(self):
+ l = MyLog(self.basedir, "chunks2")
+ c1 = l.getChunks()
+ l.addHeader("HEADER\n")
+ l.addStdout("Some text\n")
+ self.failUnlessEqual("".join(l.getChunks(onlyText=True)),
+ "HEADER\nSome text\n")
+ c2 = l.getChunks()
+
+ l.addStdout("Some more text\n")
+ self.failUnlessEqual("".join(l.getChunks(onlyText=True)),
+ "HEADER\nSome text\nSome more text\n")
+ c3 = l.getChunks()
+
+ l.addStdout("more\n")
+ l.finish()
+
+ self.failUnlessEqual(list(c1), [])
+ self.failUnlessEqual(list(c2), [(builder.HEADER, "HEADER\n"),
+ (builder.STDOUT, "Some text\n")])
+ self.failUnlessEqual(list(c3), [(builder.HEADER, "HEADER\n"),
+ (builder.STDOUT,
+ "Some text\nSome more text\n")])
+
+ self.failUnlessEqual(l.getText(),
+ "Some text\nSome more text\nmore\n")
+ self.failUnlessEqual(l.getTextWithHeaders(),
+ "HEADER\n" +
+ "Some text\nSome more text\nmore\n")
+ self.failUnlessEqual(len(list(l.getChunks())), 2)
+
+ def testUpgrade(self):
+ l = MyLog(self.basedir, "upgrade")
+ l.addHeader("HEADER\n")
+ l.addStdout("Some text\n")
+ l.addStdout("Some more text\n")
+ l.addStdout("more\n")
+ l.finish()
+ self.failUnless(l.hasContents())
+ # now doctor it to look like a 0.6.4-era non-upgraded logfile
+ l.entries = list(l.getChunks())
+ del l.filename
+ try:
+ os.unlink(l.getFilename() + ".bz2")
+ except OSError:
+ os.unlink(l.getFilename())
+ # now make sure we can upgrade it
+ l.upgrade("upgrade")
+ self.failUnlessEqual(l.getText(),
+ "Some text\nSome more text\nmore\n")
+ self.failUnlessEqual(len(list(l.getChunks())), 2)
+ self.failIf(l.entries)
+
+ # now, do it again, but make it look like an upgraded 0.6.4 logfile
+ # (i.e. l.filename is missing, but the contents are there on disk)
+ l.entries = list(l.getChunks())
+ del l.filename
+ l.upgrade("upgrade")
+ self.failUnlessEqual(l.getText(),
+ "Some text\nSome more text\nmore\n")
+ self.failUnlessEqual(len(list(l.getChunks())), 2)
+ self.failIf(l.entries)
+ self.failUnless(l.hasContents())
+
+ def testHTMLUpgrade(self):
+ l = MyHTMLLog(self.basedir, "upgrade", "log contents")
+ l.upgrade("filename")
+
+ def testSubscribe(self):
+ l1 = MyLog(self.basedir, "subscribe1")
+ l1.finish()
+ self.failUnless(l1.isFinished())
+
+ s = MyLogSubscriber()
+ l1.subscribe(s, True)
+ l1.unsubscribe(s)
+ self.failIf(s.chunks)
+
+ s = MyLogSubscriber()
+ l1.subscribe(s, False)
+ l1.unsubscribe(s)
+ self.failIf(s.chunks)
+
+ finished = []
+ l2 = MyLog(self.basedir, "subscribe2")
+ l2.waitUntilFinished().addCallback(finished.append)
+ l2.addHeader("HEADER\n")
+ s1 = MyLogSubscriber()
+ l2.subscribe(s1, True)
+ s2 = MyLogSubscriber()
+ l2.subscribe(s2, False)
+ self.failUnlessEqual(s1.chunks, [(builder.HEADER, "HEADER\n")])
+ self.failUnlessEqual(s2.chunks, [])
+
+ l2.addStdout("Some text\n")
+ self.failUnlessEqual(s1.chunks, [(builder.HEADER, "HEADER\n"),
+ (builder.STDOUT, "Some text\n")])
+ self.failUnlessEqual(s2.chunks, [(builder.STDOUT, "Some text\n")])
+ l2.unsubscribe(s1)
+
+ l2.addStdout("Some more text\n")
+ self.failUnlessEqual(s1.chunks, [(builder.HEADER, "HEADER\n"),
+ (builder.STDOUT, "Some text\n")])
+ self.failUnlessEqual(s2.chunks, [(builder.STDOUT, "Some text\n"),
+ (builder.STDOUT, "Some more text\n"),
+ ])
+ self.failIf(finished)
+ l2.finish()
+ self.failUnlessEqual(finished, [l2])
+
+ def testConsumer(self):
+ l1 = MyLog(self.basedir, "consumer1")
+ l1.finish()
+ self.failUnless(l1.isFinished())
+
+ s = MyLogConsumer()
+ d = l1.subscribeConsumer(s)
+ d.addCallback(self._testConsumer_1, s)
+ return d
+ testConsumer.timeout = 5
+ def _testConsumer_1(self, res, s):
+ self.failIf(s.chunks)
+ self.failUnless(s.finished)
+ self.failIf(s.producer) # producer should be registered and removed
+
+ l2 = MyLog(self.basedir, "consumer2")
+ l2.addHeader("HEADER\n")
+ l2.finish()
+ self.failUnless(l2.isFinished())
+
+ s = MyLogConsumer()
+ d = l2.subscribeConsumer(s)
+ d.addCallback(self._testConsumer_2, s)
+ return d
+ def _testConsumer_2(self, res, s):
+ self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n")])
+ self.failUnless(s.finished)
+ self.failIf(s.producer) # producer should be registered and removed
+
+
+ l2 = MyLog(self.basedir, "consumer3")
+ l2.chunkSize = 1000
+ l2.addHeader("HEADER\n")
+ l2.addStdout(800*"a")
+ l2.addStdout(800*"a") # should now have two chunks on disk, 1000+600
+ l2.addStdout(800*"b") # HEADER,1000+600*a on disk, 800*a in memory
+ l2.addStdout(800*"b") # HEADER,1000+600*a,1000+600*b on disk
+ l2.addStdout(200*"c") # HEADER,1000+600*a,1000+600*b on disk,
+ # 200*c in memory
+
+ s = MyLogConsumer(limit=1)
+ d = l2.subscribeConsumer(s)
+ d.addCallback(self._testConsumer_3, l2, s)
+ return d
+ def _testConsumer_3(self, res, l2, s):
+ self.failUnless(s.streaming)
+ self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n")])
+ s.limit = 1
+ d = s.producer.resumeProducing()
+ d.addCallback(self._testConsumer_4, l2, s)
+ return d
+ def _testConsumer_4(self, res, l2, s):
+ self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n"),
+ (builder.STDOUT, 1000*"a"),
+ ])
+ s.limit = None
+ d = s.producer.resumeProducing()
+ d.addCallback(self._testConsumer_5, l2, s)
+ return d
+ def _testConsumer_5(self, res, l2, s):
+ self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n"),
+ (builder.STDOUT, 1000*"a"),
+ (builder.STDOUT, 600*"a"),
+ (builder.STDOUT, 1000*"b"),
+ (builder.STDOUT, 600*"b"),
+ (builder.STDOUT, 200*"c")])
+ l2.addStdout(1000*"c") # HEADER,1600*a,1600*b,1200*c on disk
+ self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n"),
+ (builder.STDOUT, 1000*"a"),
+ (builder.STDOUT, 600*"a"),
+ (builder.STDOUT, 1000*"b"),
+ (builder.STDOUT, 600*"b"),
+ (builder.STDOUT, 200*"c"),
+ (builder.STDOUT, 1000*"c")])
+ l2.finish()
+ self.failUnlessEqual(s.chunks, [(builder.HEADER, "HEADER\n"),
+ (builder.STDOUT, 1000*"a"),
+ (builder.STDOUT, 600*"a"),
+ (builder.STDOUT, 1000*"b"),
+ (builder.STDOUT, 600*"b"),
+ (builder.STDOUT, 200*"c"),
+ (builder.STDOUT, 1000*"c")])
+ self.failIf(s.producer)
+ self.failUnless(s.finished)
+
+ def testLargeSummary(self):
+ bigtext = "a" * 200000 # exceed the NetstringReceiver 100KB limit
+ l = MyLog(self.basedir, "large", bigtext)
+ s = MyLogConsumer()
+ d = l.subscribeConsumer(s)
+ def _check(res):
+ for ctype,chunk in s.chunks:
+ self.failUnless(len(chunk) < 100000)
+ merged = "".join([c[1] for c in s.chunks])
+ self.failUnless(merged == bigtext)
+ d.addCallback(_check)
+ # when this fails, it fails with a timeout, and there is an exception
+ # sent to log.err(). This AttributeError exception is in
+ # NetstringReceiver.dataReceived where it does
+        # self.transport.loseConnection() because of the NetstringParseError;
+        # however, self.transport is None.
+ return d
+ testLargeSummary.timeout = 5
+
+
+class CompressLog(unittest.TestCase):
+ def testCompressLogs(self):
+ bss = setupBuildStepStatus("test-compress")
+ bss.build.builder.setLogCompressionLimit(1024)
+ l = bss.addLog('not-compress')
+ l.addStdout('a' * 512)
+ l.finish()
+ lc = bss.addLog('to-compress')
+ lc.addStdout('b' * 1024)
+ lc.finish()
+ d = bss.stepFinished(builder.SUCCESS)
+ self.failUnless(d is not None)
+ d.addCallback(self._verifyCompression, bss)
+ return d
+
+ def _verifyCompression(self, result, bss):
+        self.failUnlessEqual(len(bss.getLogs()), 2)
+ (ncl, cl) = bss.getLogs() # not compressed, compressed log
+ self.failUnless(os.path.isfile(ncl.getFilename()))
+ self.failIf(os.path.isfile(ncl.getFilename() + ".bz2"))
+ self.failIf(os.path.isfile(cl.getFilename()))
+ self.failUnless(os.path.isfile(cl.getFilename() + ".bz2"))
+        content = ncl.getText()
+        self.failUnlessEqual(len(content), 512)
+        content = cl.getText()
+        self.failUnlessEqual(len(content), 1024)
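+        # Illustrative sketch (an assumption, not code from this module) of
+        # the effect checked above: once the step finishes, a log larger than
+        # the builder's log-compression limit is expected to remain on disk
+        # only as "<logfile>.bz2", roughly as if someone did:
+        #
+        #     import bz2, os
+        #     data = open(logfile, "rb").read()    # hypothetical path
+        #     if len(data) > compression_limit:    # hypothetical limit value
+        #         open(logfile + ".bz2", "wb").write(bz2.compress(data))
+        #         os.unlink(logfile)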
+
+config_base = """
+from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.buildslave import BuildSlave
+s = factory.s
+
+f1 = factory.QuickBuildFactory('fakerep', 'cvsmodule', configure=None)
+
+f2 = factory.BuildFactory([
+ s(dummy.Dummy, timeout=1),
+ s(dummy.RemoteDummy, timeout=2),
+ ])
+
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit')]
+c['schedulers'] = []
+c['builders'] = []
+c['builders'].append({'name':'quick', 'slavename':'bot1',
+ 'builddir': 'quickdir', 'factory': f1})
+c['slavePortnum'] = 0
+"""
+
+config_2 = config_base + """
+c['builders'] = [{'name': 'dummy', 'slavename': 'bot1',
+ 'builddir': 'dummy1', 'factory': f2},
+ {'name': 'testdummy', 'slavename': 'bot1',
+ 'builddir': 'dummy2', 'factory': f2, 'category': 'test'}]
+"""
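+# Note on the configs above: config_2 replaces the 'quick' builder from
+# config_base with two dummy builders; 'testdummy' carries category 'test' so
+# the category-filtering assertions in Subscription.testSlave below have a
+# builder to match against.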
+
+class STarget(base.StatusReceiver):
+ debug = False
+
+ def __init__(self, mode):
+ self.mode = mode
+ self.events = []
+ def announce(self):
+ if self.debug:
+ print self.events[-1]
+
+ def builderAdded(self, name, builder):
+ self.events.append(("builderAdded", name, builder))
+ self.announce()
+ if "builder" in self.mode:
+ return self
+ def builderChangedState(self, name, state):
+ self.events.append(("builderChangedState", name, state))
+ self.announce()
+ def buildStarted(self, name, build):
+ self.events.append(("buildStarted", name, build))
+ self.announce()
+ if "eta" in self.mode:
+ self.eta_build = build.getETA()
+ if "build" in self.mode:
+ return self
+ def buildETAUpdate(self, build, ETA):
+ self.events.append(("buildETAUpdate", build, ETA))
+ self.announce()
+ def stepStarted(self, build, step):
+ self.events.append(("stepStarted", build, step))
+ self.announce()
+ if 0 and "eta" in self.mode:
+ print "TIMES", step.getTimes()
+ print "ETA", step.getETA()
+ print "EXP", step.getExpectations()
+ if "step" in self.mode:
+ return self
+ def stepTextChanged(self, build, step, text):
+ self.events.append(("stepTextChanged", step, text))
+ def stepText2Changed(self, build, step, text2):
+ self.events.append(("stepText2Changed", step, text2))
+ def stepETAUpdate(self, build, step, ETA, expectations):
+ self.events.append(("stepETAUpdate", build, step, ETA, expectations))
+ self.announce()
+ def logStarted(self, build, step, log):
+ self.events.append(("logStarted", build, step, log))
+ self.announce()
+ def logFinished(self, build, step, log):
+ self.events.append(("logFinished", build, step, log))
+ self.announce()
+ def stepFinished(self, build, step, results):
+ self.events.append(("stepFinished", build, step, results))
+ if 0 and "eta" in self.mode:
+ print "post-EXP", step.getExpectations()
+ self.announce()
+ def buildFinished(self, name, build, results):
+ self.events.append(("buildFinished", name, build, results))
+ self.announce()
+ def builderRemoved(self, name):
+ self.events.append(("builderRemoved", name))
+ self.announce()
+
+class Subscription(RunMixin, unittest.TestCase):
+ # verify that StatusTargets can subscribe/unsubscribe properly
+
+ def testSlave(self):
+ m = self.master
+ s = m.getStatus()
+ self.t1 = t1 = STarget(["builder"])
+ #t1.debug = True; print
+ s.subscribe(t1)
+ self.failUnlessEqual(len(t1.events), 0)
+
+ self.t3 = t3 = STarget(["builder", "build", "step"])
+ s.subscribe(t3)
+
+ m.loadConfig(config_2)
+ m.readConfig = True
+ m.startService()
+
+ self.failUnlessEqual(len(t1.events), 4)
+ self.failUnlessEqual(t1.events[0][0:2], ("builderAdded", "dummy"))
+ self.failUnlessEqual(t1.events[1],
+ ("builderChangedState", "dummy", "offline"))
+ self.failUnlessEqual(t1.events[2][0:2], ("builderAdded", "testdummy"))
+ self.failUnlessEqual(t1.events[3],
+ ("builderChangedState", "testdummy", "offline"))
+ t1.events = []
+
+ self.failUnlessEqual(s.getBuilderNames(), ["dummy", "testdummy"])
+ self.failUnlessEqual(s.getBuilderNames(categories=['test']),
+ ["testdummy"])
+ self.s1 = s1 = s.getBuilder("dummy")
+ self.failUnlessEqual(s1.getName(), "dummy")
+ self.failUnlessEqual(s1.getState(), ("offline", []))
+ self.failUnlessEqual(s1.getCurrentBuilds(), [])
+ self.failUnlessEqual(s1.getLastFinishedBuild(), None)
+ self.failUnlessEqual(s1.getBuild(-1), None)
+ #self.failUnlessEqual(s1.getEvent(-1), foo("created"))
+
+ # status targets should, upon being subscribed, immediately get a
+ # list of all current builders matching their category
+ self.t2 = t2 = STarget([])
+ s.subscribe(t2)
+ self.failUnlessEqual(len(t2.events), 2)
+ self.failUnlessEqual(t2.events[0][0:2], ("builderAdded", "dummy"))
+ self.failUnlessEqual(t2.events[1][0:2], ("builderAdded", "testdummy"))
+
+ d = self.connectSlave(builders=["dummy", "testdummy"])
+ d.addCallback(self._testSlave_1, t1)
+ return d
+
+ def _testSlave_1(self, res, t1):
+ self.failUnlessEqual(len(t1.events), 2)
+ self.failUnlessEqual(t1.events[0],
+ ("builderChangedState", "dummy", "idle"))
+ self.failUnlessEqual(t1.events[1],
+ ("builderChangedState", "testdummy", "idle"))
+ t1.events = []
+
+ c = interfaces.IControl(self.master)
+ req = BuildRequest("forced build for testing", SourceStamp(), 'test_builder')
+ c.getBuilder("dummy").requestBuild(req)
+ d = req.waitUntilFinished()
+ d2 = self.master.botmaster.waitUntilBuilderIdle("dummy")
+ dl = defer.DeferredList([d, d2])
+ dl.addCallback(self._testSlave_2)
+ return dl
+
+ def _testSlave_2(self, res):
+ # t1 subscribes to builds, but not anything lower-level
+ ev = self.t1.events
+ self.failUnlessEqual(len(ev), 4)
+ self.failUnlessEqual(ev[0][0:3],
+ ("builderChangedState", "dummy", "building"))
+ self.failUnlessEqual(ev[1][0], "buildStarted")
+ self.failUnlessEqual(ev[2][0:2]+ev[2][3:4],
+ ("buildFinished", "dummy", builder.SUCCESS))
+ self.failUnlessEqual(ev[3][0:3],
+ ("builderChangedState", "dummy", "idle"))
+
+ self.failUnlessEqual([ev[0] for ev in self.t3.events],
+ ["builderAdded",
+ "builderChangedState", # offline
+ "builderAdded",
+ "builderChangedState", # idle
+ "builderChangedState", # offline
+ "builderChangedState", # idle
+ "builderChangedState", # building
+ "buildStarted",
+ "stepStarted", "stepETAUpdate",
+ "stepTextChanged", "stepFinished",
+ "stepStarted", "stepETAUpdate",
+ "stepTextChanged", "logStarted", "logFinished",
+ "stepTextChanged", "stepText2Changed",
+ "stepFinished",
+ "buildFinished",
+ "builderChangedState", # idle
+ ])
+
+ b = self.s1.getLastFinishedBuild()
+ self.failUnless(b)
+ self.failUnlessEqual(b.getBuilder().getName(), "dummy")
+ self.failUnlessEqual(b.getNumber(), 0)
+ self.failUnlessEqual(b.getSourceStamp().branch, None)
+ self.failUnlessEqual(b.getSourceStamp().patch, None)
+ self.failUnlessEqual(b.getSourceStamp().revision, None)
+ self.failUnlessEqual(b.getReason(), "forced build for testing")
+ self.failUnlessEqual(b.getChanges(), ())
+ self.failUnlessEqual(b.getResponsibleUsers(), [])
+ self.failUnless(b.isFinished())
+ self.failUnlessEqual(b.getText(), ['build', 'successful'])
+ self.failUnlessEqual(b.getResults(), builder.SUCCESS)
+
+ steps = b.getSteps()
+ self.failUnlessEqual(len(steps), 2)
+
+ eta = 0
+ st1 = steps[0]
+ self.failUnlessEqual(st1.getName(), "dummy")
+ self.failUnless(st1.isFinished())
+ self.failUnlessEqual(st1.getText(), ["delay", "1 secs"])
+ start,finish = st1.getTimes()
+ self.failUnless(0.5 < (finish-start) < 10)
+ self.failUnlessEqual(st1.getExpectations(), [])
+ self.failUnlessEqual(st1.getLogs(), [])
+ eta += finish-start
+
+ st2 = steps[1]
+ self.failUnlessEqual(st2.getName(), "remote dummy")
+ self.failUnless(st2.isFinished())
+ self.failUnlessEqual(st2.getText(),
+ ["remote", "delay", "2 secs"])
+ start,finish = st2.getTimes()
+ self.failUnless(1.5 < (finish-start) < 10)
+ eta += finish-start
+ self.failUnlessEqual(st2.getExpectations(), [('output', 38, None)])
+ logs = st2.getLogs()
+ self.failUnlessEqual(len(logs), 1)
+ self.failUnlessEqual(logs[0].getName(), "stdio")
+ self.failUnlessEqual(logs[0].getText(), "data")
+
+ self.eta = eta
+ # now we run it a second time, and we should have an ETA
+
+ self.t4 = t4 = STarget(["builder", "build", "eta"])
+ self.master.getStatus().subscribe(t4)
+ c = interfaces.IControl(self.master)
+ req = BuildRequest("forced build for testing", SourceStamp(), 'test_builder')
+ c.getBuilder("dummy").requestBuild(req)
+ d = req.waitUntilFinished()
+ d2 = self.master.botmaster.waitUntilBuilderIdle("dummy")
+ dl = defer.DeferredList([d, d2])
+ dl.addCallback(self._testSlave_3)
+ return dl
+
+ def _testSlave_3(self, res):
+ t4 = self.t4
+ eta = self.eta
+ self.failUnless(eta-1 < t4.eta_build < eta+1, # should be 3 seconds
+ "t4.eta_build was %g, not in (%g,%g)"
+ % (t4.eta_build, eta-1, eta+1))
+
+
+class Client(unittest.TestCase):
+ def testAdaptation(self):
+ b = builder.BuilderStatus("bname")
+ b2 = client.makeRemote(b)
+ self.failUnless(isinstance(b2, client.RemoteBuilder))
+ b3 = client.makeRemote(None)
+ self.failUnless(b3 is None)
+
+
+class ContactTester(unittest.TestCase):
+ def test_notify_invalid_syntax(self):
+ irc = MyContact()
+ self.assertRaises(words.UsageError, lambda args, who: irc.command_NOTIFY(args, who), "", "mynick")
+
+ def test_notify_list(self):
+ irc = MyContact()
+ irc.command_NOTIFY("list", "mynick")
+ self.failUnlessEqual(irc.message, "The following events are being notified: []", "empty notify list")
+
+ irc.message = ""
+ irc.command_NOTIFY("on started", "mynick")
+ self.failUnlessEqual(irc.message, "The following events are being notified: ['started']", "on started")
+
+ irc.message = ""
+ irc.command_NOTIFY("on finished", "mynick")
+ self.failUnlessEqual(irc.message, "The following events are being notified: ['started', 'finished']", "on finished")
+
+ irc.message = ""
+ irc.command_NOTIFY("off", "mynick")
+ self.failUnlessEqual(irc.message, "The following events are being notified: []", "off all")
+
+ irc.message = ""
+ irc.command_NOTIFY("on", "mynick")
+ self.failUnlessEqual(irc.message, "The following events are being notified: ['started', 'finished']", "on default set")
+
+ irc.message = ""
+ irc.command_NOTIFY("off started", "mynick")
+ self.failUnlessEqual(irc.message, "The following events are being notified: ['finished']", "off started")
+
+ irc.message = ""
+ irc.command_NOTIFY("on success failure exception", "mynick")
+ self.failUnlessEqual(irc.message, "The following events are being notified: ['failure', 'finished', 'exception', 'success']", "on multiple events")
+
+ def test_notification_default(self):
+ irc = MyContact()
+
+ my_builder = MyBuilder("builder78")
+ my_build = MyIrcBuild(my_builder, 23, builder.SUCCESS)
+
+ irc.buildStarted(my_builder.getName(), my_build)
+ self.failUnlessEqual(irc.message, "", "No notification with default settings")
+
+ irc.buildFinished(my_builder.getName(), my_build, None)
+ self.failUnlessEqual(irc.message, "", "No notification with default settings")
+
+ def test_notification_started(self):
+ irc = MyContact()
+
+ my_builder = MyBuilder("builder78")
+ my_build = MyIrcBuild(my_builder, 23, builder.SUCCESS)
+ my_build.changes = (
+ Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 123),
+ Change(who = 'author2', files = ['file2'], comments = 'comment2', revision = 456),
+ )
+
+ irc.command_NOTIFY("on started", "mynick")
+
+ irc.message = ""
+ irc.buildStarted(my_builder.getName(), my_build)
+ self.failUnlessEqual(irc.message, "build #23 of builder78 started including [123, 456]", "Start notification generated with notify_events=['started']")
+
+ irc.message = ""
+ irc.buildFinished(my_builder.getName(), my_build, None)
+ self.failUnlessEqual(irc.message, "", "No finished notification with notify_events=['started']")
+
+ def test_notification_finished(self):
+ irc = MyContact()
+
+ my_builder = MyBuilder("builder834")
+ my_build = MyIrcBuild(my_builder, 862, builder.SUCCESS)
+ my_build.changes = (
+ Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 943),
+ )
+
+ irc.command_NOTIFY("on finished", "mynick")
+
+ irc.message = ""
+ irc.buildStarted(my_builder.getName(), my_build)
+ self.failUnlessEqual(irc.message, "", "No started notification with notify_events=['finished']")
+
+ irc.message = ""
+ irc.buildFinished(my_builder.getName(), my_build, None)
+ self.failUnlessEqual(irc.message, "build #862 of builder834 is complete: Success [step1 step2] Build details are at http://myserver/mypath?build=765", "Finish notification generated with notify_events=['finished']")
+
+ def test_notification_success(self):
+ irc = MyContact()
+
+ my_builder = MyBuilder("builder834")
+ my_build = MyIrcBuild(my_builder, 862, builder.SUCCESS)
+ my_build.changes = (
+ Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 943),
+ )
+
+ irc.command_NOTIFY("on success", "mynick")
+
+ irc.message = ""
+ irc.buildStarted(my_builder.getName(), my_build)
+ self.failUnlessEqual(irc.message, "", "No started notification with notify_events=['success']")
+
+ irc.message = ""
+ irc.buildFinished(my_builder.getName(), my_build, None)
+ self.failUnlessEqual(irc.message, "build #862 of builder834 is complete: Success [step1 step2] Build details are at http://myserver/mypath?build=765", "Finish notification generated on success with notify_events=['success']")
+
+ irc.message = ""
+ my_build.results = builder.FAILURE
+ irc.buildFinished(my_builder.getName(), my_build, None)
+ self.failUnlessEqual(irc.message, "", "No finish notification generated on failure with notify_events=['success']")
+
+ irc.message = ""
+ my_build.results = builder.EXCEPTION
+ irc.buildFinished(my_builder.getName(), my_build, None)
+ self.failUnlessEqual(irc.message, "", "No finish notification generated on exception with notify_events=['success']")
+
+ def test_notification_failed(self):
+ irc = MyContact()
+
+ my_builder = MyBuilder("builder834")
+ my_build = MyIrcBuild(my_builder, 862, builder.FAILURE)
+ my_build.changes = (
+ Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 943),
+ )
+
+ irc.command_NOTIFY("on failure", "mynick")
+
+ irc.message = ""
+ irc.buildStarted(my_builder.getName(), my_build)
+        self.failUnlessEqual(irc.message, "", "No started notification with notify_events=['failure']")
+
+ irc.message = ""
+ irc.buildFinished(my_builder.getName(), my_build, None)
+        self.failUnlessEqual(irc.message, "build #862 of builder834 is complete: Failure [step1 step2] Build details are at http://myserver/mypath?build=765", "Finish notification generated on failure with notify_events=['failure']")
+
+ irc.message = ""
+ my_build.results = builder.SUCCESS
+ irc.buildFinished(my_builder.getName(), my_build, None)
+        self.failUnlessEqual(irc.message, "", "No finish notification generated on success with notify_events=['failure']")
+
+ irc.message = ""
+ my_build.results = builder.EXCEPTION
+ irc.buildFinished(my_builder.getName(), my_build, None)
+        self.failUnlessEqual(irc.message, "", "No finish notification generated on exception with notify_events=['failure']")
+
+ def test_notification_exception(self):
+ irc = MyContact()
+
+ my_builder = MyBuilder("builder834")
+ my_build = MyIrcBuild(my_builder, 862, builder.EXCEPTION)
+ my_build.changes = (
+ Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 943),
+ )
+
+ irc.command_NOTIFY("on exception", "mynick")
+
+ irc.message = ""
+ irc.buildStarted(my_builder.getName(), my_build)
+ self.failUnlessEqual(irc.message, "", "No started notification with notify_events=['exception']")
+
+ irc.message = ""
+ irc.buildFinished(my_builder.getName(), my_build, None)
+        self.failUnlessEqual(irc.message, "build #862 of builder834 is complete: Exception [step1 step2] Build details are at http://myserver/mypath?build=765", "Finish notification generated on exception with notify_events=['exception']")
+
+ irc.message = ""
+ my_build.results = builder.SUCCESS
+ irc.buildFinished(my_builder.getName(), my_build, None)
+ self.failUnlessEqual(irc.message, "", "No finish notification generated on success with notify_events=['exception']")
+
+ irc.message = ""
+ my_build.results = builder.FAILURE
+ irc.buildFinished(my_builder.getName(), my_build, None)
+        self.failUnlessEqual(irc.message, "", "No finish notification generated on failure with notify_events=['exception']")
+
+ def do_x_to_y_notification_test(self, notify, previous_result, new_result, expected_msg):
+ irc = MyContact()
+ irc.command_NOTIFY("on %s" % notify, "mynick")
+
+ my_builder = MyBuilder("builder834")
+ my_build = MyIrcBuild(my_builder, 862, builder.FAILURE)
+ my_build.changes = (
+ Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 943),
+ )
+
+ previous_build = MyIrcBuild(my_builder, 861, previous_result)
+ my_build.setPreviousBuild(previous_build)
+
+ irc.message = ""
+ my_build.results = new_result
+ irc.buildFinished(my_builder.getName(), my_build, None)
+        self.failUnlessEqual(irc.message, expected_msg, "Wrong finish notification with notify_events=['%s']" % notify)
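+        # The helper wires up a previous build carrying previous_result so
+        # the X-to-Y notify filters have an earlier outcome to compare with;
+        # only the matching old->new transition should produce a message.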
+
+ def test_notification_successToFailure(self):
+ self.do_x_to_y_notification_test(notify="successToFailure", previous_result=builder.SUCCESS, new_result=builder.FAILURE,
+ expected_msg="build #862 of builder834 is complete: Failure [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="successToFailure", previous_result=builder.SUCCESS, new_result=builder.SUCCESS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="successToFailure", previous_result=builder.SUCCESS, new_result=builder.WARNINGS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="successToFailure", previous_result=builder.SUCCESS, new_result=builder.EXCEPTION,
+ expected_msg = "" )
+
+ def test_notification_successToWarnings(self):
+ self.do_x_to_y_notification_test(notify="successToWarnings", previous_result=builder.SUCCESS, new_result=builder.WARNINGS,
+ expected_msg="build #862 of builder834 is complete: Warnings [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="successToWarnings", previous_result=builder.SUCCESS, new_result=builder.SUCCESS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="successToWarnings", previous_result=builder.SUCCESS, new_result=builder.FAILURE,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="successToWarnings", previous_result=builder.SUCCESS, new_result=builder.EXCEPTION,
+ expected_msg = "" )
+
+ def test_notification_successToException(self):
+ self.do_x_to_y_notification_test(notify="successToException", previous_result=builder.SUCCESS, new_result=builder.EXCEPTION,
+ expected_msg="build #862 of builder834 is complete: Exception [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="successToException", previous_result=builder.SUCCESS, new_result=builder.SUCCESS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="successToException", previous_result=builder.SUCCESS, new_result=builder.FAILURE,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="successToException", previous_result=builder.SUCCESS, new_result=builder.WARNINGS,
+ expected_msg = "" )
+
+ def test_notification_failureToSuccess(self):
+ self.do_x_to_y_notification_test(notify="failureToSuccess", previous_result=builder.FAILURE,new_result=builder.SUCCESS,
+ expected_msg="build #862 of builder834 is complete: Success [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="failureToSuccess", previous_result=builder.FAILURE,new_result=builder.FAILURE,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="failureToSuccess", previous_result=builder.FAILURE,new_result=builder.WARNINGS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="failureToSuccess", previous_result=builder.FAILURE,new_result=builder.EXCEPTION,
+ expected_msg = "" )
+
+ def test_notification_failureToWarnings(self):
+ self.do_x_to_y_notification_test(notify="failureToWarnings", previous_result=builder.FAILURE, new_result=builder.WARNINGS,
+ expected_msg="build #862 of builder834 is complete: Warnings [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="failureToWarnings", previous_result=builder.FAILURE, new_result=builder.SUCCESS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="failureToWarnings", previous_result=builder.FAILURE, new_result=builder.FAILURE,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="failureToWarnings", previous_result=builder.FAILURE, new_result=builder.EXCEPTION,
+ expected_msg = "" )
+
+ def test_notification_failureToException(self):
+ self.do_x_to_y_notification_test(notify="failureToException", previous_result=builder.FAILURE, new_result=builder.EXCEPTION,
+ expected_msg="build #862 of builder834 is complete: Exception [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="failureToException", previous_result=builder.FAILURE, new_result=builder.SUCCESS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="failureToException", previous_result=builder.FAILURE, new_result=builder.FAILURE,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="failureToException", previous_result=builder.FAILURE, new_result=builder.WARNINGS,
+ expected_msg = "" )
+
+ def test_notification_warningsToFailure(self):
+ self.do_x_to_y_notification_test(notify="warningsToFailure", previous_result=builder.WARNINGS, new_result=builder.FAILURE,
+ expected_msg="build #862 of builder834 is complete: Failure [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="warningsToFailure", previous_result=builder.WARNINGS, new_result=builder.SUCCESS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="warningsToFailure", previous_result=builder.WARNINGS, new_result=builder.WARNINGS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="warningsToFailure", previous_result=builder.WARNINGS, new_result=builder.EXCEPTION,
+ expected_msg = "" )
+
+ def test_notification_warningsToSuccess(self):
+ self.do_x_to_y_notification_test(notify="warningsToSuccess", previous_result=builder.WARNINGS, new_result=builder.SUCCESS,
+ expected_msg="build #862 of builder834 is complete: Success [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="warningsToSuccess", previous_result=builder.WARNINGS, new_result=builder.WARNINGS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="warningsToSuccess", previous_result=builder.WARNINGS, new_result=builder.FAILURE,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="warningsToSuccess", previous_result=builder.WARNINGS, new_result=builder.EXCEPTION,
+ expected_msg = "" )
+
+ def test_notification_warningsToException(self):
+ self.do_x_to_y_notification_test(notify="warningsToException", previous_result=builder.WARNINGS, new_result=builder.EXCEPTION,
+ expected_msg="build #862 of builder834 is complete: Exception [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="warningsToException", previous_result=builder.WARNINGS, new_result=builder.SUCCESS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="warningsToException", previous_result=builder.WARNINGS, new_result=builder.FAILURE,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="warningsToException", previous_result=builder.WARNINGS, new_result=builder.WARNINGS,
+ expected_msg = "" )
+
+ def test_notification_exceptionToFailure(self):
+ self.do_x_to_y_notification_test(notify="exceptionToFailure", previous_result=builder.EXCEPTION, new_result=builder.FAILURE,
+ expected_msg="build #862 of builder834 is complete: Failure [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="exceptionToFailure", previous_result=builder.EXCEPTION, new_result=builder.SUCCESS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="exceptionToFailure", previous_result=builder.EXCEPTION, new_result=builder.WARNINGS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="exceptionToFailure", previous_result=builder.EXCEPTION, new_result=builder.EXCEPTION,
+ expected_msg = "" )
+
+ def test_notification_exceptionToWarnings(self):
+ self.do_x_to_y_notification_test(notify="exceptionToWarnings", previous_result=builder.EXCEPTION, new_result=builder.WARNINGS,
+ expected_msg="build #862 of builder834 is complete: Warnings [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="exceptionToWarnings", previous_result=builder.EXCEPTION, new_result=builder.SUCCESS,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="exceptionToWarnings", previous_result=builder.EXCEPTION, new_result=builder.FAILURE,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="exceptionToWarnings", previous_result=builder.EXCEPTION, new_result=builder.EXCEPTION,
+ expected_msg = "" )
+
+ def test_notification_exceptionToSuccess(self):
+ self.do_x_to_y_notification_test(notify="exceptionToSuccess", previous_result=builder.EXCEPTION, new_result=builder.SUCCESS,
+ expected_msg="build #862 of builder834 is complete: Success [step1 step2] Build details are at http://myserver/mypath?build=765" )
+
+ self.do_x_to_y_notification_test(notify="exceptionToSuccess", previous_result=builder.EXCEPTION, new_result=builder.EXCEPTION,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="exceptionToSuccess", previous_result=builder.EXCEPTION, new_result=builder.FAILURE,
+ expected_msg = "" )
+
+ self.do_x_to_y_notification_test(notify="exceptionToSuccess", previous_result=builder.EXCEPTION, new_result=builder.WARNINGS,
+ expected_msg = "" )
+
+ def test_notification_set_in_config(self):
+ irc = MyContact(channel = MyChannel(notify_events = {'success': 1}))
+
+ my_builder = MyBuilder("builder834")
+ my_build = MyIrcBuild(my_builder, 862, builder.SUCCESS)
+ my_build.changes = (
+ Change(who = 'author1', files = ['file1'], comments = 'comment1', revision = 943),
+ )
+
+ irc.message = ""
+ irc.buildFinished(my_builder.getName(), my_build, None)
+ self.failUnlessEqual(irc.message, "build #862 of builder834 is complete: Success [step1 step2] Build details are at http://myserver/mypath?build=765", "Finish notification generated on success with notify_events=['success']")
+
+class MyIrcBuild(builder.BuildStatus):
+ results = None
+
+ def __init__(self, parent, number, results):
+ builder.BuildStatus.__init__(self, parent, number)
+ self.results = results
+ self.previousBuild = None
+
+ def getResults(self):
+ return self.results
+
+ def getText(self):
+ return ('step1', 'step2')
+
+ def setPreviousBuild(self, pb):
+ self.previousBuild = pb
+
+ def getPreviousBuild(self):
+ return self.previousBuild
+
+class URLProducer:
+ def getURLForThing(self, build):
+ return 'http://myserver/mypath?build=765'
+
+class MyChannel:
+ categories = None
+ status = URLProducer()
+ notify_events = {}
+
+ def __init__(self, notify_events = {}):
+ self.notify_events = notify_events
+
+class MyContact(words.Contact):
+ message = ""
+
+ def __init__(self, channel = MyChannel()):
+ words.Contact.__init__(self, channel)
+ self.message = ""
+
+ def subscribe_to_build_events(self):
+ pass
+
+ def unsubscribe_from_build_events(self):
+ pass
+
+ def send(self, msg):
+ self.message += msg
+
+class StepStatistics(unittest.TestCase):
+ def testStepStatistics(self):
+ status = builder.BuildStatus(builder.BuilderStatus("test"), 123)
+ status.addStepWithName('step1')
+ status.addStepWithName('step2')
+ status.addStepWithName('step3')
+ status.addStepWithName('step4')
+
+ steps = status.getSteps()
+ (step1, step2, step3, step4) = steps
+
+ step1.setStatistic('test-prop', 1)
+ step3.setStatistic('test-prop', 2)
+ step4.setStatistic('test-prop', 4)
+
+ step1.setStatistic('other-prop', 27)
+ # Just to have some other properties around
+
+ self.failUnlessEqual(step1.getStatistic('test-prop'), 1,
+ 'Retrieve an existing property')
+ self.failUnlessEqual(step1.getStatistic('test-prop', 99), 1,
+ "Don't default an existing property")
+        self.failUnlessEqual(step2.getStatistic('test-prop', 99), 99,
+                             'Default a non-existent property')
+
+ self.failUnlessEqual(
+ status.getSummaryStatistic('test-prop', operator.add), 7,
+ 'Sum property across the build')
+
+ self.failUnlessEqual(
+ status.getSummaryStatistic('test-prop', operator.add, 13), 20,
+ 'Sum property across the build with initial value')
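+        # Arithmetic behind the two summary assertions: the 'test-prop' values
+        # on step1, step3 and step4 are 1 + 2 + 4 = 7, and with an initial
+        # value of 13 the summed statistic becomes 13 + 7 = 20.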
+
+class BuildExpectation(unittest.TestCase):
+ class MyBuilderStatus:
+ implements(interfaces.IBuilderStatus)
+
+ def setSlavenames(self, slaveName):
+ pass
+
+ class MyBuilder(Builder):
+ def __init__(self, name):
+ Builder.__init__(self, {
+ 'name': name,
+ 'builddir': '/tmp/somewhere',
+ 'factory': 'aFactory'
+ }, BuildExpectation.MyBuilderStatus())
+
+ class MyBuild(Build):
+ def __init__(self, b):
+ self.builder = b
+ self.remote = None
+
+ step1_progress = progress.StepProgress('step1', ['elapsed'])
+ self.progress = progress.BuildProgress([step1_progress])
+ step1_progress.setBuildProgress(self.progress)
+
+ step1_progress.start()
+            sleep(1)
+ step1_progress.finish()
+
+ self.deferred = defer.Deferred()
+ self.locks = []
+ self.build_status = builder.BuildStatus(b.builder_status, 1)
+
+
+ def testBuildExpectation_BuildSuccess(self):
+ b = BuildExpectation.MyBuilder("builder1")
+ build = BuildExpectation.MyBuild(b)
+
+ build.buildFinished(['sometext'], builder.SUCCESS)
+        self.failIfEqual(b.expectations.expectedBuildTime(), 0, 'Non-zero expectation for a successful build')
+
+ def testBuildExpectation_BuildFailure(self):
+ b = BuildExpectation.MyBuilder("builder1")
+ build = BuildExpectation.MyBuild(b)
+
+ build.buildFinished(['sometext'], builder.FAILURE)
+ self.failUnlessEqual(b.expectations, None, 'Zero expectation for a failed build')
diff --git a/buildbot/buildbot/test/test_steps.py b/buildbot/buildbot/test/test_steps.py
new file mode 100644
index 0000000..880658c
--- /dev/null
+++ b/buildbot/buildbot/test/test_steps.py
@@ -0,0 +1,788 @@
+# -*- test-case-name: buildbot.test.test_steps -*-
+
+# create the BuildStep with a fake .remote instance that logs the
+# .callRemote invocations and compares them against the expected calls. Then
+# the test harness should send statusUpdate() messages in with assorted
+# data, eventually calling remote_complete(). Then we can verify that the
+# Step's rc was correct, and that the status it was supposed to return
+# matches.
+
+# sometimes, .callRemote should raise an exception because of a stale
+# reference. Sometimes it should errBack with an UnknownCommand failure.
+# Or other failure.
+
+# todo: test batched updates, by invoking remote_update(updates) instead of
+# statusUpdate(update). Also involves interrupted builds.
+
+import os
+
+from twisted.trial import unittest
+from twisted.internet import reactor, defer
+
+from buildbot.sourcestamp import SourceStamp
+from buildbot.process import buildstep, base, factory
+from buildbot.buildslave import BuildSlave
+from buildbot.steps import shell, source, python, master
+from buildbot.status import builder
+from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE
+from buildbot.test.runutils import RunMixin, rmtree
+from buildbot.test.runutils import makeBuildStep, StepTester
+from buildbot.slave import commands, registry
+
+
+class MyShellCommand(shell.ShellCommand):
+ started = False
+ def runCommand(self, c):
+ self.started = True
+ self.rc = c
+ return shell.ShellCommand.runCommand(self, c)
+
+class FakeBuild:
+ pass
+class FakeBuilder:
+ statusbag = None
+ name = "fakebuilder"
+class FakeSlaveBuilder:
+ def getSlaveCommandVersion(self, command, oldversion=None):
+ return "1.10"
+
+class FakeRemote:
+ def __init__(self):
+ self.events = []
+ self.remoteCalls = 0
+ #self.callRemoteNotifier = None
+ def callRemote(self, methname, *args):
+ event = ["callRemote", methname, args]
+ self.events.append(event)
+## if self.callRemoteNotifier:
+## reactor.callLater(0, self.callRemoteNotifier, event)
+ self.remoteCalls += 1
+ self.deferred = defer.Deferred()
+ return self.deferred
+ def notifyOnDisconnect(self, callback):
+ pass
+ def dontNotifyOnDisconnect(self, callback):
+ pass
+
+
+class BuildStep(unittest.TestCase):
+
+ def setUp(self):
+ rmtree("test_steps")
+ self.builder = FakeBuilder()
+ self.builder_status = builder.BuilderStatus("fakebuilder")
+ self.builder_status.basedir = "test_steps"
+ self.builder_status.nextBuildNumber = 0
+ os.mkdir(self.builder_status.basedir)
+ self.build_status = self.builder_status.newBuild()
+ req = base.BuildRequest("reason", SourceStamp(), 'test_builder')
+ self.build = base.Build([req])
+ self.build.build_status = self.build_status # fake it
+ self.build.builder = self.builder
+ self.build.slavebuilder = FakeSlaveBuilder()
+ self.remote = FakeRemote()
+ self.finished = 0
+
+ def callback(self, results):
+ self.failed = 0
+ self.failure = None
+ self.results = results
+ self.finished = 1
+ def errback(self, failure):
+ self.failed = 1
+ self.failure = failure
+ self.results = None
+ self.finished = 1
+
+ def testShellCommand1(self):
+ cmd = "argle bargle"
+ dir = "murkle"
+ self.expectedEvents = []
+ buildstep.RemoteCommand.commandCounter[0] = 3
+ c = MyShellCommand(workdir=dir, command=cmd, timeout=10)
+ c.setBuild(self.build)
+ c.setBuildSlave(BuildSlave("name", "password"))
+ self.assertEqual(self.remote.events, self.expectedEvents)
+ c.step_status = self.build_status.addStepWithName("myshellcommand")
+ d = c.startStep(self.remote)
+ self.failUnless(c.started)
+ d.addCallbacks(self.callback, self.errback)
+ d2 = self.poll()
+ d2.addCallback(self._testShellCommand1_2, c)
+ return d2
+ testShellCommand1.timeout = 10
+
+ def poll(self, ignored=None):
+ # TODO: This is gross, but at least it's no longer using
+ # reactor.iterate() . Still, get rid of this some day soon.
+ if self.remote.remoteCalls == 0:
+ d = defer.Deferred()
+ d.addCallback(self.poll)
+ reactor.callLater(0.1, d.callback, None)
+ return d
+ return defer.succeed(None)
+
+ def _testShellCommand1_2(self, res, c):
+ rc = c.rc
+ self.expectedEvents.append(["callRemote", "startCommand",
+ (rc, "3",
+ "shell",
+ {'command': "argle bargle",
+ 'workdir': "murkle",
+ 'want_stdout': 1,
+ 'want_stderr': 1,
+ 'logfiles': {},
+ 'timeout': 10,
+ 'usePTY': 'slave-config',
+ 'env': None}) ] )
+ self.assertEqual(self.remote.events, self.expectedEvents)
+
+ # we could do self.remote.deferred.errback(UnknownCommand) here. We
+ # could also do .callback(), but generally the master end silently
+ # ignores the slave's ack
+
+ logs = c.step_status.getLogs()
+ for log in logs:
+ if log.getName() == "log":
+ break
+
+ rc.remoteUpdate({'header':
+ "command 'argle bargle' in dir 'murkle'\n\n"})
+ rc.remoteUpdate({'stdout': "foo\n"})
+ self.assertEqual(log.getText(), "foo\n")
+ self.assertEqual(log.getTextWithHeaders(),
+ "command 'argle bargle' in dir 'murkle'\n\n"
+ "foo\n")
+ rc.remoteUpdate({'stderr': "bar\n"})
+ self.assertEqual(log.getText(), "foo\nbar\n")
+ self.assertEqual(log.getTextWithHeaders(),
+ "command 'argle bargle' in dir 'murkle'\n\n"
+ "foo\nbar\n")
+ rc.remoteUpdate({'rc': 0})
+ self.assertEqual(rc.rc, 0)
+
+ rc.remote_complete()
+ # that should fire the Deferred
+ d = self.poll2()
+ d.addCallback(self._testShellCommand1_3)
+ return d
+
+ def poll2(self, ignored=None):
+ if not self.finished:
+ d = defer.Deferred()
+ d.addCallback(self.poll2)
+ reactor.callLater(0.1, d.callback, None)
+ return d
+ return defer.succeed(None)
+
+ def _testShellCommand1_3(self, res):
+ self.assertEqual(self.failed, 0)
+ self.assertEqual(self.results, 0)
+
+
+class MyObserver(buildstep.LogObserver):
+ out = ""
+ def outReceived(self, data):
+ self.out = self.out + data
+
+class Steps(unittest.TestCase):
+ def testMultipleStepInstances(self):
+ steps = [
+ (source.CVS, {'cvsroot': "root", 'cvsmodule': "module"}),
+ (shell.Configure, {'command': "./configure"}),
+ (shell.Compile, {'command': "make"}),
+ (shell.Compile, {'command': "make more"}),
+ (shell.Compile, {'command': "make evenmore"}),
+ (shell.Test, {'command': "make test"}),
+ (shell.Test, {'command': "make testharder"}),
+ ]
+ f = factory.ConfigurableBuildFactory(steps)
+ req = base.BuildRequest("reason", SourceStamp(), 'test_builder')
+ b = f.newBuild([req])
+ #for s in b.steps: print s.name
+
+ def failUnlessClones(self, s1, attrnames):
+ f1 = s1.getStepFactory()
+ f,args = f1
+ s2 = f(**args)
+ for name in attrnames:
+ self.failUnlessEqual(getattr(s1, name), getattr(s2, name))
+
+ def clone(self, s1):
+ f1 = s1.getStepFactory()
+ f,args = f1
+ s2 = f(**args)
+ return s2
+
+ def testClone(self):
+ s1 = shell.ShellCommand(command=["make", "test"],
+ timeout=1234,
+ workdir="here",
+ description="yo",
+ descriptionDone="yoyo",
+ env={'key': 'value'},
+ want_stdout=False,
+ want_stderr=False,
+ logfiles={"name": "filename"},
+ )
+ shellparms = (buildstep.BuildStep.parms +
+ ("remote_kwargs description descriptionDone "
+ "command logfiles").split() )
+ self.failUnlessClones(s1, shellparms)
+
+
+ # test the various methods available to buildsteps
+
+ def test_getProperty(self):
+ s = makeBuildStep("test_steps.Steps.test_getProperty")
+ bs = s.step_status.getBuild()
+
+ s.setProperty("prop1", "value1", "test")
+ s.setProperty("prop2", "value2", "test")
+ self.failUnlessEqual(s.getProperty("prop1"), "value1")
+ self.failUnlessEqual(bs.getProperty("prop1"), "value1")
+ self.failUnlessEqual(s.getProperty("prop2"), "value2")
+ self.failUnlessEqual(bs.getProperty("prop2"), "value2")
+ s.setProperty("prop1", "value1a", "test")
+ self.failUnlessEqual(s.getProperty("prop1"), "value1a")
+ self.failUnlessEqual(bs.getProperty("prop1"), "value1a")
+
+
+ def test_addURL(self):
+ s = makeBuildStep("test_steps.Steps.test_addURL")
+ s.addURL("coverage", "http://coverage.example.org/target")
+ s.addURL("icon", "http://coverage.example.org/icon.png")
+ bs = s.step_status
+ links = bs.getURLs()
+ expected = {"coverage": "http://coverage.example.org/target",
+ "icon": "http://coverage.example.org/icon.png",
+ }
+ self.failUnlessEqual(links, expected)
+
+ def test_addLog(self):
+ s = makeBuildStep("test_steps.Steps.test_addLog")
+ l = s.addLog("newlog")
+ l.addStdout("some stdout here")
+ l.finish()
+ bs = s.step_status
+ logs = bs.getLogs()
+ self.failUnlessEqual(len(logs), 1)
+ l1 = logs[0]
+ self.failUnlessEqual(l1.getText(), "some stdout here")
+ l1a = s.getLog("newlog")
+ self.failUnlessEqual(l1a.getText(), "some stdout here")
+
+ def test_addHTMLLog(self):
+ s = makeBuildStep("test_steps.Steps.test_addHTMLLog")
+ l = s.addHTMLLog("newlog", "some html here")
+ bs = s.step_status
+ logs = bs.getLogs()
+ self.failUnlessEqual(len(logs), 1)
+ l1 = logs[0]
+ self.failUnless(isinstance(l1, builder.HTMLLogFile))
+ self.failUnlessEqual(l1.getText(), "some html here")
+
+ def test_addCompleteLog(self):
+ s = makeBuildStep("test_steps.Steps.test_addCompleteLog")
+ l = s.addCompleteLog("newlog", "some stdout here")
+ bs = s.step_status
+ logs = bs.getLogs()
+ self.failUnlessEqual(len(logs), 1)
+ l1 = logs[0]
+ self.failUnlessEqual(l1.getText(), "some stdout here")
+ l1a = s.getLog("newlog")
+ self.failUnlessEqual(l1a.getText(), "some stdout here")
+
+ def test_addLogObserver(self):
+ s = makeBuildStep("test_steps.Steps.test_addLogObserver")
+ bss = s.step_status
+ o1,o2,o3 = MyObserver(), MyObserver(), MyObserver()
+
+ # add the log before the observer
+ l1 = s.addLog("one")
+ l1.addStdout("onestuff")
+ s.addLogObserver("one", o1)
+ self.failUnlessEqual(o1.out, "onestuff")
+ l1.addStdout(" morestuff")
+ self.failUnlessEqual(o1.out, "onestuff morestuff")
+
+ # add the observer before the log
+ s.addLogObserver("two", o2)
+ l2 = s.addLog("two")
+ l2.addStdout("twostuff")
+ self.failUnlessEqual(o2.out, "twostuff")
+
+ # test more stuff about ShellCommands
+
+ def test_description(self):
+ s = makeBuildStep("test_steps.Steps.test_description.1",
+ step_class=shell.ShellCommand,
+ workdir="dummy",
+ description=["list", "of", "strings"],
+ descriptionDone=["another", "list"])
+ self.failUnlessEqual(s.description, ["list", "of", "strings"])
+ self.failUnlessEqual(s.descriptionDone, ["another", "list"])
+
+ s = makeBuildStep("test_steps.Steps.test_description.2",
+ step_class=shell.ShellCommand,
+ workdir="dummy",
+ description="single string",
+ descriptionDone="another string")
+ self.failUnlessEqual(s.description, ["single string"])
+ self.failUnlessEqual(s.descriptionDone, ["another string"])
+
+class VersionCheckingStep(buildstep.BuildStep):
+ def start(self):
+ # give our test a chance to run. It is non-trivial for a buildstep to
+ # claw its way back out to the test case which is currently running.
+ master = self.build.builder.botmaster.parent
+ checker = master._checker
+ checker(self)
+ # then complete
+ self.finished(buildstep.SUCCESS)
+
+version_config = """
+from buildbot.process import factory
+from buildbot.test.test_steps import VersionCheckingStep
+from buildbot.buildslave import BuildSlave
+BuildmasterConfig = c = {}
+f1 = factory.BuildFactory([
+ factory.s(VersionCheckingStep),
+ ])
+c['slaves'] = [BuildSlave('bot1', 'sekrit')]
+c['schedulers'] = []
+c['builders'] = [{'name':'quick', 'slavename':'bot1',
+ 'builddir': 'quickdir', 'factory': f1}]
+c['slavePortnum'] = 0
+"""
+
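+# Editor's note: the config above wires a single slave ('bot1') to a single
+# 'quick' builder whose only step is VersionCheckingStep; that step calls back
+# into the running test through master._checker (see checkCompare/testCompare
+# below).
+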
+class SlaveVersion(RunMixin, unittest.TestCase):
+ def setUp(self):
+ RunMixin.setUp(self)
+ self.master.loadConfig(version_config)
+ self.master.startService()
+ d = self.connectSlave(["quick"])
+ return d
+
+ def doBuild(self, buildername):
+ br = base.BuildRequest("forced", SourceStamp(), 'test_builder')
+ d = br.waitUntilFinished()
+ self.control.getBuilder(buildername).requestBuild(br)
+ return d
+
+
+ def checkCompare(self, s):
+ cver = commands.command_version
+ v = s.slaveVersion("svn", None)
+ # this ensures that we are getting the version correctly
+ self.failUnlessEqual(v, cver)
+ # and that non-existent commands do not provide a version
+ self.failUnlessEqual(s.slaveVersion("NOSUCHCOMMAND"), None)
+ # TODO: verify that a <=0.5.0 buildslave (which does not implement
+ # remote_getCommands) handles oldversion= properly. This requires a
+ # mutant slave which does not offer that method.
+ #self.failUnlessEqual(s.slaveVersion("NOSUCHCOMMAND", "old"), "old")
+
+ # now check the comparison functions
+ self.failIf(s.slaveVersionIsOlderThan("svn", cver))
+ self.failIf(s.slaveVersionIsOlderThan("svn", "1.1"))
+ self.failUnless(s.slaveVersionIsOlderThan("svn", cver + ".1"))
+
+ self.failUnlessEqual(s.getSlaveName(), "bot1")
+
+ def testCompare(self):
+ self.master._checker = self.checkCompare
+ d = self.doBuild("quick")
+ return d
+
+
+class _SimpleBuildStep(buildstep.BuildStep):
+ def start(self):
+ args = {"arg1": "value"}
+ cmd = buildstep.RemoteCommand("simple", args)
+ d = self.runCommand(cmd)
+ d.addCallback(lambda res: self.finished(SUCCESS))
+
+class _SimpleCommand(commands.Command):
+ def start(self):
+ self.builder.flag = True
+ self.builder.flag_args = self.args
+ return defer.succeed(None)
+
+class CheckStepTester(StepTester, unittest.TestCase):
+ def testSimple(self):
+ self.slavebase = "testSimple.slave"
+ self.masterbase = "testSimple.master"
+ sb = self.makeSlaveBuilder()
+ sb.flag = False
+ registry.registerSlaveCommand("simple", _SimpleCommand, "1")
+ step = self.makeStep(_SimpleBuildStep)
+ d = self.runStep(step)
+ def _checkSimple(results):
+ self.failUnless(sb.flag)
+ self.failUnlessEqual(sb.flag_args, {"arg1": "value"})
+ d.addCallback(_checkSimple)
+ return d
+
+class Python(StepTester, unittest.TestCase):
+ def testPyFlakes1(self):
+ self.masterbase = "Python.testPyFlakes1"
+ step = self.makeStep(python.PyFlakes)
+ output = \
+"""pyflakes buildbot
+buildbot/changes/freshcvsmail.py:5: 'FCMaildirSource' imported but unused
+buildbot/clients/debug.py:9: redefinition of unused 'gtk' from line 9
+buildbot/clients/debug.py:9: 'gnome' imported but unused
+buildbot/scripts/runner.py:323: redefinition of unused 'run' from line 321
+buildbot/scripts/runner.py:325: redefinition of unused 'run' from line 323
+buildbot/scripts/imaginary.py:12: undefined name 'size'
+buildbot/scripts/imaginary.py:18: 'from buildbot import *' used; unable to detect undefined names
+"""
+ log = step.addLog("stdio")
+ log.addStdout(output)
+ log.finish()
+ step.createSummary(log)
+ desc = step.descriptionDone
+ self.failUnless("unused=2" in desc)
+ self.failUnless("undefined=1" in desc)
+ self.failUnless("redefs=3" in desc)
+ self.failUnless("import*=1" in desc)
+ self.failIf("misc=" in desc)
+
+ self.failUnlessEqual(step.getProperty("pyflakes-unused"), 2)
+ self.failUnlessEqual(step.getProperty("pyflakes-undefined"), 1)
+ self.failUnlessEqual(step.getProperty("pyflakes-redefs"), 3)
+ self.failUnlessEqual(step.getProperty("pyflakes-import*"), 1)
+ self.failUnlessEqual(step.getProperty("pyflakes-misc"), 0)
+ self.failUnlessEqual(step.getProperty("pyflakes-total"), 7)
+
+ logs = {}
+ for log in step.step_status.getLogs():
+ logs[log.getName()] = log
+
+ for name in ["unused", "undefined", "redefs", "import*"]:
+ self.failUnless(name in logs)
+ self.failIf("misc" in logs)
+ lines = logs["unused"].readlines()
+ self.failUnlessEqual(len(lines), 2)
+ self.failUnlessEqual(lines[0], "buildbot/changes/freshcvsmail.py:5: 'FCMaildirSource' imported but unused\n")
+
+ cmd = buildstep.RemoteCommand(None, {})
+ cmd.rc = 0
+ results = step.evaluateCommand(cmd)
+ self.failUnlessEqual(results, FAILURE) # because of the 'undefined'
+
+ def testPyFlakes2(self):
+ self.masterbase = "Python.testPyFlakes2"
+ step = self.makeStep(python.PyFlakes)
+ output = \
+"""pyflakes buildbot
+some more text here that should be ignored
+buildbot/changes/freshcvsmail.py:5: 'FCMaildirSource' imported but unused
+buildbot/clients/debug.py:9: redefinition of unused 'gtk' from line 9
+buildbot/clients/debug.py:9: 'gnome' imported but unused
+buildbot/scripts/runner.py:323: redefinition of unused 'run' from line 321
+buildbot/scripts/runner.py:325: redefinition of unused 'run' from line 323
+buildbot/scripts/imaginary.py:12: undefined name 'size'
+could not compile 'blah/blah.py':3:
+pretend there was an invalid line here
+buildbot/scripts/imaginary.py:18: 'from buildbot import *' used; unable to detect undefined names
+"""
+ log = step.addLog("stdio")
+ log.addStdout(output)
+ log.finish()
+ step.createSummary(log)
+ desc = step.descriptionDone
+ self.failUnless("unused=2" in desc)
+ self.failUnless("undefined=1" in desc)
+ self.failUnless("redefs=3" in desc)
+ self.failUnless("import*=1" in desc)
+ self.failUnless("misc=2" in desc)
+
+
+ def testPyFlakes3(self):
+ self.masterbase = "Python.testPyFlakes3"
+ step = self.makeStep(python.PyFlakes)
+ output = \
+"""buildbot/changes/freshcvsmail.py:5: 'FCMaildirSource' imported but unused
+buildbot/clients/debug.py:9: redefinition of unused 'gtk' from line 9
+buildbot/clients/debug.py:9: 'gnome' imported but unused
+buildbot/scripts/runner.py:323: redefinition of unused 'run' from line 321
+buildbot/scripts/runner.py:325: redefinition of unused 'run' from line 323
+buildbot/scripts/imaginary.py:12: undefined name 'size'
+buildbot/scripts/imaginary.py:18: 'from buildbot import *' used; unable to detect undefined names
+"""
+ log = step.addLog("stdio")
+ log.addStdout(output)
+ log.finish()
+ step.createSummary(log)
+ desc = step.descriptionDone
+ self.failUnless("unused=2" in desc)
+ self.failUnless("undefined=1" in desc)
+ self.failUnless("redefs=3" in desc)
+ self.failUnless("import*=1" in desc)
+ self.failIf("misc" in desc)
+
+
+class OrdinaryCompile(shell.Compile):
+ warningPattern = "ordinary line"
+
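+# Editor's note: OrdinaryCompile shows one of the two ways the tests below
+# configure warning matching: override the warningPattern class attribute
+# (testCompile3) or pass warningPattern= when constructing the step
+# (testCompile2). Either way, stdio lines matching the pattern are counted in
+# the "warnings-count" property and collected into the "warnings" log.
+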
+class Warnings(StepTester, unittest.TestCase):
+ def testCompile1(self):
+ self.masterbase = "Warnings.testCompile1"
+ step = self.makeStep(shell.Compile)
+ output = \
+"""Compile started
+normal line
+warning: oh noes!
+ordinary line
+error (but we aren't looking for errors now, are we)
+line 23: warning: we are now on line 23
+ending line
+"""
+ log = step.addLog("stdio")
+ log.addStdout(output)
+ log.finish()
+ step.createSummary(log)
+ self.failUnlessEqual(step.getProperty("warnings-count"), 2)
+ logs = {}
+ for log in step.step_status.getLogs():
+ logs[log.getName()] = log
+ self.failUnless("warnings" in logs)
+ lines = logs["warnings"].readlines()
+ self.failUnlessEqual(len(lines), 2)
+ self.failUnlessEqual(lines[0], "warning: oh noes!\n")
+ self.failUnlessEqual(lines[1],
+ "line 23: warning: we are now on line 23\n")
+
+ cmd = buildstep.RemoteCommand(None, {})
+ cmd.rc = 0
+ results = step.evaluateCommand(cmd)
+ self.failUnlessEqual(results, WARNINGS)
+
+ def testCompile2(self):
+ self.masterbase = "Warnings.testCompile2"
+ step = self.makeStep(shell.Compile, warningPattern="ordinary line")
+ output = \
+"""Compile started
+normal line
+warning: oh noes!
+ordinary line
+error (but we aren't looking for errors now, are we)
+line 23: warning: we are now on line 23
+ending line
+"""
+ log = step.addLog("stdio")
+ log.addStdout(output)
+ log.finish()
+ step.createSummary(log)
+ self.failUnlessEqual(step.getProperty("warnings-count"), 1)
+ logs = {}
+ for log in step.step_status.getLogs():
+ logs[log.getName()] = log
+ self.failUnless("warnings" in logs)
+ lines = logs["warnings"].readlines()
+ self.failUnlessEqual(len(lines), 1)
+ self.failUnlessEqual(lines[0], "ordinary line\n")
+
+ cmd = buildstep.RemoteCommand(None, {})
+ cmd.rc = 0
+ results = step.evaluateCommand(cmd)
+ self.failUnlessEqual(results, WARNINGS)
+
+ def testCompile3(self):
+ self.masterbase = "Warnings.testCompile3"
+ step = self.makeStep(OrdinaryCompile)
+ output = \
+"""Compile started
+normal line
+warning: oh noes!
+ordinary line
+error (but we aren't looking for errors now, are we)
+line 23: warning: we are now on line 23
+ending line
+"""
+ step.setProperty("warnings-count", 10, "test")
+ log = step.addLog("stdio")
+ log.addStdout(output)
+ log.finish()
+ step.createSummary(log)
+ self.failUnlessEqual(step.getProperty("warnings-count"), 11)
+ logs = {}
+ for log in step.step_status.getLogs():
+ logs[log.getName()] = log
+ self.failUnless("warnings" in logs)
+ lines = logs["warnings"].readlines()
+ self.failUnlessEqual(len(lines), 1)
+ self.failUnlessEqual(lines[0], "ordinary line\n")
+
+ cmd = buildstep.RemoteCommand(None, {})
+ cmd.rc = 0
+ results = step.evaluateCommand(cmd)
+ self.failUnlessEqual(results, WARNINGS)
+
+
+class TreeSize(StepTester, unittest.TestCase):
+ def testTreeSize(self):
+ self.slavebase = "TreeSize.testTreeSize.slave"
+ self.masterbase = "TreeSize.testTreeSize.master"
+
+ sb = self.makeSlaveBuilder()
+ step = self.makeStep(shell.TreeSize)
+ d = self.runStep(step)
+ def _check(results):
+ self.failUnlessEqual(results, SUCCESS)
+ kib = step.getProperty("tree-size-KiB")
+ self.failUnless(isinstance(kib, int))
+ self.failUnless(kib < 100) # should be empty, I get '4'
+ s = step.step_status
+ self.failUnlessEqual(" ".join(s.getText()),
+ "treesize %d KiB" % kib)
+ d.addCallback(_check)
+ return d
+
+class FakeCommand:
+ def __init__(self, rc):
+ self.rc = rc
+
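+# Editor's note: FakeCommand stands in for a finished RemoteCommand; the
+# PerlModuleTest tests only need its rc attribute, since the test output
+# itself is fed in through the step's "stdio" log.
+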
+class PerlModuleTest(StepTester, unittest.TestCase):
+ def testAllTestsPassed(self):
+ self.masterbase = "PMT.testAllTestsPassed"
+ step = self.makeStep(shell.PerlModuleTest)
+ output = \
+"""ok 1
+ok 2
+All tests successful
+Files=1, Tests=123, other stuff
+"""
+ log = step.addLog("stdio")
+ log.addStdout(output)
+ log.finish()
+ rc = step.evaluateCommand(FakeCommand(rc=241))
+ self.failUnlessEqual(rc, SUCCESS)
+ ss = step.step_status
+ self.failUnlessEqual(ss.getStatistic('tests-failed'), 0)
+ self.failUnlessEqual(ss.getStatistic('tests-total'), 123)
+ self.failUnlessEqual(ss.getStatistic('tests-passed'), 123)
+
+ def testFailures_OldTestHarness(self):
+ self.masterbase = "PMT.testFailures_OldTestHarness"
+ step = self.makeStep(shell.PerlModuleTest)
+ output = \
+"""
+ok 1
+ok 2
+3/7 subtests failed
+"""
+ log = step.addLog("stdio")
+ log.addStdout(output)
+ log.finish()
+ rc = step.evaluateCommand(FakeCommand(rc = 123))
+ self.failUnlessEqual(rc, FAILURE)
+ ss = step.step_status
+ self.failUnlessEqual(ss.getStatistic('tests-failed'), 3)
+ self.failUnlessEqual(ss.getStatistic('tests-total'), 7)
+ self.failUnlessEqual(ss.getStatistic('tests-passed'), 4)
+
+ def testFailures_UnparseableStdio(self):
+ self.masterbase = "PMT.testFailures_UnparseableStdio"
+ step = self.makeStep(shell.PerlModuleTest)
+ output = \
+"""
+just some random stuff, you know
+"""
+ log = step.addLog("stdio")
+ log.addStdout(output)
+ log.finish()
+ rc = step.evaluateCommand(FakeCommand(rc = 243))
+ self.failUnlessEqual(rc, 243)
+ ss = step.step_status
+ self.failUnlessEqual(ss.getStatistic('tests-failed'), None)
+ self.failUnlessEqual(ss.getStatistic('tests-total'), None)
+ self.failUnlessEqual(ss.getStatistic('tests-passed'), None)
+
+ def testFailures_NewTestHarness(self):
+ self.masterbase = "PMT.testFailures_NewTestHarness"
+ step = self.makeStep(shell.PerlModuleTest)
+ output = \
+"""
+# Looks like you failed 15 tests of 18.
+tests/services.......................... Failed 265/30904 subtests
+ (less 16 skipped subtests: 30623 okay)
+tests/simple_query_backend..............ok
+tests/simple_query_middleware...........ok
+tests/soap_globalcollect................ok
+tests/three_d_me........................ok
+tests/three_d_me_callback...............ok
+tests/transaction_create................ok
+tests/unique_txid.......................ok
+
+Test Summary Report
+-------------------
+tests/000policies (Wstat: 5632 Tests: 9078 Failed: 22)
+ Failed tests: 2409, 2896-2897, 2900-2901, 2940-2941, 2944-2945
+ 2961-2962, 2965-2966, 2969-2970, 2997-2998
+ 3262, 3281-3282, 3288-3289
+ Non-zero exit status: 22
+tests/services (Wstat: 0 Tests: 30904 Failed: 265)
+ Failed tests: 14, 16-21, 64-69, 71-96, 98, 30157, 30159
+ 30310, 30316, 30439-30543, 30564, 30566-30577
+ 30602, 30604-30607, 30609-30612, 30655
+ 30657-30668, 30675, 30697-30716, 30718-30720
+ 30722-30736, 30773-30774, 30776-30777, 30786
+ 30791, 30795, 30797, 30801, 30822-30827
+ 30830-30831, 30848-30855, 30858-30859, 30888-30899
+ 30901, 30903-30904
+Files=68, Tests=264809, 1944 wallclock secs (17.59 usr 0.63 sys + 470.04 cusr 131.40 csys = 619.66 CPU)
+Result: FAIL
+"""
+ log = step.addLog("stdio")
+ log.addStdout(output)
+ log.finish()
+ rc = step.evaluateCommand(FakeCommand(rc=87))
+ self.failUnlessEqual(rc, FAILURE)
+ ss = step.step_status
+ self.failUnlessEqual(ss.getStatistic('tests-failed'), 287)
+ self.failUnlessEqual(ss.getStatistic('tests-total'), 264809)
+ self.failUnlessEqual(ss.getStatistic('tests-passed'), 264522)
+
+class MasterShellCommand(StepTester, unittest.TestCase):
+ def testMasterShellCommand(self):
+ self.slavebase = "testMasterShellCommand.slave"
+ self.masterbase = "testMasterShellCommand.master"
+ sb = self.makeSlaveBuilder()
+ step = self.makeStep(master.MasterShellCommand, command=['echo', 'hi'])
+
+ # we can't invoke runStep until the reactor is started .. hence this
+ # little dance
+ d = defer.Deferred()
+ def _dotest(_):
+ return self.runStep(step)
+ d.addCallback(_dotest)
+
+ def _check(results):
+ self.failUnlessEqual(results, SUCCESS)
+ logtxt = step.getLog("stdio").getText()
+ self.failUnlessEqual(logtxt.strip(), "hi")
+ d.addCallback(_check)
+ reactor.callLater(0, d.callback, None)
+ return d
+
+ def testMasterShellCommand_badexit(self):
+ self.slavebase = "testMasterShellCommand_badexit.slave"
+ self.masterbase = "testMasterShellCommand_badexit.master"
+ sb = self.makeSlaveBuilder()
+ step = self.makeStep(master.MasterShellCommand, command="exit 1")
+
+ # we can't invoke runStep until the reactor is started .. hence this
+ # little dance
+ d = defer.Deferred()
+ def _dotest(_):
+ return self.runStep(step)
+ d.addCallback(_dotest)
+
+ def _check(results):
+ self.failUnlessEqual(results, FAILURE)
+ d.addCallback(_check)
+ reactor.callLater(0, d.callback, None)
+ return d
diff --git a/buildbot/buildbot/test/test_svnpoller.py b/buildbot/buildbot/test/test_svnpoller.py
new file mode 100644
index 0000000..452a514
--- /dev/null
+++ b/buildbot/buildbot/test/test_svnpoller.py
@@ -0,0 +1,476 @@
+# -*- test-case-name: buildbot.test.test_svnpoller -*-
+
+import time
+from twisted.internet import defer
+from twisted.trial import unittest
+from buildbot.changes.svnpoller import SVNPoller
+
+# this is the output of "svn info --xml
+# svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk"
+prefix_output = """\
+<?xml version="1.0"?>
+<info>
+<entry
+ kind="dir"
+ path="trunk"
+ revision="18354">
+<url>svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk</url>
+<repository>
+<root>svn+ssh://svn.twistedmatrix.com/svn/Twisted</root>
+<uuid>bbbe8e31-12d6-0310-92fd-ac37d47ddeeb</uuid>
+</repository>
+<commit
+ revision="18352">
+<author>jml</author>
+<date>2006-10-01T02:37:34.063255Z</date>
+</commit>
+</entry>
+</info>
+"""
+
+# and this is "svn info --xml svn://svn.twistedmatrix.com/svn/Twisted". I
+# think this is kind of a degenerate case.. it might even be a form of error.
+prefix_output_2 = """\
+<?xml version="1.0"?>
+<info>
+</info>
+"""
+
+# this is the svn info output for a local repository, svn info --xml
+# file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository
+prefix_output_3 = """\
+<?xml version="1.0"?>
+<info>
+<entry
+ kind="dir"
+ path="SVN-Repository"
+ revision="3">
+<url>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository</url>
+<repository>
+<root>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository</root>
+<uuid>c0f47ff4-ba1e-0410-96b5-d44cc5c79e7f</uuid>
+</repository>
+<commit
+ revision="3">
+<author>warner</author>
+<date>2006-10-01T07:37:04.182499Z</date>
+</commit>
+</entry>
+</info>
+"""
+
+# % svn info --xml file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample/trunk
+
+prefix_output_4 = """\
+<?xml version="1.0"?>
+<info>
+<entry
+ kind="dir"
+ path="trunk"
+ revision="3">
+<url>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample/trunk</url>
+<repository>
+<root>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository</root>
+<uuid>c0f47ff4-ba1e-0410-96b5-d44cc5c79e7f</uuid>
+</repository>
+<commit
+ revision="1">
+<author>warner</author>
+<date>2006-10-01T07:37:02.286440Z</date>
+</commit>
+</entry>
+</info>
+"""
+
+
+
+class ComputePrefix(unittest.TestCase):
+ def test1(self):
+ base = "svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk"
+ s = SVNPoller(base + "/")
+ self.failUnlessEqual(s.svnurl, base) # certify slash-stripping
+ prefix = s.determine_prefix(prefix_output)
+ self.failUnlessEqual(prefix, "trunk")
+ self.failUnlessEqual(s._prefix, prefix)
+
+ def test2(self):
+ base = "svn+ssh://svn.twistedmatrix.com/svn/Twisted"
+ s = SVNPoller(base)
+ self.failUnlessEqual(s.svnurl, base)
+ prefix = s.determine_prefix(prefix_output_2)
+ self.failUnlessEqual(prefix, "")
+
+ def test3(self):
+ base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository"
+ s = SVNPoller(base)
+ self.failUnlessEqual(s.svnurl, base)
+ prefix = s.determine_prefix(prefix_output_3)
+ self.failUnlessEqual(prefix, "")
+
+ def test4(self):
+ base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample/trunk"
+ s = SVNPoller(base)
+ self.failUnlessEqual(s.svnurl, base)
+ prefix = s.determine_prefix(prefix_output_4)
+ self.failUnlessEqual(prefix, "sample/trunk")
+
+# output from svn log on .../SVN-Repository/sample
+# (so it includes trunk and branches)
+sample_base = "file:///usr/home/warner/stuff/Projects/BuildBot/trees/misc/_trial_temp/test_vc/repositories/SVN-Repository/sample"
+sample_logentries = [None] * 6
+
+sample_logentries[5] = """\
+<logentry
+ revision="6">
+<author>warner</author>
+<date>2006-10-01T19:35:16.165664Z</date>
+<paths>
+<path
+ action="D">/sample/branch/version.c</path>
+</paths>
+<msg>revised_to_2</msg>
+</logentry>
+"""
+
+sample_logentries[4] = """\
+<logentry
+ revision="5">
+<author>warner</author>
+<date>2006-10-01T19:35:16.165664Z</date>
+<paths>
+<path
+ action="D">/sample/branch</path>
+</paths>
+<msg>revised_to_2</msg>
+</logentry>
+"""
+
+sample_logentries[3] = """\
+<logentry
+ revision="4">
+<author>warner</author>
+<date>2006-10-01T19:35:16.165664Z</date>
+<paths>
+<path
+ action="M">/sample/trunk/version.c</path>
+</paths>
+<msg>revised_to_2</msg>
+</logentry>
+"""
+
+sample_logentries[2] = """\
+<logentry
+ revision="3">
+<author>warner</author>
+<date>2006-10-01T19:35:10.215692Z</date>
+<paths>
+<path
+ action="M">/sample/branch/main.c</path>
+</paths>
+<msg>commit_on_branch</msg>
+</logentry>
+"""
+
+sample_logentries[1] = """\
+<logentry
+ revision="2">
+<author>warner</author>
+<date>2006-10-01T19:35:09.154973Z</date>
+<paths>
+<path
+ copyfrom-path="/sample/trunk"
+ copyfrom-rev="1"
+ action="A">/sample/branch</path>
+</paths>
+<msg>make_branch</msg>
+</logentry>
+"""
+
+sample_logentries[0] = """\
+<logentry
+ revision="1">
+<author>warner</author>
+<date>2006-10-01T19:35:08.642045Z</date>
+<paths>
+<path
+ action="A">/sample</path>
+<path
+ action="A">/sample/trunk</path>
+<path
+ action="A">/sample/trunk/subdir/subdir.c</path>
+<path
+ action="A">/sample/trunk/main.c</path>
+<path
+ action="A">/sample/trunk/version.c</path>
+<path
+ action="A">/sample/trunk/subdir</path>
+</paths>
+<msg>sample_project_files</msg>
+</logentry>
+"""
+
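+# Editor's summary of the sample history above: r1 creates the project under
+# /sample/trunk, r2 copies trunk to /sample/branch, r3 modifies branch/main.c,
+# r4 modifies trunk/version.c, r5 deletes the branch directory, and r6 deletes
+# branch/version.c.
+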
+sample_info_output = """\
+<?xml version="1.0"?>
+<info>
+<entry
+ kind="dir"
+ path="sample"
+ revision="4">
+<url>file:///usr/home/warner/stuff/Projects/BuildBot/trees/misc/_trial_temp/test_vc/repositories/SVN-Repository/sample</url>
+<repository>
+<root>file:///usr/home/warner/stuff/Projects/BuildBot/trees/misc/_trial_temp/test_vc/repositories/SVN-Repository</root>
+<uuid>4f94adfc-c41e-0410-92d5-fbf86b7c7689</uuid>
+</repository>
+<commit
+ revision="4">
+<author>warner</author>
+<date>2006-10-01T19:35:16.165664Z</date>
+</commit>
+</entry>
+</info>
+"""
+
+
+changes_output_template = """\
+<?xml version="1.0"?>
+<log>
+%s</log>
+"""
+
+def make_changes_output(maxrevision):
+ # return what 'svn log' would have just after the given revision was
+ # committed
+ logs = sample_logentries[0:maxrevision]
+ assert len(logs) == maxrevision
+ logs.reverse()
+ output = changes_output_template % ("".join(logs))
+ return output
+
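+# Editor's sketch (not part of the upstream test module): a quick check of the
+# helper above. make_changes_output(2) returns a <log> document holding the
+# entries for r2 and r1, newest first, just as 'svn log --xml' would.
+def _example_make_changes_output():
+ xml = make_changes_output(2)
+ assert xml.startswith('<?xml version="1.0"?>')
+ assert xml.count("<logentry") == 2
+ return xml
+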
+def split_file(path):
+ pieces = path.split("/")
+ if pieces[0] == "branch":
+ return "branch", "/".join(pieces[1:])
+ if pieces[0] == "trunk":
+ return None, "/".join(pieces[1:])
+ raise RuntimeError("there shouldn't be any files like %s" % path)
+
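+# Editor's sketch (not part of the upstream test module), illustrating the
+# split_file() convention used by the tests below: files under trunk/ map to
+# branch=None, files under branch/ map to branch="branch".
+def _example_split_file():
+ assert split_file("trunk/main.c") == (None, "main.c")
+ assert split_file("branch/version.c") == ("branch", "version.c")
+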
+class MySVNPoller(SVNPoller):
+ def __init__(self, *args, **kwargs):
+ SVNPoller.__init__(self, *args, **kwargs)
+ self.pending_commands = []
+ self.finished_changes = []
+
+ def getProcessOutput(self, args):
+ d = defer.Deferred()
+ self.pending_commands.append((args, d))
+ return d
+
+ def submit_changes(self, changes):
+ self.finished_changes.extend(changes)
+
+class ComputeChanges(unittest.TestCase):
+ def test1(self):
+ base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
+ s = SVNPoller(base)
+ s._prefix = "sample"
+ output = make_changes_output(4)
+ doc = s.parse_logs(output)
+
+ newlast, logentries = s._filter_new_logentries(doc, 4)
+ self.failUnlessEqual(newlast, 4)
+ self.failUnlessEqual(len(logentries), 0)
+
+ newlast, logentries = s._filter_new_logentries(doc, 3)
+ self.failUnlessEqual(newlast, 4)
+ self.failUnlessEqual(len(logentries), 1)
+
+ newlast, logentries = s._filter_new_logentries(doc, 1)
+ self.failUnlessEqual(newlast, 4)
+ self.failUnlessEqual(len(logentries), 3)
+
+ newlast, logentries = s._filter_new_logentries(doc, None)
+ self.failUnlessEqual(newlast, 4)
+ self.failUnlessEqual(len(logentries), 0)
+
+ def testChanges(self):
+ base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
+ s = SVNPoller(base, split_file=split_file)
+ s._prefix = "sample"
+ doc = s.parse_logs(make_changes_output(3))
+ newlast, logentries = s._filter_new_logentries(doc, 1)
+ # so we see revisions 2 and 3 as being new
+ self.failUnlessEqual(newlast, 3)
+ changes = s.create_changes(logentries)
+ self.failUnlessEqual(len(changes), 2)
+ self.failUnlessEqual(changes[0].branch, "branch")
+ self.failUnlessEqual(changes[0].revision, '2')
+ self.failUnlessEqual(changes[1].branch, "branch")
+ self.failUnlessEqual(changes[1].files, ["main.c"])
+ self.failUnlessEqual(changes[1].revision, '3')
+
+ # and now pull in r4
+ doc = s.parse_logs(make_changes_output(4))
+ newlast, logentries = s._filter_new_logentries(doc, newlast)
+ self.failUnlessEqual(newlast, 4)
+ # so we see revision 4 as being new
+ changes = s.create_changes(logentries)
+ self.failUnlessEqual(len(changes), 1)
+ self.failUnlessEqual(changes[0].branch, None)
+ self.failUnlessEqual(changes[0].revision, '4')
+ self.failUnlessEqual(changes[0].files, ["version.c"])
+
+ # and now pull in r5 (should *not* create a change as it's a
+ # branch deletion)
+ doc = s.parse_logs(make_changes_output(5))
+ newlast, logentries = s._filter_new_logentries(doc, newlast)
+ self.failUnlessEqual(newlast, 5)
+ # so we see revision 5 as being new
+ changes = s.create_changes(logentries)
+ self.failUnlessEqual(len(changes), 0)
+
+ # and now pull in r6 (should create a change as it's not
+ # deleting an entire branch)
+ doc = s.parse_logs(make_changes_output(6))
+ newlast, logentries = s._filter_new_logentries(doc, newlast)
+ self.failUnlessEqual(newlast, 6)
+ # so we see revision 6 as being new
+ changes = s.create_changes(logentries)
+ self.failUnlessEqual(len(changes), 1)
+ self.failUnlessEqual(changes[0].branch, 'branch')
+ self.failUnlessEqual(changes[0].revision, '6')
+ self.failUnlessEqual(changes[0].files, ["version.c"])
+
+ def testFirstTime(self):
+ base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
+ s = SVNPoller(base, split_file=split_file)
+ s._prefix = "sample"
+ doc = s.parse_logs(make_changes_output(4))
+ logentries = s.get_new_logentries(doc)
+ # SVNPoller ignores all changes that happened before it was started
+ self.failUnlessEqual(len(logentries), 0)
+ self.failUnlessEqual(s.last_change, 4)
+
+class Misc(unittest.TestCase):
+ def testAlreadyWorking(self):
+ base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
+ s = MySVNPoller(base)
+ d = s.checksvn()
+ # the SVNPoller is now waiting for its getProcessOutput to finish
+ self.failUnlessEqual(s.overrun_counter, 0)
+ d2 = s.checksvn()
+ self.failUnlessEqual(s.overrun_counter, 1)
+ self.failUnlessEqual(len(s.pending_commands), 1)
+
+ def testGetRoot(self):
+ base = "svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk"
+ s = MySVNPoller(base)
+ d = s.checksvn()
+ # the SVNPoller is now waiting for its getProcessOutput to finish
+ self.failUnlessEqual(len(s.pending_commands), 1)
+ self.failUnlessEqual(s.pending_commands[0][0],
+ ["info", "--xml", "--non-interactive", base])
+
+def makeTime(timestring):
+ datefmt = '%Y/%m/%d %H:%M:%S'
+ when = time.mktime(time.strptime(timestring, datefmt))
+ return when
+
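+# Editor's note: makeTime() converts a 'YYYY/MM/DD HH:MM:SS' string into
+# local-time epoch seconds via time.mktime().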
+
+class Everything(unittest.TestCase):
+ def test1(self):
+ s = MySVNPoller(sample_base, split_file=split_file)
+ d = s.checksvn()
+ # the SVNPoller is now waiting for its getProcessOutput to finish
+ self.failUnlessEqual(len(s.pending_commands), 1)
+ self.failUnlessEqual(s.pending_commands[0][0],
+ ["info", "--xml", "--non-interactive",
+ sample_base])
+ d = s.pending_commands[0][1]
+ s.pending_commands.pop(0)
+ d.callback(sample_info_output)
+ # now it should be waiting for the 'svn log' command
+ self.failUnlessEqual(len(s.pending_commands), 1)
+ self.failUnlessEqual(s.pending_commands[0][0],
+ ["log", "--xml", "--verbose", "--non-interactive",
+ "--limit=100", sample_base])
+ d = s.pending_commands[0][1]
+ s.pending_commands.pop(0)
+ d.callback(make_changes_output(1))
+ # the command ignores the first batch of changes
+ self.failUnlessEqual(len(s.finished_changes), 0)
+ self.failUnlessEqual(s.last_change, 1)
+
+ # now fire it again, nothing changing
+ d = s.checksvn()
+ self.failUnlessEqual(s.pending_commands[0][0],
+ ["log", "--xml", "--verbose", "--non-interactive",
+ "--limit=100", sample_base])
+ d = s.pending_commands[0][1]
+ s.pending_commands.pop(0)
+ d.callback(make_changes_output(1))
+ # nothing has changed
+ self.failUnlessEqual(len(s.finished_changes), 0)
+ self.failUnlessEqual(s.last_change, 1)
+
+ # and again, with r2 this time
+ d = s.checksvn()
+ self.failUnlessEqual(s.pending_commands[0][0],
+ ["log", "--xml", "--verbose", "--non-interactive",
+ "--limit=100", sample_base])
+ d = s.pending_commands[0][1]
+ s.pending_commands.pop(0)
+ d.callback(make_changes_output(2))
+ # r2 should appear
+ self.failUnlessEqual(len(s.finished_changes), 1)
+ self.failUnlessEqual(s.last_change, 2)
+
+ c = s.finished_changes[0]
+ self.failUnlessEqual(c.branch, "branch")
+ self.failUnlessEqual(c.revision, '2')
+ self.failUnlessEqual(c.files, [''])
+ # TODO: this is what creating the branch looks like: a Change with a
+ # zero-length file. We should decide if we want filenames like this
+ # in the Change (and make sure nobody else gets confused by it) or if
+ # we want to strip them out.
+ self.failUnlessEqual(c.comments, "make_branch")
+
+ # and again at r2, so nothing should change
+ d = s.checksvn()
+ self.failUnlessEqual(s.pending_commands[0][0],
+ ["log", "--xml", "--verbose", "--non-interactive",
+ "--limit=100", sample_base])
+ d = s.pending_commands[0][1]
+ s.pending_commands.pop(0)
+ d.callback(make_changes_output(2))
+ # nothing has changed
+ self.failUnlessEqual(len(s.finished_changes), 1)
+ self.failUnlessEqual(s.last_change, 2)
+
+ # and again with both r3 and r4 appearing together
+ d = s.checksvn()
+ self.failUnlessEqual(s.pending_commands[0][0],
+ ["log", "--xml", "--verbose", "--non-interactive",
+ "--limit=100", sample_base])
+ d = s.pending_commands[0][1]
+ s.pending_commands.pop(0)
+ d.callback(make_changes_output(4))
+ self.failUnlessEqual(len(s.finished_changes), 3)
+ self.failUnlessEqual(s.last_change, 4)
+
+ c3 = s.finished_changes[1]
+ self.failUnlessEqual(c3.branch, "branch")
+ self.failUnlessEqual(c3.revision, '3')
+ self.failUnlessEqual(c3.files, ["main.c"])
+ self.failUnlessEqual(c3.comments, "commit_on_branch")
+
+ c4 = s.finished_changes[2]
+ self.failUnlessEqual(c4.branch, None)
+ self.failUnlessEqual(c4.revision, '4')
+ self.failUnlessEqual(c4.files, ["version.c"])
+ self.failUnlessEqual(c4.comments, "revised_to_2")
+ self.failUnless(abs(c4.when - time.time()) < 60)
+
+
+# TODO:
+# get coverage of split_file returning None
+# point at a live SVN server for a little while
diff --git a/buildbot/buildbot/test/test_transfer.py b/buildbot/buildbot/test/test_transfer.py
new file mode 100644
index 0000000..c85c630
--- /dev/null
+++ b/buildbot/buildbot/test/test_transfer.py
@@ -0,0 +1,721 @@
+# -*- test-case-name: buildbot.test.test_transfer -*-
+
+import os
+from stat import ST_MODE
+from twisted.trial import unittest
+from buildbot.process.buildstep import WithProperties
+from buildbot.steps.transfer import FileUpload, FileDownload, DirectoryUpload
+from buildbot.test.runutils import StepTester
+from buildbot.status.builder import SUCCESS, FAILURE
+
+# these steps pass a pb.Referenceable inside their arguments, so we have to
+# catch and wrap them. If the LocalAsRemote wrapper were a proper membrane,
+# we wouldn't have to do this.
+
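+# Editor's note: each test class below therefore overrides
+# StepTester.filterArgs() to pass the step's 'writer' or 'reader' argument
+# through self.wrap() before it reaches the slave side.
+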
+class UploadFile(StepTester, unittest.TestCase):
+
+ def filterArgs(self, args):
+ if "writer" in args:
+ args["writer"] = self.wrap(args["writer"])
+ return args
+
+ def testSuccess(self):
+ self.slavebase = "UploadFile.testSuccess.slave"
+ self.masterbase = "UploadFile.testSuccess.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ # the buildmaster normally runs chdir'ed into masterbase, so uploaded
+ # files will appear there. Under trial, we're chdir'ed into
+ # _trial_temp instead, so use a different masterdest= to keep the
+ # uploaded file in a test-local directory
+ masterdest = os.path.join(self.masterbase, "dest.text")
+ step = self.makeStep(FileUpload,
+ slavesrc="source.txt",
+ masterdest=masterdest)
+ slavesrc = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "source.txt")
+ contents = "this is the source file\n" * 1000
+ open(slavesrc, "w").write(contents)
+ f = open(masterdest, "w")
+ f.write("overwrite me\n")
+ f.close()
+
+ d = self.runStep(step)
+ def _checkUpload(results):
+ step_status = step.step_status
+ #l = step_status.getLogs()
+ #if l:
+ # logtext = l[0].getText()
+ # print logtext
+ self.failUnlessEqual(results, SUCCESS)
+ self.failUnless(os.path.exists(masterdest))
+ masterdest_contents = open(masterdest, "r").read()
+ self.failUnlessEqual(masterdest_contents, contents)
+ d.addCallback(_checkUpload)
+ return d
+
+ def testMaxsize(self):
+ self.slavebase = "UploadFile.testMaxsize.slave"
+ self.masterbase = "UploadFile.testMaxsize.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ masterdest = os.path.join(self.masterbase, "dest2.text")
+ step = self.makeStep(FileUpload,
+ slavesrc="source.txt",
+ masterdest=masterdest,
+ maxsize=12345)
+ slavesrc = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "source.txt")
+ contents = "this is the source file\n" * 1000
+ open(slavesrc, "w").write(contents)
+ f = open(masterdest, "w")
+ f.write("overwrite me\n")
+ f.close()
+
+ d = self.runStep(step)
+ def _checkUpload(results):
+ step_status = step.step_status
+ #l = step_status.getLogs()
+ #if l:
+ # logtext = l[0].getText()
+ # print logtext
+ self.failUnlessEqual(results, FAILURE)
+ self.failUnless(os.path.exists(masterdest))
+ masterdest_contents = open(masterdest, "r").read()
+ self.failUnlessEqual(len(masterdest_contents), 12345)
+ self.failUnlessEqual(masterdest_contents, contents[:12345])
+ d.addCallback(_checkUpload)
+ return d
+
+ def testMode(self):
+ self.slavebase = "UploadFile.testMode.slave"
+ self.masterbase = "UploadFile.testMode.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ masterdest = os.path.join(self.masterbase, "dest3.text")
+ step = self.makeStep(FileUpload,
+ slavesrc="source.txt",
+ masterdest=masterdest,
+ mode=0755)
+ slavesrc = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "source.txt")
+ contents = "this is the source file\n"
+ open(slavesrc, "w").write(contents)
+ f = open(masterdest, "w")
+ f.write("overwrite me\n")
+ f.close()
+
+ d = self.runStep(step)
+ def _checkUpload(results):
+ step_status = step.step_status
+ #l = step_status.getLogs()
+ #if l:
+ # logtext = l[0].getText()
+ # print logtext
+ self.failUnlessEqual(results, SUCCESS)
+ self.failUnless(os.path.exists(masterdest))
+ masterdest_contents = open(masterdest, "r").read()
+ self.failUnlessEqual(masterdest_contents, contents)
+ # and with 0777 to ignore sticky bits
+ dest_mode = os.stat(masterdest)[ST_MODE] & 0777
+ self.failUnlessEqual(dest_mode, 0755,
+ "target mode was %o, we wanted %o" %
+ (dest_mode, 0755))
+ d.addCallback(_checkUpload)
+ return d
+
+ def testMissingFile(self):
+ self.slavebase = "UploadFile.testMissingFile.slave"
+ self.masterbase = "UploadFile.testMissingFile.master"
+ sb = self.makeSlaveBuilder()
+ step = self.makeStep(FileUpload,
+ slavesrc="MISSING.txt",
+ masterdest="dest.txt")
+ masterdest = os.path.join(self.masterbase, "dest4.txt")
+
+ d = self.runStep(step)
+ def _checkUpload(results):
+ step_status = step.step_status
+ self.failUnlessEqual(results, FAILURE)
+ self.failIf(os.path.exists(masterdest))
+ l = step_status.getLogs()
+ logtext = l[0].getText().strip()
+ self.failUnless(logtext.startswith("Cannot open file"))
+ self.failUnless(logtext.endswith("for upload"))
+ d.addCallback(_checkUpload)
+ return d
+
+ def testLotsOfBlocks(self):
+ self.slavebase = "UploadFile.testLotsOfBlocks.slave"
+ self.masterbase = "UploadFile.testLotsOfBlocks.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ # the buildmaster normally runs chdir'ed into masterbase, so uploaded
+ # files will appear there. Under trial, we're chdir'ed into
+ # _trial_temp instead, so use a different masterdest= to keep the
+ # uploaded file in a test-local directory
+ masterdest = os.path.join(self.masterbase, "dest.text")
+ step = self.makeStep(FileUpload,
+ slavesrc="source.txt",
+ masterdest=masterdest,
+ blocksize=15)
+ slavesrc = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "source.txt")
+ contents = "".join(["this is the source file #%d\n" % i
+ for i in range(1000)])
+ open(slavesrc, "w").write(contents)
+ f = open(masterdest, "w")
+ f.write("overwrite me\n")
+ f.close()
+
+ d = self.runStep(step)
+ def _checkUpload(results):
+ step_status = step.step_status
+ #l = step_status.getLogs()
+ #if l:
+ # logtext = l[0].getText()
+ # print logtext
+ self.failUnlessEqual(results, SUCCESS)
+ self.failUnless(os.path.exists(masterdest))
+ masterdest_contents = open(masterdest, "r").read()
+ self.failUnlessEqual(masterdest_contents, contents)
+ d.addCallback(_checkUpload)
+ return d
+
+ def testWorkdir(self):
+ self.slavebase = "Upload.testWorkdir.slave"
+ self.masterbase = "Upload.testWorkdir.master"
+ sb = self.makeSlaveBuilder()
+
+ self.workdir = "mybuild" # override default in StepTest
+ full_workdir = os.path.join(
+ self.slavebase, self.slavebuilderbase, self.workdir)
+ os.mkdir(full_workdir)
+
+ masterdest = os.path.join(self.masterbase, "dest.txt")
+
+ step = self.makeStep(FileUpload,
+ slavesrc="source.txt",
+ masterdest=masterdest)
+
+ # Testing that the FileUpload's workdir is set when makeStep()
+ # calls setDefaultWorkdir() is actually enough; carrying on and
+ # making sure the upload actually succeeds is pure gravy.
+ self.failUnlessEqual(self.workdir, step.workdir)
+
+ slavesrc = os.path.join(full_workdir, "source.txt")
+ open(slavesrc, "w").write("upload me\n")
+
+ def _checkUpload(results):
+ self.failUnlessEqual(results, SUCCESS)
+ self.failUnless(os.path.isfile(masterdest))
+
+ d = self.runStep(step)
+ d.addCallback(_checkUpload)
+ return d
+
+ def testWithProperties(self):
+ # test that workdir can be a WithProperties object
+ self.slavebase = "Upload.testWithProperties.slave"
+ self.masterbase = "Upload.testWithProperties.master"
+ sb = self.makeSlaveBuilder()
+
+ step = self.makeStep(FileUpload,
+ slavesrc="src.txt",
+ masterdest="dest.txt")
+ step.workdir = WithProperties("build.%s", "buildnumber")
+
+ self.failUnlessEqual(step._getWorkdir(), "build.1")
+
+class DownloadFile(StepTester, unittest.TestCase):
+
+ def filterArgs(self, args):
+ if "reader" in args:
+ args["reader"] = self.wrap(args["reader"])
+ return args
+
+ def testSuccess(self):
+ self.slavebase = "DownloadFile.testSuccess.slave"
+ self.masterbase = "DownloadFile.testSuccess.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ mastersrc = os.path.join(self.masterbase, "source.text")
+ slavedest = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "dest.txt")
+ step = self.makeStep(FileDownload,
+ mastersrc=mastersrc,
+ slavedest="dest.txt")
+ contents = "this is the source file\n" * 1000 # 24kb, so two blocks
+ open(mastersrc, "w").write(contents)
+ f = open(slavedest, "w")
+ f.write("overwrite me\n")
+ f.close()
+
+ d = self.runStep(step)
+ def _checkDownload(results):
+ step_status = step.step_status
+ self.failUnlessEqual(results, SUCCESS)
+ self.failUnless(os.path.exists(slavedest))
+ slavedest_contents = open(slavedest, "r").read()
+ self.failUnlessEqual(slavedest_contents, contents)
+ d.addCallback(_checkDownload)
+ return d
+
+ def testMaxsize(self):
+ self.slavebase = "DownloadFile.testMaxsize.slave"
+ self.masterbase = "DownloadFile.testMaxsize.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ mastersrc = os.path.join(self.masterbase, "source.text")
+ slavedest = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "dest.txt")
+ step = self.makeStep(FileDownload,
+ mastersrc=mastersrc,
+ slavedest="dest.txt",
+ maxsize=12345)
+ contents = "this is the source file\n" * 1000 # 24kb, so two blocks
+ open(mastersrc, "w").write(contents)
+ f = open(slavedest, "w")
+ f.write("overwrite me\n")
+ f.close()
+
+ d = self.runStep(step)
+ def _checkDownload(results):
+ step_status = step.step_status
+ # the file should be truncated, and the step a FAILURE
+ self.failUnlessEqual(results, FAILURE)
+ self.failUnless(os.path.exists(slavedest))
+ slavedest_contents = open(slavedest, "r").read()
+ self.failUnlessEqual(len(slavedest_contents), 12345)
+ self.failUnlessEqual(slavedest_contents, contents[:12345])
+ d.addCallback(_checkDownload)
+ return d
+
+ def testMode(self):
+ self.slavebase = "DownloadFile.testMode.slave"
+ self.masterbase = "DownloadFile.testMode.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ mastersrc = os.path.join(self.masterbase, "source.text")
+ slavedest = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "dest.txt")
+ step = self.makeStep(FileDownload,
+ mastersrc=mastersrc,
+ slavedest="dest.txt",
+ mode=0755)
+ contents = "this is the source file\n"
+ open(mastersrc, "w").write(contents)
+ f = open(slavedest, "w")
+ f.write("overwrite me\n")
+ f.close()
+
+ d = self.runStep(step)
+ def _checkDownload(results):
+ step_status = step.step_status
+ self.failUnlessEqual(results, SUCCESS)
+ self.failUnless(os.path.exists(slavedest))
+ slavedest_contents = open(slavedest, "r").read()
+ self.failUnlessEqual(slavedest_contents, contents)
+ # and with 0777 to ignore sticky bits
+ dest_mode = os.stat(slavedest)[ST_MODE] & 0777
+ self.failUnlessEqual(dest_mode, 0755,
+ "target mode was %o, we wanted %o" %
+ (dest_mode, 0755))
+ d.addCallback(_checkDownload)
+ return d
+
+ def testMissingFile(self):
+ self.slavebase = "DownloadFile.testMissingFile.slave"
+ self.masterbase = "DownloadFile.testMissingFile.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ mastersrc = os.path.join(self.masterbase, "MISSING.text")
+ slavedest = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "dest.txt")
+ step = self.makeStep(FileDownload,
+ mastersrc=mastersrc,
+ slavedest="dest.txt")
+
+ d = self.runStep(step)
+ def _checkDownload(results):
+ step_status = step.step_status
+ self.failUnlessEqual(results, FAILURE)
+ self.failIf(os.path.exists(slavedest))
+ l = step_status.getLogs()
+ logtext = l[0].getText().strip()
+ self.failUnless(logtext.endswith(" not available at master"))
+ d.addCallbacks(_checkDownload)
+
+ return d
+
+ def testLotsOfBlocks(self):
+ self.slavebase = "DownloadFile.testLotsOfBlocks.slave"
+ self.masterbase = "DownloadFile.testLotsOfBlocks.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ mastersrc = os.path.join(self.masterbase, "source.text")
+ slavedest = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "dest.txt")
+ step = self.makeStep(FileDownload,
+ mastersrc=mastersrc,
+ slavedest="dest.txt",
+ blocksize=15)
+ contents = "".join(["this is the source file #%d\n" % i
+ for i in range(1000)])
+ open(mastersrc, "w").write(contents)
+ f = open(slavedest, "w")
+ f.write("overwrite me\n")
+ f.close()
+
+ d = self.runStep(step)
+ def _checkDownload(results):
+ step_status = step.step_status
+ self.failUnlessEqual(results, SUCCESS)
+ self.failUnless(os.path.exists(slavedest))
+ slavedest_contents = open(slavedest, "r").read()
+ self.failUnlessEqual(slavedest_contents, contents)
+ d.addCallback(_checkDownload)
+ return d
+
+ def testWorkdir(self):
+ self.slavebase = "Download.testWorkdir.slave"
+ self.masterbase = "Download.testWorkdir.master"
+ sb = self.makeSlaveBuilder()
+
+ # As in Upload.testWorkdir(), it's enough to test that makeStep()'s
+ # call of setDefaultWorkdir() actually sets step.workdir.
+ self.workdir = "mybuild"
+ step = self.makeStep(FileDownload,
+ mastersrc="foo",
+ slavedest="foo")
+ self.failUnlessEqual(step.workdir, self.workdir)
+
+ def testWithProperties(self):
+ # test that workdir can be a WithProperties object
+ self.slavebase = "Download.testWithProperties.slave"
+ self.masterbase = "Download.testWithProperties.master"
+ sb = self.makeSlaveBuilder()
+
+ step = self.makeStep(FileDownload,
+ mastersrc="src.txt",
+ slavedest="dest.txt")
+ step.workdir = WithProperties("build.%s", "buildnumber")
+
+ self.failUnlessEqual(step._getWorkdir(), "build.1")
+
+
+
+class UploadDirectory(StepTester, unittest.TestCase):
+
+ def filterArgs(self, args):
+ if "writer" in args:
+ args["writer"] = self.wrap(args["writer"])
+ return args
+
+ def testSuccess(self):
+ self.slavebase = "UploadDirectory.testSuccess.slave"
+ self.masterbase = "UploadDirectory.testSuccess.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ # the buildmaster normally runs chdir'ed into masterbase, so uploaded
+ # files will appear there. Under trial, we're chdir'ed into
+ # _trial_temp instead, so use a different masterdest= to keep the
+ # uploaded file in a test-local directory
+ masterdest = os.path.join(self.masterbase, "dest_dir")
+ step = self.makeStep(DirectoryUpload,
+ slavesrc="source_dir",
+ masterdest=masterdest)
+ slavesrc = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "source_dir")
+ dircount = 5
+ content = []
+ content.append("this is one source file\n" * 1000)
+ content.append("this is a second source file\n" * 978)
+ content.append("this is a third source file\n" * 473)
+ os.mkdir(slavesrc)
+ for i in range(dircount):
+ os.mkdir(os.path.join(slavesrc, "d%i" % (i)))
+ for j in range(dircount):
+ curdir = os.path.join("d%i" % (i), "e%i" % (j))
+ os.mkdir(os.path.join(slavesrc, curdir))
+ for h in range(3):
+ open(os.path.join(slavesrc, curdir, "file%i" % (h)), "w").write(content[h])
+ for j in range(dircount):
+ #empty dirs, must be uploaded too
+ curdir = os.path.join("d%i" % (i), "f%i" % (j))
+ os.mkdir(os.path.join(slavesrc, curdir))
+
+ d = self.runStep(step)
+ def _checkUpload(results):
+ step_status = step.step_status
+ #l = step_status.getLogs()
+ #if l:
+ # logtext = l[0].getText()
+ # print logtext
+ self.failUnlessEqual(results, SUCCESS)
+ self.failUnless(os.path.exists(masterdest))
+ for i in range(dircount):
+ for j in range(dircount):
+ curdir = os.path.join("d%i" % (i), "e%i" % (j))
+ self.failUnless(os.path.exists(os.path.join(masterdest, curdir)))
+ for h in range(3):
+ masterdest_contents = open(os.path.join(masterdest, curdir, "file%i" % (h)), "r").read()
+ self.failUnlessEqual(masterdest_contents, content[h])
+ for j in range(dircount):
+ curdir = os.path.join("d%i" % (i), "f%i" % (j))
+ self.failUnless(os.path.exists(os.path.join(masterdest, curdir)))
+ d.addCallback(_checkUpload)
+ return d
+
+ def testOneEmptyDir(self):
+ self.slavebase = "UploadDirectory.testOneEmptyDir.slave"
+ self.masterbase = "UploadDirectory.testOneEmptyDir.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ # the buildmaster normally runs chdir'ed into masterbase, so uploaded
+ # files will appear there. Under trial, we're chdir'ed into
+ # _trial_temp instead, so use a different masterdest= to keep the
+ # uploaded file in a test-local directory
+ masterdest = os.path.join(self.masterbase, "dest_dir")
+ step = self.makeStep(DirectoryUpload,
+ slavesrc="source_dir",
+ masterdest=masterdest)
+ slavesrc = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "source_dir")
+ os.mkdir(slavesrc)
+
+ d = self.runStep(step)
+ def _checkUpload(results):
+ step_status = step.step_status
+ #l = step_status.getLogs()
+ #if l:
+ # logtext = l[0].getText()
+ # print logtext
+ self.failUnlessEqual(results, SUCCESS)
+ self.failUnless(os.path.exists(masterdest))
+ d.addCallback(_checkUpload)
+ return d
+
+ def testManyEmptyDirs(self):
+ self.slavebase = "UploadDirectory.testManyEmptyDirs.slave"
+ self.masterbase = "UploadDirectory.testManyEmptyDirs.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ # the buildmaster normally runs chdir'ed into masterbase, so uploaded
+ # files will appear there. Under trial, we're chdir'ed into
+ # _trial_temp instead, so use a different masterdest= to keep the
+ # uploaded file in a test-local directory
+ masterdest = os.path.join(self.masterbase, "dest_dir")
+ step = self.makeStep(DirectoryUpload,
+ slavesrc="source_dir",
+ masterdest=masterdest)
+ slavesrc = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "source_dir")
+ dircount = 25
+ os.mkdir(slavesrc)
+ for i in range(dircount):
+ os.mkdir(os.path.join(slavesrc, "d%i" % (i)))
+ for j in range(dircount):
+ curdir = os.path.join("d%i" % (i), "e%i" % (j))
+ os.mkdir(os.path.join(slavesrc, curdir))
+ curdir = os.path.join("d%i" % (i), "f%i" % (j))
+ os.mkdir(os.path.join(slavesrc, curdir))
+
+ d = self.runStep(step)
+ def _checkUpload(results):
+ step_status = step.step_status
+ #l = step_status.getLogs()
+ #if l:
+ # logtext = l[0].getText()
+ # print logtext
+ self.failUnlessEqual(results, SUCCESS)
+ self.failUnless(os.path.exists(masterdest))
+ for i in range(dircount):
+ for j in range(dircount):
+ curdir = os.path.join("d%i" % (i), "e%i" % (j))
+ self.failUnless(os.path.exists(os.path.join(masterdest, curdir)))
+ curdir = os.path.join("d%i" % (i), "f%i" % (j))
+ self.failUnless(os.path.exists(os.path.join(masterdest, curdir)))
+ d.addCallback(_checkUpload)
+ return d
+
+ def testOneDirOneFile(self):
+ self.slavebase = "UploadDirectory.testOneDirOneFile.slave"
+ self.masterbase = "UploadDirectory.testOneDirOneFile.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ # the buildmaster normally runs chdir'ed into masterbase, so uploaded
+ # files will appear there. Under trial, we're chdir'ed into
+ # _trial_temp instead, so use a different masterdest= to keep the
+ # uploaded file in a test-local directory
+ masterdest = os.path.join(self.masterbase, "dest_dir")
+ step = self.makeStep(DirectoryUpload,
+ slavesrc="source_dir",
+ masterdest=masterdest)
+ slavesrc = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "source_dir")
+ os.mkdir(slavesrc)
+ content = "this is one source file\n" * 1000
+ open(os.path.join(slavesrc, "srcfile"), "w").write(content)
+
+ d = self.runStep(step)
+ def _checkUpload(results):
+ step_status = step.step_status
+ #l = step_status.getLogs()
+ #if l:
+ # logtext = l[0].getText()
+ # print logtext
+ self.failUnlessEqual(results, SUCCESS)
+ self.failUnless(os.path.exists(masterdest))
+ masterdest_contents = open(os.path.join(masterdest, "srcfile"), "r").read()
+ self.failUnlessEqual(masterdest_contents, content)
+ d.addCallback(_checkUpload)
+ return d
+
+ def testOneDirManyFiles(self):
+ self.slavebase = "UploadDirectory.testOneDirManyFile.slave"
+ self.masterbase = "UploadDirectory.testOneDirManyFile.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ # the buildmaster normally runs chdir'ed into masterbase, so uploaded
+ # files will appear there. Under trial, we're chdir'ed into
+ # _trial_temp instead, so use a different masterdest= to keep the
+ # uploaded file in a test-local directory
+ masterdest = os.path.join(self.masterbase, "dest_dir")
+ step = self.makeStep(DirectoryUpload,
+ slavesrc="source_dir",
+ masterdest=masterdest)
+ slavesrc = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "source_dir")
+ filecount = 20
+ os.mkdir(slavesrc)
+ content = []
+ content.append("this is one source file\n" * 1000)
+ content.append("this is a second source file\n" * 978)
+ content.append("this is a third source file\n" * 473)
+ for i in range(3):
+ for j in range(filecount):
+ open(os.path.join(slavesrc, "srcfile%i_%i" % (i, j)), "w").write(content[i])
+
+ d = self.runStep(step)
+ def _checkUpload(results):
+ step_status = step.step_status
+ #l = step_status.getLogs()
+ #if l:
+ # logtext = l[0].getText()
+ # print logtext
+ self.failUnlessEqual(results, SUCCESS)
+ self.failUnless(os.path.exists(masterdest))
+ for i in range(3):
+ for j in range(filecount):
+ masterdest_contents = open(os.path.join(masterdest, "srcfile%i_%i" % (i, j)), "r").read()
+ self.failUnlessEqual(masterdest_contents, content[i])
+ d.addCallback(_checkUpload)
+ return d
+
+ def testManyDirsManyFiles(self):
+ self.slavebase = "UploadDirectory.testManyDirsManyFile.slave"
+ self.masterbase = "UploadDirectory.testManyDirsManyFile.master"
+ sb = self.makeSlaveBuilder()
+ os.mkdir(os.path.join(self.slavebase, self.slavebuilderbase,
+ "build"))
+ # the buildmaster normally runs chdir'ed into masterbase, so uploaded
+ # files will appear there. Under trial, we're chdir'ed into
+ # _trial_temp instead, so use a different masterdest= to keep the
+ # uploaded file in a test-local directory
+ masterdest = os.path.join(self.masterbase, "dest_dir")
+ step = self.makeStep(DirectoryUpload,
+ slavesrc="source_dir",
+ masterdest=masterdest)
+ slavesrc = os.path.join(self.slavebase,
+ self.slavebuilderbase,
+ "build",
+ "source_dir")
+ dircount = 10
+ os.mkdir(slavesrc)
+ for i in range(dircount):
+ os.mkdir(os.path.join(slavesrc, "d%i" % (i)))
+ for j in range(dircount):
+ curdir = os.path.join("d%i" % (i), "e%i" % (j))
+ os.mkdir(os.path.join(slavesrc, curdir))
+ curdir = os.path.join("d%i" % (i), "f%i" % (j))
+ os.mkdir(os.path.join(slavesrc, curdir))
+
+ filecount = 5
+ content = []
+ content.append("this is one source file\n" * 1000)
+ content.append("this is a second source file\n" * 978)
+ content.append("this is a third source file\n" * 473)
+ for i in range(dircount):
+ for j in range(dircount):
+ for k in range(3):
+ for l in range(filecount):
+ open(os.path.join(slavesrc, "d%i" % (i), "e%i" % (j), "srcfile%i_%i" % (k, l)), "w").write(content[k])
+
+ d = self.runStep(step)
+ def _checkUpload(results):
+ step_status = step.step_status
+ #l = step_status.getLogs()
+ #if l:
+ # logtext = l[0].getText()
+ # print logtext
+ self.failUnlessEqual(results, SUCCESS)
+ self.failUnless(os.path.exists(masterdest))
+ for i in range(dircount):
+ for j in range(dircount):
+ for k in range(3):
+ for l in range(filecount):
+ masterdest_contents = open(os.path.join(masterdest, "d%i" % (i), "e%i" % (j), "srcfile%i_%i" % (k, l)), "r").read()
+ self.failUnlessEqual(masterdest_contents, content[k])
+ d.addCallback(_checkUpload)
+ return d
+
+
+# TODO:
+# test relative paths, ~/paths
+# need to implement expanduser() for slave-side
+# test error message when master-side file is in a missing directory
+# remove workdir= default?
+
diff --git a/buildbot/buildbot/test/test_twisted.py b/buildbot/buildbot/test/test_twisted.py
new file mode 100644
index 0000000..7b4f9bf
--- /dev/null
+++ b/buildbot/buildbot/test/test_twisted.py
@@ -0,0 +1,219 @@
+# -*- test-case-name: buildbot.test.test_twisted -*-
+
+from twisted.trial import unittest
+
+from buildbot import interfaces
+from buildbot.steps.python_twisted import countFailedTests
+from buildbot.steps.python_twisted import Trial, TrialTestCaseCounter
+from buildbot.status import builder
+
+noisy = 0
+if noisy:
+ from twisted.python.log import startLogging
+ import sys
+ startLogging(sys.stdout)
+
+out1 = """
+-------------------------------------------------------------------------------
+Ran 13 tests in 1.047s
+
+OK
+"""
+
+out2 = """
+-------------------------------------------------------------------------------
+Ran 12 tests in 1.040s
+
+FAILED (failures=1)
+"""
+
+out3 = """
+ NotImplementedError
+-------------------------------------------------------------------------------
+Ran 13 tests in 1.042s
+
+FAILED (failures=1, errors=1)
+"""
+
+out4 = """
+unparseable
+"""
+
+out5 = """
+ File "/usr/home/warner/stuff/python/twisted/Twisted-CVS/twisted/test/test_defer.py", line 79, in testTwoCallbacks
+ self.fail("just because")
+ File "/usr/home/warner/stuff/python/twisted/Twisted-CVS/twisted/trial/unittest.py", line 21, in fail
+ raise AssertionError, message
+ AssertionError: just because
+unparseable
+"""
+
+out6 = """
+===============================================================================
+SKIPPED: testProtocolLocalhost (twisted.flow.test.test_flow.FlowTest)
+-------------------------------------------------------------------------------
+XXX freezes, fixme
+===============================================================================
+SKIPPED: testIPv6 (twisted.names.test.test_names.HostsTestCase)
+-------------------------------------------------------------------------------
+IPv6 support is not in our hosts resolver yet
+===============================================================================
+EXPECTED FAILURE: testSlots (twisted.test.test_rebuild.NewStyleTestCase)
+-------------------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/unittest.py", line 240, in _runPhase
+ stage(*args, **kwargs)
+ File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/unittest.py", line 262, in _main
+ self.runner(self.method)
+ File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/runner.py", line 95, in runTest
+ method()
+ File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/test/test_rebuild.py", line 130, in testSlots
+ rebuild.updateInstance(self.m.SlottedClass())
+ File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/python/rebuild.py", line 114, in updateInstance
+ self.__class__ = latestClass(self.__class__)
+TypeError: __class__ assignment: 'SlottedClass' object layout differs from 'SlottedClass'
+===============================================================================
+FAILURE: testBatchFile (twisted.conch.test.test_sftp.TestOurServerBatchFile)
+-------------------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/unittest.py", line 240, in _runPhase
+ stage(*args, **kwargs)
+ File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/unittest.py", line 262, in _main
+ self.runner(self.method)
+ File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/runner.py", line 95, in runTest
+ method()
+ File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/conch/test/test_sftp.py", line 450, in testBatchFile
+ self.failUnlessEqual(res[1:-2], ['testDirectory', 'testRemoveFile', 'testRenameFile', 'testfile1'])
+ File "/Users/buildbot/Buildbot/twisted/OSX-full2.3/Twisted/twisted/trial/unittest.py", line 115, in failUnlessEqual
+ raise FailTest, (msg or '%r != %r' % (first, second))
+FailTest: [] != ['testDirectory', 'testRemoveFile', 'testRenameFile', 'testfile1']
+-------------------------------------------------------------------------------
+Ran 1454 tests in 911.579s
+
+FAILED (failures=2, skips=49, expectedFailures=9)
+Exception exceptions.AttributeError: "'NoneType' object has no attribute 'StringIO'" in <bound method RemoteReference.__del__ of <twisted.spread.pb.RemoteReference instance at 0x27036c0>> ignored
+"""
+
+class MyTrial(Trial):
+ def addTestResult(self, testname, results, text, logs):
+ self.results.append((testname, results, text, logs))
+ def addCompleteLog(self, name, log):
+ pass
+
+class MyLogFile:
+ def __init__(self, text):
+ self.text = text
+ def getText(self):
+ return self.text
+
+
+class Count(unittest.TestCase):
+
+ def count(self, total, failures=0, errors=0,
+ expectedFailures=0, unexpectedSuccesses=0, skips=0):
+ d = {
+ 'total': total,
+ 'failures': failures,
+ 'errors': errors,
+ 'expectedFailures': expectedFailures,
+ 'unexpectedSuccesses': unexpectedSuccesses,
+ 'skips': skips,
+ }
+ return d
+
+ def testCountFailedTests(self):
+ count = countFailedTests(out1)
+ self.assertEquals(count, self.count(total=13))
+ count = countFailedTests(out2)
+ self.assertEquals(count, self.count(total=12, failures=1))
+ count = countFailedTests(out3)
+ self.assertEquals(count, self.count(total=13, failures=1, errors=1))
+ count = countFailedTests(out4)
+ self.assertEquals(count, self.count(total=None))
+ count = countFailedTests(out5)
+ self.assertEquals(count, self.count(total=None))
+
+class Counter(unittest.TestCase):
+
+ def setProgress(self, metric, value):
+ self.progress = (metric, value)
+
+ def testCounter(self):
+ self.progress = (None,None)
+ c = TrialTestCaseCounter()
+ c.setStep(self)
+ STDOUT = interfaces.LOG_CHANNEL_STDOUT
+ def add(text):
+ c.logChunk(None, None, None, STDOUT, text)
+ add("\n\n")
+ self.failUnlessEqual(self.progress, (None,None))
+ add("bogus line\n")
+ self.failUnlessEqual(self.progress, (None,None))
+ add("buildbot.test.test_config.ConfigTest.testBots ... [OK]\n")
+ self.failUnlessEqual(self.progress, ("tests", 1))
+ add("buildbot.test.test_config.ConfigTest.tes")
+ self.failUnlessEqual(self.progress, ("tests", 1))
+ add("tBuilders ... [OK]\n")
+ self.failUnlessEqual(self.progress, ("tests", 2))
+ # confirm alternative delimiters work too.. ptys seem to emit
+ # something different
+ add("buildbot.test.test_config.ConfigTest.testIRC ... [OK]\r\n")
+ self.failUnlessEqual(self.progress, ("tests", 3))
+ add("===============================================================================\n")
+ self.failUnlessEqual(self.progress, ("tests", 3))
+ add("buildbot.test.test_config.IOnlyLookLikeA.testLine ... [OK]\n")
+ self.failUnlessEqual(self.progress, ("tests", 3))
+
+
+
+class Parse(unittest.TestCase):
+ def failUnlessIn(self, substr, string):
+ self.failUnless(string.find(substr) != -1)
+
+ def testParse(self):
+ t = MyTrial(build=None, workdir=".", testpath=None, testChanges=True)
+ t.results = []
+ log = MyLogFile(out6)
+ t.createSummary(log)
+
+ self.failUnlessEqual(len(t.results), 4)
+ r1, r2, r3, r4 = t.results
+ testname, results, text, logs = r1
+ self.failUnlessEqual(testname,
+ ("twisted", "flow", "test", "test_flow",
+ "FlowTest", "testProtocolLocalhost"))
+ self.failUnlessEqual(results, builder.SKIPPED)
+ self.failUnlessEqual(text, ['skipped'])
+ self.failUnlessIn("XXX freezes, fixme", logs)
+ self.failUnless(logs.startswith("SKIPPED:"))
+ self.failUnless(logs.endswith("fixme\n"))
+
+ testname, results, text, logs = r2
+ self.failUnlessEqual(testname,
+ ("twisted", "names", "test", "test_names",
+ "HostsTestCase", "testIPv6"))
+ self.failUnlessEqual(results, builder.SKIPPED)
+ self.failUnlessEqual(text, ['skipped'])
+ self.failUnless(logs.startswith("SKIPPED: testIPv6"))
+ self.failUnless(logs.endswith("IPv6 support is not in our hosts resolver yet\n"))
+
+ testname, results, text, logs = r3
+ self.failUnlessEqual(testname,
+ ("twisted", "test", "test_rebuild",
+ "NewStyleTestCase", "testSlots"))
+ self.failUnlessEqual(results, builder.SUCCESS)
+ self.failUnlessEqual(text, ['expected', 'failure'])
+ self.failUnless(logs.startswith("EXPECTED FAILURE: "))
+ self.failUnlessIn("\nTraceback ", logs)
+ self.failUnless(logs.endswith("layout differs from 'SlottedClass'\n"))
+
+ testname, results, text, logs = r4
+ self.failUnlessEqual(testname,
+ ("twisted", "conch", "test", "test_sftp",
+ "TestOurServerBatchFile", "testBatchFile"))
+ self.failUnlessEqual(results, builder.FAILURE)
+ self.failUnlessEqual(text, ['failure'])
+ self.failUnless(logs.startswith("FAILURE: "))
+ self.failUnlessIn("Traceback ", logs)
+ self.failUnless(logs.endswith("'testRenameFile', 'testfile1']\n"))
+
diff --git a/buildbot/buildbot/test/test_util.py b/buildbot/buildbot/test/test_util.py
new file mode 100644
index 0000000..b375390
--- /dev/null
+++ b/buildbot/buildbot/test/test_util.py
@@ -0,0 +1,26 @@
+# -*- test-case-name: buildbot.test.test_util -*-
+
+from twisted.trial import unittest
+
+from buildbot import util
+
+
+class Foo(util.ComparableMixin):
+ compare_attrs = ["a", "b"]
+
+ def __init__(self, a, b, c):
+ self.a, self.b, self.c = a,b,c
+
+
+class Bar(Foo, util.ComparableMixin):
+ compare_attrs = ["b", "c"]
+
+class Compare(unittest.TestCase):
+ def testCompare(self):
+ f1 = Foo(1, 2, 3)
+ f2 = Foo(1, 2, 4)
+ f3 = Foo(1, 3, 4)
+ b1 = Bar(1, 2, 3)
+ self.failUnless(f1 == f2)
+ self.failIf(f1 == f3)
+ self.failIf(f1 == b1)
diff --git a/buildbot/buildbot/test/test_vc.py b/buildbot/buildbot/test/test_vc.py
new file mode 100644
index 0000000..4d0c18e
--- /dev/null
+++ b/buildbot/buildbot/test/test_vc.py
@@ -0,0 +1,3023 @@
+# -*- test-case-name: buildbot.test.test_vc -*-
+
+import sys, os, time, re
+from email.Utils import mktime_tz, parsedate_tz
+
+from twisted.trial import unittest
+from twisted.internet import defer, reactor, utils, protocol, task, error
+from twisted.python import failure
+from twisted.python.procutils import which
+from twisted.web import client, static, server
+
+#defer.Deferred.debug = True
+
+from twisted.python import log
+#log.startLogging(sys.stderr)
+
+from buildbot import master, interfaces
+from buildbot.slave import bot, commands
+from buildbot.slave.commands import rmdirRecursive
+from buildbot.status.builder import SUCCESS, FAILURE
+from buildbot.process import base
+from buildbot.steps import source
+from buildbot.changes import changes
+from buildbot.sourcestamp import SourceStamp
+from buildbot.scripts import tryclient
+from buildbot.test.runutils import SignalMixin, myGetProcessOutputAndValue
+
+#step.LoggedRemoteCommand.debug = True
+
+from twisted.internet.defer import waitForDeferred, deferredGenerator
+
+# Most of these tests (all but SourceStamp) depend upon having a set of
+# repositories from which we can perform checkouts. These repositories are
+# created by the setUp method at the start of each test class. In earlier
+# versions these repositories were created offline and distributed with a
+# separate tarball named 'buildbot-test-vc-1.tar.gz'. This is no longer
+# necessary.
+
+# CVS requires a local file repository. Providing remote access is beyond
+# the feasible abilities of this test program (needs pserver or ssh).
+
+# SVN requires a local file repository. To provide remote access over HTTP
+# requires an apache server with DAV support and mod_dav_svn, way beyond what we
+# can test from here.
+
+# Arch and Darcs both allow remote (read-only) operation with any web
+# server. We test both local file access and HTTP access (by spawning a
+# small web server to provide access to the repository files while the test
+# is running).
+
+# Perforce starts the daemon running on localhost. Unfortunately, it must
+# use a predetermined Internet-domain port number, unless we want to go
+# all-out: bind the listen socket ourselves and pretend to be inetd.
+
+config_vc = """
+from buildbot.process import factory
+from buildbot.steps import source
+from buildbot.buildslave import BuildSlave
+s = factory.s
+
+f1 = factory.BuildFactory([
+ %s,
+ ])
+c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit')]
+c['schedulers'] = []
+c['builders'] = [{'name': 'vc', 'slavename': 'bot1',
+ 'builddir': 'vc-dir', 'factory': f1}]
+c['slavePortnum'] = 0
+# do not compress logs in tests
+c['logCompressionLimit'] = False
+BuildmasterConfig = c
+"""
+
+p0_diff = r"""
+Index: subdir/subdir.c
+===================================================================
+RCS file: /home/warner/stuff/Projects/BuildBot/code-arch/_trial_temp/test_vc/repositories/CVS-Repository/sample/subdir/subdir.c,v
+retrieving revision 1.1.1.1
+diff -u -r1.1.1.1 subdir.c
+--- subdir/subdir.c 14 Aug 2005 01:32:49 -0000 1.1.1.1
++++ subdir/subdir.c 14 Aug 2005 01:36:15 -0000
+@@ -4,6 +4,6 @@
+ int
+ main(int argc, const char *argv[])
+ {
+- printf("Hello subdir.\n");
++ printf("Hello patched subdir.\n");
+ return 0;
+ }
+"""
+
+# this patch does not include the filename headers, so it is
+# patchlevel-neutral
+TRY_PATCH = '''
+@@ -5,6 +5,6 @@
+ int
+ main(int argc, const char *argv[])
+ {
+- printf("Hello subdir.\\n");
++ printf("Hello try.\\n");
+ return 0;
+ }
+'''
+
+MAIN_C = '''
+// this is main.c
+#include <stdio.h>
+
+int
+main(int argc, const char *argv[])
+{
+ printf("Hello world.\\n");
+ return 0;
+}
+'''
+
+BRANCH_C = '''
+// this is main.c
+#include <stdio.h>
+
+int
+main(int argc, const char *argv[])
+{
+ printf("Hello branch.\\n");
+ return 0;
+}
+'''
+
+VERSION_C = '''
+// this is version.c
+#include <stdio.h>
+
+int
+main(int argc, const char *argv[])
+{
+ printf("Hello world, version=%d\\n");
+ return 0;
+}
+'''
+
+SUBDIR_C = '''
+// this is subdir/subdir.c
+#include <stdio.h>
+
+int
+main(int argc, const char *argv[])
+{
+ printf("Hello subdir.\\n");
+ return 0;
+}
+'''
+
+TRY_C = '''
+// this is subdir/subdir.c
+#include <stdio.h>
+
+int
+main(int argc, const char *argv[])
+{
+ printf("Hello try.\\n");
+ return 0;
+}
+'''
+
+def qw(s):
+ return s.split()
+
+class VCS_Helper:
+ # this is a helper class which keeps track of whether each VC system is
+ # available, and whether the repository for each has been created. There
+ # is one instance of this class, at module level, shared between all test
+ # cases.
+
+ def __init__(self):
+ self._helpers = {}
+ self._isCapable = {}
+ self._excuses = {}
+ self._repoReady = {}
+
+ def registerVC(self, name, helper):
+ self._helpers[name] = helper
+ self._repoReady[name] = False
+
+ def skipIfNotCapable(self, name):
+ """Either return None, or raise SkipTest"""
+ d = self.capable(name)
+ def _maybeSkip(res):
+ if not res[0]:
+ raise unittest.SkipTest(res[1])
+ d.addCallback(_maybeSkip)
+ return d
+
+ def capable(self, name):
+ """Return a Deferred that fires with (True,None) if this host offers
+ the given VC tool, or (False,excuse) if it does not (and therefore
+ the tests should be skipped)."""
+
+ if self._isCapable.has_key(name):
+ if self._isCapable[name]:
+ return defer.succeed((True,None))
+ else:
+ return defer.succeed((False, self._excuses[name]))
+ d = defer.maybeDeferred(self._helpers[name].capable)
+ def _capable(res):
+ if res[0]:
+ self._isCapable[name] = True
+ else:
+ self._excuses[name] = res[1]
+ return res
+ d.addCallback(_capable)
+ return d
+
+ def getHelper(self, name):
+ return self._helpers[name]
+
+ def createRepository(self, name):
+ """Return a Deferred that fires when the repository is set up."""
+ if self._repoReady[name]:
+ return defer.succeed(True)
+ d = self._helpers[name].createRepository()
+ def _ready(res):
+ self._repoReady[name] = True
+ d.addCallback(_ready)
+ return d
+
+VCS = VCS_Helper()
+
+
+# the overall plan here:
+#
+# Each VC system is tested separately, all using the same source tree defined
+# in the 'files' dictionary above. Each VC system gets its own TestCase
+# subclass. The first test case that is run will create the repository during
+# setUp(), making two branches: 'trunk' and 'branch'. The trunk gets a copy
+# of all the files in 'files'. The variant of good.c is committed on the
+# branch.
+#
+# then testCheckout is run, which does a number of checkout/clobber/update
+# builds. These all use trunk r1. It then runs self.fix(), which modifies
+# 'fixable.c', then performs another build and makes sure the tree has been
+# updated.
+#
+# testBranch uses trunk-r1 and branch-r1, making sure that we clobber the
+# tree properly when we switch between them
+#
+# testPatch does a trunk-r1 checkout and applies a patch.
+#
+# testTryGetPatch performs a trunk-r1 checkout, modifies some files, then
+# verifies that tryclient.getSourceStamp figures out the base revision and
+# what got changed.
+
+
+# vc_create makes a repository at r1 with three files: main.c, version.c, and
+# subdir/foo.c . It also creates a branch from r1 (called b1) in which main.c
+# says "hello branch" instead of "hello world". self.trunk[] contains
+# revision stamps for everything on the trunk, and self.branch[] does the
+# same for the branch.
+
+# vc_revise() checks out a tree at HEAD, changes version.c, then checks it
+# back in. The new version stamp is appended to self.trunk[]. The tree is
+# removed afterwards.
+
+# vc_try_checkout(workdir, rev) checks out a tree at REV, then changes
+# subdir/subdir.c to say 'Hello try'
+# vc_try_finish(workdir) removes the tree and cleans up any VC state
+# necessary (like deleting the Arch archive entry).
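+
+# The helper side of that contract looks roughly like the following sketch
+# (hypothetical names; the CVSHelper/SVNHelper/P4Helper/DarcsHelper classes
+# below are the real implementations):
+#
+#   class FooVCHelper(BaseHelper):
+#       branchname = "branch"
+#       def capable(self):
+#           # set self.vcexe and return (True, None) or (False, "excuse")
+#       def createRepository(self):
+#           # build trunk and branch, recording revisions with
+#           # self.addTrunkRev() / self.addBranchRev()
+#       def vc_revise(self):
+#           # bump version.c on the trunk, then addTrunkRev() the new stamp
+#       def vc_try_checkout(self, workdir, rev, branch=None):
+#           # check out REV (or BRANCH), overwrite subdir/subdir.c with TRY_C
+#       def vc_try_finish(self, workdir):
+#           # remove the tree and clean up any VC state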
+
+
+class BaseHelper:
+ def __init__(self):
+ self.trunk = []
+ self.branch = []
+ self.allrevs = []
+
+ def capable(self):
+ # this is also responsible for setting self.vcexe
+ raise NotImplementedError
+
+ def createBasedir(self):
+ # you must call this from createRepository
+ self.repbase = os.path.abspath(os.path.join("test_vc",
+ "repositories"))
+ if not os.path.isdir(self.repbase):
+ os.makedirs(self.repbase)
+
+ def createRepository(self):
+ # this will only be called once per process
+ raise NotImplementedError
+
+ def populate(self, basedir):
+ if not os.path.exists(basedir):
+ os.makedirs(basedir)
+ os.makedirs(os.path.join(basedir, "subdir"))
+ open(os.path.join(basedir, "main.c"), "w").write(MAIN_C)
+ self.version = 1
+ version_c = VERSION_C % self.version
+ open(os.path.join(basedir, "version.c"), "w").write(version_c)
+ open(os.path.join(basedir, "main.c"), "w").write(MAIN_C)
+ open(os.path.join(basedir, "subdir", "subdir.c"), "w").write(SUBDIR_C)
+
+ def populate_branch(self, basedir):
+ open(os.path.join(basedir, "main.c"), "w").write(BRANCH_C)
+
+ def addTrunkRev(self, rev):
+ self.trunk.append(rev)
+ self.allrevs.append(rev)
+ def addBranchRev(self, rev):
+ self.branch.append(rev)
+ self.allrevs.append(rev)
+
+ def runCommand(self, basedir, command, failureIsOk=False,
+ stdin=None, env=None):
+ # all commands passed to do() should be strings or lists. If they are
+ # strings, none of the arguments may have spaces. This makes the
+ # commands less verbose at the expense of restricting what they can
+ # specify.
+ if type(command) not in (list, tuple):
+ command = command.split(" ")
+
+ # execute scripts through cmd.exe on windows, to avoid space in path issues
+ if sys.platform == 'win32' and command[0].lower().endswith('.cmd'):
+ command = [which('cmd.exe')[0], '/c', 'call'] + command
+
+ DEBUG = False
+ if DEBUG:
+ print "do %s" % command
+ print " in basedir %s" % basedir
+ if stdin:
+ print " STDIN:\n", stdin, "\n--STDIN DONE"
+
+ if not env:
+ env = os.environ.copy()
+ env['LC_ALL'] = "C"
+ d = myGetProcessOutputAndValue(command[0], command[1:],
+ env=env, path=basedir,
+ stdin=stdin)
+ def check((out, err, code)):
+ if DEBUG:
+ print
+ print "command was: %s" % command
+ if out: print "out: %s" % out
+ if err: print "err: %s" % err
+ print "code: %s" % code
+ if code != 0 and not failureIsOk:
+ log.msg("command %s finished with exit code %d" %
+ (command, code))
+ log.msg(" and stdout %s" % (out,))
+ log.msg(" and stderr %s" % (err,))
+ raise RuntimeError("command %s finished with exit code %d"
+ % (command, code)
+ + ": see logs for stdout")
+ return out
+ d.addCallback(check)
+ return d
+
+ def do(self, basedir, command, failureIsOk=False, stdin=None, env=None):
+ d = self.runCommand(basedir, command, failureIsOk=failureIsOk,
+ stdin=stdin, env=env)
+ return waitForDeferred(d)
+
+ def dovc(self, basedir, command, failureIsOk=False, stdin=None, env=None):
+ """Like do(), but the VC binary will be prepended to COMMAND."""
+ if isinstance(command, (str, unicode)):
+ command = [self.vcexe] + command.split(' ')
+ else:
+ # command is a list
+ command = [self.vcexe] + command
+ return self.do(basedir, command, failureIsOk, stdin, env)
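+
+    # Typical usage of do()/dovc() in the deferredGenerator-based helper
+    # methods below: each call returns a waitForDeferred wrapper that must be
+    # yielded and then getResult()ed so that command failures propagate, e.g.
+    #
+    #   w = self.dovc(tmp, ['commit', '-m', 'some_message'])
+    #   yield w; out = w.getResult()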
+
+class VCBase(SignalMixin):
+ metadir = None
+ createdRepository = False
+ master = None
+ slave = None
+ helper = None
+ httpServer = None
+ httpPort = None
+ skip = None
+ has_got_revision = False
+ has_got_revision_branches_are_merged = False # for SVN
+
+ def failUnlessIn(self, substring, string, msg=None):
+        # trial provides a version of this, but it requires python-2.3 when
+        # used on strings.
+ if msg is None:
+ msg = ("did not see the expected substring '%s' in string '%s'" %
+ (substring, string))
+ self.failUnless(string.find(substring) != -1, msg)
+
+ def setUp(self):
+ d = VCS.skipIfNotCapable(self.vc_name)
+ d.addCallback(self._setUp1)
+ return d
+
+ def _setUp1(self, res):
+ self.helper = VCS.getHelper(self.vc_name)
+
+ if os.path.exists("basedir"):
+ rmdirRecursive("basedir")
+ os.mkdir("basedir")
+ self.master = master.BuildMaster("basedir")
+ self.slavebase = os.path.abspath("slavebase")
+ if os.path.exists(self.slavebase):
+ rmdirRecursive(self.slavebase)
+ os.mkdir("slavebase")
+
+ d = VCS.createRepository(self.vc_name)
+ return d
+
+ def connectSlave(self):
+ port = self.master.slavePort._port.getHost().port
+ slave = bot.BuildSlave("localhost", port, "bot1", "sekrit",
+ self.slavebase, keepalive=0, usePTY=False)
+ self.slave = slave
+ slave.startService()
+ d = self.master.botmaster.waitUntilBuilderAttached("vc")
+ return d
+
+ def loadConfig(self, config):
+ # reloading the config file causes a new 'listDirs' command to be
+ # sent to the slave. To synchronize on this properly, it is easiest
+ # to stop and restart the slave.
+ d = defer.succeed(None)
+ if self.slave:
+ d = self.master.botmaster.waitUntilBuilderDetached("vc")
+ self.slave.stopService()
+ d.addCallback(lambda res: self.master.loadConfig(config))
+ d.addCallback(lambda res: self.connectSlave())
+ return d
+
+ def serveHTTP(self):
+ # launch an HTTP server to serve the repository files
+ self.root = static.File(self.helper.repbase)
+ self.site = server.Site(self.root)
+ self.httpServer = reactor.listenTCP(0, self.site)
+ self.httpPort = self.httpServer.getHost().port
+
+ def doBuild(self, shouldSucceed=True, ss=None):
+ c = interfaces.IControl(self.master)
+
+ if ss is None:
+ ss = SourceStamp()
+ #print "doBuild(ss: b=%s rev=%s)" % (ss.branch, ss.revision)
+ req = base.BuildRequest("test_vc forced build", ss, 'test_builder')
+ d = req.waitUntilFinished()
+ c.getBuilder("vc").requestBuild(req)
+ d.addCallback(self._doBuild_1, shouldSucceed)
+ return d
+ def _doBuild_1(self, bs, shouldSucceed):
+ r = bs.getResults()
+ if r != SUCCESS and shouldSucceed:
+ print
+ print
+ if not bs.isFinished():
+ print "Hey, build wasn't even finished!"
+ print "Build did not succeed:", r, bs.getText()
+ for s in bs.getSteps():
+ for l in s.getLogs():
+ print "--- START step %s / log %s ---" % (s.getName(),
+ l.getName())
+ print l.getTextWithHeaders()
+ print "--- STOP ---"
+ print
+ self.fail("build did not succeed")
+ return bs
+
+ def printLogs(self, bs):
+ for s in bs.getSteps():
+ for l in s.getLogs():
+ print "--- START step %s / log %s ---" % (s.getName(),
+ l.getName())
+ print l.getTextWithHeaders()
+ print "--- STOP ---"
+ print
+
+ def touch(self, d, f):
+ open(os.path.join(d,f),"w").close()
+ def shouldExist(self, *args):
+ target = os.path.join(*args)
+ self.failUnless(os.path.exists(target),
+ "expected to find %s but didn't" % target)
+ def shouldNotExist(self, *args):
+ target = os.path.join(*args)
+ self.failIf(os.path.exists(target),
+ "expected to NOT find %s, but did" % target)
+ def shouldContain(self, d, f, contents):
+ c = open(os.path.join(d, f), "r").read()
+ self.failUnlessIn(contents, c)
+
+ def checkGotRevision(self, bs, expected):
+ if self.has_got_revision:
+ self.failUnlessEqual(bs.getProperty("got_revision"), str(expected))
+
+ def checkGotRevisionIsLatest(self, bs):
+ expected = self.helper.trunk[-1]
+ if self.has_got_revision_branches_are_merged:
+ expected = self.helper.allrevs[-1]
+ self.checkGotRevision(bs, expected)
+
+ def do_vctest(self, testRetry=True):
+ vctype = self.vctype
+ args = self.helper.vcargs
+ m = self.master
+ self.vcdir = os.path.join(self.slavebase, "vc-dir", "source")
+ self.workdir = os.path.join(self.slavebase, "vc-dir", "build")
+ # woo double-substitution
+ s = "s(%s, timeout=200, workdir='build', mode='%%s'" % (vctype,)
+ for k,v in args.items():
+ s += ", %s=%s" % (k, repr(v))
+ s += ")"
+ config = config_vc % s
+
+ m.loadConfig(config % 'clobber')
+ m.readConfig = True
+ m.startService()
+
+ d = self.connectSlave()
+ d.addCallback(lambda res: log.msg("testing clobber"))
+ d.addCallback(self._do_vctest_clobber)
+ d.addCallback(lambda res: log.msg("doing update"))
+ d.addCallback(lambda res: self.loadConfig(config % 'update'))
+ d.addCallback(lambda res: log.msg("testing update"))
+ d.addCallback(self._do_vctest_update)
+ if testRetry:
+ d.addCallback(lambda res: log.msg("testing update retry"))
+ d.addCallback(self._do_vctest_update_retry)
+ d.addCallback(lambda res: log.msg("doing copy"))
+ d.addCallback(lambda res: self.loadConfig(config % 'copy'))
+ d.addCallback(lambda res: log.msg("testing copy"))
+ d.addCallback(self._do_vctest_copy)
+ d.addCallback(lambda res: log.msg("did copy test"))
+ if self.metadir:
+ d.addCallback(lambda res: log.msg("doing export"))
+ d.addCallback(lambda res: self.loadConfig(config % 'export'))
+ d.addCallback(lambda res: log.msg("testing export"))
+ d.addCallback(self._do_vctest_export)
+ d.addCallback(lambda res: log.msg("did export test"))
+ return d
+
+ def _do_vctest_clobber(self, res):
+ d = self.doBuild() # initial checkout
+ d.addCallback(self._do_vctest_clobber_1)
+ return d
+ def _do_vctest_clobber_1(self, bs):
+ self.shouldExist(self.workdir, "main.c")
+ self.shouldExist(self.workdir, "version.c")
+ self.shouldExist(self.workdir, "subdir", "subdir.c")
+ if self.metadir:
+ self.shouldExist(self.workdir, self.metadir)
+ self.failUnlessEqual(bs.getProperty("revision"), None)
+ self.failUnlessEqual(bs.getProperty("branch"), None)
+ self.checkGotRevisionIsLatest(bs)
+
+ self.touch(self.workdir, "newfile")
+ self.shouldExist(self.workdir, "newfile")
+ d = self.doBuild() # rebuild clobbers workdir
+ d.addCallback(self._do_vctest_clobber_2)
+ return d
+ def _do_vctest_clobber_2(self, res):
+ self.shouldNotExist(self.workdir, "newfile")
+ # do a checkout to a specific version. Mercurial-over-HTTP (when
+ # either client or server is older than hg-0.9.2) cannot do this
+ # directly, so it must checkout HEAD and then update back to the
+ # requested revision.
+ d = self.doBuild(ss=SourceStamp(revision=self.helper.trunk[0]))
+ d.addCallback(self._do_vctest_clobber_3)
+ return d
+ def _do_vctest_clobber_3(self, bs):
+ self.shouldExist(self.workdir, "main.c")
+ self.shouldExist(self.workdir, "version.c")
+ self.shouldExist(self.workdir, "subdir", "subdir.c")
+ if self.metadir:
+ self.shouldExist(self.workdir, self.metadir)
+ self.failUnlessEqual(bs.getProperty("revision"), self.helper.trunk[0] or None)
+ self.failUnlessEqual(bs.getProperty("branch"), None)
+ self.checkGotRevision(bs, self.helper.trunk[0])
+ # leave the tree at HEAD
+ return self.doBuild()
+
+
+ def _do_vctest_update(self, res):
+ log.msg("_do_vctest_update")
+ d = self.doBuild() # rebuild with update
+ d.addCallback(self._do_vctest_update_1)
+ return d
+ def _do_vctest_update_1(self, bs):
+ log.msg("_do_vctest_update_1")
+ self.shouldExist(self.workdir, "main.c")
+ self.shouldExist(self.workdir, "version.c")
+ self.shouldContain(self.workdir, "version.c",
+ "version=%d" % self.helper.version)
+ if self.metadir:
+ self.shouldExist(self.workdir, self.metadir)
+ self.failUnlessEqual(bs.getProperty("revision"), None)
+ self.checkGotRevisionIsLatest(bs)
+
+ self.touch(self.workdir, "newfile")
+ d = self.doBuild() # update rebuild leaves new files
+ d.addCallback(self._do_vctest_update_2)
+ return d
+ def _do_vctest_update_2(self, bs):
+ log.msg("_do_vctest_update_2")
+ self.shouldExist(self.workdir, "main.c")
+ self.shouldExist(self.workdir, "version.c")
+ self.touch(self.workdir, "newfile")
+ # now make a change to the repository and make sure we pick it up
+ d = self.helper.vc_revise()
+ d.addCallback(lambda res: self.doBuild())
+ d.addCallback(self._do_vctest_update_3)
+ return d
+ def _do_vctest_update_3(self, bs):
+ log.msg("_do_vctest_update_3")
+ self.shouldExist(self.workdir, "main.c")
+ self.shouldExist(self.workdir, "version.c")
+ self.shouldContain(self.workdir, "version.c",
+ "version=%d" % self.helper.version)
+ self.shouldExist(self.workdir, "newfile")
+ self.failUnlessEqual(bs.getProperty("revision"), None)
+ self.checkGotRevisionIsLatest(bs)
+
+ # now "update" to an older revision
+ d = self.doBuild(ss=SourceStamp(revision=self.helper.trunk[-2]))
+ d.addCallback(self._do_vctest_update_4)
+ return d
+ def _do_vctest_update_4(self, bs):
+ log.msg("_do_vctest_update_4")
+ self.shouldExist(self.workdir, "main.c")
+ self.shouldExist(self.workdir, "version.c")
+ self.shouldContain(self.workdir, "version.c",
+ "version=%d" % (self.helper.version-1))
+ self.failUnlessEqual(bs.getProperty("revision"),
+ self.helper.trunk[-2] or None)
+ self.checkGotRevision(bs, self.helper.trunk[-2])
+
+ # now update to the newer revision
+ d = self.doBuild(ss=SourceStamp(revision=self.helper.trunk[-1]))
+ d.addCallback(self._do_vctest_update_5)
+ return d
+ def _do_vctest_update_5(self, bs):
+ log.msg("_do_vctest_update_5")
+ self.shouldExist(self.workdir, "main.c")
+ self.shouldExist(self.workdir, "version.c")
+ self.shouldContain(self.workdir, "version.c",
+ "version=%d" % self.helper.version)
+ self.failUnlessEqual(bs.getProperty("revision"),
+ self.helper.trunk[-1] or None)
+ self.checkGotRevision(bs, self.helper.trunk[-1])
+
+
+ def _do_vctest_update_retry(self, res):
+ # certain local changes will prevent an update from working. The
+ # most common is to replace a file with a directory, or vice
+ # versa. The slave code should spot the failure and do a
+ # clobber/retry.
+ os.unlink(os.path.join(self.workdir, "main.c"))
+ os.mkdir(os.path.join(self.workdir, "main.c"))
+ self.touch(os.path.join(self.workdir, "main.c"), "foo")
+ self.touch(self.workdir, "newfile")
+
+ d = self.doBuild() # update, but must clobber to handle the error
+ d.addCallback(self._do_vctest_update_retry_1)
+ return d
+ def _do_vctest_update_retry_1(self, bs):
+ # SVN-1.4.0 doesn't seem to have any problem with the
+ # file-turned-directory issue (although older versions did). So don't
+ # actually check that the tree was clobbered.. as long as the update
+ # succeeded (checked by doBuild), that should be good enough.
+ #self.shouldNotExist(self.workdir, "newfile")
+ pass
+
+ def _do_vctest_copy(self, res):
+ log.msg("_do_vctest_copy 1")
+ d = self.doBuild() # copy rebuild clobbers new files
+ d.addCallback(self._do_vctest_copy_1)
+ return d
+ def _do_vctest_copy_1(self, bs):
+ log.msg("_do_vctest_copy 2")
+ if self.metadir:
+ self.shouldExist(self.workdir, self.metadir)
+ self.shouldNotExist(self.workdir, "newfile")
+ self.touch(self.workdir, "newfile")
+ self.touch(self.vcdir, "newvcfile")
+ self.failUnlessEqual(bs.getProperty("revision"), None)
+ self.checkGotRevisionIsLatest(bs)
+
+ d = self.doBuild() # copy rebuild clobbers new files
+ d.addCallback(self._do_vctest_copy_2)
+ return d
+ def _do_vctest_copy_2(self, bs):
+ log.msg("_do_vctest_copy 3")
+ if self.metadir:
+ self.shouldExist(self.workdir, self.metadir)
+ self.shouldNotExist(self.workdir, "newfile")
+ self.shouldExist(self.vcdir, "newvcfile")
+ self.shouldExist(self.workdir, "newvcfile")
+ self.failUnlessEqual(bs.getProperty("revision"), None)
+ self.checkGotRevisionIsLatest(bs)
+ self.touch(self.workdir, "newfile")
+
+ def _do_vctest_export(self, res):
+ d = self.doBuild() # export rebuild clobbers new files
+ d.addCallback(self._do_vctest_export_1)
+ return d
+ def _do_vctest_export_1(self, bs):
+ self.shouldNotExist(self.workdir, self.metadir)
+ self.shouldNotExist(self.workdir, "newfile")
+ self.failUnlessEqual(bs.getProperty("revision"), None)
+ #self.checkGotRevisionIsLatest(bs)
+ # VC 'export' is not required to have a got_revision
+ self.touch(self.workdir, "newfile")
+
+ d = self.doBuild() # export rebuild clobbers new files
+ d.addCallback(self._do_vctest_export_2)
+ return d
+ def _do_vctest_export_2(self, bs):
+ self.shouldNotExist(self.workdir, self.metadir)
+ self.shouldNotExist(self.workdir, "newfile")
+ self.failUnlessEqual(bs.getProperty("revision"), None)
+ #self.checkGotRevisionIsLatest(bs)
+ # VC 'export' is not required to have a got_revision
+
+ def do_patch(self):
+ vctype = self.vctype
+ args = self.helper.vcargs
+ m = self.master
+ self.vcdir = os.path.join(self.slavebase, "vc-dir", "source")
+ self.workdir = os.path.join(self.slavebase, "vc-dir", "build")
+ s = "s(%s, timeout=200, workdir='build', mode='%%s'" % (vctype,)
+ for k,v in args.items():
+ s += ", %s=%s" % (k, repr(v))
+ s += ")"
+ self.config = config_vc % s
+
+ m.loadConfig(self.config % "clobber")
+ m.readConfig = True
+ m.startService()
+
+ ss = SourceStamp(revision=self.helper.trunk[-1], patch=(0, p0_diff))
+
+ d = self.connectSlave()
+ d.addCallback(lambda res: self.doBuild(ss=ss))
+ d.addCallback(self._doPatch_1)
+ return d
+ def _doPatch_1(self, bs):
+ self.shouldContain(self.workdir, "version.c",
+ "version=%d" % self.helper.version)
+ # make sure the file actually got patched
+ subdir_c = os.path.join(self.slavebase, "vc-dir", "build",
+ "subdir", "subdir.c")
+ data = open(subdir_c, "r").read()
+ self.failUnlessIn("Hello patched subdir.\\n", data)
+ self.failUnlessEqual(bs.getProperty("revision"),
+ self.helper.trunk[-1] or None)
+ self.checkGotRevision(bs, self.helper.trunk[-1])
+
+ # make sure that a rebuild does not use the leftover patched workdir
+ d = self.master.loadConfig(self.config % "update")
+ d.addCallback(lambda res: self.doBuild(ss=None))
+ d.addCallback(self._doPatch_2)
+ return d
+ def _doPatch_2(self, bs):
+ # make sure the file is back to its original
+ subdir_c = os.path.join(self.slavebase, "vc-dir", "build",
+ "subdir", "subdir.c")
+ data = open(subdir_c, "r").read()
+ self.failUnlessIn("Hello subdir.\\n", data)
+ self.failUnlessEqual(bs.getProperty("revision"), None)
+ self.checkGotRevisionIsLatest(bs)
+
+ # now make sure we can patch an older revision. We need at least two
+ # revisions here, so we might have to create one first
+ if len(self.helper.trunk) < 2:
+ d = self.helper.vc_revise()
+ d.addCallback(self._doPatch_3)
+ return d
+ return self._doPatch_3()
+
+ def _doPatch_3(self, res=None):
+ ss = SourceStamp(revision=self.helper.trunk[-2], patch=(0, p0_diff))
+ d = self.doBuild(ss=ss)
+ d.addCallback(self._doPatch_4)
+ return d
+ def _doPatch_4(self, bs):
+ self.shouldContain(self.workdir, "version.c",
+ "version=%d" % (self.helper.version-1))
+ # and make sure the file actually got patched
+ subdir_c = os.path.join(self.slavebase, "vc-dir", "build",
+ "subdir", "subdir.c")
+ data = open(subdir_c, "r").read()
+ self.failUnlessIn("Hello patched subdir.\\n", data)
+ self.failUnlessEqual(bs.getProperty("revision"),
+ self.helper.trunk[-2] or None)
+ self.checkGotRevision(bs, self.helper.trunk[-2])
+
+ # now check that we can patch a branch
+ ss = SourceStamp(branch=self.helper.branchname,
+ revision=self.helper.branch[-1],
+ patch=(0, p0_diff))
+ d = self.doBuild(ss=ss)
+ d.addCallback(self._doPatch_5)
+ return d
+ def _doPatch_5(self, bs):
+ self.shouldContain(self.workdir, "version.c",
+ "version=%d" % 1)
+ self.shouldContain(self.workdir, "main.c", "Hello branch.")
+ subdir_c = os.path.join(self.slavebase, "vc-dir", "build",
+ "subdir", "subdir.c")
+ data = open(subdir_c, "r").read()
+ self.failUnlessIn("Hello patched subdir.\\n", data)
+ self.failUnlessEqual(bs.getProperty("revision"),
+ self.helper.branch[-1] or None)
+ self.failUnlessEqual(bs.getProperty("branch"), self.helper.branchname or None)
+ self.checkGotRevision(bs, self.helper.branch[-1])
+
+
+ def do_vctest_once(self, shouldSucceed):
+ m = self.master
+ vctype = self.vctype
+ args = self.helper.vcargs
+ vcdir = os.path.join(self.slavebase, "vc-dir", "source")
+ workdir = os.path.join(self.slavebase, "vc-dir", "build")
+ # woo double-substitution
+ s = "s(%s, timeout=200, workdir='build', mode='clobber'" % (vctype,)
+ for k,v in args.items():
+ s += ", %s=%s" % (k, repr(v))
+ s += ")"
+ config = config_vc % s
+
+ m.loadConfig(config)
+ m.readConfig = True
+ m.startService()
+
+ self.connectSlave()
+ d = self.doBuild(shouldSucceed) # initial checkout
+ return d
+
+ def do_branch(self):
+ log.msg("do_branch")
+ vctype = self.vctype
+ args = self.helper.vcargs
+ m = self.master
+ self.vcdir = os.path.join(self.slavebase, "vc-dir", "source")
+ self.workdir = os.path.join(self.slavebase, "vc-dir", "build")
+ s = "s(%s, timeout=200, workdir='build', mode='%%s'" % (vctype,)
+ for k,v in args.items():
+ s += ", %s=%s" % (k, repr(v))
+ s += ")"
+ self.config = config_vc % s
+
+ m.loadConfig(self.config % "update")
+ m.readConfig = True
+ m.startService()
+
+ # first we do a build of the trunk
+ d = self.connectSlave()
+ d.addCallback(lambda res: self.doBuild(ss=SourceStamp()))
+ d.addCallback(self._doBranch_1)
+ return d
+ def _doBranch_1(self, bs):
+ log.msg("_doBranch_1")
+ # make sure the checkout was of the trunk
+ main_c = os.path.join(self.slavebase, "vc-dir", "build", "main.c")
+ data = open(main_c, "r").read()
+ self.failUnlessIn("Hello world.", data)
+
+ # now do a checkout on the branch. The change in branch name should
+ # trigger a clobber.
+ self.touch(self.workdir, "newfile")
+ d = self.doBuild(ss=SourceStamp(branch=self.helper.branchname))
+ d.addCallback(self._doBranch_2)
+ return d
+ def _doBranch_2(self, bs):
+ log.msg("_doBranch_2")
+ # make sure it was on the branch
+ main_c = os.path.join(self.slavebase, "vc-dir", "build", "main.c")
+ data = open(main_c, "r").read()
+ self.failUnlessIn("Hello branch.", data)
+ # and make sure the tree was clobbered
+ self.shouldNotExist(self.workdir, "newfile")
+
+ # doing another build on the same branch should not clobber the tree
+ self.touch(self.workdir, "newbranchfile")
+ d = self.doBuild(ss=SourceStamp(branch=self.helper.branchname))
+ d.addCallback(self._doBranch_3)
+ return d
+ def _doBranch_3(self, bs):
+ log.msg("_doBranch_3")
+ # make sure it is still on the branch
+ main_c = os.path.join(self.slavebase, "vc-dir", "build", "main.c")
+ data = open(main_c, "r").read()
+ self.failUnlessIn("Hello branch.", data)
+ # and make sure the tree was not clobbered
+ self.shouldExist(self.workdir, "newbranchfile")
+
+ # now make sure that a non-branch checkout clobbers the tree
+ d = self.doBuild(ss=SourceStamp())
+ d.addCallback(self._doBranch_4)
+ return d
+ def _doBranch_4(self, bs):
+ log.msg("_doBranch_4")
+ # make sure it was on the trunk
+ main_c = os.path.join(self.slavebase, "vc-dir", "build", "main.c")
+ data = open(main_c, "r").read()
+ self.failUnlessIn("Hello world.", data)
+ self.shouldNotExist(self.workdir, "newbranchfile")
+
+ def do_getpatch(self, doBranch=True):
+ log.msg("do_getpatch")
+ # prepare a buildslave to do checkouts
+ vctype = self.vctype
+ args = self.helper.vcargs
+ m = self.master
+ self.vcdir = os.path.join(self.slavebase, "vc-dir", "source")
+ self.workdir = os.path.join(self.slavebase, "vc-dir", "build")
+ # woo double-substitution
+ s = "s(%s, timeout=200, workdir='build', mode='%%s'" % (vctype,)
+ for k,v in args.items():
+ s += ", %s=%s" % (k, repr(v))
+ s += ")"
+ config = config_vc % s
+
+ m.loadConfig(config % 'clobber')
+ m.readConfig = True
+ m.startService()
+
+ d = self.connectSlave()
+
+ # then set up the "developer's tree". first we modify a tree from the
+ # head of the trunk
+ tmpdir = "try_workdir"
+ self.trydir = os.path.join(self.helper.repbase, tmpdir)
+ rmdirRecursive(self.trydir)
+ d.addCallback(self.do_getpatch_trunkhead)
+ d.addCallback(self.do_getpatch_trunkold)
+ if doBranch:
+ d.addCallback(self.do_getpatch_branch)
+ d.addCallback(self.do_getpatch_finish)
+ return d
+
+ def do_getpatch_finish(self, res):
+ log.msg("do_getpatch_finish")
+ self.helper.vc_try_finish(self.trydir)
+ return res
+
+ def try_shouldMatch(self, filename):
+ devfilename = os.path.join(self.trydir, filename)
+ devfile = open(devfilename, "r").read()
+ slavefilename = os.path.join(self.workdir, filename)
+ slavefile = open(slavefilename, "r").read()
+ self.failUnlessEqual(devfile, slavefile,
+ ("slavefile (%s) contains '%s'. "
+ "developer's file (%s) contains '%s'. "
+ "These ought to match") %
+ (slavefilename, slavefile,
+ devfilename, devfile))
+
+ def do_getpatch_trunkhead(self, res):
+ log.msg("do_getpatch_trunkhead")
+ d = self.helper.vc_try_checkout(self.trydir, self.helper.trunk[-1])
+ d.addCallback(self._do_getpatch_trunkhead_1)
+ return d
+ def _do_getpatch_trunkhead_1(self, res):
+ log.msg("_do_getpatch_trunkhead_1")
+ d = tryclient.getSourceStamp(self.vctype_try, self.trydir, None)
+ d.addCallback(self._do_getpatch_trunkhead_2)
+ return d
+ def _do_getpatch_trunkhead_2(self, ss):
+ log.msg("_do_getpatch_trunkhead_2")
+ d = self.doBuild(ss=ss)
+ d.addCallback(self._do_getpatch_trunkhead_3)
+ return d
+ def _do_getpatch_trunkhead_3(self, res):
+ log.msg("_do_getpatch_trunkhead_3")
+ # verify that the resulting buildslave tree matches the developer's
+ self.try_shouldMatch("main.c")
+ self.try_shouldMatch("version.c")
+ self.try_shouldMatch(os.path.join("subdir", "subdir.c"))
+
+ def do_getpatch_trunkold(self, res):
+ log.msg("do_getpatch_trunkold")
+ # now try a tree from an older revision. We need at least two
+ # revisions here, so we might have to create one first
+ if len(self.helper.trunk) < 2:
+ d = self.helper.vc_revise()
+ d.addCallback(self._do_getpatch_trunkold_1)
+ return d
+ return self._do_getpatch_trunkold_1()
+ def _do_getpatch_trunkold_1(self, res=None):
+ log.msg("_do_getpatch_trunkold_1")
+ d = self.helper.vc_try_checkout(self.trydir, self.helper.trunk[-2])
+ d.addCallback(self._do_getpatch_trunkold_2)
+ return d
+ def _do_getpatch_trunkold_2(self, res):
+ log.msg("_do_getpatch_trunkold_2")
+ d = tryclient.getSourceStamp(self.vctype_try, self.trydir, None)
+ d.addCallback(self._do_getpatch_trunkold_3)
+ return d
+ def _do_getpatch_trunkold_3(self, ss):
+ log.msg("_do_getpatch_trunkold_3")
+ d = self.doBuild(ss=ss)
+ d.addCallback(self._do_getpatch_trunkold_4)
+ return d
+ def _do_getpatch_trunkold_4(self, res):
+ log.msg("_do_getpatch_trunkold_4")
+ # verify that the resulting buildslave tree matches the developer's
+ self.try_shouldMatch("main.c")
+ self.try_shouldMatch("version.c")
+ self.try_shouldMatch(os.path.join("subdir", "subdir.c"))
+
+ def do_getpatch_branch(self, res):
+ log.msg("do_getpatch_branch")
+ # now try a tree from a branch
+ d = self.helper.vc_try_checkout(self.trydir, self.helper.branch[-1],
+ self.helper.branchname)
+ d.addCallback(self._do_getpatch_branch_1)
+ return d
+ def _do_getpatch_branch_1(self, res):
+ log.msg("_do_getpatch_branch_1")
+ d = tryclient.getSourceStamp(self.vctype_try, self.trydir,
+ self.helper.try_branchname)
+ d.addCallback(self._do_getpatch_branch_2)
+ return d
+ def _do_getpatch_branch_2(self, ss):
+ log.msg("_do_getpatch_branch_2")
+ d = self.doBuild(ss=ss)
+ d.addCallback(self._do_getpatch_branch_3)
+ return d
+ def _do_getpatch_branch_3(self, res):
+ log.msg("_do_getpatch_branch_3")
+ # verify that the resulting buildslave tree matches the developer's
+ self.try_shouldMatch("main.c")
+ self.try_shouldMatch("version.c")
+ self.try_shouldMatch(os.path.join("subdir", "subdir.c"))
+
+
+ def dumpPatch(self, patch):
+ # this exists to help me figure out the right 'patchlevel' value
+ # should be returned by tryclient.getSourceStamp
+ n = self.mktemp()
+ open(n,"w").write(patch)
+ d = self.runCommand(".", ["lsdiff", n])
+ def p(res): print "lsdiff:", res.strip().split("\n")
+ d.addCallback(p)
+ return d
+
+
+ def tearDown(self):
+ d = defer.succeed(None)
+ if self.slave:
+ d2 = self.master.botmaster.waitUntilBuilderDetached("vc")
+ d.addCallback(lambda res: self.slave.stopService())
+ d.addCallback(lambda res: d2)
+ if self.master:
+ d.addCallback(lambda res: self.master.stopService())
+ if self.httpServer:
+ d.addCallback(lambda res: self.httpServer.stopListening())
+ def stopHTTPTimer():
+ from twisted.web import http
+ http._logDateTimeStop() # shut down the internal timer. DUMB!
+ d.addCallback(lambda res: stopHTTPTimer())
+ d.addCallback(lambda res: self.tearDown2())
+ return d
+
+ def tearDown2(self):
+ pass
+
+class CVSHelper(BaseHelper):
+ branchname = "branch"
+ try_branchname = "branch"
+
+ def capable(self):
+ cvspaths = which('cvs')
+ if not cvspaths:
+ return (False, "CVS is not installed")
+ # cvs-1.10 (as shipped with OS-X 10.3 "Panther") is too old for this
+ # test. There is a situation where we check out a tree, make a
+ # change, then commit it back, and CVS refuses to believe that we're
+ # operating in a CVS tree. I tested cvs-1.12.9 and it works ok, OS-X
+ # 10.4 "Tiger" comes with cvs-1.11, but I haven't tested that yet.
+ # For now, skip the tests if we've got 1.10 .
+ log.msg("running %s --version.." % (cvspaths[0],))
+ d = utils.getProcessOutput(cvspaths[0], ["--version"],
+ env=os.environ)
+ d.addCallback(self._capable, cvspaths[0])
+ return d
+
+ def _capable(self, v, vcexe):
+ m = re.search(r'\(CVS\) ([\d\.]+) ', v)
+ if not m:
+ log.msg("couldn't identify CVS version number in output:")
+ log.msg("'''%s'''" % v)
+ log.msg("skipping tests")
+ return (False, "Found CVS but couldn't identify its version")
+ ver = m.group(1)
+ log.msg("found CVS version '%s'" % ver)
+ if ver == "1.10":
+ return (False, "Found CVS, but it is too old")
+ self.vcexe = vcexe
+ return (True, None)
+
+ def getdate(self):
+ # this timestamp is eventually passed to CVS in a -D argument, and
+ # strftime's %z specifier doesn't seem to work reliably (I get +0000
+ # where I should get +0700 under linux sometimes, and windows seems
+ # to want to put a verbose 'Eastern Standard Time' in there), so
+ # leave off the timezone specifier and treat this as localtime. A
+ # valid alternative would be to use a hard-coded +0000 and
+ # time.gmtime().
+ return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
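+
+    # The hard-coded-UTC alternative mentioned above would look like this
+    # (not used, so that the -D argument stays in local time):
+    #
+    #   return time.strftime("%Y-%m-%d %H:%M:%S +0000", time.gmtime())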
+
+ def createRepository(self):
+ self.createBasedir()
+ self.cvsrep = cvsrep = os.path.join(self.repbase, "CVS-Repository")
+ tmp = os.path.join(self.repbase, "cvstmp")
+
+ w = self.dovc(self.repbase, ['-d', cvsrep, 'init'])
+ yield w; w.getResult() # we must getResult() to raise any exceptions
+
+ self.populate(tmp)
+ cmd = ['-d', self.cvsrep, 'import',
+ '-m', 'sample_project_files', 'sample', 'vendortag', 'start']
+ w = self.dovc(tmp, cmd)
+ yield w; w.getResult()
+ rmdirRecursive(tmp)
+ # take a timestamp as the first revision number
+ time.sleep(2)
+ self.addTrunkRev(self.getdate())
+ time.sleep(2)
+
+ w = self.dovc(self.repbase,
+ ['-d', self.cvsrep, 'checkout', '-d', 'cvstmp', 'sample'])
+ yield w; w.getResult()
+
+ w = self.dovc(tmp, ['tag', '-b', self.branchname])
+ yield w; w.getResult()
+ self.populate_branch(tmp)
+ w = self.dovc(tmp,
+ ['commit', '-m', 'commit_on_branch', '-r', self.branchname])
+ yield w; w.getResult()
+ rmdirRecursive(tmp)
+ time.sleep(2)
+ self.addBranchRev(self.getdate())
+ time.sleep(2)
+ self.vcargs = { 'cvsroot': self.cvsrep, 'cvsmodule': "sample" }
+ createRepository = deferredGenerator(createRepository)
+
+
+ def vc_revise(self):
+ tmp = os.path.join(self.repbase, "cvstmp")
+
+ w = self.dovc(self.repbase,
+ ['-d', self.cvsrep, 'checkout', '-d', 'cvstmp', 'sample'])
+ yield w; w.getResult()
+ self.version += 1
+ version_c = VERSION_C % self.version
+ open(os.path.join(tmp, "version.c"), "w").write(version_c)
+ w = self.dovc(tmp,
+ ['commit', '-m', 'revised_to_%d' % self.version, 'version.c'])
+ yield w; w.getResult()
+ rmdirRecursive(tmp)
+ time.sleep(2)
+ self.addTrunkRev(self.getdate())
+ time.sleep(2)
+ vc_revise = deferredGenerator(vc_revise)
+
+ def vc_try_checkout(self, workdir, rev, branch=None):
+ # 'workdir' is an absolute path
+ assert os.path.abspath(workdir) == workdir
+ cmd = [self.vcexe, "-d", self.cvsrep, "checkout",
+ "-d", workdir,
+ "-D", rev]
+ if branch is not None:
+ cmd.append("-r")
+ cmd.append(branch)
+ cmd.append("sample")
+ w = self.do(self.repbase, cmd)
+ yield w; w.getResult()
+ open(os.path.join(workdir, "subdir", "subdir.c"), "w").write(TRY_C)
+ vc_try_checkout = deferredGenerator(vc_try_checkout)
+
+ def vc_try_finish(self, workdir):
+ rmdirRecursive(workdir)
+
+class CVS(VCBase, unittest.TestCase):
+ vc_name = "cvs"
+
+ metadir = "CVS"
+ vctype = "source.CVS"
+ vctype_try = "cvs"
+ # CVS gives us got_revision, but it is based entirely upon the local
+ # clock, which means it is unlikely to match the timestamp taken earlier.
+ # This might be enough for common use, but won't be good enough for our
+ # tests to accept, so pretend it doesn't have got_revision at all.
+ has_got_revision = False
+
+ def testCheckout(self):
+ d = self.do_vctest()
+ return d
+
+ def testPatch(self):
+ d = self.do_patch()
+ return d
+
+ def testCheckoutBranch(self):
+ d = self.do_branch()
+ return d
+
+ def testTry(self):
+ d = self.do_getpatch(doBranch=False)
+ return d
+
+VCS.registerVC(CVS.vc_name, CVSHelper())
+
+
+class SVNHelper(BaseHelper):
+ branchname = "sample/branch"
+ try_branchname = "sample/branch"
+
+ def capable(self):
+ svnpaths = which('svn')
+ svnadminpaths = which('svnadmin')
+ if not svnpaths:
+ return (False, "SVN is not installed")
+ if not svnadminpaths:
+ return (False, "svnadmin is not installed")
+ # we need svn to be compiled with the ra_local access
+ # module
+ log.msg("running svn --version..")
+ env = os.environ.copy()
+ env['LC_ALL'] = "C"
+ d = utils.getProcessOutput(svnpaths[0], ["--version"],
+ env=env)
+ d.addCallback(self._capable, svnpaths[0], svnadminpaths[0])
+ return d
+
+ def _capable(self, v, vcexe, svnadmin):
+ if v.find("handles 'file' schem") != -1:
+            # older versions say 'schema'; 1.2.0 and beyond say 'scheme'
+ self.vcexe = vcexe
+ self.svnadmin = svnadmin
+ return (True, None)
+ excuse = ("%s found but it does not support 'file:' " +
+ "schema, skipping svn tests") % vcexe
+ log.msg(excuse)
+ return (False, excuse)
+
+ def createRepository(self):
+ self.createBasedir()
+ self.svnrep = os.path.join(self.repbase,
+ "SVN-Repository").replace('\\','/')
+ tmp = os.path.join(self.repbase, "svntmp")
+ if sys.platform == 'win32':
+            # On Windows, paths do not start with a /
+ self.svnurl = "file:///%s" % self.svnrep
+ else:
+ self.svnurl = "file://%s" % self.svnrep
+ self.svnurl_trunk = self.svnurl + "/sample/trunk"
+ self.svnurl_branch = self.svnurl + "/sample/branch"
+
+ w = self.do(self.repbase, [self.svnadmin, "create", self.svnrep])
+ yield w; w.getResult()
+
+ self.populate(tmp)
+ w = self.dovc(tmp,
+ ['import', '-m', 'sample_project_files', self.svnurl_trunk])
+ yield w; out = w.getResult()
+ rmdirRecursive(tmp)
+ m = re.search(r'Committed revision (\d+)\.', out)
+ assert m.group(1) == "1" # first revision is always "1"
+ self.addTrunkRev(int(m.group(1)))
+
+ w = self.dovc(self.repbase,
+ ['checkout', self.svnurl_trunk, 'svntmp'])
+ yield w; w.getResult()
+
+ w = self.dovc(tmp, ['cp', '-m' , 'make_branch', self.svnurl_trunk,
+ self.svnurl_branch])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ['switch', self.svnurl_branch])
+ yield w; w.getResult()
+ self.populate_branch(tmp)
+ w = self.dovc(tmp, ['commit', '-m', 'commit_on_branch'])
+ yield w; out = w.getResult()
+ rmdirRecursive(tmp)
+ m = re.search(r'Committed revision (\d+)\.', out)
+ self.addBranchRev(int(m.group(1)))
+ createRepository = deferredGenerator(createRepository)
+
+ def vc_revise(self):
+ tmp = os.path.join(self.repbase, "svntmp")
+ rmdirRecursive(tmp)
+ log.msg("vc_revise" + self.svnurl_trunk)
+ w = self.dovc(self.repbase,
+ ['checkout', self.svnurl_trunk, 'svntmp'])
+ yield w; w.getResult()
+ self.version += 1
+ version_c = VERSION_C % self.version
+ open(os.path.join(tmp, "version.c"), "w").write(version_c)
+ w = self.dovc(tmp, ['commit', '-m', 'revised_to_%d' % self.version])
+ yield w; out = w.getResult()
+ m = re.search(r'Committed revision (\d+)\.', out)
+ self.addTrunkRev(int(m.group(1)))
+ rmdirRecursive(tmp)
+ vc_revise = deferredGenerator(vc_revise)
+
+ def vc_try_checkout(self, workdir, rev, branch=None):
+ assert os.path.abspath(workdir) == workdir
+ if os.path.exists(workdir):
+ rmdirRecursive(workdir)
+ if not branch:
+ svnurl = self.svnurl_trunk
+ else:
+ # N.B.: this is *not* os.path.join: SVN URLs use slashes
+ # regardless of the host operating system's filepath separator
+ svnurl = self.svnurl + "/" + branch
+ w = self.dovc(self.repbase,
+ ['checkout', svnurl, workdir])
+ yield w; w.getResult()
+ open(os.path.join(workdir, "subdir", "subdir.c"), "w").write(TRY_C)
+ vc_try_checkout = deferredGenerator(vc_try_checkout)
+
+ def vc_try_finish(self, workdir):
+ rmdirRecursive(workdir)
+
+
+class SVN(VCBase, unittest.TestCase):
+ vc_name = "svn"
+
+ metadir = ".svn"
+ vctype = "source.SVN"
+ vctype_try = "svn"
+ has_got_revision = True
+ has_got_revision_branches_are_merged = True
+
+ def testCheckout(self):
+ # we verify this one with the svnurl style of vcargs. We test the
+ # baseURL/defaultBranch style in testPatch and testCheckoutBranch.
+ self.helper.vcargs = { 'svnurl': self.helper.svnurl_trunk }
+ d = self.do_vctest()
+ return d
+
+ def testPatch(self):
+ self.helper.vcargs = { 'baseURL': self.helper.svnurl + "/",
+ 'defaultBranch': "sample/trunk",
+ }
+ d = self.do_patch()
+ return d
+
+ def testCheckoutBranch(self):
+ self.helper.vcargs = { 'baseURL': self.helper.svnurl + "/",
+ 'defaultBranch': "sample/trunk",
+ }
+ d = self.do_branch()
+ return d
+
+ def testTry(self):
+ # extract the base revision and patch from a modified tree, use it to
+ # create the same contents on the buildslave
+ self.helper.vcargs = { 'baseURL': self.helper.svnurl + "/",
+ 'defaultBranch': "sample/trunk",
+ }
+ d = self.do_getpatch()
+ return d
+
+ ## can't test the username= and password= options, because we do not have an
+ ## svn repository that requires authentication.
+
+VCS.registerVC(SVN.vc_name, SVNHelper())
+
+
+class P4Helper(BaseHelper):
+ branchname = "branch"
+ p4port = 'localhost:1666'
+ pid = None
+ base_descr = 'Change: new\nDescription: asdf\nFiles:\n'
+
+ def capable(self):
+ p4paths = which('p4')
+ p4dpaths = which('p4d')
+ if not p4paths:
+ return (False, "p4 is not installed")
+ if not p4dpaths:
+ return (False, "p4d is not installed")
+ self.vcexe = p4paths[0]
+ self.p4dexe = p4dpaths[0]
+ return (True, None)
+
+ class _P4DProtocol(protocol.ProcessProtocol):
+ def __init__(self):
+ self.started = defer.Deferred()
+ self.ended = defer.Deferred()
+
+ def outReceived(self, data):
+ # When it says starting, it has bound to the socket.
+ if self.started:
+ #
+ # Make sure p4d has started. Newer versions of p4d
+ # have more verbose messaging when db files don't exist, so
+ # we use re.search instead of startswith.
+ #
+ if re.search('Perforce Server starting...', data):
+ self.started.callback(None)
+ else:
+ print "p4d said %r" % data
+ try:
+ raise Exception('p4d said %r' % data)
+ except:
+ self.started.errback(failure.Failure())
+ self.started = None
+
+ def errReceived(self, data):
+ print "p4d stderr: %s" % data
+
+ def processEnded(self, status_object):
+ if status_object.check(error.ProcessDone):
+ self.ended.callback(None)
+ else:
+ self.ended.errback(status_object)
+
+ def _start_p4d(self):
+ proto = self._P4DProtocol()
+ reactor.spawnProcess(proto, self.p4dexe, ['p4d', '-p', self.p4port],
+ env=os.environ, path=self.p4rep)
+ return proto.started, proto.ended
+
+ def dop4(self, basedir, command, failureIsOk=False, stdin=None):
+ # p4 looks at $PWD instead of getcwd(), which causes confusion when
+ # we spawn commands without an intervening shell (sh -c). We can
+ # override this with a -d argument.
+ command = "-p %s -d %s %s" % (self.p4port, basedir, command)
+ return self.dovc(basedir, command, failureIsOk, stdin)
+
+ def createRepository(self):
+ # this is only called once per VC system, so start p4d here.
+
+ self.createBasedir()
+ tmp = os.path.join(self.repbase, "p4tmp")
+ self.p4rep = os.path.join(self.repbase, 'P4-Repository')
+ os.mkdir(self.p4rep)
+
+ # Launch p4d.
+ started, self.p4d_shutdown = self._start_p4d()
+ w = waitForDeferred(started)
+ yield w; w.getResult()
+
+ # Create client spec.
+ os.mkdir(tmp)
+ clispec = 'Client: creator\n'
+ clispec += 'Root: %s\n' % tmp
+ clispec += 'View:\n'
+ clispec += '\t//depot/... //creator/...\n'
+ w = self.dop4(tmp, 'client -i', stdin=clispec)
+ yield w; w.getResult()
+
+ # Create first rev (trunk).
+ self.populate(os.path.join(tmp, 'trunk'))
+ files = ['main.c', 'version.c', 'subdir/subdir.c']
+ w = self.dop4(tmp, "-c creator add "
+ + " ".join(['trunk/%s' % f for f in files]))
+ yield w; w.getResult()
+ descr = self.base_descr
+ for file in files:
+ descr += '\t//depot/trunk/%s\n' % file
+ w = self.dop4(tmp, "-c creator submit -i", stdin=descr)
+ yield w; out = w.getResult()
+ m = re.search(r'Change (\d+) submitted.', out)
+ assert m.group(1) == '1'
+ self.addTrunkRev(m.group(1))
+
+ # Create second rev (branch).
+ w = self.dop4(tmp, '-c creator integrate '
+ + '//depot/trunk/... //depot/branch/...')
+ yield w; w.getResult()
+ w = self.dop4(tmp, "-c creator edit branch/main.c")
+ yield w; w.getResult()
+ self.populate_branch(os.path.join(tmp, 'branch'))
+ descr = self.base_descr
+ for file in files:
+ descr += '\t//depot/branch/%s\n' % file
+ w = self.dop4(tmp, "-c creator submit -i", stdin=descr)
+ yield w; out = w.getResult()
+ m = re.search(r'Change (\d+) submitted.', out)
+ self.addBranchRev(m.group(1))
+ createRepository = deferredGenerator(createRepository)
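+    # A note on the control flow used in these helpers: they follow the
+    # older twisted deferredGenerator idiom, roughly
+    #     w = waitForDeferred(some_deferred)
+    #     yield w; result = w.getResult()
+    # with the generator itself wrapped by deferredGenerator(...) so that
+    # callers receive an ordinary Deferred which fires when it finishes.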
+
+ def vc_revise(self):
+ tmp = os.path.join(self.repbase, "p4tmp")
+ self.version += 1
+ version_c = VERSION_C % self.version
+ w = self.dop4(tmp, '-c creator edit trunk/version.c')
+ yield w; w.getResult()
+ open(os.path.join(tmp, "trunk/version.c"), "w").write(version_c)
+ descr = self.base_descr + '\t//depot/trunk/version.c\n'
+ w = self.dop4(tmp, "-c creator submit -i", stdin=descr)
+ yield w; out = w.getResult()
+ m = re.search(r'Change (\d+) submitted.', out)
+ self.addTrunkRev(m.group(1))
+ vc_revise = deferredGenerator(vc_revise)
+
+ def shutdown_p4d(self):
+ d = self.runCommand(self.repbase, '%s -p %s admin stop'
+ % (self.vcexe, self.p4port))
+ return d.addCallback(lambda _: self.p4d_shutdown)
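+    # shutdown_p4d() asks the running server to exit via 'p4 admin stop' and
+    # then chains to self.p4d_shutdown, the Deferred from _P4DProtocol that
+    # fires once the p4d child process has actually exited.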
+
+class P4(VCBase, unittest.TestCase):
+ metadir = None
+ vctype = "source.P4"
+ vc_name = "p4"
+ has_got_revision = True
+
+ def tearDownClass(self):
+ if self.helper:
+ return self.helper.shutdown_p4d()
+
+ def testCheckout(self):
+ self.helper.vcargs = { 'p4port': self.helper.p4port,
+ 'p4base': '//depot/',
+ 'defaultBranch': 'trunk' }
+ d = self.do_vctest(testRetry=False)
+        # TODO: like arch and darcs, sync does nothing when the server has
+        # not changed.
+ return d
+
+ def testCheckoutBranch(self):
+ self.helper.vcargs = { 'p4port': self.helper.p4port,
+ 'p4base': '//depot/',
+ 'defaultBranch': 'trunk' }
+ d = self.do_branch()
+ return d
+
+ def testPatch(self):
+ self.helper.vcargs = { 'p4port': self.helper.p4port,
+ 'p4base': '//depot/',
+ 'defaultBranch': 'trunk' }
+ d = self.do_patch()
+ return d
+
+VCS.registerVC(P4.vc_name, P4Helper())
+
+
+class DarcsHelper(BaseHelper):
+ branchname = "branch"
+ try_branchname = "branch"
+
+ def capable(self):
+ darcspaths = which('darcs')
+ if not darcspaths:
+ return (False, "Darcs is not installed")
+ self.vcexe = darcspaths[0]
+ return (True, None)
+
+ def createRepository(self):
+ self.createBasedir()
+ self.darcs_base = os.path.join(self.repbase, "Darcs-Repository")
+ self.rep_trunk = os.path.join(self.darcs_base, "trunk")
+ self.rep_branch = os.path.join(self.darcs_base, "branch")
+ tmp = os.path.join(self.repbase, "darcstmp")
+
+ os.makedirs(self.rep_trunk)
+ w = self.dovc(self.rep_trunk, ["initialize"])
+ yield w; w.getResult()
+ os.makedirs(self.rep_branch)
+ w = self.dovc(self.rep_branch, ["initialize"])
+ yield w; w.getResult()
+
+ self.populate(tmp)
+ w = self.dovc(tmp, qw("initialize"))
+ yield w; w.getResult()
+ w = self.dovc(tmp, qw("add -r ."))
+ yield w; w.getResult()
+ w = self.dovc(tmp, qw("record -a -m initial_import --skip-long-comment -A test@buildbot.sf.net"))
+ yield w; w.getResult()
+ w = self.dovc(tmp, ["push", "-a", self.rep_trunk])
+ yield w; w.getResult()
+ w = self.dovc(tmp, qw("changes --context"))
+ yield w; out = w.getResult()
+ self.addTrunkRev(out)
+
+ self.populate_branch(tmp)
+ w = self.dovc(tmp, qw("record -a --ignore-times -m commit_on_branch --skip-long-comment -A test@buildbot.sf.net"))
+ yield w; w.getResult()
+ w = self.dovc(tmp, ["push", "-a", self.rep_branch])
+ yield w; w.getResult()
+ w = self.dovc(tmp, qw("changes --context"))
+ yield w; out = w.getResult()
+ self.addBranchRev(out)
+ rmdirRecursive(tmp)
+ createRepository = deferredGenerator(createRepository)
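+    # Note that the "revision" recorded above is the entire output of
+    # 'darcs changes --context' (a patch-context blob), stored verbatim via
+    # addTrunkRev/addBranchRev rather than as a single revision number.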
+
+ def vc_revise(self):
+ tmp = os.path.join(self.repbase, "darcstmp")
+ os.makedirs(tmp)
+ w = self.dovc(tmp, qw("initialize"))
+ yield w; w.getResult()
+ w = self.dovc(tmp, ["pull", "-a", self.rep_trunk])
+ yield w; w.getResult()
+
+ self.version += 1
+ version_c = VERSION_C % self.version
+ open(os.path.join(tmp, "version.c"), "w").write(version_c)
+ w = self.dovc(tmp, qw("record -a --ignore-times -m revised_to_%d --skip-long-comment -A test@buildbot.sf.net" % self.version))
+ yield w; w.getResult()
+ w = self.dovc(tmp, ["push", "-a", self.rep_trunk])
+ yield w; w.getResult()
+ w = self.dovc(tmp, qw("changes --context"))
+ yield w; out = w.getResult()
+ self.addTrunkRev(out)
+ rmdirRecursive(tmp)
+ vc_revise = deferredGenerator(vc_revise)
+
+ def vc_try_checkout(self, workdir, rev, branch=None):
+ assert os.path.abspath(workdir) == workdir
+ if os.path.exists(workdir):
+ rmdirRecursive(workdir)
+ os.makedirs(workdir)
+ w = self.dovc(workdir, qw("initialize"))
+ yield w; w.getResult()
+ if not branch:
+ rep = self.rep_trunk
+ else:
+ rep = os.path.join(self.darcs_base, branch)
+ w = self.dovc(workdir, ["pull", "-a", rep])
+ yield w; w.getResult()
+ open(os.path.join(workdir, "subdir", "subdir.c"), "w").write(TRY_C)
+ vc_try_checkout = deferredGenerator(vc_try_checkout)
+
+ def vc_try_finish(self, workdir):
+ rmdirRecursive(workdir)
+
+
+class Darcs(VCBase, unittest.TestCase):
+ vc_name = "darcs"
+
+ # Darcs has a metadir="_darcs", but it does not have an 'export'
+ # mode
+ metadir = None
+ vctype = "source.Darcs"
+ vctype_try = "darcs"
+ has_got_revision = True
+
+ def testCheckout(self):
+ self.helper.vcargs = { 'repourl': self.helper.rep_trunk }
+ d = self.do_vctest(testRetry=False)
+
+ # TODO: testRetry has the same problem with Darcs as it does for
+ # Arch
+ return d
+
+ def testPatch(self):
+ self.helper.vcargs = { 'baseURL': self.helper.darcs_base + "/",
+ 'defaultBranch': "trunk" }
+ d = self.do_patch()
+ return d
+
+ def testCheckoutBranch(self):
+ self.helper.vcargs = { 'baseURL': self.helper.darcs_base + "/",
+ 'defaultBranch': "trunk" }
+ d = self.do_branch()
+ return d
+
+ def testCheckoutHTTP(self):
+ self.serveHTTP()
+ repourl = "http://localhost:%d/Darcs-Repository/trunk" % self.httpPort
+ self.helper.vcargs = { 'repourl': repourl }
+ d = self.do_vctest(testRetry=False)
+ return d
+
+ def testTry(self):
+ self.helper.vcargs = { 'baseURL': self.helper.darcs_base + "/",
+ 'defaultBranch': "trunk" }
+ d = self.do_getpatch()
+ return d
+
+VCS.registerVC(Darcs.vc_name, DarcsHelper())
+
+
+class ArchCommon:
+ def registerRepository(self, coordinates):
+ a = self.archname
+ w = self.dovc(self.repbase, "archives %s" % a)
+ yield w; out = w.getResult()
+ if out:
+ w = self.dovc(self.repbase, "register-archive -d %s" % a)
+ yield w; w.getResult()
+ w = self.dovc(self.repbase, "register-archive %s" % coordinates)
+ yield w; w.getResult()
+ registerRepository = deferredGenerator(registerRepository)
+
+ def unregisterRepository(self):
+ a = self.archname
+ w = self.dovc(self.repbase, "archives %s" % a)
+ yield w; out = w.getResult()
+ if out:
+ w = self.dovc(self.repbase, "register-archive -d %s" % a)
+ yield w; out = w.getResult()
+ unregisterRepository = deferredGenerator(unregisterRepository)
+
+class TlaHelper(BaseHelper, ArchCommon):
+ defaultbranch = "testvc--mainline--1"
+ branchname = "testvc--branch--1"
+ try_branchname = None # TlaExtractor can figure it out by itself
+ archcmd = "tla"
+
+ def capable(self):
+ tlapaths = which('tla')
+ if not tlapaths:
+ return (False, "Arch (tla) is not installed")
+ self.vcexe = tlapaths[0]
+ return (True, None)
+
+ def do_get(self, basedir, archive, branch, newdir):
+ # the 'get' syntax is different between tla and baz. baz, while
+ # claiming to honor an --archive argument, in fact ignores it. The
+ # correct invocation is 'baz get archive/revision newdir'.
+ if self.archcmd == "tla":
+ w = self.dovc(basedir,
+ "get -A %s %s %s" % (archive, branch, newdir))
+ else:
+ w = self.dovc(basedir,
+ "get %s/%s %s" % (archive, branch, newdir))
+ return w
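+    # Concretely, with assumed archive/branch names, the two forms are:
+    #     tla get -A test-tla-123@buildbot.sf.net--testvc testvc--mainline--1 newdir
+    #     baz get test-baz-123@buildbot.sf.net--testvc/testvc--mainline--1 newdir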
+
+ def createRepository(self):
+ self.createBasedir()
+ # first check to see if bazaar is around, since we'll need to know
+ # later
+ d = VCS.capable(Bazaar.vc_name)
+ d.addCallback(self._createRepository_1)
+ return d
+
+ def _createRepository_1(self, res):
+ has_baz = res[0]
+
+ # pick a hopefully unique string for the archive name, in the form
+ # test-%d@buildbot.sf.net--testvc, since otherwise multiple copies of
+ # the unit tests run in the same user account will collide (since the
+ # archive names are kept in the per-user ~/.arch-params/ directory).
+ pid = os.getpid()
+ self.archname = "test-%s-%d@buildbot.sf.net--testvc" % (self.archcmd,
+ pid)
+ trunk = self.defaultbranch
+ branch = self.branchname
+
+ repword = self.archcmd.capitalize()
+ self.archrep = os.path.join(self.repbase, "%s-Repository" % repword)
+ tmp = os.path.join(self.repbase, "archtmp")
+ a = self.archname
+
+ self.populate(tmp)
+
+ w = self.dovc(tmp, "my-id", failureIsOk=True)
+ yield w; res = w.getResult()
+ if not res:
+ # tla will fail a lot of operations if you have not set an ID
+ w = self.do(tmp, [self.vcexe, "my-id",
+ "Buildbot Test Suite <test@buildbot.sf.net>"])
+ yield w; w.getResult()
+
+ if has_baz:
+ # bazaar keeps a cache of revisions, but this test creates a new
+ # archive each time it is run, so the cache causes errors.
+ # Disable the cache to avoid these problems. This will be
+ # slightly annoying for people who run the buildbot tests under
+ # the same UID as one which uses baz on a regular basis, but
+ # bazaar doesn't give us a way to disable the cache just for this
+ # one archive.
+ cmd = "%s cache-config --disable" % VCS.getHelper('bazaar').vcexe
+ w = self.do(tmp, cmd)
+ yield w; w.getResult()
+
+ w = waitForDeferred(self.unregisterRepository())
+ yield w; w.getResult()
+
+ # these commands can be run in any directory
+ w = self.dovc(tmp, "make-archive -l %s %s" % (a, self.archrep))
+ yield w; w.getResult()
+ if self.archcmd == "tla":
+ w = self.dovc(tmp, "archive-setup -A %s %s" % (a, trunk))
+ yield w; w.getResult()
+ w = self.dovc(tmp, "archive-setup -A %s %s" % (a, branch))
+ yield w; w.getResult()
+ else:
+ # baz does not require an 'archive-setup' step
+ pass
+
+ # these commands must be run in the directory that is to be imported
+ w = self.dovc(tmp, "init-tree --nested %s/%s" % (a, trunk))
+ yield w; w.getResult()
+ files = " ".join(["main.c", "version.c", "subdir",
+ os.path.join("subdir", "subdir.c")])
+ w = self.dovc(tmp, "add-id %s" % files)
+ yield w; w.getResult()
+
+ w = self.dovc(tmp, "import %s/%s" % (a, trunk))
+ yield w; out = w.getResult()
+ self.addTrunkRev("base-0")
+
+ # create the branch
+ if self.archcmd == "tla":
+ branchstart = "%s--base-0" % trunk
+ w = self.dovc(tmp, "tag -A %s %s %s" % (a, branchstart, branch))
+ yield w; w.getResult()
+ else:
+ w = self.dovc(tmp, "branch %s" % branch)
+ yield w; w.getResult()
+
+ rmdirRecursive(tmp)
+
+ # check out the branch
+ w = self.do_get(self.repbase, a, branch, "archtmp")
+ yield w; w.getResult()
+ # and edit the file
+ self.populate_branch(tmp)
+ logfile = "++log.%s--%s" % (branch, a)
+ logmsg = "Summary: commit on branch\nKeywords:\n\n"
+ open(os.path.join(tmp, logfile), "w").write(logmsg)
+ w = self.dovc(tmp, "commit")
+ yield w; out = w.getResult()
+ m = re.search(r'committed %s/%s--([\S]+)' % (a, branch),
+ out)
+ assert (m.group(1) == "base-0" or m.group(1).startswith("patch-"))
+ self.addBranchRev(m.group(1))
+
+ w = waitForDeferred(self.unregisterRepository())
+ yield w; w.getResult()
+ rmdirRecursive(tmp)
+
+ # we unregister the repository each time, because we might have
+ # changed the coordinates (since we switch from a file: URL to an
+ # http: URL for various tests). The buildslave code doesn't forcibly
+ # unregister the archive, so we have to do it here.
+ w = waitForDeferred(self.unregisterRepository())
+ yield w; w.getResult()
+
+ _createRepository_1 = deferredGenerator(_createRepository_1)
+
+ def vc_revise(self):
+ # the fix needs to be done in a workspace that is linked to a
+ # read-write version of the archive (i.e., using file-based
+ # coordinates instead of HTTP ones), so we re-register the repository
+ # before we begin. We unregister it when we're done to make sure the
+ # build will re-register the correct one for whichever test is
+ # currently being run.
+
+        # except that source.Bazaar really doesn't like it when the archive
+ # gets unregistered behind its back. The slave tries to do a 'baz
+ # replay' in a tree with an archive that is no longer recognized, and
+ # baz aborts with a botched invariant exception. This causes
+ # mode=update to fall back to clobber+get, which flunks one of the
+ # tests (the 'newfile' check in _do_vctest_update_3 fails)
+
+ # to avoid this, we take heroic steps here to leave the archive
+ # registration in the same state as we found it.
+
+ tmp = os.path.join(self.repbase, "archtmp")
+ a = self.archname
+
+ w = self.dovc(self.repbase, "archives %s" % a)
+ yield w; out = w.getResult()
+ assert out
+ lines = out.split("\n")
+ coordinates = lines[1].strip()
+
+ # now register the read-write location
+ w = waitForDeferred(self.registerRepository(self.archrep))
+ yield w; w.getResult()
+
+ trunk = self.defaultbranch
+
+ w = self.do_get(self.repbase, a, trunk, "archtmp")
+ yield w; w.getResult()
+
+ # tla appears to use timestamps to determine which files have
+ # changed, so wait long enough for the new file to have a different
+ # timestamp
+ time.sleep(2)
+ self.version += 1
+ version_c = VERSION_C % self.version
+ open(os.path.join(tmp, "version.c"), "w").write(version_c)
+
+ logfile = "++log.%s--%s" % (trunk, a)
+ logmsg = "Summary: revised_to_%d\nKeywords:\n\n" % self.version
+ open(os.path.join(tmp, logfile), "w").write(logmsg)
+ w = self.dovc(tmp, "commit")
+ yield w; out = w.getResult()
+ m = re.search(r'committed %s/%s--([\S]+)' % (a, trunk),
+ out)
+ assert (m.group(1) == "base-0" or m.group(1).startswith("patch-"))
+ self.addTrunkRev(m.group(1))
+
+ # now re-register the original coordinates
+ w = waitForDeferred(self.registerRepository(coordinates))
+ yield w; w.getResult()
+ rmdirRecursive(tmp)
+ vc_revise = deferredGenerator(vc_revise)
+
+ def vc_try_checkout(self, workdir, rev, branch=None):
+ assert os.path.abspath(workdir) == workdir
+ if os.path.exists(workdir):
+ rmdirRecursive(workdir)
+
+ a = self.archname
+
+ # register the read-write location, if it wasn't already registered
+ w = waitForDeferred(self.registerRepository(self.archrep))
+ yield w; w.getResult()
+
+ w = self.do_get(self.repbase, a, "testvc--mainline--1", workdir)
+ yield w; w.getResult()
+
+ # timestamps. ick.
+ time.sleep(2)
+ open(os.path.join(workdir, "subdir", "subdir.c"), "w").write(TRY_C)
+ vc_try_checkout = deferredGenerator(vc_try_checkout)
+
+ def vc_try_finish(self, workdir):
+ rmdirRecursive(workdir)
+
+class Arch(VCBase, unittest.TestCase):
+ vc_name = "tla"
+
+ metadir = None
+ # Arch has a metadir="{arch}", but it does not have an 'export' mode.
+ vctype = "source.Arch"
+ vctype_try = "tla"
+ has_got_revision = True
+
+ def testCheckout(self):
+ # these are the coordinates of the read-write archive used by all the
+ # non-HTTP tests. testCheckoutHTTP overrides these.
+ self.helper.vcargs = {'url': self.helper.archrep,
+ 'version': self.helper.defaultbranch }
+ d = self.do_vctest(testRetry=False)
+ # the current testRetry=True logic doesn't have the desired effect:
+ # "update" is a no-op because arch knows that the repository hasn't
+ # changed. Other VC systems will re-checkout missing files on
+        # update; arch just leaves the tree untouched. TODO: come up with
+ # some better test logic, probably involving a copy of the
+ # repository that has a few changes checked in.
+
+ return d
+
+ def testCheckoutHTTP(self):
+ self.serveHTTP()
+ url = "http://localhost:%d/Tla-Repository" % self.httpPort
+ self.helper.vcargs = { 'url': url,
+ 'version': "testvc--mainline--1" }
+ d = self.do_vctest(testRetry=False)
+ return d
+
+ def testPatch(self):
+ self.helper.vcargs = {'url': self.helper.archrep,
+ 'version': self.helper.defaultbranch }
+ d = self.do_patch()
+ return d
+
+ def testCheckoutBranch(self):
+ self.helper.vcargs = {'url': self.helper.archrep,
+ 'version': self.helper.defaultbranch }
+ d = self.do_branch()
+ return d
+
+ def testTry(self):
+ self.helper.vcargs = {'url': self.helper.archrep,
+ 'version': self.helper.defaultbranch }
+ d = self.do_getpatch()
+ return d
+
+VCS.registerVC(Arch.vc_name, TlaHelper())
+
+
+class BazaarHelper(TlaHelper):
+ archcmd = "baz"
+
+ def capable(self):
+ bazpaths = which('baz')
+ if not bazpaths:
+ return (False, "Arch (baz) is not installed")
+ self.vcexe = bazpaths[0]
+ return (True, None)
+
+ def setUp2(self, res):
+ # we unregister the repository each time, because we might have
+ # changed the coordinates (since we switch from a file: URL to an
+ # http: URL for various tests). The buildslave code doesn't forcibly
+ # unregister the archive, so we have to do it here.
+ d = self.unregisterRepository()
+ return d
+
+
+class Bazaar(Arch):
+ vc_name = "bazaar"
+
+ vctype = "source.Bazaar"
+ vctype_try = "baz"
+ has_got_revision = True
+
+ fixtimer = None
+
+ def testCheckout(self):
+ self.helper.vcargs = {'url': self.helper.archrep,
+ # Baz adds the required 'archive' argument
+ 'archive': self.helper.archname,
+ 'version': self.helper.defaultbranch,
+ }
+ d = self.do_vctest(testRetry=False)
+ # the current testRetry=True logic doesn't have the desired effect:
+ # "update" is a no-op because arch knows that the repository hasn't
+ # changed. Other VC systems will re-checkout missing files on
+        # update; arch just leaves the tree untouched. TODO: come up with
+ # some better test logic, probably involving a copy of the
+ # repository that has a few changes checked in.
+
+ return d
+
+ def testCheckoutHTTP(self):
+ self.serveHTTP()
+ url = "http://localhost:%d/Baz-Repository" % self.httpPort
+ self.helper.vcargs = { 'url': url,
+ 'archive': self.helper.archname,
+ 'version': self.helper.defaultbranch,
+ }
+ d = self.do_vctest(testRetry=False)
+ return d
+
+ def testPatch(self):
+ self.helper.vcargs = {'url': self.helper.archrep,
+ # Baz adds the required 'archive' argument
+ 'archive': self.helper.archname,
+ 'version': self.helper.defaultbranch,
+ }
+ d = self.do_patch()
+ return d
+
+ def testCheckoutBranch(self):
+ self.helper.vcargs = {'url': self.helper.archrep,
+ # Baz adds the required 'archive' argument
+ 'archive': self.helper.archname,
+ 'version': self.helper.defaultbranch,
+ }
+ d = self.do_branch()
+ return d
+
+ def testTry(self):
+ self.helper.vcargs = {'url': self.helper.archrep,
+ # Baz adds the required 'archive' argument
+ 'archive': self.helper.archname,
+ 'version': self.helper.defaultbranch,
+ }
+ d = self.do_getpatch()
+ return d
+
+ def fixRepository(self):
+ self.fixtimer = None
+ self.site.resource = self.root
+
+ def testRetry(self):
+ # we want to verify that source.Source(retry=) works, and the easiest
+ # way to make VC updates break (temporarily) is to break the HTTP
+ # server that's providing the repository. Anything else pretty much
+ # requires mutating the (read-only) BUILDBOT_TEST_VC repository, or
+ # modifying the buildslave's checkout command while it's running.
+
+ # this test takes a while to run, so don't bother doing it with
+ # anything other than baz
+
+ self.serveHTTP()
+
+ # break the repository server
+ from twisted.web import static
+ self.site.resource = static.Data("Sorry, repository is offline",
+ "text/plain")
+ # and arrange to fix it again in 5 seconds, while the test is
+ # running.
+ self.fixtimer = reactor.callLater(5, self.fixRepository)
+
+ url = "http://localhost:%d/Baz-Repository" % self.httpPort
+ self.helper.vcargs = { 'url': url,
+ 'archive': self.helper.archname,
+ 'version': self.helper.defaultbranch,
+ 'retry': (5.0, 4),
+ }
+ d = self.do_vctest_once(True)
+ d.addCallback(self._testRetry_1)
+ return d
+ def _testRetry_1(self, bs):
+ # make sure there was mention of the retry attempt in the logs
+ l = bs.getLogs()[0]
+ self.failUnlessIn("unable to access URL", l.getText(),
+ "funny, VC operation didn't fail at least once")
+ self.failUnlessIn("update failed, trying 4 more times after 5 seconds",
+ l.getTextWithHeaders(),
+ "funny, VC operation wasn't reattempted")
+
+ def testRetryFails(self):
+ # make sure that the build eventually gives up on a repository which
+ # is completely unavailable
+
+ self.serveHTTP()
+
+ # break the repository server, and leave it broken
+ from twisted.web import static
+ self.site.resource = static.Data("Sorry, repository is offline",
+ "text/plain")
+
+ url = "http://localhost:%d/Baz-Repository" % self.httpPort
+ self.helper.vcargs = {'url': url,
+ 'archive': self.helper.archname,
+ 'version': self.helper.defaultbranch,
+ 'retry': (0.5, 3),
+ }
+ d = self.do_vctest_once(False)
+ d.addCallback(self._testRetryFails_1)
+ return d
+ def _testRetryFails_1(self, bs):
+ self.failUnlessEqual(bs.getResults(), FAILURE)
+
+ def tearDown2(self):
+ if self.fixtimer:
+ self.fixtimer.cancel()
+ # tell tla to get rid of the leftover archive this test leaves in the
+ # user's 'tla archives' listing. The name of this archive is provided
+ # by the repository tarball, so the following command must use the
+ # same name. We could use archive= to set it explicitly, but if you
+ # change it from the default, then 'tla update' won't work.
+ d = self.helper.unregisterRepository()
+ return d
+
+VCS.registerVC(Bazaar.vc_name, BazaarHelper())
+
+class BzrHelper(BaseHelper):
+ branchname = "branch"
+ try_branchname = "branch"
+
+ def capable(self):
+ bzrpaths = which('bzr')
+ if not bzrpaths:
+ return (False, "bzr is not installed")
+ self.vcexe = bzrpaths[0]
+ return (True, None)
+
+    def get_revision_number(self, out):
+        # scan the "key: value" lines for the revno; skip any line without a
+        # colon (e.g. a trailing blank line) instead of letting index() raise
+        for line in out.split("\n"):
+            colon = line.find(":")
+            if colon == -1:
+                continue
+            key, value = line[:colon], line[colon+2:]
+            if key == "revno":
+                return int(value)
+        raise RuntimeError("unable to find revno: in bzr output: '%s'" % out)
+
+ def createRepository(self):
+ self.createBasedir()
+ self.bzr_base = os.path.join(self.repbase, "Bzr-Repository")
+ self.rep_trunk = os.path.join(self.bzr_base, "trunk")
+ self.rep_branch = os.path.join(self.bzr_base, "branch")
+ tmp = os.path.join(self.repbase, "bzrtmp")
+ btmp = os.path.join(self.repbase, "bzrtmp-branch")
+
+ os.makedirs(self.rep_trunk)
+ w = self.dovc(self.rep_trunk, ["init"])
+ yield w; w.getResult()
+ w = self.dovc(self.bzr_base,
+ ["branch", self.rep_trunk, self.rep_branch])
+ yield w; w.getResult()
+
+ w = self.dovc(self.repbase, ["checkout", self.rep_trunk, tmp])
+ yield w; w.getResult()
+ self.populate(tmp)
+ w = self.dovc(tmp, qw("add"))
+ yield w; w.getResult()
+ w = self.dovc(tmp, qw("commit -m initial_import"))
+ yield w; w.getResult()
+ w = self.dovc(tmp, qw("version-info"))
+ yield w; out = w.getResult()
+ self.addTrunkRev(self.get_revision_number(out))
+ rmdirRecursive(tmp)
+
+ # pull all trunk revisions to the branch
+ w = self.dovc(self.rep_branch, qw("pull"))
+ yield w; w.getResult()
+ # obtain a branch tree
+ w = self.dovc(self.repbase, ["checkout", self.rep_branch, btmp])
+ yield w; w.getResult()
+ # modify it
+ self.populate_branch(btmp)
+ w = self.dovc(btmp, qw("add"))
+ yield w; w.getResult()
+ w = self.dovc(btmp, qw("commit -m commit_on_branch"))
+ yield w; w.getResult()
+ w = self.dovc(btmp, qw("version-info"))
+ yield w; out = w.getResult()
+ self.addBranchRev(self.get_revision_number(out))
+ rmdirRecursive(btmp)
+ createRepository = deferredGenerator(createRepository)
+
+ def vc_revise(self):
+ tmp = os.path.join(self.repbase, "bzrtmp")
+ w = self.dovc(self.repbase, ["checkout", self.rep_trunk, tmp])
+ yield w; w.getResult()
+
+ self.version += 1
+ version_c = VERSION_C % self.version
+ open(os.path.join(tmp, "version.c"), "w").write(version_c)
+ w = self.dovc(tmp, qw("commit -m revised_to_%d" % self.version))
+ yield w; w.getResult()
+ w = self.dovc(tmp, qw("version-info"))
+ yield w; out = w.getResult()
+ self.addTrunkRev(self.get_revision_number(out))
+ rmdirRecursive(tmp)
+ vc_revise = deferredGenerator(vc_revise)
+
+ def vc_try_checkout(self, workdir, rev, branch=None):
+ assert os.path.abspath(workdir) == workdir
+ if os.path.exists(workdir):
+ rmdirRecursive(workdir)
+ #os.makedirs(workdir)
+ if not branch:
+ rep = self.rep_trunk
+ else:
+ rep = os.path.join(self.bzr_base, branch)
+ w = self.dovc(self.bzr_base, ["checkout", rep, workdir])
+ yield w; w.getResult()
+ open(os.path.join(workdir, "subdir", "subdir.c"), "w").write(TRY_C)
+ vc_try_checkout = deferredGenerator(vc_try_checkout)
+
+ def vc_try_finish(self, workdir):
+ rmdirRecursive(workdir)
+
+class Bzr(VCBase, unittest.TestCase):
+ vc_name = "bzr"
+
+ metadir = ".bzr"
+ vctype = "source.Bzr"
+ vctype_try = "bzr"
+ has_got_revision = True
+
+ def testCheckout(self):
+ self.helper.vcargs = { 'repourl': self.helper.rep_trunk }
+ d = self.do_vctest(testRetry=False)
+
+ # TODO: testRetry has the same problem with Bzr as it does for
+ # Arch
+ return d
+
+ def testPatch(self):
+ self.helper.vcargs = { 'baseURL': self.helper.bzr_base + "/",
+ 'defaultBranch': "trunk" }
+ d = self.do_patch()
+ return d
+
+ def testCheckoutBranch(self):
+ self.helper.vcargs = { 'baseURL': self.helper.bzr_base + "/",
+ 'defaultBranch': "trunk" }
+ d = self.do_branch()
+ return d
+
+ def testCheckoutHTTP(self):
+ self.serveHTTP()
+ repourl = "http://localhost:%d/Bzr-Repository/trunk" % self.httpPort
+ self.helper.vcargs = { 'repourl': repourl }
+ d = self.do_vctest(testRetry=False)
+ return d
+
+
+ def fixRepository(self):
+ self.fixtimer = None
+ self.site.resource = self.root
+
+ def testRetry(self):
+ # this test takes a while to run
+ self.serveHTTP()
+
+ # break the repository server
+ from twisted.web import static
+ self.site.resource = static.Data("Sorry, repository is offline",
+ "text/plain")
+ # and arrange to fix it again in 5 seconds, while the test is
+ # running.
+ self.fixtimer = reactor.callLater(5, self.fixRepository)
+
+ repourl = "http://localhost:%d/Bzr-Repository/trunk" % self.httpPort
+ self.helper.vcargs = { 'repourl': repourl,
+ 'retry': (5.0, 4),
+ }
+ d = self.do_vctest_once(True)
+ d.addCallback(self._testRetry_1)
+ return d
+ def _testRetry_1(self, bs):
+ # make sure there was mention of the retry attempt in the logs
+ l = bs.getLogs()[0]
+ self.failUnlessIn("ERROR: Not a branch: ", l.getText(),
+ "funny, VC operation didn't fail at least once")
+ self.failUnlessIn("update failed, trying 4 more times after 5 seconds",
+ l.getTextWithHeaders(),
+ "funny, VC operation wasn't reattempted")
+
+ def testRetryFails(self):
+ # make sure that the build eventually gives up on a repository which
+ # is completely unavailable
+
+ self.serveHTTP()
+
+ # break the repository server, and leave it broken
+ from twisted.web import static
+ self.site.resource = static.Data("Sorry, repository is offline",
+ "text/plain")
+
+ repourl = "http://localhost:%d/Bzr-Repository/trunk" % self.httpPort
+ self.helper.vcargs = { 'repourl': repourl,
+ 'retry': (0.5, 3),
+ }
+ d = self.do_vctest_once(False)
+ d.addCallback(self._testRetryFails_1)
+ return d
+ def _testRetryFails_1(self, bs):
+ self.failUnlessEqual(bs.getResults(), FAILURE)
+
+
+ def testTry(self):
+ self.helper.vcargs = { 'baseURL': self.helper.bzr_base + "/",
+ 'defaultBranch': "trunk" }
+ d = self.do_getpatch()
+ return d
+
+VCS.registerVC(Bzr.vc_name, BzrHelper())
+
+
+class MercurialHelper(BaseHelper):
+ branchname = "branch"
+ try_branchname = "branch"
+
+ def capable(self):
+ hgpaths = which("hg")
+ if not hgpaths:
+ return (False, "Mercurial is not installed")
+ self.vcexe = hgpaths[0]
+ return (True, None)
+
+ def extract_id(self, output):
+ m = re.search(r'^(\w+)', output)
+ return m.group(0)
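+    # 'hg identify' prints the short changeset hash first, e.g. (hash made up)
+    #     8d2f9b2a1c3e tip
+    # and extract_id() returns just that leading hex string.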
+
+ def createRepository(self):
+ self.createBasedir()
+ self.hg_base = os.path.join(self.repbase, "Mercurial-Repository")
+ self.rep_trunk = os.path.join(self.hg_base, "trunk")
+ self.rep_branch = os.path.join(self.hg_base, "branch")
+ tmp = os.path.join(self.hg_base, "hgtmp")
+
+ os.makedirs(self.rep_trunk)
+ w = self.dovc(self.rep_trunk, "init")
+ yield w; w.getResult()
+ os.makedirs(self.rep_branch)
+ w = self.dovc(self.rep_branch, "init")
+ yield w; w.getResult()
+
+ self.populate(tmp)
+ w = self.dovc(tmp, "init")
+ yield w; w.getResult()
+ w = self.dovc(tmp, "add")
+ yield w; w.getResult()
+ w = self.dovc(tmp, ['commit', '-m', 'initial_import'])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ['push', self.rep_trunk])
+ # note that hg-push does not actually update the working directory
+ yield w; w.getResult()
+ w = self.dovc(tmp, "identify")
+ yield w; out = w.getResult()
+ self.addTrunkRev(self.extract_id(out))
+
+ self.populate_branch(tmp)
+ w = self.dovc(tmp, ['commit', '-m', 'commit_on_branch'])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ['push', self.rep_branch])
+ yield w; w.getResult()
+ w = self.dovc(tmp, "identify")
+ yield w; out = w.getResult()
+ self.addBranchRev(self.extract_id(out))
+ rmdirRecursive(tmp)
+ createRepository = deferredGenerator(createRepository)
+
+ def vc_revise(self):
+ tmp = os.path.join(self.hg_base, "hgtmp2")
+ w = self.dovc(self.hg_base, ['clone', self.rep_trunk, tmp])
+ yield w; w.getResult()
+
+ self.version += 1
+ version_c = VERSION_C % self.version
+ version_c_filename = os.path.join(tmp, "version.c")
+ open(version_c_filename, "w").write(version_c)
+ # hg uses timestamps to distinguish files which have changed, so we
+ # force the mtime forward a little bit
+ future = time.time() + 2*self.version
+ os.utime(version_c_filename, (future, future))
+ w = self.dovc(tmp, ['commit', '-m', 'revised_to_%d' % self.version])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ['push', self.rep_trunk])
+ yield w; w.getResult()
+ w = self.dovc(tmp, "identify")
+ yield w; out = w.getResult()
+ self.addTrunkRev(self.extract_id(out))
+ rmdirRecursive(tmp)
+ vc_revise = deferredGenerator(vc_revise)
+
+ def vc_try_checkout(self, workdir, rev, branch=None):
+ assert os.path.abspath(workdir) == workdir
+ if os.path.exists(workdir):
+ rmdirRecursive(workdir)
+ if branch:
+ src = self.rep_branch
+ else:
+ src = self.rep_trunk
+ w = self.dovc(self.hg_base, ['clone', src, workdir])
+ yield w; w.getResult()
+ try_c_filename = os.path.join(workdir, "subdir", "subdir.c")
+ open(try_c_filename, "w").write(TRY_C)
+ future = time.time() + 2*self.version
+ os.utime(try_c_filename, (future, future))
+ vc_try_checkout = deferredGenerator(vc_try_checkout)
+
+ def vc_try_finish(self, workdir):
+ rmdirRecursive(workdir)
+
+class MercurialServerPP(protocol.ProcessProtocol):
+ def __init__(self):
+ self.wait = defer.Deferred()
+
+ def outReceived(self, data):
+ log.msg("hg-serve-stdout: %s" % (data,))
+ def errReceived(self, data):
+ print "HG-SERVE-STDERR:", data
+ log.msg("hg-serve-stderr: %s" % (data,))
+ def processEnded(self, reason):
+ log.msg("hg-serve ended: %s" % reason)
+ self.wait.callback(None)
+
+
+class Mercurial(VCBase, unittest.TestCase):
+ vc_name = "hg"
+
+ # Mercurial has a metadir=".hg", but it does not have an 'export' mode.
+ metadir = None
+ vctype = "source.Mercurial"
+ vctype_try = "hg"
+ has_got_revision = True
+ _hg_server = None
+ _wait_for_server_poller = None
+ _pp = None
+
+ def testCheckout(self):
+ self.helper.vcargs = { 'repourl': self.helper.rep_trunk }
+ d = self.do_vctest(testRetry=False)
+
+ # TODO: testRetry has the same problem with Mercurial as it does for
+ # Arch
+ return d
+
+ def testPatch(self):
+ self.helper.vcargs = { 'baseURL': self.helper.hg_base + "/",
+ 'defaultBranch': "trunk" }
+ d = self.do_patch()
+ return d
+
+ def testCheckoutBranch(self):
+ self.helper.vcargs = { 'baseURL': self.helper.hg_base + "/",
+ 'defaultBranch': "trunk" }
+ d = self.do_branch()
+ return d
+
+ def serveHTTP(self):
+ # the easiest way to publish hg over HTTP is by running 'hg serve' as
+ # a child process while the test is running. (you can also use a CGI
+ # script, which sounds difficult, or you can publish the files
+ # directly, which isn't well documented).
+
+ # grr.. 'hg serve' doesn't let you use --port=0 to mean "pick a free
+ # port", instead it uses it as a signal to use the default (port
+ # 8000). This means there is no way to make it choose a free port, so
+ # we are forced to make it use a statically-defined one, making it
+ # harder to avoid collisions.
+ self.httpPort = 8300 + (os.getpid() % 200)
+ args = [self.helper.vcexe,
+ "serve", "--port", str(self.httpPort), "--verbose"]
+
+ # in addition, hg doesn't flush its stdout, so we can't wait for the
+ # "listening at" message to know when it's safe to start the test.
+        # Instead, poll every half-second until a getPage succeeds.
+
+ self._pp = MercurialServerPP() # logs+discards everything
+
+        # this serves one tree at a time, so we serve trunk (hg's in-repo
+        # branches, where a single tree holds all branches, are exercised by
+        # MercurialInRepo below).
+ self._hg_server = reactor.spawnProcess(self._pp, self.helper.vcexe, args,
+ os.environ,
+ self.helper.rep_trunk)
+ log.msg("waiting for hg serve to start")
+ done_d = defer.Deferred()
+ def poll():
+ d = client.getPage("http://localhost:%d/" % self.httpPort)
+ def success(res):
+ log.msg("hg serve appears to have started")
+ self._wait_for_server_poller.stop()
+ done_d.callback(None)
+ def ignore_connection_refused(f):
+ f.trap(error.ConnectionRefusedError)
+ d.addCallbacks(success, ignore_connection_refused)
+ d.addErrback(done_d.errback)
+ return d
+ self._wait_for_server_poller = task.LoopingCall(poll)
+ self._wait_for_server_poller.start(0.5, True)
+ return done_d
+
+ def tearDown(self):
+ if self._wait_for_server_poller:
+ if self._wait_for_server_poller.running:
+ self._wait_for_server_poller.stop()
+ if self._hg_server:
+ self._hg_server.loseConnection()
+ try:
+ self._hg_server.signalProcess("KILL")
+ except error.ProcessExitedAlready:
+ pass
+ self._hg_server = None
+ return VCBase.tearDown(self)
+
+ def tearDown2(self):
+ if self._pp:
+ return self._pp.wait
+
+ def testCheckoutHTTP(self):
+ d = self.serveHTTP()
+ def _started(res):
+ repourl = "http://localhost:%d/" % self.httpPort
+ self.helper.vcargs = { 'repourl': repourl }
+ return self.do_vctest(testRetry=False)
+ d.addCallback(_started)
+ return d
+
+ def testTry(self):
+ self.helper.vcargs = { 'baseURL': self.helper.hg_base + "/",
+ 'defaultBranch': "trunk" }
+ d = self.do_getpatch()
+ return d
+
+VCS.registerVC(Mercurial.vc_name, MercurialHelper())
+
+class MercurialInRepoHelper(MercurialHelper):
+ branchname = "the_branch"
+ try_branchname = "the_branch"
+
+
+ def createRepository(self):
+ self.createBasedir()
+ self.hg_base = os.path.join(self.repbase, "Mercurial-Repository")
+ self.repo = os.path.join(self.hg_base, "inrepobranch")
+ tmp = os.path.join(self.hg_base, "hgtmp")
+
+ os.makedirs(self.repo)
+ w = self.dovc(self.repo, "init")
+ yield w; w.getResult()
+
+ self.populate(tmp)
+ w = self.dovc(tmp, "init")
+ yield w; w.getResult()
+ w = self.dovc(tmp, "add")
+ yield w; w.getResult()
+ w = self.dovc(tmp, ['commit', '-m', 'initial_import'])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ['push', self.repo])
+ # note that hg-push does not actually update the working directory
+ yield w; w.getResult()
+ w = self.dovc(tmp, "identify")
+ yield w; out = w.getResult()
+ self.addTrunkRev(self.extract_id(out))
+
+ self.populate_branch(tmp)
+ w = self.dovc(tmp, ['branch', self.branchname])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ['commit', '-m', 'commit_on_branch'])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ['push', '-f', self.repo])
+ yield w; w.getResult()
+ w = self.dovc(tmp, "identify")
+ yield w; out = w.getResult()
+ self.addBranchRev(self.extract_id(out))
+ rmdirRecursive(tmp)
+ createRepository = deferredGenerator(createRepository)
+
+ def vc_revise(self):
+ tmp = os.path.join(self.hg_base, "hgtmp2")
+ w = self.dovc(self.hg_base, ['clone', self.repo, tmp])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ['update', '--clean', '--rev', 'default'])
+ yield w; w.getResult()
+
+ self.version += 1
+ version_c = VERSION_C % self.version
+ version_c_filename = os.path.join(tmp, "version.c")
+ open(version_c_filename, "w").write(version_c)
+ # hg uses timestamps to distinguish files which have changed, so we
+ # force the mtime forward a little bit
+ future = time.time() + 2*self.version
+ os.utime(version_c_filename, (future, future))
+ w = self.dovc(tmp, ['commit', '-m', 'revised_to_%d' % self.version])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ['push', '--force', self.repo])
+ yield w; w.getResult()
+ w = self.dovc(tmp, "identify")
+ yield w; out = w.getResult()
+ self.addTrunkRev(self.extract_id(out))
+ rmdirRecursive(tmp)
+ vc_revise = deferredGenerator(vc_revise)
+
+ def vc_try_checkout(self, workdir, rev, branch=None):
+ assert os.path.abspath(workdir) == workdir
+ if os.path.exists(workdir):
+ rmdirRecursive(workdir)
+ w = self.dovc(self.hg_base, ['clone', self.repo, workdir])
+ yield w; w.getResult()
+ if not branch: branch = "default"
+ w = self.dovc(workdir, ['update', '--clean', '--rev', branch ])
+ yield w; w.getResult()
+
+ try_c_filename = os.path.join(workdir, "subdir", "subdir.c")
+ open(try_c_filename, "w").write(TRY_C)
+ future = time.time() + 2*self.version
+ os.utime(try_c_filename, (future, future))
+ vc_try_checkout = deferredGenerator(vc_try_checkout)
+
+ def vc_try_finish(self, workdir):
+ rmdirRecursive(workdir)
+
+
+class MercurialInRepo(Mercurial):
+ vc_name = 'MercurialInRepo'
+
+ def default_args(self):
+ return { 'repourl': self.helper.repo,
+ 'branchType': 'inrepo',
+ 'defaultBranch': 'default' }
+
+ def testCheckout(self):
+ self.helper.vcargs = self.default_args()
+ d = self.do_vctest(testRetry=False)
+
+ # TODO: testRetry has the same problem with Mercurial as it does for
+ # Arch
+ return d
+
+ def testPatch(self):
+ self.helper.vcargs = self.default_args()
+ d = self.do_patch()
+ return d
+
+ def testCheckoutBranch(self):
+ self.helper.vcargs = self.default_args()
+ d = self.do_branch()
+ return d
+
+ def serveHTTP(self):
+ # the easiest way to publish hg over HTTP is by running 'hg serve' as
+ # a child process while the test is running. (you can also use a CGI
+ # script, which sounds difficult, or you can publish the files
+ # directly, which isn't well documented).
+
+ # grr.. 'hg serve' doesn't let you use --port=0 to mean "pick a free
+ # port", instead it uses it as a signal to use the default (port
+ # 8000). This means there is no way to make it choose a free port, so
+ # we are forced to make it use a statically-defined one, making it
+ # harder to avoid collisions.
+ self.httpPort = 8300 + (os.getpid() % 200)
+ args = [self.helper.vcexe,
+ "serve", "--port", str(self.httpPort), "--verbose"]
+
+ # in addition, hg doesn't flush its stdout, so we can't wait for the
+ # "listening at" message to know when it's safe to start the test.
+        # Instead, poll every half-second until a getPage succeeds.
+
+ self._pp = MercurialServerPP() # logs+discards everything
+        # unlike the plain Mercurial tests, this serves the single
+        # in-repo-branch repository, which holds all branches in one tree.
+ self._hg_server = reactor.spawnProcess(self._pp, self.helper.vcexe, args,
+ os.environ,
+ self.helper.repo)
+ log.msg("waiting for hg serve to start")
+ done_d = defer.Deferred()
+ def poll():
+ d = client.getPage("http://localhost:%d/" % self.httpPort)
+ def success(res):
+ log.msg("hg serve appears to have started")
+ self._wait_for_server_poller.stop()
+ done_d.callback(None)
+ def ignore_connection_refused(f):
+ f.trap(error.ConnectionRefusedError)
+ d.addCallbacks(success, ignore_connection_refused)
+ d.addErrback(done_d.errback)
+ return d
+ self._wait_for_server_poller = task.LoopingCall(poll)
+ self._wait_for_server_poller.start(0.5, True)
+ return done_d
+
+ def tearDown(self):
+ if self._wait_for_server_poller:
+ if self._wait_for_server_poller.running:
+ self._wait_for_server_poller.stop()
+ if self._hg_server:
+ self._hg_server.loseConnection()
+ try:
+ self._hg_server.signalProcess("KILL")
+ except error.ProcessExitedAlready:
+ pass
+ self._hg_server = None
+ return VCBase.tearDown(self)
+
+ def tearDown2(self):
+ if self._pp:
+ return self._pp.wait
+
+ def testCheckoutHTTP(self):
+ d = self.serveHTTP()
+ def _started(res):
+ repourl = "http://localhost:%d/" % self.httpPort
+ self.helper.vcargs = self.default_args()
+ self.helper.vcargs['repourl'] = repourl
+ return self.do_vctest(testRetry=False)
+ d.addCallback(_started)
+ return d
+
+ def testTry(self):
+ self.helper.vcargs = self.default_args()
+ d = self.do_getpatch()
+ return d
+
+VCS.registerVC(MercurialInRepo.vc_name, MercurialInRepoHelper())
+
+
+class GitHelper(BaseHelper):
+ branchname = "branch"
+ try_branchname = "branch"
+
+ def capable(self):
+ gitpaths = which('git')
+ if not gitpaths:
+ return (False, "GIT is not installed")
+ d = utils.getProcessOutput(gitpaths[0], ["--version"], env=os.environ)
+ d.addCallback(self._capable, gitpaths[0])
+ return d
+
+ def _capable(self, v, vcexe):
+ try:
+ m = re.search(r'\b(\d+)\.(\d+)', v)
+
+ if not m:
+ raise Exception, 'no regex match'
+
+ ver = tuple([int(num) for num in m.groups()])
+
+ # git-1.1.3 (as shipped with Dapper) doesn't understand 'git
+ # init' (it wants 'git init-db'), and fails unit tests that
+ # involve branches. git-1.5.3.6 (on my debian/unstable system)
+ # works. I don't know where the dividing point is: if someone can
+ # figure it out (or figure out how to make buildbot support more
+ # versions), please update this check.
+ if ver < (1, 2):
+ return (False, "Found git (%s) but it is older than 1.2.x" % vcexe)
+
+ except Exception, e:
+ log.msg("couldn't identify git version number in output:")
+ log.msg("'''%s'''" % v)
+ log.msg("because: %s" % e)
+ log.msg("skipping tests")
+ return (False,
+ "Found git (%s) but couldn't identify its version from '%s'" % (vcexe, v))
+
+ self.vcexe = vcexe
+ return (True, None)
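+    # Example of the version check above (output format assumed): for
+    #     git version 1.5.3.6
+    # the regex yields (1, 5), which is not older than the 1.2 cutoff, so the
+    # helper reports git as usable.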
+
+ def createRepository(self):
+ self.createBasedir()
+ self.gitrepo = os.path.join(self.repbase,
+ "GIT-Repository")
+ tmp = os.path.join(self.repbase, "gittmp")
+
+ env = os.environ.copy()
+ env['GIT_DIR'] = self.gitrepo
+ w = self.dovc(self.repbase, "init", env=env)
+ yield w; w.getResult()
+
+ self.populate(tmp)
+ w = self.dovc(tmp, "init")
+ yield w; w.getResult()
+ w = self.dovc(tmp, ["add", "."])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ["config", "user.email", "buildbot-trial@localhost"])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ["config", "user.name", "Buildbot Trial"])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ["commit", "-m", "initial_import"])
+ yield w; w.getResult()
+
+ w = self.dovc(tmp, ["checkout", "-b", self.branchname])
+ yield w; w.getResult()
+ self.populate_branch(tmp)
+ w = self.dovc(tmp, ["commit", "-a", "-m", "commit_on_branch"])
+ yield w; w.getResult()
+
+ w = self.dovc(tmp, ["rev-parse", "master", self.branchname])
+ yield w; out = w.getResult()
+ revs = out.splitlines()
+ self.addTrunkRev(revs[0])
+ self.addBranchRev(revs[1])
+
+ w = self.dovc(tmp, ["push", self.gitrepo, "master", self.branchname])
+ yield w; w.getResult()
+
+ rmdirRecursive(tmp)
+ createRepository = deferredGenerator(createRepository)
+
+ def vc_revise(self):
+ tmp = os.path.join(self.repbase, "gittmp")
+ rmdirRecursive(tmp)
+ log.msg("vc_revise" + self.gitrepo)
+ w = self.dovc(self.repbase, ["clone", self.gitrepo, "gittmp"])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ["config", "user.email", "buildbot-trial@localhost"])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ["config", "user.name", "Buildbot Trial"])
+ yield w; w.getResult()
+
+ self.version += 1
+ version_c = VERSION_C % self.version
+ open(os.path.join(tmp, "version.c"), "w").write(version_c)
+
+ w = self.dovc(tmp, ["commit", "-m", "revised_to_%d" % self.version,
+ "version.c"])
+ yield w; w.getResult()
+ w = self.dovc(tmp, ["rev-parse", "master"])
+ yield w; out = w.getResult()
+ self.addTrunkRev(out.strip())
+
+ w = self.dovc(tmp, ["push", self.gitrepo, "master"])
+ yield w; out = w.getResult()
+ rmdirRecursive(tmp)
+ vc_revise = deferredGenerator(vc_revise)
+
+ def vc_try_checkout(self, workdir, rev, branch=None):
+ assert os.path.abspath(workdir) == workdir
+ if os.path.exists(workdir):
+ rmdirRecursive(workdir)
+
+ w = self.dovc(self.repbase, ["clone", self.gitrepo, workdir])
+ yield w; w.getResult()
+ w = self.dovc(workdir, ["config", "user.email", "buildbot-trial@localhost"])
+ yield w; w.getResult()
+ w = self.dovc(workdir, ["config", "user.name", "Buildbot Trial"])
+ yield w; w.getResult()
+
+ if branch is not None:
+ w = self.dovc(workdir, ["checkout", "-b", branch,
+ "origin/%s" % branch])
+ yield w; w.getResult()
+
+            # Hmm... why does nobody else bother to check out the correct
+ # revision?
+ w = self.dovc(workdir, ["reset", "--hard", rev])
+ yield w; w.getResult()
+
+ try_c_filename = os.path.join(workdir, "subdir", "subdir.c")
+ open(try_c_filename, "w").write(TRY_C)
+ vc_try_checkout = deferredGenerator(vc_try_checkout)
+
+ def vc_try_finish(self, workdir):
+ rmdirRecursive(workdir)
+
+class Git(VCBase, unittest.TestCase):
+ vc_name = "git"
+
+ # No 'export' mode yet...
+ # metadir = ".git"
+ vctype = "source.Git"
+ vctype_try = "git"
+ has_got_revision = True
+
+ def testCheckout(self):
+ self.helper.vcargs = { 'repourl': self.helper.gitrepo }
+ d = self.do_vctest()
+ return d
+
+ def testPatch(self):
+ self.helper.vcargs = { 'repourl': self.helper.gitrepo,
+ 'branch': "master" }
+ d = self.do_patch()
+ return d
+
+ def testCheckoutBranch(self):
+ self.helper.vcargs = { 'repourl': self.helper.gitrepo,
+ 'branch': "master" }
+ d = self.do_branch()
+ return d
+
+ def testTry(self):
+ self.helper.vcargs = { 'repourl': self.helper.gitrepo,
+ 'branch': "master" }
+ d = self.do_getpatch()
+ return d
+
+VCS.registerVC(Git.vc_name, GitHelper())
+
+
+class Sources(unittest.TestCase):
+ # TODO: this needs serious rethink
+ def makeChange(self, when=None, revision=None):
+ if when:
+ when = mktime_tz(parsedate_tz(when))
+ return changes.Change("fred", [], "", when=when, revision=revision)
+
+ def testCVS1(self):
+ r = base.BuildRequest("forced build", SourceStamp(), 'test_builder')
+ b = base.Build([r])
+ s = source.CVS(cvsroot=None, cvsmodule=None)
+ s.setBuild(b)
+ self.failUnlessEqual(s.computeSourceRevision(b.allChanges()), None)
+
+ def testCVS2(self):
+ c = []
+ c.append(self.makeChange("Wed, 08 Sep 2004 09:00:00 -0700"))
+ c.append(self.makeChange("Wed, 08 Sep 2004 09:01:00 -0700"))
+ c.append(self.makeChange("Wed, 08 Sep 2004 09:02:00 -0700"))
+ r = base.BuildRequest("forced", SourceStamp(changes=c), 'test_builder')
+ submitted = "Wed, 08 Sep 2004 09:04:00 -0700"
+ r.submittedAt = mktime_tz(parsedate_tz(submitted))
+ b = base.Build([r])
+ s = source.CVS(cvsroot=None, cvsmodule=None)
+ s.setBuild(b)
+ self.failUnlessEqual(s.computeSourceRevision(b.allChanges()),
+ "Wed, 08 Sep 2004 16:03:00 -0000")
+
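+    # Working out the expected values in testCVS2/testCVS3 from the test data
+    # alone: the newest change is at 09:02:00 -0700, i.e. 16:02:00 UTC.
+    # testCVS2 (no checkoutDelay) expects a checkout stamp 60 seconds later,
+    # 16:03:00 -0000, while testCVS3 (checkoutDelay=10) expects 16:02:10, so
+    # what varies is the delay added after the last change.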
+ def testCVS3(self):
+ c = []
+ c.append(self.makeChange("Wed, 08 Sep 2004 09:00:00 -0700"))
+ c.append(self.makeChange("Wed, 08 Sep 2004 09:01:00 -0700"))
+ c.append(self.makeChange("Wed, 08 Sep 2004 09:02:00 -0700"))
+ r = base.BuildRequest("forced", SourceStamp(changes=c), 'test_builder')
+ submitted = "Wed, 08 Sep 2004 09:04:00 -0700"
+ r.submittedAt = mktime_tz(parsedate_tz(submitted))
+ b = base.Build([r])
+ s = source.CVS(cvsroot=None, cvsmodule=None, checkoutDelay=10)
+ s.setBuild(b)
+ self.failUnlessEqual(s.computeSourceRevision(b.allChanges()),
+ "Wed, 08 Sep 2004 16:02:10 -0000")
+
+ def testCVS4(self):
+ c = []
+ c.append(self.makeChange("Wed, 08 Sep 2004 09:00:00 -0700"))
+ c.append(self.makeChange("Wed, 08 Sep 2004 09:01:00 -0700"))
+ c.append(self.makeChange("Wed, 08 Sep 2004 09:02:00 -0700"))
+ r1 = base.BuildRequest("forced", SourceStamp(changes=c), 'test_builder')
+ submitted = "Wed, 08 Sep 2004 09:04:00 -0700"
+ r1.submittedAt = mktime_tz(parsedate_tz(submitted))
+
+ c = []
+ c.append(self.makeChange("Wed, 08 Sep 2004 09:05:00 -0700"))
+ r2 = base.BuildRequest("forced", SourceStamp(changes=c), 'test_builder')
+ submitted = "Wed, 08 Sep 2004 09:07:00 -0700"
+ r2.submittedAt = mktime_tz(parsedate_tz(submitted))
+
+ b = base.Build([r1, r2])
+ s = source.CVS(cvsroot=None, cvsmodule=None)
+ s.setBuild(b)
+ self.failUnlessEqual(s.computeSourceRevision(b.allChanges()),
+ "Wed, 08 Sep 2004 16:06:00 -0000")
+
+ def testSVN1(self):
+ r = base.BuildRequest("forced", SourceStamp(), 'test_builder')
+ b = base.Build([r])
+ s = source.SVN(svnurl="dummy")
+ s.setBuild(b)
+ self.failUnlessEqual(s.computeSourceRevision(b.allChanges()), None)
+
+ def testSVN2(self):
+ c = []
+ c.append(self.makeChange(revision=4))
+ c.append(self.makeChange(revision=10))
+ c.append(self.makeChange(revision=67))
+ r = base.BuildRequest("forced", SourceStamp(changes=c), 'test_builder')
+ b = base.Build([r])
+ s = source.SVN(svnurl="dummy")
+ s.setBuild(b)
+ self.failUnlessEqual(s.computeSourceRevision(b.allChanges()), 67)
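+        # i.e. for SVN the computed source revision is simply the newest of
+        # the change revisions (67 here), rather than a timestamp.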
+
+class Patch(VCBase, unittest.TestCase):
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def testPatch(self):
+ # invoke 'patch' all by itself, to see if it works the way we think
+        # it should. This is intended to ferret out some Windows test
+ # failures.
+ helper = BaseHelper()
+ self.workdir = os.path.join("test_vc", "testPatch")
+ helper.populate(self.workdir)
+ patch = which("patch")[0]
+
+ command = [patch, "-p0"]
+ class FakeBuilder:
+ usePTY = False
+ def sendUpdate(self, status):
+ pass
+ c = commands.ShellCommand(FakeBuilder(), command, self.workdir,
+ sendRC=False, initialStdin=p0_diff)
+ d = c.start()
+ d.addCallback(self._testPatch_1)
+ return d
+
+ def _testPatch_1(self, res):
+ # make sure the file actually got patched
+ subdir_c = os.path.join(self.workdir, "subdir", "subdir.c")
+ data = open(subdir_c, "r").read()
+ self.failUnlessIn("Hello patched subdir.\\n", data)
diff --git a/buildbot/buildbot/test/test_web.py b/buildbot/buildbot/test/test_web.py
new file mode 100644
index 0000000..0f353d8
--- /dev/null
+++ b/buildbot/buildbot/test/test_web.py
@@ -0,0 +1,594 @@
+# -*- test-case-name: buildbot.test.test_web -*-
+
+import os, time, shutil
+from HTMLParser import HTMLParser
+from twisted.python import components
+
+from twisted.trial import unittest
+from buildbot.test.runutils import RunMixin
+
+from twisted.internet import reactor, defer, protocol
+from twisted.internet.interfaces import IReactorUNIX
+from twisted.web import client
+
+from buildbot import master, interfaces, sourcestamp
+from buildbot.status import html, builder
+from buildbot.status.web import waterfall
+from buildbot.changes.changes import Change
+from buildbot.process import base
+from buildbot.process.buildstep import BuildStep
+from buildbot.test.runutils import setupBuildStepStatus
+
+class ConfiguredMaster(master.BuildMaster):
+ """This BuildMaster variant has a static config file, provided as a
+ string when it is created."""
+
+ def __init__(self, basedir, config):
+ self.config = config
+ master.BuildMaster.__init__(self, basedir)
+
+ def loadTheConfigFile(self):
+ self.loadConfig(self.config)
+
+components.registerAdapter(master.Control, ConfiguredMaster,
+ interfaces.IControl)
+
+
+base_config = """
+from buildbot.changes.pb import PBChangeSource
+from buildbot.status import html
+from buildbot.buildslave import BuildSlave
+from buildbot.scheduler import Scheduler
+from buildbot.process.factory import BuildFactory
+
+BuildmasterConfig = c = {
+ 'change_source': PBChangeSource(),
+ 'slaves': [BuildSlave('bot1name', 'bot1passwd')],
+ 'schedulers': [Scheduler('name', None, 60, ['builder1'])],
+ 'builders': [{'name': 'builder1', 'slavename': 'bot1name',
+ 'builddir': 'builder1', 'factory': BuildFactory()}],
+ 'slavePortnum': 0,
+ }
+"""
+
+
+
+class DistribUNIX:
+ def __init__(self, unixpath):
+ from twisted.web import server, resource, distrib
+ root = resource.Resource()
+ self.r = r = distrib.ResourceSubscription("unix", unixpath)
+ root.putChild('remote', r)
+ self.p = p = reactor.listenTCP(0, server.Site(root))
+ self.portnum = p.getHost().port
+ def shutdown(self):
+ d = defer.maybeDeferred(self.p.stopListening)
+ return d
+
+class DistribTCP:
+ def __init__(self, port):
+ from twisted.web import server, resource, distrib
+ root = resource.Resource()
+ self.r = r = distrib.ResourceSubscription("localhost", port)
+ root.putChild('remote', r)
+ self.p = p = reactor.listenTCP(0, server.Site(root))
+ self.portnum = p.getHost().port
+ def shutdown(self):
+ d = defer.maybeDeferred(self.p.stopListening)
+ d.addCallback(self._shutdown_1)
+ return d
+ def _shutdown_1(self, res):
+ return self.r.publisher.broker.transport.loseConnection()
+
+class SlowReader(protocol.Protocol):
+ didPause = False
+ count = 0
+ data = ""
+ def __init__(self, req):
+ self.req = req
+ self.d = defer.Deferred()
+ def connectionMade(self):
+ self.transport.write(self.req)
+ def dataReceived(self, data):
+ self.data += data
+ self.count += len(data)
+ if not self.didPause and self.count > 10*1000:
+ self.didPause = True
+ self.transport.pauseProducing()
+ reactor.callLater(2, self.resume)
+ def resume(self):
+ self.transport.resumeProducing()
+ def connectionLost(self, why):
+ self.d.callback(None)
+
+class CFactory(protocol.ClientFactory):
+ def __init__(self, p):
+ self.p = p
+ def buildProtocol(self, addr):
+ self.p.factory = self
+ return self.p
+
+def stopHTTPLog():
+ # grr.
+ from twisted.web import http
+ http._logDateTimeStop()
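+    # presumably this cancels twisted.web.http's cached log-date updater so
+    # trial does not complain about a lingering DelayedCall after the test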
+
+class BaseWeb:
+ master = None
+
+ def failUnlessIn(self, substr, string, note=None):
+ self.failUnless(string.find(substr) != -1, note)
+
+ def tearDown(self):
+ stopHTTPLog()
+ if self.master:
+ d = self.master.stopService()
+ return d
+
+ def find_webstatus(self, master):
+ for child in list(master):
+ if isinstance(child, html.WebStatus):
+ return child
+
+ def find_waterfall(self, master):
+ for child in list(master):
+ if isinstance(child, html.Waterfall):
+ return child
+
+class Ports(BaseWeb, unittest.TestCase):
+
+ def test_webPortnum(self):
+ # run a regular web server on a TCP socket
+ config = base_config + "c['status'] = [html.WebStatus(http_port=0)]\n"
+ os.mkdir("test_web1")
+ self.master = m = ConfiguredMaster("test_web1", config)
+ m.startService()
+ # hack to find out what randomly-assigned port it is listening on
+ port = self.find_webstatus(m).getPortnum()
+
+ d = client.getPage("http://localhost:%d/waterfall" % port)
+ def _check(page):
+ #print page
+ self.failUnless(page)
+ d.addCallback(_check)
+ return d
+ test_webPortnum.timeout = 10
+
+ def test_webPathname(self):
+ # running a t.web.distrib server over a UNIX socket
+ if not IReactorUNIX.providedBy(reactor):
+ raise unittest.SkipTest("UNIX sockets not supported here")
+ config = (base_config +
+ "c['status'] = [html.WebStatus(distrib_port='.web-pb')]\n")
+ os.mkdir("test_web2")
+ self.master = m = ConfiguredMaster("test_web2", config)
+ m.startService()
+
+ p = DistribUNIX("test_web2/.web-pb")
+
+ d = client.getPage("http://localhost:%d/remote/waterfall" % p.portnum)
+ def _check(page):
+ self.failUnless(page)
+ d.addCallback(_check)
+ def _done(res):
+ d1 = p.shutdown()
+ d1.addCallback(lambda x: res)
+ return d1
+ d.addBoth(_done)
+ return d
+ test_webPathname.timeout = 10
+
+
+ def test_webPathname_port(self):
+ # running a t.web.distrib server over TCP
+ config = (base_config +
+ "c['status'] = [html.WebStatus(distrib_port=0)]\n")
+ os.mkdir("test_web3")
+ self.master = m = ConfiguredMaster("test_web3", config)
+ m.startService()
+ dport = self.find_webstatus(m).getPortnum()
+
+ p = DistribTCP(dport)
+
+ d = client.getPage("http://localhost:%d/remote/waterfall" % p.portnum)
+ def _check(page):
+ self.failUnlessIn("BuildBot", page)
+ d.addCallback(_check)
+ def _done(res):
+ d1 = p.shutdown()
+ d1.addCallback(lambda x: res)
+ return d1
+ d.addBoth(_done)
+ return d
+ test_webPathname_port.timeout = 10
+
+
+class Waterfall(BaseWeb, unittest.TestCase):
+ def test_waterfall(self):
+ os.mkdir("test_web4")
+ os.mkdir("my-maildir"); os.mkdir("my-maildir/new")
+ self.robots_txt = os.path.abspath(os.path.join("test_web4",
+ "robots.txt"))
+ self.robots_txt_contents = "User-agent: *\nDisallow: /\n"
+ f = open(self.robots_txt, "w")
+ f.write(self.robots_txt_contents)
+ f.close()
+ # this is the right way to configure the Waterfall status
+ config1 = base_config + """
+from buildbot.changes import mail
+c['change_source'] = mail.SyncmailMaildirSource('my-maildir')
+c['status'] = [html.Waterfall(http_port=0, robots_txt=%s)]
+""" % repr(self.robots_txt)
+
+ self.master = m = ConfiguredMaster("test_web4", config1)
+ m.startService()
+ port = self.find_waterfall(m).getPortnum()
+ self.port = port
+ # insert an event
+ m.change_svc.addChange(Change("user", ["foo.c"], "comments"))
+
+ d = client.getPage("http://localhost:%d/" % port)
+
+ def _check1(page):
+ self.failUnless(page)
+ self.failUnlessIn("current activity", page)
+ self.failUnlessIn("<html", page)
+ TZ = time.tzname[time.localtime()[-1]]
+ self.failUnlessIn("time (%s)" % TZ, page)
+
+ # phase=0 is really for debugging the waterfall layout
+ return client.getPage("http://localhost:%d/?phase=0" % self.port)
+ d.addCallback(_check1)
+
+ def _check2(page):
+ self.failUnless(page)
+ self.failUnlessIn("<html", page)
+
+ return client.getPage("http://localhost:%d/changes" % self.port)
+ d.addCallback(_check2)
+
+ def _check3(changes):
+ self.failUnlessIn("<li>Syncmail mailing list in maildir " +
+ "my-maildir</li>", changes)
+
+ return client.getPage("http://localhost:%d/robots.txt" % self.port)
+ d.addCallback(_check3)
+
+ def _check4(robotstxt):
+ self.failUnless(robotstxt == self.robots_txt_contents)
+ d.addCallback(_check4)
+
+ return d
+
+ test_waterfall.timeout = 10
+
+class WaterfallSteps(unittest.TestCase):
+
+ # failUnlessSubstring copied from twisted-2.1.0, because this helps us
+ # maintain compatibility with python2.2.
+ def failUnlessSubstring(self, substring, astring, msg=None):
+ """a python2.2-friendly assertion that substring is found in
+ astring; parameters follow the semantics of failUnlessIn.
+ """
+ if astring.find(substring) == -1:
+ raise self.failureException(msg or "%r not found in %r"
+ % (substring, astring))
+ return substring
+ assertSubstring = failUnlessSubstring
+
+ def test_urls(self):
+ s = setupBuildStepStatus("test_web.test_urls")
+ s.addURL("coverage", "http://coverage.example.org/target")
+ s.addURL("icon", "http://coverage.example.org/icon.png")
+ class FakeRequest:
+ prepath = []
+ postpath = []
+ def childLink(self, name):
+ return name
+ req = FakeRequest()
+ box = waterfall.IBox(s).getBox(req)
+ td = box.td()
+ e1 = '[<a href="http://coverage.example.org/target" class="BuildStep external">coverage</a>]'
+ self.failUnlessSubstring(e1, td)
+ e2 = '[<a href="http://coverage.example.org/icon.png" class="BuildStep external">icon</a>]'
+ self.failUnlessSubstring(e2, td)
+
+
+
+geturl_config = """
+from buildbot.status import html
+from buildbot.changes import mail
+from buildbot.process import factory
+from buildbot.steps import dummy
+from buildbot.scheduler import Scheduler
+from buildbot.changes.base import ChangeSource
+from buildbot.buildslave import BuildSlave
+s = factory.s
+
+class DiscardScheduler(Scheduler):
+ def addChange(self, change):
+ pass
+class DummyChangeSource(ChangeSource):
+ pass
+
+BuildmasterConfig = c = {}
+c['slaves'] = [BuildSlave('bot1', 'sekrit'), BuildSlave('bot2', 'sekrit')]
+c['change_source'] = DummyChangeSource()
+c['schedulers'] = [DiscardScheduler('discard', None, 60, ['b1'])]
+c['slavePortnum'] = 0
+c['status'] = [html.Waterfall(http_port=0)]
+
+f = factory.BuildFactory([s(dummy.RemoteDummy, timeout=1)])
+
+c['builders'] = [
+ {'name': 'b1', 'slavenames': ['bot1','bot2'],
+ 'builddir': 'b1', 'factory': f},
+ ]
+c['buildbotURL'] = 'http://dummy.example.org:8010/'
+
+"""
+
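+# GetURL checks Status.getURLForThing() against the buildbotURL configured
+# above, for the master itself, builders, changes, builds, steps, and logs.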
+class GetURL(RunMixin, unittest.TestCase):
+
+ def setUp(self):
+ RunMixin.setUp(self)
+ self.master.loadConfig(geturl_config)
+ self.master.startService()
+ d = self.connectSlave(["b1"])
+ return d
+
+ def tearDown(self):
+ stopHTTPLog()
+ return RunMixin.tearDown(self)
+
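+ # force a build on the named builder and return a Deferred that fires
+ # once that build has finished.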
+ def doBuild(self, buildername):
+ br = base.BuildRequest("forced", sourcestamp.SourceStamp(), 'test_builder')
+ d = br.waitUntilFinished()
+ self.control.getBuilder(buildername).requestBuild(br)
+ return d
+
+ def assertNoURL(self, target):
+ self.failUnlessIdentical(self.status.getURLForThing(target), None)
+
+ def assertURLEqual(self, target, expected):
+ got = self.status.getURLForThing(target)
+ full_expected = "http://dummy.example.org:8010/" + expected
+ self.failUnlessEqual(got, full_expected)
+
+ def testMissingBase(self):
+ noweb_config1 = geturl_config + "del c['buildbotURL']\n"
+ d = self.master.loadConfig(noweb_config1)
+ d.addCallback(self._testMissingBase_1)
+ return d
+ def _testMissingBase_1(self, res):
+ s = self.status
+ self.assertNoURL(s)
+ builder_s = s.getBuilder("b1")
+ self.assertNoURL(builder_s)
+
+ def testBase(self):
+ s = self.status
+ self.assertURLEqual(s, "")
+ builder_s = s.getBuilder("b1")
+ self.assertURLEqual(builder_s, "builders/b1")
+
+ def testChange(self):
+ s = self.status
+ c = Change("user", ["foo.c"], "comments")
+ self.master.change_svc.addChange(c)
+ # TODO: something more like s.getChanges(), requires IChange and
+ # an accessor in IStatus. The HTML page exists already, though
+ self.assertURLEqual(c, "changes/1")
+
+ def testBuild(self):
+ # run a build first so that there are status objects to look at.
+ s = self.status
+ d = self.doBuild("b1")
+ # maybe check IBuildSetStatus here?
+ d.addCallback(self._testBuild_1)
+ return d
+
+ def _testBuild_1(self, res):
+ s = self.status
+ builder_s = s.getBuilder("b1")
+ build_s = builder_s.getLastFinishedBuild()
+ self.assertURLEqual(build_s, "builders/b1/builds/0")
+ # no page for builder.getEvent(-1)
+ step = build_s.getSteps()[0]
+ self.assertURLEqual(step, "builders/b1/builds/0/steps/remote%20dummy")
+ # maybe page for build.getTestResults?
+ self.assertURLEqual(step.getLogs()[0],
+ "builders/b1/builds/0/steps/remote%20dummy/logs/0")
+
+
+
+class Logfile(BaseWeb, RunMixin, unittest.TestCase):
+ def setUp(self):
+ config = """
+from buildbot.status import html
+from buildbot.process.factory import BasicBuildFactory
+from buildbot.buildslave import BuildSlave
+f1 = BasicBuildFactory('cvsroot', 'cvsmodule')
+BuildmasterConfig = {
+ 'slaves': [BuildSlave('bot1', 'passwd1')],
+ 'schedulers': [],
+ 'builders': [{'name': 'builder1', 'slavename': 'bot1',
+ 'builddir':'workdir', 'factory':f1}],
+ 'slavePortnum': 0,
+ 'status': [html.WebStatus(http_port=0)],
+ }
+"""
+ if os.path.exists("test_logfile"):
+ shutil.rmtree("test_logfile")
+ os.mkdir("test_logfile")
+ self.master = m = ConfiguredMaster("test_logfile", config)
+ m.startService()
+ # hack to find out what randomly-assigned port it is listening on
+ port = self.find_webstatus(m).getPortnum()
+ self.port = port
+ # insert an event
+
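+ # build a fake finished build with a single 'setup' step that carries
+ # several logs (plain, HTML, large chunked, large single-chunk, and
+ # mixed-channel); the test_logfileN methods below fetch each of them
+ # over HTTP.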
+ req = base.BuildRequest("reason", sourcestamp.SourceStamp(), 'test_builder')
+ build1 = base.Build([req])
+ bs = m.status.getBuilder("builder1").newBuild()
+ bs.setReason("reason")
+ bs.buildStarted(build1)
+
+ step1 = BuildStep(name="setup")
+ step1.setBuild(build1)
+ bss = bs.addStepWithName("setup")
+ step1.setStepStatus(bss)
+ bss.stepStarted()
+
+ log1 = step1.addLog("output")
+ log1.addStdout("some stdout\n")
+ log1.finish()
+
+ log2 = step1.addHTMLLog("error", "<html>ouch</html>")
+
+ log3 = step1.addLog("big")
+ log3.addStdout("big log\n")
+ for i in range(1000):
+ log3.addStdout("a" * 500)
+ log3.addStderr("b" * 500)
+ log3.finish()
+
+ log4 = step1.addCompleteLog("bigcomplete",
+ "big2 log\n" + "a" * 1*1000*1000)
+
+ log5 = step1.addLog("mixed")
+ log5.addHeader("header content")
+ log5.addStdout("this is stdout content")
+ log5.addStderr("errors go here")
+ log5.addEntry(5, "non-standard content on channel 5")
+ log5.addStderr(" and some trailing stderr")
+
+ d = defer.maybeDeferred(step1.step_status.stepFinished,
+ builder.SUCCESS)
+ bs.buildFinished()
+ return d
+
+ def getLogPath(self, stepname, logname):
+ return ("/builders/builder1/builds/0/steps/%s/logs/%s" %
+ (stepname, logname))
+
+ def getLogURL(self, stepname, logname):
+ return ("http://localhost:%d" % self.port
+ + self.getLogPath(stepname, logname))
+
+ def test_logfile1(self):
+ d = client.getPage("http://localhost:%d/" % self.port)
+ def _check(page):
+ self.failUnless(page)
+ d.addCallback(_check)
+ return d
+
+ def test_logfile2(self):
+ logurl = self.getLogURL("setup", "output")
+ d = client.getPage(logurl)
+ def _check(logbody):
+ self.failUnless(logbody)
+ d.addCallback(_check)
+ return d
+
+ def test_logfile3(self):
+ logurl = self.getLogURL("setup", "output")
+ d = client.getPage(logurl + "/text")
+ def _check(logtext):
+ self.failUnlessEqual(logtext, "some stdout\n")
+ d.addCallback(_check)
+ return d
+
+ def test_logfile4(self):
+ logurl = self.getLogURL("setup", "error")
+ d = client.getPage(logurl)
+ def _check(logbody):
+ self.failUnlessEqual(logbody, "<html>ouch</html>")
+ d.addCallback(_check)
+ return d
+
+ def test_logfile5(self):
+ # this is log3, which is about 1MB in size, made up of alternating
+ # stdout/stderr chunks. buildbot-0.6.6, when run against
+ # twisted-1.3.0, fails to resume sending chunks after the client
+ # stalls for a few seconds, because of a recursive doWrite() call
+ # that was fixed in twisted-2.0.0
+ p = SlowReader("GET %s HTTP/1.0\r\n\r\n"
+ % self.getLogPath("setup", "big"))
+ cf = CFactory(p)
+ c = reactor.connectTCP("localhost", self.port, cf)
+ d = p.d
+ def _check(res):
+ self.failUnlessIn("big log", p.data)
+ self.failUnlessIn("a"*100, p.data)
+ self.failUnless(p.count > 1*1000*1000)
+ d.addCallback(_check)
+ return d
+
+ def test_logfile6(self):
+ # this is log4, which is about 1MB in size, one big chunk.
+ # buildbot-0.6.6 dies as the NetstringReceiver barfs on the
+ # saved logfile, because it was using one big chunk and exceeding
+ # NetstringReceiver.MAX_LENGTH
+ p = SlowReader("GET %s HTTP/1.0\r\n\r\n"
+ % self.getLogPath("setup", "bigcomplete"))
+ cf = CFactory(p)
+ c = reactor.connectTCP("localhost", self.port, cf)
+ d = p.d
+ def _check(res):
+ self.failUnlessIn("big2 log", p.data)
+ self.failUnlessIn("a"*100, p.data)
+ self.failUnless(p.count > 1*1000*1000)
+ d.addCallback(_check)
+ return d
+
+ def test_logfile7(self):
+ # this is log5, with mixed content on the three standard channels
+ # as well as on channel 5
+
+ class SpanParser(HTMLParser):
+ '''Parser subclass to gather all the log spans from the log page'''
+ def __init__(self, test):
+ self.spans = []
+ self.test = test
+ self.inSpan = False
+ HTMLParser.__init__(self)
+
+ def handle_starttag(self, tag, attrs):
+ if tag == 'span':
+ self.inSpan = True
+ cls = attrs[0]
+ self.test.failUnless(cls[0] == 'class')
+ self.spans.append([cls[1],''])
+
+ def handle_data(self, data):
+ if self.inSpan:
+ self.spans[-1][1] += data
+
+ def handle_endtag(self, tag):
+ if tag == 'span':
+ self.inSpan = False
+
+ logurl = self.getLogURL("setup", "mixed")
+ d = client.getPage(logurl, timeout=2)
+ def _check(logbody):
+ try:
+ p = SpanParser(self)
+ p.feed(logbody)
+ p.close()
+ except Exception, e:
+ print e
+ self.failUnlessEqual(len(p.spans), 4)
+ self.failUnlessEqual(p.spans[0][0], 'header')
+ self.failUnlessEqual(p.spans[0][1], 'header content')
+ self.failUnlessEqual(p.spans[1][0], 'stdout')
+ self.failUnlessEqual(p.spans[1][1], 'this is stdout content')
+ self.failUnlessEqual(p.spans[2][0], 'stderr')
+ self.failUnlessEqual(p.spans[2][1], 'errors go here')
+ self.failUnlessEqual(p.spans[3][0], 'stderr')
+ self.failUnlessEqual(p.spans[3][1], ' and some trailing stderr')
+ d.addCallback(_check)
+ return d
diff --git a/buildbot/buildbot/test/test_webparts.py b/buildbot/buildbot/test/test_webparts.py
new file mode 100644
index 0000000..71dd59e
--- /dev/null
+++ b/buildbot/buildbot/test/test_webparts.py
@@ -0,0 +1,141 @@
+
+import os
+from twisted.trial import unittest
+from twisted.internet import defer
+from twisted.web import client
+from twisted.web.error import Error as WebError
+from buildbot.slave.commands import rmdirRecursive
+from buildbot.status import html
+from test_web import BaseWeb, base_config, ConfiguredMaster
+from buildbot.scripts import runner
+
+class Webparts(BaseWeb, unittest.TestCase):
+
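+ # unlike BaseWeb.find_webstatus, this returns the full list of WebStatus
+ # children so that startMaster/reconfigMaster can index into it.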
+ def find_webstatus(self, master):
+ return filter(lambda child: isinstance(child, html.WebStatus),
+ list(master))
+
+ def startMaster(self, extraconfig):
+ config = base_config + extraconfig
+ rmdirRecursive("test_webparts")
+ os.mkdir("test_webparts")
+ runner.upgradeMaster({'basedir': "test_webparts",
+ 'quiet': True,
+ })
+ self.master = m = ConfiguredMaster("test_webparts", config)
+ m.startService()
+ # hack to find out what randomly-assigned port it is listening on
+ port = list(self.find_webstatus(m)[0])[0]._port.getHost().port
+ self.baseurl = "http://localhost:%d/" % port
+
+ def reconfigMaster(self, extraconfig):
+ config = base_config + extraconfig
+ d = self.master.loadConfig(config)
+ def _done(res):
+ m = self.master
+ port = list(self.find_webstatus(m)[0])[0]._port.getHost().port
+ self.baseurl = "http://localhost:%d/" % port
+ d.addCallback(_done)
+ return d
+
+ def getAndCheck(self, url, substring, show=False):
+ d = client.getPage(url)
+ def _show_weberror(why):
+ why.trap(WebError)
+ self.fail("error for %s: %s" % (url, why))
+ d.addErrback(_show_weberror)
+ d.addCallback(self._getAndCheck, substring, show)
+ return d
+ def _getAndCheck(self, page, substring, show):
+ if show:
+ print page
+ self.failUnlessIn(substring, page,
+ "Couldn't find substring '%s' in page:\n%s" %
+ (substring, page))
+
+ def testInit(self):
+ extraconfig = """
+from twisted.web import static
+ws = html.WebStatus(http_port=0)
+c['status'] = [ws]
+ws.putChild('child.html', static.Data('I am the child', 'text/plain'))
+"""
+ self.startMaster(extraconfig)
+ d = self.getAndCheck(self.baseurl + "child.html",
+ "I am the child")
+ return d
+ testInit.timeout = 10
+
+ def testStatic(self):
+ extraconfig = """
+from twisted.web import static
+ws = html.WebStatus(http_port=0)
+c['status'] = [ws]
+ws.putChild('child.html', static.Data('I am the child', 'text/plain'))
+"""
+ self.startMaster(extraconfig)
+ os.mkdir(os.path.join("test_webparts", "public_html", "subdir"))
+ f = open(os.path.join("test_webparts", "public_html", "foo.html"), "wt")
+ f.write("see me foo\n")
+ f.close()
+ f = open(os.path.join("test_webparts", "public_html", "subdir",
+ "bar.html"), "wt")
+ f.write("see me subdir/bar\n")
+ f.close()
+ d = self.getAndCheck(self.baseurl + "child.html", "I am the child")
+ d.addCallback(lambda res:
+ self.getAndCheck(self.baseurl+"foo.html",
+ "see me foo"))
+ d.addCallback(lambda res:
+ self.getAndCheck(self.baseurl+"subdir/bar.html",
+ "see me subdir/bar"))
+ return d
+
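+ # helper chained onto the page-test Deferreds: fetch baseurl+suburl and
+ # assert that the expected substring appears in the response body.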
+ def _check(self, res, suburl, substring, show=False):
+ d = self.getAndCheck(self.baseurl + suburl, substring, show)
+ return d
+
+ def testPages(self):
+ extraconfig = """
+ws = html.WebStatus(http_port=0)
+c['status'] = [ws]
+"""
+ self.startMaster(extraconfig)
+ d = defer.succeed(None)
+ d.addCallback(self._do_page_tests)
+ extraconfig2 = """
+ws = html.WebStatus(http_port=0, allowForce=True)
+c['status'] = [ws]
+"""
+ d.addCallback(lambda res: self.reconfigMaster(extraconfig2))
+ d.addCallback(self._do_page_tests)
+ return d
+
+ def _do_page_tests(self, res):
+ d = defer.succeed(None)
+ d.addCallback(self._check, "", "Welcome to the Buildbot")
+ d.addCallback(self._check, "waterfall", "current activity")
+ d.addCallback(self._check, "about", "Buildbot is a free software")
+ d.addCallback(self._check, "changes", "PBChangeSource listener")
+ d.addCallback(self._check, "buildslaves", "Build Slaves")
+ d.addCallback(self._check, "one_line_per_build",
+ "Last 20 finished builds")
+ d.addCallback(self._check, "one_box_per_builder", "Latest builds")
+ d.addCallback(self._check, "builders", "Builders")
+ d.addCallback(self._check, "builders/builder1", "Builder: builder1")
+ d.addCallback(self._check, "builders/builder1/builds", "") # dummy
+ # TODO: the pages beyond here would be great to test, but that would
+ # require causing a build to complete.
+ #d.addCallback(self._check, "builders/builder1/builds/1", "")
+ # it'd be nice to assert that the Build page has a "Stop Build" button
+ #d.addCallback(self._check, "builders/builder1/builds/1/steps", "")
+ #d.addCallback(self._check,
+ # "builders/builder1/builds/1/steps/compile", "")
+ #d.addCallback(self._check,
+ # "builders/builder1/builds/1/steps/compile/logs", "")
+ #d.addCallback(self._check,
+ # "builders/builder1/builds/1/steps/compile/logs/stdio","")
+ #d.addCallback(self._check,
+ # "builders/builder1/builds/1/steps/compile/logs/stdio/text", "")
+ return d
+